diff --git "a/992.jsonl" "b/992.jsonl" new file mode 100644--- /dev/null +++ "b/992.jsonl" @@ -0,0 +1,450 @@ +{"seq_id":"74508909046","text":"import os\nimport pickle\nfrom collections import defaultdict, Counter\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nfrom hashlib import sha256\nimport networkx as nx\nfrom nas_lib.visualize.visualize_close_domain_reverse import draw_plot_nasbench_nlp, draw_plot_nasbench_asr, \\\n draw_plot_nasbench_201, draw_plot_nasbench_101\nfrom nas_lib.data.data_nasbench_nlp import adj_distance as adj_dist_nlp\n\n\n# model_lists_nasbench = ['Random', 'EA', 'BANANAS', 'BANANAS_F', 'NPENAS-BO', 'NPENAS-NP', 'NPENAS-NP-NEW', 'ORACLE']\n# model_masks_nasbench = [True, True, True, True, True, True, True, True]\n\n# model_lists_nasbench = ['Random', 'REA', 'BANANAS', 'NPENAS-NP', 'ORACLE']\n# model_masks_nasbench = [True, True, True, True, True]\n\n# model_lists_nasbench = ['NPENAS-NP-10', 'NPENAS-NP-100', 'ORACLE']\n# model_masks_nasbench = [True, True, False]\n\nmodel_lists_nasbench = ['RELU', 'CELU', 'NPENAS-GT']\nmodel_masks_nasbench = [True, True, False]\n\n\n# model_lists_nasbench = ['NPENAS-NP-10']\n# model_masks_nasbench = [True]\n\noptions = {\"node_size\": 25, \"alpha\": 0.8}\n\npredictor_values = {0: 'RELU', 1: 'CELU'}\n# predictor_values = {0: 'SCALING FACTOR=10', 1: 'SCALING FACTOR=100'}\n\n\ndef get_kt_list(all_files):\n total_results = defaultdict(list)\n total_top_results = defaultdict(list)\n for f in all_files:\n with open(f, 'rb') as fb:\n full_data = pickle.load(fb)[2]\n for idx, data in enumerate(full_data):\n type = data['type']\n if type == 'rea' or type == 'oracle':\n pass\n else:\n type, final_data, kt_list, kt_top_list, mutate_list = data['type'], data['final_data'], \\\n data['kt_list'], data['kt_top_list'], data['mutate_list']\n # type_new = type.replace('_', '-').upper()\n type_new = ''\n # total_results[f'{type_new}{predictor_values[idx]}'].append(kt_list)\n total_results[f'{type_new}{idx}'].append(kt_list)\n total_top_results[f'{type_new}{idx}'].append(kt_top_list)\n # total_results[type].append(kt_list)\n return total_results, total_top_results\n\n\ndef mutate_rea_information(all_files):\n total_results = defaultdict(list)\n for f in all_files:\n G = nx.Graph()\n cmap = plt.cm.YlGn\n cmap2 = plt.cm.autumn\n with open(f, 'rb') as fb:\n full_data = pickle.load(fb)[2]\n p_dict = {}\n c_dict = defaultdict(list)\n for data in full_data:\n type = data['type']\n if type == 'rea':\n p_list, c_list = data['p_list'], data['c_list']\n for idx, p in enumerate(p_list):\n p_info = sha256(str(p[6]).encode('utf-8')).hexdigest()\n p_dict[p_info] = p\n c_dict[p_info].append(c_list[idx])\n key_list = list(c_dict.keys())\n p_key_list = []\n c_key_list = []\n p_performance_list = []\n c_performance_list = []\n edge_list = []\n edge_color_list = []\n dist_list = []\n p_key_dict = {}\n c_key_dict = {}\n counter = len(key_list)\n for idx, k in enumerate(key_list):\n p_key_dict[k] = idx\n for idx, c in enumerate(c_dict[k]):\n c_k = sha256(str(c[6]).encode('utf-8')).hexdigest()\n c_key_dict[c_k] = counter + idx\n dist = adj_dist_nlp(([], p_dict[k][1], p_dict[k][2]), ([], c[1], c[2]))\n dist_list.append(dist)\n counter = counter + len(c_dict[k])\n for idx, k in enumerate(key_list):\n parent = p_dict[k]\n children_list = c_dict[k]\n if len(children_list) >= 1:\n draw_graph(parent, children_list, p_key_list, c_key_list,\n p_key_dict, c_key_dict, edge_list, edge_color_list, p_performance_list,\n c_performance_list, G)\n parent_node_list = 
[]\n parent_node_color = []\n for k1 in key_list:\n for k2 in key_list:\n if k1 != k2:\n pair = (p_key_dict[k1], p_key_dict[k2])\n if pair not in parent_node_list:\n parent_node_list.append(pair)\n dist = adj_dist_nlp(([], p_dict[k1][1], p_dict[k1][2]), ([], p_dict[k2][1], p_dict[k2][2]))\n parent_node_color.append(dist)\n dist_list.append(dist)\n min_dist, max_dist = min(dist_list), max(dist_list)\n p_min, p_max = min(p_performance_list), max(p_performance_list)\n c_min, c_max = min(c_performance_list), max(c_performance_list)\n node_min, node_max = min(p_min, c_min), max(p_max, c_max)\n pos = nx.spring_layout(G) # positions for all nodes\n nx.draw_networkx_nodes(G, pos, nodelist=p_key_list, node_color=p_performance_list, cmap=cmap2,\n vmin=node_min, vmax=node_max, alpha=0.8, node_size=50)\n nx.draw_networkx_nodes(G, pos, nodelist=c_key_list, node_color=c_performance_list, cmap=cmap2, alpha=0.8,\n vmin=node_min, vmax=node_max, node_size=10)\n nx.draw_networkx_edges(G, pos, edgelist=edge_list, edge_color=edge_color_list, edge_vmin=min_dist,\n edge_vmax=max_dist, edge_cmap=cmap)\n # nx.draw_networkx_edges(G, pos, edgelist=parent_node_list, edge_color=parent_node_color, edge_vmin=min_dist,\n # edge_vmax=max_dist, edge_cmap=cmap)\n # nx.draw(G, with_labels=False, font_weight='bold', node_size=3)\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=min_dist, vmax=max_dist))\n sm2 = plt.cm.ScalarMappable(cmap=cmap2, norm=plt.Normalize(vmin=node_min, vmax=node_max))\n sm._A = []\n sm2._A = []\n plt.colorbar(sm)\n plt.colorbar(sm2)\n plt.show()\n return total_results\n\n\ndef mutate_information(all_files):\n total_results = defaultdict(list)\n for f in all_files:\n G = nx.Graph()\n cmap = plt.cm.YlGn\n with open(f, 'rb') as fb:\n full_data = pickle.load(fb)[2]\n p_dict = {}\n c_dict = defaultdict(list)\n for data in full_data:\n type = data['type']\n if type == 'rea' or type == 'oracle':\n continue\n else:\n type, final_data, kt_list, mutate_list = data['type'], data['final_data'], \\\n data['kt_list'], data['mutate_list']\n print(mutate_list)\n return total_results\n\n\ndef draw_graph(parent, child_list, p_key_list, c_key_list, p_key_dict, c_key_dict,\n edge_list, edge_color_list, p_performance_list,\n c_performance_list, graph):\n # add parent node\n p_key = sha256(str(parent[6]).encode('utf-8')).hexdigest()\n graph.add_node(p_key_dict[p_key])\n p_key_list.append(p_key_dict[p_key])\n p_performance_list.append(parent[4])\n for c in child_list:\n c_k = sha256(str(c[6]).encode('utf-8')).hexdigest()\n c_key_list.append(c_key_dict[c_k])\n graph.add_node(c_key_dict[c_k])\n graph.add_edge(p_key_dict[p_key], c_key_dict[c_k])\n edge_list.append((p_key_dict[p_key], c_key_dict[c_k]))\n dist = adj_dist_nlp(([], parent[1], parent[2]), ([], c[1], c[2]))\n edge_color_list.append(dist)\n c_performance_list.append(c[4])\n\n\ndef visual_kt_list(kt_results, results_folder, search_space, comparison_type=\"relu_celu\", train_dataset=\"cifar100\"):\n if comparison_type == \"relu_celu\":\n model_lists_nasbench = ['RELU', 'CELU', 'NPENAS-GT']\n model_masks_nasbench = [True, True, False]\n elif comparison_type == \"scale_factor\":\n model_lists_nasbench = ['SCALING FACTOR=#', 'SCALING FACTOR=*', 'NPENAS-GT']\n model_masks_nasbench = [True, True, False]\n else:\n raise ValueError(f\"comparison type {comparison_type} does not support at present!\")\n if comparison_type == \"scale_factor\":\n rate = get_rate(results_folder)\n key1 = \"SCALING FACTOR=\" + str(int(rate[0]))\n key2 = \"SCALING FACTOR=\" + 
str(int(rate[1]))\n kt_results_dict = {}\n kt_results_std_dict = {}\n\n for k, v in kt_results.items():\n np_v = np.nan_to_num(np.array(v))\n kt_results_dict[k] = np.mean(np_v, axis=0)\n kt_results_std_dict[k] = np.std(np_v, axis=0)\n\n if comparison_type == \"scale_factor\":\n kt_results_dict[key1] = kt_results_dict[\"0\"]\n kt_results_dict[key2] = kt_results_dict[\"1\"]\n kt_results_dict.pop(\"0\")\n kt_results_dict.pop(\"1\")\n\n if comparison_type == \"relu_celu\":\n kt_results_dict[\"RELU\"] = kt_results_dict[\"0\"]\n kt_results_dict[\"CELU\"] = kt_results_dict[\"1\"]\n kt_results_dict.pop(\"0\")\n kt_results_dict.pop(\"1\")\n\n if args.search_space == \"nasbench_101\":\n idx = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140])\n else:\n idx = np.array([10, 20, 30, 40, 50, 60, 70, 80, 90])\n fig, ax = plt.subplots(1)\n for k, v in kt_results_dict.items():\n plt.plot(idx, v, label=k, marker='s', linewidth=1, ms=3)\n ax.grid(False)\n fig.set_dpi(600.0)\n ax.set_xlabel('Number of Samples')\n ax.set_ylabel('Kendall Tau Correlation')\n plt.legend(loc='upper left')\n # plt.legend(loc='upper right')\n plt.show()\n if search_space == 'nasbench_nlp':\n draw_plot_nasbench_nlp(results_folder, draw_type='ERRORBAR', model_lists=model_lists_nasbench,\n model_masks=model_masks_nasbench, order=False, comparison_type=comparison_type)\n elif search_space == 'nasbench_asr':\n draw_plot_nasbench_asr(results_folder, draw_type='ERRORBAR', model_lists=model_lists_nasbench,\n model_masks=model_masks_nasbench, order=False, comparison_type=comparison_type)\n elif search_space == 'nasbench_201':\n # cifar10-valid, cifar100, ImageNet16-120\n draw_plot_nasbench_201(results_folder, draw_type='ERRORBAR', model_lists=model_lists_nasbench,\n model_masks=model_masks_nasbench, train_data=train_dataset, order=False,\n comparison_type=comparison_type)\n elif search_space == 'nasbench_101':\n pass\n\n\ndef get_rate(folder):\n files = os.listdir(folder)\n file_name = \"\"\n for f in files:\n if \"full\" not in f and \"log\" not in f:\n file_name = f\n break\n file_path = os.path.join(folder, file_name)\n with open(file_path, \"rb\") as fs:\n algorithm_params, metann_params, results, walltimes = pickle.load(fs)\n rate = [algorithm_params[0][\"rate\"], algorithm_params[1][\"rate\"]]\n return rate\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Args for visualize darts architecture')\n parser.add_argument('--search_space', type=str, default='nasbench_201',\n choices=['nasbench_101', 'nasbench_201', 'nasbench_nlp', 'nasbench_asr'],\n help='The algorithm output folder')\n parser.add_argument('--comparison_type', type=str, default='relu_celu',\n choices=['scale_factor', 'relu_celu'],\n help='The algorithm output folder')\n parser.add_argument('--save_dir', type=str,\n default='/home/albert_wei/Desktop/results/relu_celu/npenas_nasbench_201_cifar10_relu_celu/',\n help='The algorithm output folder')\n parser.add_argument('--train_data', type=str, default='cifar10-valid',\n choices=['cifar10-valid', 'cifar100', 'ImageNet16-120'],\n help='The evaluation of dataset of NASBench-201.')\n args = parser.parse_args()\n\n all_files = [os.path.join(args.save_dir, f) for f in os.listdir(args.save_dir) if 'full' in f]\n kt_total_results, kt_total_top_results = get_kt_list(all_files)\n visual_kt_list(kt_total_results, args.save_dir, args.search_space, comparison_type=args.comparison_type,\n train_dataset=args.train_data)\n # visual_kt_list(kt_total_top_results, args.save_dir, 
args.search_space)\n\n # mutate_rea_information(all_files)\n # mutate_information(all_files)","repo_name":"auroua/NPENASv1","sub_path":"tools_ss_analysis/predictor_analysis.py","file_name":"predictor_analysis.py","file_ext":"py","file_size_in_byte":12509,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"28250255460","text":"# Advent of Code, 2015\n# Day 24: It Hangs in the Balance\n# https://adventofcode.com/2015/day/24\n# https://github.com/v1neethnc/advent-of-code-solutions\n\n\nfrom itertools import combinations\nfrom functools import reduce\nfrom operator import mul\ndef entanglement_checker(data, groups):\n\tsize = sum(data) // groups\n\tfor i in range(len(data)):\n\t\tvals = [reduce(mul, comb) for comb in combinations(data, i) if sum(comb) == size]\n\t\tif len(vals) > 0:\n\t\t\treturn min(vals)\n\n\nwith open(\"2015_24.txt\") as file_data:\n\tdata = [int(i.strip()) for i in file_data.readlines()]\n\n\tprint(f\"Part A: {entanglement_checker(data, 3)}\")\n\tprint(f\"Part B: {entanglement_checker(data, 4)}\")","repo_name":"v1neethnc/advent-of-code-solutions","sub_path":"2015/2015_24.py","file_name":"2015_24.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42668235818","text":"from calendar import month\r\nfrom random import randint, random, sample\r\nimport mysql.connector\r\n\r\nfrom faker import Faker\r\n\r\nfaker = Faker()\r\n\r\ncerts = [\r\n\t\"Certified Business Analysis Professional (CBAP)\",\r\n\t\"APICS Certified Supply Chain Professional (CSCP)\",\r\n\t\"Certified Information Security Manager (CISM)\",\r\n\t\"Certified Information Systems Security Professional (CISSP)\",\r\n\t\"Certified Patient Care Technician (CPCT)\",\r\n\t\"Certified Clinical Medical Assistant (CCMA)\",\r\n\t\"Certified Commercial Investment Member (CCIM)\",\r\n\t\"Certified Legal Manager (CLM)\",\r\n\t\"Certified Business Economist (CBE)\",\r\n\t\"Certified Economic Developer (CED)\",\r\n\t\"First Aid Certification\",\r\n\t\"Notary Public Certification\",\r\n\t\"Certification in Full Stack Web Development\",\r\n\t\"Certification in Algorithms & Data Structures\",\r\n\t\"Coding Boot Camp\",\r\n\t\"Certified Pediatric Nurse (CPN)\",\r\n\t\"Certified Public Accountant (CPA)\",\r\n\t\"Certified ScrumMaster (CSM)\",\r\n\t\"Professional Educator License (PEL)\",\r\n]\r\n\r\n\r\ndef pop_certifications(connection):\r\n\t\r\n\tmycursor = connection.cursor()\r\n\t\r\n\tmycursor.execute(\"SELECT uemail FROM Student\")\r\n\t\r\n\tmyresult = mycursor.fetchall()\r\n\tcount = 0\r\n\t\r\n\tfor x in myresult:\r\n\t\tnum = randint(1,4)\r\n\t\temail = x[0]\r\n\t\tcrts = sample(certs, k=randint(0,4))\r\n\t\t\r\n\t\tfor certificate in crts:\r\n\t\t\t\r\n\t\t\tdate = str(faker.date_between(start_date=\"-4y\", end_date=\"today\"))\r\n\t\t\turl = certificate.replace(\" \", \"-\") + \".net/certificate/crt=\" + str(randint(10000, 20000))\r\n\t\t\tmySql_insert_query = \"INSERT INTO Certifications VALUES ('\"+date + \"','\" + certificate + \"','\" + url + \"','\" + email + \"')\"\r\n\t\t\tcursor = connection.cursor()\r\n\t\t\tcursor.execute(mySql_insert_query)\r\n\t\t\tconnection.commit()\r\n\t\t\tcursor.close()\r\n\t\t\tcount +=1\r\n\t\t\t\r\n\tprint(count, \"Record inserted successfully into Certifications 
table\")\r\n\r\n","repo_name":"Saiid2001/cmps277-db-project","sub_path":"database/pop_certs.py","file_name":"pop_certs.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"17298821682","text":"#!/usr/bin/env python\n\nimport rospy\nimport smach\nimport smach_ros\nimport time\nfrom pyrobot import Robot\n\nbase_config_dict={'base_controller': 'ilqr'}\nbot = Robot('locobot', base_config=base_config_dict)\n\n# define state GoHome\nclass GoHome(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['outcome1','outcome2'])\n self.counter = 0\n\n def execute(self, userdata):\n target_position = [0,0,0]\n rospy.loginfo('Executing state GoHome')\n #bot.arm.go_home()\n bot.base.go_to_absolute(target_position)\n if self.counter < 3:\n self.counter += 1\n return 'outcome1'\n else:\n return 'outcome2'\n\n# define state Move\nclass Move_straight(smach.State):\n\n def __init__(self):\n smach.State.__init__(self, outcomes=['outcome2'])\n\n def execute(self, userdata):\n target_position = [0.3,0,0]\n target_joints = [[0.408, 0.721, -0.471, -1.4, 0.920], [-0.675, 0, 0.23, 1, -0.70]]\n rospy.loginfo('Moving')\n for joint in target_joints:\n #bot.arm.set_joint_positions(joint, plan=False)\n bot.base.go_to_absolute(target_position)\n time.sleep(1)\n return 'outcome2'\n\n# define state Move\nclass Move_right(smach.State):\n\n def __init__(self):\n smach.State.__init__(self, outcomes=['outcome5'])\n\n def execute(self, userdata):\n target_position = [0.3,-0.3,-1.57]\n target_joints = [[0.408, 0.721, -0.471, -1.4, 0.920], [-0.675, 0, 0.23, 1, -0.70]]\n rospy.loginfo('Moving')\n for joint in target_joints:\n #bot.arm.set_joint_positions(joint, plan=False)\n bot.base.go_to_absolute(target_position)\n time.sleep(1)\n return 'outcome5'\n\nclass Move_left(smach.State):\n\n def __init__(self):\n smach.State.__init__(self, outcomes=['outcome6'])\n\n def execute(self, userdata):\n target_position = [0.3,0,0]\n target_joints = [[0.408, 0.721, -0.471, -1.4, 0.920], [-0.675, 0, 0.23, 1, -0.70]]\n rospy.loginfo('Moving')\n for joint in target_joints:\n #bot.arm.set_joint_positions(joint, plan=False)\n bot.base.go_to_absolute(target_position)\n time.sleep(1)\n return 'outcome6'\n\n# main\ndef main():\n #rospy.init_node('smach_example_state_machine')\n\n # Create a SMACH state machine\n sm = smach.StateMachine(outcomes=['outcome3', 'outcome4'])\n\n # Open the container\n with sm:\n # Add states to the container\n smach.StateMachine.add('GoHome', GoHome(), transitions={'outcome1':'Move_straight', 'outcome2':'outcome3'})\n smach.StateMachine.add('Move_straight', Move_straight(), transitions={'outcome2':'Move_right'})\n smach.StateMachine.add('Move_right', Move_right(), transitions={'outcome5':'Move_left'})\n smach.StateMachine.add('Move_left', Move_left(), transitions={'outcome6':'GoHome'})\n\n # Create and start the introspection server\n sis = smach_ros.IntrospectionServer('my_smach_introspection_server', sm, '/SM_ROOT')\n sis.start()\n\n # Execute SMACH plan\n outcome = sm.execute()\n\n # Wait for ctrl-c to stop the application\n rospy.spin()\n sis.stop()\n\nif __name__ == '__main__':\n main()\n","repo_name":"jinglinjackychen/smach","sub_path":"examples/smach_pyrobot.py","file_name":"smach_pyrobot.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9071566899","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Wed Jun 14 13:28:08 2017\r\n\r\n@author: mechd\r\n\"\"\"\r\n\r\ndef flow(vf1, vf2, rho1, rho2, T1, T2, delta, theta, d_theta, n, tol):\r\n# flow(521, 428, 0.161443, 0.942478, 2.51327, 25, 0.01)\r\n import numpy as np\r\n import math\r\n import matplotlib.pyplot as plt\r\n from mpl_toolkits.mplot3d import Axes3D\r\n import random\r\n# OSWPI(1.5, 1.273, 300, 1.4, 286.9, 0.161443, 1000000, 1e-3)\r\n# grid(0.942478, 2.51327, 25, 0.01)\r\n V1 = np.zeros((n*n*n, 5))\r\n V2 = np.zeros((n*n*n, 5))\r\n V2t = np.zeros(n*n*n)\r\n V = np.zeros((2*n*n*n, 5))\r\n kc = 0\r\n kd = 0\r\n kt1 = 0\r\n kt2 = 0\r\n for k in range(n):\r\n for j in range(n):\r\n for i in range(n):\r\n V1[kc, 0] = rho1\r\n V1[kc, 1] = vf1\r\n V1[kc, 2] = 0\r\n V1[kc, 3] = 0\r\n V1[kc, 4] = T1\r\n kc = kc+1\r\n for k in range(n):\r\n for j in range(n):\r\n m = 0\r\n for i in range(n):\r\n m = m + tol\r\n V2[kd, 0] = rho2\r\n V2[kd, 1] = vf2*np.cos(delta)\r\n V2[kd, 2] = vf2*np.sin(delta)\r\n# V2[kd, 1] = vf2*(np.cos(delta) - np.sin(delta)/np.tan(theta+m*d_theta))\r\n# V2[kd, 2] = vf2*np.sin(delta)/np.sin(theta+m*d_theta)\r\n V2[kd, 3] = 0\r\n V2[kd, 4] = T2\r\n V2t[kd] = np.sqrt(V2[kd, 0]**2 + V2[kd, 1]**2 + 2*V2[kd, 0]*V2[kd, 1]*np.cos(theta+m*d_theta))\r\n kd = kd+1\r\n for k in range(n):\r\n for j in range(n):\r\n for i in range(n):\r\n V[kt1, 0] = V1[kt2, 0]\r\n V[kt1, 1] = V1[kt2, 1]\r\n V[kt1, 2] = V1[kt2, 2]\r\n V[kt1, 3] = V1[kt2, 3]\r\n V[kt1, 4] = V1[kt2, 4]\r\n V[kt1+n, 0] = V2[kt2, 0]\r\n V[kt1+n, 1] = V2[kt2, 1]\r\n V[kt1+n, 2] = V2[kt2, 2]\r\n V[kt1+n, 3] = V2[kt2, 3]\r\n V[kt1+n, 4] = V2[kt2, 4]\r\n kt2 = kt2+1\r\n kt1 = kt1+1\r\n kt1 = kt1+n\r\n# print(V[2*15612:2*15624])\r\n np.savetxt('flowdata.o', V)\r\n return","repo_name":"kalagotla/python-scripts","sub_path":"Flowdata.py","file_name":"Flowdata.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42797710975","text":"from rest_framework import serializers\nfrom .models import ConfirmCode, User\nimport random\nimport string\nletters_and_digits = string.ascii_letters + string.digits\n\nclass LoginSerializer(serializers.Serializer):\n \"\"\"Сериалайзер для обработки авторизации и регистрации юзеров\"\"\"\n phone = serializers.CharField(max_length=255)\n referal = serializers.CharField(required=False, max_length=255)\n token = serializers.CharField(max_length=255, read_only=True)\n\n def validate(self, data):\n phone = data.get('phone', None)\n referal = data.get('referal', None)\n code = data.get('code', None)\n if phone is None:\n raise serializers.ValidationError(\n 'An phone number is required to log in.'\n )\n if not ConfirmCode.objects.filter(phone=phone).exists():\n raise serializers.ValidationError(\n 'Invalid code'\n )\n else:\n code = ConfirmCode.objects.get(phone=phone)\n code.delete()\n try:\n user = User.objects.get(phone=phone)\n except:\n link = ''.join(random.sample(letters_and_digits, 6))\n user = User(phone=phone, referal_link=link)\n if referal is not None:\n if not User.objects.filter(referal_link=referal).exists():\n raise serializers.ValidationError(\n \"nonexistent referal_link\"\n )\n user.referal = referal\n user.save()\n\n if not user.is_active:\n raise serializers.ValidationError(\n 'This user has been deactivated.'\n )\n\n token = user.token\n return {\n 'phone': user.phone,\n 'token': token,\n 'referal': user.referal_link\n }\n\n\nclass UserSerializer(serializers.Serializer):\n \"\"\"Сериалайзер для 
получения инфромации о профиле юзера\"\"\"\n phone = serializers.CharField(max_length=200)\n user_referals = serializers.DictField(allow_empty=True)\n referal = serializers.CharField(max_length=6)\n\nclass UserUpdateSerializer(serializers.Serializer):\n \"\"\"Сериалайзер для изменения юзером его информации\"\"\"\n phone = serializers.CharField(max_length=200)\n referal = serializers.CharField(max_length=6)\n def update(self, instance, validated_data):\n \"\"\" Выполняет обновление User. \"\"\"\n for key, value in validated_data.items():\n setattr(instance, key, value)\n instance.save()\n return instance\n\n","repo_name":"ITerekhov/referal_api","sub_path":"referal_api/authentication/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"3082585872","text":"from datetime import date\nfrom dateutil.relativedelta import relativedelta\nfrom collections import namedtuple\nfrom collections import namedtuple\nfrom io import BytesIO\nfrom flask import Blueprint, render_template, redirect, url_for, flash, make_response, abort\nfrom flaskr.forms.worklogs import WorkLogForm, WorkLogFormStaff\nfrom flaskr.services.worklogs import WorkLogService\nfrom flaskr.reports.worklogs import WorkLogReport\nfrom flaskr import app, db\nfrom flaskr.models import Person\nfrom flaskr.utils.datetime import date_x\nfrom flaskr.workers.worklogs import update_worklogs_value\nfrom flaskr.workers.performlogs import update_performlogs_enabled\nfrom flaskr.utils.roles import login_required_staff, login_required_person\n\nbp = Blueprint('worklogs', __name__, url_prefix=\"/worklogs\")\n\n@bp.route('//')\n@login_required_person\ndef index(id, yymm):\n person = Person.get_or_404(id)\n today = date_x.yymm_dd(yymm, 1)\n first = today\n last = first + relativedelta(months=1)\n prev = first - relativedelta(months=1)\n this = date.today()\n items = []\n while first.date < last.date:\n ym = first.date.strftime('%Y%m')\n d = first.date.day\n worklog = WorkLogService.get_or_new(id, ym, d)\n items.append(worklog)\n first += relativedelta(days=1)\n Foot = namedtuple('Foor', ('presented', 'value', 'break_t', 'over_t', 'absence', 'late', 'leave'))\n foot = Foot(\n len([i for i in items if i.presented]),\n sum([i.value for i in items if i.value is not None]),\n sum([i.break_t for i in items if i.break_t is not None]),\n sum([i.over_t for i in items if i.over_t is not None]),\n len([i for i in items if i.absence]),\n len([i for i in items if i.late]),\n len([i for i in items if i.leave]),\n )\n kw = dict(\n id = id,\n yymm = yymm,\n staff = person.staff,\n name = person.name,\n today = today.date,\n this = this.strftime('%Y%m'),\n prev = prev.date.strftime('%Y%m'),\n next = last.date.strftime('%Y%m'),\n items = items,\n foot = foot\n )\n return render_template('worklogs/index.pug', **kw)\n\n@bp.route('///
/edit', methods=['GET', 'POST'])\n@login_required_staff\ndef edit(id, yymm, dd):\n try:\n date_x.yymm_dd(yymm, dd)\n except ValueError:\n abort(400)\n person = Person.get_or_404(id)\n worklog = WorkLogService.get_or_new(id, yymm, dd)\n if person.staff:\n form = WorkLogFormStaff(obj=worklog)\n else:\n form = WorkLogForm(obj=worklog)\n if form.validate_on_submit():\n try:\n if person.staff:\n worklog.update_staff(form)\n else:\n worklog.update_no_staff(form)\n update_worklogs_value.delay(id, yymm, dd)\n if not person.staff:\n update_performlogs_enabled.delay(id, yymm)\n flash('勤怠の登録ができました', 'success')\n return redirect(url_for('worklogs.index', id=id, yymm=yymm))\n except Exception as e:\n db.session.rollback()\n flash('勤怠登録時にエラーが発生しました {}'.format(e), 'danger')\n app.logger.exception(e)\n kw = dict(\n id = id,\n yymm = yymm,\n dd = dd,\n form = form,\n item = worklog\n )\n return render_template('worklogs/edit.pug', **kw)\n\n@bp.route('///
/destroy')\n@login_required_staff\ndef destory(id, yymm, dd):\n worklog = WorkLogService.get_or_404((id, yymm, dd))\n try:\n worklog.delete()\n flash('勤怠の削除ができました', 'success')\n except ValueError as e:\n db.session.rollback()\n flash(str(e), 'danger')\n except Exception as e:\n db.session.rollback()\n flash('勤怠削除時にエラーが発生しました {}'.format(e), 'danger')\n app.logger.exception(e)\n return redirect(url_for('worklogs.index', id=id, yymm=yymm))\n\n@bp.route('///report')\n@login_required_person\ndef report(id, yymm):\n with BytesIO() as output:\n pdf = WorkLogReport(id, yymm)\n pdf(output)\n response = make_response(output.getvalue())\n response.mimetype = 'application/pdf'\n return response\n\n@bp.route('///update')\n@login_required_staff\ndef update(id, yymm):\n update_worklogs_value.delay(id, yymm)\n return redirect(url_for('worklogs.index', id=id, yymm=yymm))\n","repo_name":"abtoc/ofpp-app","sub_path":"flaskr/views/worklogs.py","file_name":"worklogs.py","file_ext":"py","file_size_in_byte":4443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21085518546","text":"import datetime\nimport hashlib\nimport json\nfrom flask import Flask, jsonify\n\n# Part 1 - Buiding a Blockchain\n\n\nclass Blockchain:\n\n def __init__(self):\n self.chain = []\n self.create_block(proof=1, prev_hash='0', data='gensis block')\n\n def create_block(self, proof, prev_hash, data):\n block = {'index': len(self.chain) + 1,\n 'data': data,\n 'timestamp': str(datetime.datetime.now()),\n 'proof': proof,\n 'prev_hash': prev_hash}\n\n self.chain.append(block)\n return block\n\n def get_prev_block(self):\n return self.chain[-1]\n\n def proof_of_work(self, prev_proof):\n new_proof = 1\n check_proof = False\n while check_proof is False:\n hash_operation = hashlib.sha256(\n str(new_proof ** 2 - prev_proof ** 2).encode()).hexdigest()\n\n if hash_operation[:4] == '0000':\n check_proof = True\n else:\n new_proof += 1\n\n return new_proof\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(encoded_block).hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['prev_hash'] != self.hash(previous_block):\n return False\n previous_proof = previous_block['proof']\n proof = block['proof']\n hash_operation = hashlib.sha256(\n str(proof ** 2 - previous_proof ** 2).encode()).hexdigest()\n\n if hash_operation[:4] != \"0000\":\n return False\n previous_block = block\n block_index + 1\n return True\n\n # Part 2 - Mining our Blockchain\n\n def mining(self, data):\n prev_block = self.get_prev_block()\n new_proof = self.proof_of_work(prev_block['proof'])\n prev_hash = self.hash(prev_block)\n print(\"prev_hash:\", prev_hash)\n self.create_block(new_proof, prev_hash, data)\n\n\nif __name__ == \"__main__\":\n\n ubc = Blockchain()\n ubc.mining(\"a buy a house.\")\n ubc.mining(\"b buy a car.\")\n\n for i in ubc.chain:\n print(i['data'])\n","repo_name":"caoxufeng/python-blockchain","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12485696069","text":"# -*- coding: utf-8 -*-\n\nfrom astropy import units as u\nimport numpy as np\nimport sunpy.map as mp\nfrom copy import deepcopy\n\n__all__ = [\"decompose_ang_len\", \"si_this_map_OLD\", \"si_this_map\"]\n\n\ndef 
decompose_ang_len(qua_input, **kwargs):\n \"\"\"\n Function to help decompose quantities that have an equivilence between angles\n and length, such as photospheric observational angles and object sizes.\n The function uses an equivilence to convert into either length or angle\n physical_type units.\n\n Parameters\n ----------\n\n qua_input : `astropy.units.quantity.Quantity`\n The quantity you wish decomposed.\n\n working_units : `astropy.units.quantity.Quantity`, optional\n Unit that will be used for internal working and the returned quantity.\n Ensure that it is of the correct physical type, angle or length.\n\n equivalencies : astropy equivilence,\n Equivilence used to relate the length and angle units.\n\n \"\"\"\n # Parameters\n working_units = kwargs.get('working_units', u.m) * 1.0\n equivalence = kwargs.get('equivalencies', u.dimensionless_angles())\n\n # Do nothing if the input is dimensionless.\n if qua_input.unit is u.Quantity(1.0).unit:\n return qua_input.decompose()\n else:\n # Components of the quantity\n value = qua_input.value\n length_unit = 0.0 * u.m\n length_exponent = 0.0\n angle_unit = 0.0 * u.radian\n angle_exponent = 0.0\n\n # If we have at least 1 base, populate from the first base\n if len(qua_input.unit.bases) > 0:\n if qua_input.unit.bases[0].physical_type is u.m.physical_type:\n length_unit = 1.0 * qua_input.unit.bases[0]\n length_exponent = qua_input.unit.powers[0]\n\n # convert to SI (meter here)\n length_unit = length_unit.to(u.m)\n elif qua_input.unit.bases[0].physical_type is u.radian.physical_type:\n angle_unit = 1.0 * qua_input.unit.bases[0]\n angle_exponent = qua_input.unit.powers[0]\n\n # Convert to SI (radian here)\n angle_unit = angle_unit.to(u.radian)\n\n # If we have 2 bases, populate from the second base\n if len(qua_input.unit.bases) > 1:\n if qua_input.unit.bases[1].physical_type is u.m.physical_type:\n length_unit = 1.0 * qua_input.unit.bases[1]\n length_exponent = qua_input.unit.powers[1]\n\n # Convert to SI (meter here)\n length_unit = length_unit.to(u.m)\n elif qua_input.unit.bases[1].physical_type is u.radian.physical_type:\n angle_unit = 1.0 * qua_input.unit.bases[1]\n angle_exponent = qua_input.unit.powers[1]\n\n # Convert to SI (radian here)\n angle_unit = angle_unit.to(u.radian)\n\n # Convert the incompatible base to the working units using the equivilence\n if working_units.unit.physical_type is u.m.physical_type:\n angle_unit = angle_unit.to(working_units, equivalencies=equivalence)\n\n # Strip out the units, so the output doesn't have squared lenth units\n #angle_unit = angle_unit.value # Kept in-case it causes bugs\n elif working_units.unit.physical_type is u.radian.physical_type:\n length_unit = length_unit.to(working_units, equivalencies=equivalence)\n\n # Strip out the units, so the output doesn't have squared length units\n #length_unit = length_unit.value # Kept in-case it causes bugs\n # The quantity to return\n quantity = value * length_unit ** length_exponent * angle_unit ** angle_exponent\n # Change to the working unit if not dimensionless\n if quantity.unit.physical_type is not (u.m / u.m).decompose().physical_type:\n quantity.to(working_units)\n return quantity.decompose()\n\n\ndef si_this_map_OLD(map):\n \"\"\"\n Basic function to create a deep copy of a map but with all units in SI.\n \"\"\"\n # Find out the value units and convert this and data to SI\n units = 1.0 * u.Unit(map.meta['bunit']).to(u.Tesla) * u.Tesla\n data = deepcopy(map.data) * units.value\n\n # ATM I don't convert the x-axis and y-axis to SI\n\n # Modify 
the map header to reflect all these changes\n meta = deepcopy(map.meta)\n meta['bunit'] = units.unit\n meta['datamax'] = data.max()\n meta['datamin'] = data.min()\n #meta['cdelt1'] = 0.504295 # Following modified if we convert x/y-axes\n #meta['cdelt2'] = 0.504295\n #meta['cunit1'] = 'arcsec'\n #meta['cunit2'] = 'arcsec'\n #meta['crpix1'] = data.shape[1] / 2.0 + 0.5, # central x-pixel\n #meta['crpix2'] = data.shape[0] / 2.0 + 0.5, # cnetral y-pixel\n #meta['CRVAL1'] = 0.000000\n #meta['CRVAL2'] = 0.000000\n\n # Return the modified map\n return mp.Map((data, meta))\n\n\ndef si_this_map(map):\n \"\"\"\n Basic function to create a deep copy of a map but with all units in SI.\n \"\"\"\n # Find out the value units and convert this and data to SI\n units = 1.0 * u.Unit(map.meta['bunit']).to(u.Tesla) * u.Tesla\n data = deepcopy(map.data) * units.value\n\n # Setup the arc to length equivilence\n obs_distance = map.dsun - map.rsun_meters\n radian_length = [ (u.radian, u.meter, lambda x: obs_distance * x, lambda x: x / obs_distance) ]\n\n # Convert the x-axis and y-axis to SI\n cdelt1 = (float(map.meta['cdelt1']) * u.Unit(map.meta['cunit1'])).to(u.meter, equivalencies=radian_length)\n cdelt2 = (float(map.meta['cdelt2']) * u.Unit(map.meta['cunit2'])).to(u.meter, equivalencies=radian_length)\n crpix1 = (float(map.meta['crpix1']) * u.Unit(map.meta['cunit1'])).to(u.meter, equivalencies=radian_length)\n crpix2 = (float(map.meta['crpix2']) * u.Unit(map.meta['cunit2'])).to(u.meter, equivalencies=radian_length)\n\n # Modify the map header to reflect all these changes\n meta = deepcopy(map.meta)\n meta['bunit'] = 'Tesla' #units.unit\n meta['datamax'] = data.max()\n meta['datamin'] = data.min()\n # Following modified if we convert x/y-axes\n meta['cdelt1'] = str(cdelt1.value)\n meta['cdelt2'] = str(cdelt2.value)\n meta['cunit1'] = str(cdelt1.unit)\n meta['cunit2'] = str(cdelt2.unit)\n meta['crpix1'] = str(crpix1.value)\n meta['crpix2'] = str(crpix2.value)\n #meta['CRVAL1'] = 0.000000 # Reference data coordinates\n #meta['CRVAL2'] = 0.000000\n\n # Return the modified map\n return mp.Map((data, meta))\n","repo_name":"sunpy/solarbextrapolation","sub_path":"solarbextrapolation/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"76"} +{"seq_id":"18537125591","text":"from board import Board\nfrom square import Square\nfrom mnkgame import MNKGame\nfrom agent import RandomAgent\nfrom searchAgents import SearchAgent\nfrom nnAgents import cnnAgent\n\n# This class stores settings for ALL the constituent parts for the pipeline\nclass Settings(object):\n def __init__(self):\n # Board parameters\n self.m = 7\n self.n = 6\n self.k = 5\n\n # Agent parameters. 
Specify different agent types here via constructor\n self.Xagent = RandomAgent(Square.X_HAS, self.m, self.n, self.k)\n self.Oagent = cnnAgent(Square.O_HAS, self.m, self.n, self.k)\n self.numGamesToEstimateValue = 5\n\n # Outermost loop parameters\n self.numGamesToTest = 10\n self.numGamesToTrain = 10\n self.verbose = False\n\n\n# this function plays a number of games stored in the settings and reports results\ndef playGames(settings):\n Xwins = 0.0\n Xloses = 0.0\n draws = 0.0\n\n print(\"****Testing \", settings.numGamesToTest, \" games of agents looking for sequences of length k=\", settings.k, \" using \", settings.numGamesToEstimateValue, \" games to estimate value\")\n print(\"Agents: X: \", settings.Xagent)\n print(\" and O: \", settings.Oagent)\n\n for i in range(settings.numGamesToTest):\n # new game, create a fresh board\n if settings.verbose:\n print(\"Creating a M x N board, where m =\", settings.m, \" and n=\", settings.n, \"\\n\")\n board = Board(settings.m, settings.n)\n\n # play the game, taking turns being first to act\n if i % 2 == 0:\n winner = MNKGame().playGame(board, settings.Xagent, settings.Oagent, settings)\n else:\n winner = MNKGame().playGame(board, settings.Oagent, settings.Xagent, settings)\n\n # do the bookkeeping now that a result is obtained\n if winner == settings.Xagent:\n Xwins += 1\n if settings.verbose:\n print(\"X emerges victorious over the vile O!!!!! in game \", i)\n elif winner == settings.Oagent:\n Xloses += 1\n if settings.verbose:\n print(\"O has defeated the disgusting X!!!!! in game \", i)\n elif winner == None:\n draws += 1\n if settings.verbose:\n print(\"fought to a draw... maybe next time. In game \", i)\n\n # All games complete, generate some final output\n if settings.verbose:\n print(\"Xwins=\", Xwins, \"Xloses=\", Xloses, \"draws=\", draws, )\n\ndef trainAndTestCNNAgent(settings):\n ourHero = cnnAgent(Square.X_HAS, settings.m, settings.n, settings.k)\n trainingPartner = SearchAgent(Square.O_HAS, settings.m, settings.n, settings.k)\n\n gauntlet1 = RandomAgent(Square.O_HAS, settings.m, settings.n, settings.k)\n gauntlet2 = SearchAgent(Square.O_HAS, settings.m, settings.n, settings.k) # FIXME but add more search (settings specifying that parameter globally is bad)\n theGauntlet = [gauntlet1, gauntlet2]\n\n trainingSessions = 100\n print(\"****Testing \", settings.numGamesToTest, \" games of agents looking for sequences of length k=\", settings.k,\n \" using \", settings.numGamesToEstimateValue, \" games to estimate value\")\n print(\"Agents: X: \", ourHero)\n print(\" and O: \", trainingPartner)\n\n print(\"\\tTestSession\\tHeroWins\\tHeroLoses\\tHeroDraws\\tAvgIllegalMoves\\tAvgGameLength\\tMaxGameLength\\tOpponent\\t(REPEAT)\")\n\n for session in range(trainingSessions):\n for i in range(settings.numGamesToTrain):\n # new game, create a fresh board\n if settings.verbose:\n print(\"Creating a M x N board, where m =\", settings.m, \" and n=\", settings.n, \"\\n\")\n board = Board(settings.m, settings.n)\n\n # play the game, taking turns being first to act\n if i % 2 == 0:\n MNKGame().playGame(board, ourHero, trainingPartner, settings)\n else:\n MNKGame().playGame(board, trainingPartner, ourHero, settings)\n\n # test vs the gauntlet\n for opponent in theGauntlet:\n heroWins = 0\n heroLoses = 0\n heroDraws = 0\n totalIllegalMoves = 0\n maxGameLength = 0\n totalGameLength = 0\n for j in range(settings.numGamesToTest):\n # new game, create a fresh board\n if settings.verbose:\n print(\"Creating a M x N board, where m =\", settings.m, \" and 
n=\", settings.n, \"\\n\")\n board = Board(settings.m, settings.n)\n\n # play the game, taking turns being first to act\n if j % 2 == 0:\n winner, moveCount, illegalMoveCount = MNKGame().playGame(board, ourHero, opponent, settings)\n\n else:\n winner, moveCount, illegalMoveCount = MNKGame().playGame(board, opponent, ourHero, settings)\n\n\n # do the bookkeeping now that a result is obtained\n totalIllegalMoves += illegalMoveCount\n totalGameLength += moveCount\n if moveCount > maxGameLength:\n maxGameLength = moveCount\n\n if winner == ourHero:\n heroWins += 1\n if settings.verbose:\n print(\"X emerges victorious over the vile O!!!!! in game \", i)\n elif winner != None:\n heroLoses += 1\n if settings.verbose:\n print(\"O has defeated the disgusting X!!!!! in game \", i)\n elif winner == None:\n heroDraws += 1\n if settings.verbose:\n print(\"fought to a draw... maybe next time. In game \", i)\n\n # All games vs this opponent complete, generate some final output\n\n print(\"\\t\", session, \"\\t\", heroWins, \"\\t\", heroLoses, \"\\t\", heroDraws, \"\\t\", float(totalIllegalMoves)/settings.numGamesToTest, \"\\t\", float(totalGameLength)/settings.numGamesToTest, \"\\t\", maxGameLength, \"\\t\", opponent, end='' )\n print(\"\")\nif __name__ == '__main__':\n settings = Settings()\n #playGames(settings) # feel free to define your own \"main\" by calling newFunction() and commenting this out\n trainAndTestCNNAgent(settings)\n","repo_name":"dodgej/MNKgames","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"40538848097","text":"#!/usr/bin/env python3\nimport argparse\nimport collections\nimport os\n\nos.environ.setdefault(\"TF_CPP_MIN_LOG_LEVEL\", \"2\") # Report only TF errors by default\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nimport wrappers\n\nparser = argparse.ArgumentParser()\n# These arguments will be set appropriately by ReCodEx, even if you change them.\n#parser.add_argument(\"--env\", default=\"BipedalWalker-v3\", type=str, help=\"Environment.\")\nparser.add_argument(\"--env\", default=\"Pendulum-v1\", type=str, help=\"Environment.\")\nparser.add_argument(\"--recodex\", default=False, action=\"store_true\", help=\"Running in ReCodEx\")\nparser.add_argument(\"--render_each\", default=0, type=int, help=\"Render some episodes.\")\nparser.add_argument(\"--seed\", default=None, type=int, help=\"Random seed.\")\nparser.add_argument(\"--threads\", default=1, type=int, help=\"Maximum number of threads to use.\")\n# For these and any other arguments you add, ReCodEx will keep your default value.\nparser.add_argument(\"--batch_size\", default=256, type=int, help=\"Batch size.\")\nparser.add_argument(\"--envs\", default=16, type=int, help=\"Environments.\")\nparser.add_argument(\"--evaluate_each\", default=100, type=int, help=\"Evaluate each number of updates.\")\nparser.add_argument(\"--evaluate_for\", default=16, type=int, help=\"Evaluate the given number of episodes.\")\nparser.add_argument(\"--gamma\", default=0.99, type=float, help=\"Discounting factor.\")\nparser.add_argument(\"--hidden_layer_size\", default=256, type=int, help=\"Size of hidden layer.\")\nparser.add_argument(\"--critic_learning_rate\", default=0.001, type=float, help=\"Critic learning rate.\")\nparser.add_argument(\"--actor_learning_rate\", default=0.0005, type=float, help=\"Actor learning 
rate.\")\nparser.add_argument(\"--model_path\", default=\"walker.model\", type=str, help=\"Model path\")\nparser.add_argument(\"--replay_buffer_size\", default=1_000_000, type=int, help=\"Replay buffer size\")\nparser.add_argument(\"--target_entropy\", default=-1, type=float, help=\"Target entropy per action component.\")\nparser.add_argument(\"--target_tau\", default=0.005, type=float, help=\"Target network update weight.\")\n\nclass Network:\n def __init__(self, env: wrappers.EvaluationEnv, args: argparse.Namespace) -> None:\n # TODO: Create an actor. Because we will be sampling (and `sample()` from\n # `tfp.distributions` does not play nice with functional models) and because\n # we need the `alpha` variable, we use subclassing to create the actor.\n class Actor(tf.keras.Model):\n def __init__(self, hidden_layer_size: int):\n super().__init__()\n # TODO: Create\n # - two hidden layers with `hidden_layer_size` and ReLU activation\n # - a layer for generating means with `env.action_space.shape[0]` units and no activation\n # - a layer for generating sds with `env.action_space.shape[0]` units and `tf.math.exp` activation\n # - finally, create a variable representing a logarithm of alpha, using for example the following:\n\n self.hidden_1 = tf.keras.layers.Dense(hidden_layer_size, activation=\"relu\")\n self.hidden_2 = tf.keras.layers.Dense(hidden_layer_size, activation=\"relu\")\n\n self.means_head = tf.keras.layers.Dense(env.action_space.shape[0])\n self.sds_head = tf.keras.layers.Dense(env.action_space.shape[0], activation=tf.math.exp)\n #self.sds_head = tf.keras.layers.Dense(env.action_space.shape[0], activation=tf.math.softplus)\n\n self._log_alpha = tf.Variable(np.log(0.1), dtype=tf.float32)\n\n self.action_low = env.action_space.low\n self.action_high = env.action_space.high\n\n def call(self, inputs: tf.Tensor, sample: bool):\n # TODO: Perform the actor computation\n # - First, pass the inputs through the first hidden layer\n # and then through the second hidden layer.\n t = self.hidden_1(inputs)\n t = self.hidden_2(t)\n # - From these hidden states, compute\n # - `mus` (the means),\n # - `sds` (the standard deviations).\n mus = self.means_head(t)\n sds = self.sds_head(t) if sample else tf.zeros_like(mus)\n # - Then, create the action distribution using `tfp.distributions.Normal`\n # with the `mus` and `sds`. Note that to support computation without\n # sampling, the easiest is to pass zeros as standard deviations when\n # `sample == False`.\n actions_distribution = tfp.distributions.Normal(mus, sds)\n # - We then bijectively modify the distribution so that the actions are\n # in the given range. 
Luckily, `tfp.bijectors` offers classes that\n # can transform a distribution.\n # - first run\n # tfp.bijectors.Tanh()(actions_distribution)\n # to squash the actions to [-1, 1] range,\n # - then run\n # tfp.bijectors.Scale((env.action_space.high - env.action_space.low) / 2)(actions_distribution)\n # to scale the action ranges to [-(high-low)/2, (high-low)/2],\n # - finally,\n # tfp.bijectors.Shift((env.action_space.high + env.action_space.low) / 2)(actions_distribution)\n # shifts the ranges to [low, high].\n # In case you wanted to do this manually, sample from a normal distribution, pass the samples\n # through the `tanh` and suitable scaling, and then compute the log-prob by using `log_prob`\n # from the normal distribution and manually accounting for the `tanh` as shown in the slides.\n # However, the formula from the slides is not numerically stable, for a better variant see\n # https://github.com/tensorflow/probability/blob/ef1f64a434/tensorflow_probability/python/bijectors/tanh.py#L70-L81\n actions_distribution = tfp.bijectors.Tanh()(actions_distribution)\n actions_distribution = tfp.bijectors.Scale((self.action_high - self.action_low) / 2)(actions_distribution)\n actions_distribution = tfp.bijectors.Shift((self.action_high + self.action_low) / 2)(actions_distribution)\n # - Sample the actions by a `sample()` call.\n actions = actions_distribution.sample()\n # - Then, compute the log-probabilities of the sampled actions by using `log_prob()`\n # call. An action is actually a vector, so to be precise, compute for every batch\n # element a scalar, an average of the log-probabilities of individual action components.\n avg_log_probs = tf.reduce_mean(actions_distribution.log_prob(actions), axis=1)\n # - Finally, compute `alpha` as exponentiation of `self._log_alpha`.\n alpha = tf.math.exp(self._log_alpha)\n # - Return actions, log_prob, and alpha.\n return actions, avg_log_probs, alpha\n\n # TODO: Instantiate the actor as `self._actor` and compile it.\n self._actor = Actor(args.hidden_layer_size)\n self._actor.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=args.actor_learning_rate))\n\n # TODO: Create a critic, which\n # - takes observations and actions as inputs,\n # - concatenates them,\n # - passes the result through two dense layers with `args.hidden_layer_size` units\n # and ReLU activation,\n # - finally, using a last dense layer produces a single output with no activation\n # This critic needs to be cloned so that two critics and two target critics are created.\n critic = self._create_critic(env, args)\n self._critic_A = tf.keras.models.clone_model(critic)\n self._target_critic_A = tf.keras.models.clone_model(critic)\n self._critic_B = tf.keras.models.clone_model(critic)\n self._target_critic_B = tf.keras.models.clone_model(critic)\n\n for c in [self._critic_A, self._critic_B]:\n c.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=args.critic_learning_rate),\n loss=tf.keras.losses.MeanSquaredError())\n\n self.target_tau = args.target_tau\n # TODO: should the multiplication be there?\n #self.target_entropy = args.target_entropy * env.action_space.shape[0]\n self.target_entropy = args.target_entropy\n\n def _create_critic(self, env, args):\n obs_input = tf.keras.layers.Input(shape=env.observation_space.shape)\n actions_input = tf.keras.layers.Input(shape=env.action_space.shape)\n concat = tf.keras.layers.Concatenate()([obs_input, actions_input])\n hidden_1 = tf.keras.layers.Dense(args.hidden_layer_size, activation=\"relu\")(concat)\n hidden_2 = 
tf.keras.layers.Dense(args.hidden_layer_size, activation=\"relu\")(hidden_1)\n output = tf.keras.layers.Dense(1)(hidden_2)\n\n critic = tf.keras.models.Model(inputs=[obs_input, actions_input], outputs=output)\n return critic\n\n def save_actor(self, path: str):\n # Because we use subclassing for creating the actor, the easiest way of\n # serializing an actor is just to save weights.\n self._actor.save_weights(path, save_format=\"h5\")\n\n def load_actor(self, path: str, env: wrappers.EvaluationEnv):\n # When deserializing, we need to make sure the variables are created\n # first -- we do so by processing a batch with a random observation.\n self.predict_mean_actions([env.observation_space.sample()])\n self._actor.load_weights(path)\n\n @wrappers.typed_np_function(np.float32, np.float32, np.float32)\n @wrappers.raw_tf_function(dynamic_dims=1)\n def train(self, states: np.ndarray, actions: np.ndarray, returns: np.ndarray) -> None:\n # TODO: Separately train:\n # - the actor, by using two objectives:\n # - the objective for the actor itself; in this objective, `tf.stop_gradient(alpha)`\n # should be used (for the `alpha` returned by the actor) to avoid optimizing `alpha`,\n # - the objective for `alpha`, where `tf.stop_gradient(log_prob)` should be used\n # to avoid computing gradient for other variables than `alpha`.\n # Use `args.target_entropy` as the target entropy (the default of -1 per action\n # component is fine and does not need to be tuned for the agent to train).\n self._train_actor(states)\n # - the critics using MSE loss.\n for critic, target_critic in zip([self._critic_A, self._critic_B],\n [self._target_critic_A, self._target_critic_B]):\n critic.optimizer.minimize(\n lambda: critic.compiled_loss(\n returns,\n critic([states, actions], training=True)),\n var_list=critic.trainable_variables\n )\n #\n # Finally, update the two target critic networks exponential moving\n # average with weight `args.target_tau`, using something like\n # for var, target_var in zip(critic.trainable_variables, target_critic.trainable_variables):\n # target_var.assign(target_var * (1 - target_tau) + var * target_tau)\n self._moving_average_update(critic, target_critic)\n\n def _train_actor(self, states):\n with tf.GradientTape() as actor_tape:\n actions, log_probs, alpha = self._actor(states, sample=True)\n values_a = self._critic_A([states, actions])\n values_b = self._critic_B([states, actions])\n values = tf.minimum(tf.squeeze(values_a), tf.squeeze(values_b))\n\n actor_loss = tf.reduce_mean(tf.stop_gradient(alpha) * log_probs - values)\n alpha_loss = -tf.reduce_mean(alpha * (tf.stop_gradient(log_probs) + self.target_entropy))\n loss = actor_loss + alpha_loss\n actor_grad = actor_tape.gradient(loss, self._actor.trainable_variables)\n self._actor.optimizer.apply_gradients(zip(actor_grad, self._actor.trainable_variables))\n\n def _moving_average_update(self, network, target_network):\n for var, target_var in zip(network.trainable_variables, target_network.trainable_variables):\n target_var.assign(target_var * (1 - self.target_tau) + var * self.target_tau)\n\n # Predict actions without sampling.\n @wrappers.typed_np_function(np.float32)\n @wrappers.raw_tf_function(dynamic_dims=1)\n def predict_mean_actions(self, states: np.ndarray) -> np.ndarray:\n # Return predicted actions, assuming the actor is in `self._actor`.\n return self._actor(states, sample=False)[0]\n\n # Predict actions with sampling.\n @wrappers.typed_np_function(np.float32)\n @wrappers.raw_tf_function(dynamic_dims=1)\n def 
predict_sampled_actions(self, states: np.ndarray) -> np.ndarray:\n # Return predicted actions, assuming the actor is in `self._actor`.\n return self._actor(states, sample=True)[0]\n\n @wrappers.typed_np_function(np.float32)\n @wrappers.raw_tf_function(dynamic_dims=1)\n def predict_values(self, states: np.ndarray) -> np.ndarray:\n # TODO: Produce the predicted returns, which are the minimum of\n # target_critic(s, a) - alpha * log_prob\n # considering both target critics and actions sampled from the actor.\n actions, log_probs, alpha = self._actor(states, sample=True)\n values_a = self._target_critic_A([states, actions])\n values_b = self._target_critic_B([states, actions])\n return tf.minimum(tf.squeeze(values_a), tf.squeeze(values_b)) - alpha * log_probs\n\n\ndef main(env: wrappers.EvaluationEnv, args: argparse.Namespace) -> None:\n # Set random seeds and number of threads\n if args.seed is not None:\n tf.keras.utils.set_random_seed(args.seed)\n tf.config.threading.set_inter_op_parallelism_threads(args.threads)\n tf.config.threading.set_intra_op_parallelism_threads(args.threads)\n\n # Construct the network\n network = Network(env, args)\n\n def evaluate_episode(start_evaluation: bool = False, logging: bool = True) -> float:\n rewards, state, done = 0, env.reset(start_evaluation=start_evaluation, logging=logging)[0], False\n while not done:\n # TODO: Predict the action using the greedy policy.\n action = network.predict_mean_actions([state])[0]\n state, reward, terminated, truncated, _ = env.step(action)\n done = terminated or truncated\n rewards += reward\n return rewards\n\n # Evaluation in ReCodEx\n if args.recodex:\n network.load_actor(args.model_path, env)\n while True:\n evaluate_episode(True)\n\n # Create the asynchronous vector environment for training.\n venv = gym.vector.make(args.env, args.envs, asynchronous=True)\n\n # Replay memory of a specified maximum size.\n replay_buffer = collections.deque(maxlen=args.replay_buffer_size)\n Transition = collections.namedtuple(\"Transition\", [\"state\", \"action\", \"reward\", \"done\", \"next_state\"])\n\n state, training = venv.reset(seed=args.seed)[0], True\n while training:\n for _ in range(args.evaluate_each):\n # Predict actions by calling `network.predict_sampled_actions`.\n action = network.predict_sampled_actions(state)\n\n next_state, reward, terminated, truncated, _ = venv.step(action)\n\n ended = reward <= -100\n shaped_rewards = ended * (reward + 95) + np.logical_not(ended) * reward\n done = terminated | truncated\n\n for i in range(args.envs):\n replay_buffer.append(Transition(state[i], action[i], shaped_rewards[i], done[i], next_state[i]))\n state = next_state\n\n # Training\n if len(replay_buffer) >= 4 * args.batch_size:\n # Note that until now we used `np.random.choice` with `replace=False` to generate\n # batch indices. However, this call is extremely slow for large buffers, because\n # it generates a whole permutation. 
With `np.random.randint`, indices may repeat,\n # but once the buffer is large, it happens with little probability.\n batch = np.random.randint(len(replay_buffer), size=args.batch_size)\n states, actions, rewards, dones, next_states = map(np.array, zip(*[replay_buffer[i] for i in batch]))\n # TODO: Perform the training\n next_values = network.predict_values(next_states)\n est_returns = rewards + args.gamma * np.logical_not(dones) * np.squeeze(next_values)\n network.train(states, actions, est_returns)\n\n # Periodic evaluation\n returns = [evaluate_episode() for _ in range(args.evaluate_for)]\n # TODO: when done, exit and save\n\n # Final evaluation\n while True:\n evaluate_episode(start_evaluation=True)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args([] if \"__file__\" not in globals() else None)\n\n # Create the environment\n env = wrappers.EvaluationEnv(gym.make(args.env), args.seed, args.render_each)\n\n main(env, args)\n","repo_name":"Kripner/mff","sub_path":"reinforcement_learning/08/walker.py","file_name":"walker.py","file_ext":"py","file_size_in_byte":17351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35831345265","text":"#\n# @lc app=leetcode id=88 lang=python\n#\n# [88] Merge Sorted Array\n#\n# https://leetcode.com/problems/merge-sorted-array/description/\n#\n# algorithms\n# Easy (36.49%)\n# Total Accepted: 398.3K\n# Total Submissions: 1.1M\n# Testcase Example: '[1,2,3,0,0,0]\\n3\\n[2,5,6]\\n3'\n#\n# Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as\n# one sorted array.\n# \n# Note:\n# \n# \n# The number of elements initialized in nums1 and nums2 are m and n\n# respectively.\n# You may assume that nums1 has enough space (size that is greater or equal to\n# m + n) to hold additional elements from nums2.\n# \n# \n# Example:\n# \n# \n# Input:\n# nums1 = [1,2,3,0,0,0], m = 3\n# nums2 = [2,5,6], n = 3\n# \n# Output: [1,2,2,3,5,6]\n# \n# \n#\nclass Solution(object):\n def merge(self, nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: None Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if len(nums2) == 0:\n return nums1\n index = len(nums1)-1\n while index >=0:\n if m>0 and nums1[m-1] > nums2[n-1]:\n nums1[index] = nums1[m-1]\n m = m - 1\n elif n > 0:\n nums1[index] = nums2[n-1]\n n = n - 1\n index = index - 1\n return nums1\n","repo_name":"sarahgonsalves223/DSA_Python","sub_path":"Easy/88_easy_merge-sorted-array.py","file_name":"88_easy_merge-sorted-array.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9983052019","text":"import csv\nfrom mnist import MNIST\nfrom digits_classifyer import classify_digits, check_classification\nfrom pic_to_vector import PicToVec\nimport os\nimport run_perceptron\nfrom typing import List\n\ntrain_features_file_name = 'features/train_images.csv'\nbackup_train_features_file_name = 'features/backup_train_images.csv'\ntest_features_file_name = 'features/test_images.csv'\nbackup_test_features_file_name = 'features/backup_test_images.csv'\n\ntrain_tags_file_name = \"tags/train_tags\"\ntest_tags_file_name = \"tags/test_tags\"\n\nmndata = MNIST('data')\n\ntrain_images, train_labels = mndata.load_training()\n\ntest_images, test_labels = mndata.load_testing()\n\nUSE_BACKUP = True\n\n\ndef write_feature_to_file(filename: str, is_backup: bool, images):\n with open(filename, 
'w', newline='', encoding='utf-8') as csvoutForFeatures1:\n csvoutForFeatures = csv.writer(csvoutForFeatures1)\n for i in range(len(images)):\n\n if is_backup:\n pic = PicToVec(images[i], [0])\n var = pic.back_up_format()\n else:\n pic = PicToVec(images[i])\n var = pic.format()\n\n csvoutForFeatures.writerow(var)\n csvoutForFeatures1.flush()\n\n\ndef build_features(use_backup: bool = False):\n files_to_remove = [os.path.join('features', f) for f in os.listdir('features')]\n for f in files_to_remove:\n os.remove(f)\n write_feature_to_file(train_features_file_name, images=train_images, is_backup=False)\n write_feature_to_file(test_features_file_name, images=test_images, is_backup=False)\n if use_backup:\n write_feature_to_file(backup_train_features_file_name, images=train_images, is_backup=True)\n write_feature_to_file(backup_test_features_file_name, images=test_images, is_backup=True)\n\n\ndef build_tags(use_backup: bool = False):\n predictions_results: List[List[bool]] = []\n backup_predictions_results: List[List[bool]] = []\n for i in range(0, 10):\n train_tags_file = train_tags_file_name + i.__str__() + '.csv'\n test_tags_file = test_tags_file_name + i.__str__() + '.csv'\n with open(train_tags_file, 'w', newline='', encoding='utf-8') as csvoutForTrainTags1, \\\n open(test_tags_file, 'w', newline='', encoding='utf-8') as csvoutForTestTags1:\n csvoutForTrainTags = csv.writer(csvoutForTrainTags1)\n csvoutForTestTags = csv.writer(csvoutForTestTags1)\n counter = 0\n for label in train_labels:\n counter += 1\n tag_to_write = (1 if label == i else 0).__str__()\n csvoutForTrainTags.writerow(tag_to_write)\n csvoutForTrainTags1.flush()\n counter = 0\n for label in test_labels:\n counter += 1\n tag_to_write = (1 if label == i else 0).__str__()\n csvoutForTestTags.writerow(tag_to_write)\n csvoutForTestTags1.flush()\n print(\"Sucsses rate for digit \", i)\n\n restult_list = []\n predictions_results.append(\n run_perceptron.run(train_features_file_name, train_tags_file, test_features_file_name, test_tags_file))\n if use_backup:\n backup_predictions_results.append(\n run_perceptron.run(backup_train_features_file_name, train_tags_file, backup_test_features_file_name,\n test_tags_file))\n check_classification(\n classify_digits(binary_classification_results=predictions_results,\n backup_binary_classification_results=backup_predictions_results),\n list(test_labels))\n\n\ndef assert_number_of_lines(number_of_lines, file_path):\n with open(file_path, 'r') as file:\n counter = 0\n content = file.read()\n coList = content.split('\\n')\n for i in coList:\n if i:\n counter += 1\n print(counter)\n assert (counter == number_of_lines)\n\n\nif __name__ == '__main__':\n build_features(USE_BACKUP)\n build_tags(USE_BACKUP)\n","repo_name":"raneldan/ML-ex2","sub_path":"data_parser.py","file_name":"data_parser.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2540128535","text":"import os\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(dotenv_path=find_dotenv()\n #,verbose=True\n )\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\npostgres_local_base = os.getenv('DATABASE_URL') #'postgresql://admin:123456@localhost:5434/balticlsc'\n\n\nclass Config:\n SECRET_KEY = os.getenv('SECRET_KEY')\n DEBUG = False\n\n\nclass DevelopmentConfig(Config):\n DEBUG = True\n SQLALCHEMY_DATABASE_URI = postgres_local_base\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n\nclass TestingConfig(Config):\n DEBUG = True\n 
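# Editor's note: the testing config reuses the development DATABASE_URL below, so tests run against the dev database unless it is overridden.\n    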
TESTING = True\n PRESERVE_CONTEXT_ON_EXCEPTION = False\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SQLALCHEMY_DATABASE_URI = postgres_local_base\n\n\nclass ProductionConfig(Config):\n DEBUG = False\n SQLALCHEMY_DATABASE_URI = postgres_local_base\n\n\nconfig_by_name = dict(\n dev=DevelopmentConfig,\n test=TestingConfig,\n prod=ProductionConfig\n)\n\nkey = Config.SECRET_KEY\n","repo_name":"kolendomichal/popgrupaa","sub_path":"Pop-Grupa-A-Backend/app/main/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"22342507049","text":"\ndata = {'a':1,'b':2,'c':4}\na,b,c = data.keys()\n# print(a)\n# print(b)\n# print(c)\n\ndef my_func(a,b,c):\n print(a)\n print(b)\n print(c)\ndef my_func1(click): # argument musi się nazywać tak jak klucz słwonika\n print(click)\n\ndata_dict = {'a':1,'c':2,'b':[3]}\ndict1 = {\"click\":[{\"click button mouse\":{\"button\": \"left\"}}]}\ndict2 = {\"click\":2}\n# my_func(*data_dict) # wypakowanie kluczy slownika\nmy_func(**data_dict) # wypakowanie wartosci slownika\nmy_func1(**dict1) # wypakowanie wartosci slownika\n","repo_name":"mixer123/kacper_obiekty","sub_path":"modul4/wypakowanie_2.py","file_name":"wypakowanie_2.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29058942228","text":"from PyQt6 import QtWidgets\r\nfrom PyQt6.QtCore import QItemSelectionModel, Qt\r\nfrom PyQt6.QtSql import QSqlTableModel\r\n\r\nimport src.ui as ui\r\n\r\n\r\nclass SkillChooser(QtWidgets.QDialog, ui.choose_skills.Ui_Dialog):\r\n def __init__(self, editor: \"ux.task_editor.Editor\", task=None):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.task = task\r\n self.editor = editor\r\n model = QSqlTableModel()\r\n model.setTable(\"skills\")\r\n model.setSort(1, Qt.SortOrder.AscendingOrder)\r\n model.select()\r\n self.listView.setModel(model)\r\n self.listView.setModelColumn(1)\r\n\r\n if task:\r\n # holy crap, that was a difficult birth..\r\n self.listView.selectionModel().clear()\r\n for index in range(model.rowCount()):\r\n if model.itemData(model.index(index, 0))[0] in task.skill_ids:\r\n self.listView.selectionModel().select(\r\n model.index(index, 1),\r\n QItemSelectionModel.Select,\r\n )\r\n\r\n def accept(self):\r\n super().accept()\r\n self.editor.skill_ids = [\r\n self.listView.model().record(idx.row()).value(\"skill_id\")\r\n for idx in self.listView.selectedIndexes()\r\n ]\r\n\r\n\r\nimport src.ux as ux # noqa: E402\r\n","repo_name":"amogorkon/watnu","sub_path":"src/ux/choose_skills.py","file_name":"choose_skills.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43476161176","text":"import tensorflow as tf\n\nclass AFD_RNN(object):\n\n def __init__(self, net_config, test_batch_size=None, time_step=None):\n\n self.class_num = int(net_config['class_num'])\n self.num_units = int(net_config['num_units'])\n self.senor_data_num = int(net_config['senor_data_num'])\n\n if time_step is None:\n self.time_step = int(net_config['time_step'])\n else:\n self.time_step = time_step\n\n if test_batch_size is None:\n self.batch_size = int(net_config['batch_size'])\n else:\n self.batch_size = test_batch_size\n\n def build_net_graph(self):\n self.input_tensor = tf.placeholder(tf.float32, [None, self.time_step, self.senor_data_num])\n\n # 创建输出层\n input_x = 
tf.reshape(self.input_tensor, [-1, self.senor_data_num])\n weights_x = self._get_variable_weights([self.senor_data_num, self.num_units], 'input_weights')\n biases_x = self._get_variable_biases([self.num_units], 'input_biases')\n\n x_output = tf.reshape(tf.add(tf.matmul(input_x, weights_x), biases_x),\n [-1, self.time_step, self.num_units])\n\n # 创建rnn\n x_output = tf.unstack(x_output, axis=1)\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.num_units)\n self.cell_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32)\n\n # outputs shape =[batch_size, max_time, cell_state_size]\n # LSTM final_state shape = [2, batch_size, cell_state_size]\n cell_outputs, final_state = tf.nn.static_rnn(lstm_cell,\n x_output,\n initial_state=self.cell_state)\n # 创建网路输出层\n outputs = tf.reshape(cell_outputs, [-1, self.num_units])\n weights_outputs = self._get_variable_weights([self.num_units, self.class_num], 'outputs_weights')\n biases_outputs = self._get_variable_biases([self.class_num], 'outputs_biases')\n\n predict = tf.reshape(tf.add(tf.matmul(outputs, weights_outputs), biases_outputs),\n [self.time_step, self.batch_size, self.class_num])\n\n return predict\n\n def _get_variable_weights(self, shape, name):\n return tf.Variable(tf.truncated_normal(shape, stddev=0.1), dtype=tf.float32, name=name)\n\n def _get_variable_biases(self, shape, name):\n return tf.Variable(tf.constant(0.1, shape=shape), dtype=tf.float32, name=name)","repo_name":"chizhanyuefeng/Realtime-Fall-Detection-for-RNN","sub_path":"build_rnn.py","file_name":"build_rnn.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"76"} +{"seq_id":"37221639628","text":"import os\nfrom datetime import date\nfrom cryptography.fernet import Fernet\nfrom my_classes.Context import Context\nfrom my_classes.GoogleSheet import get_google_sheet\nfrom my_classes.Schedule import Schedule\n\n\nclass Student:\n def __init__(self, ctx, first_name: str, last_name: str, student_id: str, program_degree: str, discord_id: int):\n self.ctx = Context(ctx) # the object that represents this member's Content.\n\n self.first = first_name # the student's first name.\n self.last = last_name # the student's last name\n self.student_id = student_id # the student's student id.\n self.course = None # the course object that the student is in tutoring for.\n self.program_degree = program_degree # the degree the student is current pursuing.\n self.discord_id = discord_id # the student's discord id.\n\n self.times_helped = 0 # the number of times the student has been helped by the tutor.\n self.prev_voice_channel = None # the voice channel id the student is in prior to joining the tutor's.\n self.being_helped = False # if the student is currently being helped by a tutor.\n\n def encrypt(self):\n \"\"\"encrypt and display student's information.\n\n encryption is done by using a Fernet Key:\n API here: https://cryptography.io/en/latest/index.html\n encryption converts str to bytes.\n need to strip bytes' format to be printed.\n student's account is NOT stored locally for in a database for transparency.\n student's account are stored in a discord channel to be read by the bot in the future.\n \"\"\"\n # encrypt student information\n information = f'{self.first} {self.last} {self.student_id} {self.program_degree} {self.discord_id}'\n fernet = Fernet(os.getenv(\"FERNET_KEY\"))\n encrypted_account = fernet.encrypt(information.encode('utf-8'))\n\n return encrypted_account.decode('utf-8')\n\n async def 
sign_in(self, first_name):\n \"\"\"send the student their custom sign-in sheet link.\n\n REQUIREMENT: google form.\n the google form requires: course code, tutor's name, student name, student id, and degree program.\n WARNING: https links spaces are represented with '+'\n this will be used when adding a space between student's first and last name.\n override default tutor's name:\n if student passed in a tutor's name as an argument.\n\n Parameters\n ----------\n :param str first_name: the str that represents the tutor's first name.\n :return: a str that represents the student's custom sign-in link.\n \"\"\"\n\n if first_name is None:\n # get tutor's name from schedule.\n schedule = self.course.schedule\n tutor = schedule.tutor_name()\n if tutor is not None:\n tutor = tutor.replace(' ', '+')\n else:\n # default tutor's name\n tutor = first_name.lower().capitalize()\n\n # generate custom sign-in link.\n return f'https://docs.google.com/forms/d/e/1FAIpQLSeLjQ8XunqxtzlWGHKB5Kt52-ZAyBqPiyBmLPfNcDuYhb5dsg/viewform?usp=pp_url&entry.1178312123={self.course.code}&entry.1604735080={tutor}&entry.174697377={self.first}+{self.last}&entry.1854395744={self.student_id}+&entry.905892592={self.program_degree}'\n\n def verify(self):\n \"\"\"verify student if they submitted their sign-in sheet via google forms.\n\n the bot will give the students a link to sign-in, but the student is still required to submit the form.\n this function will verify if the student submitted the form.\n\n :return: True if the student has submitted their sign-in sheet, otherwise False.\n \"\"\"\n # verify student's sign-in.\n for content in get_google_sheet():\n # only check entries that were submitted today.\n if content['Timestamp'].split(' ')[0] != date.today().strftime('%m-%d-%Y'):\n return False\n # check if student signed-in.\n schedule = Schedule(self.course.code)\n if content['Student Name'] == self.name() and \\\n str(content['Student ID']) == self.student_id and \\\n content['Course Code'] == self.course.code and \\\n content['Degree'] == self.program_degree and \\\n content['Tutor'] == schedule.tutor_name():\n\n return True\n\n def course_error_msg(self):\n \"\"\"display the sign in error message when course is not assigned.\"\"\"\n return f'<@!{self.discord_id}> *need to sign-in.*'\n\n def name(self):\n \"\"\":return: str of the student's first and last name.\"\"\"\n return f'{self.first} {self.last}'\n\n\ndef to_student(student_info):\n \"\"\"convert an encrypted student information to a Student object.\n\n the student information is given as a decrypted str.\n\n Parameters\n ----------\n :param str student_info: the str that represents the student's information.\n :return: a Student object that represents the given student's information.\n \"\"\"\n # decrypt student information.\n fernet = Fernet(os.getenv(\"FERNET_KEY\"))\n decrypt_info = fernet.decrypt(student_info.encode('utf-8'))\n\n # parse string for Student object.\n info = decrypt_info.decode('utf-8').split(' ')\n\n # generate Student object.\n return Student(None, info[0], info[1], info[2], info[3], int(info[4]))\n","repo_name":"steven-phun/Discord_Tutoring_App","sub_path":"my_classes/Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38443991702","text":"from selenium import webdriver\nimport pandas as pd\n\noptions = 
webdriver.ChromeOptions()\noptions.add_argument('--headless')\noptions.add_argument('--incognito')\n\n\nheaders = {\"jms_number\":(), \"cp_case_number\":(), \"municipal_case_number\":(), \"other_case_number\":(), \"court_date\":(),\n \"orc_code\":(), \"charge_description\":(), \"bond_type\":(), \"bond_amount\":(), \"disposition\":(),\n \"fine\":(), \"comments\":(), \"holder\":(), \"timestamp\":()\n}\n\nstart = pd.DataFrame(headers)\nstart.to_csv('/home/paul/Documents/people.csv', index=False)\n\nip = pd.read_csv(\"/home/paul/Documents/ip.csv\")\n# assigns 'view all inmates' table (ip.csv) to ip\nip.columns = ['jms_number', 'last_name', 'first_name', 'admit_date', 'proj_release_date', 'holder']\n# names columns for ip object for later usage\nurl_pattern = \"http://apps.hcso.org/InmateDetail.aspx?ID=\"\n# creates base url that jms_number can be concatenated to the end of to create url for each person\n\nip.insert(0, 'ip_url', url_pattern + ip['jms_number'].map(str))\n# inserts a column at the far left that is url pattern + person's jms number\n\ndriver = webdriver.Chrome(options=options)\n# assigns webdriver.Chrome() to object driver for easier reference later\n\nip_url = ip.iloc[0:, 0]\n# assigns object ip_url to first row, first column of ip (which is the url to lookup\n\ncounter = 0\n\n# below loop iterates through each individual on the 'view all inmates' table, visits their page,\n# and scrapes their charges info from that table, then adds one to the value of \"counter\" at end of loop\n# continues through this while loop until the value of counter is the same as the length of ip_url column\n# might need to change statement to:\n# while counter < 1+ len(ip_url): to get last element on list?\n\nwhile counter < len(ip_url):\n url = ip_url[counter]\n driver.get(url)\n raw = pd.read_html(driver.page_source)\n indiv = raw[1]\n timestamp = pd.to_datetime(\"today\")\n indiv.insert(0, 'jms_number', driver.current_url[-7:])\n indiv.insert(13, 'timestamp', timestamp)\n indiv.to_csv('/home/paul/Documents/people.csv', mode='a', header=False, index=False)\n # turns the dataframe into csv and appends what has been scraped in this loop to the existing file\n # also removes header and index\n\n counter+=1","repo_name":"pcencula/incarcerated-persons","sub_path":"03_pull_ip_charges.py","file_name":"03_pull_ip_charges.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14283651859","text":"#Gina Saifullah\n\n# Returns True if divisible by 3\ndef MultipleOfThree(num):\n\tif (num % 3 == 0):\n\t\treturn True\n\treturn False\n\n# Returns True if divisible by 5\ndef MultipleOfFive(num):\n\tif (num % 5 == 0):\n\t\treturn True\n\treturn False\t\n\n\ndef main():\n\tpass\n\n# Main\nmain()\n\nprint()\nprint(\"The FizzBuzz Problem\")\nprint()\n\nfor i in range(1, 101, 1):\n\tout_str = \"\"\n\tif (MultipleOfThree(i) is True):\n\t\tout_str += \"Fizz\"\n\tif (MultipleOfFive(i) is True):\n\t\tout_str += \"Buzz\"\n\tif (len(out_str) == 0):\n\t\tout_str = i\n\tprint(out_str)\n\t\t","repo_name":"UWPCE-PythonCert-ClassRepos/GP_Python210B_Winter_2019","sub_path":"students/ginafs/session02/FizzBuzz.py","file_name":"FizzBuzz.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14893652257","text":"'''\n問題文\nAtcoder国にはN個の県があり、これらの県には合計で 
M個の市が属しています。\n市iが誕生したのはYi年であり、県Piに属しています。\nただし、同じ年に誕生した市が複数存在することはないとします。\nそれぞれの市に12桁の認識番号を割り振ることとなりました。\n市iが 県Piに属する市の中でx番目に誕生した市のとき、市iの認識番号の上6桁はPi、下6桁はxとなります。\nただし、Piやxが6桁に満たない場合は6桁になるまで0を左に追加するものとします。\n全ての市の認識番号を求めてください。\nただし、市が1つも属さない県がある場合に注意してください。\n制約\n1≤N≤105\n1≤M≤105\n1≤Pi≤N\n1≤Yi≤109\nYiは全て異なる\n入力は全て整数\n'''\n\nn,m=map(int, input().split())\nprefecture=[]\nfor i in range(m):\n pk,yk=map(int, input().split())\n prefecture.append([pk,yk,i])\n\nprefecture.sort(key=lambda x:(x[0],x[1])) # x[0]:pで整理したうえでx[1]:yで整理\np_num=prefecture[0][0]\ncity_num=1\nfor i in range(m):\n if(prefecture[i][0]!=p_num):\n p_num=prefecture[i][0]\n city_num=1\n\n prefecture[i].append(city_num)\n city_num+=1\n\nprefecture.sort(key=lambda x:x[2])\n\nfor i in range(m):\n print('{0:06d}{1:06d}'.format(prefecture[i][0], prefecture[i][3]))\n\n# p=[]\n# y=[]\n# prefecture=[[] for j in range(n)]\n# for i in range(m):\n# pk,yk=map(int, input().split())\n# p.append(pk)\n# y.append(yk)\n# prefecture[p[i]-1].append(y[i])\n#\n# for j in range(n):\n# prefecture[j].sort()\n#\n# for i in range(m):\n# print('{0:06d}{1:06d}'.format(p[i],prefecture[p[i]-1].index(y[i])+1))\n","repo_name":"ryogoOkura/atcoder","sub_path":"entrant/abc113/abc113-c.py","file_name":"abc113-c.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"71492799284","text":"from pyfiglet import Figlet\nimport sys # take arguments at the command line\nimport random\n\n'''\n Expects zero or two command-line arguments:\n Zero if the user would like to output text in a random font.\n Two if the user would like to output text in a specific font, in which\n case the first of the two should be -f or --font,\n and the second of the two should be the name of the font.\n Prompts the user for a str of text.\n Outputs that text in the desired font.\n'''\n\nfiglet = Figlet()\n\ndef main():\n\n if 0 < len(sys.argv) < 3 or len(sys.argv) > 3:\n sys.exit('Invalid usage')\n\n elif sys.argv[1] not in ['-f', '--font', 'renders']:\n sys.exit('Invalid usage')\n\n elif sys.argv[2] not in figlet.getFonts():\n sys.exit('Invalid usage')\n\n elif len(sys.argv) == 1:\n user_str = input('Input: ').strip().lower()\n figlet.setFont(font=random.choice(figlet.getFonts()))\n print(figlet.renderText(user_str))\n\n else:\n user_str = input('Input: ').strip()\n figlet.setFont(font=sys.argv[2])\n print(figlet.renderText(user_str))\n\n\nif __name__ == '__main__':\n main()\n\n'''\n# pip install cowsay\n\nimport cowsay\nimport sys\n\nif len(sys.argv) == 2:\n cowsay.trex(\"hello, \" + sys.argv[1])\n\npython say.py David\n'''\n\n# check50 cs50/problems/2022/python/figlet\n# submit50 cs50/problems/2022/python/figlet\n# pip install pyfiglet\n\n'''\n\npython figlet.py test\npython figlet.py -a slant\npython figlet.py -f invalid_font\npython figlet.py -f slant, type CS50\npython figlet.py -f rectangles, then type Hello, world\npython figlet.py -f alphabet, then type Moo\n\n'''\n\n","repo_name":"newsgae/CS50_python","sub_path":"wk4/figlet/figlet.py","file_name":"figlet.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8447484348","text":"from routers.member.returnValue import memberListReturnValue\nimport sql.memberSql as memSql\nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\nIS_DEV = os.environ.get('IS_DEV')\npwd = \"/Users/josephkim/Desktop/bitcoin_trading_back\" if IS_DEV == \"True\" else 
\"/data/4season/bitcoin_trading_back\"\nimport sys\nsys.path.append(pwd) \nfrom routers.trade.tradeFn import TradeFn\n\n\nclass MemberFn():\n async def getMemberListFn(self, bit):\n returnValue = []\n RawMemberList = await bit.mysql.Select(memSql.getMemberListSql)\n for member in RawMemberList:\n data = memberListReturnValue(member)\n returnValue.append(data)\n return returnValue\n\n def controlTradingFn(self,bit, idx, status):\n TF = TradeFn()\n if status == 1:\n res = TF.autoTradingOn(idx)\n elif status == 0:\n res = TF.autoTradingOff(bit, idx)\n return res","repo_name":"KimTeaSick/bitcoin_tranding","sub_path":"routers/member/memberFn.py","file_name":"memberFn.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"9984687476","text":"import tensorflow as tf \r\nfrom tensorflow.python import keras\r\nimport numpy as np \r\nimport pickle as pk \r\nprint(\"finish import.\")\r\n\r\npath = \"D:\\\\python_proj\\\\Mnist\\\\result\\\\convNN_res.krmd\"\r\nconv_model = keras.models.load_model(path)\r\n\r\ndef load_test_data():\r\n print(\"loading training data..\")\r\n file1 = open(\"D:\\\\python_proj\\\\Mnist\\\\data\\\\test_image.pkl\", \"rb\")\r\n list1 = pk.load(file1)\r\n file1.close()\r\n for i in range(10000):\r\n list1[i] = np.reshape(list1[i], (1,28,28))\r\n list1[i] = np.subtract(np.divide(list1[i], 64), 2)\r\n file2 = open(\"D:\\\\python_proj\\\\Mnist\\\\data\\\\test_label.pkl\", \"rb\")\r\n list2 = pk.load(file2)\r\n file2.close()\r\n return np.array(list1), np.array(list2)\r\n\r\nimgs, labels = load_test_data()\r\nlabels = keras.utils.to_categorical(labels)\r\n\r\nres = conv_model.evaluate(x=imgs, y=labels)\r\nprint()\r\nprint(\"test acc:\", res[1])\r\n\r\n###\r\n\"\"\"\r\nthis convolutionary model\r\ngot 98.8% acc on test data\r\n\"\"\"\r\n###","repo_name":"SJTUqizhenlin/MNIST","sub_path":"result/convNN_test.py","file_name":"convNN_test.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1838380174","text":"from splinter import Browser\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\n## initiate browser and path\ndef initiate_browser():\n executable_path = {'executable_path':'/usr/local/bin/chromedriver'}\n return Browser('chrome', **executable_path, headless=False)\n\ndef scrape_info():\n browser = initiate_browser()\n\n # Create mars_data dictionary to insert into mongo\n mars_data_dict = {}\n \n # NASA Mars News\n url = ('https://mars.nasa.gov/news/')\n browser.visit(url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n article = soup.find(\"div\", class_=\"list_text\")\n news_p = article.find(\"div\", class_=\"article_teaser_body\").text\n news_title = article.find(\"div\", class_=\"content_title\").text\n\n mars_data_dict['news_title'] = news_title\n mars_data_dict['news_p'] = news_p\n\n # JPL Mars Space Images\n url_2 = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\n browser.visit(url_2)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n image = soup.find('img', class_='thumb')['src']\n featured_image_url = 'https://jpl.nasa.gov'+image\n\n mars_data_dict['featured_image_url'] = featured_image_url\n\n # Mars Facts\n url_3 = 'https://space-facts.com/mars/'\n browser.visit(url_3)\n\n read_table = pd.read_html(url_3)\n mars_data_df = pd.DataFrame(read_table[0])\n mars_data_df.columns=['Parameter','Values']\n mars_data_df 
= mars_data_df.set_index('Parameter')\n mars_table = mars_data_df.to_html(classes='mars_table')\n mars_table = mars_table.replace('\\n', ' ')\n\n mars_data_dict['mars facts'] = mars_table\n\n # Mars Hemispheres\n url_4 = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n browser.visit(url_4)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n\n core_url = soup.find_all('div', class_='item')\n titles = []\n hemisphere_image_urls = []\n base_url = 'https://astrogeology.usgs.gov'\n\n for i in core_url:\n title = i.find('h3').text\n url = i.find('a')['href']\n hemi_url = base_url+url\n\n browser.visit(hemi_url)\n html = browser.html\n soup = BeautifulSoup(html, 'html.parser')\n hemi_image_ori = soup.find('div', class_='downloads')\n hem_img_url = hemi_image_ori.find('a')['href']\n\n print(hem_img_url)\n image_dict_info = dict({'title':title, 'image_url':hem_img_url})\n hemisphere_image_urls.append(image_dict_info)\n\n mars_data_dict['hemisphere_image'] = hemisphere_image_urls\n\n browser.quit()\n return mars_data_dict","repo_name":"KristenBlanchard/Web-Scraping-Challenge","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21434583698","text":"import unittest\nimport os\n\nfrom ose.agent import Agent, load_agents\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n base_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.pardir))\n self.filename = os.path.join(\n base_path, '../../data/test/agents_sample.json')\n\n def tearDown(self):\n pass\n\n def test_agent(self):\n agent = Agent(\n name='Fabio',\n role='supervisor',\n groups=[{'id': 0, 'label': 'MEN', 'type': None}])\n\n self.assertEqual(agent.__repr__(), \"Fabio (supervisor)\")\n\n def test_load_agent(self):\n agents = load_agents(self.filename)\n self.assertEqual(len(agents), 9)\n groups = [\n {'id': 1, 'label': 'Ecole du chemin', 'type': 'ecole'},\n {'id': 3, 'label': 'Enseignant Duchmol', 'type': 'classe'},\n {'id': 5480, 'label': 'L3-Anglais', 'type': 'groupe'}\n ]\n self.assertEqual(agents[1].groups, groups)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"open-student-environment/open-student-environment","sub_path":"ose/agent/test/test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"33558914044","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom ecomScraper.custom_logging import setup_logger\nfrom ecomScraper.custom_selectors.selenium_selectors import find_element_by_css_selector, find_elements_by_css_selector\n\nlogger = setup_logger('sense_scraper info logger', 'logs/selenium/sense.log')\n\n\ndef sense_scraper(url: str):\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument(\"--headless\")\n user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) ' \\\n 'Chrome/83.0.4103.116 Safari/537.36'\n chrome_options.add_argument(f'user-agent={user_agent}')\n browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=chrome_options)\n browser.maximize_window()\n browser.get(url)\n\n BRAND_SELECTOR_PATH = \"a[id='pdpBrandNameText']\"\n 
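# Editor's note: these CSS selectors are tied to the target page's current markup and must be updated if the site layout changes.\n    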
NAME_SELECTOR_PATH = \"h2[id='pdpProductNameText']\"\n PRICE_SELECTOR_PATH = \"span.product-price__price\"\n IMAGES_SELECTOR_PATH = \"div.image-wrapper img\"\n\n brand = find_element_by_css_selector(browser, BRAND_SELECTOR_PATH, \"BRAND NAME\", logger)\n name = find_element_by_css_selector(browser, NAME_SELECTOR_PATH, \"PRODUCT NAME\", logger)\n price = find_element_by_css_selector(browser, PRICE_SELECTOR_PATH, \"PRODUCT PRICE\", logger)\n images = find_elements_by_css_selector(browser, IMAGES_SELECTOR_PATH, \"PRODUCT IMAGE\", logger)\n\n absolute_image_src = [image.get_attribute('data-srcset') for image in images]\n while '' in absolute_image_src:\n absolute_image_src.remove('')\n\n item = {\n 'brand_name': brand.strip(),\n 'product_name': name.strip(),\n 'product_price': price.strip(),\n 'product_image': absolute_image_src\n }\n browser.close()\n return {\"status\": \"success\", \"data\": item}\n","repo_name":"rushanshaikh98/EcommerceWebScraping","sub_path":"ecomScraper/selenium_scrapers/sense.py","file_name":"sense.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16834487257","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# Load the CSV file\ndata = pd.read_csv(\"international_matches.csv\")\n\n# Create a new column to represent whether the home team won or not\ndata[\"host_team_won\"] = data[\"home_team_result\"] == \"Win\"\nfeatures = [\"home_team_fifa_rank\", \"away_team_fifa_rank\"]\ntarget = \"host_team_won\"\ntrain_data, test_data, train_target, test_target = train_test_split(data[features], data[target], test_size=0.2, random_state=42)\n\n# logistic regression classifier\nclf = LogisticRegression()\nclf.fit(train_data, train_target)\npredictions = clf.predict(test_data)\n\n# accuracy of the test in relation to prediction\naccuracy = accuracy_score(test_target, predictions)\nprint(f\"Accuracy of model: {accuracy}\")\n\n# win of home team\nhost_wins = test_target[test_target == True]\nhost_win_percentage = len(host_wins) / len(test_target) * 100\n\n# win of away team\naway_wins = test_target[test_target == False]\naway_win_percentage = len(away_wins) / len(test_target) * 100\n\nprint(f\"Percentage of games won by the host team: {host_win_percentage:.2f}%\")\nprint(f\"Percentage of games won by the away team: {away_win_percentage:.2f}%\")\n\n# plot the win percentages\nlabels = [\"Home team wins\", \"Away team wins\"]\nsizes = [host_win_percentage, away_win_percentage]\ncolors = [\"lightblue\", \"pink\"]\nplt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%', startangle=90)\nplt.axis('equal')\nplt.title(\"Win Percentage\")\nplt.show()\n","repo_name":"yoelchemla/world_cup_ML","sub_path":"win_percentage.py","file_name":"win_percentage.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2545263887","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import *\n\nfrom mainwindow_ui import Ui_MainWindow\n\n\nclass MyForm(QtWidgets.QMainWindow):\n def __init__(self, parent=None):\n QtWidgets.QWidget.__init__(self, parent)\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n def openFileNamesDialog(self) -> str:\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n 
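# Editor's note: getOpenFileName returns an empty path when the user cancels, so callers should treat an empty string as a cancelled selection.\n        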
file_path, _ = QFileDialog.getOpenFileName(self, \"Select graph to load\", \"\",\n \"Graph Modelling Language (*.gml)\", options=options)\n return file_path\n\n def showErrorDialog(self, errorMessage):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(errorMessage)\n msg.setWindowTitle(\"Error\")\n msg.exec_()\n","repo_name":"kbednars/ALHE","sub_path":"myform.py","file_name":"myform.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44888580647","text":"# encoding: utf-8\n\"\"\"\n@author: yen-nan ho\n@contact: aaron1aaron2@gmail.com\n@gitHub: https://github.com/aaron1aaron2\n@Create Date: 2021/1/5\n\"\"\"\nimport zipfile\nimport os\nimport shutil\n\n__all__ = [\"get_data\"]\n\ndef get_data(path, file_id=None, remove_zip=True):\n import gdown\n\n if not file_id:\n file_id = \"10YKwzUB61hIYvqXAdyMslmLPyVBCr84Z\" # file_id\n\n dataset_url = 'https://drive.google.com/uc?id={}'.format(file_id)\n \n data_zip = os.path.join(path, \"data.zip\")\n if not os.path.exists(path):\n os.makedirs(path)\n \n try:\n gdown.download(dataset_url, data_zip)\n except:\n print(\"fail to download pretrain model, please download the data at {}\".format(dataset_url))\n\n try:\n print('start extracting dataset ...')\n with zipfile.ZipFile(data_zip, 'r') as zf:\n zf.extractall(path)\n \n if remove_zip:\n os.remove(data_zip)\n \n shutil.move(os.path.join(path, \"data\"), path)\n os.remove(os.path.join(path, \"data\"))\n print('the dataset is extracted at: {}'.format(path))\n except:\n print(\"fail to extracting dataset, please check data at {}\".format(data_zip))\n\n \n\n \nif __name__ == \"__main__\":\n\n file_id = \"10YKwzUB61hIYvqXAdyMslmLPyVBCr84Z\"\n get_data(path=\"pretrain\", file_id=file_id, remove_zip=True)\n\n\n","repo_name":"jet-c-21/Horus","sub_path":"ReID/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19963113283","text":"class Song:\n \"\"\"dziesma\"\"\"\n def __init__(self, title=\"\", author=\"\", lyrics=tuple()):\n self.title = title\n self.author = author\n self.lyrics = tuple(lyrics)\n print (\"New Song made:\", title,\" - \", author)\n \n def sing(self, max_lines=-1):\n print(\"SINGING\", self.title, \"-\", self.author, \":\")\n if max_lines == -1:\n max_lines = len(self.lyrics)\n for line in self.lyrics[:max_lines]:\n print(line)\n return self\n \n def print_author_title(self):\n if self.author:\n print(self.author, end=\" - \") \n if self.title:\n print(self.title)\n else:\n print(\"=\"*40)\n\n def yell(self, max_lines=-1):\n print(\"YELLING\", self.title, \"-\", self.author, \":\")\n if max_lines == -1:\n max_lines = len(self.lyrics)\n for line in self.lyrics[:max_lines]:\n print(line.upper())\n return self\n\nsong_nr_1 = Song(\"Iron sky\", \"Paolo Nutini\"\n , [\"We are proud individuals\"\n , \"living on the city\"\n , \"But the flames couldn't go much higher\"\n , \"We find gods and religions to\"\n , \"To paint us with salvation\"\n , \"But no one\", \"No nobody\", \"Can give you the power\", \"....\"])\n\nsong_nr_1.sing().yell().sing(2).yell(3)\n\n\nsong_nr_2 = Song(\"Zaz\", \"Je Veux\", [\"Donnez-moi une suite au Ritz, je n'en veux pas!\"\n, \"Des bijoux de chez Chanel, je n'en veux pas\", \"Donnez-moi une limousine, j'en ferais quoi? 
(Papala-papapala)\"\n, \"Offrez-moi du personnel, j'en ferais quoi?\"\n, \"Un manoir à Neuchâtel, c'n'est pas pour moi\"\n, \"Offrez-moi la Tour Eiffel, j'en ferais quoi?\", \"......\"])\n\nsong_nr_2.sing().yell().sing(2).yell(3)\n\nclass Rap(Song):\n def break_it(self,max_lines=2_000_000,drop=\"yeah\"):\n self.print_author_title()\n drop = \" \" + drop.upper() + \" \"\n # if max_lines == -1:\n # max_lines = len(self.lyrics)\n for line in self.lyrics[:max_lines]:\n temp = line.split()\n temp = drop.join(temp)\n temp += drop\n print(temp) \n return self \n\nzrap = Rap(\"Ziemeļmeita\", \"Jumprava\", [\"Gāju meklēt ziemeļmeitu\",\"Garu, tālu ceļu veicu\"])\n\nzrap.sing().break_it().break_it(1,\"tak\").break_it(drop=\"aha\").yell(max_lines=4)","repo_name":"ValRCS/Python_RTU_08_20","sub_path":"Diena_10_Classes_Objects/d10_s27_u1.py","file_name":"d10_s27_u1.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"5450493479","text":"\nimport logging\nimport os\nimport random\nimport re\nimport socket\nimport subprocess\nimport sys\nimport time\n\nimport unittest2\n\nimport mitogen.core\nimport mitogen.master\nimport mitogen.utils\n\ntry:\n import urlparse\nexcept ImportError:\n import urllib.parse as urlparse\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n\nLOG = logging.getLogger(__name__)\nDATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\nsys.path.append(DATA_DIR)\n\nif mitogen.is_master:\n mitogen.utils.log_to_file()\n\n\ndef data_path(suffix):\n path = os.path.join(DATA_DIR, suffix)\n if path.endswith('.key'):\n # SSH is funny about private key permissions.\n os.chmod(path, int('0600', 8))\n return path\n\n\ndef subprocess__check_output(*popenargs, **kwargs):\n # Missing from 2.6.\n process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)\n output, _ = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = popenargs[0]\n raise subprocess.CalledProcessError(retcode, cmd)\n return output\n\nif hasattr(subprocess, 'check_output'):\n subprocess__check_output = subprocess.check_output\n\n\ndef wait_for_port(\n host,\n port,\n pattern=None,\n connect_timeout=0.5,\n receive_timeout=0.5,\n overall_timeout=5.0,\n sleep=0.1,\n ):\n \"\"\"Attempt to connect to host/port, for upto overall_timeout seconds.\n If a regex pattern is supplied try to find it in the initial data.\n Return None on success, or raise on error.\n \"\"\"\n start = time.time()\n end = start + overall_timeout\n addr = (host, port)\n\n while time.time() < end:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(connect_timeout)\n try:\n sock.connect(addr)\n except socket.error:\n # Failed to connect. So wait then retry.\n time.sleep(sleep)\n continue\n\n if not pattern:\n # Success: We connected & there's no banner check to perform.\n sock.shutdown(socket.SHUTD_RDWR)\n sock.close()\n return\n\n sock.settimeout(receive_timeout)\n data = mitogen.core.b('')\n found = False\n while time.time() < end:\n try:\n resp = sock.recv(1024)\n except socket.timeout:\n # Server stayed up, but had no data. Retry the recv().\n continue\n\n if not resp:\n # Server went away. 
Wait then retry the connection.\n time.sleep(sleep)\n break\n\n data += resp\n if re.search(mitogen.core.b(pattern), data):\n found = True\n break\n\n try:\n sock.shutdown(socket.SHUT_RDWR)\n except socket.error:\n e = sys.exc_info()[1]\n # On Mac OS X - a BSD variant - the above code only succeeds if the\n # operating system thinks that the socket is still open when\n # shutdown() is invoked. If Python is too slow and the FIN packet\n # arrives before that statement can be reached, then OS X kills the\n # sock.shutdown() statement with:\n #\n # socket.error: [Errno 57] Socket is not connected\n #\n # Protect shutdown() with a try...except that catches the\n # socket.error, test to make sure Errno is right, and ignore it if\n # Errno matches.\n if e.errno == 57:\n pass\n else:\n raise\n sock.close()\n\n if found:\n # Success: We received the banner & found the desired pattern\n return\n else:\n # Failure: The overall timeout expired\n if pattern:\n raise socket.timeout('Timed out while searching for %r from %s:%s'\n % (pattern, host, port))\n else:\n raise socket.timeout('Timed out while connecting to %s:%s'\n % (host, port))\n\n\ndef sync_with_broker(broker, timeout=10.0):\n \"\"\"\n Insert a synchronization barrier between the calling thread and the Broker\n thread, ensuring it has completed at least one full IO loop before\n returning.\n\n Used to block while asynchronous stuff (like defer()) happens on the\n broker.\n \"\"\"\n sem = mitogen.core.Latch()\n broker.defer(sem.put, None)\n sem.get(timeout=10.0)\n\n\nclass CaptureStreamHandler(logging.StreamHandler):\n def __init__(self, *args, **kwargs):\n super(CaptureStreamHandler, self).__init__(*args, **kwargs)\n self.msgs = []\n\n def emit(self, msg):\n self.msgs.append(msg)\n return super(CaptureStreamHandler, self).emit(msg)\n\n\nclass LogCapturer(object):\n def __init__(self, name=None):\n self.sio = StringIO()\n self.logger = logging.getLogger(name)\n self.handler = CaptureStreamHandler(self.sio)\n self.old_propagate = self.logger.propagate\n self.old_handlers = self.logger.handlers\n self.old_level = self.logger.level\n\n def start(self):\n self.logger.handlers = [self.handler]\n self.logger.propagate = False\n self.logger.level = logging.DEBUG\n\n def raw(self):\n return self.sio.getvalue()\n\n def msgs(self):\n return self.handler.msgs\n\n def __enter__(self):\n self.start()\n return self\n\n def __exit__(self, _1, _2, _3):\n self.stop()\n\n def stop(self):\n self.logger.level = self.old_level\n self.logger.handlers = self.old_handlers\n self.logger.propagate = self.old_propagate\n return self.raw()\n\n\nclass TestCase(unittest2.TestCase):\n def assertRaises(self, exc, func, *args, **kwargs):\n \"\"\"Like regular assertRaises, except return the exception that was\n raised. 
Can't use context manager because tests must run on Python2.4\"\"\"\n try:\n func(*args, **kwargs)\n except exc:\n e = sys.exc_info()[1]\n return e\n except BaseException:\n LOG.exception('Original exception')\n e = sys.exc_info()[1]\n assert 0, '%r raised %r, not %r' % (func, e, exc)\n assert 0, '%r did not raise %r' % (func, exc)\n\n\ndef get_docker_host():\n url = os.environ.get('DOCKER_HOST')\n if url in (None, 'http+docker://localunixsocket'):\n return 'localhost'\n\n parsed = urlparse.urlparse(url)\n return parsed.netloc.partition(':')[0]\n\n\nclass DockerizedSshDaemon(object):\n image = None\n\n def get_image(self):\n if not self.image:\n distro = os.environ.get('MITOGEN_TEST_DISTRO', 'debian')\n self.image = 'mitogen/%s-test' % (distro,)\n return self.image\n\n # 22/tcp -> 0.0.0.0:32771\n PORT_RE = re.compile(r'([^/]+)/([^ ]+) -> ([^:]+):(.*)')\n port = None\n\n def _get_container_port(self):\n s = subprocess__check_output(['docker', 'port', self.container_name])\n for line in s.decode().splitlines():\n dport, proto, baddr, bport = self.PORT_RE.match(line).groups()\n if dport == '22' and proto == 'tcp':\n self.port = int(bport)\n\n self.host = self.get_host()\n if self.port is None:\n raise ValueError('could not find SSH port in: %r' % (s,))\n\n def start_container(self):\n self.container_name = 'mitogen-test-%08x' % (random.getrandbits(64),)\n args = [\n 'docker',\n 'run',\n '--detach',\n '--privileged',\n '--publish-all',\n '--name', self.container_name,\n self.get_image()\n ]\n subprocess__check_output(args)\n self._get_container_port()\n\n def __init__(self):\n self.start_container()\n\n def get_host(self):\n return get_docker_host()\n\n def wait_for_sshd(self):\n wait_for_port(self.get_host(), self.port, pattern='OpenSSH')\n\n def close(self):\n args = ['docker', 'rm', '-f', self.container_name]\n subprocess__check_output(args)\n\n\nclass BrokerMixin(object):\n broker_class = mitogen.master.Broker\n\n def setUp(self):\n super(BrokerMixin, self).setUp()\n self.broker = self.broker_class()\n\n def tearDown(self):\n self.broker.shutdown()\n self.broker.join()\n super(BrokerMixin, self).tearDown()\n\n def sync_with_broker(self):\n sync_with_broker(self.broker)\n\n\nclass RouterMixin(BrokerMixin):\n router_class = mitogen.master.Router\n\n def setUp(self):\n super(RouterMixin, self).setUp()\n self.router = self.router_class(self.broker)\n\n\nclass DockerMixin(RouterMixin):\n @classmethod\n def setUpClass(cls):\n super(DockerMixin, cls).setUpClass()\n cls.dockerized_ssh = DockerizedSshDaemon()\n cls.dockerized_ssh.wait_for_sshd()\n\n @classmethod\n def tearDownClass(cls):\n cls.dockerized_ssh.close()\n super(DockerMixin, cls).tearDownClass()\n\n def docker_ssh(self, **kwargs):\n kwargs.setdefault('hostname', self.dockerized_ssh.host)\n kwargs.setdefault('port', self.dockerized_ssh.port)\n kwargs.setdefault('check_host_keys', 'ignore')\n kwargs.setdefault('ssh_debug_level', 3)\n return self.router.ssh(**kwargs)\n\n def docker_ssh_any(self, **kwargs):\n return self.docker_ssh(\n username='mitogen__has_sudo_nopw',\n password='has_sudo_nopw_password',\n )\n","repo_name":"ConnectBox/wifi-test-framework","sub_path":"ansible/plugins/mitogen-0.2.3/tests/testlib.py","file_name":"testlib.py","file_ext":"py","file_size_in_byte":9522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39012802963","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.linear_model import 
LinearRegression, Lasso\r\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\r\nfrom sklearn.model_selection import GridSearchCV\r\nfrom sklearn.model_selection import train_test_split, cross_val_score\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\r\n\r\ndf_3 = pd.read_csv(\"D:/DS Contests/1 Accenture/4 Datasets by Others/DOE data 3.csv\")\r\ndf_3 = df_3.drop(columns=[\"Unnamed: 0\"])\r\ndf_3[\"YYYY-MM\"] = pd.to_datetime(df_3[\"YYYY-MM\"])\r\n\r\n\r\ndf_3_Luzon = df_3.loc[df_3[\"Grid\"] == \"Luzon\"].sort_values(by=\"YYYY-MM\")\r\ntime = df_3_Luzon[\"YYYY-MM\"]\r\n# print(len(time))\r\n\r\ndf_3_Luzon_Peak_Demand = df_3_Luzon.drop(columns=\"YYYY-MM\")\r\ndf_3_Luzon_Peak_Demand = df_3_Luzon_Peak_Demand.drop(columns=\"Grid\")\r\n\r\nfuture_days = 40\r\ndf_3_Luzon_Peak_Demand[\"Prediction\"] = df_3_Luzon_Peak_Demand[[\"Peak Demand (MW)\"]].shift(-future_days)\r\n# print(df_3_Luzon_Peak_Demand)\r\nX = np.array(df_3_Luzon_Peak_Demand.drop([\"Prediction\"], 1))[:-future_days]\r\n# print(len(X))\r\ny = np.array(df_3_Luzon_Peak_Demand[\"Prediction\"])[:-future_days]\r\n# print(len(y))\r\n\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.20, shuffle=False)\r\n\r\nfrom sklearn.linear_model import SGDRegressor\r\nfrom sklearn.kernel_ridge import KernelRidge\r\nfrom sklearn.linear_model import ElasticNet\r\nfrom sklearn.linear_model import BayesianRidge\r\nfrom sklearn.svm import SVR\r\nfrom lightgbm import LGBMRegressor\r\n\r\nlgbm = LGBMRegressor().fit(x_train, y_train)\r\nkr = KernelRidge().fit(x_train, y_train)\r\nen = ElasticNet().fit(x_train, y_train)\r\nbr = BayesianRidge().fit(x_train, y_train)\r\nsvr = SVR().fit(x_train, y_train)\r\n\r\ntree = DecisionTreeRegressor().fit(x_train, y_train)\r\nlr = LinearRegression().fit(x_train, y_train)\r\ncvs = np.mean(cross_val_score(lr, x_train, y_train, scoring=\"neg_mean_absolute_error\"))\r\nprint(cvs)\r\nrf = RandomForestRegressor().fit(x_train, y_train)\r\n# lasso = Lasso(alpha=1).fit(x_train, y_train)\r\n# csv_lasso = np.mean(cross_val_score(lasso, x_train, y_train, scoring=\"neg_mean_absolute_error\"))\r\n# print(csv_lasso)\r\nlasso = Lasso(alpha=1).fit(x_train, y_train)\r\ncsv_lasso = np.mean(cross_val_score(lasso, x_train, y_train, scoring=\"neg_mean_absolute_error\"))\r\nprint(csv_lasso)\r\nxgb = GradientBoostingRegressor().fit(x_train, y_train)\r\n\r\n\r\ndef find_alpha():\r\n alpha = []\r\n error = []\r\n b = 8e-10\r\n for i in range(1, 100):\r\n alpha.append(b * i)\r\n lml = Lasso(alpha=(b * i))\r\n error.append(np.mean(cross_val_score(lml, x_train, y_train, scoring=\"neg_mean_absolute_error\")))\r\n\r\n plt.plot(alpha, error)\r\n plt.show()\r\n\r\n\r\nparameters = {\"n_estimators\": range(10, 100, 10), \"criterion\": (\"mse\", \"mae\"), \"max_features\": (\"auto\", \"sqrt\")}\r\n# gs = GridSearchCV(rf, parameters, scoring=\"neg_mean_absolute_error\").fit(x_train, y_train)\r\n\r\nx_future = df_3_Luzon_Peak_Demand.drop([\"Prediction\"], 1)[:-future_days]\r\nx_future = x_future.tail(future_days)\r\nx_future = np.array(x_future)\r\n# print(len(x_future))\r\n# print(len(x_test))\r\n\r\nlgbm_prediction = lgbm.predict(x_future)\r\nkr_prediction = kr.predict(x_future)\r\nen_prediction = en.predict(x_future)\r\nbr_prediction = br.predict(x_future)\r\nsvr_prediction = svr.predict(x_future)\r\n\r\ntree_prediction = tree.predict(x_future)\r\nlr_prediction = lr.predict(x_future)\r\nlasso_prediction = 
lasso.predict(x_future)\r\nxgb_prediction = xgb.predict(x_future)\r\nrf_prediction = rf.predict(x_future)\r\n# gs_rf_prediction = gs.best_estimator_.predict(x_future)\r\navg_prediction = (0*lasso_prediction + 2*lr_prediction + xgb_prediction)/3\r\navg_prediction_2 = (0*lasso_prediction + 2*lr_prediction + rf_prediction)/3\r\n# print(x_test[-45:-35])\r\n# print(x_future[-45:-35])\r\n\r\n\r\ndef plot_year_vs_PD(method, title):\r\n valid = df_3_Luzon_Peak_Demand[X.shape[0]:]\r\n valid[\"Predictions\"] = method\r\n # print(time[len(time) - future_days:])\r\n plt.plot(time, df_3_Luzon_Peak_Demand.loc[:, \"Peak Demand (MW)\"], label=\"Actual\")\r\n # plt.plot(time[len(time)-future_days:], valid[\"Peak Demand (MW)\"])\r\n plt.plot(time[len(time) - future_days:], valid[\"Predictions\"], label=\"Predictions\")\r\n plt.legend()\r\n plt.title(title)\r\n plt.show()\r\n rmse = mean_squared_error(valid[\"Peak Demand (MW)\"], valid[\"Predictions\"])\r\n mae = mean_absolute_error(valid[\"Peak Demand (MW)\"], valid[\"Predictions\"])\r\n # print(title, \" rmse: \", rmse)\r\n print(title, \" mae: \", mae)\r\n # print(y_test)\r\n # print(valid[\"Peak Demand (MW)\"])\r\n\r\n\r\n# plot_year_vs_PD(tree_prediction, \"Decision Tree Regression\")\r\nplot_year_vs_PD(lgbm_prediction, \"lgbm Regression\")\r\nplot_year_vs_PD(kr_prediction, \"KR Regression\")\r\nplot_year_vs_PD(en_prediction, \"EN Regression\")\r\nplot_year_vs_PD(br_prediction, \"BR Regression\")\r\nplot_year_vs_PD(svr_prediction, \"SVR Regression\")\r\n# plot_year_vs_PD(avg_prediction, \"Average of 2xOLS and XGB\")\r\n# plot_year_vs_PD(avg_prediction_2, \"Average of 2xOLS and RF\")\r\n","repo_name":"csvaldellon/RenewabALL","sub_path":"ML C DOE data 7.py","file_name":"ML C DOE data 7.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6116873590","text":"import json\nimport numpy as np\nimport os\nimport re\nimport unicodedata\nimport random\n\ndef unicodeToAscii(s):\n return ''.join(\n c for c in unicodedata.normalize('NFD', s)\n if unicodedata.category(c) != 'Mn'\n )\n\ndef normalizeString(s):\n s = unicodeToAscii(s.lower().strip())\n s = re.sub(r\"([.!?])\", r\"\", s)\n s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n s = re.sub(r\"\\s+\", r\" \", s).strip()\n return s\n\ndef readLabel(dataPath):\n vocabs = {}\n padding = {}\n vocabs_count = 0\n with open(dataPath) as jsonFile:\n label = json.loads(jsonFile.read())\n for index, item in enumerate(label):\n print(\"\\rIteration: {}, read words in {}\".format(index+1, item['id']), end='', flush=True)\n for s in item['caption']:\n words = normalizeString(s).split(' ')\n if len(words) < 30:\n for w in words:\n if w not in vocabs:\n vocabs[w] = vocabs_count\n vocabs_count += 1\n vocabs['EOS'] = vocabs_count\n vocabs['BOS'] = vocabs_count + 1\n print('\\nTotal words: {}'.format(len(vocabs)))\n for index, item in enumerate(label):\n print(\"\\rIteration: {}, padding words in {}\".format(index+1, item['id']), end='', flush=True)\n padding[item['id']] = []\n for s in item['caption']:\n words = normalizeString(s).split(' ')\n if len(words) < 9:\n words.insert(0,'BOS')\n words.append('EOS')\n one = np.zeros((10,len(vocabs)))\n for w_index, w in enumerate(words):\n one[w_index][vocabs[w]] = 1\n padding[item['id']].append(one)\n print()\n return padding, vocabs\n\ndef readTraingFeature():\n training_Y_dic, vocabs = readLabel('./data/training_label.json')\n training_Y = []\n training_data = []\n for dirPath, dirNames, 
fileNames in os.walk(\"./data/training_data/feat/\"):\n for index, f in enumerate(fileNames):\n print(\"\\rIteration: {}, read {}\".format(index+1, f), end='', flush=True)\n data = np.load(dirPath + f)\n random_caption_index = random.randint(0, len(training_Y_dic[f[:-4]])-1)\n training_Y.append(training_Y_dic[f[:-4]][random_caption_index])\n training_data.append(data)\n print(\"\\nTransform to np array\")\n training_data = np.array(training_data)\n training_Y = np.array(training_Y)\n return training_data, training_Y\n\nif __name__ == '__main__':\n readTraingFeature()\n","repo_name":"ryanC1993/ADLxMLDS2017","sub_path":"hw2/parseData.py","file_name":"parseData.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13484659994","text":"#!/usr/bin/python\r\nimport sqlite3\r\nfrom sqlite3 import Error\r\nfrom SafeZone.logs import logger\r\n\r\n\r\ndb_path = r\"C:\\Shares\\School\\Vakken\\p3\\Project\\SafeZone\\db\\Project.db\"\r\n\r\n\r\ndef create_connection(db_file):\r\n global conn\r\n \"\"\" create a database connection to a SQLite database \"\"\"\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file, timeout=10)\r\n except Error as e:\r\n logger.warning(e)\r\n finally:\r\n return conn\r\n\r\n\r\ndef submit_data_to_db(R_ID, T_ID, ts):\r\n try:\r\n conn = create_connection(db_path)\r\n sql = \"INSERT INTO hits(Reader_ID, Tag_ID, ts) VALUES('{}',{},{})\".format(str(R_ID), T_ID, str(ts))\r\n conn.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n except Exception as e:\r\n logger.warning(e)\r\n\r\n\r\ndef submit_reader_to_db(R_ID,ts):\r\n try:\r\n conn = create_connection(db_path)\r\n sql = \"INSERT INTO readers(Reader_ID, last_online) VALUES('{}',{})\".format(str(R_ID), str(ts))\r\n conn.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n except Exception as e:\r\n logger.warning(e)\r\n\r\n\r\ndef update_reader_last_seen(R_ID,ts):\r\n try:\r\n conn = create_connection(db_path)\r\n sql = \"UPDATE readers SET last_online = '{}' WHERE Reader_ID = '{}'\".format(str(ts), str(R_ID))\r\n conn.execute(sql)\r\n conn.commit()\r\n conn.close()\r\n except Exception as e:\r\n logger.warning(e)\r\n\r\n\r\ndef get_person_from_T_id(T_ID):\r\n try:\r\n conn = create_connection(db_path)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT Name FROM clients WHERE Tag = {}\".format(T_ID))\r\n query_result = cur.fetchone()\r\n conn.close()\r\n if query_result is not None:\r\n return query_result\r\n else:\r\n return False\r\n except Exception as e:\r\n logger.warning(e)\r\n return False\r\n\r\n\r\ndef get_all_clients():\r\n try:\r\n conn = create_connection(db_path)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT Name, Tag, ID FROM clients\")\r\n query_result = cur.fetchall()\r\n conn.close()\r\n return query_result\r\n except Exception as e:\r\n logger.warning(e)\r\n return False\r\n\r\n\r\ndef get_all_readers():\r\n try:\r\n conn = create_connection(db_path)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT Reader_ID, last_online FROM readers\")\r\n query_result = cur.fetchall()\r\n conn.close()\r\n return query_result\r\n except Exception as e:\r\n logger.warning(e)\r\n return False\r\n\r\ndef get_last_hit(client_id):\r\n conn = create_connection(db_path)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT Tag FROM clients WHERE ID = {};\".format(client_id))\r\n query_result = cur.fetchone()\r\n conn.close()\r\n conn = create_connection(db_path)\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT max(ts) Time, 
Reader_ID FROM hits WHERE Tag_ID = {};\".format(query_result[0]))\r\n query_result = cur.fetchone()\r\n conn.close()\r\n return [query_result[0], query_result[1]]\r\n\r\n","repo_name":"AboveColin/SafeZone","sub_path":"SafeZone/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"74052315125","text":"#!/usr/bin/python\n# encoding: utf-8\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport collections\nfrom PIL import Image\nimport numpy as np\nimport random\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import sampler\nimport torchvision.transforms as transforms\nimport lmdb\nimport six\nimport sys\nimport cv2\n\n\nclass lmdbDataset(Dataset):\n\n def __init__(self, root=None, transform=None, target_transform=None):\n self.env = lmdb.open(\n root,\n max_readers=1,\n readonly=True,\n lock=False,\n readahead=False,\n meminit=False)\n\n if not self.env:\n print('cannot creat lmdb from %s' % (root))\n sys.exit(0)\n\n with self.env.begin(write=False) as txn:\n nSamples = int(txn.get('num-samples'.encode()))\n self.nSamples = nSamples\n\n self.transform = transform\n self.target_transform = target_transform\n\n def __len__(self):\n return self.nSamples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n index += 1\n with self.env.begin(write=False) as txn:\n img_key = 'image-%09d' % index\n imgbuf = txn.get(img_key.encode())\n\n buf = six.BytesIO()\n buf.write(imgbuf)\n buf.seek(0)\n try:\n img = Image.open(buf).convert('L')\n except IOError:\n print('Corrupted image for %d' % index)\n return self[index + 1]\n\n if self.transform is not None:\n img = self.transform(img)\n\n label_key = 'label-%09d' % index\n label = txn.get(label_key.encode()).decode()\n\n if self.target_transform is not None:\n label = self.target_transform(label)\n\n return (img, label)\n\n\nclass resizeNormalize(object):\n\n def __init__(self, size_h, size_w=None, interpolation=Image.BILINEAR):\n self.size_h = size_h\n self.size_w = size_w\n self.interpolation = interpolation\n self.toTensor = transforms.ToTensor()\n\n def __call__(self, img):\n '''\n # 竖版文字支持,转90度处理\n w, h = img.size\n if h > w*1.5:\n img = img.rotate(90, expand=1)\n\n img = img.resize(self.size, self.interpolation)\n\n img = self.toTensor(img)\n img.sub_(0.5).div_(0.5)\n return img\n '''\n\n # 竖版文字支持,转90度处理\n w, h = img.size\n if h > w*1.5:\n img = img.rotate(90, expand=1)\n\n if self.size_w == None:\n scale = img.size[1]*1.0 / self.size_h\n w = img.size[0] / scale\n w = int(w)\n img = img.resize((w, self.size_h), self.interpolation)\n else:\n img = img.resize((self.size_w, self.size_h), self.interpolation)\n\n img = (np.array(img)/255.0-0.5)/0.5\n img = torch.Tensor(img).view(1, img.shape[0], img.shape[1])\n return img\n\n\nclass strLabelConverter(object):\n \"\"\"Convert between str and label.\n\n NOTE:\n Insert `blank` to the alphabet for CTC.\n\n Args:\n alphabet (str): set of the possible characters.\n ignore_case (bool, default=True): whether or not to ignore all of the case.\n \"\"\"\n\n def __init__(self, alphabet, ignore_case=False):\n self._ignore_case = ignore_case\n if self._ignore_case:\n alphabet = alphabet.lower()\n self.alphabet = alphabet # + '-' # for `-1` index\n\n self.dict = {}\n for i, char in enumerate(alphabet):\n # NOTE: 0 is reserved for 'blank' required by wrap_ctc\n self.dict[char] = i\n\n def encode(self, text):\n \"\"\"Support 
batch or single str.\n\n        Args:\n            text (str or list of str): texts to convert.\n\n        Returns:\n            torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.\n            torch.IntTensor [n]: length of each text.\n        \"\"\"\n        if isinstance(text, str):\n            # print(str(text))\n            text = [\n                self.dict[char.lower() if self._ignore_case else char]\n                for char in text\n            ]\n            length = [len(text)]\n        elif isinstance(text, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10; use collections.abc\n            length = [len(s) for s in text]\n            text = ''.join(text)\n            text, _ = self.encode(text)\n        return (torch.IntTensor(text), torch.IntTensor(length))\n\n    def decode(self, t, length, raw=False):\n        \"\"\"Decode encoded texts back into strs.\n\n        Args:\n            torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.\n            torch.IntTensor [n]: length of each text.\n\n        Raises:\n            AssertionError: when the texts and its length does not match.\n\n        Returns:\n            text (str or list of str): texts to convert.\n        \"\"\"\n        if length.numel() == 1:\n            length = length[0]\n            assert t.numel() == length, \"text with length: {} does not match declared length: {}\".format(\n                t.numel(), length)\n            if raw:\n                # for i in t:\n                #     print(self.alphabet[i])\n                return ''.join([self.alphabet[i] for i in t])\n            else:\n                char_list = []\n                for i in range(length):\n                    if t[i] != len(self.alphabet) - 1 and (not (i > 0 and t[i - 1] == t[i])):\n                        char_list.append(self.alphabet[t[i]])\n                return ''.join(char_list)\n        else:\n            # batch mode\n            assert t.numel() == length.sum(\n            ), \"texts with length: {} does not match declared length: {}\".format(t.numel(), length.sum())\n            texts = []\n            index = 0\n            for i in range(length.numel()):\n                l = length[i]\n                texts.append(\n                    self.decode(\n                        t[index:index + l], torch.IntTensor([l]), raw=raw))\n                index += l\n            return texts\n\n\nclass averager(object):\n    \"\"\"Compute average for `torch.Variable` and `torch.Tensor`. \"\"\"\n\n    def __init__(self):\n        self.reset()\n\n    def add(self, v):\n        if isinstance(v, Variable):\n            count = v.data.numel()\n            v = v.data.sum()\n        elif isinstance(v, torch.Tensor):\n            count = v.numel()\n            v = v.sum()\n\n        self.n_count += count\n        self.sum += v\n\n    def reset(self):\n        self.n_count = 0\n        self.sum = 0\n\n    def val(self):\n        res = 0\n        if self.n_count != 0:\n            res = self.sum / float(self.n_count)\n        return res\n","repo_name":"zhaobomin/pytorch-ocr","sub_path":"model_crnn/helper/dataset_crnn.py","file_name":"dataset_crnn.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
+{"seq_id":"26137588711","text":"#https://school.programmers.co.kr/learn/courses/30/lessons/12902#\ndef solution(n):\n    if n%2 : return 0\n    else:\n        dp = [2]*(n//2)\n        dp[0]=3\n        s = 0 \n        for i in range(n//2-1):\n            dp[i+1] += (dp[i]*3 +s)%1000000007\n            s+=dp[i]*2%1000000007\n        return dp[-1]\n","repo_name":"jun981015/algorithm","sub_path":"programmers/3n타일링.py","file_name":"3n타일링.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"33917569116","text":"\"\"\"\nA binary tree is univalued if every node in the tree has the same value. Return true if and only if the given tree is univalued.\n\nExample 1:\n\n    Input: [1,1,1,1,1,null,1]\n    Output: true\n\nExample 2:\n\n    Input: [2,2,2,5,2]\n    Output: false\n\nNote:\n    1. The number of nodes in the given tree will be in the range [1, 100].\n    2.
Each node's value will be an integer in the range [0, 99].\n\"\"\"\n\n\nclass TreeNode:\n \"\"\"Definition for a binary tree node.\"\"\"\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def isUnivalTree(self, root: TreeNode) -> bool:\n self.value = root.val\n def helper(node):\n if node:\n if node.val != self.value:\n return False\n else:\n return helper(node.left) and helper(node.right)\n else:\n return True\n \n return helper(root)\n","repo_name":"chaosWsF/Python-Practice","sub_path":"leetcode/0965_univalued_binary_tree.py","file_name":"0965_univalued_binary_tree.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"71835682487","text":"import pytest\nimport torch\nimport torch.nn.functional as F\nfrom einops import rearrange\n\nfrom pipegoose.distributed.parallel_context import ParallelContext\nfrom pipegoose.distributed.parallel_mode import ParallelMode\nfrom pipegoose.nn.tensor_parallel.loss import VocabParallelCrossEntropy\nfrom pipegoose.testing.utils import spawn\n\n\ndef check_equal(A, B):\n assert torch.allclose(A, B, rtol=1e-3, atol=1e-1) or torch.allclose(A, B)\n\n\ndef run_parallel_cross_entropy(\n rank, world_size, port, tensor_parallel_size, pipeline_parallel_size, data_parallel_size, logits, targets, loss, grads\n):\n def get_partition(logits):\n local_world_size = parallel_context.get_world_size(parallel_mode=ParallelMode.TENSOR)\n per_partition = N_LABELS // local_world_size\n chunks = torch.split(logits, per_partition, dim=-1)\n return chunks[local_rank]\n\n parallel_context = ParallelContext(\n rank=rank,\n local_rank=rank,\n world_size=world_size,\n local_world_size=world_size,\n host=\"localhost\",\n port=port,\n seed=69,\n backend=\"gloo\",\n tensor_parallel_size=tensor_parallel_size,\n pipeline_parallel_size=pipeline_parallel_size,\n data_parallel_size=data_parallel_size,\n )\n\n local_rank = parallel_context.get_local_rank(parallel_mode=ParallelMode.TENSOR)\n ranks_in_group = parallel_context.get_ranks_in_group(parallel_mode=ParallelMode.TENSOR)\n\n if local_rank in ranks_in_group:\n N_LABELS = logits.shape[-1]\n parallel_logits = get_partition(logits)\n parallel_logits.requires_grad = True\n\n parallel_cross_entropy = VocabParallelCrossEntropy(parallel_context=parallel_context)\n parallel_loss = parallel_cross_entropy(parallel_logits, targets)\n\n assert torch.allclose(parallel_loss, loss)\n\n # parallel_loss.backward()\n # assert torch.allclose(parallel_logits.grad.data, get_partition(grads))\n\n\n@pytest.mark.parametrize(\"tensor_parallel_size\", [1, 2])\ndef test_parallel_cross_entropy(tensor_parallel_size):\n PIPELINE_PARALLEL_SIZE = 1\n DATA_PARALLEL_SIZE = 1\n\n BATCH_SIZE = 1\n SEQ_LEN = 2\n VOCAB_SIZE = 4\n\n torch.manual_seed(69)\n\n logits = torch.randn(BATCH_SIZE, SEQ_LEN, VOCAB_SIZE, requires_grad=True)\n targets = torch.randint(0, VOCAB_SIZE, (BATCH_SIZE, SEQ_LEN))\n\n loss = F.cross_entropy(\n rearrange(logits, \"batch_size seq_len vocab_size -> (batch_size seq_len) vocab_size\"),\n rearrange(targets, \"batch_size seq_len -> (batch_size seq_len)\"),\n )\n\n loss.backward()\n grads = logits.grad.data\n\n spawn(\n run_parallel_cross_entropy,\n world_size=tensor_parallel_size,\n tensor_parallel_size=tensor_parallel_size,\n pipeline_parallel_size=PIPELINE_PARALLEL_SIZE,\n data_parallel_size=DATA_PARALLEL_SIZE,\n logits=logits.detach(),\n targets=targets,\n 
loss=loss.detach(),\n grads=grads.detach(),\n )\n","repo_name":"xrsrke/pipegoose","sub_path":"tests/nn/tensor_parallel/test_loss.py","file_name":"test_loss.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"76"} +{"seq_id":"70662101685","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nimport time\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom layer.discriminator32 import Discriminator512\nfrom layer.generator32 import Generator512\nfrom layer.loss import *\nfrom layer.dataloader import *\nfrom tqdm import tqdm\nimport numpy as np \nimport pandas as pd \nfrom tensorflow_addons.optimizers import AdamW\n\nnp.random.seed(1234)\n\nprint(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n\nBATCH_SIZE = 64 #32, 64, 128 \nSTEPS_PER_EPOCH = 2185 #2000 1171\nEPOCHS = 2000 #60\n# KERNEL_SIZE = (5, 5)\nGENERATOR_INPUT = (4, 4, 16) #(4, 4, 16)\nLR_GEN = 1e-4 #1e-4\nLR_DISC = 4e-4 #4e-4\n\n#num_examples_to_generate = 4\n#seed = tf.random.normal([num_examples_to_generate, 8, 8, 1])\npath = './data/training_dataset/*.png' #*.png'\nhistory_path = './ADAM/history.xlsx' #'./ADAM/history/history.xlsx'\n#====================================================\n# lr_schedule = keras.optimizers.schedules.ExponentialDecay(\n# initial_learning_rate=1e-5,\n# decay_steps=5000,\n# decay_rate=0.9\n# )\n# lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay([8000], [1e-5, 5e-6])\n# TODO: Try AdamW done\n# TODO: Try turning on amsgrad option to TRUE -- did not work\ncross_entropy = keras.losses.BinaryCrossentropy(from_logits=False)\ngenerator_optimizer = keras.optimizers.Adam(learning_rate=LR_GEN, beta_1=0.9, beta_2=0.99)\ndiscriminator_optimizer = keras.optimizers.Adam(learning_rate=LR_DISC, beta_1=0.9, beta_2=0.99)\n# generator_optimizer = tf.keras.optimizers.RMSprop(LR_GEN)\n# discriminator_optimizer = tf.keras.optimizers.RMSprop(LR_DISC)\n# generator_optimizer = AdamW(weight_decay=1e-5, learning_rate=LR_GEN, beta_1=0.9, beta_2=0.99, amsgrad=False)\n# discriminator_optimizer = AdamW(weight_decay=1e-5, learning_rate=LR_DISC, beta_1=0.9, beta_2=0.99, amsgrad=False)\n\n\ngenerator = Generator512(generator_input_shape=GENERATOR_INPUT, kernel_size=(7, 7))\ndiscriminator = Discriminator512(discriminator_input_shape=(64, 64, 1), kernel_size=(3, 3))\n\nprint(generator.summary())\nprint(discriminator.summary())\nprint('--- Loading Data ---')\n\ndataset = generate_dataset(mode='train', file_path=path,\n batch_size=BATCH_SIZE,\n patch_size=64,\n num_threads=16,) #16\ntime.sleep(0.5)\nprint('========================================')\ntime.sleep(0.5)\nprint('--- Successfully Loaded ---')\ntime.sleep(0.5)\nprint('========================================')\ntime.sleep(0.5)\n#checkpoint_dir = './training_checkpoints'\ncheckpoint_dir = './ADAM/ckpt_32' #'./LR5_5/64/checkpoints16' for adamw use history_cleaned\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\n@tf.function\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, *GENERATOR_INPUT])\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True)\n\n real_output = discriminator(images, training=True)\n fake_output = 
discriminator(generated_images, training=True)\n\n        gen_loss = generator_loss(fake_output, cross_entropy)\n        disc_loss = discriminator_loss(real_output, fake_output, cross_entropy)\n\n    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)\n    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n\n    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n\n    return {'gen_loss': gen_loss, \n            'disc_loss': disc_loss, \n            'gen_lr': generator_optimizer._decayed_lr(tf.float32), \n            'disc_lr': discriminator_optimizer._decayed_lr(tf.float32)}\n\ndef train(dataset, epochs, steps_per_epoch = 2000):\n    outer = tqdm(total=epochs, desc='EPOCH', leave=False, position=0)\n    history = pd.DataFrame()\n    for epoch in range(epochs):\n        inner = tqdm(total=steps_per_epoch, desc='Steps', leave=False, position=1)\n        step = 0\n        for image in dataset:\n            lr = train_step(image)\n            \n            inner.update(1)\n            if step == steps_per_epoch:\n                inner.close()\n                break\n            step = step + 1\n\n        lr['gen_loss'] = lr['gen_loss'].numpy()\n        lr['disc_loss'] = lr['disc_loss'].numpy()\n        lr['gen_lr'] = lr['gen_lr'].numpy()\n        lr['disc_lr'] = lr['disc_lr'].numpy()\n        # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead\n        history = pd.concat([history, pd.DataFrame([lr])], ignore_index=True)\n        # print(history)\n        outer.update(1)\n        checkpoint.save(file_prefix=checkpoint_prefix)\n        history.to_excel(history_path)\n\n    \n\nprint('--- Started Training ---')\ntrain(dataset, EPOCHS, STEPS_PER_EPOCH)\nprint('---Training Completed Successfully---')\n","repo_name":"mahadev1995/Heightmap-Generation-with-Spatial-GAN","sub_path":"model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"25583448643","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator, EmptyPage, InvalidPage\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\n\nfrom myapp.modulos.principal.models import userSoftSystemProject\nfrom myapp.modulos.principal.functions import members_only\nfrom myapp.modulos.estado_3.models import StateThree, DefinicionRaiz, DefinicionRaizCATWOE\nfrom myapp.modulos.estado_3.forms import nombreDefinicionRaizForm, catwoeForm, addDefinicionRaizForm\nfrom myapp.modulos.estado_3.functions import propuestaDefinicionRaiz\nfrom myapp.modulos.estado_2.models import StateTwo\nfrom myapp.modulos.estado_1.forms import resumenAnalisisForm, comentaryForm\nfrom myapp.modulos.estado_1.models import Comentario\nfrom myapp.modulos.comunicacion.functions import notificar\n# Create your views here.\n@login_required(login_url='/login/')\ndef general_tres_view(request, id_ssp):\n\tif members_only(id_ssp, request):\n\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\tstateThree = StateThree.objects.get(ssp_stateThree=proyecto)\n\t\tdefinicionesRaices = stateThree.returnDefinicionesRaices()[:5]\n\t\tdestinatarios = proyecto.returnAllusers(request.user.get_username())\n\n\t\tctx = {'proyecto' : proyecto, 'destinatarios' : destinatarios, 'definicionesRaices' : definicionesRaices}\n\t\treturn render(request, 'estado_tres/estado_tres_general.html', ctx)\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef
definicionesRaiz_view(request, id_ssp, page):\n\tif members_only(id_ssp, request):\n\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\tstateThree = StateThree.objects.get(ssp_stateThree=proyecto)\n\t\tdefinicionesRaices = stateThree.returnDefinicionesRaices()\n\t\tdestinatarios = proyecto.returnAllusers(request.user.get_username())\n\n\t\tpaginator = Paginator(definicionesRaices, 5)\n\t\ttry:\n\t\t\tpagina = int(page)\n\t\texcept:\n\t\t\tpage = 1\n\t\ttry:\n\t\t\tlist_definiciones = paginator.page(pagina)\n\t\texcept (EmptyPage, InvalidPage):\n\t\t\tlist_definiciones = paginator.page(paginator.num_pages)\n\n\t\tctx = {'proyecto' : proyecto, 'destinatarios' : destinatarios, 'definicionesRaices' : list_definiciones}\n\t\treturn render(request, 'estado_tres/estado_tres_definicionesRaices.html', ctx)\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef definicionRaiz_crear_view(request, id_ssp):\n\tif members_only(id_ssp, request):\n\t\tif request.method == \"POST\":\n\t\t\tform = nombreDefinicionRaizForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tname_dr = form.cleaned_data['name_dr']\n\t\t\t\tnewDR = DefinicionRaizCATWOE.objects.create(name_dr=name_dr, created_by=request.user.get_username())\n\t\t\t\tnewDR.save()\n\n\t\t\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\t\t\tstateThree = StateThree.objects.get(ssp_stateThree=proyecto)\n\t\t\t\tstateThree.ssp_definicionesRaices.append(newDR.id)\n\t\t\t\tstateThree.save()\n\n\t\t\t\tnotificar(id_ssp, request.user.id, '/verDefinicionRaiz/%s/%s'%(id_ssp,newDR.id), 'Agrego una nueva Definicion Raiz', newDR.id, 'DefinicionRaiz')\n\n\n\t\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef definicionRaiz_eliminar_view(request, id_ssp, id_dr):\n\tif members_only(id_ssp, request):\n\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\tstateThree = StateThree.objects.get(ssp_stateThree=proyecto)\n\t\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\t\tcomentariosDefinicion = definicionRaiz.returnComments()\n\t\tdefiniciones = definicionRaiz.returnDefiniciones()\n\n\t\tfor c in comentariosDefinicion:\n\t\t\tc.delete()\n\t\t\n\t\tfor d in definiciones:\n\t\t\td.delete()\n\n\t\tdel stateThree.ssp_definicionesRaices[stateThree.ssp_definicionesRaices.index(definicionRaiz.id)]\n\n\t\tstateThree.save()\n\t\tdefinicionRaiz.delete()\n\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef definicionRaiz_desarrollo_view(request, id_ssp, id_dr):\n\tif members_only( id_ssp, request):\n\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\tdestinatarios = proyecto.returnAllusers(request.user.get_username())\n\t\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\t\tstateTwo = StateTwo.objects.get(ssp_stateTwo=proyecto)\n\t\trichPictures = stateTwo.returnRichPictures()\n\t\trichPictureFinal = definicionRaiz.returnRichPicture()\n\n\t\tformCATWOE = catwoeForm(initial={\n\t\t\t\t'clientes_dr' : definicionRaiz.clientes_dr,\n\t\t\t\t'actores_dr' : definicionRaiz.actores_dr,\n\t\t\t\t'trans_input_dr' : definicionRaiz.trans_input_dr,\n\t\t\t\t'trans_output_dr' : definicionRaiz.trans_output_dr,\n\t\t\t\t'cosmo_dr' : 
definicionRaiz.cosmo_dr,\n\t\t\t\t'propietario_dr' : definicionRaiz.propietario_dr,\n\t\t\t\t'entorno_dr' : definicionRaiz.entorno_dr,\n\t\t\t})\n\n\t\tsugerencia = propuestaDefinicionRaiz(id_dr)\n\n\n\t\tresumenForm = resumenAnalisisForm(initial={\n\t\t\t\t'description_analisis' : definicionRaiz.description_dr\n\t\t\t})\n\t\tdefinicionesRaiz = definicionRaiz.returnDefiniciones()\n\t\tdefinicionRaizFinal = definicionRaiz.returnDefinicion()\n\t\tctx = {'proyecto' : proyecto, 'destinatarios' : destinatarios, \n\t\t\t\t'definicionRaiz' : definicionRaiz, 'richPictures' : richPictures, \n\t\t\t\t'richPictureFinal' : richPictureFinal, 'formCATWOE' : formCATWOE, \n\t\t\t\t'definicionesRaiz' : definicionesRaiz, 'definicionRaizFinal' : definicionRaizFinal,\n\t\t\t\t'resumenForm' : resumenForm, 'sugerencia' : sugerencia}\n\t\treturn render(request, 'estado_tres/estado_tres_desarrollo.html', ctx)\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef definicionRaiz_adjuntarRichPicture_view(request, id_dr):\n\tidRichPicture = request.POST['e12']\n\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\tdefinicionRaiz.richPicture_dr = idRichPicture\n\tdefinicionRaiz.save()\n\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url='/login/')\ndef definicionRaiz_adjuntarDefinicionRaiz_view(request, id_dr):\n\tidDefinicionRaiz = request.POST['e13']\n\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\tdefinicionRaiz.definicionFinal_dr = idDefinicionRaiz\n\tdefinicionRaiz.save()\n\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url='/login/')\ndef definicionRaiz_newCatwoe_view(request, id_dr):\n\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\tif request.method == \"POST\":\n\t\tform = catwoeForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tclientes_dr = form.cleaned_data['clientes_dr']\n\t\t\tactores_dr = form.cleaned_data['actores_dr']\n\t\t\ttrans_input_dr = form.cleaned_data['trans_input_dr']\n\t\t\ttrans_output_dr = form.cleaned_data['trans_output_dr']\n\t\t\tcosmo_dr = form.cleaned_data['cosmo_dr']\n\t\t\tpropietario_dr = form.cleaned_data['propietario_dr']\n\t\t\tentorno_dr = form.cleaned_data['entorno_dr']\n\n\t\t\tdefinicionRaiz.clientes_dr = clientes_dr\n\t\t\tdefinicionRaiz.actores_dr = actores_dr\n\t\t\tdefinicionRaiz.trans_input_dr = trans_input_dr\n\t\t\tdefinicionRaiz.trans_output_dr = trans_output_dr\n\t\t\tdefinicionRaiz.cosmo_dr = cosmo_dr\n\t\t\tdefinicionRaiz.propietario_dr = propietario_dr\n\t\t\tdefinicionRaiz.entorno_dr = entorno_dr\n\n\t\t\tdefinicionRaiz.save()\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url='/login/')\ndef definicionRaiz_add_view(request, id_dr):\n\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\tif request.method == \"POST\":\n\t\tform = addDefinicionRaizForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tname_DR = form.cleaned_data['name_DR']\n\t\t\tdescription_DR = form.cleaned_data['description_DR']\n\t\t\tnewDR = DefinicionRaiz.objects.create(name_DR=name_DR, description_DR=description_DR, created_by=request.user.get_username())\n\t\t\tnewDR.save()\n\t\t\tdefinicionRaiz.definiciones_dr.append(newDR.id)\n\t\t\tdefinicionRaiz.save()\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\t\treturn 
HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url='/login/')\ndef DF_eliminar_view(request, id_ssp,id_dr, id_dr2):\n\tif members_only(id_ssp, request):\n\t\tdefinicionRaiz = DefinicionRaiz.objects.get(id=id_dr2)\n\t\tdefinicionRaizCat = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\n\t\tdel definicionRaizCat.definiciones_dr[definicionRaizCat.definiciones_dr.index(definicionRaiz.id)]\n\t\tif definicionRaizCat.definicionFinal_dr == definicionRaiz.id:\n\t\t\tdefinicionRaizCat.definicionFinal_dr = None\n\t\tdefinicionRaizCat.save()\n\n\t\tdefinicionRaiz.delete()\n\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\n@login_required(login_url='/login/')\ndef definicionRaiz_resumen_view(request, id_dr):\n\tif request.method == \"POST\":\n\t\tform = resumenAnalisisForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tresumen = form.cleaned_data['description_analisis']\n\t\t\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\t\t\tdefinicionRaiz.description_dr = resumen\n\t\t\tdefinicionRaiz.save()\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url='/login/')\ndef definicionRaiz_single_view(request, id_ssp,id_dr):\n\tif members_only(id_ssp, request):\n\t\ttry:\n\t\t\tproyecto = userSoftSystemProject.objects.get(id=id_ssp)\n\t\t\tdestinatarios = proyecto.returnAllusers(request.user.get_username())\n\t\t\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\t\t\tdefinicionRaizFinal = definicionRaiz.returnDefinicion()\n\t\t\trichPictureFinal = definicionRaiz.returnRichPicture()\n\t\t\tcomentarios = definicionRaiz.returnComments()\n\t\t\tctx = {'proyecto' : proyecto, 'destinatarios' : destinatarios, \n\t\t\t\t'definicionRaiz' : definicionRaiz, 'definicionRaizFinal': definicionRaizFinal,\n\t\t\t\t'richPictureFinal' : richPictureFinal, 'comentarios' : comentarios}\n\t\t\treturn render(request, 'estado_tres/estado_tres_definicionraiz_single.html', ctx)\n\t\texcept:\n\t\t\treturn render(request, 'comunicacion/error.html')\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')\n\t\t\n@login_required(login_url='/login/')\ndef definicionRaiz_comentar_view(request, id_dr, id_ssp):\n\tif members_only(id_ssp, request):\n\t\tdefinicionRaiz = DefinicionRaizCATWOE.objects.get(id=id_dr)\n\t\tuser = User.objects.get(username__exact=request.user.get_username())\n\t\tform = comentaryForm()\n\t\tif request.method == \"POST\":\n\t\t\tform = comentaryForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tcomentario = form.cleaned_data['comentary']\n\t\t\t\tnewComment = Comentario.objects.create(autor_comentary=user, content_comentary=comentario)\n\t\t\t\tnewComment.save()\n\t\t\t\tdefinicionRaiz.comments_dr.append(newComment.id)\n\t\t\t\tdefinicionRaiz.save()\n\t\t\t\tnotificar(id_ssp, request.user.id, '/verDefinicionRaiz/%s/%s'%(id_ssp,id_dr), 'Ha comentado una Definicion Raiz', id_dr, 'DefinicionRaiz')\n\n\t\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\t\t\treturn HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\telse:\n\t\treturn render(request, 'comunicacion/error.html')","repo_name":"AldoNavarreteBIGBOSS/ssmanager","sub_path":"myapp/modulos/estado_3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11201,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} 
+{"seq_id":"19317322430","text":"import cv2\nimport os\n\n# 画像を比較して類似度を得る処理\n\nIMAGE_DIR = os.path.abspath(os.path.dirname(__file__)) + '/comparison/'\n# 画像サイズが一緒でない場合は画像サイズを合わせたほうがいい\n# IMAGE_SIZE = (993, 449)\n\n# 特徴量抽出処理の選定\n# detector = cv2.AKAZE_create()\ndetector = cv2.ORB_create()\n\n# 比較元の特徴量抽出(今回はグレースケールでの抽出とする)\nSOURCE_FILE_NAME = '1.png'\nsource = cv2.imread(IMAGE_DIR + SOURCE_FILE_NAME, cv2.IMREAD_GRAYSCALE)\n# source = cv2.resize(source, IMAGE_SIZE)\n\n(source_kp, source_des) = detector.detectAndCompute(source, None)\n\n# 比較対象の設定\nTARGET_FILE_NAME = '2.png'\ntarget = cv2.imread(IMAGE_DIR + TARGET_FILE_NAME, cv2.IMREAD_GRAYSCALE)\n# target = cv2.resize(source, IMAGE_SIZE)\n\n(target_kp, target_des) = detector.detectAndCompute(target, None)\n\n# 類似度確認\n# matches = cv2.BFMatcher(cv2.NORM_HAMMING).knnMatch(source_des, target_des, k=2)\nmatches = cv2.BFMatcher(cv2.NORM_HAMMING).knnMatch(source_des, target_des, k=5)\n\n# ratio test\n# ratio = 0.75\n# similar = []\n# for m, n in matches:\n# if m.distance < ratio * n.distance:\n# similar.append([m])\n\nsimilar = []\nfor m in matches:\n distances = [info.distance for info in m]\n ratio = sum(distances)/len(m)\n similar.append(ratio)\n\n\n# 画像出力確認e\n# output = cv2.drawMatchesKnn(target, target_kp, source, source_kp, matches, None, flags=5)\n# cv2.imshow('img', output)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n# dist = []\n# for match_list in similar:\n# dist.append(match_list[0].distance)\n# ret = sum(dist) / len(dist)\nret = sum(similar) / len(similar)\n\n# 距離を算出しているので、数値が小さいほど類似度が高いといえる\nprint('類似度:' + str(ret))\n","repo_name":"mahya8585/toolbox","sub_path":"py/openCvLab/graph/comparefile_knnmatch.py","file_name":"comparefile_knnmatch.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74899813365","text":"from __future__ import absolute_import, print_function\n\nimport abc\nfrom vedis import Vedis\n\n\nclass StorageBackendBase(metaclass=abc.ABCMeta):\n @classmethod\n @abc.abstractproperty\n def __backend_name__(cls):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def __init__(self, **kwargs):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def store(self, key, field, value):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def load(self, key, field):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def delete(self, key, field):\n raise NotImplementedError()\n\n\nclass StorageBackend(StorageBackendBase):\n backends = dict()\n\n def __init_subclass__(cls, *args, **kwargs):\n cls.backends[cls.__backend_name__] = cls\n super().__init_subclass__(*args, **kwargs)\n\n @classmethod\n def get_backend(cls, backend_name, **kwargs):\n backend = cls.backends.get(backend_name)\n if backend is None:\n raise Exception(f\"{backend_name} Backend not supported\")\n return backend(**kwargs)\n\n\nclass StorageBackendVedis(StorageBackend):\n __backend_name__ = 'vedis'\n\n def __init__(self, **kwargs):\n if kwargs is None or kwargs.get('database_path') is None:\n raise Exception(\"Vedis backend requires path argument\")\n\n self.database_path = kwargs.get('database_path')\n self.db = Vedis(self.database_path)\n\n def store(self, key, field, value):\n if not all(map(lambda x: isinstance(x, str), [key, field, value])):\n raise Exception('key, field, value must be string')\n\n _hash = self.db.Hash(key)\n _hash[field] = value\n return True\n\n def load(self, key, field):\n if not all(map(lambda x: isinstance(x, str), 
[key, field])):\n raise Exception('key, field must be string')\n\n _hash = self.db.Hash(key)\n return _hash[field]\n\n def delete(self, key, field):\n if not all(map(lambda x: isinstance(x, str), [key, field])):\n raise Exception('key, field must be string')\n\n _hash = self.db.Hash(key)\n del _hash[field]\n return True\n\n def cleanup(self, key):\n if not isinstance(key, str):\n raise Exception('key must be string')\n \n _hash = self.db.Hash(key)\n try:\n for _hkey in _hash:\n del _hash[_hkey]\n except:\n pass\n\nclass StorageBackendRedis(StorageBackend):\n __backend_name__ = 'redis'\n pass\n","repo_name":"finixbit/motex","sub_path":"motex/storage/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"7025609531","text":"import mtc\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nimport time\n\ncycles = 100_000\nduration = 15 # seconds\nshape = (50, 50)\nB = 0\nsneed = int(time.time()) # sneeding for each simualtion\nprint(f'seeding with {sneed} for each sub-simulation.')\n\nnet_magnet = []\ntemperatures = [*np.linspace(0, 2, 20)] # * = explode\nres = 5\n\nfor i, T in enumerate(temperatures):\n random.seed(sneed)\n mag = 0\n for k in range(res):\n model = mtc.nano.Model(shape, T, B)\n model.simulate(cycles)\n mag += model.magnetisation()\n\n net_magnet.append(mag/res)\n #net_magnet.append(model.magnetisation())\n mtc.log(f\"\\rdata points: {i+1}/{len(temperatures)}\", end=\"\")\n\nmtc.log(\"\\ndone!\")\nplt.ylabel(\"Normalised Magnetisation\")\nplt.xlabel(\"Temperature\")\nplt.plot(temperatures, net_magnet, 'x')\nplt.plot(temperatures, net_magnet, '-')\nplt.show()\n\n","repo_name":"SamuelJamesFrost/MagneticTimeCrystals","sub_path":"models/temp_v_magnetisation.py","file_name":"temp_v_magnetisation.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"21108604119","text":"import os, sys\nimport numpy as np\nimport timeit\nfrom collections import deque\nimport warnings\nwarnings.simplefilter('ignore')\nos.system(\"export SUMO_HOME=/usr/share/sumo\")\ntry:\n sys.path.append(\"/usr/share/sumo/tools\")\nexcept ImportError:\n sys.exit(\"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')\")\nimport traci\n# phase codes based on environment.net.xml\nPHASE_NS_GREEN = 0 # action 0 code 00\nPHASE_NS_YELLOW = 1\nPHASE_NSL_GREEN = 2 # action 1 code 01\nPHASE_NSL_YELLOW = 3\nPHASE_EW_GREEN = 4 # action 2 code 10\nPHASE_EW_YELLOW = 5\nPHASE_EWL_GREEN = 6 # action 3 code 11\nPHASE_EWL_YELLOW = 7\n\nclass SimSOTL:\n def __init__(self, TrafficGen, params, sumo_cmd, netdata):\n self._TrafficGen = TrafficGen\n self.g_min = params['g_min']\n self.theta = params['theta']\n self.mu = params['mu']\n self.omega = params['omega']\n self.max_steps = params['max_steps']\n self.kappa = 0\n self.sumo_cmd = sumo_cmd\n self.time_in_phase = 0\n self.phase_index = 0\n self.g_phases = self.green_phases()\n self.phase_red_lanes = self.get_phase_red_lanes()\n self.phase_deque = deque([self.g_phases[self.phase_index]])\n self.all_red = len((self.g_phases[0]))*'r'\n self.phase = self.all_red\n self.netdata = netdata\n self.phase_lanes = self.phase_lanes(self.g_phases)\n\n def run(self, episode):\n start_time = timeit.default_timer()\n 
self._TrafficGen.generate_rutefile(seed=episode)\n        traci.start(self.sumo_cmd)\n        print('Simulating')\n        while self.time_in_phase < self.max_steps:\n            next_phase = self.next_phase()\n            self.simulate(next_phase)\n            traci.simulationStep()\n            print(self.time_in_phase)\n\n\n    def next_phase(self):\n        max_lane = None\n        light_phase = traci.trafficlight.getPhase('TL')\n        ap_red, ap_red_len = self.red_approach(light_phase=light_phase)\n        n = max(ap_red_len.values())  # longest red-approach queue; max() over the dict itself would compare keys\n        self.kappa += n\n        max_lane = max(ap_red_len, key=ap_red_len.get)\n        if self.time_in_phase >= self.g_min:\n            if n > self.mu or n == 0:\n                if self.kappa > self.theta:\n                    self.phase_index += 1\n                    self.kappa = 0\n                    next_green = self.g_phases[self.phase_index % len(self.g_phases)]\n                    phases = self.get_intermediate_phases(self.phase, next_green)\n                    self.phase_deque.extend(phases+[next_green])\n\n        # keep the current phase when nothing is queued\n        next_phase = self.phase_deque.popleft() if self.phase_deque else self.phase\n        if next_phase != self.phase:\n            self.time_in_phase = 0\n\n        return next_phase\n\n    def simulate(self, next_phase):\n        # placeholder: applying next_phase via traci and advancing time_in_phase is not implemented yet\n        pass\n\n    def get_phase_red_lanes(self):\n        all_incoming_lanes = []\n        for g in self.green_phases():\n            all_incoming_lanes.extend(self.phase_lanes[g])\n        all_incoming_lanes = set(all_incoming_lanes)\n\n        #store all lanes that are red\n        #under any given green phase\n        phase_red_lanes = {}\n        for g in self.green_phases():\n            phase_red_lanes[g] = all_incoming_lanes - set(self.phase_lanes[g])\n\n        return phase_red_lanes\n    def green_phases(self):\n        logic = traci.trafficlight.getCompleteRedYellowGreenDefinition('TL')[0]\n        #get only the green phases\n        green_phases = [p.state for p in logic.getPhases()\n                        if 'y' not in p.state\n                        and ('G' in p.state or 'g' in p.state)]\n\n        #sort to ensure parity between sims (for RL actions)\n        return sorted(green_phases)\n\n    def get_intermediate_phases(self, phase, next_phase):\n        if phase == next_phase or phase == self.all_red:\n            return []\n        else:\n            yellow_phase = ''.join([p if p == 'r' else 'y' for p in phase])\n            return [yellow_phase, self.all_red]\n    def phase_lanes(self, actions):\n        phase_lanes = {a:[] for a in actions}\n        for a in actions:\n            green_lanes = set()\n            red_lanes = set()\n            for s in range(len(a)):\n                if a[s] == 'g' or a[s] == 'G':\n                    green_lanes.add(self.netdata['inter']['TL']['tlsindex'][s])\n                elif a[s] == 'r':\n                    red_lanes.add(self.netdata['inter']['TL']['tlsindex'][s])\n\n            ###some movements are on the same lane, removes duplicate lanes\n            pure_green = [l for l in green_lanes if l not in red_lanes]\n            if len(pure_green) == 0:\n                phase_lanes[a] = list(set(green_lanes))\n            else:\n                phase_lanes[a] = list(set(pure_green))\n        return phase_lanes\n\n    def red_approach(self, light_phase):\n        red_lane_dict = {'NS': [], 'NSL': [], 'EW': [], 'EWL': []}\n        red_lane_len_dict = {'NS': 0, 'NSL': 0, 'EW': 0, 'EWL': 0}  # keys must match red_lane_dict\n        light_0_list = ['N2TL_0', 'N2TL_1', 'N2TL_2', 'S2TL_0', 'S2TL_1', 'S2TL_2']\n        light_2_list = ['N2TL_3', 'S2TL_3']\n        light_4_list = ['E2TL_0', 'E2TL_1', 'E2TL_2', 'W2TL_0', 'W2TL_1', 'W2TL_2']\n        light_6_list = ['E2TL_3', 'W2TL_3']\n        car_list = traci.vehicle.getIDList()\n        for car_id in car_list:\n            road_id = traci.vehicle.getRoadID(car_id)\n            if light_phase == 0:\n                if road_id not in light_0_list:\n                    if ('N_E' in car_id) or ('S_W' in car_id):\n                        red_lane_dict['NSL'].append(car_id)\n                    elif ('E_W' in car_id) or ('W_E' in car_id):\n                        red_lane_dict['EW'].append(car_id)\n                    elif ('E_S' in car_id) or ('W_N' in car_id):\n                        red_lane_dict['EWL'].append(car_id)\n                    else:\n                        pass\n            elif light_phase == 2:\n                if road_id not in light_2_list:\n                    if ('N_S' in car_id) or ('S_N' in car_id):\n                        red_lane_dict['NS'].append(car_id)\n                    elif ('E_W' in car_id) or ('W_E' in car_id):\n                        red_lane_dict['EW'].append(car_id)\n                    elif ('E_S' in car_id) or ('W_N' in car_id):\n                        red_lane_dict['EWL'].append(car_id)\n                    else:\n                        pass\n            elif light_phase == 4:\n                if road_id not in light_4_list:\n                    if ('N_S' in car_id) or ('S_N' in car_id):\n                        red_lane_dict['NS'].append(car_id)\n                    elif ('N_E' in car_id) or ('S_W' in car_id):\n                        red_lane_dict['NSL'].append(car_id)\n                    elif ('E_S' in car_id) or ('W_N' in car_id):\n                        red_lane_dict['EWL'].append(car_id)\n                    else:\n                        pass\n            elif light_phase == 6:\n                if road_id not in light_6_list:\n                    if ('N_S' in car_id) or ('S_N' in car_id):\n                        red_lane_dict['NS'].append(car_id)\n                    elif ('N_E' in car_id) or ('S_W' in car_id):\n                        red_lane_dict['NSL'].append(car_id)\n                    elif ('E_W' in car_id) or ('W_E' in car_id):\n                        red_lane_dict['EW'].append(car_id)\n                    else:\n                        pass\n\n        # the original branches compared keys with 'is' against '0'/'2'/'4'/'6', which never occur; count each movement directly\n        for k in red_lane_dict:\n            red_lane_len_dict[k] = len(red_lane_dict[k])\n        return red_lane_dict, red_lane_len_dict\n\n\n\n\n","repo_name":"TakumiSeo/SOTL","sub_path":"SOTL/sotl_run.py","file_name":"sotl_run.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5271366243","text":"import nanopq\nimport numpy as np\n\n\n\ndef extract_embeddings(net, input_tensor):\n    net.eval()\n    out_put = net.forward(input_tensor)  # 'foward'/'input_tesnor' were typos\n    out_put = out_put.cpu()\n    return out_put\n\n\n\ndef pq_search(source_dataset, query_vector):\n    pq = nanopq.PQ(M=8)\n    pq.fit(source_dataset)\n\n    source_code = pq.encode(source_dataset)\n\n    dists = pq.dtable(query_vector).adist(source_code)  # (10000, )\n\n    print(dists)\n\n\n\n","repo_name":"cswwp/landmark-siamese-triplet-attention","sub_path":"PQ_SEARCH.py","file_name":"PQ_SEARCH.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"6497715790","text":"#! /usr/bin/env python3\n\"\"\"\nThis module implements linear solving.\n\n\nAuthor: Christian Parpart & Kei Thoma\nDate: 2019-11-13\nLicense: GPL-3\n\"\"\"\nimport random\n\nimport numpy as np\nfrom scipy import sparse as sm\nfrom scipy.sparse import linalg as slina\n\nimport block_matrix\n\ndef solve_lu(pr, l, u, pc, b):\n    \"\"\" Solves the linear system Ax = b via forward and backward substitution\n    given the decomposition pr * A * pc = l * u.\n\n    => A = pr^-1 * l * u * pc^-1\n\n    Parameters\n    ----------\n    pr : scipy.sparse.csr_matrix\n        row permutation matrix of LU-decomposition\n    l : scipy.sparse.csr_matrix\n        lower triangular unit diagonal matrix of LU-decomposition\n    u : scipy.sparse.csr_matrix\n        upper triangular matrix of LU-decomposition\n    pc : scipy.sparse.csr_matrix\n        column permutation matrix of LU-decomposition\n    b : numpy.ndarray\n        vector of the right-hand-side of the linear system\n\n    Returns\n    -------\n    x : numpy.ndarray\n        solution of the linear system\n    \"\"\"\n    _ = slina.spsolve(sm.csc_matrix(slina.inv(sm.csc_matrix(pr))), b)\n    _ = slina.spsolve_triangular(l, _, lower=True)\n    _ = slina.spsolve_triangular(u, _, lower=False)\n    _ = slina.spsolve(sm.csc_matrix(slina.inv(sm.csc_matrix(pc))), _)\n    return _\n\ndef test_validity(n_upto=20, num_test=20, show=False):\n    \"\"\"\n    CHECKS THE ABOVE FUNCTION FOR CORRECTNESS.
Loops through some random values for b and compares the\n    function above with the spsolve from SciPy.\n\n    Parameters\n    ----------\n    n_upto : int\n        Checks the correctness from 2 to this integer.\n    num_test : int\n        Number of tests for each n.\n    show : boolean\n        Option to print out the values.\n\n    Returns\n    -------\n    boolean\n        True if all tests passed, False if a mismatch was found.\n    \"\"\"\n    for d in [1, 2, 3]:\n        for n in range(2, n_upto):\n            for i in range(0, num_test):\n                mat = block_matrix.BlockMatrix(d, n)\n                b = np.array([random.randint(-20, 20) for _ in range(0, mat.extend)])\n                x_demo = solve_lu(*mat.get_lu(), b)\n                x_true = slina.spsolve(sm.csc_matrix(mat.data), b)\n\n                # np.all((a, b)) only checks truthiness of the values; allclose actually compares the two solutions\n                if np.allclose(x_demo, x_true):\n                    print(\"d = {} | n = {} | TEST {}/{} PASSED!\".format(d, n, i + 1, num_test))\n                    if show:\n                        print()\n                        print(\"A = {}\".format(mat.data.toarray()))\n                        print(\"b = {}\\n\".format(b))\n                        print(\"x = {}\".format(x_demo))\n                        print()\n                        print(\"============================================================\\n\")\n                else:\n                    print()\n                    print(\"d = {} | n = {} | TEST {}/{} FAILED FOR\".format(d, n, i + 1, num_test))\n                    print(\"A = {}\".format(mat.data.toarray()))\n                    print(\"b = {}\\n\".format(b))\n                    print(\"x = {}\".format(x_demo))\n                    print()\n                    return False\n    return True\n\ndef main():\n    \"\"\"\n    JOIN THE GLORIOUS MAIN FUNCTION. Uses the test function to demonstrate.\n    \"\"\"\n    print(\"WE SOLVE THE LINEAR EQUATION Ax = b WHERE A AND b ARE KNOWN\\n\\n\")\n    test_validity(5, 1, True)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"keithoma/forevermore","sub_path":"third_base/linear_solvers.py","file_name":"linear_solvers.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"16922248647","text":"#import PySimpleGUI as sg\n\n#sg.Popup(\"CHESS PLAYGROUND\")\n\nprint(\"chess playground log\") \n\n#Class \"Chess Piece\" defines a list of black rooks and a list of white rooks, as well as a dictionary {ALL PIECES}\n\nclass Chess_Piece:\n    \t\n    all_pieces = {} #{Piece Name:Piece Position}\n    \n    black_rook_list = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\"] #A list of the maximum number of black rooks\n    \n    white_rook_list = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\"] #A list of the maximum number of white rooks\n    \n\n#Used for configuring how the rook piece works \n\nclass Rook(Chess_Piece): #Class ROOK\n    \t\n    def __init__(self, col_x, col_y, color): #The Constructor for the Rook Class\n    \t\n        self.color = color #Will either be White or Black\n        \n        self.pos = str(col_x) + str(col_y) #The Position of the Rook\n        \n        self.col_x = col_x #col_x and col_y are used for positioning\n        \n        self.col_y = col_y #These are now attributes of the Rook Object / Rook Class\n        \n        legal_moves = [] #The creation of a list '[]' that will be filled with all of the legal moves of the Rook\n        \n        columns = 8 #The Columns on the Board\n        \n        columns2 = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\"] #What they are referred to\n        \n        rows2 = [1,2,3,4,5,6,7,8] #What the Rows on the board are referred to\n        \n        rows = 8 #No. of Rows\n        \n        if col_x in columns2: #If the variable col_x is in the list of column references\n        \t\n            for i in columns2: #For each item in columns2\n            \t\n                if i == col_x: #If the item is the same as col_x\n                \t\n                    legal_column = i #My Legal Column is I\n        \n        #This code does the exact same thing as shown above\n        if col_y in rows2:\n            for i in rows2:\n                if i == col_y: #col_y is ROWS, while col_x is COLUMNS\n                    legal_row = i\n        \n        #initializing the piece, globally\n        \n        if self.color == \"Black\": #If the Color is Black\n        \t\n            self.all_pieces[self.color + \"Rook\" + self.black_rook_list[0]] = self.pos #BlackRook[N], Where N refers to the first value in the Black_Rook_List.\n            \n            self.global_name = self.color + \"Rook\" + self.black_rook_list[0] #Sets the attribute 'Global_Name' to BlackRook[N], Where N refers to the first value in the Black_Rook_List.\n            \n            del self.black_rook_list[0] #Deletes the first item in black_rook_list, because it has been used.\n            \n        elif self.color == \"White\": #Otherwise, if the color is White (ignores special cases [e.g. Red, Orange, Green, Purple, Yellow])\n        \t#Does the exact same thing as shown above, only replacing references to 'BLACK' with 'WHITE'.\n            self.all_pieces[self.color + \"Rook\" + self.white_rook_list[0]] = self.pos #WhiteRook[N], Where N refers to the first value in the White_Rook_List.\n            self.global_name = self.color + \"Rook\" + self.white_rook_list[0] #Sets the attribute 'Global_Name' to WhiteRook[N], Where N refers to the first value in the White_Rook_List.\n            del self.white_rook_list[0] #Deletes the first item in white_rook_list, because it has been used.\n        \n        #legalizing all columns + rows\n        pos_x = -1 #Set to Negative One, because the first thing it will do is add one (0)\n        \n        pos_y = -1 #Same reason as shown above.\n        \n        for i in rows2: #For each item in Rows (what it is referred to),\n        \t\n            pos_x += 1 #Adds one to pos_x [you'll see why]\n            \n            legal_moves.append(str(legal_column) + str(rows2[pos_x])) #e.g. if pos is B8, this would append 'B', which is the legal column- and rows2(0), which is 1. B1.\n            \n        for i in columns2: #Does a similar thing as shown above.\n            pos_y += 1\n            legal_moves.append(str(columns2[pos_y]) + str(legal_row)) #Columns2[POS_Y] is basically Columns2[0] (for instance), which can be A, and str(legal_row), which can be 8. A8.\n        \n        pos = 0 #Sets the position variable to 0.\n        \n        for i in legal_moves: #For each item in the legal moves list,\n        \t\n            if i in self.all_pieces.values() and i != self.pos: #If the item is the position of a different chess piece AND isn't the position of the object,\n            \t\n                legal_moves[pos] = str(legal_moves[pos][0]) + \"x\" + str(legal_moves[pos][1]) #Sets the move (B4), for example, to Bx4 [Takes on B4]\n            \n            else: #Otherwise\n            \t\n                pos += 1 #Add one to the position variable\n        \n        self.legal_moves = legal_moves #Sets the object's attribute to legal_moves\n        \n        pos = 0 #ONCE AGAIN, resets the position\n        \n        for i in self.legal_moves: #for each item in the legal moves attribute\n        \t\n            if i == self.pos: #If the ITEM is equal to self.pos\n            \t\n                del self.legal_moves[pos] #deletes that position\n                pos += 1 #adds one\n                continue #moves on\n            \n            else:\n                pos += 1 #adds one\n        \n    #Updating legal moves [mirrors the logic in __init__ above] \n    def legal_move_update(self):\n        col_x, col_y = self.col_x, self.col_y #Refreshed COL_X and COL_Y\n        legal_moves = [] #Reset Legal Moves\n        columns = 8 \n        columns2 = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\"]\n        rows2 = [1,2,3,4,5,6,7,8]\n        rows = 8\n        \n        #determining if a column or row is legal?\n        if col_x in columns2:\n            for i in columns2:\n                if i == col_x:\n                    legal_column = i\n        \n        if col_y in rows2:\n            for i in rows2:\n                if i == col_y:\n                    legal_row = i\n        \n        #legalizing all columns + rows\n        pos_x = -1\n        pos_y = -1\n        \n        for i in rows2:\n            pos_x += 1\n            legal_moves.append(str(legal_column) + str(rows2[pos_x]))\n        \n        for i in columns2:\n            pos_y += 1\n            legal_moves.append(str(columns2[pos_y]) + str(legal_row))\n        \n        pos = 0\n        \n        for i in legal_moves:\n            if i in self.all_pieces.values() and i != self.pos:\n                for j in self.all_pieces:\n                    if self.all_pieces[j] == i:\n                        if self.color in j:\n                            pass\n                    \t#adds an X to moves that can take a piece\n                        else:\n                            legal_moves[pos] = str(legal_moves[pos][0]) + \"x\" + str(legal_moves[pos][1])\n            else:\n                pos += 1\n        \n        self.legal_moves = legal_moves\n        pos = 0\n        \n        for i in self.legal_moves:\n            if i == self.pos:\n                del self.legal_moves[pos]\n                pos += 1\n                continue\n            \n            else:\n                pos += 1\n    #allows a chess piece to move by changing its location \n    def move(self, x, y):\n    \t\n        pos = str(x) + str(y) #The new position is made.\n        \n        previous_x = self.col_x #Backups of col_x and col_y\n        previous_y = self.col_y\n        \n        self.col_x = x #Replaces them with x and y\n        self.col_y = y\n        \n        #check if move is possible\n        if pos in self.legal_moves: #If the new pos is in legal moves\n        \n            del previous_x #Deletes the backups\n            del previous_y\n            \n            self.all_pieces[self.global_name] = pos #Updates the global position\n            \n            self.pos = pos #Updates self.pos\n            \n            self.legal_move_update() #Updates legal moves\n        \n        else: #Else\n        \t\n            self.col_x = previous_x #Restores the backups of X and Y\n            self.col_y = previous_y\n            \n            print(\"Illegal Move\") #Reminds you that you committed a crime\n        \n\n#Create scenarios down here!
\nrook1 = Rook(\"B\", 3, \"White\")\nrook2 = Rook(\"C\", 3, \"White\")\nrook3 = Rook(\"F\", 3, \"White\")\nrook1.legal_move_update()\nrook2.legal_move_update()\nrook3.legal_move_update()\nprint(rook1.all_pieces)\nprint(rook1.global_name, rook1.pos, rook1.legal_moves)\nrook1.move(\"B\",7)\nprint(rook1.global_name, rook1.pos, rook1.legal_moves)\n","repo_name":"Eriks-Gadgets/chess-playground","sub_path":"chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":8509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"14699286379","text":"import json, math\nfrom functools import reduce\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.db.models import Q\nfrom django.views.generic.base import View\nfrom django.contrib.auth.hashers import make_password\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom background_task.models import Task\nfrom equipments.tasks import notify_user\n\nfrom .models import UserProfile, EmailVerifyRecord\nfrom operation.models import UserEquipment, UserFavorite, UserMessage\nfrom .forms import LoginForm, RegisterForm, ForgetForm, ModifyPwdForm\nfrom utils.email_send import send_register_email\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\n\nfrom organization.models import Team, Engineer\nfrom equipments.models import Equipment\nfrom .models import Banner\nfrom .forms import UploadImageForm, UserInfoForm\n\n\nclass CustomBackend(ModelBackend): # 有了这个就可以实现用户和邮箱都可以登录,authenticate在这里被重写此方法自动被调用\n def authenticate(self, request, username=None, password=None, **kwargs):\n try: # Model的objects管理器 ???\n user = UserProfile.objects.get(Q(username=username) | Q(email=username)) # 到数据库查询用户名或者邮箱是否有并做检查是否一致,取出 user。这里不检查密码因为django后台存的是密文没法把前端页面传进来的明文进行数据库查询\n if user.check_password(password): # user是UserProfile继承AbstractUser,AbstractUser有一个方法check_password;明文传进去并加密与后台数据库中user的password密文对比\n return user\n except Exception as e:\n return None\n\n\nclass ActiveUserView(View): #用户点击激活邮件里的链接会跳到这里是GET 方法\n def get(self, request, active_code): #active_code 要与url参数中的名称一样,提取出来啦\n all_records = EmailVerifyRecord.objects.filter(code= active_code) # 发送链接邮件之前已经将EmailVerifyRecord信息code,email,send_type保存到数据库users_emailverifyrecord 参见email_send.py文件\n # 因为现在用户点击这个链接了,需要查一下这个链接是否存在,通过 users_emailverifyrecord表中的code(这里是参数active_code)字段到表users_emailverifyrecord中查询是否有记录\n if all_records: # 如果users_emailverifyrecord中有记录,就到这个表里去获取email(email = record.email)并通过这个email 在users_userprofile表中找到这个user(UserProfile类型)激活它(is_active = 1)(RegisterView的POST里is_active = False 并保存userprofile后发送邮件,等用户点击后在此激活)\n for record in all_records:\n email = record.email\n user = UserProfile.objects.get(email= email) # user 是一个UserProfile类型\n user.is_active = True #用户点击那个链接了 激活吧\n user.save() # 保存到users_userprofile表\n else:\n return render(request, 'active_fail.html') # 用户试图在浏览器输入他邮箱里的链接并访问来激活注册的账号,可是链接失效,给一个信息提示\n return render(request, 'login.html')\n\n\nclass RegisterView(View):\n def get(self, request):\n register_form = RegisterForm()\n return render(request, 'register.html', {'register_form': register_form})\n # 这里的register_form是可以传给前端页面使用的,返回form的验证信息到页面,验证的逻辑和输出信息是django的forms自动完成的,配置好就可以了\n\n def post(self, request):\n register_form = RegisterForm(request.POST)\n if register_form.is_valid():\n user_name = request.POST.get('email', '') # 
注册的时候用的是 email注册\n if UserProfile.objects.filter(email=user_name):\n return render(request, 'register.html', {'register_form': register_form,\n 'msg': '用户已经存在!'}) # 回填用户信息register_form(html中的value=部分), 需要显示它的验证码\n pass_word = request.POST.get('password', '') # 前端输入的是明文, 不是django数据库中的密文\n user_profile = UserProfile()\n user_profile.username = user_name # 其实这里user_name是邮箱因为注册是用邮箱注册的\n user_profile.email = user_name\n user_profile.is_active = False # 用户点击那个链接后才能激活\n user_profile.password = make_password(pass_word) # 对前端明文加密然后存到后台数据库\n user_profile.save() # 保存到数据库users_userprofile表中不是users_emailverifyrecord\n\n # 写入欢迎注册消息\n user_message = UserMessage()\n user_message.user = user_profile.id # 因为operation\\models\\UserMessage\\user不是指向外键UserProfile,而是一个int\n user_message.message = \"欢迎注册Test&Lab\"\n user_message.save()\n\n send_register_email(user_name, 'register') # 发送动作之前是要保存code,email,send_type到��据库users_emailverifyrecord中\n return render(request, 'login.html')\n else:\n return render(request, 'register.html', {'register_form': register_form})\n\n\nimport re\nclass LogoutView(View):\n '''\n 刚开始是用户登出到首页index.html,后台根据https://juejin.im/post/5a780ced6fb9a063395c4b6f登出到\n '''\n def get(self, request):\n next = request.GET.get('next', '')\n logout(request)\n if next:\n if 'equipments/info' in next or 'equipments/comment'in next or 'equipments/reserve'in next:\n return HttpResponseRedirect('/equipments/detail/' + re.findall('\\d+', next)[0] +'/')\n return HttpResponseRedirect(next)\n return HttpResponseRedirect(reverse('index'))\n # logout(request)\n # # HttpResponseRedirect重定向到一个url的地址\n # return HttpResponseRedirect(reverse('index')) # reverse可以把url的名称name反解成url的地址\n # # 传equipments:equipment_list等也是可以\n\n\nclass LoginView(View): # 重写View的get和post方法。View会根据get方法自动调用get函数,post方法就自动调用post函数。 request是django自动给我们添加的;当在urls.py中配置了url,django就会自动生成一个request放到函数View.get/post里面作为参数。\n # def get(self, request): # 刷新页面login就是get。就是django自动注册进来的request, 如果是get方法,自动调用View的get函数\n # return render(request, 'login.html', {})\n\n def get(self, request):\n # 获取到next参数,渲染到template中,在form表单添加一个hidden类型的元素\n next = request.GET.get('next', '')\n return render(request, \"login.html\", {'next': next})\n\n def post(self, request):\n # #下面邮件发送是为了调试用\n # email_body = 'request.POST里的next是:{}' \\\n # .format(request.POST.get('next', ''))\n # send_status = send_mail('测试request.POST里的next是?', email_body,\n # EMAIL_FROM,\n # ['419099632@qq.com'])\n login_form = LoginForm(request.POST) # 会把login.html中的username和password字段对应到forms.py中的LoginForm中username和password字段去并做两个字段的校验\n # 上面实例化的时候View已经做了form的检查验证(每个字段是否合法):包括两个必填字段,密码最少5个字符\n if login_form.is_valid(): # is_valid就是检查login_form下的_errors是否为空;如果字段都合法(LoginForm中的两个字段必填,密码至少5个字符),则_errors为空\n user_name = request.POST.get('username', '') # 字典的用法 必须用username password? 
因为前端页面login用这两个名称,保持一致?\n pass_word = request.POST.get('password', '') # 字典的用法 csrtoken那个不用管,因为能进入这里说明django已经验证过了的否则进不了这里的\n user = authenticate(username=user_name, password=pass_word) # authenticate只是向数据库发起验证二者是否存在若存在返回UserProfile类型,并没有登录\n if user is not None: # user是UserProfile类型,邮箱和用户名都可以实现登录因为有重写authenticate方法\n if user.is_active: # 激活的用户才登录哦\n login(request, user) # session 和 cookie里有讲login如何实现登录机制。注意这里的request/user是注册到index.html中去的\n if request.POST.get('next', ''):\n # 如果request.POST.get('next', '')存在,直接跳转到指定页面\n return HttpResponseRedirect(request.POST.get('next', ''))\n # return render(request, 'index.html') # 登录成功 返回首页,html中对登录成功后的页面改显示什么有判断处理\n return HttpResponseRedirect(reverse('index'))#这样用户登录后就会进入IndexView把相关数据传递到index.html;而上面的render到index.html则没有数据传递到index.html\n\n else: #未激活的用户当然不能登录哦,回到登录页面\n return render(request, 'login.html', {'login_form': login_form, 'msg': '用户未激活!'}) # 这里的msg是可以传给前端页面使用的,'login_form'返回是用于回填\n else:\n return render(request, 'login.html', {'login_form': login_form, 'msg': '用户名或者密码错误!'}) # 这里的msg是可以传给前端页面使用的,'login_form'返回是用于回填\n else:\n return render(request, 'login.html', {'login_form': login_form}) # 这里的login_form是可以传给前端页面使用的,返回form的验证信息到页面,验证的逻辑和输出信息是django的forms自动完成的,配置好就可以了\n\n\nclass ForgetPwdView(View):\n def get(self, request):\n forget_form = ForgetForm()\n return render(request, 'forgetpwd.html', {'forget_form': forget_form})\n\n def post(self, request):\n forget_form = ForgetForm(request.POST) # 会把forgetpwd.html中的email字段对应到forms.py中的ForgetForm中email字段去并做字段的校验\n # 上面实例化的时候View已经做了form的检查验证(字段是否合法):email字段\n if forget_form.is_valid():# is_valid就是检查forget_form下的_errors是否为空;如果字段都合法,则_errors为空\n email = request.POST.get('email', '')\n send_register_email(email, 'forget')\n\n # 在ActiveUserView中用户试图在浏览器输入他邮箱里的链接并访问来激活注册的账号,可是链接失效,给一个信息提示\n # 邮件发送成功也给一个邮件发送成功的提示\n return render(request, 'send_success.html')\n else:\n return render(request, 'forgetpwd.html', {'forget_form': forget_form})# 这里的forget_form是可以传给前端页面使用的,返回form的验证信息到页面,验证的逻辑和输出信息是django的forms自动完成的,配置好就可以了\n\n\nclass ResetView(View):\n '''\n 用户在浏览器输入http://127.0.0.1:8000/reset/********** 是get **********是存在EmailVerifyRecord中的code\n '''\n def get(self, request, active_code):\n all_records = EmailVerifyRecord.objects.filter(code= active_code) # 发送链接邮件之前已经将EmailVerifyRecord信息code,email,send_type保存到数据库users_emailverifyrecord 参见email_send.py文件\n # 因为现在用户点击这个链接了,需要查一下这个链接是否存在,通过 users_emailverifyrecord表中的code(这里是参数active_code)字段到表users_emailverifyrecord中查询是否有记录\n if all_records:# 如果users_emailverifyrecord中有记录,就到这个表里去获取email(email = record.email). 然后返回一个密码重置页面password_reset.html并把email返回,在此页面中有个hidden的input框会把这个email以POST方法返回(
)到ModifyPwdView,在ModifyPwdView中这样取email = request.POST.get('email', '')\n for record in all_records:#record是EmailVerifyRecord类型\n email = record.email\n return render(request, 'password_reset.html', {'email': email})\n else:\n return render(request, 'active_fail.html')\n return render(request, 'login.html')\n\n\nclass ModifyPwdView(View):\n '''\n 修改用户密码,用户在http://127.0.0.1:8000/reset/uaJKy1TJtW7NAR44/ 页面点击提交是post\n '''\n def post(self, request):\n modify_form = ModifyPwdForm(request.POST)\n if modify_form.is_valid():\n pwd1 = request.POST.get('password1', '') #password1,2是password_reset Html页面input框传过来的,email也是。\n pwd2 = request.POST.get('password2', '')\n email = request.POST.get('email', '') #password_reset.html中的隐藏的输入框传来的 \n if pwd1 != pwd2:\n return render(request, 'password_reset.html', {'modify_form': modify_form, 'email': email, 'msg': '密码不一致!'})\n user = UserProfile.objects.get(email=email) # 因为是老用户找回密码所以这个email应该在users_userprofile表中\n # 注意:通过注册和激活的步骤,这个user信息是保存到 users_userprofile中的\n user.password = make_password(pwd1)\n user.save()\n return render(request, 'login.html', {'email': email, 'modify_form': modify_form}) # 在login页面能否回填来自ModifyPwdView的密码?\n else:\n email = request.POST.get('email', '')\n return render(request, 'password_reset.html', {'email': email, 'modify_form': modify_form})\n\n\nfrom utils.mixin_utils import LoginRequiredMixin\n\n\nclass UserinfoView(LoginRequiredMixin, View):\n '''\n 用户个人信息\n '''\n def get(self, request):\n return render(request, 'usercenter-info.html', {\n })\n\n def post(self, request):\n user_info_form = UserInfoForm(request.POST, instance=request.user)#一定要指明instance,否则是新增而不是修改, 与用户咨询不一样\n if user_info_form.is_valid():\n user_info_form.save()\n return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n else:\n return HttpResponse(json.dumps(user_info_form.errors), content_type='application/json')\n\n\nclass UploadImageView(LoginRequiredMixin, View):\n '''\n 用户修改头像\n '''\n def post(self, request):\n image_form = UploadImageForm(request.POST, request.FILES, instance=request.user) #instance传的就是modelform:UploadImageForm指代的model:UserProfile。 image_form就具有了modelform的功能(里面有model):直接存\n if image_form.is_valid():\n # image = image_form.cleaned_data['image']\n # request.user.image = image\n # request.user.save()\n image_form.save()\n return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n else:\n return HttpResponse('{\"status\":\"fail\"}', content_type='application/json')\n\n\nclass UpdatePwdView(View):\n '''\n 在个人用户中心修改用户密码\n '''\n def post(self, request):\n modify_form = ModifyPwdForm(request.POST)\n if modify_form.is_valid():\n pwd1 = request.POST.get('password1', '') #password1,2是password_reset Html页面input框传过来的,email也是。\n pwd2 = request.POST.get('password2', '')\n # email = request.POST.get('email', '')\n if pwd1 != pwd2:\n return HttpResponse('{\"status\":\"fail\", \"msg\":\"密码不一致!\"}', content_type='application/json')\n user = request.user\n user.password = make_password(pwd1)\n user.save()\n return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n else:\n return HttpResponse(json.dumps(modify_form.errors), content_type='application/json')\n\n\nclass SendEmailCodeView(LoginRequiredMixin, View):\n '''\n 个人中心修改邮箱时发送邮箱验证码\n '''\n def get(self,request):\n email = request.GET.get('email', '')\n if UserProfile.objects.filter(email=email):\n return HttpResponse('{\"email\":\"邮箱已经存在\"}', content_type='application/json')\n send_register_email(email, 'update_email')\n return 
HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n\n\nclass UpdateEmailView(LoginRequiredMixin, View):\n    '''\n    Change the personal email address in the user center once the verification code has been received\n    '''\n    def post(self, request):\n        email = request.POST.get('email', '')\n        code = request.POST.get('code', '') # why 'code'? it is defined in usercenter-base.html\n\n        existed_records = EmailVerifyRecord.objects.filter(email=email, code=code, send_type='update_email')\n        if existed_records:\n            user = request.user\n            user.email = email\n            user.save() # this way the email in the userprofile table is updated to match the one in emailverifyrecord\n            return HttpResponse('{\"status\":\"success\"}', content_type='application/json')\n        else:\n            return HttpResponse('{\"email\":\"验证码出错\"}', content_type='application/json')\n\n\nclass MyEquipmentView(LoginRequiredMixin, View):\n    '''\n    User center: equipment I have borrowed\n    '''\n    def get(self, request):\n        # the line below selects the equipment I have not yet returned\n        user_equipments = UserEquipment.objects.filter(return_time__isnull=True, user=request.user)\n        # the next four lines deduplicate identical items among the equipment I have used\n        user_equipments_ids = UserEquipment.objects.filter(user=request.user).values('equipment_id').distinct()\n        equipments_list = []\n        for user_equipment_id in user_equipments_ids:\n            equipments_list += Equipment.objects.filter(id=user_equipment_id['equipment_id'])\n        return render(request, 'usercenter-myequipment.html', {\n            'user_equipments': user_equipments,\n            'equipments_list': equipments_list,\n\n        })\n\n\nclass MyFavTeamView(LoginRequiredMixin, View):\n    '''\n    User center: teams I have favorited\n    '''\n    def get(self, request):\n        team_list =[]\n        fav_teams = UserFavorite.objects.filter(user=request.user, fav_type=2)\n        for fav_team in fav_teams:\n            team_id = fav_team.fav_id\n            team = Team.objects.get(id=team_id)\n            team_list.append(team)\n        return render(request, 'usercenter-fav-team.html', {\n            'team_list': team_list,\n\n        })\n\n\nclass MyFavEngineerView(LoginRequiredMixin, View):\n    '''\n    User center: engineers I have favorited\n    '''\n    def get(self, request):\n        engineer_list =[]\n        fav_engineers = UserFavorite.objects.filter(user=request.user, fav_type=3)\n        for fav_engineer in fav_engineers:\n            engineer_id = fav_engineer.fav_id\n            engineer = Engineer.objects.get(id=engineer_id)\n            engineer_list.append(engineer)\n        return render(request, 'usercenter-fav-engineer.html', {\n            'engineer_list': engineer_list,\n\n        })\n\n\nclass MyFavEquipmentView(LoginRequiredMixin, View):\n    '''\n    User center: equipment I have favorited\n    '''\n    def get(self, request):\n        equipment_list =[]\n        fav_equipments = UserFavorite.objects.filter(user=request.user, fav_type=1)\n        for fav_equipment in fav_equipments:\n            equipment_id = fav_equipment.fav_id\n            equipment = Equipment.objects.get(id=equipment_id)\n            equipment_list.append(equipment)\n        return render(request, 'usercenter-fav-equipment.html', {\n            'equipment_list': equipment_list,\n\n        })\n\n\nclass MymessageView(LoginRequiredMixin, View):\n    '''\n    User center: my messages\n    '''\n    def get(self, request):\n        all_messages = UserMessage.objects.filter(user=request.user.id) # because operation\\models\\UserMessage.user is not a foreign key to UserProfile but a plain int\n        # once the user opens the personal messages, clear the unread flags\n        all_unread_messages = UserMessage.objects.filter(user=request.user.id, has_read=False)\n        for unread_message in all_unread_messages:\n            unread_message.has_read = True\n            unread_message.save()\n\n        # paginate the messages\n        try:  # request.GET \n            page = request.GET.get('page', 1)  # read the page number; returns an int page index such as 1 or 2\n        except PageNotAnInteger:\n            page = 1\n\n        p = Paginator(all_messages, 10, request=request)  # p is a Paginator object, all_messages is a QuerySet\n\n        messages = p.page(page)  # messages is a Page object: Page 1 of 2 / Page 2 of 2\n\n        return render(request, 'usercenter-message.html', {\n            \"messages\": messages,\n        })\n\n\nclass 
MytoolView(LoginRequiredMixin, View):\n    '''\n    User center: my tools\n    '''\n    def get(self, request):\n\n        return render(request, 'usercenter-mytool.html', {\n        })\n\n\nclass MyQuizView(LoginRequiredMixin, View):\n    '''\n    User center: my quizzes\n    '''\n    def get(self, request):\n\n        return render(request, 'usercenter-myquiz.html', {\n        })\n\n\nfrom functools import reduce  # in Python 3, reduce lives in functools\n\n\ndef mean(L):\n    # arithmetic mean of a non-empty list of numbers\n    r = reduce(lambda x,y:x+y,L)\n    return r*1.0/len(L)\n\n\nfrom django.core.mail import send_mail\nfrom TestLabManagement.settings import EMAIL_FROM\nclass MytoolUcalView(LoginRequiredMixin, View):\n    '''\n    User center: my tools: uncertainty calculation for a directly measured quantity\n    '''\n    def post(self, request):# form submissions arrive via POST\n        # collect up to ten repeated readings (test_value1 .. test_value10)\n        list_value = []\n        for i in range(1, 11):\n            value = request.POST.get('test_value{}'.format(i), '')\n            if value:\n                list_value.append(float(value))\n\n        Umean = mean(list_value)\n\n        list_temp_submean = [value-Umean for value in list_value]\n\n        list_temp_submean_square = [value**2 for value in list_temp_submean]\n\n        sUx = math.sqrt(sum(list_temp_submean_square) / (len(list_temp_submean_square) - 1))\n        # uncertainty from repeatability when a single measurement is reported\n        uUx = sUx/1\n\n        resolution = request.POST.get('resolution', '')\n        if resolution:\n            resolution = float(resolution)\n\n        # uncertainty component introduced by the instrument resolution\n        uUr = 0.5*resolution/math.sqrt(3)\n\n        readout = request.POST.get('readout', '')\n        if readout:\n            readout = float(readout)\n        range_value = request.POST.get('range_value', '')\n        if range_value:\n            range_value = float(range_value)\n        mpe = request.POST.get('mpe', '')\n        if mpe:\n            mpe = float(mpe)\n        # uncertainty introduced by the instrument's maximum permissible error\n        uUn = mpe/math.sqrt(3)\n\n        # combined standard uncertainty\n        uc = math.sqrt(uUx**2 + uUr**2 + uUn**2)\n\n        # expanded uncertainty (coverage factor k = 2)\n        U = 2 * uc\n\n        # the email below is sent for debugging purposes\n        email_body = '你输入的重复性评定测量值是:{}\\n\\n你输入的此次读数:{}\\n所用量程是:{}\\n仪器最大允许误差(此读数与此量程下)是:{}\\n\\n仪器分辨力(此量程下):{}\\n\\n ----------------------------------\\n\\n uUx重复性引入的不确定度分量:{} \\n\\n uUr仪器分辨力引入的不确定度分量:{}\\n\\n uUn仪表本身引入的不确定度:{}\\n\\nuc合成标准不确定度是:{}\\n\\nU扩展不确定度是:{}'.format\\\n            (list_value, readout, range_value, mpe, resolution, uUx, uUr, uUn, uc, U)\n        send_status = send_mail('直接测量量不确定度计算', email_body, EMAIL_FROM, [request.user.email])\n        return HttpResponse('{\"status\":\"success\",\"msg\":\"testing...\"}', content_type='application/json')\n\n\nclass MytoolUtcalView(LoginRequiredMixin, View):\n    '''\n    User center: my tools: uncertainty calculation for a Pt1000 temperature measurement\n    '''\n    def post(self, request):# form submissions arrive via POST\n        # collect up to ten repeated readings (t_value1 .. t_value10)\n        list_value = []\n        for i in range(1, 11):\n            value = request.POST.get('t_value{}'.format(i), '')\n            if value:\n                
list_value.append(float(value))\n\n        Umean = mean(list_value)\n\n        list_temp_submean = [value - Umean for value in list_value]\n\n        list_temp_submean_square = [value ** 2 for value in list_temp_submean]\n\n        sUx = math.sqrt(sum(list_temp_submean_square) / (len(list_temp_submean_square) - 1))\n        # uncertainty from repeatability when a single measurement is reported\n        uUx = sUx / 1\n\n        t_readout = request.POST.get('t_readout', '')\n        if t_readout:\n            t_readout = float(t_readout)\n        mpe = 0.3 + 0.005*abs(t_readout)\n        # uncertainty component introduced by the sensor's tolerance\n        uUn = mpe / math.sqrt(3)\n\n        resistor = request.POST.get('resistor', '')\n        if resistor:\n            resistor = float(resistor)\n        temp = resistor*0.1/0.391\n        # uncertainty component introduced by the lead-wire resistance\n        uUw = temp / math.sqrt(3)\n\n        t_max = request.POST.get('t_max', '')\n        if t_max:\n            t_max = float(t_max)\n        # uncertainty component introduced by the software conversion\n        uUsoft = t_max / math.sqrt(3)\n\n        # combined standard uncertainty\n        uc = math.sqrt(uUx ** 2 + uUn ** 2 + uUw ** 2 + uUsoft ** 2)\n\n        # expanded uncertainty (coverage factor k = 2)\n        U = 2 * uc\n\n        # the email below is sent for debugging purposes\n        email_body = '你输入的重复性评定测量值是:{}\\n\\n你输入的此次读数:{}\\n\\n你输入的引线阻值:{}\\n\\n你输入的程序转换的最大温度差值:{}\\n\\n ----------------------------------\\n\\nuUx重复性引入的不确定度分量:{} \\n\\n uUn由于传感器的允差引入的不确定度分量:{}\\n\\n uUw由于引线电阻引入的不确定度分量:{}\\n\\nuUsoft由于软件转换引入的不确定度分量:{}\\n\\nuc合成标准不确定度是:{}\\n\\nU扩展不确定度是:{}'.format\\\n            (list_value, t_readout, resistor, t_max, uUx, uUn, uUw, uUsoft, uc, U)\n        send_status = send_mail('温度测试不确定度计算', email_body, EMAIL_FROM, [request.user.email])\n        return HttpResponse('{\"status\":\"success\",\"msg\":\"testing...\"}', content_type='application/json')\n\n\nclass MytoolSafetyQuizView(LoginRequiredMixin, View):\n    '''\n    User center: my quizzes: test taken after safety training; if the test is passed, an email is sent here to the people concerned\n    '''\n    def post(self, request):# form submissions arrive via POST\n        # the email below is sent for debugging purposes\n        email_body = '{}:\\n\\n恭喜你通过了测试,你这次测试的分数是:{}\\n\\n\\n\\n\\n------------------------------------------------------------\\n以上信息来自 Test&Lab管理后台 自动发送内容,请勿回复此邮件!'.format(request.user.username, request.POST.get('score',''))\n        send_status = send_mail('测试通过!', email_body, EMAIL_FROM, [request.user.email])\n        return HttpResponse('{\"status\":\"success\",\"msg\":\"通过测试!\"}', content_type='application/json')\n\n\nclass IndexView(View):\n    '''\n    Home page\n    '''\n    def get(self, request):\n        # fetch the carousel banners\n        # print (1/0)\n        all_banners = Banner.objects.all().order_by('index')\n        equipments = Equipment.objects.filter(is_banner=False)[:6]\n        banner_equipments = Equipment.objects.filter(is_banner=True)[:3]\n        teams = Team.objects.all()[:15]\n\n        # # check whether a background_task job already exists, and register it if not\n        # if Task.objects.filter(task_name=\"equipments.tasks.notify_user\").exists():\n        #     pass\n        # else:\n        #     notify_user(repeat=60)# register the task with the background worker\n\n        # notify_user(repeat=120)# register the task with the background worker\n        return render(request, 'index.html', {\n            'all_banners': all_banners,\n            'equipments': equipments,\n            'banner_equipments': banner_equipments,\n            'teams': teams,\n        })\n\n\ndef page_not_found(request):\n    # global 404 handler\n    from django.shortcuts import render_to_response\n    response = 
render_to_response('404.html', {})\n    response.status_code = 404\n    return response\n\n\ndef page_error(request):\n    # global 500 handler\n    from django.shortcuts import render_to_response\n    response = render_to_response('500.html', {})\n    response.status_code = 500\n    return response\n\n\n\n\n\n","repo_name":"WenLiang619/testlabmanagement","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"28470141582","text":"from app.config.database import create_connection\nfrom datetime import datetime\nimport uuid\nimport asyncpg.exceptions as pg_exceptions\nfrom app.errs.err import err_response\nfrom app.models import organizations as organization_model\n\nasync def create_organizations(organization: organization_model.Organizations):\n    con = await create_connection()\n    tr = con.transaction()\n    statement = None  # stays None if the INSERT fails, so the commit below is skipped\n    try:\n        await tr.start()\n        statement = await con.fetch(\n            \"INSERT INTO organizations (public_id,name,code,avatar_icon,avatar_color,image_url,description,created_at,updated_at,deleted_at) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10) RETURNING id\",\n            str(uuid.uuid4()),\n            organization.name,\n            organization.code,\n            organization.avatar_icon,\n            organization.avatar_color,\n            organization.image_url,\n            organization.description,\n            datetime.now(),\n            datetime.now(),\n            None\n        )\n    except pg_exceptions.UniqueViolationError as e:\n        await tr.rollback()\n        await err_response(\n            status_code=409,\n            message=\"code already exists\"\n        )\n    if statement:\n        await tr.commit()\n        await con.close()\n        return({\n            \"message\" : {\n                \"id\" : ((statement[0].get(\"id\"))),\n            }\n        })\n    \nasync def get_one_organization(organization_id):\n    con = await create_connection()\n    statement = await con.fetch(\n        \"SELECT * FROM organizations WHERE id = $1 AND deleted_at IS NULL\",\n        organization_id\n    )\n    await con.close()\n    return statement\n\nasync def get_all_organizations_paginated(page_number, page_size):\n    con = await create_connection()\n    statement = await con.fetch(\n        \"SELECT * FROM organizations WHERE deleted_at IS NULL ORDER BY id DESC LIMIT $1 OFFSET $2\",\n        page_size,\n        (page_number - 1) * page_size\n    )\n    await con.close()\n    return statement\n\nasync def update_organizations(organization_id,organization: organization_model.Organizations):\n    con = await create_connection()\n    tr = con.transaction()\n    statement = None  # stays None if the UPDATE fails, so the commit below is skipped\n    try:\n        await tr.start()\n        statement = await con.fetch(\n            \"UPDATE organizations SET name = $1,code = $2,avatar_icon = $3,avatar_color = $4,image_url = $5,description = $6,updated_at = $7 WHERE id = $8 RETURNING id\",\n            organization.name,\n            organization.code,\n            organization.avatar_icon,\n            organization.avatar_color,\n            organization.image_url,\n            organization.description,\n            datetime.now(),\n            organization_id\n        )\n    except pg_exceptions.UniqueViolationError as e:\n        await tr.rollback()\n        await err_response(\n            status_code=409,\n            message=\"code already exists\"\n        )\n    if statement:\n        await tr.commit()\n        await con.close()\n        return ({\n            \"message\": {\n                \"id\": ((statement[0].get(\"id\"))),\n            }\n        })\n    \nasync def delete_organizations(organization_id):\n    con = await create_connection()\n    statement = await con.fetch(\n        \"UPDATE organizations SET deleted_at = $1 WHERE id = $2\",\n        datetime.now(),\n        organization_id\n    )\n    await con.close()\n    return statement\n    
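\n# A minimal, hypothetical smoke test for the helpers above; it assumes a\n# reachable database behind create_connection and an Organizations model that\n# accepts the fields used in the INSERT. Illustrative only, not part of the API.\nif __name__ == \"__main__\":\n    import asyncio\n\n    async def _demo():\n        org = organization_model.Organizations(\n            name=\"Example Org\", code=\"EX01\", avatar_icon=\"icon\",\n            avatar_color=\"#ffffff\", image_url=None, description=\"demo\")\n        created = await create_organizations(org)\n        print(created)\n        print(await get_one_organization(created[\"message\"][\"id\"]))\n\n    asyncio.run(_demo())\n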
","repo_name":"Perrrr/expense_manage_system","sub_path":"expense-app-gateway/server/app/repository/organizations_db.py","file_name":"organizations_db.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10650779963","text":"### Chapter 6\n\n## Looping Through a Dictionary\n\n# Loooping Through All Key-Value Pairs\nuser_0 = {\n 'username': 'efermi',\n 'first': 'enrico',\n 'last': 'fermi'\n }\n\nfor key, value in user_0.items():\n print(f'\\nKey: {key}')\n print(f'Value: {value}')\n\nfor k, v in user_0.items(): # Abbreviations can be used for key and value\n print(f'\\nKey: {key}')\n print(f'Value: {value}')","repo_name":"briantaylorjohnson/python-practice","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36818373656","text":"import sys\nfrom config import *\nfrom analytics import Research\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n r = Research(sys.argv[1])\n fr = r.file_reader()\n count = r.Calculations(fr).counts()\n percent = r.Calculations(fr).fractions(count)\n rand_obs = r.Analytics(fr).predict_random(num_of_steps)\n data = template.format(\n sum(count),\n count[0],\n count[1],\n percent[0],\n percent[1],\n num_of_steps,\n list(map(sum, zip(*rand_obs)))[0],\n list(map(sum, zip(*rand_obs)))[1]\n )\n r.Analytics.save_file(data, name_of_file, type_of_file)\n else:\n print(\"Use: python3.10 make_report.py data.csv\")\n","repo_name":"aperop/ds","sub_path":"day02/ex05/make_report.py","file_name":"make_report.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"37167750796","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport logging\n\nfrom dbferret.retriever import DbFerret\nfrom dbferret.file_writer import FileWriter\n\n\"\"\"\nRun something like this for a redshift db:\n\n python driver.py \\\n --user --pw \\\n --hostname -d \n\nFor postgres specify the engine_type, port and perhaps you'll need ssl_mode:\n\n python driver.py \\\n --user --pw --hostname \\\n -d --engine_type postgresql -p 5432 --ssl_mode True\n\n\"\"\"\n\n\ndef main():\n args = parse_args()\n logging.basicConfig(level=getattr(logging, args.log_level.upper(), None))\n\n # Instantiate ferret object to get db metadata in subsequent steps\n dbferret = DbFerret(hostname=args.hostname, user=args.user, pw=args.pw,\n db=args.db, ssl_mode=args.ssl_mode,\n engine_type=args.engine_type, schema=args.schema,\n port=args.port, warehouse=args.warehouse,\n schema_list=args.schema_list)\n\n # Collect data\n table_metadata = dbferret.extract_table_metadata()\n view_ddls = dbferret.extract_view_ddl()\n\n # Write results\n file_writer = FileWriter(db=args.db, engine_type=args.engine_type)\n file_writer.output_table_metadata_to_tsv(table_metadata)\n file_writer.output_view_ddl_to_sql(view_ddls)\n\n\ndef parse_args():\n \"\"\"\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"ferret collects metadata from a database via \"\n \"reflection, provide connection information to run\")\n\n parser.add_argument(\n \"-e\",\n \"--engine_type\",\n dest=\"engine_type\",\n help=\"The database type, such as postgres or redshift used \"\n \"for the connection string by sqlalchemy.\",\n default=\"redshift\"\n )\n parser.add_argument(\n \"-u\",\n \"--user\",\n dest=\"user\",\n 
help=\"The database user used to login. For postgres, at least, \"\n \"any user normally will be able to crawl the database.\"\n )\n parser.add_argument(\n \"-pw\",\n \"--pw\",\n dest=\"pw\",\n help=\"Password to connect to the db with the specified user. \"\n \"Information is passed through to the db but not recorded.\"\n )\n parser.add_argument(\n \"-hn\",\n \"--hostname\",\n dest=\"hostname\",\n help=\"The host where the database is located.\"\n )\n parser.add_argument(\n \"-p\",\n \"--port\",\n dest=\"port\",\n help=\"The port used by the database for connections.\"\n )\n parser.add_argument(\n \"-d\",\n \"--db\",\n dest=\"db\",\n help=\"The database instance of the database.\"\n )\n parser.add_argument(\n \"-l\",\n \"--ssl_mode\",\n dest=\"ssl_mode\",\n help=\"A boolean indicating if connections must be encrypted \"\n \"to the database with SSL.\",\n default=False\n )\n parser.add_argument(\n \"-s\",\n \"--schema\",\n dest=\"schema\",\n help=\"The schema to operate against for Snowflake databases\",\n default=\"public\"\n )\n parser.add_argument(\n \"-w\",\n \"--warehouse\",\n dest=\"warehouse\",\n help=\"The warehouse to operate against for Snowflake databases\"\n )\n parser.add_argument(\n \"--debug\",\n dest=\"debug\",\n help=\"Collects and diagnoses will be prepared but not run\",\n action=\"store_true\",\n default=False\n )\n parser.add_argument(\n \"--log_level\",\n dest=\"log_level\",\n help=\"Sets the logging severity level\",\n default=\"INFO\"\n )\n parser.add_argument(\n \"--schema_list\",\n dest=\"schema_list\",\n help=\"Comma delimited list of schemas\"\n )\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"john-cenzano-fong/db-ferret","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73658511606","text":"def coroutine(gen):\r\n def inner():\r\n generator = gen()\r\n next(generator)\r\n return generator\r\n return inner\r\n\r\n@coroutine \r\ndef storage():\r\n values = set()\r\n was_there = False \r\n while True:\r\n val = yield was_there\r\n was_there = val in values\r\n if not was_there:\r\n values.add(val)\r\n \r\nst = storage()\r\n#next(st)\r\nprint(st.send(42))\r\nprint(st.send(42))","repo_name":"Km1zZzoU/Python","sub_path":"minka13.py","file_name":"minka13.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41600569578","text":"from torch import nn\n\ndef decomp_alexnet(net, rank_func, decomp_func):\n i = 1\n \n while i < len(net.features):\n # find out the rank of the first conv layer\n layer_i = net.features[i]\n\n if not isinstance(layer_i, nn.Conv2d):\n i += 1\n continue\n \n layer_i = net.features[i]\n rank = rank_func(layer_i)\n print('rank of the {}th layer: {}'.format(i, rank))\n \n # debugging\n print(\"begin decomposing layer {}\".format(i))\n decomp_layers = decomp_func(layer_i, rank)\n print(\"finished decomposing layer {}\".format(i))\n\n net.features = nn.Sequential(\\\n *(list(net.features[:i]) + decomp_layers + list(net.features[i + 1:])))\n\n i += len(decomp_layers)\n\n return net\n","repo_name":"ruihangdu/Decompose-CNN","sub_path":"scripts/decomp_alexnet.py","file_name":"decomp_alexnet.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"76"} 
+{"seq_id":"43262478342","text":"from django.urls import include, path, re_path\nfrom django.views.generic.base import TemplateView\n\nfrom songuploader.utils import LoginRequiredTemplateView, slice_song\n\nfrom .forms import SubmissionUploadForm\nfrom .views.file_views import FileDownload\nfrom .views.submission_views import SubmissionCreateView, SubmissionUpdateView\nfrom .views.views import IndexView, LoginView, LogoutView, DownloadPlaylistView\n\nsong_urlpatterns = [\n path(\n \"choose/\",\n LoginRequiredTemplateView.as_view(\n template_name=\"uploader/choose_upload_type.html\"\n ),\n name=\"choose-song\",\n ),\n path(\"from-youtube/\", SubmissionCreateView.as_view(), name=\"from-youtube\"),\n path(\n \"from-youtube/update/\",\n SubmissionUpdateView.as_view(),\n name=\"update-from-youtube\",\n ),\n path(\n \"from-file/\",\n SubmissionCreateView.as_view(\n form_class=SubmissionUploadForm,\n update_name=\"update-from-file\",\n submit_action=slice_song,\n ),\n name=\"from-file\",\n ),\n path(\n \"from-file/update/\",\n SubmissionUpdateView.as_view(\n form_class=SubmissionUploadForm, submit_action=slice_song\n ),\n name=\"update-from-file\",\n ),\n path(\"upload/\", SubmissionCreateView.as_view(), name=\"upload\"),\n path(\"playlist/\", DownloadPlaylistView.as_view(), name=\"download-playlist\")\n]\n\n\nurlpatterns = [\n path(\"\", IndexView.as_view(), name=\"index\"),\n path(\"song/\", include(song_urlpatterns)),\n re_path(r\"^media/(?P.*)/$\", FileDownload.as_view(), name=\"download-file\"),\n path(\"login/\", LoginView.as_view(), name=\"login\"),\n path(\"logout/\", LogoutView.as_view(), name=\"logout\"),\n]\n","repo_name":"SFSeeger/Abitur-Song-Uploader","sub_path":"src/uploader/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"72595703925","text":"import numpy as np\nimport torch\nfrom torchvision.models import resnet50\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom slam_rcnn.nn.base_projection_layer import BaseProjectionLayer\nfrom typing import Dict, Any, Callable\n\n\nclass ResNetProjectionLayer(BaseProjectionLayer):\n \"\"\"Create a feature projection layer in PyTorch that maintains a voxel\n grid description of the world world, where each voxel grid cell has\n a feature vector associated with it, typically semantic labels.\n\n Arguments:\n\n camera_height: int\n the map_height of the image generated by a pinhole camera onboard the\n agent, corresponding to a map_depth and semantic observation.\n camera_width: int\n the map_width of the image generated by a pinhole camera onboard the\n agent, corresponding to a map_depth and semantic observation.\n vertical_fov: float\n the vertical field of view of the onboard camera, measure in\n radians from the bottom of the viewport to the top.\n\n map_height: int\n the number of grid cells along the 'map_height' axis of the semantic\n map, as rendered using the top down rendering function.\n map_width: int\n the number of grid cells along the 'map_width' axis of the semantic\n map, as rendered using the top down rendering function.\n map_depth: int\n the number of grid cells that are collapsed along the 'up'\n direction by the top down rendering function.\n feature_size: int\n the number of units in each feature vector associated with every\n grid cell, such as the number of image segmentation categories.\n\n origin_y: float\n the center of the semantic map along the 'map_height' axis 
of the\n            semantic map as viewed from a top-down render of the map.\n    origin_x: float\n        the center of the semantic map along the 'map_width' axis of the\n        semantic map as viewed from a top-down render of the map.\n    origin_z: float\n        the center of the semantic map along the 'map_depth' axis of the\n        semantic map as viewed from a top-down render of the map.\n    grid_resolution: float\n        the length of a single side of each voxel in the semantic map in\n        units of the world coordinate system, which is typically meters.\n\n    interpolation_weight: float\n        float representing the interpolation weight used when adding\n        new features in the feature map weighted by interpolation_weight.\n    initial_feature_map: torch.Tensor\n        tensor representing the initial feature map tensor,\n        which will be set to zero if the value is not specified.\n\n    \"\"\"\n\n    def __init__(self, camera_height: int = 224, camera_width: int = 224,\n                 vertical_fov: float = 90.0, map_height: int = 256,\n                 map_width: int = 256, map_depth: int = 64,\n                 feature_size: int = 256, dtype: torch.dtype = torch.float32,\n                 origin_y: float = 0.0, origin_x: float = 0.0,\n                 origin_z: float = 0.0, grid_resolution: float = 0.05,\n                 interpolation_weight: float = 0.5,\n                 initial_feature_map: torch.Tensor = None):\n        \"\"\"Create a feature projection layer in PyTorch that maintains a voxel\n        grid description of the world, where each voxel grid cell has\n        a feature vector associated with it, typically semantic labels.\n\n        Arguments:\n\n        camera_height: int\n            the height of the image generated by a pinhole camera onboard the\n            agent, corresponding to a depth and semantic observation.\n        camera_width: int\n            the width of the image generated by a pinhole camera onboard the\n            agent, corresponding to a depth and semantic observation.\n        vertical_fov: float\n            the vertical field of view of the onboard camera, measured in\n            radians from the bottom of the viewport to the top.\n\n        map_height: int\n            the number of grid cells along the 'map_height' axis of the semantic\n            map, as rendered using the top down rendering function.\n        map_width: int\n            the number of grid cells along the 'map_width' axis of the semantic\n            map, as rendered using the top down rendering function.\n        map_depth: int\n            the number of grid cells that are collapsed along the 'up'\n            direction by the top down rendering function.\n        feature_size: int\n            the number of units in each feature vector associated with every\n            grid cell, such as the number of image segmentation categories.\n\n        origin_y: float\n            the center of the semantic map along the 'map_height' axis of the\n            semantic map as viewed from a top-down render of the map.\n        origin_x: float\n            the center of the semantic map along the 'map_width' axis of the\n            semantic map as viewed from a top-down render of the map.\n        origin_z: float\n            the center of the semantic map along the 'map_depth' axis of the\n            semantic map as viewed from a top-down render of the map.\n        grid_resolution: float\n            the length of a single side of each voxel in the semantic map in\n            units of the world coordinate system, which is typically meters.\n\n        interpolation_weight: float\n            float representing the interpolation weight used when adding\n            new features in the feature map weighted by interpolation_weight.\n        initial_feature_map: torch.Tensor\n            tensor representing the initial feature map tensor,\n            which will be set to zero if the value is not specified.\n\n        \"\"\"\n\n        super(ResNetProjectionLayer, self).__init__(\n            camera_height=camera_height // 4,\n            
camera_width=camera_width // 4,\n            vertical_fov=vertical_fov,\n            map_height=map_height,\n            map_width=map_width,\n            map_depth=map_depth,\n            feature_size=feature_size, dtype=dtype,\n            origin_y=origin_y, origin_x=origin_x, origin_z=origin_z,\n            grid_resolution=grid_resolution,\n            interpolation_weight=interpolation_weight,\n            initial_feature_map=initial_feature_map)\n\n        self.resnet_model = resnet50(pretrained=True)\n        self.resnet_model.eval()\n        self.resnet_preprocess = transforms.Compose([\n            transforms.Resize(224), \n            transforms.ToTensor(),\n            transforms.Normalize(mean=[0.485, 0.456, 0.406], \n                                 std=[0.229, 0.224, 0.225])\n        ])\n\n    def pseudo_forward(self, x):\n\n        with torch.no_grad():\n\n            x = self.resnet_model.conv1(x)\n            x = self.resnet_model.bn1(x)\n            x = self.resnet_model.relu(x)\n            x = self.resnet_model.maxpool(x)\n\n            x = self.resnet_model.layer1(x)\n            # x = self.resnet_model.layer2(x)\n            # x = self.resnet_model.layer3(x)\n            # x = self.resnet_model.layer4(x)\n\n        return x\n\n    def update(self, observation: Dict[str, torch.Tensor]):\n        \"\"\"Update the semantic map given a depth image and an RGB image\n        by projecting the computed features onto voxels in the semantic map\n        using a set of rays emanating from a virtual pinhole camera.\n\n        Arguments:\n\n        observation[\"position\"]: torch.Tensor\n            the position of the agent in the world coordinate system, where\n            the position will be binned to voxels in a semantic map.\n        observation[\"yaw\"]: torch.Tensor\n            a tensor representing the yaw in radians of the coordinate,\n            starting from the x-axis and turning counter-clockwise.\n        observation[\"elevation\"]: torch.Tensor\n            a tensor representing the elevation in radians about the x-axis,\n            with positive corresponding to upwards tilt.\n\n        observation[\"depth\"]: torch.FloatTensor\n            the length of the corresponding ray in world coordinates before\n            hitting a surface, with shape: [height, width, 1].\n        observation[\"rgb\"]: np.ndarray\n            an RGB image of the scene, from which ResNet features are computed\n            and scattered on the map, with shape: [height, width, 3].\n\n        \"\"\"\n\n        # ensure all tensors have the appropriate device and dtype\n        position = torch.as_tensor(observation[\n            \"position\"], dtype=torch.float32, device=self.data.device)\n        yaw = torch.as_tensor(observation[\n            \"yaw\"], dtype=torch.float32, device=self.data.device)\n        elevation = torch.as_tensor(observation[\n            \"elevation\"], dtype=torch.float32, device=self.data.device)\n        depth = torch.as_tensor(observation[\n            \"depth\"], dtype=torch.float32, device=self.data.device)\n\n        # prepare the image for processing with the resnet model\n        # by copying the image to the gpu and normalizing it appropriately\n        features = self.resnet_preprocess(Image.fromarray(\n            np.uint8(255.0 * observation[\"rgb\"]))\n            .convert('RGB')).unsqueeze(0).to(self.data.device)\n\n        features = self.pseudo_forward(features).squeeze(0).permute(1, 2, 0)\n        image_downsampling_factor = depth.shape[0] // features.shape[0]\n\n        # update the resnet feature map using the latest observations\n        super(ResNetProjectionLayer, self).update(  # from the environment\n            dict(position=position, yaw=yaw, elevation=elevation,\n                 depth=depth[image_downsampling_factor // 2::\n                             image_downsampling_factor,\n                             image_downsampling_factor // 2::\n                             image_downsampling_factor],\n                 features=features))\n\n        return self  # return self for chaining additional functions\n\n    def top_down(self, depth_slice: slice = slice(0, 32)):\n        \"\"\"Render a top-down view of a map of features organized as a three\n        dimensional grid of voxels, by taking the zero vector 
to be empty\n        voxels and rendering the top-most non-empty voxel to a pixel in a grid.\n\n        Arguments:\n\n        depth_slice: slice\n            a slice that specifies which map_depth components to use\n            when rendering a top down visualization of the feature map.\n\n        Returns:\n\n        feature_image: torch.Tensor\n            an image with a feature vector associated with every cell in the\n            image corresponding to a visible voxel in the original feature map.\n\n        \"\"\"\n\n        # forward the slice unchanged to the base class implementation\n        return super(ResNetProjectionLayer,\n                     self).top_down(depth_slice=depth_slice)\n\n    def visualize(self, obs: Dict[str, Any],\n                  depth_slice: slice = slice(4, 32)):\n        \"\"\"Helper function that returns a list of images that are used for\n        visualizing the contents of the feature map contained in subclasses,\n        such as visualizing object categories, or which voxels are obstacles.\n\n        Arguments:\n\n        obs: Dict[str, dict]\n            the current observation, as a dict or Tensor, which can be\n            used to visualize the current location of the agent in the scene.\n        depth_slice: Union[slice, Dict[str, slice]]\n            a slice that specifies which map_depth components to use\n            when rendering a top down visualization of the feature map.\n\n        Returns:\n\n        image: np.ndarray\n            a list of numpy arrays that visualize the contents of this layer,\n            such as an image showing semantic categories.\n\n        \"\"\"\n\n        pass\n","repo_name":"brandontrabucco/mass","sub_path":"mass/nn/applications/resnet_projection_layer.py","file_name":"resnet_projection_layer.py","file_ext":"py","file_size_in_byte":11991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"23152885826","text":"from track_simulator.interactor.get_trackanalysis_statistics import GetTrackAnalysisStatistics\nfrom track_simulator.entities.track_point import TrackPoint as Point\nimport pandas as pd\nfrom pandas import DataFrame\n\n\nclass GetTrackAnalysisStatisticsImpl(GetTrackAnalysisStatistics):\n    def apply(self, data: DataFrame) -> DataFrame:\n        data = self.__add_point_projection_distance(data)\n        data = self.__add_next_point_distance(data)\n        return data[['id', 'DistanceToNext', 'DistancePointProjection']]\n\n    def __add_next_point_distance(self, data: DataFrame) -> DataFrame:\n        data['point_lon_shift'] = data.Point_lon.shift()\n        data['point_lat_shift'] = data.Point_lat.shift()\n        data['DistanceToNext'] = data.apply(lambda x: Point(x['Point_lon'], x['Point_lat']).haversine_distance(\n            Point(x['point_lon_shift'],\n                  x['point_lat_shift'])),\n                                            axis=1)\n        return data.drop(columns=['point_lat_shift', 'point_lon_shift'])\n\n    def __add_point_projection_distance(self, data: DataFrame) -> DataFrame:\n        data['DistancePointProjection'] = data.apply(lambda x: Point(x['Point_lon'],\n                                                                     x['Point_lat']).haversine_distance(\n            Point(x['Projection_lon'],\n                  x['Projection_lat'])),\n                                                     axis=1)\n        return data\n\n","repo_name":"tboutaour/track-simulator","sub_path":"src/track_simulator/interactor/get_trackanalysis_statistics_impl.py","file_name":"get_trackanalysis_statistics_impl.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"43183069676","text":"\"\"\"Real-time example.\n\nThis example shows how a real-time 
visualizer of multiple digital signals can\nbe written with Galry.\n\n\"\"\"\nimport numpy as np\nfrom galry import *\n\n# initial values\nnsamples = 1000\nnplots = 10\nt = np.tile(np.linspace(-1., 1., nsamples), (nplots, 1))\nx = .01 * np.random.randn(nplots, nsamples) + np.linspace(-.75, .75, nplots)[:,np.newaxis]\n\n# this function returns 10*nplots new values at each call\ndef get_new_data():\n return .01 * np.random.randn(nplots, 10) + np.linspace(-.75, .75, nplots)[:,np.newaxis]\n\n# this function updates the plot at each call\ndef anim(fig, _):\n # append new data to the signal\n global x\n x = np.hstack((x[:,10:], get_new_data()))\n \n # create the new 1000*nplots*2 position array with x, y values at each row\n position = np.vstack((t.flatten(), x.flatten())).T\n \n # update the position of the plot\n fig.set_data(position=position)\n\n# plot the signal\nplot(t, x)\n\n# animate the plot: anim is called every 25 milliseconds\nanimate(anim, dt=.025)\n\n# show the figure\nshow()\n","repo_name":"rossant/galry","sub_path":"examples/realtime_multi.py","file_name":"realtime_multi.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"76"} +{"seq_id":"12708551947","text":"def merge_sort(arr):\n if len(arr) <= 1:\n return arr\n\n mid = len(arr) // 2\n left_arr = arr[:mid]\n right_arr = arr[mid:]\n\n left_arr = merge_sort(left_arr)\n right_arr = merge_sort(right_arr)\n\n return merge(left_arr, right_arr)\n\n\ndef merge(left_arr, right_arr):\n result = []\n left_idx, right_idx = 0, 0\n while left_idx < len(left_arr) and right_idx < len(right_arr):\n if left_arr[left_idx] <= right_arr[right_idx]:\n result.append(left_arr[left_idx])\n left_idx += 1\n else:\n result.append(right_arr[right_idx])\n right_idx += 1\n\n if left_idx < len(left_arr):\n result.extend(left_arr[left_idx:])\n else:\n result.extend(right_arr[right_idx:])\n\n return result\n\n\nif __name__ == '__main__':\n arr = [5, 1, 6, 3, 4, 2, 7]\n print(\"정렬 전 : \", arr)\n\n arr = merge_sort(arr)\n\n print(\"정렬 후 : \", arr)","repo_name":"KimBeomGi/STUDYduringSSAFY","sub_path":"python/수업/230329_online_병합정렬_내가찾은것.py","file_name":"230329_online_병합정렬_내가찾은것.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"11203295131","text":"# -*- coding: utf-8 -*-\nimport subprocess\nfrom testutils import notary_url\n\ndef sign_image(registry_ip, project_name, image, tag):\n try:\n ret = subprocess.check_output([\"./tests/apitests/python/sign_image.sh\", registry_ip, project_name, image, tag, notary_url], shell=False)\n print(\"sign_image return: \", ret)\n except subprocess.CalledProcessError as exc:\n raise Exception(\"Failed to sign image error is {} {}.\".format(exc.returncode, exc.output))\n\n","repo_name":"dundun9/harbor-arm64","sub_path":"tests/apitests/python/library/sign.py","file_name":"sign.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"21557764249","text":"import datetime\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport statistics\nfrom statistics import mean\nimport sys\nsys.path.append('C:/Users/Benedikt/Documents/dev/MA_LogParser/logparser/ILM_WS202223')\nimport generalfunctions as gf\n\nfrom pymongo import MongoClient\n\n\n###\n### For figuring out the operating times for teleporting\n###\ndef 
get_database():\n # Provide the mongodb atlas url to connect python to mongodb using pymongo\n CONNECTION_STRING = \"mongodb://fredix:memphis55@kurz-containar.de:27017\"\n\n # Create a connection using MongoClient. You can import MongoClient or use pymongo.MongoClient\n client = MongoClient(CONNECTION_STRING)\n # Create the database for our example (we will use the same database throughout the tutorial\n return client['ilm']\n\n\n\ndef aggregateData(array):\n lenArray = len(array)\n print(\"Len array \" + str(lenArray))\n data = []\n\n for i in range(0, lenArray):\n for elem in array[i]:\n data.append(elem)\n\n return data;\n\n\n\ndef writeToDb(name, value):\n dbname = get_database()\n tresor = dbname[\"tresor\"]\n\n dto = tresor.find({\"name\": name})\n dto = list(dto)\n\n print(\"Dto\")\n print(dto)\n\n if (len(dto) > 0):\n dto = {\"name\": name}\n newvalues = {\"$set\": {\"values\": value}}\n tresor.update_one(dto, newvalues)\n print(\"Update element\")\n else:\n dto = {\"name\": name, \"values\": value}\n tresor.insert_one(dto)\n print(\"New element\")\n\n\n\n# This is added so that many files can reuse the function get_database()\ndef boxplotCap(valArray):\n return f'\\n n = {len(valArray)} \\n' \\\n f'Me. = {round(statistics.median(valArray), 3)} s \\n ' \\\n f'Mi. = {round(statistics.mean(valArray), 2)}s \\n ' \\\n f'S. Abw. = {round(statistics.stdev(valArray), 3)} s \\n ' \\\n f'M. Abw. = {round(mean(valArray), 3)} s \\n ';\n\ndef convertToFloat(arr):\n\n arr = arr.get('values')\n\n print(arr)\n\n arr = list(arr)\n lenArray = len(arr)\n\n print(lenArray)\n\n allValues = []\n\n for e in range(0, lenArray):\n floatValues = []\n\n for elem in arr[e]:\n floatValues.append(float(elem))\n\n allValues.append(floatValues)\n\n return allValues\n\n\ndef convertToFloat1D(arr):\n\n arr = arr.get('values')\n arr = list(arr)\n lenArray = len(arr)\n\n\n allValues = []\n\n for elem in arr:\n allValues.append(float(elem))\n\n\n return allValues\n\n\n\ndef setXTicks_param(valArray, descArray):\n xtick = []\n i = 0\n\n # The descirption of fields\n for elem in valArray:\n s = boxplotCap(elem)\n\n if descArray:\n xtick.append(descArray[i] + s)\n else:\n xtick.append(s)\n i = i + 1\n\n lenVA = len(valArray)\n\n # First Parameter the number of fields\n elements = []\n for elem in range(0, lenVA):\n elements.append((elem + 1))\n\n return (elements, xtick)\n\n\nif __name__ == \"__main__\":\n # Get the database\n dbname = get_database()\n\n col = dbname[\"uwp\"]\n tresor = dbname[\"tresor\"]\n\n probands = ['A01', 'A02', 'A03', 'A04', 'A05', 'A06', 'A07', 'A08', 'A09', 'A10',\n 'A11', 'A12', 'A13', 'A14', 'A15', 'A16', 'A17', 'A18', 'A19', 'A20',\n 'A21', 'A22', 'A23', 'A24', 'A25', 'A26', 'A27', 'A28']\n\n\n # sceneGaze_HPG2 = runAnalyzeElementSteps(probands, sceneName, devices, hand)\n sceneGaze_R_MQP = tresor.find_one({'name': 'Gaze_AD_R_MQP'})\n sceneGaze_L_MQP = tresor.find_one({'name': 'Gaze_AD_L_MQP'})\n sceneGaze_R_MQ2 = tresor.find_one({'name': 'Gaze_AD_R_MQ2'})\n sceneGaze_L_MQ2 = tresor.find_one({'name': 'Gaze_AD_L_MQ2'})\n\n sceneGaze_HL2_first = tresor.find_one({'name': 'Gaze_UWP_HL2_first'})\n sceneGaze_HL2_second = tresor.find_one({'name': 'Gaze_UWP_HL2_second'})\n\n sceneGaze_HPG2_first = tresor.find_one({'name': 'Gaze_UWP_HPG2_first'})\n sceneGaze_HPG2_second = tresor.find_one({'name': 'Gaze_UWP_HPG2_second'})\n\n Ga_Ad = tresor.find_one({'name': 'Ga-Ad'})\n Ga_Ad = convertToFloat1D(Ga_Ad)\n\n Ga_1_Wi_HPG2 = tresor.find_one({'name': 'Ga-1-Wi-HPG2'})\n Ga_1_Wi_HPG2 = 
convertToFloat1D(Ga_1_Wi_HPG2)\n\n Ga_2_Wi_HPG2 = tresor.find_one({'name': 'Ga-2-Wi-HPG2'})\n Ga_2_Wi_HPG2 = convertToFloat1D(Ga_2_Wi_HPG2)\n\n Ga_VR = [Ga_Ad, Ga_1_Wi_HPG2, Ga_2_Wi_HPG2]\n Ga_VR = aggregateData(Ga_VR)\n\n\n\n\n sceneGaze_R_MQP = convertToFloat(sceneGaze_R_MQP)\n sceneGaze_L_MQP = convertToFloat(sceneGaze_L_MQP)\n sceneGaze_R_MQ2 = convertToFloat(sceneGaze_R_MQ2)\n sceneGaze_L_MQ2 = convertToFloat(sceneGaze_L_MQ2)\n\n sceneGaze_HL2_first = convertToFloat1D(sceneGaze_HL2_first)\n sceneGaze_HL2_second = convertToFloat1D(sceneGaze_HL2_second)\n sceneGaze_HPG2_first = convertToFloat1D(sceneGaze_HPG2_first)\n sceneGaze_HPG2_second = convertToFloat1D(sceneGaze_HPG2_second)\n\n\n sceneGaze_R_MQP = aggregateData(sceneGaze_R_MQP)\n sceneGaze_L_MQP = aggregateData(sceneGaze_L_MQP)\n sceneGaze_R_MQ2 = aggregateData(sceneGaze_R_MQ2)\n sceneGaze_L_MQ2 = aggregateData(sceneGaze_L_MQ2)\n\n\n\n # Aggregieren der Daten\n operatorGaze_second = [sceneGaze_R_MQP, sceneGaze_L_MQP, sceneGaze_R_MQ2, sceneGaze_L_MQ2, sceneGaze_HL2_second, sceneGaze_HPG2_second]\n\n operatorGaze_UWP_second = [sceneGaze_HL2_second, sceneGaze_HPG2_second]\n operatorGaze_AD_second = [sceneGaze_R_MQP, sceneGaze_L_MQP, sceneGaze_R_MQ2, sceneGaze_L_MQ2]\n\n operatorGaze_first = [sceneGaze_HL2_first, sceneGaze_HPG2_first]\n\n operatorGaze_first = aggregateData(operatorGaze_first)\n operatorGaze_second = aggregateData(operatorGaze_second)\n\n operatorGaze_UWP_second = aggregateData(operatorGaze_UWP_second)\n operatorGaze_AD_second = aggregateData(operatorGaze_AD_second)\n\n writeToDb(\"Gaze_first\", operatorGaze_first)\n writeToDb(\"Gaze_second\", operatorGaze_second)\n\n writeToDb(\"Gaze_AD_second\", operatorGaze_AD_second)\n writeToDb(\"Gaze_UWP_second\", operatorGaze_UWP_second)\n\n writeToDb(\"Ga-VR\", Ga_VR)\n writeToDb(\"Ga-AR-1\", sceneGaze_HL2_first)\n writeToDb(\"Ga-AR-2\", sceneGaze_HL2_second)\n\n\n operatorGaze = [operatorGaze_AD_second, operatorGaze_UWP_second, Ga_VR]\n\n\n print(\"Operator Gaze\")\n print(operatorGaze)\n\n\n\n box_MQP = [sceneGaze_R_MQP, sceneGaze_L_MQP]\n box_MQ2 = [sceneGaze_R_MQ2, sceneGaze_L_MQ2]\n\n box_R = [sceneGaze_R_MQP, sceneGaze_R_MQ2]\n box_L = [sceneGaze_L_MQP, sceneGaze_L_MQ2]\n\n writeToDb(\"Gaze_AD_MQP\", box_MQP)\n writeToDb(\"Gaze_AD_MQ2\", box_MQ2)\n\n writeToDb(\"Gaze_AD_R\", box_R)\n writeToDb(\"Gaze_AD_L\", box_L)\n\n\n\n ### Graphic\n fig, axs = plt.subplots(1, 1, figsize=(10, 8))\n\n fig.suptitle('Bearbeitungszeit mit dem nachgelagerten Schaltflächen des Gaze-Operators: Gesamt mit HPG2, MQ2, MQP, HL2')\n # ax = fig.add_axes(['Rechte Hand', 'Linke Hand'])\n plt.violinplot(operatorGaze)\n\n plt.ylabel('Sekunden')\n # axs[1].set(ylabel='Sekunden')\n\n\n descArrayXTicks = [\"Android Plattform\", \"Windows Plattform\", \"Ga-VR\"]\n\n (elemALL, xALL, dfALL) = gf.setXTicks_param(operatorGaze, descArrayXTicks)\n\n # axs[0].set_title('Gaze mit der MS HoloLens 2')\n plt.xticks(elemALL, xALL)\n\n plt.show()\n","repo_name":"UGIW-Dortmund/logparser","sub_path":"ILM_WS202223/scene_Gaze/ALL_gaze_Times.py","file_name":"ALL_gaze_Times.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42511018912","text":"import psycopg2\nfrom flask import Flask, render_template\nimport os\n\nconn = None\ncur = None\n\nuser = None\nuser_boards = []\nuser_pages = []\n\n\n# @app.route('/contact')\n# def contact():\n# return render_template('contact.html')\n\n\ndef create_app():\n app = 
Flask(__name__)\n app.config['SECRET_KEY'] = '9fc26a8960d2f5ed4126dde9830e49f4'\n\n global conn, cur\n try:\n # conn = psycopg2.connect(\"dbname='Merken' user='postgres' host='localhost' password='D3VAR5H'\")\n conn = psycopg2.connect(\"dbname='d91afrge4kgtr6' user='yitfmownerxauw' host='ec2-52-201-55-4.compute-1.amazonaws.com' password='4fc0f67d20051a8b20ebbb756edf60a303c29b73c17e355fe7e187aa390cef6e'\")\n except:\n print(\"I am unable to connect to the database\")\n cur = conn.cursor()\n\n from useraccounts import useraccounts_routes\n from boardnotes import boardnotes_routes\n\n app.register_blueprint(useraccounts_routes.useraccounts_bp, url_prefix='/accounts')\n app.register_blueprint(boardnotes_routes.boardnotes_bp, url_prefix='/boards')\n\n def get_data():\n global user\n global user_boards\n global user_pages\n if user is None:\n user = useraccounts_routes.get_user()\n user_boards = useraccounts_routes.get_boards()\n user_pages = useraccounts_routes.get_pages()\n\n @app.route('/')\n def index():\n return render_template('welcome.html')\n\n @app.route('/about')\n def about():\n get_data()\n return render_template('about.html', user=user, user_boards=user_boards, user_pages=user_pages)\n\n @app.route('/terms_and_conditions')\n def tc():\n return render_template('terms&condition.html')\n\n return app\n\n\nif __name__ == \"__main__\":\n app = create_app()\n app.run(debug=True)\n","repo_name":"D3VAR5H/Merken","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"30528922248","text":"# python3\r\n\r\n# Task: In this problem your goal is to implement the Rabin–Karp’s algorithm for searching the given pattern in the given text\r\n# Input Format: There are two strings in the input: the pattern P and the text T.\r\n# Constraints: 1 ≤ length of pattern ≤ length of Text ≤ 5·10**5. 
The total length of all occurrences of P in T doesn’t exceed 10**8.\r\n# The pattern and the text contain only Latin letters\r\n# Output Format: Print all the positions of the occurrences of P in T in the ascending order.\r\n# Use 0-based indexing of positions in the text T.\r\n# Memory Limit: 512MB.\r\n# Sample 1: Input: aba abacaba\r\n#           Output: 0 4\r\n# Explanation: The pattern aba can be found in positions 0 (abacaba) and 4 (abacaba) of the text abacaba.\r\n# Sample 2: Input: Test testTesttesT\r\n#           Output: 4\r\n# Explanation: Pattern and text are case-sensitive in this problem.\r\n# Pattern Test can only be found in position 4 in the text testTesttesT.\r\n# Sample 3: Input: aaaaa baaaaaaa\r\n#           Output: 1 2 3\r\n# Note that the occurrences of the pattern in the text can be overlapping, and that’s ok, you still need to output all of them.\r\n# It is clear that the algorithm should call the function that checks whether two strings are equal as rarely as possible,\r\n# and the idea is to use precomputed hash values of the string; an example of that is the PolyHash function provided below\r\n\r\n\r\nfrom random import randint\r\nimport sympy\r\n\r\n\r\nclass prepare_for_RabinKarp:\r\n    def __init__(self, text, pattern):\r\n        self.text = text\r\n        self.pattern = pattern\r\n        self.text_length = len(text)\r\n        self.pattern_length = len(pattern)\r\n    def generate_prime_number(self): # generation of a big prime number\r\n        return sympy.nextprime(self.text_length*1000000000000)\r\n    def generate_x(self, prime_number):\r\n        return randint(1, prime_number-1)\r\n    def PolyHash(self, string, prime_number, x): # Function to hash a pattern occurrence using polynomial hashing\r\n        hash = 0\r\n        for i in range(len(string)-1, -1, -1):\r\n            hash = (hash*x + ord(string[i])) % prime_number # in this project every character contributes its ASCII code\r\n        return hash\r\n    # Suppose we have text 'abcbd' and pattern length = 3.\r\n    # Hashing the strings will be done from the end of the text to the front.\r\n    # Assign a unique value to each character: let's suppose for character 'a' the value 0 is assigned, for 'b' = 1, for 'c' = 2 and\r\n    # for 'd' = 3. 
Then hash substrings of pattern length: h('cbd') = (2 + x + 3x**2) modulo prime number.\r\n    # Explanation: Multiply each hash value of a character by x to the power of its corresponding position starting from zero\r\n    # And now to compute the next hash value we do not necessarily need to compute it again, because the next substring is 'bcb'\r\n    # Since we already know the coefficients of its last (pattern_length - 1) elements, we can assign them directly multiplying by x to\r\n    # the corresponding power\r\n    # Example: h('cbd') = (2 + x + 3x**2) mod prime_number,               h('cbd') = (2 + x + 3x**2) mod prime_number\r\n    #                        |                                                      |\r\n    #                      ------                                                ------\r\n    #                        |                                                      |\r\n    # h('bcb') = (1 + 2x + x**2) mod prime_number,                  h('bcb') = (1 + 2x + x**2) mod prime_number\r\n    # The following function does that\r\n    def Precompute_Hashes(self, prime_number, x):\r\n        hash_array = [None]*(self.text_length - self.pattern_length + 1)\r\n        string = self.text[self.text_length - self.pattern_length: self.text_length]\r\n        hash_array[self.text_length - self.pattern_length] = self.PolyHash(string, prime_number, x)\r\n        y = 1\r\n        for i in range(1, self.pattern_length+1):\r\n            y = (y * x) % prime_number\r\n        for i in range(self.text_length - self.pattern_length - 1, -1, -1):\r\n            hash_array[i] = (x*hash_array[i+1] + ord(self.text[i]) - y*ord(self.text[i+self.pattern_length])) % prime_number\r\n        return hash_array\r\n    def AreEqual(self, string, pattern):\r\n        if string == pattern:\r\n            return True\r\n        return False\r\n\r\ndef RabinKarp(text, pattern):\r\n    processor = prepare_for_RabinKarp(text, pattern)\r\n    prime_number = processor.generate_prime_number()\r\n    x = processor.generate_x(prime_number)\r\n    result = []\r\n    pHash = processor.PolyHash(pattern, prime_number, x)\r\n    hash_array = processor.Precompute_Hashes(prime_number, x)\r\n    for i in range(0, processor.text_length - processor.pattern_length + 1):\r\n        if pHash != hash_array[i]:\r\n            continue\r\n        if processor.AreEqual(text[i:i + processor.pattern_length], pattern):\r\n            result.append(i)\r\n    return result\r\n\r\n\r\nif __name__ == '__main__':\r\n    pattern, text = input().rstrip(), input().rstrip()\r\n    result = RabinKarp(text, pattern)\r\n    # print every occurrence position in ascending order, as the problem requires\r\n    for i in result:\r\n        print(i)\r\n\r\n# The running time of functions: Precompute_Hashes: O(length of text + length of pattern)\r\n# PolyHash(pattern): O(length of pattern)\r\n# Total time spent in AreEqual: O(q * length of pattern), where q is the number of occurrences of pattern in the text\r\n# Average running time: O(length of text + (q + 1) * length of pattern), usually q is small, therefore\r\n# this takes much less time than O(length of text * length of pattern)\r\n# Worst case scenario: O(length of text * length of pattern) happens when function AreEqual is called over\r\n# and over again. 
Example: Text = AAAAAAAA, pattern = AAA.","repo_name":"AkhtemWays/Algorithms-and-data-structures-coursera-specialization","sub_path":"Data Structures/hash__substring.py","file_name":"hash__substring.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"23700924910","text":"import numpy as np\nfrom numpy import random\nimport math\nfrom src.neuron import *\n\n\nclass kohonenNetwork:\n\n    def __init__(self, k: int, r_0: int, learnRateConstant: bool, learnRate_0: float, inputs, labels, isRandom=True):\n        self.k = k\n        self.r_0 = r_0\n        self.learnRateConstant = learnRateConstant\n        # learnRate_0 is always stored, since train() reads it even when the rate is constant\n        self.learnRate_0 = learnRate_0\n        self.grid = [[None for _ in range(k)] for _ in range(k)]\n        self.initialIdxs = []\n        #rows = []\n        #for j in range(0,k):\n        #    row = [neuron(inputs[random.randint(0, len(inputs))]) for i in range(0, k)]\n        #    rows.append(row)\n        #self.grid = rows\n        if isRandom is True:\n            for i in range(0, k):\n                for j in range(0, k):\n                    idx = random.randint(0, len(inputs))\n                    self.grid[i][j] = neuron(inputs[idx])\n                    self.grid[i][j].addEntry(labels[idx])\n                    self.initialIdxs.append(idx)\n\n        else:\n            idx = 0\n            for i in range(0, k):\n                for j in range(0, k):\n                    self.grid[i][j] = neuron(inputs[idx])\n                    idx += 1\n                    # print(f'{i}, {j}: {inputs[idx]}')\n        self.iterations = 50000\n        print(self.initialIdxs)\n        # self.iterations = 5\n\n\n    def train(self, inputs, labels):\n        k = 0\n        r = self.r_0\n        n = self.learnRate_0\n        while k < self.iterations:\n            idx = random.randint(0, len(inputs))\n            x, y = self.findBestCandidate(inputs[idx])\n            self.updateNeighbours(x, y, self.grid, r, n, inputs[idx])\n            k, r, n = self.updateParameters(k)\n\n    def test(self, inputs, labels):\n\n        for i in range(len(inputs)):\n            x, y = self.findBestCandidate(inputs[i])\n            self.grid[x][y].addEntry(labels[i])\n\n    def findBestCandidate(self, candidate):\n        distance = np.inf\n        bestX = -1\n        bestY = -1\n        for x in range(0, self.k):\n            for y in range(0, self.k):\n\n                current = self.grid[x][y].distanceTo(candidate)\n                # print(f'candidate = {candidate}, name = {name}, current = {current}, distance = {distance}')\n                # print(current)\n                if current < distance:\n                    distance = current\n                    bestX = x\n                    bestY = y\n        # self.grid[bestX][bestY].addEntry(name)\n        # print(f'{bestX}, {bestY}')\n        return bestX, bestY\n\n    def updateNeighbours(self, x, y, grid, r, n, input):\n\n        # print(f'x={x}, y={y}, r={r}, n={n}')\n        for i in range(x - r, x + r + 1):\n            for j in range(y - r, y + r + 1):\n                # need to check for borders\n                if 0 <= i < self.k and 0 <= j < self.k and math.hypot(x - i, y - j) <= r:\n                    # print(i, j)\n                    # print(f'old weights = {grid[i][j].weights}, input = {input}')\n                    grid[i][j].weights += n * (input - grid[i][j].weights)\n                    # print(f'new weights = {grid[i][j].weights}')\n        self.grid = grid\n\n    def updateParameters(self, iters):\n        iters += 1\n        learnRate = self.learnRate_0\n        if not self.learnRateConstant:\n            learnRate = self.learnRate_0 - ((1 - self.learnRate_0) / self.iterations * iters)\n        r = int((1/self.r_0 - self.r_0) * (((iters / self.iterations))**0.5)) + self.r_0\n        # print(f'i: {iters} -> new r: {r}, new n = {learnRate}')\n        return iters, r, learnRate\n    # update r_0 and n_0\n","repo_name":"ManuelDizen/SIA","sub_path":"TP4 - Métodos de Aprendizaje NO supervisado/src/kohonenNetwork.py","file_name":"kohonenNetwork.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
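A brief, hypothetical smoke test for the kohonenNetwork record above; it assumes src.neuron.neuron stores a weight vector and exposes the distanceTo/addEntry interface used by the class, and the data shape and hyperparameters are illustrative only:

    import numpy as np

    inputs = np.random.rand(100, 2)
    labels = ['a' if p[0] > 0.5 else 'b' for p in inputs]

    som = kohonenNetwork(k=5, r_0=2, learnRateConstant=False,
                         learnRate_0=0.8, inputs=inputs, labels=labels)
    som.train(inputs, labels)
    som.test(inputs, labels)
    # each grid cell now records which labels were mapped onto it
    print('sample 0 maps to cell', som.findBestCandidate(inputs[0]))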
+{"seq_id":"25656390751","text":"import pickle\nimport traceback\n\nfrom fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin\nfrom fabric_cf.actor.core.apis.abc_base_plugin import ABCBasePlugin\nfrom fabric_cf.actor.core.apis.abc_callback_proxy import ABCCallbackProxy\nfrom fabric_cf.actor.core.apis.abc_concrete_set import ABCConcreteSet\nfrom fabric_cf.actor.core.apis.abc_proxy import ABCProxy\nfrom fabric_cf.actor.core.common.constants import Constants\nfrom fabric_cf.actor.core.common.exceptions import ProxyException\nfrom fabric_cf.actor.core.kernel.resource_set import ResourceSet\nfrom fabric_cf.actor.core.registry.actor_registry import ActorRegistrySingleton\nfrom fabric_cf.actor.core.util.id import ID\nfrom fabric_cf.actor.security.auth_token import AuthToken\n\n\nclass Proxy(ABCProxy):\n \"\"\"\n Proxy class represents a stub to an actor. Proxies define a general interface, which is implementation\n independent and enables easy implementation of new communication protocols.\n \"\"\"\n PropertyProxyActorName = \"prx_name\"\n\n @staticmethod\n def get_callback(*, actor: ABCActorMixin, protocol: str) -> ABCCallbackProxy:\n \"\"\"\n Obtains a callback for the specified actor\n @param actor actor\n @param protocol protocol\n @return ICallbackProxy\n \"\"\"\n if actor is None:\n raise ProxyException(Constants.NOT_SPECIFIED_PREFIX.format(\"actor\"))\n\n callback = ActorRegistrySingleton.get().get_callback(protocol=protocol, actor_name=actor.get_name())\n if callback is None:\n raise ProxyException(\"Could not obtain callback proxy: protocol={}\".format(protocol))\n return callback\n\n @staticmethod\n def get_proxy(*, proxy_reload_from_db) -> ABCProxy:\n \"\"\"\n Obtains a proxy object from the specified properties list. If a suitable\n proxy object has already been created and registered with the\n ActorRegistry, the already existing object is returned and\n no new object is created. Otherwise, the method creates the proxy object\n and registers it with the ActorRegistry\n @param proxy_reload_from_db proxy_reload_from_db\n @return IProxy\n @throws Exception in case of error\n \"\"\"\n proxy_type = proxy_reload_from_db.get_type()\n name = proxy_reload_from_db.get_name()\n\n is_callback = proxy_reload_from_db.callback\n\n proxy = None\n if is_callback:\n proxy = ActorRegistrySingleton.get().get_callback(protocol=proxy_type, actor_name=name)\n else:\n proxy = ActorRegistrySingleton.get().get_proxy(protocol=proxy_type, actor_name=name)\n\n if proxy is None:\n proxy = Proxy.recover_proxy(proxy_reload_from_db=proxy_reload_from_db, register=True)\n else:\n proxy = Proxy.recover_proxy(proxy_reload_from_db=proxy_reload_from_db, register=False)\n return proxy\n\n @staticmethod\n def recover_proxy(*, proxy_reload_from_db: ABCProxy, register: bool) -> ABCProxy:\n \"\"\"\n Creates a proxy list from a properties list representing the\n serialization of the proxy. 
Optionally, the resulting object may be\n registered with the ActorRegistry so that it becomes visible\n to the rest of the system.\n @param proxy_reload_from_db proxy_reload_from_db\n @param register If true, the resulting proxy is registered with the\n container's ActorRegistry\n @return Proxy\n @throws Exception in case of error\n \"\"\"\n\n from fabric_cf.actor.core.container.globals import GlobalsSingleton\n proxy_reload_from_db.set_logger(logger=GlobalsSingleton.get().get_logger())\n\n if register:\n if proxy_reload_from_db.callback:\n ActorRegistrySingleton.get().register_callback(callback=proxy_reload_from_db)\n else:\n ActorRegistrySingleton.get().register_proxy(proxy=proxy_reload_from_db)\n return proxy_reload_from_db\n\n @staticmethod\n def decode(*, encoded, plugin: ABCBasePlugin) -> ABCConcreteSet:\n try:\n decoded_resource = pickle.loads(encoded)\n decoded_resource.restore(plugin=plugin, reservation=None)\n return decoded_resource\n except Exception as e:\n traceback.print_exc()\n print(\"Exception occurred while decoding {}\".format(e))\n return None\n\n def __init__(self, *, auth: AuthToken = None):\n self.logger = None\n self.proxy_type = None\n self.callback = False\n self.actor_name = None\n if auth is not None:\n self.actor_name = auth.get_name()\n self.actor_guid = None\n if auth is not None:\n self.actor_guid = auth.get_guid()\n self.auth = auth\n\n def __getstate__(self):\n state = self.__dict__.copy()\n del state['logger']\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.logger = None\n\n def get_guid(self) -> ID:\n return self.actor_guid\n\n def get_identity(self) -> AuthToken:\n return self.auth\n\n def get_name(self) -> str:\n return self.actor_name\n\n def get_type(self) -> str:\n return self.proxy_type\n\n def set_logger(self, *, logger):\n self.logger = logger\n\n def get_logger(self):\n return self.logger\n\n def abstract_clone_authority(self, *, rset: ResourceSet) -> ResourceSet:\n \"\"\"\n Clones the resource set, but without any of the concrete sets. Preserves\n only the configuration properties. This method should be used when\n sending a redeem/extend/close request to an authority.\n @param rset resource set\n @return a resources set that is a copy of the current but without any\n concrete sets.\n \"\"\"\n return ResourceSet(units=rset.get_units(), rtype=rset.get_type(), sliver=rset.get_sliver())\n\n def abstract_clone_broker(self, *, rset: ResourceSet) -> ResourceSet:\n \"\"\"\n Clones the resource set, but without any of the concrete sets. Preserves\n only the configuration properties. This method should be used when\n sending a redeem/extend/close request to an authority.\n @param rset resource set\n @return a resources set that is a copy of the current but without any\n concrete sets.\n \"\"\"\n return ResourceSet(units=rset.get_units(), rtype=rset.get_type(), sliver=rset.get_sliver())\n\n @staticmethod\n def abstract_clone_return(rset: ResourceSet) -> ResourceSet:\n \"\"\"\n Clones the resource set, but without any of the concrete sets. Preserves\n only the configuration properties. 
This method should be used when\n        sending a redeem/extend/close request to an authority.\n        @param rset resource set\n        @return a resources set that is a copy of the current but without any\n        concrete sets.\n        \"\"\"\n        return ResourceSet(units=rset.get_units(), rtype=rset.get_type(), sliver=rset.get_sliver())\n","repo_name":"fabric-testbed/ControlFramework","sub_path":"fabric_cf/actor/core/proxies/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":7041,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"24098046777","text":"from flask import Flask, render_template, redirect, url_for, session,request\nfrom flask_mail import Mail, Message\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, SelectMultipleField,RadioField\nfrom wtforms.validators import DataRequired, Email, EqualTo\nfrom wtforms import ValidationError\n\nclass ProjectForm(FlaskForm):\n    name=StringField('Name',validators=[DataRequired()])\n    mobile=StringField('Mobile Number', validators=[DataRequired()])\n    email=StringField('Email ID',validators=[DataRequired(),Email()])\n    title=StringField('Project Title',validators=[DataRequired()])\n    description=StringField('Project Description',validators=[DataRequired()])\n    submit1=SubmitField('Submit Request')\n\nclass WorkshopForm(FlaskForm):\n    name2=StringField('Name',validators=[DataRequired()])\n    organisation=StringField('Organisation',validators=[DataRequired()])\n    mobile2=StringField('Mobile Number',validators=[DataRequired()])\n    email2=StringField('Email ID',validators=[DataRequired(),Email()])\n    workshop=RadioField('Workshops',choices=[('Drone Programming','Drone Programming'),('RC Plane Design','RC Plane Design'),('Structural and Flow Analysis of RC Planes','Structural and Flow Analysis of RC Planes'),('Material Selection for RC Planes','Material Selection')])\n    attendees=StringField('Expected Number of Attendees',validators=[DataRequired()])\n    submit2=SubmitField('Submit Request')\n\nclass ContactForm(FlaskForm):\n    name3=StringField('Name',validators=[DataRequired()])\n    mobile3=StringField('Mobile Number', validators=[DataRequired()])\n    email3=StringField('Email ID',validators=[DataRequired(),Email()])\n    message=StringField('Type your message here',validators=[DataRequired()])\n    submit3=SubmitField('Submit')\n\napp=Flask(__name__)\n\napp.config['SECRET_KEY']='mykey'\napp.config['DEBUG']=True\napp.config['TESTING']=False\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT']=587\napp.config['MAIL_USE_TLS']=True\napp.config['MAIL_USERNAME']='teamaviatorsforms@gmail.com'\napp.config['MAIL_PASSWORD']='ljmbnoiplxwgpexz'\napp.config['MAIL_DEFAULT_SENDER']='teamaviatorsforms@gmail.com'\napp.config['MAIL_MAX_MAILS']=None\napp.config['MAIL_ASCII_ATTACHMENTS']=False\n\n\nmail=Mail(app)\n\n#configure the functions for mailing\n\ndef project_mail(name,mobile,email,title,description):\n    global mail\n    msg1=Message('Project Request Received',recipients=[email])\n    msg1.body='Dear '+name+',\\n\\nYour project request has been received by Team Aviators International. 
We will review your request and get back to you soon.\\n\\nRegards,\\nTeam Aviators International'\n mail.send(msg1)\n msg2=Message('New Project Request',recipients=['teamaviatorsinternational@gmail.com'])\n msg2.body='Name of Applicant: {}\\n\\nMobile Number: {}\\n\\nEmail ID: {}\\n\\nProject Title: {}\\n\\nProject Description: {}'.format(name,mobile,email,title,description)\n mail.send(msg2)\n\ndef contact_mail(name,mobile,email,message):\n global mail\n msg1=Message('Contact Enquiry Received',recipients=[email])\n msg1.body='Dear '+name+',\\n\\nThank you for reaching out to us. Somebody from our team will soon contact you to further know your interests and queries.\\n\\nRegards,\\nTeam Aviators International'\n mail.send(msg1)\n msg2=Message('New Contact Enquiry',recipients=['teamaviatorsinternational@gmail.com'])\n msg2.body='Name of Applicant: {}\\n\\nMobile Number: {}\\n\\nEmail ID: {}\\n\\nMessage: {}'.format(name,mobile,email,message)\n mail.send(msg2)\n\ndef workshop_mail(name,organisation,mobile,email,workshop,attendees):\n global mail\n msg1=Message('Workshop Request Received',recipients=[email])\n msg1.body='Dear '+name+',\\n\\nYour request for a workshop on '+workshop+ ' has been received successfully by Team Aviators International. We will contact you soon for further proceedings.\\n\\nRegards,\\nTeam Aviators International'\n mail.send(msg1)\n msg2=Message('New Workshop Request',recipients=['teamaviatorsinternational@gmail.com'])\n msg2.body='Name of Applicant: {}\\n\\nOrganisation: {}\\n\\nMobile Number: {}\\n\\nEmail ID: {}\\n\\nWorkshop: {}\\n\\nExpected Number of Attendees: {}'.format(name,organisation,mobile,email,workshop,attendees)\n mail.send(msg2)\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n form1=ProjectForm()\n form2=WorkshopForm()\n form3=ContactForm()\n\n if(form1.submit1.data and form1.validate()):\n session['name']=form1.name.data\n session['mobile']=form1.mobile.data\n session['email']=form1.email.data \n session['title']=form1.title.data\n session['description']=form1.description.data\n project_mail(session['name'],session['mobile'],session['email'],session['title'],session['description'])\n return redirect(url_for('index'))\n \n if(form2.submit2.data and form2.validate()):\n session['name2']=form2.name2.data\n session['organisation']=form2.organisation.data\n session['mobile2']=form2.mobile2.data\n session['email2']=form2.email2.data\n session['workshop']=form2.workshop.data\n session['attendees']=form2.attendees.data\n workshop_mail(session['name2'],session['organisation'],session['mobile2'],session['email2'],session['workshop'],session['attendees'])\n return redirect(url_for('index'))\n \n if(form3.submit3.data and form3.validate()):\n session['name3']=form3.name3.data\n session['mobile3']=form3.mobile3.data\n session['email3']=form3.email3.data\n session['message']=form3.message.data\n contact_mail(session['name3'],session['mobile3'],session['email3'],session['message'])\n return redirect(url_for('index'))\n\n\n return render_template(\"homepage.html\",form1=form1, form2=form2, form3=form3)\n\n\n@app.route('/about_us',methods=['GET','POST'])\ndef about_us():\n form1=ProjectForm()\n form2=WorkshopForm()\n form3=ContactForm()\n\n if(form1.submit1.data and form1.validate()):\n session['name']=form1.name.data\n session['mobile']=form1.mobile.data\n session['email']=form1.email.data \n session['title']=form1.title.data\n session['description']=form1.description.data\n 
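# send the confirmation and notification emails, then redirect so a page\n        # refresh cannot resubmit the form (Post/Redirect/Get)\n        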
project_mail(session['name'],session['mobile'],session['email'],session['title'],session['description'])\n return redirect(url_for('about_us'))\n \n if(form2.submit2.data and form2.validate()):\n session['name2']=form2.name2.data\n session['organisation']=form2.organisation.data\n session['mobile2']=form2.mobile2.data\n session['email2']=form2.email2.data\n session['workshop']=form2.workshop.data\n session['attendees']=form2.attendees.data\n workshop_mail(session['name2'],session['organisation'],session['mobile2'],session['email2'],session['workshop'],session['attendees'])\n return redirect(url_for('about_us'))\n \n if(form3.submit3.data and form3.validate()):\n session['name3']=form3.name3.data\n session['mobile3']=form3.mobile3.data\n session['email3']=form3.email3.data\n session['message']=form3.message.data\n contact_mail(session['name3'],session['mobile3'],session['email3'],session['message'])\n return redirect(url_for('about_us'))\n return render_template(\"team.html\",form1=form1, form2=form2, form3=form3)\n\n@app.route('/gallery',methods=['GET','POST'])\ndef gallery():\n form1=ProjectForm()\n form2=WorkshopForm()\n form3=ContactForm()\n\n if(form1.submit1.data and form1.validate()):\n session['name']=form1.name.data\n session['mobile']=form1.mobile.data\n session['email']=form1.email.data \n session['title']=form1.title.data\n session['description']=form1.description.data\n project_mail(session['name'],session['mobile'],session['email'],session['title'],session['description'])\n return redirect(url_for('gallery'))\n \n if(form2.submit2.data and form2.validate()):\n session['name2']=form2.name2.data\n session['organisation']=form2.organisation.data\n session['mobile2']=form2.mobile2.data\n session['email2']=form2.email2.data\n session['workshop']=form2.workshop.data\n session['attendees']=form2.attendees.data\n workshop_mail(session['name2'],session['organisation'],session['mobile2'],session['email2'],session['workshop'],session['attendees'])\n return redirect(url_for('gallery'))\n \n if(form3.submit3.data and form3.validate()):\n session['name3']=form3.name3.data\n session['mobile3']=form3.mobile3.data\n session['email3']=form3.email3.data\n session['message']=form3.message.data\n contact_mail(session['name3'],session['mobile3'],session['email3'],session['message'])\n return redirect(url_for('gallery'))\n return render_template(\"gallery.html\",form1=form1, form2=form2, form3=form3)\n\n@app.route('/projects',methods=['GET','POST'])\ndef projects():\n form1=ProjectForm()\n form2=WorkshopForm()\n form3=ContactForm()\n\n if(form1.submit1.data and form1.validate()):\n session['name']=form1.name.data\n session['mobile']=form1.mobile.data\n session['email']=form1.email.data \n session['title']=form1.title.data\n session['description']=form1.description.data\n project_mail(session['name'],session['mobile'],session['email'],session['title'],session['description'])\n return redirect(url_for('projects'))\n \n if(form2.submit2.data and form2.validate()):\n session['name2']=form2.name2.data\n session['organisation']=form2.organisation.data\n session['mobile2']=form2.mobile2.data\n session['email2']=form2.email2.data\n session['workshop']=form2.workshop.data\n session['attendees']=form2.attendees.data\n workshop_mail(session['name2'],session['organisation'],session['mobile2'],session['email2'],session['workshop'],session['attendees'])\n return redirect(url_for('projects'))\n \n if(form3.submit3.data and form3.validate()):\n session['name3']=form3.name3.data\n 
session['mobile3']=form3.mobile3.data\n session['email3']=form3.email3.data\n session['message']=form3.message.data\n contact_mail(session['name3'],session['mobile3'],session['email3'],session['message'])\n return redirect(url_for('projects'))\n return render_template(\"projects.html\",form1=form1, form2=form2, form3=form3)\n\n\nif __name__=='__main__':\n app.run()\n","repo_name":"Harsh-Naicker/TAI-Website","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9224501467","text":"import os\nos.environ[\"CUDA_MAX_MEM_ALLOC_PERCENT\"] = \"95\"\nimport torch\n#from torch.utils.tensorboard import SummaryWriter\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torch.optim as optim\nfrom torch import optim\nfrom torch.utils import data\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.autograd import Variable\nfrom torch import tensor\n\nimport torchvision.models as models\nimport torchvision\nfrom torchvision import datasets, models, transforms\nfrom efficientnet_pytorch import EfficientNet\n\nimport glob\nimport os\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nimport matplotlib.pyplot as plt\n#from PIL import Image\nimport cv2\nimport seaborn as sns\n\n\nfrom sklearn.utils import shuffle\nfrom sklearn import preprocessing as p\nfrom sklearn.metrics import RocCurveDisplay\nfrom sklearn.metrics import confusion_matrix, roc_curve, auc\n\n\nfrom torchmetrics.classification import ROC\nfrom torchmetrics import AUROC\n\nimport optuna\nfrom optuna.trial import TrialState\n\nfrom segmentation_models_pytorch import losses\ndice_loss = losses.DiceLoss('binary')\nfoc_loss = losses.FocalLoss('binary')\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nval_transforms = transforms.Compose([torchvision.transforms.ToTensor(),\n transforms.Resize((224,224)),\n torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n ),\n ])\n\nclass myDataset_test(Dataset):\n\n def __init__(self, transform=None): \n #folder containing class folders with images\n self.imgs_path = \"/home/viktoriia.trokhova/Mri_slices_new/test/\"\n self.masks_path = \"/home/viktoriia.trokhova/Mask_slices/test/\"\n file_list = glob.glob(self.imgs_path + \"*\")\n msk_list = glob.glob(self.masks_path + \"*\")\n #msk_list[0], msk_list[1] = msk_list[1], msk_list[0]\n self.images = []\n self.targets = []\n self.masks = []\n for class_path in file_list:\n class_name = class_path.split(\"/\")[-1]\n for img_path in sorted(glob.glob(class_path + \"/*\")):\n self.images.append(img_path)\n for img_path in sorted(glob.glob(class_path + \"/*\")):\n self.targets.append(class_name)\n for msk_path in msk_list:\n for masks_path in sorted(glob.glob(msk_path + \"/*\")):\n self.masks.append(masks_path)\n self.images, self.targets, self.masks = shuffle(self.images, self.targets, self.masks, random_state=101)\n print(self.images[-100])\n print(self.targets[-100])\n print(self.masks[-100])\n print(len(self.images))\n print(len(self.targets))\n print(len(self.masks))\n self.class_map = {\"HGG_t2\" : 0, \"LGG_t2\": 1}\n self.img_dim = (224, 224)\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n class_name = self.targets[idx]\n masks_path = self.masks[idx]\n img = np.load(img_path)\n msk = np.load(masks_path)\n min_max_scaler = p.MinMaxScaler()\n img = 
min_max_scaler.fit_transform(img)\n msk = min_max_scaler.fit_transform(msk)\n img_float32 = np.float32(img)\n img_color = cv2.cvtColor(img_float32, cv2.COLOR_GRAY2RGB)\n img_tensor = val_transforms(img_color)\n msk_float32 = np.float32(msk)\n msk_color = cv2.cvtColor(msk_float32, cv2.COLOR_GRAY2RGB)\n msk_tensor = val_transforms(msk_color)\n class_id = self.class_map[class_name]\n class_id = torch.tensor(class_id)\n \n return img_tensor, class_id, msk_tensor\n\n\nclass SelfAttention(nn.Module):\n def __init__(self, in_channels):\n super().__init__()\n self.query = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1)\n self.key = nn.Conv2d(in_channels, in_channels // 8, kernel_size=1)\n self.value = nn.Conv2d(in_channels, in_channels, kernel_size=1)\n self.softmax = nn.Softmax(dim=-1)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n def forward(self, x):\n batch_size, channels, height, width = x.size()\n proj_query = self.query(x).view(batch_size, -1, height * width).permute(0, 2, 1)\n proj_key = self.key(x).view(batch_size, -1, height * width)\n energy = torch.bmm(proj_query, proj_key)\n attention = self.softmax(energy)\n proj_value = self.value(x).view(batch_size, -1, height * width)\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(batch_size, channels, height, width)\n out = self.gamma * out + x\n return out\n\n\nclass MyCustomEfficientNetB0(nn.Module):\n def __init__(self, pretrained=True):\n super().__init__()\n \n efficientnet_b0 = EfficientNet.from_pretrained('efficientnet-b0').to(device)\n self.features = efficientnet_b0.extract_features\n in_features = efficientnet_b0._fc.in_features\n self.attention = SelfAttention(in_features)\n self.last_pooling_operation = nn.AdaptiveAvgPool2d((1, 1))\n self.fc1 = nn.Linear(in_features, 128)\n self.fc2 = nn.Linear(128, 2)\n\n\n def forward(self, input_imgs, targets=None, masks=None, batch_size = None, xe_criterion=nn.CrossEntropyLoss(), l1_criterion=nn.L1Loss(), dropout=None):\n images_feats = self.features(input_imgs)\n images_att = self.attention(images_feats)\n output = self.last_pooling_operation(images_att)\n output = output.view(input_imgs.size(0), -1)\n images_outputs = self.fc1(output)\n output = dropout(images_outputs)\n images_outputs = F.relu(self.fc2(output))\n #images_outputs = nn.ReLU(self.fc2(output))\n\n\n # # compute gcam for images\n orig_gradcam_mask = compute_gradcam(images_outputs, images_feats, targets)\n\n # #upsample gradcam to (224, 224, 3)\n gcam_losses = 0.0\n\n for i in range(batch_size):\n #print(orig_gradcam_mask[i].shape)\n img_grad = orig_gradcam_mask[i].unsqueeze(0).permute(1, 2, 0)\n img_grad_1 = img_grad.cpu()\n img_grad_2 = img_grad_1.detach().numpy()\n img_grad_3 = cv2.resize(img_grad_2, (224,224), cv2.INTER_LINEAR)\n img_grad_4 = cv2.cvtColor(img_grad_3, cv2.COLOR_GRAY2RGB)\n img_grad_5 = torch.from_numpy(img_grad_4)\n img_grad_6 = img_grad_5.to(device)\n #img_grad_6 = torch.nn.ReLU(inplace=True)(img_grad_6)\n\n\n #masks to same dimension\n masks_per = masks[i].permute(1, 2, 0)\n masks_per = cv2.normalize(masks_per.cpu().numpy(), None, alpha = 0, beta = 1, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n img_grad_6 = cv2.normalize(img_grad_6.cpu().numpy(), None, alpha = 0, beta = 1, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)\n masks_per[np.mean(masks_per, axis=-1)<0.2] = 0\n masks_per[np.mean(masks_per, axis=-1)>=0.2] = 1\n\n gcam_loss = foc_loss(torch.from_numpy(img_grad_6), torch.from_numpy(masks_per))\n #print(gcam_loss)\n #gcam_loss = l1_criterion(img_grad_6, 
masks_per)\n                gcam_losses += gcam_loss\n\n            # gcam_loss = l1_criterion(img_grad_6, masks_per)\n            # gcam_losses += gcam_loss\n\n            #gcam_losses += gcam_loss.item() * input_imgs.size(0)\n        #gcam_losses = gcam_losses/batch_size\n        xe_loss = xe_criterion(images_outputs, targets)\n        \n\n        return images_outputs, targets, xe_loss, gcam_losses #return images_outputs\n\ndef compute_gradcam(output, feats, target):\n    \"\"\"\n    Compute normalized Grad-CAM for the given target using the model output and features\n    :param output:\n    :param feats:\n    :param target:\n    :return:\n    \"\"\"\n    eps = 1e-8\n\n    target = target.cpu().detach().numpy()\n    one_hot = np.zeros((output.shape[0], output.shape[-1]), dtype=np.float32)\n    indices_range = np.arange(output.shape[0])\n    one_hot[indices_range, target[indices_range]] = 1\n    one_hot = torch.from_numpy(one_hot)\n    one_hot = Variable(one_hot, requires_grad=True)\n\n    # Compute the Grad-CAM for the original image\n    one_hot_cuda = torch.sum(one_hot.to(device) * output)\n    dy_dz1, = torch.autograd.grad(one_hot_cuda, feats, grad_outputs=torch.ones(one_hot_cuda.size()).to(device),\n                                  retain_graph=True, create_graph=True)\n\n    # We compute the dot product of grad and features (Element-wise Grad-CAM) to preserve grad spatial locations\n    gcam512_1 = dy_dz1 * feats\n    gradcam = gcam512_1.sum(dim=1)\n    gradcam = torch.nn.ReLU(inplace=True)(gradcam)\n    spatial_sum1 = gradcam.sum(dim=[1, 2]).unsqueeze(-1).unsqueeze(-1)\n    gradcam = (gradcam / (spatial_sum1 + eps)) + eps\n\n\n    return gradcam\n\nmodel = MyCustomEfficientNetB0(pretrained=True).to(device)\n\n\nmodel.load_state_dict(torch.load('/home/viktoriia.trokhova/model_weights/model_best.pt'), strict=False)\n\ntest_dataset = myDataset_test(transform = None)\ntest_dataloader = torch.utils.data.DataLoader(test_dataset,\n                          batch_size=32,\n                          shuffle=False,\n                          num_workers=0)\n\nmodel.eval()\nrunning_loss = 0.0\nrunning_corrects = 0.0\nfor inputs, labels, masks in test_dataloader:\n    inputs = inputs.to(device)\n    labels = labels.to(device)\n    masks = masks.to(device)\n    \n    outputs, targets_, xe_loss_, gcam_losses_ = model(inputs, labels, masks, batch_size = inputs.size(0), dropout=nn.Dropout(0.79))\n\n    loss = xe_loss_.mean() + 0.575 * gcam_losses_.mean()\n    #loss = xe_loss_.mean()\n    \n    _, preds = torch.max(outputs, 1) \n\n    running_loss += loss.item() * inputs.size(0)\n    running_corrects += torch.sum(preds == labels.data)\n\nepoch_loss = running_loss / len(test_dataset)\nepoch_acc = running_corrects.double() / len(test_dataset)\nprint('Test loss: {:.4f}, acc: {:.4f}'.format(epoch_loss,\n                                                epoch_acc))\n","repo_name":"Viktoriia-Dasci/master_4","sub_path":"test_effnet.py","file_name":"test_effnet.py","file_ext":"py","file_size_in_byte":10145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"342237209","text":"from torch import nn\nfrom functools import partial\nfrom einops.layers.torch import Rearrange, Reduce\n\npair = lambda x: x if isinstance(x, tuple) else (x, x)\n\nclass PreNormResidual(nn.Module):\n    def __init__(self, dim, fn):\n        super().__init__()\n        self.fn = fn\n        self.norm = nn.LayerNorm(dim)\n\n    def forward(self, x):\n        return self.fn(self.norm(x)) + x\n\ndef FeedForward(dim, expansion_factor = 4, dropout = 0., dense = nn.Linear):\n    inner_dim = int(dim * expansion_factor)\n    return nn.Sequential(\n        dense(dim, inner_dim),\n        nn.GELU(),\n        nn.Dropout(dropout),\n        dense(inner_dim, dim),\n        nn.Dropout(dropout)\n    )\n\ndef MLPMixer(*, image_size, channels, patch_size, dim, depth, num_classes, expansion_factor = 4, 
expansion_factor_token = 0.5, dropout = 0.):\n    image_h, image_w = pair(image_size)\n    assert (image_h % patch_size) == 0 and (image_w % patch_size) == 0, 'image must be divisible by patch size'\n    num_patches = (image_h // patch_size) * (image_w // patch_size)\n    chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear\n\n    return nn.Sequential(\n        Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),\n        nn.Linear((patch_size ** 2) * channels, dim),\n        *[nn.Sequential(\n            PreNormResidual(dim, FeedForward(num_patches, expansion_factor, dropout, chan_first)),\n            PreNormResidual(dim, FeedForward(dim, expansion_factor_token, dropout, chan_last))\n        ) for _ in range(depth)],\n        nn.LayerNorm(dim),\n        Reduce('b n c -> b c', 'mean'),\n        nn.Linear(dim, num_classes)\n    )\n","repo_name":"lucidrains/mlp-mixer-pytorch","sub_path":"mlp_mixer_pytorch/mlp_mixer_pytorch.py","file_name":"mlp_mixer_pytorch.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":922,"dataset":"github-code","pt":"76"} +{"seq_id":"26736834561","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 22 11:06:17 2016\n\nA scientific calculator that accepts inputs of +-*/^% as well as ().\nAlso accepts trigonometric inputs (sin(x), cos(x), tan(x), asin(x), acos(x), atan(x),\nsqrt(x), root(x,n), log(x), ln(x), fact(x) and abs(x) functions.\nAccepts exponential notation in the format 2.5 EXP 7 (not case sensitive)\nCan use ans to use the calculator's previous answer. \n\n@author: dmcauslan\n\nmodified 28/09/2016:\n    * Calculator now accepts negative numbers as inputs\n    * Calculator now checks if there's unmatched brackets and brings up an error\n    * Calculator now brings up an error if the number of numerical inputs is incorrect\n    * Calculator now accepts % to perform modulo calculation\n    * Calculator now parses special functions - toPostfix just passes if it finds a fn atm.\nmodified 29/09/2016:\n    * Calculator now accepts trigonometric functions, sqrt(), log(), ln(), abs as inputs\n    * Now accepts pi and e as special numerical inputs\n    * Have implemented scientific notation with exp.\n    \nmodified 30/09/2016:\n    * Now accepts 'ans' input, to recall the answer of the last calculation\n    * Implemented function 'root(x,n)' which calculates the nth root of x\n    * Implement factorial function fact(x) which calculates x!\n    * Tidied up functions a bit.\n    * Created standalone exe version\n    \nTo Do:\n    Create GUI\n\"\"\"\n\nimport operator as op\nimport string\nimport math as m\n\ndef isFloat(value):\n    \"\"\" Checks whether a number is a float\"\"\" \n    try:\n        float(value)\n        return True\n    except ValueError:\n        return False\n    \ndef nthRoot(x,n):\n    \"\"\" Calculates the nth root of x \"\"\"\n    return op.pow(x,1/n)\n\n\nclass Calculator:\n    # Dictionary of the operators with their corresponding operation and precedence\n    operators = {'+': (op.add,0), '-': (op.sub,0), \n                 '*': (op.mul,1), '/': (op.truediv,1), \n                 '^': (op.pow,2), '%': (op.mod,3) }\n    # Dictionary of the available special functions for use \n    functions = {'sqrt': m.sqrt, 'log': m.log10, 'ln': m.log, 'abs': m.fabs,\n                 'sin': m.sin, 'cos': m.cos, 'tan': m.tan,\n                 'asin': m.asin, 'acos': m.acos, 'atan': m.atan,\n                 'root': nthRoot, 'fact': m.factorial}\n    # Special numbers\n    specialNumbers = {'pi': m.pi, 'e': m.e}\n    \n    def __init__(self, previousAnswer):\n        self.previousAnswer = previousAnswer\n\n    \n    def evaluate(self, inputString):\n        \"\"\" Main method of the calculator that performs all of the calculation 
steps, returns the final answer\"\"\"\n # parse string so that it is broken up into the individual pieces\n parsedString = self.parseInput(inputString)\n if parsedString == \"Error\":\n return \"Please enter a valid input!\"\n # convert the input to postfix notation\n postfix = self.toPostfix(parsedString)\n if postfix == \"Unmatched Error\":\n return \"Your input has unmatched brackets!\"\n# print(postfix)\n # Then perform the calculation and return the answer\n answer = self.postfixCalc(postfix)\n if answer == \"Too Many Error\":\n return \"Your input has too many numbers!\"\n if answer == \"Too Few Error\":\n return \"Your input has too few numbers!\"\n return round(answer,10)\n\n \n def toPostfix (self,infix):\n \"\"\"Takes in an array of characters in infix notation and returns an array of the same\n expression in postfix notation.\"\"\"\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix\n \n \n def postfixCalc(self,tokens):\n \"\"\" Takes a postfix expression as a list and evaluates it \"\"\"\n if len(tokens) == 0:\n return 0\n stack = []\n # while expr is not empty\n while len(tokens)>0:\n toke = tokens.pop(0)\n # if token is a number push it onto the stack\n if isFloat(toke):\n stack.append(float(toke))\n # if token is a special number push it onto the stack\n elif toke in 
Calculator.specialNumbers:\n                stack.append(Calculator.specialNumbers[toke])\n            else:\n                # Operators take 2 inputs, functions take 1 input except root which takes 2\n                if toke in Calculator.operators or toke == 'root':\n                    n = 2\n                elif toke in Calculator.functions:\n                    n = 1\n                # If the length of the stack is less than the required number of operands the user has not\n                # input enough values.\n                if len(stack) < n:\n                    return \"Too Few Error\"\n                # Pop the operands off the stack (restoring their input order), apply the\n                # operator/function and push the result back onto the stack\n                args = [stack.pop() for _ in range(n)][::-1]\n                if toke in Calculator.operators:\n                    stack.append(Calculator.operators[toke][0](*args))\n                else:\n                    stack.append(Calculator.functions[toke](*args))\n        # If there is more than one value left on the stack the user has input too many numbers\n        if len(stack) > 1:\n            return \"Too Many Error\"\n        # Return the value on the stack (should only be 1 value left)\n        return stack[-1]\n
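\n    # Example (for illustration): parseInput('3+4*2') returns ['3', '+', '4', '*', '2'];\n    # toPostfix turns that into ['3', '4', '2', '*', '+'] and postfixCalc then reduces\n    # the stack: 4 * 2 = 8, then 3 + 8 = 11.\n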
\n    def inputChecker(self, inputString):\n        ''' Loops over the string and checks whether all of the user inputs are valid '''\n        stringArray = []\n        tmpString = ''\n        n = 0\n        while n < len(inputString): \n            char = inputString[n]\n            # If its a number or '.' add it to a temporary string\n            if char in string.digits or char == '.':\n                tmpString+=char\n            else:\n                if tmpString != '':\n                    stringArray.append(tmpString)\n                    tmpString=''\n                # If its a space, pass\n                if char == ' ':\n                    pass\n                # If its a special character, add it to the string array\n                elif char in Calculator.operators or char in '(),': \n                    stringArray.append(char) \n                # If its a letter, check to see whether its a function\n                elif char in string.ascii_letters:\n                    fnString = ''\n                    foundFn = False\n                    # Look ahead\n                    while foundFn == False and n < len(inputString) and inputString[n] in string.ascii_letters:\n                        fnString+=inputString[n].lower()\n                        n+=1\n                        # if it finds the letter e, look ahead to see whether the next letter is x\n                        # otherwise exp will always get picked up as the number e - then throw an error.\n                        if fnString == 'e' and n < len(inputString) and inputString[n].lower()=='x':\n                            pass\n                        # If it finds a string thats a function, a special number or exp, break the loop\n                        elif fnString in Calculator.functions or fnString == 'exp' or fnString in Calculator.specialNumbers:\n                            # Add the string to stringArr\n                            stringArray.append(fnString)\n                            foundFn = True\n                        # If it finds the string 'ans' then add the previous answer\n                        elif fnString == 'ans':\n                            stringArray.append(self.previousAnswer)\n                            foundFn = True\n                    # If it finds a string that's not a function or a special number, return an error\n                    if foundFn == False:\n                        return \"Error\"\n                    n-=1 \n                # Else its an invalid character\n                else:\n                    return \"Error\"\n            # If you've reached the end of the array, and tmpString isn't empty add it to the array\n            if n == len(inputString)-1 and tmpString != '':\n                stringArray.append(tmpString)\n            # Finally increment n by 1\n            n+=1\n        return stringArray\n    \n    \n    def negativeCheck(self, stringArray):\n        ''' Loops over stringArray and checks to see whether minus symbols should correspond to negative numbers'''\n        newStringArray = []\n        n = 0\n        while n < len(stringArray):\n            # If the first character is a - sign then the next character should be a negative number\n            if n == 0 and stringArray[n]=='-':\n                newStringArray.append(stringArray[n]+stringArray[n+1])\n                n+=1\n            # If the character is a - sign and it is preceded by an operator or ( or , or exp then the next character should be a negative number\n            elif stringArray[n]=='-' and (stringArray[n-1] in Calculator.operators or stringArray[n-1] in '(,' or stringArray[n-1] == 'exp'):\n                newStringArray.append(stringArray[n]+stringArray[n+1])\n                n+=1 \n            else:\n                newStringArray.append(stringArray[n])\n            n+=1\n        return newStringArray\n    \n    \n    def scientificNotationCheck(self, stringArray):\n        ''' Loop over stringArray and see if any of the numbers are in scientific notation'''\n        newStringArray=[]\n        n = 0\n        while n < len(stringArray):\n            # If the character is 'exp' and it has a number either side (including negatives) then change the last item in the\n            # list to aeb where a is the first number, b is the second, else throw an error \n            if stringArray[n] == 'exp':\n                try:\n                    newStringArray[-1] = str(float('{}e{}'.format(newStringArray[-1], stringArray[n+1])))\n                    n+=1\n                except ValueError:\n                    return \"Error\"\n                except IndexError:\n                    return \"Error\"\n            else:\n                newStringArray.append(stringArray[n])\n            n+=1\n        return newStringArray\n    \n    \n    def parseInput(self, inputString):\n        \"\"\" Takes the user input, checks to see whether it's valid and returns it as an array\n        split into the individual pieces \"\"\"\n        stringArray = self.inputChecker(inputString)\n        if stringArray == \"Error\":\n            return stringArray\n        # Correctly assigns negative numbers\n        stringArray = self.negativeCheck(stringArray)\n        # Checks whether any of the numbers have been written in scientific notation\n        stringArray = self.scientificNotationCheck(stringArray)\n#        print(stringArray)\n        return stringArray\n\n\nprint('------------------------------Scientific Calculator -----------------------------------\\n\\\nA scientific calculator that accepts inputs of +-*/^%.\\n\\\nBrackets () are also accepted inputs.\\n\\\nSupports a range of functions such as trigonometric inputs (sin(x), cos(x), tan(x), asin(x),\\n\\\nacos(x), atan(x)), sqrt(x), root(x,n), log(x), ln(x), fact(x) and abs(x) functions.\\n\\\nExponential notation is supported in the format 2.5 EXP 7 (not case sensitive).\\n\\\nThe keyword ans is used to use the calculator's previous answer.\\n')\nprint('What would you like to calculate? (type q to quit)\\n')\nanswer = 0\nwhile True:\n    calcIn = input(\"--> \")\n    if calcIn in ('q', 'Q'):\n        break\n    answer = Calculator(answer).evaluate(calcIn)\n    print(answer,\"\\n\")\n\n","repo_name":"dlmcauslan/Calculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":14802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27635802972","text":"from flask import Flask, request, render_template, flash, redirect\nfrom models import db, connect_db, Pet\nfrom forms import AddPetForm\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///adoptionAgency'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = \"OOooOOOoOOOoo000\"\n\nconnect_db(app)\n\n@app.route('/')\ndef home():\n    pets = Pet.query.all()\n    return render_template('home.html', pets = pets)\n\n\n\"new pet form\"\n@app.route('/new-pet', methods=['GET', 'POST'])\ndef new_pet():\n    form = AddPetForm()\n\n    if form.validate_on_submit():\n        \"Get all form values\"\n        name = form.name.data\n        species = form.species.data\n        photo_url = form.photo_url.data\n        age = form.age.data\n        notes = form.notes.data\n        avalible = form.avalible.data\n        \"Create new pet and add to db\"\n        pet = Pet(name=name,species=species,photo_url=photo_url,age=age,notes=notes,avalible=avalible)\n        db.session.add(pet)\n        db.session.commit()\n        return redirect('/')\n    else:\n        \"If form isn't valid, return to the page\"\n        return render_template('new-pet.html', form=form)\n\n\n\"View a pet's details\"\n\n@app.route('/view/<int:user_id>')\ndef view_pet(user_id):\n    pet = Pet.query.get_or_404(user_id)\n    return render_template('view-pet.html',pet=pet)\n\n\n\"edit Pet\"\n@app.route('/edit/<int:user_id>', methods=[\"GET\", \"POST\"])\ndef edit_pet(user_id):\n    pet = Pet.query.get_or_404(user_id)\n    form = AddPetForm(obj=pet)\n    if form.validate_on_submit():\n        
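# True only for a POST request whose field validators and CSRF token pass\n        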
\"Get all form values\"\n pet.name = form.name.data\n pet.species = form.species.data\n pet.photo_url = form.photo_url.data\n pet.age = form.age.data\n pet.notes = form.notes.data\n pet.avalible = form.avalible.data\n db.session.commit()\n return redirect('/')\n else:\n return render_template('edit-pet.html',pet=pet, form=form)\n\n\n\n\n","repo_name":"podop29/AnimalAdoptionAgency","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18866742050","text":"# \n# Reto #0\n# EL FAMOSO \"FIZZ BUZZ\"\n# Fecha publicación enunciado: 27/12/21\n# Fecha publicación resolución: 03/01/22\n# Dificultad: FÁCIL\n# Enunciado: Escribe un programa que muestre por consola (con un print) los números de 1 a 100 (ambos incluidos y con un salto de línea entre cada impresión), sustituyendo los siguientes:\n# - Múltiplos de 3 por la palabra \"fizz\".\n# - Múltiplos de 5 por la palabra \"buzz\".\n# - Múltiplos de 3 y de 5 a la vez por la palabra \"fizzbuzz\".\n\n\n\"\"\" Este algoritmo es para saber como identificar los multiplos de n numero, esto siginifica que si un numero dividido entre otro numero da 0 es mutiplo de 3, 5 o de los dos en este caso.\n\"\"\"\n\ndef fizzbuzz(n):\n \n for i in range(1, n+1):\n # if i % 2 == 0:\n # print(f\"{i} -> par\")\n if i % 3 == 0 and i % 5 == 0: #Se tienen que cumpir las dos para que entre al bloque y se pone primero siempre \n print(f\"{i} -> FizzBuzz\")\n elif i % 3 == 0:\n print(f\"{i} -> Fizz\")\n elif i % 5 == 0:\n print(f\"{i} -> Buzz\")\n else:\n print(i)\n\n\nfizzbuzz(100)","repo_name":"Alexxbar-pro/Python-programmig","sub_path":"ejericicosGithub/fizzbuzz#0.py","file_name":"fizzbuzz#0.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8308540331","text":"from django.urls import path, re_path\nfrom . 
urlpatterns = [\n    path('lista_diagnostico/', DiagListView.as_view(), name='lista_diagnostico'),\n    path('nuevo_diagnostico/', NuevoDiagCreateView.as_view(), name='nuevo_diagnostico'),\n    path('detalle_diagnostico/<int:pk>', DiagDetailView.as_view(), name= 'detalle_diagnostico'),\n    # Delete a diagnosis\n    path('eliminar_diag/<int:pk>/', Eliminar_diag.as_view(), name='eliminar_diag'), \n    path('update_diagnostico/<int:pk>/', Update_diagnostico.as_view(), name=\"update_diagnostico\" ) \n]\n","repo_name":"GiudiciAlejandro/TO_consultorio","sub_path":"TO_consultorio/applications/diagnostico/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24377293128","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\"\"\"\n\nimport math\nimport sys\nimport time\n\n# from PyQt4.QtCore import Qt, QTimer, QRectF\n# from PyQt4.QtGui import *\nimport numpy as np\nfrom PyQt4 import QtCore, QtGui\n\n\nclass Overlay(QtGui.QWidget):\n\n    def __init__(self, speed=18, parent=None):\n        super(Overlay, self).__init__(parent)\n        palette = QtGui.QPalette(self.palette())\n        palette.setColor(palette.Background, QtCore.Qt.transparent)\n        self.setPalette(palette)\n\n        self.theta_start = 90\n        self.theta = 0  # Specifies straight up\n        self.radius = 50\n        self.speed = speed\n        self.colorList = [QtGui.QColor(16, 159, 221), QtGui.QColor(221, 89, 2)]\n\n    def paintEvent(self, event):\n        painter = QtGui.QPainter()\n        painter.begin(self)\n\n        painter.setRenderHint(QtGui.QPainter.Antialiasing)\n        painter.fillRect(event.rect(), QtGui.QBrush(QtGui.QColor(255, 255, 255, 50)))\n        painter.setPen(QtGui.QPen(QtCore.Qt.NoPen))\n\n        painter.setBrush(QtGui.QBrush(self.colorList[0]))\n\n        painter.drawEllipse(self.width() / 2 - self.radius, self.height() / 2 - self.radius, self.radius * 2,\n                            self.radius * 2)\n\n        # painter.setPen(QPen(QColor(255,0,0)))\n        painter.setBrush(QtGui.QBrush(self.colorList[1]))\n\n        pieRect = QtCore.QRectF(self.width() / 2 - self.radius, self.height() / 2 - self.radius, self.radius * 2,\n                                self.radius * 2)\n        painter.drawPie(pieRect, self.theta_start * 16, -self.theta * 16)\n        # painter.drawLine(self.width()/2 , self.height() / 2, self.width() / 2 + np.sin(self.theta) * self.radius, self.height()/2.0 + np.cos(self.theta) * self.radius)\n        # painter.drawLine(self.width()/2, self.height()/2, 50,50 )\n        painter.end()\n\n    def showEvent(self, event):\n        self.timer = self.startTimer(50)\n\n    def timerEvent(self, event):\n        self.theta += self.speed\n\n        if self.theta > 360:\n            self.theta = 0\n            self.colorList.reverse()\n\n        print(self.theta)\n        self.update()\n\n    def endTimer(self):\n        self.killTimer(self.timer)\n        self.hide()\n\n\nclass MainWindow(QtGui.QMainWindow):\n\n    def __init__(self, parent=None):\n        QtGui.QMainWindow.__init__(self, parent)\n\n        widget = QtGui.QWidget(self)\n        self.editor = QtGui.QTextEdit()\n        self.editor.setPlainText(\"0123456789\" * 100)\n        layout = QtGui.QGridLayout(widget)\n        layout.addWidget(self.editor, 0, 0, 1, 3)\n        button = QtGui.QPushButton(\"Wait\")\n        layout.addWidget(button, 1, 1, 1, 1)\n\n        self.setCentralWidget(widget)\n        self.overlay = Overlay(speed=15, parent=self.centralWidget())\n\n        button.clicked.connect(self.overlay.show)\n\n    def resizeEvent(self, event):\n        self.overlay.resize(event.size())\n        event.accept()\n\n\nif __name__ == \"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    window = MainWindow()\n    
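# the overlay is created hidden and resized with the window (see resizeEvent);\n    # clicking the Wait button calls overlay.show() to start the animation\n    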
window.show()\n    sys.exit(app.exec_())\n","repo_name":"sabirhusnain577/BIGKAT-MIX","sub_path":"PSAT_3_CAM_RPI_USE_THIS/overlay.py","file_name":"overlay.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18628394460","text":"import re\nimport codecs\nimport os\nimport sys\nfrom pathlib import Path\n\n# this script takes an argument of an sdf file containing\n# molecules and parses this sdf into one file per molecule\n\n# store script argument in variable\nfile_in = sys.argv[1]\n\n###############################################################################\n# SETUP\n###############################################################################\ndir_in = \"/data/p275927/workdir/libraries/\"\ndir_out = \"/data/p275927/workdir/ligands/\"\n\n# counter for number of molecules\nmol_n = 1\n\n# switch for start of writing into new sdf file\nbegin = False\n\n# cut away file extension to keep the file name\nfilename = os.path.splitext(os.path.basename(file_in))[0]\n# join file path to target sdf file\nsdf_file_path = os.path.join(dir_in, file_in)\n\n###############################################################################\n# PARSE\n###############################################################################\n# read in sdf tranche file containing 1000 mol x 4 poses\nwith codecs.open(sdf_file_path, 'r', encoding = 'utf-8', errors = 'ignore') as sdf_in:\n    # iterate over all lines of file\n    for line in sdf_in:\n        # use regex to trigger opening/writing new file\n        trig = re.search(\".OpenBabel.+\", line) # matches two lines\n        if trig:\n            begin = True\n            # open a new sdf file for writing and name it by ID and pose\n            sdf_out = open(dir_out + filename +\n                           '_' + str(mol_n) + '.sdf', 'w')\n            # write the molecule name stored in previous loop iteration\n            sdf_out.write(mol_name)\n        # keep writing into files\n        if begin == True:\n            sdf_out.write(line)\n        # store molecule name from line preceding regex match\n        mol_name = line\n        # stop writing in specific file once line indicates end of sdf\n        if line[:4] == \"$$$$\":\n            sdf_out.close()\n            begin = False\n            mol_n += 1 # increment counter\n","repo_name":"dn440/PPP-Virtual-Screen","sub_path":"scripts/legacy/phase1_sdf_extract.py","file_name":"phase1_sdf_extract.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7385114580","text":"# if the current map has not been visited yet, visit it\n# when visiting, check that the minimum required fatigue is satisfied\n# after visiting in the current turn, set it back to False so it can be visited in other turns\n\nans = 0\n\n\ndef dfs(idx: int, pirodo: int, dugeons: list, visit: list):\n    global ans\n    if idx == len(dugeons):\n        return\n\n    for i in range(len(dugeons)):\n        if not visit[i] and dugeons[i][0] <= pirodo:\n            visit[i] = True\n            dfs(idx + 1, pirodo - dugeons[i][1], dugeons, visit)\n            visit[i] = False\n\n    ans = max(idx, ans)\n\n\ndef solution(k, dungeons):\n    visit = [False] * len(dungeons)\n    dfs(0, k, dungeons, visit)\n    return ans\n","repo_name":"ChanghwanK/coding-test-python","sub_path":"programmers/lv2/피로도/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6429067061","text":"from django.core.management import call_command\nfrom notification.models import DeviceToken, CertFile\nfrom unittest import TestCase\nfrom .compatibility import Mock\n\nimport json\nimport notification.apns.apns\nimport sys\n\nPYTHON_VERSION = 
sys.version_info\n\n\nclass ManagementCommandsMultiPushTest(TestCase):\n\n def setUp(self):\n self.command_name = 'multipush'\n self.args = []\n self.options = {'verbosity': 1,\n 'settings': None,\n 'pythonpath': None,\n 'traceback': False,\n 'no_color': False,\n 'sandbox': False,\n 'device_tokens': None,\n 'all': False,\n 'title': None,\n 'subtitle': None,\n 'body': None,\n 'sound': 'default',\n 'badge': 1,\n 'content_available': False,\n 'mutable_content': False,\n 'extra': None}\n CertFile(filename='cert.pem').save()\n self.device_token = DeviceToken(device_token='8a0d7cba3ffad34bd3dcb37728080a95d6ee78a83a68ead033614acbab9b7e76',\n uuid='XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX')\n self.device_token.save()\n\n def tearDown(self):\n self.args = []\n self.options = {'verbosity': 1,\n 'settings': None,\n 'pythonpath': None,\n 'traceback': False,\n 'no_color': False,\n 'sandbox': False,\n 'device_tokens': None,\n 'all': False,\n 'title': None,\n 'subtitle': None,\n 'body': None,\n 'sound': 'default',\n 'badge': 1,\n 'content_available': False,\n 'mutable_content': False,\n 'extra': None}\n CertFile.objects.all().delete()\n self.device_token.delete()\n\n def test_without_device_tokens_and_title(self):\n with self.assertRaises(ValueError):\n call_command(self.command_name, *self.args, **self.options)\n\n def test_with_device_token_for_specified_and_title(self):\n self.options['sandbox'] = True\n self.options['device_tokens'] = [self.device_token.device_token]\n self.options['title'] = 'test case title'\n\n notification.apns.apns.GatewayConnection.send_notification = Mock(return_value=None)\n call_command(self.command_name, *self.args, **self.options)\n\n def test_with_device_token_for_all_without_title(self):\n self.options['all'] = True\n\n with self.assertRaises(ValueError):\n call_command(self.command_name, *self.args, **self.options)\n\n def test_target_develop_with_device_token_for_all_and_title_without_cert(self):\n self.options['sandbox'] = True\n self.options['all'] = True\n self.options['title'] = 'test case title'\n CertFile.objects.all().delete()\n\n with self.assertRaises(CertFile.DoesNotExist):\n call_command(self.command_name, *self.args, **self.options)\n\n def test_valid_custom(self):\n self.options['extra'] = \"{'key':'value'}\"\n self.options['sandbox'] = True\n self.options['all'] = True\n self.options['title'] = 'test case title'\n CertFile.objects.all().delete()\n\n with self.assertRaises(CertFile.DoesNotExist):\n call_command(self.command_name, *self.args, **self.options)\n\n def test_invalid_custom(self):\n self.options['extra'] = \"{'key':'value','key2'}\"\n self.options['sandbox'] = True\n self.options['all'] = True\n self.options['title'] = 'test case title'\n CertFile.objects.all().delete()\n\n if PYTHON_VERSION.major == 3 and PYTHON_VERSION.minor >= 5:\n with self.assertRaises(json.decoder.JSONDecodeError):\n call_command(self.command_name, *self.args, **self.options)\n elif (PYTHON_VERSION.major == 3 and PYTHON_VERSION.minor <= 4) or PYTHON_VERSION.major == 2:\n with self.assertRaises(ValueError):\n call_command(self.command_name, *self.args, **self.options)\n","repo_name":"nnsnodnb/django-ios-notifications","sub_path":"notification/tests/test_notification_command_multipush.py","file_name":"test_notification_command_multipush.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"74971071284","text":"from collections import defaultdict\n\nday = 6\n\nexample_filename = 
f'day{day}/day{day}_ex.txt'\nexample_input = open(example_filename).readlines()\n\nfilename = f'day{day}/day{day}.txt'\npuzzle_input = open(filename).readlines()\n\ndef get_initial_timers(input):\n    timers = defaultdict(int)\n    for val in input[0].split(','):\n        timers[int(val)] += 1\n    return timers\n\ndef simulate_day(timers):\n    new_timers = {key-1: val for key, val in timers.items() if key != 0}\n    if 0 in timers:\n        new_timers[6] = new_timers[6] + timers[0] if 6 in new_timers else timers[0]\n        new_timers[8] = new_timers[8] + timers[0] if 8 in new_timers else timers[0]\n    return new_timers\n    \ndef simulate_n_days(input, n):\n    timers = get_initial_timers(input)\n    for _ in range(n):\n        timers = simulate_day(timers)\n    return timers\n\ndef part_1(input):\n    return sum(simulate_n_days(input, 80).values())\n\ndef part_2(input):\n    return sum(simulate_n_days(input, 256).values())\n\n\nprint(f'Part 1 example: {part_1(example_input)}')\nprint(f'Part 1 puzzle: {part_1(puzzle_input)}')\n\nprint(f'Part 2 example: {part_2(example_input)}')\nprint(f'Part 2 puzzle: {part_2(puzzle_input)}')\n","repo_name":"anniebryan/advent-of-code","sub_path":"2021/day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18825819082","text":"#!/usr/bin/env python\n\nfrom collections import Counter\nfrom ranking import BM25\nfrom re import compile, split\nfrom struct import pack\n\nterm_regex = compile('[^a-zA-Z-/\\']')\n\ndef tokenize(content, stoplist=None):\n    \"\"\"\n    Converts a list of words into terms based on a set of rules.\n\n    We decided to keep words containing either hyphens, forward-slashes or apostrophes\n    in our earlier regex because they themselves are composed of multiple words.\n    We now use another regex that matches word groups to extract the words.\n\n    The reasoning behind these decisions is to maintain the structure of the words.\n    This is so the index reflects what a typical user expects. For example in a search\n    function where the user wants to find occurrences of \"campus\", it is preferable to\n    recognize \"on-campus\" as a legitimate source of \"campus\".\n\n    :param content: a string of text\n    :return terms: a list of words after tokenization\n    \"\"\"\n    words = term_regex.sub(' ', content).lower().split()\n    terms = []\n    for word in words:\n        if word.isalpha():\n            terms.append(word)\n        else:\n            terms.extend(split('\\W+', word))\n    if stoplist:\n        terms = [t for t in terms if t not in stoplist]\n    return terms\n
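\n# Illustrative example (not part of the original file):\n# tokenize('Drop-in/on-campus visits!') -> ['drop', 'in', 'on', 'campus', 'visits'],\n# since compound words survive the first pass and are split on '-' and '/' after.\n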
\n\nclass Document:\n\n    def __init__(self, id, docno, terms):\n        \"\"\"\n        Contains the identifiers and terms found within a document.\n\n        Includes the length of the document as the combined length of each term.\n        Using the invariant that the terms only contains the alphabet,\n        we assume that each character is equal to 1 byte in UTF-8 encoding.\n        Hence, the length of a document is the number of indexed bytes.\n\n        :param id: an incremental identifier assigned to each document\n        :param docno: an identifier parsed from [<DOCNO>]\n        :param terms: a list of terms in order of discovery\n        \"\"\"\n        self.id = id\n        self.docno = docno\n        self.terms = terms\n        self.length = sum(map(len, terms))\n\n\nclass Collection:\n\n    def __init__(self, stoplist=None):\n        \"\"\"\n        Contains functions to index a collection.\n\n        :param stoplist: an optional path to a stoplist\n        \"\"\"\n        self.stoplist = None\n        if stoplist:\n            with open(stoplist, 'r') as f:\n                self.stoplist = set(f.read().split('\\n'))\n        self.documents = []\n\n    def parse_document(self, collection, docno):\n        with open(collection, 'r') as f:\n            content = ''\n            appending = False\n            found = False\n            for line in f:\n                if not found:\n                    if line.startswith('<DOCNO>') and docno == line[8:-10]:\n                        found = True\n                else:\n                    if line == '<TEXT>\\n' or line == '<HEADLINE>\\n':\n                        appending = True\n                    elif line == '</TEXT>\\n' or line == '</HEADLINE>\\n':\n                        appending = False\n                    elif appending and not line.startswith('<'):\n                        content += line\n                    elif line == '</DOC>\\n':\n                        terms = tokenize(content, self.stoplist)\n                        self.documents.append(Document(len(self.documents), docno, terms))\n                        return True\n            return False\n\n    def parse_collection(self, collection):\n        \"\"\"\n        Iterates the <collection> one line at a time to store important data.\n\n        > begin <appending> if line equals \"<TEXT>\" or \"<HEADLINE>\"\n        > stop <appending> if line equals \"</TEXT>\" or \"</HEADLINE>\"\n        > appends the line to <content> if <appending> and not a tag ELSE\n        > assigns <docno> if it encounters a <DOCNO> tag ELSE\n        > resets <content> if line equals \"<DOC>\"\n        > tokenizes <content> if line equals \"</DOC>\" and increments <id>\n\n        :param collection: a path to a <collection>\n        \"\"\"\n        with open(collection, 'r') as f:\n            id = 0\n            docno = ''\n            content = ''\n            appending = False\n            for line in f:\n                if line == '<TEXT>\\n' or line == '<HEADLINE>\\n':\n                    appending = True\n                elif line == '</TEXT>\\n' or line == '</HEADLINE>\\n':\n                    appending = False\n                elif appending and not line.startswith('<'):\n                    content += line\n                elif line.startswith('<DOCNO>'):\n                    docno = line[8:-10]\n                elif line == '<DOC>\\n':\n                    content = ''\n                elif line == '</DOC>\\n':\n                    id += 1\n                    terms = tokenize(content, self.stoplist)\n                    self.documents.append(Document(id, docno, terms))\n\n    def write_map_to_disk(self):\n        \"\"\"\n        Writes a <map> file to the current working directory.\n\n        The file is line-separated, where each line consists of '<id> <docno> <weight>'.\n        The <weight> is calculated as the value of K in BM25's scoring function.\n        \"\"\"\n        al = sum(map(lambda d: d.length, self.documents)) / len(self.documents)\n        ranker = BM25()\n        with open('map', 'w') as f:\n            for document in self.documents:\n                f.write(str(document.id) + ' ' + document.docno + ' ' + str(ranker.document_weight(document.length, al)) + '\\n')\n\n    def create_postings(self):\n        \"\"\"\n        Builds a <postings> dictionary for the term occurrence statistics of the Collection.\n\n        :return postings: a dictionary with \"term\" as a key and a list of Document \"id\" as a value\n        \"\"\"\n        postings = {}\n        if self.documents:\n            for document in self.documents:\n                for term in document.terms:\n                    if term in postings:\n                        postings[term].append(document.id)\n                    else:\n                        postings[term] = [document.id]\n        return postings\n
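\n    # Illustrative example: postings {'campus': [0, 0, 3]} is written to <invlists>\n    # as the integers 2, 0, 2, 3, 1 -- the document frequency, then one\n    # \"<document id> <in-document frequency>\" pair each for documents 0 and 3.\n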
\n    def write_invlists_lexicon_to_disk(self):\n        \"\"\"\n        Writes an <invlists> file and <lexicon> file to the current working directory.\n\n        <invlists> is a binary integer file (32-bit) composed of sequential inverted lists.\n        Each inverted list is preceded by a document frequency integer.\n        This is followed by a number of \"<document id> <in-document frequency>\" pairs equal to the document frequency.\n        <lexicon> is a key-value-pair where every \"key\" is a \"term\" that is assigned a byte-offset from <invlists>.\n        The order of terms/documents in <invlists> and <lexicon> are unordered because <postings> is unordered.\n        \"\"\"\n        postings = self.create_postings()\n        with open('invlists', 'wb') as invlists_file, open('lexicon', 'w') as lexicon_file:\n            for term in postings.keys():\n                lexicon_file.write(term + ' ' + str(invlists_file.tell()) + '\\n') # gets the position of the file pointer\n                term_occurrences = Counter(postings[term]) # tallies term occurrences by document <id>\n                document_frequency = len(term_occurrences.keys())\n                invlists_file.write(pack('I', document_frequency))\n                for document_id in term_occurrences.keys():\n                    invlists_file.write(pack('II', document_id, term_occurrences[document_id]))\n","repo_name":"pcaenngtaeera/Information-Retrieval","sub_path":"Automatic Query Expansion/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"75110496246","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('elcid', '0017_merge'),\n    ]\n\n    operations = [\n        migrations.RenameField(\n            model_name='antimicrobial',\n            old_name='no_antimicriobials',\n            new_name='no_antimicrobials',\n        ),\n    ]\n","repo_name":"openhealthcare/elcid","sub_path":"elcid/migrations/0018_auto_20151026_1205.py","file_name":"0018_auto_20151026_1205.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"76"} +{"seq_id":"158838321","text":"from support import rand_list\n\n\n# Definition for a point\nclass Point:\n    def __init__(self, a=0, b=0):\n        self.x = a\n        self.y = b\n    \n    def __str__(self):\n        return \"(%s, %s)\" % (self.x, self.y)\n    \n    __repr__ = __str__ \n    \n\nclass Solution:\n    # @param points, a list of Points\n    # @return an integer\n    def maxPoints(self, points):\n        if not points:\n            return 0\n        \n        size = len(points)\n        if size < 3:\n            return size \n        \n        def calc_slope(p1, p2):\n            x, y = (p2.x - p1.x), (p2.y - p1.y)\n            if x == 0:\n                return None\n            else:\n                return (float)(p2.y - p1.y) / (p2.x - p1.x)\n        \n        max_points = 0\n        \n        for i in xrange(size):\n            p1 = points[i]\n            mapping = {\"same point\": 1}\n            for j in xrange(i+1, size):\n                p2 = points[j]\n                \n                if p2.x == p1.x and p2.y == p1.y:\n                    mapping[\"same point\"] += 1\n                else:\n                    slope = calc_slope(p1, p2)\n                    \n                    if slope not in mapping:\n                        mapping[slope] = 2\n                    else:\n                        mapping[slope] += 1\n            \n            same_count = mapping.pop(\"same point\")\n            max_points = max(max_points, same_count)\n            for s in mapping:\n                max_points = max(max_points, mapping[s] + same_count - 1)\n        \n        return max_points\n    \n    \ndef 
test_main(): \n sol = Solution()\n \n for i in xrange(10):\n \n numbers = rand_list(i*2) \n numbers2 = rand_list(i*2)\n \n #input = [(0,-12),(5,2),(2,5),(0,-5),(1,5),(2,-2),(5,-4),\n #(3,4),(-2,4),(-1,4),(0,-5),(0,-8),(-2,-1),(0,-11),(0,-9)]\n #input = [(3,7),(3,1),(5,7),(4,1)]\n #input = [(1,1), (2,2), (3,3), (5,4), (0, 0), (0, 0)]\n #input = [(0,0),(1,1),(0,0)]\n #input = [(0,0),(0,0)]\n #input = [(1,1),(1,1),(1,1)]\n #input = [(1,1),(1,1),(2,3)]\n #numbers = [x for x,y in input]\n #numbers2 = [y for x,y in input]\n \n #numbers = numbers2 = range(i)\n \n points = [] \n for i in xrange(len(numbers)):\n points.append(Point(numbers[i], numbers2[i]))\n \n print(\"maxPoints in %s\" % points)\n \n print(sol.maxPoints(points))\n \n \nif __name__ == \"__main__\":\n test_main()\n \n","repo_name":"deepgully/codes","sub_path":"leetcode/python/max-points-on-a-line.py","file_name":"max-points-on-a-line.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"74702489525","text":"import os\nimport pickle\nimport random\nimport absl.flags\nimport absl.app\n\nfrom tqdm import tqdm\nimport torch\nimport torchvision\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom src import segmentations\nfrom src import settings\nfrom src import model_utils\nfrom src import mask_utils\nfrom src import activation_utils\nfrom src import utils\nfrom src import constants as C\nfrom src import metrics\nfrom src import formula as F\n\n# user flags\nabsl.flags.DEFINE_string(\n \"subset\", \"ade20k\", \"subset to use. Values:[ade20k, pascal]\"\n)\nabsl.flags.DEFINE_string(\n \"model\",\n \"resnet18\",\n \"model to use. Values:[resnet18, alexnet, resnet50, vgg16, densenet161]\",\n)\nabsl.flags.DEFINE_string(\n \"pretrained\",\n \"places365\",\n \"whether to use pretrained weights. 
Values [imagenet, places365, None]\",\n)\nabsl.flags.DEFINE_string(\"device\", \"cuda\", \"device to use to store the model\")\nabsl.flags.DEFINE_integer(\"length\", 3, \"length of explanations\")\nabsl.flags.DEFINE_integer(\"num_clusters\", 5, \"number of clusters\")\nabsl.flags.DEFINE_integer(\"top_k\", 5, \"top k samples\")\nabsl.flags.DEFINE_integer(\"random_units\", 100, \"number of units\")\nabsl.flags.DEFINE_string(\n \"root_models\", \"data/model/\", \"root directory for models\"\n)\nabsl.flags.DEFINE_string(\n \"root_datasets\", \"data/dataset/\", \"root directory for datasets\"\n)\nabsl.flags.DEFINE_string(\n \"root_segmentations\",\n \"data/cache/segmentations/\",\n \"root directory for segmentations\",\n)\nabsl.flags.DEFINE_string(\n \"root_activations\",\n \"data/cache/activations/\",\n \"root directory for activations\",\n)\nabsl.flags.DEFINE_string(\n \"root_results\", \"data/results/\", \"root directory for results\"\n)\nabsl.flags.DEFINE_string(\n \"dir_images\", \"figures/units/\", \"Where to save the images\"\n)\nabsl.flags.DEFINE_integer(\"seed\", 0, \"seed to use to set reproducibility\")\nFLAGS = absl.flags.FLAGS\n\n\ndef show(imgs, labels=None):\n \"\"\"Show images in a grid\"\"\"\n if not isinstance(imgs, list):\n imgs = [imgs]\n fig, axs = plt.subplots(\n nrows=len(imgs), squeeze=False,\n gridspec_kw={'wspace': 0, 'hspace': 0.5})\n for i, img in enumerate(imgs):\n img = img.detach()\n img = torchvision.transforms.functional.to_pil_image(img)\n axs[i, 0].imshow(np.asarray(img))\n axs[i, 0].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])\n if labels is not None:\n axs[i, 0].set_title(labels[i])\n return fig\n\n\ndef main(argv):\n if FLAGS.num_clusters < 1:\n raise ValueError(\"num_clusters must be greater than 0\")\n # Set seed\n generator = utils.set_seed(FLAGS.seed)\n\n # Parameters\n cfg = settings.Settings(\n subset=FLAGS.subset,\n model=FLAGS.model,\n pretrained=FLAGS.pretrained,\n num_clusters=FLAGS.num_clusters,\n beam_limit=5,\n device=FLAGS.device,\n root_models=FLAGS.root_models,\n root_datasets=FLAGS.root_datasets,\n root_segmentations=FLAGS.root_segmentations,\n root_activations=FLAGS.root_activations,\n root_results=FLAGS.root_results,\n )\n sparse_segmentation_directory = cfg.get_segmentation_directory()\n mask_shape = cfg.get_mask_shape()\n\n # Load data\n dataset = segmentations.BrodenDataset(\n cfg.dir_datasets,\n subset=cfg.index_subset,\n resolution=cfg.get_img_size(),\n broden_version=1,\n transform_image=torchvision.transforms.Compose(\n [\n torchvision.transforms.Resize(cfg.get_img_size()),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n cfg.get_image_mean(), cfg.get_image_stdev()\n ),\n ]\n ),\n )\n\n # Load data without normalization\n image_dataset = segmentations.BrodenDataset(\n cfg.dir_datasets,\n subset=cfg.index_subset,\n resolution=cfg.get_img_size(),\n broden_version=1,\n transform_image=torchvision.transforms.Compose([\n torchvision.transforms.Resize(112)])\n )\n segmentation_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=C.BATCH_SIZE,\n worker_init_fn=utils.seed_worker,\n generator=generator,\n )\n\n # Load Model\n model = model_utils.load_model_from_settings(cfg, device=cfg.device)\n\n # Load Masks\n masks = mask_utils.get_masks(\n sparse_segmentation_directory, segmentation_loader, dataset.labels,\n cfg.device\n )\n\n # Loop over all the selected layers\n for _, layer_name in enumerate(cfg.get_feature_names()):\n # Get the number of units in the layer\n num_units = 
model_utils.get_number_of_units(model, layer_name, cfg)\n\n        # Get activations\n        activations = model_utils.get_layer_activations(\n            segmentation_loader,\n            model,\n            layer_name,\n            range(num_units),\n            cfg.get_activation_directory(),\n        )\n\n        # Select units\n        if FLAGS.random_units == 0:\n            selected_units = range(num_units)\n        else:\n            selected_units = random.sample(\n                range(0, num_units), FLAGS.random_units\n            )\n\n        # Loop over the selected units\n        with tqdm(\n            selected_units,\n            desc=\"Computing Compositional explanations per unit\") as pbar:\n            for unit in pbar:\n                # Get unit activations\n                unit_activations = activations[unit]\n                activation_ranges = activation_utils.compute_activation_ranges(\n                    unit_activations, FLAGS.num_clusters)\n                # Loop over all the activation ranges\n                images_list = []\n                labels_list = []\n                for _, activation_range in enumerate(\n                    sorted(activation_ranges)\n                ):\n                    bitmaps = activation_utils.compute_bitmaps(\n                        unit_activations,\n                        activation_range,\n                        mask_shape=mask_shape,\n                    )\n                    bitmaps = bitmaps.to(cfg.device)\n\n                    dir_current_results = (\n                        f\"{cfg.get_results_directory()}/\"\n                        + f\"{layer_name}/{unit}/{activation_range}\"\n                    )\n                    file_algo_results = (\n                        f\"{dir_current_results}/\" + f\"{FLAGS.length}.pickle\"\n                    )\n\n                    # Load results\n                    with open(file_algo_results, \"rb\") as file:\n                        best_label, best_iou, _ = pickle.load(file)\n\n                    # Filter the top k candidate samples that\n                    label_mask = mask_utils.get_formula_mask(\n                        best_label, masks).to(FLAGS.device)\n                    # - contain the concept\n                    samples_formula = label_mask.sum(1) > 0\n                    # - have the neuron firing\n                    neuron_fires = bitmaps.sum(1) > 0\n                    # - have a high iou\n                    samples_iou = metrics.sample_iou(bitmaps, label_mask)\n                    above_iou = samples_iou > best_iou\n                    candidates = neuron_fires & samples_formula & above_iou\n                    nonzero = torch.nonzero(candidates).flatten()\n                    top_k = random.sample(nonzero.tolist(), FLAGS.top_k)\n\n                    # Plot the top k samples\n                    if not os.path.exists(FLAGS.dir_images):\n                        os.makedirs(FLAGS.dir_images)\n                    images = []\n                    for index_sample in top_k:\n                        data, _, _ = image_dataset[index_sample]\n                        image = torch.from_numpy(np.array(data))\n                        image = image.permute(2, 0, 1)\n                        mask_concept = ~bitmaps[index_sample]\n                        mask_concept = mask_concept.reshape(\n                            mask_shape[0], mask_shape[1])\n                        segmented_image = torchvision.utils.draw_segmentation_masks(\n                            image, mask_concept, alpha=1, colors='black')\n                        images.append(segmented_image)\n                    images_list.append(\n                        torchvision.utils.make_grid(\n                            images, padding=2, pad_value=255)\n                    )\n                    title = f\"{F.get_formula_str(best_label, dataset.labels)}\"\n                    labels_list.append(title)\n                fig = show(images_list, labels_list)\n                fig.set_size_inches(6, 7)\n                fig.savefig(\n                    f'{FLAGS.dir_images}/' +\n                    f'unit_{unit}_c_{FLAGS.num_clusters}.png')\n                pbar.update(0)\n\n\nif __name__ == \"__main__\":\n    absl.app.run(main)\n","repo_name":"KRLGroup/Clustered-Compositional-Explanations","sub_path":"scripts/generate_images.py","file_name":"generate_images.py","file_ext":"py","file_size_in_byte":8912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
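# --- Editor's note: metrics.sample_iou used in the script above is project-specific code. As a
# rough, hypothetical sketch of what a per-sample IoU between boolean masks can look like (each
# row is one sample's flattened bitmap), assuming IoU = |intersection| / |union| row-wise:
import torch

def sample_iou_sketch(bitmaps: torch.Tensor, label_mask: torch.Tensor) -> torch.Tensor:
    # bitmaps, label_mask: boolean tensors of shape (num_samples, num_pixels)
    intersection = (bitmaps & label_mask).sum(dim=1).float()
    union = (bitmaps | label_mask).sum(dim=1).float()
    return intersection / union.clamp(min=1)  # clamp avoids division by zero for empty unions

# Example: two samples over four pixels.
# a = torch.tensor([[1, 1, 0, 0], [1, 0, 0, 0]], dtype=torch.bool)
# b = torch.tensor([[1, 0, 0, 0], [1, 0, 0, 0]], dtype=torch.bool)
# sample_iou_sketch(a, b)  # -> tensor([0.5000, 1.0000])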
{"seq_id":"31358192763","text":"\"\"\"\nThis program controls a single LED through PWM\nIan Samuel Valdovinos Granados\n\"\"\"\n\nimport RPi.GPIO as GPIO\nimport time\n\nled_1_pin = 32\n\n\n# Set pin and board modes\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\nGPIO.setup(led_1_pin, GPIO.OUT)\n\n# Set up the LED pin as a PWM output pin\npwm_1 = GPIO.PWM(led_1_pin, 50)\npwm_1.start(0)\n\n\nwhile True:\n    for i in range(0, 50):\n        pwm_1.ChangeDutyCycle(i)\n        time.sleep(0.1)\n\n    for i in range(50, 0, -1):\n        pwm_1.ChangeDutyCycle(i)\n        time.sleep(0.1)\n","repo_name":"IanValdovinos/RaspberryPi","sub_path":"RGP_LED_PWM.py","file_name":"RGP_LED_PWM.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"74067259766","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n    # TLE Solution, using DFS:\n    # def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n    #     if grid[0][0] == 1:\n    #         return -1\n    #\n    #     dirs = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]\n    #     n = len(grid)\n    #\n    #     def dfs(r: int, c: int, curr: int, visited: set) -> int:\n    #         if not (0 <= r < n and 0 <= c < n):\n    #             return inf\n    #\n    #         if r == n - 1 and c == n - 1 and grid[r][c] == 0:\n    #             return curr\n    #\n    #         if grid[r][c] == 1 or (r, c) in visited:\n    #             return inf\n    #\n    #         v = visited.copy()\n    #         v.add((r, c))\n    #         val = inf\n    #\n    #         for dr, dc in dirs:\n    #             val = min(val, dfs(r + dr, c + dc, curr + 1, v))\n    #\n    #         return val\n    #\n    #     Min = dfs(0, 0, 1, set())\n    #     if Min == inf:\n    #         return -1\n    #\n    #     return Min\n\n    # BFS Solution:\n    def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n        n = len(grid)\n\n        if grid[0][0] != 0:\n            return -1\n\n        if n == 1:\n            return 1 if grid[0][0] == 0 else -1\n\n        dirs = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]\n\n        visited = set()\n        visited.add((0, 0))\n        q = deque([(0, 0, 1)])\n\n        while len(q) > 0:\n            r, c, d = q.popleft()\n            for dr, dc in dirs:\n                new_r, new_c = r + dr, c + dc\n                if not (0 <= new_r < n and 0 <= new_c < n):\n                    continue\n\n                if (new_r, new_c) not in visited and grid[new_r][new_c] == 0:\n                    if new_r == n - 1 and new_c == n - 1:\n                        return d + 1\n\n                    visited.add((new_r, new_c))\n                    q.append((new_r, new_c, d + 1))\n\n        return -1\n","repo_name":"Lei-Tin/Leetcode","sub_path":"Medium/#1091 shortestPathBinaryMatrix.py","file_name":"#1091 shortestPathBinaryMatrix.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"}
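# --- Editor's note: a quick sanity check of the BFS solution above, assuming the imports added at
# the top of that file (collections.deque, typing.List). Expected values worked out by hand:
#
#   s = Solution()
#   s.shortestPathBinaryMatrix([[0, 1], [1, 0]])                   # -> 2  (one diagonal step)
#   s.shortestPathBinaryMatrix([[0, 0, 0], [1, 1, 0], [1, 1, 0]])  # -> 4
#   s.shortestPathBinaryMatrix([[1, 0], [0, 0]])                   # -> -1 (blocked start)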
\"All\":\r\n return redirect('summary')\r\n\r\n accountID = getAccount(request)\r\n\r\n # update categorical caps if necessary\r\n updateCaps(request)\r\n\r\n if validateID(request, accountID) == True:\r\n context, rows = makeAggContext(request, accountID), getRows(request, accountID)\r\n\r\n # gets date range selected by user, parses it and then updates transactions+details displayed\r\n if request.method == \"POST\" and 'submit' in request.POST:\r\n if request.POST['submit'] == \"Enter\":\r\n request.user.profile.setDateRange(request.POST.get('datetimes'))\r\n if request.POST['submit'] == \"Clear\":\r\n request.user.profile.setUseDateFilter(\"0\")\r\n\r\n if request.user.profile.useDateFilter == \"1\":\r\n rawDates = request.user.profile.getDateRange().split(\"-\")\r\n startDate, endDate = rawDates[0], rawDates[1]\r\n rows = getFilteredRows(rows, startDate, endDate)\r\n context['dateIndicator'] = \"Transactions between \" + str(startDate) + \" - \" + str(endDate)\r\n else:\r\n context['dateIndicator'] = \"All transactions\"\r\n context['rows'] = rows\r\n return render(request, 'transactions/transactions.html',\r\n updateContext(context, rows, request, accountID, False))\r\n else:\r\n return render(validateID(request, accountID)[0], validateID(request, accountID)[1],\r\n validateID(request, accountID)[2])\r\n\r\n\r\n@login_required\r\ndef profile(request):\r\n request.session.set_expiry(600)\r\n if request.method == 'POST':\r\n uForm = UserUpdateForm(request.POST, request.FILES, instance=request.user)\r\n pForm = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\r\n if uForm.is_valid() and pForm.is_valid():\r\n newAccountID = pForm.cleaned_data.get('accountID')\r\n request.user.profile.addToAccountList(newAccountID)\r\n\r\n # getDataForAccount(newAccountID)\r\n uForm.save()\r\n pForm.save()\r\n messages.success(request, f'Account successfully updated')\r\n return redirect('profile')\r\n # return statement in line above is to prevent user from falling to line below\r\n # phenomenon called 'get-redirect pattern'- when u reload browser after submitting data\r\n # post request will be duplicated.\r\n else:\r\n uForm = UserUpdateForm(instance=request.user)\r\n pForm = ProfileUpdateForm(instance=request.user.profile)\r\n context = {\r\n 'uForm': uForm,\r\n 'pForm': pForm,\r\n 'accountIDs': getAccountIDsFromModel(request.user.profile)\r\n }\r\n return render(request, \"transactions/profile.html\", context)\r\n\r\n\r\n@login_required\r\ndef report(request):\r\n request.session.set_expiry(600)\r\n return render(request, 'transactions/report.html')\r\n\r\n\r\n@login_required\r\ndef helpPage(request):\r\n request.session.set_expiry(600)\r\n if request.method == \"POST\":\r\n form = ContactForm(request.POST)\r\n if form.is_valid:\r\n form.save()\r\n messages.success(request, f'Message sent!')\r\n send_mail(form.cleaned_data.get('subject'),\r\n form.cleaned_data.get('message') + \"\\n\\n Reply to: \" + form.cleaned_data.get('email'),\r\n 'pwresetst45@gmail.com', ['pwresetst45@gmail.com'])\r\n return redirect('home')\r\n else:\r\n form = ContactForm()\r\n context = {\r\n 'form': form\r\n }\r\n return render(request, 'transactions/help.html', context)\r\n\r\n\r\n@login_required\r\ndef delete(request):\r\n request.session.set_expiry(600)\r\n if request.method == \"POST\":\r\n idToRemove = request.POST.get('accountDropdown')\r\n if idToRemove == \"All\":\r\n request.user.profile.clearAccountList()\r\n if idToRemove == \"AllCurr\":\r\n 
\r\n\r\n@login_required\r\ndef delete(request):\r\n    request.session.set_expiry(600)\r\n    if request.method == \"POST\":\r\n        idToRemove = request.POST.get('accountDropdown')\r\n        if idToRemove == \"All\":\r\n            request.user.profile.clearAccountList()\r\n        if idToRemove == \"AllCurr\":\r\n            request.user.profile.clearCurrAccounts()\r\n        if idToRemove == \"AllCC\":\r\n            request.user.profile.clearCCAccounts()\r\n        elif idToRemove in getAccountIDsFromModel(request.user.profile):\r\n            request.user.profile.deleteAccount(idToRemove)\r\n\r\n        accountList = getAccountIDsFromModel(request.user.profile)\r\n        if len(accountList) > 0:\r\n            request.user.profile.setAccountID(accountList[0])\r\n        else:\r\n            request.user.profile.setAccountID(\"None\")\r\n    return redirect('profile')\r\n","repo_name":"RaghibMrz/webapp-testing","sub_path":"transactions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"1075738340","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 22 10:02:16 2021\n\n@author: andrea\n\"\"\"\n\nfrom ScopeFoundry import BaseMicroscopeApp\n\nclass HexSimAnalysisApp(BaseMicroscopeApp):\n\n    # this is the name of the microscope that ScopeFoundry uses\n    # when storing data\n    name = 'hex_sim_app'\n\n    # You must define a setup function that adds all the\n    # capabilities of the microscope and sets default settings\n    def setup(self):\n\n        from HexSimAnalyser_measurement import HexSimAnalysis\n        self.add_measurement(HexSimAnalysis)\n\n        # show ui\n        self.ui.show()\n        self.ui.activateWindow()\n\n\nif __name__ == '__main__':\n    import sys\n\n    app = HexSimAnalysisApp(sys.argv)\n    app.settings_load_ini(\".\\\\Settings\\\\HexSIM_Analysis.ini\")\n\n    sys.exit(app.exec_())","repo_name":"micropolimi/HexSimAnalyser","sub_path":"HexSimAnalyser_App.py","file_name":"HexSimAnalyser_App.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"33063631165","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n    B = int(input())\n    B_list = []\n\n    for i in range(1, B*B+1):\n        B_list.append(i)\n\n    list_B = []\n\n    for j in range(1, tc):\n\n        for k in range(1, tc+1):\n            tmp = []\n            for q in range(1 + tc*(k-1), (k * tc) + 1):\n                tmp.append(q)\n            list_B.append(tmp)\n    print(list_B)\n\n    cnt = tc * tc  # how many numbers to fill in\n    # deltas: right, down, left, up, b/c of the snail traversal order\n    dy = [0, 1, 0, -1]\n    dx = [1, 0, -1, 0]\n\n    x = 0  # horizontal coordinate\n    y = 0  # vertical coordinate\n    d = 0  # delta index\n    num = 1  # increasing number (the value written into the array)\n\n    arr = [[0] * tc for _ in range(tc)]  # grid to fill\n\n    while num <= cnt:  # a total of tc * tc numbers must be filled in\n\n        # turn direction at a wall, or when the cell cannot be filled\n\n        if 0 <= x < tc and 0 <= y < tc and not arr[x][y]:  # start only from [0,0]\n            arr[x][y] = num\n            num += 1\n\n        else:  # out of range\n            x -= dx[d]\n            y -= dy[d]\n            d = (d + 1) % 4  # the direction must cycle within 4, hence % 4\n\n        x += dx[d]\n        y += dy[d]\n\n    for row in arr:\n        print(row)\n\n","repo_name":"HYUNJUN-KANG/TIL","sub_path":"알고리즘/알고리즘/강의내용/달팽이/prob_1954.py","file_name":"prob_1954.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
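# --- Editor's note: a compact, self-contained version of the delta-based spiral ("snail") fill
# used by the sample above, useful for sanity-checking the expected output (the function name
# `spiral` is illustrative, not from the original file):

def spiral(n):
    arr = [[0] * n for _ in range(n)]
    dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]  # row/col deltas: right, down, left, up
    x = y = d = 0
    for num in range(1, n * n + 1):
        arr[x][y] = num
        nx, ny = x + dx[d], y + dy[d]
        if not (0 <= nx < n and 0 <= ny < n) or arr[nx][ny]:
            d = (d + 1) % 4  # turn clockwise at a wall or an already-filled cell
            nx, ny = x + dx[d], y + dy[d]
        x, y = nx, ny
    return arr

# spiral(3) -> [[1, 2, 3], [8, 9, 4], [7, 6, 5]]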
{"seq_id":"10689066593","text":"from .db import db, environment, SCHEMA, add_prefix_for_prod\n\nclass Cart(db.Model):\n    __tablename__ = 'carts'\n\n    if environment == \"production\":\n        __table_args__ = {'schema': SCHEMA}\n\n    id = db.Column(db.Integer, primary_key=True)\n    user_id = db.Column(db.Integer, db.ForeignKey(add_prefix_for_prod(\"users.id\")), nullable=False)\n    item_id = db.Column(db.Integer, db.ForeignKey(add_prefix_for_prod(\"items.id\")), nullable=False)\n    count = db.Column(db.Integer, nullable=False)\n    purchased = db.Column(db.Boolean, nullable=False, default=False, index=True)\n    purchased_at = db.Column(db.DateTime, default=None, index=True)\n\n    user = db.relationship(\"User\", back_populates=\"cart\")\n    item = db.relationship(\"Item\", back_populates=\"cart\")\n\n    @property\n    def to_dict(self):\n        return {\n            'id': self.id,\n            'item_id': self.item_id,\n            'count': self.count,\n            'purchased': self.purchased,\n            'item': {\n                'name': self.item.name,\n                'image': self.item.image,\n                'price': self.item.price,\n                'discount': self.item.discount\n            },\n            'purchased_at': self.purchased_at\n        }\n","repo_name":"JonathanSCarter/Nile","sub_path":"app/models/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"2010368153","text":"#!/usr/bin/python\nimport gi\ngi.require_version(\"Gst\", '1.0')\nfrom gi.repository import Gst, GObject, Gtk\n\nclient_id = 'enter_client_id'\nstream_url = 'https://soundcloud.com/girlsgeneration_smtown/girls-generation-everyday-love'\nmp3_stream_url = 'http://www.richardfarrar.com/audio/right.mp3'\ntrack_stream_url = 'https://api.soundcloud.com/tracks/134204364/stream?client_id=' + client_id\n\ndef play_stream(music_stream_uri):\n    music_stream_uri = \"/home/pi/Documents/google-hackfair/hackfair-speech/resources/test_dj.mp3\"\n    # creates a playbin (plays media from a uri)\n    player = Gst.ElementFactory.make(\"playbin\", \"player\")\n    print(\"player: \", player, \" track_url: \" + track_stream_url)\n    # set the uri\n    # player.set_property('uri', \"file://\" + music_stream_uri)\n    player.set_property('uri', track_stream_url)\n\n    # start playing\n    player.set_state(Gst.State.PLAYING)\n\n\nif __name__ == '__main__':\n    Gst.init(None)\n    play_stream(stream_url)\n\n# wait and let the music play\ninput('Press enter to stop playing...')\n","repo_name":"DjangoGirlsSeoul/hackfair-speech","sub_path":"gstream_test_1.py","file_name":"gstream_test_1.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"}
{"seq_id":"10706632286","text":"from PyQt4.QtCore import *\r\nfrom PyQt4.QtGui import *\r\nimport MarkovChain\r\n\r\nclass DynamicUser(QDialog):\r\n    def __init__(self, parent = None):\r\n        super(DynamicUser, self).__init__(parent)\r\n\r\n        #Main App\r\n        mainLayout = QBoxLayout(QBoxLayout.TopToBottom)\r\n        mainLayout.addWidget(self.topSection())\r\n        mainLayout.addWidget(self.fileSection())\r\n        mainLayout.addWidget(self.dataSection())\r\n        self.setLayout(mainLayout)\r\n        self.setWindowTitle(\"DynamicUser\")\r\n\r\n    #Top Section\r\n    def topSection(self):\r\n        #Label\r\n        nameLabel = QLabel(\"Name:\")\r\n\r\n        #LineEdit\r\n        self.nameText = QLineEdit()\r\n\r\n        #Buttons\r\n        self.saveAddButton = QPushButton()\r\n        self.saveAddButton.setText(\"Save / Add\")\r\n        self.cancelButton = QPushButton()\r\n        self.cancelButton.setText(\"Cancel\")\r\n\r\n        #Connect objects with signals\r\n        self.connect(self.saveAddButton, SIGNAL(\"clicked()\"), self.addExperiment)\r\n\r\n        #Group\r\n        mainGroup = QGroupBox()\r\n        #Layout\r\n        mainLayout = QHBoxLayout()\r\n        mainLayout.addWidget(nameLabel)\r\n        mainLayout.addWidget(self.nameText)\r\n        mainLayout.addWidget(self.saveAddButton)\r\n        mainLayout.addWidget(self.cancelButton)\r\n        mainGroup.setLayout(mainLayout)\r\n        return mainGroup\r\n\r\n    #File Section\r\n    def fileSection(self):\r\n        #Labels\r\n        fileChooseLabel = QLabel(\"Choose file of probability of presence:\")\r\n        timeStepLabel = QLabel(\"Time step:\")\r\n        totalTimeLabel = QLabel(\"Total time of simulation:\")\r\n        
#LineEdit\r\n self.fileChooseText = QLineEdit()\r\n #Buttons\r\n self.fileBrowseButton = QPushButton()\r\n self.fileBrowseButton.setText(\"Browse\")\r\n #SpinBoxes\r\n self.timeStepSpinBox = QSpinBox()\r\n self.timeStepSpinBox.setRange(0, 10000)\r\n self.totalTimeSpinBox = QSpinBox()\r\n self.totalTimeSpinBox.setRange(0, 60)\r\n #ComboBoxes\r\n self.timeStepComboBox = QComboBox()\r\n self.timeStepComboBox.addItems([\"seg\", \"min\", \"hour\", \"day\"]) \r\n self.totalTimeComboBox = QComboBox()\r\n self.totalTimeComboBox.addItems([\"seg\", \"min\", \"hour\", \"day\"])\r\n \r\n #Connect objects with signals\r\n self.connect(self.fileBrowseButton, SIGNAL(\"clicked()\"), self.setProfileFile)\r\n \r\n #Main Group\r\n fileGroup = QGroupBox()\r\n fileGroup.setTitle(\"File\")\r\n \r\n #Layout\r\n fileLayout = QBoxLayout(QBoxLayout.TopToBottom)\r\n topFileLayout = QHBoxLayout()\r\n bottomFileLayout = QHBoxLayout()\r\n \r\n #Top Layout\r\n topFileLayout.addWidget(fileChooseLabel)\r\n topFileLayout.addWidget(self.fileChooseText)\r\n topFileLayout.addWidget(self.fileBrowseButton)\r\n #Bottom Layout\r\n bottomFileLayout.addWidget(timeStepLabel)\r\n bottomFileLayout.addWidget(self.timeStepSpinBox)\r\n bottomFileLayout.addWidget(self.timeStepComboBox)\r\n \r\n bottomFileLayout.addWidget(totalTimeLabel)\r\n bottomFileLayout.addWidget(self.totalTimeSpinBox)\r\n bottomFileLayout.addWidget(self.totalTimeComboBox)\r\n #Adds layouts to main layout\r\n fileLayout.addLayout(topFileLayout)\r\n fileLayout.addLayout(bottomFileLayout)\r\n fileGroup.setLayout(fileLayout)\r\n \r\n return fileGroup\r\n \r\n #Data Section\r\n def dataSection(self):\r\n #Labels\r\n mobilityLabel = QLabel(\"Mobility:\")\r\n numberLabel = QLabel(\"Number of long absences:\")\r\n distributionLabel = QLabel(\"Distribution of duration of long:\")\r\n #ComboBoxes\r\n self.mobilityComboBox = QComboBox()\r\n self.mobilityComboBox.addItems([\"Low\", \"Medium\", \"High\"])\r\n #SpinBoxes\r\n self.numberSpinBox = QSpinBox()\r\n self.numberSpinBox.setRange(0, 1000)\r\n #LineEdit\r\n self.distributionLineEdit = QLineEdit()\r\n #Button\r\n self.distributionButton = QPushButton()\r\n self.distributionButton.setText(\"Browse\")\r\n \r\n #Group\r\n dataGroup = QGroupBox()\r\n dataGroup.setTitle(\"Data\")\r\n dataLayout = QBoxLayout(QBoxLayout.TopToBottom)\r\n \r\n topLayout = QBoxLayout(QBoxLayout.LeftToRight)\r\n middleLayout = QBoxLayout(QBoxLayout.LeftToRight)\r\n bottomLayout = QBoxLayout(QBoxLayout.LeftToRight)\r\n #Top layout\r\n topLayout.addWidget(mobilityLabel)\r\n topLayout.addWidget(self.mobilityComboBox)\r\n #Middle Layout\r\n middleLayout.addWidget(numberLabel)\r\n middleLayout.addWidget(self.numberSpinBox)\r\n #Bottom Layout\r\n bottomLayout.addWidget(distributionLabel)\r\n bottomLayout.addWidget(self.distributionLineEdit)\r\n bottomLayout.addWidget(self.distributionButton)\r\n \r\n #Add Layouts to data Layout\r\n dataLayout.addLayout(topLayout)\r\n dataLayout.addLayout(middleLayout)\r\n dataLayout.addLayout(bottomLayout)\r\n dataGroup.setLayout(dataLayout)\r\n \r\n return dataGroup\r\n \r\n #Catch Browse file signal\r\n def setProfileFile(self):\r\n fileName = QFileDialog.getOpenFileName(self, \"Select Profile File\")\r\n if fileName == '':\r\n self.alertDialog(\"Error\", \"You must select a file\")\r\n else:\r\n markov = MarkovChain.MarkovChain()\r\n self.pares = markov.Create_List_Pairs(fileName)\r\n self.fileChooseText.setText(fileName)\r\n \r\n #Add a new experiment\r\n def addExperiment(self):\r\n if self.nameText.text() == \"\":\r\n 
self.alertDialog(\"Error\", \"You must enter a experiment name\");\r\n else:\r\n if self.fileChooseText.text() != \"\":\r\n timeStep = self.timeStepSpinBox.value()\r\n timeStepUnits = self.timeStepComboBox.currentText()\r\n totalTime = self.totalTimeSpinBox.value()\r\n totalTimeUnits = self.totalTimeComboBox.currentText()\r\n fileName = self.fileChooseText.text();\r\n experimentName = self.nameText.text();\r\n resultLine = \"File: %s\\nExp Name: %s\\n TimeStep: %i %s \\n TotalTime: %i %s \\n %s\" \\\r\n % (fileName, experimentName, timeStep, timeStepUnits, totalTime, totalTimeUnits, self.pares)\r\n self.alertDialog(\"Result\", resultLine)\r\n else:\r\n self.setProfileFile()\r\n \r\n #Open a new dialog box, for testing purposes\r\n def alertDialog(self, title, text):\r\n dialog = QDialog()\r\n dialog.setWindowTitle(title)\r\n result = QLabel()\r\n result.setText(text)\r\n layout = QBoxLayout(QBoxLayout.LeftToRight)\r\n layout.addWidget(result)\r\n dialog.setLayout(layout)\r\n dialog.show()\r\n dialog.exec_()\r\n\r\n#Launch the App \r\n#if __name__ == \"__main__\":\r\n# app = QApplication(sys.argv)\r\n# form = DynamicUser()\r\n# form.show()\r\n# app.exec_()\r\n \r\n ","repo_name":"sbarrat/proyect","sub_path":"DynamicUser.py","file_name":"DynamicUser.py","file_ext":"py","file_size_in_byte":7069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42777716266","text":"import unittest\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom unittest.mock import MagicMock\nfrom unittest.mock import Mock\nfrom unittest.mock import patch\nfrom tap_google_ads.streams import ReportStream\nfrom tap_google_ads.streams import make_request\nimport singer\nimport pytz\n\nresource_schema = {\n \"accessible_bidding_strategy\": {\n \"fields\": {}\n },\n\n}\n\nclass TestEndDate(unittest.TestCase):\n\n def get_queries_from_sync(self, fake_make_request):\n all_queries_requested = []\n for request_sent in fake_make_request.call_args_list:\n # The function signature is gas, query, customer_id, config\n _, query, _, _ = request_sent.args\n all_queries_requested.append(query)\n return all_queries_requested\n\n def run_sync(self, start_date, end_date, fake_make_request):\n\n # Create the stream so we can call sync\n my_report_stream = ReportStream(\n fields=[],\n google_ads_resource_names=['accessible_bidding_strategy'],\n resource_schema=resource_schema,\n primary_keys=['foo']\n )\n\n # Create a config that maybe has an end date\n config = {\"start_date\": str(start_date),}\n\n # If end_date exists, write it to the config\n if end_date:\n config[\"end_date\"] = str(end_date)\n\n my_report_stream.sync(\n Mock(),\n {\"customerId\": \"123\",\n \"loginCustomerId\": \"456\"},\n {\"tap_stream_id\": \"hi\",\n \"stream\": \"hi\",\n \"metadata\": []},\n config,\n {},\n None\n )\n\n @patch('singer.utils.now')\n @patch('tap_google_ads.streams.make_request')\n def test_no_end_date(self, fake_make_request, fake_datetime_now):\n start_date = datetime(2022, 1, 1, 0, 0, 0)\n end_date = datetime(2022, 3, 1, 0, 0, 0)\n\n # Adding tzinfo helped the mock to work and avoids a\n # TypeError(can't subtract offset-naive and offset-aware\n # datetimes) here in the test\n fake_datetime_now.return_value = end_date.replace(tzinfo=pytz.UTC)\n\n # Don't pass in end_date to test the tap's fallback to today\n self.run_sync(start_date, None, fake_make_request)\n all_queries_requested = self.get_queries_from_sync(fake_make_request)\n\n date_delta = end_date - start_date\n\n # 
Add one to make it inclusive of the end date\n days_between_start_and_end = date_delta.days + 1\n\n # Compute the range of expected days, because end_date will always shift\n expected_days = [\n '2022-01-01', '2022-01-02', '2022-01-03', '2022-01-04',\n '2022-01-05', '2022-01-06', '2022-01-07', '2022-01-08',\n '2022-01-09', '2022-01-10', '2022-01-11', '2022-01-12',\n '2022-01-13', '2022-01-14', '2022-01-15', '2022-01-16',\n '2022-01-17', '2022-01-18', '2022-01-19', '2022-01-20',\n '2022-01-21', '2022-01-22', '2022-01-23', '2022-01-24',\n '2022-01-25', '2022-01-26', '2022-01-27', '2022-01-28',\n '2022-01-29', '2022-01-30', '2022-01-31', '2022-02-01',\n '2022-02-02', '2022-02-03', '2022-02-04', '2022-02-05',\n '2022-02-06', '2022-02-07', '2022-02-08', '2022-02-09',\n '2022-02-10', '2022-02-11', '2022-02-12', '2022-02-13',\n '2022-02-14', '2022-02-15', '2022-02-16', '2022-02-17',\n '2022-02-18', '2022-02-19', '2022-02-20', '2022-02-21',\n '2022-02-22', '2022-02-23', '2022-02-24', '2022-02-25',\n '2022-02-26', '2022-02-27', '2022-02-28', '2022-03-01',\n ]\n\n for day in expected_days:\n self.assertTrue(\n any(\n day in query for query in all_queries_requested\n )\n )\n\n @patch('tap_google_ads.streams.make_request')\n def test_end_date_one_day_after_start(self, fake_make_request):\n start_date = datetime(2022, 3, 5, 0, 0, 0)\n end_date = datetime(2022, 3, 6, 0, 0, 0)\n self.run_sync(start_date, end_date, fake_make_request)\n all_queries_requested = self.get_queries_from_sync(fake_make_request)\n\n expected_days = [\n \"2022-03-05\",\n \"2022-03-06\",\n ]\n\n for day in expected_days:\n self.assertTrue(\n any(\n day in query for query in all_queries_requested\n )\n )\n\n @patch('tap_google_ads.streams.make_request')\n def test_end_date_one_day_before_start(self, fake_make_request):\n start_date = datetime(2022, 3, 6, 0, 0, 0)\n end_date = datetime(2022, 3, 5, 0, 0, 0)\n self.run_sync(start_date, end_date, fake_make_request)\n all_queries_requested = self.get_queries_from_sync(fake_make_request)\n\n # verify no requests are made with an invalid start/end date configuration\n self.assertEqual(len(all_queries_requested), 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"singer-io/tap-google-ads","sub_path":"tests/unittests/test_sync.py","file_name":"test_sync.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"30010806658","text":"from collections import deque\n\ndef errorCheck(num):\n if not isinstance(num, int):\n print(\"not an int\")\n return False\n return True\n \ndef breakUp(num):\n if not errorCheck(num): return None\n result = []\n for c in str(num):\n result.append(int(c))\n return result\n \ndef breakUpMath(num):\n if not errorCheck(num): return None\n result = deque()\n addNegative = False\n if (num < 0):\n addNegative = True\n num = abs(num)\n if (num == 0):\n return [0]\n pwr = 0 \n while (10**pwr <= num):\n pwr += 1\n # num 1024, pwr 2 -> (24 - 4)//10 = 2\n val = (num%(10**pwr) - (num % 10**(pwr-1))) // (10 ** (pwr-1))\n result.appendleft(val)\n # Need to track if negative or positive\n if addNegative: \n result.appendleft(\"-\")\n else:\n result.appendleft(\"+\")\n \n result = list(result)\n print(\"output {}\".format(result))\n return result\n\ndef addNum(l, num):\n l = list(l) #copy value of l in to l so its no longer referencing argument passed in\n if ((num < 0 and l[0] == \"+\") or (num > 0 and l[0] == \"-\")):\n return subNum(l, num)\n bothNegative = 
False\n    if (num < 0 and l[0] == \"-\"):\n        bothNegative = True\n    digits = breakUpMath(num);\n    digits = digits[1:]\n    l = l[1:]\n    if len(digits) > len(l):\n        top = digits\n        bottom = l\n    else:\n        top = l\n        bottom = digits\n    bottom.reverse()\n    top.reverse()\n    carryOver = 0\n    for i in range(len(top)):\n        toAdd = 0\n        if(i < len(bottom)):\n            toAdd = bottom[i]\n        newNum = toAdd + carryOver + top[i]\n        if newNum > 9:\n            carryOver = 1\n            newNum = newNum % 10\n        else:\n            carryOver = 0\n        top[i] = newNum\n    if (carryOver == 1):\n        top = top + [1]\n    top.reverse()\n    if(bothNegative):\n        top = [\"-\"] + top\n    print(\"add: {}\".format(top))\n    return top\n\ndef subNum(l, num):\n    # TODO: implement digit-wise subtraction\n    print(\"subNum not implemented yet\")\n\n# Take a number, convert it to an array and then add a target number\n# 123 + 11 => [1,2,3] + 11 => [1,3,4]\n# How do we handle negatives?\nif __name__ == \"__main__\":\n    print(\"Num Breakup\")\n    breakUp(1234)\n    result = breakUpMath(1234)\n    breakUpMath(0)\n    breakUpMath(1)\n    print(912+1234)\n    addNum(result, 912)\n\n    print(99999+1234)\n    print(result)\n    addNum(result, -99999)\n","repo_name":"halfpeaw/CodeProblems","sub_path":"round2/NumBreakUp.py","file_name":"NumBreakUp.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
{"seq_id":"10109791301","text":"\"\"\"\n    Pre-train embeddings using gensim w2v implementation (CBOW by default)\n\"\"\"\nimport gensim.models.word2vec as w2v\nimport csv\n\nfrom constants import *\n\nclass ProcessedIter(object):\n\n    def __init__(self, Y, filename):\n        self.filename = filename\n\n    def __iter__(self):\n        with open(self.filename) as f:\n            r = csv.reader(f)\n            next(r)\n            for row in r:\n                yield (row[3].split())\n\ndef word_embeddings(Y, notes_file, embedding_size, min_count, n_iter):\n    modelname = \"processed_%s.w2v\" % (Y)\n    sentences = ProcessedIter(Y, notes_file)\n\n    model = w2v.Word2Vec(size=embedding_size, min_count=min_count, workers=4, iter=n_iter)\n    print(\"building word2vec vocab on %s...\" % (notes_file))\n\n    model.build_vocab(sentences)\n    print(\"training...\")\n    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)\n    out_file = '/'.join(notes_file.split('/')[:-1] + [modelname])\n    print(\"writing embeddings to %s\" % (out_file))\n    model.save(out_file)\n    return out_file\n\n","repo_name":"jamesmullenbach/caml-mimic","sub_path":"dataproc/word_embeddings.py","file_name":"word_embeddings.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":248,"dataset":"github-code","pt":"76"}
{"seq_id":"73232482166","text":"#!/usr/bin/python3\n\"\"\" this module contains the append_write function \"\"\"\n\n\ndef append_write(filename=\"\", text=\"\"):\n    \"\"\" appends a string at the end of a text file and\n    returns the number of characters added \"\"\"\n    num_chars = 0\n    with open(filename, 'a') as fl:\n        num_chars = fl.write(text)\n    return num_chars\n","repo_name":"I7RANK/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/4-append_write.py","file_name":"4-append_write.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"74971082804","text":"from itertools import cycle\n\n############################\n# Advent of Code 2022 Day 17\n############################\n\ndef get_rocks():\n    # in a function rather than a constant so that cycle resets after each call\n    return cycle([\n        [{'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 2, 'y': 0}, {'x': 
3, 'y': 0}],\n [{'x': 1, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 1}, {'x': 2, 'y': 1}, {'x': 1, 'y': 2}],\n [{'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 2, 'y': 0}, {'x': 2, 'y': 1}, {'x': 2, 'y': 2}],\n [{'x': 0, 'y': 0}, {'x': 0, 'y': 1}, {'x': 0, 'y': 2}, {'x': 0, 'y': 3}],\n [{'x': 0, 'y': 0}, {'x': 1, 'y': 0}, {'x': 0, 'y': 1}, {'x': 1, 'y': 1}]\n ])\n\n\ndef can_move_horizontal(rock: list[dict], dx: int, ground: set[tuple]):\n for d in rock:\n x, y = d['x'], d['y']\n if not(0 <= x + dx <= 6):\n return False\n if (x + dx, y) in ground:\n return False\n return True\n\n\ndef can_drop(rock: list[dict], ground: set[tuple]):\n for d in rock:\n x, y = d['x'], d['y']\n if (x, y - 1) in ground:\n return False\n return True\n\n\ndef drop(rock: list[dict], ground: set[tuple], jet_queue: cycle):\n while True:\n j = jet_queue.__next__()\n dx = {\"<\": -1, \">\": 1}[j]\n if can_move_horizontal(rock, dx, ground):\n for d in rock:\n d['x'] += dx\n if can_drop(rock, ground):\n for d in rock:\n d['y'] -= 1\n else:\n break\n\n\ndef update_cycles(history: dict[int, int], i: int, cycles: dict[int, tuple[int, int]]):\n for c in range(i):\n cycle_len = c + 1\n if i % cycle_len == 0:\n diff = history[i] - history[i - cycle_len]\n if cycle_len not in cycles:\n cycles[cycle_len] = (diff, 1)\n else:\n (d, n) = cycles[cycle_len]\n if d == diff:\n cycles[cycle_len] = (d, n + 1)\n # need to see same difference for 10 cycles\n if n >= 9:\n return (cycle_len, d)\n else:\n cycles[cycle_len] = (diff, 1)\n return (None, None)\n\n\ndef drop_n_rocks(jet_queue, n):\n rocks = get_rocks()\n height = 0\n ground = set([(i, 0) for i in range(7)])\n history, cycles = {}, {}\n cycle_len, d, return_idx = None, None, None\n\n for i in range(n):\n if i == return_idx:\n return int(height + ((n - i) / cycle_len) * d)\n\n rock = [{'x': d['x'] + 2, 'y': d['y'] + height + 4} for d in rocks.__next__()]\n drop(rock, ground, jet_queue)\n ground.update((d['x'], d['y']) for d in rock)\n height = max(height, max(d['y'] for d in rock))\n\n if return_idx is None:\n history[i] = height\n (cycle_len, d) = update_cycles(history, i, cycles)\n if cycle_len is not None:\n return_idx = i + (n % cycle_len)\n\n return height\n\n\ndef part_1(input):\n jet_queue = cycle(input[0])\n return drop_n_rocks(jet_queue, 2022)\n\ndef part_2(input):\n jet_queue = cycle(input[0])\n return drop_n_rocks(jet_queue, 1000000000000)\n\n\nday = 17\n\n\nwith open(f'day{day}/day{day}_ex.txt') as ex_filename:\n example_input = [r.strip() for r in ex_filename.readlines()]\n print(\"---Example---\")\n print(f'Part 1: {part_1(example_input)}')\n print(f'Part 2: {part_2(example_input)}')\n\nwith open(f'day{day}/day{day}.txt') as filename:\n puzzle_input = [r.strip() for r in filename.readlines()]\n print(\"---Puzzle---\")\n print(f'Part 1: {part_1(puzzle_input)}')\n print(f'Part 2: {part_2(puzzle_input)}')\n","repo_name":"anniebryan/advent-of-code","sub_path":"2022/day17/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72768462966","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.14.5\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n# ruff: noqa: E402\n# ruff: noqa: E501\n\n# %% [markdown]\n# # Fitting a polynomial with Gaussian priors\n#\n# We fit a simple polynomial 
with Gaussian priors, which is an example of a Gauss-linear\n# problem for which the results obtained using Subspace Iterative Ensemble Smoother\n# (SIES) tend to those obtained using Ensemble Smoother (ES).\n# This notebook illustrated this property.\n# %%\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nnp.set_printoptions(suppress=True)\nrng = np.random.default_rng(12345)\n\nimport matplotlib.pyplot as plt\n\nCOLORS = list(plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"])\n\nplt.rcParams[\"figure.figsize\"] = (6, 6)\nplt.rcParams.update({\"font.size\": 10})\nfrom ipywidgets import interact # noqa # isort:skip\nimport ipywidgets as widgets # noqa # isort:skip\nfrom p_tqdm import p_map\n\nimport iterative_ensemble_smoother as ies\n\n# %% [markdown]\n# ## Define synthetic truth and use it to create noisy observations\n\n# %%\nensemble_size = 200\n\n\ndef poly(a, b, c, x):\n return a * x**2 + b * x + c\n\n\n# True patameter values\na_t = 0.5\nb_t = 1.0\nc_t = 3.0\n\nnoise_scale = 0.1\nx_observations = [0, 2, 4, 6, 8]\nobservations = [\n (\n rng.normal(loc=1, scale=noise_scale) * poly(a_t, b_t, c_t, x),\n noise_scale * poly(a_t, b_t, c_t, x),\n x,\n )\n for x in x_observations\n]\n\nd = pd.DataFrame(observations, columns=[\"value\", \"sd\", \"x\"])\nd = d.set_index(\"x\")\nnum_obs = d.shape[0]\n\nfig, ax = plt.subplots(figsize=(7, 4))\nx_plot = np.linspace(0, 10, 2**8)\nax.set_title(\"Truth and noisy observations\")\nax.set_xlabel(\"Time step\")\nax.set_ylabel(\"Response\")\nax.plot(x_plot, poly(a_t, b_t, c_t, x_plot))\nax.plot(d.index.get_level_values(\"x\"), d.value.values, \"o\")\nax.grid()\nfig.tight_layout()\nplt.show()\n\n# %% [markdown]\n# ## Assume diagonal observation error covariance matrix and define perturbed observations\n\n# %%\nR = np.diag(d.sd.values**2)\n\nE = rng.multivariate_normal(mean=np.zeros(len(R)), cov=R, size=ensemble_size).T\nassert E.shape == (num_obs, ensemble_size)\n\nD = d.value.values.reshape(-1, 1) + E\n\n# %% [markdown]\n# ## Define Gaussian priors\n\n# %%\ncoeff_a = rng.standard_normal(size=ensemble_size)\ncoeff_b = rng.standard_normal(size=ensemble_size)\ncoeff_c = rng.standard_normal(size=ensemble_size)\n\nX = np.vstack([coeff_a, coeff_b, coeff_c])\n\n# %% [markdown]\n# ## Run forward model in parallel\n\n# %%\nfwd_runs = p_map(\n poly,\n coeff_a,\n coeff_b,\n coeff_c,\n [np.arange(max(x_observations) + 1)] * ensemble_size,\n desc=\"Running forward model.\",\n)\n\n# %% [markdown]\n# ## Pick responses where we have observations\n\n# %%\nY = np.array(\n [fwd_run[d.index.get_level_values(\"x\").to_list()] for fwd_run in fwd_runs]\n).T\n\nassert Y.shape == (\n num_obs,\n ensemble_size,\n), \"Measured responses must be a matrix with dimensions (number of observations x number of realisations)\"\n\n# %% [markdown]\n# ## Condition on observations to calculate posterior using both `ES` and `SIES`\n\n# %%\nfrom iterative_ensemble_smoother.utils import steplength_exponential\n\nX_ES_ert = X.copy()\nY_ES_ert = Y.copy()\nsmoother_es = ies.SIES(\n parameters=X_ES_ert,\n covariance=d.sd.values**2,\n observations=d.value.values,\n seed=42,\n)\nX_ES_ert = smoother_es.sies_iteration(Y_ES_ert, step_length=1.0)\n\n\nX_IES_ert = X.copy()\nY_IES_ert = Y.copy()\nsmoother_ies = ies.SIES(\n parameters=X_IES_ert,\n covariance=d.sd.values**2,\n observations=d.value.values,\n seed=42,\n)\nn_ies_iter = 7\nfor i in range(n_ies_iter):\n\n step_length = steplength_exponential(i + 1)\n X_IES_ert = smoother_ies.sies_iteration(Y_IES_ert, step_length=step_length)\n\n 
_coeff_a, _coeff_b, _coeff_c = X_IES_ert\n\n _fwd_runs = p_map(\n poly,\n _coeff_a,\n _coeff_b,\n _coeff_c,\n [np.arange(max(x_observations) + 1)] * ensemble_size,\n desc=f\"SIES ert iteration {i}\",\n )\n\n Y_IES_ert = np.array(\n [fwd_run[d.index.get_level_values(\"x\").to_list()] for fwd_run in _fwd_runs]\n ).T\n\n\n# %% [markdown]\n# ## Plots to compare results\n\n\n# %%\ndef plot_posterior(ax, posterior, method):\n for i, param in enumerate(\"abc\"):\n ax[i].set_title(param)\n ax[i].hist(posterior[i, :], label=f\"{method} posterior\", alpha=0.5, bins=\"fd\")\n ax[i].legend()\n\n fig.tight_layout()\n return ax\n\n\nfig, ax = plt.subplots(nrows=3, figsize=(7, 8))\n\nfor i in range(3):\n ax[i].hist(X[i, :], label=\"prior\", bins=\"fd\")\n\nax[0].axvline(a_t, color=\"k\", linestyle=\"--\", label=\"truth\")\nax[1].axvline(b_t, color=\"k\", linestyle=\"--\", label=\"truth\")\nax[2].axvline(c_t, color=\"k\", linestyle=\"--\", label=\"truth\")\n\nplot_posterior(ax, X_ES_ert, method=\"ES ert\")\n_ = plot_posterior(ax, X_IES_ert, method=f\"SIES ert ({n_ies_iter})\")\n\n# %%\nfig, axes = plt.subplots(1, 3, figsize=(8, 2.75))\naxes = axes.ravel()\nlabels = \"abc\"\ntrue_parameters = [a_t, b_t, c_t]\n\nfig.suptitle(f\"SIES ert ({n_ies_iter}) Posterior distribution\")\nfor k, (i, j) in enumerate(itertools.combinations([0, 1, 2], 2)):\n axes[k].scatter(X[i, :], X[j, :], s=15, alpha=0.6)\n axes[k].scatter(X_ES_ert[i, :], X_ES_ert[j, :], s=15, alpha=0.2)\n axes[k].scatter(X_IES_ert[i, :], X_IES_ert[j, :], s=15, alpha=0.2)\n axes[k].scatter(\n [true_parameters[i]],\n [true_parameters[j]],\n color=\"black\",\n s=100,\n label=\"True value\",\n )\n axes[k].set_xlabel(labels[i])\n axes[k].set_ylabel(labels[j])\n\n\naxes[k].legend()\nfig.tight_layout()\nplt.show()\n\n# %%\nfig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5), sharex=True, sharey=True)\nx_plot = np.linspace(0, 10, 2**8)\n\n# Plot the prior\nax1.set_title(\"Prior\")\nax1.plot(x_plot, poly(a_t, b_t, c_t, x_plot), zorder=10, lw=4, color=\"black\")\nfor parameter_prior in X.T:\n ax1.plot(\n x_plot, poly(*parameter_prior, x_plot), color=COLORS[0], alpha=0.1, zorder=5\n )\n\n# Plot the posterior\nax2.set_title(\"ES ert posterior\")\nax2.plot(x_plot, poly(a_t, b_t, c_t, x_plot), zorder=10, lw=4, color=\"black\")\nfor parameter_posterior in X_ES_ert.T:\n ax2.plot(\n x_plot, poly(*parameter_posterior, x_plot), alpha=0.1, zorder=5, color=COLORS[1]\n )\n\n# Plot the posterior\nax3.set_title(f\"SIES ert ({n_ies_iter}) posterior\")\nax3.plot(x_plot, poly(a_t, b_t, c_t, x_plot), zorder=10, lw=4, color=\"black\")\nfor parameter_posterior in X_IES_ert.T:\n ax3.plot(\n x_plot, poly(*parameter_posterior, x_plot), alpha=0.1, zorder=5, color=COLORS[2]\n )\n\n# Common axes setup\nfor ax in [ax1, ax2, ax3]:\n ax.set_ylim([0, 70])\n ax.set_xlabel(\"Time step\")\n ax.set_ylabel(\"Response\")\n ax.grid(zorder=0)\n\nfig.tight_layout()\nplt.show()\n","repo_name":"equinor/iterative_ensemble_smoother","sub_path":"docs/source/Polynomial.py","file_name":"Polynomial.py","file_ext":"py","file_size_in_byte":6879,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"31080708570","text":"# import torch\n# import uuid\n# from ultralytics import yolo\n# def runModel(img):\n# model = torch.hub.load('ultralytics/yolov5', 'yolov5m')\n# # img = 'https://i.ytimg.com/vi/q71MCWAEfL8/maxresdefault.jpg' # or file, Path, PIL, OpenCV, numpy, list\n# results = model(img)\n# results.print()\n# 
results.save(save_dir='results')\n\n# runModel('https://i.ytimg.com/vi/q71MCWAEfL8/maxresdefault.jpg')\n\nfrom ultralytics import YOLO\n\n# Load a model\nmodel = YOLO(\"yolov8n.yaml\")  # build a new model from scratch\nmodel = YOLO(\"yolov8n.pt\")  # load a pretrained model (recommended for training)\n\n# Use the model\nmodel.train(data=\"coco128.yaml\", epochs=3)  # train the model\nmetrics = model.val()  # evaluate model performance on the validation set\nresults = model(\"https://ultralytics.com/images/bus.jpg\")  # predict on an image\nsuccess = model.export(format=\"onnx\")  # export the model to ONNX format","repo_name":"MoulyAkash/ARIA","sub_path":"ML/OD/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
{"seq_id":"37514612842","text":"import math\nimport descr_fit as df\nimport numpy as np\nimport scipy.optimize as opt\nimport os, sys, itertools\nimport random\nfrom time import sleep\nfrom scipy.stats import poisson, sem\nimport pdb\n\n# Don't duplicate work that's already done in the \"main\" helper_fcns\n\nmaindir = os.path.abspath('../../'); # it's two directories up\nsys.path.insert(0, maindir);\n##############\n### Imports from \"main\" helper_fcns\n##############\n# -- basic things\nfrom helper_fcns import nan_rm, np_smart_load, bw_lin_to_log, bw_log_to_lin, resample_array, random_in_range\nfrom helper_fcns import polar_vec_mean, phase_fit_name, phase_advance, project_resp, get_phAdv_model\nfrom helper_fcns import descrLoss_name, descrMod_name, descrFit_name\nfrom helper_fcns import flatten_list as flatten\n# -- rvc\nfrom helper_fcns import rvc_mod_suff, rvc_fit_name, get_rvc_model\nfrom helper_fcns import naka_rushton, get_rvcResp, rvc_fit\n# -- sf\nfrom helper_fcns import dog_charFreq, dog_get_param, dog_init_params, deriv_gauss, compute_SF_BW, fix_params\nfrom helper_fcns import DiffOfGauss, DoGsach, dog_prefSfMod, dog_charFreqMod, get_xc_from_slope\nfrom helper_fcns import DoG_loss, get_descrResp\nfrom helper_fcns import flexible_Gauss_np as flexible_Gauss\nfrom helper_fcns import descr_prefSf as dog_prefSf # to keep the function call here unchanged from previous version\n\n##############\n### Code written *here*, i.e. just for Sach stuff\n##############\n\n# load_modParams - [UNUSED] load the 4 parameters from the Tony fits...\n\n# unpack_f1arr - to keep backwards compatibility, unpack f1arr[con][sf] dict-of-dicts into array\n\n# var_explained - compute the variance explained given data/model fit\n\n# dog_fit - used to fit the Diff of Gauss responses -- either separately for each con, or jointly for all cons within a given dispersion\n\n# blankResp - return mean/std of blank responses (i.e. baseline firing rate) for Sach's experiment\n# tabulateResponses - Organizes measured and model responses for Sach's experiment\n\n# writeDataTxt - write [sf mean sem] for a given cell/contrast\n# writeCellTxt - call writeDataTxt for all contrasts for a cell\n
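# --- Editor's note: the helpers defined below (var_expl_direct, var_explained) score a fit as
# percent variance explained. A tiny, self-contained numpy illustration of that same formula
# (a perfect prediction scores 100; predicting the grand mean scores 0):
import numpy as np

def variance_explained_demo(pred, obs):
    resp_dist = lambda x, y: np.sum(np.square(x - y)) / np.maximum(len(x), len(y))
    grand_mean = np.mean(obs) * np.ones_like(obs)
    return 100 * (1 - resp_dist(pred, obs) / resp_dist(obs, grand_mean))

# obs = np.array([1.0, 2.0, 3.0])
# variance_explained_demo(obs, obs)                     # -> 100.0
# variance_explained_demo(np.full(3, obs.mean()), obs)  # -> 0.0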
\ndef load_modParams(which_cell, contrast, loadPath='/home/pl1465/SF_diversity/LGN/sach/structures/tonyFits/'):\n\n  nParams = 4;\n\n  loadName = 'cell%d_con%d.txt+.fit' % (which_cell, contrast);\n  fits = open(str(loadPath + loadName), 'r');\n  allLines = fits.readlines();\n  firstLine = allLines[0].split();\n  fL = [float(x) for x in firstLine]\n\n  return fL[0:nParams];\n\n#######\n\ndef unpack_f1arr(f1arr):\n  assert len(f1arr.keys())>0;\n  assert len(f1arr[0].keys())>0;\n  return np.array([[f1arr[x][y] for y in f1arr[x].keys()] for x in f1arr.keys()]);\n\ndef var_expl_direct(obs_mean, pred_mean):\n  # Just compute variance explained given the data and model responses (assumed same SF for each)\n  resp_dist = lambda x, y: np.sum(np.square(x-y))/np.maximum(len(x), len(y))\n  var_expl = lambda m, r, rr: 100 * (1 - resp_dist(m, r)/resp_dist(r, rr));\n\n  obs_grand_mean = np.mean(obs_mean) * np.ones_like(obs_mean); # make sure it's the same shape as obs_mean\n\n  return var_expl(pred_mean, obs_mean, obs_grand_mean);\n\ndef var_explained(data, modParams, whichInd=None, DoGmodel=1, rvcModel=None, whichSfs = None, ref_params=None, ref_rc_val=None, dataAreResps=False):\n  ''' given a set of responses and model parameters, compute the variance explained by the model (DoGsach)\n      --- whichInd is either the contrast index (if doing SF tuning)\n                       or SF index (if doing RVCs)\n  '''\n  resp_dist = lambda x, y: np.sum(np.square(x-y))/np.maximum(len(x), len(y))\n  var_expl = lambda m, r, rr: 100 * (1 - resp_dist(m, r)/resp_dist(r, rr));\n\n  if dataAreResps:\n    obs_mean = data; # we've directly passed in the means of interest\n  else:\n    respsSummary, stims, allResps = tabulateResponses(data); # Need to fit on f1\n    f1 = respsSummary[1];\n    if rvcModel is None: # SF\n      all_sfs = stims[1];\n      obs_mean = f1['mean'][whichInd, :];\n    else:\n      all_cons = stims[0];\n      obs_mean = f1['mean'][:, whichInd];\n\n  if whichSfs is not None:\n    all_sfs = whichSfs; # maybe we've passed in the Sfs to use...\n\n  if rvcModel is None: # then we're doing vExp for SF tuning\n    pred_mean = get_descrResp(modParams, all_sfs, DoGmodel, ref_rc_val=ref_rc_val);\n  else: # then we're getting RVC responses!\n    pred_mean = get_rvcResp(modParams, all_cons, rvcModel)\n\n  obs_grand_mean = np.mean(obs_mean) * np.ones_like(obs_mean); # make sure it's the same shape as obs_mean\n\n  return var_expl(pred_mean, obs_mean, obs_grand_mean);\n\n## - for fitting DoG models\n\ndef dog_fit(resps, all_cons, all_sfs, DoGmodel, loss_type, n_repeats, joint=0, ref_varExpl=None, veThresh=-np.nan, fracSig=1, ftol=2.220446049250313e-09, jointMinCons=3):\n  ''' Helper function for fitting descriptive functions to SF responses\n      if joint=True, (and DoGmodel is 1 or 2, i.e. not flexGauss), then we fit assuming\n      a fixed ratio for the center-surround gains and [freq/radius]\n      - i.e. of the 4 DoG parameters, 2 are fit separately for each contrast, and 2 are fit\n      jointly across all contrasts!\n      - note that ref_varExpl (optional) will be of the same form that the output for varExpl will be\n      - note that jointMinCons is the minimum # of contrasts that must be included for a joint fit to be run (e.g. 
2)\n\n inputs: self-explanatory, except for resps, which should be \"f1\" from tabulateResponses \n outputs: bestNLL, currParams, varExpl, prefSf, charFreq, [overallNLL, paramList; if joint=True]\n '''\n nCons = len(all_cons);\n if DoGmodel == 0:\n nParam = 5;\n else:\n nParam = 4;\n\n # unpack responses\n resps_mean = resps['mean'];\n resps_sem = resps['sem'];\n\n # next, let's compute some measures about the responses\n max_resp = np.nanmax(resps_mean.flatten());\n min_resp = np.nanmin(resps_mean.flatten());\n ############\n ### WARNING - we're subtracting min_resp-1 from all responses\n ############ \n #resps_mean = np.subtract(resps_mean, min_resp-1); # i.e. make the minimum response 1 spk/s...\n\n # and set up initial arrays\n bestNLL = np.ones((nCons, ), dtype=np.float32) * np.nan;\n currParams = np.ones((nCons, nParam), dtype=np.float32) * np.nan;\n varExpl = np.ones((nCons, ), dtype=np.float32) * np.nan;\n prefSf = np.ones((nCons, ), dtype=np.float32) * np.nan;\n charFreq = np.ones((nCons, ), dtype=np.float32) * np.nan;\n if joint>0:\n overallNLL = np.nan;\n params = np.nan;\n success = False;\n else:\n success = np.zeros((nCons, ), dtype=np.bool_);\n\n ### set bounds\n if DoGmodel == 0:\n min_bw = 1/4; max_bw = 10; # ranges in octave bandwidth\n bound_baseline = (0, max_resp);\n bound_range = (0, 1.5*max_resp);\n bound_mu = (0.01, 10);\n bound_sig = (np.maximum(0.1, min_bw/(2*np.sqrt(2*np.log(2)))), max_bw/(2*np.sqrt(2*np.log(2)))); # Gaussian at half-height\n if fracSig:\n bound_sigFrac = (0.2, 2);\n allBounds = (bound_baseline, bound_range, bound_mu, bound_sig, bound_sigFrac);\n else:\n allBounds = (bound_baseline, bound_range, bound_mu, bound_sig, bound_sig);\n elif DoGmodel == 1: # SACH\n bound_gainCent = (1, 3*max_resp);\n bound_radiusCent= (1e-2, 1.5);\n bound_gainSurr = (1e-2, 1); # multiplier on gainCent, thus the center must be weaker than the surround\n bound_radiusSurr = (1, 10); # (1,10) # multiplier on radiusCent, thus the surr. radius must be larger than the center\n if joint>0:\n if joint == 1: # original joint (fixed gain and radius ratios across all contrasts)\n bound_gainRatio = (1e-3, 1); # the surround gain will always be less than the center gain\n bound_radiusRatio= (1, 10); # the surround radius will always be greater than the ctr r\n # we'll add to allBounds later, reflecting joint gain/radius ratios common across all cons\n allBounds = (bound_gainRatio, bound_radiusRatio);\n elif joint == 2: # fixed surround radius for all contrasts\n allBounds = (bound_radiusSurr, );\n elif joint == 3: # fixed center AND surround radius for all contrasts\n allBounds = (bound_radiusCent, bound_radiusSurr);\n # In advance of the thesis/publishing the LGN data, we will replicate some of Sach's key results\n # In particular, his thesis covers 4 joint models:\n # -- volume ratio: center and surround radii are fixed, but gains can vary (already covered in joint == 3)\n # -- center radius: fixed center radius across contrast (joint=4) AND fixed volume (i.e. make surround gain constant across contrast)\n # -- surround radius: fixed surround radius across contrast (joint=5) AND fixed volume (i.e. 
make surround gain constant across contrast) // fixed not in proportion to center, but in absolute value\n # -- center-surround: center and surround radii can vary, but ratio of gains is fixed (joint == 6)\n # ---- NOTE: joints 3-5 have 2*nCons + 2 parms; joint==6 has 3*nCons + 1\n elif joint == 4: # fixed center radius\n allBounds = (bound_radiusCent, bound_gainSurr, ); # center radius AND bound_gainSurr are fixed across condition\n elif joint == 5: # fixed surround radius (again, in absolute terms here, not relative, as is usually specified)\n allBounds = (bound_gainSurr, bound_radiusSurr, ); # surround radius AND bound_gainSurr are fixed across condition\n elif joint == 6: # fixed center:surround gain ratio\n allBounds = (bound_gainSurr, ); # we can fix the ratio by allowing the center gain to vary and keeping the surround in fixed proportion\n elif joint == 7 or joint == 8: # center radius determined by slope! we'll also fixed surround radius; if joint == 8, fixed surround gain instead of radius\n bound_xc_slope = (-1, 1); # 220505 fits inbounded; 220519 fits bounded (-1,1)\n bound_xc_inter = (None, None); #bound_radiusCent; # intercept - shouldn't start outside the bounds we choose for radiusCent\n allBounds = (bound_xc_inter, bound_xc_slope, bound_radiusSurr, ) if joint == 7 else (bound_xc_slope, bound_xc_inter, bound_gainSurr, )\n else:\n allBounds = (bound_gainCent, bound_radiusCent, bound_gainSurr, bound_radiusSurr);\n elif DoGmodel == 2:\n bound_gainCent = (1e-3, None);\n bound_freqCent = (1e-3, 2e1);\n bound_gainFracSurr = (1e-3, 2); # surround gain always less than center gain NOTE: SHOULD BE (1e-3, 1)\n bound_freqFracSurr = (5e-2, 1); # surround freq always less than ctr freq NOTE: SHOULD BE (1e-1, 1)\n if joint>0:\n if joint == 1: # original joint (fixed gain and radius ratios across all contrasts)\n bound_gainRatio = (1e-3, 3);\n bound_freqRatio = (1e-1, 1); \n # we'll add to allBounds later, reflecting joint gain/radius ratios common across all cons\n allBounds = (bound_gainRatio, bound_freqRatio);\n elif joint == 2: # fixed surround radius for all contrasts\n allBounds = (bound_freqFracSurr,);\n elif joint == 3: # fixed center AND surround radius for all contrasts\n allBounds = (bound_freqCent, bound_freqFracSurr);\n elif joint==0:\n bound_gainFracSurr = (1e-3, 1);\n bound_freqFracSurr = (1e-1, 1);\n allBounds = (bound_gainCent, bound_freqCent, bound_gainFracSurr, bound_freqFracSurr);\n\n ### organize responses -- and fit, if joint=0\n allResps = []; allRespsSem = []; allSfs = []; valCons = []; start_incl = 0; incl_inds = [];\n base_rate = np.min(resps_mean.flatten());\n for con in range(nCons):\n if all_cons[con] == 0: # skip 0 contrast...\n continue;\n else:\n valCons.append(all_cons[con]);\n valSfInds_curr = np.where(~np.isnan(resps_mean[con,:]))[0];\n resps_curr = resps_mean[con, valSfInds_curr];\n sem_curr = resps_sem[con, valSfInds_curr];\n\n ### prepare for the joint fitting, if that's what we've specified!\n if joint>0:\n if resps_curr.size == 0:\n continue;\n if ref_varExpl is None:\n start_incl = 1; # hacky...\n if start_incl == 0:\n if ref_varExpl[con] < veThresh:\n continue; # i.e. 
we're not adding this; yes we could move this up, but keep it here for now\n else:\n start_incl = 1; # now we're ready to start adding to our responses that we'll fit!\n\n allResps.append(resps_curr);\n allRespsSem.append(sem_curr);\n allSfs.append(all_sfs[valSfInds_curr]);\n incl_inds.append(con);\n # and add to the bounds list!\n if DoGmodel == 1:\n if joint == 1: # add the center gain and center radius for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_radiusCent);\n if joint == 2: # add the center and surr. gain and center radius for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_radiusCent, bound_gainSurr);\n if joint == 3: # add the center and surround gain for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_gainSurr);\n elif joint == 4: # fixed center radius, so add all other parameters\n allBounds = (*allBounds, bound_gainCent, bound_radiusSurr);\n elif joint == 5: # add the center and surr. gain and center radius for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_radiusCent);\n elif joint == 6: # fixed center:surround gain ratio\n allBounds = (*allBounds, bound_gainCent, bound_radiusCent, bound_radiusSurr);\n elif joint == 7: # center radius det. by slope, surround radius fixed\n allBounds = (*allBounds, bound_gainCent, bound_gainSurr);\n elif joint == 8: # center radius det. by slope, surround gain fixed\n allBounds = (*allBounds, bound_gainCent, bound_radiusSurr);\n elif DoGmodel == 2:\n if joint == 1: # add the center gain and center radius for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_freqCent);\n if joint == 2: # add the center and surr. gain and center radius for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_freqCent, bound_gainFracSurr);\n if joint == 3: # add the center and surround gain for each contrast \n allBounds = (*allBounds, bound_gainCent, bound_gainFracSurr);\n\n continue;\n\n ### otherwise, we're really going to fit here! [i.e. 
if joint is False]\n # first, specify the objection function!\n obj = lambda params: DoG_loss(params, resps_curr, all_sfs[valSfInds_curr], resps_std=sem_curr, loss_type=loss_type, DoGmodel=DoGmodel, joint=joint); # if we're here, then joint=0, but we'll still keep joint=joint\n\n for n_try in range(n_repeats):\n ###########\n ### pick initial params\n ###########\n init_params = dog_init_params(resps_curr, base_rate, all_sfs, valSfInds_curr, DoGmodel, fracSig=fracSig, bounds=allBounds)\n\n # choose optimization method\n if np.mod(n_try, 2) == 0:\n methodStr = 'L-BFGS-B';\n else:\n methodStr = 'TNC';\n \n try:\n wax = opt.minimize(obj, init_params, method=methodStr, bounds=allBounds);\n except:\n continue; # the fit has failed (bound issue, for example); so, go back to top of loop, try again\n \n # compare\n NLL = wax['fun'];\n params = wax['x'];\n\n if np.isnan(bestNLL[con]) or NLL < bestNLL[con]:\n bestNLL[con] = NLL;\n currParams[con, :] = params;\n curr_mod = get_descrResp(params, all_sfs[valSfInds_curr], DoGmodel);\n # TODO: 22.05.10 --> previously ignored sf==0 case for varExpl\n varExpl[con] = var_expl_direct(resps_curr, curr_mod);\n prefSf[con] = dog_prefSf(params, dog_model=DoGmodel, all_sfs=all_sfs[all_sfs>0]); # do not include 0 c/deg SF condition\n charFreq[con] = dog_charFreq(params, DoGmodel=DoGmodel);\n success[con] = wax['success'];\n\n if joint==0: # then we're DONE\n return bestNLL, currParams, varExpl, prefSf, charFreq, None, None, success; # placeholding None for overallNLL, params [full list]\n\n ### NOW, we do the fitting if joint=True\n if joint>0:\n if len(allResps)0], ref_rc_val=ref_rc_val);\n charFreq[conInd] = dog_charFreq(curr_params, DoGmodel=DoGmodel); \n\n # and NOW, we can return!\n return bestNLL, currParams, varExpl, prefSf, charFreq, overallNLL, params, success;\n##\n\n\n#####\n\n#####\n\ndef blankResp(data, get_dc=0):\n blanks = np.where(data['cont'] == 0);\n\n key = 'f0' if get_dc else 'f1';\n mu = np.mean(data[key][blanks]);\n std = np.std(data[key][blanks]);\n\n return mu, std;\n\ndef tabulateResponses(data, resample=False, sub_f1_blank=False, phAdjusted=1, dir=1, cross_val=1, redo_phAdv=True):\n ''' Given the dictionary containing all of the data, organize the data into the proper responses\n Specifically, we know that Sach's experiments varied contrast and spatial frequency\n Thus, we will organize responses along these dimensions [con, sf] OR [con][sf] (mean/arr, respectively)\n NOTE: If phAdjusted=1, then we return the phase-adjusted responses (amplitudes)!\n if phAdjusted=0, then we return vec-corrected but NOT phase-amplitude adjusted \n if phAdjusted=-1, then we do the (dumb, non-vector) scalar average\n ---- : We discovered on 22.04.07 that Sach's mean F1 phase/amplitude were not done using proper vector math (i.e. 
he simply took the mean of the amplitudes)\n ---- : So, we not only do the proper vector math but also apply the phase-amplitude relationship correction that we apply for my own LGN data\n '''\n all_cons = np.unique(data['cont']);\n all_cons = all_cons[all_cons>0];\n all_sfs = np.unique(data['sf']);\n\n f0 = dict();\n f0mean= np.nan * np.zeros((len(all_cons), len(all_sfs))); \n f0sem = np.nan * np.zeros((len(all_cons), len(all_sfs))); \n f1 = dict();\n f1mean = np.nan * np.zeros((len(all_cons), len(all_sfs)));\n f1mean_phCorrOnMeans = np.copy(f1mean);\n f1sem = np.nan * np.zeros((len(all_cons), len(all_sfs))); \n\n # rather than getting just the mean/s.e.m., we can also record/transfer the firing rate of each individual stimulus run\n f0arr = dict();\n f1arr = dict();\n f1arr_prePhCorr = dict();\n f1phs = dict();\n\n to_sub = blankResp(data, get_dc=False)[0] if sub_f1_blank else 0;\n\n n_trials = data['f1arr'].shape[-1]; # nConds x nTrials\n cntr_sizes = np.unique(data['cntr_size']); # choose larger size\n val_size = np.where(data['cntr_size']==cntr_sizes[-1])[0]; # why specifying size? Cell 33 has multiple sizes!!!\n if ~np.isnan(data['opac1'][0]):\n # then also make sure that the size takes into account when the opacity of the second grating is 0 (i.e. off)\n val_size = np.where(np.logical_and(val_size, data['opac1'][val_size]==0))[-1];\n for con in range(len(all_cons)):\n val_con = np.where(data['cont'][val_size] == all_cons[con])[0];\n f0arr[con] = dict();\n f1arr[con] = dict();\n f1arr_prePhCorr[con] = dict();\n f1phs[con] = dict();\n for sf in range(len(all_sfs)):\n val_sf = np.where(data['sf'][val_size][val_con] == all_sfs[sf])[0];\n f0arr[con][sf] = np.nan * np.zeros((n_trials, ));\n f1arr[con][sf] = np.nan * np.zeros((n_trials, ));\n f1phs[con][sf] = np.nan * np.zeros((n_trials, ));\n f1arr_prePhCorr[con][sf] = np.nan * np.zeros((n_trials, ));\n \n # Organize ALL trial -- we'll resample afterwards\n non_nan = nan_rm(data['f0arr'][val_size][val_con][val_sf]);\n f0arr[con][sf][0:len(non_nan)] = non_nan;\n f1amps_curr = nan_rm(data['f1arr'][val_size][val_con][val_sf] - to_sub)\n f1phs_curr = nan_rm(data['f1pharr'][val_size][val_con][val_sf]);\n\n if cross_val is None:\n holdout_frac = 1;\n else:\n holdout_frac = cross_val if cross_val<=1 else None;\n non_nan_inds = np.where(~np.isnan(data['f1arr'][val_size][val_con][val_sf]))[-1];\n new_inds = resample_array(resample, non_nan_inds, holdout_frac=holdout_frac);\n save_inds = new_inds if holdout_frac<1 else range(len(new_inds));\n\n # store the (potentially) resampled amplitudes, phases\n f1arr_prePhCorr[con][sf][save_inds] = f1amps_curr[new_inds];\n f1phs[con][sf][save_inds] = f1phs_curr[new_inds];\n # and if it is resample, then we should update data\n if resample and redo_phAdv and phAdjusted==1:\n data['f1arr'][val_size[val_con[val_sf[0]]]][save_inds] = f1amps_curr[new_inds];\n data['f1pharr'][val_size[val_con[val_sf[0]]]][save_inds] = f1phs_curr[new_inds];\n \n ### [end of loop over conditions] NOW, make the phAmp corrections, take means, etc\n # -- phAdv_model is used to project the responses; all_opts is organized by SF (ascending)\n phAdv_model, all_opts = df.phase_advance_fit(data, None, 'phAdv_dummy', dir=dir, to_save=0);\n\n # NOW, we go through by condition and store the (corrected, if applicable) answers\n # compute the mean amp, mean ph for vecF1 corrections (apply all data to resample and nonresampled)\n for con, sf in itertools.product(range(len(all_cons)), range(len(all_sfs))):\n f1amp_curr = 
nan_rm(f1arr_prePhCorr[con][sf]);\n f1ph_curr = nan_rm(f1phs[con][sf]);\n mean_amp, mean_ph,_,_ = polar_vec_mean([f1amp_curr], [f1ph_curr]);\n\n if phAdjusted==1:\n f1arr[con][sf][range(len(f1amp_curr))] = project_resp([f1amp_curr], [f1ph_curr], phAdv_model, [all_opts[sf]], disp=0)[0];\n elif phAdjusted==0:\n f1arr[con][sf][range(len(f1amp_curr))] = np.multiply(f1amp_curr, np.cos(np.deg2rad(mean_ph) - np.deg2rad(f1ph_curr)));\n elif phAdjusted==-1:\n f1arr[con][sf][range(len(f1amp_curr))] = f1amp_curr;\n\n # take mean, since some conditions have repeats - just average them\n # --- this applies regardless of phAdjustment, since the amplitudes would then be corrected\n f0mean[con, sf] = np.nanmean(f0arr[con][sf]); #np.mean(data['f0'][val_con][val_sf]);\n f0sem[con, sf] = sem(f0arr[con][sf], nan_policy='omit'); #np.mean(data['f0sem'][val_con][val_sf]);\n f1mean[con, sf] = np.nanmean(f1arr[con][sf]); #np.mean(data['f1'][val_con][val_sf]);\n if phAdjusted==1: # instead of projecting individual responses, we project on the mean\n f1mean_phCorrOnMeans[con, sf] = project_resp([mean_amp], [mean_ph], phAdv_model, [all_opts[sf]], disp=0)[0];\n f1sem[con, sf] = sem(f1arr[con][sf], nan_policy='omit'); #np.mean(data['f1sem'][val_con][val_sf]);\n\n f0['mean'] = f0mean;\n f0['sem'] = f0sem;\n f1['mean'] = f1mean_phCorrOnMeans; # was previously f1mean...\n f1['sem'] = f1sem;\n\n return [f0, f1], [all_cons, all_sfs], [f0arr, f1arr]#; [f0arr, f1arr, f1arr_prePhCorr];\n\ndef writeDataTxt(cellNum, f1, sfs, contrast, save_loc):\n \n obs_mean = f1['mean'][contrast, :];\n obs_sem = f1['sem'][contrast, :];\n \n write_name = 'cell%d_con%d.txt' % (cellNum, contrast);\n file = open(str(save_loc + write_name), 'w');\n\n for i in range(len(sfs)):\n file.write('%.3f %.3f %.3f\\n' % (sfs[i], obs_mean[i], obs_sem[i]));\n\n file.close();\n\ndef writeCellTxt(cellNum, load_path, save_loc):\n\n dataList = np_smart_load(load_path + 'sachData.npy');\n data = dataList[cellNum-1]['data'];\n\n resps, conds, _ = tabulateResponses(data);\n f1 = resps[1];\n all_cons = conds[0];\n all_sfs = conds[1];\n \n for i in range(len(all_cons)):\n writeDataTxt(cellNum, f1, all_sfs, i, save_loc);\n","repo_name":"paul-levy/SF_diversity","sub_path":"LGN/sach/helper_fcns_sach.py","file_name":"helper_fcns_sach.py","file_ext":"py","file_size_in_byte":30536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22907651982","text":"import glob\nimport cv2\nimport glob\nfrom sklearn.feature_extraction.image import extract_patches\nfrom itertools import cycle\nimport numpy as np\nimport os\nfrom Naked.toolshed.shell import muterun_js\nimport base64\n\n\nclass Generator():\n def __init__(self, patch_size, batch_size, identity):\n self.files = muterun_js(\"./application/dataQuery.js\", identity).stdout.decode('utf-8').split(\"\\n\")[7:-2]\n self.whole_size = int(muterun_js(\"./application/getTotalEnrollCount.js\", identity).stdout.decode('utf-8').split(\"\\n\")[-2])\n self.train_img = []\n self.train_mask = []\n self.preprocess()\n self.length = len(self.train_img)\n self.train = cycle(zip(self.train_img, self.train_mask))\n self.patch_size = patch_size\n self.batch_size = batch_size\n self.patch_length = self.length\n\n def preprocess(self):\n for i in range(len(self.files)):\n if i % 2 == 0:\n self.train_img.append(base64.b64decode(self.files[i]))\n else:\n self.train_mask.append(base64.b64decode(self.files[i]))\n def generator(self):\n\n data = np.zeros((0, self.patch_size, self.patch_size, 3))\n 
masks = np.zeros((0, self.patch_size, self.patch_size, 1))\n\n while 1:\n while data.shape[0] < self.batch_size:\n img, mask = next(self.train)\n img, mask = self.get_patches(img, mask)\n\n\n data = np.append(data, img, axis = 0)\n masks = np.append(masks, mask, axis = 0)\n\n x = data[:self.batch_size, :, :, :]\n y = masks[:self.batch_size, :, :, :]\n\n data = data[self.batch_size:, :, :, :]\n masks = masks[self.batch_size:, :, :, :]\n\n yield x, y\n\n def get_patches(self, img, mask):\n img = np.frombuffer(img, np.uint8)\n mask = np.frombuffer(mask, np.uint8)\n\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n mask = cv2.imdecode(mask, cv2.IMREAD_GRAYSCALE)\n _, mask = cv2.threshold(mask, 100, 255, cv2.THRESH_BINARY)\n mask = (mask/255.0).astype('uint8')\n\n img = cv2.resize(img, (self.patch_size, self.patch_size))\n mask = cv2.resize(mask, (self.patch_size, self.patch_size))\n\n img = img.reshape(-1, img.shape[0], img.shape[1], img.shape[2])\n mask = mask.reshape(-1, mask.shape[0], mask.shape[1], 1)\n\n return img, mask\n\n\n\n","repo_name":"MediCha-i-n/test_Deep","sub_path":"Polyp_gen.py","file_name":"Polyp_gen.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31543767795","text":"from django.contrib import admin\nfrom django.urls import path\nfrom app import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('history', views.history, name='history'),\n path('impression', views.impression, name='impression'),\n path('impression/add', views.add_impression, name='add_impression'),\n path('admin/', admin.site.urls),\n]\n","repo_name":"vladibuyanov/Obecne_Musem_Brhlovce","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7753757557","text":"import pytest\n\nfrom pymoo.algorithms.moo.nsga2 import NSGA2\nfrom pymoo.algorithms.soo.nonconvex.ga import GA\nfrom pymoo.core.crossover import ElementwiseCrossover\nfrom pymoo.factory import get_crossover, get_problem\nfrom pymoo.operators.crossover.dex import DEX\nfrom pymoo.operators.crossover.sbx import SBX\nfrom pymoo.operators.mutation.inversion import InversionMutation\nfrom pymoo.operators.sampling.rnd import PermutationRandomSampling, FloatRandomSampling\nfrom pymoo.optimize import minimize\nfrom pymoo.problems.single.traveling_salesman import create_random_tsp_problem\n\n\n@pytest.mark.parametrize('name', ['real_de', 'real_sbx', 'real_pcx', 'real_exp'])\ndef test_crossover_real(name):\n crossover = get_crossover(name, prob=0.95)\n method = GA(pop_size=20, crossover=crossover)\n minimize(get_problem(\"sphere\"), method, (\"n_gen\", 20))\n assert True\n\n\n@pytest.mark.parametrize('name', ['bin_ux', 'bin_hux', 'bin_one_point', 'bin_two_point'])\ndef test_crossover_bin(name):\n crossover = get_crossover(name, prob=0.95)\n method = NSGA2(pop_size=20, crossover=crossover)\n minimize(get_problem(\"zdt5\"), method, (\"n_gen\", 20))\n assert True\n\n\n@pytest.mark.parametrize('name', ['perm_ox', 'perm_erx'])\ndef test_crossover_perm(name):\n crossover = get_crossover(name, prob=0.95)\n method = GA(pop_size=20, crossover=crossover, mutation=InversionMutation(), sampling=PermutationRandomSampling())\n minimize(create_random_tsp_problem(10), method, (\"n_gen\", 20))\n assert True\n\n\ndef test_elementwise_crossover():\n\n problem = get_problem('zdt1')\n\n sampling = 
FloatRandomSampling()\n\n pop = sampling.do(problem, n_samples=3)\n\n crossover = ElementwiseCrossover(SBX(20))\n off = crossover.do(problem, pop[0], pop[1])\n assert len(off) == 2\n\n crossover = ElementwiseCrossover(DEX())\n off = crossover.do(problem, pop[0], pop[1], pop[2])\n assert len(off) == 1","repo_name":"ngctnnnn/RN-MuOENAS","sub_path":"Model/ManyObjARTS/algorithm/pymoo/tests/operators/test_crossover.py","file_name":"test_crossover.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"35291341359","text":"from mandala.all import *\nfrom mandala.tests.utils import *\n\n\ndef test_unit():\n storage = Storage()\n\n @op\n def f(x) -> int:\n return Transient(x + 1)\n\n with storage.run(attach_call_to_outputs=True):\n a = f(42)\n call: Call = a._call\n assert call.transient\n assert a.transient\n assert unwrap(a) == 43\n\n with storage.run(attach_call_to_outputs=True):\n a = f(42)\n call: Call = a._call\n # assert not a.in_memory - no longer true b/c caching\n assert a.transient\n assert call.transient\n\n with storage.run(recompute_transient=True, attach_call_to_outputs=True):\n a = f(42)\n call: Call = a._call\n\n assert a.in_memory\n assert a.transient\n assert call.transient\n\n\ndef test_composition():\n storage = Storage()\n\n @op\n def f(x) -> int:\n return Transient(x + 1)\n\n @op\n def g(x) -> int:\n return Transient(x**2)\n\n with storage.run():\n a = f(42)\n b = g(a)\n\n with storage.run():\n a = f(23)\n\n storage.cache.evict_all()\n\n try:\n with storage.run():\n a = f(23)\n b = g(a)\n assert False\n except ValueError as e:\n assert True\n\n with storage.run(recompute_transient=True):\n a = f(23)\n b = g(a)\n assert unwrap(b) == 576\n","repo_name":"amakelov/mandala","sub_path":"mandala/tests/test_transient.py","file_name":"test_transient.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"76"} +{"seq_id":"42817210852","text":"# -*- coding:utf-8 -*-\nfrom naver_scrapper import crawling as crawl_naver\nfrom google_scrapper import crawling as crawl_google\nfrom save import save_images\nimport argparse\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-name\", \"--people\", required=True)\nargs = parser.parse_args()\npeople = args.people\n\n\ndef crawl_images(name):\n naver_urls = crawl_naver(name)\n google_urls = crawl_google(name)\n imgUrls = naver_urls + google_urls\n print(f\"Total: {len(imgUrls)} images scrapped. 
\")\n # 원본, 얼굴, 흑백 저장\n save_images(name, imgUrls)\n\n\ndef main():\n names = people.split(',')\n for name in names:\n crawl_images(name)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rheeeuro/actor-scrapper","sub_path":"keyword_scrapper/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13415505470","text":"__author__ = 'mattilyra'\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport pytest\n\nfrom naklar.experiment import _find_conf_files\nfrom naklar.experiment import _read_conf_dicts\n\n\n@pytest.fixture\ndef experiment_tree(tmpdir):\n for i in xrange(10):\n pth = tmpdir.mkdir('exp{}'.format(i)).join('conf.pkl')\n d = {'a': 1, 'b': 2}\n pth.write(pickle.dumps(d))\n return tmpdir\n\n\ndef test_conf_discovery(experiment_tree):\n confs = _find_conf_files(experiment_tree.strpath)\n assert(len(list(confs)) == 10)\n for i, fh in enumerate(confs):\n assert(fh.name.endswith('conf.pkl'))\n\n\ndef test_conf_dict_load(experiment_tree):\n confs = _find_conf_files(experiment_tree.strpath)\n dicts = _read_conf_dicts(confs)\n assert(len(list(dicts)) == 10)\n","repo_name":"mattilyra/naklar","sub_path":"test/test_experiment.py","file_name":"test_experiment.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"76"} +{"seq_id":"10602472941","text":"from Bio import SeqIO,AlignIO\nfrom numpy import log2\n\n#FX\n\ndef Sobs(string, letter):\n\tlenstring = len(string)\n\tlettcount = string.count(letter)\n\tnumber = lettcount / lenstring\n\tif number != 0:\n\t\tvalue = -(number*log2(number))\n\t\t#value = number\n\telse:\n\t\tvalue = 0\n\treturn value\n\ndef bitscorefreq(string, letter, weight):\n\tlenstring = len(string)\n\tlettcount = string.count(letter)\n\tnumber = lettcount / lenstring\n\tif number != 0:\n\t\tvalue = weight*number\n\telse:\n\t\tvalue = 0\n\treturn value\n\n#files:\t\nsignalpfile = \"plastid_refs-asafind.txt\"\npredisifile = \"plastid_refs-predisi.txt\"\npredslfile = \"plastid_refs-predsl.txt\"\n\nwith open(signalpfile) as f:\n\tasafind = f.readlines()\n\nwith open(predisifile) as f:\n\tpredisi = f.readlines()\n\nwith open(predslfile) as f:\n\tpredsl = f.readlines()\n\n#parse predictions\nmonstr_dic = {}\nreport_errors = False\nprint(\"Parsing signal peptide predictions and sequences into data dictionary...\")\nnames = []\nnewnames = set()\nfor line in predisi:\n\tline = line.split(\"\\t\")\n\t#FASTA-ID\tCleavage Position\tSignal Peptide ?\tScore\n\t#[0]\t\t[1]\t\t\t\t\t[2]\t\t\t\t\t[3]\n\tname = line[0]\n\tif line[0] not in names:\n\t\tnames.append(name)\n\t\tif line[2] == \"Y\" and int(line[1]) < 101: #sometimes we see signal very far in the protein\n\t\t\tmonstr_dic[name] = {\"PrediSi site\": int(line[1])+1, \"PrediSi SPscore\": float(line[3])}\n\t\telse:\n\t\t\tmonstr_dic[name] = {\"PrediSi site\": \"-\", \"PrediSi SPscore\": float(line[3])}\n\telse:\n\t\tnewnames.add(name)\n\t\tprint(\"NEW NAME predisi! 
\" + name)\nprint(\"->PrediSi: done\")\n\nnames.sort()\n#print(names)\n\nfor line in predsl:\n\tline = line.split(\"\\t\")\n\t#sequence id\tmTP score\tSP score\tprediction\tcleavage site\n\t#[0]\t\t\t[1]\t\t\t[2]\t\t\t[3]\t\t\t[4]\t\t\t\n\tname = line[0]\n\tif name in names:\n\t\t#line[3] for nonplant prediction - complex algae\n\t\tif line[3] == \"secreted\":\n\t\t\tmonstr_dic[name].update({\"PredSL site\": int(line[4])+1, \"PredSL SPscore\": float(line[2])})\n\t\telse:\n\t\t\tmonstr_dic[name].update({\"PredSL site\": \"-\", \"PredSL SPscore\": float(line[2])})\n\telse:\n\t\tnewnames.add(name)\n\t\tprint(\"NEW NAME predsl! \" + name)\nprint(\"->PredSL: done\")\n\nfor line in asafind:\n\t#Identifier\tSignalP\tASAfind cleavage position\tASAfind/SignalP cleavage site offset\tASAfind 20aa transit score\tASAfind Prediction\n\t#[0]\t\t[1]\t\t[2]\t\t\t\t\t\t\t[3]\t\t\t\t\t\t\t\t\t\t[4]\t\t\t\t\t\t\t[5]\t\n\t\n\tif not line.startswith('#') and len(line) != 0:\n\t\tline = line.split('\\t')\n\t\tname = line[0]\n\t\tif name in names:\n\t\t\ttry:\n\t\t\t\tsp = int(line[2])\n\t\t\t\tasa = sp + int(line[3])\n\t\t\texcept ValueError:\n\t\t\t\tsp = \"-\"\n\t\t\t\tasa = \"-\"\n\t\t\tmonstr_dic[name].update({'SignalP site': sp, 'ASAfind site': asa})\n\t\telse:\n\t\t\tnewnames.add(name)\n\t\t\tprint(\"NEW NAME asafind! \" + name)\nprint(\"->ASAfind/SignalP: done\")\n\nif len(newnames) > 0:\n\tprint(\"ERROR, new sequence names appeared!\")\n\treport_errors = True\n\n#load fastas:\nfasta = SeqIO.parse(\"plastid_refs.fasta\", \"fasta\")\nseq_dic = {}\nfor seq in fasta:\n\tseq_dic[seq.name] = seq.seq\n#print(seq_dic)\n\n\n#process cleavage sites\t\nwith open(\"plastid_signals.txt\", \"w\") as out, open(\"cleavage_site.fasta\", \"w\") as outfas:\n\tagreed = 0\n\tfor name in names:\n\t\t#print(name)\n\t\tsequence = str(seq_dic[name])\n\t\tvalues = [monstr_dic[name][\"PrediSi site\"], monstr_dic[name][\"PredSL site\"], monstr_dic[name][\"SignalP site\"], monstr_dic[name][\"ASAfind site\"]]\n\t\tvalueset = set(values)\n\t\tfor i in valueset:\n\t\t\tif i != \"-\":\n\t\t\t\tif len(valueset - {\"-\"}) == 1 and values.count(i) == 2:\n\t\t\t\t\tagreed += 1\n\t\t\t\t\tprint(name, i)\n\t\t\t\t\toutfas.write(\">{}\\n{}\\n\".format(name, sequence[int(i)-6:int(i)+19]))\n\t\t\t\telif values.count(i) >=3:\n\t\t\t\t\tagreed += 1\n\t\t\t\t\tprint(name, i)\n\t\t\t\t\toutfas.write(\">{}\\n{}\\n\".format(name, sequence[int(i)-6:int(i)+19]))\n\t\t\t\n\t\tout.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(name, monstr_dic[name][\"PrediSi site\"], monstr_dic[name][\"PredSL site\"], \n\t\t\tmonstr_dic[name][\"SignalP site\"], monstr_dic[name][\"ASAfind site\"]))\n\n\tprint(agreed, \"sequences passed and written to alignment\")\n\naacids = [\"A\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"K\",\"L\",\"M\",\"N\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"V\",\"W\",\"Y\",\"X\"]\n\nalignmentcvel = AlignIO.read(\"cleavage_site_cvel.fasta\", \"fasta\")\nalignmentvbra = AlignIO.read(\"cleavage_site_vbra.fasta\", \"fasta\")\n#Rseq values calculated in Supplementary File S2 of the chromerid localizations paper\ncvel_rseq = [0.628142594, 0.901165584, 1.679998503, 0.899323492, 2.865290913, 2.930248396, 1.218694579, 1.216631242, 0.723489751, 0.796375671, 0.62329697, 0.67311194, 0.660968002, 0.832345896, 0.930850003, 0.68832463, 0.701284019, 0.768918374, 0.590580854, 0.8231282, 0.710945491, 0.88885071, 0.651382747, 0.761732523, 0.743346288]\nvbra_rseq = [0.817328158, 1.0450741, 1.916029505, 0.715175362, 3.370870006, 1.554297938, 0.863928534, 1.004913307, 
0.97219529, 0.779193055, 0.870728277, 0.732232068, 0.787914214, 1.006489747, 0.892195734, 0.821817644, 0.999711101, 0.742618301, 0.895313802, 0.755506939, 0.848577712, 0.88178165, 0.706938911, 0.867174927, 0.857195538]\n\nif cvel_rseq == []: #assuming Rseq values have not been calculated...\n\twith open(\"cvelmatrix.txt\", \"w\") as cvelmatrix, open(\"vbramatrix.txt\", \"w\") as vbramatrix:\n\t\tfor aa in aacids:\n\t\t\tcveldata = [aa]\n\t\t\tvbradata = [aa]\n\t\t\tfor i in range(25):\n\t\t\t\t#print(i)\n\t\t\t\tcveldata.append(Sobs(alignmentcvel[:, i], aa))\n\t\t\t\tvbradata.append(Sobs(alignmentvbra[:, i], aa))\n\t\t\tcvelstring = [str(x) for x in cveldata]\n\t\t\tvbrastring = [str(x) for x in vbradata]\n\t\t\tcvelmatrix.write(\"{}\\n\".format(\"\\t\".join(cvelstring)))\n\t\t\tvbramatrix.write(\"{}\\n\".format(\"\\t\".join(vbrastring)))\nelse:\n\twith open(\"cvelbitscorematrix.txt\", \"w\") as cvelmatrix, open(\"vbrabitscorematrix.txt\", \"w\") as vbramatrix:\n\t\tfor aa in aacids:\n\t\t\tcveldata = [aa]\n\t\t\tvbradata = [aa]\n\t\t\tfor i in range(25):\n\t\t\t\t#print(i)\n\t\t\t\tcveldata.append(bitscorefreq(alignmentcvel[:, i], aa, cvel_rseq[i]))\n\t\t\t\tvbradata.append(bitscorefreq(alignmentvbra[:, i], aa, vbra_rseq[i]))\n\t\t\tcvelstring = [str(x) for x in cveldata]\n\t\t\tvbrastring = [str(x) for x in vbradata]\n\t\t\tcvelmatrix.write(\"{}\\n\".format(\"\\t\".join(cvelstring)))\n\t\t\tvbramatrix.write(\"{}\\n\".format(\"\\t\".join(vbrastring)))\n\n\n","repo_name":"morpholino/PYTHON","sub_path":"signalparse_bitscorematrix.py","file_name":"signalparse_bitscorematrix.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"41583111342","text":"def find_sum_between_two_nums(a, b):\n answer = 0\n \n # b > a 인 경우 a,b를 swap\n if a > b: \n temp = b \n b = a \n a = temp\n \n for i in range(a, b+1):\n answer += i\n \n return answer","repo_name":"Hongik-Human-Developer/algorithm_study","sub_path":"sungjun/day1/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"13397670772","text":"class TreeNode:\r\n def __init__(self, x):\r\n self.val = x\r\n self.left = None\r\n self.right = None\r\n\r\nclass Solution:\r\n # def searchBST(self, root: TreeNode, val: int) -> TreeNode:\r\n # if root.val == val: return True, root\r\n # if root.left is None or root.right is None: return False, None\r\n\r\n # if not root.left is None or root.right is None:\r\n # flag, ans = self.searchBST(root.left, val) or self.searchBST(root.right, val)\r\n\r\n # return ans\r\n\r\n def searchBST(self, root: TreeNode, val: int) -> TreeNode:\r\n if root is None: return None\r\n\r\n if root.val == val:\r\n return root\r\n elif root.val > val:\r\n return self.searchBST(root.left, val)\r\n elif root.val < val:\r\n return self.searchBST(root.right, val)\r\n\r\n # if root and val < root.val: return self.searchBST(root.left, val)\r\n # elif root and val > root.val: return self.searchBST(root.right, val)\r\n # return root\r\n\r\n\r\n # if root and root.val == val:\r\n # return root\r\n # if root and root.val > val:\r\n # return self.searchBST(root.left, val)\r\n # elif root and root.val < val:\r\n # return self.searchBST(root.right, val)\r\n\r\n # import pdb; pdb.set_trace()\r\n # return None\r\n\r\nt = TreeNode(4)\r\nt.left = TreeNode(2)\r\nt.right = TreeNode(7)\r\nt.left.left = 
TreeNode(1)\r\nt.left.right = TreeNode(3)\r\n\r\nprint(Solution().searchBST(t, 2))\r\nprint(Solution().searchBST(t, 5))\r\n\r\nt = TreeNode(40)\r\nt.left = TreeNode(20)\r\nt.right = TreeNode(60)\r\nt.left.left = TreeNode(10)\r\nt.left.right = TreeNode(30)\r\nt.right.left = TreeNode(50)\r\nt.right.right = TreeNode(70)\r\nprint(Solution().searchBST(t, 25))\r\nprint(Solution().searchBST(t, 70))","repo_name":"takin6/algorithm-practice","sub_path":"leetcode/recursion/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33804968343","text":"#Con el siguiente diccionario, debes crear un programa que pregunte al usuario por un número; el programa debe imprimir el jugador al que hace referencia ese número\n\n'''{\n\n 1 : \"Casillas\", 15 : \"Ramos\",\n\n 3 : \"Pique\", 5 : \"Puyol\",\n\n 11 : \"Capdevila\", 14 : \"Xabi Alonso\",\n\n 16 : \"Busquets\", 8 : \"Xavi Hernandez\",\n\n 18 : \"Pedrito\", 6 : \"Iniesta\",\n\n 7 : \"Villa\"\n\n}'''\n\ndiccionarioJugadores={1 : \"Casillas\", 15 : \"Ramos\",3 : \"Pique\", 5 : \"Puyol\",11 : \"Capdevila\", 14 : \"Xabi Alonso\",16 : \"Busquets\", 8 : \"Xavi Hernandez\",18 : \"Pedrito\", 6 : \"Iniesta\",7 : \"Villa\"}\n\njugador= int(input(\"Introduce el numero del jugador: \"))\n\nif jugador in diccionarioJugadores:\n print(\"El jugador con el numero {} es: {}\" .format(jugador, diccionarioJugadores.get(jugador)))\nelse: \n print(\"No existe ningun jugador con el numero introducido\")","repo_name":"silviagomin/python","sub_path":"EstructuraDatos/Diccionarios/Ejercicio2.py","file_name":"Ejercicio2.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72922112244","text":"# 문제 링크: https://leetcode.com/problems/check-if-all-1s-are-at-least-length-k-places-away/\n\nclass Solution:\n def kLengthApart(self, nums: List[int], k: int) -> bool:\n if 1 not in nums or k == 0:\n return True\n\n prev = nums.index(1)\n for i in range(prev + 1, len(nums)):\n if nums[i] == 0:\n continue\n\n if i - prev <= k:\n return False\n\n prev = i\n\n return True","repo_name":"jamesujeon/coding-problem-solutions","sub_path":"leetcode/python 3/1437.py","file_name":"1437.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"1321413419","text":"import os\nimport json\nimport pynput\nfrom .pressed_keys import PressedKeys\nimport shutil\n\n\n# Classes for settings component of the program\n\n\nclass Settings:\n def __init__(self, settings_file_location):\n self.settings_file_location = settings_file_location\n self.settings = self.load_or_create()\n self.save()\n\n def load_or_create(self):\n try:\n settings = self.load()\n except FileNotFoundError:\n settings = self.create()\n except json.decoder.JSONDecodeError:\n settings = self.create()\n return settings\n\n def load(self):\n with open(self.settings_file_location, \"r\") as f:\n settings = json.load(f)\n return settings\n\n def save(self):\n with open(self.settings_file_location, \"w\") as f:\n json.dump(self.settings, f, indent=4, sort_keys=True)\n\n def create(self):\n pass\n\n\nclass HotkeySettings(Settings):\n def __init__(self, settings_file_location=\"hotkey_settings.json\"):\n super().__init__(settings_file_location=settings_file_location)\n\n def create(self):\n # Each hotkey is a PressedKey object\n 
next_hotkey = PressedKeys()\n next_hotkey.add_modifier(pynput.keyboard.Key.up)\n previous_hotkey = PressedKeys()\n previous_hotkey.add_modifier(pynput.keyboard.Key.down)\n mute_hotkey = PressedKeys()\n mute_hotkey.add_modifier(pynput.keyboard.Key.left)\n settings = {\n \"next\": next_hotkey.hashed,\n \"previous\": previous_hotkey.hashed,\n \"mute\": mute_hotkey.hashed\n }\n return settings\n\n def set_hotkey(self, which, hotkey: PressedKeys):\n self.settings[which] = hotkey.hashed\n\n def get_next_hotkey(self):\n return PressedKeys(self.settings[\"next\"])\n\n def get_previous_hotkey(self):\n return PressedKeys(self.settings[\"previous\"])\n\n def get_mute_hotkey(self):\n return PressedKeys(self.settings[\"mute\"])\n\n\nclass InternalSettings(Settings):\n def __init__(self, settings_file_location=\"internal_settings.json\"):\n super().__init__(settings_file_location=settings_file_location)\n self.sounds_dir = self.settings[\"sounds_dir\"]\n self.presets_dir = self.settings[\"presets_dir\"]\n\n def create(self):\n settings = {\n # 2 folders in 'stimulant_noise' folder\n \"sounds_dir\": \"sounds\",\n \"presets_dir\": \"presets\"\n }\n return settings\n\n\nclass PresetSettings(Settings):\n def __init__(self, settings_file_location=None, internal_settings=None):\n if settings_file_location is None:\n settings_file_location = os.path.join(internal_settings.presets_dir,\n f\"Preset_{len(os.listdir(internal_settings.presets_dir))}.json\")\n self.name = os.path.splitext(os.path.basename(settings_file_location))[0]\n if internal_settings is None:\n internal_settings = InternalSettings()\n self.internal_settings = internal_settings\n super().__init__(settings_file_location=settings_file_location)\n\n def create(self):\n settings = {\n \"name\": self.name,\n \"volume\": 50,\n \"mute\": True,\n \"sounds\": {\n },\n }\n return settings\n\n def add_sound(self, path, volume=0.5, mute=True):\n sound_name = os.path.basename(path)\n if sound_name in self.settings[\"sounds\"]:\n raise Exception(\"Sound already exists\")\n self.settings[\"sounds\"][sound_name] = {\n \"path\": path,\n \"volume\": volume,\n \"mute\": mute,\n }\n return sound_name\n\n def remove_sound(self, sound_name):\n if sound_name not in self.settings[\"sounds\"]:\n raise Exception(\"Sound does not exist\")\n del self.settings[\"sounds\"][sound_name]\n return self.settings[\"sounds\"]\n\n def set_volume(self, volume):\n self.settings[\"volume\"] = volume\n return volume\n\n def set_mute(self, mute):\n self.settings[\"mute\"] = mute\n return mute\n\n def set_name(self, name):\n self.settings[\"name\"] = name\n return name\n\n def set_sound_volume(self, sound_name, volume):\n if sound_name not in self.settings[\"sounds\"]:\n return False\n self.settings[\"sounds\"][sound_name][\"volume\"] = volume\n return True\n\n def set_sound_mute(self, sound_name, mute):\n if sound_name not in self.settings[\"sounds\"]:\n return False\n self.settings[\"sounds\"][sound_name][\"mute\"] = mute\n return True\n\n\nclass PresetsManagerSettings(Settings):\n def __init__(self, settings_file_location=\"presets_settings.json\", internal_settings=None):\n if internal_settings is None:\n internal_settings = InternalSettings()\n self.internal_settings = internal_settings\n super().__init__(settings_file_location=settings_file_location)\n\n def create(self):\n settings = {\n \"presets\": {\n # \"preset_name\": preset_path\n },\n \"presets_order\": [],\n \"current_preset\": \"None\",\n }\n # Check if the presets directory exists. 
If not, create it.\n if not os.path.exists(self.internal_settings.presets_dir):\n os.mkdir(self.internal_settings.presets_dir)\n # Check if the directory is empty. If so, create a default preset.\n if len(os.listdir(self.internal_settings.presets_dir)) == 0:\n PresetSettings(internal_settings=self.internal_settings)\n\n for preset in os.listdir(self.internal_settings.presets_dir):\n preset_path = os.path.join(self.internal_settings.presets_dir, preset)\n preset_name = PresetSettings(preset_path, internal_settings=self.internal_settings).settings[\"name\"]\n settings[\"presets\"][preset_name] = preset_path\n settings[\"presets_order\"].append(preset_name)\n\n settings[\"current_preset\"] = settings[\"presets_order\"][0]\n\n return settings\n\n def add_preset(self, preset_name, to_order=True):\n if preset_name in self.settings[\"presets\"]:\n raise ValueError(\"Preset with name {} already exists\".format(preset_name))\n preset_path = os.path.join(self.internal_settings.presets_dir, preset_name + \".json\")\n PresetSettings(preset_path, internal_settings=self.internal_settings)\n self.settings[\"presets\"][preset_name] = preset_path\n if to_order:\n self.settings[\"presets_order\"].append(preset_name)\n\n def remove_preset(self, preset_name):\n os.remove(self.settings[\"presets\"][preset_name])\n del self.settings[\"presets\"][preset_name]\n self.settings[\"presets_order\"].remove(preset_name)\n\n def set_current_preset(self, preset_name):\n if preset_name not in self.settings[\"presets\"].keys():\n raise ValueError(\"Preset does not exist\")\n self.settings[\"current_preset\"] = preset_name\n return self.settings[\"current_preset\"]\n\n def change_preset_order(self, preset_name, after_preset_name):\n if preset_name not in self.settings[\"presets\"]:\n raise ValueError(\"Preset does not exist\")\n if after_preset_name not in self.settings[\"presets\"]:\n raise ValueError(\"Preset does not exist\")\n self.settings[\"presets_order\"].remove(preset_name)\n self.settings[\"presets_order\"].insert(self.settings[\"presets_order\"].index(after_preset_name) + 1, preset_name)\n return self.settings[\"presets_order\"]\n\n def change_preset_name(self, preset_name, new_preset_name):\n if preset_name not in self.settings[\"presets\"]:\n raise ValueError(\"Preset does not exist\")\n if new_preset_name in self.settings[\"presets\"]:\n raise ValueError(f\"Preset with name {new_preset_name} already exists\")\n if preset_name == self.settings[\"current_preset\"]:\n self.settings[\"current_preset\"] = new_preset_name\n preset_path = self.settings[\"presets\"][preset_name]\n shutil.copy(preset_path, os.path.join(self.internal_settings.presets_dir, new_preset_name + \".json\"))\n preset_index = self.settings[\"presets_order\"].index(preset_name)\n self.remove_preset(preset_name)\n new_preset_settings = PresetSettings(os.path.join(self.internal_settings.presets_dir,\n new_preset_name + \".json\"),\n internal_settings=self.internal_settings)\n new_preset_settings.set_name(new_preset_name)\n new_preset_settings.save()\n self.settings[\"presets\"][new_preset_name] = os.path.join(self.internal_settings.presets_dir,\n new_preset_name + \".json\")\n\n self.settings[\"presets_order\"].insert(preset_index, new_preset_name)\n return new_preset_name\n","repo_name":"teleoflexuous/Noise-Stimulant","sub_path":"stimulant_noise/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8935,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} 
+{"seq_id":"33775448211","text":"import cv2\nimport numpy as np\n\np = r\"C:\\Users\\bhask\\Downloads\\wallpaperflare.com_wallpaper.jpg\"\nc = cv2.VideoCapture(p)\n\np1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])\np2 = np.float32([[100, 50], [300, 0], [0, 300], [300, 300]])\nm = cv2.getPerspectiveTransform(p1, p2)\n\nwhile True:\n ret, r = c.read()\n if not ret:\n break\n\n r1 = cv2.resize(r, (700, 700))\n d = cv2.warpPerspective(r1, m, (700, 700))\n cv2.imshow(\"a\", d)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nc.release()\ncv2.destroyAllWindows()\n","repo_name":"sbhaskar03/COMPUTER-VISION","sub_path":"program13.py","file_name":"program13.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19599699658","text":"#Problem : https://www.acmicpc.net/problem/2800\n#Comment : 풀줄 몰라서 다른사람 풀이 보고 이해함\n\nfrom itertools import combinations\n# 문자열을 리스트로 저장\nproblem=[*input().strip()]\np, idx_brs=[],[]\n\nfor i,v in enumerate(problem):\n if v=='(':\n #문자열에서 ( 를 제거\n problem[i]=''\n # (가 있었던 index를 저장\n p+=[i]\n if v==')':\n #문자열에서 )를 제거\n problem[i]=''\n #p에 저장되어있던 ( 의 위치와 맞는 )의 위치를 idx_brs에 저장\n idx_brs+=[[p.pop(),i]] \n#print(problem)\n#print(idx_brs)\n\n#이거는 중복을 피하니깐 사용한듯\nout=set()\n\n#combinations함수를 이용해서 순열을 만든다.\nfor i in range(len(idx_brs)):\n for j in combinations(idx_brs,i):\n #print(j)\n P=problem[:]\n #print(P)\n for v,w in j:\n P[v]='('\n P[w]=')'\n out.add(''.join(P))\n #print(out)\n\nfor i in sorted(out):\n print(i)","repo_name":"bn-tw2020/2020_winter_algorithm","sub_path":"participants/SaeHyeon/Algorithms/Data_structure/BOJ2800.py","file_name":"BOJ2800.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"73894229366","text":"class car():\n def __init__(self,name,cost,colour,length):\n self.name = name\n self.cost = cost\n self.colour = colour\n self.length = length\n print(\"name :\",self.name)\n print(\"cost :\",self.cost)\n print(\"colour :\",self.colour)\n print(\"length :\",self.length)\n def disc(self):\n if(self.length > 4):\n print(\"you got a 10% discount!!\\n\")\n print(\"so your price is now\",self.cost - self.cost*0.1)\n \nstring = input(\"enter a name : \")\nc = int(input(\"enter the cost : \"))\nstring1 = input(\"enter colour : \")\nl = int(input(\"enter the length : \"))\n\na = car(string,c,string1,l)\na.disc()\n","repo_name":"manastole03/Programming-practice-2","sub_path":"python 3/day 09/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"12708344817","text":"# [문제]\n# 0에서 9까지 숫자가 적힌 N장의 카드가 주어진다.\n# 가장 많은 카드에 적힌 숫자와 카드가 몇 장인지 출력하는 프로그램을 만드시오.\n# 카드 장수가 같을 때는 적힌 숫자가 큰 쪽을 출력한다.\n\n# [입력]\n# 첫 줄에 테스트 케이스 개수 T가 주어진다. ( 1 ≤ T ≤ 50 )\n# 다음 줄부터 테스트케이스의 첫 줄에 카드 장수 N이 주어진다. ( 5 ≤ N ≤ 100 )\n# 다음 줄에 N개의 숫자 ai가 여백없이 주어진다. (0으로 시작할 수도 있다.) ( 0 ≤ ai ≤ 9 ) \n\n# [출력]\n# 각 줄마다 \"#T\" (T는 테스트 케이스 번호)를 출력한 뒤, 가장 많은 카드의 숫자와 장 수를 차례로 출력한다.\n\n# [입력]\n# 3\n# 5\n# 49679\n# 5\n# 08271\n# 10\n# 7797946543\n\n# [출력]\n# #1 9 2\n# #2 8 1\n# #3 7 3\n\n\n# [문제풀이]\n# 1. 0~9의 한자리 숫자가 적힌 N장의 카드가 주어지며 0~9까지 각 자리수마다 여러 장이 있을 수 있다.\n# 2. 따라서 각 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 의 각 장 수를 입력받을 수 있는 리스트를 만들어\n# 2-1. 리스트[0] = 숫자 0의 장 수 이도록 하면 편리하게 만들 수 있다.\n# 3. N개의 숫자 ai가 여백없이 주어지므로 해당 숫자로 받는 문자열을 분리 시켜줄 필요가 있다.\n# 4. 
각 카드의 장수가 들어있는 리스트를 만들어서 해당 리스트에 각 카드의 장수를 집어넣어준다.\n# 5. 반복문을 통해 각 카드의 장수가 가장 많은 인덱스를 찾아내고, 해당 인덱스가 카드의 번호이므로 인덱스, 리스트[인덱스]로 출력한다.\n\nT = int(input())\n\nfor testcase in range(1, T+1): # 1~T만큼 testcase 반복 실행\n N = int(input()) # N 은 카드 장 수\n num_list = list(map(int, input())) # 카드에 적혀 있는 숫자들을 리스트화\n card_nums = [0]*10 # 인덱스가 총 10개인 리스트\n for i in range(N): # 반복문 범위는 0~(N-1)까지이므로 num_list의 숫자 활용 가능\n card_nums[num_list[i]] += 1 # num_list[i]에 있는 숫자 예로 6이라면 card_nums[6]에 +1을 한다.\n # 참고로 card_nums = [0,0,0,0,0,0,0,0,0,0]으로 [0]~[9]로 이루어져있다.\n many_num = 0 # 추후 이용하기 위해 가장 많은 장수 할당\n solve_num = 0 # 추후 이용하기 위해 가장 많은 장수의 카드 번호를 할당\n for j in range(len(card_nums)-1, -1, -1): # card_nums의 길이인 10에서 j를 인자로 하는 0~9의 반복문을 돌린다. \n # for j in range(len(card_nums))를 사용하려 했으나 #2 8 1이 나와야 해서 역순 출력했다.\n if card_nums[j] > many_num: # 해당 번호의 카드 장 수가 현재까지 나온 가장 많은 장 수 보다 많다면\n many_num = card_nums[j] # 해당 번호의 카드 장 수를 가장 많은 장 수로 입력\n solve_num = j\n print(f'#{testcase} {solve_num} {many_num}')","repo_name":"KimBeomGi/STUDYduringSSAFY","sub_path":"python/수업/1일차-숫자카드.py","file_name":"1일차-숫자카드.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"40346437777","text":"import Adafruit_DHT as dht\nimport RPi.GPIO as gpio\nimport time as delay\nfrom app import app\nfrom flask import render_template\n\n\ngpio.setmode(gpio.BOARD)\ngpio.setwarnings(False)\n\nledvermelho = 11\nledverde = 12\npin_dht = 4\npin_t = 15 \npin_e = 16\nlixeira_v = 20\n\ndht_sensor = dht.DHT11\n\nstsledred = ''\nstsledgreen = ''\n\ngpio.setup(ledvermelho, gpio.OUT)\ngpio.setup(ledverde, gpio.OUT)\ngpio.setup(pin_t, gpio.OUT)\ngpio.setup(pin_e, gpio.IN)\n\ngpio.output(ledvermelho, gpio.LOW)\ngpio.output(ledverde, gpio.LOW)\n\ndef statusledvermelho():\n if gpio.input(ledvermelho) == 1:\n statusledvermelho = 'LED vermelho ON'\n else:\n statusledvermelho = 'LED vermelho OFF' \n return statusledvermelho\n\ndef statusledverde():\n if gpio.input(ledverde) == 1:\n statusledverde = 'LED verde ON'\n else:\n statusledverde = 'LED verde OFF'\n return statusledverde\n\ndef umid_temp():\n umid, temp = dht.read(dht_sensor, pin_dht)\n if umid is not None:\n umidade = ('{0:0.0f}%'.format(umid))\n else:\n umidade = 'Erro ao ler sensor'\n if temp is not None:\n temperatura = ('{0:0.0f}*C'.format(temp, umid))\n else:\n temperatura = 'Erro ao ler sensor'\n return umidade, temperatura\n\ndef ocupacao_lixeira():\n gpio.output(pin_t, True)\n delay.sleep(0.000001)\n gpio.output(pin_t, False)\n tempo_i = delay.time()\n tempo_f = delay.time()\n \n while gpio.input(pin_e) == False:\n tempo_i = delay.time()\n while gpio.input(pin_e) == True:\n tempo_f = delay.time()\n \n tempo_d = tempo_f - tempo_i\n \n distancia = (tempo_d*34300)/2\n\n ocupacao_l = (distancia/lixeira_v)*100\n ocupacao_f = 100 - ocupacao_l\n ocupacao_lixeira = ('{0:0.0f}%'.format(ocupacao_f))\n return ocupacao_lixeira\n\n@app.route(\"/\")\ndef index():\n templateData = {\n 'ledred' : statusledvermelho(),\n 'ledgreen' : statusledverde(),\n 'umid' : umid_temp()[0],\n 'temp' : umid_temp()[1],\n 'ocup_lixeira' : ocupacao_lixeira(),\n }\n return render_template('index.html', **templateData)\n\n@app.route(\"/led_vermelho/\")\ndef led_vermelho(action):\n if action == 'on':\n gpio.output(ledvermelho, gpio.HIGH)\n if action == 'off':\n gpio.output(ledvermelho, gpio.LOW)\n\n templateData = {\n 'ledred' : statusledvermelho(),\n 'ledgreen' : statusledverde(),\n 'umid' : umid_temp()[0],\n 'temp' : umid_temp()[1],\n 
'ocup_lixeira' : ocupacao_lixeira(),\n }\n return render_template('index.html', **templateData)\n\n@app.route(\"/led_verde/\")\ndef led_verde(action):\n if action == 'on':\n gpio.output(ledverde, gpio.HIGH)\n if action == 'off':\n gpio.output(ledverde, gpio.LOW)\n \n templateData = {\n 'ledred' : statusledvermelho(),\n 'ledgreen' : statusledverde(),\n 'umid' : umid_temp()[0],\n 'temp' : umid_temp()[1],\n 'ocup_lixeira' : ocupacao_lixeira(),\n }\n return render_template('index.html', **templateData)","repo_name":"GiovaniMarcarini/Aula_Flask_IoT---TI26S-2022","sub_path":"app/controllers/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5746829115","text":"from django.conf.urls import url\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom . import views\n# These are all the available urls for django\n# Add all the urls that belong to drug_search here\nurlpatterns = [\n url(r'^$', views.initialize, name=\"start\"),\n url(r'browse',views.browse,name=\"browse\"),\n url(r'grab_chip_seq', views.grab_chip_seq,name=\"grab_chip_seq\"),\n url(r'grab_snp_data', views.grab_snp_data,name=\"grab_snp_data\"),\n url(r'get_phenotypes',views.get_phenotypes,name=\"get_phenotypes\"),\n url(r'test', views.test,name=\"test\")\n]\n#This last part is important so django can find static files in the static folder\nurlpatterns += staticfiles_urlpatterns()","repo_name":"danich1/Non-Coding-Variation-Browser","sub_path":"nc_snp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42039728218","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.db import connections\nfrom django.template.loader import get_template, render_to_string\nfrom django import template\n\ndef index(request):\n message = \"Salut tout le monde !\"\n return HttpResponse(message)\n\n\n\ndef search(request):\n #price_lte = request.GET\n # body = [\n # {'title' : 'First Post', 'body' : 'z'},\n # {'title': 'First Post', 'body': 'a'},\n # ]\n\n c = connections['default'].cursor()\n c.execute(\"\"\"\n SELECT\n musiques.titre,\n artistes.nom,\n musiques.featuring,\n types.nom\n FROM\n musiques\n INNER JOIN artistes ON musiques.artiste_id = artistes.id \n INNER JOIN albums ON musiques.album_id = albums.id\n INNER JOIN types ON musiques.type_id = types.id; \n \"\"\")\n rows = c.fetchall()\n\n print(rows[0])\n\n body = render_to_string('pages/search.html', {'querry_musiques':rows})\n\n return render(request, 'blueunicorn_tailwind/pannel.html', {'body':body})","repo_name":"wiloker/djangoProject","sub_path":"djangoProject/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"25542880887","text":"\n\n\ndef fibonacci_memo(n, memo = {0:0, 1:0, 2:1}):\n\n if n in memo:\n return memo[n]\n else:\n memo[n] = fibonacci_memo(n-1, memo) + fibonacci_memo(n-2, memo)\n return memo[n]\n\n\ndef fibonacci_iter(n):\n\n last = 0\n secondLast = 1\n counter = 2\n\n while(counter <= n):\n currFibbo = last + secondLast\n last = secondLast\n secondLast = currFibbo\n counter += 1\n\n return secondLast if n > 1 else 
last\n\n\n\n\nprint(fibonacci_memo(10))\n\nprint(fibonacci_iter(10))\n","repo_name":"Vaibhav3M/Coding-challenges","sub_path":"6-NthFibonacci/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18662791493","text":"'''\nDescripción: Analizador de protocolos\nAutores:\n David Armando Rodríguez Varón - 20181020041\n Juan Sebastián Sanchez Tabares - 20181020008\n Johan Sneider Mendez Vega - 20172020070\n Juan Sebastián Mancera Gaitán - 20171020047\n'''\n\nimport socket\nimport struct\nimport textwrap\nimport binascii\n\n'''\nConstantes para organizar la información\n'''\nTAB_1 = '\\t - '\nTAB_2 = '\\t\\t - '\nTAB_3 = '\\t\\t\\t - '\nTAB_4 = '\\t\\t\\t\\t - '\n\nDATA_TAB_1 = '\\t '\nDATA_TAB_2 = '\\t\\t '\nDATA_TAB_3 = '\\t\\t\\t '\nDATA_TAB_4 = '\\t\\t\\t\\t '\n\ndef main():\n #Ultimo argumento verifica que sea compatible entre todos los dispositivos\n conn = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))\n #ARP\n connarp = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x003))\n\n while True: # Mientras reciba paquetes\n raw_data, addr = conn.recvfrom(65536)\n dest_mac, src_mac, eth_proto, data = capture_packages(raw_data)\n if eth_proto != 1544:\n print('\\nPaquete :')\n print('Destino: {}, Origen: {}, Protocolo: {}'.format(dest_mac, src_mac,\n eth_proto))\n # 8 / IP\n if eth_proto == 8:\n (version, header_length, ttl, proto, src, target, data) = ip_packet(data)\n print(TAB_1 + 'Paquete IP: ')\n print(TAB_2 + 'Versión {}, Longitud del encabezado: {}, TTL: {}'.format(version, header_length, ttl))\n print(TAB_2 + 'Protocolo {}, Fuente: {}, Destino: {}'.format(proto, src, target))\n\n #ICMP\n if proto == 1:\n icmp_type, code, checksum, data = icmp_packet(data)\n print(TAB_1 + 'Paquete ICMP: ')\n print(TAB_2 + 'Tipo: {}, Código: {}, Checksum: {}, '.format(icmp_type, code, checksum))\n print(TAB_2 + 'Datos: ')\n print(format_multi_line(DATA_TAB_3, data))\n #TCP\n elif proto == 6:\n src_port, dest_port, sequence, acknowledgement, flag_urg, flag_ack, flag_psh, flag_rst, flag_syn, flag_fin, data = tcp_segment(data)\n print(TAB_1 + 'Segmento TCP: ')\n print(TAB_2 + 'Puerto de origen: {}, Puerto de destino: {}, '.format(src_port, dest_port))\n print(TAB_2 + 'Secuencia: {}, Acknowlodegment: {}, '.format(sequence, acknowledgement))\n print(TAB_2 + 'Banderas: ')\n print(TAB_3 + 'URG: {}, ACK, {}, PSH: {}, RST: {}, SYN: {}, FIN: {}'.format(flag_urg, flag_ack,\n flag_psh, flag_rst, flag_syn, flag_fin))\n print(TAB_2 + 'Datos: ')\n print(format_multi_line(DATA_TAB_3, data))\n #UDP\n elif proto == 17:\n src_port, dest_port, length, data = udp_segment(data)\n print(TAB_1 + 'Segmento UDP:')\n print(TAB_2 + 'Puerto de origen: {}, Puerto de destino: {}, longitud: {}'.format(src_port, dest_port,\n length))\n print(TAB_2 + 'Datos: ')\n print(format_multi_line(DATA_TAB_3, data))\n #Otro\n else:\n print(TAB_1 + 'Datos: ')\n print(format_multi_line(DATA_TAB_1, data)) \n \n elif eth_proto != 1544:\n print(TAB_1 + 'Datos: ')\n print(format_multi_line(DATA_TAB_1, data))\n\n paquete_arp = connarp.recvfrom(2048)\n ethernet_header = paquete_arp[0][:14]\n ethernet_detalles = struct.unpack('!6s6s2s', ethernet_header)\n\n cabecera_arp = paquete_arp[0][14:42]\n arp_detalles = struct.unpack('2s2s1s1s2s6s4s6s4s', cabecera_arp)\n ethertype = ethernet_detalles[2]\n\n #Paquete ARP\n if ethertype == b'\\x08\\x06':\n print('\\nPaquete ARP:')\n print(TAB_1 + 'Tipo 
de hardware: {}, Tipo de protocolo: {}'.format(str(binascii.hexlify(arp_detalles[0]), 'utf-8'),\n str(binascii.hexlify(arp_detalles[1]), 'utf-8')))\n print(TAB_1 + 'Tamaño del hardware: {}, Tamaño del protocolo: {}, opcode: {}'.format(str(binascii.hexlify(arp_detalles[2]), 'utf-8'),\n str(binascii.hexlify(arp_detalles[3]), 'utf-8'),\n str(binascii.hexlify(arp_detalles[4]), 'utf-8')))\n print(TAB_1 + 'Dirección MAC origen: {}, Dirección IP origen: {}'.format(str(binascii.hexlify(arp_detalles[5]), 'utf-8'),\n socket.inet_ntoa(arp_detalles[6])))\n print(TAB_1 + 'Dirección MAC destino: {}, Dirección IP destino: {}'.format(str(binascii.hexlify(arp_detalles[7]), 'utf-8'),\n socket.inet_ntoa(arp_detalles[8])))\n \ndef capture_packages(data):\n '''\n Obtiene la información del paquete\n ---\n Sync -- Sincroniza el dispositivo y el router\n Receiver -- Quien lo recibe\n Sender -- Quien lo envia\n Type -- IP4, IP6, ARP, etc ...\n Payload -- (IP/ARP frame + padding), datos\n CRC -- manejo de errores, se asegura de que se reciba la información correctamente\n ---\n :param data: paquete\n :return: direcciones mac de destino, origen, tipo de protocolo y payload\n '''\n destination, source, protocol = struct.unpack('! 6s 6s H', data[:14])\n return get_mac_addr(destination), get_mac_addr(source), socket.htons(protocol), data[14:]\n\ndef get_mac_addr(bytes_addr):\n '''\n Pasa la dirección mac a formato legible\n :param bytes_addr: dirección mac en bytes\n :return: dirección mac en formato legible\n '''\n bytes_string = map('{:02x}'.format, bytes_addr)\n return ':'.join(bytes_string).upper()\n\ndef ip_packet(data):\n '''\n Información que viene antes del payload\n ---\n Version\n IHL -- Longitud del encabezado\n TTL -- Time To Live\n Procol -- protocolo usado TCP, UDP etc \n Source address -- ip de origen\n Destination address -- ip de destino\n ---\n :param data: paquete ip\n :return version, header_length, ttl, protocol, source ip, target ip, payload\n '''\n version_header_length = data[0]\n version = version_header_length >> 4 #Movimiento hacia la derecha\n header_length = (version_header_length & 15) * 4\n ttl, proto, src, target = struct.unpack('! 8x B B 2x 4s 4s', data[:20])\n\n return version, header_length, ttl, proto, ipv(src), ipv(target), data[header_length:]\n\ndef ipv(addr):\n '''\n Pasa la dirección ip a formato legible\n :param addr: dirección ip en bytes\n :return dirección ip en formato XXX.XXX.X.X\n '''\n return '.'.join(map(str, addr))\n\ndef icmp_packet(data):\n '''\n Obtiene la información para el protocolo ICMP\n :param data: payload de tipo ICMP\n :return tipo de icmp, code, checksum, información del paquete\n '''\n icmp_type, code, checksum = struct.unpack('! B B H', data[:4])\n\n return icmp_type, code, checksum, data[4:]\n\n\ndef tcp_segment(data):\n '''\n Obtiene la información para el protocolo TCP/IP\n :param data: datos de tipo TCP/IP\n :return puerto de origen, puerto de destino, sequence, acknowledgement, banderas, datos\n '''\n (source_port, dest_port, sequence, acknowledgement, offset_reserved_flags) = struct.unpack(\n '! 
H H L L H', data[:14])\n\n    offset = (offset_reserved_flags >> 12) * 4\n    bandera_urg = (offset_reserved_flags & 32) >> 5\n    bandera_ack = (offset_reserved_flags & 16) >> 4\n    bandera_psh = (offset_reserved_flags & 8) >> 3\n    bandera_rst = (offset_reserved_flags & 4) >> 2\n    bandera_syn = (offset_reserved_flags & 2) >> 1\n    bandera_fin = offset_reserved_flags & 1\n    \n    return source_port, dest_port, sequence, acknowledgement, bandera_urg, bandera_ack, bandera_psh, bandera_rst, bandera_syn, bandera_fin, data[offset:]\n\ndef udp_segment(data):\n    '''\n    Extracts the fields of the UDP protocol\n    :param data: UDP payload\n    :return source port, destination port, size, packet data\n    '''\n    source_port, dest_port, size = struct.unpack('! H H 2x H', data[:8])\n    return source_port, dest_port, size, data[8:]\n\n#Formats multi-line data\ndef format_multi_line(prefix, string, size= 80):\n    '''\n    Indents lines of long strings\n    :param prefix: prefix\n    :param string: data\n    :param size: size\n    :return indented information\n    '''\n    size -= len(prefix)\n    if isinstance(string, bytes):\n        string = ''.join(r'\\x{:02x}'.format(byte) for byte in string)\n        if size % 2:\n            size -= 1\n    return '\\n'.join([prefix + line for line in textwrap.wrap(string, size)])\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Darvaron/Analizador-de-protocolos-b-sico","sub_path":"sniffer_redes.py","file_name":"sniffer_redes.py","file_ext":"py","file_size_in_byte":9055,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19359194900","text":"\"\"\"CLI.\"\"\"\nimport os\nfrom os import cpu_count, getcwd, chdir, getenv\nimport datetime\nimport json\nfrom pathlib import Path\nimport click\nfrom tempfile import TemporaryDirectory\nimport toml\nfrom pkg_resources import resource_filename as pkgr_fn\n\n\ndef validate_name(ctx, param, value):\n    \"\"\"Check whether this template already exists in the Archive.\"\"\"\n    from templateflow.api import templates\n\n    value = value[4:] if value.startswith(\"tpl-\") else value\n    if value in templates():\n        raise click.BadParameter(\n            f\"A template with name {value} already exists in the Archive.\"\n        )\n    return value\n\n\ndef is_set(ctx, param, value):\n    \"\"\"Check that an argument is set.\"\"\"\n    if not value:\n        raise click.BadParameter(\n            \"Please set it explicitly or define the corresponding environment variable.\"\n        )\n    return value\n\n\n@click.group()\n@click.version_option(message=\"TF Archive manager %(version)s\")\ndef cli():\n    \"\"\"The TemplateFlow Archive manager assists you in adding and updating templates.\"\"\"\n    pass\n\n\n@cli.command()\n@click.argument(\"template_id\", callback=validate_name)\n@click.option(\"--osf-user\", envvar=\"OSF_USERNAME\", callback=is_set)\n@click.password_option(\n    \"--osf-password\",\n    envvar=\"OSF_PASSWORD\",\n    prompt=\"OSF password\",\n    confirmation_prompt=False,\n)\n@click.option(\"--osf-overwrite\", is_flag=True)\n@click.option(\n    \"--gh-user\",\n    envvar=\"GITHUB_USER\",\n)\n@click.password_option(\n    \"--gh-token\",\n    prompt=\"GitHub personal authentication token\",\n    confirmation_prompt=False,\n    envvar=\"GITHUB_TOKEN\",\n)\n@click.option(\"--path\", type=click.Path(exists=True))\n@click.option(\"-j\", \"--nprocs\", type=click.IntRange(min=1), default=cpu_count())\ndef add(\n    template_id,\n    osf_user,\n    osf_password,\n    osf_overwrite,\n    gh_user,\n    gh_token,\n    path,\n    nprocs,\n):\n    \"\"\"Add a new template.\"\"\"\n    from .io import run_command\n    from .utils 
import copy_template\n    import shutil\n    from datalad import api as dl\n\n    if not gh_user or not gh_token:\n        raise click.BadParameter(\"Insufficient secrets to log in to GitHub\")\n\n    os.environ[\"DATALAD_CREDENTIAL_GITHUB_TOKEN\"] = gh_token\n\n    path = Path(path or f\"tpl-{template_id}\").absolute()\n    cwd = Path.cwd()\n\n    if not path.exists():\n        raise click.UsageError(f\"<{path}> does not exist.\")\n\n    metadata = {}\n\n    # Check metadata\n    if (path / \"template_description.json\").exists():\n        metadata = json.loads((path / \"template_description.json\").read_text())\n    metadata[\"Identifier\"] = template_id\n\n    # Check license\n    license_path = path / \"LICENSE\"\n    if not license_path.exists():\n        license_path = path / \"LICENCE\"\n    if not license_path.exists():\n        license_path = path / \"COPYING\"\n\n    if not license_path.exists():\n        license_prompt = click.prompt(\n            text=\"\"\"\\\nA LICENSE file MUST be distributed with the template. The TemplateFlow Manager can \\\nset a license (either CC0 or CC-BY) for you.\"\"\",\n            type=click.Choice((\"CC0\", \"CC-BY\", \"Custom (abort)\")),\n            default=\"Custom (abort)\",\n        )\n        if license_prompt == \"Custom (abort)\":\n            raise click.UsageError(\n                \"Cannot proceed without a valid license. Please write a LICENSE \"\n                \"file before uploading.\"\n            )\n\n        license_path = Path(pkgr_fn(\"tfmanager\", f\"data/{license_prompt}.LICENSE\"))\n        metadata[\"License\"] = license_prompt\n\n    # Check RRID\n    if not metadata.get(\"RRID\"):\n        rrid = click.prompt(\n            text=\"Has a RRID (research resource ID) already been assigned?\",\n            type=str,\n            default=''\n        ) or None\n\n        if rrid:\n            metadata[\"RRID\"] = rrid\n\n    # Check short description\n    if not metadata.get(\"Name\", \"\").strip():\n        short_desc = click.prompt(\n            text=\"\"\"\\\nThe \"Name\" metadata is not found within the file. \\\nPlease provide a short description for this resource.\"\"\",\n            type=str,\n        )\n\n        if not short_desc:\n            raise click.UsageError(\n                \"Cannot proceed without a short description.\"\n            )\n\n        metadata[\"Name\"] = short_desc\n\n    # Check authors\n    authors_prompt = [a.strip() for a in metadata.get(\"Authors\", []) if a.strip()]\n    if not authors_prompt:\n        authors_prompt = [\n            n.strip() for n in click.prompt(\n                text=\"\"\"\\\nThe \"Authors\" metadata is not found within the file. \\\nPlease provide a list of authors separated by semicolons (;).\"\"\",\n                type=str,\n            ).split(\";\")\n            if n\n        ]\n        if not authors_prompt:\n            click.confirm(\"No authors were given, do you want to continue?\", abort=True)\n\n    metadata[\"Authors\"] = authors_prompt\n\n    # Check references\n    refs_prompt = [\n        f\"\"\"\\\n{'https://doi.org/' if not a.strip().startswith('http') else ''}\\\n{a.replace(\"doi:\", \"\").strip()}\"\"\"\n        for a in metadata.get(\"ReferencesAndLinks\", []) if a.strip()\n    ]\n    if not refs_prompt:\n        refs_prompt = [\n            n.replace('\"', \"\").strip() for n in click.prompt(\n                text=\"\"\"\\\nThe \"ReferencesAndLinks\" metadata is not found within the file. 
\\\nPlease provide a list of links and publications within double-quotes \\\n(for example, \"doi:10.1101/2021.02.10.430678\") and separated by spaces (< >).\"\"\",\n                type=str,\n            ).split(\" \")\n            if n\n        ]\n        if not refs_prompt:\n            click.confirm(\"No references were given, do you want to continue?\", abort=True)\n    metadata[\"ReferencesAndLinks\"] = refs_prompt\n\n    with TemporaryDirectory() as tmpdir:\n        repodir = Path(tmpdir) / \"templateflow\"\n\n        # Clone root /templateflow project - fork if necessary\n        click.echo(f\"Preparing Pull-Request (wd={tmpdir}).\")\n        clone = run_command(\n            f\"git clone https://github.com/{gh_user}/templateflow.git \"\n            \"--branch tpl-intake --single-branch\",\n            cwd=tmpdir,\n            capture_output=False,\n        )\n        if clone.returncode != 0:\n            run_command(\n                \"hub clone templateflow/templateflow\",\n                cwd=tmpdir,\n                capture_output=False,\n                env={\"GITHUB_TOKEN\": gh_token},\n            )\n            run_command(\n                \"hub fork --remote-name origin\",\n                cwd=str(repodir),\n                capture_output=False,\n                env={\"GITHUB_TOKEN\": gh_token},\n            )\n        else:\n            run_command(\n                \"git remote add upstream https://github.com/templateflow/templateflow.git\",\n                cwd=str(repodir),\n                capture_output=False,\n            )\n\n        chdir(repodir)\n\n        # Create datalad dataset\n        dl.create(\n            path=f\"tpl-{template_id}\",\n            cfg_proc=\"text2git\",\n            initopts={\"initial-branch\": \"main\"},\n            description=metadata[\"Name\"],\n        )\n        gitattr = (repodir / f\"tpl-{template_id}\" / \".gitattributes\").read_text().strip()\n        (repodir / f\"tpl-{template_id}\" / \".gitattributes\").write_text(\"\\n\".join([\n            gitattr,\n            \"*.gii annex.largefiles=anything\",\n            \"\",\n        ]))\n\n        # Populate template\n        copy_template(\n            path=path,\n            dest=repodir / f\"tpl-{template_id}\",\n        )\n        # Copy license\n        shutil.copy(license_path, repodir / f\"tpl-{template_id}\" / \"LICENSE\")\n        # (Over)write template_description.json\n        (repodir / f\"tpl-{template_id}\" / \"template_description.json\").write_text(\n            json.dumps(metadata, indent=2)\n        )\n        # Init/update CHANGELOG\n        changelog = repodir / f\"tpl-{template_id}\" / \"CHANGES\"\n        changes = [f\"\"\"\n## {datetime.date.today().ctime()} - TemplateFlow Manager Upload\nPopulated contents after NIfTI sanitizing by the TF Manager.\n\n\"\"\"]\n        if changelog.exists():\n            changes += [changelog.read_text()]\n        changelog.write_text(\"\\n\".join(changes))\n\n        # Init OSF sibling\n        rrid_str = f\" (RRID: {metadata['RRID']})\" if metadata.get(\"RRID\") else \"\"\n        dl.create_sibling_osf(\n            title=f\"TemplateFlow resource: <{template_id}>{rrid_str}\",\n            name=\"osf\",\n            dataset=f\"./tpl-{template_id}\",\n            public=True,\n            category=\"data\",\n            description=metadata[\"Name\"],\n            tags=[\"TemplateFlow dataset\", template_id]\n        )\n        # Init GH sibling\n        dl.create_sibling_github(\n            reponame=f\"tpl-{template_id}\",\n            dataset=str(repodir / f\"tpl-{template_id}\"),\n            publish_depends=\"osf-storage\",\n            existing=\"replace\",\n            access_protocol=\"ssh\"\n        )\n\n        # Save added contents\n        dl.save(\n            dataset=str(repodir / f\"tpl-{template_id}\"),\n            message=\"ADD: TemplateFlow Manager initialized contents\"\n        )\n\n        # Push to siblings\n        dl.push(\n            dataset=str(repodir / f\"tpl-{template_id}\"),\n            to=\"github\",\n            jobs=cpu_count(),\n        )\n\n    # Back home\n    chdir(cwd)\n\n    run_command(\n        \"git fetch upstream tpl-intake\", cwd=str(repodir), capture_output=False,\n    )\n    run_command(\n        f\"git checkout -b pr/tpl-{template_id} upstream/tpl-intake\",\n        cwd=str(repodir),\n        capture_output=False,\n    )\n    (repodir / f\"{path.name}.toml\").write_text(\n        toml.dumps({\"github\": {\"user\": gh_user},})\n    )\n    
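# The intake TOML records who contributed the template; the commit and push\n    # below feed the pull request that is opened against the tpl-intake branch.\n    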
run_command(\n        f\"git add {path.name}.toml\", cwd=str(repodir), capture_output=False,\n    )\n    run_command(\n        f\"git commit -m 'add(tpl-{template_id}): create intake file'\",\n        cwd=str(repodir),\n        capture_output=False,\n    )\n    run_command(\n        f\"git push -u origin pr/tpl-{template_id}\",\n        cwd=str(repodir),\n        capture_output=False,\n        env={\"GITHUB_USER\": gh_user, \"GITHUB_TOKEN\": gh_token},\n    )\n\n    (repodir.parent / \"message.md\").write_text(\n        f\"\"\"\\\nADD: ``tpl-{template_id}``\n\n## {metadata.get('Name', '')}\n\nIdentifier: {metadata.get('Identifier', '')}\nDatalad: https://github.com/{gh_user}/tpl-{template_id}\n\n### Authors\n{', '.join(metadata['Authors'])}.\n\n### License\n{metadata.get('License', metadata.get('Licence', ''))}\n\n### Cohorts\n{' '.join(('The dataset contains', str(len(metadata.get('cohort', []))), 'cohorts.'))\n    if metadata.get('cohort') else 'The dataset does not contain cohorts.'}\n\n### References and links\n{', '.join(metadata.get('ReferencesAndLinks', [])) or 'N/A'}\n\n\"\"\"\n    )\n    run_command(\n        \"hub pull-request -b templateflow:tpl-intake \"\n        f\"-h {gh_user}:pr/tpl-{template_id} \"\n        f\"-F {repodir.parent / 'message.md'}\",\n        cwd=str(repodir),\n        capture_output=False,\n        env={\"GITHUB_TOKEN\": gh_token},\n    )\n\n    del os.environ[\"DATALAD_CREDENTIAL_GITHUB_TOKEN\"]\n\n\n@cli.command()\n@click.argument(\"template_id\", callback=validate_name)\n@click.option(\"--osf-project\", envvar=\"OSF_PROJECT\", callback=is_set)\n@click.option(\"--osf-user\", envvar=\"OSF_USERNAME\", callback=is_set)\n@click.password_option(\n    \"--osf-password\",\n    envvar=\"OSF_PASSWORD\",\n    prompt=\"OSF password\",\n    confirmation_prompt=False,\n)\n@click.option(\"--osf-overwrite\", is_flag=True)\n@click.option(\"--path\", type=click.Path(exists=True))\n@click.option(\"-j\", \"--nprocs\", type=click.IntRange(min=1), default=cpu_count())\ndef push(\n    template_id, osf_project, osf_user, osf_password, osf_overwrite, path, nprocs,\n):\n    \"\"\"Push a new template, but do not create PR.\"\"\"\n    from .osf import upload as _upload\n    path = Path(path or f\"tpl-{template_id}\")\n\n    if not path.exists():\n        raise click.UsageError(f\"<{path}> does not exist.\")\n\n    _upload(\n        template_id, osf_project, osf_user, osf_password, osf_overwrite, path, nprocs,\n    )\n\n\n@cli.command()\n@click.argument(\"template_id\")\n@click.option(\"--osf-project\", envvar=\"OSF_PROJECT\", callback=is_set)\n@click.option(\"--overwrite\", is_flag=True)\n@click.option(\"--path\", type=click.Path(exists=False))\n@click.option(\"-j\", \"--nprocs\", type=click.IntRange(min=1), default=cpu_count())\ndef get(\n    template_id, osf_project, overwrite, path, nprocs,\n):\n    \"\"\"Download a template from the Archive.\"\"\"\n    from .osf import get_template as _get\n    if template_id.startswith(\"tpl-\"):\n        template_id = template_id[4:]\n\n    path = Path(path or f\"tpl-{template_id}\")\n\n    if path.name != f\"tpl-{template_id}\":\n        path = path / f\"tpl-{template_id}\"\n\n    if path.exists():\n        click.echo(f\"WARNING: <{path}> exists.\")\n\n    _get(template_id, osf_project, overwrite, path, nprocs)\n\n\n@cli.command()\n@click.argument(\"template_id\")\n@click.option(\"--osf-project\", envvar=\"OSF_PROJECT\", default=\"ue5gx\")\n@click.option(\"-o\", \"--out-csv\", type=click.Path(exists=False))\ndef geturls(template_id, osf_project, out_csv):\n    \"\"\"Print or save the OSF download URLs of a template.\"\"\"\n    from .osf import get_project_urls\n\n    if template_id.startswith(\"tpl-\"):\n        template_id = template_id[4:]\n\n    urls = get_project_urls(\n        
f\"\"\"\\\nhttps://files.osf.io/v1/resources/{osf_project}/providers/osfstorage/\\\n\"\"\",\n        f\"tpl-{template_id}\",\n    )\n    if out_csv:\n        Path(out_csv).write_text(urls)\n        return\n    print(urls)\n\n\n@cli.command()\n@click.argument(\"template_dir\", type=click.Path(exists=True), default=getcwd())\n@click.option(\"--normalize/--no-normalize\", default=True)\n@click.option(\"--force-dtype/--no-force-dtype\", default=True)\n@click.option(\"--deoblique/--no-deoblique\", default=False)\ndef sanitize(template_dir, normalize, force_dtype, deoblique):\n    \"\"\"Check orientation and datatypes of NIfTI files in template folder.\"\"\"\n    from .utils import copy_template as _copy_template\n\n    updated = _copy_template(template_dir, normalize, force_dtype, deoblique)\n    if updated:\n        print(\n            \"\\n * \".join(\n                [\"Modified:\"] + [f\"<{u.relative_to(template_dir)}>\" for u in updated]\n            )\n        )\n\n\n@cli.command()\n@click.argument(\"template_id\")\n@click.argument(\"field\")\n@click.option(\"--path\", type=click.Path(exists=True))\ndef metadata(template_id, field, path):\n    \"\"\"Get a metadata entry from a template.\"\"\"\n    import json\n    if template_id.startswith(\"tpl-\"):\n        template_id = template_id[4:]\n    path = Path(path or f\"tpl-{template_id}\")\n    metadata = json.loads((path / \"template_description.json\").read_text())\n    print(metadata.get(field))\n\n\nif __name__ == \"__main__\":\n    \"\"\" Install entry-point \"\"\"\n    cli()\n","repo_name":"templateflow/python-manager","sub_path":"tfmanager/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":14790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7431229252","text":"import json\nwith open ('5_task.json','r') as f:\n    j=json.load(f)\nra={}\ni=0\npav=0\nwhile i= 13 and cantidad <= 100:\n        precioTotal = precioBase * 12 + (precioBase * 0.9) * (cantidad - 12)\n        cantVentas10 += 1\n    else:\n        precioTotal = precioBase * 12 + (precioBase * 0.9) * 88 + (precioBase * 0.75) * (cantidad - 100)\n        cantVentas += 1\n    precioPromedio = precioTotal / cantidad\n    print(\"Total price: \", precioTotal)\n    print(\"Average price: \", precioPromedio)\n\nprint(\"Total number of sales made: \", cantVentas)\nprint(\"Number of sales where a 10% discount was applied: \", cantVentas10)\nprint(\"Number of sales where only the base price was charged, i.e. no discounts were applied: \", cantVentas0)\n","repo_name":"torrresagus/UADE-Introduccion-a-la-Algoritmia","sub_path":"Guia de Trabajos Practicos 2022/Trabajo Practico 5 - Ejercicios integradores/Ejercicio3/Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"5921160093","text":"#!/usr/bin/env python3\n\nimport sys, csv\nfrom Bio import SeqIO\nfrom collections import defaultdict\n\ndef gen_ogd(ogs):\n    ogd = defaultdict(dict)\n    with open(ogs) as ogsh:\n        csvr = csv.reader(ogsh, delimiter='\\t')\n        for i, row in enumerate(csvr):\n            if i == 0:\n                isolates = row[1:]\n            for j, header in enumerate(isolates):\n                if not row[j+1].split(', ') == ['']:\n                    ogd[row[0]][header] = row[j+1].split(', ')\n                else:\n                    ogd[row[0]][header] = []\n    return ogd\n\ndef gen_product_dict(gb, rorq):\n    prod_d = {}\n    for chrom in gb:\n        if chrom.id == 'api' or chrom.id == 'mit':\n            continue\n        for feat in chrom.features:\n            if feat.type == 'CDS':\n                if rorq == 'r':\n                    prod_d[feat.qualifiers['protein_id'][0]] = 
feat.qualifiers['product'][0]\n                elif rorq == 'q':\n                    prod_d[feat.qualifiers['locus_tag'][0]] = feat.qualifiers['product'][0]\n                else:\n                    sys.exit(\"rorq is bad\")\n    return prod_d\n\n\ndef id_func_annots(ref_prod_d, q_prod_d, ogd, query, ref, qgb):\n    class products: pass\n    products.new_names = {}\n    products.too_many_products = {}\n    products.no_products = {}\n    for chrom in qgb:\n        if chrom.id == 'api' or chrom.id == 'mit':\n            continue\n        for feat in chrom.features:\n            if feat.type == 'CDS':\n                qltag = feat.qualifiers['locus_tag'][0]\n                qproduct = feat.qualifiers['product'][0]\n                og = find_query_og(ogd, qltag, query)\n                if og != False:\n                    ref_ids = ogd[og][ref]\n                    if len(ref_ids) > 1:\n                        uniq_products = [ ref_prod_d[refid].strip() for refid in ref_ids ]\n                        if len(set(uniq_products)) == 1:\n                            products.new_names[qltag] = uniq_products[0]\n                        else:\n                            products.too_many_products[qltag] = [(refid, ref_prod_d[refid]) for refid in ref_ids]\n                            products.new_names[qltag] = qproduct\n                    elif len(ref_ids) == 1:\n                        products.new_names[qltag] = ref_prod_d[ref_ids[0]]\n                    elif len(ref_ids) == 0:\n                        products.new_names[qltag] = qproduct\n                        products.no_products[qltag] = qproduct\n                else:\n                    products.new_names[qltag] = qproduct\n                    products.no_products[qltag] = qproduct\n    return products\n\n\ndef find_query_og(ogd, qltag, isolate):\n    for og in list(ogd):\n        if qltag+'-T1' in ogd[og][isolate]:\n            return og\n    return False\n\ndef generate_new_genbank_file(products, qgb):\n    for chrom in qgb:\n        if chrom.id == 'api' or chrom.id == 'mit':\n            continue\n        for feat in chrom.features:\n            if feat.type == 'CDS' or feat.type == 'mRNA':\n                feat.qualifiers['product'] = [products.new_names[feat.qualifiers['locus_tag'][0]]]\n    return qgb\n\ndef output_product_details(products, query):\n    outfile = '{}.too_many_products.csv'.format(query)\n    with open(outfile, 'w') as outh:\n        outw = csv.writer(outh)\n        for qltag in list(products.too_many_products):\n            outw.writerow([qltag, products.too_many_products[qltag][0], products.too_many_products[qltag][1]])\n    outfile = '{}.no_products.csv'.format(query)\n    with open(outfile, 'w') as outh:\n        outw = csv.writer(outh)\n        for qltag in list(products.no_products):\n            outw.writerow([qltag, products.no_products[qltag]])\n    \n    \ndef main():\n    ref_gb = list(SeqIO.parse(sys.argv[1], 'gb'))\n    q_gbs = [list(SeqIO.parse(sys.argv[2], 'gb')), list(SeqIO.parse(sys.argv[3], 'gb'))]\n    ogs = sys.argv[4]\n    ref = 'Theileria_orientalis_Shintoku'\n    ref_prod_d = gen_product_dict(ref_gb, 'r')\n    ogd = gen_ogd(ogs)\n    for i, query in enumerate(['Theileria_orientalis_Fish_Creek', 'Theileria_orientalis_Goon_Nure']):\n        q_prod_d = gen_product_dict(q_gbs[i], 'q')\n        products = id_func_annots(ref_prod_d, q_prod_d, ogd, query, ref, q_gbs[i])\n        new_gb = generate_new_genbank_file(products, q_gbs[i])\n        SeqIO.write(new_gb, '{}.shintoku_product_names.gbk'.format(query), 'gb')\n        output_product_details(products, query)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"bogemad/theileria_orientalis_complete_genome_scripts","sub_path":"func_annotation_transfer.py","file_name":"func_annotation_transfer.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19655326311","text":"# -*- coding: utf-8 -*-\n# @Project:AID1810\n# @Author:biabu\n# @Date:2019/3/25 16:11\n# @File_name:demo06_LabelEncode.py\n# @IDE:PyCharm\n\n\"\"\"\nLabel encoding\n\"\"\"\n\nimport numpy as np\nimport sklearn.preprocessing as sp\nraw_samples = np.array([\n    'audi', 'ford', 'audi', 'toyota',\n    'ford', 'bmw', 
'toyota', 'byd',\n    'audi'])\n\n# create the label encoder\nlbe = sp.LabelEncoder()\n# run the label encoding\nresult = lbe.fit_transform(raw_samples)\nprint(result)\n\n# invert the encoding back to the original data\nr_samples = lbe.inverse_transform(result)\nprint(r_samples)\n","repo_name":"biabulinxi/Python-ML-DL","sub_path":"AI/Machine_Learning/Day01/demo06_LabelEncode.py","file_name":"demo06_LabelEncode.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7748907418","text":"import pathlib\nimport functions\nimport visualisation\n\nimport subprocess\nimport modules.imageProcessing\n\n\n######### Parameters ##########\n\nappPath = pathlib.Path(r\"D:\\Stage\\ACCAL\\ACCAL\") #### To change with the app folder\ndataFolderPath = pathlib.Path(r\"D:\\Stage\\ACCAL\\data\\dataTest2\") #### To change with the dataset folder\nfeatureAppPath = pathlib.Path(appPath,\"features.py\")\n\n\n## Dissimilarity Matrix\nDENOISE_RATIO = 0.1\nCLIP_LIMIT = 0.01\nLENGTH_KERNEL = 4.0\nPIXEL_SIDE = 360\nFEATURE_NUMBER = 250\n\nNUMBER_CORES = 2 \n\n\n## Clustering\nRATIO_LOW = 0.001\nRATIO_HIGH = 0.01\nNB_BURNIN = 30\nNB_SAMPLE = 100\nEACH_SAMPLE = 2\n\n\n## Visualisation\nPROB_LIMIT = 0.5\n\n\n###################################################\n###################################################\n\n\n### Compute and save temporary images used for further processing\n#modules.imageProcessing.computeAndSaveTempImages(dataFolderPath=dataFolderPath,denoiseRatio=DENOISE_RATIO,clipLimit=CLIP_LIMIT)\n\n\n### Compute and save features for each image \n#functions.saveFeatures(dataFolderPath=dataFolderPath,absAppPath=appPath,pixelSide=PIXEL_SIDE,lengthKernel=LENGTH_KERNEL,featureNumber=FEATURE_NUMBER)\n#subprocess.run([\"python\",str(featureAppPath),str(dataFolderPath),str(appPath),str(PIXEL_SIDE),str(LENGTH_KERNEL),str(FEATURE_NUMBER),str(NUMBER_CORES)])\n\n## Compute and save the dissimilarity Matrix\n#functions.saveDissMatrix(dataFolderPath=dataFolderPath)\n\n\n## Compute and save Probability matrix\nfunctions.clustering(dataFolderPath=dataFolderPath,ratioLow=RATIO_LOW,ratioHigh=RATIO_HIGH,nbBurnin=NB_BURNIN,nbSample=NB_SAMPLE,eachSample=EACH_SAMPLE)\n\n\n## Save Results\nvisualisation.saveResults(dataFolderPath=dataFolderPath,probLimit=PROB_LIMIT)\n\n","repo_name":"alexandreBerezin/ACCAL","sub_path":"ACCAL/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"2680880239","text":"import numpy as np\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_auc_score\n\nfrom matplotlib import pyplot as plt\n\n\n\ndef get_precision(_df, threshold=7):\n    # TP|FP|TN|FN\n    ground_positive = _df[_df['pIC50'] >= threshold]['title'].values\n    predct_positive = _df[_df['rescore'] >= threshold]['title'].values\n\n    true_positive = np.intersect1d(predct_positive, ground_positive)\n    \n    if len(true_positive) == 0:\n        precision = 0\n    else:\n        # precision (note: the denominator is the union of ground-truth and\n        # predicted positives, i.e. a Jaccard-style score rather than TP/predicted)\n        precision = len(true_positive) / (len(np.union1d(ground_positive, predct_positive)))\n\n    return precision\n\n\ndef get_recall(_df, threshold=7):\n    # TP|FP|TN|FN\n    ground_positive = _df[_df['pIC50'] >= threshold]['title'].values\n    predct_positive = _df[_df['rescore'] >= threshold]['title'].values\n\n    true_positive = np.intersect1d(predct_positive, ground_positive)\n    \n    if len(true_positive) == 0:\n        recall = 0\n    else:\n        # recall\n        recall = len(true_positive) / len(ground_positive)\n\n    return recall\n\n\ndef 
calc_auc(y_pred,y_test):\n    '''\n    Compute the AUC from the ROC curve instead of calling roc_auc_score directly\n    '''\n    fpr, tpr, thresholds = metrics.roc_curve(y_test,y_pred,pos_label=1)\n    return metrics.auc(fpr,tpr)\n\n\ndef return_gold_plt(frame_data_frame):\n    _corr = np.mean(frame_data_frame['gold_corr'].values)\n    xx = frame_data_frame['pIC50']\n    yy = frame_data_frame['Gold.PLP.Fitness_best']\n\n    fp1 = np.polyfit(xx, yy, 1)\n    f1 = np.poly1d(fp1)\n\n    fig = plt.figure(figsize=(10, 5))\n    ax = fig.add_subplot(1, 1, 1)\n    ax.scatter(xx, \n               yy, \n               s=150, \n               lw=0.2, \n               alpha=0.5, \n               edgecolors='black', \n               color = '#1f77b4', \n               label='rescore/pIC50')\n    ax.text(10,\n            60,\n            f'\\ncorr avg. : {_corr:.3f}',\n            color = '#1f77b4',\n            size=10)\n    ax.set_xlabel('pIC50')\n    ax.set_ylabel('Fitness_best')\n    ax.legend(loc='upper right')\n    plt.plot(xx, f1(xx), lw=2, color='r', label='polyfit')\n\n    return plt\n","repo_name":"sangwon-hwang/fetal_monitoring","sub_path":"fetal_monitor/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1405726087","text":"class Solution(object):\n    \n    # hash set, find reflection line: O(N) time & space\n    def isReflected(self, points):\n        pts = set()\n        maxX, minX = float('-inf'), float('inf')\n        for point in points:\n            maxX = max(maxX, point[0])\n            minX = min(minX, point[0])\n            pts.add(tuple(point))\n        mid = float(maxX + minX) / 2\n        for point in points:\n            x = 2 * mid - point[0]\n            y = point[1]\n            if (x, y) not in pts:\n                return False\n        return True\n    ","repo_name":"haomingchan0811/Leetcode","sub_path":"356. Line Reflection.py","file_name":"356. Line Reflection.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36351516025","text":"# coding: utf-8\n\n\"\"\"\n    Get the app's push-event subscription status\n\n    Query this app's event subscription status via access_token\n\n    \n\"\"\"\n\nimport pprint\nimport re  # noqa: F401\n\nimport six\n\n\nclass EventStatusUpdateBody(object):\n    \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n    Do not edit the class manually.\n    \"\"\"\n    \"\"\"\n    Attributes:\n      swagger_types (dict): The key is attribute name\n                            and the value is attribute type.\n      attribute_map (dict): The key is attribute name\n                            and the value is json key in definition.\n    \"\"\"\n    swagger_types = {\n        'list': 'list[EventStatus]'\n    }\n\n    attribute_map = {\n        'list': 'list'\n    }\n\n    def __init__(self, list=None):  # noqa: E501\n        \"\"\"EventStatusUpdateBody - a model defined in Swagger\"\"\"  # noqa: E501\n        self._list = None\n        self.discriminator = None\n        self.list = list\n\n    @property\n    def list(self):\n        \"\"\"Gets the list of this EventStatusUpdateBody.  # noqa: E501\n\n\n        :return: The list of this EventStatusUpdateBody.  # noqa: E501\n        :rtype: list[EventStatus]\n        \"\"\"\n        return self._list\n\n    @list.setter\n    def list(self, list):\n        \"\"\"Sets the list of this EventStatusUpdateBody.\n\n\n        :param list: The list of this EventStatusUpdateBody. 
# noqa: E501\n :type: list[EventStatus]\n \"\"\"\n if list is None:\n raise ValueError(\"Invalid value for `list`, must not be `None`\") # noqa: E501\n\n self._list = list\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(EventStatusUpdateBody, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, EventStatusUpdateBody):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"uimeet/douyin","sub_path":"douyin/open/events/model/event_status_update_body.py","file_name":"event_status_update_body.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"76"} +{"seq_id":"39088773865","text":"import turtle\n\npen = turtle.Turtle()\npen.color(\"black\")\npen.fillcolor(\"grey\")\npen.pensize(5)\npen.shape(\"turtle\")\n\n\ndef draw_squares(x, y):\n pen.begin_fill()\n pen.up()\n pen.goto(x, y)\n pen.down()\n for i in range(0, 4):\n pen.forward(100)\n pen.left(90)\n pen.end_fill()\n\n\nfor x in range(-200, 200, 120):\n draw_squares(x, 200)\n","repo_name":"sakshi303/Python_challenges","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1286930173","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom math import pi\nfrom coord import Coord\n\n# possible orientations\nupw = 'upw'\nhor = 'hor'\ndwd = 'dwd'\n\nspeed = 45 # mm/s # for timing purposes\nang_speed = pi / 4.7 # rad/s # pi / 4,7s\n\n\n# TODO: speed in rad per sec instead of lin speed\n\n\nclass Positions(object):\n \"\"\"constant world coordinates translated to each robot KOS\"\"\"\n __init = False\n\n def __init__(self, robot):\n self.home = Coord(r=160, z=180, ort=hor)\n r0_r1 = Coord(isvect=True, x=465) # vector from global-origin/r0-origin to r1-origin\n\n if robot.id is 0:\n self.center = (r0_r1 / 2) + Coord(z=180, x=-5, ort=hor)\n self.cube = (r0_r1 / 2) + Coord(z=43, x=-7, ort=dwd)\n self.cube_retr = self.cube + Coord(z=50)\n\n if robot.id is 1:\n self.center = (r0_r1 / 2) + Coord(z=180, x=-5, ort=hor)\n self.cube = (r0_r1 / 2) + Coord(z=55, x=-5, ort=dwd)\n self.cube_retr = self.cube + Coord(z=50)\n\n # fixed turning positions\n self.a_hold = Coord(x=300, y=2, z=70, ort=hor)\n self.a_turn = Coord(x=154, z=95, ort=dwd)\n self.a_retr = self.a_turn + Coord(z=40)\n # self.a_turn_prepare = Coord(th=-(pi * 20 / 180), r=154, z=95, ort=dwd)\n # self.a_retr_prepare = Coord(th=-(pi * 20 / 180), r=155, z=120, ort=dwd)\n\n 
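# Each cube face below gets a hold/turn/retract triple; the b poses are\n        # derived as offsets from the shared center pose computed above.\n        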
self.b_hold = self.center\n        self.b_turn = self.center - Coord(x=18.5, z=10)\n        self.b_retr = self.b_turn - Coord(x=50)\n\n        # comp: q3 -= 3deg\n        self.c_hold = Coord(x=220, z=377, ort=upw)\n        self.c_turn = Coord(x=195, z=363, ort=hor)\n        self.c_retr = self.c_turn - Coord(x=50)\n\n        self.__init = True\n\n    def __setattr__(self, attr, value):\n        if self.__init:\n            raise Exception('const value. may not be modified!')\n        else:\n            super(Positions, self).__setattr__(attr, value)\n\n\nclass HardwareLimits(object):\n    \"\"\"\n    Approximated hardware limits. Exceeding them would result in structural collision.\n    No collision warning or detection with base-plate or other objects\n    \"\"\"\n    __init = False\n\n    def __init__(self):\n        self.th0min = -pi\n        self.th1min = -pi / 2\n        self.th2min = -pi * 3 / 4\n        self.th3min = -pi * 5 / 9  # -100*pi/180\n        self.th4min = -pi\n        self.th5min = -pi / 2\n\n        self.th0max = pi\n        self.th1max = pi / 2\n        self.th2max = pi / 2\n        self.th3max = pi * 5 / 9  # 100*pi/180\n        self.th4max = pi\n        self.th5max = pi / 2\n\n        self.__init = True\n\n    def __setattr__(self, attr, value):\n        if self.__init:\n            raise Exception('const value. may not be modified!')\n        else:\n            super(HardwareLimits, self).__setattr__(attr, value)\n\n\nclass RobotStructure(object):\n    \"\"\"structural parameters. maybe get values from stl files for better precision\"\"\"\n    __init = False\n\n    def __init__(self):\n        \"\"\"\n        # base height\n        self.d0 = 12.5 + 78\n        self.d1 = 26\n        # shoulder length\n        self.d2 = 150\n        #self.d2r = 5*m.sqrt(59) #38.4057 # ~ 22 + (32 / 2)\n        self.d2z = 145\n        self.psir = 1.3118747847887 #m.asin(self.d2z / self.d2) #~1.31\n        self.psiz = 0.25892154200621 #m.acos(self.d2z / self.d2) #~0.26\n        # elbow length\n        self.d3 = 147\n        # wrist length\n        self.d4 = 70\n        self.d5 = 93\n        \"\"\"\n\n        self.d01 = 104.5 + 8.0\n        self.d2 = 150\n        self.d2z = 145.146\n        self.d2r = 37.851\n        self.psiz = 0.255092\n        self.psir = 1.3157\n        self.d3 = 144.471\n        self.d45 = 115.5\n        self.dgrip = 45.175\n\n        self.closed = GripStructure().closed\n\n        self.__init = True\n\n    def __setattr__(self, attr, value):\n        if self.__init:\n            raise Exception('const value. may not be modified!')\n        else:\n            super(RobotStructure, self).__setattr__(attr, value)\n\n\nclass GripStructure(object):\n    \"\"\"structural parameters of cube and gripper\"\"\"\n    __init = False\n\n    def __init__(self):\n        # self.cubesize = 56.0\n        # self.servo_horn_radius = 8.0\n        # self.gripper_arm_length = 29.0\n        # self.sponge_dist = 3.0\n        # self.sponge_squish = 0.0\n        self.closed = 54 * pi / 180  # for 19mm gripper rods # cossatz( a = self.servo_horn_radius,\n        # self.closed = 30 * pi / 180 # standard gripper rods\n        #                           b = 32,#(self.cubesize/2)+(self.sponge_dist - self.sponge_squish),\n        #                           c = self.gripper_arm_length )\n        self.__init = True\n\n    def __setattr__(self, attr, value):\n        if self.__init:\n            raise Exception('const value. 
may not be modified!')\n else:\n super(GripStructure, self).__setattr__(attr, value)\n","repo_name":"Gorschel/catkin_ws_DualRobotCubeSolver","sub_path":"src/control_dual_robot/scripts/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"21295120575","text":"import heapq \nimport sys \n\nInf = int(1e9) \ninput = sys.stdin.readline \n\nv, e = map(int, input().split()) \n\ndistances = [Inf] * (v + 1) \n\ngraph = [[] for _ in range(v + 1)]\n\nstart = int(input()) \n\nfor _ in range(e): \n a, b, c = map(int, input().split()) \n graph[a].append((b, c)) \n\n\ndef djikstra(start):\n q = []\n heapq.heappush(q, (0, start))\n distances[start] = 0\n while q:\n dist, now = heapq.heappop(q)\n if distances[now] < dist:\n continue\n\n for i in graph[now]:\n cost = dist + i[1]\n if cost < distances[i[0]]:\n distances[i[0]] = cost\n heapq.heappush(q, (cost, i[0]))\n\n\ndjikstra(start) \n\nfor i in range(1, v + 1): \n if distances[i] == Inf: \n print(\"INF\") \n else: \n print(distances[i]) \n","repo_name":"speciling/ps-study","sub_path":"5주차/임지환/1753_최단경로.py","file_name":"1753_최단경로.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"19017812307","text":"__author__ = 'snouto'\n\n\nfrom system import *\nfrom mongoengine.base import *\n\n\nclass UserSite(Document):\n\n folder = db.StringField(max_length=255,required=True)\n name = db.StringField(max_length=255,required=True)\n url = db.StringField(max_length=600,required=True)\n username=db.StringField(max_length=255,required=True)\n password=db.StringField(max_length=255,required=True)\n note=db.StringField(required=False)\n icon = db.StringField(required=False)\n settings = db.DictField(required=False,default={'favourite':False,'autofill':False,'autologin':False})\n fields = db.ListField(required=False)\n user = db.ReferenceField(User)\n\n\n meta = {\n 'indexes':[{\n 'fields':['name','folder','user']\n }]\n }\n\n\n\n\n\nclass Note(Document):\n\n name = db.StringField(required=True,max_length=255)\n folder = db.StringField(required=True,max_length=255)\n type = db.StringField(required=True,max_length=255)\n note = db.StringField(required=False)\n settings = db.DictField(required=False,default={'favorite':False,'require_reprompt':False})\n meta = {\n\n 'indexes':[\n {\n 'fields':['name','folder','type']\n }\n\n ]\n\n }\n\n node_fields = db.ListField(DictField(),required=False)\n user_id = db.ReferenceField(User)\n\n\n\nclass FormFields(Document):\n profile_name = db.StringField(required=True,max_length=255)\n language = db.StringField(required=True,max_length=255)\n settings = db.DictField(required=False,default={'favorite':False,'require_reprompt':False})\n fields_definitions = db.ListField(DictField(),required=False)\n meta = {\n 'indexes':[\n {\n 'fields':['profile_name','language']\n }\n ]\n }\n user_id = db.ReferenceField(User)\n\n\n\n\n\n\n\n","repo_name":"penshield/justlogmein","sub_path":"justlogmein/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"43803058047","text":"# from common import *\nfrom augmentation import *\nimport pandas as pd\nimport numpy as np\nfrom torch.utils.data import DataLoader, Dataset, SequentialSampler, Sampler\nimport torch\nimport pdb\n\nimage_size = 
1024\n\ndef make_fold(df, fold=0):\n    train_df = df.query(\"fold!=@fold\").reset_index(drop=True)\n    valid_df = df.query(\"fold==@fold\").reset_index(drop=True)\n\n    # train_df = df[df.patient_id.isin(train_id)].reset_index(drop=True)\n    # valid_df = df[df.patient_id.isin(valid_id)].reset_index(drop=True)\n    return train_df, valid_df\n\n\nclass build_dataset(Dataset):\n    def __init__(self, df, train_val_flag=True, transforms=None):\n\n        self.df = df\n        self.train_val_flag = train_val_flag #\n        self.img_paths = df['img_path'].tolist() \n        self.ids = df['img_name'].tolist()\n        self.transforms = transforms\n\n        if train_val_flag:\n            self.label = df['img_label'].tolist()\n        \n    def __len__(self):\n        return len(self.df)\n        # return 8\n    \n    def __getitem__(self, index):\n        #### id\n        id = self.ids[index]\n        #### image\n        img_path = self.img_paths[index]\n        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) # [h, w, c]\n        \n        if self.train_val_flag: # train\n            ### augmentations\n            data = self.transforms(image=img)\n            img = np.transpose(data['image'], (2, 0, 1)) # [c, h, w]\n            gt = self.label[index]\n            # pdb.set_trace()\n            return torch.tensor(img), torch.tensor(int(gt))\n        \n        else: # test\n            ### augmentations\n            data = self.transforms(image=img)\n            img = np.transpose(data['image'], (2, 0, 1)) # [c, h, w]\n            # pdb.set_trace()\n            return torch.tensor(img), id\n\nclass DTTDataset(Dataset):\n    def __init__(self, df, augment=None):\n        self.df = df\n        # self.train_val_flag = train_val_flag #\n        self.img_paths = df['img_path'].tolist() \n        self.ids = df['img_name'].tolist()\n        self.label = df['img_label'].tolist()\n        self.augment = augment\n\n    # def __str__(self):\n    #     num_patient = len(set(self.df.patient_id))\n    #     num_image = len(self.df)\n\n    #     string = ''\n    #     string += f'\\tlen = {len(self)}\\n'\n    #     string += f'\\tnum_patient = {num_patient}\\n'\n    #     string += f'\\tnum_image = {num_image}\\n'\n\n    #     count = dict(self.df.cancer.value_counts())\n    #     for k in [0,1]:\n    #         string += f'\\t\\tcancer{k} = {count[k]:5d} ({count[k]/len(self.df):0.3f})\\n'\n    #     return string\n\n    def __len__(self):\n        return len(self.df)\n\n    def __getitem__(self, index):\n\n        #### id\n        id = self.ids[index]\n        #### image\n        img_path = self.img_paths[index] \n        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) # [h, w, c]\n        img = cv2.resize(img, (1024,1024), interpolation=cv2.INTER_LINEAR)\n        img = img.astype(np.float32)/255\n        # pdb.set_trace()\n        # if self.train_val_flag: # train\n        ### augmentations\n        if self.augment != None:\n            img = self.augment(img)\n        img = np.transpose(img, (2, 0, 1)) # [c, h, w]\n        gt = self.label[index]\n        # pdb.set_trace()\n        \n        \n        # else: # test\n        #     ### augmentations\n        #     data = self.transforms(image=img)\n        #     img = np.transpose(data['image'], (2, 0, 1)) # [c, h, w]\n        #     gt = id\n        #     # pdb.set_trace()\n\n\n        d = self.df.iloc[index]\n        # # pdb.set_trace()\n        # image = read_data(d)\n\n        # if self.augment is not None:\n        #     image = self.augment(image)\n\n        r = {}\n        r['index'] = index\n        r['d'] = d\n        # r['patient_id'] = d.patient_id #\n        r['image'] = torch.from_numpy(img).float()\n        r['label'] = torch.FloatTensor([gt])  # wrap the scalar label; FloatTensor(gt) would allocate an uninitialized tensor of length gt\n\n        return r\n\ntensor_key = ['image', 'label']\ndef null_collate(batch):\n    d = {}\n    key = batch[0].keys()\n    for k in key:\n        v = [b[k] for b in batch]\n        if k in tensor_key:\n            v = torch.stack(v,0)\n        d[k] = v\n    # d['image']= d['image'].unsqueeze(1)\n    d['label']= d['label'].reshape(-1)\n    return d\n\nclass BalanceSampler(Sampler):\n\n    def __init__(self, dataset, ratio=8):\n        self.r = ratio-1\n        self.dataset = dataset\n        self.pos_index = np.where(dataset.df.img_label>0)[0]\n        
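# One positive index is paired with (ratio-1) negative indices per group in\n        # __iter__, so every epoch keeps a fixed positive rate despite imbalance.\n        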
self.neg_index = np.where(dataset.df.img_label==0)[0]\n\n self.length = self.r*int(np.floor(len(self.neg_index)/self.r))\n\n def __iter__(self):\n pos_index = self.pos_index.copy()\n neg_index = self.neg_index.copy()\n np.random.shuffle(pos_index)\n np.random.shuffle(neg_index)\n\n neg_index = neg_index[:self.length].reshape(-1,self.r)\n pos_index = np.random.choice(pos_index, self.length//self.r).reshape(-1,1)\n\n index = np.concatenate([pos_index,neg_index],-1).reshape(-1)\n return iter(index)\n\n def __len__(self):\n return self.length\n\n#################################################################################\n\ndef train_augment_v00a(image):\n image = do_random_hflip(image) # hflip, vflip or both\n #image, target = do_random_hflip(image, target)\n\n if np.random.rand() < 0.2:\n for func in np.random.choice([\n lambda image : do_random_affine( image, degree=15, translate=0.1, scale=0.2, shear=10),\n lambda image : do_random_rotate(image, degree=15),\n lambda image : do_random_stretch(image, stretch=(0.2,0.2)),\n ], 1):\n image = func(image)\n\n if np.random.rand() < 0.1:\n image = do_elastic_transform(\n image,\n alpha=image_size,\n sigma=image_size* 0.05,\n alpha_affine=image_size* 0.03\n )\n\n if np.random.rand() < 0.2:\n for func in np.random.choice([\n lambda image: do_random_contrast(image),\n ], 1):\n image = func(image)\n pass\n\n return image\n\ndef train_augment_v00(image):\n image = do_random_hflip(image) # hflip, vflip or both\n #image, target = do_random_hflip(image, target)\n\n if np.random.rand() < 0.7:\n for func in np.random.choice([\n lambda image : do_random_affine( image, degree=30, translate=0.1, scale=0.3, shear=20),\n lambda image : do_random_rotate(image, degree=30),\n lambda image : do_random_stretch(image, stretch=(0.3,0.3)),\n ], 1):\n image = func(image)\n\n if np.random.rand() < 0.25:\n image = do_elastic_transform(\n image,\n alpha=image_size,\n sigma=image_size* 0.05,\n alpha_affine=image_size* 0.03\n )\n if np.random.rand() < 0.25:\n image = do_random_cutout(\n image, num_block=5,\n block_size=[0.1,0.3],\n fill='constant'\n )\n\n if np.random.rand() < 0.5:\n for func in np.random.choice([\n lambda image: do_random_contrast(image),\n lambda image: do_random_noise(image, m=0.1),\n ], 1):\n image = func(image)\n pass\n\n return image\n\n#################################################################################\n\ndef run_check_dataset():\n train_df, valid_df = make_fold()\n dataset = RsnaDataset(train_df, augment=train_augment_v00)\n print(dataset)\n\n for i in range(100):\n i = 0 #240*8+ i#np.random.choice(len(dataset))\n r = dataset[i]\n print(r['index'], 'id = ', r['patient_id'], '-----------')\n for k in tensor_key :\n v = r[k]\n print(k)\n print('\\t', 'dtype:', v.dtype)\n print('\\t', 'shape:', v.shape)\n if len(v)!=0:\n print('\\t', 'min/max:', v.min().item(),'/', v.max().item())\n print('\\t', 'is_contiguous:', v.is_contiguous())\n print('\\t', 'values:')\n print('\\t\\t', v.reshape(-1)[:8].data.numpy().tolist(), '...')\n print('\\t\\t', v.reshape(-1)[-8:].data.numpy().tolist())\n print('')\n if 1:\n image = r['image'].data.cpu().numpy()\n\n # image_show_norm('image', image)\n cv2.waitKey(0)\n\n\n loader = DataLoader(\n dataset,\n #sampler=SequentialSampler(dataset),\n sampler=BalanceSampler(dataset),\n batch_size=8,\n drop_last=True,\n num_workers=0,\n pin_memory=False,\n worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),\n collate_fn=null_collate,\n )\n print(loader.batch_size, len(loader), 
len(dataset))\n    print('')\n\n    for t, batch in enumerate(loader):\n        if t > 5: break\n        print('batch ', t, '===================')\n        print('index', batch['index'])\n        for k in tensor_key:\n            v = batch[k]\n            print(k)\n            print('\\t', 'shape:', v.shape)\n            print('\\t', 'dtype:', v.dtype)\n            print('\\t', 'is_contiguous:', v.is_contiguous())\n            print('\\t', 'value:')\n            print('\\t\\t', v.reshape(-1)[:8].data.numpy().tolist())\n            if k=='cancer':\n                print('\\t\\tsum ', v.sum().item())\n\n        print('')\n\ndef run_check_augment():\n\n    train_df, valid_df = make_fold()\n    dataset = RsnaDataset(train_df)\n    print(dataset)\n\n    #---------------------------------------------------------------\n    def augment(image):\n        # image, target = do_random_hflip(image, target)\n        #image, target = do_random_flip(image, target)\n\n        #image, target = do_random_affine( image, target, degree=10, translate=0.1, scale=0.2, shear=10)\n        #image, target = do_random_rotate(image, target, degree=45)\n        #image, target = do_random_rotate90(image, target)\n\n        #image, target = do_random_perspective(image, target, m=0.3)\n        #image, target = do_random_zoom_small(image, target)\n\n        #image = do_random_hsv(image, h=20, s=50, v=50)\n        # image = do_random_contrast(image)\n        # image = do_random_gray(image)\n        # image = do_random_guassian_blur(image, k=[3, 5], s=[0.1, 2.0])\n        # image = do_random_noise(image, m=0.08)\n        return image\n\n    for i in range(10):\n        #i = 2424 #np.random.choice(len(dataset))#272 #2627\n        print(i)\n        r = dataset[i]\n\n        image = r['image'].data.cpu().numpy()\n        # image_show_norm('image',image, min=0, max=1,resize=1)\n        #cv2.waitKey(0)\n\n        for t in range(100):\n            #image1 = augment(image.copy())\n            image1 = train_augment_v00(image.copy())\n            # image_show_norm('image1', image1, min=0, max=1,resize=1)\n            cv2.waitKey(0)\n\n\n# main #################################################################\nif __name__ == '__main__':\n    run_check_dataset()\n    #run_check_augment()","repo_name":"bee992/Text-Manipulation-Classification","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10814,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"33991638200","text":"MENU = {\n    \"espresso\": {\n        \"ingredients\": {\n            \"water\": 50,\n            \"coffee\": 18,\n        },\n        \"cost\": 1.5,\n    },\n    \"latte\": {\n        \"ingredients\": {\n            \"water\": 200,\n            \"milk\": 150,\n            \"coffee\": 24,\n        },\n        \"cost\": 2.5,\n    },\n    \"cappuccino\": {\n        \"ingredients\": {\n            \"water\": 250,\n            \"milk\": 100,\n            \"coffee\": 24,\n        },\n        \"cost\": 3.0,\n    }\n}\n\nresources = {\n    \"water\": 300,\n    \"milk\": 200,\n    \"coffee\": 100,\n}\n\ncoins = {\n    \"quarters\": 0.25,\n    \"dimes\": 0.10,\n    \"nickels\": 0.05,\n    \"pennies\": 0.01\n}\n\nprofit = 0\n\ndef print_resources():\n    for item in resources:\n        unit = \"g\" if item.lower() == \"coffee\" else \"ml\"\n        print(f\"{item}: {resources[item]}{unit}\")\n    print(f\"Profit: ${'{:.2f}'.format(round(profit, 2))}\")\n\n\ndef get_money(item, item_cost):\n    paid = 0\n\n    while paid < item_cost:\n        if paid > 0:\n            rounded_paid = '{:.2f}'.format(round(paid, 2))\n            rounded_remainder = '{:.2f}'.format(round(item_cost - paid, 2))\n            print(f\"You did not pay enough. You paid ${rounded_paid} but it costs ${'{:.2f}'.format(round(item_cost, 2))}. 
Please pay ${rounded_remainder} more.\")\n        for coin in coins:\n            amount_coin = int(input(f\"How many {coin}?: \"))\n            paid += amount_coin * coins[coin]\n\n        if paid > item_cost:\n            change = '{:.2f}'.format(round(paid - item_cost, 2))\n            print(f\"Your change is ${change}\")\n            return\n        elif paid == item_cost:\n            print(\"No change here! Thanks for paying.\")\n            return\n\n\ndef use_resources(item):\n    item_resources = MENU[item][\"ingredients\"]\n\n    for resource in item_resources:\n        resources[resource] -= item_resources[resource]\n\n\ndef check_can_make(item):\n    ingredients_needed = MENU[item][\"ingredients\"]\n\n    for ingredient in ingredients_needed:\n        if ingredients_needed[ingredient] > resources[ingredient]:\n            return False\n    return True\n\n\ndef make_coffee(item, cost):\n    global profit\n    profit += cost\n    use_resources(item)\n    return\n\ndef handle_initial_ask():\n    user_input = input(\"What would you like? (espresso/latte/cappuccino): \").lower()\n\n    if user_input == \"report\":\n        print_resources()\n        return\n    elif user_input == \"off\":\n        return \"shut off\"\n    elif user_input in MENU:\n        can_make = check_can_make(user_input)\n        if can_make:\n            cost = MENU[user_input][\"cost\"]\n            print(f\"Great! The cost of a {user_input} is ${'{:.2f}'.format(round(cost, 2))}. Please pay me.\")\n            get_money(user_input, cost)\n            make_coffee(user_input, cost)\n            return user_input\n        else:\n            print(\"Sorry, we do not have enough ingredients to make this. Please try again later\")\n    return\n\n\ndef run():\n    machine_on = True\n\n    while machine_on:\n        item = handle_initial_ask()\n        if item == \"shut off\":\n            machine_on = False\n\n        if item and item != \"shut off\":\n            print(f\"{item} ☕️ is ready! Please enjoy\")\n\n    print(\"Machine has been shut off for maintenance\")\n\n\nrun()","repo_name":"mkuhn0411/simple-python-coffee-maker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"35283978829","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\nsetuptools.setup(\n    name='free_proxy',\n    version='1.1.1',\n    author=\"jundymek\",\n    author_email=\"jundymek@gmail.com\",\n    description=\"Proxy scraper for further use\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/jundymek/free-proxy\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6',\n    install_requires=['lxml', 'requests']\n)\n","repo_name":"jundymek/free-proxy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"76"} +{"seq_id":"7829064044","text":"from types import *\nimport logging\nimport urllib\nimport json\n\nfrom edge.opensearch.responsewriter import ResponseWriter\nfrom edge.dateutility import DateUtility\nfrom edge.httputility import HttpUtility\nfrom edge.spatialsearch import SpatialSearch\nimport re\n\nclass GranuleWriter(ResponseWriter):\n    def __init__(self, configFilePath, requiredParams = None):\n        super(GranuleWriter, self).__init__(configFilePath, requiredParams)\n        self.startIndex = 0\n        self.entriesPerPage = self._configuration.getint('solr', 'entriesPerPage')\n\n    def get(self, requestHandler):\n        super(GranuleWriter, self).get(requestHandler)\n        
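# Paging and format arguments are parsed first; the dataset metadata fetched\n        # at the end decides between a plain Solr query and the L2 spatial path.\n        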
#searchParameters = {}\n #logging.debug('uri: '+str(requestHandler.request.headers))\n \n #startIndex = 0\n try:\n self.startIndex = requestHandler.get_argument('startIndex')\n except:\n pass\n\n #entriesPerPage = self._configuration.getint('solr', 'entriesPerPage')\n try:\n self.entriesPerPage = requestHandler.get_argument('itemsPerPage')\n #cap entries per age at 400\n if (int(self.entriesPerPage) > 400):\n self.entriesPerPage = 400\n self.searchParameters['itemsPerPage'] = self.entriesPerPage\n except:\n pass\n\n #pretty = True\n try:\n if requestHandler.get_argument('pretty').lower() == 'false':\n self.pretty = False\n self.searchParameters['pretty'] = 'false'\n except:\n pass\n\n try:\n if requestHandler.get_argument('full').lower() == 'true':\n self.searchParameters['full'] = 'true'\n except:\n pass\n \n try:\n self.searchParameters['format'] = requestHandler.get_argument('format')\n except:\n pass\n\n parameters = ['startTime', 'endTime', 'keyword', 'granuleName', 'datasetId', 'shortName', 'bbox', 'sortBy']\n #variables = {}\n for parameter in parameters:\n try:\n value = requestHandler.get_argument(parameter)\n self.variables[parameter] = value\n self.searchParameters[parameter] = value\n except:\n pass\n\n if 'keyword' in self.variables:\n self.variables['keyword'] = self.variables['keyword'].replace('*', '')\n self.variables['keyword'] = self.variables['keyword'].lower()\n\n #Fetch dataset metadata from Solr\n datasetVariables = {}\n if 'datasetId' in self.variables:\n datasetVariables['datasetId'] = self.variables['datasetId']\n if 'shortName' in self.variables:\n datasetVariables['shortName'] = self.variables['shortName']\n self._getSingleSolrDatasetResponse(datasetVariables, self._onSolrDetermineProcessLevelResponse)\n\n def _getSolrResponse(self, startIndex, entriesPerPage, variables):\n query = self._constructSolrQuery(startIndex, entriesPerPage, variables)\n url = self._configuration.get('solr', 'granuleUrl')\n\n httpUtility = HttpUtility()\n httpUtility.getResponse(url+'/select/?'+query, self._onSolrResponse)\n\n def _constructSolrQuery(self, startIndex, entriesPerPage, variables):\n #set default sort order\n sort='Granule-StartTimeLong+desc'\n filterQuery = None\n queries = []\n for key, value in variables.iteritems():\n #query = ''\n if key == 'startTime':\n startTime = DateUtility.convertISOToUTCTimestamp(value)\n if startTime is not None:\n query = 'Granule-StopTimeLong:'\n query += '['+str(startTime)+'%20TO%20*]'\n queries.append(query)\n elif key == 'endTime':\n stopTime = DateUtility.convertISOToUTCTimestamp(value)\n if stopTime is not None:\n query = 'Granule-StartTimeLong:'\n query += '[*%20TO%20'+str(stopTime)+']'\n queries.append(query)\n elif key == 'keyword':\n newValue = urllib.quote(value)\n\n query = 'SearchableText-LowerCased:('+newValue+')'\n queries.append(query)\n elif key == 'datasetId':\n query = 'Dataset-PersistentId:'+self._urlEncodeSolrQueryValue(value)\n queries.append(query)\n elif key == 'shortName':\n query = 'Dataset-ShortName-Full:'+self._urlEncodeSolrQueryValue(value)\n queries.append(query)\n elif key == 'granuleName':\n query = 'Granule-Name-Full:'+self._urlEncodeSolrQueryValue(value)\n queries.append(query)\n elif key == 'granuleIds':\n granuleIds = []\n for granuleId in value:\n granuleIds.append(str(granuleId))\n query = 'Granule-Id:('+'+OR+'.join(granuleIds)+')'\n queries.append(query)\n\n startIndex = 0\n elif key == 'sortBy':\n sortByMapping = {'timeAsc': 'Granule-StartTimeLong+asc'}\n if value in sortByMapping.keys():\n sort = 
sortByMapping[value]\n elif key == 'bbox':\n filterQuery = self._constructBoundingBoxQuery(value)\n #if query != '':\n # queries.append('%2B'+query)\n\n if len(queries) == 0:\n queries.append('*')\n\n query = 'q='+'+AND+'.join(queries)+'&fq=Granule-AccessType:(OPEN+OR+PREVIEW+OR+SIMULATED+OR+REMOTE)+AND+Granule-Status:ONLINE&version=2.2&start='+str(startIndex)+'&rows='+str(entriesPerPage)+'&indent=on&wt=json&sort='+sort\n if filterQuery is not None:\n query += '&' + filterQuery\n logging.debug('solr query: '+query)\n \n return query\n \n def _onSolrDetermineProcessLevelResponse(self, response):\n try:\n #Determine dataset processing level\n processingLevel = None\n solrJson = json.loads(response.body)\n if len(solrJson['response']['docs']) >= 1:\n if 'bbox' in self.variables:\n processingLevel = solrJson['response']['docs'][0]['Dataset-ProcessingLevel-Full'][0]\n \n if processingLevel is not None and processingLevel.find('2') != -1:\n if self._configuration.get('service', 'bbox') == 'l2':\n #Call Matt's L2 Search Service\n #raise Exception(self._configuration.get('service', 'l2')+'?'+requestHandler.request.query)\n httpUtility = HttpUtility()\n url = self._configuration.get('service', 'l2') + '?'\n if 'format' not in self.requestHandler.request.arguments:\n url += 'format=atom&'\n url += self.requestHandler.request.query\n logging.debug(\"Calling L2 Service: \" + url)\n result = httpUtility.getResponse(url, self._onL2Response)\n else:\n points = self.variables['bbox'].split(',')\n if len(points) == 4:\n spatialSearch = SpatialSearch(\n self._configuration.get('service', 'database')\n )\n spatialResult = spatialSearch.searchGranules(\n int(self.startIndex),\n int(self.entriesPerPage),\n float(points[0]),\n float(points[1]),\n float(points[2]),\n float(points[3])\n )\n logging.debug(\"Granule spatial search returned\")\n #if len(spatialResult[0]) > 0:\n self.variables['granuleIds'] = spatialResult[0]\n self.variables['granuleIdsFound'] = spatialResult[1]\n \n del self.variables['bbox']\n solrJson = {'responseHeader': {'params': {}}, 'response': {}}\n solrJson['response']['numFound'] = int(self.variables['granuleIdsFound'])\n solrJson['response']['start'] = int(self.startIndex)\n solrJson['responseHeader']['params']['rows'] = int(self.entriesPerPage)\n solrJson['response']['docs'] = []\n for name in self.variables['granuleIds']:\n solrJson['response']['docs'].append({'Granule-Name': [name]})\n solrResponse = json.dumps(solrJson)\n \n searchText = ''\n if 'keyword' in self.variables:\n searchText = self.variables['keyword']\n openSearchResponse = self._generateOpenSearchResponse(\n solrResponse,\n searchText,\n self._configuration.get('service', 'url')+self.requestHandler.request.path,\n self.searchParameters,\n self.pretty\n )\n \n self.requestHandler.set_header(\"Content-Type\", \"application/xml\")\n #requestHandler.set_header(\"Content-Type\", \"application/rss+xml\")\n #requestHandler.write(solrResponse)\n self.requestHandler.write(openSearchResponse)\n self.requestHandler.finish()\n else:\n #Dataset is not an L2 dataset so handle search via Solr\n try:\n self._getSolrResponse(self.startIndex, self.entriesPerPage, self.variables)\n except:\n logging.exception('Failed to get solr response.')\n else:\n #Not a bounding box search so handle search via Solr\n try:\n self._getSolrResponse(self.startIndex, self.entriesPerPage, self.variables)\n except:\n logging.exception('Failed to get solr response.')\n else:\n #Dataset metadata cannot be retreived so return empty search result\n solrJson = 
{'responseHeader': {'params': {}}, 'response': {}}\n solrJson['response']['numFound'] = 0\n solrJson['response']['start'] = int(self.startIndex)\n solrJson['responseHeader']['params']['rows'] = int(self.entriesPerPage)\n solrJson['response']['docs'] = []\n solrResponse = json.dumps(solrJson)\n \n self._writeResponse(solrResponse)\n except BaseException as exception:\n logging.exception('Failed to determine dataset processing level for bbox search ' + str(exception))\n self._handleException(str(exception))\n\n def _onL2Response(self, response):\n if response.error:\n self._handleException(str(response.error))\n else:\n try:\n logging.debug('header: Content-Type '+response.headers['Content-Type'])\n self.requestHandler.set_header('Content-Type', response.headers['Content-Type'])\n logging.debug('header: Content-Length '+response.headers['Content-Length'])\n self.requestHandler.set_header('Content-Length', response.headers['Content-Length'])\n except:\n pass\n self.requestHandler.write(response.body)\n self.requestHandler.finish()\n \n","repo_name":"dataplumber/edge","sub_path":"src/main/python/libraries/edge/opensearch/granulewriter.py","file_name":"granulewriter.py","file_ext":"py","file_size_in_byte":12082,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"76"} +{"seq_id":"71627642484","text":"from matplotlib import pyplot as plt\n\nimport numpy as np\nimport os\nimport tda\n\n\ndef poisson_pp(intensity=100, width=1, dim_pcd=2):\n num_poisson = np.random.poisson(intensity)\n x = np.random.uniform(0, width, num_poisson)\n y = np.random.uniform(0, width, num_poisson)\n mat = np.c_[x, y]\n if dim_pcd == 3:\n z = np.random.uniform(0, width, num_poisson)\n mat = np.c_[mat, z]\n return mat\n\n\ndef matern_one_pp(pcd, distance=0.1):\n num_points = pcd.shape[0]\n list_thin = []\n for i in range(num_points):\n for j in range(num_points):\n r = np.linalg.norm(pcd[i] - pcd[j])\n if 0 < r < distance:\n list_thin.append(i)\n break\n return np.delete(pcd, list_thin, 0)\n\n\ndef matern_two_pp(pcd, distance=0.1):\n num_points = pcd.shape[0]\n vec_weight = np.random.uniform(0, 1, num_points)\n\n list_thin = []\n for i in range(num_points):\n for j in range(num_points):\n r = np.linalg.norm(pcd[i] - pcd[j])\n t = vec_weight[i] - vec_weight[j]\n if 0 < r < distance and t < 0:\n list_thin.append(i)\n break\n\n return np.delete(pcd, list_thin, 0)\n\n\ndef save_points(list_lat, width, name_save):\n num_lattice = len(list_lat)\n plt.figure()\n for k in range(num_lattice):\n point = list_lat[k]\n plt.plot(point[0], point[1], \"bo\")\n plt.xlim(-0.1 * width, 1.1 * width)\n plt.ylim(-0.1 * width, 1.1 * width)\n plt.savefig(name_save)\n plt.close()\n\n\nNAME_DIR = \"../data\"\nif not os.path.exists(NAME_DIR):\n os.mkdir(NAME_DIR)\nNAME_DIR += \"/matern\"\nif not os.path.exists(NAME_DIR):\n os.mkdir(NAME_DIR)\n\nmain = [True, False][0]\nplot = [True, False][0]\nCONST_PCD = 2\nCONST_LAMBDA = 100\nCONST_WIDTH = 1\nCONST_DISTANCE = 0.05\n\nif main:\n CONST_IID = 100\n\n # make directory to save point sets as txt file\n name_parameter = \"pcd%s_lambda%s_width%s_distance%s_iid%s\" % (\n CONST_PCD, CONST_LAMBDA, CONST_WIDTH, \"%03d\" % (CONST_DISTANCE * 100),\n CONST_IID)\n\n for temp_type in range(3):\n name_dir_data = \"%s/%s_type_%s\" % (NAME_DIR, name_parameter, temp_type)\n if not os.path.exists(name_dir_data):\n os.mkdir(name_dir_data)\n name_dir_pcd = \"%s/pcd_pd\" % name_dir_data\n if not os.path.exists(name_dir_pcd):\n os.mkdir(name_dir_pcd)\n\n # generate point sets\n 
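# [Editor's note] matern_one_pp and matern_two_pp above compare every pair of
# points, which is O(n^2). The same type-I thinning can be done with a KD-tree
# neighbour query; this optional sketch assumes scipy is available (the
# original script does not import it):
import numpy as np
from scipy.spatial import cKDTree

def matern_one_pp_kdtree(pcd, distance=0.1):
    # query_pairs uses distance <= r while the loops above use strict <; for
    # continuous uniform coordinates the difference has probability zero
    kill = set()
    for i, j in cKDTree(pcd).query_pairs(r=distance):
        kill.update((i, j))
    return np.delete(pcd, sorted(kill), 0)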
for temp_iid in range(CONST_IID):\n print(\"%s\" % temp_iid)\n pcd_poisson = poisson_pp(CONST_LAMBDA, CONST_WIDTH)\n pcd_matern_one = matern_one_pp(pcd_poisson, CONST_DISTANCE)\n pcd_matern_two = matern_two_pp(pcd_poisson, CONST_DISTANCE)\n\n np.savetxt(\"%s/%s_type_0/pcd_pd/pcd_%s.txt\" % (\n NAME_DIR, name_parameter, temp_iid), np.asarray(pcd_poisson),\n delimiter='\\t')\n np.savetxt(\"%s/%s_type_1/pcd_pd/pcd_%s.txt\" % (\n NAME_DIR, name_parameter, temp_iid), np.asarray(pcd_matern_one),\n delimiter='\\t')\n np.savetxt(\"%s/%s_type_2/pcd_pd/pcd_%s.txt\" % (\n NAME_DIR, name_parameter, temp_iid), np.asarray(pcd_matern_two),\n delimiter='\\t')\n\nif plot:\n CONST_IID = 10\n\n # make directory to plot point sets as png file\n name_parameter = \"pcd%s_lambda%s_width%s_distance%s_iid%s\" % (\n CONST_PCD, CONST_LAMBDA, CONST_WIDTH, \"%03d\" % (CONST_DISTANCE * 100),\n CONST_IID)\n name_dir_png = \"%s/plot_point_%s\" % (NAME_DIR, name_parameter)\n tda.os_mkdir(name_dir_png)\n\n # plot point sets\n for temp_iid in range(CONST_IID):\n print(\"%s\" % temp_iid)\n pcd_poisson = poisson_pp(CONST_LAMBDA, CONST_WIDTH)\n pcd_matern_one = matern_one_pp(pcd_poisson, CONST_DISTANCE)\n pcd_matern_two = matern_two_pp(pcd_poisson, CONST_DISTANCE)\n\n save_points(pcd_poisson, CONST_WIDTH, \"%s/%s_%s.png\" % (\n name_dir_png, \"type_0\", temp_iid))\n save_points(pcd_matern_one, CONST_WIDTH, \"%s/%s_%s.png\" % (\n name_dir_png, \"type_1\", temp_iid))\n save_points(pcd_matern_two, CONST_WIDTH, \"%s/%s_%s.png\" % (\n name_dir_png, \"type_2\", temp_iid))\n","repo_name":"genki-kusano/python-pwgk","sub_path":"code/make_matern.py","file_name":"make_matern.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"70498415927","text":"import logging\n\nimport pandas as pd\nimport pymongo\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filemode='w')\n\n\nclass dataset_analyser:\n def __init__(self):\n self.analyze = CountVectorizer(min_df=1, strip_accents='unicode', stop_words='english').build_analyzer()\n # self.word2vec = word2vec_gensim()\n\n def dblp_sentences(self, file):\n # file = '../data/DBLP1.csv'\n df = pd.read_csv(file, encoding='iso-8859-1')\n df = df.fillna('')\n\n df['sent'] = df['authors'] + ' ' + df['title'] + ' ' + df['venue']\n sents = df['sent'].tolist()\n sents = [self.analyze(sent) for sent in sents]\n return sents\n\n def dblp_sent(self, df):\n # file = '../data/DBLP1.csv'\n # df = pd.read_csv(file, encoding='iso-8859-1')\n df = df.fillna('')\n\n df['sent'] = df['authors'] + ' ' + df['title'] + ' ' + df['venue']\n sents = df['sent'].tolist()\n sents = [self.analyze(sent) for sent in sents]\n # df['sent'] = sents\n return sents\n\n def scholar_sentences(self, file):\n # file = '../data/Scholar.csv'\n df = pd.read_csv(file, encoding='iso-8859-1')\n df = df.fillna('')\n\n df['sent'] = df['authors'] + ' ' + df['title'] + ' ' + df['venue']\n sents = df['sent'].tolist()\n sents = [self.analyze(sent) for sent in sents]\n # df['sent'] = sents\n return sents\n\n def scholar_sent(self, df):\n # file = '../data/Scholar.csv'\n # df = pd.read_csv(file, encoding='iso-8859-1')\n df = df.fillna('')\n\n df['sent'] = df['authors'] + ' ' + df['title'] + ' ' + df['venue']\n sents = df['sent'].tolist()\n sents = [self.analyze(sent) for sent in sents]\n 
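# [Editor's sketch] build_analyzer() used by dataset_analyser above returns the
# callable that CountVectorizer would apply per document: lowercasing, \w\w+
# tokenisation and English stop-word removal. Quick standalone check of what
# the `sents` lists contain (sample string invented here):
from sklearn.feature_extraction.text import CountVectorizer

analyze = CountVectorizer(min_df=1, strip_accents='unicode',
                          stop_words='english').build_analyzer()
print(analyze('J Smith Entity Resolution The VLDB Journal'))
# -> ['smith', 'entity', 'resolution', 'vldb', 'journal']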
# df['sent'] = sents\n        return sents\n\n    def dblp_traning_sentences(self):\n        client = pymongo.MongoClient('localhost', 27017)\n        db = client['dblp']\n        tbl = db.get_collection('dblp')\n\n        data = pd.DataFrame(list(tbl.find()))\n        data.pop('_id')\n        data = data.fillna('')\n\n        data['sent'] = data['author'] + ' ' + data['title'] + ' ' + data['journal']\n\n        sents = data['sent'].tolist()\n        sents = [self.analyze(sent) for sent in sents]\n        print(sents[0])\n        return sents\n\n\nif __name__ == \"__main__\":\n    file = '../data/DBLP1.csv'\n\n    u = dataset_analyser()\n\n    # print(len(scholar_recs2sent(file)))\n    # print(len(dblp_traning_rec2sent()))\n\n    # print(u.().nonzero())\n\n    pass\n","repo_name":"fanf6g/erqg","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72181315446","text":"from imdb import *\nimport random\nimport argparse\n\ndef parse_args():\n\tap = argparse.ArgumentParser()\n\tap.add_argument('-trm', '--top-rated-movies', action='store_const', const=Scraper.TOP_RATED_MOVIES_URL, dest='choice', help='Choose this argument if you would rather be recommended one of the top rated movies')\n\tap.add_argument('-trs', '--top-rated-shows', action='store_const', const=Scraper.TOP_RATED_SHOWS_URL, dest='choice', help='Choose this argument if you would rather be recommended one of the top rated shows')\n\tap.add_argument('-pm', '--popular-movies', action='store_const', const=Scraper.MOST_POPULAR_MOVIES_URL, dest='choice', help='Choose this argument if you would rather be recommended one of the most popular movies')\n\tap.add_argument('-ps', '--popular-shows', action='store_const', const=Scraper.MOST_POPULAR_SHOWS_URL, dest='choice', help='Choose this argument if you would rather be recommended one of the most popular shows')\n\treturn vars(ap.parse_args())\n\ndef main(arg):\n\tsome = Scraper.get(IMDBBase, arg)\n\trand = random.choice(some)\n\tprint(rand.details())\n\nif __name__ == '__main__':\n\ttry:\n\t\targ = parse_args()['choice']\n\t\tmain(arg)\n\texcept Exception as ex:\n\t\tprint(ex)","repo_name":"glbessa/IMDbWebScraping","sub_path":"aleatorio_imdb.py","file_name":"aleatorio_imdb.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"27543751242","text":"class Node:\n    def __init__(self, node_id, node_name, node_type):\n        self.node_id = node_id\n        self.name = node_name\n        self.type = node_type\n        self.attributes = {}\n        self.children = []\n\n    def __eq__(self, other):\n        \"\"\"Override the default Equals behavior\"\"\"\n        if isinstance(other, self.__class__):\n            return self.node_id == other.node_id\n        return False\n","repo_name":"jasmina94/ExPreSsiVeNess","sub_path":"django_project/core_app/core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"26181659050","text":"\n\n# 3rd party imports\nimport guitarpro\n\nimport parser\n\n\n# updated the gp5_song....\ngp5_file_1 = \"./gp5files/test_scores/test_write_gp5_1.gp5\"\ngp5song_1 = guitarpro.parse(gp5_file_1)\n\ngp5_file_2 = \"./gp5files/test_scores/test_write_gp5_2.gp5\"\ngp5song_2 = guitarpro.parse(gp5_file_2)\napi_song_2 = parser.API.get_functions.get_song_data(gp5song_2)\n\nparser.API.write_functions.api_to_gp5(api_song_2[0], gp5song_1)\n\nguitarpro.write(gp5song_1, 
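# [Editor's sketch] The IMDb parser above maps several alternative flags onto a
# single destination via action='store_const': whichever flag is given sets
# `choice`, and `choice` stays None when no flag is passed. Standalone
# illustration (const values are placeholders, not the real Scraper URLs):
import argparse

p = argparse.ArgumentParser()
p.add_argument('-trm', action='store_const', const='TOP_RATED_MOVIES', dest='choice')
p.add_argument('-pm', action='store_const', const='POPULAR_MOVIES', dest='choice')
print(p.parse_args(['-trm']).choice)  # -> TOP_RATED_MOVIES
print(p.parse_args([]).choice)        # -> None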
\"./gp5files/test_scores/test_write_gp5_1_revised.gp5\")\n\n\ngp5_file = \"./gp5files/test_scores/tied_hammer-ons.gp5\"\ngp5song = guitarpro.parse(gp5_file)\napi_song_tied_hammers = parser.API.get_functions.get_song_data(gp5song)\n\n\ngp5_file = \"./gp5files/test_scores/tied_hammer-ons_no_hammer.gp5\"\ngp5song_no_hammer = guitarpro.parse(gp5_file)\napi_no_hammers = parser.API.get_functions.get_song_data(gp5song_no_hammer)\n\nparser.API.write_functions.api_to_gp5(api_song_tied_hammers[0], gp5song_no_hammer)\nguitarpro.write(gp5song, \"./gp5files/test_scores/tied_hammers_revised.gp5\")\n\ngp5_file_2 = \"./gp5files/test_scores/test_write_gp5_2.gp5\"\ngp5song_2 = guitarpro.parse(gp5_file_2)\napi_song_json = parser.API.get_functions.get_song_data(gp5song_2)\n\nparser.API.write_functions.api_to_json(api_song_json)\n","repo_name":"callumgoddard/Adorn-o","sub_path":"Adorn_o/test_write_functions.py","file_name":"test_write_functions.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"20160973540","text":"import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nsys.path.insert(0, \"../common\")\nfrom utils import show_boxplots\n\nif len(sys.argv) < 2:\n print(\"Need to specify the directory containing the input data. Exiting.\")\n sys.exit(1)\ninput_dir = sys.argv[1] # e.g., ../../../data/\nif len(sys.argv) < 3:\n print(\"Need to specify the directory containing the output data. Exiting.\")\n sys.exit(1)\nbase_output_dir = os.path.join(sys.argv[2], \"rq1\") # e.g., ../../../results/\n\ninput_filepath = os.path.join(input_dir, \"rq1_input.csv\")\ntop_25_filepath = os.path.join(input_dir, \"top25cwe.csv\")\n\ndf_vccs = pd.read_csv(input_filepath, delimiter=\",\")\ntop_25 = pd.read_csv(top_25_filepath)[[\"cwe\", \"position\"]]\n\ndef run_analysis(output_dir, df_vccs, top_25):\n output_filepath = os.path.join(output_dir, \"output.txt\")\n\n vccs_per_cve_overall_summary_filepath = os.path.join(output_dir, \"overall/vccs_per_cve.csv\")\n vccs_ratio_per_project_overall_summary_filepath = os.path.join(output_dir, \"overall/vccs_ratio_per_project.csv\")\n insertion_window_per_cve_overall_summary_filepath = os.path.join(output_dir, \"overall/insertion_window_per_cve.csv\")\n vccs_per_cve_cwe_summary_filepath = os.path.join(output_dir, \"cwes/vccs_per_cve.csv\")\n vccs_per_cve_cwe_boxplots_filepath = os.path.join(output_dir, \"cwes/boxplots/vccs_per_cve_cwe_{}.pdf\")\n\n files_per_vcc_overall_summary_filepath = os.path.join(output_dir, \"overall/files_per_vcc.csv\")\n added_lines_per_vcc_overall_summary_filepath = os.path.join(output_dir, \"overall/added_lines_per_vcc.csv\")\n removed_lines_per_vcc_overall_summary_filepath = os.path.join(output_dir, \"overall/removed_lines_per_vcc.csv\")\n files_per_vcc_cwe_summary_filepath = os.path.join(output_dir, \"cwes/files_per_vcc.csv\")\n files_per_vcc_cwe_boxplots_filepath = os.path.join(output_dir, \"cwes/boxplots/files_per_vcc_cwe_{}.pdf\")\n files_previous_changes_first_vcc_summary_filepath = os.path.join(output_dir, \"overall/files_previous_changes_first_vcc.csv\")\n files_previous_changes_last_vcc_summary_filepath = os.path.join(output_dir, \"overall/files_previous_changes_last_vcc.csv\")\n\n df_vccs_top_25 = df_vccs[df_vccs[\"cwe\"].isin(top_25[\"cwe\"])]\n cve_groups = df_vccs.groupby(\"cve\", sort=False)\n project_groups = df_vccs.groupby(\"repo\", sort=False)\n vcc_groups = df_vccs.groupby([\"repo\", \"hash\"], 
sort=False)\n\n # General Profiling\n num_vccs = df_vccs.groupby([\"repo\", \"hash\"]).size().count()\n num_repos = len(project_groups)\n num_cwes = df_vccs[\"cwe\"].nunique()\n num_files = df_vccs.groupby([\"repo\", \"file\"]).size().count()\n\n # Part 1: VCCs per CVE\n\n ## (Overall) Summary\n num_cves = len(cve_groups)\n vccs_per_cve = cve_groups[\"hash\"].nunique()\n vccs_per_cve.describe().to_csv(vccs_per_cve_overall_summary_filepath)\n\n ## (Overall) VCCs Ratio per Project\n project_num_vccs = project_groups.size().rename(\"vccs\").to_frame().reset_index()\n project_num_commits = project_groups.first()[\"repo_commits\"].to_frame().reset_index()\n projects = project_num_vccs.merge(project_num_commits, on=\"repo\")\n projects[\"vccs_ratio\"] = projects[\"vccs\"] / projects[\"repo_commits\"]\n projects.describe().to_csv(vccs_ratio_per_project_overall_summary_filepath)\n\n ## (Overall) CVEs with > 1 VCCs\n cve_groups_many_vccs = cve_groups.filter(lambda g: g[\"hash\"].nunique() > 1).groupby(\"cve\", sort=False)\n cve_with_many_vccs_count = len(cve_groups_many_vccs)\n cve_with_many_vccs_perc = cve_with_many_vccs_count / num_cves * 100\n\n ## (Overall) Insertion window analysis\n first_vccs = cve_groups_many_vccs[\"date\"].min().to_frame().reset_index().rename(columns={\"date\": \"first_date\"})\n last_vccs = cve_groups_many_vccs[\"date\"].max().to_frame().reset_index().rename(columns={\"date\": \"last_date\"})\n cves_windows = first_vccs.merge(last_vccs, on=\"cve\")\n cves_windows[[\"first_date\", \"last_date\"]] = cves_windows[[\"first_date\", \"last_date\"]].apply(pd.to_datetime)\n cves_windows[\"days_distance\"] = (cves_windows[\"last_date\"] - cves_windows[\"first_date\"]).dt.days\n window_duration_mean = cves_windows[\"days_distance\"].mean()\n cves_windows[\"days_distance\"].describe().to_csv(insertion_window_per_cve_overall_summary_filepath)\n\n ## (Overall) CVE w/ largest number of VCCs\n vccs_per_cve_idxmax = vccs_per_cve.idxmax()\n vccs_per_cve_max = vccs_per_cve.max()\n cve_max_vccs = cve_groups.get_group(vccs_per_cve_idxmax)\n cve_max_vccs_files_count = cve_max_vccs[\"file\"].nunique()\n cve_max_vccs_added_lines = cve_max_vccs[\"added_lines\"].sum()\n cve_max_vccs_removed_lines = cve_max_vccs[\"removed_lines\"].sum()\n cve_max_vccs_window_duration = cves_windows.loc[cves_windows[\"cve\"] == vccs_per_cve_idxmax][\"days_distance\"].values[0]\n\n ## (Overall) CVEs w/ shortest and largest insertion window\n cves_windows_max = cves_windows.loc[cves_windows[\"days_distance\"].idxmax()]\n cves_windows_max_cve = cves_windows_max[\"cve\"]\n cves_windows_max_duration = cves_windows_max[\"days_distance\"]\n cves_windows_min = cves_windows.loc[cves_windows[\"days_distance\"].idxmin()]\n cves_windows_min_cve = cves_windows_min[\"cve\"]\n cves_windows_min_duration = cves_windows_min[\"days_distance\"]\n\n ## (Per CWEs) Summary and Boxplots\n\n ### Makes the \"double\" groupby and then orders according to the Top 25 order\n vccs_per_cve_cwe = df_vccs_top_25 \\\n .groupby(\"cwe\", sort=False)[[\"hash\", \"cve\"]] \\\n .apply(lambda x: x.groupby(\"cve\", sort=False).nunique()) \\\n .reset_index() \\\n .merge(top_25, how=\"left\", on=\"cwe\") \\\n .sort_values(\"position\", ignore_index=True) \\\n .drop(columns=[\"cve\", \"position\"])\n vccs_per_cve_cwe.groupby(\"cwe\", sort=False)[\"hash\"].describe().to_csv(vccs_per_cve_cwe_summary_filepath)\n show_boxplots(vccs_per_cve_cwe, \"cwe\", \"hash\", \"VCCs per CVE\", vccs_per_cve_cwe_boxplots_filepath, log=True)\n\n # Part 2: Touched Files per 
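# [Editor's sketch] The nested groupby/apply above computes, per CWE, how many
# distinct VCC hashes each CVE has. The same nesting on invented toy data:
import pandas as pd

toy = pd.DataFrame({'cwe': ['79', '79', '89'],
                    'cve': ['CVE-1', 'CVE-1', 'CVE-2'],
                    'hash': ['a', 'b', 'c']})
per_cwe = toy.groupby('cwe', sort=False)[['hash', 'cve']] \
             .apply(lambda x: x.groupby('cve', sort=False).nunique())
# per_cwe reports 2 distinct hashes for CVE-1 (cwe 79) and 1 for CVE-2 (cwe 89)
print(per_cwe)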
VCC\n\n ## (Overall) Summary\n files_per_vcc = vcc_groups[\"file\"].nunique()\n files_per_vcc.describe().to_csv(files_per_vcc_overall_summary_filepath)\n\n ## (Overall) Code Churn\n added_lines_per_vcc = vcc_groups[\"added_lines\"].sum()\n added_lines_per_vcc.describe().to_csv(added_lines_per_vcc_overall_summary_filepath)\n removed_lines_per_vcc = vcc_groups[\"removed_lines\"].sum()\n removed_lines_per_vcc.describe().to_csv(removed_lines_per_vcc_overall_summary_filepath)\n\n ## (Overall) VCCs touching > 10 files\n vccs_more_ten_files_count = files_per_vcc[files_per_vcc > 10].count()\n vccs_more_ten_files_perc = vccs_more_ten_files_count / num_vccs * 100\n\n ## (Overall) VCC w/ largest number of Files\n files_per_vcc_idxmax = files_per_vcc.idxmax()\n files_per_vcc_idxmax_repo = files_per_vcc_idxmax[0]\n files_per_vcc_idxmax_hash = files_per_vcc_idxmax[1]\n files_per_vcc_max = files_per_vcc.max()\n vcc_max_files = vcc_groups.get_group(files_per_vcc_idxmax)\n vcc_max_files_count = vcc_max_files[\"file\"].nunique()\n vcc_max_cve_counts = vcc_max_files[\"cve\"].value_counts()\n vcc_max_files_added_lines = vcc_max_files[\"added_lines\"].iloc[0]\n vcc_max_files_removed_lines = vcc_max_files[\"removed_lines\"].iloc[0]\n\n ## (Per CWEs) Summary and Boxplots\n files_per_vcc_cwe = df_vccs_top_25 \\\n .groupby(\"cwe\", sort=False)[[\"repo\", \"hash\", \"file\"]] \\\n .apply(lambda x: x.groupby([\"repo\", \"hash\"], sort=False).nunique()) \\\n .reset_index() \\\n .merge(top_25, how=\"left\", on=\"cwe\") \\\n .sort_values(\"position\", ignore_index=True) \\\n .drop(columns=[\"repo\", \"hash\", \"position\"])\n files_per_vcc_cwe.groupby(\"cwe\", sort=False)[\"file\"].describe().to_csv(files_per_vcc_cwe_summary_filepath)\n show_boxplots(files_per_vcc_cwe, \"cwe\", \"file\", \"Touched Files per VCC\", files_per_vcc_cwe_boxplots_filepath, log=True)\n\n # Part 3: Number of Previous Changes for files in First/Last VCC\n\n ## (Overall) Summary\n\n df_vccs_first = df_vccs[cve_groups[\"date\"].transform(min) == df_vccs[\"date\"]]\n previous_changes_vccs_first = df_vccs_first[\"previous_changes\"]\n df_vccs_last = df_vccs[cve_groups[\"date\"].transform(max) == df_vccs[\"date\"]]\n previous_changes_vccs_last = df_vccs_last[\"previous_changes\"]\n previous_changes_vccs_first.describe().to_csv(files_previous_changes_first_vcc_summary_filepath)\n previous_changes_vccs_last.describe().to_csv(files_previous_changes_last_vcc_summary_filepath)\n\n # Part 4: Files that were created in a VCC (i.e., previous_changes == 0) + Files that were created in a VCC that is the sole the CVE\n files_created_vcc = df_vccs.loc[df_vccs[\"previous_changes\"] == 0, [\"repo\", \"file\"]].drop_duplicates(ignore_index=True)\n files_cves_one_vcc = cve_groups.filter(lambda x: len(x) == 1)[[\"repo\", \"file\"]].drop_duplicates(ignore_index=True)\n files_cves_one_vcc_created = pd.merge(files_created_vcc, files_cves_one_vcc, on=[\"repo\", \"file\"])\n files_created_vcc_count = len(files_created_vcc)\n files_cves_one_vcc_created_count = len(files_cves_one_vcc_created)\n\n # Part 5: Files that were changed > 500 times before VCC\n files_first_vcc = df_vccs[df_vccs.groupby([\"repo\", \"file\"], sort=False)[\"previous_changes\"].transform(min) == df_vccs[\"previous_changes\"]]\n files_over_500 = files_first_vcc.loc[files_first_vcc[\"previous_changes\"] > 500, [\"repo\", \"file\", \"previous_changes\"]].sort_values(by=\"previous_changes\", ascending=False, ignore_index=True)\n files_over_500_count = len(files_over_500)\n\n with open(output_filepath, 
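# [Editor's sketch] The transform(min) == column comparison in Part 3 above
# selects, within every CVE group, the row(s) carrying the group's earliest
# date without collapsing the frame the way .min() plus a merge would.
# Toy illustration (data invented here):
import pandas as pd

toy = pd.DataFrame({'cve': ['A', 'A', 'B'],
                    'date': ['2010', '2012', '2011'],
                    'hash': ['h1', 'h2', 'h3']})
earliest = toy[toy.groupby('cve')['date'].transform(min) == toy['date']]
print(earliest['hash'].tolist())  # -> ['h1', 'h3']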
\"w\") as output_file:\n print(f\"RQ1 Context:\", file=output_file)\n print(f\"* CVEs: {num_cves}\", file=output_file)\n print(f\"* VCCs: {num_vccs}\", file=output_file)\n print(f\"* Repos: {num_repos}\", file=output_file)\n print(f\"* CWEs: {num_cwes}\", file=output_file)\n print(f\"* Files (distinct): {num_files}\", file=output_file)\n print(file=output_file)\n print(f\"RQ1 Main Results:\", file=output_file)\n print(f\"* CVEs with > 1 VCC: {cve_with_many_vccs_count}/{num_cves} ({round(cve_with_many_vccs_perc, 2)}%)\", file=output_file)\n print(f\"* Mean Nr. Days between first and last VCCs: {round(window_duration_mean, 3)} ({round(window_duration_mean/365, 3)} years)\", file=output_file)\n print(f\"* {vccs_per_cve_idxmax} has the largest number of VCCs: {vccs_per_cve_max}. Considering all VCCs:\", file=output_file)\n print(f\" - Touched a total of {cve_max_vccs_files_count} valid files\", file=output_file)\n print(f\" - Added a total of {cve_max_vccs_added_lines} lines\", file=output_file)\n print(f\" - Removed a total of {cve_max_vccs_removed_lines} lines\", file=output_file)\n print(f\" - Taking {cve_max_vccs_window_duration} days ({round(cve_max_vccs_window_duration/365, 3)} years) between first and last VCCs\", file=output_file)\n print(f\"* {cves_windows_max_cve} has the largest insertion window: {cves_windows_max_duration} days ({round(cves_windows_max_duration/365, 3)} years)\", file=output_file)\n print(f\"* {cves_windows_min_cve} has the shortest insertion window: {cves_windows_min_duration} days ({round(cves_windows_min_duration/365, 3)} years)\", file=output_file)\n print(file=output_file)\n print(f\"* VCCs that touched > 10 files: {vccs_more_ten_files_count}/{num_vccs} ({round(vccs_more_ten_files_perc, 2)}%)\", file=output_file)\n print(f\"* VCC {files_per_vcc_idxmax_hash} of {files_per_vcc_idxmax_repo} touched the largest number of valid files: {files_per_vcc_max}. 
Specifically:\", file=output_file)\n print(f\" - Contributed to: {vcc_max_cve_counts.index.values}\", file=output_file) \n print(f\" - Touched {vcc_max_files_count} valid files\", file=output_file)\n print(f\" - Added {vcc_max_files_added_lines} lines\", file=output_file)\n print(f\" - Removed {vcc_max_files_removed_lines} lines\", file=output_file)\n print(f\"* Files created within a VCC: {files_created_vcc_count}/{num_files} ({round(files_created_vcc_count / num_files * 100, 2)}%)\", file=output_file)\n print(f\" - Files created in \\\"single-VCC\\\" vulnerabilities: {files_cves_one_vcc_created_count}/{files_created_vcc_count} ({round(files_cves_one_vcc_created_count / files_created_vcc_count * 100, 2)}%)\", file=output_file)\n print(f\"* Files that started becoming vulnerable after >= 500 commits: {files_over_500_count}/{num_files} ({round(files_over_500_count / num_files * 100, 2)}%)\", file=output_file)\n\nrun_analysis(os.path.join(base_output_dir, \"main\"), df_vccs, top_25)\n\n# Drop outliers: VCCS whose CVE has lots of VCCS\ncve_groups = df_vccs.groupby(\"cve\", sort=False)\nvccs_per_cve = cve_groups[\"hash\"].nunique()\nlarge_cves = vccs_per_cve.loc[np.abs(stats.zscore(vccs_per_cve)) > 3].index.tolist()\ndf_vccs_no_large_cves = df_vccs.loc[~df_vccs[\"cve\"].isin(large_cves)]\nrun_analysis(os.path.join(base_output_dir, \"no_large_cves\"), df_vccs_no_large_cves, top_25)\n\n# Drop outliers: VCCS having touched lots of files\nvcc_groups = df_vccs.groupby([\"repo\", \"hash\"], sort=False)\nfiles_per_vcc = vcc_groups[\"file\"].nunique()\nlarge_vccs = files_per_vcc.loc[np.abs(stats.zscore(files_per_vcc)) > 3].index.tolist()\nlarge_vccs = [lv[0] + \"/commit/\" + lv[1] for lv in large_vccs]\ntmp_df_vccs = df_vccs.copy()\ntmp_df_vccs[\"vcc\"] = df_vccs[\"repo\"] + \"/commit/\" + df_vccs[\"hash\"]\ndf_vccs_no_large_vccs = tmp_df_vccs.loc[~tmp_df_vccs[\"vcc\"].isin(large_vccs)]\nrun_analysis(os.path.join(base_output_dir, \"no_large_vccs\"), df_vccs_no_large_vccs, top_25)\n","repo_name":"sesalab/OnlineAppendices","sub_path":"TSE21-VulnerabilityLifecycle/scripts/secretlife/data_analysis/rq1.py","file_name":"rq1.py","file_ext":"py","file_size_in_byte":13496,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"482926827","text":"import numpy as np\nfrom corner import corner\nfrom chi2 import likelihood_func_LambdaCDM, likelihood_func_wCDM\n\nLambdaCDM_Omega_M = np.linspace(0.0, 0.5, 30)\nLambdaCDM_Omega_Lambda = np.linspace(0.4, 1.1, 30)\nwCDM_Omega_M = np.linspace(0.0, 0.5, 30)\nwCDM_w = np.linspace(-1.75, -0.5, 30)\n\n\ndef draw(par1, par2, lf, figurename, range, labels):\n pos = []\n likelihoods = []\n for om in par1:\n for ol in par2:\n pos.append([om, ol])\n likelihoods.append(lf([om, ol]))\n\n pos = np.array(pos)\n likelihoods = np.exp(np.array(likelihoods) - max(likelihoods))\n\n figure = corner(pos,bins=30, range=range, weights=likelihoods,\n show_titles=True, levels=(0.68, 0.95, 0.99),\n labels=labels )\n figure.savefig(figurename)\n\ndraw(LambdaCDM_Omega_M, LambdaCDM_Omega_Lambda,\n likelihood_func_LambdaCDM, \"2par_l.pdf\", range=[[0.0,0.5],[0.4,1.1]],\n labels=[r\"$\\Omega_M$\", r\"$\\Omega_\\Lambda$\"])\ndraw(wCDM_Omega_M, wCDM_w, likelihood_func_wCDM,\n \"2par_w.pdf\", range=[[0.0, 0.5],[-1.75, -0.5]],\n 
labels=[r\"$\\Omega_M$\",r\"$w$\"])","repo_name":"HoU-Wa/phy526proj","sub_path":"two_pars.py","file_name":"two_pars.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7283349119","text":"import time\nimport os\nimport json\n\nimport cloudconvert\n\nCREDENTIAL_FILE = '../credentials/cloud_convert.json'\n\nclass CloudConvert():\n def __init__(self):\n self._service = cloudconvert.Api(api_key=self.get_credentials())\n\n def get_credentials(self):\n \"\"\"Read API key from file and return it\"\"\"\n with open(CREDENTIAL_FILE) as credentials:\n data = json.load(credentials)\n api_key = data['API_KEY']\n\n return api_key\n\n def convert_url_to_pdf(self, url):\n process = self._service.convert({\n 'inputformat': 'website',\n 'outputformat': 'pdf',\n 'input': 'url',\n 'file': url\n })\n process.wait()\n\n file_name = '../data/cc_download_{0}.pdf'.format(time.time())\n process.download(localfile=file_name)\n\n return file_name\n\n def delete_file(self, file):\n os.remove(file)\n\ndef main():\n cc_instance = CloudConvert()\n cc_instance.convert_url_to_pdf('https://www.howtographql.com/basics/0-introduction/')\n\nif __name__ == '__main__':\n main()","repo_name":"zpritchett/zapiest","sub_path":"api/CloudConvert.py","file_name":"CloudConvert.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20010306643","text":"from typing import List, Iterator\n\n\nclass HonkLanguage:\n pass # Used to specify different Honk keys\n\n\nHONKMAP = {'h': 0, 'H': 1, 'o': 0, 'O': 1, 'n': 0, 'N': 1, 'k': 0, 'K': 1}\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nalphakey = {}\nfor i in range(26):\n alphakey[i + 1] = alphabet[i] # Map Decimals to letters\n\nrev_alphakey = {}\nfor key in alphakey: # Map letters to numbers\n rev_alphakey[alphakey[key]] = key\n\nneutral_honk = \"honk\"\nEND_OF_WORD = \"knoh\"\n\n\n### Convert Honks to a message\n\ndef _honk_to_decimal(honks: str) -> int:\n \"\"\"\n Precondition: correct Honks\n :param honks: a string denoting honks\n :return: A decimal value\n \"\"\"\n sum = 0\n for i in range(1, len(honks) + 1):\n char = honks[-i]\n power = 2 ** (i - 1)\n sum += HONKMAP[char] * power\n return sum\n\n\ndef _honk_to_letter(honks: str) -> str:\n \"\"\"\n Takes a honk string and converts it to the corresponding letter\n :param honks:\n :return:\n \"\"\"\n return alphakey[_honk_to_decimal(honks)]\n\n\ndef _honks_to_word(honks: str) -> str:\n \"\"\"\n Takes a set of honks representing a word, and converts it to a word.\n :param honks:\n :return:\n \"\"\"\n honk_list = list(map(str.strip, honks.split(\" \")))\n #print(honk_list + [\" yo\"])\n word = \"\"\n for honk in honk_list:\n if len(honk) in (4,8):\n word = word + _honk_to_letter(honk)\n return word\n\n\ndef honks_to_words(honks: str) -> str:\n \"\"\"\n Takes a set of honks representing sentences, and converts it to sentences.\n :param honks:\n :return:\n \"\"\"\n\n honk_words = _splice_honks(honks)\n #print(honk_words)\n sentence = \"\"\n for enc_word in honk_words:\n sentence = sentence + _honks_to_word(enc_word) + \" \"\n #print(sentence)\n return sentence\n\n\ndef _splice_honks(honks: str) -> List[str]:\n \"\"\"\n Splice honks string based on encryption key.\n :param honks:\n :return: An iterator, iterates through each word\n \"\"\"\n words = honks.split(END_OF_WORD)\n return list(map(str.strip, words))\n\n\n### Convert your message to 
Honks\n\ndef _letter_to_binary(letter: str) -> str:\n \"\"\"\n Takes a letter and convert it to its mapped binary value\n :param letter: a character\n :return: a binary string representing the letter\n\n\n\n \"\"\"\n number = rev_alphakey[letter]\n binary_str = str(bin(number))[2:]\n return binary_str\n\n\nbin_to_honk_map = {1: {'0': 'k', '1': 'K'},\n 3: {'0': 'o', '1': 'O'},\n 2: {'0': 'n', '1': 'N'},\n 4: {'0': 'h', '1': 'H'}}\n\n\n##\ndef _bin_to_honk(bin_inc_honk: str) -> str:\n \"\"\"\n :param inc_honk: binary string of length 4 or less\n :return:\n\n >>> _bin_to_honk(\"1000\")\n 'Honk'\n >>> _bin_to_honk(\"0001\")\n 'honK'\n\n \"\"\"\n acc = \"\"\n for i in range(1, len(bin_inc_honk) + 1):\n acc = bin_to_honk_map[i][bin_inc_honk[-i]] + acc\n return acc\n\n\ndef _binary_to_honk(bin_incomplete_honk: str) -> str:\n \"\"\"\n\n :param bin_incomplete_honk: binary string of up to length 8\n :return: Corresponding honk letters.\n\n >>>\n\n \"\"\"\n if len(bin_incomplete_honk) == 0:\n return \"\"\n honk_piece1 = bin_incomplete_honk[:-4]\n honk_piece2 = bin_incomplete_honk[-4:]\n #print(honk_piece1)\n #print(honk_piece2)\n return _bin_to_honk(honk_piece1) + _bin_to_honk(honk_piece2)\n\n\ndef _bin_to_honk_w_fill(bin_incomplete_honk: str) -> str:\n \"\"\"\n\n :param bin_incomplete_honk: a binary string representing a letter.\n :return: the correct corresponding honks\n \"\"\"\n honk_piece = \"\"\n if len(bin_incomplete_honk) < 4: # 0 to 3\n need = 4 - len(bin_incomplete_honk)\n honk_piece = neutral_honk[:need]\n #print(honk_piece)\n elif 4 < len(bin_incomplete_honk) < 8:\n need = 8 - len(bin_incomplete_honk)\n honk_piece = neutral_honk[:need]\n #print(honk_piece)\n rest_of_honk = _binary_to_honk(bin_incomplete_honk)\n #print(rest_of_honk)\n return honk_piece + _binary_to_honk(bin_incomplete_honk)\n\n\ndef letter_to_honk(letter: str) -> str:\n return _bin_to_honk_w_fill(_letter_to_binary(letter))\n\n\ndef word_to_honk(word: str) -> str:\n acc = \"\"\n for char in word:\n acc += letter_to_honk(char) + \" \"\n acc += END_OF_WORD\n return acc\n\n\ndef words_to_honk(words: str) -> str:\n \"\"\"\n\n :param words: words, lowercase only and no punctuation\n :return:\n \"\"\"\n words = list(map(str.strip, words.split(\" \")))\n honk_str = \"\"\n for word in words:\n honk_str += word_to_honk(word) + \" \"\n return honk_str\n\n\n##\n\n\nif __name__ == \"__main__\":\n #print(_honk_to_decimal(\"HONKHONK\"))\n #print(_honk_to_letter(\"honK\") +\n # \" \" + _honk_to_letter(\"hoNK\") + _honk_to_letter(\"honkHONK\")\n # + _honk_to_letter(\"honKhONK\"))\n # print(letter_to_honk(\"z\"))\n #print(letter_to_honk(\"a\"))\n #print(letter_to_honk(\"z\"))\n\n print(words_to_honk(\"cow piss\"))\n print(honks_to_words(words_to_honk(\"cow piss\")))\n print(honks_to_words(words_to_honk(\"how is life my friends\")))\n print(words_to_honk(\"am fine the c videos for two oh nine are super boring though\"))\n","repo_name":"wuemily2/goozebot","sub_path":"honk_encryption.py","file_name":"honk_encryption.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"39738503021","text":"line1 = [\" \",\" \",\" \"]\r\nline2 = [\" \",\" \",\" \"]\r\nline3 = [\" \",\" \",\" \"]\r\nmap = [line1,line2,line3]\r\n\r\nprint(f\"{line1}\\n{line2}\\n{line3}\")\r\nprint(\"Hiding your treasure! X marks the spot.\")\r\n\r\nposition = input(\"Where do you want to hide the treasure? \")\r\n\r\n#Should use abc.index() to get number from letter. 
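# [Editor's sketch] One way to implement the TODO above: map the row letter to
# an index with str.index() instead of int(). This assumes rows are lettered
# A-C and columns numbered 1-3, which the original snippet does not pin down:
def parse_position(position, rows="ABC"):
    x = rows.index(position[0].upper())  # letter -> 0-based row
    y = int(position[1]) - 1             # "1".."3" -> 0-based column
    return x, y

assert parse_position("B3") == (1, 2)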
\r\nx = int(position[0])\r\ny = int(position[1])\r\n\r\nmap[x][y] = \"X\"\r\n\r\nprint(f\"{line1}\\n{line2}\\n{line3}\")","repo_name":"Tamari21/100DaysOfCode","sub_path":"Day 4/ex3.py","file_name":"ex3.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"12514546319","text":"from pytest_bdd import (\n scenarios,\n given,\n then,\n when,\n parsers,\n)\nimport fixtures\n\nscenarios('.')\n\n\ndef userify(txt):\n return txt\n\n\n@given('I have ABN credentials at the DCL web interface')\ndef web_ui():\n return {\n 'url': fixtures.DCL_WRITE_DOMAIN,\n 'user': fixtures.DCL_TEST_USER,\n 'password': fixtures.DCL_TEST_SECRET\n }\n\n\n@when('I authenticate')\ndef authenticate(browser, web_ui):\n \"\"\"I authenticate.\"\"\"\n browser.visit('{}/openid/openid/SimGuard/'.format(web_ui['url']))\n if 'idp.testpoint.io' in browser.url:\n # we have login page\n browser.fill('login', web_ui['user'])\n browser.fill('password', web_ui['password'])\n # warning: assuming user already have logged in single time with this ABN,\n # so Oauth access confirmation doesn't appear\n button = browser.find_by_xpath('//button[@type=\"submit\"]').first\n button.click()\n else:\n # login worked magically (idp remembers us from previous login)\n pass\n\n\n@then(parsers.parse(\"I submit the form\"))\ndef submit_the_form(page_text, browser):\n button = browser.find_by_xpath('//button[@type=\"submit\"]').first\n button.click()\n\n\n@then(parsers.parse(\"I see '{page_text:.}'\"))\n@then(parsers.parse('I see \"{page_text:.}\"'))\ndef see_some_text(page_text, browser):\n page_text = userify(page_text)\n assert browser.is_text_present(page_text), u'Text {} not present on the page {}'.format(\n page_text,\n browser.url\n )\n\n\n@then(parsers.parse(\"I click '{button_name}'\"))\n@then(parsers.parse('I click \"{button_name}\"'))\ndef click_some_button(button_name, browser):\n button_name = userify(button_name)\n button = browser.find_by_xpath(\n \"//*[contains(text(), '{}')]\".format(button_name)\n ).first\n button.click()\n\n\n@then(parsers.parse(\"I fill the '{field}' field by '{value}' value\"))\n@then(parsers.parse('I fill the \"{field}\" field by \"{value}\" value'))\ndef fill_field_with_text(field, value, browser):\n value = userify(value)\n browser.fill(field, value)\n\n\n@then(parsers.parse(\"I go '{new_url}'\"))\n@when(parsers.parse(\"I go '{new_url}'\"))\n@then(parsers.parse('I go \"{new_url}\"'))\n@when(parsers.parse('I go \"{new_url}\"'))\ndef i_go(new_url, browser, web_ui):\n new_url = userify(new_url)\n if new_url.endswith('/'):\n new_url = new_url[:-1]\n browser.visit('{}{}'.format(web_ui['url'], new_url))\n\n# @when('I click the \"confirm\" button')\n# def i_click_the_confirm_button():\n# \"\"\"I click the \"confirm\" button.\"\"\"\n# return\n\n\n# @when('I enter new value in the SMP update form')\n# def i_enter_new_value_in_the_dcp_update_form():\n# \"\"\"I enter new value in the SMP update form.\"\"\"\n# return\n\n\n# @when('click the \"update my SMP\" button')\n# def click_the_update_my_dcp_button():\n# \"\"\"Click the \"update my SMP\" button.\"\"\"\n# return\n\n\n# @when('then I click the \"save\" button')\n# def then_i_click_the_save_button():\n# \"\"\"Then I click the \"save\" button.\"\"\"\n# return\n\n\n# @then('I see \"update my SMP\" button')\n# def i_see_update_my_dcp_button():\n# \"\"\"I see \"\"update my SMP\" button.\"\"\"\n# return\n\n\n# @then('I see \"SMP updated\" message')\n# def 
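# [Editor's note] parsers.parse() in the step definitions above matches step
# text with format-style placeholders. pytest-bdd delegates this to the
# `parse` library; a rough standalone look at the underlying matching:
import parse

result = parse.parse("I see '{page_text}'", "I see 'Welcome back'")
print(result['page_text'])  # -> Welcome back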
i_see_dcp_updated_message():\n#     \"\"\"I see \"SMP updated\" message.\"\"\"\n#     return\n\n\n# @then('I see \"save\" button')\n# def i_see_save_button():\n#     \"\"\"I see \"save\" button.\"\"\"\n#     return\n\n\n# @then('I see the \"confirm\" button')\n# def i_see_the_confirm_button():\n#     \"\"\"I see the \"confirm\" button.\"\"\"\n#     return\n\n\n# @then('I see the SMP update form')\n# def i_see_the_dcp_update_form():\n#     \"\"\"I see the SMP update form.\"\"\"\n#     return\n","repo_name":"test-point/testpoint-dcl","sub_path":"tests/bdd-remote/management_api/test_update_dcp.py","file_name":"test_update_dcp.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"15746667400","text":"from library import *\n\n\"\"\"\nScrape the audio files hosted on 'https://xeno-canto.org' to obtain bird sound recordings\nfor the later data preprocessing and deep learning steps\n\"\"\"\n\n# Set paths and default values\n\npath = './data/鸟类.txt'\n\nname1 = []\nname2 = []\n\n# Open the bird-species txt file\nwith open(path, 'r', encoding='utf-8') as f:\n    # Read the txt file line by line\n    for line in f.readlines():\n        # Split out the bird species name and store it in the lists\n        s = line.split(' ')\n        name1.append(s[0])  # name used for the storage folder\n        name2.append(s[1])  # name needed for the scraper url\n\n# Repeat four times, once per bird species\nfor i in range(0, 4):\n    # Show the species currently being downloaded\n    name_1 = str(name1[i])\n\n    # Set the audio file path\n    audio_path = './data/audio/' + name_1 + '/'\n    if os.path.exists(audio_path):\n        # If the folder exists, pass\n        pass\n    else:\n        # If the folder does not exist, create one named name_1\n        os.mkdir(audio_path)\n\n    # Tidy up the bird name used by the scraper\n    name_2 = name2[i].replace(\"\\n\", \"\")\n    print(name_2)\n\n    try:\n        page(i, name_2, name1, audio_path)  # run the page function\n        print(str(name_2) + \" downloaded successfully\")  # the whole page of audio files has been downloaded\n\n    except IndexError:\n        print('Download complete')\n        # If there is no more data at this index, all audio files have been downloaded\n","repo_name":"liu7388/BirdSound","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9436508512","text":"def sort(array):\n    \"\"\"\n    Best Scenario: Partially ordered arrays and ordered insertions\n    Worst Scenario: Array sorted in reverse order\n    Time complexity:\n        Best: O(n)\n        Average/Worst: O(n^2)\n    Space complexity: O(1)\n    Stable, two equal keys are guaranteed to be in the same order as the input on the output\n    \"\"\"\n    for i, to_insert in enumerate(array):\n        j = i - 1\n        # find where the element is supposed to be and insert\n        while j >= 0 and to_insert < array[j]:\n            array[j + 1] = array[j]\n            j -= 1\n        array[j + 1] = to_insert\n\n\ndef insert(array, element, start=0):\n    \"\"\"\n    Time complexity: O(n)\n    \"\"\"\n    for i in range(start, len(array), 1):\n        if array[i] >= element:\n            array[i:i] = [element]\n            return\n    array.append(element)\n","repo_name":"thiagofigcosta/algo-data-design","sub_path":"algo_data_design/algorithms/sorting/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8001008911","text":"import datetime\nimport argparse\nimport timeit\nimport imutils\nimport numpy as np\nimport cv2\nfrom lcb.low_contrast_blemish import lcb\nfrom io_bin.preprocess import preprocess\n\nap = argparse.ArgumentParser()\n\nap.add_argument(\"-i\", \"--imageinput\", required=True, help=\"path to the raw image\")\nap.add_argument(\"--inputbayerformat\", type=str, default=\"rggb\", help=\"bayerformat of input image data\")\nap.add_argument(\"-o\", \"--outputformat\", type=str, default=\"raw\", help=\"format of output image data\")\nap.add_argument(\"-p\", \"--pedestal\", 
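# [Editor's sketch] Usage of the insert() helper defined above: it keeps an
# already-sorted list sorted by splicing the element in at the first position
# whose value is >= element, appending when no such position exists:
arr = [1, 3, 5]
insert(arr, 4)
print(arr)  # -> [1, 3, 4, 5]
insert(arr, 9)
print(arr)  # -> [1, 3, 4, 5, 9]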
type=int, default=64, help=\"amount of pedestal to add\")\nap.add_argument(\"-m\", \"--mode\", type=int, default=2, help=\"crop mode\")\nap.add_argument(\"-f\", \"--FOV\", type=int, default=0, help=\"Field of view\")\nap.add_argument(\"-w\", \"--whitebalance\", type=bool, default=True, help=\"whether apply whitebalance\")\nap.add_argument(\"-b\", \"--bitdepth\", type=int, default=10, help=\"depth of the color\")\nap.add_argument(\"-s\", \"--signed\", type=bool, default=True, help=\"Whether all pixels value will be signed\")\n\nargs = vars(ap.parse_args())\n\ntime1 = datetime.datetime.now()\n# custom_source = preprocess(imageinput=args[\"imageinput\"], outputformat=\"bayer\", more_precise=True, custom_size=[3856, 2340], custom_decoding=\"B\")\nID = lcb(args[\"imageinput\"], compensation=False, mode=0, roiSize=[9, 9], threshold=3.1)\ntime2 = datetime.datetime.now()\nprint(time2 - time1)\n\nresized = imutils.resize(ID, width=800)\nheat_map = cv2.applyColorMap(resized, cv2.COLORMAP_JET)\nmax = np.where(resized == np.max(resized))\n\n# for i in range(len(max)):\n# cv2.circle(heat_map, (max[1][i], max[0][i]), 20, (128, 128, 128), 3)\ncv2.imshow(\"LCB\", heat_map)\ncv2.waitKey()\n# t1 = timeit.Timer(lambda: lcb(args[\"imageinput\"]))\n# print(timeit.timeit())\n","repo_name":"zylo117/PixelPowerPy","sub_path":"defect_detection/lcb/test_lcb.py","file_name":"test_lcb.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"5092852456","text":"import discord\r\nimport typing\r\nfrom discord.ext import commands\r\nimport asyncio\r\nimport datetime\r\nimport random\r\n\r\n\r\nclass Embed(discord.Embed):\r\n def __init__(self, bot, message: typing.Union[commands.Context, discord.Message] = None, **kwargs):\r\n super().__init__(**kwargs)\r\n asyncio.create_task(self.__ainit__(bot, message, **kwargs))\r\n\r\n async def __ainit__(self, bot, message, **kwargs):\r\n self._colour = discord.Colour.from_hsv(random.random(), 1, 1)\r\n if isinstance(message, commands.Context):\r\n message = message.message\r\n title = kwargs.get(\"title\")\r\n if title:\r\n kwargs.pop(\"title\")\r\n\r\n if title:\r\n avatar_url = message.author.avatar_url_as(format=\"png\") if message else None\r\n self.set_author(name=title, icon_url=avatar_url)\r\n\r\n icon_url = bot.user.avatar_url_as(format=\"png\")\r\n self.set_footer(icon_url=icon_url)\r\n\r\n self._timestamp = datetime.datetime.utcnow()","repo_name":"Zomatree/Maid","sub_path":"maid/utils/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"27845397080","text":"\nimport os\nimport re\nimport struct\nimport numpy as np\nimport pandas as pd\nimport h5py\nimport netCDF4\nimport xarray as xr\nimport geopy.distance\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\n# import horizon as hor\nfrom topocalc.viewf import viewf, gradient_d8\nimport pickle\nfrom geospatialtools import gdal_tools\n# import tensorflow as tf\nimport demfuncs as dem\nimport tilefuncs as til\nimport string\nfrom sklearn.linear_model import LinearRegression\n\n\nimport matplotlib\nmatplotlib.use('Agg')\n# matplotlib.use('Qt5Agg') # dyn show plots\n\ndem.matplotlib_update_settings()\n\n# scenarios = ['10', '25', '100'] # number of HRUs in each simulation\n# scenarios = ['100'] # number of HRUs in each simulation\n# scenarios = ['sia10', 'sia25', 
'sia100'] # ONLY cos(sia) used in the clustering\n# scenarios = ['notnorm10', 'notnorm25', 'notnorm100'] # ONLY cos(sia) used in the clustering\n\n# scenarios = ['Peru']\nscenarios = ['EastAlps', 'Nepal', 'Peru']\nnhills = 2\n\nres_all = []\n\ncosz = 0.7\nadir = 0.5\nphi = np.pi / 2\n# flux_term = 'fdir'\ndo_averaging = False\naveblock = 55\n\n# outfigdir = os.path.join(datadir, 'outfigdir')\n# outdir = os.path.join(datadir, 'output')\noutfigdir = os.path.join('//', 'home', 'enrico', 'Documents',\n 'dem_datasets', 'outfigdir')\nres_all = {}\nfor isc, sc in enumerate(scenarios):\n\n datadir_all = os.path.join('..', '..', '..', 'Documents',\n 'res_hmc_light_p4')\n datadir = os.path.join(datadir_all, 'res_{}_{}'.format(sc, nhills))\n\n # datadir = os.path.join('..', '..', '..', 'Documents', 'res_{}'.format(sc))\n # datadir = os.path.join('..', '..', '..', 'Documents', 'res_all_hmc', 'res_Peru_10')\n #\n # res = til.read_tile_properties(datadir=datadir, do_averaging=do_averaging,\n # aveblock= aveblock,\n # cosz = cosz, phi = phi, adir=adir,\n # modeldir=None)\n\n modeldir = os.path.join('//', 'home', 'enrico', 'Documents',\n 'dem_datasets', 'trained_models',\n 'domain_EastAlps_buffer_0.1',\n 'models_ave_{}'.format(aveblock),\n )\n\n res = til.read_tile_properties(datadir=datadir,\n do_averaging=do_averaging,\n aveblock=aveblock,\n cosz=cosz, phi=phi, adir=adir,\n modeldir=modeldir)\n # # save results on\n # res_all['{}_{}'.format(sc, nhills)] = res\n\n\n # res['mappedtile_sia'].shape\n # res['map_sia'].shape\n\n # show results for a subset::\n\n # equal area map - distance in [km]\n\n # load coords\n X, Y = np.meshgrid(res['xlon_ea'], res['ylat_ea']) # coords equal area in latlon\n Xl, Yl = np.meshgrid(res['xlon_latlon'], res['ylat_latlon']) # coords equal area in latlon\n\n # X = np.flipud( np.abs(X) )\n # Y = np.flipud( np.abs(Y) )\n\n matplotlib.use('Qt5Agg') # dyn show plots\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8,8))\n # cm1 = axes[0].pcolormesh(X, Y, res['map_fdir'], cmap='jet', vmin=-1, vmax=1)\n cm0 = axes.pcolormesh(X, Y, res['map_ele'], cmap='terrain')\n axes.set_title('{}'.format(sc))\n\n if Y[0, 0]<0:\n axes.set_ylabel('Latitude [S]')\n else:\n axes.set_ylabel('Latitude [N]')\n\n if X[0, 0] < 0:\n axes.set_xlabel('Longitude [W]')\n else:\n axes.set_xlabel('Longitude [E]')\n\n # ylabs = abs(axes.get_yticks())\n from matplotlib.ticker import FormatStrFormatter\n import matplotlib.ticker as ticker\n\n @ticker.FuncFormatter\n def major_formatter(x, pos):\n mylab = -x if x < 0 else x\n label = '%.1f' % mylab\n return label\n\n # axes.set_yticklabels(ylabs)\n axes.yaxis.set_major_formatter(major_formatter)\n axes.xaxis.set_major_formatter(major_formatter)\n # axes.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))\n\n cbar = fig.colorbar(cm0)\n cbar.set_label(r'Elevation [m m.s.l.]')\n # plt.tight_layout()\n plt.savefig( os.path.join(outfigdir, 'elevmap_{}.png'.format(sc)), dpi = 300)\n plt.show()\n\n\n\n#\n#\n#\n# matplotlib.use('Qt5Agg') # dyn show plots\n# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(17,8))\n# cm1 = axes[0].pcolormesh(X, Y, res['map_fdir'], cmap='jet', vmin=-1, vmax=1)\n# cm0 = axes[1].pcolormesh(X, Y, res['mappedtile_fdir'], cmap='jet', vmin=-1, vmax=1)\n# axes[0].set_title('High-res predictions')\n# axes[1].set_title(r'tile-by-tile predictions ($n_T = {}$)'.format(res['ntiles']))\n# axes[0].set_xlabel('x [km]')\n# axes[0].set_ylabel('y [km]')\n#\n# axes[1].set_xlabel('x [km]')\n# axes[1].set_ylabel('y [km]')\n# cbar = fig.colorbar(cm0)\n# 
cbar.set_label(r'(3D - PP)/PP')\n# plt.savefig( os.path.join(outfigdir, 'comp_fdir.png'))\n# plt.show()\n#\n#\n# matplotlib.use('Qt5Agg') # dyn show plots\n# bnd = 0.5\n# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(17,8))\n# cm1 = axes[0].pcolormesh(X, Y, res['map_fdif'], cmap='jet', vmin=-bnd, vmax=bnd)\n# cm0 = axes[1].pcolormesh(X, Y, res['mappedtile_fdif'], cmap='jet', vmin=-bnd, vmax=bnd)\n# axes[0].set_title('High-res predictions')\n# axes[1].set_title(r'tile-by-tile predictions ($n_T = {}$)'.format(res['ntiles']))\n# axes[0].set_xlabel('x [km]')\n# axes[0].set_ylabel('y [km]')\n#\n# axes[1].set_xlabel('x [km]')\n# axes[1].set_ylabel('y [km]')\n# cbar = fig.colorbar(cm0)\n# cbar.set_label(r'(3D - PP)/PP')\n# plt.savefig( os.path.join(outfigdir, 'comp_fdif.png'))\n# plt.show()\n#\n#\n# matplotlib.use('Qt5Agg') # dyn show plots\n# bnd = 0.5\n# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(17,8))\n# cm1 = axes[0].pcolormesh(X, Y, res['map_fcoupn'], cmap='jet', vmin=-bnd, vmax=bnd)\n# cm0 = axes[1].pcolormesh(X, Y, res['mappedtile_fcoupn'], cmap='jet', vmin=-bnd, vmax=bnd)\n# axes[0].set_title('High-res predictions')\n# axes[1].set_title(r'tile-by-tile predictions ($n_T = {}$)'.format(res['ntiles']))\n# axes[0].set_xlabel('x [km]')\n# axes[0].set_ylabel('y [km]')\n#\n# axes[1].set_xlabel('x [km]')\n# axes[1].set_ylabel('y [km]')\n# cbar = fig.colorbar(cm0)\n# cbar.set_label(r'(3D - PP)/PP')\n# plt.savefig( os.path.join(outfigdir, 'comp_fcoupn.png'))\n# plt.show()\n#\n# matplotlib.use('Qt5Agg') # dyn show plots\n# plt.figure()\n# plt.imshow(res['mappedtile_tcf'])\n# plt.imshow(res['map_tcf'])\n# plt.colorbar()\n# plt.show()\n#\n# plt.figure()\n# plt.hist( np.ravel( res['map_fdir']) , bins=50 )\n# plt.show()\n#\n# plt.figure()\n# plt.plot( np.ravel(res['map_fdir']), np.ravel(res['mappedtile_fdir']), 'o')\n# # plt.plot( np.ravel(res['map_fdif']), np.ravel(res['mappedtile_fdif']), 'o')\n# # plt.plot( np.ravel(res['map_frdirn']), np.ravel(res['mappedtile_frdirn']), 'o')\n# # plt.plot( np.ravel(res['map_fcoupn']), np.ravel(res['mappedtile_fcoupn']), 'o')\n# plt.plot( [-1, 1], [-1, 1], 'k')\n# plt.show()\n#\n# plt.figure()\n# # plt.plot( np.ravel(res['map_sia']), np.ravel(res['mappedtile_sia']), 'o')\n# plt.plot( np.ravel(res['map_svf']), np.ravel(res['mappedtile_svf']), 'o')\n# # plt.plot( np.ravel(res['map_tcf']), np.ravel(res['mappedtile_tcf']), 'o')\n# # plt.plot( np.ravel(res['map_ele']), np.ravel(res['mappedtile_ele']), 'o')\n# # plt.plot( np.ravel(res['map_sde']), np.ravel(res['mappedtile_sde']), 'o')\n# plt.plot( [0, 1], [0, 1], 'k')\n# plt.show()\n#\n# res['map_fdif'].shape\n# res['mappedtile_fdif'].shape","repo_name":"EnricoZorzetto/gmd_2022_radiation","sub_path":"codes_gmd/global_map_threedoms.py","file_name":"global_map_threedoms.py","file_ext":"py","file_size_in_byte":7363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32463172057","text":"# This code ONLY supports the ADXL34x accelerometers!!!\n# You can get one at: https://www.adafruit.com/product/4097\n# To enable this feature, run:\n# sudo pip3 install adafruit-circuitpython-ADXL34x\n\nimport imp\ntry:\n\timp.find_module('adafruit_adxl34x')\n\tstub = False\n\timport board\n\timport busio\n\timport adafruit_adxl34x\n\ti2c = busio.I2C(board.SCL, board.SDA)\n\taccel = adafruit_adxl34x.ADXL345(i2c)\nexcept ImportError:\n\t# If the hardware is not present, we fall back to stub mode.\n\tstub = True\n\n# Orientation tells which side is currently the top 
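# [Editor's note] imp.find_module used above is deprecated since Python 3.4
# (and removed in 3.12); the same hardware-or-stub probe can be written with
# importlib. Optional modernised sketch of the detection step only:
import importlib.util

if importlib.util.find_spec('adafruit_adxl34x') is not None:
    stub = False  # library present: the real accelerometer can be used
else:
    stub = True   # library absent: fall back to stub mode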
one.\n# With current setup:\n# \n# top \n# +---------------+ \n# | +-----------+ | \n# | | | | \n# | | | | \n# | | | | \n# left | | | | right\n# | | | | \n# | | | | \n# | | | | \n# | +-----------+ | \n# +---------------+ \n# bottom \n#\n# top: +x\n# left: -y\n# bottom: -x\n# right: +y\norientation = \"top\"\n\n\n# Polls the accelerometer and caches the orientation value.\ndef update():\n\tglobal stub\n\tif stub:\n\t\t# When operating in stub mode, the function will always return top orientation.\n\t\treturn\n\n\tglobal accel, orientation\n\tx = accel.acceleration[0]\n\ty = accel.acceleration[1]\n\tz = accel.acceleration[2]\n\n\tif abs(z) > 9:\n\t\torientation = \"top\"\n\t\treturn\n\n\tif abs(x) > abs(y):\n\t\tif x < 0:\n\t\t\torientation = \"bottom\"\n\t\telse:\n\t\t\torientation = \"top\"\n\telse:\n\t\tif y < 0:\n\t\t\torientation = \"left\"\n\t\telse:\n\t\t\torientation = \"right\"\n\ndef get():\n\treturn orientation\n\ndef is_landscape():\n\tglobal orientation\n\treturn orientation == \"left\" or orientation == \"right\"\n","repo_name":"Martenfur/magic_frame","sub_path":"src/orientation.py","file_name":"orientation.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"} +{"seq_id":"71833200565","text":"SERVICES_PROVIDED = {'change_oil': 2, 'inflate_tires': 5, 'diagnostic': 30}\n\n\nclass Ticket:\n ticket_counter = 0\n\n def __init__(self, service):\n Ticket.ticket_counter += 1\n self.id = Ticket.ticket_counter\n self.service = service\n self.wait_time = 0\n\n def __str__(self):\n return f'id:{self.id}\\nwait_time:{self.wait_time}\\nservice:{self.service}'\n\n\nclass LineOfCars:\n current_line = {'change_oil': [], 'inflate_tires': [], 'diagnostic': []}\n services_provided = SERVICES_PROVIDED\n\n def __init__(self):\n self.whole_que_wait_time = 0\n self.current_ticket = ''\n\n def add_ticket_to_line(self, ticket):\n self.current_line[ticket.service].append(ticket)\n self.calculate_wait_times()\n\n def calculate_wait_times(self):\n wait_time = 0\n for service in self.services_provided:\n for ticket in self.current_line[service]:\n ticket.wait_time = wait_time\n wait_time += self.services_provided[service]\n self.whole_que_wait_time = wait_time\n\n def make_que_list(self):\n que_list = list()\n [que_list.extend(v) for _, v in self.current_line.items() if v]\n return que_list\n\n def pop_first_ticket_from_que(self):\n ticket = ''\n for v in self.current_line.values():\n if v:\n ticket = v.pop(0)\n break\n self.current_ticket = ticket\n return self.current_ticket\n\n def get_current_ticket(self):\n return self.current_ticket\n","repo_name":"desirekaleba/HyperCar","sub_path":"tickets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"5209719514","text":"#asking user to input two number\n\nnum1=int(input(\"Enter first number\"))\nnum2=int(input(\"Enter second number\"))\n\n#used nested if to check the condition\nif num1>0 and num2>0:\n print(\"The product of the two numbers is:\",num1*num2)\nelse:\n print(\"The number must be positive\")\n","repo_name":"Mayank-B01/Python-programming","sub_path":"week 4/Tutorial/cw2.py","file_name":"cw2.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19868118758","text":"import time\nimport os\nimport pymysql\nfrom DBUtils.PooledDB 
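# [Editor's sketch] update() above mutates a module-level string; the same
# axis-threshold logic is easier to unit-test as a pure function of one
# (x, y, z) reading (thresholds copied from the code above):
def classify(x, y, z):
    if abs(z) > 9:                       # gravity mostly on z: frame lies flat
        return "top"
    if abs(x) > abs(y):                  # dominant horizontal axis decides
        return "bottom" if x < 0 else "top"
    return "left" if y < 0 else "right"

assert classify(0.0, 0.0, 9.8) == "top"
assert classify(0.5, -9.8, 0.3) == "left"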
import PooledDB, SharedDBConnection\nimport numpy as np\nimport datetime\nimport pandas as pd\n\n\n\nPOOLSensor = PooledDB(\n creator=pymysql,\n maxconnections=6,\n mincached=2,\n maxcached=5,\n maxshared=3,\n blocking=True,\n maxusage=None,\n setsession=[],\n ping=0,\n host='192.168.110.11',\n port=3306,\n user='furnace', # 用户名\n password='furnace', # 密码\n database='car_cone1', # 库名\n charset='utf8'\n)\n\n\n\n# 建立连接MYSQL程序\ndef readMysqlToArry(mysql1, POOL0):\n conn = POOL0.connection() #建立连接\n cursor = conn.cursor() ## 使用cursor()方法获取操作游标\n # print(\"mysql1:\", mysql1)\n try:\n cursor.execute(mysql1) # 使用execute方法执行SQL语句\n ccc = []\n for x in cursor.fetchall():\n ccc.append(list(x))\n # 将数据库里面读取出来的数据bbb全部写成矩阵形式。\n arr1 = np.array(ccc)\n conn.commit()\n #print(\"读取出来的数据为:\", arr1)\n # print(\"读取数据库成功\")\n except Exception as r:\n conn.rollback()\n print(\"读取数据库失败\")\n print(\"故障码:\", r)\n conn.close()\n finally:\n conn.close()\n return arr1\n\n\n\n\n\nm = 1\nwhile 1:\n\n mysql =\"SELECT * FROM car_cone1.cone1ping order by timeID desc limit 1\"\n kk = readMysqlToArry(mysql,POOLSensor)\n print(kk)\n while 1:\n if kk[0,m] == '1':\n print(\"cone1_%dping网络正常\"%m)\n else :\n print(\"cone1_%dping网络异常!!!!!!\"%m)\n m += 1\n if m > 10:\n break\n time.sleep(600)\n","repo_name":"zcs416008138/fsxgt","sub_path":"读取MYSQL发送钢铁侠.py","file_name":"读取MYSQL发送钢铁侠.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32889967455","text":"from django.test import TestCase\n\nfrom apps.ml.regressor.cost_regressor import CostRegressor\n\nimport inspect\nfrom apps.ml.registry import MLRegistry\n\nclass MLTests(TestCase):\n\n def test_registry(self):\n registry = MLRegistry()\n self.assertEqual(len(registry.endpoints), 0)\n endpoint_name = \"regressor\"\n algorithm_object = CostRegressor()\n algorithm_name = \"cost regressor\"\n algorithm_status = \"production\"\n algorithm_version = \"0.0.3\"\n algorithm_owner = \"Mauricio\"\n algorithm_description = \"Cost Regressor with simple pre- and post-processing\"\n algorithm_code = inspect.getsource(CostRegressor)\n # add to registry\n registry.add_algorithm(endpoint_name, algorithm_object, algorithm_name,\n algorithm_status, algorithm_version, algorithm_owner,\n algorithm_description, algorithm_code)\n # there should be one endpoint available\n self.assertEqual(len(registry.endpoints), 1)\n\n\n def test_rf_algorithm(self):\n input_data = { \n \"MSSubClass\": 60 ,\n \"MSZoning\": \"RL\" ,\n \"LotFrontage\": 65 ,\n \"LotArea\": 8450 ,\n \"Alley\": \"None\" ,\n \"LotShape\": \"Reg\" ,\n \"LandContour\": \"Lvl\" ,\n \"LotConfig\": \"Inside\" ,\n \"LandSlope\": \"Gtl\" ,\n \"Neighborhood\": \"CollgCr\" ,\n \"Condition1\": \"Norm\" ,\n \"Condition2\": \"Norm\" ,\n \"BldgType\": \"1Fam\" ,\n \"HouseStyle\": \"2Story\",\n \"OverallQual\": 7 ,\n \"OverallCond\": 5 ,\n \"YearBuilt\": 2003 ,\n \"YearRemodAdd\": 2003 ,\n \"RoofStyle\": \"Gable\" ,\n \"RoofMatl\": \"CompShg\" ,\n \"Exterior1st\": \"VinylSd\" ,\n \"Exterior2nd\": \"VinylSd\" ,\n \"MasVnrType\": \"BrkFace\" ,\n \"MasVnrArea\": 196 ,\n \"ExterQual\": \"Gd\" ,\n \"ExterCond\": \"TA\" ,\n \"Foundation\": \"PConc\" ,\n \"BsmtQual\": \"Gd\" ,\n \"BsmtCond\": \"TA\" ,\n \"BsmtExposure\": \"No\" ,\n \"BsmtFinType1\": \"GLQ\" ,\n \"BsmtFinSF1\": 706 ,\n \"BsmtFinType2\": \"Unf\" ,\n \"BsmtFinSF2\": 0 ,\n \"BsmtUnfSF\": 150 ,\n \"TotalBsmtSF\": 856 ,\n \"Heating\": \"GasA\" ,\n \"HeatingQC\": \"Ex\" ,\n \"CentralAir\": \"Y\" ,\n 
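# [Editor's sketch] readMysqlToArry above interpolates the SQL string directly;
# when runtime values are involved, pass them as driver-side parameters
# instead. Hedged sketch reusing the DBUtils pool defined above (the table
# name is illustrative, and identifiers cannot be sent as %s parameters):
def read_latest_rows(pool, table, limit=1):
    conn = pool.connection()
    try:
        with conn.cursor() as cursor:
            # pymysql fills %s placeholders safely, avoiding SQL injection
            cursor.execute(
                "SELECT * FROM " + table + " ORDER BY timeID DESC LIMIT %s",
                (limit,))
            return cursor.fetchall()
    finally:
        conn.close()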
\"Electrical\": \"SBrkr\" ,\n \"1stFlrSF\": 856 ,\n \"2ndFlrSF\": 854 ,\n \"LowQualFinSF\": 0 ,\n \"GrLivArea\": 1710 ,\n \"BsmtFullBath\": 1 ,\n \"BsmtHalfBath\": 0 ,\n \"FullBath\": 2 ,\n \"HalfBath\": 1 ,\n \"BedroomAbvGr\": 3 ,\n \"KitchenAbvGr\": 1 ,\n \"KitchenQual\": \"Gd\" ,\n \"TotRmsAbvGrd\": 8 ,\n \"Functional\": \"Typ\" ,\n \"Fireplaces\": 0 ,\n \"FireplaceQu\": \"None\" ,\n \"GarageType\": \"Attchd\" ,\n \"GarageYrBlt\": 2003 ,\n \"GarageFinish\": \"RFn\" ,\n \"GarageCars\": 2 ,\n \"GarageArea\": 548 ,\n \"GarageQual\": \"TA\" ,\n \"GarageCond\": \"TA\" ,\n \"PavedDrive\": \"Y\" ,\n \"WoodDeckSF\": 0 ,\n \"OpenPorchSF\": 61 ,\n \"EnclosedPorch\": 0 ,\n \"3SsnPorch\": 0 ,\n \"ScreenPorch\": 0 ,\n \"PoolArea\": 0 ,\n \"Fence\": \"None\" ,\n \"MiscFeature\": \"None\" ,\n \"MiscVal\": 0 ,\n \"MoSold\": 2 ,\n \"YrSold\": 2008 ,\n \"SaleType\": \"WD\" ,\n \"SaleCondition\": \"Normal\"\n }\n my_alg = CostRegressor()\n response = my_alg.compute_prediction(input_data)\n print(response)","repo_name":"Mauricio1812/HousePrices","sub_path":"backend/server/apps/ml/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"991170048","text":"from django.conf import settings\nfrom drf_spectacular.utils import extend_schema\n\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import NotAuthenticated, ValidationError\nfrom rest_framework.response import Response\n\nfrom garpix_user.exceptions import NotAuthenticateException\nfrom garpix_user.serializers import PhoneConfirmSendSerializer, PhoneConfirmCheckCodeSerializer, \\\n PhonePreConfirmSendSerializer, UserSessionTokenSerializer\nfrom garpix_user.models import UserSession\nfrom django.utils.translation import ugettext as _\n\nfrom garpix_user.utils.drf_spectacular import user_session_token_header_parameter\n\n\n@extend_schema(\n parameters=[\n user_session_token_header_parameter()\n ]\n)\nclass PhoneConfirmationView(viewsets.GenericViewSet):\n\n def get_serializer_class(self):\n user = self.request.user\n if self.action == 'send_code':\n if user.is_authenticated:\n return PhoneConfirmSendSerializer\n return PhonePreConfirmSendSerializer\n if self.action == 'check_code':\n return PhoneConfirmCheckCodeSerializer\n return PhoneConfirmCheckCodeSerializer\n\n @extend_schema(summary=_('Phone confirmation. Step 1'))\n @action(methods=['POST'], detail=False)\n def send_code(self, request, *args, **kwargs):\n user = request.user\n\n if user.is_authenticated:\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n result = user.send_phone_confirmation_code(serializer.data.get('phone', None))\n if result is not True:\n result.raise_exception(exception_class=ValidationError)\n return Response({'result': 'success'})\n else:\n if settings.GARPIX_USER.get('USE_PREREGISTRATION_PHONE_CONFIRMATION', False):\n user = UserSession.get_or_create_user_session(request)\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n result = user.send_phone_confirmation_code(serializer.data['phone'])\n if result is not True:\n result.raise_exception(exception_class=ValidationError)\n return Response({'result': 'success'})\n\n raise NotAuthenticateException().raise_exception(exception_class=NotAuthenticated)\n\n @extend_schema(summary=_('Phone confirmation. 
Step 2'))\n @action(methods=['POST'], detail=False)\n def check_code(self, request, *args, **kwargs):\n user = request.user\n if not user.is_authenticated:\n if settings.GARPIX_USER.get('USE_PREREGISTRATION_PHONE_CONFIRMATION', False):\n user = UserSession.get_or_create_user_session(request)\n else:\n raise NotAuthenticateException().raise_exception(exception_class=NotAuthenticated)\n\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n result = user.confirm_phone(serializer.data['phone_confirmation_code'])\n if result is not True:\n result.raise_exception(exception_class=ValidationError)\n return Response({'result': 'success'})\n","repo_name":"garpixcms/garpix_user","sub_path":"backend/garpix_user/views/phone_confirmation_view.py","file_name":"phone_confirmation_view.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"1966604542","text":"import numpy\r\n\r\ndef slot_machine():\r\n\r\n class slots:\r\n symbols = [\"#\", \"@\", \"&\", \"0\", \"£\", \"+\", \"*\"]\r\n symbol_1 = \"A\"\r\n symbol_2 = \"A\"\r\n symbol_3 = \"A\"\r\n def __init__(self):\r\n self.symbol_1 = self.roll()\r\n self.symbol_2 = self.roll()\r\n self.symbol_3 = self.roll()\r\n def roll(self):\r\n self.symbol_1 = numpy.random.choice(self.symbols, p=[50/156, 40/156, 30/156, 20/156, 10/156, 5/156, 1/156])\r\n self.symbol_2 = numpy.random.choice(self.symbols, p=[50/156, 40/156, 30/156, 20/156, 10/156, 5/156, 1/156])\r\n self.symbol_3 = numpy.random.choice(self.symbols, p=[50/156, 40/156, 30/156, 20/156, 10/156, 5/156, 1/156])\r\n\r\n our_slots = slots()\r\n credits = int(input(\"Deposit your credits: \"))\r\n continue_playing = True\r\n if credits == 0:\r\n continue_playing = False\r\n\r\n while continue_playing == True:\r\n\r\n bet = int(input(\"How many credits do you wanna bet? \"))\r\n while (bet > credits or bet <= 0):\r\n if (bet <= 0):\r\n bet = int(input(\"Really? Try again: \"))\r\n else:\r\n w=[\"Are you stupid? You dont't have enought credits for that bet.\", \"Please, insert a valid number of credits.\", \"You're to poor for that bet.\", \"Are you dumb? Insert a valid number of credits!\"]\r\n s = numpy.random.choice(w)\r\n print(s)\r\n bet = int(input(\"Try again: \"))\r\n\r\n our_slots.roll()\r\n print(\"\\n --------- \")\r\n print(f\"| {our_slots.symbol_1} {our_slots.symbol_2} {our_slots.symbol_3} |\")\r\n print(\" --------- \")\r\n\r\n if our_slots.symbol_1 == our_slots.symbol_2 and our_slots.symbol_1 == our_slots.symbol_3:\r\n print(\"***JACKPOT***\")\r\n points = [5, 10, 20, 70, 200, 1000, 100000]\r\n i = 0\r\n found = False\r\n while i < 7 and not found:\r\n if our_slots.symbol_1 == our_slots.symbols[i]:\r\n credits += bet*points[i]\r\n found = True\r\n i += 1\r\n else:\r\n credits -= bet\r\n\r\n print(f\"\\nYou have {credits} credits left\")\r\n\r\n if credits == 0:\r\n print(\"GAME OVER\")\r\n continue_playing = False\r\n else:\r\n check = input(\"Do you wanna keep playing? (y/n)) \")\r\n if (check == \"n\"):\r\n continue_playing = False\r\n print(f\"\\nYou ended up with {credits} credits. Congrats! 
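# Two small notes on the slot machine above: the taunt strings carry typos
# ("dont't have enought" -> "don't have enough", "You're to poor" -> "You're
# too poor", and "(y/n))" has a stray parenthesis), and the jackpot payout
# scan's while/found bookkeeping reduces to list.index. A sketch, with the
# symbols and payout multipliers copied from the class above:
symbols = ["#", "@", "&", "0", "£", "+", "*"]
payouts = [5, 10, 20, 70, 200, 1000, 100000]

def jackpot_payout(symbol, bet):
    """Credits won when three of `symbol` line up, at the given bet."""
    return bet * payouts[symbols.index(symbol)]

assert jackpot_payout("#", 2) == 10
assert jackpot_payout("*", 1) == 100000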
(or I'm sorry if you're poor now)\")\r\n\r\nslot_machine()","repo_name":"05raquel/Slot_Machine_HS","sub_path":"slot_machine.py","file_name":"slot_machine.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1477080962","text":"import turtle\r\nimport math\r\n\r\nx1,y1,x2,y2,x3,y3 = input(\"Enter six points :\")\r\n\r\na = math.sqrt((x2-x3) * (x2-x3) + (y2-y3) * (y2-y3))\r\nb = math.sqrt((x1-x3) * (x1-x3) + (y1-y3) * (y1-y3))\r\nc = math.sqrt((x1-x2) * (x1-x2) + (y1-y2) * (y1-y2))\r\n\r\nA = math.degrees(math.acos((a * a - b * b - c * c) / (-2 * b * c)))\r\nB = math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c)))\r\nC = math.degrees(math.acos((c * c - b * b - a * a) / (-2 * a * b)))\r\n\r\nprint(\"The three angles are \",round(A * 100) / 100.0, round(B * 100) / 100.0, round(C*100)/100.0)\r\n \r\nangle_1 = round(A * 100) / 100.0\r\nangle_2 = round(B * 100) / 100.0\r\nangle_3 = round(C*100)/100.0\r\n\r\nturtle.penup()\r\nturtle.goto(0, -50)\t\r\nturtle.pendown()\r\nturtle.goto(x1,x2) \r\nturtle.write(angle_1)\r\nturtle.goto(x2,x1)\r\nturtle.write(angle_2)\r\nturtle.goto(x3,x2)\r\nturtle.write(angle_3)\r\nturtle.hideturtle()\r\nturtle.done()","repo_name":"raheemtaha11/python","sub_path":"Chapter_3/ComputeAngles.py","file_name":"ComputeAngles.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33798894388","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\nimport time\nimport random\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import metrics\nfrom sklearn.ensemble import AdaBoostClassifier\nimport matplotlib.pyplot as plt \nfrom .constant import cam2idx,idx2cam \nfrom sklearn.ensemble.partial_dependence import plot_partial_dependence,partial_dependence\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import naive_bayes as nb\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle as pkl\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import roc_curve, auc\nfrom collections import defaultdict\nimport torch\nfrom torch import nn\nfrom torch import optim\ntorch.set_printoptions(profile=\"full\")\nclass SpatialModel(object):\n def __init__(self,**args):\n pass\n def predict(self,):\n pass\n def predict_proba(self,):\n pass\n def fit(self,x,y):\n pass\n\nclass GaussianMLP(nn.Module):\n def __init__(self,middle=100):\n self.act=nn.Sigmod()\n self.input=nn.Linear(2,middle)\n self.hidden=nn.Linear(middle,middle)\n self.output=nn.Linear(middle,2)\n self.model=nn.Sequential([self.input,self.act,self.hidden,self.act,self.output])\n\n def forward(self,xs,ds,labels):\n pass\n\nclass GaussianPoly(nn.Module):\n def __init__(self,n1,n2):\n super(GaussianPoly,self).__init__()\n self.alpha=nn.Parameter(torch.randn((n1),requires_grad=True))\n self.beta=nn.Parameter(torch.randn((n2),requires_grad=True))\n self.ce=nn.CrossEntropyLoss()\n self.act=nn.Sigmoid()\n def predict_us(self,d):\n temp=np.zeros(n1)\n for i in range(1,n1):\n temp[i]=temp[i-1]*d\n u=torch.sum(torch.Tensor(temp)*self.alpha)\n s=torch.sum(torch.Tensor(temp)*self.beta)\n return us\n\n def predict_prob(self,x,d):\n u,s=self.predict_us(d)\n return torch.exp((x-u)**2/s**2)/(torch.sqrt(2*np.pi)*s)\n\n def test(self,x,y):\n pass\n\n \n\n def forward(self,xs,ds,labels):\n ds=np.array(ds)\n xs=np.array(xs)\n 
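# ComputeAngles.py above has two bugs: in Python 3, input() returns a single
# string, so the six-way unpack fails, and the angle labels are written at
# mixed-up coordinates (goto(x1,x2), goto(x2,x1), goto(x3,x2)). A sketch of
# the corrected input and per-vertex labelling, keeping the same
# law-of-cosines math (math.dist needs Python 3.8+; turtle drawing omitted):
import math

x1, y1, x2, y2, x3, y3 = map(float, input("Enter six points: ").split())

a = math.dist((x2, y2), (x3, y3))
b = math.dist((x1, y1), (x3, y3))
c = math.dist((x1, y1), (x2, y2))

A = math.degrees(math.acos((a * a - b * b - c * c) / (-2 * b * c)))
B = math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c)))
C = math.degrees(math.acos((c * c - b * b - a * a) / (-2 * a * b)))

for (x, y), angle in [((x1, y1), A), ((x2, y2), B), ((x3, y3), C)]:
    print("angle at (%g, %g) = %.2f" % (x, y, angle))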
labels=torch.Tensor(labels)\n unique_d=np.unique(ds)\n idx=np.zeros(ds.shape)\n n1=self.alpha.size()[0]\n n2=self.beta.size()[0]\n prob=torch.zeros((xs.shape[0],2))\n params=[]\n param_us=torch.zeros(xs.shape)\n param_ss=torch.zeros(xs.shape)\n loss=0\n print(unique_d)\n for i,d in enumerate(unique_d):\n args=np.where(ds==d)\n idx[args]=i\n temp=np.zeros(n1)\n temp[0]=1\n for i in range(1,n1):\n temp[i]=temp[i-1]*d\n u=torch.sum(torch.Tensor(temp[:n1])*torch.exp(self.alpha))\n s=torch.sum(torch.Tensor(temp[:n2])*torch.exp(self.beta))\n# print(temp,u,s)\n# print(u,s,args)\n# print(temp.shape,self.alpha.size(),u.size(),s.size())\n param_us[args]=u\n param_ss[args]=s\n xs=torch.Tensor(xs)\n ds=torch.Tensor(ds)\n param_us=torch.Tensor(param_us)\n param_ss=torch.Tensor(param_ss)\n dt=1\n# print(self.alpha,self.beta)\n prob[:,1]=self.act(torch.exp(-(xs-param_us)**2/param_ss**2)/((torch.sqrt(torch.tensor(2*3.141592657))*param_ss)+1e-8))\n prob[:,0]=1-prob[:,1]\n# prob=prob.reshape((prob.shape[0],1))\n labels=labels.long()\n# print(prob.shape,labels.shape)\n S=self.ce(prob,labels)\n auc=metrics.roc_auc_score(labels.detach().numpy().astype(np.int),prob.detach().numpy()[:,1])\n print('auc',auc)\n #S=torch.sum((xs-param_us)**2/(param_ss**2)*labels)+torch.sum(torch.log(param_ss)*labels)+dt*(torch.sum(self.alpha**2)+1e-1*torch.sum(self.beta**2))\n #S/=labels.sum() #xs.size()[0]\n print('Loss',S)\n return S\n\nclass GaussianSpatialModel(SpatialModel):\n def __init__(self,**args):\n super(GaussianSpatialModel,self).__init__(**args)\n self.distance2y=defaultdict(list)\n self.distance2hist={}\n def fit(self,x,y):\n for i,k in enumerate(x):\n self.distance2y[k[0]].append(k[1])\n fig=plt.figure()\n ax=fig.gca(projection='3d')\n for key in self.distance2y.keys():\n self.distance2hist[key],edges=np.histogram(self.distance2y[key],bins=20)\n q=np.array([key for edge in edges[:-1]])\n #print(q.shape,edges.shape)\n ax.plot(q,edges[:-1],self.distance2hist[key]*1.0/np.sum(self.distance2hist[key]))\n ax.view_init(elev=35., azim=35)\n ax.set_ylabel('time difference')\n ax.set_xlabel('distance')\n ax.set_ylim(0,30000)\n ax.set_xlim(0,800)\n ax.set_zlim(0,1)\n ax.set_zlabel('frequency')\n plt.savefig('1234.svg',dpi=3000, bbox_inches = 'tight')\n #print('123 finished')\n #exit()\n# print(self.distance2hist)\n\nclass GaussianModel(SpatialModel):\n def __init__(self,**args):\n if 'n1' in args.keys():\n self.n1=args['n1']\n else:\n self.n1=4\n if 'n2' in args.keys():\n self.n2=args['n2']\n else:\n self.n2=4\n self.model=GaussianPoly(self.n1,self.n2)\n self.iters=10000\n self.optimizer = optim.SGD(self.model.parameters(), lr = 1e-1, momentum = 0.9)\n\n def fit(self,x,y):\n ss=[t[0] for t in x]\n xs=[t[1] for t in x]\n ls=[]\n for l in y:\n if l==1:\n ls.append(l)\n else:\n ls.append(0)\n y=ls\n for i in range(self.iters):\n self.optimizer.zero_grad()\n loss=self.model.forward(xs,ss,y)\n loss.backward()\n self.optimizer.step()\n exit(0)\n def predict_proba(self,x):\n pass\n\nclass Model:\n def __init__(self,model_name,**args):\n names2model_fun={'decision_tree':DecisionTreeClassifier,\n 'adaboost':AdaBoostClassifier,\n 'gbrt':GradientBoostingClassifier,\n 'svm':SVC,\n 'gaussianNB':nb.GaussianNB,\n 'multinomialNB':nb.MultinomialNB,\n 'bernoulliNB':nb.BernoulliNB,\n 'MLP':MLPClassifier,\n 'gauss_poly':GaussianModel}\n if model_name not in names2model_fun:\n print('model_name should be in name2model_fun!')\n exit(0)\n self.model=names2model_fun[model_name](**args)\n self.model_name=model_name\n\n\n def predict(self,x):\n return 
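# GaussianPoly.predict_us above reads an undefined global n1, never seeds
# temp[0] = 1 (so every power of d stays zero), and returns the undefined name
# `us`. A sketch of the intended polynomial-in-distance mean/std, using the
# same exp() parameterisation that forward() applies to alpha and beta:
import torch

def predict_us(alpha, beta, d):
    """Evaluate mean u and std s as polynomials in the camera distance d."""
    n1, n2 = alpha.numel(), beta.numel()
    powers = float(d) ** torch.arange(max(n1, n2), dtype=torch.float32)  # [1, d, d^2, ...]
    u = torch.sum(powers[:n1] * torch.exp(alpha))
    s = torch.sum(powers[:n2] * torch.exp(beta))
    return u, s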
self.model.predict_proba(x)\n\n def fit(self,x,y):\n self.model.fit(x,y)\n\n def score(self,x,y):\n return self.model.score(x,y)\n\n def distance_predict(self,dist,x):\n dist=np.ones(x.shape[0])*dist.reshape((x.shape[0],1))\n feature=np.hstack(x,dist)\n return self.model.predict(feature)\n\n def distance_score(self,dist,x,y):\n dist=np.ones(x.shape[0])*dist.reshape((x.shape[0],1))\n feature=np.hstack(x,dist)\n return self.model.score(feature,y)\n\n def distance_fit(self,dist,x,y):\n dist=np.ones(x.shape[0])*dist.reshape((x.shape[0],1))\n feature=np.hstack(x,dist)\n self.model.fit(feature,y)\n\n\nclass Experiment:\n def __init__(self,train_x,train_y,test_x,test_y,model_name,mode,locationmat,args,**kwargs):\n self.model_name=model_name\n self.train_x=train_x\n self.train_y=train_y\n self.test_x=test_x\n self.test_y=test_y\n self.locationmat=locationmat\n self.args=args\n self.save_dir=args.save_dir\n self.mode=mode\n self.kwargs=kwargs\n\n def save_model(self,mode):\n with open('{}/{}_{}.pkl'.format(self.save_dir,mode,self.model_name),'wb') as out:\n print('{}/{}_{}.pkl'.format(self.save_dir,mode,self.model_name))\n pkl.dump(self.model,out)\n\n def save_csv(self,name,text):\n path='{}/{}.csv'.format(self.save_dir,name)\n np.savetxt(path,text)\n\n def visual_data(self,index):\n plt.figure()\n i=int(index//9)\n j=int(index%9)\n# print(i,j)\n# print(idx2cam[i],idx2cam[j])\n colors=[0,1]\n \n num=1\n d=self.train_x[i][j]\n y=self.train_y[i][j]\n c=[colors[item] for item in y]\n x=[num*k for k in c]\n plt.scatter(d,x,c=c)\n\n num=2\n colors=[3,5]\n d=self.test_x[i][j]\n y=self.test_y[i][j]\n c=[colors[item] for item in y]\n x=[num*k+2 for k in c]\n plt.scatter(d,x,c=c)\n plt.show()\n\n def random_train(self,seed,num):\n start = time.perf_counter()\n self.model=Model(self.model_name,**self.kwargs)\n np.random.seed(seed)\n pairs=[]\n for i in range(9):\n for j in range(i+1,9):\n pairs.append((i,j))\n #indexs=range(len(pairs))# random.sample(range(len(pairs)),num)\n indexs=random.sample(range(len(pairs)),num)\n print(indexs)\n #train_idx=[pairs[i] for i in indexs]\n train_idx=[[2,5],[5,7],[2,8],[2,4],[0,2],[7,8],[2,7],[5,8],[4,6],[0,6],[0,4],[7,7],[0,0],[6,6],[4,4],[2,2],[5,5],[8,8]]\n r2_score_mat=np.zeros((9,9))\n auc_mat=np.zeros((9,9))\n train_x=[]\n train_y=[]\n test_x=[]\n test_y=[]\n flag=np.zeros((9,9))\n for idx in train_idx:\n flag[idx[0]][idx[1]]=1\n flag[idx[1]][idx[0]]=1\n for i,item in enumerate(self.train_x[idx[0]][idx[1]]):\n train_x.append([self.locationmat[idx[0]][idx[1]],item])\n train_y.append(self.train_y[idx[0]][idx[1]][i])\n for i in range(9):\n for j in range(9):\n for k,item in enumerate(self.test_x[i][j]):\n test_x.append([self.locationmat[i][j],item[0]])\n test_y.append(self.test_y[i][j][k])\n sp=GaussianSpatialModel()\n sp.fit(train_x,train_y)\n #sp.fit(test_x,test_y)\n exit()\n self.model.fit(train_x,train_y)\n end = time.perf_counter()\n print(\"训练运行时间为\", round(end-start), 'seconds')\n args=np.where(np.array(test_y)==1)\n feature=np.array(test_x)[args]\n ypred=self.model.predict(train_x)[:,1]\n auc_mat=metrics.roc_auc_score(train_y,ypred)\n print(self.model_name,1,auc_mat)\n ypred=self.model.predict(test_x)[:,1]\n auc_mat=metrics.roc_auc_score(test_y,ypred)\n print(self.model_name,2,auc_mat)\n #self.save_model('group')\n #exit()\n return self.model\n \n def dist_go(self,idxs=None):\n self.model=Model(self.model_name,**self.kwargs)\n self.visual_data(23)\n if idxs==None:\n 
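# The distance_* helpers above pass two positional arguments to np.hstack
# (which takes a single tuple) and multiply a (n,)-shaped ones vector by an
# (n, 1) column, broadcasting to an unintended (n, n) matrix. A sketch of the
# intended "append a constant distance column" step:
import numpy as np

def with_distance_column(x, dist):
    """Append a constant distance column to an (n, d) feature matrix."""
    x = np.asarray(x)
    col = np.full((x.shape[0], 1), float(dist))
    return np.hstack((x, col))

assert with_distance_column(np.zeros((4, 2)), 3.5).shape == (4, 3)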
train_idx=[[2,5],[5,7],[2,8],[2,4],[0,2],[7,8],[2,7],[5,8],[4,6],[0,6],[0,4],[7,7],[0,0],[6,6],[4,4],[2,2],[5,5],[8,8]]\n else:\n train_idx=idxs\n r2_score_mat=np.zeros((9,9))\n auc_mat=np.zeros((9,9))\n train_x=[]\n train_y=[]\n test_x=[]\n test_y=[]\n flag=np.zeros((9,9))\n for idx in train_idx:\n flag[idx[0]][idx[1]]=1\n flag[idx[1]][idx[0]]=1\n for i,item in enumerate(self.train_x[idx[0]][idx[1]]):\n train_x.append([self.locationmat[idx[0]][idx[1]],item])\n train_y.append(self.train_y[idx[0]][idx[1]][i])\n for i in range(9):\n for j in range(9):\n #for k,item in enumerate(self.train_x[i][j]):\n # test_x.append([self.locationmat[i][j],item])\n # test_y.append(self.train_y[i][j][k])\n for k,item in enumerate(self.test_x[i][j]):\n test_x.append([self.locationmat[i][j],item[0]])\n test_y.append(self.test_y[i][j][k])\n self.model.fit(train_x,train_y)\n\n #args=np.where(np.array(train_y)==1)\n #feature=np.array(train_x)[args]\n #sp=GaussianSpatialModel()\n #sp.fit(feature,train_y)\n\n\n\n args=np.where(np.array(test_y)==1)\n feature=np.array(test_x)[args]\n #sp=GaussianSpatialModel()\n #sp.fit(feature,test_y)\n #print(test_x[0])\n #r2_score=self.model.score(test_x,test_y)\n\n plt.figure()\n ypred=self.model.predict(train_x)[:,1]\n auc_mat=metrics.roc_auc_score(train_y,ypred)\n print(self.model_name,1,auc_mat)\n ypred=self.model.predict(test_x)[:,1]\n auc_mat=metrics.roc_auc_score(test_y,ypred)\n print(self.model_name,2,auc_mat)\n fpr, tpr, thersholds = roc_curve(test_y, ypred, pos_label=1)\n plt.plot(fpr, tpr, label='ROC (area = {0:.2f})'.format(auc_mat), lw=2)\n with open('{}_roc.pkl'.format(self.model_name),'wb') as outp:\n pkl.dump({self.model_name:{'fpr':fpr,'tpr':tpr,'auc':auc_mat}},outp)\n plt.savefig('{}_roc.jpg'.format(self.model_name))\n plt.figure()\n\n\n answer=np.zeros((ypred.shape[0],2))\n answer[:,0]=ypred\n answer[:,1]=test_y\n#· np.savetxt('./logs/answer.csv',answer)\n #self.save_csv('answer',answer)\n plt.figure()\n colors=['r','b']\n x=[item[0] for item in train_x]\n y=[item[1] for item in train_x]\n c=[colors[item] for item in train_y]\n plt.scatter(x,y,c=c)\n plt.savefig('a.jpg')\n plt.figure()\n\n #fig = plt.figure()\n #ax = fig.gca(projection='3d')\n #k=defaultdict(list)\n #for i,item in enumerate(x):\n # k[item].append(y[i])\n #ax.plot(x, y, z)\n\n\n\n x=[item[0] for item in test_x]\n y=[item[1] for item in test_x]\n c=[colors[item] for item in test_y]\n plt.scatter(x,y,c=c)\n plt.savefig('b.jpg')\n\n x=np.arange(0,400,10)\n y=np.arange(0,2000,20)\n X,Y=np.meshgrid(x,y)\n XX=X.flatten()\n YY=Y.flatten()\n fx=np.vstack([XX,YY]).transpose()\n\n #plot_partial_dependence(self.model.model,test_x,features=[0,1,(0,1)],feature_names=['distance','diff_time'])\n #plt.savefig('{}_partial.jpg'.format(self.model_name))\n\n fig=plt.figure(figsize=(12,12))\n ax=fig.add_subplot(2,1,1,projection='3d')\n ax.set_top_view()\n# help(partial_dependence)\n #pdp,axes=partial_dependence(self.model.model,X=fx,target_variables=[[0,1]],grid_resolution=2)\n # pdp,axes=partial_dependence(self.model.model,X=fx,target_variables=[[0,1]])\n #pdp=self.model.model.predict(fx)\n pdp=self.model.model.predict_proba(fx)[:,1]\n pdp=pdp.reshape(X.shape)\n ax.view_init(elev=30., azim=45)\n # X,Y=np.meshgrid(axes[0],axes[1])\n # print(X.shape,Y.shape,pdp.shape)\n ax.plot_surface(X,Y,pdp*1000,cmap=plt.cm.Spectral,cstride=1,rstride=1)\n # ax=fig.add_subplot(2,1,2)\n plt.xlabel('distance')\n plt.ylabel('time difference')\n plt.contour(X,Y,pdp*1000,cmap=plt.cm.Spectral)\n 
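# dist_go above repeats predict(...)[:, 1] plus roc_auc_score inline for the
# train and test splits; the same sklearn calls as one helper (Model.predict
# already returns predict_proba output, per the wrapper class above):
from sklearn import metrics

def auc_of(model, features, labels):
    """ROC-AUC of the positive-class score for one data split."""
    scores = model.predict(features)[:, 1]
    return metrics.roc_auc_score(labels, scores)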
plt.savefig('{}_f.jpg'.format(self.model_name))\n self.save_model('UnionModel')\n #with open('./logs/UnionModel_{}.pkl'.format(self.model_name),'wb') as out:\n # pkl.dump(self.model,out)\n with open('{}_f.txt'.format(self.model_name),'w') as out:\n for i in range(X.shape[0]):\n for j in range(X[i].shape[0]):\n out.write('{} {} {}\\n'.format(X[i][j],Y[i][j],pdp[i][j]))\n out.write('\\n')\n return self.model\n\n def go(self):\n self.model={}\n for i in range(9):\n if i not in self.model.keys():\n self.model[i]={}\n for j in range(9):\n self.model[i][j]=Model(self.model_name,**self.args)\n r2_score_mat=np.zeros((9,9))\n auc_mat=np.zeros((9,9))\n train_true_num=np.zeros((9,9))\n train_false_num=np.zeros((9,9))\n test_true_num=np.zeros((9,9))\n test_false_num=np.zeros((9,9))\n self.visual_data(23)\n mean_mat=np.zeros((9,9))\n median_mat=np.zeros((9,9))\n max_mat=np.zeros((9,9))\n min_mat=np.zeros((9,9))\n hist_max_mat=np.zeros((9,9))\n bins=np.arange(0,2000,50)\n\n for i in range(9):\n for j in range(9):\n tidx=np.where(np.array(self.train_y[i][j])==1)\n #print(self.train_y[i][j],tidx)\n if tidx[0].shape[0]>0:\n max_mat[i][j]=np.max(np.array(self.train_x[i][j])[tidx])\n min_mat[i][j]=np.min(np.array(self.train_x[i][j])[tidx])\n hist=plt.hist(np.array(self.train_x[i][j])[tidx],bins=bins)\n hist_max_mat[i][j]=hist[1][np.argmax(hist[0])]+25\n else:\n hist_max_mat[i][j]=-1\n max_mat[i][j]=-1\n min_mat[i][j]=-1\n mean_mat[i][j]=np.mean(np.array(self.train_x[i][j])[tidx])\n median_mat[i][j]=np.median(np.array(self.train_x[i][j])[tidx])\n train_true_num[i][j]+=sum(self.train_y[i][j])\n train_false_num[i][j]+=sum(1-np.array(self.train_y[i][j]))\n test_false_num[i][j]+=sum(1-np.array(self.test_y[i][j]))\n test_true_num[i][j]+=sum(self.test_y[i][j])\n if len(self.train_x[i][j])==0 or len(self.test_y[i][j])==0:\n r2_score_mat[i][j]=-1\n auc_mat[i][j]=-1\n continue\n self.model[i][j].fit(np.array(self.train_x[i][j]).reshape((len(self.train_x[i][j]),1)),self.train_y[i][j])\n r2_score_mat[i][j]=self.model[i][j].score(self.test_x[i][j],self.test_y[i][j])\n ypred=self.model[i][j].predict(self.test_x[i][j])\n try: \n auc_mat[i][j]=metrics.roc_auc_score(self.test_y[i][j],ypred)\n except:\n auc_mat[i][j]=-1\n #np.savetxt('./logs/train_hist_max.txt',hist_max_mat.reshape(81),fmt='%.03f')\n #np.savetxt('./logs/train_max.txt',max_mat.reshape(81),fmt='%.03f')\n #np.savetxt('./logs/train_min.txt',min_mat.reshape(81),fmt='%.03f')\n #np.savetxt('./logs/train_mean.txt',mean_mat.reshape(81),fmt='%.03f')\n #np.savetxt('./logs/train_median.txt',median_mat.reshape(81),fmt='%.03f')\n #np.savetxt('./logs/r2_score_mat.csv',r2_score_mat,fmt='%.03f')\n #np.savetxt('./logs/auc_score_mat.csv',auc_mat,fmt='%.03f')\n total=np.zeros((81,6))\n total[:,0]=train_true_num.reshape((81))\n total[:,1]=train_false_num.reshape((81))\n total[:,2]=test_true_num.reshape((81))\n total[:,3]=test_false_num.reshape((81))\n total[:,4]=auc_mat.reshape((81))\n total[:,5]=r2_score_mat.reshape(81)\n\n self.save_csv('total',total)\n # np.savetxt('./logs/total.csv',total)\n #self.save_model('Seperate')\n return model\n\n # with open('./logs/Seperate_{}.pkl'.format(self.model_name),'wb') as out:\n # pkl.dump(self.model,out)\n","repo_name":"zhangxin1995/PTD","sub_path":"code/module/pathmodel/pathmodel.py","file_name":"pathmodel.py","file_ext":"py","file_size_in_byte":18980,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"4624015118","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport 
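# go() above estimates the modal travel time per camera pair as the centre of
# the fullest 50-unit histogram bin (hist[1][np.argmax(hist[0])] + 25); the
# same statistic without routing it through plt.hist:
import numpy as np

def histogram_mode(samples, bin_width=50, upper=2000):
    """Centre of the most populated fixed-width bin, as used for hist_max_mat."""
    counts, edges = np.histogram(samples, bins=np.arange(0, upper, bin_width))
    return edges[np.argmax(counts)] + bin_width / 2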
pygame\nfrom pygame.locals import K_DOWN, K_LEFT, K_RIGHT, K_SPACE, K_UP, K_a, K_d, K_s, K_w\nimport evdev\nfrom evdev import ecodes, InputDevice\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\nfrom icecream import ic as print\nimport socket\nimport threading\nimport time\nimport sys\nimport math\nimport cv2\nfrom shapely.geometry import Polygon, Point\n\nfrom informer import Informer\nfrom proto.python_out import marker_pb2, geometry_msgs_pb2, path_msgs_pb2, cmd_msgs_pb2\nfrom config_5g import cfg_server\n\n\nHOST_ADDRESS = '127.0.0.1'\nBLACK = (0, 0, 0)\nGREY = (192, 192, 192)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nWHITE = (255, 255, 255)\nWINDOW_WIDTH = 1920\nWINDOW_HEIGHT = 1080\nROBOT_SIZE = 20\nBUTTON_WIDTH = 300\nBUTTON_HEIGHT = 100\nBUTTON_LIGHT = (170, 170, 170)\nBUTTON_DARK = (100, 100, 100)\nBUTTON_GOAL_X = 50\nBUTTON_GOAL_Y = 50\nBUTTON_LASER_X = 50\nBUTTON_LASER_Y = 200\nBUTTON_SATELLITE_X = 50\nBUTTON_SATELLITE_Y = 350\nBUTTON_JOYSTICK_X = 50\nBUTTON_JOYSTICK_Y = 500\n# read map\nLASER_MAP = pygame.image.load('./maps/laser_map.jpg')\nSATELLITE_MAP = pygame.image.load('./maps/satellite_map.jpg')\nDISPLAY_MAP = LASER_MAP\nmap_offset = np.array([0, 0])\nrobot_goal = None\nrobot_pos = []\nrobot_heading = []\nrobot_cmd = []\nbounding_box = dict()\npath_pos = []\nrobot_clicked_id = None\nrobot_img = None\nbox_clicked_id = None\n# flags\nmap_draging = False\ngoal_setting = False\nrobot_clicked = False\nview_image = False\nbox_clicked = False\nuse_joystick = False\n\nclass Receiver(object):\n def __init__(self):\n self.path_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.path_sock.settimeout(1.0)\n self.path_sock.bind((HOST_ADDRESS, 23333))\n self.path_thread = threading.Thread(target=self.receive_path)\n self.path_thread.start()\n self.gesture_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.gesture_sock.settimeout(1.0)\n self.gesture_sock.bind((HOST_ADDRESS, 23335))\n self.gesture_thread = threading.Thread(target=self.receive_gesture)\n self.gesture_thread.start()\n self.timeout = False\n\n def receive_path(self):\n while True:\n try:\n data, _ = self.path_sock.recvfrom(4096)\n data = data.decode(\"utf-8\").split(';')\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n offset = np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2])\n global path_pos\n path_pos = np.array([np.array([float(pos.split(',')[0]), float(pos.split(',')[1])]) + offset\n for pos in data if pos != ''])\n # print(path_pos, len(path_pos))\n self.timeout = False\n except socket.timeout:\n self.timeout = True\n time.sleep(0.01)\n \n def receive_gesture(self):\n while True:\n try:\n data, _ = self.gesture_sock.recvfrom(4096)\n gesture = data.decode(\"utf-8\")\n # print(gesture)\n self.timeout = False\n except socket.timeout:\n self.timeout = True\n time.sleep(0.01)\n \ndef parse_message(message):\n global bounding_box\n marker_list = marker_pb2.MarkerList()\n marker_list.ParseFromString(message)\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n offset = np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2])\n for marker in marker_list.marker_list:\n try:\n center_pos = np.array([int(marker.pose.position.y*(-20)+2089), int(2949-20*marker.pose.position.x)]) + offset\n orientation = R.from_quat([marker.pose.orientation.x, marker.pose.orientation.y, marker.pose.orientation.z, marker.pose.orientation.w]).as_euler('xyz', degrees=False)[2]\n orientation += np.pi / 2\n height, width = 10*marker.scale.x, 
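# The 5G monitor above hard-codes the same world-to-pixel transform
# (x_px = -20*y + 2089, y_px = 2949 - 20*x) in both parse_message and
# parse_odometry; a sketch of it as a single helper, constants copied from
# that code:
import numpy as np

def world_to_pixel(x, y, offset=(0, 0)):
    """Convert a map-frame position (metres) to laser-map pixel coordinates."""
    return np.array([int(-20 * y + 2089), int(2949 - 20 * x)]) + np.asarray(offset)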
10*marker.scale.y\n vertex_A = center_pos + height*np.array([np.cos(orientation), np.sin(orientation)]) + width*np.array([-np.sin(orientation), np.cos(orientation)])\n vertex_B = center_pos + height*np.array([np.cos(orientation), np.sin(orientation)]) - width*np.array([-np.sin(orientation), np.cos(orientation)])\n vertex_C = center_pos - height*np.array([np.cos(orientation), np.sin(orientation)]) - width*np.array([-np.sin(orientation), np.cos(orientation)])\n vertex_D = center_pos - height*np.array([np.cos(orientation), np.sin(orientation)]) + width*np.array([-np.sin(orientation), np.cos(orientation)])\n marker_id = marker.id\n new_box = np.array([vertex_A, vertex_B, vertex_C, vertex_D])\n # overlap filter\n overlap = False\n p1 = Polygon(new_box)\n for id, pos in bounding_box.items():\n p2 = Polygon(pos)\n if p1.intersects(p2) and id != marker_id:\n overlap = True\n break\n if not overlap:\n bounding_box[marker_id] = np.array(new_box)\n except:\n pass\n # print(bounding_box)\n\ndef parse_odometry(message):\n global robot_pos, robot_heading\n odometry = geometry_msgs_pb2.Pose()\n odometry.ParseFromString(message)\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n offset = np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2])\n robot_pos = [np.array([int(odometry.position.y*(-20)+2089), int(2949-20*odometry.position.x)]) + offset]\n robot_heading = [R.from_quat([odometry.orientation.x, odometry.orientation.y, odometry.orientation.z, odometry.orientation.w]).as_euler('xyz', degrees=False)[2]]\n\ndef parse_cmd(message):\n global robot_cmd\n # print('grt cmd !!!')\n cmd = cmd_msgs_pb2.Cmd()\n cmd.ParseFromString(message)\n robot_cmd = [[cmd.v, cmd.w]]\n\ndef send_path(path_list):\n global ifm\n path = path_msgs_pb2.Path()\n for i in range(len(path_list)):\n pose = path_msgs_pb2.Pose2D()\n pose.x = path_list[i][0]\n pose.y = path_list[i][0]\n pose.theta = path_list[i][0]\n\n path.poses.append(pose)\n\n sent_data = path.SerializeToString()\n # print('send', len(sent_data))\n ifm.send_path(sent_data)\n\nclass Server(Informer):\n def msg_recv(self):\n self.recv('msg', parse_message)\n\n def odm_recv(self):\n self.recv('odm', parse_odometry)\n\n def cmd_recv(self):\n self.recv('cmd', parse_cmd)\n\n def send_path(self, message):\n self.send(message, 'path')\n\n\ndef sendGoal(goal):\n goal_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if len(robot_pos) == 0:\n goal_str = str(goal[0]) + ',' + str(goal[1])\n else:\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n offset = np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2])\n pos = robot_pos[0] - offset\n goal_str = str(goal[0]) + ',' + str(goal[1]) + ',' + str(pos[0]) + ',' + str(pos[1])\n goal_sock.sendto(bytes(goal_str, 'ascii'), (HOST_ADDRESS, 23334))\n\ndef screen2pos(x, y):\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n pos = np.array([x, y]) - np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2])\n return pos\n\ndef pos2screen(x, y):\n return x, y\n\ndef parse_vehicle_wheel(joystick, clock):\n keys = pygame.key.get_pressed()\n milliseconds = clock.get_time()\n\n throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0\n steer_increment = 5e-4 * milliseconds\n if keys[K_LEFT] or keys[K_a]:\n steer_cache -= steer_increment\n elif keys[K_RIGHT] or keys[K_d]:\n steer_cache += steer_increment\n else:\n steer_cache = 0.0\n steer_cache = min(0.7, max(-0.7, steer_cache))\n steer = round(steer_cache, 1)\n brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0\n\n numAxes = 
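# parse_message above expands each bounding-box corner with four copies of the
# same cos/sin expressions; identical geometry expressed via forward/left unit
# vectors (half-extents h, w and heading as in that code):
import numpy as np

def box_corners(center, heading, h, w):
    """Corners of a rectangle centred at `center`, rotated by `heading`."""
    forward = np.array([np.cos(heading), np.sin(heading)])
    left = np.array([-np.sin(heading), np.cos(heading)])
    return np.array([center + h * forward + w * left,
                     center + h * forward - w * left,
                     center - h * forward - w * left,
                     center - h * forward + w * left])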
joystick.get_numaxes()\n jsInputs = [float(joystick.get_axis(i)) for i in range(numAxes)]\n\n # Custom function to map range of inputs [1, -1] to outputs [0, 1] i.e 1 from inputs means nothing is pressed\n # For the steering, it seems fine as it is\n K1 = 1.0 # 0.55\n steerCmd = K1 * math.tan(1.1 * jsInputs[0])\n\n K2 = 1.6 # 1.6\n throttleCmd = K2 + (2.05 * math.log10(\n -0.7 * jsInputs[2] + 1.4) - 1.2) / 0.92\n if throttleCmd <= 0:\n throttleCmd = 0\n elif throttleCmd > 1:\n throttleCmd = 1\n\n brakeCmd = 1.6 + (2.05 * math.log10(\n -0.7 * jsInputs[3] + 1.4) - 1.2) / 0.92\n if brakeCmd <= 0:\n brakeCmd = 0\n elif brakeCmd > 1:\n brakeCmd = 1\n\n steer = steerCmd\n brake = brakeCmd\n throttle = throttleCmd\n\n return steer, throttle, brake\n\ndef drawRobots():\n # for pos, heading, cmd in zip(robot_pos, robot_heading, robot_cmd):\n for pos, heading in zip(robot_pos, robot_heading):\n cmd = [0.5]\n pygame.draw.circle(SCREEN, GREEN, pos + map_offset, ROBOT_SIZE)\n pygame.draw.line(SCREEN, BLUE, pos + map_offset, pos + map_offset + min(max(40*cmd[0], 25), 40)*np.array([np.cos(heading+np.pi/2), -np.sin(heading+np.pi/2)]), 5)\n \ndef drawGoal():\n if robot_goal is not None:\n # pygame.draw.circle(SCREEN, GREEN, robot_goal + map_offset, ROBOT_SIZE)\n cicle = (robot_goal + map_offset)\n marker_size = 20\n width = 10\n pygame.draw.line(SCREEN, RED, (cicle[0]-marker_size, cicle[1]-marker_size), (cicle[0]+marker_size, cicle[1]+marker_size), width)\n pygame.draw.line(SCREEN, RED, (cicle[0]-marker_size, cicle[1]+marker_size), (cicle[0]+marker_size, cicle[1]-marker_size), width)\n\n\ndef drawBoundingBox():\n bounding_box_copy = bounding_box.copy()\n for _, pos in bounding_box_copy.items():\n # x, y = pos + map_offset\n # pygame.draw.rect(SCREEN, BLUE, pygame.Rect(x, y, 60, 100), 10)\n pygame.draw.lines(SCREEN, BLUE, True, pos + map_offset, 10)\n\ndef drawPath():\n if len(path_pos) > 1:\n pygame.draw.lines(SCREEN, RED, False, path_pos + map_offset, 10)\n\ndef drawButton():\n # font settings\n FONT = pygame.font.SysFont('Corbel', 75)\n\n # get mouse position\n mouse = pygame.mouse.get_pos()\n\n # button: set goal\n text = FONT.render('Set Goal', True, WHITE)\n if BUTTON_GOAL_X <= mouse[0] <= BUTTON_GOAL_X + BUTTON_WIDTH and BUTTON_GOAL_Y <= mouse[1] <= BUTTON_GOAL_Y + BUTTON_HEIGHT:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_GOAL_X, BUTTON_GOAL_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_GOAL_X, BUTTON_GOAL_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_GOAL_X+45, BUTTON_GOAL_Y+25))\n # button: laser map\n text = FONT.render('LASER', True, WHITE)\n if BUTTON_LASER_X <= mouse[0] <= BUTTON_LASER_X + BUTTON_WIDTH and BUTTON_LASER_Y <= mouse[1] <= BUTTON_LASER_Y + BUTTON_HEIGHT:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_LASER_X, BUTTON_LASER_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_LASER_X, BUTTON_LASER_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_LASER_X+60, BUTTON_LASER_Y+25))\n # button: satellite map\n text = FONT.render('SATELLITE', True, WHITE)\n if BUTTON_SATELLITE_X <= mouse[0] <= BUTTON_SATELLITE_X + BUTTON_WIDTH and BUTTON_SATELLITE_Y <= mouse[1] <= BUTTON_SATELLITE_Y + BUTTON_HEIGHT:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_SATELLITE_X, BUTTON_SATELLITE_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_SATELLITE_X, BUTTON_SATELLITE_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_SATELLITE_X+10, 
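# parse_vehicle_wheel above decrements a local steer_cache that is never
# initialised (UnboundLocalError on the first LEFT/RIGHT press), and its
# keyboard result is then overwritten by the wheel axes anyway. A sketch that
# keeps the cache as explicit state instead:
import pygame
from pygame.locals import K_LEFT, K_RIGHT, K_a, K_d

def keyboard_steer(prev_steer, milliseconds):
    """Incremental keyboard steering, same 5e-4/ms rate and +/-0.7 clamp."""
    keys = pygame.key.get_pressed()
    increment = 5e-4 * milliseconds
    if keys[K_LEFT] or keys[K_a]:
        steer = prev_steer - increment
    elif keys[K_RIGHT] or keys[K_d]:
        steer = prev_steer + increment
    else:
        steer = 0.0
    return round(min(0.7, max(-0.7, steer)), 1)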
BUTTON_SATELLITE_Y+25))\n # button: joystick mode\n text = FONT.render('JOYSTICK', True, WHITE)\n if (BUTTON_JOYSTICK_X <= mouse[0] <= BUTTON_JOYSTICK_X + BUTTON_WIDTH and BUTTON_JOYSTICK_Y <= mouse[1] <= BUTTON_JOYSTICK_Y + BUTTON_HEIGHT) or use_joystick:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_JOYSTICK_X, BUTTON_JOYSTICK_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_JOYSTICK_X, BUTTON_JOYSTICK_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_JOYSTICK_X+20, BUTTON_JOYSTICK_Y+25))\n\ndef drawMessageBox():\n # font settings\n FONT = pygame.font.SysFont('Corbel', 75)\n\n # get mouse position\n mouse = pygame.mouse.get_pos()\n\n if robot_clicked:\n # box\n BOX_X, BOX_Y = robot_pos[robot_clicked_id] + map_offset + np.array([25, -150])\n BOX_WIDTH, BOX_HEIGHT = 350, 150\n BOX_COLOR = (255, 255, 255)\n pygame.draw.rect(SCREEN, BOX_COLOR, [BOX_X, BOX_Y, BOX_WIDTH, BOX_HEIGHT])\n\n # button: view image\n text = FONT.render('View Image', True, WHITE)\n BUTTON_IMAGE_X, BUTTON_IMAGE_Y = robot_pos[robot_clicked_id] + map_offset + np.array([50, -125])\n if BUTTON_IMAGE_X <= mouse[0] <= BUTTON_IMAGE_X + BUTTON_WIDTH and BUTTON_IMAGE_Y <= mouse[1] <= BUTTON_IMAGE_Y + BUTTON_HEIGHT:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_IMAGE_X, BUTTON_IMAGE_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_IMAGE_X, BUTTON_IMAGE_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_IMAGE_X+10, BUTTON_IMAGE_Y+25))\n\n if box_clicked:\n # box\n box_center = np.mean(bounding_box[box_clicked_id], axis=0)\n BOX_X, BOX_Y = box_center + map_offset + np.array([25, -150])\n BOX_WIDTH, BOX_HEIGHT = 350, 150\n BOX_COLOR = (255, 255, 255)\n pygame.draw.rect(SCREEN, BOX_COLOR, [BOX_X, BOX_Y, BOX_WIDTH, BOX_HEIGHT])\n\n # button: get id\n text = FONT.render('Get ID', True, WHITE)\n BUTTON_ID_X, BUTTON_ID_Y = box_center + map_offset + np.array([50, -125])\n if BUTTON_ID_X <= mouse[0] <= BUTTON_ID_X + BUTTON_WIDTH and BUTTON_ID_Y <= mouse[1] <= BUTTON_ID_Y + BUTTON_HEIGHT:\n pygame.draw.rect(SCREEN, BUTTON_LIGHT, [BUTTON_ID_X, BUTTON_ID_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n else:\n pygame.draw.rect(SCREEN, BUTTON_DARK, [BUTTON_ID_X, BUTTON_ID_Y, BUTTON_WIDTH, BUTTON_HEIGHT])\n SCREEN.blit(text, (BUTTON_ID_X+75, BUTTON_ID_Y+25))\n\n\ndef drawMaps():\n WINDOW_WIDTH, WINDOW_HEIGHT = pygame.display.get_surface().get_size()\n MAP_WIDTH, MAP_HEIGHT = DISPLAY_MAP.get_size()\n map_pos = np.array([WINDOW_WIDTH//2 - MAP_WIDTH//2, WINDOW_HEIGHT//2 - MAP_HEIGHT//2]) + map_offset\n SCREEN.blit(DISPLAY_MAP, map_pos)\n\n\nif __name__ == \"__main__\":\n pygame.init()\n SCREEN = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))#, pygame.RESIZABLE)\n pygame.display.set_caption('5G Monitor')\n icon = pygame.image.load('icon.png')\n pygame.display.set_icon(icon)\n CLOCK = pygame.time.Clock()\n SCREEN.fill(GREY)\n data_receiver = Receiver()\n # 5G server setup\n try:\n server = Server(cfg_server)\n except:\n pass\n # joystick setup\n try:\n device = evdev.list_devices()[0]\n evtdev = InputDevice(device)\n val = 25000 #[0,65535]\n evtdev.write(ecodes.EV_FF, ecodes.FF_AUTOCENTER, val)\n pygame.joystick.init()\n joystick_count = pygame.joystick.get_count()\n joystick = pygame.joystick.Joystick(0)\n joystick.init()\n except:\n pass\n\n cnt = 0\n while True:\n start_time = time.time()\n cnt += 1\n SCREEN.fill(GREY)\n drawMaps()\n drawGoal()\n drawRobots()\n drawBoundingBox()\n drawPath()\n drawButton()\n drawMessageBox()\n\n for event in 
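# drawButton/drawMessageBox above repeat the same hover-highlight block once
# per button; one helper with the same pygame calls (colour defaults copied
# from BUTTON_LIGHT/BUTTON_DARK/WHITE; the +10/+25 text offset is
# illustrative -- the original nudges each label by a different amount):
import pygame

def draw_button(screen, font, label, x, y, w, h, mouse,
                light=(170, 170, 170), dark=(100, 100, 100), fg=(255, 255, 255)):
    hovered = x <= mouse[0] <= x + w and y <= mouse[1] <= y + h
    pygame.draw.rect(screen, light if hovered else dark, [x, y, w, h])
    screen.blit(font.render(label, True, fg), (x + 10, y + 25))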
pygame.event.get():\n mods = pygame.key.get_mods()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN and mods & pygame.KMOD_CTRL:\n if event.button == 1: \n map_draging = True\n start_pos = event.pos\n elif event.type == pygame.MOUSEBUTTONDOWN:\n # get mouse position\n mouse = pygame.mouse.get_pos()\n # button: set goal\n if BUTTON_GOAL_X <= mouse[0] <= BUTTON_GOAL_X + BUTTON_WIDTH and BUTTON_GOAL_Y <= mouse[1] <= BUTTON_GOAL_Y + BUTTON_HEIGHT:\n goal_setting = True\n elif goal_setting:\n goal_setting = False\n robot_goal = mouse - map_offset\n # button: laser map\n elif BUTTON_LASER_X <= mouse[0] <= BUTTON_LASER_X + BUTTON_WIDTH and BUTTON_LASER_Y <= mouse[1] <= BUTTON_LASER_Y + BUTTON_HEIGHT:\n DISPLAY_MAP = LASER_MAP\n # button: satellite map\n elif BUTTON_SATELLITE_X <= mouse[0] <= BUTTON_SATELLITE_X + BUTTON_WIDTH and BUTTON_SATELLITE_Y <= mouse[1] <= BUTTON_SATELLITE_Y + BUTTON_HEIGHT:\n DISPLAY_MAP = SATELLITE_MAP\n # button: joystick mode\n elif BUTTON_JOYSTICK_X <= mouse[0] <= BUTTON_JOYSTICK_X + BUTTON_WIDTH and BUTTON_JOYSTICK_Y <= mouse[1] <= BUTTON_JOYSTICK_Y + BUTTON_HEIGHT:\n use_joystick = not use_joystick\n # button: robot\n if robot_clicked:\n # button: view image\n BUTTON_IMAGE_X, BUTTON_IMAGE_Y = robot_pos[robot_clicked_id] + map_offset + np.array([50, -125])\n if BUTTON_IMAGE_X <= mouse[0] <= BUTTON_IMAGE_X + BUTTON_WIDTH and BUTTON_IMAGE_Y <= mouse[1] <= BUTTON_IMAGE_Y + BUTTON_HEIGHT:\n view_image = True\n print('show image')\n robot_clicked = False\n for idx, pos in enumerate(robot_pos):\n if math.hypot(mouse[0] - (pos + map_offset)[0], mouse[1] - (pos + map_offset)[1]) <= ROBOT_SIZE:\n print('click robot {}'.format(idx))\n robot_clicked = True\n robot_clicked_id = idx\n break\n # button: bounding box\n if box_clicked:\n # button: get id\n box_center = np.mean(bounding_box[box_clicked_id], axis=0)\n BUTTON_ID_X, BUTTON_ID_Y = box_center + map_offset + np.array([50, -125])\n if BUTTON_ID_X <= mouse[0] <= BUTTON_ID_X + BUTTON_WIDTH and BUTTON_ID_Y <= mouse[1] <= BUTTON_ID_Y + BUTTON_HEIGHT:\n print('get id')\n box_clicked = False\n bounding_box_copy = bounding_box.copy()\n for idx, box in bounding_box_copy.items():\n p1 = Point(mouse)\n p2 = Polygon(box + map_offset)\n if p2.contains(p1):\n print('click box {}'.format(idx))\n box_clicked = True\n box_clicked_id = idx\n break\n elif event.type == pygame.MOUSEBUTTONUP and mods & pygame.KMOD_CTRL:\n if event.button == 1: \n map_draging = False\n elif event.type == pygame.MOUSEMOTION and mods & pygame.KMOD_CTRL:\n if map_draging:\n end_pos = event.pos\n map_offset = map_offset + end_pos - start_pos\n start_pos = end_pos\n elif event.type == pygame.JOYBUTTONDOWN:\n print(\"Joystick button pressed.\")\n elif event.type == pygame.JOYBUTTONUP:\n print(\"Joystick button released.\")\n\n # send goal\n if robot_goal is not None:\n if cnt % 10 == 0: \n sendGoal(screen2pos(*robot_goal))\n \n # view image\n if view_image and robot_img is not None:\n cv2.imshow('Robot Image', robot_img)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n view_image = False\n cv2.destroyAllWindows()\n\n # parse joystick\n if use_joystick:\n try:\n steer, throttle, brake = parse_vehicle_wheel(joystick, Clock)\n print(steer, throttle, brake)\n except:\n pass\n\n pygame.display.update()\n CLOCK.tick(20)\n end_time = time.time()\n # print('frequency', 
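# Note on the main loop above: the joystick branch calls
# parse_vehicle_wheel(joystick, Clock), but the clock created in __main__ is
# named CLOCK, so the call raises NameError -- silently swallowed by the bare
# try/except around it. Presumably intended:
#     steer, throttle, brake = parse_vehicle_wheel(joystick, CLOCK)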
1/(end_time-start_time))\n","repo_name":"5G-Swarm/ROS-Comm","sub_path":"src/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":20325,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"} +{"seq_id":"31224505143","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.homepage, name='homepage'),\n path('chatbox/', views.chatbot, name='chatbot'),\n path('login/',views.login, name='login'),\n path('pronouncequiz/',views.pronouncequiz, name='pronounce'),\n path('newfeed/', views.newsfeed, name='newsfeed'),\n\n] ","repo_name":"Nimosteve88/HackPackBGNHachathon","sub_path":"conversationAI/conversationAIProject/members/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6340488054","text":"# File:\t\tdownload_tedtalk.py\r\n# Author:\tE. Partridge\r\n# Date:\t\t8 August 2012\r\n# Description:\r\n#\t\t\tThis script parses the TED Talk audio feed and proceeds to\r\n#\t\t\tdownload all audio files into the same directory that\r\n#\t\t\tthis script is located in. Files are prepended with the publication\r\n#\t\t\tdate for convenience.\r\n#\r\n# Note: \tThis has only been tested on Windows 7 64-bit, with Python 2.7.2.5\r\n# Note2: \tTED Talk audio files contain ID3v2.4 tags, which are not supported\r\n# \t\t \tnatively by Windows. I used foobar2000 to convert the tags to ID3v2.3,\r\n#\t\t\twhich Windows does support. To do this, open the MP3 files in\r\n#\t\t\tfoobar2000, right click and select Tagging > MP3 Tag Types... Check\r\n#\t\t\t\"Override ID3v2 revision:\" and select the ID3v2.3 radio button.\r\n#\t\t\tAfter that, I was able to view metadata in Windows Explorer and\r\n#\t\t\tWindows Media Player.\r\nimport urllib\r\nimport feedparser\r\nimport time\r\n\r\ntedtalk_rss_url = 'http://feeds.feedburner.com/TEDTalks_audio'\r\ntedtalk_feed = feedparser.parse(tedtalk_rss_url)\r\n\r\ndef GetFeedContent(entry):\r\n\tcontent_url = entry.enclosures[0].href\r\n\tfile_name = content_url.split('/')[-1]\r\n\tfile_date = time.strptime(entry.published[5:16], '%d %b %Y')\r\n\tdate_str = '{:04}-{:02}-{:02}'.format(file_date.tm_year, file_date.tm_mon, file_date.tm_mday)\r\n\tfile_name = date_str + ' ' + file_name\r\n\ttry:\r\n\t\twith open(file_name) as f:\r\n\t\t\tprint('File exists: ' + file_name)\r\n\texcept IOError as e:\r\n\t\tprint('Downloading: ' + file_name)\r\n\t\turllib.urlretrieve(content_url, file_name)\r\n\treturn\r\n\r\nfor entry in tedtalk_feed.entries:\r\n\tGetFeedContent(entry)","repo_name":"mrtumnus/scrape-tedtalks","sub_path":"download_tedtalk.py","file_name":"download_tedtalk.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38309540050","text":"from drf_yasg import openapi\nfrom .swagger import unauthorized_401\n\nlogout_schema = openapi.Schema(\n title=\"logout\",\n type=openapi.TYPE_OBJECT,\n properties={\n \"refresh_token\": openapi.Schema(type= openapi.TYPE_STRING, description=\"refresh token to be blacklisted\",\n example=\"jknJj93nc4JN8ssn38JHBfjJ7dnHJ4hUJ4h\")\n }\n)\nresponse_login = {\n \"401\": openapi.Response(\n description=\"invalid email or password\",\n examples={\n \"application/json\": {\n \"detail\": \"No active account found with the given credentials\"\n }\n }),\n \"200\": openapi.Response(\n description=\"Login was successful\",\n examples={\n 
\"application/json\":\n {\n \"refresh\": \"eyJhbI6IkpXVCJ5_nu_A8IZIeZ0x1WJq8X9AISLA4\",\n \"access\": \"eyOjE2NjgzpczASEhgbG6uuhmmkjj8SbFeaumOxyuk\"\n }\n }),\n}\nresponse_refresh_token = {\n \"401\": openapi.Response(\n description=\"invalid token or invalid token type\",\n examples={\n \"application/json\": {\n \"detail\": \"Token is invalid or expired\",\n \"code\": \"token_not_valid\"\n}\n }),\n \"201\": openapi.Response(\n description=\"Get new access token\",\n examples={\n \"application/json\":\n {\n \"access\": \"eyOjE2NjgzpczASEhgbG6uuhmmkjj8SbFeaumOxyuk\"\n }\n }),\n}\nlogout_response = {\n \"200\": openapi.Response(\n description=\"successfully logged out\",\n examples={\n \"application/json\":\n {\n \"detail\": \"Successfully logged out\"\n }\n }),\n #\"401\": unauthorized_401,\n \"400\": openapi.Response(\n description=\"invalid refresh token in request\",\n examples={\n \"application/json\":\n {\n \"detail\": \"Token is invalid or expired\"\n }\n })\n}\nregister_response = {\n \"201\": openapi.Response(\n description=\"successfully registered\",\n examples={\n \"application/json\":\n {\n \"id\": 5,\n \"email\": \"test2@email.com\",\n \"first_name\": \"Mary\",\n \"last_name\": \"Watson\",\n \"password\": \"pbkdf2_sha256$390000$IW8Bnc2qEsYnRJOFC9cCJ6$gF9yJi8cfkTq4WKdB8BN32w8tW9tmBUEf+tw8Bt/9Pk=\"\n }\n }\n ),\n \"400\": openapi.Response(\n description=\"invalid user data\",\n examples={\n \"application/json\":\n {\n \"email\": [\n \"user with this email already exists.\"\n ],\n \"password\": [\n \"Password should contain at least one uppercase letter, one lowercase letter and be at least 8 symbols long\"\n ]\n }\n })\n}","repo_name":"khrystyna-yaryna-kolba/kkolba","sub_path":"programming/programming_and_practice_8(authentification_roles)/rest_uni/rest_app/swagger/authentification_swagger.py","file_name":"authentification_swagger.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"31723521592","text":"import sys\n\nn = str(sys.stdin.readline().rstrip())\n\nif '0' not in n:\n print(-1)\nelse:\n sum = 0\n for num in n:\n sum += int(num)\n if (sum % 3) == 0:\n seperated = []\n for i in n:\n seperated.append(int(i))\n seperated.sort(reverse=True)\n print(int(''.join(str(e) for e in seperated)))\n else:\n print(-1)\n \n ","repo_name":"moonkey48/altgorithm_python","sub_path":"boj_10610_greedy.py","file_name":"boj_10610_greedy.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"20749483813","text":"#!/usr/bin/python3\nimport numpy as np\n\nwith open('14_input.txt') as f:\n lines = [line.rstrip() for line in f]\n\nDO_PART1=False\n\ndef printCave(cave,extentX,extentY,curSand):\n for y in range(extentY[0],extentY[1]+1):\n str=\"\"\n for x in range(extentX[0],extentX[1]+1):\n if cave[x][y]==1:\n str+=\"#\"\n elif cave[x][y]==2 or (x==curSand[0] and y==curSand[1]):\n str+=\"o\"\n elif x==500 and y==0:\n str+=\"+\"\n else:\n str+=\".\"\n print(str)\n\ndef runSandFlow(cave):\n settledCount=0\n withinCave=True\n while withinCave:\n sandPos=[500,0]\n settled=False\n while not settled and withinCave:\n if cave[sandPos[0]][sandPos[1]+1]==0: # air below - so move down\n sandPos=[sandPos[0],sandPos[1]+1]\n elif cave[sandPos[0]][sandPos[1]+1]>0: # rock or sand below\n if cave[sandPos[0]-1][sandPos[1]+1]>0:\n if cave[sandPos[0]+1][sandPos[1]+1]>0:\n settled=True\n settledCount+=1\n 
cave[sandPos[0],sandPos[1]]=\"2\"\n else:\n sandPos=[sandPos[0]+1,sandPos[1]+1] \n else:\n sandPos=[sandPos[0]-1,sandPos[1]+1]\n if sandPos==[500,0]: # source blocked!\n return settledCount\n withinCave = sandPos[1]<=extentY[1] and sandPos[0]>=extentX[0] and sandPos[0]<=extentX[1]\n return settledCount\n\n#parse input\nnumCols=1000\nnumRows=200\nextentX=[numCols,0]\nextentY=[numRows,0]\ncave=np.zeros(shape=(numCols, numRows), dtype=np.uint8)\nfor line in lines:\n nodes=line.split(\" -> \")\n for nodeID in range(len(nodes)-1):\n start=[int(x) for x in nodes[nodeID].split(\",\")]\n end=[int(x) for x in nodes[nodeID+1].split(\",\")]\n\n extentX=[min(extentX[0],start[0],end[0]),max(extentX[1],start[0],end[0])]\n extentY=[min(extentY[0],start[1],end[1]),max(extentY[1],start[1],end[1])]\n \n if start[0]==end[0]:\n for y in range(min(start[1],end[1]),max(start[1],end[1])+1):\n cave[start[0]][y]=1\n else:\n for x in range(min(start[0],end[0]),max(start[0],end[0])+1):\n cave[x][start[1]]=1\n\n\nextentY[0]=0\n\n\nif DO_PART1:\n #part 1\n printCave(cave,extentX,extentY,[0,0])\n settledCount=runSandFlow(cave) \n print(\"Part 1 answer:\",settledCount)\nelse:\n #part 2\n for x in range(numCols):\n cave[x][extentY[1]+2]=1\n extentY[1]=extentY[1]+2\n extentX[0]=0\n extentX[1]=numCols-1\n printCave(cave,extentX,extentY,[0,0])\n settledCount=runSandFlow(cave)\n printCave(cave,extentX,extentY,[0,0])\n print(\"Part 2 answer:\",settledCount)\n","repo_name":"stuwrigley/advent-of-code-2022","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3271668656","text":"#coding:utf-8\n\n\n# 2、小明爱跑步,爱吃东西。\n# 1)小明体重75.0公斤\n# 2)每次跑步会减肥0.5公斤\n# 3)每次吃东西体重会增加1公斤\n# 4)小美的体重是45.0公斤\n\n\n'''\n1.需要定义的类:人\n2.类的属性:名字,体重\n3.类的方法:跑步;吃东西\n4.跑步方法:体重减少0.5公斤\n5.吃东西:体重增加1公斤\n\n\n'''\n\n#定义\nclass Person:\n def __init__(self,name,weight):\n self.name = name\n self.weight = weight\n\n def __str__(self):\n return ('%s的体重是%0.1f公斤'%(self.name,self.weight))\n\n def run(self):\n self.weight -= 0.5\n print('%s跑步减肥0.5公斤'%(self.name))\n\n def eat(self):\n self.weight += 1\n print('%s吃东西体重增加1公斤' % (self.name))\n\n#使用\np = Person('小明',75)\nprint(p)\np.run()\nprint(p)\n#\np2 = Person('小美',45)\nprint(p2)\np2.eat()\nprint(p2)\n","repo_name":"rainshine1190/VIPtest-base","sub_path":"日常作业/接口第一天/面向对象练习题2.py","file_name":"面向对象练习题2.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"zh","doc_type":"code","stars":17,"dataset":"github-code","pt":"76"} +{"seq_id":"36353267278","text":"from flask import Flask, render_template, request, redirect, session\n\napp = Flask(__name__)\napp.secret_key = 'keep it secret, keep it safe' # set a secret key for security purposes\n\n@app.route('/', methods=['GET'])\ndef counter():\n if 'count' not in session: session['count'] = 0 \n else: session['count'] += 1\n\n if 'visits' not in session: session['visits'] = 1\n else: session['visits'] += 1\n \n return render_template(\"index.html\")\n\n@app.route('/add2', methods=['POST'])\ndef add2():\n\n session['count'] += 1\n print(session['count'])\n # Never render a template on a POST request.\n # Instead we will redirect to our index route.\n return redirect('/')\n\n@app.route('/destroy_session', methods=['GET'])\ndef destroy_session():\n\n session.clear()\n return redirect('/')\n\n\n@app.route('/reset', methods=['POST'])\ndef reset():\n\n session['count'] = 0\n session['visits'] = 0\n print(session['count'])\n # Never 
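# In runSandFlow above, settled sand is written as the string "2" into the
# uint8 grid (NumPy coerces it silently) and indexing mixes cave[x][y] with
# cave[x, y]; clearer with named integer cell codes and one indexing style:
import numpy as np

AIR, ROCK, SAND = 0, 1, 2
cave = np.zeros((1000, 200), dtype=np.uint8)
cave[500, 0] = SAND  # instead of cave[sandPos[0],sandPos[1]] = "2"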
render a template on a POST request.\n # Instead we will redirect to our index route.\n return redirect('/')\n\n@app.route('/increment_by', methods=['POST'])\ndef increment_by():\n \n session['count'] = session['count'] + int(request.form['increment']) - 1\n \n return redirect('/')\n\nif __name__ == (\"__main__\"):\n app.run(debug=True )","repo_name":"LukeJech/Jech_Luke_Counter","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"761522201","text":"\"\"\"\nПрограмма, моделирующая последовательность\nантагонистических игр с заданной матрицей платежей\n\nАвтор: Афанасьев И.Е.\nДата написания: 20.09.2020\n\"\"\"\n\niterations = 10000 # Количество ходов\nstart_money = 10000 # Начальное количество денег у каждого игрока\nmatrix1 = [ [-4, 5], [8, -7] ] # Матрица платежей\n\n# Инвертируем матрицу (Платежи для второго игрока)\nq = len(matrix1)\nmatrix2 = []\nfor i in range(q):\n matrix2.append([])\nfor i in matrix1:\n for pos, j in enumerate(i):\n matrix2[pos % q].append(- j)\n\n# Класс игрок с матрицей платежей, текущим количеством\n# денег, коэффициентами для принятия решения на основе\n# стратегии противника и последним ходом\nclass gamer:\n def __init__(self, matrice, money):\n self.m = matrice\n self.money = money\n self.koef = [0] * len(matrice[0])\n self.last = 0\n def strikeback(self, enemy_strat):\n self.koef[enemy_strat] += 1\n self.money += self.m[self.last][enemy_strat]\n return self.money\n # Вычисление оптимального хода на основе\n # предыдущих ходов противника\n def math(self):\n M = []\n for i in self.m:\n process = 0\n for j in range(len(self.koef)):\n process += self.koef[j] * i[j]\n M.append(process)\n self.last = M.index(max(M))\n return self.last\n\n# Импортируем графическую библиотеку\nimport matplotlib.pyplot as plt\n# Создаём два объекта класса \"игрок\"\nplayer1 = gamer(matrix1, start_money)\nplayer2 = gamer(matrix2, start_money)\n\n# Инициализируем фигуру, на которой будем рисовать\nfig = plt.figure()\nplt.ion()\ncolor = ['red', 'green', 'blue', 'brown']\ny = [] # Ось y\nx = [] # Ось x\nfor i in range(len(color)):\n y.append([])\n x.append([])\n\n# Собственно, игра\nfor i in range(iterations):\n str1 = player1.math()\n str2 = player2.math()\n \n y[str1].append(player1.strikeback(str2))\n x[str1].append(i)\n \n y[str2 + 2].append(player2.strikeback(str1))\n x[str2 + 2].append(i)\n\nfor i in range(len(x)):\n plt.plot(x[i], y[i], c = color[i])\nplt.show() # Отображение графика\nprint('Длительность тестовой серии игры: ', iterations)\nprint(\n 'Стратегии первого игрока: %s\\nДеньги первого игрока: %d\\n' % \n (player2.koef, player1.money)\n )\nprint(\n 'Стратегии второго игрока: %s\\nДеньги второго игрока: %d' % \n (player1.koef, player2.money)\n )","repo_name":"Naughtyk/Python","sub_path":"project11 - theory of games/Лабы Ване/labrab.py","file_name":"labrab.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8620515908","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.uic import loadUi\n\nfrom App import excel_reader, database\nfrom App.logs import logger\nimport os\n\nBASE_DIR = os.getenv(\"BASE_DIR\")\n\n\nclass StudentsFrame(QFrame):\n def __init__(self):\n super().__init__() \n loadUi(os.path.join(BASE_DIR, \"Forms\", \"ogrenciler_frame.ui\"), self)\n \n 
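# gamer.math() in the game-theory script above is fictitious play: each player
# best-responds to the opponent's empirical move frequencies accumulated in
# self.koef. The same computation with English names:
def best_response(payoff_matrix, opponent_counts):
    """Row index maximising expected payoff against observed move counts."""
    expected = [sum(cnt * row[j] for j, cnt in enumerate(opponent_counts))
                for row in payoff_matrix]
    return expected.index(max(expected))

assert best_response([[-4, 5], [8, -7]], [3, 1]) == 1  # matrix from the script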
self.ogrencilerList = database.get_all_students()\n\n self.set_signs() #SIGNALS\n self.set_ts() #TABLE SETTINGS\n\n def set_signs(self):\n \"\"\"\n Sets the signals, buttons or etc. which has relationship between them.\n \"\"\"\n self.searchByCombo.currentIndexChanged.connect(self.change_search_by)\n self.searchButton.pressed.connect(lambda: self.draw_table(searchBy = self.searchByCombo.currentText()))\n self.importButton.clicked.connect(self.import_dialog)\n self.addButton.clicked.connect(self.add_dialog)\n self.editButton.clicked.connect(self.edit_dialog)\n self.deleteButton.clicked.connect(lambda: self.remove_student(removeBy = True))\n self.deleteAllButton.clicked.connect(lambda: self.remove_student(all = True))\n\n self.table.itemSelectionChanged.connect(self.select_row)\n self.table.selectionModel().selectionChanged.connect(self.on_selection_changed) #KUTUCUK SECILINCE UYARI OLUSTURMA\n\n self.table.horizontalHeader().sectionClicked.connect(self.sort)\n self.resetOrderButton.clicked.connect(self.draw_table)\n \n def sort(self, sectionIndex):\n if sectionIndex == 0:\n self.draw_table(order=True, sectionIndex=sectionIndex)\n\n elif sectionIndex == 1:\n self.draw_table(order=True, sectionIndex=sectionIndex)\n\n elif sectionIndex == 2:\n self.draw_table(order=True, sectionIndex=sectionIndex)\n\n elif sectionIndex == 3:\n self.draw_table(order=True, sectionIndex=sectionIndex)\n\n elif sectionIndex == 4:\n self.draw_table(order=True, sectionIndex=sectionIndex)\n \n def draw_table(self, searchBy = False, order = False, sectionIndex = 0):\n BY_NO = \"Numaraya göre\"\n BY_FULLNAME = \"Tam ada göre\"\n BY_CLASS = \"Sınıfa göre\"\n\n if order:\n self.ogrencilerList = sorted(self.ogrencilerList, key=lambda ogrenci: ogrenci[sectionIndex])\n self.set_table_items()\n return\n \n searchContent = self.searchIn.text().strip().upper()\n if searchContent == \"\":\n self.ogrencilerList = database.get_all_students()\n\n elif searchBy == BY_NO:\n self.ogrencilerList = database.get_all_students(number = searchContent)\n\n elif searchBy == BY_FULLNAME:\n self.ogrencilerList = database.get_all_students(fullname = searchContent)\n\n elif searchBy == BY_CLASS:\n self.ogrencilerList = database.get_all_students(grade = searchContent)\n\n self.set_table_items()\n\n def set_table_items(self):\n self.table.setRowCount(len(self.ogrencilerList))\n for rowInd, student in enumerate(self.ogrencilerList):\n for columnInd, data_raw in enumerate(student):\n self.table.setItem(rowInd, columnInd, QTableWidgetItem(str(data_raw)))\n\n self.table.show()\n\n def change_search_by(self):\n self.searchBy = self.searchByCombo.currentText()\n\n def select_row(self):\n items = self.table.selectedItems()\n rowIndexes = set()\n for item in items:\n rowIndexes.add(item.row())\n\n rowIndex = min(rowIndexes)\n self.table.selectRow(rowIndex) \n\n def import_dialog(self):\n dialog = QFileDialog(caption=\"Dosya seçiniz\", filter=\"(*.xls)\")\n if dialog.exec_() == dialog.Accepted:\n filePath = str(dialog.selectedFiles()[0])\n else:\n return\n \n ogrencilerList = excel_reader.get_workbook_content(filePath)\n database.add_multiple_students(students = ogrencilerList)\n \n self.ogrencilerList = database.get_all_students()\n self.draw_table()\n\n def add_dialog(self):\n dialog = EkleDuzenleDialog()\n if dialog.toAdd:\n database.add_one_student(dialog.student)\n\n self.ogrencilerList = database.get_all_students()\n self.draw_table()\n\n def edit_dialog(self):\n #ogrenci = get_ogrenci_by_satir_no() -> Bu fonksiyon ogrenciler tarafindan 
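# StudentsFrame.sort above repeats an identical body in all five if/elif
# branches, so the whole ladder reduces to one guarded call:
def sort(self, sectionIndex):
    if 0 <= sectionIndex <= 4:
        self.draw_table(order=True, sectionIndex=sectionIndex)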
tasarlanacaktir.\n dialog = EkleDuzenleDialog(ogrenci = [2949, \"Yusuf\", \"Kiris\", \"9/A\"])\n if dialog.toUpdate:\n database.update_student(dialog.student)\n \n self.ogrencilerList = database.get_all_students()\n self.draw_table()\n\n def remove_student(self, removeBy = False, all = False):\n if all:\n onaydialog = OgrencilerSilmeOnayDialog()\n if onaydialog.result:\n database.remove_all_students()\n\n elif removeBy:\n rowIndexes = [item.row() for item in self.table.selectedItems()]\n for rowIndex in rowIndexes:\n lastIndex = rowIndex\n if rowIndex != lastIndex:\n pass\n # TÜM SEÇİLİ SATIRLARIN UZUNLUĞUNU KONTROL ET. EĞER 1 TANE SATIR SEÇİLİ İSE ONUN NUMARASINA BAK VE SİL\n #print(self.table.itemAt(QPoint(0, self.table.selectedItems()[0].row())).text())\n database.remove_one_student(number = self.table.itemAt(QPoint(0, self.table.selectedItems()[0].row())).text())\n\n self.ogrencilerList = database.get_all_students()\n self.draw_table()\n\n def add_student(self):\n no = int(self.noIn.text().strip()) #\n name = self.nameIn.text().strip().upper() #\n surname = self.surnameIn.text().strip().upper() #\n grade = self.gradeCombo.currentText() # INPUTS\n classs = self.classCombo.currentText() #\n gc = grade + \"/\" + classs #\n if \"Sınıf\" == grade or \"Şube\" == classs :\n return\n \n student = [no, name, surname, gc] #\n\n database.add_one_student(student = student) # ADD TO DATABASE\n\n self.draw_table() # REDRAW THE TABLE\n \n [input.clear() for input in [self.noIn, self.nameIn, self.surnameIn]] #CLEAR THE LINE_EDITS\n [combo.setCurrentIndex(0) for combo in [self.gradeCombo, self.classCombo]] #RESET THE COMBO_BOXES\n\n def on_selection_changed(self, selected: QItemSelection, deselected: QItemSelection):\n #Bu fonksiyon öğrenciler tarafından tasarlanacak.\n pass\n \n def set_ts(self):\n \"\"\"\n Set the 'table settings'.\n \"\"\"\n self.table.setColumnCount(5)\n columnHeaders = [\"Numara\", \"Ad\", \"Soyad\", \"Cinsiyet\", \"Sınıf\"]\n self.table.setHorizontalHeaderLabels(columnHeaders)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Fixed)\n self.table.setEditTriggers(QTableWidget.NoEditTriggers)\n self.draw_table() \n \nclass EkleDuzenleDialog(QDialog):\n def __init__(self, ogrenci = False):\n super().__init__()\n loadUi(os.path.join(BASE_DIR, \"Forms\", \"ekle_duzenle_dialog.ui\"), self)\n self.lineEditItems = [self.noIn, self.nameIn, self.surnameIn]\n self.comboBoxItems = [self.gradeCombo, self.classCombo]\n self.student = ogrenci\n self.toUpdate = False\n self.toAdd = False\n \n self.set_signals()\n \n def set_signals(self):\n # Empty style sheets meant to make it normal\n self.noIn.textChanged.connect(lambda: self.noIn.setStyleSheet(\"\"))\n self.nameIn.textChanged.connect(lambda: self.nameIn.setStyleSheet(\"\"))\n self.surnameIn.textChanged.connect(lambda: self.surnameIn.setStyleSheet(\"\"))\n self.saveButton.clicked.connect(self.check)\n self.exitButton.clicked.connect(self.close)\n\n if not self.student:\n self.setWindowTitle(\"Öğrenci ekle\")\n else:\n self.set_values()\n self.setWindowTitle(\"Öğrenci bilgilerini düzenle\")\n\n self.set_ws()\n \n def set_values(self):\n [lineEdit.setText(str(data)) for lineEdit, data in zip(self.lineEditItems, self.student)]\n grade, classs = self.student[-1].split(\"/\")\n self.gradeCombo.setCurrentText(grade)\n self.classCombo.setCurrentText(classs)\n\n def check(self):\n no = self.noIn.text().strip()\n name = self.nameIn.text().strip().upper()\n surname = 
self.surnameIn.text().strip().upper()\n grade = self.gradeCombo.currentText() + \"/\" + self.classCombo.currentText()\n sex = self.sexCombo.currentText()\n student = [no, name, surname, sex, grade]\n \n if not no.isnumeric():\n self.noIn.setStyleSheet(\"background-color: red;\")\n return\n elif not name.isalpha():\n self.nameIn.setStyleSheet(\"background-color: red;\")\n return\n elif not surname.isalpha():\n self.surnameIn.setStyleSheet(\"background-color: red;\")\n return\n\n if all(map(len, student)):\n if not self.student:\n database.add_one_student(student=student)\n else:\n database.update_student(student=student)\n self.close()\n\n def set_ws(self):\n \"\"\"\n Adjust the window settings\n \"\"\"\n self.exec_()\n\n\nclass OgrencilerSilmeOnayDialog(QDialog):\n def __init__(self):\n super().__init__()\n loadUi(os.path.join(BASE_DIR, \"Forms\", \"ogrenciler_silme_onay_dialog.ui\"), self)\n self.checkk()\n\n self.okayButton.clicked.connect(self.closee)\n self.checkBox.stateChanged.connect(self.checkk)\n \n self.result = False\n self.set_ws()\n\n def checkk(self):\n if self.checkBox.isChecked():\n self.okayButton.setEnabled(True)\n else:\n self.okayButton.setEnabled(False)\n\n def closee(self):\n self.result = True\n self.close()\n\n def set_ws(self):\n \"\"\"\n Adjust the window settings\n \"\"\"\n self.exec_()\n","repo_name":"yoseidonn/Kelebek","sub_path":"App/Frames/students_frame.py","file_name":"students_frame.py","file_ext":"py","file_size_in_byte":10287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3254363966","text":"words = lambda t : list(map(t, input().split()))\n\nn = int(input())\na = words(int)\nb = words(int)\n\ndef getNext(i):\n if i == n-1:\n return 0\n else:\n return i+1\n\ndef getPrev(i):\n if i == 0:\n return n-1\n else:\n return i-1\n\nfrom collections import deque\nq = deque()\n\ndef verify(i):\n if b[i] != a[i] and b[i] - (b[getNext(i)] + b[getPrev(i)]) >= a[i]:\n return True\n else:\n return False\n\nfor i in range(len(b)):\n if b[i] >= a[i] and verify(i):\n q.append(i)\n\nans = 0\nsucceed = True\nwhile not len(q) == 0:\n i = q.popleft()\n ni = getNext(i)\n pi = getPrev(i)\n #print(i, b)\n d = b[ni] + b[pi]\n if b[i] % d == a[i] % d:\n ans += b[i] // d - (a[i] // d)\n b[i] = a[i]\n else:\n ans += b[i] // d\n b[i] %= d\n if b[i] < a[i]:\n succeed = False\n break\n if verify(ni):\n q.append(ni)\n if verify(pi):\n q.append(pi)\n\nfor i in range(len(b)):\n if a[i] != b[i]:\n succeed = False\n break\n\nif succeed:\n print(ans)\nelse:\n print(-1)\n","repo_name":"ugnom/programming_competition_archive","sub_path":"agc/agc036/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29706990059","text":"n=input().split(\" \")\nlst=[]\nfor i in range(int(n[0])):\n lst.append(input().split(\" \"))\n\nsets=int(n[0])*int(n[1])\n# di={\"0\":[],\"1\":[]}\n# for i in range(len(lst)):\n# di[\"0\"].append([])\n# di[\"1\"].append([])\n# for j in range(len(lst[i])):\n# di[lst[i][j]][-1].append(j)\n\nfor row in lst:\n for i in range(len(row)):\n for j in range(i+2,len(row),2):\n if row[j]==row[i]:\n sets+=1\n else:\n break\n\nfor i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(i+2,len(lst),2):\n if lst[i][j]==lst[k][j]:\n print(\"here: \",(i,j),\" \",(k,j))\n sets+=1\n else:\n 
break\nprint(sets)\n\n\n","repo_name":"Selamawit926/Competitive_Programming","sub_path":"CampQuestions/Rectangles.py","file_name":"Rectangles.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73594337845","text":"#!/usr/bin/env pythOn\n# -*- coding: utf-8 -*-\n'''\nname: xss特殊字符/函数/标签 FUZZ检测\nreferer: unknown\nauthor: Lucifer\ndescriptiOn: FUZZ反射型跨站所需要的负载有无过滤情况。\n'''\nimport re\nimport sys\nimport requests\nimport warnings\nfrom termcolor import cprint\n\nclass xss_characterfuzz_check_BaseVerify:\n def __init__(self, url):\n self.url = url\n\n def run(self):\n key = \"FUZZING\"\n headers = {\n \"User-Agent\":\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n start_md5 = \"c28c0db26d39331a\"\n end_md5 = \"15b86f2d013b2618\"\n character = [\",\", \".\", \"?\", \"<\", \">\", \"/\", \";\", \":\", \"'\", '\"', \"|\", \"\\\\\", \"[\", \"]\", \"{\", \"}\", \"=\", \"!\", \"@\", \"$\", \"%\", \"(\", \")\"\n ]\n drive_func = ['OnaBort ', 'OnacTivate ', 'OnafTerprint ', 'OnafTerupdate ', 'OnbeForeactivate ', \n 'OnbeForecopy ', 'OnbeForecut ', 'OnbeForedeactivate ', 'OnbeForeeditfocus ', 'OnbeForepaste ', \n 'OnbeForeprint ', 'OnbeForeunload ', 'OnbeForeupdate ', 'OnblUr ', 'OnboUnce ', 'OncEllchange ', \n 'OncHange ', 'OncLick ', 'OncOnTextmenu ', 'OncOnTrolselEct ', 'OnCopy ', 'OncUt ', 'OndaTaavailable ', \n 'OndatAsetchanged ', 'OndAtasetcomplete ', 'OndBlclick ', 'OndeacTivate ', 'OndrAg ', 'OndrAgend ', \n 'OndrAgenter ', 'OndrAgleave ', 'OndrAgover ', 'OndrAgstart ', 'OndRop ', 'OneRror ', 'OnerRorupdate ', \n 'OnfilTerchange ', 'OnfiNish ', 'OnfOcus ', 'OnfOcusin ', 'OnfOcusout ', 'OnhElp ', 'OnkeyDown ', 'OnkeyPress ', \n 'OnkeyUp ', 'OnlayoutcOmplete ', 'OnloAd ', 'OnlosecApture ', 'OnmoUsedown ', 'OnmoUseenter ', 'OnmoUseleave ', \n 'OnmoUsemove ', 'OnmoUseout ', 'OnmoUseover ', 'OnmoUseup ', 'OnmoUsewheel ', 'OnmOve ', 'OnmOveend ', \n 'OnmOvestart ', 'OnpAste ', 'OnpropertyChange ', 'OnreadyStatechange ', 'OnrEset ', 'OnrEsize ', \n 'OnrEsizeend ', 'OnreSizestart ', 'OnrOwenter ', 'OnrOwexit ', 'OnrowsDelete ', 'OnrowsiNserted ', 'OnScroll ', \n 'OnsElect ', 'OnsElectiOnchange ', 'OnsElectstart ', 'OnstArt ', 'OnsTop ', 'OnsuBmit ', 'OnuNload ' \n ]\n label_func = ['javAscRipt ', 'vbscRipt ', 'exprEssiOn ', 'appLet ', 'meTa ', 'xMl ', 'Blink ', 'lInk ', \n 'stYle ', 'scRipT ', 'eMbed ', 'oBject ', 'ifRame ', 'frAme ', 'fraMeset ', 'ilayEr ', 'lAyer ', \n 'bgsOund ', 'tiTle ', 'bAse ', 'iMg ', 'viDeo '\n ]\n window_func = ['alert ', 'confirm ', 'prompt']\n rawurl = self.url.replace(\"FUZZING\", start_md5)\n cprint(\">>执行xss测试..\", \"cyan\")\n req = requests.get(rawurl, headers=headers, timeout=6, verify=False)\n if start_md5 in req.text:\n cprint(\"[+]输入参数带入回显,可能存在XSS漏洞..\", \"red\")\n\n #执行character 过滤FUZZ\n cprint(\">>执行特殊字符FUZZ判断..\", \"cyan\")\n characterlist = list()\n characterlist2 = list()\n for char in character:\n char = char.strip()\n rawurl = self.url.replace(\"FUZZING\", start_md5 + char + end_md5) \n res = requests.get(rawurl, headers=headers, timeout=6, verify=False)\n if start_md5+char+end_md5 in str(res.text):\n characterlist.append(char+' ')\n else:\n characterlist2.append(char+' ')\n cprint(\"[+]未被过滤的特殊字符: \"+''.join(characterlist), \"green\")\n cprint(\"[-]被过滤的特殊字符: \"+''.join(characterlist2), \"red\")\n\n #执行事件驱动 过滤FUZZ\n\n cprint(\">>执行事件驱动FUZZ判断..\", \"cyan\")\n drive_funclist = 
list()\n drive_funclist2 = list()\n for drive in drive_func:\n drive = drive.strip()\n rawurl = self.url.replace(\"FUZZING\", start_md5 + drive + end_md5)\n res = requests.get(rawurl, headers=headers, timeout=6, verify=False)\n if start_md5+drive+end_md5 in str(res.text):\n drive_funclist.append(drive+' ')\n else:\n drive_funclist2.append(drive+' ')\n cprint(\"[+]未被过滤的事件驱动: \"+''.join(drive_funclist), \"green\")\n cprint(\"[-]被过滤的事件驱动: \"+''.join(drive_funclist2), \"red\")\n \n #执行标签 过滤FUZZ\n cprint(\">>执行标签FUZZ判断..\", \"cyan\")\n labellist = list()\n labellist2 = list()\n for label in label_func:\n label = label.strip()\n rawurl = self.url.replace(\"FUZZING\", start_md5 + label + end_md5)\n res = requests.get(rawurl, headers=headers, timeout=6, verify=False)\n if start_md5+label+end_md5 in str(res.text):\n labellist.append(label+' ')\n else:\n labellist2.append(label+' ')\n\n cprint(\"[+]未被过滤的标签: \"+''.join(labellist), \"green\")\n cprint(\"[-]被过滤的标签: \"+''.join(labellist2), \"red\")\n\n #执行标签 过滤FUZZ\n cprint(\">>执行弹窗函数FUZZ判断..\", \"cyan\")\n windowlist = list()\n windowlist2 = list()\n for window in window_func:\n window = window.strip()\n rawurl = self.url.replace(\"FUZZING\", start_md5 + window + end_md5)\n res = requests.get(rawurl, headers=headers, timeout=6, verify=False)\n if start_md5+window+end_md5 in str(res.text):\n windowlist.append(window+' ')\n else:\n windowlist2.append(window+' ')\n\n cprint(\"[+]未被过滤的弹窗函数: \"+''.join(windowlist), \"green\")\n cprint(\"[-]被过滤的弹窗函数: \"+''.join(windowlist2), \"red\")\n else:\n cprint(\"[-]不存在XSS\", \"green\")\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n if len(sys.argv) < 2:\n cprint(\"usage: python3 xss_characterfuzz_check.py http://test.com/test.php?id=FUZZING\", \"cyan\")\n cprint(\"[*]将需要测试XSS的参数替换为FUZZING即可\", \"cyan\")\n else:\n testVuln = xss_characterfuzz_check_BaseVerify(sys.argv[1])\n testVuln.run()\n","repo_name":"Lucifer1993/AngelSword","sub_path":"scan/xss_characterfuzz_check.py","file_name":"xss_characterfuzz_check.py","file_ext":"py","file_size_in_byte":6374,"program_lang":"python","lang":"en","doc_type":"code","stars":1424,"dataset":"github-code","pt":"76"} +{"seq_id":"9638971291","text":"\nimport os\nimport logging\nfrom importlib.machinery import SourceFileLoader\n\nlogger = logging.getLogger('rg')\n\nITEMS_PROTOTYPES_FILES = {\n\t1: 'sword',\n\t2: 'bow',\n\t10: 'axe',\n\t11: 'hammer',\n\t12: 'knife',\n\t13: 'mace',\n\t14: 'pike',\n\t15: 'spear',\n\t16: 'lash',\n\t17: 'blowgun',\n\t18: 'crossbow',\n\n\t20: 'staff',\n\n\t4: 'arrow',\n\t24: 'bolt',\n\t34: 'dart',\n\n\t3: 'quiver',\n\t6: 'backpack',\n\n\t5: 'worldmap',\n\n\t50: 'emptybutle',\n\t51: 'hppotion',\n\t52: 'mppotion',\n\n\t300: 'scrollfireshit',\n\n\t101: 'deadhuman',\n\t102: 'deadrat',\n\t103: 'deadelf',\n\t104: 'deaddwarf',\n\n\t201: 'meatrat'\n}\n\ndef load_item_prototype(id):\n\n\tif not id in ITEMS_PROTOTYPES_FILES:\n\t\tlogger.warn('Item #{0} not found'.format(id))\n\t\treturn None\n\n\tname = ITEMS_PROTOTYPES_FILES[id]\n\tpath = 'items/{0}.py'.format(name)\n\n\tif not os.path.exists(path):\n\t\tlogger.warn('Item #{0} ({1}) not found'.format(id, name))\n\t\treturn None\n\n\titem_loader = SourceFileLoader(name, path)\n\titem = item_loader.load_module()\n\titem.id = id\n\n\treturn check_item_prototype(item, name)\n\ndef check_item_prototype(item, name):\n\titem.code_name = name\n\n\trequired = ( 'name', )\n\n\tfor r in required:\n\t\tif not hasattr(item, r):\n\t\t\tlogger.warn('Item \"{0}\" has no attribute {1}!'.format(name, 
r))\n\t\t\treturn None\n\n\tdefaults = [\n\t\t( lambda *args: None, ( 'afterUsingAttackMethod', )),\n\t\t( lambda *args: 0, ( 'getDamage', )),\n\t\t( lambda *args: True, ( 'isCanUseAttackMethod', )),\n\t\t( lambda *args: False, ( 'makeAction', )),\n\t\t( lambda *args: (), ( 'getAttackMethodSkills', 'getButcheringResults' )),\n\t\t( (), ( 'actions', 'attackMethods' )),\n\t\t( False, ( 'isStackable', 'isContainer', 'isButcherable', 'isWeapon' ))\n\t]\n\n\tfor def_val, names in defaults:\n\t\tfor name in names:\n\t\t\tif not hasattr(item, name):\n\t\t\t\tsetattr(item, name, def_val)\n\n\treturn item\n\n\nABILITES_FILES = {\n\t1: 'punch',\n\t2: 'kick',\n\t3: 'bite',\n\t4: 'fireball',\n\t5: 'fireshit'\n}\n\ndef load_ability(id):\n\n\tif not id in ABILITES_FILES:\n\t\treturn None\n\n\tname = ABILITES_FILES[id]\n\tpath = 'abilities/{0}.py'.format(name)\n\n\tif not os.path.exists(path):\n\t\treturn None\n\n\tability_loader = SourceFileLoader(name, path)\n\tability = ability_loader.load_module()\n\tability.id = id\n\n\treturn check_ability(ability, name)\n\ndef check_ability(ability, name):\n\tability.code_name = name\n\n\trequired = ( 'name', )\n\n\tfor r in required:\n\t\tif not hasattr(ability, r):\n\t\t\tlogger.warn('Ability \"{0}\" has no attribute {1}!'.format(name, r))\n\t\t\treturn None\n\n\tdefaults = [\n\t\t( lambda *args: 0, ( 'getDamage', )),\n\t\t( lambda *args: None, ( 'afterAttack', )),\n\t\t( lambda *args: True, ( 'isCanAttack', )),\n\t\t( lambda *args: False, ( 'isNatural', )),\n\t\t( None, ( 'usingAttribute', )),\n\t\t( (), ( 'usingSkills', )),\n\t]\n\n\tfor def_val, names in defaults:\n\t\tfor name in names:\n\t\t\tif not hasattr(ability, name):\n\t\t\t\tsetattr(ability, name, def_val)\n\n\treturn ability\n\n\nTRADERS_FILES = {\n\t1: 'weapon',\n\t2: 'food',\n\t3: 'cloth',\n\t4: 'potion'\n}\n\ndef load_trader(id):\n\n\tif not id in TRADERS_FILES:\n\t\treturn None\n\n\tname = TRADERS_FILES[id]\n\tpath = 'traders/{0}.py'.format(name)\n\n\tif not os.path.exists(path):\n\t\treturn None\n\n\ttrader_loader = SourceFileLoader(name, path)\n\ttrader = trader_loader.load_module()\n\ttrader.id = id\n\n\treturn check_trader(trader, name)\n\ndef traderGetSellPrice(self, user, protoId):\n\tpricelist = self.getSellList(user)\n\tif protoId in pricelist:\n\t\treturn pricelist[protoId]\n\telse:\n\t\treturn None\n\ndef traderGetBuyPrice(self, user, protoId):\n\tpricelist = self.getBuyList(user)\n\tif protoId in pricelist:\n\t\treturn pricelist[protoId]\n\telse:\n\t\treturn None\n\ndef check_trader(trader, name):\n\ttrader.code_name = name\n\n\trequired = ( 'name', )\n\n\tfor r in required:\n\t\tif not hasattr(trader, r):\n\t\t\tlogger.warn('Trader \"{0}\" has no attribute {1}!'.format(name, r))\n\t\t\treturn None\n\n\tdefaults = [\n\t\t( lambda *args: (), ( 'getSellList', 'getBuyList' )),\n\t\t( lambda *args: True, ( 'isCanTrade', )),\n\t\t( lambda *args: 'Что вы хотите продать или купить?', ( 'getHelloMessage', ))\n\t]\n\n\ttrader.getSellPrice = lambda user, protoId: traderGetSellPrice(trader, user, protoId)\n\ttrader.getBuyPrice = lambda user, protoId: traderGetBuyPrice(trader, user, protoId)\n\n\tfor def_val, names in defaults:\n\t\tfor name in names:\n\t\t\tif not hasattr(trader, name):\n\t\t\t\tsetattr(trader, name, def_val)\n\n\treturn trader\n\n\n\nMOBS_PROTOTYPES_FILES = {\n\t1: 'human',\n\t2: 'rat',\n\t3: 'elf',\n\t4: 'dwarf'\n}\n\ndef load_mob_prototype(id):\n\n\tif not id in MOBS_PROTOTYPES_FILES:\n\t\treturn None\n\n\tname = MOBS_PROTOTYPES_FILES[id]\n\tpath = 
'mobs/{0}.py'.format(name)\n\n\tif not os.path.exists(path):\n\t\treturn None\n\n\tmob_loader = SourceFileLoader(name, path)\n\tmob = mob_loader.load_module()\n\tmob.id = id\n\n\treturn check_mob(mob, name)\n\ndef check_mob(mob, name):\n\tmob.code_name = name\n\n\trequired = ( 'name', 'st', 'dx', 'iq', 'ht', 'hpMax', 'mpMax' )\n\n\tfor r in required:\n\t\tif not hasattr(mob, r):\n\t\t\tlogger.warn('Mob \"{0}\" has no attribute {1}!'.format(name, r))\n\t\t\treturn None\n\n\tdefaults = [\n\t\t( lambda *args: None, ( 'afterDead', )),\n\t\t( (), ( 'abilities', ))\n\t]\n\n\tfor def_val, names in defaults:\n\t\tfor name in names:\n\t\t\tif not hasattr(mob, name):\n\t\t\t\tsetattr(mob, name, def_val)\n\n\treturn mob","repo_name":"Tairesh/telegram-rpg","sub_path":"prototypesloader.py","file_name":"prototypesloader.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"}
+{"seq_id":"70450057526","text":"from django.contrib import admin\nfrom django.db import models\nfrom textareacounter.widget import TextareaWithCounter\n\nclass TextAreaCounterAdminMixin(admin.ModelAdmin):\n    formfield_overrides = {\n        models.TextField: {'widget': TextareaWithCounter},\n    }\n    class Media:\n        css = {\n            \"all\": (\"textareacounter/textareacounter.css\", )\n        }\n        js = (\n            \"https://code.jquery.com/jquery-3.6.0.slim.min.js\",\n            \"textareacounter/textareacounter.js\",\n        )","repo_name":"DocTocToc/doctoctocbot","sub_path":"src/textareacounter/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"76"}
+{"seq_id":"8577026608","text":"\"\"\"empty message\n\nRevision ID: f2f900bc689d\nRevises: fdcd84cdec43\nCreate Date: 2018-03-04 17:01:50.180397\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'f2f900bc689d'\ndown_revision = 'fdcd84cdec43'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(u'students_ibfk_3', 'students', type_='foreignkey')\n    op.drop_column('students', 'parent')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('students', sa.Column('parent', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n    op.create_foreign_key(u'students_ibfk_3', 'students', 'users', ['parent'], ['id'])\n    # ### end Alembic commands ###\n","repo_name":"anaf007/school","sub_path":"migrations/versions/f2f900bc689d_.py","file_name":"f2f900bc689d_.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"36437243559","text":"\"\"\"\n\"\"\"\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import transforms\n\nimport utils\nfrom utils import YoloDataset\nimport model\nimport torch.nn.functional as F\n\n\ndef mnist_predict(img_pah):\n    # mnist数据集一共是 10种类型\n    global pred_class_num, use_gpu\n    pred_class_num = MNIST_PRED_CLASS_NUM\n    if use_gpu:\n        if not torch.cuda.is_available():\n            use_gpu = False\n            print(\"Not support GPU for train!\")\n\n    # 是否加载已有模型\n    if load_model_path is not None:\n        net = model.AlexNet(load_model_path=load_model_path, pred_class_num=pred_class_num)\n    else:\n        raise\n    #\n    image = cv2.imread(img_pah)\n    # 设为单通道灰色\n    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n\n    # 调整大小\n    img = cv2.resize(image, (image_size, image_size))\n\n    # 转格式\n    transform = transforms.Compose([\n        transforms.ToTensor()\n    ])\n\n    # 转tensor调整为(3*224*224)\n    img = transform(img)\n    img = img.expand(3, image_size, image_size)\n    img = img.unsqueeze(0)\n\n    ii = np.transpose(img[0], (1, 2, 0))\n    plt.imshow(ii)\n    plt.show()\n\n    pred = net(img)\n\n\n    pred = F.softmax(pred, dim=1)\n    _, index = pred.max(1)\n    if _[0].item() > 0.2:\n        print(\"Result is : [%s]\" % (str(index[0]),))\n    else:\n        print(\"No results\")\n\n\n\nif __name__ == '__main__':\n    root_path = utils.PROJECT_ROOT_PATH\n    # MNIST 种类\n    MNIST_PRED_CLASS_NUM = 10\n    # 训练模型的数据大小\n    image_size = 224\n    # 学习率可以设置为3、1、0.5、0.1、0.05、0.01、0.005,0.005、0.0001、0.00001\n    learning_rate = .1\n    # 每次训练的图片数量\n    batch_size = 128\n    # 保存间隔次数\n    per_batch_size_to_save = 30\n    # 已有模型\n    load_model_path = os.path.join(root_path, r'AlexNet\\output\\models\\alexnet_mnist.pth')\n    # 训练好的模型保存路径\n    # be_save_model_path = os.path.join(root_path, r'AlexNet\\output\\models\\alexnet_mnist.pth')\n    # 是否使用GPU\n    use_gpu = True\n    # 是预测阈值\n    threshold = 0.9\n    predict_image_path = None\n    # 1:测试MNIST\n    print(\"START MNIST TRAIN!\")\n    pred_class_num = MNIST_PRED_CLASS_NUM\n    mnist_predict(os.path.join(root_path, r'AlexNet\\img.png'))\n    print(\"END!\")\n","repo_name":"belzx/MachineLearning","sub_path":"AlexNet/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"70786096246","text":"import importlib\nimport sys\nimport torch\nfrom src.models.unet3d import unet3d\nfrom torchvision import transforms\n\n\nfrom src.dataset.train_val_split import train_val_split\nfrom src.losses.ce_dice_loss import CrossEntropyDiceLoss3D\n\nfrom src.losses import dice_loss, region_based_loss, new_losses\n\nfrom src.models.io_model import load_model\nfrom src.train.trainer import Trainer, TrainerArgs\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom src.config import BratsConfiguration\nfrom src.dataset.augmentations import color_augmentations, spatial_augmentations\n\nfrom src.dataset.utils import dataset, 
visualization as visualization\nfrom src.models.vnet import vnet, asymm_vnet\nfrom src.logging_conf import logger\nfrom src.dataset.loaders.brats_dataset import BratsDataset\n\n\ndef num_params(net_params):\n n_params = sum([p.data.nelement() for p in net_params])\n logger.info(f\"Number of params: {n_params}\")\n\n\n######## PARAMS\nlogger.info(\"Processing Parameters...\")\n\nconfig = BratsConfiguration(sys.argv[1])\nmodel_config = config.get_model_config()\ndataset_config = config.get_dataset_config()\nbasic_config = config.get_basic_config()\n\npatch_size = config.patch_size\ntensorboard_logdir = basic_config.get(\"tensorboard_logs\")\ncheckpoint_path = model_config.get(\"checkpoint\")\nbatch_size = dataset_config.getint(\"batch_size\")\nn_patches = dataset_config.getint(\"n_patches\")\nn_classes = dataset_config.getint(\"classes\")\nloss = model_config.get(\"loss\")\n\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nlogger.info(f\"Device: {device}\")\n\n\n######## DATASET\nlogger.info(\"Creating Dataset...\")\n\ndata, _ = dataset.read_brats(dataset_config.get(\"train_csv\"), lgg_only=dataset_config.getboolean(\"lgg_only\"))\ndata_train, data_val = train_val_split(data, val_size=0.2)\ndata_train = data_train * n_patches\ndata_val = data_val * n_patches\n\nn_modalities = dataset_config.getint(\"n_modalities\") # like color channels\nsampling_method = importlib.import_module(dataset_config.get(\"sampling_method\"))\n\n\ntransform = transforms.Compose([color_augmentations.RandomIntensityShift(),\n color_augmentations.RandomIntensityScale(),\n spatial_augmentations.RandomMirrorFlip(p=0.5),\n spatial_augmentations.RandomRotation90(p=0.5)])\n\n\ncompute_patch = basic_config.getboolean(\"compute_patches\")\ntrain_dataset = BratsDataset(data_train, sampling_method, patch_size, compute_patch=compute_patch, transform=transform)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\n\nval_dataset = BratsDataset(data_val, sampling_method, patch_size, compute_patch=compute_patch, transform=transform)\nval_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\n\nif basic_config.getboolean(\"plot\"):\n data_batch, labels_batch = next(iter(train_loader))\n data_batch.reshape(data_batch.shape[0] * data_batch.shape[1], data_batch.shape[2], data_batch.shape[3],\n data_batch.shape[4], data_batch.shape[5])\n labels_batch.reshape(labels_batch.shape[0] * labels_batch.shape[1], labels_batch.shape[2], labels_batch.shape[3],\n labels_batch.shape[4])\n\n print(data_batch.shape)\n logger.info('Plotting images')\n visualization.plot_batch_slice(data_batch, labels_batch, slice=30, save=True)\n\n\n######## MODEL\nlogger.info(\"Initiating Model...\")\n\nconfig_network = model_config[\"network\"]\nif config_network== \"vnet\":\n\n network = vnet.VNet(elu=model_config.getboolean(\"use_elu\"),\n in_channels=n_modalities,\n classes=n_classes,\n init_features_maps=model_config.getint(\"init_features_maps\"))\n\nelif config_network == \"vnet_asymm\":\n network = asymm_vnet.VNet(non_linearity=model_config.get(\"non_linearity\"), in_channels=n_modalities, classes=n_classes,\n init_features_maps=model_config.getint(\"init_features_maps\"), kernel_size=model_config.getint(\"kernel_size\"),\n padding=model_config.getint(\"padding\"))\n\nelif config_network == \"3dunet_residual\":\n\n network = unet3d.ResidualUNet3D(in_channels=n_modalities, out_channels=n_classes, final_sigmoid=False,\n 
f_maps=model_config.getint(\"init_features_maps\"), layer_order=\"crg\",\n num_levels=4, num_groups=4,conv_padding=1)\n\nelif config_network == \"3dunet\":\n\n network = unet3d.UNet3D(in_channels=n_modalities, out_channels=n_classes, final_sigmoid=False,\n f_maps=model_config.getint(\"init_features_maps\"), layer_order=\"crg\",\n num_levels=4, num_groups=4,conv_padding=1)\nelse:\n raise ValueError(\"Bad parameter for network {}\".format(model_config.get(\"network\")))\n\nnum_params(network.parameters())\n\n\n##### TRAIN\nlogger.info(\"Start Training\")\nnetwork.to(device)\n\noptim = model_config.get(\"optimizer\")\n\nif optim == \"SGD\":\n optimizer = torch.optim.SGD(network.parameters(), lr=model_config.getfloat(\"learning_rate\"),\n momentum=model_config.getfloat(\"momentum\"), weight_decay=model_config.getfloat(\"weight_decay\"))\nelif optim == \"ADAM\":\n optimizer = torch.optim.Adam(network.parameters(), lr=model_config.getfloat(\"learning_rate\"), weight_decay=model_config.getfloat(\"weight_decay\"), amsgrad=False)\n\nelse:\n raise ValueError(\"Bad optimizer. Current options: [SGD, ADAM]\")\n\nbest_loss = 1000\nif basic_config.getboolean(\"resume\"):\n logger.info(\"Loading model from checkpoint..\")\n model, optimizer, start_epoch, best_loss = load_model(network, checkpoint_path, device, optimizer, True)\n logger.info(f\"Loaded model with starting epoch {start_epoch}\")\nelse:\n start_epoch = 0\n\nwriter = SummaryWriter(tensorboard_logdir)\nscheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=model_config.getfloat(\"lr_decay\"),\n patience=model_config.getint(\"patience\"))\n\nif loss == \"dice\":\n criterion = dice_loss.DiceLoss(classes=n_classes, eval_regions=model_config.getboolean(\"eval_regions\"),\n sigmoid_normalization=True)\n\nelif loss == \"combined\":\n # 0. back, 1: ncr, 2: ed, 3: et\n ce_weigh = torch.tensor([0.1, 0.35, 0.2 , 0.35])\n criterion = CrossEntropyDiceLoss3D(weight=ce_weigh, classes=n_classes,\n eval_regions=model_config.getboolean(\"eval_regions\"), sigmoid_normalization=True)\nelif loss == \"both_dice\":\n criterion = region_based_loss.RegionBasedDiceLoss3D(classes=n_classes, sigmoid_normalization=True)\n\nelif loss == \"gdl\":\n criterion = new_losses.GeneralizedDiceLoss()\n\nelse:\n raise ValueError(f\"Bad loss value {loss}. 
Expected ['dice', combined]\")\n\nargs = TrainerArgs(model_config.getint(\"n_epochs\"), device, model_config.get(\"model_path\"), loss)\ntrainer = Trainer(args, network, optimizer, criterion, start_epoch, train_loader, val_loader, scheduler, writer)\ntrainer.start(best_loss=best_loss)\n\n\nprint(\"Finished!\")\n","repo_name":"imatge-upc/mri-braintumor-segmentation","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7206,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"76"}
+{"seq_id":"13112046499","text":"num = int(input(''))\n\nfor i in range(1, num + 1):\n    a, b = map(int,input().split())\n    \n    if b == 0:\n        print('divisao impossivel')\n    \n    if b != 0:\n        div = a / b\n        print('{:.1f}'.format(div))","repo_name":"EwertonWeb/UriOnlineJudge","sub_path":"1116_urionlinejudge.py","file_name":"1116_urionlinejudge.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"5191543248","text":"from typing import List, Dict\nfrom PySpice.Spice.Netlist import Circuit\nfrom PySpice.Unit.SiUnits import Farad, Second\nfrom PySpice.Unit import *\nfrom PySpice.Logging import Logging\n\nfrom itertools import product\n\nfrom .util import *\nfrom .piece_wise_linear import *\nfrom .ngspice_simulation import simulate_circuit\nfrom lccommon.net_util import get_subcircuit_ports\n\nimport logging\n\nfrom scipy import interpolate\n\npyspice_logger = Logging.setup_logging()\nlogger = logging.getLogger(__name__)\n\n\ndef characterize_input_capacitances(cell_name: str,\n                                    input_pins: List[str],\n                                    active_pin: str,\n                                    output_pins: List[str],\n                                    supply_voltage: float,\n                                    trip_points: TripPoints,\n                                    timing_corner: CalcMode,\n\n                                    spice_netlist_file: str,\n                                    spice_include_files: List[str] = None,\n                                    time_resolution=50 @ u_ps,\n                                    temperature=27,\n                                    ):\n    logger.debug(\"characterize_input_capacitances()\")\n    # Find ports of the SPICE netlist.\n    ports = get_subcircuit_ports(spice_netlist_file, cell_name)\n    logger.info(\"Subcircuit ports: {}\".format(\", \".join(ports)))\n\n    # TODO: find correct names for GND/VDD from netlist.\n    ground = 'GND'\n    supply = 'VDD'\n\n    circuit = Circuit('Timing simulation of {}'.format(cell_name), ground=ground)\n\n    if spice_include_files is None:\n        spice_include_files = []\n    spice_include_files = spice_include_files + [spice_netlist_file]\n    \n    # Load include files.\n    logger.info(\"Load SPICE include files.\")\n    for inc in spice_include_files:\n        logger.info(\"Include '{}'\".format(inc))\n        circuit.include(inc)\n\n    # Instantiate circuit under test.\n    logger.info(\"Instantiate circuit under test.\")\n    circuit.X('circuit_unter_test', cell_name, *ports)\n\n    # Power supply.\n    logger.info(\"Instantiate power supply: {} V\".format(supply_voltage))\n    circuit.V('power_vdd', supply, circuit.gnd, supply_voltage @ u_V)\n\n    # Find function to summarize different timing arcs.\n    reduction_function = {\n        CalcMode.WORST: max,\n        CalcMode.BEST: min,\n        CalcMode.TYPICAL: np.mean\n    }[timing_corner]\n    logger.info(\"Reduction function for summarizing multiple timing arcs: {}\".format(reduction_function.__name__))\n\n    logger.info(\"Measuring input capactiance.\")\n    result = measure_input_capacitance(\n        circuit=circuit,\n        inputs_nets=input_pins,\n        active_pin=active_pin,\n        output_nets=output_pins,\n        vdd=supply_voltage,\n        trip_points=trip_points,\n        temperature=temperature,\n        output_load_capacitance=0 @ u_pF,\n        time_step=time_resolution,\n        simulation_duration_hint=1 @ 
u_ns,\n reduction_function=reduction_function\n )\n\n logger.info(\"Characterizing input capacitances: Done\")\n\n return result\n\n\ndef measure_input_capacitance(circuit: Circuit,\n inputs_nets: List[str],\n active_pin: str,\n output_nets: List[str],\n vdd: float,\n trip_points: TripPoints,\n temperature: float = 25,\n output_load_capacitance: Farad = 0.0 @ u_pF,\n time_step: Second = 100 @ u_ps,\n simulation_duration_hint: Second = 200 @ u_ns,\n reduction_function=max\n ) -> Dict[str, float]:\n \"\"\" Measure the input capacitance of the `active_pin`.\n\n :param circuit: Circuit to be characterized. (Without output load)\n :param inputs_nets: Names of input signals.\n :param active_pin: Name of the input signal to be toggled.\n :param output_net: Name of the output signal.\n :param vdd: Supply voltage.\n :param trip_points: TripPoints object.\n :param temperature:\n :param output_load_capacitance:\n :param time_step: Simulation time step.\n :param simulation_duration_hint: A hint on how long to simulate the circuit.\n This should be in the order of magnitude of propagation delays.\n When chosen too short, the simulation time will be prolonged automatically.\n :param reduction_function: Function used to create default timing arc from conditional timing arcs.\n Should be one of {min, max, np.mean}\n :return: A dict containing values of 'rise_capacitance' and 'fall_capacitance' in Farads.\n \"\"\"\n logger.debug(\"measure_input_capacitance()\")\n # Create an independent copy of the circuit.\n logger.debug(\"Create an independent copy of the circuit.\")\n circuit = circuit.clone(title='Input capacitance measurement for pin \"{}\"'.format(active_pin))\n\n if float(output_load_capacitance) > 0:\n # Add output capacitances.\n for output_net in output_nets:\n circuit.C('load', circuit.gnd, output_net, output_load_capacitance)\n\n static_input_nets = [i for i in inputs_nets if i != active_pin]\n\n num_inputs = len(static_input_nets)\n static_inputs = list(product(*([[0, 1]] * num_inputs)))\n\n # TODO: set initial voltage at active_pin.\n\n input_current = 10000 @ u_nA\n logger.info(\"Input current: {}\".format(input_current))\n\n time_step = 1 @ u_ps\n # Guess of necessary simulation duration.\n period = 1000 @ u_ps\n logger.info(\"Guess of necessary simulation duration: {}\".format(period))\n # Loop through all combinations of inputs.\n capacitances_rising = []\n capacitances_falling = []\n for static_input in static_inputs:\n for input_rising in [True, False]:\n _circuit = circuit.clone()\n\n # Get voltages at static inputs.\n input_voltages = {net: vdd * value @ u_V for net, value in zip(static_input_nets, static_input)}\n logger.debug(\"Static input voltages: {}\".format(input_voltages))\n\n # Switch polarity of current for falling edges.\n _input_current = input_current if input_rising else -input_current\n # Create constant current source to drive the active pin.\n _circuit.I('src_{}'.format(active_pin), circuit.gnd, active_pin, dc_value=_input_current)\n\n # Get initial voltage of active pin.\n initial_voltage = 0 @ u_V if input_rising else vdd @ u_V\n\n # Run simulation\n # Loop because it might be necessary to run a longer simulation.\n logger.info(\"Run simulation.\")\n while True:\n analysis = simulate_circuit(_circuit, input_voltages, step_time=time_step,\n end_time=period, temperature=temperature,\n initial_voltages={active_pin: initial_voltage @ u_V}\n )\n\n time = np.array(analysis.time)\n assert len(time) > 0\n input_voltage = np.array(analysis[active_pin])\n output_voltage = 
np.array(analysis[output_nets[0]])\n logger.debug(\"Input voltage at start input_voltage[0]: {}\".format(input_voltage[0]))\n logger.debug(\"Input voltage at end input_voltage[-1]: {}\".format(input_voltage[-1]))\n\n if input_voltage[0] < 0.1 * vdd and input_voltage[-1] > vdd or \\\n input_voltage[0] > 0.9 * vdd and input_voltage[-1] < 0:\n # The input voltage spans the whole range from 0 to vdd.\n # So the simulation was long enough.\n break\n else:\n # Simulation was not long enough, double it.\n period = period * 2\n logger.info(\"Simulation was not long enough. New simulation time: {}\".format(period))\n\n if period>100000 @ u_ps:\n logger.info(\"VDD: {}\".format(vdd))\n logger.info(\"input_voltage[0]: {}\".format(input_voltage[0]))\n logger.info(\"input_voltage[-1]: {}\".format(input_voltage[-1]))\n logger.error(\"Error: Simulation took too long! It seems the input voltage did not span the whole range from 0 to vdd!\")\n break\n\n\n # if input_rising:\n # input_condition = \"B = {}\".format(str(input_voltages['B']))\n # plt.plot(time*1e9, input_voltage, label='Input when {}'.format(input_condition))\n # plt.plot(time*1e9, output_voltage, label='Output when {}'.format(input_condition))\n # plt.ylabel('input voltage [V]')\n # plt.xlabel('time [ns]')\n # plt.xlim(0, 0.3)\n # plt.legend()\n # plt.show()\n\n # Calculate average derivative of voltage by finding the slope of the line\n # through the crossing point of the voltage with the two thresholds.\n #\n # TODO: How to chose the thresholds?\n if input_rising:\n thresh1 = vdd * trip_points.slew_lower_threshold_rise\n thresh2 = vdd * trip_points.slew_upper_threshold_rise\n assert thresh1 < thresh2\n else:\n thresh1 = vdd * trip_points.slew_upper_threshold_fall\n thresh2 = vdd * trip_points.slew_lower_threshold_fall\n assert thresh1 > thresh2\n\n # Find transition times for both thresholds.\n transition_time1 = transition_time(input_voltage, time, threshold=thresh1, assert_one_crossing=True)\n transition_time2 = transition_time(input_voltage, time, threshold=thresh2, assert_one_crossing=True)\n assert transition_time2 > transition_time1\n\n f_input_voltage = interpolate.interp1d(x=time, y=input_voltage)\n dt = transition_time2 - transition_time1\n dv = f_input_voltage(transition_time2) - f_input_voltage(transition_time1)\n # dv = input_voltage[-1] - input_voltage[0]\n # dt = time[-1] - time[0]\n\n # Compute capacitance\n capacitance = float(_input_current) / (float(dv) / float(dt))\n\n logger.debug(\"dV: {}\".format(dv))\n logger.debug(\"dt: {}\".format(dt))\n logger.debug(\"I: {}\".format(input_current))\n logger.info(\"Capacitance: {}\".format(capacitance))\n\n if input_rising:\n capacitances_rising.append(capacitance)\n else:\n capacitances_falling.append(capacitance)\n\n # plt.ylabel('Input voltage [V]')\n # plt.xlabel('Time [ns]')\n # plt.ylim(0, vdd*1.1)\n # plt.xlim(0, 0.25)\n # plt.legend()\n # plt.show()\n\n # Find max, min or average depending on 'reduction_function'.\n logger.debug(\"Convert capacitances of all timing arcs into the default capacitance ({})\".format(reduction_function.__name__))\n final_capacitance_falling = reduction_function(capacitances_falling)\n final_capacitance_rising = reduction_function(capacitances_rising)\n final_capacitance = reduction_function([final_capacitance_falling, final_capacitance_rising])\n\n return {\n 'rise_capacitance': final_capacitance_falling,\n 'fall_capacitance': final_capacitance_rising,\n 'capacitance': final_capacitance\n 
}\n","repo_name":"1Kartikgupta/test_wafer_generator","sub_path":"librecell-lib/lclib/characterization/input_capacitance.py","file_name":"input_capacitance.py","file_ext":"py","file_size_in_byte":11473,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"29579825689","text":"from Environment import *\nfrom Minimax import *\n#load previous trained q_table\n# path = 'file location'\n# file = open(path,'rb')\n# q_table = pickle.load(file)\n# file.close()\n# or start with new q_table\n# q_table = dict()\n# keys = q_table.keys()\n\nepisode = 2001\nN = 6\n\"\"\"Training the agent\"\"\"\nimport random\nfrom IPython.display import clear_output\n# Hyperparameters\nalpha = 0.2\ngamma = 0.6\nepsilon = 0.1\n# For plotting metrics\nall_epochs = []\nall_penalties = []\nfor ii in range(1001, episode):\n state = resetBoard() \n epochs, penalties, reward, = 0, 0, 0\n done = False \n RLtile=1\n computerTile=2 \n if ii%2 == 0: # เลขคู่ computer move first \n if random.uniform(0, 1) <= 0.2:\n# print('computer move minimax')\n state,reward, done = agentminimax(state, computerTile)\n else:\n# print('computer move random')\n action = random.choice(getValidMoves(state, computerTile))\n state,reward, done = makeMoveRl(action, state, computerTile)\n while not done:\n valid = getValidMoves(state,RLtile)\n validnum = [tmp[0]*N+tmp[1] for tmp in valid]\n keys = q_table.keys()\n tup_state = tuple(state.reshape(state.size,))\n if tup_state not in keys: #ดูว่า มี state นี่อยู่แล้วหรือไม่\n q_table[tup_state] = np.zeros((N*N,))\n q_table[tup_state][validnum] = (1/(0.5*(N*N-4)))*np.ones((len(validnum),))\n# print(q_table[tup_state])\n if len(valid) != 0 and gameEnd(state) == False: # RL มีตำแหน่งลงได้\n if random.uniform(0, 1) < epsilon:\n# print('RL move random')\n action = random.choice(valid) # Explore action space\n else:\n# print('RL move q_table')\n tmp = validnum[np.argmax(q_table[tup_state][validnum])]\n action = numToList(tmp) \n next_state, reward, done = makeMoveRl(action, state, RLtile)\n else: # เพิ่มจาก version 20210621 ถ้า RL ลงไม่ได้ต้องให้ next_state = state เลย \n next_state = state # แต่ของเดิม ก่อนมาถึงจุดนี้ ให้ state = next_stae ตอนจบ loop อยู่แล้วน่าจะไม่ผิด\n\n if len(getValidMoves(next_state,computerTile)) != 0 and gameEnd(state) == False:\n if random.uniform(0, 1) <= 0.5:\n# print('computer move minimax')\n next_state,reward, done = agentminimax(next_state, computerTile)\n else:\n# print('computer move random')\n agent_act = random.choice(getValidMoves(next_state, computerTile))\n next_state,reward, done = makeMoveRl(agent_act, next_state, computerTile)\n# ถ้า RL ลงไม่ได้ \"action\" จะเป็นขอตาก่อนหน้า เพราะ ตานี้ไม่มี action เราต้องไม่ใส่คะแนนเข้าไปใน q_table\n# edit from version 20210621\n if len(valid) != 0:\n valid = getValidMoves(next_state,RLtile)\n validnum = [tmp[0]*N+tmp[1] for tmp in valid]\n keys = q_table.keys()\n tup_nextstate = tuple(next_state.reshape(next_state.size,))\n if tup_nextstate not in keys:\n q_table[tup_nextstate] = np.zeros((N*N,))\n q_table[tup_nextstate][validnum] = (1/(0.5*(N*N-4)))*np.ones((len(validnum),))\n old_value = q_table[tup_state][action[0]*4+action[1]]\n next_max = np.max(q_table[tup_nextstate]) \n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[tup_state][action[0]*4+action[1]] = new_value\n else: # สร้าง q_table ของ next_state\n valid = getValidMoves(next_state,RLtile)\n validnum = [tmp[0]*N+tmp[1] for tmp in valid]\n keys = q_table.keys()\n tup_nextstate = 
tuple(next_state.reshape(next_state.size,))\n if tup_nextstate not in keys:\n q_table[tup_nextstate] = np.zeros((N*N,))\n q_table[tup_nextstate][validnum] = (1/(0.5*(N*N-4)))*np.ones((len(validnum),))\n state = next_state\n","repo_name":"githubakira/AI_Builders","sub_path":"RL_minimax_training.py","file_name":"RL_minimax_training.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"38203201655","text":"__author__ = \"chenty\"\n\nimport dateutil.parser\nimport random\n\nfrom rpc.judicator_rpc import Judicator\nfrom rpc.judicator_rpc.ttypes import *\nfrom thrift.transport import TSocket, TTransport\nfrom thrift.protocol import TBinaryProtocol\n\n\ndef extract(task, brief=False, compile=True, execute=True, result=True):\n \"\"\"\n Extract dictionary from rpc Task structure (class)\n :param task: The original structure\n :param brief: If the structure is a brief one (TaskBrief)\n :param compile: If compile information needs to be extracted\n :param execute: Same to compile, but execute information\n :param result: Same to compile, but result information\n :return: Extracted dictionary\n \"\"\"\n # Fundamental fields\n # This is all TaskBrief structure contains\n # Time string is parsed into datetime object\n res = {\n \"id\": task.id,\n \"user\": task.user,\n \"add_time\": dateutil.parser.parse(task.add_time) if task.add_time else None,\n \"done\": task.done,\n \"status\": task.status,\n \"executor\": task.executor,\n \"report_time\": dateutil.parser.parse(task.report_time) if task.report_time else None\n }\n if brief:\n return res\n\n # Extract compile information\n if compile and task.compile:\n res[\"compile\"] = {\n \"source\": task.compile.source,\n \"command\": task.compile.command,\n \"timeout\": task.compile.timeout\n }\n else:\n res[\"compile\"] = None\n\n # Execute information\n if execute and task.execute:\n res[\"execute\"] = {\n \"input\": task.execute.input,\n \"data\": task.execute.data,\n \"command\": task.execute.command,\n \"timeout\": task.execute.timeout,\n \"standard\": task.execute.standard\n }\n else:\n res[\"execute\"] = None\n\n # Result information\n if result and task.result:\n res[\"result\"] = {\n \"compile_output\": task.result.compile_output,\n \"compile_error\": task.result.compile_error,\n \"execute_output\": task.result.execute_output,\n \"execute_error\": task.result.execute_error,\n }\n else:\n res[\"result\"] = None\n\n return res\n\ndef generate(task, brief=False, compile=True, execute=True, result=True):\n \"\"\"\n Generate rpc Task structure (class) from a given dictionary\n :param task: The dictionary\n :param brief: If a TaskBrief structure should be generated\n :param compile: If compile information needs to be included\n :param execute: Same to compile, but execute information\n :param result: Same to compile, but result information\n :return: Generated Task structure\n \"\"\"\n # The add_time must be converted into a string, if necessary\n if task[\"add_time\"] is None or isinstance(task[\"add_time\"], str):\n add_time = task[\"add_time\"]\n else:\n add_time = task[\"add_time\"].isoformat()\n\n # Same to the report_time\n if task[\"report_time\"] is None or isinstance(task[\"report_time\"], str):\n report_time = task[\"report_time\"]\n else:\n report_time = task[\"report_time\"].isoformat()\n\n # If BriefTask should be generated, just return fundamental fields\n if brief:\n return TaskBrief(\n task[\"id\"],\n task[\"user\"],\n add_time,\n 
task[\"done\"],\n task[\"status\"],\n task[\"executor\"],\n report_time\n )\n\n # Generate compile information\n if compile and task[\"compile\"]:\n c = Compile(\n task[\"compile\"][\"source\"],\n task[\"compile\"][\"command\"],\n task[\"compile\"][\"timeout\"]\n )\n else:\n c = None\n\n # Execute information\n if execute and task[\"execute\"]:\n e = Execute(\n task[\"execute\"][\"input\"],\n task[\"execute\"][\"data\"],\n task[\"execute\"][\"command\"],\n task[\"execute\"][\"timeout\"],\n task[\"execute\"][\"standard\"]\n )\n else:\n e = None\n\n # Result information\n if result and task[\"result\"]:\n r = Result(\n task[\"result\"][\"compile_output\"],\n task[\"result\"][\"compile_error\"],\n task[\"result\"][\"execute_output\"],\n task[\"result\"][\"execute_error\"]\n )\n else:\n r = None\n\n return Task(\n task[\"id\"],\n task[\"user\"],\n c,\n e,\n add_time,\n task[\"done\"],\n task[\"status\"],\n task[\"executor\"],\n report_time,\n r\n )\n\ndef select_from_etcd_and_call(func, local_etcd, judicator_path, logger, *args, **kwargs):\n \"\"\"\n Select a judicator from etcd and call rpc process.\n :param func: Name of the function\n :param local_etcd: Etcd proxy\n :param judicator_path: Path to judicator services on etcd\n :param logger: Logger object.\n :return: Return of the rpc call.\n \"\"\"\n # Get all judicator rpc address and choose one randomly\n judicator = local_etcd.get(judicator_path)\n logger.info(\"Got judicator list %s.\" % str(judicator))\n if not judicator:\n raise Exception(\"No judicator rpc service detected.\")\n name, address = random.choice(tuple(judicator.items()))\n host, port = address.split(\":\")\n logger.debug(\"Making %s call to judicator %s at %s\" % (func, name, address))\n\n # Start rpc transport\n transport = TTransport.TBufferedTransport(TSocket.TSocket(host, int(port)))\n client = Judicator.Client(TBinaryProtocol.TBinaryProtocol(transport))\n transport.open()\n # Call and return\n res = client.__getattribute__(func)(*args, **kwargs)\n transport.close()\n\n return res\n","repo_name":"sbofgayschool/KV2","sub_path":"utility/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"76"} +{"seq_id":"36905933020","text":"import pytest\nimport pandas as pd\nimport numpy as np\n\nfrom mercury.dataschema.calculator import StatCalculatorFactory, PandasStatCalculator\nfrom mercury.dataschema.feature import Feature\n\n\n@pytest.fixture(scope='module')\ndef pandas_df():\n data = [['tom', 10], ['nick', 15], ['juli', 14]]\n return pd.DataFrame(data, columns=['Name', 'Age'])\n\n\ndef test_calculator_factory(pandas_df):\n assert isinstance(StatCalculatorFactory.build_calculator(pandas_df), PandasStatCalculator)\n\n\ndef test_calculator(pandas_df):\n calculator = StatCalculatorFactory.build_calculator(pandas_df)\n\n feature = Feature()\n\n calculator.min(pandas_df['Age'], feature)\n calculator.max(pandas_df['Age'], feature)\n calculator.std(pandas_df['Age'], feature)\n calculator.mean(pandas_df['Age'], feature)\n\n assert feature.stats['min'] == 10\n assert feature.stats['max'] == 15\n assert feature.stats['mean'] == 13\n\n\ndef test_set_config(pandas_df):\n calculator = StatCalculatorFactory.build_calculator(pandas_df)\n with pytest.raises(ValueError):\n calculator.set_config(**{'nonexistingattr': 10})\n\n # assert it assigns the property well\n calculator.set_config(**{'distribution_bins_method': 10})\n assert calculator.distribution_bins_method == 10\n\n # Assert 
does nothing with None\n calculator.set_config()\n","repo_name":"BBVA/mercury-dataschema","sub_path":"tests/dataschema/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"76"} +{"seq_id":"28469969422","text":"from fastapi import APIRouter ,status , File, UploadFile\nfrom pydantic import BaseModel, Field\nfrom fastapi.responses import JSONResponse\nfrom app.service import departments\nfrom typing import Union\nimport json\nfrom app.models import departments as department_model\n\nrouter = APIRouter()\ndepartment_service = departments.DepartmentsService()\n\n@router.post(\"/\")\nasync def CreateDepartment(department: department_model.CreateDepartmentForm):\n return await department_service.CreateDepartment(department)\n\n@router.get(\"/{department_id}\")\nasync def GetOneDepartment(department_id: int):\n return await department_service.GetOneDepartment(department_id)\n\n@router.get(\"/\")\nasync def GetAllDepartmentsPaginated(page_number: int, page_size: int):\n return await department_service.GetAllDepartmentsPaginated(page_number, page_size)\n\n@router.put(\"/{department_id}\")\nasync def UpdateDepartment(department_id: int, department: department_model.CreateDepartmentForm):\n return await department_service.UpdateDepartment(department_id, department)\n\n@router.delete(\"/{department_id}\")\nasync def DeleteDepartment(department_id: int):\n return await department_service.DeleteDepartment(department_id)\n ","repo_name":"Perrrr/expense_manage_system","sub_path":"expense-app-gateway/server/app/handler/departments.py","file_name":"departments.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"42170147486","text":"from collections import deque\n\nH, W, K = map(int, input().split())\nx1, y1, x2, y2 = map(int, input().split())\nC = [[None] * (W + 2) for _ in range(H + 2)]\n\nfor w in range(W + 2):\n C[0][w] = C[-1][w] = -1\nfor h in range(H + 2):\n C[h][0] = C[h][-1] = -1\nfor h in range(1, H + 1):\n S = input()\n for w, c in enumerate(S, 1):\n if c == '@': C[h][w] = -1\n\nqueue = deque([\n (x1 + 1, y1, 1, +1, 0),\n (x1 - 1, y1, 1, -1, 0),\n (x1, y1 + 1, 1, 0, +1),\n (x1, y1 - 1, 1, 0, -1),\n])\n\n\ndef printC():\n for row in C:\n R = ['.' 
if c is None else '@' if c == -1 else str(c) for c in row]\n print(''.join(R))\n\n\nC[x1][y1] = 0\n\nwhile queue:\n x0, y0, d, dx, dy = queue.popleft()\n dxL, dyL = -dy, dx\n dxR, dyR = dy, -dx\n\n for k in range(K):\n x = x0 + dx * k\n y = y0 + dy * k\n if C[x][y] is not None:\n if C[x][y] < d:\n k = -1\n break\n continue\n\n C[x][y] = d\n\n queue.append((x + dxL, y + dyL, d + 1, dxL, dyL))\n queue.append((x + dxR, y + dyR, d + 1, dxR, dyR))\n\n if k == K - 1:\n queue.append((x + dx, y + dy, d + 1, dx, dy))\n\nans = C[x2][y2]\nprint(ans if ans is not None else -1)\n","repo_name":"ymtz13/CompetitiveProgramming","sub_path":"AtCoder/ABC170/F3.py","file_name":"F3.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"19149372210","text":"source = open('lyrics.txt', 'r')\nresult = open('processed.txt', 'w')\n\nlyricString = \"\"\n\nlines = source.readlines()\nlines = filter(lambda x: not x.isspace(), lines)\n\n\nfor line in lines:\n #table = str.maketrans(dict.fromkeys(\"()\"))\n #line.translate(table)\n line = ''.join(x for x in line if x not in '()')\n line = line.translate({ord(i): None for i in ','})\n line = line.translate({ord(i): None for i in '\\n'})\n line = line + \" \"\n\n line = line.lower()\n\n lyricString = lyricString + line\n\nwords = lyricString.split()\n\nfor word in words:\n result.write('\"')\n result.write(word)\n result.write('\", ')\n\nsource.close()\nresult.close()\n\n","repo_name":"eromland/lyrics","sub_path":"prepareLyrics.py","file_name":"prepareLyrics.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37492032908","text":"import os\nimport xlrd\nimport jieba\njieba.set_dictionary(\"./dict.txt\")\njieba.initialize()\nfrom textrank4zh import TextRank4Keyword\n\n\n\nfor f in os.listdir(\"./\"):\n\tif os.path.splitext(f)[1] == '.xlsx':\n\t\twb = xlrd.open_workbook(f)\n\t\tsheet = wb.sheet_by_index(0)\n\t\tcols = sheet.col_values(sheet.ncols-7)\n\t\ttr4w = TextRank4Keyword()\n\t\ttr4w.analyze(text=\" \".join(cols))\n\t\twith open('./result.txt','w+') as f0: \n\t\t\tfor item in tr4w.get_keywords(20, word_min_len=2):\n\t\t\t\tprint(item,file=f0)","repo_name":"hanzhsun/rank","sub_path":"rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"13243385820","text":"def convert_to_copper(price):\n # Parse price string to extract value and currency type\n value, currency = price.split(' ')\n value = int(value)\n\n # Conversion rates\n conversion_rates = {\n 'cp': 1,\n 'sp': 10,\n 'ep': 50,\n 'gp': 100,\n 'pp': 1000\n }\n return value * conversion_rates[currency]\n\ndef convert_to_highest_currency(copper_value):\n # Conversion rates\n conversion_rates = {\n 'cp': 1,\n 'sp': 10,\n 'gp': 100,\n 'pp': 1000 # 1 pp is 10 gp\n }\n \n # Precision rates\n precision_rates = {\n 'cp': 1,\n 'sp': 1,\n 'gp': 1,\n 'pp': 2\n }\n\n # Start from highest currency and go down\n for currency, rate in reversed(conversion_rates.items()):\n # Check if copper_value can be converted into current currency\n if copper_value >= rate:\n # Check if copper_value is at least 50 gp for pp conversion\n if currency == 'pp' and copper_value < 5000: # 5000 cp is 50 gp\n continue\n \n precision = precision_rates[currency]\n value = round(copper_value / rate, precision) # Divide by rate and 
round to appropriate decimal place\n\n # Check if the value is an integer and doesn't have a decimal part\n if value.is_integer():\n value = int(value) # Convert to integer if it's a whole number\n\n return value, currency\n\n # If no match found, return in copper pieces\n return round(copper_value, 1), 'cp'","repo_name":"BasUitermark/RandomShopData","sub_path":"CLI/app/model/currency_conversions.py","file_name":"currency_conversions.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"6734417027","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 16 09:22:35 2020\r\n\r\n@author: MaryamHashemi\r\n\"\"\"\r\n\r\nimport numpy as np\r\n#import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport math\r\n#import tensorflow as tf\r\n#from tensorflow.python.client import device_lib \r\n#from keras.preprocessing import image\r\nfrom keras.utils import np_utils\r\nfrom skimage.transform import resize\r\nimport glob\r\nfrom keras.models import Sequential\r\nfrom keras.applications.vgg16 import VGG16\r\nfrom keras.layers import Dense, InputLayer, Dropout, Activation\r\nfrom keras.utils import plot_model\r\nfrom sklearn.metrics import classification_report\r\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, MaxPooling1D, Convolution1D\r\nfrom keras.applications.vgg16 import preprocess_input\r\nfrom keras import losses\r\n#oc curve and auc\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom keras.models import model_from_json\r\nfrom sklearn.metrics import average_precision_score\r\n\r\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\r\nfrom keras.utils import np_utils\r\nfrom keras.optimizers import SGD, Adadelta, Adagrad\r\nfrom keras.layers.recurrent import LSTM\r\n\r\n\r\nvideos_path1=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Female-Normal/\"\r\nvideos_path2=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Female-Yawing/\"\r\nvideos_path3=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Talking- Female/\"\r\nvideos_path4=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Male-Normal/\"\r\nvideos_path5=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Male-Yawing/\"\r\nvideos_path6=\"E:/data/YawDD/YawDD dataset/Mirror-Table1/Talking-male/\"\r\n\r\nvideo1=glob.glob(videos_path1+\"*.bmp\")\r\nvideo2=glob.glob(videos_path2+\"*.bmp\")\r\nvideo3=glob.glob(videos_path3+\"*.bmp\")\r\nvideo4=glob.glob(videos_path4+\"*.bmp\")\r\nvideo5=glob.glob(videos_path5+\"*.bmp\")\r\nvideo6=glob.glob(videos_path6+\"*.bmp\")\r\n\r\n\r\nvideos=[video1,video2,video3,video4,video5,video6]\r\n\r\n\r\nlabelname=[]\r\nlabelclass=[]\r\nindex=0\r\ncountimg=0\r\nvideocount=0\r\nfor i in videos:\r\n index+=1\r\n if index%3==0:\r\n countlabel=1\r\n elif index%3==1:\r\n countlabel=2\r\n else:\r\n countlabel=0\r\n for j in i:\r\n videocount+=1\r\n countimg+=1\r\n frame=cv2.imread(j)\r\n if len(labelclass)==1450:\r\n cv2.imshow('',frame)\r\n print(countlabel)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n# frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n labelname.append(frame)\r\n labelclass.append(countlabel)\r\n \r\nlabelname=np.array(labelname)\r\n\r\n\r\n\r\n\r\n\r\n\r\ny_train, y_test,X_train, X_test = train_test_split(labelclass, labelname, test_size=0.3, shuffle=True)\r\n#y_train, y_test,X_train, X_test = 
train_test_split(labelclass, labelname, test_size=0.4, random_state=42)\r\nX_valid, X_test, y_valid, y_test = train_test_split(X_test, y_test, test_size=0.5, shuffle=True) \r\n \r\n \r\n \r\nX_valid = np.array(X_valid) # converting list to array\r\nX_train=np.array(X_train)\r\nX_test=np.array(X_test)\r\n\r\ndummy_y_train = np_utils.to_categorical(y_train) # one hot encoding Classes\r\ndummy_y_valid = np_utils.to_categorical(y_valid) \r\ndummy_y_test = np_utils.to_categorical(y_test) # one hot encoding Classes\r\n\r\n \r\nimages_train = []\r\nfor i in range(0,X_train.shape[0]):\r\n    a = resize(X_train[i], preserve_range=True, output_shape=(64,64,3)).astype(int) # reshaping to 64*64*3\r\n    images_train.append(a)\r\nX_train = np.array(images_train)\r\n\r\nimages_valid = []\r\nfor i in range(0,X_valid.shape[0]):\r\n    a = resize(X_valid[i], preserve_range=True, output_shape=(64,64,3)).astype(int) # reshaping to 64*64*3\r\n    images_valid.append(a)\r\nX_valid = np.array(images_valid) \r\n\r\nimages_test = []\r\nfor i in range(0,X_test.shape[0]):\r\n    a = resize(X_test[i], preserve_range=True, output_shape=(64,64,3)).astype(int) # reshaping to 64*64*3\r\n    images_test.append(a)\r\nX_test= np.array(images_test) \r\n\r\n\r\nX_train = preprocess_input(X_train, mode='tf') # preprocessing the input data\r\nX_valid = preprocess_input(X_valid, mode='tf')\r\nX_test = preprocess_input(X_test, mode='tf') # preprocessing the input data\r\n\r\n\r\n\r\nmodel = Sequential()\r\nmodel.add(Convolution2D(32, (3, 3), padding='same',\r\n                 input_shape=(64,64,3)))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Convolution2D(64, (3, 3), data_format='channels_first'),)\r\nmodel.add(Activation('relu'))\r\n#model.add(Convolution2D(128, (3, 3), data_format='channels_first'),)\r\n#model.add(Activation('relu'))\r\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.25))\r\n\r\nmodel.add(Flatten())\r\nmodel.add(Dense(512))\r\nmodel.add(Activation('relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(3))\r\nmodel.add(Activation('softmax'))\r\nmodel.summary()\r\n\r\nsgd = SGD(lr=0.005,decay=1e-6, momentum=0.9, nesterov=True)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd , metrics=['accuracy'])\r\n#model.compile(loss='mean_squared_error', optimizer=sgd , metrics=['accuracy'])\r\n\r\n\r\n\r\nhistory=model.fit(X_train, dummy_y_train, batch_size=32,epochs=30, validation_data=(X_valid, dummy_y_valid))\r\n\r\nscoretest = model.evaluate(X_test, dummy_y_test, verbose=1)\r\nprint('Test score:', scoretest[0])\r\nprint('Test accuracy:', scoretest[1])\r\nscorevalid = model.evaluate(X_valid, dummy_y_valid, verbose=1)\r\nprint('Valid score:', scorevalid[0])\r\nprint('Valid accuracy:', scorevalid[1])\r\n\r\n\r\ntraining_loss = history.history['loss']\r\ntest_loss = history.history['val_loss']\r\n\r\n# Create count of the number of epochs\r\nepoch_count = range(1, len(training_loss) + 1)\r\n\r\n# Visualize loss history\r\nplt.plot(epoch_count, training_loss, 'r--')\r\nplt.plot(epoch_count, test_loss, 'b-')\r\nplt.legend(['Training Loss', 'Test Loss'])\r\nplt.xlabel('Epoch')\r\nplt.ylabel('Loss')\r\nplt.show();\r\n\r\nplt.figure()\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'test'], loc='upper left')\r\nplt.show()\r\n\r\n\r\n\r\nX_train =[] # creating an empty array\r\nX_valid=[]\r\ny_train=[]\r\ny_valid=[]\r\nX_test=[]\r\ny_test=[]\r\nindex=0\r\nvideocount=0\r\nfor i in videos:\r\n    
index+=1\r\n        countframe=0\r\n        if index%3==0:\r\n            countlabel=1\r\n        elif index%3==1:\r\n            countlabel=2\r\n        else:\r\n            countlabel=0\r\n        for j in i:\r\n            videocount+=1\r\n            countframe+=1\r\n            frame=cv2.imread(j)\r\n#            frame= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\nprobs=[] \r\nfor p in range (0,1007):\r\n    if probs1[p,0]>0.5:\r\n        probs.append(1)\r\n    elif probs1[p,1]>0.5:\r\n        probs.append(2)\r\n    else:\r\n        probs.append(0)\r\n        \r\n        \r\n\r\n\r\nprobstest=[] \r\nfor p in range (0,884):\r\n    if probs2[p,0]>0.5:\r\n        probstest.append(1)\r\n    elif probs2[p,1]>0.5:\r\n        probstest.append(2)\r\n    else:\r\n        probstest.append(0)\r\n        \r\n        \r\n\r\n\r\n\r\ndummy_y_valid_array=[] \r\nfor p in range (0,1007):\r\n    if dummy_y_valid[p,0]==1:\r\n        dummy_y_valid_array.append(1)\r\n    elif dummy_y_valid[p,1]==1:\r\n        dummy_y_valid_array.append(2)\r\n    else:\r\n        dummy_y_valid_array.append(0)\r\n        \r\n        \r\ndummy_y_test_array=[] \r\nfor p in range (0,884):\r\n    if dummy_y_test[p,0]==1:\r\n        dummy_y_test_array.append(1)\r\n    elif dummy_y_test[p,1]==1:\r\n        dummy_y_test_array.append(2)\r\n    else:\r\n        dummy_y_test_array.append(0)\r\n\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(dummy_y_valid_array, probs)\r\nconfusion_matrix(dummy_y_test_array, probstest)\r\n","repo_name":"maryamhashemi1995/Multimodal-Fatigue-Monitoring-System","sub_path":"RNN-lmst-3classification.py","file_name":"RNN-lmst-3classification.py","file_ext":"py","file_size_in_byte":13420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"32034197896","text":"\"\"\"\nGiven an integer n, return how many distinct phone numbers \nof length n we can dial.\n\nYou are allowed to place the knight on any numeric cell \ninitially and then you should perform n - 1 jumps to dial a \nnumber of length n. All jumps should be valid knight jumps.\n\nAs the answer may be very large, return the answer modulo 10^9 + 7.\n\"\"\"\nclass Solution(object):\n    def knightDialer(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n        # possible moves at each space\n        adj = {0: [4, 6], 1: [8, 6], 2: [7, 9], 3: [4, 8],\n               4: [3, 9, 0], 5: [], 6: [0, 7, 1], 7: [2, 6],\n               8: [1, 3], 9: [4, 2]}\n        memo = {}\n        \n        # subproblems: how many digits can we store starting at\n        # each number (prevent recalculations)\n        result = 0\n        \n        # start at each digit\n        for i in range(10):\n            result += self.dfs(i, n, adj, memo)\n        \n        return result % (10**9+7)\n    \n    def dfs(self, start, n, adj, memo):\n        if n == 1:\n            return 1\n        key = (n, start)\n        \n        if key in memo:\n            return memo[key]\n        \n        memo[key] = 0\n        for nxt in adj[start]:\n            memo[key] += self.dfs(nxt, n-1, adj, memo)\n        \n        return memo[key]","repo_name":"edchau/Algorithms","sub_path":"Problems/DP/935_knight_dialer.py","file_name":"935_knight_dialer.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"24724620550","text":"#\n# Min heap implementation\n#\n#\n\n\n\nimport random \n\n\n\n#\n# Min heap class and methods\n#\nclass MinHeap:\n    def __init__(self):\n        self.heap = []\n\n    def parent_index(self, i: int) -> int:\n        return ((i - 1) // 2)  # children of node i sit at 2*i+1 and 2*i+2\n\n    def left_child_index(self, i: int) -> int:\n        return (2 * i) + 1\n    \n    def right_child_index(self, i: int) -> int:\n        return (2 * i) + 2\n    \n    def size(self) -> int:\n        return len(self.heap)\n\n    def heapify_up(self) -> None:\n        index = self.size() - 1\n        while(self.parent_index(index) >= 0 and self.heap[self.parent_index(index)] > self.heap[index]):\n            self.heap[self.parent_index(index)], self.heap[index] = self.heap[index], 
self.heap[self.parent_index(index)]\n index = self.parent_index(index)\n\n\n def insert(self, value: int) -> None:\n \"\"\"Inserts value into last index, bubbles up\"\"\"\n if (self.size() == 0):\n self.heap.append(value)\n else:\n self.heap.append(value)\n self.heapify_up()\n\n\n\n def extract_min(self) -> int:\n \"\"\"Extracts min from heap(Root) then bubbles up\"\"\"\n if(self.size() == 0):\n print(\"Attempted to extract min from an empty heap\")\n exit(1)\n elif (self.size() == 1):\n extracted = self.heap[0]\n self.heap.pop() #removing last element, no need to bubble up\n return extracted\n else:\n extracted = self.heap[0]\n self.heap[0], self.heap[self.size() - 1] = self.heap[self.size() - 1], self.heap[0]\n self.heap.pop() #remove the old min from heap\n self.heapify_down(0) #call heapify down on the root\n return extracted\n\n\n\n def heapify_down(self, index: int) -> None:\n \"\"\"Makes swaps from root down to leaves if needed to keep heap rules\"\"\"\n lChild = self.left_child_index(index)\n rChild = self.right_child_index(index)\n swap_index = index\n\n if(rChild < self.size() and self.heap[rChild] < self.heap[index]): swap_index = rChild\n if(lChild < self.size() and self.heap[lChild] < self.heap[index]): swap_index = lChild\n \n #swapping with the min node\n if(lChild < self.size() and rChild < self.size()):\n if self.heap[rChild] < self.heap[lChild]: swap_index = rChild\n else: swap_index = lChild\n \n if(self.heap[swap_index] < self.heap[index]):\n # print(\"left: \" + str(self.heap[lChild]) + \" right: \" + str(self.heap[rChild]) + \"swapping \" + str(self.heap[index]) + \" with \" + str(self.heap[swap_index]))\n self.heap[index], self.heap[swap_index] = self.heap[swap_index], self.heap[index]\n self.heapify_down(swap_index)\n\n\n def peek(self) -> int:\n \"\"\"Returns heap root. 
index 0 without deletion\"\"\"\n if(len(self.heap) > 0):\n return self.heap[0]\n else:\n print(\"attempted to peek an empty heap\")\n exit(1)\n\n\n\n\n\n#\n# Driver function\n#\nif __name__ == '__main__':\n heap = MinHeap()\n mins = [] #stores extracted mins\n\n #pushing random numbers into the heap\n for i in range(15):\n heap.insert(random.randint(0, 30))\n\n #displaying heap in array form\n print(\"Heap: \", end='')\n print(heap.heap)\n\n #extracting all of the elements into array\n while(heap.size() > 0):\n mins.append(heap.extract_min())\n print(heap.heap)\n\n #displaying all extracted mins\n print(\"List of mins extracted: \", end='')\n print(mins)\n\n exit(0)","repo_name":"alexshelto/sorts-and-ds","sub_path":"Data-Structures/min-heap/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"12429945959","text":"from ast import Bytes\nimport torch\nimport tornado\nimport tornado.web\nfrom PIL import Image\nfrom io import BytesIO\nimport base64\nfrom predict import predict\n\n\nmodel = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')\n async def post(self): \n try:\n files = self.request.files\n file = files['file'][0]['body']\n img = Image.open(BytesIO(file))\n predictions = predict(model, img)\n \n except:\n predictions = []\n \n self.write({\"result\":predictions})\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/detectImage\" , MainHandler)\n ])\n\n\nif __name__ =='__main__':\n app=make_app()\n app.listen(5000)\n tornado.ioloop.IOLoop.current().start()","repo_name":"abfshaal/ObjectDetector","sub_path":"server/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"7700105445","text":"import subprocess\nfrom os.path import join\nfrom os import remove, devnull\nfrom Bio import SeqIO\nimport gzip\nimport math\nfrom multiprocessing import Pool\nfrom itertools import cycle\n\n\ndef run_prodigal(prodigal_path, infile, outdir, meta):\n bashCommand = '{prodigal} -c -f gff -o {gff} -a {faa} -d {ffn} -i {input}'.format(\n prodigal=prodigal_path, gff=join(outdir, 'prodigal.gff'), faa=join(outdir, 'prodigal.faa'),\n ffn=join(outdir, 'prodigal.ffn'), input=infile\n )\n if meta == True:\n bashCommand += ' -p meta'\n print('prodigal command:', bashCommand)\n process = subprocess.Popen(bashCommand.split(), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n output, error = process.communicate()\n\ndef run_prodigal_simple(prodigal_path, infile, outprefix):\n FNULL = open(devnull, 'w')\n bashCommand = '{prodigal} -p meta -c -f gff -o {gff} -a {faa} -d {ffn} -i {input}'.format(\n prodigal=prodigal_path, gff=outprefix + '.gff', faa=outprefix + '.faa',\n ffn=outprefix + '.ffn', input=infile\n )\n\n print('prodigal command:', bashCommand)\n process = subprocess.Popen(bashCommand.split(), stdout=FNULL, stderr=FNULL)\n output, error = process.communicate()\n\ndef run_prodigal_multithread(prodigal_path, infile, outdir, threads):\n\n print(\"Counting records in FASTA file...\")\n record_count = count_records_in_fasta(infile)\n print(\"The 
FASTA file contains %d records...\" % record_count)\n\n print(\"Writing FASTA file to batches for multithreading...\" )\n record_count_per_file = math.ceil(record_count / threads)\n filecount = 0\n outrecs = []\n outfiles = []\n\n if infile.endswith('.gz'):\n infile = gzip.open(infile, \"rt\")\n\n for record in SeqIO.parse(infile, 'fasta'):\n outrecs.append(record)\n if len(outrecs) == record_count_per_file:\n filecount += 1\n outfile = join(outdir, 'input%d.fna' % filecount)\n SeqIO.write(outrecs, outfile, 'fasta')\n outfiles.append(outfile)\n outrecs = []\n\n if len(outrecs) > 0:\n filecount += 1\n outfile = join(outdir, 'input%d.fna' % filecount)\n SeqIO.write(outrecs, outfile, 'fasta')\n outfiles.append(outfile)\n del outrecs\n\n prodigal_files = [join(outdir, 'prodigal%d' % (i+1)) for i in range(len(outfiles))]\n with Pool(processes=threads) as pool:\n pool.starmap(run_prodigal_simple, zip(cycle([prodigal_path]), outfiles, prodigal_files))\n\n combine_files(outfiles, join(outdir, 'input.fna'))\n combine_files([f+'.faa' for f in prodigal_files], join(outdir, 'prodigal.faa'))\n combine_files([f+'.ffn' for f in prodigal_files], join(outdir, 'prodigal.ffn'))\n combine_files([f+'.gff' for f in prodigal_files], join(outdir, 'prodigal.gff'), ['#'])\n\n\ndef combine_files(files, outfile, exclude_startswith=[]):\n with open(outfile, 'w') as out:\n\n for f in files:\n\n with open(f) as infile:\n for line in infile:\n\n skip = False\n for exclude in exclude_startswith:\n if line.startswith(exclude):\n skip = True\n break\n if skip is True:\n continue\n out.write(line)\n\n for f in files:\n remove(f)\n\ndef count_records_in_fasta(fasta):\n records = 0\n if fasta.endswith('.gz'):\n with gzip.open(fasta, \"rt\") as infile:\n for line in infile:\n if line.startswith('>'):\n records += 1\n else:\n with open(fasta) as infile:\n for line in infile:\n if line.startswith('>'):\n records += 1\n return records","repo_name":"bhattlab/GenomeSearch","sub_path":"genomesearch/prodigal.py","file_name":"prodigal.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"76"} +{"seq_id":"9775709441","text":"import logging\nimport json\nfrom flask import Response\nfrom jsonschema import validate\nfrom jsonschema.exceptions import ValidationError, SchemaError\n\nfrom src.event_apis.utils import get_default_response_headers\n\n\ndef get_api_result_dict(message=\"\"):\n result_dict = {\n \"result\": {\"message\": message},\n }\n return result_dict\n\n\ndef save_error_and_return_result(error, code, result_dict):\n result_dict[\"error\"] = {\"message\": error, \"code\": code}\n response = Response(json.dumps(result_dict, default=str))\n response.headers = get_default_response_headers()\n return response\n\n\ndef validate_json_schema(data, jsonschema):\n error = \"\"\n try:\n validate(instance=data, schema=jsonschema)\n except (ValidationError, SchemaError) as exc:\n logging.exception(str(exc))\n error = str(exc)\n return error\n","repo_name":"raghav1010/TrackXpress","sub_path":"src/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"10520637624","text":"import pygame\nimport ground\nimport rock\nimport metal\nfrom array import *\nimport random\nfrom pygame.locals import *\n\n\nclass Board(object):\n def __init__(self, width, height, window):\n self.width = width\n self.height = height\n # Create the 
surface and pass in a tuple with its length and width\n self.squares = []\n self.breakableWallPosition = [[0 for i in range(11)] for j in range(11)]\n\n def createBoard(self, window):\n walls = []\n x = y = 0\n for i in range(11):\n for j in range(11):\n ground.Ground(x, y, window)\n x += 40\n\n y += 40\n x = 0\n\n x = y = 0\n for i in range(11):\n for j in range(11):\n if i % 2 != 0 and j % 2 != 0:\n metal.Metal(x, y, window)\n walls.append(pygame.Rect(x, y, 40, 40))\n x += 40\n\n y += 40\n x = 0\n x = y = 0\n for i in range(11):\n for j in range(11):\n if (i, j) == (0, 0) or (i, j) == (0, 1) or (i, j) == (1, 0):\n x += 40\n continue\n chanceRock = random.randint(1, 2)\n if not (i % 2 != 0 and j % 2 != 0):\n if chanceRock == 1:\n walls.append(pygame.Rect(x, y, 40, 40))\n self.breakableWallPosition[j][i] = 1\n x += 40\n y += 40\n x = 0\n return walls\n\n def blitBoard(self, window):\n x = y = 0\n for i in range(11):\n for j in range(11):\n ground.Ground(x, y, window)\n x += 40\n\n y += 40\n x = 0\n\n x = y = 0\n for i in range(11):\n for j in range(11):\n if i % 2 != 0 and j % 2 != 0:\n metal.Metal(x, y, window)\n x += 40\n\n y += 40\n x = 0\n\n def blitBreakableWalls(self, window):\n x = y = 0\n for i in range(11):\n for j in range(11):\n if self.breakableWallPosition[j][i] == 1:\n rock.Rock(x, y, window)\n x += 40\n y += 40\n x = 0\n\n def createSquares(self, window):\n x = 0\n y = 0\n for i in range(0, 10):\n newSquareArray = []\n for j in range(0, 10):\n generatedGround = ground.Ground(x, y, window)\n newSquareArray.append(generatedGround)\n x += 40\n self.squares.append(newSquareArray)\n y += 40\n x = 0\n\n def createHurdles(self, window):\n for rowGround in self.squares:\n for colGround in rowGround:\n chanceRock = random.randint(1, 4)\n # a ajouter : function qui permet de checker qu'un obsctacle ne soit pas dans un coin\n if (chanceRock == 1):\n colGround = rock.Rock(colGround.x, colGround.y, window)\n","repo_name":"AdamaTraore75020/PYBomber","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"797315273","text":"import requests \nfrom dotenv import load_dotenv\nimport os\nfrom os.path import join, dirname\n\nfrom twilio.rest import Client\n\nload_dotenv()\n\nTWILLIO_SECRET = os.getenv('TWILLIO_SECRET')\nTWILLIO_SID = os.getenv('TWILLIO_SID')\nTWILLIO_NUMBER = os.getenv('TWILLIO_NUMBER')\nGPT3_TOKEN = os.getenv('GPT3_TOKEN')\nclient = Client(TWILLIO_SID, TWILLIO_SECRET)\n\ndef send_message(to_number, message):\n message = client.messages.create(\n body=message,\n from_=TWILLIO_NUMBER,\n to=to_number\n )\n if message.status == \"sent\":\n return \n\n\n\ndef ChatGPT(prompt):\n headers={\n \"Content-Type\": \"application/json\",\n \"Authorization\": f\"Bearer {GPT3_TOKEN}\"\n }\n\n body = {\n \"model\": \"text-davinci-003\",\n \"prompt\": f\"casual chat:\\n{prompt}\",\n \"max_tokens\": 512,\n \"temperature\": 0.85,\n }\n response = requests.post('https://api.openai.com/v1/completions', headers=headers, json=body)\n message = response.json()['choices'][0]['text']\n print(f\"New Prompt!\\nResponse:{str(message)}\")\n return str(message) \n\n \nfrom flask import Flask, request, redirect, render_template\nfrom twilio.twiml.messaging_response import MessagingResponse\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/sms\", methods=[\"POST\"])\ndef sms_reply():\n resp = 
MessagingResponse()\n data = request.form\n body = data.to_dict()\n print(body[\"Body\"])\n prompt = body[\"Body\"]\n reply = ChatGPT(prompt)\n resp.message(reply)\n return str(resp)\n\n@app.route('/sms/new', methods=[\"POST\"])\ndef new_chat():\n phone_num = request.form.get('phone_num')\n message = request.form.get('message')\n reply = ChatGPT(message)\n send_message(phone_num, reply)\n print(phone_num)\n \n # call twilio stuff with phone num\n return f'Text sent to {phone_num}'\n\n\n\n\n\n\n## twilio needs server to send webhook replys to, \n## ","repo_name":"samsullivandelgobbo/chatbot-sms","sub_path":"ChatBot.py","file_name":"ChatBot.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"22645323113","text":"import datetime as dt\nfrom pprint import pprint\n\nimport pytest\n\nimport anwesende.room.models as arm\nimport anwesende.room.reports as arr\nimport anwesende.room.tests.makedata as artmd\n\n@pytest.mark.django_db\ndef test_visits_by_department_report():\n descr = dict(org1=\n dict(dep1=\n dict(room1=(1,1),\n room2=(2,2)),\n dep2=\n dict(room3=(3,4))),\n org2=\n dict(dep3=\n dict(room4=(4,9),\n room5=(5,16))))\n artmd.make_organizations(descr)\n assert arm.Room.objects.count() == 5\n assert arm.Seat.objects.count() == 15\n assert arm.Visit.objects.count() == 32\n result = list(arr.visits_by_department_report())\n pprint(result)\n should = [\n {'organization': 'org1', 'department': 'dep1',\n 'rooms': 2, 'seats': 3, 'visits': 3},\n {'organization': 'org1', 'department': 'dep2',\n 'rooms': 1, 'seats': 3, 'visits': 4},\n {'organization': 'org2', 'department': 'dep3',\n 'rooms': 2, 'seats': 9, 'visits': 25} ]\n assert result == should\n\n\n@pytest.mark.django_db\ndef test_visitors_by_week_report(freezer):\n #----- first week: create rooms and some visits:\n freezer.move_to(\"2021-12-03T02:03\")\n seat_r1, = artmd.make_seats(\"room1\", 1, \"org1\", \"dep1\")\n seat_r2, = artmd.make_seats(\"room2\", 1, \"org1\", \"dep1\")\n seat_r2b, = artmd.make_seats(\"room2\", 1, \"org2\", \"dep2\")\n artmd.make_visit(seat_r1, \"p1\")\n artmd.make_visit(seat_r2, \"p1\")\n artmd.make_visit(seat_r2, \"p2\")\n artmd.make_visit(seat_r2b, \"p1\")\n artmd.make_visit(seat_r2b, \"p3\")\n artmd.make_visit(seat_r2b, \"p4\")\n artmd.make_visit(seat_r2b, \"p5\")\n\n #----- second week: create more visits:\n freezer.move_to(\"2021-12-10T02:10\")\n artmd.make_visit(seat_r2, \"p1\")\n artmd.make_visit(seat_r2, \"p1\") # double registration\n artmd.make_visit(seat_r2, \"p2\")\n\n #----- that evening, look at report:\n freezer.move_to(\"2021-12-10T18:00\")\n #-- first week:\n wr = arr.visitors_by_week_report(\"%\")\n assert wr[0].organizationsN == 2\n assert wr[0].departmentsN == 2\n assert wr[0].buildingsN == 2 # all same name\n assert wr[0].roomsN == 3 # only 2 different names\n assert wr[0].visitorsN == 5\n assert wr[0].visitsN == 7\n assert wr[0].visits_per_visitor == 7/5\n\n #-- second week:\n assert wr[0].organizationsN == 2\n assert wr[0].departmentsN == 2\n assert wr[0].buildingsN == 2\n assert wr[0].roomsN == 3 # \n assert wr[0].visitorsN == 5\n assert wr[0].visitsN == 7\n assert wr[0].visits_per_visitor == 7/5\n\n #-- first week, narrowed search:\n wr = arr.visitors_by_week_report(\"%dep1%\")\n assert wr[0].organizationsN == 1\n assert wr[0].departmentsN == 1\n assert wr[0].buildingsN == 1\n assert wr[0].roomsN == 2\n assert wr[0].visitorsN == 2\n assert wr[0].visitsN == 3\n assert wr[0].visits_per_visitor 
== 3/2\n\n\n","repo_name":"prechelt/anwesende","sub_path":"anwesende/room/tests/test_reports.py","file_name":"test_reports.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"76"} +{"seq_id":"11813147791","text":"def BOMBFUTA(L):\r\n s = 0\r\n for ch in range(len(L)):\r\n if ch % 2 != 0:\r\n s += L[ch]\r\n if s % 5 == 0:\r\n print(\"MAI CHALI JUPITER PE\")\r\n else:\r\n print(\"EARTH PE HI JINDA RHO TUM >_<\")\r\n\r\nL = list(eval(input(\"Enter a List of NUmbers : \")))\r\nBOMBFUTA(L)\r\n","repo_name":"prachibarnwal/python-codes","sub_path":"308.) FUNCTION to print sum of num in a list at odd pos.py","file_name":"308.) FUNCTION to print sum of num in a list at odd pos.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72717373044","text":"def deduplicate(List):\r\n s = set(List)\r\n l = list(s)\r\n return l\r\n\r\ndef deduplicate_1(List):\r\n s = set()\r\n for element in List:\r\n s.add(element)\r\n l = list(s)\r\n return l\r\n\r\ndef deduplicate_2(List):\r\n l = []\r\n for element in List:\r\n if element not in l:\r\n l.append(element)\r\n return l\r\n\r\nvalues = [1, 3, 5, 7, 9, 7, 5, 3, 1]\r\nprint(deduplicate(values))\r\nprint(deduplicate_1(values))\r\nprint(deduplicate_2(values))\r\n","repo_name":"Jays88/practicepython.org_exercises","sub_path":"Exercise_14.py","file_name":"Exercise_14.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"30100429458","text":"#!/usr/bin/env python3\n\"\"\"\nNome do Script: add-target-servers.py\nAutor: Pedro Gomes \nData de Criação: out/2023\nDescrição: Cadastra novos target servers no apigee\n\"\"\"\n\nimport argparse\nimport os\nfrom google.oauth2 import service_account\nfrom googleapiclient.discovery import build\nimport json\n\ndef create_target_server(apigee_org, apigee_env, target_server_name, target_server_host, target_server_port, service_account_info):\n\n if not service_account_info:\n raise ValueError(\"As informações da conta de serviço não foram fornecidas nas variáveis de ambiente.\")\n\n credentials_info = json.loads(service_account_info)\n\n credentials = service_account.Credentials.from_service_account_info(\n credentials_info,\n scopes=['https://www.googleapis.com/auth/cloud-platform']\n )\n\n apigee_service = build('apigee', 'v1', credentials=credentials)\n\n target_server_data = {\n \"name\": target_server_name,\n \"host\": target_server_host,\n \"port\": target_server_port,\n \"isEnabled\": True,\n \"sSLInfo\": { \n \"enabled\": True,\n },\n \"protocol\": \"HTTP\"\n\n }\n\n response = apigee_service.organizations().environments().targetservers().create(\n parent=f'organizations/{apigee_org}/environments/{apigee_env}',\n body=target_server_data\n ).execute()\n\n print(f'Success: Target Server \"{response[\"name\"]}\" created.')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Cria um Target Server no Apigee.\")\n parser.add_argument(\"--service-account-env\", required=True, help=\"Variável de ambiente com as informações do service Account.\")\n parser.add_argument(\"--org\", required=True, help=\"Nome da organização Apigee.\")\n parser.add_argument(\"--env\", required=True, help=\"Nome do ambiente Apigee. 
Ex: prd, dev, qa, hml\")\n parser.add_argument(\"--name\", required=True, help=\"Nome do Target Server.\")\n parser.add_argument(\"--host\", required=True, help=\"Host do Target Server.\")\n parser.add_argument(\"--port\", required=True, help=\"Port do Target Server\")\n\n args = parser.parse_args()\n\n apigee_org = args.org\n apigee_env = args.env\n target_server_name = args.name\n target_server_host = args.host\n target_server_port = args.port\n service_account_info = os.environ.get(args.service_account_env)\n\n\n\n create_target_server(apigee_org, apigee_env, target_server_name, target_server_host, target_server_port, service_account_info)\n","repo_name":"pehgomess/apigee-adminstrative-api","sub_path":"files/add-target-servers.py","file_name":"add-target-servers.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"74445109045","text":"#\n# Utility functions for CAB420, Assignment 1B, Q1\n# Author: Simon Denman (s.denman@qut.edu.au)\n#\n\nfrom scipy.io import loadmat # to load mat files\nimport matplotlib.pyplot as plt # for plotting\nimport numpy as np # for reshaping, array manipulation\nimport cv2 # for colour conversion\nimport tensorflow as tf # for bulk image resize\n\n# Load data for Q1\n# train_path: path to training data mat file\n# test_path: path to testing data mat file\n#\n# returns: arrays for training and testing X and Y data\n#\ndef load_data(train_path, test_path):\n\n # load files\n train = loadmat(train_path)\n test = loadmat(test_path)\n\n # transpose, such that dimensions are (sample, width, height, channels), and divide by 255.0\n train_X = np.transpose(train['train_X'], (3, 0, 1, 2)) / 255.0\n train_Y = train['train_Y']\n # change labels '10' to '0' for compatability with keras/tf. The label '10' denotes the digit '0'\n train_Y[train_Y == 10] = 0\n train_Y = np.reshape(train_Y, -1)\n\n # transpose, such that dimensions are (sample, width, height, channels), and divide by 255.0\n test_X = np.transpose(test['test_X'], (3, 0, 1, 2)) / 255.0\n test_Y = test['test_Y']\n # change labels '10' to '0' for compatability with keras/tf. The label '10' denotes the digit '0'\n test_Y[test_Y == 10] = 0\n test_Y = np.reshape(test_Y, -1)\n\n # return loaded data\n return train_X, train_Y, test_X, test_Y\n\n# vectorise an array of images, such that the shape is changed from {samples, width, height, channels} to\n# (samples, width * height * channels)\n# images: array of images to vectorise\n#\n# returns: vectorised array of images\n#\ndef vectorise(images):\n # use numpy's reshape to vectorise the data\n return np.reshape(images, [len(images), -1])\n\n# Plot some images and their labels. 
Will plot the first 100 samples in a 10x10 grid\n# x: array of images, of shape (samples, width, height, channels)\n# y: labels of the images\n#\ndef plot_images(x, y):\n fig = plt.figure(figsize=[15, 18])\n for i in range(100):\n ax = fig.add_subplot(10, 10, i + 1)\n ax.imshow(x[i,:])\n ax.set_title(y[i])\n ax.axis('off')\n\n# Resize an array of images\n# images: array of images, of shape (samples, width, height, channels)\n# new_size: tuple of the new size, (new_width, new_height)\n#\n# returns: resized array of images, (samples, new_width, new_height, channels)\n#\ndef resize(images, new_size):\n # tensorflow has an image resize funtion that can do this in bulk\n # note the conversion back to numpy after the resize\n return tf.image.resize(images, new_size).numpy()\n \n# Convert images to grayscale\n# images: array of colour images to convert, of size (samples, width, height, 3)\n#\n# returns: array of converted images, of size (samples, width, height, 1)\n#\ndef convert_to_grayscale(images):\n # storage for converted images\n gray = []\n # loop through images\n for i in range(len(images)):\n # convert each image using openCV\n gray.append(cv2.cvtColor(images[i,:], cv2.COLOR_BGR2GRAY))\n # pack converted list as an array and return\n return np.expand_dims(np.array(gray), axis = -1)","repo_name":"xiaohai-huang/cab420-workspace","sub_path":"work/machine-learning/a1b/cab420_a1b_q1_utils.py","file_name":"cab420_a1b_q1_utils.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1149581531","text":"import random\nimport turtle\n\n##### Turtle \nturtle.title('Tic Tac Toe')\nt=turtle.Turtle()\n\n\ndef draw_board():\n t.pensize(3)\n t.fd(300)\n t.bk(200)\n t.lt(90)\n t.fd(100)\n t.bk(300)\n t.fd(100)\n t.lt(90)\n t.fd(100)\n t.bk(300)\n t.fd(100)\n t.rt(90)\n t.fd(200)\n t.bk(300)\ndef draw_x(x,y):\n t.pencolor('red')\n t.penup()\n t.goto(x,y)\n t.pd()\n t.lt(0)\n t.rt(45)\n t.fd(100)\n t.bk(50)\n t.lt(90)\n t.fd(50)\n t.bk(100)\n t.rt(45)\ndef draw_circle(x,y):\n t.pencolor('blue')\n t.penup()\n t.goto(x,y)\n t.pd()\n t.circle(30)\n t.penup()\ndef draw_horizontal_line(x,y):\n t.penup()\n t.goto(x,y)\n t.pd()\n t.lt(0)\n t.rt(90)\n t.fd(300)\n t.bk(300)\n t.rt(180)\ndef draw_ver_line(x,y):\n t.penup()\n t.goto(x,y)\n t.pd()\n t.lt(0)\n t.rt(180)\n t.fd(300)\n t.lt(0)\ndef draw_left_line(x,y):\n t.penup()\n t.goto(x,y)\n t.pd()\n t.lt(0)\n t.rt(135)\n t.fd(400)\ndef draw_right_line(x,y):\n t.penup()\n t.goto(x,y)\n t.pd()\n t.lt(0)\n t.lt(135)\n t.fd(400)\ndef draw_marker(player,player_pos):\n if player_pos==1 and player=='o':\n draw_circle(80,50)\n elif player_pos==2 and player=='o':\n draw_circle(180,50)\n elif player_pos==3 and player=='o':\n draw_circle(280,50)\n elif player_pos==4 and player=='o':\n draw_circle(80,-40)\n elif player_pos==5 and player=='o':\n draw_circle(180,-40)\n elif player_pos==6 and player=='o':\n draw_circle(280,-40)\n elif player_pos==7 and player=='o':\n draw_circle(80,-140)\n elif player_pos==8 and player=='o':\n draw_circle(180,-140)\n elif player_pos==9 and player=='o':\n draw_circle(280,-140)\n\n elif player_pos==1 and player=='x':\n draw_x(10,10)\n elif player_pos==2 and player=='x':\n draw_x(110,10)\n elif player_pos==3 and player=='x':\n draw_x(210,10)\n elif player_pos==4 and player=='x':\n draw_x(10,-90)\n elif player_pos==5 and player=='x':\n draw_x(110,-90)\n elif player_pos==6 and player=='x':\n draw_x(210,-90)\n elif player_pos==7 and player=='x':\n 
draw_x(10,-190)\n elif player_pos==8 and player=='x':\n draw_x(110,-190)\n else:\n draw_x(210,-190)\n\nboard=[' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\nvs=' '\nplayer1=' '\nplayer2=' '\nplayer1_pos=' '\nplayer2_pos=' '\nmoves_played=0\ndef check_emptyspaces():\n check=0 \n for x in range(1,10):\n if board[x]==' ':\n check=1\n if check==1:\n check=0\n return True\n else:\n check=0\n return False\ndef user_input(player):\n a=0\n if check_emptyspaces()==True:\n while type(a)!=type('int'):\n try:\n a = int(input(\"Тоглогч \"+player+\" 1-ээс 9-ийн хооронд тоо оруулна уу: \"))\n if a>0 and a<= 9:\n return a\n else:\n print(\"Буруу утга оруулсан байна, Дахин тоо оруулна уу! \")\n a=None\n except ValueError:\n print(\"Буруу утга оруулсан байна, Дахин тоо оруулна уу! \")\n else:\n print(\"Тэнцлээ\")\ndef validate_position(position):\n if board[position] == ' ':\n return True\n else:\n return False\ndef check_wins(player):\n if board[1]==player and board[2]==player and board[3]== player:\n draw_horizontal_line(10,50)\n return True\n elif board[4]== player and board[5]== player and board[6]== player:\n draw_horizontal_line(10,-50)\n return True\n elif board[7]== player and board[8]== player and board[9]== player:\n draw_horizontal_line(10,-150)\n return True\n\n elif board[1]== player and board[5]== player and board[9]== player:\n draw_left_line(10,90)\n return True\n elif board[3]== player and board[5]== player and board[7]== player:\n draw_right_line(300,90)\n return True\n\n elif board[1]== player and board[4]== player and board[7]== player:\n draw_ver_line(50,90)\n return True\n elif board[2]== player and board[5]== player and board[8]== player:\n draw_ver_line(150,90)\n return True\n elif board[3]== player and board[6]== player and board[9]== player:\n draw_ver_line(250,90)\n return True\n else:\n return False\ndef update(player, player_pos):\n global moves_played\n if moves_played!=9:\n while validate_position(player_pos)!=True:\n player_pos = user_input(player)\n validate_position(player_pos)\n else:\n if check_emptyspaces()==True:\n\n board[player_pos]=player\n draw_marker(player, player_pos)\n moves_played+=1\ndef generate_random():\n return random.randint(1,9) \n\n\n\n\nprint(\"-------------Tic Tac Toё тоглоомд тавтай морилно уу-------------\")\nprint(\"----------------------------Дүрэм-------------------------------\")\nprint(\"****************************************************************\")\nprint(\"* Сонгосон тэмдэгээ 3 дараалуулан байрлуулах \\n* 1- ээс 9-ийн хооронд утга гараас оруулах \\n* Зүүн дээд булан 1ээс эхлэн Баруун доод булан 9 хүртэл байна\")\nprint(\"*******************************************\")\nprint(\"Доор гарч ирэх хувилбараас сонголт хийнэ үү: \")\nprint(\"----------------------------------------------\")\nprint(\"1. Тоглогч  VS Комьпютер \\n2. Тоглогч  VS Хүн \")\nprint(\"----------------------------------------------\")\n\nwhile vs ==' ' or vs>2:\n try: \n vs=int(input(\"Сонгосон дугаараа оруулна уу:\"))\n if vs >0 and vs<=2:\n while player1 != 'x' or player1 != 'o':\n player1= str(input(\"Тоглогч 1 дараах сонголтуудаас сонгох X эсвэл O :\"))\n if player1 == 'x' or player1=='o':\n if player1=='x':\n player2='o'\n else:\n player2='x'\n print(\"Тоглоом эхэллээ!! 
\\n\")\n #draw()\n draw_board() \n\n while check_emptyspaces()== True:\n ### Player 1\n player1_pos=user_input(player1)\n update(player1, player1_pos)\n\n if check_wins(player1)== True:\n t.penup()\n t.goto(20,120)\n t.pendown()\n t.write(\"Тоглоом дууслаа.\", font=(\"Arial\",20,\"normal\"))\n print(\"Баяр хүргэе тоглогч\", player1, \"хожлоо !!!\")\n break\n\n ### player 2\n if vs==1:\n player2_pos=generate_random()\n if moves_played!=9:\n while validate_position(player2_pos)!= True:\n player2_pos=generate_random()\n validate_position(player2_pos)\n \n else:\n board[player2_pos]=player2\n draw_marker(player2,player2_pos)\n moves_played+=1\n\n if check_wins(player2)== True:\n t.penup()\n t.goto(20,120)\n t.pendown()\n print(\"Баяр хүргэе тоглогч\", player2,\"хожлоо!!!!\")\n break\n else:\n player2_pos=user_input(player2)\n update(player2,player2_pos)\n ### check for player 2 if wins.\n if check_wins(player2)==True:\n print(\"Баяр хүргэе тоглогч\", player2,\"хожлоо!!!!\")\n t.penup()\n t.goto(20,120)\n t.pendown()\n t.write(\"Тоглоом дууслаа.\",font=(\"Arial\",15,\"normal\"))\n break\n if check_emptyspaces()!=False:\n t.penup()\n t.goto(20,120)\n t.pendown()\n t.write(\"Тоглоом дууслаа.\",font=(\"Arial\",20,\"normal\"))\n #print(\"Тоглоом дууслаа.\")\n\n break\n except ValueError:\n print(\" Оруулсан утга тоо биш байна Дахин оруулна уу! \") \nturtle.done() \n \n","repo_name":"Skytenn/Games","sub_path":"Tic tac toe.py","file_name":"Tic tac toe.py","file_ext":"py","file_size_in_byte":9236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9552148845","text":"import glob\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nfrom copy import deepcopy\nfrom typing import List\n\nimport biom\nimport pandas as pd\nimport skbio\nfrom q2_types.feature_data import DNAFASTAFormat, DNAIterator\nfrom q2_types.per_sample_sequences import CasavaOneEightSingleLanePerSampleDirFmt\n\nfrom .._utils import _process_common_input_params, run_command\n\n\ndef _process_iss_arg(arg_key, arg_val):\n \"\"\"Creates a list with argument and its value.\n\n Argument values represented by a list will be converted to a single\n string joined by spaces, e.g.: [1, 2, 3] -> '1 2 3'.\n Argument names will be converted to command line parameters by\n appending a '--' prefix, e.g.: 'some_parameter' -> '--some_parameter'.\n\n Args:\n arg_key (str): Argument name.\n arg_val: Argument value.\n\n Returns:\n [converted_arg, arg_value, ...]: List containing a prepared command\n line parameter and its value(s).\n \"\"\"\n if isinstance(arg_val, bool) and arg_val:\n return [f\"--{arg_key}\"]\n elif not isinstance(arg_val, list):\n return [f\"--{arg_key}\", str(arg_val)]\n else:\n flags = [f\"--{arg_key}\"]\n flags.extend([str(x) for x in arg_val])\n return flags\n\n\ndef _rename_reads_files(dp):\n reads = sorted(glob.glob(os.path.join(dp, \"*.fastq.gz\")))\n reads_new = [r.replace(\".fastq.gz\", \"_001.fastq.gz\") for r in reads]\n for r, rn in zip(reads, reads_new):\n os.rename(r, rn)\n return reads_new\n\n\ndef _generate_reads(samples, args, result_fp):\n base_cmd = [\"iss\", \"generate\", \"--compress\"]\n base_cmd.extend(args)\n\n for s in samples:\n cmd = deepcopy(base_cmd)\n sample_prefix = os.path.join(result_fp, f\"{s}_00_L001\")\n\n cmd.extend([\"--output\", sample_prefix])\n\n try:\n run_command(cmd, verbose=True)\n except subprocess.CalledProcessError as e:\n raise Exception(\n \"An error was encountered while running InSilicoSeq, \"\n f\"(return code 
{e.returncode}), please inspect \"\n \"stdout and stderr to learn more.\"\n )\n\n _rename_reads_files(result_fp)\n\n\ndef _abundances_to_biom(abundance_fps):\n abundances = []\n for f in abundance_fps:\n sample = os.path.splitext(os.path.basename(f))[0].split(\"_\")[0]\n abundances.append(\n pd.read_csv(f, sep=\"\\t\", index_col=0, header=None, names=[sample])\n )\n biom_df = pd.concat(abundances, axis=1)\n return biom.Table(\n data=biom_df.values,\n observation_ids=biom_df.index.tolist(),\n sample_ids=biom_df.columns.tolist(),\n )\n\n\ndef _ensure_sample_names_exists(sample_names):\n if not sample_names:\n # If it's empty or None, create a list with a default element \"sample\"\n sample_names = [\"sample\"]\n print(\n 'The \"--p-sample-names\" option was not provided. '\n 'Only one sample will be created with the prefix \"sample\".'\n \"\\n\"\n )\n return sample_names\n\n\n# TODO: allow to input custom genomes and sample from those\ndef generate_reads(\n genomes: DNAFASTAFormat = None,\n sample_names: List[str] = None,\n n_genomes: int = 10,\n ncbi: List[str] = [\"bacteria\"],\n n_genomes_ncbi: List[int] = [10],\n abundance: str = \"lognormal\",\n coverage: str = \"off\",\n n_reads: int = 1000000,\n mode: str = \"kde\",\n model: str = \"HiSeq\",\n gc_bias: bool = False,\n cpus: int = 1,\n debug: bool = False,\n seed: int = 0,\n) -> (CasavaOneEightSingleLanePerSampleDirFmt, DNAFASTAFormat, biom.Table):\n if coverage == \"off\":\n coverage = None\n _locals = locals().copy()\n available_genomes = 0\n if genomes:\n if n_genomes_ncbi or ncbi:\n print(\n 'Template genome sequences were provided - \"n-genomes-ncbi\" '\n 'and \"ncbi\" parameters will be ignored.'\n )\n _locals[\"n_genomes_ncbi\"], _locals[\"ncbi\"] = None, None\n for _ in genomes.view(DNAIterator):\n available_genomes += 1\n elif n_genomes_ncbi and ncbi and (len(n_genomes_ncbi) != len(ncbi)):\n raise Exception(\n \"Genome counts (--n_genomes_ncbi) need to correspond \"\n \"to the kingdoms names (--ncbi). You provided \"\n f\"{len(ncbi)} kingdom(s) but {len(n_genomes_ncbi)} \"\n f\"corresponding genome counts were found. Please \"\n f\"correct your input.\"\n )\n\n if genomes and (n_genomes >= available_genomes):\n print(\n f\"The number of available genomes ({available_genomes}) is \"\n f\"smaller than the requested number of genomes per sample \"\n f\"({n_genomes}). The number of requested genomes will be \"\n f\"reduced to {available_genomes - 1}.\"\n )\n _locals[\"n_genomes\"] = available_genomes - 1\n\n sample_names = _ensure_sample_names_exists(sample_names)\n\n if len(set(sample_names)) < len(sample_names):\n dupl = {str(x) for x in sample_names if sample_names.count(x) > 1}\n raise Exception(\n \"Sample names need to be unique. 
Found duplicated \"\n f'names: {\", \".join(sorted(dupl))}'\n )\n\n kwargs = {k: v for k, v in _locals.items() if k not in [\"sample_names\"]}\n args = _process_common_input_params(processing_func=_process_iss_arg, params=kwargs)\n\n with tempfile.TemporaryDirectory() as tmp:\n result_reads = CasavaOneEightSingleLanePerSampleDirFmt()\n result_genomes = DNAFASTAFormat()\n\n # simulate reads\n _generate_reads(sample_names, args, tmp)\n\n # move reads into CasavaFmt\n for f in glob.glob(os.path.join(tmp, \"*.fastq.gz\")):\n shutil.move(f, str(result_reads))\n\n # move original genomes into DNAFASTAFmt if found\n # otherwise return empty file\n genome_ids = []\n with result_genomes.open() as fout:\n for f in sorted(glob.glob(os.path.join(tmp, \"*.fasta\"))):\n for seq in skbio.read(f, format=\"fasta\"):\n if seq.metadata[\"id\"] not in genome_ids:\n genome_ids.append(seq.metadata[\"id\"])\n seq.write(fout)\n\n # convert abundances to a biom table\n abund_suffix = \"coverage\" if coverage else \"abundance\"\n abundance_fps = sorted(glob.glob(os.path.join(tmp, f\"*_{abund_suffix}.txt\")))\n result_biom = _abundances_to_biom(abundance_fps)\n\n return result_reads, result_genomes, result_biom\n","repo_name":"bokulich-lab/q2-assembly","sub_path":"q2_assembly/iss/iss.py","file_name":"iss.py","file_ext":"py","file_size_in_byte":6489,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"76"} +{"seq_id":"21072426310","text":"textA = (input('Digite o texto A: '))\nhalfA = len(textA)//2\ntextB = (input('Digite o texto B: '))\nhalfB = len(textB)//2\n\na1 = textA[:halfA]\na2 = textA[halfA:] \n\nb1 = textB[:halfB]\nb2 = textB[halfB:] \n\nprint(f\"Texto A dividido em duas Partes: {a1} e {a2}\")\nprint(f\"Texto B dividido em duas Partes: {b1} e {b2}\")\nprint(f\"{a1} + {b2} = {a1}{b2} \")\nprint(f\"{a2} + {b1} = {a2}{b1} \")\nprint(f\"{textA[0]} + {textB[1]} + {textA[-1]} + {textB[-1]}\")","repo_name":"gabrielfontineli/pc-trinket","sub_path":"python/lista01/questao04.py","file_name":"questao04.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37070488462","text":"import cv2\r\ncapture = cv2.VideoCapture(0)\r\nfourcc = cv2.VideoWriter_fourcc(*'XVID');\r\nprint(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\r\nprint(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\ncapture.set(3, 512)\r\ncapture.set(4, 420)\r\nout = cv2.VideoWriter('output.avi', fourcc, 20.0, (int(capture.get(3)),int(capture.get(4))));\r\nprint(capture.get(3))\r\nprint(capture.get(4))\r\nwhile(capture.isOpened()):\r\n ret, frame = capture.read()\r\n if ret == True:\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n text = 'Width:'+ str(capture.get(3)) + 'Height:' + str(capture.get(4))\r\n frame = cv2.putText(frame, text, (10, 50), font, 1, (0,255,255), 2, cv2.LINE_AA)\r\n cv2.imshow('frame', frame)\r\n out.write(frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n\r\n\r\ncapture.release()\r\nout.release()\r\ncv2.destroyAllWindows()","repo_name":"uvcan/warning_system_for_drivers","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"4236797693","text":"\"\"\"\nd/dx e^x = e^x\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef exp_f(x):\n base = 2\n return np.power(base, x)\n\n\ndef numerical_diff(f, x):\n h = 1e-2 # 0.001\n return (f(x + 
h) - f(x - h)) / (2 * h) # mean-value theorem (to minimize error)\n\n\ndef draw_2(x_values):\n exp_values = exp_f(x_values)\n diff_values = numerical_diff(exp_f, x_values)\n\n plt.plot(x_values, exp_values, color=\"blue\")\n plt.plot(x_values, diff_values, color=\"red\")\n\n\ndef draw_e(x_values):\n # an approximation of Napier's constant\n plt.plot(x_values, np.power(2.718, x_values), color=\"green\")\n plt.plot(x_values, numerical_diff(lambda x: np.power(2.718, x), x_values), color=\"yellow\")\n\n\nif __name__ == '__main__':\n xs = np.arange(-1, 1, 0.1)\n draw_2(xs)\n draw_e(xs)\n\n plt.grid()\n plt.show()\n","repo_name":"b1ueskydragon/PythonGround","sub_path":"math/napier/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"70241761526","text":"import io\n\nimport os\nimport cv2\nfrom PIL import Image\n\nfrom .files import save_binary, hash_file\nfrom ..settings import IMAGES_ROOT, MEDIA_ROOT\n\n\ndef open_image(bytes_arr):\n return Image.open(io.BytesIO(bytes_arr))\n\n\ndef image_to_byte_array(image_path):\n img = Image.open(image_path)\n img_byte_arr = io.BytesIO()\n img.save(img_byte_arr, format='PNG')\n return img_byte_arr.getvalue()\n\n\ndef save_uploaded_photo_as_binary_array(photo_path):\n curr_dir = os.getcwd()\n path = os.path.join(MEDIA_ROOT, photo_path)\n image_byte_array = image_to_byte_array(path)\n filename = hash_file(image_byte_array)\n filename = filename + '.png'\n path_to_save = os.path.join(IMAGES_ROOT, filename)\n save_binary(path_to_save, image_byte_array)\n path_to_remove = os.path.join(curr_dir, path)\n os.remove(path_to_remove)\n return filename\n\n\ndef extract_images(video_path, fps=1):\n count = 0\n video_path = os.path.join(MEDIA_ROOT, video_path)\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n success = True\n extracted_images = []\n while success:\n success, image = vidcap.read()\n print('Read a new frame: ', success)\n if success:\n photo_name = \"frame%d.jpg\" % count\n cv2.imwrite(MEDIA_ROOT + photo_name, image) # save frame as JPEG file\n count = count + fps\n filename = save_uploaded_photo_as_binary_array(photo_name)\n extracted_images.append(filename)\n os.remove(video_path)\n return extracted_images\n","repo_name":"mkumaszka/roadDamageDetectionApp","sub_path":"backend/road_damages/utils/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"3117351500","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import print_function\r\n\r\n# from evaluate import extract_features_from_db\r\n# from DB import Database\r\nfrom src.evaluate import extract_features_from_db\r\nfrom src.DB import Database\r\n\r\n# from color import Color\r\n# from edge import Edge\r\n# from gabor import Gabor\r\n\r\nfrom src.color import Color\r\nfrom src.edge import Edge\r\nfrom src.gabor import Gabor\r\n\r\n\r\nimport numpy as np\r\nimport itertools\r\nimport os\r\n\r\n\r\nd_type = 'd1'\r\ndepth = 30\r\n\r\nfeat_pools = ['color', 'edge', 'gabor']\r\n\r\n# result dir\r\nresult_dir = 'result'\r\nif not os.path.exists(result_dir):\r\n os.makedirs(result_dir)\r\n\r\n\r\nclass FeatureFusion(object):\r\n\r\n def __init__(self, features):\r\n assert len(features) > 1, \"need to fuse more than one feature!\"\r\n self.features = features\r\n self.images = None\r\n\r\n def extract_features(self, db, 
verbose=False):\r\n if verbose:\r\n print(\"Use features {}\".format(\" & \".join(self.features)))\r\n\r\n if self.images == None:\r\n feats = []\r\n for f_class in self.features:\r\n feats.append(self._get_feat(db, f_class))\r\n # print(\"feats: \", feats)\r\n images = self._concat_feat(db, feats)\r\n # print(\"samples: \", images)\r\n self.images = images # cache the result\r\n return self.images\r\n\r\n # Extract multiple features from query image\r\n def extract_features_from_query_img(self, query_image_filepath):\r\n feats = []\r\n for f_class in self.features:\r\n feats.append(self.extract_query_features(query_image_filepath, f_class))\r\n # print(\"feats: \", feats)\r\n images = self.concat_query_features(feats)\r\n return images\r\n\r\n # Extract one type of feature from query image\r\n def extract_query_features(self, query_image_filepath, f_class):\r\n if f_class == 'color':\r\n f_c = Color()\r\n elif f_class == 'edge':\r\n f_c = Edge()\r\n elif f_class == 'gabor':\r\n f_c = Gabor()\r\n return f_c.extract_features(db=None, query_image_filepath=query_image_filepath, is_query=True, verbose=False)\r\n\r\n # Extract one type of feature from all images in database\r\n def _get_feat(self, db, f_class):\r\n if f_class == 'color':\r\n f_c = Color()\r\n elif f_class == 'edge':\r\n f_c = Edge()\r\n elif f_class == 'gabor':\r\n f_c = Gabor()\r\n return f_c.extract_features(db, verbose=False)\r\n\r\n # Concatenate image features for the query image\r\n def concat_query_features(self, feats):\r\n first_item = feats[0]\r\n for i in feats[1:]:\r\n first_item['hist'] = np.append(first_item['hist'], i[\"hist\"])\r\n return first_item\r\n\r\n # Concatenate image features for each image in the database\r\n def _concat_feat(self, db, feats):\r\n images = feats[0]\r\n delete_idx = []\r\n for idx in range(len(images)):\r\n for feat in feats[1:]:\r\n feat = self._to_dict(feat)\r\n key = images[idx]['img']\r\n if key not in feat:\r\n delete_idx.append(idx)\r\n continue\r\n assert feat[key]['cls'] == images[idx]['cls']\r\n images[idx]['hist'] = np.append(images[idx]['hist'], feat[key]['hist'])\r\n for d_idx in sorted(set(delete_idx), reverse=True):\r\n del images[d_idx]\r\n if delete_idx != []:\r\n print(\"Ignore %d samples\" % len(set(delete_idx)))\r\n\r\n return images\r\n\r\n # Convert image information to dictionary\r\n def _to_dict(self, feat):\r\n ret = {}\r\n for f in feat:\r\n ret[f['img']] = {\r\n 'cls': f['cls'],\r\n 'hist': f['hist']\r\n }\r\n return ret\r\n\r\n\r\n# Measure the performance of every combination of image features\r\ndef evaluate_feats(db, N, feat_pools=feat_pools, d_type='d1', depths=[None, 300, 200, 100, 50, 30, 10, 5, 3, 1]):\r\n result = open(os.path.join(result_dir, 'feature_fusion-{}-{}feats.csv'.format(d_type, N)), 'w')\r\n for i in range(N):\r\n result.write(\"feat{},\".format(i))\r\n result.write(\"depth,distance,MMAP\")\r\n combinations = itertools.combinations(feat_pools, N)\r\n for combination in combinations:\r\n fusion = FeatureFusion(features=list(combination))\r\n for d in depths:\r\n APs = extract_features_from_db(db, feature_instance=fusion, d_type=d_type, depth=d)\r\n cls_MAPs = []\r\n for cls, cls_APs in APs.items():\r\n MAP = np.mean(cls_APs)\r\n cls_MAPs.append(MAP)\r\n r = \"{},{},{},{}\".format(\",\".join(combination), d, d_type, np.mean(cls_MAPs))\r\n print(r)\r\n result.write('\\n'+r)\r\n print()\r\n 
result.close()\r\n\r\n\r\n\r\n","repo_name":"ashleyszemei/cbir","sub_path":"src/fusion.py","file_name":"fusion.py","file_ext":"py","file_size_in_byte":4387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"36048786183","text":"import os\nimport sys\n\nimport yaml\nfrom docopt import docopt\n\n__root__ = os.path.dirname(__file__)\n\n_DOC = \"\"\"\n{__mayo__} {__version__} ({__date__})\n{__description__}\n{__author__}\n\"\"\"\n_USAGE = \"\"\"\nUsage:\n    {__executable__} train ... [options]\n    {__executable__} export ... [options]\n    {__executable__} (-h | --help)\n\nOptions:\n    --overrides=  Specify hyper-parameters to override.\n                  Example: --overrides=\"a.b = c; d = e\"\n\"\"\"\n\n\ndef meta():\n    meta_file = os.path.join(__root__, 'meta.yaml')\n    meta_dict = yaml.load(open(meta_file, 'r'))\n    meta_dict['__executable__'] = os.path.basename(sys.argv[0])\n    return meta_dict\n\n\ndef doc():\n    return _DOC.format(**meta())\n\n\ndef usage():\n    return doc() + _USAGE.format(**meta())\n\n\ndef _config(args):\n    from mayo.config import Config\n    return Config(args[''], overrides=args['--overrides'])\n\n\ndef train(args):\n    from mayo.train import Train\n    return Train(_config(args)).train()\n\n\ndef validate(args):\n    from mayo.evaluate import Evaluate\n    return Evaluate(_config(args)).evaluate()\n\n\ndef export(args):\n    print(_config(args).to_yaml())\n\n\ndef main():\n    args = docopt(usage(), version=meta()['__version__'])\n    commands = [train, validate, export]\n    for func in commands:\n        if not args.get(func.__name__, None):\n            continue\n        return func(args)\n    raise NotImplementedError('Command not found')\n","repo_name":"Aaron-Zhao123/mayo","sub_path":"mayo/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72564708404","text":"from collections import defaultdict\nfrom typing import Any, Dict, List\n\nfrom lk_flow.core import EVENT, Context, Event, ModAbstraction\nfrom lk_flow.models import Task\n\n\nclass HookTrigger(ModAbstraction):\n    # {event_name : hook_task_name: [trigger_task_name2] }\n    hook_listeners: Dict[EVENT, Dict[str, List[str]]] = defaultdict(\n        lambda: defaultdict(list)\n    )\n\n    @classmethod\n    def setup_mod(cls, mod_config: Dict[str, Any]) -> None:\n        context: Context = Context.get_instance()\n        # add event hook\n        context.event_bus.add_listener(EVENT.SYSTEM_SETUP, cls.process_set_add)\n\n        context.event_bus.add_listener(EVENT.TASK_ADD, cls.task_hook_add)\n        context.event_bus.add_listener(EVENT.TASK_DELETE, cls.task_hook_remove)\n        # listen for Task events\n        context.event_bus.add_listener(EVENT.TASK_PRE_START, cls.task_hook_trigger)\n        context.event_bus.add_listener(EVENT.TASK_RUNNING, cls.task_hook_trigger)\n        context.event_bus.add_listener(EVENT.TASK_STOP, cls.task_hook_trigger)\n        context.event_bus.add_listener(EVENT.TASK_FINISH, cls.task_hook_trigger)\n        context.event_bus.add_listener(EVENT.TASK_RUNNING_ERROR, cls.task_hook_trigger)\n        context.event_bus.add_listener(EVENT.TASK_FINISH_ERROR, cls.task_hook_trigger)\n\n    @classmethod\n    def add_task_hook(cls, trigger_events: str, trigger_task_name: str) -> None:\n        \"\"\"Parse Task.trigger_events\n\n        Args:\n            trigger_events: e.g. 'Event_Name__hook_task_name Event_Name2__hook_task_name2'\n            trigger_task_name: name of the task to start when the event is triggered\n        \"\"\"\n        if not trigger_events:\n            return\n        for trigger_event in trigger_events.split():\n            event_name, hook_task_name = trigger_event.split(\"__\")\n            event = 
EVENT(event_name.lower())\n            cls.hook_listeners[event][hook_task_name].append(trigger_task_name)\n\n    @classmethod\n    def process_set_add(cls, _: Event) -> None:\n        \"\"\"\n        Read the context on the SYSTEM_SETUP event\n        and add hooks for every process\n        \"\"\"\n\n        context = Context.get_instance()\n        for task_name, process in context.get_all_processes():\n            if process.config.trigger_events:\n                cls.add_task_hook(process.config.trigger_events, process.config.name)\n\n    @classmethod\n    def task_hook_add(cls, event: Event) -> None:\n        \"\"\"\n        A trigger_task was added to the system:\n        load its trigger_task.trigger_events configuration\n        \"\"\"\n        task: Task = event.task\n        cls.add_task_hook(task.trigger_events, task.name)\n\n    @classmethod\n    def task_hook_remove(cls, event: Event) -> None:\n        \"\"\"\n        A trigger_task was removed from the system:\n        remove the corresponding hooks\n        \"\"\"\n        task: Task = event.task\n        if not task.trigger_events:\n            return\n        for trigger_event in task.trigger_events.split():\n            event_name, hook_task_name = trigger_event.split(\"__\")\n            event = EVENT(event_name.lower())\n            if task.name in cls.hook_listeners[event][hook_task_name]:\n                cls.hook_listeners[event][hook_task_name].remove(task.name)\n\n    @classmethod\n    def task_hook_trigger(cls, event: Event) -> None:\n        \"\"\"When an event arrives, start the tasks listening for it\"\"\"\n        context = Context.get_instance()\n        for trigger_task_name in cls.hook_listeners[event.event_type][event.task_name]:\n            context.start_task(trigger_task_name)\n","repo_name":"linksense/lk-flow","sub_path":"lk_flow/plugin/hook_trigger.py","file_name":"hook_trigger.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"34950365030","text":"import time\nimport gym\nimport torch\nimport numpy as np\nfrom PPO import PPO,ActorCritic\n\ndef make_env(env_id,idx,run_name,gamma):\n    def thunk():\n        env = gym.make(env_id,render_mode = \"human\")\n        env = gym.wrappers.FlattenObservation(env)\n        env = gym.wrappers.RecordEpisodeStatistics(env)\n        if idx == 0:\n            env = gym.wrappers.RecordVideo(env,f\"videos/{run_name}\")\n        env = gym.wrappers.ClipAction(env)\n        env = gym.wrappers.NormalizeObservation(env)\n        env = gym.wrappers.TransformObservation(env, lambda obs: np.clip(obs, -10, 10))\n        env = gym.wrappers.NormalizeReward(env, gamma=gamma)\n        env = gym.wrappers.TransformReward(env, lambda reward: np.clip(reward, -10, 10))\n        return env\n    return thunk\n\ndef test():\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    total_test_episodes = 10\n    max_ep_length = 500\n    gamma = 0.999\n    env_id = \"custom_env/ExpWorld-v1\"\n    run_name = f\"{env_id}__{int(time.time())}\"\n    envs = gym.vector.SyncVectorEnv([make_env(env_id,i,run_name,gamma) for i in range(1)])\n    # env = gym.make(\"BipedalWalker-v3\",render_mode= 'human')\n    # obs_dim = env.observation_space.shape[0]\n    # action_dim = env.action_space.shape[0]\n    test_agent = PPO(envs)\n    test_agent.load(\"PPO__model_1695294304\")\n\n    print(\"##############################\")\n    test_running_award = 0\n    for step in range(1,total_test_episodes+1):\n        ep_reward = 0\n        obs,_ = envs.reset() \n        for t in range(1,max_ep_length):\n            action,_ = test_agent.agent.select_action(torch.tensor(obs,dtype=torch.float,device=device))\n            obs,reward,done,_,_ = envs.step(action)\n            ep_reward += reward\n            if done:\n                break\n        \n        test_running_award += ep_reward\n        print(f\"episode: {step} \\n Reward: {round(ep_reward,2)}\")\n        ep_reward = 0\n    envs.close()\n\n    avg_test_reward = test_running_award/total_test_episodes\n    print(f\"avg_test_rew: {avg_test_reward}\")\n\nif __name__ == \"__main__\":\r\n    
test()","repo_name":"Manaro-Alpha/Obstacle-Avoidance-using-RL","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"16705017628","text":"class Node:\n # Nodes of a directed graph.\n def __init__(self, name):\n self.name = name\n # list of child nodes\n self.children = list()\n self.num_parents = 0\n self.color = \"WHITE\" # used for cycle check\n\n def add_child(self, c_node):\n # add this node as a child of p_node\n if c_node not in self.children:\n self.children.append(c_node)\n c_node.num_parents += 1\n\n\nclass Graph:\n # Represents a directed graph. A user of this class should create\n # all nodes in the graph before adding edges\n def __init__(self):\n self.nodes = list()\n\n def add_node(self, node):\n self.nodes.append(node)\n\n def get_nodes(self):\n return self.nodes\n\n def find_node(self, name):\n for node in self.nodes:\n if name == node.name:\n return node\n return None\n\n # returns true if a cycle exists in the graph. Using Graph Coloring\n def cycle_check(self):\n cycle = False\n # mark all nodes in graph as unchecked\n for i in range(0, len(self.nodes)):\n self.nodes[i].color = \"WHITE\"\n for i in range(0, len(self.nodes)):\n # if this node is unchecked, check it and all children\n if self.nodes[i].color == \"WHITE\":\n cycle = cycle or self.cycle_check_util(self.nodes[i])\n return cycle\n\n def cycle_check_util(self, node):\n # A WHITE node has not yet been checked.\n # A GREY node has started being checked.\n # A BLACK node has been checked.\n node.color = \"GREY\"\n\n # if any adjacent node is GREY, then there is a loop.\n for child in node.children:\n if child.color == \"GREY\":\n return True\n elif child.color == \"WHITE\" and self.cycle_check_util(child):\n return True\n\n # mark node as fully processed\n node.color = \"BLACK\"\n return False\n\n # implements topological sort using Khan's Algorithm\n def sort(self):\n # number of parents per node\n degree = list(range(0, len(self.nodes)))\n # nodes that can be added to the sorted list at any time\n queue = list()\n # list of nodes sorted topologically\n sorted_nodes = list()\n for i in range(0, len(self.nodes)):\n degree[i] = self.nodes[i].num_parents\n if degree[i] == 0:\n # this node has no parents, so it may be added to the sorted list.\n queue.append(self.nodes[i])\n while len(queue) != 0:\n # there will be at least one node with no parents because this is a DAG\n first = queue.pop(0)\n sorted_nodes.append(first)\n for child in first.children:\n # find all children of the node we are adding\n for i in range(0, len(self.nodes)):\n if self.nodes[i] == child:\n # subtract degree of node\n degree[i] -= 1\n if degree[i] == 0:\n queue.append(child)\n self.nodes = sorted_nodes\n","repo_name":"JohnCornwell/STAT","sub_path":"STAT/src/SimCommunication/cycleCheck.py","file_name":"cycleCheck.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"18767470220","text":"import pandas as pd\nimport folium\nfrom folium.plugins import HeatMap\nfrom flask import Flask,render_template\n\napp = Flask(__name__)\n\ndf=pd.read_csv(\"data.csv\")\n\ndef createMap(cars): \n cars.rename(columns={'Lat':'latitude','Long':'longitude'}, inplace=True)\n cars.latitude.fillna(0, inplace = True)\n cars.longitude.fillna(0, inplace = True) \n CarMap=folium.Map(location=[39,35],zoom_start=6)\n HeatMap(data=cars, 
radius=16).add_to(CarMap)\n    CarMap.save('templates/index.html')\n    \n\n@app.route('/<Marka>/<Model>/')\ndef index(Marka,Model):\n    print(Marka)\n    print(Model)\n    # keep only the rows matching both the requested brand and model\n    cars=df[(df[\"Marka\"]==Marka) & (df[\"Model\"]==Model)].iloc[:,0:2]\n    createMap(cars)\n    return render_template(\"index.html\")\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"SefaAkdeniz/Turkey-Used-Car-Data-Analysis-and-Machine-Learning-Price-Predict","sub_path":"mapService.py","file_name":"mapService.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33649168919","text":"from glob import glob\nimport click\nfrom os.path import join, dirname, basename, abspath\nimport os\nfrom tqdm import tqdm\nfrom Bio import SeqIO\nimport pandas as pd\nimport string, random\nimport gzip\n\ndef randomString(stringLength=10):\n    \"\"\"Generate a random string of fixed length \"\"\"\n    letters = string.ascii_letters\n    return ''.join(random.choice(letters) for _ in range(stringLength))\n\n\ndef prokka_summary(indir):\n    p_files = glob(join(indir, '*', \"*.faa\"))\n    if not p_files:\n        raise Exception(\"No faa file in %s\" % indir)\n    sname2locus = {}\n    occured_id = []\n    for p_file in tqdm(p_files):\n        # iterating faa files\n        sname = basename(dirname(p_file)).replace('.faa', '')\n        records = SeqIO.parse(p_file, format='fasta')\n        record = next(records)\n        # get the first one, enough\n        locus_tag = record.id.split('_')[0]\n        if locus_tag in occured_id:\n            # if locus_tag occurred multiple times, regenerate a new one\n            records = SeqIO.parse(p_file, format='fasta')\n            locus_tag = randomString(len(occured_id[-1]) + 1)\n            new_records = []\n            for record in records:\n                record.id = locus_tag + '_' + record.id.split('_')[-1]\n                new_records.append(record)\n            os.renames(p_file, p_file + '.backup')\n            with open(p_file, 'w') as f1:\n                SeqIO.write(new_records, f1, format='fasta')\n        occured_id.append(locus_tag)\n        sname2locus[sname] = {}\n        sname2locus[sname]['locus_prefix'] = locus_tag\n    result_df = pd.DataFrame.from_dict(sname2locus, orient='index')\n    return result_df\n\n\ndef download_summary(indir):\n    genome_dirs = glob(join(indir, '*', '*'))\n    genome_dirs = [_ for _ in genome_dirs\n                   if os.path.isdir(_)]\n    if not genome_dirs:\n        raise Exception(\"No directory detected in %s\" % indir)\n    missing_faa_samples = []\n    sname2locus = {}\n    occured_id = []\n    for each_dir in tqdm(genome_dirs):\n        # iterating all directories\n        p_faa = glob(join(each_dir, '*.faa.gz'))\n        sname = basename(each_dir)\n        if not p_faa:\n            # if no faa file was downloaded, skip it and record it.\n            missing_faa_samples.append(sname)\n            sname2locus[sname] = {}\n            continue\n        p_faa = p_faa[0]\n        records = SeqIO.parse(gzip.open(p_faa,'rt'), format='fasta')\n        random_prefix = randomString(10)\n        if random_prefix in occured_id:\n            random_prefix = randomString(10)\n        occured_id.append(random_prefix)\n        new_records = []\n        for record in records:\n            record.id = random_prefix + '_' + record.id\n            new_records.append(record)\n        with open(join(each_dir, 'generated_protein.faa'), 'w') as f1:\n            SeqIO.write(new_records, f1, format='fasta-2line')\n        sname2locus[sname] = {}\n        sname2locus[sname]['locus_prefix'] = random_prefix\n    result_df = pd.DataFrame.from_dict(sname2locus, orient='index')\n    return result_df\n\n\n@click.command(help=\"quickly get a summary file from prokka_o\")\n@click.option(\"-i\", \"indir\", help='input dir, normally is the output directory of prokka.')\n@click.option(\"-o\", \"outfile\", help='output summary file')\n@click.option(\"-t\", 
\"typeOfdata\", help='data type including prokka or download')\ndef main(indir, outfile, typeOfdata):\n indir = abspath(indir)\n\n if not os.path.exists(dirname(outfile)):\n os.makedirs(dirname(outfile), exist_ok=True)\n if typeOfdata.lower() == 'prokka':\n result_df = prokka_summary(indir)\n elif typeOfdata.lower() == 'download':\n result_df = download_summary(indir)\n else:\n raise Exception('accepted parameters of -t included')\n result_df.to_csv(outfile, index_label='sample_name')\n\nif __name__ == '__main__':\n main()","repo_name":"444thLiao/evol_tk","sub_path":"raw_scripts/grab_Whole_metabolism/api/summary_locus_prefix.py","file_name":"summary_locus_prefix.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"22941758493","text":"from imgurpython.helpers.error import ImgurClientError\nfrom imgurpython import ImgurClient\nfrom PyQt5.QtCore import QThread, pyqtSignal\nimport requests\nimport base64\nimport six\n\n\nclass MyClient(ImgurClient):\n def upload_from_data(self, contents, config=None, anon=True):\n if not config:\n config = dict()\n\n b64 = base64.b64encode(contents)\n data = {\n 'image': b64,\n 'type': 'base64',\n }\n data.update({meta: config[meta] for meta in set(self.allowed_image_fields).intersection(config.keys())})\n\n return self.make_request('POST', 'upload', data, anon)\n\n\ndef decode(key, string):\n string = base64.urlsafe_b64decode(string + b'===')\n string = string.decode('latin') if six.PY3 else string\n encoded_chars = []\n for i in range(len(string)):\n key_c = key[i % len(key)]\n encoded_c = chr((ord(string[i]) - ord(key_c) + 256) % 256)\n encoded_chars.append(encoded_c)\n encoded_string = ''.join(encoded_chars)\n return encoded_string\n\n\ndef authenticate():\n client_secret = ''\n client_id = ''\n\n return MyClient(client_id, client_secret)\n\n\ndef upload_image(image, client):\n \"\"\"\n upload image to imgur\n :param image_path:\n :return: url of the image\n \"\"\"\n\n config = {\n 'album': None,\n 'name': '',\n 'title': '',\n 'description': ''\n }\n\n image_url = client.upload_from_data(image, config=config, anon=True)\n\n return image_url\n\n\nclass Connect(QThread):\n connected = pyqtSignal(ImgurClient)\n\n def run(self):\n print(\"connecting...\")\n try:\n self.connected.emit(authenticate())\n except (ImgurClientError, requests.exceptions.RequestException) as e:\n pass\n\n\nclass Upload(QThread):\n uploaded = pyqtSignal(str)\n failed = pyqtSignal(Exception)\n\n def __init__(self, image, client, parent=None):\n super(Upload, self).__init__(parent)\n self.image = image\n self.client = client\n self.start()\n\n def run(self):\n try:\n image_link = upload_image(self.image, self.client)\n self.uploaded.emit(image_link['link'])\n except (ImgurClientError, requests.exceptions.RequestException) as e:\n self.failed.emit(e)\n\n\nclass uploadToGoogleSearch(QThread):\n uploaded = pyqtSignal(str)\n failed = pyqtSignal(Exception)\n\n def __init__(self, image, parent=None):\n super(uploadToGoogleSearch, self).__init__(parent)\n self.image = image\n self.start()\n\n def run(self):\n try:\n filePath = 'F:\\TranslateSelected\\Screenshot_1.png'\n searchUrl = 'http://www.google.hr/searchbyimage/upload'\n multipart = {'encoded_image': (filePath, self.image), 'image_content': ''}\n response = requests.post(searchUrl, files=multipart, allow_redirects=False)\n fetchUrl = response.headers['Location']\n self.uploaded.emit(fetchUrl)\n except 
requests.exceptions.RequestException as e:\n self.failed.emit(e)\n\n\nclass UploaderInterface:\n def __init__(self, fail, image_uploaded, connected, parent=None):\n self.connected = connected\n self.image_uploaded = image_uploaded\n self.fail = fail\n self.parent = parent\n\n def on_image_upload(self, function):\n self.image_uploaded = function\n\n def on_fail(self, function):\n self.fail = function\n\n def on_connection(self, function):\n self.connected = function\n\n def connect(self):\n self.connectionThread = Connect()\n self.connectionThread.connected.connect(self.setClient)\n self.connectionThread.start()\n self.threads = []\n\n def setClient(self, client):\n self.client = client\n self.connected()\n\n def upload_image(self, image):\n thread = Upload(image, self.client, self.parent)\n thread.failed.connect(self.fail)\n thread.uploaded.connect(self.image_uploaded)\n thread.uploaded.connect(lambda: self.threads.remove(thread))\n self.threads.append(thread)\n thread.start()\n\n def upload_image_to_google_search(self, image):\n thread = uploadToGoogleSearch(image, self.parent)\n thread.failed.connect(self.fail)\n thread.uploaded.connect(self.image_uploaded)\n thread.uploaded.connect(lambda: self.threads.remove(thread))\n self.threads.append(thread)\n thread.start()\n\n\nif __name__ == '__main__':\n from PyQt5.QtWidgets import QApplication\n import sys\n app = QApplication(sys.argv)\n\n def connected():\n interface.upload_image('screenshot_1.png')\n interface = UploaderInterface(lambda: print(\"fail\"), print, connected, None)\n interface.connect()\n sys.exit(app.exec_())\n","repo_name":"clavlav12/Transhot","sub_path":"ImageUploader/ImageUploaderInterface.py","file_name":"ImageUploaderInterface.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"15746670090","text":"from library import *\n\n\n\"\"\"\n本档案用于将scraping.py所爬取下来的音档\n每十秒切分为一张图片\n并将其储存\n\"\"\"\n\n\n# 设定路径与预设值\npath = './data/鸟类.txt'\nname1 = []\n\n# 开启鸟类txt档\nwith open(path, 'r', encoding='utf-8') as f:\n # 逐行读取txt档\n for line in f.readlines():\n # 分割出鸟类名称并将其存至list中\n s = line.split(' ')\n name1.append(s[0])\n\n# 重复四次,代表四种鸟类\nfor i in range(0, 4):\n\n # 显示目前正在转换的鸟类种类\n name_1 = str(name1[i])\n print(name_1)\n\n # 设定音档路径\n path = \"./data/audio/\" + name_1\n n = len(os.listdir(path)) # n为该路径中有几个音档\n\n # 将资料夹中的每个音档都进行转换\n for j in range(1, n + 1):\n audio_path = './data/audio/' + name_1 + '/' + name_1 + str(j) + '.mp3' # 音档所在位置\n image_path = './data/images/' + name_1 # 图档储存资料夹\n count, audio = calc(j, name_1, audio_path, image_path) # 将指定参数汇入calc函式并获取新的值\n\n if int(count) > 0:\n # 如果calculate函数计算后所得的count>0,也就是该音档可以被10整除\n for k in range(0, int(count)):\n # 执行音档切分并转换为图档\n cut_and_trans(k, audio, image_path, name_1)\n\n else:\n # 其他便是音档过短,此时就将其pass掉\n print('音档过短')\n pass\n","repo_name":"liu7388/BirdSound","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"33597905596","text":"from command import Command, is_command\r\nfrom event import Event\r\n\r\n\r\nclass Quit(Command):\r\n\r\n shortname = 'quit'\r\n name = 'Disconnect from the game'\r\n\r\n @is_command\r\n def quit(self, player, *args):\r\n self.world.remove_player(player)\r\n return 
Event('quit')\r\n","repo_name":"lysol/lvlss","sub_path":"src/commands/quit.py","file_name":"quit.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"8265643044","text":"\"\"\"\nAuthor: Matthew Garber\nTerm: Spring 2017\nCOSI 137b Information Extraction\nAssignment #4\n\nThis module contains classes for representing entity mentions and relations.\nThere is a Mention class, to model entity mentions, and a Relation class, to\nmodel relations between two entity mentions.\n\"\"\"\n\nfrom nltk.corpus import stopwords\nfrom tree_util import get_path\n\nSTOPWORDS = set(stopwords.words('english'))\n\nclass Relation:\n    \"\"\"Objects of this class represent relations between two entity mentions.\n    It possesses methods to extract features from the mentions, as well as\n    their contexts. It also contains a method to write its features as a string\n    in the format of a MALLET instance.\n    \"\"\"\n\n    def __init__(self, values, sents, sent_trees, rel_id):\n        \"\"\"Initializes a new Relation object.\n\n        Args:\n            values: A list of values containing information about the relation\n                and the entities it relates.\n            sents: A list of a list of 3-tuples representing words in the\n                sentences of a document.\n            sent_trees: A list of parse trees extracted from the same document.\n            rel_id: The ID number of the relation\n        \"\"\"\n        self.rel_type = values[0]\n        self.m1 = Mention(*values[2:8], sents=sents) # mention 1\n        self.m2 = Mention(*values[8:], sents=sents) # mention 2\n\n        if self.m1.sent_i == self.m2.sent_i:\n            self.tree = sent_trees[self.m1.sent_i]\n            self.in_same_sent = True\n        else:\n            self.tree = None\n            self.in_same_sent = False\n        \n        self._find_words_before_m1(sents)\n        self._find_words_after_m2(sents)\n        self._find_words_between(sents)\n\n        self._find_phrase_features(sents)\n\n        self._find_phrase_dist()\n        \n        self.rel_id = str(rel_id)\n        \n\n    def to_string(self):\n        \"\"\"Uses this object's attributes to derive and write its features as a\n        string formatted as a MALLET instance.\n        \"\"\"\n        feats = []\n        \n        # --- Word Features ---\n        # Bag-of-words\n        feats.extend(['WE1=' + word for word in self.m1.word_set])\n        feats.extend(['WE2=' + word for word in self.m2.word_set])\n        \n        # Words preceding mention 1 + entity type\n        feats.append('BM1F=' + self.pre1_m1[0] + self.m1.ner_tag)\n        feats.append('BM1L=' + self.pre2_m1[0] + self.m1.ner_tag)\n        # Words following mention 2 + entity type\n        feats.append('AM2F=' + self.post1_m2[0] + self.m2.ner_tag)\n        feats.append('AM2L=' + self.post2_m2[0] + self.m2.ner_tag)\n        \n        # No words in between\n        feats.append('WBNULL=' + str(self.adjacent))\n        # Single word in between\n        feats.append('WBONLY=' + self.only_word_between)\n        # First and last word in between\n        feats.append('WBF=' + self.first_between)\n        feats.append('WBL=' + self.last_between)\n        # Other words between\n        words_between = set([vals[0] for vals in self.vals_between])\n        feats.extend(['WBO=' + word for word in words_between])\n        \n        # --- Head Word / Phrase Features ---\n        # Mention head words\n        feats.append('HM1=' + self.m1.head_word)\n        feats.append('HM2=' + self.m2.head_word)\n        # Mention head word conjunction\n        feats.append(''.join(['HM12=', self.m1.head_word + '_', self.m2.head_word]))\n        \n        # Mention head POS\n        feats.append('HPOSM1=' + self.m1.head_pos)\n        feats.append('HPOSM2=' + self.m2.head_pos)\n        # Mention head POS conjunction\n        feats.append(''.join(['HPOSM12=', self.m1.head_pos + '_', self.m2.head_pos]))\n\n        # Mention head word + POS\n        
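# head word/POS conjunction features, e.g. 'HWPOSM1=president_NN'\n        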
feats.append(''.join(['HWPOSM1=', self.m1.head_word + '_', self.m1.head_pos]))\n        feats.append(''.join(['HWPOSM2=', self.m2.head_word + '_', self.m2.head_pos]))\n        \n        # No heads in between\n        feats.append('CHBNULL=' + str(self.in_same_sent and len(self.heads_between) == 0))\n\n        # Single head in between\n        if len(self.heads_between) == 1:\n            # Head word\n            feats.append('CHBONLY=' + self.heads_between[0][0])\n            # Head POS\n            feats.append('CHBONLY_POS=' + self.heads_between[0][1])\n        else:\n            feats.append('CHBONLY=' + '*null*')\n            feats.append('CHBONLY_POS=' + '*null*')\n\n        # First and last head word between\n        if len(self.heads_between) > 1:\n            feats.append('CHBF=' + self.heads_between[0][0])\n            feats.append('CHBL=' + self.heads_between[-1][0])\n        else:\n            feats.append('CHBF=' + '*null*')\n            feats.append('CHBL=' + '*null*')\n\n        # Last head word before mention 1\n        feats.append('CHPM1=' + self.prev_head[0])\n        # Next head word after mention 2\n        feats.append('CHNM2=' + self.next_head[0])\n        \n        # Phrase/Chunk distance\n        if self.in_same_sent:\n            feats.append('CHDIST=' + self.phrase_dist)\n        else:\n            feats.append('CHDIST=' + '*null*')\n        \n        # --- Entity type conjunction ---\n        feats.append(''.join(['ET12=', self.m1.ner_tag, self.m2.ner_tag]))\n        \n        # --- Overlap Features ---\n        # Simple overlap\n        feats.append('M1>M2=' + str(self.m1.contains(self.m2)))\n        feats.append('M1<M2=' + str(self.m2.contains(self.m1)))\n        # Overlap conjoined with entity types\n        feats.append('M1>M2+E12=' + '_'.join([str(self.m1.contains(self.m2)), self.m1.ner_tag, self.m2.ner_tag]))\n        feats.append('M1<M2+E12=' + '_'.join([str(self.m2.contains(self.m1)), self.m1.ner_tag, self.m2.ner_tag]))\n\n        # MALLET instance line: 'instance_id label feat1 feat2 ...'\n        return ' '.join([self.rel_id, self.rel_type] + feats)\n\n    def _find_words_before_m1(self, sents):\n        \"\"\"Finds the words preceding mention 1.\n        \"\"\"\n        sent_i = self.m1.sent_i\n        if self.m1.start > 1:\n            self.pre2_m1 = sents[sent_i][self.m1.start-2]\n        else:\n            self.pre2_m1 = '*null*'\n        if self.m1.start > 0:\n            self.pre1_m1 = sents[sent_i][self.m1.start-1]\n        else:\n            self.pre1_m1 = '*null*'\n\n    def _find_words_after_m2(self, sents):\n        \"\"\"Finds the words following mention 2.\n        \"\"\"\n        sent_i = self.m2.sent_i\n        length = len(sents[sent_i])\n        if self.m2.end < length:\n            self.post1_m2 = sents[sent_i][self.m2.end]\n        else:\n            self.post1_m2 = '*null*'\n        if self.m2.end + 1 < length:\n            self.post2_m2 = sents[sent_i][self.m2.end+1]\n        else:\n            self.post2_m2 = '*null*' \n\n    def _find_words_between(self, sents):\n        \"\"\"Finds the words between mentions 1 and 2. If the mentions are in\n        different sentences, a placeholder '*null*' is used instead.\n        \"\"\"\n        if self.in_same_sent:\n            sent_i = self.m1.sent_i\n            self.adjacent = self.m1.end == self.m2.start\n            if self.m2.start - self.m1.end == 1:\n                self.only_word_between = sents[sent_i][self.m1.end][0]\n            else:\n                self.only_word_between = '*null*'\n            if self.m2.start - self.m1.end > 1:\n                self.first_between = sents[sent_i][self.m1.end][0]\n                self.last_between = sents[sent_i][self.m2.start-1][0]\n            else:\n                self.first_between = '*null*'\n                self.last_between = '*null*'\n        else:\n            self.adjacent = False\n            self.only_word_between = '*null*'\n            if self.m1.end < len(sents[self.m1.sent_i]):\n                self.first_between = sents[self.m1.sent_i][self.m1.end][0]\n            else:\n                self.first_between = '*end*'\n            if self.m2.start > 0:\n                self.last_between = sents[self.m2.sent_i][self.m2.start-1][0]\n            else:\n                self.last_between = '*start*'\n\n    def _find_phrase_dist(self):\n        \"\"\"Finds the number of phrases between mentions 1 and 2.
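 The count is exposed\n        as the CHDIST feature in to_string.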
 If the number\n        is 10 or greater, it is simply set as 'max'.\n        \"\"\"\n        dist = len(self.heads_between)\n        if dist < 10:\n            self.phrase_dist = str(dist)\n        else:\n            self.phrase_dist = 'max'\n\n    def _find_phrase_features(self, sents):\n        \"\"\"Finds various phrase features regarding mentions 1 and 2, including\n        the number of heads in between\n        \"\"\"\n        self.heads_between = []\n        self.vals_between = []\n        sent = sents[self.m1.sent_i]\n        if (not self.adjacent and not self.m1.contains(self.m2)\n            and not self.m2.contains(self.m1)):\n            for i in range(self.m1.end, self.m2.start):\n                vals = sent[i]\n                self.vals_between.append(vals)\n                if vals[2].endswith('1'):\n                    self.heads_between.append(vals)\n        found_prev_head = False\n        start = self.m1.start - 1\n        while start >= 0:\n            if sent[start][2].endswith('1'):\n                found_prev_head = True\n                self.prev_head = sent[start]\n                break\n            start -= 1\n        if not found_prev_head:\n            self.prev_head = ('*null*', '*null*', '*null*')\n        \n        end = self.m2.end\n        found_next_head = False\n        while end < len(sent):\n            if sent[end][2].endswith('1'):\n                found_next_head = True\n                self.next_head = sent[end]\n                break\n            end += 1\n        if not found_next_head:\n            self.next_head = ('*null*', '*null*', '*null*')\n        \n\nclass Mention:\n    \"\"\"Objects of this class represent single entity mentions. It possesses\n    methods to extract features from the mention, as well as a method to see\n    whether it contains a given mention.\n    \"\"\"\n\n    def __init__(self, sent_i, start, end, ner_tag, identifier, string, sents):\n        \"\"\"Initializes a new Mention object.\n\n        Args:\n            sent_i: The string index of the sentence the mention occurs in.\n            start: The string start index of the mention.\n            end: The string end index of the mention.\n            ner_tag: The NER tag of the mention.\n            identifier: The string identifier of the mention.\n            string: The words of the mention, as a string separated by\n                underscores.\n            sents: A list of a list of 3-tuples representing words in the\n                sentences of a document.\n        \"\"\"\n        self.sent_i = int(sent_i)\n        self.start = int(start)\n        self.end = int(end)\n        self.ner_tag = ner_tag\n        self.words = string.lower().split('_')\n        self.word_set = set(self.words) - STOPWORDS\n        self._find_head_vals(sents[self.sent_i])\n\n    def _find_head_vals(self, sent):\n        \"\"\"Finds the word, POS tag, and chunk tag of the head word of the\n        mention.\n        \"\"\"\n        found_head = False\n        for i in range(self.start, self.end):\n            if sent[i][2].endswith('1'):\n                found_head = True\n        if found_head:\n            self.head_word = sent[i][0]\n            self.head_pos = sent[i][1]\n            self.head_chunk = sent[i][2]\n            self.head_i = i\n        else:\n            self.head_word = sent[self.end-1][0]\n            self.head_pos = sent[self.end-1][1]\n            self.head_chunk = sent[self.end-1][2]\n            self.head_i = self.end - 1\n\n    def contains(self, mention):\n        \"\"\"Returns true if the given mention is included in this mention.\n        \"\"\"\n        if self.sent_i == mention.sent_i:\n            return self.start <= mention.start and self.end >= mention.end\n        else:\n            return False\n","repo_name":"matthewgarber/Relation-Assignment","sub_path":"scripts/relation.py","file_name":"relation.py","file_ext":"py","file_size_in_byte":12305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"29324531308","text":"import torch\nimport torch.nn.functional as F\n\n\ndef masked_softmax(logits, mask):\n    mask = torch.ones_like(mask, dtype=torch.bool) ^ mask\n    logits[mask] -= 1.0e10\n    return F.softmax(logits, dim=-1)\n\n\ndef get_trajectory(ppo_agent, context, init_state, max_seq_len, invalid_reward):\n    graph = 
init_state\n\n done = False\n is_nop = False\n in_local = False\n\n trajectory_reward = 0\n trajectory_len = 0\n trajectory_best_gate_count = init_state.gate_count\n intermediate_graphs = []\n\n node_range = []\n\n for t in range(max_seq_len):\n is_nop = False\n\n if node_range == []:\n in_local = False\n\n if not done:\n node, xfer = ppo_agent.select_action(graph, node_range)\n\n next_graph, next_nodes = graph.apply_xfer_with_local_state_tracking(\n xfer=context.get_xfer_from_id(id=xfer),\n node=graph.get_node_from_id(id=node),\n )\n\n # Invalid xfer\n if next_graph == None:\n reward = invalid_reward\n done = True\n next_graph = graph\n # Nop\n elif context.get_xfer_from_id(id=xfer).is_nop:\n reward = 0\n is_nop = True\n next_nodes = []\n node_range = []\n\n # If a node is chosen and it chooses NOP immediately\n # Stop the trajectory\n if not in_local:\n done = True\n in_local = False\n else:\n reward = (graph.gate_count - next_graph.gate_count) * 4\n in_local = True\n\n # Get new node_range\n node_range = torch.tensor(next_nodes, dtype=torch.int64)\n if context.get_xfer_from_id(id=xfer).dst_gate_count != 0:\n src_node_ids, _, edge_ids = next_graph.to_dgl_graph().in_edges(\n node_range, form='all'\n )\n mask = next_graph.to_dgl_graph().edata['reversed'][edge_ids] == 0\n node_range = torch.cat((node_range, src_node_ids[mask]))\n\n trajectory_reward += reward\n\n if trajectory_reward > 0:\n intermediate_graphs.append(next_graph)\n # Add a mark of terminal if the limit is reached\n if t == max_seq_len:\n done = True\n # Upper limit for circuit gate count\n if graph.gate_count > init_state.gate_count * 1.1:\n done = True\n\n reward = torch.tensor(reward, dtype=torch.float)\n ppo_agent.buffer.rewards.append(reward)\n ppo_agent.buffer.is_terminals.append(\n torch.tensor(t == max_seq_len - 1, dtype=torch.bool)\n )\n ppo_agent.buffer.next_graphs.append(next_graph)\n ppo_agent.buffer.next_nodes.append(node_range)\n ppo_agent.buffer.is_start_point.append(t == 0)\n ppo_agent.buffer.is_nops.append(is_nop)\n graph = next_graph\n\n else:\n trajectory_len = t\n break\n\n trajectory_best_gate_count = min(graph.gate_count, trajectory_best_gate_count)\n\n if trajectory_len == 0:\n trajectory_len = max_seq_len\n\n return (\n trajectory_reward,\n trajectory_best_gate_count,\n trajectory_len,\n intermediate_graphs,\n )\n","repo_name":"quantum-compiler/quartz","sub_path":"experiment/deprecated/ppo/ppo_multi_step/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"76"} +{"seq_id":"70754106166","text":"from django.conf.urls import url, include\n\nfrom .views import MainpageView, HistoryListView, FaqListView,ChatListView\n\n\nurlpatterns = [\n\turl(r'^chat/', ChatListView.as_view(), name='chat'),\n url(r'^history/', HistoryListView.as_view(), name='history'),\n url(r'^faq/', FaqListView.as_view(), name='faq'),\n url(r'^', MainpageView.as_view(), name='mainpage')\n]\n","repo_name":"djeck1432/kssa","sub_path":"project/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72585051764","text":"import argparse\nimport pdfkit\n\nclass HTMLToPDFConverter:\n def __init__(self, input_file, output_file):\n self.input_file = input_file\n self.output_file = output_file\n\n def convert(self):\n try:\n pdfkit.from_file(self.input_file, self.output_file)\n print(f\"Conversion 
successful. PDF saved as '{self.output_file}'\")\n        except Exception as e:\n            print(f\"Conversion failed: {str(e)}\")\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Convert HTML to PDF using Python\")\n    parser.add_argument(\"input_file\", help=\"Input HTML file to convert\")\n    parser.add_argument(\"output_file\", help=\"Output PDF file name\")\n\n    args = parser.parse_args()\n\n    converter = HTMLToPDFConverter(args.input_file, args.output_file)\n    converter.convert()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"DhanushNehru/Python-Scripts","sub_path":"pdf_to_html/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"76"} +{"seq_id":"70100746167","text":"import pandas as pd\n\n# Import train_test_split to split data as test and train\nfrom sklearn.model_selection import train_test_split\n\n# Import LinearRegression class to get linear regression object\nfrom sklearn.linear_model import LinearRegression\n\n# Import metrics from sklearn\nfrom sklearn import metrics\n\n# Import gdown module to download files from the google drive\nimport gdown\n\n# Import numpy\nimport numpy as np\n\n# ------------------------------- Get the file from the google drive. ---------------------------------\n\n# Please use the same dataset\nurl = 'https://drive.google.com/file/d/1Hxksp6KSjoex0wdER032QypLUYmdLNeg/view?usp=sharing'\n\n# Derive the file id from the url\nfile_id = url.split('/')[-2]\n\n# Derive the download url of the file\ndownload_url = 'https://drive.google.com/uc?id=' + file_id\n\n# Give the location you want to save it in your local machine\nfile_location = 'average.csv'\n\n# Download the file from drive to your local machine\ngdown.download(download_url, file_location)\n\n\n# ------------------------------- Create the linear regression model --------------------------------------------\n\n# Read the CSV\naverage_dataset = pd.read_csv(file_location)\n\n# Get independent variable columns\nX = average_dataset[['A', 'B', 'C', 'D']]\n\n# Get dependent variable columns\ny = average_dataset['AVERAGE']\n\n# Split dataset into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n# Use LinearRegression class provided by sklearn\nregressor = LinearRegression()\n\n# Train the model\nregressor.fit(X_train, y_train)\n\n# Predict using test values\ny_pred = regressor.predict(X_test)\n\n# Get actual values and predicted values into a table\npredicted_results = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})\n\n\n# ------------------------------- Calculate MAE and RMSE values -------------------------------------------------\n\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n\n\n\n","repo_name":"pyxeda/MiddleSchoolCurriculum","sub_path":"Volume1/Chapter8/LinearRegression/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73594330165","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nname: 东方电子SCADA通用系统信息泄露\nreferer: http://www.wooyun.org/bugs/wooyun-2010-0131500\n         http://www.wooyun.org/bugs/wooyun-2010-0131719\nauthor: Lucifer\ndescription: 
敏感信息泄露,可获取管理员账号和口令。\n'''\nimport sys\nimport requests\nimport warnings\nfrom termcolor import cprint\n\nclass dfe_scada_conf_disclosure_BaseVerify:\n def __init__(self, url):\n self.url = url\n\n def run(self):\n headers = {\n \"User-Agent\":\"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\n }\n payload = \"/modules/manage/server/requestWorkMode.php\"\n vulnurl = self.url + payload\n try:\n req = requests.get(vulnurl, headers=headers, timeout=10, verify=False)\n if r\"productName\" in req.text and r\"adminPassword\" in req.text:\n cprint(\"[+]存在东方电子SCADA通用系统信息泄露漏洞...(高危)\\tpayload: \"+vulnurl, \"red\")\n else:\n cprint(\"[-]不存在dfe_scada_conf_disclosure漏洞\", \"white\", \"on_grey\")\n\n except:\n cprint(\"[-] \"+__file__+\"====>可能不存在漏洞\", \"cyan\")\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n testVuln = dfe_scada_conf_disclosure_BaseVerify(sys.argv[1])\n testVuln.run()","repo_name":"Lucifer1993/AngelSword","sub_path":"industrial/dfe_scada_conf_disclosure.py","file_name":"dfe_scada_conf_disclosure.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":1424,"dataset":"github-code","pt":"76"} +{"seq_id":"1925493126","text":"import numpy\r\nimport random\r\n\r\nm = abs(int(input('Nhập m = ')))\r\ndef tao_list_sothuc(m, a, b):\r\n x = [random.uniform(a, b) for i in range(m)]\r\n print(\"Sinh list số thực :\", x)\r\n return x\r\ndef sap_xep_tang_dan(x):\r\n x = sorted(x, reverse=False)\r\n print(\"Sắp xếp list tăng dần : \", x)\r\n return x\r\ndef sap_xep_giam_dan(x):\r\n x = sorted(x, reverse=True)\r\n print(\"Sắp xếp list giảm dần : \", x)\r\n return x\r\ndef sap_xep_sothuc(x, flag):\r\n if flag == True:\r\n return sap_xep_tang_dan(x)\r\n else:\r\n return sap_xep_giam_dan(x)\r\ndef tim_kiem(x, n):\r\n hv = []\r\n for i in range(len(x)):\r\n if x[i] == n:\r\n hv.append(i)\r\n if len(hv) == 0:\r\n print(\"Không tìm thấy số n trong list\")\r\n else:\r\n print(\"Tìm thấy số n trong list tại các vị trí: \", hv)\r\ndef luu_tap_tin_vanban(x, file):\r\n with open(file, 'w') as f:\r\n for item in x:\r\n f.write('%s/n'%item)\r\n print('Tập tin văn bản: ')\r\ndef luu_tap_tin_vanban2(x, file):\r\n with open(file, 'a+') as f:\r\n for item in x:\r\n f.write('%s/n'%item)\r\n print('Tập tin văn bản: ')\r\ndef luu_tap_tin_nhiphan(x, file):\r\n with open(file, 'wb') as f:\r\n for item in x:\r\n chuyendoi = int(item)\r\n f.write(chuyendoi.to_bytes(8, 'big'))\r\n print('Tập tin nhị phân: ')\r\ndef main():\r\n x = tao_list_sothuc(m, 2, 16)\r\n luu_tap_tin_vanban(x, r'D:/Đồ án/doan1.txt')\r\n f = sap_xep_sothuc(x, False)\r\n luu_tap_tin_vanban2(f, r'D:/Đồ án/doan1.txt')\r\n tim_kiem(x, 2)\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"hoangviet2k4/do-an","sub_path":"Phần1/Câu 2.py","file_name":"Câu 2.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"2807320931","text":"import xml.etree.ElementTree as ET\nfrom spyne import Application, ServiceBase, rpc, Integer, Unicode\nfrom spyne.protocol.soap import Soap11\nfrom spyne.server.wsgi import WsgiApplication\nfrom spyne.util.wsgi_wrapper import run_twisted\nfrom suds import WebFault\nfrom suds.client import Client\nimport sys\n\n\ndef getExtractInfo(demande_xml):\n url = \"http://localhost:8000/ServiceExtractionInformation?wsdl\"\n client = Client(url, cache=None)\n\n try:\n # Appelez la méthode 
du service en lui passant le JSON comme argument\n response = client.service.ExtraireInformations(demande_xml)\n\n # Vérifiez la réponse du service\n print(\"Réponse du service ExtractionInformation:\")\n print(response)\n\n return response\n\n except WebFault as e:\n # En cas d'erreur, imprimez le message d'erreur\n print(f\"Erreur lors de l'appel au service : {e}\")\n except Exception as e:\n # Gérez d'autres exceptions possibles ici\n print(f\"Une erreur s'est produite : {e}\")\n\n\ndef getScoringPret(tree):\n # Créez un client SOAP pour le service solvabilité\n url = \"http://localhost:8001/VerifSolvabilite?wsdl\"\n client = Client(url, cache=None)\n\n try:\n # Appelez la méthode du service en lui passant le XML comme argument\n response = client.service.calculateScore(tree)\n\n # Vérifiez la réponse du service\n print(\"Réponse du service solvabilité:\")\n print(f\"Response : {response}\")\n\n return response\n except WebFault as e:\n # En cas d'erreur, imprimez le message d'erreur\n print(f\"Erreur lors de l'appel au service : {e}\")\n except Exception as e:\n # Gérez d'autres exceptions possibles ici\n print(f\"Une erreur s'est produite : {e}\")\n\n\ndef getApprobationPret(evalProp, infoClient, scoreSolvabilite):\n # Créez un client SOAP pour le service approbation\n url = \"http://localhost:8004/DecisionApprobation?wsdl\"\n client = Client(url, cache=None)\n\n try:\n\n # Appelez la méthode du service en lui passant le XML comme argument\n response = client.service.approbationPret(evalProp, infoClient, scoreSolvabilite)\n\n # Vérifiez la réponse du service\n print(\"Réponse du service approbation:\")\n print(f\"Response : {response}\")\n\n return response\n except WebFault as e:\n # En cas d'erreur, imprimez le message d'erreur\n print(f\"Erreur lors de l'appel au service : {e}\")\n except Exception as e:\n # Gérez d'autres exceptions possibles ici\n print(f\"Une erreur s'est produite : {e}\")\n\n\ndef getEvalProp(infoProp):\n # Créez un client SOAP pour le service approbation\n url = \"http://localhost:8003/ServiceEvaluationPropriete?wsdl\"\n client = Client(url, cache=None)\n\n try:\n\n # Appelez la méthode du service en lui passant le XML comme argument\n response = client.service.EvaluerPropriete(infoProp)\n\n # Vérifiez la réponse du service\n print(\"Réponse du service eval prop:\")\n print(f\"Response : {response}\")\n\n return response\n except WebFault as e:\n # En cas d'erreur, imprimez le message d'erreur\n print(f\"Erreur lors de l'appel au service : {e}\")\n except Exception as e:\n # Gérez d'autres exceptions possibles ici\n print(f\"Une erreur s'est produite : {e}\")\n\n\ndef lecture_bdd(xml_db):\n root = ET.parse(xml_db).getroot()\n prenom_client = root.find('.//PrenomClient')\n prenom_client = prenom_client.text if prenom_client is not None else ''\n nom_client = root.find('.//NomClient')\n nom_client = nom_client.text if nom_client is not None else ''\n adresse_rue = root.find('.//Adresse/Rue')\n adresse_rue = adresse_rue.text if adresse_rue is not None else ''\n adresse_ville = root.find('.//Adresse/Ville')\n adresse_ville = adresse_ville.text if adresse_ville is not None else ''\n adresse_code_postal = root.find('.//Adresse/CodePostal')\n adresse_code_postal = adresse_code_postal.text if adresse_code_postal is not None else ''\n adresse_pays = root.find('.//Adresse/Pays')\n adresse_pays = adresse_pays.text if adresse_pays is not None else ''\n email = root.find('.//Email')\n email = email.text if email is not None else ''\n numero_telephone = 
root.find('.//NumeroTelephone')\n numero_telephone = numero_telephone.text if numero_telephone is not None else ''\n montant_pret_demande = root.find('.//MontantPretDemande')\n montant_pret_demande = int(montant_pret_demande.text) if montant_pret_demande is not None else 0\n duree_pret = root.find('.//DureePret')\n duree_pret = int(duree_pret.text) if duree_pret is not None else 0\n description_propriete_etage = root.find('.//DescriptionPropriete/Etage')\n description_propriete_etage = description_propriete_etage.text if description_propriete_etage is not None else ''\n description_propriete_taille = root.find('.//DescriptionPropriete/Taille')\n description_propriete_taille = description_propriete_taille.text if description_propriete_taille is not None else ''\n description_propriete_jardin = root.find('.//DescriptionPropriete/Jardin')\n description_propriete_jardin = description_propriete_jardin.text if description_propriete_jardin is not None else ''\n description_propriete_quartier = root.find('.//DescriptionPropriete/Quartier')\n description_propriete_quartier = description_propriete_quartier.text if description_propriete_quartier is not None else ''\n description_propriete_tranquilite = root.find('.//DescriptionPropriete/Tranquilite')\n description_propriete_tranquilite = description_propriete_tranquilite.text if description_propriete_tranquilite is not None else ''\n description_propriete_annnee_construction = root.find('.//DescriptionPropriete/AnneeConstruction')\n description_propriete_annnee_construction = int(\n description_propriete_annnee_construction.text) if description_propriete_annnee_construction is not None else 0\n revenu_mensuel = root.find('.//RevenuMensuel')\n revenu_mensuel = int(revenu_mensuel.text) if revenu_mensuel is not None else 0\n depenses_mensuelles = root.find('.//DepensesMensuelles')\n depenses_mensuelles = int(depenses_mensuelles.text) if depenses_mensuelles is not None else 0\n informations_structurees = {\n 'PrenomClient': prenom_client,\n 'NomClient': nom_client,\n 'Adresse': {\n 'Rue': adresse_rue,\n 'Ville': adresse_ville,\n 'CodePostal': adresse_code_postal,\n 'Pays': adresse_pays\n },\n 'Email': email,\n 'NumeroTelephone': numero_telephone,\n 'MontantPretDemande': montant_pret_demande,\n 'DureePret': duree_pret,\n 'DescriptionPropriete': {\n 'Etage': description_propriete_etage,\n 'Taille': description_propriete_taille,\n 'Jardin': description_propriete_jardin,\n 'Quartier': description_propriete_quartier,\n 'Tranquilite': description_propriete_tranquilite,\n 'AnneeConstruction': description_propriete_annnee_construction\n },\n 'RevenuMensuel': revenu_mensuel,\n 'DepensesMensuelles': depenses_mensuelles\n }\n return informations_structurees\n\n\ndef to_service_verification_solvabilite(lecture_xml_db):\n informations_structurees = lecture_xml_db\n\n root = ET.Element('DemandePret')\n prenom_client = ET.SubElement(root, 'PrenomClient')\n prenom_client.text = informations_structurees['PrenomClient']\n nom_client = ET.SubElement(root, 'NomClient')\n nom_client.text = informations_structurees['NomClient']\n revenu_mensuel = ET.SubElement(root, 'RevenuMensuel')\n revenu_mensuel.text = str(informations_structurees['RevenuMensuel'])\n depenses_mensuelles = ET.SubElement(root, 'DepensesMensuelles')\n depenses_mensuelles.text = str(informations_structurees['DepensesMensuelles'])\n tree = ET.tostring(root)\n return tree.decode('utf-8')\n\n\ndef to_service_evaluation_propriete(lecture_xml_db):\n informations_structurees = lecture_xml_db\n\n root = 
ET.Element('DemandePret')\n adresse = ET.SubElement(root, 'Adresse')\n adresse_rue = ET.SubElement(adresse, 'Rue')\n adresse_rue.text = informations_structurees['Adresse']['Rue']\n adresse_ville = ET.SubElement(adresse, 'Ville')\n adresse_ville.text = informations_structurees['Adresse']['Ville']\n adresse_code_postal = ET.SubElement(adresse, 'CodePostal')\n adresse_code_postal.text = informations_structurees['Adresse']['CodePostal']\n adresse_pays = ET.SubElement(adresse, 'Pays')\n adresse_pays.text = informations_structurees['Adresse']['Pays']\n montant_pret_demande = ET.SubElement(root, 'MontantPretDemande')\n montant_pret_demande.text = str(informations_structurees['MontantPretDemande'])\n description_propriete = ET.SubElement(root, 'DescriptionPropriete')\n description_propriete_etage = ET.SubElement(description_propriete, 'Etage')\n description_propriete_etage.text = informations_structurees['DescriptionPropriete']['Etage']\n description_propriete_taille = ET.SubElement(description_propriete, 'Taille')\n description_propriete_taille.text = informations_structurees['DescriptionPropriete']['Taille']\n description_propriete_jardin = ET.SubElement(description_propriete, 'Jardin')\n description_propriete_jardin.text = informations_structurees['DescriptionPropriete']['Jardin']\n description_propriete_quartier = ET.SubElement(description_propriete, 'Quartier')\n description_propriete_quartier.text = informations_structurees['DescriptionPropriete']['Quartier']\n description_propriete_tranquilite = ET.SubElement(description_propriete, 'Tranquilite')\n description_propriete_tranquilite.text = informations_structurees['DescriptionPropriete']['Tranquilite']\n description_propriete_annnee_construction = ET.SubElement(description_propriete, 'AnneeConstruction')\n description_propriete_annnee_construction.text = str(\n informations_structurees['DescriptionPropriete']['AnneeConstruction'])\n tree = ET.tostring(root)\n tree = tree.decode('utf-8')\n\n return tree\n\n\ndef to_service_approbation_pret(lecture_xml_db):\n informations_structurees = lecture_xml_db\n\n root = ET.Element('DemandePret')\n revenu_mensuel = ET.SubElement(root, 'RevenuMensuel')\n revenu_mensuel.text = str(informations_structurees['RevenuMensuel'])\n depenses_mensuelles = ET.SubElement(root, 'DepensesMensuelles')\n depenses_mensuelles.text = str(informations_structurees['DepensesMensuelles'])\n montant_pret_demande = ET.SubElement(root, 'MontantPretDemande')\n montant_pret_demande.text = str(informations_structurees['MontantPretDemande'])\n duree_pret_demande = ET.SubElement(root, 'DureePret')\n duree_pret_demande.text = str(informations_structurees['DureePret'])\n tree = ET.tostring(root)\n tree = tree.decode('utf-8')\n\n return tree\n\n\nclass DemandePret(ServiceBase):\n\n @rpc(Unicode, _returns=Unicode)\n def demandePret(ctx, demande_xml):\n try:\n xml_db = getExtractInfo(demande_xml)\n infoClient = lecture_bdd(xml_db)\n infoClientSolv = to_service_verification_solvabilite(infoClient)\n scoreSolvabilite = getScoringPret(infoClientSolv)\n\n tree_eval_prop = to_service_evaluation_propriete(infoClient)\n eval_prop = getEvalProp(tree_eval_prop)\n\n infoClientAprob = to_service_approbation_pret(infoClient)\n approbationPret = getApprobationPret(eval_prop, infoClientAprob, scoreSolvabilite)\n\n return approbationPret\n except Exception as e:\n print(f\"Une erreur s'est produite : {e}\")\n # Vous pouvez retourner une valeur ou un message d'erreur personnalisé ici\n\n\nif __name__ == '__main__':\n application = 
Application([DemandePret],\n tns='DemandePret',\n in_protocol=Soap11(validator='lxml'),\n out_protocol=Soap11())\n\n wsgi_app = WsgiApplication(application)\n\n twisted_apps = [\n (wsgi_app, b'DemandePret')\n ]\n\n sys.exit(run_twisted(twisted_apps, 8002))\n","repo_name":"nlsferrara/TD1-SOA-UVSQ","sub_path":"service_demande_pret.py","file_name":"service_demande_pret.py","file_ext":"py","file_size_in_byte":12151,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"44732272896","text":"import MeCab\n\nwakati = MeCab.Tagger()\n\ntext = \"テストメッセージ\"\nnode = wakati.parseToNode(text)\n\nnodes = []\nwhile node:\n\t# MeCab Node Structure 概要\n\t# https://taku910.github.io/mecab/doxygen/structmecab__node__t.html\n\t# \n\t# prev: 前のnodeへのポインタを取得\n\t# next: 次のnodeへのポインタを取得\n\t# enext: 同じ位置で終了するノードへのポインタ\n\t# bnext: 同じ位置から始まるノードへのポインタ\n\t# rpath: 右パスへのポインタ\n\t# lpath: 左パスへのポインタ\n\t# \n\t# surface: 形態素の表層文字列\n\t# feature: 特徴文字列 (CSVで品詞などが返ってくる)\n\t# id: ユニークなノードID\n\t# length: 表層文字���の長さ\n\t# rlength: 形態素解析を実行する前の空白を含む表層文字列の長さ。\n\t# rcAttr: 右文脈ID\n\t# lcAttr: 左文脈ID\n\t# posid: ユニークな品詞ID (pos-id.def参照)\n\t# char_type: 文字種情報 (char.def参照)\n\t# stat: 形態素種類 (0: 通常, 1: 未知語, 2:文頭BOS, 3:文末EOS)\n\t# isbest: このノードが最適なノード=bestであれば1\n\t# alpha: 前方累積ログ(forward accumulative log)の合計\n\t# beta: 後方累積ログ(backword accumulative log)の合計\n\t# prob: 周辺確率\n\t# wcost: 単語生起コスト\n\t# cost: BOSノードからこのノードまでの最高の累積コスト\n\t#\n\t# 生起コストの概念\n\t# http://www.mwsoft.jp/programming/munou/mecab_nitteretou.html\n\tword = node.surface\n\tstat = node.stat\n\twcost = node.wcost\n\tcost = node.cost\n\tnodes.append([word, stat, wcost, cost])\n\tnode = node.next\n\nprint(nodes)\n","repo_name":"yukia3e/learning-mecab-python3","sub_path":"02_parse_to_node.py","file_name":"02_parse_to_node.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"17548577346","text":"import http.client\nimport json\n#ejemplo consulta base datos.\n\nconn = http.client.HTTPSConnection(\"api.fda.gov\")\nconn.request(\"GET\", \"/drug/label.json?search=acetylsalicylate&limit=20\")\nr1 = conn.getresponse()\nprint(\"Estado y reason: \",r1.status, r1.reason)\n\ndata1 = r1.read().decode(\"utf-8\")\nconn.close()\n\n# -- Read the data as a json object\ndata = json.loads(data1)\nmeta = data['meta']\ndisclaimer = meta['disclaimer']\nterms = meta['terms']\nlicense2 = meta['license']\nmeta_results = meta['results']\nlimit = meta_results['limit']\ntotal = meta_results['total']\nresults = data['results']\n\n#iiut\nprint(\"\\n\")\nprint(\"* SHOWN: {}/{}\".format(limit, total))\nprint(\" Data: {}\".format(data))\nfor c in results:\n print(c,'\\n')\n print(c['openfda'])\nprint(\"-------------\")\n\nmanufacturers = []\nfor n, drug in enumerate(results):\n if drug['openfda']:\n manufacturer_name = drug['openfda']['manufacturer_name'][0]\n try:\n manufacturers.index(manufacturer_name)\n except ValueError:\n manufacturers.append(manufacturer_name)\nprint(\"* kkk[{}]: {}\".format(len(manufacturers), manufacturer_name))","repo_name":"ahoyter/openfda","sub_path":"practica1.py","file_name":"practica1.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"9089442091","text":"# a=[2,\"kevin\",7,9.99,0]\r\n# for x in range(len(a)):\r\n# 
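(commented-out demo: prints each element of the mixed-type list by index)\r\n# 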
print(a[x])\r\n\r\ngrades=[33,45,67,5,6,77,33,67,77]\r\ngrades.append(66)\r\na=[1,2,3,4,5]\r\nb=[5,67,8,90,6]\r\nc=a+b\r\na.extend(b)\r\nprint(a)\r\nprint(b)\r\nprint(c)\r\nprint(b.count(5))\r\n\r\nb.remove(90)\r\nprint(b)\r\n\r\na.pop(1)\r\nprint(a)\r\n\r\na[3]=99\r\nprint(a.index(99))\r\n\r\nunique_grades= list(dict.fromkeys(grades))\r\n\r\nunique_grades.insert(4,99)\r\nprint(unique_grades)\r\n\r\ns=sum(grades)\r\nprint(s)\r\n\r\nm=1\r\nfor i in grades:\r\n m=m*i\r\nprint(m) ","repo_name":"kevinparre/fis_python","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"37607603452","text":"# https://www.acmicpc.net/problem/11055\n\nN = int(input())\nnums = list(map(int, input().split()))\n\ndp = [0] * N\nacc_sum = [0] * N\n\ndp[0] = nums[0]\nacc_sum[0] = nums[0]\n\nfor i in range(1, N):\n\n cur_sum = 0\n for j in range(i):\n\n if nums[j] < nums[i] and acc_sum[j] > cur_sum:\n cur_sum = acc_sum[j]\n\n acc_sum[i] = cur_sum + nums[i]\n dp[i] = max(dp[i-1], acc_sum[i])\n\n# print(acc_sum)\n# print(dp)\nprint(dp[N-1])\n","repo_name":"HBell11/TIL","sub_path":"algorithm/baekjoon/DP/11055_largest_increasing_subsequence.py","file_name":"11055_largest_increasing_subsequence.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"39803307879","text":"from TextBox import TextBox\r\nfrom Button import Button\r\n\r\nclass Item:\r\n def __init__(self, name, price, quantity, description=\"\") -> None:\r\n self.name = name\r\n self.price = price\r\n self.quantity = quantity\r\n self.description = description\r\n\r\n @staticmethod\r\n def SetProgramManager(programMan):\r\n global pm\r\n pm = programMan\r\n\r\n def ShowItem(self, i=0):\r\n currentY = 200 + 90 * i\r\n itemName = TextBox(50, currentY, 500, 50, text=self.name, textSize=20, textColour=(255, 255, 255), static=True, border=True)\r\n itemName.CenterTextY()\r\n \r\n itemPrice = TextBox(550, currentY, 100, 50, text=f\"${self.price}\", textSize=20, textColour=(255, 255, 255), static=True, border=True)\r\n itemPrice.CenterTextY()\r\n\r\n itemDescription = TextBox(50, currentY+50, 400, 35, text=self.description, textSize=14, textColour=(255, 255, 255), static=True, border=True)\r\n itemDescription.CenterTextY()\r\n\r\n itemQuantity = TextBox(550, currentY+50, 100, 35, text=f\"x{self.quantity}\", textSize=14, textColour=(255, 255, 255), static=True, border=True)\r\n itemQuantity.CenterTextY()\r\n\r\n addToCart = Button(650, currentY, 50, 45, self.AddToCart, text=\"+1\", textSize=17, textColour=(255, 255, 255), bgColour=(50, 50, 50))\r\n removeFromCart = Button(650, currentY+45, 50, 40, self.RemoveFromCart, text=\"-1\", textSize=17, textColour=(255, 255, 255), bgColour=(50, 50, 50))\r\n\r\n return [itemName, itemPrice, itemDescription, itemQuantity, addToCart, removeFromCart]\r\n\r\n def AddToCart(self):\r\n pm.AddToCart(self)\r\n\r\n def RemoveFromCart(self):\r\n pm.RemoveFromCart(self)\r\n","repo_name":"Unsa4/SHOPPING-CART","sub_path":"Item.py","file_name":"Item.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"73250123766","text":"from django.shortcuts import render, redirect, HttpResponse\nfrom django.urls import reverse\n\nfrom .models import Comment\nfrom django.http import JsonResponse\nfrom 
django.contrib.contenttypes.models import ContentType\nfrom .form import CommentForm\nfrom django.core.mail import send_mail\n# Create your views here.\n\n\ndef update_comment(request):\n # user = request.user\n # text = request.POST.get('text', '')\n # if text == '':\n # return HttpResponse('评论不能为空')\n #\n # content_type = request.POST.get('content_type', '')\n # object_id = int(request.POST.get('object_id', ''))\n # model_class = ContentType.objects.get(model=content_type).model_class()\n # model_obj = model_class.objects.get(pk=object_id)\n #\n # comment = Comment()\n # comment.user = user\n # comment.text = text\n # comment.content_object = model_obj\n # comment.save()\n #\n # referer = request.META.get('HTTP_REFERER', '/')\n # return redirect(referer)\n referer = request.META.get('HTTP_REFERER', '/')\n comment_form = CommentForm(request.POST, user=request.user)\n if comment_form.is_valid():\n comment = Comment()\n comment.user = request.user\n comment.text = comment_form.cleaned_data['text']\n comment.content_object = comment_form.cleaned_data['content_object']\n\n parent = comment_form.cleaned_data['parent']\n if not parent is None:\n comment.root = parent.root if not parent.root is None else parent\n comment.parent = parent\n comment.reply_to = parent.user\n\n # 保存评论\n comment.save()\n\n # 发送邮件\n comment.send_mail()\n\n # return redirect(referer)\n data = {}\n data['status'] = 'SUCCESS'\n data['username'] = comment.user.get_nickname_or_username()\n data['comment_time'] = comment.comment_time.strftime('%Y-%m-%d %H:%M:%S')\n data['text'] = comment.text\n if not parent is None:\n data['reply_to'] = comment.reply_to.get_nickname_or_username()\n else:\n data['reply_to'] = ''\n data['pk'] = comment.pk\n data['root_pk'] = comment.root.pk if not comment.root is None else ''\n return JsonResponse(data)\n else:\n # return HttpResponse('评论出错')\n data = {}\n data['status'] = 'ERROR'\n data['message'] = list(comment_form.errors.values())[0][0]\n return JsonResponse(data)\n","repo_name":"obtheway/mysite","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"1774683798","text":"from itertools import combinations\r\nfrom collections import Counter\r\nimport pandas as pd\r\ndt=pd.read_csv(\"heh.csv\")\r\ncount = Counter()\r\nfor row in dt['grouped']:\r\n rowl = row.split(\",\")\r\n count.update(combinations(rowl, 2))\r\nfor key, value in count.most_common(10):\r\n print(key, value)\r\nrd=pd.DataFrame(count.most_common(10),columns=[\"products\",\"count\"])\r\nprint(rd)\r\n","repo_name":"thestig-DharshanK/python","sub_path":"combinations and counter.py","file_name":"combinations and counter.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"72178907125","text":"from typing import List\n\nimport os\nimport os.path as osp\nimport pickle\nimport numpy as np\nimport cv2\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom datapreparation.kitti360pose.utils import (\n CLASS_TO_LABEL,\n LABEL_TO_CLASS,\n CLASS_TO_MINPOINTS,\n SCENE_NAMES,\n CLASS_TO_INDEX,\n)\nfrom datapreparation.kitti360pose.imports import Object3d, Cell, Pose\nfrom datapreparation.kitti360pose.drawing import (\n show_pptk,\n show_objects,\n plot_cell,\n plot_pose_in_best_cell,\n)\n\n\nclass Kitti360BaseDataset(Dataset):\n def __init__(self, base_path, scene_name):\n 
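# per scene, one pickle of cells and one of poses is expected on disk (loaded below)\n        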
\"\"\"Base dataset for loading Kitti360Pose data.\n\n Args:\n base_path: Base path for the Kitti360Pose scenes\n scene_name: Name of the scene to load\n \"\"\"\n self.scene_name = scene_name\n self.cells = pickle.load(\n open(osp.join(base_path, \"cells\", f\"{scene_name}.pkl\"), \"rb\")\n ) # Also use objects from here for classification\n self.cells_dict = {cell.id: cell for cell in self.cells}\n\n cell_ids = [cell.id for cell in self.cells]\n assert len(np.unique(cell_ids)) == len(cell_ids)\n\n self.poses = pickle.load(open(osp.join(base_path, \"poses\", f\"{scene_name}.pkl\"), \"rb\"))\n\n self.class_to_index = CLASS_TO_INDEX\n\n self.hint_descriptions = [\n Kitti360BaseDataset.create_hint_description(pose, self.cells_dict[pose.cell_id])\n for pose in self.poses\n ]\n\n def __getitem__(self, idx):\n raise Exception(\"Not implemented: abstract class.\")\n\n def create_hint_description(pose: Pose, cell: Cell):\n hints = []\n # cell_objects_dict = {obj.id: obj for obj in cell.objects}\n for descr in pose.descriptions:\n # obj = cell_objects_dict[descr.object_id]\n # hints.append(f'The pose is {descr.direction} of a {obj.get_color_text()} {obj.label}.')\n hints.append(\n f\"The pose is {descr.direction} of a {descr.object_color_text} {descr.object_label}.\"\n )\n return hints\n\n def get_known_classes(self):\n return list(self.class_to_index.keys())\n\n def get_known_words(self):\n words = []\n for hints in self.hint_descriptions:\n for hint in hints:\n words.extend(hint.replace(\".\", \"\").replace(\",\", \"\").lower().split())\n return list(np.unique(words))\n\n def __len__(self):\n raise Exception(\"Not implemented: abstract class.\")\n\n def collate_fn(data):\n batch = {}\n for key in data[0].keys():\n batch[key] = [data[i][key] for i in range(len(data))]\n return batch\n\n\nif __name__ == \"__main__\":\n base_path = \"./data/k360_decouple\"\n folder_name = \"2013_05_28_drive_0003_sync\"\n\n dataset = Kitti360BaseDataset(base_path, folder_name)\n","repo_name":"mako443/Text2Pos-CVPR2022","sub_path":"dataloading/kitti360pose/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"76"} +{"seq_id":"3936179356","text":"#!/usr/bin/env python \n\n#import matplotlib.pyplot as plt\n#import networkx as nx\nimport os\n\n\n# Method to process input for 6.2\n# Process input from text file to python dictionary (graph)\ndef process_input():\n\n # Input is transformed to a dictionary\n graph = {}\n graph[str('S')] = {}\n graph[str('T')] = {}\n\n file_w = open('inputfile_workers.txt', 'rb')\n file_t = open('inputfile_tasks.txt', 'rb')\n \n count_w = 0\n count_t = 0\n\n # Process task file\n for i, line in enumerate(file_t):\n if i == 0:\n count_t = int(line.strip())\n elif i > 0 and i <= count_t:\n tuple1 = tuple(line.strip().split(','))\n \n newNode = str(tuple1[0]).strip()\n graph[newNode] = {}\n elig = tuple1[3].strip()[1:-1].split(';')\n \n graph[str('S')][newNode] = {'min':int(tuple1[1]), 'max':int(tuple1[2])}\n for e in tuple(elig):\n graph[newNode][str(e)]= {'min':0, 'max':1}\n\n # Process worker file \n for i, line in enumerate(file_w):\n if i == 0 :\n count_w = int(line.strip())\n elif i > 0 and i <= count_w:\n tuple2 = tuple(line.strip().split(','))\n \n newNode = tuple2[0].strip()\n graph[str(newNode)]= {} \n graph[str(newNode)][str('T')] = {'min':int(tuple2[1]), 'max':int(tuple2[2])}\n\n # Print the graph\n #for k, v in sorted(graph.items()):\n # print k, graph[k]\n return 
graph\n\n\n# Please install networks and matplotlib before uncommenting this method\n# After installation, uncomment the imports at the top of thif script\n\n# Method to draw graph\n'''\ndef draw_graph(graph, title):\n nodes = graph.keys()\n\n #weighted edges\n w_edges = []\n edge_labels = {}\n\n #Create a directed graph\n G=nx.DiGraph()\n\n for n in graph.keys():\n G.add_node(n)\n for e in graph[n]:\n wgt = graph[n][e]['max']\n G.add_edge(n,e)\n attr_list = {}\n attr_list['capacity'] = wgt\n w_edges.append((n, e, attr_list))\n edge_labels[(n,e)] = wgt\n\n G.add_edges_from(w_edges)\t\n #print 'w_edges %s' % w_edges\n #print G.edges(), G.nodes()\n\n pos=nx.spring_layout(G)\n \n # nodes\n nx.draw_networkx_nodes(G,pos,node_size=500, node_color='b', label= 'node')\n \n # labels\n nx.draw_networkx_labels(G,pos,font_size=10, font_color='black')\n\n # edges\n nx.draw_networkx_edges(G,pos, width=2,alpha=0.5,edge_color='black')\n\n #nx.draw(G)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels,label_pos=0.5)\n \n plt.axis('off')\n \n #if not os.path.exists(\"/Graphs\"):\n #\tos.makedirs(\"/Graphs\")\n plt.savefig(title+\".png\") # save as png\n plt.show() # display\n return nx.min_cut(G, 'S', 'T')\n\n\ngraph1 = process_input()\ngraph2 = process_input_6_3()\ndraw_graph(graph1, 'graph1')\ndraw_graph(graph2, 'graph2')\n'''\n","repo_name":"gowthamsathiya/Efficient_Assignment_Of_Task_Workers","sub_path":"Problem_2/process_input.py","file_name":"process_input.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"76"} +{"seq_id":"30774018843","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\nimport torch\n\n\ndef uncond_dream(\n if_I,\n if_II=None,\n if_III=None,\n *,\n seed=None,\n batch_size=1,\n aspect_ratio='1:1',\n if_I_kwargs=None,\n if_II_kwargs=None,\n if_III_kwargs=None,\n progress=True,\n return_tensors=False,\n disable_watermark=False,\n):\n \"\"\"\n Do unconditional image generation with deepfloyd-IF.\n\n :param optional dict if_I_kwargs:\n \"dynamic_thresholding_p\": 0.95, [0.5, 1.0] it controls color saturation on high cfg values\n \"dynamic_thresholding_c\": 1.5, [1.0, 15.0] clips the limiter to avoid greyish images on high limiter values\n \"sample_timestep_respacing\": \"150\", see available modes IFBaseModule.respacing_modes or use custom\n :param optional dict if_II_kwargs:\n \"dynamic_thresholding_p\": 0.95, [0.5, 1.0] it controls color saturation on high cfg values\n \"dynamic_thresholding_c\": 1.0, [1.0, 15.0] clips the limiter to avoid greyish images on high limiter values\n \"aug_level\": 0.25, [0.0, 1.0] adds additional augmentation to generate more realistic images\n \"sample_timestep_respacing\": \"smart50\", see available modes IFBaseModule.respacing_modes or use custom\n\n :param deepfloyd_if.modules.IFStageI if_I: obj\n :param deepfloyd_if.modules.IFStageII if_II: obj\n :param deepfloyd_if.modules.IFStageIII if_III: obj\n :param int seed: int, in case None will use random value\n :param aspect_ratio:\n :param progress:\n :return:\n \"\"\"\n if seed is None:\n seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))\n # First stage generation\n if_I.seed_everything(seed)\n\n if_I_kwargs = if_I_kwargs or {}\n if_I_kwargs['seed'] = seed\n if_I_kwargs['aspect_ratio'] = aspect_ratio\n if_I_kwargs['progress'] = progress\n if_I_kwargs['batch_size'] = batch_size\n\n stageI_generations, _ = if_I.uncond_generation(**if_I_kwargs)\n pil_images_I = 
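# Example usage (hypothetical values; a minimal sketch, not from the original source):\n    #   result = uncond_dream(if_I, if_II, seed=0, batch_size=2, aspect_ratio='16:9',\n    #                         if_I_kwargs={'sample_timestep_respacing': '150'},\n    #                         if_II_kwargs={'aug_level': 0.25})\n    #   result['I'][0].save('stage_I_sample.png')  # stage outputs are PIL images\n    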
if seed is None:\n        seed = int((datetime.utcnow().timestamp() * 10 ** 6) % (2 ** 32 - 1))\n    # First stage generation\n    if_I.seed_everything(seed)\n\n    if_I_kwargs = if_I_kwargs or {}\n    if_I_kwargs['seed'] = seed\n    if_I_kwargs['aspect_ratio'] = aspect_ratio\n    if_I_kwargs['progress'] = progress\n    if_I_kwargs['batch_size'] = batch_size\n\n    stageI_generations, _ = if_I.uncond_generation(**if_I_kwargs)\n    pil_images_I = if_I.to_images(stageI_generations, disable_watermark=disable_watermark)\n\n    result = {'I': pil_images_I}\n\n    if if_II is not None:\n        if_II_kwargs = if_II_kwargs or {}\n        if_II_kwargs['low_res'] = stageI_generations\n        if_II_kwargs['seed'] = seed\n        if_II_kwargs['progress'] = progress\n        if_II_kwargs['batch_size'] = batch_size\n\n        stageII_generations, _meta = if_II.uncond_generation(**if_II_kwargs)\n        pil_images_II = if_II.to_images(stageII_generations, disable_watermark=disable_watermark)\n        result[\"II\"] = pil_images_II\n    else:\n        stageII_generations = None\n\n    if if_II is not None and if_III is not None:\n        if_III_kwargs = if_III_kwargs or {}\n\n        stageIII_generations = []\n        for idx in range(len(stageII_generations)):\n            if_III_kwargs['low_res'] = stageII_generations[idx:idx + 1]\n            if_III_kwargs['seed'] = seed\n            if_III_kwargs['progress'] = progress\n            if_III_kwargs['batch_size'] = batch_size\n            _stageIII_generations, _meta = if_III.uncond_generation(**if_III_kwargs)\n            stageIII_generations.append(_stageIII_generations)\n\n        stageIII_generations = torch.cat(stageIII_generations, 0)\n        pil_images_III = if_III.to_images(stageIII_generations, disable_watermark=disable_watermark)\n        result['III'] = pil_images_III\n    else:\n        stageIII_generations = None\n\n    if return_tensors:\n        return result, (stageI_generations, stageII_generations, stageIII_generations)\n    else:\n        return result\n","repo_name":"riiid/PPAP","sub_path":"deepfloyd_if/pipelines/uncond_dream.py","file_name":"uncond_dream.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"76"}
+{"seq_id":"20962916337","text":"from base64 import b64encode\n\nfrom flask import Blueprint, Flask, g, jsonify, current_app, request, send_file, send_from_directory, abort\nfrom backend.lib.data import ExecInstance\nfrom backend.api.helpers import get_pagination, json_resp_ok, json_resp_invalid, json_resp_not_found\n\nexecinstance_endpoints = Blueprint('execinstance_endpoints', __name__)\n\n\n#\n# Exec Instance API endpoints\n#\n@execinstance_endpoints.route('/<uuid>/', methods=['GET'])\ndef get_exec_instance(uuid):\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n    \n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return json_resp_not_found(\"Execution instance not found\")\n\n    exec_instance.load_processes(current_app._db)\n    current_app._db.unlock()\n\n    return json_resp_ok(exec_instance.to_dict())\n\n\n@execinstance_endpoints.route('/<uuid>/metadata/<metatype>/list', methods=['GET'])\ndef get_execinstance_metadata_list(uuid, metatype):\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n\n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return abort(404)\n    exec_instance.load_metadata(current_app._db)\n    current_app._db.unlock()\n\n    data_filter = request.args.get('filter')\n    metadata_type = metatype.strip()\n\n    return_list = exec_instance.get_metadata_by_type(metadata_type, data_filter=data_filter)\n\n    return jsonify({\n        \"ok\": True,\n        \"result\": return_list\n    })\n\n@execinstance_endpoints.route('/<uuid>/metadata/list', methods=['GET'])\ndef get_execinstance_metadata_types(uuid):\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n\n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return abort(404)\n    exec_instance.load_metadata(current_app._db)\n    current_app._db.unlock()\n\n    return_map = exec_instance.get_metadata_types()\n\n    
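# return_map lists the metadata types recorded for this execution instance.\n    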
return jsonify({\n        \"ok\": True,\n        \"result\": return_map\n    })\n\n@execinstance_endpoints.route('/<uuid>/netcomm/list', methods=['GET'])\ndef get_execinstance_netcomm(uuid):\n\n    try:\n        limit_int, skip_int = get_pagination(request)\n    except ValueError:\n        return json_resp_invalid(\"Parameter 'sort' or 'limit' is invalid\")\n\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return json_resp_not_found(\"Execution instance not found\")\n    \n    address_filter = None\n    if request.args.get('address') is not None:\n        address_filter = request.args.get('address')\n    port_filter = None\n    if request.args.get('port') is not None:\n        try:\n            port_filter = int(request.args.get('port'))\n        except ValueError:\n            return json_resp_invalid(\"Parameter 'port' is invalid\")\n    \n    exec_instance.load_netcomms(current_app._db, limit=limit_int, skip=skip_int, as_dict=True, port_filter=port_filter, address_filter=address_filter)\n    comm_stats = exec_instance.network_comm_statistics\n    current_app._db.unlock()\n\n    return json_resp_ok({\n        \"netcomms\": exec_instance.network_comms,\n        \"total\": exec_instance.network_comms_total,\n        \"statistics\": comm_stats\n    })\n\n@execinstance_endpoints.route('/<uuid>/thumbnail/<name>', methods=['GET'])\ndef get_execinstance_thumbnails(uuid, name):\n\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return json_resp_not_found(\"Execution instance not found\")\n    current_app._db.unlock()\n\n    if name in exec_instance.screenshots:\n        thumb_name = f\"{name}-t\"\n        thumb_file = current_app._filestore.open_file(thumb_name)\n        encode_image = b64encode(thumb_file.read()).decode()\n        current_app._filestore.close_file(name, thumb_file)\n        return json_resp_ok({\n            \"image_data\": encode_image,\n            \"name\": thumb_name\n        })\n    else:\n        return json_resp_not_found(\"Screenshot not found\")\n\n@execinstance_endpoints.route('/<uuid>/screenshot/<name>', methods=['GET'])\ndef get_execinstance_screenshot(uuid, name):\n\n    current_app._db.lock()\n    exec_instance = ExecInstance(uuid=uuid)\n    exec_instance.load(current_app._db)\n    if exec_instance.uuid is None:\n        current_app._db.unlock()\n        return json_resp_not_found(\"Execution instance not found\")\n    current_app._db.unlock()\n\n    if name in exec_instance.screenshots:\n        # Unlike the thumbnail endpoint, this serves the full-resolution image,\n        # so no \"-t\" suffix is appended to the returned name.\n        screenshot_name = name\n        screenshot_file = current_app._filestore.open_file(name)\n        encode_image = b64encode(screenshot_file.read()).decode()\n        current_app._filestore.close_file(name, screenshot_file)\n        return json_resp_ok({\n            \"image_data\": encode_image,\n            \"name\": screenshot_name\n        })\n    else:\n        return json_resp_not_found(\"Screenshot not found\")\n\n    ","repo_name":"bocajspear1/Kogia","sub_path":"backend/api/execinstance.py","file_name":"execinstance.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"}
+{"seq_id":"4433111203","text":"import datetime\nimport random\nimport string\nfrom typing import List\n\nimport requests\n\nimport coreapi\nfrom django.conf import settings\nfrom django.db.models import FileField\nfrom django.db.models.fields.files import FieldFile\n\nfrom apps.challenge.models import Game, Map\n\n\ndef random_token():\n    chars = string.ascii_letters + string.digits\n    return ''.join((random.choice(chars)) for i in range(15))\n\n\ndef create_infra_client():\n    credentials = {'Authorization': 'Token {}'.format(settings.INFRA_AUTH_TOKEN)}\n    print(credentials)\n    transports = [coreapi.transports.HTTPTransport(credentials=credentials)]\n    client = coreapi.Client(transports=transports)\n    schema = client.get(settings.INFRA_API_SCHEMA_ADDRESS)\n    return client, schema\n\n\ndef read_in_chunks(file: FieldFile, chunk_size=65536):\n    while True:\n        data = file.read(chunk_size)\n        if not data:\n            break\n        yield data\n\n\ndef upload1(file):\n    index = 0\n    headers = {}\n    print(\"=======================================\")\n    for chunk in read_in_chunks(file=file):\n        offset = index + len(chunk)\n        headers['Content-Type'] = 'application/octet-stream'\n        headers['Content-length'] = file.size\n        headers['Content-Range'] = 'bytes %s-%s/%s' % (index, offset, file.size)\n        headers['Authorization'] = f'Token {settings.INFRA_AUTH_TOKEN}'\n        index = offset\n        try:\n            r = requests.put(settings.INFRA_IP + \"/api/storage/new_file/\", data=chunk, headers=headers)\n            print(\"r: %s, Content-Range: %s\" % (r, headers['Content-Range']))\n            print(r)\n        except Exception as e:\n            print(e)\n    print(\"==========================================\")\n\n\ndef upload2(file):\n    from requests_toolbelt import MultipartEncoder\n    import requests\n\n    payload = MultipartEncoder({'file': file})\n\n    r = requests.put(\n        settings.INFRA_IP + \"/api/storage/new_file/\",\n        data=payload,\n        headers={\"Content-Type\": payload.content_type, 'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}'})\n\n    print(r.json())\n\n\ndef upload_file(file):\n    \"\"\"\n    Uploads a file to the infrastructure synchronously.\n    :param file: File field from TeamSubmission model\n    :return: file token or raises error with error message\n    \"\"\"\n    print(\"about to upload\", file.size)\n    response = requests.put(settings.INFRA_IP + \"/api/storage/new_file/\", files={'file': file},\n                            headers={'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}'})\n    print(response.status_code, response.json(), \"==== Upload File ====\")\n\n    return response.json()['token']\n\n\ndef upload_file_with_url(file):\n    \"\"\"\n    Uploads a file to the infrastructure synchronously.\n    The site acts as the file server and the infrastructure downloads\n    the file from the URL provided by the site.\n\n    :param file: File field from TeamSubmission model\n    :return: file token or raises error with error message\n    \"\"\"\n    print(\"about to upload\", file.size)\n    response = requests.post(settings.INFRA_IP + \"/api/storage/new_file_from_url/\",\n                             json={'url': 'https://aichallenge.sharif.edu' + file.url},\n                             headers={'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}'})\n    print(response.status_code, response.json(), \"==== Upload File ====\")\n\n    return response.json()['token']\n\n\ndef download_file(file_token):\n    \"\"\"\n    Downloads a file from the infrastructure synchronously.\n    :param file_token: the file token obtained already from infra.\n    :return: something the TeamSubmission file field can be assigned to\n    \"\"\"\n    response = requests.get(settings.INFRA_IP + f\"/api/storage/get_file/{file_token}/\", allow_redirects=True,\n                            headers={'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}'})\n    print(response.status_code, \"==== Download File ====\")\n    return response\n\n\ndef compile_submissions(submissions):\n    \"\"\"\n    Tell the infrastructure to compile a list of submissions\n    :return: list of dictionaries, each having token, success[, errors] keys\n    \"\"\"\n\n    print(\"about to compile\")\n    
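# Expected response shape, per the docstring above (illustrative values only):\n    #   [{'token': 'abc123', 'success': True},\n    #    {'token': 'def456', 'success': False, 'errors': '...'}]\n    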
\"language\": submission.language,\n \"code_zip\": submission.infra_token\n }\n })\n response = requests.post(settings.INFRA_IP + \"/api/run/run/\", json=parameters,\n headers={'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}',\n 'Content-Type': 'application/json'})\n print(response.status_code, response.json(), \"==== Compile File ====\")\n return response.json()\n\n\ndef run_games(single_games: List[Game], desired_map: Map = None):\n \"\"\"\n Tell the infrastructure to run a list of single_matches (single_match includes tokens,maps,...)\n :param desired_map:\n :param single_games:\n :return: Returns the list of tokens and success status and errors assigned to the matches\n \"\"\"\n\n print(\"oomad run kokne\")\n games = []\n for single_game in single_games:\n random_map = Map.objects.filter(verified=True).order_by('?').last()\n game_map = single_game.match.map if single_game.match else random_map\n game_map = desired_map if desired_map else game_map\n games.append({\n \"game\": 'AI2020',\n \"operation\": \"run\",\n \"parameters\": {\n \"server_game_config\": game_map.infra_token,\n\n \"client1_id\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 0].team.final_submission.id,\n \"client1_token\": random_token(),\n \"client1_code\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 0].team.final_submission.infra_compile_token,\n \"client1_name\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 0].team.name,\n\n \"client2_id\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 0].team.final_submission.id,\n \"client2_token\": random_token(),\n \"client2_code\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 0].team.final_submission.infra_compile_token,\n \"client2_name\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 0].team.name,\n\n \"client3_id\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 1].team.final_submission.id,\n \"client3_token\": random_token(),\n \"client3_code\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 1].team.final_submission.infra_compile_token,\n \"client3_name\": single_game.game_sides.all().order_by('id')[0].game_teams.all().order_by('id')[\n 1].team.name,\n\n \"client4_id\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 1].team.final_submission.id,\n \"client4_token\": random_token(),\n \"client4_code\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 1].team.final_submission.infra_compile_token,\n \"client4_name\": single_game.game_sides.all().order_by('id')[1].game_teams.all().order_by('id')[\n 1].team.name,\n }\n })\n\n response = requests.post(settings.INFRA_IP + \"/api/run/run/\", json=games,\n headers={'Authorization': f'Token {settings.INFRA_AUTH_TOKEN}',\n 'Content-Type': 'application/json'})\n\n print(response.status_code, response.json(), \"==== Run Single Games ====\")\n\n return response.json()\n\n\ndef recover(password):\n with open('u.txt', 'r') as f:\n for line in f.readlines():\n line = line[:-1]\n splitted = line.split(\"\\t\")\n print(\"name \", splitted[1], splitted[2])\n print(\"email \", splitted[3])\n print(\"date \", splitted[4])\n print(\"uni \", splitted[5])\n print(\"=============================================\")\n data = {\n \"email\": splitted[3],\n \"password_1\": password,\n 
\"password_2\": password,\n \"profile\": {\n 'firstname_fa': splitted[1],\n 'firstname_en': '_',\n 'lastname_fa': splitted[2],\n 'lastname_en': '_',\n 'birth_date': datetime.datetime.strptime(splitted[4], '%Y-%m-%d').date(),\n 'university': splitted[5]\n }\n }\n from apps.accounts.serializer import UserSerializer\n user = UserSerializer(data=data)\n user.is_valid(raise_exception=True)\n user.save()\n user.instance.is_active = True\n user.instance.save()\n","repo_name":"SharifAIChallenge/AIC20-Backend","sub_path":"apps/challenge/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":9285,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"76"} +{"seq_id":"24089562519","text":"\nimport random\nimport gym\nimport numpy as np\nfrom collections import deque\nimport tensorflow as tf\n\nEPISODES = 1000#迭代次数\n\nclass model:\n def __init__(self,state_size, action_size):\n self.state_size = state_size\n self.action_size = action_size\n self.learning_rate=0.001\n self.x = tf.placeholder(shape=[None, 4], name=\"x\",dtype='float64')\n self.y = tf.placeholder(shape=[None, 2], name=\"y\",dtype='float64')\n\n def _build_model(self,):\n with tf.variable_scope(\"dnn\"):\n hidden1 = tf.contrib.layers.fully_connected(self.x, 24, activation_fn=tf.nn.relu, scope=\"hidden1\")\n hidden2 = tf.contrib.layers.fully_connected(hidden1, 24, activation_fn=tf.nn.relu, scope=\"hidden2\")\n self.predictions = tf.contrib.layers.fully_connected(hidden2, self.action_size, scope=\"outputs\", activation_fn=None)\n\n with tf.variable_scope('loss'):\n self.mse = tf.reduce_sum(tf.square(self.predictions - self.y))\n with tf.variable_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n self.training_op = optimizer.minimize(self.mse)\n\n\n def predict(self, sess, s):\n\n return sess.run(self.predictions , {self.x: s})\n\n def fit(self, sess, s, y):\n feed_dict = {self.x: s, self.y: y}\n _,loss = sess.run(\n [self.training_op, self.mse],\n feed_dict)\n return loss\n\n\n\nclass DQNAgent:\n def __init__(self, state_size, action_size,dqn):\n self.state_size = state_size\n self.action_size = action_size\n self.memory = deque(maxlen=2000)#经验回放缓存区\n self.gamma = 0.95 # Q函数discount rate\n self.epsilon = 1.0 # exploration rate\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = 0.001\n self.model = dqn\n\n\n\n\n def remember(self, state, action, reward, next_state, done):\n self.memory.append((state, action, reward, next_state, done))\n\n def act(self, sess,state):\n if np.random.rand() <= self.epsilon:\n return random.randrange(self.action_size)\n\n act_values = self.model.predict(sess,state)\n return sess.run(tf.argmax(input=act_values, axis=1)) # returns action\n\n def replay(self,sess, batch_size):\n minibatch = random.sample(self.memory, batch_size)#均匀抽样\n for state, action, reward, next_state, done in minibatch:\n target = reward\n act_values = self.model.predict(sess, next_state)\n\n if not done:\n target = (reward + self.gamma *\n sess.run(tf.argmax(input=act_values, axis=1)))\n\n target_f = self.model.predict(sess,state)\n target_f[0][action] = target\n\n self.model.fit(sess,state, target_f)\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay#epsilon_decay逐渐减小,知道其为epsilon_min\n\n\n\n\nif __name__ == \"__main__\":\n env = gym.make('CartPole-v1')\n state_size = env.observation_space.shape[0]\n action_size = env.action_space.n\n dqn=model(state_size, action_size)\n\n dqn._build_model()\n 
agent = DQNAgent(state_size, action_size, dqn)\n\n done = False\n batch_size = 32\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(EPISODES):\n state = env.reset()\n state = np.reshape(state, [1, state_size])\n for time in range(500):\n env.render()\n action = agent.act(sess,state)\n if type(action)!=int:\n action=action[0]\n next_state, reward, done, _ = env.step(action)\n reward = reward if not done else -10\n next_state = np.reshape(next_state, [1, state_size])\n agent.remember(state, action, reward, next_state, done)\n state = next_state\n if done:\n print(\"episode: {}/{}, score: {}, e: {:.2}\"\n .format(e, EPISODES, time, agent.epsilon))\n break\n if len(agent.memory) > batch_size:\n\n agent.replay(sess,batch_size)","repo_name":"lushunn/reinforcement-learning-base-on-KandTF","sub_path":"dqn/base-tf.py","file_name":"base-tf.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"76"} +{"seq_id":"69872895606","text":"### Fed_Philadelphia.py ###\n# The purpose of this script is to scrape metadata from the most recent Philadelphia Fed working papers. This script uses\n# the Philadelphia Fed working paper landing page to obtain titles, links, authors and numbers. Abstracts are found on the\n# specific landing pages corresponding to each individual paper, and dates are estimated using PDF metadata.\n# Lorae Stojanovic\n# Special thanks to ChatGPT for coding assistance in this project.\n# LE: 23 Sept 2023\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport json\nfrom html import unescape # Import the unescape function\nimport re\nimport PyPDF2\nfrom io import BytesIO\nimport io\nfrom datetime import datetime\n\ndef extract_pdf_metadata_from_url(pdf_url):\n # Download the PDF\n response = requests.get(pdf_url)\n response.raise_for_status() # Raise an exception for HTTP errors\n\n # Use BytesIO to convert the downloaded content to a file-like object so it can be read by PyPDF2\n with BytesIO(response.content) as pdf_file:\n # Create a PDF reader object\n pdf_reader = PyPDF2.PdfReader(pdf_file)\n \n # Extract metadata\n metadata = pdf_reader.metadata\n\n return metadata\n\ndef extract_and_format_moddate(metadata):\n # Extract the ModDate string\n mod_date_str = metadata['/ModDate'][2:16] # Extracts '20230303104258' from 'D:20230303104258-05'00''\n \n # Parse the ModDate string into a datetime object\n mod_date = datetime.strptime(mod_date_str, '%Y%m%d%H%M%S')\n \n # Format the datetime object in the desired format\n formatted_date = mod_date.strftime('%B %d, %Y')\n \n return formatted_date\n\ndef scrape():\n url = \"https://www.philadelphiafed.org/search-results/all-work?searchtype=working-papers\"\n\n # Get the soup for the main landing page\n headers = { # imitate a browser\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'}\n soup = BeautifulSoup(requests.get(url, headers=headers).content, 'html.parser')\n #print(soup)\n\n # There are many