diff --git "a/1203.jsonl" "b/1203.jsonl" new file mode 100644--- /dev/null +++ "b/1203.jsonl" @@ -0,0 +1,722 @@ +{"seq_id":"233481371","text":"from numpy import linalg, array, asarray, matrix, random, dot, cross, pi, arccos\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom sympy import Matrix as spMatrix\n\nA = matrix([[2,-3,1],[0,1,3],[-4,2,1]])\n#som array, men uten komponentvise operasjoner * / **\nrandMat = random.rand(2,6) #2x6-matrise med tilfeldige verdier mellom 0 og 1\nA_inv = linalg.inv(A)\nA_det = linalg.det(A)\nE_val, E_vec = linalg.eig(A)\n\nv = matrix([2,4,6])\nv_transp = v.T\na = asarray(v)\nb = array([3,5,7])\nc = cross(a,b)\nd = arccos(dot(a,b)/(linalg.norm(a)*linalg.norm(b)))\n\nM = matrix([[2,-4,6,-2],[-3,2,-1,8],[1,-6,11,4]])\nprint(spMatrix.rref(spMatrix(M))) #radreduksjon via sympy","sub_path":"linalg/intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"468250146","text":"import random\n\nfrom tools.unionfind import UnionFind\n\n\ndef maze_generator_search(height, width):\n # Random breast/depth first search in an implicit graph starting from square (0,0)\n\n maze = [[0 if x % 2 == y % 2 == 1 else 1 for y in range(2 * width + 1)] for x in range(2 * height + 1)]\n\n queue = {(0, 0)} # waiting queue\n seen = {(0, 0)} # seen nodes\n\n # get coordinates of a node's neighbors\n neighbors = lambda X: filter(\n lambda Y: 0 <= Y[0] < height and 0 <= Y[1] < width,\n [(X[0] + d[0], X[1] + d[1]) for d in ((1, 0), (-1, 0), (0, 1), (0, -1))]\n )\n\n # get the coordinates of the wall beetween 2 nodes\n wall = lambda X, Y: tuple((2 * X[i] + 1 + 2 * Y[i] + 1) // 2 for i in range(2))\n\n while queue:\n # random waiting node\n square = queue.pop() # lot faster than random.sample\n\n # its neighbors (without the already seen ones)\n neighbors_ = set(neighbors(square)).difference(seen)\n\n if neighbors_:\n # random neighbor\n neighbor = random.sample(neighbors_, 1)[0]\n\n # add to queue and seen\n seen.add(neighbor)\n queue.add(neighbor)\n\n # break wall beetween the nodes\n wall_ = wall(square, neighbor)\n maze[wall_[0]][wall_[1]] = 0\n\n # put back node to in the queue it it has at least one unseen neighbor\n if len(neighbors_) > 1:\n queue.add(square)\n\n return maze\n\n\ndef maze_generator_union(height, width):\n # Random expansion of connected components\n\n # empty maze\n maze = [[0 if x % 2 == y % 2 == 1 else 1 for y in range(2 * width + 1)] for x in range(2 * height + 1)]\n\n # squares that can be randomly selected by the algorithm\n squares = {(x, y) for x in range(height) for y in range(width)}\n\n # union-find structure to keep track of connected components\n unionfind = UnionFind(squares)\n\n # get coordinates of a node's neighbors\n neighbors = lambda square: filter(\n lambda Y: 0 <= Y[0] < height and 0 <= Y[1] < width,\n [(square[0] + d[0], square[1] + d[1]) for d in ((1, 0), (-1, 0), (0, 1), (0, -1))]\n )\n\n # get the coordinates of the wall beetween 2 nodes\n wall = lambda X, Y: tuple((2 * X[i] + 1 + 2 * Y[i] + 1) // 2 for i in range(2))\n\n while squares:\n # random waiting node\n square = squares.pop()\n\n # its neighbors (without the ones which already are in the same connected component)\n neighbors_ = set(filter(\n lambda X: unionfind.find(square) != unionfind.find(X),\n neighbors(square)\n ))\n\n if neighbors_:\n # random neighbor\n neighbor = random.sample(neighbors_, 1)[0]\n\n # union of the connected 
components\n unionfind.union(square, neighbor)\n\n # break the wall between the nodes\n wall_ = wall(square, neighbor)\n maze[wall_[0]][wall_[1]] = 0\n\n # put the node back in the queue if it has at least one unconnected neighbor\n if len(neighbors_) > 1:\n squares.add(square)\n\n return maze\n","sub_path":"maze/maze_generator.py","file_name":"maze_generator.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"427947269","text":"#!/usr/bin/env python\n\n\nimport turbotutils.account\nimport boto3\nimport json\n\n\ndef main(account, region):\n session = boto3.Session(profile_name=account)\n client = session.client('iam', region_name=region)\n try:\n response = client.list_users()\n #responseObj = json.loads(response.text)\n for user in response['Users']:\n keys = client.list_access_keys(UserName=user['UserName'])\n if (keys['AccessKeyMetadata'][0]['Status']) == 'Inactive':\n print(keys['AccessKeyMetadata'][0]['UserName'], keys['AccessKeyMetadata'][0]['AccessKeyId'], account)\n except Exception as e:\n print(e, account)\n\nif __name__ == '__main__':\n\n # Set to False if you do not have a valid certificate for your Turbot Host\n turbot_host_certificate_verification = True\n\n # Set to your Turbot Host URL\n turbot_host = turbotutils.get_turbot_host()\n\n turbot_user_id = turbotutils.get_turbot_user()\n\n # Get the turbot version\n api_version = turbotutils.get_api_version()\n\n # Get the access and secret key pairs\n (turbot_api_access_key, turbot_api_secret_key) = turbotutils.get_turbot_access_keys()\n region_name='us-east-1'\n accounts = turbotutils.cluster.get_turbot_account_ids(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, api_version)\n\n for account in accounts:\n turbot_account = account\n main(account, region_name)\n","sub_path":"examples/find_unused_access_keys.py","file_name":"find_unused_access_keys.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"203202361","text":"#Functions needed to handle the Audi A2D2 dataset, according to https://www.a2d2.audi/a2d2/en/tutorial.html\n\nimport json\nimport pprint\nimport numpy as np\nimport numpy.linalg as la\nimport open3d as o3\n\ndef skew_sym_matrix(u):\n return np.array([[ 0, -u[2], u[1]], \n [ u[2], 0, -u[0]], \n [-u[1], u[0], 0]])\n\n\ndef axis_angle_to_rotation_mat(axis, angle):\n return np.cos(angle) * np.eye(3) + \\\n np.sin(angle) * skew_sym_matrix(axis) + \\\n (1 - np.cos(angle)) * np.outer(axis, axis)\n\n\ndef read_bounding_boxes(file_name_bboxes):\n # open the file\n with open (file_name_bboxes, 'r') as f:\n bboxes = json.load(f)\n \n boxes = [] # a list for containing bounding boxes \n print(bboxes.keys())\n \n for bbox in bboxes.keys():\n bbox_read = {} # a dictionary for a given bounding box\n bbox_read['class'] = bboxes[bbox]['class']\n bbox_read['truncation']= bboxes[bbox]['truncation']\n bbox_read['occlusion']= bboxes[bbox]['occlusion']\n bbox_read['alpha']= bboxes[bbox]['alpha']\n bbox_read['top'] = bboxes[bbox]['2d_bbox'][0]\n bbox_read['left'] = bboxes[bbox]['2d_bbox'][1]\n bbox_read['bottom'] = bboxes[bbox]['2d_bbox'][2]\n bbox_read['right']= bboxes[bbox]['2d_bbox'][3]\n bbox_read['center'] = np.array(bboxes[bbox]['center'])\n bbox_read['size'] = np.array(bboxes[bbox]['size'])\n angle = bboxes[bbox]['rot_angle']\n axis = np.array(bboxes[bbox]['axis'])\n bbox_read['rotation'] =
axis_angle_to_rotation_mat(axis, angle) \n boxes.append(bbox_read)\n\n return boxes \n\n\ndef extract_bboxes_file_name_from_image_file_name(file_name_image):\n file_name_bboxes = file_name_image.split('/')\n file_name_bboxes = file_name_bboxes[-1].split('.')[0]\n file_name_bboxes = file_name_bboxes.split('_')\n file_name_bboxes = file_name_bboxes[0] + '_' + \\\n 'label3D_' + \\\n file_name_bboxes[2] + '_' + \\\n file_name_bboxes[3] + '.json'\n \n return file_name_bboxes\n\n\ndef get_points(bbox):\n half_size = bbox['size'] / 2.\n \n if half_size[0] > 0:\n # calculate unrotated corner point offsets relative to center\n brl = np.asarray([-half_size[0], +half_size[1], -half_size[2]])\n bfl = np.asarray([+half_size[0], +half_size[1], -half_size[2]])\n bfr = np.asarray([+half_size[0], -half_size[1], -half_size[2]])\n brr = np.asarray([-half_size[0], -half_size[1], -half_size[2]])\n trl = np.asarray([-half_size[0], +half_size[1], +half_size[2]])\n tfl = np.asarray([+half_size[0], +half_size[1], +half_size[2]])\n tfr = np.asarray([+half_size[0], -half_size[1], +half_size[2]])\n trr = np.asarray([-half_size[0], -half_size[1], +half_size[2]])\n \n # rotate points\n points = np.asarray([brl, bfl, bfr, brr, trl, tfl, tfr, trr])\n points = np.dot(points, bbox['rotation'].T)\n \n # add center position\n points = points + bbox['center']\n \n return points\n\n\n# Create or update open3d wire frame geometry for the given bounding boxes\ndef _get_bboxes_wire_frames(bboxes, linesets=None, color=None):\n\n num_boxes = len(bboxes)\n \n # initialize linesets, if not given\n if linesets is None:\n linesets = [o3.geometry.LineSet() for _ in range(num_boxes)]\n\n # set default color\n if color is None:\n #color = [1, 0, 0]\n color = [0, 0, 1]\n\n assert len(linesets) == num_boxes, \"Number of linesets must equal number of bounding boxes\"\n\n # point indices defining bounding box edges\n lines = [[0, 1], [1, 2], [2, 3], [3, 0],\n [0, 4], [1, 5], [2, 6], [3, 7],\n [4, 5], [5, 6], [6, 7], [7, 4], \n [5, 2], [1, 6]]\n\n # loop over all bounding boxes\n for i in range(num_boxes):\n # get bounding box corner points\n points = get_points(bboxes[i])\n # update corresponding Open3d line set\n colors = [color for _ in range(len(lines))]\n line_set = linesets[i]\n line_set.points = o3.utility.Vector3dVector(points)\n line_set.lines = o3.utility.Vector2iVector(lines)\n line_set.colors = o3.utility.Vector3dVector(colors)\n\n return linesets\n\nprint(\"Function import done.\")","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"403650629","text":"import services.controlers.documentTypeControler\nimport json\nfrom json import JSONEncoder\nfrom json import JSONDecoder\nimport services.controlers.loggControler\nfrom services.exceptions import *\n\n\nclass GetDocumentTypeByCountryIdView:\n\n\tdef post(self, idCountry):\n\t\tresponse_data = {}\n\t\tresponse_data['status']=OK\n\t\tresponse_data['message']=\"DOCUMENTOS_ENCONTRADOS\"\n\t\tresponse_data['data']=''\n\t\tdocumentTypeControler =services.controlers.documentTypeControler.DocumentTypeControler()\n\t\ttry:\n\t\t\tresponse_data['data']=documentTypeControler.getByCountryId(idCountry)\n\t\texcept ExceptionWithCode as e:\n\t\t\tresponse_data['message']=e.message\n\t\t\tresponse_data['status']=e.code\n\n\t\texcept Exception as 
e:\n\t\t\tresponse_data['status']=ERROR_NO_DEFINIDO\n\t\t\tresponse_data['message']=e.message\n\t\t\tloggControler = services.controlers.loggControler.LoggControler()\n\t\t\tloggControler.addLogg('Critical', ERROR_NO_DEFINIDO, e.message)\n\t\tjsonStringResponse = JSONEncoder().encode(response_data)\n\t\treturn jsonStringResponse","sub_path":"services/views/getDocumentTypeByCountryIdView.py","file_name":"getDocumentTypeByCountryIdView.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"338567987","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n# CODE NAME HERE\n\n# CODE DESCRIPTION HERE\n\nCreated on 2018-12-18 at 10:47\n\n@author: cook\n\"\"\"\n\nimport numpy as np\nimport os\nfrom astropy.io import fits\n\n\n# =============================================================================\n# Define variables\n# =============================================================================\nWORKSPACE = '/spirou/cfht_nights/mtl/telluDB'\nDATABASE = os.path.join(WORKSPACE, 'master_tellu_SPIROU.txt')\n# -----------------------------------------------------------------------------\n\n# =============================================================================\n# Define functions\n# =============================================================================\ndef read_data_base(databasepath):\n # open file\n f = open(databasepath, 'r')\n lines = f.readlines()\n f.close()\n # split by space\n dlines = []\n for line in lines:\n dlines.append(line.split())\n # return database lines\n return dlines\n\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\n# Main code here\nif __name__ == \"__main__\":\n # ----------------------------------------------------------------------\n # load database file\n database = read_data_base(DATABASE)\n\n # get filename and obj name for all TELL_OBJ\n tell_file, tell_obj = [], []\n for entry in database:\n if len(entry) == 0:\n continue\n if entry[0] == 'TELL_OBJ':\n tell_file.append(entry[1].strip())\n tell_obj.append(entry[4].strip())\n\n # loop around files and check header\n hdr_obj = []\n for it, filename in enumerate(tell_file):\n # print progress\n print('Reading file {0} of {1}'.format(it + 1, len(tell_file)))\n # load header\n fhdr = fits.getheader(os.path.join(WORKSPACE, filename))\n # get objname from header\n objname_hdr = str(fhdr['OBJNAME']).strip()\n hdr_obj.append(objname_hdr)\n # check for consistency\n if objname_hdr.upper() != tell_obj[it].upper():\n wmsg = '\\tFile {0} OBJNAME match fail ({1} != {2})'\n print(wmsg.format(filename, tell_obj[it], objname_hdr))\n\n tell_obj = np.array(tell_obj)\n hdr_obj = np.array(hdr_obj)\n mask = tell_obj != hdr_obj\n print('Number OBJNAMES not equal = {0}'.format(np.nansum(mask)))\n\n\n# =============================================================================\n# End of code\n# =============================================================================\n","sub_path":"old_code/INTROOT/misc/test_db_values.py","file_name":"test_db_values.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"247661400","text":"__Author__ = 'Prameet Bisht'\n__Version__ = \"0.0.1\"\n__Email__ = \"myprameet09@gmail.com\"\n__Github__ =
\"https://github.com/orgs/POC-AWS-services/dashboard\"\n\n\nimport boto3\n\n\n# Create CloudWatchEvents client\ncloudwatch_events = boto3.client('events')\n\n# Put an event rule\nresponse = cloudwatch_events.put_rule(\n Name='DEMO_EVENT',\n RoleArn='IAM_ROLE_ARN',\n ScheduleExpression='rate(5 minutes)',\n State='ENABLED'\n)\nprint(response['RuleArn'])","sub_path":"Create_a_scheduled_rule.py","file_name":"Create_a_scheduled_rule.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"110456376","text":"import heapq\r\nimport time\r\nimport math\r\nfrom pathlib import Path\r\nimport random\r\n\r\nrandomness_complexity = 25\r\n\r\n\"\"\"File with utility functions for agents and environments to use.\"\"\"\r\n\r\n\r\ndef get_reward_from_bitstring(s: str) -> float:\r\n \"\"\"Calculate reward from bit string\"\"\"\r\n reward = 0\r\n sign = 1 if s[0] == \"0\" else -1\r\n for idx in range(len(s) - 1):\r\n reward += int(s[idx + 1]) * pow(0.5, idx)\r\n return sign * reward\r\n\r\n\r\ndef get_decimal_from_bitstring(s: str) -> float:\r\n \"\"\"Calculate decimal number from bitstring\"\"\"\r\n decimal = 0\r\n for idx in range((len(s) - 1)):\r\n decimal += int(s[len(s) - 1 - idx]) * pow(2, idx)\r\n return decimal\r\n\r\n\r\ndef get_bitstring_from_decimal(decimal: int, length: int) -> str:\r\n \"\"\"Calculate bitstring from decimal\"\"\"\r\n return format(decimal, 'b').zfill(length)\r\n\r\n\r\ndef get_random_bit() -> int:\r\n \"\"\"Returns a pseudorandom bit from a timestamp. Used for calculating random bit complexity\"\"\"\r\n seed = time.time()\r\n return pow(2, int(str(seed).replace('.', '')[-5:])) % 3 % 2\r\n\r\n\r\ndef get_data_path() -> Path:\r\n \"\"\"Returns data path.\"\"\"\r\n return Path(__file__).parent.parent.joinpath('resources/data')\r\n\r\n\r\ndef get_plots_path() -> Path:\r\n \"\"\"Returns plots path.\"\"\"\r\n return Path(__file__).parent.parent.joinpath('resources/plots')\r\n\r\n\r\ndef is_saved(training_step: int) -> bool:\r\n \"\"\"Returns boolean indicating if the given training step is to be saved or loaded.\"\"\"\r\n if training_step == 0:\r\n return False\r\n if math.log10(training_step).is_integer():\r\n return True\r\n if math.log10(training_step * 2).is_integer():\r\n return True\r\n return False\r\n\r\n\r\ndef nested_set(dic, keys, value):\r\n \"\"\"Sets a nested value in a dictionary\"\"\"\r\n for key in keys[:-1]:\r\n dic = dic.setdefault(key, {})\r\n dic[keys[-1]] = value\r\n\r\n\r\ndef random_action(length: int) -> str:\r\n \"\"\"Returns a random action\"\"\"\r\n action = ''\r\n for i in range(length):\r\n action += str(random.randint(0, 1))\r\n return action\r\n\r\n\r\ndef sort_dict(dic: dict, keys: list):\r\n \"\"\"Sorts the keys in a dict with a sorted list of the keys. 
The list must contain only valid keys.\"\"\"\r\n temp_dic = {}\r\n for key in keys:\r\n temp_dic[key] = dic.pop(key)\r\n for key in temp_dic:\r\n dic[key] = temp_dic[key]\r\n\r\n\r\ndef init_heap(length: int):\r\n \"\"\"initialize action heap for pi agents\"\"\"\r\n reward_statistics = []\r\n for action_idx in range(pow(2, length)):\r\n action = get_bitstring_from_decimal(action_idx, length)\r\n heapq.heappush(reward_statistics, (1, action, 0))\r\n return reward_statistics\r\n\r\n\r\ndef heapq_siftdown(heap, startpos, pos):\r\n \"\"\"Taken from heapq internal code since it might be deprecated\r\n https://hg.python.org/cpython/file/3.6/Lib/heapq.py\r\n Implements decrease_key\"\"\"\r\n newitem = heap[pos]\r\n # Follow the path to the root, moving parents down until finding a place\r\n # newitem fits.\r\n while pos > startpos:\r\n parentpos = (pos - 1) >> 1\r\n parent = heap[parentpos]\r\n if newitem < parent:\r\n heap[pos] = parent\r\n pos = parentpos\r\n continue\r\n break\r\n heap[pos] = newitem\r\n\r\n\r\ndef heapq_siftup(heap, pos):\r\n \"\"\"Taken from heapq internal code since it might be deprecated.\r\n https://hg.python.org/cpython/file/3.6/Lib/heapq.py\r\n Implements increase_key\"\"\"\r\n endpos = len(heap)\r\n startpos = pos\r\n newitem = heap[pos]\r\n # Bubble up the smaller child until hitting a leaf.\r\n childpos = 2*pos + 1 # leftmost child position\r\n while childpos < endpos:\r\n # Set childpos to index of smaller child.\r\n rightpos = childpos + 1\r\n if rightpos < endpos and not heap[childpos] < heap[rightpos]:\r\n childpos = rightpos\r\n # Move the smaller child up.\r\n heap[pos] = heap[childpos]\r\n pos = childpos\r\n childpos = 2*pos + 1\r\n # The leaf at pos is empty now. Put newitem there, and bubble it up\r\n # to its final resting place (by sifting its parents down).\r\n heap[pos] = newitem\r\n heapq_siftdown(heap, startpos, pos)\r\n","sub_path":"python/src/Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"352762794","text":"#!/usr/bin/python\n# -*- coding:UTF-8 -*-\n\nfrom test.common_package import myunit\nfrom time import sleep\nfrom test.common_package.searchcommon import SearchCommon\nfrom test.common_package.selectcity import SelectCity\nfrom test.common_package.basic import Page\n\nclass Bj_ErShouFang_All_PaiXu(myunit.MyTest,Page):\n # used for standalone unit testing\n #def __init__(self,driver):\n # self.driver = driver\n\n def open_url(self):\n SelectCity(self.driver).dakaiwangzhan() # open the website\n sleep(2)\n SelectCity(self.driver).maitian_online_city_select(cityname=2) # select the city\n self.shouye_handle = self.driver.current_window_handle\n SelectCity(self.driver).second_hand_house() # second-hand houses\n sleep(3)\n self.all_handle = self.driver.window_handles\n for self.handle in self.all_handle:\n if self.handle != self.shouye_handle:\n self.driver.switch_to.window(self.handle)\n sleep(1)\n\n def test_1(self):\n self.open_url()\n SearchCommon(self.driver).paixu_list(paixuname='总价') # sort by total price\n SearchCommon(self.driver).paixu_list(paixuname='单价') # sort by unit price\n SearchCommon(self.driver).paixu_list(paixuname='面积') # sort by area\n\nif __name__==\"__main__\":\n from selenium import webdriver\n import data.urldata as URL\n\n driver = webdriver.Chrome()\n url = URL.maitian_online_url\n driver.get(url)\n # driver.get('http://bj-test.imtfc.com/esfway/B1D1/T3L1')\n driver.implicitly_wait(2)\n driver.maximize_window()\n
Bj_ErShouFang_All_PaiXu(driver).test_1()\n","sub_path":"test/xiamen_TestCase/xm_ershoufang_paixu_testcase.py","file_name":"xm_ershoufang_paixu_testcase.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"35300069","text":"from models import networkgcn, networktcn \nimport torch \nimport numpy as np \nfrom TorchSUL import Model as M \nimport datareader \nfrom tqdm import tqdm\nimport torch.nn.functional as F \n\nbone_pairs = [[8,9],[9,10], [8,14],[14,15],[15,16], [8,11],[12,13],[11,12], [8,7],[7,0], [4,5],[5,6],[0,4], [0,1],[1,2],[2,3]]\nbone_matrix = np.zeros([16,17], dtype=np.float32)\nfor i, pair in enumerate(bone_pairs):\n\tbone_matrix[i, pair[0]] = -1\n\tbone_matrix[i, pair[1]] = 1\nbone_matrix_inv = np.linalg.pinv(bone_matrix)\nbone_matrix_inv = torch.from_numpy(bone_matrix_inv)\nbone_matrix = torch.from_numpy(bone_matrix)\n\nbsize = 32\nseq_len = 243\n\nnetgcn = networkgcn.TransNet(256, 17)\nnettcn = networktcn.Refine2dNet(17, seq_len)\n\n# initialize the network with dummy input \nx_dumb = torch.zeros(2,17,2)\naffb = torch.ones(2,16,16) / 16\naffpts = torch.ones(2,17,17) / 17\nnetgcn(x_dumb, affpts, affb, bone_matrix, bone_matrix_inv)\nx_dumb = torch.zeros(2,243, 17*3)\nnettcn(x_dumb)\n\n# load networks \nM.Saver(netgcn).restore('./ckpts/model_gcn/')\nM.Saver(nettcn).restore('./ckpts/model_tcn/')\n\n# set networks to evaluation mode \nnetgcn.eval()\nnettcn.eval()\n\n# get dataset \ndataset = datareader.PtsData(seq_len)\n\n# start testing \nsample_num = 0\nloss_total = 0\nfor i in tqdm(range(len(dataset))):\n\tp2d,p3d = dataset[i]\n\tbsize = p2d.shape[0]\n\taffb = torch.ones(bsize,16,16) / 16\n\taffpts = torch.ones(bsize,17,17) / 17\n\n\twith torch.no_grad():\n\t\tpred = netgcn(p2d, affpts, affb, bone_matrix, bone_matrix_inv)\n\t\tpred = pred.unsqueeze(0).unsqueeze(0)\n\t\tpred = F.pad(pred, (0,0,0,0,seq_len//2, seq_len//2), mode='replicate')\n\t\tpred = pred.squeeze()\n\t\tpred = nettcn.evaluate(pred)\n\t\tloss = torch.sqrt(torch.pow(pred - p3d, 2).sum(dim=-1)) # [N, 17]\n\t\tloss = loss.mean(dim=1).sum()\n\t\tloss_total = loss_total + loss\n\t\tsample_num = sample_num + bsize\n\nprint('MPJPE: %.4f'%(loss_total / sample_num))\n","sub_path":"eval_gt_h36m_cpu.py","file_name":"eval_gt_h36m_cpu.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"378608248","text":"# -*- coding:ascii -*-\nfrom mako import runtime, filters, cache\nUNDEFINED = runtime.UNDEFINED\n__M_dict_builtin = dict\n__M_locals_builtin = locals\n_magic_number = 10\n_modified_time = 1421880533.820617\n_enable_loop = True\n_template_filename = 'C:\\\\Python34\\\\text_dmp\\\\homepage\\\\templates/terms.html'\n_template_uri = 'terms.html'\n_source_encoding = 'ascii'\nimport os, os.path, re\n_exports = ['content3', 'content1', 'content2']\n\n\ndef _mako_get_namespace(context, name):\n try:\n return context.namespaces[(__name__, name)]\n except KeyError:\n _mako_generate_namespaces(context)\n return context.namespaces[(__name__, name)]\ndef _mako_generate_namespaces(context):\n pass\ndef _mako_inherit(template, context):\n _mako_generate_namespaces(context)\n return runtime._inherit_from(context, 'base.htm', _template_uri)\ndef render_body(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n __M_locals = __M_dict_builtin(pageargs=pageargs)\n def content3():\n return render_content3(context._locals(__M_locals))\n def
content1():\n return render_content1(context._locals(__M_locals))\n def content2():\n return render_content2(context._locals(__M_locals))\n __M_writer = context.writer()\n __M_writer('\\n\\n \\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content1'):\n context['self'].content1(**pageargs)\n \n\n __M_writer('\\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content2'):\n context['self'].content2(**pageargs)\n \n\n __M_writer('\\n \\n')\n if 'parent' not in context._data or not hasattr(context._data['parent'], 'content3'):\n context['self'].content3(**pageargs)\n \n\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content3(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content3():\n return render_content3(context)\n __M_writer = context.writer()\n __M_writer('\\n
 Google Web Fonts and Font Awesome Icons\\n This template features the \\'Lato\\' font, part of the Google Web Font library, as well as icons from Font Awesome.\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content1(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content1():\n return render_content1(context)\n __M_writer = context.writer()\n __M_writer('\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\ndef render_content2(context,**pageargs):\n __M_caller = context.caller_stack._push_frame()\n try:\n def content2():\n return render_content2(context)\n __M_writer = context.writer()\n __M_writer('\\n 3D Device Mockups by PSDCovers\\n Turn your 2D designs into high quality, 3D product shots in seconds using free Photoshop actions by PSDCovers! Visit their website to download some of their awesome, free photoshop actions!
\\n')\n return ''\n finally:\n context.caller_stack._pop_frame()\n\n\n\"\"\"\n__M_BEGIN_METADATA\n{\"uri\": \"terms.html\", \"filename\": \"C:\\\\Python34\\\\text_dmp\\\\homepage\\\\templates/terms.html\", \"line_map\": {\"48\": 40, \"64\": 42, \"82\": 19, \"27\": 0, \"70\": 4, \"38\": 1, \"88\": 19, \"58\": 42, \"43\": 18, \"76\": 4, \"94\": 88}, \"source_encoding\": \"ascii\"}\n__M_END_METADATA\n\"\"\"\n","sub_path":"homepage/cached_templates/templates/terms.html.py","file_name":"terms.html.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"95427424","text":"# -*- coding: utf-8 -*-\r\n#variables\r\n\r\nb_rectangle = False\r\nf_b_rc = 2\r\nf_c = 6\r\n\r\nb_triangle = True\r\nf_h = 4\r\nf_b_tr = 5\r\n\r\n\"\"\"\r\n - computes the area of a triangle if the first parameter is\r\n true and the area of a rectangle if the first parameter is false\r\n :input\r\n isTriangle = true if triangle and false if rectangle\r\n f_b = base of the triangle or rectangle\r\n f_h = height of the triangle or rectangle\r\n\r\n :output\r\n f_A = area of the triangle or rectangle\r\n\"\"\"\r\ndef assigment1(isTriangle, f_b, f_h):\r\n if(isTriangle):\r\n A = 0.5*f_b*f_h\r\n else:\r\n A = f_b*f_h\r\n \r\n return A\r\n\r\nprint('triangle area', assigment1(b_triangle, f_b_tr, f_h))\r\nprint('rectangle area', assigment1(b_rectangle, f_b_rc, f_c))","sub_path":"hw1/submissions/johannessonsofia/johannessonsofia_37907_1247027_assignment1.py","file_name":"johannessonsofia_37907_1247027_assignment1.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"574322406","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'orders'\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"register\", views.register, name='register'),\n path(\"enter\", views.enter, name='enter'),\n path(\"exit\", views.exit, name='exit'),\n path(\"pizza//\", views.process_order, name='process_order'),\n path(\"cart/\", views.process_cart, name='process_cart'),\n path(\"display_orders\", views.display_orders, name='display_orders'),\n path(\"clear_cart\", views.clear_cart, name='clear_cart')\n\n\n]","sub_path":"orders/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"414566947","text":"from collections import namedtuple\n\nimport os\n\nROOT_PATH = os.path.dirname(os.path.abspath(__file__))\n\nNNConfig = namedtuple('NNConfig', ['embeding_size', 'rnn_specs', 'rnn_doc_specs', 'projection_size', 'use_word2vec'])\nDataSetConfig = namedtuple('DataSetConfig', ['dict_size'])\nOptimizationConfig = namedtuple('OptimizationConfig', ['batch_size', 'num_epochs', 'gradient_norm'])\nCheckpointConfig = namedtuple('CheckpointConfig', ['checkpoint_dir', 'save_checkpoint_steps'])\nSummaryConfig = namedtuple('SummaryConfig', ['summary_dir', 'save_test_steps', 'save_eval_steps', 'eval_batch', 'save_graph'])\nTrainingConfig = namedtuple('TrainingConfig', ['nn_cfg', 'dataset_cfg', 'opt_cfg', 'checkpoint_cfg', 'summary_cfg', 'debug'])\n\nnn_cfg = NNConfig(\n embeding_size = 100,\n rnn_specs = [(\"GRU\",64)],\n rnn_doc_specs = [(\"GRU\",64)],\n projection_size = None,\n use_word2vec = False,\n)\n\nopt_cfg = OptimizationConfig(\n batch_size=500*5,\n num_epochs = 1000,\n gradient_norm = 5.\n)\n\ncheckpoint_cfg = CheckpointConfig(\n checkpoint_dir =
'checkpoint_dist',\n save_checkpoint_steps = 10000,\n)\nsummary_cfg = SummaryConfig(\n summary_dir='summary_dist',\n save_test_steps=10,\n save_eval_steps=30,\n eval_batch=200*5,\n save_graph=False,\n)\n\nif __name__ == '__main__':\n print(ROOT_PATH)\n","sub_path":"Arsenii/config_dist.py","file_name":"config_dist.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"49349721","text":"import numpy as np\nfrom easydict import EasyDict as edict\n\n\ncfgs = edict()\ncfgs.file_list = ['training.txt', 'validation.txt']\ncfgs.seq_num = 0\ncfgs.cur_channel = 3\n\ncfgs.NUM_OF_CLASSESS = 2\n#cfgs.IMAGE_SIZE = [270, 480]\ncfgs.IMAGE_SIZE = [540, 960]\ncfgs.batch_size = 8\n","sub_path":"experiment/finetune_fcn/cfgs/config_train_m0717.py","file_name":"config_train_m0717.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"501895107","text":"import cv2\nimport pymysql\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\n# Load the trained model\nrecognizer.read('auth/trainer/trainer.yml')\n\n# Load prebuilt model for Frontal Face\ncascadePath = \"auth/cascades/haarcascade_frontalface_default.xml\"\nface_id=26723\n# Create classifier from prebuilt model\nfaceCascade = cv2.CascadeClassifier(cascadePath)\ncap = cv2.VideoCapture(0)\nhieght=600\nwidht=1200\n# Initialize sample face image\ncount =1\nclass CaptureImages(object):\n\n\n def getCaptureImages(self):\n conn = pymysql.connect(\"localhost\", \"xolani\", \"phpXOLANI6565x.,\", \"register\")\n curs = conn.cursor()\n\n curs.execute(\"SELECT * FROM users WHERE registerNumber =(SELECT min(registerNumber) FROM users)\")\n val = curs.fetchall()\n for row in val:\n global face_id\n #face_id = row[0]\n face_id = row[0]\n conn.commit()\n conn.close()\n\n global count\n\n img, frame_image = cap.read()\n # Convert frame to grayscale\n gray = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)\n\n # Detect frames of different sizes, list of faces rectangles\n faces = faceCascade.detectMultiScale(gray, 1.2, 5)\n\n # Loop over each face\n for (x, y, w, h) in faces:\n # Crop the image frame into rectangle\n cv2.rectangle(frame_image, (x, y), (x + w, y + h), (255, 0, 0), 2)\n # Increment sample face image\n count += 1\n\n if (count < 60):\n\n # Save the captured image into the datasets folder\n cv2.imwrite(\"auth/dataset/User.\" + str(face_id) + '.' 
+ str(count) + \".jpg\", gray[y:y + h, x:x + w])\n print(\"busy with ur staff\")\n else:\n print(\"im done with you staff\")\n\n # if img:\n # self.out.write(frame_image)\n\n# while True:\n# CaptureImages().getCaptureImages()\n","sub_path":"auth/capture_image.py","file_name":"capture_image.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190488772","text":"# -*- coding:utf-8 -*-\r\n#Author:Mirror\r\n#CreateDate:18.9.6\r\n#ModifiedDate:18.9.6\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n#import seaborn as sns\r\n#import matplotlib as mpl\r\n#import matplotlib.pyplot as plt\r\n#from mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\ndef get_data(path,columns):\r\n Data = pd.read_csv(path)\r\n Data.columns = columns\r\n return Data\r\n\r\ndef get_ageRange(data):\r\n if data<=10:\r\n return 0\r\n elif 100])\r\n data[feature] = data[features][data[features]>0].mean()\r\n if np.isnan(data[feature]): data[feature]=0\r\n data[feature] = round(data[feature],4)\r\n #print(feature+\" \"+str(data[feature]))\r\n return data\r\n\r\ndef data_dropNan(Data):\r\n # print(Data)\r\n # print(trainData[trainData[\"2_total_fee\"]==\"\\\\N\"])\r\n two_total_fee_mean = Data[Data[\"2_total_fee\"].apply(lambda x: x != \"\\\\N\")][\"2_total_fee\"].astype(float).mean()\r\n Data[\"2_total_fee\"].replace(\"\\\\N\", two_total_fee_mean, inplace=True)\r\n Data[\"2_total_fee\"] = Data[\"2_total_fee\"].astype(float)\r\n # print(trainData[\"2_total_fee\"])\r\n\r\n three_total_fee_mean = Data[Data[\"3_total_fee\"].apply(lambda x: x != \"\\\\N\")][\"3_total_fee\"].astype(float).mean()\r\n Data[\"3_total_fee\"].replace(\"\\\\N\", three_total_fee_mean, inplace=True)\r\n Data[\"3_total_fee\"] = Data[\"3_total_fee\"].astype(float)\r\n\r\n # age_mean = Data[Data[\"age\"].apply(lambda x:x != \"\\\\N\")][\"age\"].astype(float).mean()\r\n Data[\"age\"].replace(\"\\\\N\", 0, inplace=True)\r\n Data[\"age\"] = Data[\"age\"].astype(float)\r\n\r\n Data[\"gender\"].replace(\"\\\\N\", 0, inplace=True)\r\n Data[\"gender\"] = Data[\"gender\"].astype(int)\r\n\r\n # Data[\"ageRange\"] = Data[\"age\"].apply(get_ageRange)\r\n # dummyFeature = pd.get_dummies(Data[[\"ageRange\"]])\r\n # Data = Data.drop([\"ageRange\",\"age\"],axis=1)\r\n # Data = pd.concat([Data,dummyFeature],axis=1)\r\n\r\n # print(Data.info())\r\n # print(Data[Data[\"contract_time\"] == -1].shape[0])\r\n dummyFeature = pd.get_dummies(Data[[\"complaint_level\", \"contract_type\", \"service_type\", \"net_service\"]])\r\n Data = Data.drop([\"complaint_level\", \"contract_type\", \"service_type\", \"net_service\"], axis=1)\r\n Data = pd.concat([Data, dummyFeature], axis=1)\r\n\r\n # Data[\"1_total_fee\"].apply(lambda x: max(x, 0))\r\n # Data[\"2_total_fee\"].apply(lambda x: max(x, 0))\r\n # Data[\"3_total_fee\"].apply(lambda x: max(x, 0))\r\n # Data[\"4_total_fee\"].apply(lambda x: max(x, 0))\r\n Data[\"1_total_fee\"] = Data[\"1_total_fee\"].apply(abs)\r\n Data[\"2_total_fee\"] = Data[\"2_total_fee\"].apply(abs)\r\n Data[\"3_total_fee\"] = Data[\"3_total_fee\"].apply(abs)\r\n Data[\"4_total_fee\"] = Data[\"4_total_fee\"].apply(abs)\r\n\r\n #============new=====================\r\n Data = Data.apply(total_fee_replace_mean,axis=1)\r\n print(\"[+]0replace mean over\")\r\n\r\n#==============0.9 Quantile Replace (except \"age\")================\r\n #columns = 
[\"online_time\",\"1_total_fee\",\"2_total_fee\",\"3_total_fee\",\"4_total_fee\",\"month_traffic\",\"contract_time\",\"pay_times\",\"pay_num\",\"last_month_traffic\",\"local_trafffic_month\",\"local_caller_time\",\"service1_caller_time\",\"service2_caller_time\",\"former_complaint_num\",\"former_complaint_fee\"]\r\n columns = [\"1_total_fee\", \"2_total_fee\", \"3_total_fee\", \"4_total_fee\", \"month_traffic\",\r\n \"pay_num\", \"last_month_traffic\", \"local_trafffic_month\",\r\n \"local_caller_time\", \"service1_caller_time\", \"service2_caller_time\"]\r\n\r\n # for feature in columns:\r\n # quantileValue = Data[feature].quantile(0.998)\r\n # Data[feature][Data[feature]>=quantileValue] = quantileValue\r\n # #print(Data[feature].max())\r\n\r\n return Data\r\n\r\ndef data_preprocess(Data):\r\n\r\n\r\n Data[\"12_total_fee\"] = abs(Data[\"1_total_fee\"]- Data[\"2_total_fee\"])\r\n Data[\"23_total_fee\"] = abs(Data[\"2_total_fee\"] - Data[\"3_total_fee\"])\r\n Data[\"34_total_fee\"] = abs(Data[\"3_total_fee\"] - Data[\"4_total_fee\"])\r\n\r\n\r\n\r\n #Data = Data.drop([\"2_total_fee\", \"3_total_fee\", \"4_total_fee\"], axis=1)\r\n\r\n Data[\"local_month_traffic_dif\"] = Data[\"month_traffic\"] - Data[\"local_trafffic_month\"]\r\n Data[\"month_traffic>zero\"] = (Data[\"month_traffic\"]>1.0).apply(int)\r\n Data[\"last_month_traffic>zero\"]= (Data[\"last_month_traffic\"]>1.0).apply(int)\r\n Data[\"local_trafffic_month>zero\"] = (Data[\"local_trafffic_month\"]>1.0).apply(int)\r\n\r\n\r\n\r\n\r\n #Data[\"onlineTimeRange\"] = Data[\"online_time\"].apply(get_onlineTimeRange)\r\n #dummyFeature = pd.get_dummies(Data[\"onlineTimeRange\"])\r\n #Data = Data.drop([\"online_time\",\"onlineTimeRange\"], axis=1)\r\n #Data = pd.concat([Data, dummyFeature], axis=1)\r\n\r\n Data[\"mean_service1_2_caller_time\"] = (Data[\"service1_caller_time\"]+Data[\"service2_caller_time\"])/2.0\r\n Data[\"diff_service1_2_caller_time\"] = Data[\"service2_caller_time\"]-Data[\"service1_caller_time\"]\r\n\r\n #Data[\"complaint_fee/num\"] = (Data[\"former_complaint_fee\"]*1.0)/Data[\"former_complaint_num\"]\r\n #==================New======================\r\n Data[\"total_time\"] = Data[\"online_time\"]+Data[\"contract_time\"]\r\n Data[\"average_total_fee\"] = Data[[\"1_total_fee\", \"2_total_fee\", \"3_total_fee\", \"4_total_fee\"]].sum(axis=1)/4.0\r\n Data[\"caller_time\"] = Data[[\"local_caller_time\", \"service1_caller_time\", \"service2_caller_time\"]].sum(axis=1)\r\n Data[\"month_traffic>=139\"] = (Data[\"month_traffic\"]>=139).apply(int)#.apply(lambda x: int(x>=139))\r\n Data[\"contract_time>=0\"] = (Data[\"contract_time\"]>=0).apply(int)#.apply(lambda x: int(x >= 0))\r\n Data[\"pay_times>=1\"] = (Data[\"pay_times\"]>=1).apply(int)#.apply(lambda x: int(x >= 1))\r\n Data[\"pay_num>=80\"] = (Data[\"pay_num\"]>=80).apply(int)#.apply(lambda x: int(x >= 80))\r\n Data[\"last_month_traffic>=0\"] = (Data[\"last_month_traffic\"]>=0).apply(int)#.apply(lambda x: int(x >= 0))\r\n Data[\"local_trafffic_month>=1262\"] = (Data[\"local_trafffic_month\"]>=1262).apply(int)#.apply(lambda x: int(x >= 1262))\r\n Data[\"local_caller_time>=14\"] = (Data[\"local_caller_time\"]>=14).apply(int)#.apply(lambda x: x >= 14)\r\n Data[\"service1_caller_time>=0\"] = (Data[\"service1_caller_time\"]>=0).apply(int)#.apply(lambda x: x >= 0)\r\n Data[\"service2_caller_time>=29\"] = (Data[\"service2_caller_time\"]>=29).apply(int)#.apply(lambda x: x >= 29)\r\n Data[\"former_complaint_num>=0\"] = (Data[\"former_complaint_num\"]>=0).apply(int)#.apply(lambda x: x 
>= 0)\r\n Data[\"former_complaint_fee>=0\"] = (Data[\"former_complaint_fee\"]>=0).apply(int)#.apply(lambda x: x >= 0)\r\n\r\n\r\n #===================New+1==========================\r\n Data[\"last_month_traffic_dif\"] = Data[\"month_traffic\"] - Data[\"last_month_traffic\"]\r\n Data[\"local_month_traffic_sum\"] = Data[\"month_traffic\"] + Data[\"local_trafffic_month\"]\r\n Data[\"online_time>=12\"] = (Data[\"online_time\"]>=12).apply(int)#.apply(lambda x: x >= 12)\r\n Data[\"online_time>=36\"] = (Data[\"online_time\"]>=36).apply(int)#.apply(lambda x: x >= 36)\r\n Data[\"14total_fee_dif\"] = Data[\"1_total_fee\"] - Data[\"4_total_fee\"]\r\n Data[\"13total_fee_dif\"] = Data[\"1_total_fee\"] - Data[\"3_total_fee\"]\r\n billColumns = [\"1_total_fee\", \"2_total_fee\", \"3_total_fee\", \"4_total_fee\"]\r\n #print(Data[billColumns][Data[\"many_over_bill\"] == 1][:5])\r\n for item in billColumns:\r\n Data[item][Data[\"many_over_bill\"] == 1] *=0.9\r\n #Data[billColumns][Data[\"many_over_bill\"] == 1] = 0#overBillData *0.9\r\n #print(Data[billColumns][Data[\"many_over_bill\"] == 1][:5])\r\n #================================================\r\n\r\n # ================Log============================\r\n Data[\"month_traffic\"] = np.log1p(Data[\"month_traffic\"])\r\n Data[\"pay_num\"] = np.log1p(Data[\"pay_num\"])\r\n Data[\"last_month_traffic\"] = np.log1p(Data[\"last_month_traffic\"])\r\n Data[\"local_trafffic_month\"] = np.log1p(Data[\"local_trafffic_month\"])\r\n Data[\"local_caller_time\"] = np.log1p(Data[\"local_caller_time\"])\r\n Data[\"service1_caller_time\"] = np.log1p(Data[\"service1_caller_time\"])\r\n Data[\"service2_caller_time\"] = np.log1p(Data[\"service2_caller_time\"])\r\n Data[\"former_complaint_fee\"] = np.log1p(Data[\"former_complaint_fee\"])\r\n\r\n #=========================================================\r\n\r\n\r\n return Data\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"[+]begin\")\r\n train_columns = [\"service_type\",\"is_mix_service\",\"online_time\",\"1_total_fee\",\"2_total_fee\",\"3_total_fee\",\"4_total_fee\",\"month_traffic\",\"many_over_bill\",\"contract_type\",\"contract_time\",\"is_promise_low_consume\",\"net_service\",\"pay_times\",\"pay_num\",\"last_month_traffic\",\"local_trafffic_month\",\"local_caller_time\",\"service1_caller_time\",\"service2_caller_time\",\"gender\",\"age\",\"complaint_level\",\"former_complaint_num\",\"former_complaint_fee\",\"current_service\",\"user_id\"]\r\n test_columns = [\"service_type\", \"is_mix_service\", \"online_time\", \"1_total_fee\", \"2_total_fee\", \"3_total_fee\",\r\n \"4_total_fee\", \"month_traffic\", \"many_over_bill\", \"contract_type\", \"contract_time\",\r\n \"is_promise_low_consume\", \"net_service\", \"pay_times\", \"pay_num\", \"last_month_traffic\",\r\n \"local_trafffic_month\", \"local_caller_time\", \"service1_caller_time\", \"service2_caller_time\",\r\n \"gender\", \"age\", \"complaint_level\", \"former_complaint_num\", \"former_complaint_fee\", \"user_id\"]\r\n\r\n trainData = get_data(\"data/train.csv\",train_columns)\r\n #print(trainData)\r\n testData = get_data(\"data/test.csv\",test_columns)\r\n #print(testData)\r\n\r\n\r\n current_service = trainData[\"current_service\"].unique()\r\n #print(current_service)\r\n current_service_to_label = {}\r\n label_to_current_service = {}\r\n for i in range(15):\r\n current_service_to_label[current_service[i]]=i\r\n label_to_current_service[i] = current_service[i]\r\n #print(current_service_to_label)\r\n #print(label_to_current_service)\r\n\r\n 
trainData = trainData.replace({\"current_service\":current_service_to_label})\r\n #print(trainData)\r\n trainLabel = trainData[\"current_service\"].copy()\r\n #print(trainLabel)\r\n #trainData = trainData.drop([\"user_id\",\"current_service\"],axis=1)\r\n #print(trainData)\r\n#==============================================================================\r\n\r\n#==============================================================================\r\n #trainData[trainData < 0] = 123456\r\n #print(trainData<0)\r\n #print(trainData[trainData < 0])\r\n #print(trainData[trainData[\"2_total_fee\"]==\"\\\\N\"])\r\n two_total_fee_mean = trainData[trainData[\"2_total_fee\"]!=\"\\\\N\"][\"2_total_fee\"].astype(float).mean()\r\n trainData[\"2_total_fee\"].replace(\"\\\\N\",two_total_fee_mean,inplace=True)\r\n trainData[\"2_total_fee\"]=trainData[\"2_total_fee\"].astype(float)\r\n #print(trainData[\"2_total_fee\"])\r\n\r\n three_total_fee_mean = trainData[trainData[\"3_total_fee\"] != \"\\\\N\"][\"3_total_fee\"].astype(float).mean()\r\n trainData[\"3_total_fee\"].replace(\"\\\\N\",three_total_fee_mean, inplace=True)\r\n trainData[\"3_total_fee\"]=trainData[\"3_total_fee\"].astype(float)\r\n\r\n age_mean = trainData[trainData[\"age\"] != \"\\\\N\"][\"age\"].astype(float).mean()\r\n trainData[\"age\"].replace(\"\\\\N\", age_mean, inplace=True)\r\n trainData[\"age\"] = trainData[\"age\"].astype(float)\r\n\r\n trainData[\"gender\"].replace(\"\\\\N\", 0, inplace=True)\r\n trainData[\"gender\"] = trainData[\"gender\"].astype(float)\r\n\r\n print(trainData.info())\r\n\r\n\r\n\r\n#==============================================================================\r\n '''\r\n def get_zs(data):\r\n #print(pd.value_counts(data[\"online_time\"].tolist()))\r\n count = pd.value_counts(data[\"online_time\"].tolist())\r\n #print(count)\r\n result = np.zeros((1,300))\r\n #print(count.shape)\r\n for key,value in count.iteritems():\r\n #print(\"[k]%d\"%(key))\r\n #print(\"[v]%d\"%(value))\r\n result[0][key]=value\r\n #print(result)\r\n return result\r\n \r\n '''\r\n '''\r\n def get_zs1(data):\r\n #print(pd.value_counts(data[\"online_time\"].tolist()))\r\n length = data.shape[0]\r\n count = pd.value_counts(data[\"current_service\"].tolist())\r\n #print(count)\r\n result = np.zeros((1,15))\r\n #print(count.shape)\r\n for key,value in count.iteritems():\r\n #print(\"[k]%d\"%(key))\r\n #print(\"[v]%d\"%(value))\r\n result[0][key]=value/float(length)\r\n #print(result)\r\n return result\r\n '''\r\n '''\r\n #zs = trainData[[\"online_time\",\"current_service\"]].groupby([\"current_service\"]).apply(get_zs)#lambda x: pd.value_counts(x[\"online_time\"].tolist,sort=False))\r\n zs = trainData[[\"online_time\",\"current_service\"]].groupby([\"online_time\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n #print(np.shape(zs))\r\n #print(zs[0])\r\n #print(trainData[\"online_time\"].max())\r\n #print(trainData[\"online_time\"].min())\r\n #print(zs)\r\n #Axes3D.bar(trainData[\"online_time\"],trainData[\"current_service\"],zs=)\r\n\r\n sns.heatmap(zs,cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n total_fee = trainData[[\"4_total_fee\",\"current_service\"]]\r\n result = total_fee.boxplot(by=\"current_service\",sym=\"\",return_type=\"dict\")\r\n plt.show()\r\n '''\r\n '''\r\n month_traffic = trainData[[\"month_traffic\",\"current_service\"]]\r\n result = month_traffic.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.show()\r\n '''\r\n '''\r\n print(trainData[\"contract_time\"].max())\r\n 
print(trainData[\"contract_time\"].min())\r\n print(trainData[\"contract_time\"].unique())\r\n\r\n zs = trainData[[\"contract_time\", \"current_service\"]].groupby([\"contract_time\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n\r\n sns.heatmap(zs, cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n zs = trainData[[\"net_service\", \"current_service\"]].groupby([\"net_service\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n\r\n sns.heatmap(zs, cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n print(trainData[\"pay_times\"].max())\r\n print(trainData[\"pay_times\"].min())\r\n print(np.sort(trainData[\"pay_times\"].unique()))\r\n zs = trainData[[\"pay_times\", \"current_service\"]].groupby([\"pay_times\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n\r\n sns.heatmap(zs, cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n pay_num = trainData[[\"pay_num\", \"current_service\"]]\r\n result = pay_num.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.show()\r\n '''\r\n '''\r\n last_month_traffic = trainData[[\"last_month_traffic\", \"current_service\"]]\r\n result = last_month_traffic.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.show()\r\n '''\r\n '''\r\n trainData[\"local_traffic_rate\"] = trainData[\"local_trafffic_month\"]/(trainData[\"month_traffic\"]+trainData[\"last_month_traffic\"])\r\n local_traffic_rate = trainData[[\"local_traffic_rate\", \"current_service\"]]\r\n result = local_traffic_rate.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.yticks(np.arange(0, 2, step=0.2))\r\n plt.show()\r\n '''\r\n '''\r\n local_caller_time = trainData[[\"local_caller_time\", \"current_service\"]]\r\n result = local_caller_time.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n #plt.yticks(np.arange(0, 2, step=0.2))\r\n plt.show()\r\n '''\r\n '''\r\n service1_caller_time = trainData[[\"service1_caller_time\", \"current_service\"]]\r\n result = service1_caller_time.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.yticks(np.arange(0, 160, step=5))\r\n plt.show()\r\n '''\r\n '''\r\n service2_caller_time = trainData[[\"service2_caller_time\", \"current_service\"]]\r\n result = service2_caller_time.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.yticks(np.arange(0, 2000, step=20))\r\n plt.show()\r\n '''\r\n '''\r\n print(trainData[\"age\"].max())\r\n print(trainData[\"age\"].min())\r\n print(np.sort(trainData[\"age\"].unique()))\r\n zs = trainData[[\"age\", \"current_service\"]].groupby([\"age\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n\r\n sns.heatmap(zs, cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n zs = trainData[[\"complaint_level\", \"current_service\"]].groupby([\"complaint_level\"]).apply(get_zs1)\r\n zs = np.vstack(zs.values)\r\n\r\n sns.heatmap(zs, cmap=\"rainbow\")\r\n plt.show()\r\n '''\r\n '''\r\n print(trainData[\"former_complaint_num\"].max())\r\n print(trainData[\"former_complaint_num\"].min())\r\n print(np.sort(trainData[\"former_complaint_num\"].unique()))\r\n former_complaint_num = trainData[[\"former_complaint_num\", \"current_service\"]]\r\n result = former_complaint_num.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.show()\r\n '''\r\n '''\r\n #print(trainData[\"former_complaint_fee\"].max())\r\n #print(trainData[\"former_complaint_fee\"].min())\r\n #print(np.sort(trainData[\"former_complaint_fee\"].unique()))\r\n #former_complaint_num = 
trainData[trainData[\"former_complaint_fee\"]!=0][[\"former_complaint_fee\", \"current_service\"]]\r\n former_complaint_num = trainData[[\"former_complaint_fee\", \"current_service\"]]\r\n rate = former_complaint_num.groupby([\"current_service\"]).apply(lambda x:x[x[\"former_complaint_fee\"]!=0].shape[0]/float(x.shape[0]))\r\n print(rate)\r\n plt.plot(range(15),rate)\r\n plt.title(\"former_complaint_fee>0 rate\")\r\n plt.grid(axis=\"y\")\r\n for i in range(15):\r\n plt.text(i, rate[i] + 0.001, '%.4f' % rate[i], ha='center', va='bottom', fontsize=9)\r\n #result = former_complaint_num.boxplot(by=\"current_service\", return_type=\"dict\", sym=\"\")\r\n plt.show()\r\n '''\r\n '''\r\n modeData = trainData[[\"service_type\",\"is_mix_service\",\"many_over_bill\",\"contract_type\",\"is_promise_low_consume\",\"net_service\",\"gender\",\"complaint_level\",\"current_service\"]].groupby(\"current_service\").apply(lambda x: x.mode())\r\n modeData = modeData.drop(\"current_service\",axis=1).reset_index()\r\n print(modeData.info())\r\n\r\n middleData = trainData[[\"online_time\",\"1_total_fee\",\"2_total_fee\",\"3_total_fee\",\"4_total_fee\",\"month_traffic\",\"contract_time\",\"pay_times\",\"pay_num\",\"last_month_traffic\",\"local_trafffic_month\",\"local_caller_time\",\"service1_caller_time\",\"service2_caller_time\",\"age\",\"former_complaint_num\",\"former_complaint_fee\",\"current_service\"]].groupby([\"current_service\"]).apply(lambda x: x.median())\r\n middleData = middleData.drop(\"current_service\",axis=1).reset_index()\r\n print(middleData.info())\r\n\r\n serviceData = pd.concat([modeData,middleData],axis=1)\r\n print(serviceData)\r\n\r\n for i in serviceData.columns:\r\n serviceData[i].plot(xticks=range(15))\r\n plt.title(i)\r\n plt.show()\r\n #serviceData.plot(subplots=True,xticks=range(15))\r\n\r\n #plt.legend()\r\n #plt.show()\r\n '''\r\n '''\r\n zeroCount = trainData[trainData[\"gender\"]==0].groupby(\"current_service\").apply(lambda x: x.shape[0])\r\n zeroRate = trainData.groupby(\"current_service\").apply(lambda x: float(x.shape[0]))\r\n zeroRate = round(zeroCount/zeroRate,4)\r\n print(list(zeroRate))\r\n '''\r\n '''\r\n rate = {}\r\n uni = trainData[\"gender\"].unique()\r\n all = trainData.shape[0]\r\n for i in uni:\r\n count = trainData[trainData[\"gender\"]==i].shape[0]\r\n rate[i] = round(count/float(all),4)\r\n print(rate)\r\n '''\r\n trainData[\"average_total_fee\"] = trainData[[\"1_total_fee\", \"2_total_fee\", \"3_total_fee\"]].sum(axis=1)\r\n print(trainData[\"average_total_fee\"])\r\n #print(trainData[[\"average_total_fee\",\"contract_time\",\"pay_times\",\"pay_num\",\"last_month_traffic\",\"local_trafffic_month\",\"local_caller_time\",\"service1_caller_time\",\"service2_caller_time\",\"former_complaint_num\",\"former_complaint_fee\"]].describe())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":20179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"245057918","text":"from json import loads, dump, load\nfrom pandas import DataFrame\nfrom re import compile, DOTALL\nfrom requests import get\nfrom fake_useragent import UserAgent\nfrom codecs import open\nfrom time import sleep\nfrom random import randint\n\n\nclass ArtFactsArtistScraper(object):\n def __init__(self):\n with open(\"artfacts_artists.json\", \"r\", \"utf-8\") as f:\n self.artists = load(f)\n self.data = None\n self.base_url = \"https://artfacts.net/api/v0/\"\n self.ua = 
UserAgent()\n\n def get_artists(self):\n bu = self.base_url + \"artists/\"\n for i in range(1, 702500):\n print(i)\n bu_i = bu + str(i)\n headers = {'user-agent': self.ua.random}\n r = get(bu_i, headers=headers)\n if r.status_code == 200:\n text = r.text\n d = loads(text)\n self.artists.append(d)\n self.persist()\n sleep(randint(3, 10))\n\n def persist(self):\n with open(\"artfacts_artists.json\", \"w\", \"utf-8\") as f:\n dump(self.artists, f, indent=4)\n\n\nif __name__ == \"__main__\":\n afas = ArtFactsArtistScraper()\n afas.get_artists()\n","sub_path":"artfacts.py","file_name":"artfacts.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"534412551","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'howManyGames' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER p\n# 2. INTEGER d\n# 3. INTEGER m\n# 4. INTEGER s\n#\n\n\ndef howManyGames(p, d, m, s):\n # Return the number of games you can buy\n if p > s:\n return 0\n k = 1 + (p - m) // d\n dif_price = p * k - ((k - 1) * k * d) // 2\n if dif_price <= s:\n return k + (s - dif_price) // m\n else:\n # largest k with k*p - d*k*(k-1)/2 <= s, i.e. the smaller root of\n # d*k**2 - (2*p + d)*k + 2*s = 0\n return math.floor(((d + 2*p) - math.sqrt((d + 2*p)**2 - 8 * d * s)) / (2 * d))\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n first_multiple_input = input().rstrip().split()\n\n p = int(first_multiple_input[0])\n\n d = int(first_multiple_input[1])\n\n m = int(first_multiple_input[2])\n\n s = int(first_multiple_input[3])\n\n answer = howManyGames(p, d, m, s)\n\n fptr.write(str(answer) + '\\n')\n\n fptr.close()\n","sub_path":"algorithms/halloween-sale.py","file_name":"halloween-sale.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"269956366","text":"import traceback\nimport urllib\nfrom pathlib import Path\nimport wget as wget\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup, element\nimport time\nimport re\nimport requests\nimport urllib3\nfrom urllib.parse import urljoin\n\nfrom datetime import datetime\nfrom selenium.webdriver.common.action_chains import ActionChains\n# from selenium.webdriver import ActionChains\nfrom selenium.webdriver.common import by\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nimport sys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as cond\nimport mysql.connector\nfrom multiprocessing.pool import ThreadPool\n\n\n\ndef runChromeOverServer():\n    while True:\n        try:\n            # proo = AmazonProxies.objects.order_by('count')[0]\n            # proxies = proo.proxy\n            # proo.count += 1\n            # proo.save()\n            proxy= '172.254.124.231:3128'\n            # proxy = '162.243.108.161:8080'\n            from pyvirtualdisplay import Display\n            # display = Display(visible=0, size=(1024, 768))\n            # display.start()\n            display = ''\n            options = webdriver.ChromeOptions()\n            options.add_argument(\"--start-maximized\")\n            # options.add_argument('--proxy-server=%s' % proxy)\n            options.add_argument('--disable-notifications')\n            options.add_argument('--disable-dev-shm-usage')\n            options.add_argument('--shm-size=2g')\n            
options.add_argument('--no-sandbox')\n while True:\n try:\n driver = webdriver.Chrome(executable_path='E:\\Emsgroup\\web_driver\\chromedriver.exe')\n except:\n driver = webdriver.Chrome(executable_path='E:\\Emsgroup\\web_driver\\chromedriver.exe',\n chrome_options=options)\n break\n return driver,display\n\n except Exception as e:\n print('driver while exception')\n print(e)\n pass\n\n\n\ndef fetching_categories():\n driver, display = runChromeOverServer()\n try:\n driver.execute_script(\"window.open('about:blank','tab1')\")\n driver.switch_to.window(\"tab1\")\n driver.get(\"https://www.sheetplastics.co.uk/\")\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".sub-nav__column\")))\n main_nav = driver.find_elements_by_css_selector(\".sub-nav__column\")[0:3]\n p_id = 0\n for sublinks in main_nav:\n class_li = sublinks.find_elements_by_tag_name(\"li\")\n for li in class_li:\n a_ = li.find_element_by_tag_name(\"a\")\n main_cat_link = a_.get_attribute(\"href\")\n main_cat_name = a_.get_property(\"innerHTML\")\n print(main_cat_name , main_cat_link)\n\n # try:\n # mydb = mysql.connector.connect(\n # host=\"localhost\",\n # user=\"root\",\n # passwd=\"\",\n # database=\"plasticsheet\"\n # )\n # mycursor = mydb.cursor()\n # sql = \"INSERT INTO category (category_name,meta_title,meta_description,status,IsHome,IsMenu,parent,category_url,gfeed_status)\" \\\n # \" VALUES (%s, %s,%s, %s,%s, %s, %s,%s, %s)\"\n # val = (\n # main_cat_name, main_cat_name, main_cat_name, \"Yes\", \"Yes\", \"Yes\", p_id,\n # main_cat_name.replace('-', '').replace('/', '').replace(' ','').strip(), \"Yes\")\n # mycursor.execute(sql, val)\n # mydb.commit()\n # print(mycursor.rowcount, \"record inserted.\")\n # mydb.close()\n #\n # except Exception as e:\n # print('Database query error')\n # print(e)\n try:\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"plasticsheet\"\n )\n mycursor = mydb.cursor()\n\n sql = \"SELECT category_id FROM category WHERE category_name ='%s'\" % main_cat_name\n mycursor.execute(sql)\n myresult = mycursor.fetchall()\n for x in myresult:\n category_id = x[0]\n except Exception as e:\n print(e)\n pass\n\n try:\n catelist = []\n mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"plasticsheet\"\n )\n mycursor = mydb.cursor()\n sql = \"SELECT category_id FROM products\"\n mycursor.execute(sql)\n myresult = mycursor.fetchall()\n for x in myresult:\n cat_id = x[0]\n catelist.append(cat_id)\n if category_id in catelist:\n pass\n print(\"Already Exists in Database\")\n else:\n extractproduct_urls(main_cat_link , category_id)\n except:\n pass\n\n except Exception as e:\n print(e)\n\n driver.quit()\n\ndef extractproduct_urls(main_cat_link , category_id):\n driver , display = runChromeOverServer()\n driver.execute_script(\"window.open('about:blank','tab2')\")\n driver.switch_to.window(\"tab2\")\n driver.get(main_cat_link)\n WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, \".product-image\")))\n product_url_div = driver.find_elements_by_css_selector(\".product-image\")\n product_urls_list =[]\n for anchor_tags in product_url_div:\n product_urls = anchor_tags.get_attribute(\"href\")\n product_urls_list.append((product_urls , category_id))\n multipooling(product_urls_list)\n driver.quit()\n\n\ndef multipooling(product_url_list):\n print(len(product_url_list))\n chunksList = []\n for i in range(0, len(product_url_list), 1):\n chunk = 
product_url_list[i: i+1]\n chunksList.append(chunk)\n\n # print(chunksList)\n print(chunksList)\n # print(len(chunksList))\n # print(len(chunksList), \" : \", chunksList)\n pool_size = 5\n pool = ThreadPool(pool_size)\n pool.map(products_data, chunksList)\n pool.close()\n pool.join()\n print('Done All Pool')\n\ndef products_data(chunksList):\n parent_url = 'https://www.sheetplastics.co.uk/'\n driver, display = runChromeOverServer()\n data_ = 10\n for url in chunksList:\n pro_url = url[0]\n cat_id = url[1]\n driver.execute_script(\"window.open('about:blank','tab\" + str(data_) + \"')\")\n driver.switch_to.window(\"tab\" + str(data_))\n driver.get(pro_url)\n try:\n product_title = driver.find_element_by_css_selector(\".product-name\").text\n print(product_title)\n except Exception as e:\n print(e)\n data_ += 1\n driver.quit()\n\ndef products():\n driver , display = runChromeOverServer()\n data_ = 1\n driver.execute_script(\"window.open('about:blank','tab\" + str(data_) + \"')\")\n driver.switch_to.window(\"tab\" + str(data_))\n driver.get(\"https://www.sheetplastics.co.uk/2mm-clear-acrylic-sheet-cut-to-size.html\")\n try:\n product_title = driver.find_element_by_css_selector(\".product-name\").text\n print(product_title)\n except Exception as e:\n print(e)\n try:\n price_div = driver.find_element_by_css_selector(\".regular-price\")\n product_price = price_div.text\n print(product_price)\n except Exception as e:\n print(e)\n\n try:\n product_image_div = driver.find_element_by_css_selector(\".product-image\").find_element_by_tag_name(\"a\")\n product_image = product_image_div.get_attribute(\"href\")\n print(product_image)\n except Exception as e:\n print(e)\n try:\n desc_ = driver.find_element_by_css_selector(\".tabbed-content__content.cf.tabbed-content__content--active\").text\n print(desc_)\n except Exception as e:\n print(e)\n\n data_ += 1\n driver.quit()\n\n\nif __name__ == '__main__':\n # fetching_categories()\n products()","sub_path":"plasticsheet.py","file_name":"plasticsheet.py","file_ext":"py","file_size_in_byte":8806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"224529328","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-universal/egg/windmill/authoring/unit.py\n# Compiled at: 2011-01-13 01:48:00\nimport unittest, sys\nfrom windmill.dep import functest\nreports = functest.reports\n\nclass UnitTestReporter(reports.FunctestReportInterface):\n\n def summary(self, test_list, totals_dict, stdout_capture):\n self.test_list = test_list\n\n\nunittestreporter = UnitTestReporter()\nreports.register_reporter(unittestreporter)\n\nclass WindmillUnitTestCase(unittest.TestCase):\n\n def setUp(self):\n import windmill\n windmill.stdout, windmill.stdin = sys.stdout, sys.stdin\n from windmill.bin.admin_lib import configure_global_settings, setup\n configure_global_settings()\n windmill.settings['TEST_URL'] = self.test_url\n if hasattr(self, 'windmill_settings'):\n for (setting, value) in self.windmill_settings.iteritems():\n windmill.settings[setting] = value\n\n self.windmill_shell_objects = setup()\n\n def testWindmill(self):\n self.windmill_shell_objects[('start_' + self.browser)]()\n self.windmill_shell_objects['do_test'](self.test_dir, threaded=False)\n for test in unittestreporter.test_list:\n self._testMethodDoc = getattr(test, '__doc__', None)\n self._testMethodName = test.__name__\n 
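# assertEquals is a long-deprecated alias of assertEqual; it is kept here because this\n # is decompiled Python 2.6 source, where the alias was still idiomatic\n 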
self.assertEquals(test.result, True)\n\n return\n\n def tearDown(self):\n from windmill.bin.admin_lib import teardown\n teardown(self.windmill_shell_objects)","sub_path":"pycfiles/windmill-1.6-py2.6/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"69393030","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\navg_reward = np.load('no_target_net/average_data.npy')\nreward = np.load('no_target_net/episode_data.npy')\n\nplt.rc('font', family='serif')\nplt.figure(figsize=(12, 14))\n\nax = plt.subplot(111) \nax.spines[\"top\"].set_visible(False) \nax.spines[\"right\"].set_visible(False) \n\nax.get_xaxis().tick_bottom() \nax.get_yaxis().tick_left() \nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\n\n# plt.plot(np.arange(2000), reward, color=[0.866, 0.596, 0.850])\n# plt.plot(np.arange(2000), avg_reward, color=[0.6, 0.384, 0.239], linewidth=2.5)\nplt.plot(np.arange(2000), reward, color=[0.709, 0.341, 0.050])\nplt.plot(np.arange(2000), avg_reward, color=[0.105, 0.207, 0.733], linewidth=2.5)\nplt.xlabel('Episode', fontsize=17)\nplt.ylabel('Reward', fontsize=17)\n\nplt.grid()\nplt.show()","sub_path":"CS6700_AE16B011_PA3/Code/Q2/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"119334481","text":"from typing import List\nfrom collections import deque\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n\n def pathSum(self, root: TreeNode, su: int) -> List[List[int]]:\n\n if not root:\n return list()\n\n ans = list()\n dq = deque()\n dq.append((root, root.val, [root.val]))\n while dq:\n node, d, path = dq.popleft()\n if not node.left and not node.right:\n if d == su:\n ans.append(path)\n if node.left:\n k = path[:]\n k.append(node.left.val)\n dq.append((node.left, d + node.left.val, k))\n if node.right:\n k = path[:]\n k.append(node.right.val)\n dq.append((node.right, d + node.right.val, k))\n return ans\n","sub_path":"offer/NO.34/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"270617133","text":"import json\nimport locale\nimport datetime\nimport time\nimport random\nimport logging\nimport web\n\nimport random\nimport string\n\nfrom utils.decorator import authentication\nfrom base import Base, NBase\nfrom dbapi.ComputingClient import ComputingClient\nfrom dbapi.CMDB import CMDB\n\ndef randomStr(size = 6, chars=string.ascii_letters + string.digits):\n return ''.join(random.choice(chars) for x in range(size))\n\nclass PageAnalyze(Base):\n def getAnalyze(self, mode):\n logging.debug('PageAnalyze.getAnalyze')\n params = self.validateParams()\n analyze = self.dcli.PageAnalyze(params, params['collector'], params['service'])\n if not analyze:\n analyze = []\n logging.debug(analyze)\n if mode == 'hit':\n ret = {'time': [], 'data': [[], []]}\n for row in analyze:\n m = int(row['start_time'])\n ret['time'].append(datetime.datetime.fromtimestamp(m).strftime(self.fmt))\n ret['data'][0].append(row['available_hits'])\n ret['data'][1].append(row['unavailable_hits'])\n elif mode == 'time':\n ret = {'time': [], 'data': [[], [], [], [], []]}\n for row in analyze:\n m = int(row['start_time'])\n 
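# convert the epoch-seconds start time into a display string for the x-axis\n # (self.fmt is presumably a strftime format defined on the Base class)\n 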
ret['time'].append(datetime.datetime.fromtimestamp(m).strftime(self.fmt))\n ret['data'][0].append(float(row['tcp_retry_time']))\n ret['data'][1].append(float(row['tcp_connection_time']))\n ret['data'][2].append(float(row.get('http_network_time', 0)))\n ret['data'][3].append(float(row['http_server_time']))\n ret['data'][4].append(float(row['http_download_time']))\n elif mode == 'all':\n ret = {'time': [], 'data': [[], [], [], [], [], [], [], []], 'line': []}\n cmdb = CMDB()\n service_info = cmdb.GetServiceInfo(params['collector'], params['service'])\n if not service_info:\n service_info = {}\n ret['line'].append(service_info.get('performance_critical', 0))\n ret['line'].append(service_info.get('performance_warning', 0))\n for row in analyze:\n m = int(row['start_time'])\n ret['time'].append(datetime.datetime.fromtimestamp(m).strftime(self.fmt))\n ret['data'][0].append(float(row['tcp_retry_time']))\n ret['data'][1].append(float(row['tcp_connection_time']))\n ret['data'][2].append(float(row.get('http_network_time', 0)))\n ret['data'][3].append(float(row['http_server_time']))\n ret['data'][4].append(float(row['http_download_time']))\n ret['data'][5].append(row['available_hits'])\n ret['data'][6].append(row['unavailable_hits'])\n ret['data'][7].append(float(row['availability']))\n return ret\n\nclass PageHit(PageAnalyze):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('PageHit.POST')\n web.header('Content-Type', 'application/json')\n #param = self.validateParams()\n ret = self.getAnalyze('hit')\n #now = datetime.datetime.now()\n #ret = {'time': [], 'data': []}\n #ret['time'] = [t.strftime(locale.nl_langinfo(locale.T_FMT)) for t in [now - datetime.timedelta(minutes = i) for i in range(10)]]\n #ret['data'] = [[random.randrange(100) for i in range(10)] for i in range(2)]\n return json.dumps(ret)\n\nclass PageTime(PageAnalyze):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('PageTime.POST')\n web.header('Content-Type', 'application/json')\n #param = self.validateParams()\n ret = self.getAnalyze('time')\n #now = datetime.datetime.now()\n #ret = {'time': [], 'data': []}\n #ret['time'] = [t.strftime(locale.nl_langinfo(locale.T_FMT)) for t in [now - datetime.timedelta(minutes = i) for i in range(10)]]\n #ret['data'] = [[random.randrange(100) for i in range(10)] for i in range(5)]\n return json.dumps(ret)\n\nclass PageAnalytics(PageAnalyze):\n @authentication\n def POST(self, **kwagrs):\n logging.debug('PageTime.POST')\n web.header('Content-Type', 'application/json')\n ret = self.getAnalyze('all')\n return json.dumps(ret)\n\nclass PageSummary(NBase):\n def __init__(self):\n logging.debug('PageSummary.__init__')\n self.dcli = ComputingClient()\n\n @authentication\n def POST(self, **kwargs):\n logging.debug('PageSummary.POST')\n web.header('Content-Type', 'application/json')\n param = self.validateParams()\n perf_dict = self.dcli.PageSummary(param['start_time'], param['end_time'], param['collector'])\n logging.debug(perf_dict)\n ret = []\n for service in perf_dict:\n logging.info(service)\n tmp = {}\n tmp['service_name'] = service['service_name']\n tmp['availability'] = float(service['availability'])\n tmp['download_time'] = float(service['http_download_time'])\n tmp['server_time'] = float(service['http_server_time'])\n tmp['total_hits'] = service['total_hits']\n tmp['download_size'] = float(service['http_download_size'])\n tmp['event_count'] = service['event_count']\n ret.append(tmp)\n return 
json.dumps(ret)\n","sub_path":"backend/handlers/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":5358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"176849951","text":"#!/usr/bin/python\n\n# Imported Modules:\n\nfrom __future__ import print_function\nfrom config import os, params\nfrom flask import Flask\nfrom copy import deepcopy\nfrom gitbot import main, printdebug\n\n# Setting Up:\n\nglobal working\nworking = False\n\napp = Flask(__name__) # Creates the application\n\n# Running The Main Function:\n\n@app.route(\"/\", methods=['GET', 'POST']) # Registers the script to run on hook or visit\ndef run():\n printdebug(params, \"Initializing...\")\n newparams = deepcopy(params)\n printdebug(params, \"Using parameters: \"+repr(newparams))\n global working\n if working: # Prevents two scripts running at the same time\n printdebug(newparams, \" Failed due to concurrent boot.\")\n elif not newparams[\"orgname\"]:\n printdebug(newparams, \" Failed due to abscence of organization name. Set the BOT_ORGNAME environment variable to the name of the organization.\")\n elif not newparams[\"token\"]:\n printdebug(newparams, \" Failed due to abscence of login token. Set the BOT_TOKEN environment variable to the bot's login token.\")\n else:\n working = True\n try:\n openpulls, merges = main(newparams) # Runs the script and extracts the parameters\n finally:\n working = False\n printdebug(newparams, \"Pull Requests: \"+repr(openpulls)+\"\\nMerges: \"+repr(merges)) # Displays a message with the output parameters\n return \"GitHub pull requests succesfully analyzed. Merged \"+str(len(merges))+\" pull requests.\" # Returns a summary string for website visitors\n return \"Failed to boot up pull request analyzer. 
Check logs for more information.\"\n","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"517842859","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom mock import patch, Mock\nfrom prediction_model.pytorch.model import IntentPytorch\nfrom proto import rest_api_pb2\nimport functools\n\nDOC_1 = rest_api_pb2.PredictItem()\nDOC_1.content = 'doc 1'\nDOC_1.id = 'id 1'\n\n\nDOC_2 = rest_api_pb2.PredictItem()\nDOC_2.content = 'doc 2'\nDOC_2.id = 'id 2'\n\n\nDOC_3 = rest_api_pb2.PredictItem()\nDOC_3.content = 'doc 3'\nDOC_3.id = 'id 3'\n\n\nclass TestIntentPytorch(unittest.TestCase):\n @patch('prediction_model.pytorch.model.IntentPytorch.__init__', return_value=None)\n def test_rate_doc1(self, mock_model):\n '''test result have PIRCE intent'''\n model = IntentPytorch()\n with patch.object(model, \n \"_IntentPytorch__rate_mentions_proba\", \n return_value = [[(rest_api_pb2.IntentType.PRICE, 0.75)]]):\n\n result = model.predict(DOC_1)\n \n self.assertEqual(result[0].list_predict[0].intent_type, rest_api_pb2.IntentType.PRICE)\n","sub_path":"Thanos/intent/main/prediction_model/pytorch/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"497942781","text":"#!/usr/bin/env python3\n\n#Author: Raphael Wang \n#Author ID: 1000\n#Date Created: 2020/12/28\n\nimport sys\n\ntimer = int(sys.argv[1])\n\nwhile timer != 0:\n print(str(timer))\n timer = timer - 1\nprint('blast off!')\n","sub_path":"lab2/lab2f.py","file_name":"lab2f.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"648538672","text":"from collections import defaultdict, deque\nimport sys\n\nn, m = map(int, input().split())\nc = 0\nnameToInt = {}\ngraph = defaultdict(list)\nindeg = defaultdict(int)\noutdeg = defaultdict(int)\ngood = [True for _ in range(n)]\nfor _ in range(n):\n stuff = input().split()\n name = stuff[0]\n if name not in nameToInt:\n nameToInt[name] = c\n c += 1\n a = nameToInt[name]\n edges = stuff[2:]\n for nxt in edges:\n if nxt not in nameToInt:\n nameToInt[nxt] = c\n c += 1\n b = nameToInt[nxt]\n if a != b:\n graph[a].append(b)\n graph[b].append(a)\n outdeg[a] += 1\n indeg[b] += 1\n good[a] = False\n good[b] = False\n start = a\nassert c == n\n\ndef solve():\n if good.count(True) == n:\n return 'FALSE ALARM'\n p1 = 0\n m1 = 0\n for i in range(c):\n deg = outdeg[i] - indeg[i]\n if 1 < deg or -1 > deg:\n return False\n if deg == -1:\n m1 += 1\n elif deg == 1:\n p1 += 1\n if p1 == m1 == 0 or p1 == m1 == 1:\n visited = set()\n q = deque([start])\n while len(q) > 0:\n cur = q.popleft()\n if cur in visited:\n continue\n visited.add(cur)\n for nxt in graph[cur]:\n q.append(nxt)\n return len(visited) == good.count(False)\n return False\n\nx = solve()\nif x == False:\n print('IMPOSSIBLE')\nelif x == True:\n print('POSSIBLE')\nelse:\n print(x)\n","sub_path":"kattis/grandopening.py","file_name":"grandopening.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"226741108","text":"from unittest import TestCase\ntry:\n from unittest import mock\nexcept ImportError:\n import mock\nfrom tox_ansible.tox_test_case import ToxTestCase\nfrom tox_ansible.ansible.role import Role\nfrom 
tox_ansible.ansible.scenario import Scenario\nfrom tox_ansible.options import Options\nfrom tox_ansible.tox_helper import Tox\n\n\nDOCKER_DRIVER = {\"driver\": {\"name\": \"docker\"}}\nOPENSTACK_DRIVER = {\"driver\": {\"name\": \"openstack\"}}\nBASE_DEPS = [\"molecule\", \"ansible-lint\", \"yamllint\", \"flake8\", \"pytest\",\n \"testinfra\"]\n\n\n@mock.patch.object(Scenario, \"config\", new_callable=mock.PropertyMock,\n return_value={})\nclass TestToxTestCase(TestCase):\n @mock.patch.object(Options, \"get_global_opts\", return_value=[])\n @mock.patch.object(Tox, \"posargs\", new_callable=mock.PropertyMock,\n return_value=[])\n def test_case_is_simple(self, pos_mock, opts_mock, config_mock):\n t = ToxTestCase(self.role, self.scenario)\n self.assertEqual(t.get_name(), \"derp-my_test\")\n self.assertEqual(t.get_working_dir(), \"roles/derp\")\n self.assertEqual(t.get_dependencies(), BASE_DEPS + [\"ansible\"])\n cmds = [[\"molecule\", \"test\", \"-s\", self.scenario.name]]\n self.assertEqual(t.get_commands(self.opts), cmds)\n self.assertIsNone(t.get_basepython())\n\n @mock.patch.object(Options, \"get_global_opts\", return_value=[\"-c\", \"derp\"])\n @mock.patch.object(Tox, \"posargs\", new_callable=mock.PropertyMock,\n return_value=[])\n def test_case_has_global_opts(self, pos_mock, opts_mock, config_mock):\n t = ToxTestCase(self.role, self.scenario)\n cmds = [[\"molecule\", \"-c\", \"derp\", \"test\", \"-s\", self.scenario.name]]\n self.assertEqual(t.get_commands(self.opts), cmds)\n\n def test_case_expand_ansible(self, config_mock):\n t = ToxTestCase(self.role, self.scenario)\n ts = t.expand_ansible(\"2.7\")\n self.assertEqual(ts.ansible, \"2.7\")\n self.assertEqual(ts.get_name(), \"ansible27-derp-my_test\")\n self.assertEqual(ts.get_dependencies(), BASE_DEPS + [\"ansible==2.7.*\"])\n self.assertIsNone(ts.get_basepython())\n\n def test_case_expand_python(self, config_mock):\n t = ToxTestCase(self.role, self.scenario)\n ts = t.expand_python(\"4.1\")\n self.assertEqual(ts.python, \"4.1\")\n self.assertEqual(ts.get_name(), \"py41-derp-my_test\")\n self.assertEqual(ts.get_basepython(), \"python4.1\")\n\n def test_case_expand_twice(self, config_mock):\n t = ToxTestCase(self.role, self.scenario)\n t1 = t.expand_python(\"4.1\")\n t2 = t1.expand_ansible(\"1.0\")\n self.assertEqual(t2.get_name(), \"ansible10-py41-derp-my_test\")\n\n @mock.patch.object(Scenario, \"driver\", new_callable=mock.PropertyMock,\n return_value=\"docker\")\n def test_case_includes_docker_deps(self, driver_mock, config_mock):\n s = Scenario(\"moelcule/my_test\")\n t = ToxTestCase(self.role, s)\n self.assertIn(\"docker\", t.get_dependencies())\n\n @mock.patch.object(Scenario, \"driver\", new_callable=mock.PropertyMock,\n return_value=\"openstack\")\n def test_case_includes_openstack_deps(self, driver_mock, config_mock):\n s = Scenario(\"molecule/osp_test\")\n t = ToxTestCase(self.role, s)\n self.assertIn(\"openstacksdk\", t.get_dependencies())\n\n @classmethod\n def setUp(cls):\n cls.role = Role(\"roles/derp\")\n cls.scenario = Scenario(\"molecule/my_test\")\n cls.opts = Options(mock.Mock())\n","sub_path":"tests/test_tox_test_case.py","file_name":"test_tox_test_case.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"107042691","text":"import tensorflow as tf\nfrom nn_architecture import decoder_g, map_xy_to_z\n\n\ndef predictNP(x_target\n , params\n , x_context = None, y_context = None #optionally supply context\n , epsilon = None 
#optionally supply randomness\n , n_draws = 1\n , noise_std = 0.05):\n\n # create tensors\n x_star = tf.constant(x_target, dtype=tf.float32)\n\n if y_context is not None:\n y = tf.constant(y_context, dtype=tf.float32)\n\n if x_context is not None:\n x = tf.constant(x_context, dtype=tf.float32)\n\n\n #if no epsilon provided, draw from standard normal with shape (n_draws, dim_z)\n if epsilon is None:\n epsilon = tf.random_normal((n_draws, params.dim_z))\n\n # if we're doing posterior prediction (i.e. have context)\n if x_context is not None and y_context is not None:\n #map (x_i, y_i) -> z\n z_params = map_xy_to_z(x = x, y = y, params = params)\n\n # use z params to adjust epsilon\n z_samples = tf.multiply(epsilon, z_params.sigma)\n z_samples = tf.add(z_samples, z_params.mu)\n else:\n print('epsilon')\n z_samples = epsilon\n\n # use decoder_g (x_i, z_i) -> y_i\n y_params = decoder_g(z_samples, x_star, params = params, noise_std = noise_std)\n\n return y_params\n","sub_path":"experiments/valerie/predictNP.py","file_name":"predictNP.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"241590963","text":"from .. import ma\nfrom ..models import User\nfrom marshmallow import fields, pre_dump\n\n\nclass _UserSchema(ma.Schema):\n email = fields.Email()\n token = fields.String()\n username = fields.String()\n bio = fields.String()\n image = fields.URL()\n following = fields.Boolean(default=None)\n\n @pre_dump(pass_many=False)\n def fill_following(self, data):\n logged_user = User.get_logged_user(raise_exceptipn=False)\n if logged_user:\n data.following = data.is_following_by(logged_user)\n return data\n\n\nclass UserSchema(ma.Schema):\n user = fields.Nested(\n _UserSchema, only=[\"email\", \"token\", \"username\", \"bio\", \"image\"])\n\n\nclass ProfileSchema(ma.Schema):\n profile = fields.Nested(\n _UserSchema, only=[\"username\", \"bio\", \"image\", \"following\"])\n\n\nclass _ArticleSchema(ma.Schema):\n slug = fields.String()\n title = fields.String()\n description = fields.String()\n body = fields.String()\n createdAt = fields.DateTime(attribute='created_at')\n updatedAt = fields.DateTime(attribute='updated_at')\n favorited = fields.Boolean(default=None)\n favoritesCount = fields.Integer(default=0)\n tagList = fields.List(fields.String())\n author = fields.Nested(\n _UserSchema, only=[\"username\", \"bio\", \"image\", \"following\"])\n\n @pre_dump(pass_many=False)\n def fill_favorited(self, data):\n logged_user = User.get_logged_user(raise_exceptipn=False)\n if logged_user:\n data.favorited = data.is_favorited_by(logged_user)\n return data\n\n\nclass ArticleSchema(ma.Schema):\n article = fields.Nested(_ArticleSchema)\n\n\nclass ArticlesSchema(ma.Schema):\n articles = fields.Nested(_ArticleSchema, many=True)\n articlesCount = fields.Integer(default=0)\n\n @pre_dump(pass_many=False)\n def calucate_count(self, data):\n if 'articlesCount' not in data:\n data['articlesCount'] = len(data['articles'])\n return data\n\n\nclass TagsSchema(ma.Schema):\n tags = fields.List(fields.String())\n\n\nclass _CommentSchema(ma.Schema):\n id = fields.Integer()\n body = fields.String()\n createdAt = fields.DateTime(attribute='created_at')\n updatedAt = fields.DateTime(attribute='updated_at')\n author = fields.Nested(\n _UserSchema, only=[\"username\", \"bio\", \"image\", \"following\"])\n\n\nclass CommentSchema(ma.Schema):\n comment = fields.Nested(_CommentSchema)\n\n\nclass CommentsSchema(ma.Schema):\n comments = 
fields.Nested(_CommentSchema, many=True)\n\n\nuser_schema = UserSchema()\nprofile_schema = ProfileSchema()\narticle_schema = ArticleSchema()\narticles_schema = ArticlesSchema()\ntags_schema = TagsSchema()\ncomment_schema = CommentSchema()\ncomments_schema = CommentsSchema()\n","sub_path":"conduit/views/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"268446088","text":"\"\"\"Define the MyQ API.\"\"\"\nimport logging\n\nfrom aiohttp import ClientSession\nfrom aiohttp.client_exceptions import ClientError\n\nfrom .device import MyQDevice, MyQDoorDevice, MyQLightDevice\nfrom .errors import MyQError, RequestError, UnsupportedBrandError\n\n_LOGGER = logging.getLogger(__name__)\n\nAPI_BASE = 'https://myqexternal.myqdevice.com'\nLOGIN_ENDPOINT = \"api/v4/User/Validate\"\nDEVICE_LIST_ENDPOINT = \"api/v4/UserDeviceDetails/Get\"\n\nDEFAULT_TIMEOUT = 10\nDEFAULT_USER_AGENT = \"Chamberlain/3773 (iPhone; iOS 11.0.3; Scale/2.00)\"\n\nBRAND_MAPPINGS = {\n 'liftmaster': {\n 'app_id':\n 'Vj8pQggXLhLy0WHahglCD4N1nAkkXQtGYpq2HrHD7H1nvmbT55KqtN6RSF4ILB/i'\n },\n 'chamberlain': {\n 'app_id':\n 'OA9I/hgmPHFp9RYKJqCKfwnhh28uqLJzZ9KOJf1DXoo8N2XAaVX6A1wcLYyWsnnv'\n },\n 'craftsman': {\n 'app_id':\n 'YmiMRRS1juXdSd0KWsuKtHmQvh5RftEp5iewHdCvsNB77FnQbY+vjCVn2nMdIeN8'\n },\n 'merlin': {\n 'app_id':\n '3004cac4e920426c823fa6c2ecf0cc28ef7d4a7b74b6470f8f0d94d6c39eb718'\n }\n}\n\n\nclass API:\n \"\"\"Define a class for interacting with the MyQ iOS App API.\"\"\"\n\n def __init__(self, brand: str, websession: ClientSession) -> None:\n \"\"\"Initialize the API object.\"\"\"\n if brand not in BRAND_MAPPINGS:\n raise UnsupportedBrandError('Unknown brand: {0}'.format(brand))\n\n self._brand = brand\n self._security_token = None\n self._websession = websession\n\n async def _request(\n self,\n method: str,\n endpoint: str,\n *,\n headers: dict = None,\n params: dict = None,\n data: dict = None,\n json: dict = None,\n **kwargs) -> dict:\n \"\"\"Make a request.\"\"\"\n url = '{0}/{1}'.format(API_BASE, endpoint)\n\n if not headers:\n headers = {}\n if self._security_token:\n headers['SecurityToken'] = self._security_token\n headers.update({\n 'MyQApplicationId': BRAND_MAPPINGS[self._brand]['app_id'],\n 'User-Agent': DEFAULT_USER_AGENT,\n })\n\n try:\n async with self._websession.request(\n method, url, headers=headers, params=params, data=data,\n json=json, timeout=DEFAULT_TIMEOUT, **kwargs) as resp:\n resp.raise_for_status()\n return await resp.json(content_type=None)\n except ClientError as err:\n raise RequestError(\n 'Error requesting data from {0}: {1}'.format(endpoint, err))\n\n async def authenticate(self, username: str, password: str) -> None:\n \"\"\"Authenticate against the API.\"\"\"\n login_resp = await self._request(\n 'post',\n LOGIN_ENDPOINT,\n json={\n 'username': username,\n 'password': password\n })\n\n if int(login_resp['ReturnCode']) != 0:\n raise MyQError(login_resp['ErrorMessage'])\n\n self._security_token = login_resp['SecurityToken']\n\n async def _get_devices(self, device_class: 'MyQDevice' = None) -> list:\n \"\"\"Get a list of all devices associated with the account.\n Optionally filtered by class.\n \"\"\"\n devices_resp = await self._request('get', DEVICE_LIST_ENDPOINT)\n return [\n MyQDevice.get_device(device, self._brand, self._request)\n for device in devices_resp['Devices'] if not device_class\n or MyQDevice.get_device_class(device) == device_class\n ]\n\n async def 
get_devices(self, covers_only: bool = True) -> list:\n \"\"\"Get a list of all devices associated with the account.\"\"\"\n return await self._get_devices(MyQDoorDevice if covers_only else None)\n\n async def get_covers(self) -> list:\n \"\"\"Get a list of all covers associated with the account.\"\"\"\n return await self._get_devices(MyQDoorDevice)\n\n async def get_lights(self) -> list:\n \"\"\"Get a list of all lights associated with the account.\"\"\"\n return await self._get_devices(MyQLightDevice)\n\n\nasync def login(\n username: str, password: str, brand: str,\n websession: ClientSession) -> API:\n \"\"\"Log in to the API.\"\"\"\n api = API(brand, websession)\n await api.authenticate(username, password)\n return api\n","sub_path":"pymyq/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"524946723","text":"#!/usr/bin/env python3\n\ndef can_be_equal(c1, c2):\n return c1 == '?' or c2 == '?' or c1 == c2\n\ndef can_be_lower(c1, c2):\n if c1 == '?': return c2 != '0'\n if c2 == '?': return c1 != '9'\n return c1 < c2\n\ndef make_lower(c1, c2):\n assert can_be_lower(c1, c2)\n if c1 == '?' and c2 == '?': return '0', '1'\n if c1 == '?': return chr(ord(c2) - 1), c2\n if c2 == '?': return c1, chr(ord(c1) + 1)\n return c1, c2\n\ndef make_max(c):\n if c == '?': return '9'\n else: return c\n\ndef make_min(c):\n if c == '?': return '0'\n else: return c\n\ndef assume_first_lower_or_eq(s1, s2, same):\n p1 = p2 = ''\n l = len(s1)\n i = 0\n while i < same and can_be_equal(s1[i], s2[i]):\n if s1[i] == '?' and s2[i] == '?':\n p1 += '0'\n p2 += '0'\n elif s1[i] == '?':\n p1 += s2[i]\n p2 += s2[i]\n else:\n p1 += s1[i]\n p2 += s1[i]\n i += 1\n if i < same: return None, None\n if i == l: return p1, p2\n if not can_be_lower(s1[i], s2[i]): return None, None\n c1, c2 = make_lower(s1[i], s2[i])\n p1 += c1\n p2 += c2\n i += 1\n while i < l:\n p1 += make_max(s1[i])\n p2 += make_min(s2[i])\n i += 1\n return p1, p2\n\ndef select_better(a1, a2, b1, b2):\n if a1 is None: return b1, b2\n if b1 is None: return a1, a2\n diff_a = abs(int(a1) - int(a2))\n diff_b = abs(int(b1) - int(b2))\n if diff_a < diff_b: return a1, a2\n if diff_a > diff_b: return b1, b2\n if int(a1) < int(b1): return a1, a2\n if int(a1) > int(b1): return b1, b2\n if int(a2) < int(b2): return a1, a2\n return b1, b2\n\ndef matches(s, pattern):\n if len(s) != len(pattern): return False\n for i in range(len(pattern)):\n if s[i] != pattern[i] and pattern[i] != '?': return False\n return True\n\nT = int(input())\nfor t in range(T):\n s1, s2 = input().split()\n while len(s1) < len(s2):\n s1 = '0' + s1\n while len(s2) < len(s1):\n s2 = '0' + s2\n if False:\n cand = [str(x).rjust(len(s1), '0') for x in range(10**len(s1))]\n cand1 = [x for x in cand if matches(x, s1)]\n cand2 = [x for x in cand if matches(x, s2)]\n a1, a2 = None, None\n for t1 in cand1:\n for t2 in cand2:\n a1, a2 = select_better(a1, a2, t1, t2)\n else:\n a1, a2 = None, None\n for i in range(len(s1)+1):\n b1, b2 = assume_first_lower_or_eq(s1, s2, i)\n a1, a2 = select_better(a1, a2, b1, b2)\n b2, b1 = assume_first_lower_or_eq(s2, s1, i)\n a1, a2 = select_better(a1, a2, b1, b2)\n print('Case #{}: {} {}'.format(t+1, a1, a2))\n","sub_path":"solutions_5695413893988352_0/Python/k21/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"303469921","text":"import 
tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\ntfk = tf.keras\ntfkl = tf.keras.layers\ntfd = tfp.distributions\ntfpl = tfp.layers\n\n# Load data.\nn = int(1e3)\nscale_tril = np.array([[1.6180, 0.],\n [-2.7183, 3.1416]]).astype(np.float32)\nx = tfd.Normal(loc=0, scale=1).sample([n, 2])\neps = tfd.Normal(loc=0, scale=0.01).sample([n, 2])\ny = tf.matmul(x, scale_tril) + eps\n\n# Create model.\nd = tf.dimension_value(y.shape[-1])\nmodel = tfk.Sequential([\n tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(d), input_shape=(2,)),\n tfpl.MultivariateNormalTriL(d),\n])\n\n# Fit.\nmodel.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),\n loss=lambda y, model: -model.log_prob(y),\n metrics=[])\nbatch_size = 100\nmodel.fit(x, y,\n batch_size=batch_size,\n epochs=1,\n # epochs=500,\n steps_per_epoch=n // batch_size,\n verbose=False,\n shuffle=True)\n# print(x.shape, y.shape)\n# print([w.shape for w in model.get_weights()])\n# print(model(y[:1]))\nprint(model.predict(y[:1], steps=1))\nprint()\nfor layer in model.layers:\n\tprint(layer.output)\n# ==> [[ 1.61842895e+00 1.34138885e-04]\n# [ -2.71818233e+00 3.14186454e+00]]\n# model.summary()\n\n\n\n\n\n","sub_path":"test_stuff.py","file_name":"test_stuff.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"570826165","text":"class Solution:\n def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:\n m, n = len(dungeon), len(dungeon[0])\n dp = [1000000000 for i in range(n+1)]\n dp[n] = 1\n i = m-1\n for j in range(n-1,-1,-1):\n minn = min(dp[j],dp[j+1])\n dp[j] = max(minn-dungeon[i][j],1)\n dp[n] = 100000000\n for i in range(m-2,-1,-1):\n for j in range(n-1,-1,-1):\n minn = min(dp[j],dp[j+1])\n dp[j] = max(minn-dungeon[i][j],1)\n return dp[0]","sub_path":"Codes/Daily-Practice/t174.py","file_name":"t174.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"233430559","text":"#!/usr/bin/env python\n\nimport yaml\nimport IPython\nimport ROOT\nimport DMesonJetUtils\nimport DMesonJetCompare\n\nglobalList = []\n\ndef ResolutionComparison(config):\n fname = \"{0}/{1}/{2}.root\".format(config[\"input_path\"], config[\"train\"], config[\"name\"])\n file = ROOT.TFile(fname)\n if not file or file.IsZombie():\n print(\"Could not open file {0}\".format(fname))\n exit(1)\n\n spectrumName = \"JetPtSpectrum_DPt_30\"\n jetName = \"Jet_AKTChargedR040_pt_scheme\"\n dmesonName = \"D0_D0toKpiCuts\"\n prefix = \"Prompt_{}_{}_{}\".format(dmesonName, jetName, spectrumName)\n\n pt_lim = [(5, 6), (8, 10), (20, 30)]\n histos = []\n for (minJetPt, maxJetPt) in pt_lim:\n resolutionName = \"{0}/DetectorResponse/{0}_DetectorResponse_{1}_{2}\".format(prefix, minJetPt * 10, maxJetPt * 10)\n h = DMesonJetUtils.GetObject(file, resolutionName)\n h.SetTitle(\"{} < #it{{p}}_{{T,gen jet}}^{{ch}} < {} GeV/#it{{c}}\".format(minJetPt, maxJetPt))\n globalList.append(h)\n histos.append(h)\n\n cname = \"ResolutionVsJetPt_Paper\"\n comp = DMesonJetCompare.DMesonJetCompare(cname)\n comp.fDoSpectraPlot = \"lineary\"\n comp.fDoRatioPlot = None\n comp.fMarkerSize = 1.5\n comp.fX1LegSpectrum = 0.14\n comp.fX2LegSpectrum = 0.41\n comp.fY1LegSpectrum = 0.53\n comp.fLinUpperSpace = 0.50\n comp.fLegLineHeight = 0.075\n comp.fColors = [ROOT.kRed + 2, ROOT.kBlue + 2, ROOT.kGreen + 2]\n comp.fMarkers = 
[ROOT.kOpenCircle, ROOT.kOpenSquare, ROOT.kOpenDiamond]\n r = comp.CompareSpectra(histos[0], histos[1:])\n histos[2].SetMarkerSize(2.2)\n for obj in r:\n globalList.append(obj)\n\n canvas = comp.fCanvasSpectra\n canvas.SetTicks(1, 1)\n canvas.SetLeftMargin(0.13)\n canvas.SetRightMargin(0.05)\n canvas.SetTopMargin(0.05)\n canvas.SetBottomMargin(0.15)\n canvas.cd()\n\n h = comp.fMainHistogram\n\n h.GetYaxis().SetTitle(\"Probability Density\")\n #h.GetXaxis().SetTitle(\"(#it{p}_{T,det jet}^{ch} #font[122]{-} #it{p}_{T,gen jet}^{ch}) / #it{p}_{T,gen jet}^{ch}\")\n h.GetXaxis().SetTitle(\"#Delta_{#it{p}_{T}}\")\n h.GetXaxis().SetTitleFont(43)\n h.GetXaxis().SetTitleSize(30)\n h.GetXaxis().SetTitleOffset(1.0)\n h.GetXaxis().SetLabelFont(43)\n h.GetXaxis().SetLabelSize(22)\n h.GetXaxis().SetLabelOffset(0.02)\n h.GetXaxis().SetRangeUser(-0.6, 0.6)\n h.GetYaxis().SetTitleFont(43)\n h.GetYaxis().SetTitleSize(26)\n h.GetYaxis().SetLabelFont(43)\n h.GetYaxis().SetLabelSize(22)\n h.GetYaxis().SetTitleOffset(0.9)\n h.GetYaxis().SetRangeUser(0, 14)\n\n paveALICE = ROOT.TPaveText(0.14, 0.62, 0.53, 0.95, \"NB NDC\")\n globalList.append(paveALICE)\n paveALICE.SetBorderSize(0)\n paveALICE.SetFillStyle(0)\n paveALICE.SetTextFont(43)\n paveALICE.SetTextSize(21)\n paveALICE.SetTextAlign(13)\n paveALICE.AddText(\"ALICE PYTHIA 6\")\n paveALICE.AddText(\"pp, #sqrt{#it{s}} = 7 TeV\")\n paveALICE.AddText(\"Prompt D^{0} #rightarrow K^{#font[122]{-}}#pi^{+}\")\n paveALICE.AddText(\"and charge conj.\")\n paveALICE.AddText(\"#it{p}_{T,D} > 3 GeV/#it{c}\")\n paveALICE.Draw()\n \n paveALICE = ROOT.TPaveText(0.65, 0.75, 0.90, 0.95, \"NB NDC\")\n globalList.append(paveALICE)\n paveALICE.SetBorderSize(0)\n paveALICE.SetFillStyle(0)\n paveALICE.SetTextFont(43)\n paveALICE.SetTextSize(21)\n paveALICE.SetTextAlign(13)\n paveALICE.AddText(\"Charged Jets\")\n paveALICE.AddText(\"Anti-#it{k}_{T}, #it{R} = 0.4\")\n paveALICE.AddText(\"|#it{#eta}_{jet}| < 0.5\")\n paveALICE.Draw()\n\n return canvas\n\ndef main():\n ROOT.TH1.AddDirectory(False)\n ROOT.gStyle.SetOptTitle(0)\n ROOT.gStyle.SetOptStat(0)\n\n f = open(\"LHC15i2response_Train1399_efficiency.yaml\", 'r')\n config = yaml.load(f)\n f.close()\n\n canvas = ResolutionComparison(config)\n canvas.SaveAs(\"{0}/ResolutionVsJetPt_Paper.pdf\".format(config[\"input_path\"]))\n canvas.SaveAs(\"{0}/ResolutionVsJetPt_Paper.C\".format(config[\"input_path\"]))\n\n\nif __name__ == '__main__':\n\n main()\n\n IPython.embed()\n","sub_path":"DMesonJetAnalysis/ResolutionVsJetPt_Paper.py","file_name":"ResolutionVsJetPt_Paper.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"124876065","text":"import logging\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom http_request_randomizer.requests.parsers.UrlParser import UrlParser\nfrom http_request_randomizer.requests.proxy.ProxyObject import ProxyObject, AnonymityLevel, Protocol\n\nlogger = logging.getLogger(__name__)\n__author__ = 'pgaref'\n\n\nclass FreeProxyParser(UrlParser):\n def __init__(self, id, web_url, timeout=None):\n UrlParser.__init__(self, id=id, web_url=web_url, timeout=timeout)\n\n def parse_proxyList(self):\n curr_proxy_list = []\n try:\n response = requests.get(self.get_url(), timeout=self.timeout)\n if not response.ok:\n logger.warning(\"Proxy Provider url failed: {}\".format(self.get_url()))\n return []\n\n content = response.content\n soup = BeautifulSoup(content, \"html.parser\")\n #table = soup.find(\"table\", attrs={\"id\": 
\"proxylisttable\"})\n table = soup.find(\"table\")\n\n\n # The first tr contains the field names.\n headings = [th.get_text() for th in table.find(\"tr\").find_all(\"th\")]\n\n datasets = []\n for row in table.find_all(\"tr\")[1:-1]:\n dataset = zip(headings, (td.get_text() for td in row.find_all(\"td\")))\n if dataset:\n datasets.append(dataset)\n\n for dataset in datasets:\n proxy_obj = self.create_proxy_object(dataset)\n # Make sure it is a Valid Proxy Address\n if proxy_obj is not None and UrlParser.valid_ip_port(proxy_obj.get_address()):\n curr_proxy_list.append(proxy_obj)\n else:\n logger.debug(\"Proxy Invalid: {}\".format(dataset))\n except AttributeError as e:\n logger.error(\"Provider {0} failed with Attribute error: {1}\".format(self.id, e))\n except KeyError as e:\n logger.error(\"Provider {0} failed with Key error: {1}\".format(self.id, e))\n except Exception as e:\n logger.error(\"Provider {0} failed with Unknown error: {1}\".format(self.id, e))\n finally:\n return curr_proxy_list\n\n def create_proxy_object(self, dataset):\n # Check Field[0] for tags and field[1] for values!\n ip = \"\"\n port = None\n anonymity = AnonymityLevel.UNKNOWN\n country = None\n protocols = []\n for field in dataset:\n if field[0] == 'IP Address':\n # Make sure it is a Valid IP\n ip = field[1].strip() # String strip()\n # Make sure it is a Valid IP\n if not UrlParser.valid_ip(ip):\n logger.debug(\"IP with Invalid format: {}\".format(ip))\n return None\n elif field[0] == 'Port':\n port = field[1].strip() # String strip()\n elif field[0] == 'Anonymity':\n anonymity = AnonymityLevel.get(field[1].strip()) # String strip()\n elif field[0] == 'Country':\n country = field[1].strip() # String strip()\n elif field[0] == 'Https':\n if field[1].strip().lower() == 'yes': protocols.extend([Protocol.HTTP, Protocol.HTTPS])\n elif field[1].strip().lower() == 'no': protocols.append(Protocol.HTTP)\n return ProxyObject(source=self.id, ip=ip, port=port, anonymity_level=anonymity, country=country, protocols=protocols)\n\n def __str__(self):\n return \"{0} parser of '{1}' with required bandwidth: '{2}' KBs\" \\\n .format(self.id, self.url, self.minimum_bandwidth_in_KBs)\n","sub_path":"http_request_randomizer/requests/parsers/FreeProxyParser.py","file_name":"FreeProxyParser.py","file_ext":"py","file_size_in_byte":3592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"478315634","text":"from views import student_view\nfrom views import codecooler_view\nfrom views import submission_view\nfrom views import shoutbox_view\nfrom views import event_view\nfrom views import PM_view\nfrom models.submission_model import Submission\nfrom models.assingment_model import Assingment\nfrom controllers import codecooler_controller\nfrom controllers import event_controller\nimport os\n\n\ndef start_controller(user):\n '''\n Welcomes user, shows menu and asks\n to choose a function\n\n Paramaters\n ----------\n canvas = obj of Canvas class\n user = obj of Codecooler class\n\n Returns\n -------\n None\n '''\n choice = ''\n EXIT = 0\n codecooler_view.greet(user)\n event_controller.create_calendar(user.login)\n\n while choice != EXIT:\n student_view.show_menu()\n choice = student_view.choose_function()\n choice = run_chosen_function(choice, user)\n\n\n\ndef run_chosen_function(user_input, user):\n '''\n Runs appropriate functions, based on user choice.\n\n Paramaters\n ----------\n user_input = int\n canvas = obj of Canvas class\n user = obj of Codecooler class\n '''\n if user_input == 
1:\n run_grades_functions(user)\n Assingment.get_coming_assingments()\n elif user_input == 2:\n run_submission_functions(user)\n elif user_input == 3:\n os.system('clear')\n shoutbox_view.show_shoutbox_panel()\n shoutbox_view.enter_message(user.login)\n elif user_input == 4:\n codecooler_controller.edit_profile(user.login)\n elif user_input == 5:\n event_controller.get_calendar(user.login)\n elif user_input == 6:\n os.system('clear')\n codecooler_controller.show_logins()\n receiver = codecooler_controller.get_correct_login()\n os.system('clear')\n PM_view.show_PM_panel(user.login, receiver)\n PM_view.enter_message(user.login, receiver)\n return user_input\n\n\ndef run_grades_functions(user):\n '''\n Handles flow of showing grades related functions.\n\n Paramaters\n ----------\n canvas = obj of Canvas class\n user = obj of Codecooler class\n\n Returns\n -------\n None\n '''\n\n grades_sum, max_grades_sum, amount_of_grades = calculate_grades(user.login)\n student_view.show_grades_info(grades_sum, max_grades_sum, amount_of_grades)\n\n data_rows = []\n\n for assingment in Assingment.assingments:\n assingment_grades = []\n my_grade = 'None'\n for submission in Submission.submissions:\n if submission.title == assingment.title and submission.is_checked == 'True':\n assingment_grades.append(submission.score)\n if submission.user_login == user.login:\n my_grade = submission.score\n data_row = [assingment.title,\n my_grade,\n min(assingment_grades, default=0),\n get_avg(assingment_grades),\n max(assingment_grades, default=0)]\n data_rows.append(data_row)\n\n student_view.print_grades(data_rows)\n\n\ndef run_submission_functions(user):\n '''\n Handles flow of sending submission related functions.\n\n Paramaters\n ----------\n canvas = obj of Canvas class\n user = obj of Codecooler class\n\n Returns\n -------\n None\n '''\n number = student_view.show_assingments(Assingment.assingments)\n chosen_assingment = choose_assingment(number)\n check_if_submitted(chosen_assingment.title, user.login)\n is_graded = check_if_graded(chosen_assingment.title, user.login)\n\n if not is_graded:\n add_submission(chosen_assingment, user)\n student_view.info_submission_added()\n\n\ndef calculate_grades(user_login):\n '''\n Basing on student's submissions calculates its sum of grades,\n and maximum amount of grades, that could have been scored.\n It also sums the number of graded submissions.\n\n Paramaters\n ----------\n assingments = list (Assingment objects)\n submissions = list (Submission objects)\n user_login = str\n\n Returns\n -------\n grades_sum = int\n max_grades_sum = int\n amount_of_grades = int\n '''\n grades_sum = 0\n max_grades_sum = 0\n amount_of_grades = 0\n\n for submission in Submission.submissions:\n if submission.user_login == user_login and submission.is_checked:\n grades_sum += submission.score\n amount_of_grades += 1\n for assingment in Assingment.assingments:\n if assingment.title == submission.title:\n max_grades_sum += assingment.max_grade\n\n return grades_sum, max_grades_sum, amount_of_grades\n\n\ndef choose_assingment(number):\n '''\n Asks user to choose on of possible\n assingment and returns it.\n\n Paramaters\n ----------\n number = int\n assingments = list (assingments from Canvas)\n\n Returns\n -------\n name_of_assingment = str\n '''\n choice = ''\n possible_choices = range(0, len(Assingment.assingments))\n\n while choice not in possible_choices:\n\n try:\n choice = student_view.get_assingment_number()\n except ValueError:\n student_view.error_number()\n\n chosen_assingment = 
Assingment.get_assingment_by_index(choice)\n\n return chosen_assingment\n\n\ndef check_if_submitted(assingment_name, user_login):\n '''\n Based on provided assingment name, checks if Student\n already sent a submission for it and prints info about\n it, if he did.\n\n Paramaters\n ----------\n assingment_name = str (title of assingment)\n submissions = list (submissions from Canvas)\n user_login = str\n\n Returns\n -------\n boolean = returns False if submission for this assingment\n wasn't sent by this Student yes, otherwise returns True.\n '''\n for submission in Submission.submissions:\n if submission.title == assingment_name and submission.user_login == user_login:\n student_view.print_assingment_done()\n submission_view.show_sub(submission)\n\n\ndef check_if_graded(assingment_name, user_name):\n '''\n Returns True if submission was already graded, otherwise returns False\n '''\n for submission in Submission.submissions:\n if submission.title == assingment_name and user_name == submission.user_login and submission.is_checked:\n return True\n elif submission.title == assingment_name and user_name == submission.user_login and not submission.is_checked:\n return False\n\n\ndef add_submission(assingment, user):\n '''\n Adds new submission from Student, for chosen\n assingment.\n\n Paramaters\n ----------\n assingment = obj of Assingment class\n submissions = list (of Submission objects from Canvas)\n user = obj of Codecooler class\n\n Returns\n -------\n new_sub = obj of Submission class\n '''\n answer = student_view.get_answer()\n for submission in Submission.submissions:\n if assingment.title == submission.title and user.login == submission.user_login:\n Submission.submissions.remove(submission)\n\n Submission(user.login, assingment.title, answer)\n\n\ndef count_average_attendance(attendances_list):\n '''\n Counts average attendance of student\n\n Args:\n attendances_list - list with Attendance objects\n\n Returns:\n avg_att - float\n '''\n\n avg_att = 0\n counter = 0\n\n for attendance in attendances_list:\n\n avg_att += attendance.value\n counter += 1\n\n if counter > 0:\n return avg_att / counter * 100\n else:\n return avg_att\n\n\ndef get_avg(int_list):\n '''\n Counts average from ints list\n\n Args:\n int_list - list of ints\n\n Return:\n float\n '''\n if int_list == []:\n return 0\n return sum(int_list) / float(len(int_list))\n\n\ndef get_longest_assingment_len():\n '''\n Gets assignment with longest name\n '''\n return max(Assingment.assingments, default=0)\n","sub_path":"controllers/student_controller.py","file_name":"student_controller.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"384947808","text":"import json\n\nimport torch\nfrom torch.autograd import Variable\nfrom parlai.core.params import ParlaiParser\n\nfrom bots import Questioner, Answerer\nfrom dataloader import ShapesQADataset\nfrom world import QAWorld\n\n\ndef parse_options():\n parser = ParlaiParser()\n pth_file = \"world_best.pth\"\n # pth_file = 'checkpoints/world-07-Nov-2019-15:56:51/world_epoch_02900.pth'\n parser.add_argument(\n \"--load-path\",\n type=str,\n default=pth_file,\n help=\"path to pth file of the world checkpoint\",\n )\n parser.add_argument(\n \"--print-conv\",\n default=False,\n action=\"store_true\",\n help=\"whether to print the conversation between bots or not\",\n )\n parser.add_argument(\n \"--conv-save-path\",\n type=str,\n default=None,\n help=\"whether to print the conversation between 
bots or not\",\n )\n return parser.parse_args()\n\n\ndef load_world_dataset():\n world_dict = torch.load(OPT[\"load_path\"], map_location=torch.device(\"cpu\"))\n world_dict[\"opt\"][\"use_gpu\"] = torch.cuda.is_available()\n dataset = ShapesQADataset(world_dict[\"opt\"])\n questioner = Questioner(world_dict[\"opt\"])\n answerer = Answerer(world_dict[\"opt\"])\n if world_dict[\"opt\"].get(\"use_gpu\"):\n questioner, answerer = questioner.cuda(), answerer.cuda()\n questioner.load_state_dict(world_dict[\"qbot\"])\n answerer.load_state_dict(world_dict[\"abot\"])\n world = QAWorld(world_dict[\"opt\"], questioner, answerer)\n print(\"Loaded world from checkpoint: %s\" % OPT[\"load_path\"])\n print(\"Questioner and Answerer Bots: \")\n print(world.qbot)\n print(world.abot)\n return world, dataset\n\n\ndef run_evaluation(world, dataset):\n world.qbot.eval()\n world.abot.eval()\n first_accuracy = {\"train\": 0, \"val\": 0}\n second_accuracy = {\"train\": 0, \"val\": 0}\n atleast_accuracy = {\"train\": 0, \"val\": 0}\n both_accuracy = {\"train\": 0, \"val\": 0}\n\n for dtype in [\"train\", \"val\"]:\n batch = dataset.complete_data(dtype)\n # make variables volatile because graph construction is not required for eval\n batch[\"image\"] = Variable(batch[\"image\"], volatile=True)\n batch[\"task\"] = Variable(batch[\"task\"], volatile=True)\n world.qbot.observe({\"batch\": batch, \"episode_done\": True})\n\n for _ in range(world.opt[\"num_rounds\"]):\n world.parley()\n guess_token, guess_distr = world.qbot.predict(batch[\"task\"], 2)\n\n # check how much do first attribute, second attribute, both and at least one match\n first_match = guess_token[0].data == batch[\"labels\"][:, 0].long()\n second_match = guess_token[1].data == batch[\"labels\"][:, 1].long()\n both_matches = first_match & second_match\n atleast_match = first_match | second_match\n\n # compute accuracy according to matches\n first_accuracy[dtype] = 100 * torch.mean(first_match.float())\n second_accuracy[dtype] = 100 * torch.mean(second_match.float())\n atleast_accuracy[dtype] = 100 * torch.mean(atleast_match.float())\n both_accuracy[dtype] = 100 * torch.mean(both_matches.float())\n\n for dtype in [\"train\", \"val\"]:\n print(\n \"Overall accuracy [%s]: %.2f (first: %.2f, second: %.2f, atleast_one: %.2f)\"\n % (\n dtype,\n both_accuracy[dtype],\n first_accuracy[dtype],\n second_accuracy[dtype],\n atleast_accuracy[dtype],\n )\n )\n\n\n\nif __name__ == \"__main__\":\n\n OPT = parse_options()\n\n world, dataset = load_world_dataset()\n\n run_evaluation(\n world, dataset\n )\n\n \"\"\"\n world_best.pth\n Overall accuracy [train]: 97.12 (first: 98.08, second: 99.04, atleast_one: 100.00)\n Overall accuracy [val]: 95.83 (first: 97.22, second: 98.61, atleast_one: 100.00)\n \"\"\"\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"258971737","text":"\"\"\"\nAuthorization views\n\"\"\"\nfrom itsdangerous import BadSignature, BadPayload\nfrom requests import post\n\nfrom flask import render_template, request, redirect, url_for, Blueprint\nfrom flask_login import LoginManager, logout_user, UserMixin, login_user\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField\nfrom wtforms.validators import DataRequired\n\nfrom . import login_serializer\nfrom .utils import get_cida_auth_token, generate_auth_header, get_url_endpoint, is_safe_url\nfrom .. 
import app\n\n\nauth = Blueprint('auth', __name__,\n template_folder='templates',\n static_folder='static',\n static_url_path='/auth/static')\n\nAUTH_ENDPOINT_URL = app.config.get('AUTH_ENDPOINT_URL')\n# should requests verify the certificates for ssl connections\nVERIFY_CERT = app.config['VERIFY_CERT']\n\n\nclass LoginForm(FlaskForm):\n \"\"\"\n Authorization login form\n \"\"\"\n username = StringField('AD Username:', validators=[DataRequired()])\n password = PasswordField('AD Password:', validators=[DataRequired()])\n\n# Flask-Login Login Manager\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'auth.login_page'\n\n\nclass User(UserMixin):\n \"\"\"\n User Class for flask-Login\n \"\"\"\n def __init__(self, username=None, cida_auth_token=None):\n self.id = username\n self.cida_auth_token = cida_auth_token\n\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_auth_token(self):\n \"\"\"\n Encode a secure token for cookie.\n\n The Token is encrypted using itsdangerous.URLSafeTimedSerializer which\n allows us to have a max_age on the token itself. When the cookie is stored\n on the users computer it also has a exipry date, but could be changed by\n the user, so this feature allows us to enforce the exipry date of the token\n server side and not rely on the users cookie to exipre.\n \"\"\"\n data = [str(self.id), self.cida_auth_token]\n return login_serializer.dumps(data)\n\n @staticmethod\n def get(username, cida_auth_token):\n '''\n :param username: AD username\n :param cida_auth_token: token returned by CIDA auth service\n :return User object if userid is valid, otherwise return None:\n '''\n if username:\n user = User(username, cida_auth_token)\n else:\n user = None\n return user\n\n\n@login_manager.user_loader\ndef load_user(username):\n \"\"\"\n Flask-Login user_loader callback.\n The user_loader function reloads the user object from the user ID stored in the session.\n \"\"\"\n cida_auth_token = get_cida_auth_token(request.cookies)\n if cida_auth_token:\n user = User.get(username, cida_auth_token)\n else:\n user = None\n\n return user\n\n\n@login_manager.token_loader\ndef load_token(token):\n \"\"\"\n Flask-Login token_loader callback.\n The token_loader function asks this function to take the token that was\n stored on the users computer process it to check if its valid and then\n return a User Object if its valid or None if its not valid.\n \"\"\"\n\n # The Token itself was generated by User.get_auth_token. 
So it is up to\n # us to known the format of the token data itself.\n # Decrypt the Security Token, data = [ad_user_username, user_ad_token]\n try:\n data = login_serializer.loads(token, max_age=app.config['REMEMBER_COOKIE_DURATION'].total_seconds())\n except (BadSignature, BadPayload):\n user = None\n else:\n # generate the user object based on the contents of the cookie, if the cookie isn't expired\n if data:\n user = User(data[0], data[1])\n else:\n user = None\n\n return user\n\n\n@auth.route(\"/logout/\")\ndef logout_page(forward):\n \"\"\"\n Web Page to Logout User, then Redirect them to Index Page.\n \"\"\"\n auth_header = generate_auth_header(request)\n logout_url = AUTH_ENDPOINT_URL + 'logout'\n response = post(logout_url, headers=auth_header, verify=VERIFY_CERT)\n\n logout_user()\n\n return redirect(url_for(forward))\n\n\n@auth.route(\"/login/\", methods=[\"GET\", \"POST\"])\ndef login_page():\n \"\"\"\n Web Page to Display Login Form and process form.\n \"\"\"\n form = LoginForm()\n error = None\n if request.method == \"POST\":\n # take the form data and put it into the payload to send to the pubs auth endpoint\n payload = {'username': request.form['username'], 'password': request.form['password']}\n # POST the payload to the pubs auth endpoint\n pubs_login_url = AUTH_ENDPOINT_URL + 'token'\n mp_response = post(pubs_login_url, data=payload, verify=VERIFY_CERT)\n # if the pubs endpoint login is successful, then proceed with logging in\n if mp_response.status_code == 200:\n user = User(request.form['username'], mp_response.json().get('token'))\n login_user(user, remember=True)\n\n next_page = request.args.get(\"next\")\n app.logger.info('Next page: %s', next_page)\n\n if next_page is not None and is_safe_url(next_page, request.host_url):\n endpoint = get_url_endpoint(next_page, request.environ['SERVER_NAME'], ('pubswh.index', {}))\n url = url_for(endpoint[0], **endpoint[1])\n return redirect(url)\n\n return redirect(url_for('pubswh.index'))\n else:\n error = 'Username or Password is invalid '+str(mp_response.status_code)\n\n return render_template('auth/login.html', form=form, error=error)\n\n\n@auth.route('/loginservice/', methods=[\"POST\"])\ndef login_service():\n \"\"\"\n Login service view\n \"\"\"\n resp = post(AUTH_ENDPOINT_URL + 'token', data=request.form, verify=VERIFY_CERT)\n if resp.status_code == 200:\n user = User(request.form['username'], resp.json().get('token'))\n login_user(user, remember=True)\n\n # This fixed an an ERR_INVALID_CHUNKED_ENCODING when the app was run on the deployment server.\n if 'transfer-encoding' in resp.headers:\n del resp.headers['transfer-encoding']\n return (resp.text, resp.status_code, list(resp.headers.items()))\n","sub_path":"server/pubs_ui/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"632142613","text":"import PyCore\nimport PyDataProcess\nimport QtConversion\nimport ResNetActionRecognition_process as processMod\nimport cv2\nimport os\nimport glob\n\n#PyQt GUI framework\nfrom PyQt5.QtWidgets import *\n\nbackend_names = {\n cv2.dnn.DNN_BACKEND_DEFAULT: \"Default\",\n cv2.dnn.DNN_BACKEND_HALIDE: \"Halide\",\n cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE: \"Inference engine\",\n cv2.dnn.DNN_BACKEND_OPENCV: \"OpenCV\",\n cv2.dnn.DNN_BACKEND_VKCOM: \"VKCOM\",\n cv2.dnn.DNN_BACKEND_CUDA: \"CUDA\",\n}\n\ntarget_names = {\n cv2.dnn.DNN_TARGET_CPU: \"CPU\",\n cv2.dnn.DNN_TARGET_OPENCL: \"OpenCL FP32\",\n 
cv2.dnn.DNN_TARGET_OPENCL_FP16: \"OpenCL FP16\",\n cv2.dnn.DNN_TARGET_MYRIAD: \"MYRIAD\",\n cv2.dnn.DNN_TARGET_VULKAN: \"VULKAN\",\n cv2.dnn.DNN_TARGET_FPGA: \"FPGA\",\n cv2.dnn.DNN_TARGET_CUDA: \"CUDA FP32\",\n cv2.dnn.DNN_TARGET_CUDA_FP16: \"CUDA FP16\",\n}\n\nbackend_targets = {\n cv2.dnn.DNN_BACKEND_DEFAULT: [cv2.dnn.DNN_TARGET_CPU, cv2.dnn.DNN_TARGET_OPENCL, cv2.dnn.DNN_TARGET_OPENCL_FP16],\n cv2.dnn.DNN_BACKEND_HALIDE: [cv2.dnn.DNN_TARGET_CPU],\n cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE: [cv2.dnn.DNN_TARGET_CPU],\n cv2.dnn.DNN_BACKEND_OPENCV: [cv2.dnn.DNN_TARGET_CPU, cv2.dnn.DNN_TARGET_OPENCL, cv2.dnn.DNN_TARGET_OPENCL_FP16],\n cv2.dnn.DNN_BACKEND_VKCOM: [cv2.dnn.DNN_TARGET_CPU],\n cv2.dnn.DNN_BACKEND_CUDA: [cv2.dnn.DNN_TARGET_CUDA, cv2.dnn.DNN_TARGET_CUDA_FP16],\n}\n\n# --------------------\n# - Class which implements widget associated with the process\n# - Inherits PyCore.CProtocolTaskWidget from Ikomia API\n# --------------------\nclass ResNetActionRecognitionWidget(PyCore.CProtocolTaskWidget):\n\n def __init__(self, param, parent):\n PyCore.CProtocolTaskWidget.__init__(self, parent)\n\n if param is None:\n self.param = processMod.ResNetActionRecognitionParam()\n else:\n self.param = param\n\n self.param_changed = False\n\n # Create layout : QGridLayout by default\n self.grid_layout = QGridLayout()\n\n\n # Sample duration\n label_duration = QLabel(\"Sample duration (in frame)\")\n self.spin_duration = QSpinBox()\n self.spin_duration.setRange(1, 100)\n self.spin_duration.setSingleStep(1)\n self.spin_duration.setValue(self.param.sample_duration)\n\n # Rolling prediction on/off\n self.check_rolling = QCheckBox(\"Rolling prediction\")\n self.check_rolling.setChecked(self.param.rolling)\n\n # Combobox for models\n label_model = QLabel(\"Model\")\n self.combo_models = QComboBox()\n self.fill_combo_models()\n self.combo_models.currentIndexChanged.connect(self.on_param_changed)\n self.combo_models.setCurrentIndex(self.combo_models.findData(self.param.model_path))\n\n # Combobox for inference backend\n label_backend = QLabel(\"DNN backend\")\n self.combo_backend = QComboBox()\n self.fill_combo_backend() \n self.combo_backend.setCurrentIndex(self.combo_backend.findData(self.param.backend))\n self.combo_backend.currentIndexChanged.connect(self.on_backend_changed)\n\n # Combobox for inference target\n label_target = QLabel(\"DNN target\")\n self.combo_target = QComboBox()\n self.fill_combo_target(self.param.backend)\n self.combo_target.setCurrentIndex(self.combo_target.findData(self.param.target))\n self.combo_target.currentIndexChanged.connect(self.on_param_changed)\n\n # Fill layout \n self.grid_layout.addWidget(label_backend, 0, 0, 1, 1)\n self.grid_layout.addWidget(self.combo_backend, 0, 1, 1, 1)\n self.grid_layout.addWidget(label_target, 1, 0, 1, 1)\n self.grid_layout.addWidget(self.combo_target, 1, 1, 1, 1)\n self.grid_layout.addWidget(label_model, 2, 0, 1, 1)\n self.grid_layout.addWidget(self.combo_models, 2, 1, 1, 1)\n self.grid_layout.addWidget(label_duration, 3, 0, 1, 1)\n self.grid_layout.addWidget(self.spin_duration, 3, 1, 1, 1)\n self.grid_layout.addWidget(self.check_rolling, 4, 0, 1, 2)\n \n\n # PyQt -> Qt wrapping\n layoutPtr = QtConversion.PyQtToQt(self.grid_layout)\n\n # Set widget layout\n self.setLayout(layoutPtr)\n\n \n def fill_combo_models(self):\n self.combo_models.clear()\n models_folder = os.path.dirname(os.path.realpath(__file__)) + \"/models\"\n model_files = glob.glob(models_folder + \"/*.onnx\")\n\n for f in model_files:\n 
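# note: the full model file path is stored as the item's userData, so currentData() returns it later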
self.combo_models.addItem(os.path.basename(f), f)\n\n \n def fill_combo_backend(self):\n self.combo_backend.clear()\n for backend in backend_names: \n self.combo_backend.addItem(backend_names[backend], backend)\n\n\n def fill_combo_target(self, backend):\n targets = backend_targets[backend]\n self.combo_target.clear()\n\n for target in targets:\n self.combo_target.addItem(target_names[target], target)\n\n\n def on_backend_changed(self, index):\n backend = self.combo_backend.currentData()\n self.fill_combo_target(backend)\n self.param_changed = True\n\n\n def on_param_changed(self, index):\n self.param_changed = True\n\n\n def onApply(self):\n # Apply button clicked slot\n # Get parameters from widget\n self.param.sample_duration = self.spin_duration.value()\n self.param.rolling = self.check_rolling.isChecked()\n self.param.model_path = self.combo_models.currentData()\n self.param.update = self.param_changed\n self.param.backend = self.combo_backend.currentData()\n self.param.target = self.combo_target.currentData()\n\n # Send signal to launch the process\n self.emitApply(self.param)\n\n\n#--------------------\n#- Factory class to build process widget object\n#- Inherits PyDataProcess.CWidgetFactory from Ikomia API\n#--------------------\nclass ResNetActionRecognitionWidgetFactory(PyDataProcess.CWidgetFactory):\n\n def __init__(self):\n PyDataProcess.CWidgetFactory.__init__(self)\n # Set the name of the process -> it must be the same as the one declared in the process factory class\n self.name = \"ResNet Action Recognition\"\n\n\n def create(self, param):\n # Create widget object\n return ResNetActionRecognitionWidget(param, None)\n","sub_path":"ResNetActionRecognition/ResNetActionRecognition_widget.py","file_name":"ResNetActionRecognition_widget.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"164750761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 18 10:42:38 2018\n\n@author: Zhen Chen\n\n@Python version: 3.6\n\n@description: this is a class to implement the PM setting method of Tunc et al. 
(2018) to solve \n single-item stochastic lot sizing problem.\n 30 periods 0 nodes, running time 1.42s, 50 periods 0 nodes, running time 65s\n \n\"\"\"\n\nfrom gurobipy import *\nimport math\nimport numpy as np\nimport time\n\n\n\nmeanDemand = np.array([50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50,50])\nsigma = meanDemand * 0.25\niniInventory = 0\nfixOrderCost = 100\nvariCost = 0\nholdingCost = 1\npenaltyCost = 10\n\npartionNum = 10\nM = 10000\n\nT = len(meanDemand)\ncumDemand = np.cumsum(meanDemand)\nconSigma = [[0 for x in range(T)] for y in range(T)] ## initialize a 2-D array\nfor i in range(T) :\n for j in range(T) :\n sigmaPow = 0\n for k in range(i, j + 1) :\n sigmaPow += math.pow(sigma[k], 2)\n conSigma[i][j] = math.sqrt(sigmaPow)\n\nif partionNum == 4 :\n prob = np.array([0.187555, 0.312445, 0.312445, 0.187555])\n means = np.array([-1.43535, -0.415223, 0.415223, 1.43535])\n error = 0.0339052\nelif partionNum == 10 :\n prob = np.array([0.04206108420763477, 0.0836356495308449, 0.11074334596058821, 0.1276821455299152, 0.13587777477101692, 0.13587777477101692, 0.1276821455299152, 0.11074334596058821, 0.0836356495308449, 0.04206108420763477])\n means = np.array([-2.133986195498256, -1.3976822972668839, -0.918199946431143, -0.5265753462727588, -0.17199013069262026, 0.17199013069262026, 0.5265753462727588, 0.918199946431143, 1.3976822972668839, 2.133986195498256])\n error = 0.005885974956458359\n \n\nS = {i: fixOrderCost for i in range(0, T)}\nh = {i: holdingCost for i in range(0, T)}\nv = {i: variCost for i in range(0, T)}\npai = {i: penaltyCost for i in range(0, T)}\nI0 = iniInventory\n\ntry:\n # create a new model\n model = Model(\"mip_RS_PM\")\n\n # create variables\n x = {}\n q = {}\n H = {}\n for i in range(T) :\n x[i] = {}\n q[i] = {}\n H[i] = {}\n for j in range(T) :\n if j < i :\n x_name = 'x' + str(i) + str(j)\n q_name = 'q' + str(i) + str(j)\n x[i][j] = model.addVar(lb = 0, ub = 0, vtype = GRB.BINARY, name = x_name)\n q[i][j] = model.addVar(lb = 0, ub = 0, vtype = GRB.CONTINUOUS, name = q_name)\n else : \n x_name = 'x' + str(i) + str(j)\n q_name = 'q' + str(i) + str(j)\n q[i][j] = model.addVar(lb = 0, ub = GRB.INFINITY, vtype = GRB.CONTINUOUS, name = q_name)\n x[i][j] = model.addVar(vtype = GRB.BINARY, name = x_name)\n H[i][j] = {}\n for t in range(T):\n H_name = 'H' + str(i) + str(j) + str(t)\n if t >= i and t <= j :\n H[i][j][t] = model.addVar(lb = 0, ub = GRB.INFINITY, vtype = GRB.CONTINUOUS, name = H_name)\n else :\n H[i][j][t] = model.addVar(lb = 0, ub = 0, vtype = GRB.CONTINUOUS, name = H_name)\n \n # set objective\n totalCosts = LinExpr()\n setupCosts = LinExpr()\n holdCosts = LinExpr()\n penaCosts = LinExpr()\n Dxij = LinExpr()\n \n for i in range(T) :\n for j in range(T) :\n setupCosts += x[i][j] * S[i] \n for t in range(i, j + 1) :\n Dxij += -h[i] * cumDemand[t] * x[i][j]\n holdCosts += q[i][j] * h[i]\n penaCosts += H[i][j][t] * (h[i] + pai[i])\n totalCosts = setupCosts + holdCosts + penaCosts + Dxij\n model.setObjective(totalCosts, GRB.MINIMIZE)\n \n \n # add constraints\n \n\t# sum_{i=0}^T x_{1, i} = 1\n\t# sum_{i=0}^T x_{i, T} = 1;\n\t# sum_{i=0}^t x_{i, t} = sum_{j=t+1}^T x_{t, j}\n expr_x1i = LinExpr()\n expr_xiT = LinExpr()\n for i in range(T) :\n expr_x1i += x[0][i]\n expr_xiT += x[i][T - 1]\n model.addConstr(expr_x1i == 1)\n model.addConstr(expr_xiT == 1)\n for t in range(T - 1) :\n expr_xit = LinExpr()\n expr_xtj = LinExpr()\n for i in range(t + 1) :\n 
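# cycle chaining: the replenishment cycles covering up to period t must be matched by a cycle starting at t+1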
expr_xit += x[i][t]\n for j in range(t + 1, T) :\n expr_xtj += x[t + 1][j]\n model.addConstr(expr_xit == expr_xtj)\n \n # q_{i,j} <= Mx_{i,j}\n for i in range(T) :\n for j in range(T) :\n model.addConstr(q[i][j] <= M * x[i][j])\n \n # sum_{i=0}^t q_{i, t} <= sum_{j=t+1}^T q_{t, j}\n for t in range(T - 1) :\n expr_qit = LinExpr()\n expr_qtj = LinExpr()\n for i in range(t + 1) :\n expr_qit += q[i][t]\n for j in range(t + 1, T) :\n expr_qtj += q[t + 1][j]\n model.addConstr(expr_qit <= expr_qtj)\n \n # piecewise constraints\n I = LinExpr()\n for i in range(T) :\n for j in range(i, T) :\n for t in range(i, j + 1) :\n I = q[i][j] - x[i][j] * cumDemand[t]\n for k in range(partionNum) :\n pik = prob[range(k + 1)].sum() # slope\n pmean = prob[range(k + 1)].dot(means[range(k + 1)]) # intercept\n model.addConstr(H[i][j][t] + I >= pik * I - x[i][j] * pmean * conSigma[i][t])\n \n # model.write('mip_RS_PM_Gurobi.lp')\n currTime = time.time()\n model.optimize()\n runTime = time.time() - currTime\n print('running time is %.5f s' % runTime)\n \n# print('setup costs is: %f' % setupCosts.getValue())\n# print('penalty costs is: %f' % penaCosts.getValue())\n# mark_holdCosts = holdCosts.getValue() + Dxij.getValue()\n# print('hold costs is: %f' % mark_holdCosts)\n if model.status == GRB.Status.OPTIMAL : \n# print('x: ', end = '')\n# print('\\n')\n# for i in range(T) :\n# for j in range(T) :\n# print('%d' % x[i][j].X, end = ' ')\n# print('\\n')\n# print('*************************')\n# print('q: ', end = '')\n# print('\\n')\n# for i in range(T) :\n# for j in range(T) :\n# print('%d' % q[i][j].X, end = ' ')\n# print('\\n')\n# print('H: ', end = '')\n# print('\\n')\n# for i in range(T) :\n# for j in range(T) :\n# for t in range(T) :\n# print('%d' % H[i][j][t].X, end = ' ')\n# print('\\n')\n# print('\\n')\n \n z = [0 for i in range(T)]\n quantity = [0 for i in range(T)]\n I = [0 for i in range(T)]\n lastQ = 0\n for i in range(T) :\n for j in range(T) :\n if x[i][j].X == 1 :\n z[i] = 1\n if i == 0 :\n quantity[i] = q[i][j].X\n lastQ = quantity[i]\n else :\n quantity[i] = q[i][j].X - lastQ\n lastQ = quantity[i]\n I[0] = quantity[0] + iniInventory - meanDemand[0]\n for i in range(1, T) :\n I[i] = quantity[i] + I[i - 1] - meanDemand[i]\n print('*************************')\n print('z = ')\n print(z)\n print('Q = ')\n print(quantity)\n print('I = ')\n print(I)\n elif model.status == GRB.Status.INFEASIBLE:\n print('Optimization was stopped with status %d' % model.status)\n # do IIS, find infeasible constraints\n model.computeIIS()\n for c in model.getConstrs():\n if c.IISConstr:\n print('%s' % c.constrName)\n \n \nexcept GurobiError as e:\n print('Error code ' + str(e.errno) + \": \" + str(e))\n\n\n\n\n","sub_path":"linear programming/gurobi/stochastic lot sizing/mip_RS_PM.py","file_name":"mip_RS_PM.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"3024892","text":"def recur_fibo(n):\n if n <= 1:\n return n\n else:\n # each successive number is the sum of the two previous ones, so:\n return(recur_fibo(n-1) + recur_fibo(n-2))\nileLiczb = 20\n# check number\nif ileLiczb < 0:\n print(\"Please enter a positive number\")\nelif ileLiczb == 0:\n print('')\nelse:\n print(\"Fibonacci:\")\n for i in range(ileLiczb):\n print(recur_fibo(i),end=' ')\n","sub_path":"04-Subroutines/af_34.py","file_name":"af_34.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"171220784","text":"# 1952. [Mock SW Competency Test] Swimming Pool\n# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV5PpFQaAQMDFAUq&categoryId=AV5PpFQaAQMDFAUq&categoryType=CODE\nimport math\n\n\ndef brute(i=0):\n global t, c, plan, result\n if i >= y:\n temp = sum([plan[j] for j in range(y) if not t[j]])\n temp += math.ceil(sum(t) / 3) * c\n if result > temp:\n result = temp\n return\n t[i] = False\n brute(i + 1)\n if i < y - 2:\n t[i] = t[i + 1] = t[i + 2] = True\n elif i == y - 2:\n t[i] = t[i + 1] = True\n else:\n t[i] = True\n brute(i + 3)\n\n\nfor TC in range(1, int(input()) + 1):\n a, b, c, result = map(int, input().split())\n m = int(b / a)\n inp = input().split()\n while '0' in inp:\n inp.remove('0')\n plan = list(map(int, inp))\n y = len(plan)\n for i in range(y):\n if plan[i] > m:\n plan[i] = b\n elif plan[i]:\n plan[i] *= a\n t = [False] * y\n brute()\n print(\"#{} {}\".format(TC, min(result, sum(plan))))\n","sub_path":"SWEA/sol/1952.py","file_name":"1952.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"45326086","text":"###############################\n# Author : septicmk\n# Date : 2015/07/25 16:14:09\n# FileName : main.py\n################################\n\nfrom lambdaimage import preprocess as prep\nfrom lambdaimage import registration as reg\nfrom lambdaimage import fusion as fus\nfrom pyspark import SparkContext, SparkConf\nfrom lambdaimage import lambdaimageContext\nfrom lambdaimage.utils.tool import exeTime, log, showsize\nimport numpy as np\n\n#conf = SparkConf().setAppName('test').setMaster('local[1]').set('spark.executor.memory','2g').set('spark.driver.maxResultSize','6g').set('spark.driver.memory','8g').set('spark.local.dir','/dev/shm').set('spark.storage.memoryFraction','0.2').set('spark.default.parallelism','10')\n#tsc=lambdaimageContext.start(conf=conf)\ntsc = lambdaimageContext.start(master=\"spark://blade12:7077\",appName=\"lambdaimage\")\nlog('info')('tiff load start...')\nrddA = tsc.loadImages('/home/wb/data/1-L/*.tif', inputFormat='tif-stack')\nrddB = tsc.loadImages('/home/wb/data/1-R/*.tif', inputFormat='tif-stack')\nlog('info')('tiff load over...')\n\nlog('info')('intensity normalization start ...')\nrddA = prep.intensity_normalization(rddA)\nrddB = prep.intensity_normalization(rddB)\nrddB = prep.flip(rddB)\n\n_rddA = prep.intensity_normalization(rddA,8)\n_rddB = prep.intensity_normalization(rddB,8)\nlog('info')('intensity normalization over ...')\n\nlog('info')('registration start ...')\nvec0 = [0,0,0,1,1,0,0]\nvec = reg.c_powell(_rddA.get(4), _rddB.get(4), vec0)\nrddB = reg.execute(rddB, vec)\nlog('info')('registration over ...')\n\nlog('info')('fusion start ...')\nL_img_stack = rddA.collectValuesAsArray()\nR_img_stack = rddB.collectValuesAsArray()\nimg_stack = zip(L_img_stack, R_img_stack)\nrdd = tsc.loadImagesFromArray(img_stack)\nfused_img = fus.wavelet_fusion(rdd)\nfused_img = tsc.loadImagesFromArray(fused_img)\nlog('info')('fusion over ...')\n\nlog('info')('saving ...')\nfused_img.exportAsTiffs('/home/wb/data/lambdaimage/fusion',overwrite = True)\n#fused_img = np.squeeze(np.array(fused_img.values().collect()))\n\nlog('info')('subtract background start ...')\nsb_img = prep.subtract_Background(fused_img)\nlog('info')('subtract background over ... 
')\n\nlog('info')('saving ...')\nsb_img.exportAsTiffs('/home/wb/data/lambdaimage/subtract',overwrite = True)\n","sub_path":"script/mehi_standalone.py","file_name":"mehi_standalone.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"297042210","text":"from flask import Flask, render_template, request\nfrom solver import solve\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route('/translate', methods = ['POST'])\ndef translate():\n inputText = request.form['inputText']\n translationOption = request.form['translationOption']\n algoOption = request.form['algoOption']\n\n res = solve(inputText, algoOption, translationOption)\n\n return render_template('index.html', result=res)\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"499586236","text":"import numpy as np\nfrom scipy.spatial import distance\nimport math\nimport random\n\nDEBUG = False\nUNIFORM = True\n\nclass gonzalez:\n def get_csv(self,fileName):\n return np.genfromtxt(fileName, delimiter=',')\n\n def __init__(self,data, it, r):\n self.it = it\n self.data = data\n self.r2 = r * 2\n \n def gonzalez(self):\n size = len(self.data)\n\n #Random initialization\n winners = [[random.randint(0, size-1)]]\n centers = np.array([self.data[winners[0][0]]])\n\n #Populating dist array\n dist = distance.cdist(self.data, np.array([centers[len(centers)-1]]))\n dist = np.array([item for sublist in dist for item in sublist])\n if(DEBUG):\n print(dist[0:100])\n \n \n for i in range(self.it-1):\n if(DEBUG):\n print(\"-----------------------------\\n\", centers.shape)\n #Get distance to new center\n if(DEBUG):\n print(dist[0:100])\n tempdist = distance.cdist(self.data, np.array([centers[len(centers)-1]]))\n tempdist = np.array([item for sublist in tempdist for item in sublist])\n if(DEBUG):\n print(tempdist[0:100])\n #For each entry, if leq, replace\n dist = np.array([dist[i] if dist[i] <= tempdist[i] else tempdist[i] for i in range(size)])\n if(DEBUG):\n print(dist[0:100])\n #Picking center\n winnerInd = [np.argmax(dist)]\n winners.append(winnerInd)\n if(DEBUG):\n print(winnerInd, \"\\n\", self.data[winnerInd])\n #Adding center\n centers = np.append(centers, self.data[winnerInd], axis = 0)\n\n if(DEBUG):\n print(winners)\n \n return np.array(centers)\n\n\n\n#kcent = kcentersOutliers(\"syntheticData/data.txt\",10,10.0)\n#kcent.kcentersOutliers()\n\n","sub_path":"lib/gonzalez.py","file_name":"gonzalez.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"404123668","text":"import read_imf_nan\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime as dt\n\n# read the data\ndata = read_imf_nan.read_imf_nan( )\n\n# create plotting canvas\n# save a reference so we have it for later\nfig = plt.figure(1)\n\n# create first subplot\nax1 = plt.subplot(2,1,1)\nplt.plot(data['date'], data['AE'], color='black')\n# add labels\nax1.set_ylabel('AE')\n# set the x axis limits to 00:00\nbt = dt.datetime(2012, 3, 5, 0, 0, 0)\nft = dt.datetime(2012, 3, 15, 0, 0, 0)\nax1.set_xlim(bt, ft)\n# set the y axis range\nax1.set_ylim(0, 3000)\n# overplot a line at zero\nxli = ax1.get_xlim()\nplt.plot( xli, [0,0], '--', 
color='gray' )\n\n#create subplot\nax2 = plt.subplot(2,1,2)\nplt.plot(data['date'], data['SYM'], color='black')\n# add labels\nax2.set_ylabel('SYM/H')\n# set the x axis limits to 00:00\nbt = dt.datetime(2012, 3, 5, 0, 0, 0)\nft = dt.datetime(2012, 3, 15, 0, 0, 0)\nax2.set_xlim(bt, ft)\n# set the y axis range\nax2.set_ylim(-150, 60)\n# overplot a line at zero\nxli = ax2.get_xlim()\nplt.plot( xli, [0,0], '--', color='gray' )\n\n\n\n#show plot\nplt.show()\n","sub_path":"plot_imf_nice_sub.py","file_name":"plot_imf_nice_sub.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"560382632","text":"import os\nimport argparse\n\nimport numpy as np\nimport torch\n\nfrom src.general.envs.gym_env import GymEnv\nfrom src.sac.sac_agent import SACAgent\n\ndef main(args,\n train_steps=1000000,\n random_steps=1000,\n train_freq=1,\n target_update_freq=1,\n actor_lr=0.0001,\n q_lr=0.0001,\n entropy_lr=0.001,\n gamma=0.99,\n alpha=1,\n tau=0.005,\n buffer_size=500000,\n batch_size=256,\n gradient_steps=1,\n actor_fc=(256, 256),\n critic_fc=(256, 256),\n conv_size=None,\n logging_period=25,\n checkpoint_period=5000,\n gpu=False):\n\n if args.wandb:\n import wandb\n if args.wandb_name is not None:\n wandb.init(name=args.wandb_name,\n project=\"hexapod-sac\",\n entity=\"olin-robolab\")\n else:\n wandb.init(project=\"hexapod-sac\",\n entity=\"olin-robolab\")\n wandb.config.update({\"train_steps\": train_steps,\n \"random_steps\": random_steps,\n \"train_freq\": train_freq,\n \"target_update_freq\": target_update_freq,\n \"actor_lr\": actor_lr,\n \"q_lr\": q_lr,\n \"entropy_lr\": entropy_lr,\n \"gamma\": gamma,\n \"alpha\": alpha,\n \"tau\": tau,\n \"buffer_size\": buffer_size,\n \"batch_size\": batch_size,\n \"gradient_steps\": gradient_steps,\n \"actor_fc\": actor_fc,\n \"critic_fc\": critic_fc,\n \"conv_size\": conv_size})\n else: wandb = None\n\n env = GymEnv(\"BipedalWalker-v3\")\n if args.render:\n env.render()\n\n if torch.cuda.is_available() and args.gpu: \n device = \"cuda:0\"\n else:\n if args.gpu:\n print(\"GPU flag set, but no GPU found! 
Using CPU.\")\n device = \"cpu\"\n\n print(\"Building agent...\")\n agent = SACAgent(train_steps=train_steps,\n random_steps=random_steps,\n train_freq=train_freq,\n target_update_freq=target_update_freq,\n actor_lr=actor_lr,\n q_lr=q_lr,\n entropy_lr=entropy_lr,\n gamma=gamma,\n alpha=alpha,\n tau=tau,\n buffer_size=buffer_size,\n batch_size=batch_size,\n gradient_steps=gradient_steps,\n env=env,\n actor_fc=actor_fc,\n critic_fc=critic_fc,\n conv_size=conv_size,\n device=device,\n logging_period=logging_period,\n checkpoint_period=checkpoint_period,\n output_dir=args.output_dir,\n restore_dir=args.restore,\n wandb=wandb)\n print(\"Agent built!\")\n\n print(\"Starting train...\")\n try:\n agent.train()\n finally:\n # Make sure our environment is closed\n # PLEASE DON'T HIT CTRL C TWICE\n env.close()\n print(\"Train done!\")\n\n env.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Train SAC')\n # Directory path arguments\n parser.add_argument(\n '--output-dir',\n type=str,\n default='/tmp/sac')\n\n # File path arguments\n parser.add_argument(\n '--restore',\n type=str,\n default=None)\n\n # Run mode arguments\n parser.add_argument(\n '--render',\n default=False,\n action='store_true')\n parser.add_argument(\n '--gpu',\n default=False,\n action='store_true')\n\n # WandB flags\n parser.add_argument(\n '--wandb',\n default=False,\n action='store_true')\n parser.add_argument(\n '--wandb-name',\n type=str,\n default=None)\n args = parser.parse_args()\n\n #logging.getLogger().setLevel(logging.INFO)\n\n main(args)\n","sub_path":"src/sac/train_sac.py","file_name":"train_sac.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"121821869","text":"# cnMaestro sample API test code. From page 86-87 of the cnMaestro 2.4.1 RESTful API documentation\n# Copyright (C) 2017 Cambium Networks, LTD.\n# Implemented/reworked by Adrien K\n\n# \"API test code for cnMaestro that demonstrates session establishment and API\n# api. The client connects to cnMaestro using the Client Id and Client\n# Secret downloaded from the Client API page in the cnMaestro UI. The Client\n# receives a URL, Access Token, and Expiration Interval (in seconds)\n# defining how long the token is valid. 
The URL and Access Token are used\n# for subsequent API requests.\" -- cnMaestro 2.4.1 RESTful API documentation\n\nimport sys\nimport requests\nimport json\nimport base64\n\ndef check_http_return(section, url, code, request):\n if int(code) != 200:\n print('{0} failed with HTTP status {1}'.format(section, code))\n print('URL: {}'.format(url))\n try:\n print(json.dumps(request.json(), indent=2))\n except Exception:\n pass\n sys.exit(1)\n\n\n# Retrieve access parameters (access_token and expires_in).\ndef get_access_parameters(host, client_id, client_secret):\n token_url = 'https://{}/api/v1/access/token'.format(host)\n encoded_credentials = base64.b64encode('{}:{}'.format(client_id, client_secret).encode()).decode()\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_credentials),\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n # debug output only; avoid printing credentials in production\n print(encoded_credentials)\n body = 'grant_type=client_credentials'\n r = requests.post(token_url, body, headers=headers, verify=False)\n check_http_return('Access Parameters', token_url, r.status_code, r)\n return r.json()['access_token'], r.json()['expires_in']\n\n\n# Validate the expiration of the access token.\ndef validate_access_token(host, access_token):\n validate_url = 'https://{}/api/v1/access/validate_token'.format(host)\n headers = {\n 'Authorization': 'Bearer {}'.format(access_token),\n }\n r = requests.get(validate_url, headers=headers, verify=False)\n check_http_return('Validate Access Token', validate_url, r.status_code, r)\n return r.json()['expires_in']\n\n\ndef generate_api_session(host, client_id, client_secret):\n # Retrieve access parameters and generate API session\n print('\\nRetrieve Access Parameters')\n access_token, expires_in = get_access_parameters(host, client_id, client_secret)\n print('Success: access_token ({}) expires_in ({})\\n'.format(access_token, expires_in))\n\n # Validate time remaining for the access token\n print('Validating expiration time')\n expires_in_check = validate_access_token(host, access_token)\n print('Success: expiresIn ({})\\n'.format(expires_in_check))\n return access_token\n\n\n# Execute API using URL returned in access parameters. 
Currently unused\ndef call_api(host, path, access_token):\n api_url = 'https://{}{}'.format(host, path)\n headers = {\n 'Authorization': 'Bearer {}'.format(access_token),\n }\n r = requests.get(api_url, headers=headers, verify=False)\n check_http_return(\"API\", api_url, r.status_code, r)\n return r.json()\n","sub_path":"cnm_usage/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"315233768","text":"import json\nfrom math import sqrt\nfrom os.path import exists\n\n\ndef load_data(filepath):\n if not exists(filepath):\n return None\n with open(filepath, encoding='windows-1251') as file_handler:\n json_object = json.load(file_handler)\n return json_object\n\n\ndef get_biggest_bar(data):\n return max(data, key=lambda x: x['SeatsCount'])\n\n\ndef get_smallest_bar(data):\n return min(data, key=lambda x: x['SeatsCount'])\n\n\ndef get_closest_bar(data, longitude, latitude):\n def distance_calculation(bar, longitude=longitude, latitude=latitude):\n\n bar_latitude = bar['geoData']['coordinates'][1]\n bar_longitude = bar['geoData']['coordinates'][0]\n x2 = latitude\n x1 = bar_latitude\n y2 = longitude\n y1 = bar_longitude\n\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\n return min(data, key=distance_calculation)\n\n\ndef print_result(filepath, longitude, latitude):\n json_data = load_data(filepath)\n\n print('Biggest bar: {}\\n'.format(json.dumps(\n get_biggest_bar(json_data), indent=4, ensure_ascii=False)))\n print('Smallest bar: {}\\n'.format(json.dumps(\n get_smallest_bar(json_data), indent=4, ensure_ascii=False)))\n print('Closest bar: {}\\n'.format(json.dumps(\n get_closest_bar(json_data, longitude, latitude), indent=4, ensure_ascii=False)))\n\n\ndef get_data():\n filepath = input('Enter the path to the file: ')\n longitude = float(input('Enter your coordinates (longitude): '))\n latitude = float(input('Enter your coordinates (latitude): '))\n\n return filepath, longitude, latitude\n\n\nif __name__ == '__main__':\n users_data = get_data()\n print_result(*users_data)\n","sub_path":"bars.py","file_name":"bars.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"563718698","text":"dic = set() # NOTE: module-level set; letters accumulate across repeated calls\n\n\ndef elfish(word, L):\n if (L == len(word)):\n ans = 'e' in dic and 'l' in dic and 'f' in dic\n return ans\n elif L < len(word):\n dic.add(word[L])\n return elfish(word, L + 1)\n\n\nprint(elfish('ebankailf', 0))\n","sub_path":"Archive/Others/Recursion/elfish.py","file_name":"elfish.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"226466050","text":"# All classes in Python inherit from class \"object\"\n# that's why you will see a lot of dunder methods when you check the methods of any class\n\nclass Animal:\n species = 'mammal'\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def make_sound(self):\n print('Do Nothing')\n\nclass Dog(Animal):\t\t# inheriting class Animal \n\tdef make_sound(self):\n\t\tprint(f\"{self.name} is barking always \")\n\nclass Cat(Animal):\n\tdef make_sound(self):\n\t\tAnimal.make_sound(self)\t\t\t\t# call the parent class's make_sound (super().make_sound() is the idiomatic way)\n\t\tprint(f\"{self.name} is meowing always \")\n\n\n\n# make_sound is polymorphism (different forms with same name)\n\nroxy = Dog(\"Roxy\", 
6)\nprint(roxy.age)\nroxy.make_sound()\n\njulie = Cat(\"Julie\", 3)\nprint(julie.age)\njulie.make_sound()","sub_path":"inherit_ex.py","file_name":"inherit_ex.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"234742174","text":"\"\"\"\nGeneral drawing methods for graphs using Bokeh.\n\"\"\"\n\nfrom bokeh.io import show, output_file\nfrom bokeh.plotting import figure\nfrom bokeh.models import (GraphRenderer, StaticLayoutProvider, Circle, LabelSet,\n ColumnDataSource)\nfrom graph import Graph\nimport random\nimport math\n\n\nclass BokehGraph:\n \"\"\"Class that takes a graph and exposes drawing methods.\"\"\"\n def __init__(self, graph):\n if not graph.vertices:\n raise Exception(\"Graph should contain vertices\")\n self.graph = graph\n self.pos = {}\n\n def _setup_labels(self):\n label_data = {'x': [], 'y': [], 'names': []}\n for vertex, position in self.pos.items():\n label_data['x'].append(position[0])\n label_data['y'].append(position[1])\n label_data['names'].append(vertex)\n label_source = ColumnDataSource(label_data)\n labels = LabelSet(x='x', y='y', text='names', level='glyph',\n text_align='center', text_baseline='middle',\n source=label_source, render_mode='canvas')\n return labels\n\n def _get_edges(self):\n start = []\n end = []\n checked = set()\n for startpoint, endpoints in self.graph.vertices.items():\n for endpoint in endpoints:\n if (startpoint.label, endpoint.label) not in checked:\n checked.add((startpoint.label, endpoint.label))\n start.append(startpoint.label)\n end.append(endpoint.label)\n return dict(start=start, end=end)\n\n def _get_colors(self):\n colors = []\n for vertex in self.graph.vertices.keys():\n color = vertex.color\n colors.append(color)\n return colors\n\n def _map_coords(self, width, height):\n cells = math.ceil(len(self.graph.vertices.keys())**(1/2))\n cube = (width-1)/cells\n x_grid = 0.5\n y_grid = 0.5\n for vertex in self.graph.vertices.keys():\n self.pos[vertex.label] = (random.uniform(x_grid,x_grid+cube), \n random.uniform(y_grid,y_grid+cube))\n if x_grid + cube >= (width - cube):\n x_grid = 0.5\n y_grid += cube\n else:\n x_grid += cube\n \n def _get_indices(self):\n indices = []\n for vertex in self.graph.vertices.keys():\n indices.append(vertex.label)\n return indices\n \n def draw(self, title='Graph', width=10, height=10,\n show_axis=False, show_grid=False, circle_size=25):\n plot = figure(title=title, x_range=(0,width), y_range=(0,height))\n \n plot.axis.visible = show_axis\n plot.grid.visible = show_grid\n\n graph = GraphRenderer()\n graph.node_renderer.data_source.add(\n self._get_indices(), 'index')\n graph.node_renderer.data_source.add(\n self._get_colors(), 'color')\n graph.node_renderer.glyph = Circle(size=circle_size,\n fill_color='color')\n graph.edge_renderer.data_source.data = self._get_edges()\n\n self._map_coords(width, height)\n graph.layout_provider = StaticLayoutProvider(graph_layout=self.pos)\n plot.renderers.append(graph)\n\n labels = self._setup_labels()\n plot.add_layout(labels)\n\n output_file('./graph.html')\n show(plot)\n\ndef randomize_graph(vertices=10, connections=5):\n graph = Graph()\n for i in range(vertices):\n graph.add_vertex(str(i))\n for i in range(connections):\n start, end = random.sample(list(graph.vertices), 2)\n graph.add_edge(start, end)\n colors = ['#FF395B', '#FC928F', '#F9C6A3', '#C0BF9F',\n '#79A792', '#1A8CC1', '#FECE6B', '#F69D61']\n color_index = 0\n searched = set()\n for vertex in graph.vertices:\n 
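# give each connected component its own palette color; search() returns the vertices it reached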
if vertex not in searched:\n # cycle through the palette, one color per component\n color = colors[color_index % len(colors)]\n searched.update(graph.search(vertex, color))\n color_index += 1\n bokeh = BokehGraph(graph)\n bokeh.draw()\n \nif __name__ == '__main__':\n randomize_graph(16, 6)","sub_path":"projects/graph/src/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"363118791","text":"import os\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom datetime import datetime\nfrom app.constants import MESES\n\ndef link_callback(uri, rel):\n sUrl = settings.STATIC_URL # Typically /static/\n sRoot = settings.STATIC_ROOT # Typically /home/userX/project_static/\n mUrl = settings.MEDIA_URL # Typically /media/\n mRoot = settings.MEDIA_ROOT # Typically /home/userX/project_static/media/\n\n if uri.startswith(mUrl):\n path = os.path.join(mRoot, uri.replace(mUrl, \"\"))\n elif uri.startswith(sUrl):\n path = os.path.join(sRoot, uri.replace(sUrl, \"\"))\n else:\n return uri\n\n # make sure that file exists\n if not os.path.isfile(path):\n raise Exception('media URI must start with %s or %s' % (sUrl, mUrl))\n return path\n\ndef convertir_fecha(fecha: str) -> datetime:\n formato_fecha = '%B %d, %Y' # January 28, 2021\n for m in MESES:\n if m == fecha.split(' ')[0]:\n return datetime.strptime(fecha.replace(m, MESES[m]), formato_fecha) ","sub_path":"gentelella/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"325397405","text":"import os, sys, fnmatch, shutil\r\nimport re\r\n\r\ndef dir_list_folder(head_dir, dir_name): \r\n outputList = [] \r\n for root, dirs, files in os.walk(head_dir): \r\n for d in dirs: \r\n if d.upper() == dir_name.upper(): \r\n outputList.append(os.path.join(root, d))\r\n if dir_name == \"*\":\r\n outputList.append(os.path.join(root, d))\r\n return outputList \r\n \r\n\r\n# DEFINITION OF FILES THAT WE ARE CONVERTING TO TIF, THIS WILL USE\r\n# UNIX WILDCARD OPTIONS TO MAKE IT MORE FLEXIBLE\r\n#FOLDER = 'V:\\\\Data\\\\Global\\\\ClimateChange\\\\royal_society\\\\pp2\\\\*qump_pr*divide*mean*.txt'\r\nBaseFolder = 'C:\\\\Program Files (x86)\\\\GeoServer 2.2\\\\data_dir\\\\gwc\\\\PULSE_BRA_adm3\\\\'\r\nCopyToDir = 'C:\\\\Apache\\\\HTTP Server 2.2\\\\htdocs\\\\Brazil_obs\\\\target\\\\content\\\\map-datasets\\\\MunicTiles\\\\'\r\n\r\n# LOOP THROUGH EACH ZOOM LEVEL\r\nfor zoom in range(0, 9):\r\n\t\r\n # get the name of the folder\r\n curFolder=BaseFolder + \"EPSG_4326_\" + str(zoom).zfill(2)\r\n filesToLookFor = curFolder + \"\\\\\" + \"*.png\" \r\n\t\r\n # create directory of zoom level\r\n zoomDir = CopyToDir + str(zoom) + \"\\\\\"\r\n \r\n if not os.path.exists(zoomDir):\r\n os.makedirs(zoomDir)\r\n\r\n matches = []\r\n for root, dirnames, filenames in os.walk(curFolder):\r\n for filename in fnmatch.filter(filenames, '*.png'):\r\n # get the name of the file and get the first part for \r\n # the folder name\r\n fileSplit=filename.split(\"_\")\r\n yFold=re.sub(\"^0+\",\"\",fileSplit[0])\r\n xName=re.sub(\"^0+\",\"\",fileSplit[1])\r\n\r\n\r\n if yFold==\"\":\r\n yFold=\"0\"\r\n if xName==\".png\":\r\n xName=\"0.png\"\r\n \r\n \t\t\t\r\n fileCopied = zoomDir + yFold + \"\\\\\" + xName\r\n if not os.path.exists(zoomDir + yFold + \"\\\\\"):\r\n os.makedirs(zoomDir + yFold + \"\\\\\")\r\n\t\t\r\n shutil.copyfile 
(os.path.join(root, filename), fileCopied)\r\n\r\n","sub_path":"Scripts/Python/PULSE_Brazil/createXYZTilesFromGeoserver.py","file_name":"createXYZTilesFromGeoserver.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"649779314","text":"import sys\nsys.path.append(\"/home/andrewliao11/gail-tf\")\nfrom baselines import logger\nimport pickle as pkl\nimport numpy as np\nfrom tqdm import tqdm\nimport ipdb\n\nclass Mujoco_Dset(object):\n def __init__(self, expert_path, ret_threshold=None, traj_limitation=np.inf, random=True):\n with open(expert_path, \"rb\") as f:\n traj_data = pkl.load(f)\n obs = []\n acs = []\n rets = []\n lens = []\n for traj in tqdm(traj_data):\n if ret_threshold is not None and traj[\"ep_ret\"] < ret_threshold:\n continue # skip trajectories below the return threshold\n if len(rets) >= traj_limitation:\n break\n rets.append(traj[\"ep_ret\"])\n lens.append(len(traj[\"ob\"]))\n obs.append(traj[\"ob\"])\n acs.append(traj[\"ac\"])\n self.num_traj = len(rets)\n self.avg_ret = sum(rets)/len(rets)\n self.avg_len = sum(lens)/len(lens)\n self.rets = np.array(rets)\n self.lens = np.array(lens)\n self.obs = np.array([v for ob in obs for v in ob])\n self.acs = np.array([v for ac in acs for v in ac])\n if len(self.acs) > 2:\n self.acs = np.squeeze(self.acs)\n assert len(self.obs) == len(self.acs)\n self.num_transition = len(self.obs)\n self.randomize = random\n self.init_pointer()\n self.log_info()\n\n def log_info(self):\n logger.log(\"Total trajectories: %d\"%self.num_traj)\n logger.log(\"Total transitions: %d\"%self.num_transition)\n logger.log(\"Average episode length: %f\"%self.avg_len)\n logger.log(\"Average returns: %f\"%self.avg_ret)\n\n def init_pointer(self):\n self.pointer = 0\n if self.randomize:\n idx = np.arange(self.num_transition)\n np.random.shuffle(idx)\n self.obs = self.obs[idx, :]\n self.acs = self.acs[idx, :]\n\n def get_next_batch(self, batch_size):\n if self.pointer + batch_size >= self.num_transition:\n self.init_pointer()\n end = self.pointer + batch_size\n obs = self.obs[self.pointer:end, :]\n acs = self.acs[self.pointer:end, :]\n self.pointer = end\n return obs, acs\n\n def plot(self):\n import matplotlib.pyplot as plt\n plt.hist(self.rets)\n plt.savefig(\"histogram_rets.png\")\n plt.close()\n\n\ndef test(expert_path):\n dset = Mujoco_Dset(expert_path)\n dset.plot()\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--expert_path\", type=str, default=\"../baselines/ppo1/ppo.Hopper.0.00.pkl\")\n args = parser.parse_args()\n test(args.expert_path)\n\n","sub_path":"dataset/mujoco.py","file_name":"mujoco.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"647594131","text":"import collections.abc\nfrom datetime import datetime\nfrom contextlib import contextmanager\n\nimport cx_Oracle\nfrom sqlalchemy import create_engine, func\nfrom sqlalchemy.orm import Session\n\nfrom .model import Base, Body, Ticker\nfrom .util import is_test_mode, read_config\n\n\nclass Database:\n def __init__(self, connection_string, **kwargs):\n self.__engine = create_engine(\n connection_string,\n **kwargs)\n\n @classmethod\n def initialize(cls, **kwargs):\n database_config = read_config()[\"database\"]\n\n connection_string = 'oracle://{user}:{password}@{sid}'.format(\n user=database_config[\"username\"],\n password=database_config[\"password\"],\n sid=database_config[\"dsn\"])\n\n if 
is_test_mode():\n connection_string = 'sqlite:///:memory:'\n\n del kwargs['test']\n\n database = Database(connection_string, **kwargs)\n # Create all tables\n Base.metadata.create_all(database.__engine)\n return database\n\n @contextmanager\n def session_scope(self):\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n session = Session(self.__engine)\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n def add(self, obj):\n with self.session_scope() as session:\n if isinstance(obj, collections.abc.Iterable):\n for item in obj:\n session.merge(item)\n else:\n session.merge(obj)\n session.commit()\n\n def get_all_tickers(self) -> list[Ticker]:\n with self.session_scope() as session:\n return session.query(Ticker).all()\n\n def get_ticker_mentions(self, \n start: datetime = None, \n end: datetime = None\n ):\n # NOTE: start/end are currently unused by the query below\n with self.session_scope() as session:\n return session.query(Body.ticker, func.count(Body.content)).group_by(Body.ticker).order_by(func.count(Body.content).desc())\n","sub_path":"wsbdiscordbot/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"426790747","text":"#!/usr/bin/env python3\n\ndef main():\n from ken2015nov.xlsxcsv import rows_to_csv, rows_to_xlsx,\\\n CsvToRows, XlsxToRows\n _prepare_a_csv()\n with CsvToRows(r'a.csv') as rows:\n rows_to_xlsx(r'a.xlsx', rows)\n\n with CsvToRows(r'a.csv') as rows:\n rows_to_csv(r'0.csv', rows)\n with CsvToRows(r'a.csv') as rows:\n rows_to_xlsx(r'1.xlsx', rows)\n with XlsxToRows(r'a.xlsx') as rows:\n rows_to_csv(r'2.csv', rows)\n with XlsxToRows(r'a.xlsx') as rows:\n rows_to_xlsx(r'3.xlsx', rows)\n\n_a_csv = r'''\na,b\n1,2\n3,4\n'''[1:]\n\ndef _prepare_a_csv():\n from pathlib import Path\n with Path(__file__).with_name(r'a.csv').open(r'wt') as ostream:\n ostream.write(_a_csv)\n\nif __name__ == r'__main__':\n main()\n","sub_path":"xlsxcsvdemo.py","file_name":"xlsxcsvdemo.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"503731084","text":"# import sys\n# reload(sys)\n# sys.setdefaultencoding('utf8')\nimport numpy as np\nimport matplotlib.pyplot as plt \n\ndef f1(x):\n\treturn (-0.2*x**4+x**2 -x)\ndef f1p(x):\n\treturn(-0.8*x**3+2*x-1) \ndef f1pp(x):\n\treturn(-2.4*x**2+2)\n\ndef f2(x):\n\treturn(1-np.exp(-x**2) )\ndef f2p(x):\n\treturn(2*x*np.exp(-x**2) )\ndef f2pp(x):\n\treturn(np.exp(-x**2)*(2-4*x**2) )\n\ndef f3(x):\n\treturn( (x**2+0.3)**(1./2) )\ndef f3p(x):\n\treturn( x/(x**2+0.3)**(1./2) )\ndef f3pp(x):\n\treturn(0.3/(x**2+0.3)**(3./2) ) \n\ndef newtonMethod(x0, f, fp, fpp): \n\ttolerance = 10**(-7)\n\teps = 10**(-14) \n\tp = []\n\tres = []\n\n\tmaxIt = 10\n\thaveSol = False \n\n\tfor i in range(maxIt): \n\t\ty = f(x0)\n\t\typ = fp(x0) \n\t\typp = fpp(x0) \n\t\tprint (i, x0, y, \"###\", yp, ypp)\n\t\tif (abs(ypp) < eps):\n\t\t\tprint (\"denominator too small\" )\n\t\t\tbreak\n\t\tx1 = x0 - yp/ypp \n\t\tp.append(x0) \n\t\tres.append(y) \n\t\tif (abs(x1-x0) <= tolerance * abs(x1)):\n\t\t\thaveSol = True\n\t\t\tprint (\"Found Solution\")\n\t\t\tbreak\n\t\tx0 = x1\n\t\t# print i, x0, y\n\t# print x1-x0, f(x1), f(x0) \n\treturn res, p \n\n# newtonMethod(-1, f1,f1p, f1pp) \n# print \"-----\"\n# newtonMethod(-0.1, f2, f2p, f2pp) \n# newtonMethod(-2.5, f2, f2p, f2pp) \n# print \"-----\"\n# newtonMethod(-2,f3,f3p,f3pp) \n\nprint 
(\"---------\")\nres, p = newtonMethod(-1,f1,f1p, f1pp) \ny = [f1(x) for x in np.linspace(-1,1,len(res))]\nres = [x for x in res] \nx = np.linspace(-1,1,len(res))\n# print len(x), len(y)\n# print len(p), len(res) \nplt.plot(x,y)\nplt.scatter(p, res)\nplt.axis([-1,1,-2,2])\nplt.title('Newton search, f(x) = -0.2x^4+x^2-x')\n\np = ['%.2f' %elem for elem in p]\nres = ['%.2f' %elem for elem in res] \ntabl = plt.table(cellText = [p,res], \n\tloc = 'top',\n\tcolWidths = [0.1]*len(res), colLoc = 'bottom', \n\trowLabels = ['p', 'f(p)'], rowLoc = 'left',\n\tbbox = [0,-0.2,1,0.1] )\ntabl.auto_set_font_size(False) \ntabl.set_fontsize(12)\ntabl.scale(1, 1)\n# plt.savefig('f1newt')\nprint(\"----------\")\n\nplt.figure() \nprint (\"---------\")\nres, p = newtonMethod(-2.5,f2,f2p,f2pp) \ny = [f2(x) for x in np.linspace(-3,3,len(res))]\nres = [x for x in res] \nx = np.linspace(-3,3,len(res))\n# print y\n# print len(x), len(y)\n# print len(p), len(res) \nplt.plot(x,y)\nplt.scatter(p, res)\nplt.axis([-3,3,-2,2])\nplt.title('Newton search, 1-exp(-x^2)') \np = ['%.2f' %elem for elem in p]\nres = ['%.2f' %elem for elem in res] \ntabl = plt.table(cellText = [p,res], \n\tloc = 'top',\n\tcolWidths = [0.1]*len(res), colLoc = 'center', \n\trowLabels = ['p', 'f(p)'], rowLoc = 'left',\n\tbbox = [0,-0.2,1,0.1] )\ntabl.auto_set_font_size(False) \ntabl.set_fontsize(12)\ntabl.scale(1, 1)\nprint(\"----------\")\n\nplt.figure() \nprint (\"---------\")\nres, p = newtonMethod(-2,f3,f3p,f3pp) \ny = [f3(x) for x in np.linspace(-1,1,len(res))]\nres = [x for x in res] \nx = np.linspace(-1,1,len(res))\n# print y\n# print len(x), len(y)\n# print len(p), len(res) \nplt.plot(x,y)\nplt.scatter(p, res)\nplt.title('Newton search, (x^2+0.3)^(1/2)')\nplt.axis([-1,1,0,3])\np = ['%.2f' %elem for elem in p]\nres = ['%.2f' %elem for elem in res] \ntabl = plt.table(cellText = [p,res], \n\tloc = 'top',\n\tcolWidths = [0.1]*len(res), colLoc = 'center', \n\trowLabels = ['p', 'f(p)'], rowLoc = 'left',\n\tbbox = [0,-0.2,1,0.1] )\ntabl.auto_set_font_size(False) \ntabl.set_fontsize(12)\ntabl.scale(1, 1)\nplt.show(block = False) \nplt.show() \nprint(\"----------\")","sub_path":"4th_sem_grad/OPTIMIZATION/hwk1/newt.py","file_name":"newt.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"80852717","text":"from logging import getLogger\n\nfrom django_eth.constants import NULL_ADDRESS\nfrom ethereum.utils import (check_checksum, checksum_encode, ecrecover_to_pub,\n privtoaddr, sha3)\nfrom functools import wraps\nfrom hexbytes import HexBytes\nfrom typing import Dict, Union\nfrom web3 import HTTPProvider, Web3\nfrom web3.middleware import geth_poa_middleware\nfrom web3.utils.threads import Timeout\n\nfrom .contracts import get_erc20_contract\n\nlogger = getLogger(__name__)\n\n\nclass TransactionAlreadyImported(ValueError):\n pass\n\n\nclass ReplacementTransactionUnderpriced(ValueError):\n pass\n\n\nclass FromAddressNotFound(ValueError):\n pass\n\n\nclass InvalidNonce(ValueError):\n pass\n\n\nclass InsufficientFunds(ValueError):\n pass\n\n\ndef tx_with_exception_handling(func):\n error_with_exception: Dict[str, Exception] = {\n 'Transaction with the same hash was already imported': TransactionAlreadyImported,\n 'replacement transaction underpriced': ReplacementTransactionUnderpriced,\n 'from not found': FromAddressNotFound,\n 'correct nonce': InvalidNonce,\n 
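# both phrasings below map to InsufficientFunds, since node clients word this error differently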
'insufficient funds': InsufficientFunds,\n \"doesn't have enough funds\": InsufficientFunds,\n }\n\n @wraps(func)\n def with_exception_handling(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ValueError as exc:\n str_exc = str(exc).lower()\n for reason, custom_exception in error_with_exception.items():\n if reason.lower() in str_exc:\n raise custom_exception(str(exc)) from exc\n raise exc\n return with_exception_handling\n\n\nclass EthereumServiceProvider:\n def __new__(cls):\n if not hasattr(cls, 'instance'):\n from django.conf import settings\n cls.instance = EthereumService(settings.ETHEREUM_NODE_URL,\n settings.SAFE_FUNDER_MAX_ETH,\n settings.SAFE_FUNDER_PRIVATE_KEY)\n return cls.instance\n\n\nclass EthereumService:\n NULL_ADDRESS = NULL_ADDRESS\n\n def __init__(self, ethereum_node_url, max_eth_to_send=0.1, funder_private_key=None):\n self.ethereum_node_url = ethereum_node_url\n self.max_eth_to_send = max_eth_to_send\n self.funder_private_key = funder_private_key\n self.w3 = Web3(HTTPProvider(self.ethereum_node_url))\n try:\n if self.w3.net.chainId != 1:\n self.w3.middleware_stack.inject(geth_poa_middleware, layer=0)\n # For tests using dummy connections (like IPC)\n except (ConnectionError, FileNotFoundError):\n self.w3.middleware_stack.inject(geth_poa_middleware, layer=0)\n\n def get_nonce_for_account(self, address, block_identifier=None):\n return self.w3.eth.getTransactionCount(address, block_identifier=block_identifier)\n\n @property\n def current_block_number(self):\n return self.w3.eth.blockNumber\n\n @staticmethod\n def estimate_data_gas(data: bytes):\n if isinstance(data, str):\n data = HexBytes(data)\n\n gas = 0\n for byte in data:\n if not byte:\n gas += 4 # Byte 0 -> 4 Gas\n else:\n gas += 68 # Any other byte -> 68 Gas\n return gas\n\n def get_balance(self, address: str, block_identifier=None):\n return self.w3.eth.getBalance(address, block_identifier)\n\n def get_erc20_balance(self, address: str, erc20_address: str):\n return get_erc20_contract(self.w3, erc20_address).functions.balanceOf(address).call()\n\n def get_transaction(self, tx_hash):\n return self.w3.eth.getTransaction(tx_hash)\n\n def get_transaction_receipt(self, tx_hash, timeout=None):\n if not timeout:\n return self.w3.eth.getTransactionReceipt(tx_hash)\n else:\n try:\n tx_receipt = self.w3.eth.waitForTransactionReceipt(tx_hash, timeout=timeout)\n # Parity returns tx_receipt even if tx is still pending, so we check `blockNumber` is not None\n return None if tx_receipt['blockNumber'] is None else tx_receipt\n except Timeout:\n return None\n\n def get_block(self, block_number, full_transactions=False):\n return self.w3.eth.getBlock(block_number, full_transactions=full_transactions)\n\n @tx_with_exception_handling\n def send_transaction(self, transaction_dict: Dict[str, any]) -> bytes:\n return self.w3.eth.sendTransaction(transaction_dict)\n\n @tx_with_exception_handling\n def send_raw_transaction(self, raw_transaction) -> bytes:\n return self.w3.eth.sendRawTransaction(bytes(raw_transaction))\n\n def send_unsigned_transaction(self, tx: Dict[str, any], private_key: Union[None, str]=None,\n public_key: Union[None, str]=None, retry: bool=False,\n block_identifier: Union[None, str]=None) -> bytes:\n \"\"\"\n Send a tx using an unlocked public key in the node or a private key. 
`public_key` and\n `private_key` cannot both be `None`\n :param tx:\n :param private_key:\n :param public_key:\n :param retry: Retry if a problem with nonce is found\n :param block_identifier:\n :return:\n \"\"\"\n if private_key:\n address = self.private_key_to_address(private_key)\n elif public_key:\n address = public_key\n else:\n logger.error('No ethereum account provided. Need a public_key or private_key')\n raise ValueError(\"Ethereum account was not configured or unlocked in the node\")\n\n nonce = tx.get('nonce')\n if nonce is None:\n nonce = self.get_nonce_for_account(address, block_identifier=block_identifier)\n tx['nonce'] = nonce\n\n number_errors = 0\n while number_errors != 5: # Retry if a problem with a nonce arises\n try:\n if private_key:\n signed_tx = self.w3.eth.account.signTransaction(tx, private_key=private_key)\n logger.debug('Sending %d wei from %s to %s', tx['value'], address, tx['to'])\n return self.w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n elif public_key:\n tx['from'] = public_key\n if 'nonce' not in tx:\n tx['nonce'] = self.get_nonce_for_account(public_key, block_identifier=block_identifier)\n return self.send_transaction(tx)\n except ValueError as e:\n str_e = str(e).lower()\n if retry and 'replacement transaction underpriced' in str_e:\n logger.error('Tx with same nonce was already sent, retrying with nonce + 1')\n tx['nonce'] += 1\n elif retry and \"the tx doesn't have the correct nonce\" in str_e:\n logger.error('Tx does not have the correct nonce, retrying after refreshing the nonce')\n tx['nonce'] = self.get_nonce_for_account(address, block_identifier='latest')\n number_errors += 1\n else:\n raise e\n\n def send_eth_to(self, to: str, gas_price: int, value: int, gas: int=22000, retry: bool=False,\n block_identifier=None) -> bytes:\n \"\"\"\n Send ether using configured account\n :param to: to\n :param gas_price: gas_price\n :param value: value(wei)\n :param gas: gas, defaults to 22000\n :param retry: Retry if a problem is found\n :param block_identifier: None by default; use 'pending' to include unconfirmed txs\n :return: tx_hash\n \"\"\"\n\n assert check_checksum(to)\n assert value < self.w3.toWei(self.max_eth_to_send, 'ether')\n\n private_key = None\n public_key = None\n\n if self.funder_private_key:\n private_key = self.funder_private_key\n elif self.w3.eth.accounts:\n public_key = self.w3.eth.accounts[0]\n else:\n logger.error('No ethereum account configured')\n raise ValueError(\"Ethereum account was not configured or unlocked in the node\")\n\n tx = {\n 'to': to,\n 'value': value,\n 'gas': gas,\n 'gasPrice': gas_price,\n }\n\n return self.send_unsigned_transaction(tx, private_key=private_key, public_key=public_key,\n retry=retry, block_identifier=block_identifier)\n\n def check_tx_with_confirmations(self, tx_hash: str, confirmations: int) -> bool:\n \"\"\"\n Check tx hash and make sure it has the confirmations required\n :param tx_hash: Hash of the tx\n :param confirmations: Minimum number of confirmations required\n :return: True if tx was mined with the number of confirmations required, False otherwise\n \"\"\"\n tx_receipt = self.w3.eth.getTransactionReceipt(tx_hash)\n if not tx_receipt or tx_receipt['blockNumber'] is None:\n # If tx_receipt exists but blockNumber is None, tx is still pending (just Parity)\n return False\n else:\n return (self.w3.eth.blockNumber - tx_receipt['blockNumber']) >= confirmations\n\n @staticmethod\n def private_key_to_address(private_key):\n return checksum_encode(privtoaddr(private_key))\n\n 
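# the helper below recovers the signer's address from an ECDSA signature (v, r, s) over a 32-byte message hash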
@staticmethod\n def get_signing_address(hash, v, r, s) -> str:\n \"\"\"\n :return: checksum encoded address starting with 0x, for example `0x568c93675A8dEb121700A6FAdDdfE7DFAb66Ae4A`\n :rtype: str\n \"\"\"\n encoded_64_address = ecrecover_to_pub(hash, v, r, s)\n address_bytes = sha3(encoded_64_address)[-20:]\n return checksum_encode(address_bytes)\n","sub_path":"gnosis/safe/ethereum_service.py","file_name":"ethereum_service.py","file_ext":"py","file_size_in_byte":9802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"34689344","text":"#-*-coding:utf-8-*-\nimport tkinter\nimport tkinter.messagebox\n\nroot = tkinter.Tk()\nroot.title(\"Tic-Tac-Toe\")\n\n#keep the current board state\nchessboard=['','','','','','','','','']\n#the 8 possible winning lines\nvictorychesskeep=[[0,1,2],[3,4,5],[6,7,8],[0,3,6],[1,4,7],[2,5,8],[0,4,8],[2,4,6]]\nisgameover = True\n\n \n#callback functions\ndef updatebttx_1():\n button_1['text']='O'\n button_1['state']='disabled'#freeze once clicked\n chessboard[0]='O'\n if judgemodulcase.judge()== False:#judge after O's move\n computerstepcase.computerchess()\n judgemodulcase.judge()#judge after X's move\n \ndef updatebttx_2():\n button_2['text']='O'\n button_2['state']='disabled'\n chessboard[1]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_3():\n button_3['text']='O'\n button_3['state']='disabled'\n chessboard[2]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_4():\n button_4['text']='O'\n button_4['state']='disabled'\n chessboard[3]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_5():\n button_5['text']='O'\n button_5['state']='disabled'\n chessboard[4]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_6():\n button_6['text']='O'\n button_6['state']='disabled'\n chessboard[5]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_7():\n button_7['text']='O'\n button_7['state']='disabled'\n chessboard[6]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_8():\n button_8['text']='O'\n button_8['state']='disabled'\n chessboard[7]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n\ndef updatebttx_9():\n button_9['text']='O'\n button_9['state']='disabled'\n chessboard[8]='O'\n if judgemodulcase.judge()== False:\n computerstepcase.computerchess()\n judgemodulcase.judge()\n \ndef chessboardfreeze():#freeze the board\n button_1['state']='disabled'\n button_2['state']='disabled'\n button_3['state']='disabled'\n button_4['state']='disabled'\n button_5['state']='disabled'\n button_6['state']='disabled'\n button_7['state']='disabled'\n button_8['state']='disabled'\n button_9['state']='disabled' \ndef unchessboardfreeze():#unfreeze the board\n button_1['state']='normal'\n button_2['state']='normal'\n button_3['state']='normal'\n button_4['state']='normal'\n button_5['state']='normal'\n button_6['state']='normal'\n button_7['state']='normal'\n button_8['state']='normal'\n button_9['state']='normal'\n\n#board layout\nbutton_1 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_1['text']=''\n#button_1.bind('',updatebttx_1)\nbutton_1[\"command\"] = updatebttx_1\nbutton_1.grid(row=0,column = 0)\n\nbutton_2 = tkinter.Button(root,width = 13,height=5,cursor = 
\"hand2\")\nbutton_2['text']=''\nbutton_2[\"command\"] = updatebttx_2\nbutton_2.grid(row=0,column = 1)\n\nbutton_3 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_3['text']=''\nbutton_3[\"command\"] =updatebttx_3\nbutton_3.grid(row=0,column = 2)\n\nbutton_4 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_4['text']=''\nbutton_4[\"command\"] =updatebttx_4\nbutton_4.grid(row=1,column = 0)\n\nbutton_5 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_5['text']=''\nbutton_5[\"command\"] = updatebttx_5\nbutton_5.grid(row=1,column = 1)\n\nbutton_6 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_6['text']=''\nbutton_6[\"command\"] = updatebttx_6\nbutton_6.grid(row=1,column = 2)\n\nbutton_7 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_7['text']=''\nbutton_7[\"command\"] = updatebttx_7\nbutton_7.grid(row=2,column = 0)\n\nbutton_8 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_8['text']=''\nbutton_8[\"command\"] = updatebttx_8\nbutton_8.grid(row=2,column = 1)\n\nbutton_9 = tkinter.Button(root,width = 13,height=5,cursor = \"hand2\")\nbutton_9['text']=''\nbutton_9[\"command\"] = updatebttx_9\nbutton_9.grid(row=2,column = 2)\n\n \nclass Judgemodul:#判断胜负类\n def judge(self):\n ishasend = False\n eval = chessthink.evaluatefunc()\n if eval == 1000 or eval == -1000 or eval ==0:\n ishasend = True\n chessboardfreeze()\n if(eval == 1000):\n tkinter.messagebox.askokcancel(\"结果\",\"电脑赢了\")\n if(eval == -1000):\n tkinter.messagebox.askokcancel(\"结果\",\"你赢了\")\n if(eval == 0):\n tkinter.messagebox.askokcancel(\"结果\",\"平局\")\n return ishasend \n\n\nclass Thinkfunc:#AI走法类\n \n def evaluatefunc(self):#评估函数\n end = 1#结果初始化 0:平局,1:继续,1000:赢......\n isfull = True#棋盘是否已满\n Xcx = 0#X的2连子计量\n Ocx = 0\n for ech in chessboard:\n if ech == '':\n isfull = False\n break \n for x in range(0,len(victorychesskeep)):\n #3连子\n if chessboard[victorychesskeep[x][0]] == chessboard[victorychesskeep[x][1]] ==chessboard[victorychesskeep[x][2]]:\n if chessboard[victorychesskeep[x][0]] != '':\n if chessboard[victorychesskeep[x][0]] == 'X':\n end =1000\n return end\n elif chessboard[victorychesskeep[x][0]] == 'O':\n end = -1000\n return end\n elif end != 1000 or end != -1000:#没赢没输\n if isfull == True:#棋盘满了\n end = 0#平局\n return end\n elif isfull == False:#没赢没输没满,判断2连子\n if chessboard[victorychesskeep[x][0]]==chessboard[victorychesskeep[x][1]] or chessboard[victorychesskeep[x][2]]==chessboard[victorychesskeep[x][1]]: \n if chessboard[victorychesskeep[x][1]] != '':\n if chessboard[victorychesskeep[x][1]] == 'X':\n Xcx = Xcx +1\n elif chessboard[victorychesskeep[x][1]] =='O':\n Ocx = Ocx +1\n \n end = Xcx*50 - Ocx*50#2连子差\n if end == 0:#不是平局的0,则继续\n end = 1\n return end \n \n \n def recurmin(self,searchdeep,alpha,beta):\n isgameover = False\n bettervalue = 1000\n value = self.evaluatefunc()\n if value == 1000 or value == -1000 or value == 0:\n isgameover = True\n if(alpha >= beta):#对min最有利>=对max最有利,β剪枝\n return value\n if(searchdeep == 0 or isgameover == True):\n return value\n for ech in range(0,len(chessboard)):\n if chessboard[ech] == '':\n chessboard[ech]= 'O'\n recordvalue = self.recurmax(searchdeep-1,alpha,min(bettervalue,beta))\n bettervalue = min(recordvalue,bettervalue)\n chessboard[ech] =''\n return bettervalue\n \n def recurmax(self,searchdeep,alpha,beta):\n isgameover = False\n bettervalue = -1000\n value = self.evaluatefunc()\n if value ==1000 or value == -1000 or value == 0:\n isgameover = True\n 
        if(beta <= alpha):#best for max <= best for min: alpha pruning\n            return value\n        if(searchdeep ==0 or isgameover == True):\n            return value\n        for ech in range(0,len(chessboard)):\n            if chessboard[ech] == '':\n                chessboard[ech] = 'X'\n                recordvalue = self.recurmin(searchdeep-1,max(alpha,bettervalue),beta)\n                bettervalue = max(recordvalue,bettervalue)\n                chessboard[ech] =''\n        return bettervalue\n\n    \n    def minMax(self,searchdeep):\n        keepindex=[]\n        bettervalue = -1000\n        for ech in range(0,len(chessboard)):\n            if chessboard[ech] == '':\n                chessboard[ech] = 'X'#try the move\n                recordvalue = self.recurmin(searchdeep,-1000,1000)\n                if(recordvalue >= bettervalue):#keep the maximum\n                    bettervalue = recordvalue\n                    betterposition = ech\n                chessboard[ech] = ''#undo the move\n        return betterposition\n\n\n\nclass Computerstep:#computer move class\n    def computerchess(self):\n        nextsp = chessthink.minMax(5)\n        if nextsp == 0:\n            button_1[\"text\"]='X'\n            button_1['state']='disabled'#freeze once played\n            chessboard[0]='X'\n        elif nextsp ==1:\n            button_2[\"text\"]='X'\n            button_2['state']='disabled'\n            chessboard[1]='X'\n        elif nextsp ==2:\n            button_3[\"text\"]='X'\n            button_3['state']='disabled'\n            chessboard[2]='X'\n        elif nextsp ==3:\n            button_4[\"text\"]='X'\n            button_4['state']='disabled'\n            chessboard[3]='X'\n        elif nextsp ==4:\n            button_5[\"text\"]='X'\n            button_5['state']='disabled'\n            chessboard[4]='X'\n        elif nextsp ==5:\n            button_6[\"text\"]='X'\n            button_6['state']='disabled'\n            chessboard[5]='X'\n        elif nextsp ==6:\n            button_7[\"text\"]='X'\n            button_7['state']='disabled'\n            chessboard[6]='X'\n        elif nextsp ==7:\n            button_8[\"text\"]='X'\n            button_8['state']='disabled'\n            chessboard[7]='X'\n        elif nextsp ==8:\n            button_9[\"text\"]='X'\n            button_9['state']='disabled'\n            chessboard[8]='X'\n\ndef main():\n    #declare the shared state as global and reset it before the first move\n    #(a plain assignment here would only create unused local variables)\n    global chessboard, isgameover\n    chessboard=['','','','','','','','','']\n    isgameover = True\n    unchessboardfreeze()#unfreeze the board\n    computerstepcase.computerchess()#computer makes the first move\n    \nchessthink = Thinkfunc()\ncomputerstepcase = Computerstep()\njudgemodulcase = Judgemodul()\nmain()\nroot.resizable(width='false',height='false')#keep the window size fixed\nroot.mainloop()\n","sub_path":"PythonScript/XO祺.py","file_name":"XO祺.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"234215852","text":"# Copyright (c) 2018 Anki, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License in the file LICENSE.txt or at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Support for Vector's camera.\n\nVector has a built-in camera which he uses to observe the world around him.\n\nThe :class:`CameraComponent` class defined in this module is made available as\n:attr:`anki_vector.robot.Robot.camera` and can be used to enable/disable image\nsending and observe images being sent by the robot.\n\nThe camera resolution is 1280 x 720 with a field of view of 90 deg (H) x 50 deg (V).\"\"\"\n\n# __all__ should order by constants, event classes, other classes, functions.\n__all__ = ['CameraComponent']\n\nimport asyncio\nfrom concurrent.futures import CancelledError\nimport sys\n\ntry:\n    import 
cv2\nexcept ImportError as exc:\n sys.exit(\"Cannot import opencv-python: Do `pip3 install opencv-python` to install\")\n\nfrom . import util\nfrom .messaging import protocol\n\ntry:\n import numpy as np\nexcept ImportError as exc:\n sys.exit(\"Cannot import numpy: Do `pip3 install numpy` to install\")\n\ntry:\n from PIL import Image\nexcept ImportError:\n sys.exit(\"Cannot import from PIL: Do `pip3 install --user Pillow` to install\")\n\n\nclass CameraComponent(util.Component):\n \"\"\"Represents Vector's camera.\n\n The CameraComponent object receives images from Vector's camera, unpacks the data,\n composes it and makes it available as latest_image.\n\n The :class:`anki_vector.robot.Robot` or :class:`anki_vector.robot.AsyncRobot` instance observes the camera.\n\n .. testcode::\n\n import anki_vector\n import time\n\n with anki_vector.Robot(enable_camera_feed=True) as robot:\n time.sleep(1)\n image = robot.camera.latest_image\n image.show()\n\n :param robot: A reference to the owner Robot object.\n \"\"\"\n\n def __init__(self, robot):\n super().__init__(robot)\n\n self._latest_image: Image.Image = None\n self._latest_image_id: int = None\n self._camera_feed_task: asyncio.Task = None\n\n @property\n def latest_image(self) -> Image.Image:\n \"\"\":class:`Image.Image`: The most recently processed image received from the robot.\n\n :getter: Returns the Pillow Image representing the latest image\n\n .. testcode::\n\n import anki_vector\n import time\n\n with anki_vector.Robot(enable_camera_feed=True) as robot:\n time.sleep(1)\n image = robot.camera.latest_image\n image.show()\n \"\"\"\n\n return self._latest_image\n\n @property\n def latest_image_id(self) -> int:\n \"\"\"The most recently processed image's id received from the robot.\n\n Used only to track chunks of the same image.\n\n :getter: Returns the id for the latest image\n\n .. 
testcode::\n\n            import anki_vector\n            import time\n\n            with anki_vector.Robot(enable_camera_feed=True) as robot:\n                time.sleep(1)\n                image = robot.camera.latest_image\n                image.show()\n                print(f\"latest_image_id: {robot.camera.latest_image_id}\")\n        \"\"\"\n        return self._latest_image_id\n\n    def init_camera_feed(self) -> None:\n        \"\"\"Begin camera feed task.\"\"\"\n        if not self._camera_feed_task or self._camera_feed_task.done():\n            self._camera_feed_task = self.conn.loop.create_task(self._request_and_handle_images())\n\n    def close_camera_feed(self) -> None:\n        \"\"\"Cancel camera feed task.\"\"\"\n        if self._camera_feed_task:\n            self._camera_feed_task.cancel()\n            future = self.conn.run_coroutine(self._camera_feed_task)\n            future.result()\n\n    def _unpack_image(self, msg: protocol.CameraFeedResponse) -> None:\n        \"\"\"Processes raw data from the robot into a more useful image structure.\"\"\"\n        size = len(msg.data)\n\n        # Construct numpy array out of source data\n        array = np.empty(size, dtype=np.uint8)\n        array[0:size] = list(msg.data)\n\n        # Decode compressed source data into uncompressed image data\n        imageArray = cv2.imdecode(array, -1)\n        imageArray = cv2.cvtColor(imageArray, cv2.COLOR_BGR2RGB)\n\n        # Convert to Pillow Image\n        self._latest_image = Image.fromarray(imageArray)\n        self._latest_image_id = msg.image_id\n        self.robot.viewer.enqueue_frame(self._latest_image)\n\n    async def _request_and_handle_images(self) -> None:\n        \"\"\"Queries and listens for camera feed events from the robot.\n        Received events are parsed by a helper function.\"\"\"\n        try:\n            req = protocol.CameraFeedRequest()\n            async for evt in self.grpc_interface.CameraFeed(req):\n                # If the camera feed is disabled after the stream is set up, exit the stream\n                # (the camera feed on the robot is disabled internally on stream exit)\n                if not self.robot.enable_camera_feed:\n                    self.logger.warning('Camera feed has been disabled. Enable the feed to start/continue receiving camera feed data')\n                    return\n                self._unpack_image(evt)\n        except CancelledError:\n            self.logger.debug('Camera feed task was cancelled. This is expected during disconnection.')\n
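# --- added sketch (not part of the Anki SDK) ---
# `_unpack_image` above is the core of the feed: the robot streams JPEG bytes,
# OpenCV decodes them, and Pillow wraps the result. The same pipeline as a
# standalone helper, assuming `jpeg_bytes` holds one compressed frame:
def _decode_jpeg_frame(jpeg_bytes: bytes) -> Image.Image:
    array = np.frombuffer(jpeg_bytes, dtype=np.uint8)  # wrap the bytes without copying
    bgr = cv2.imdecode(array, -1)                      # JPEG -> BGR pixel array
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)         # OpenCV stores BGR; PIL expects RGB
    return Image.fromarray(rgb)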
","sub_path":"anki_vector/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"496859301","text":"# -*- coding:utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nimport os\nmain_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(main_dir))\n\nimport argparse\nfrom org.org import Org\nfrom user.user import User\nfrom function.function import IndustryAndFunction\nfrom db.mongoExe import MongoExe\nfrom main import get_manual_match_dict, cleanYXT\n\nENV = 'line'\nUSER_TABLE = 'user'\nmongoConnections = MongoExe()\n\n\ndef update_user_info(orgid):\n    # create obj\n    org_obj = Org(ENV)\n    user_obj = User(ENV, 'elearning.CORE_USERPROFILE', USER_TABLE)\n    mongo_conn_alias, mongoConnection = mongoConnections.getConnection()\n\n    function_obj = IndustryAndFunction(\n        org_obj.get_single_org_info_from_slave(orgid),\n        user_obj.get_user_list_from_master(orgid),\n        get_manual_match_dict(mongoConnection, orgid))\n\n    # set detail\n    user_list = function_obj.set_region().set_industry().set_function().user_list\n    user_obj.delete_records(orgid)\n    user_obj.insert_user_list_into_slave(user_list)\n\n    # close all connections\n    user_obj.close()\n    org_obj.close()\n    mongoConnections.close()\n\n    cleanYXT()\n\n\ndef config_argparser():\n    parser = argparse.ArgumentParser(description='set the orgId')\n    parser.add_argument(\n        '--orgid',\n        required=True,\n        dest='orgid',\n        action='store',\n        help='set the orgid you want to update users')\n    return parser.parse_args()\n\nif __name__ == '__main__':\n    args = config_argparser()\n    update_user_info(args.orgid)\n    # update_user_info('78558c63-22fe-4e84-88f2-f5bb4df64855')\n","sub_path":"skyeye/skyeyeops_backend/jobs/UserprofileCleaning/main_change_info.py","file_name":"main_change_info.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"518263689","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\n\nimport numpy as np\n\nfrom mecab_analyzer import MecabAnalyzer, Morpheme\nfrom shared.datetime_extentions import *\nfrom shared.decorators import trace\n\n\nclass PnDictScorer(MecabAnalyzer):\n    \"\"\"\n    Performs negative/positive (sentiment polarity) scoring using a Japanese sentiment polarity dictionary.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n        self.PN_DICT = self._init_pn_dict()\n\n    @trace()\n    def update_negaposi(self, start_datetime: datetime, end_datetime: datetime):\n        \"\"\"\n        Calculates nega-posi (sentiment) scores and stores them in MongoDB.\n        Retweets/spam are excluded.\n        :param start_datetime: search start time\n        :param end_datetime: search end time\n        :return: None\n        \"\"\"\n        self.log.info(\"Started calculating nega-posi scores\")\n        # exclude retweets/spam; covers the past 14 days\n\n        search_condition = {'retweeted_status': {'$eq': None}, 'spam': {'$eq': None},\n                            'created_datetime': {'$gte': start_datetime, '$lte': end_datetime}}\n\n        # calculate the nega-posi score and set it in the DB\n        score_list = []\n        for tweet in self.tweets.find(search_condition, {'id': 1, 'text': 1}):\n            score = self._calc_negaposi_score(tweet[\"text\"])\n            score_list.append(score)\n            self.tweets.update({'_id': tweet['_id']}, {'$set': {'polarity': score}})\n            # print(\"text: {}\".format(tweet[\"text\"]))\n            # print(\"score: {}\".format(score))\n        ave = np.array(score_list).mean()\n        print(\"The average nega-posi score was {}.\".format(ave))\n        self.log.info(\"Finished calculating nega-posi scores\")\n\n    def _calc_negaposi_score(self, text: str) -> float:\n        \"\"\"\n        Calculates the average polarity using the word sentiment polarity table.\n        :param text: input string\n        :return: average polarity value (-1 to 1)\n        \"\"\"\n        polarity_list = []\n        # split into individual sentences and skip ones starting with http before processing\n        sentences = [s for s in re.split(r'\\s+|。|.|?', text) if len(s) > 0 and not s.startswith(\"http\")]\n        for sentence in sentences:\n            m_list = self._sentence2morpheme_list(sentence=sentence)\n\n            for idx, m in enumerate(m_list):\n                prev = m_list[idx - 1]\n                # if a strong positive/negative follows right after a verb or adjective, zero out the previous polarity as an adjustment\n                # dependency parsing with CaboCha would probably be a better approach\n                if prev and (abs(m.polarity) > -0.9) and (prev.part_of_speech in (\"動詞\", \"形容詞\")):\n                    prev.pn_polarity = 0.0\n\n            # exclude morphemes whose polarity is neutral (neither positive nor negative)\n            # scores are currently skewed negative, so this is adjusted heuristically\n            polarity_list.extend([m.polarity for m in m_list\n                                  if (m.part_of_speech in (\"形容詞\", \"動詞\", \"助詞\")) and\n                                  (m.polarity < -0.5 or m.polarity > 0.0)])\n            \"\"\"\n            polarity_list.extend([m.polarity for m in m_list\n                                  if (abs(m.polarity) > 0.8 and m.part_of_speech != \"名詞\") or (abs(m.polarity) > 0.5)])\n            \"\"\"\n\n        # calculate the average polarity\n        if len(polarity_list) > 0:\n            return np.array(polarity_list).mean()\n        else:\n            return 0.0\n\n    def _sentence2morpheme_list(self, sentence: str) -> list:\n        \"\"\"\n        Splits a sentence with MeCab and converts it into a list of morpheme data.\n        :param sentence: sentence\n        :return: list of Morpheme objects built from the sentence\n        \"\"\"\n        morpheme_list = []\n        self.tagger.parse(\"\")\n        node = self.tagger.parseToNode(sentence)\n\n        while node:\n            ft = node.feature.split(\",\")\n            word = node.surface\n            original_form = ft[6]\n            part_of_speech = ft[0]\n\n            polarity = self._calc_polarity(word=word, original=original_form, part=part_of_speech)\n            morpheme = Morpheme(word=word, part=part_of_speech, original=original_form, polarity=polarity)\n\n            # keep symbols and BOS/EOS markers out of the scoring list\n            if morpheme.part_of_speech != \"BOS/EOS\" and morpheme.part_of_speech != \"記号\":\n                morpheme_list.append(morpheme)\n\n            node = node.next\n\n        return morpheme_list\n\n    def _calc_polarity(self, word, original, part):\n        \"\"\"\n        :param word:\n        :param original:\n        :param part:\n        :return: polarity value\n        \"\"\"\n        # http://www.lr.pi.titech.ac.jp/~takamura/pndic_en.html\n        # words that are defined as negative in the dictionary but should be excluded are given 0.0\n        if (self.PN_DICT is None) or (word in (\"印刷\", \"プリント\", \"写真\", \"用紙\", \"コピー\", \"ネット\")):\n            return 0.0\n        elif original != \"*\":\n            return self.PN_DICT.get((original, part), float(0))\n        else:\n            return self.PN_DICT.get((word, part), float(0))\n\n    @staticmethod\n    def _init_pn_dict() -> dict:\n        \"\"\"\n        Converts the word sentiment polarity table into dictionary form.\n        :return: dict with key: tuple (word, part of speech) and value: polarity\n        \"\"\"\n        pn_dict = {}\n        with open(\"conf/pn_ja.dic\", \"r\", encoding=\"shift-jis\") as file:\n            for idx, text in enumerate(file):\n                l = text.strip().split(\":\")\n                key = (l[0], l[2])\n                val = float(l[3])\n                pn_dict[key] = val\n        return pn_dict\n","sub_path":"analyzer/pn_dict_scorer.py","file_name":"pn_dict_scorer.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"600273546","text":"#!/usr/bin/env python3\r\n#\r\n\r\n\r\nfrom threading import Thread, Event\r\nimport threading\r\nimport time, random\r\n\r\n\r\ndef conn_mysql():\r\n    count = 1\r\n    while not event.is_set():\r\n        if count >= 4:\r\n            raise TimeoutError('time out')\r\n        print(f'{threading.current_thread().getName()} attempt #{count} to connect')\r\n        event.wait(1)\r\n        count += 1\r\n    print(f'\\n{threading.current_thread().getName()} connected successfully\\n')\r\n\r\n\r\ndef check_mysql():\r\n    print(f'\\033[45m{threading.current_thread().getName()} is checking mysql\\033[0m')\r\n    # the check may take 1-6 seconds: 1-3 lead to a successful connection, 4-6 to failure, i.e. 50/50 odds\r\n    check_duration = random.randint(1, 6)\r\n    print(f'sleep: {check_duration}')\r\n    time.sleep(check_duration)\r\n    
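    # (added note) set() wakes every thread blocked in event.wait(); from this
    # point is_set() returns True, so the wait(1) calls in conn_mysql() return
    # immediately and both connector loops exit on their next loop check.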
event.set()\r\n\r\n\r\nif __name__ == '__main__':\r\n event = Event()\r\n conn1 = Thread(target=conn_mysql)\r\n conn2 = Thread(target=conn_mysql)\r\n check = Thread(target=check_mysql)\r\n\r\n conn1.start()\r\n conn2.start()\r\n check.start()\r\n","sub_path":"basic_/concurrent_/event_demo1.py","file_name":"event_demo1.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190199373","text":"GOCARDLESS_ENVIRONMENT = 'sandbox'\nGOCARDLESS_APP_DOMAIN = 'api.andreas.cloock.be'\n\nPLIVO_WEBHOOK_URL = 'http://api.andreas.cloock.be/sms/plivo'\nTWILIO_WEBHOOK_URL = 'http://api.andreas.cloock.be/sms/twilio'\n\nMIDDLEWARE_CLASSES += (\n 'lunch.middleware.PrintExceptionMiddleware',\n 'qinspect.middleware.QueryInspectMiddleware',\n 'livereload.middleware.LiveReloadScript',\n)\n\nALLOWED_HOSTS = [\n 'lunchbreak.dev',\n 'www.lunchbreak.dev',\n 'api.lunchbreak.dev',\n\n 'lunchbreak.redirect',\n 'www.lunchbreak.redirect',\n 'api.lunchbreak.redirect',\n\n 'andreas.cloock.be',\n 'www.andreas.cloock.be',\n 'api.andreas.cloock.be',\n]\n\nREDIRECTED_HOSTS = {\n 'lunchbreak.redirect': 'www.lunchbreak.dev',\n 'www.lunchbreak.redirect': 'www.lunchbreak.dev',\n}\n\nINSTALLED_APPS = [\n 'livereload',\n 'django_extensions',\n] + INSTALLED_APPS\n\nSSL = False\nDEBUG = True\n\nQUERY_INSPECT_LOG_TRACEBACKS = False\n\n# Whether the Query Inspector should do anything (default: False)\nQUERY_INSPECT_ENABLED = True\n# Whether to log the stats via Django logging (default: True)\nQUERY_INSPECT_LOG_STATS = True\n# Whether to add stats headers (default: True)\nQUERY_INSPECT_HEADER_STATS = True\n# Whether to log duplicate queries (default: False)\nQUERY_INSPECT_LOG_QUERIES = False\n# Whether to log queries that are above an absolute limit (default: None - disabled)\nQUERY_INSPECT_ABSOLUTE_LIMIT = None # in milliseconds\n# Whether to log queries that are more than X standard deviations above\n# the mean query time (default: None - disabled)\nQUERY_INSPECT_STANDARD_DEVIATION_LIMIT = None\n# Whether to include tracebacks in the logs (default: False)\nQUERY_INSPECT_LOG_TRACEBACKS = False\n# Project root (a list of directories, see below - default empty)\nQUERY_INSPECT_TRACEBACK_ROOTS = ['/Users/Andreas/Development/Python/Lunchbreak-Backend']\n\nAPNS_HOST = 'gateway.sandbox.push.apple.com'\nAPNS_FEEDBACK_HOST = 'feedback.sandbox.push.apple.com'\nAMQP_USER = 'guest'\nAMQP_PASSWORD = 'guest'\nAMQP_HOST = '127.0.0.1'\n\nPAYCONIQ_WEBHOOK_DOMAIN = 'api.andreas.cloock.be'\n","sub_path":"lunchbreak/Lunchbreak/settings/branches/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"481026498","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport MISTY1_model_arbitrarily_sbox as mm\nimport os\nimport time\n\nif __name__ == \"__main__\":\n\n cd = dict()\n\n cd[\"cipher_name\"] = \"MISTY1_max_round_arbitrarily\"\n\n cd[\"cipher_size\"] = 64\n cd[\"branch_size\"] = 32\n cd[\"mode\"] = 1\n distinguish_find = True\n round_i = 4\n folder = cd[\"cipher_name\"] + \"_mode{}\".format(cd[\"mode\"])\n\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n cd[\"record_file\"] = folder + \"////\" + cd[\"cipher_name\"] + \"_record_mode{}.txt\".format(cd[\"mode\"])\n cd[\"time_record\"] = folder + \"////\" + cd[\"cipher_name\"] + \"_time_record_mode{}.txt\".format(cd[\"mode\"])\n\n '''\n search_space = [[0, 43]]\n '''\n search_space = 
list()\n for i in range(0, 32):\n for j in range(32, 64):\n search_space.append([i, j])\n\n distinguish_find = True\n\n t1 = time.time()\n\n cd[\"solve_file\"] = folder + \"////\" + cd[\"cipher_name\"] + \"_round{}_model.stp\".format(round_i)\n search_count = 0\n distinguish_count = 0\n for sp in search_space:\n search_count += 1\n cd[\"b1\"] = [0 for i in range(0, 64)]\n cd[\"e1\"] = [0 for i in range(0, 64)]\n\n cd[\"b1\"][sp[0]] = 1\n cd[\"e1\"][sp[1]] = 1\n mode = [cd[\"mode\"], [0, round_i]]\n s_t1 = time.time()\n mm.model_build(cd, mode)\n flag = mm.solver(cd[\"solve_file\"])\n s_t2 = time.time()\n print(s_t2 - s_t1)\n if flag:\n distinguish_count += 1\n\n rf = open(cd[\"record_file\"], \"a\")\n rf.write(\"*\" * 20)\n rf.write(\"{} 4-round impossible distinguish found\\n\".format(distinguish_count))\n rf.write(\"when the values:\\n\")\n rf.write(\"b1 = {}\\n\".format(str(cd[\"b1\"])))\n rf.write(\"e1 = {}\\n\".format(str(cd[\"e1\"])))\n rf.close()\n distinguish_find = True\n print(\"testing: round = {}, time = {}, search_count = {}\".format(round_i, s_t2 - s_t1, search_count))\n t2 = time.time()\n tf = open(cd[\"time_record\"], \"a\")\n tf.write(\"After \" + str(t2 - t1) + \"time, we found total ({}) 4-round impossible differential.\\n\\n\".format(search_count))\n tf.close()\n","sub_path":"MISTY1/New Impossible Differentials/MISTY1_main_arbitrarity_sbox.py","file_name":"MISTY1_main_arbitrarity_sbox.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"592388247","text":"grid = [[0] * 1000 for _ in range(1000)]\n\nfor line in open('input.txt'):\n command, start, _, end = line.rsplit(' ', 3)\n start_x, start_y = [int(c) for c in start.split(',')]\n end_x, end_y = [int(c) for c in end.split(',')]\n\n for y in range(start_y, end_y+1):\n for x in range(start_x, end_x+1):\n if command == 'turn on':\n grid[y][x] = 1\n elif command == 'turn off':\n grid[y][x] = 0\n elif command == 'toggle':\n grid[y][x] = 0 if grid[y][x] else 1\n\nprint(sum(sum(row) for row in grid))\n","sub_path":"2015/day_6/lights.py","file_name":"lights.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"75309091","text":"from django.urls import path\n\nfrom .views import *\n\napp_name = 'blog'\n\nurlpatterns = [\n path('', PostList.as_view(), name='home'),\n path('', PostDetail.as_view(), name='post_detail'),\n path('accounts/signup/', signup_view, name='signup'),\n path('accounts/profile/', profile_view, name='profile'),\n path('accounts/login/', login_view, name='login'),\n path('accounts/logout/', logout_view, name='logout')\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"108975117","text":"\nfrom pybind11 import get_cmake_dir\nfrom pybind11.setup_helpers import Pybind11Extension, build_ext\nfrom setuptools import Extension\nfrom .utils import get_incs, get_srcs\n\n\n\n# libi2c_module = Extension('pylibi2c', include_dirs=[\n# 'ext_modules/libi2c/src'], sources=get_srcs('ext_modules/libi2c/src'))\n\next_so = \"./ext_modules/libmaix/components/libmaix/lib/arch/r329\"\n\n_maix_module = Extension('_maix', include_dirs=['ext_modules/_maix/include', 'ext_modules/libmaix/components/libmaix/include'],\n sources=get_srcs('ext_modules/_maix'),\n libraries=[\n \"jpeg\"\n ],\n)\n\n# python3.8 
-m pip install pybind11\n# _maix_vivo_module = Pybind11Extension(\"_maix_vivo\",\n# include_dirs=[\n# get_incs(\n# 'ext_modules/libmaix/components/libmaix/include')\n# ],\n# sources=get_srcs(\n# 'ext_modules/_maix_vivo'),\n# libraries=[\n# # \"dl\", \n# # \"rt\", \n# # \"log\", \n# # \"ion\", \n# \"pthread\", \n# # \"cdc_base\",\n# # \"MemAdapter\", \n# # \"media_utils\",\n# # \"mpp_vi\", \n# # \"mpp_isp\", \n# # \"ISP\",\n# # \"venc_base\", \n# # \"mpp_component\", \n# # \"adecoder\", \n# # \"asound\", \n# # \"venc_base\", \n# # \"hwdisplay\",\n# # \"maix_utils\", \n# # \"maix_cam\", \n# # \"maix_image\",\n# ],\n# library_dirs=[ ext_so, ],\n# extra_link_args=[-Wl,-rpath=/usr/lib/python3.8/site-packages/maix -DR329],\n# # define_macros=[('V831Camera', None)],\n# )\n\n# python3.8 -m pip install pybind11\n_maix_opencv_module = Pybind11Extension(\n name = \"_maix_opencv\",\n include_dirs=[get_incs('ext_modules/libmaix/components/libmaix/lib/arch/r329/include/opencv4/')],\n sources=get_srcs('ext_modules/_maix_opencv'),\n libraries=[\n \"opencv_aruco\", \n \"opencv_dnn\", \n \"opencv_hfs\", \n \"opencv_optflow\", \n \"opencv_shape\", \n \"opencv_videoio\",\n \"opencv_bgsegm\", \n \"opencv_dpm\", \n \"opencv_highgui\", \n \"opencv_phase_unwrapping\", \n \"opencv_stereo\",\n \"opencv_video\", \n \"opencv_bioinspired\", \n \"opencv_face\", \n \"opencv_imgcodecs\", \n \"opencv_photo\",\n \"opencv_stitching\", \n \"opencv_videostab\", \n \"opencv_calib3d\", \n \"opencv_features2d\", \n \"opencv_img_hash\",\n \"opencv_plot\", \n \"opencv_structured_light\", \n \"opencv_ccalib\", \n \"opencv_flann\",\n \"opencv_imgproc\", \n \"opencv_quality\", \n \"opencv_superres\", \n \"opencv_ximgproc\", \n \"opencv_core\", \n \"opencv_freetype\",\n \"opencv_line_descriptor\", \n \"opencv_reg\", \n \"opencv_surface_matching\", \n \"opencv_xobjdetect\", \n \"opencv_datasets\",\n \"opencv_fuzzy\", \n \"opencv_ml\", \n \"opencv_rgbd\", \n \"opencv_text\", \n \"opencv_xphoto\", \n \"opencv_dnn_objdetect\",\n \"opencv_objdetect\", \n \"opencv_saliency\", \n \"opencv_tracking\"\n ],\n library_dirs=[\"./ext_modules/libmaix/components/libmaix/lib/arch/r329/opencv4\", ],\n extra_link_args=[\"-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix/_maix_opencv\"],\n extra_compile_args=['-std=c++11', '-std=gnu++11' ],\n )\n\n_maix_camera_module = Pybind11Extension(\n name = '_maix_camera', \n include_dirs=['ext_modules/_maix_camera/include', 'ext_modules/libmaix/components/libmaix/include'],\n sources=get_srcs('ext_modules/_maix_camera'),\n libraries=[\n # \"dl\", \n # \"rt\", \n # \"log\", \n # \"ion\", \n \"pthread\", \n # \"cdc_base\",\n # \"MemAdapter\", \n # \"media_utils\", \n # \"mpp_vi\", \n # \"mpp_isp\", \n # \"ISP\",\n # \"venc_base\", \n # \"mpp_component\", \n # \"adecoder\", \n # \"asound\", \n # \"venc_base\", \n # \"hwdisplay\",\n # \"maix_utils\", \n \"maix_cam\", \n # \"maix_image\",\n],\n library_dirs=[\"/lib\", \"/usr/lib\", ext_so, ],\n # extra_link_args = [ \"-Wl,-z,origin\", \"-Wl,-rpath='$ORIGIN/maix'\" ]\n extra_compile_args=['-DR329Camera', '-std=c++11', '-std=gnu++11' ],\n extra_link_args=[\"-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix\"]\n)\n\n_maix_display_module = Pybind11Extension(\n name = \"_maix_display\",\n include_dirs=['ext_modules/_maix_display/include', 'ext_modules/libmaix/components/libmaix/include'],\n sources=get_srcs('ext_modules/_maix_display'),\n libraries=[\n # \"dl\", \n # \"rt\", \n # \"log\", \n # \"ion\", \n \"pthread\", \n # \"cdc_base\",\n # \"maix_utils\", \n 
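            # (added note) only libmaix's display library is linked in below; the
            # commented-out names document the wider libmaix dependency set. The
            # -Wl,-rpath entry in extra_link_args bakes a runtime search path into
            # the extension so the bundled .so files are found without LD_LIBRARY_PATH.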
\"maix_disp\", \n # \"maix_image\",\n ],\n library_dirs=[\"/lib\", \"/usr/lib\", ext_so, ],\n extra_compile_args=['-DR329Display', '-std=c++11', '-std=gnu++11' ],\n extra_link_args=[\"-Wl,-rpath=/usr/local/lib/python3.9/dist-packages/maix\"]\n )\n# max_nn_srcs = get_srcs('ext_modules/_maix_nn/src')\n# max_nn_srcs.extend(get_srcs('ext_modules/libmaix/components/libmaix/src'))\n# max_nn_srcs.remove(\"ext_modules/libmaix/components/libmaix/src/libmaix.c\")\n# _maix_nn_module = Extension('_maix_nn', include_dirs=['ext_modules/_maix_nn/include', 'ext_modules/libmaix/components/libmaix/include'],\n# sources=max_nn_srcs,\n# libraries=[\n# \"maix_utils\", \"maix_nn\",\n# ],\n# library_dirs=[\"/lib\", \"/usr/lib\", ext_so, ],\n# # extra_link_args = [ \"-Wl,-z,origin\", \"-Wl,-rpath='$ORIGIN/maix'\" ]\n# extra_link_args=[-Wl,-rpath=/usr/lib/python3.8/site-packages/maix -DR329]\n# )\n\n_maix_modules = [\n # libi2c_module,\n _maix_module,\n # _maix_vivo_module,\n _maix_opencv_module,\n _maix_camera_module,\n _maix_display_module,\n # _maix_nn_module\n]\n\n_maix_data_files = [\n ('/maix', get_srcs(ext_so, ['so'])),\n ('/maix/_maix_opencv/', get_srcs(\"ext_modules/libmaix/components/libmaix/lib/arch/r329/opencv4\", ['so'])), # depend system provide\n]\n\n_maix_py_modules = [\n \"Pillow\",\n \"rpyc\",\n \"gpiod\",\n \"evdev\",\n \"spidev\",\n \"pyserial\"\n \"zbarlight\",\n]\n","sub_path":"envs/maix_r329.py","file_name":"maix_r329.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"445205190","text":"#!/usr/bin/python3\n\"\"\"city flask triggers\"\"\"\nfrom api.v1.views import app_views\nfrom flask import Flask, jsonify, abort, make_response, request\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\n\n\n@app_views.route('/states//cities',\n methods=['GET'], strict_slashes=False)\ndef citiesretr(state_id):\n \"\"\"Retrieves the list of all City objects of a State\"\"\"\n idstate = storage.get(State, state_id)\n if idstate is None:\n abort(404)\n cities = []\n for city in idstate.cities:\n cities.append(city.to_dict())\n return jsonify(cities)\n\n\n@app_views.route('/cities/',\n methods=['GET'], strict_slashes=False)\ndef cityid(city_id):\n \"\"\"Retrieves a City object\"\"\"\n cityobj = storage.get(City, city_id)\n if cityobj is None:\n abort(404)\n return jsonify(cityobj.to_dict())\n\n\n@app_views.route('/cities/',\n methods=['DELETE'], strict_slashes=False)\ndef delete(city_id):\n \"\"\"deletes a city object\"\"\"\n cityobj = storage.get(City, city_id)\n if cityobj is None:\n abort(404)\n storage.delete(cityobj)\n storage.save()\n return (jsonify({}), 200)\n\n\n@app_views.route('/states//cities',\n methods=['POST'], strict_slashes=False)\ndef createcity(state_id):\n \"\"\"Creates a City\"\"\"\n citystate = storage.get(State, state_id)\n if citystate is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({\"error\": \"not a JSON\"}), 400)\n if 'name' not in request.get_json():\n return make_response(jsonify({\"error\": \"missing name\"}), 400)\n post_json = request.get_json()\n post_json['state_id'] = state_id\n jsoncity = City(**post_json)\n jsoncity.save()\n return make_response(jsonify(jsoncity.to_dict()), 201)\n\n\n@app_views.route('/cities/',\n methods=['PUT'], strict_slashes=False)\ndef updatecity(city_id):\n \"\"\"update a city as json\"\"\"\n update = storage.get(City, city_id)\n if update is None:\n abort(404)\n if not 
request.get_json():\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n    for key, value in request.get_json().items():\n        if key not in ['id', 'state_id', 'created_at', 'updated_at']:\n            setattr(update, key, value)\n    update.save()\n    return jsonify(update.to_dict())\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"496748234","text":"#Author \n#---Needs Testing---\nimport subprocess, os, signal\n\ndef terminate_process(p):\n    ps_command = subprocess.Popen(\"ps -o pid --ppid %d --noheaders\" % p.pid, shell=True, stdout=subprocess.PIPE)\n    ps_output = ps_command.stdout.read()\n    retcode = ps_command.wait()\n    assert retcode == 0, \"ps command returned %d\" % retcode\n    for pid_str in ps_output.split(\"\\n\")[:-1]:\n        os.kill(int(pid_str), signal.SIGINT)\n    p.terminate()\n\n\n#pass the process to kill as a parameter to the 'terminate_process' function\n#this code is not tested, so you will have to verify it\n#I don't remember exactly what the stop script has to do;\n#this code is for general process-killing purposes: just pass a process to 'terminate_process' and it will be gone\n","sub_path":"scripts/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"432624500","text":"import csv\nimport numpy as np\nimport pandas\nimport pylab\nimport seaborn as sns\n\nfrom model.team import Team\n\nmap_of_teams = {\"ORL\": Team(\"Orlando\"), \"IND\": Team(\"Indiana\"), \"CHI\": Team(\"Chicago\"), \"LAC\": Team(\"Clippers\"),\n                \"LAL\": Team(\"Lakers\"), \"NYK\": Team(\"New York\"), \"GSW\": Team(\"Golden State\"), \"CLE\": Team(\"Cleveland\"),\n                \"CHA\": Team(\"Charlotte\"), \"NOP\": Team(\"New Orleans\"), \"PHI\": Team(\"Philadelphia\"),\n                \"WAS\": Team(\"Washington\"),\n                \"TOR\": Team(\"Toronto\"), \"ATL\": Team(\"Atlanta\"), \"MIL\": Team(\"Milwaukee\"), \"BOS\": Team(\"Boston\"),\n                \"DAL\": Team(\"Dallas\"), \"MEM\": Team(\"Memphis\"), \"HOU\": Team(\"Houston\"), \"DET\": Team(\"Detroit\"),\n                \"OKC\": Team(\"Oklahoma\"), \"MIN\": Team(\"Minnesota\"), \"MIA\": Team(\"Miami\"), \"BKN\": Team(\"Brooklyn\"),\n                \"POR\": Team(\"Portland\"), \"DEN\": Team(\"Denver\"), \"UTA\": Team(\"Utah\"), \"PHX\": Team(\"Phoenix\"),\n                \"SAC\": Team(\"Sacramento\"), \"SAS\": Team(\"San Antonio\")}\n\ngame_home = []\ngame_away = []\n\nall_rows = []\nindex_for_remove = []\n# read stats for the home and away team\n# East-vs-West games and duplicate games need to be removed from the stats manually\nwith open(\"data_regular_season_advanced/season1314.csv\", \"r\") as file:\n    reader = csv.reader(file, delimiter=',')\n    for row in reader:\n        all_rows.append(row)\n\n    # sometimes the data contains duplicates, so they need to be removed\n    # for i in range(0, (len(all_rows) - 1)):\n    #     if all_rows[i] == all_rows[i+1]:\n    #         index_for_remove.append(i)\n    #\n    # index_for_remove.reverse()\n    # for i in index_for_remove:\n    #     del(all_rows[i])\n    #\n    # #finally, the filtered data\n    # print(len(all_rows))\n    #\n    # with open(\"filtered1415.csv\", \"w\") as csvfile:\n    #     writer = csv.writer(csvfile)\n    #     writer.writerows(all_rows)\n\n\ncounter = 0\ngame_for_learning = []\ngames_for_learning = []\n\n# go through the parsed rows one by one\nfor row in all_rows:\n\n    counter += 1\n    # skip games for which we do not yet have enough statistical data\n    if counter > 400:\n        for item in row[0:4]:\n            game_for_learning.append(item)\n        for item in map_of_teams[row[0]].stats_away.get_stats_for_learning():\n            game_for_learning.append(item)\n        for item in map_of_teams[row[1]].stats_home.get_stats_for_learning():\n            game_for_learning.append(item)\n\n        # round float stats to two decimals\n        rounded_game_for_learning = []\n        for item in game_for_learning:\n            if type(item) is float:\n                rounded = round(item, 2)\n                rounded_game_for_learning.append(rounded)\n            else:\n                rounded_game_for_learning.append(item)\n\n        games_for_learning.append(rounded_game_for_learning[:])\n        game_for_learning.clear()\n\n    game_home.clear()\n    game_away.clear()\n\n    # add the statistics after the game has been played\n    for item in row[4:23]:\n        game_away.append(item)\n    map_of_teams[row[0]].add_stats(game_away, \"away\")\n\n    for item in row[23:]:\n        game_home.append(item)\n    map_of_teams[row[1]].add_stats(game_home, \"home\")\n\nwith open(\"test.csv\", \"w\", newline='') as csvfile:\n    writer = csv.writer(csvfile)\n    writer.writerows(games_for_learning)\n\n","sub_path":"prepare_data_for_learning.py","file_name":"prepare_data_for_learning.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"476281184","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# This python script requires vpython to be installed (see vpython.org)\n# vpython is used for the structural visualization\nfrom vpython import *\nimport sys\nsys.path.append(\"../Spectra\")\nsys.path.append(\"../Structure\")\nimport Spectra\nimport CD\nimport Structure\n\n# Global parameters\nd2eA=0.20819434 # Debye to eÅ\nbohr3=0.529177210 # bohr in Å\nEh2icm=219500 # Hartree to cm-1\nA=Eh2icm*bohr3*d2eA**2\nprint(A)\n\n# Define Parameters\nd=16.0 # Distance between molecules along spiral in Ångstrøm\nr=3.0 # Distance between layers in Ångstrøm\nN=20 # Number of molecules\nn0=2 # Number of first point\nd2r=np.pi/180.0 # Degree to radians\nalpha=4.0*d2r # Alpha angle for transition dipole\nbeta=55.0*d2r # Beta angle for transition dipole\nmum=5.5 # Dipole in Debye\n\n# Create positions\nx=np.zeros((N,3))\nmu=np.zeros((N,3))\nfor n in range(N):\n    p=np.sqrt(2*d*(n+n0)/r) # Leave out the first n0 points\n    x[n,0]=r*p*np.cos(p)\n    x[n,1]=r*p*np.sin(p)\n    mu[n,2]=mum*np.cos(beta)\n    mu[n,0]=mum*np.sin(beta)*(-np.sin(p)*np.cos(alpha)+np.cos(p)*np.sin(alpha))\n    mu[n,1]=mum*np.sin(beta)*(np.cos(p)*np.cos(alpha)+np.sin(p)*np.sin(alpha))\n    lmu=np.linalg.norm(mu[n,:])\n#    print(lmu) \n\n# Plot the structure in 2D\nplt.plot(x[:,0],x[:,1])\nfor n in range(N):\n    plt.arrow(x[n,0],x[n,1],mu[n,0],mu[n,1])\n# Make x and y direction equivalent on screen\nplt.axis('equal')\nplt.show()\n\n# Create Hamiltonian\nH=np.zeros((N,N))\nfor n in range(N):\n    for m in range(n+1,N):\n        dx=x[n,:]-x[m,:]\n        dd=np.linalg.norm(dx)\n        d3=dd*dd*dd\n        d5=d3*dd*dd\n        J=np.inner(mu[n,:],mu[m,:])/d3-3*np.inner(mu[n,:],dx)*np.inner(dx,mu[m,:])/d5\n        H[n,m]=J*A\n        H[m,n]=J*A\n\n# Plot structure\nStructure.visual(x,mu,N,1)\n# Make spectrum\nSpectra.absorption(H,mu,N,10)\nCD.CD(H,mu,x,N,10)\n# Visualize state\n#visual_exciton(x,mu,c,index,N,scale)\n\n","sub_path":"Spiral/Spiral.py","file_name":"Spiral.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"581001264","text":"from __future__ import print_function\nfrom warnings import filterwarnings\nfilterwarnings('ignore', module='IPython.html.widgets')\n\nimport Tkinter as tk\nimport ttk\n\ntry:\n    import cPickle as 
pickle \nexcept:\n import pickle\n\nimport threading, tkFileDialog\nfrom serial import Serial\nfrom time import sleep, time, strftime\nfrom math import pi\n\ntry:\n from tecancavrotest.models import XCaliburD\n from tecancavrotest.transport import TecanAPISerial, TecanAPINode\nexcept ImportError: # Support direct import from package\n import sys\n import os\n dirn = os.path.dirname\n LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))\n sys.path.append(dirn(dirn(LOCAL_DIR)))\n from tecancavrotest.models import XCaliburD\n from tecancavrotest.transport import TecanAPISerial, TecanAPINode\n\n\n########################################################################################################################\n\nclass AppWidget(tk.Frame):\n def __init__(self, parent, title=\"App Widget\", scrolled=False):\n tk.Frame.__init__(self, parent)\n \n self.parent = parent\n self.title = title\n self.scrolled = scrolled\n \n self.wrapperframe = tk.Frame(self.parent, bg=self.parent.parent.framecolor)\n \n self.titleframe = tk.Frame(self.wrapperframe, bg=self.parent.parent.headcolor)\n self.title = tk.Label(self.titleframe, text=self.title, bg=self.parent.parent.headcolor)\n self.title.config(font=self.parent.parent.headfont, fg=self.parent.parent.headfc)\n self.title.pack()\n self.titleframe.grid(row=0, column=0, ipadx=5, sticky='we')\n\n self.bodyframe = tk.Frame(self.wrapperframe, bg=self.parent.parent.framecolor)\n self.bodyframe.grid(row=1, column=0, sticky='we')\n \n self.wrapperframe.pack(padx=10, pady=10)\n \n########################################################################################################################\n\nclass MainMenu(tk.Menu):\n\n def __init__(self, parent, root, *args, **kwargs):\n tk.Menu.__init__(self, parent)\n\n self.parent = parent\n self.root = root\n\n self.menubar = tk.Menu(self.root)\n\n self.filemenu = tk.Menu(self.menubar, tearoff=0)\n# self.filemenu.add_command(label=\"New protocol\", command=self.parent.protocol.newProtocol)\n self.filemenu.add_command(label=\"Open protocol...\", command=self.parent.protocol.loadProtocol)\n self.filemenu.add_command(label=\"Save protocol...\", command=self.parent.protocol.saveProtocol)\n self.filemenu.add_separator()\n self.filemenu.add_command(label=\"Exit\", command=self.root.destroy)\n\n self.helpmenu = tk.Menu(self.menubar, tearoff=0)\n self.helpmenu.add_command(label=\"About...\")\n self.helpmenu.add_command(label=\"Manual...\")\n\n self.menubar.add_cascade(menu=self.filemenu, label=\"File\")\n self.menubar.add_cascade(menu=self.helpmenu, label=\"Help\")\n \n self.filemenu.config(bg=self.parent.headcolor, fg=self.parent.headfc, bd=1, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc)\n self.helpmenu.config(bg=self.parent.headcolor, fg=self.parent.headfc, bd=1, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc)\n\n self.root.config(menu=self.menubar)\n \n########################################################################################################################\n\nclass Log(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent)\n\n self.parent = parent\n\n self.config(bg=self.parent.wrapcolor)\n\n self.logframe = AppWidget(self, \"Log\", False)\n\n self.renderLog()\n\n\n def addRecord(self, text):\n self.log.configure(state=\"normal\")\n formattedtext = self.format(text)+\"\\n\"\n self.log.insert(\"end\", formattedtext)\n self.log.configure(state=\"disabled\")\n self.log.see(\"end\")\n\n\n def 
format(self, text):\n formattedtime = strftime('%d %b %H:%M:%S')\n formattedtext = str(formattedtime)+\" - \"+text\n \n return formattedtext\n\n\n def renderLog(self):\n self.log = tk.Text(self.logframe.bodyframe, height=\"15\", width=\"120\", bg=self.parent.lightercolor, bd=0, fg=self.parent.bodyfc, highlightthickness=0)\n self.log.pack(padx=10, pady=10, fill=\"both\", expand=1)\n self.log.configure(state=\"disabled\")\n\n########################################################################################################################\n\nclass ControlPanel(tk.Frame):\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent)\n\n self.parent = parent\n \n self.config(bg=self.parent.wrapcolor)\n\n self.primingframe = AppWidget(self, \"Port Priming\", False)\n\n self.calibframe = AppWidget(self, \"Output Calibration\", False)\n\n self.cmdframe = AppWidget(self, \"Control Panel\", False)\n \n self.cycletimeframe = AppWidget(self, \"Cycle Duration\", False)\n\n self.renderCP()\n\n\n def renderCP(self):\n pframe = tk.Frame(self.primingframe.bodyframe, bg=self.parent.framecolor)\n pframe.pack()\n\n lframe = tk.Frame(pframe, bg=self.parent.darkercolor)\n lframe.grid(row=0, column=0)\n\n rframe = tk.Frame(pframe, bg=self.parent.lightercolor)\n rframe.grid(row=0, column=1)\n\n tk.Label(lframe, text=\"Port\", bg=self.parent.darkercolor, fg=self.parent.bodyfc).grid(row=0, column=0, columnspan=2)\n tk.Label(rframe, text=\"Tubing Type\", bg=self.parent.lightercolor, fg=self.parent.bodyfc).grid(row=0, column=0, columnspan=2)\n# tk.Label(self.priminglf, text=\"Length(in)\").grid(row=0, column=2)\n\n self.portstoprime = []\n self.tubingtypes = []\n# self.lengths = []\n\n for i in range(1, 10):\n port = tk.DoubleVar()\n tk.Checkbutton(lframe, variable=port, bg=self.parent.darkercolor, highlightthickness=0).grid(row=i, column=0)\n tk.Label(lframe, text=str(i), bg=self.parent.darkercolor, fg=self.parent.bodyfc).grid(row=i, column=1)\n self.portstoprime.append(port)\n\n tubingtype = tk.IntVar()\n tk.Radiobutton(rframe, text=\"PEEK\", variable=tubingtype, value=1, indicator=0, offrelief='flat', bg=self.parent.lightercolor, fg=self.parent.bodyfc, selectcolor=self.parent.buttoncolor, highlightthickness=0).grid(row=i, column=0)\n #tk.Label(rframe, text=\"PEEK\", bg=self.parent.lightercolor, fg=self.parent.bodyfc).grid(row=i, column=1)\n tk.Radiobutton(rframe, text=\"non-PEEK\", variable=tubingtype, value=2, indicator=0, offrelief='flat', bg=self.parent.lightercolor, fg=self.parent.bodyfc, selectcolor=self.parent.buttoncolor, highlightthickness=0).grid(row=i, column=1)\n #tk.Label(rframe, text=\"non-PEEK\", bg=self.parent.lightercolor, fg=self.parent.bodyfc).grid(row=i, column=3)\n \n self.tubingtypes.append(tubingtype)\n\n# length = tk.DoubleVar()\n# tk.Entry(self.primingll, width=4, textvariable=length).grid(row=i, column=2)\n# self.lengths.append(length)\n\n pf = tk.Frame(pframe, bg=self.parent.framecolor)\n pf.grid(row=len(self.portstoprime)+2, column=0, columnspan=3, padx=5, pady=5)\n\n self.volume = tk.Spinbox(pf, from_=500, to=1000, width=4, bg=self.parent.entrycolor, bd=0)\n self.volume.pack(side='left', padx=5)\n\n #ttk.Separator(pframe, orient='horizontal').grid(row=len(self.portstoprime)+1, column=0, columnspan=3, sticky='ew')\n\n tk.Button(pf, text=\"Prime Ports\", command=lambda: self.parent.protocol.primePorts(self.portstoprime, self.volume, self.tubingtypes), relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, 
activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).pack(padx=5, side='left')\n\n #ttk.Separator(pframe, orient='horizontal').grid(row=len(self.portstoprime)+3, column=0, columnspan=3, sticky='ew')\n\n tk.Button(pframe, text=\"Return Port Contents\", command=lambda: self.parent.protocol.returnPortContents(self.portstoprime), relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=len(self.portstoprime)+3, column=0, columnspan=2, padx=5, pady=5)\n\n ########################################\n cf = tk.Frame(self.calibframe.bodyframe, bg=self.parent.framecolor)\n cf.pack(padx=5, pady=5)\n\n self.calibvolume = tk.Spinbox(cf, from_=0, to=1000, width=4, bg=self.parent.entrycolor, bd=0)\n self.calibvolume.pack(padx=5, side='left')\n tk.Button(cf, text=\"Calibrate\", command=lambda: self.parent.protocol.calibrateOutput(self.calibvolume), relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).pack(padx=5, side='left') \n\n ########################################\n cpframe = tk.Frame(self.cmdframe.bodyframe, bg=self.parent.framecolor)\n cpframe.pack()\n\n tk.Button(cpframe, text=\"Execute Cycle\", command=self.parent.protocol.executeCycle, width=15, relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=0, column=0, padx=5, pady=5)\n tk.Button(cpframe, text=\"Update Protocol\", command=self.parent.protocol.updateProtocol, width=15, relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=1, column=0, padx=5, pady=5)\n tk.Button(cpframe, text=\"Add Command\", command=self.parent.protocol.addCommand, width=15, relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=2, column=0, padx=5, pady=5)\n tk.Button(cpframe, text=\"Calculate Cycle Times\", command=self.parent.protocol.renderCycleTimes, width=15, relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=6, column=0, padx=5, pady=5)\n\n #ttk.Separator(cpframe, orient='horizontal').grid(row=3, column=0, sticky='ew')\n# pumps = ['/dev/ttyUSb0']\n pumps = [x[0] for x in self.parent.protocol.devices]\n self.selectedpump = tk.StringVar()\n self.selectedpump.set(pumps[0])\n option = ttk.Combobox(cpframe, textvariable=self.selectedpump, state='readonly')\n option['values'] = pumps\n option.grid(row=4, column=0, padx=5, pady=5)\n \n tk.Button(cpframe, text=\"Reset Pump\", command=self.parent.protocol.resetPump, width=15, relief='flat', bg=self.parent.buttoncolor, activebackground=self.parent.buttoncolor, activeforeground=self.parent.bodyfc, fg=self.parent.buttonfc, highlightthickness=0).grid(row=5, column=0, padx=5, pady=5)\n\n ########################################\n self.ctframe = tk.Frame(self.cycletimeframe.bodyframe, bg=self.parent.framecolor)\n self.ctframe.pack()\n \n tk.Label(self.ctframe, text=\"Cycle\", bg=self.parent.darkercolor, fg=self.parent.bodyfc).grid(row=0, column=0, sticky='we')\n 
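        # (added note) these two labels form the header row of the cycle-time
        # table; Protocol.renderCycleTimes() grids one cycle/duration row per
        # cycle underneath them.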
tk.Label(self.ctframe, text=\"Duration\", bg=self.parent.lightercolor, fg=self.parent.bodyfc).grid(row=0, column=1, sticky='we')\n\n########################################################################################################################\n\nclass Protocol(tk.Frame):\n\n def __init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent)\n\n self.parent = parent\n\n self.config(bg=self.parent.wrapcolor)\n\n self.devices = self.getSerialPumps()\n self.device_dict = dict(self.devices)\n\n if not self.device_dict:\n print(\"There is no pump connected. Please connect one and try again.\")\n sys.exit()\n else:\n print(\"Device dict: \" + str(self.device_dict))\n\n self.protocol = []\n\n self.protocol.append({})\n\n self.cmdnumbers = []\n self.cycles = []\n self.names = []\n self.pumpports = []\n self.fromports = []\n self.toports = []\n self.volumes = []\n self.speeds = []\n self.waitmins = []\n self.waitsecs = []\n self.wasteornots = []\n self.statuses = []\n\n self.cyclecounter = 0\n self.cmdcounter = 0\n\n self.calibrationvolume = 0\n\n self.protocolframe = AppWidget(self, 'Protocol', False)\n \n self.frame = VerticalScrolledFrame(self.protocolframe.bodyframe, bg=self.parent.framecolor)\n self.frame.pack()\n\n self.renderProtocol()\n\n\n def updateProtocol(self):\n for i in range(len(self.protocol)):\n self.protocol[i] = {\n 'cmdnumber': int(self.cmdnumbers[i]),\n 'cycle': int(self.cycles[i].get()),\n 'name': self.names[i].get(),\n 'pump': self.pumpports[i].get(),\n 'fromport': int(self.fromports[i].get()),\n 'toport': int(self.toports[i].get()),\n 'volume': int(self.volumes[i].get()),\n 'speed': int(self.speeds[i].get()),\n 'waitmins': int(self.waitmins[i].get()),\n 'waitsecs': int(self.waitsecs[i].get()),\n 'waste': int(self.wasteornots[i].get()), \n }\n print(\"Updated command list: \" + str(self.protocol))\n\n\n def resetProtocol(self):\n self.protocol = []\n self.cmdcounter = 0\n self.cyclecounter = 0\n\n self.cmdnumbers = []\n self.cycles = []\n self.names = []\n self.pumpports = []\n self.fromports = []\n self.toports = []\n self.volumes = []\n self.speeds = []\n self.waitmins = []\n self.waitsecs = []\n self.wasteornots = []\n self.statuses = []\n\n# self.renderProtocol()\n\n\n def saveProtocol(self):\n self.updateProtocol()\n filename = tkFileDialog.asksaveasfilename(defaultextension='.pkl')\n if filename is None:\n return\n file = open(filename, 'wb')\n pickle.dump(self.protocol, file)\n file.close()\n\n\n def loadProtocol(self):\n filename = tkFileDialog.askopenfilename(filetypes=[('Pickled protocols', '*.pkl')])\n if filename is None:\n return\n file = open(filename, 'rb')\n protocol = pickle.load(file)\n file.close()\n print(\"Loaded protocol: \" + str(protocol))\n# self.protocol = self.cmdnumbers = self.cycles = self.names = self.pumpports = self.fromports = self.toports = self.volumes = self.speeds = self.waitmins = self.waitsecs = self.wasteornots = self.statuses = []\n self.resetProtocol()\n self.protocol = protocol\n print(\"Native protocol: \" + str(self.protocol))\n self.renderProtocol()\n #self.insertFieldValues()\n\n\n def addCommand(self):\n print(\"Previous command list: \"+str(self.protocol))\n self.protocol.append({})\n print(\"Appended command list: \"+str(self.protocol))\n\n self.renderCommand(len(self.protocol)-1)\n\n# self.update_idletasks()\n\n\n def renderCommand(self, i):\n if i%2 == 0:\n color = self.parent.lightercolor\n else:\n color = self.parent.darkercolor\n\n cframe = tk.Frame(self.frame.interior, bg=color)\n frame = 
tk.Frame(cframe, bg=color)\n\n tk.Label(frame, text=str(i+1)+\") \", bg=color, width=3, fg=self.parent.bodyfc).grid(row=0, column=0)\n\n self.cmdnumbers.append(i)\n\n tk.Label(frame, text=\"Cycle: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=1)\n sb_cycle = tk.Spinbox(frame, from_=0, to=99, width=2, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_cycle.grid(row=0, column=2)\n\n self.cycles.append(sb_cycle)\n\n if self.protocol[i].get('cycle') is not None:\n sb_cycle['value'] = self.protocol[i].get('cycle') \n\n tk.Label(frame, text=\"Name: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=3)\n commandname = tk.StringVar()\n en_name = tk.Entry(frame, textvariable=commandname, bg=self.parent.entrycolor, width=20, bd=0)\n en_name.grid(row=0, column=4)\n\n self.names.append(commandname)\n\n if self.protocol[i].get('name') is not None:\n self.names[i].set(self.protocol[i].get('name'))\n\n tk.Label(frame, text=\"Pump: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=5)\n# pumps = ['/dev/ttyUSB0', '/dev/ttyUSB1']\n pumps = [x[0] for x in self.devices]\n #print(\"List of pumps: \" + str(pumps))\n selectedpump = tk.StringVar()\n selectedpump.set(pumps[0])\n #self.parent.setPump(self.selectedpump.get())\n #print(\"Selected pump: \" + str(self.selectedpump.get()))\n #option = tk.OptionMenu(frame, selectedpump, *pumps)\n option = ttk.Combobox(frame, textvariable=selectedpump, state='readonly', width=15)\n option['values'] = pumps\n option.grid(row=0, column=6)\n\n self.pumpports.append(selectedpump)\n\n if self.protocol[i].get('pump') is not None:\n self.pumpports[i].set(self.protocol[i].get('pump'))\n\n tk.Label(frame, text=\"From Port: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=7)\n sb_fromport = tk.Spinbox(frame, from_=1, to=9, width=2, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_fromport.grid(row=0, column=8)\n\n self.fromports.append(sb_fromport)\n\n if self.protocol[i].get('fromport') is not None:\n self.fromports[i]['value'] = self.protocol[i].get('fromport')\n\n tk.Label(frame, text=\"To Port: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=9)\n sb_toport = tk.Spinbox(frame, from_=1, to=9, width=2, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_toport.grid(row=0, column=10)\n\n self.toports.append(sb_toport)\n\n if self.protocol[i].get('toport') is not None:\n self.toports[i]['value'] = self.protocol[i].get('toport')\n\n tk.Label(frame, text=\"Volume(uL): \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=11)\n sb_volume = tk.Spinbox(frame, from_=1, to=1000, width=4, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_volume.grid(row=0, column=12)\n\n self.volumes.append(sb_volume)\n\n if self.protocol[i].get('volume') is not None:\n self.volumes[i]['value'] = self.protocol[i].get('volume')\n\n tk.Label(frame, text=\"Speed(0-40): \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=13)\n sb_speed = tk.Spinbox(frame, from_=0, to=40, width=2, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_speed.grid(row=0, column=14)\n\n self.speeds.append(sb_speed)\n\n if self.protocol[i].get('speed') is not None:\n self.speeds[i]['value'] = self.protocol[i].get('speed')\n\n tk.Label(frame, text=\"Leave for: \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=15)\n sb_timemin = tk.Spinbox(frame, from_=0, to=600, width=3, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_timemin.grid(row=0, column=16)\n tk.Label(frame, text=\"min\", bg=color, 
fg=self.parent.bodyfc).grid(row=0, column=17)\n sb_timesec = tk.Spinbox(frame, from_=5, to=59, width=2, bg=self.parent.entrycolor, bd=0, fg=self.parent.entryfc)\n sb_timesec.grid(row=0, column=18) \n tk.Label(frame, text=\"sec\", bg=color, fg=self.parent.bodyfc).grid(row=0, column=19)\n\n self.waitmins.append(sb_timemin)\n self.waitsecs.append(sb_timesec)\n\n if self.protocol[i].get('waitmins') is not None:\n self.waitmins[i]['value'] = self.protocol[i].get('waitmins')\n if self.protocol[i].get('waitsecs') is not None:\n self.waitsecs[i]['value'] = self.protocol[i].get('waitsecs')\n\n waste = tk.IntVar()\n tk.Radiobutton(frame, text=\"Return\", variable=waste, value=0, bg=color, highlightthickness=0, fg=self.parent.logofc).grid(row=0, column=20)\n tk.Radiobutton(frame, text=\"Waste\", variable=waste, value=1, bg=color, highlightthickness=0, fg=self.parent.logofc).grid(row=0, column=21)\n\n self.wasteornots.append(waste)\n\n if self.protocol[i].get('waste') is not None:\n self.wasteornots[i].set(self.protocol[i].get('waste'))\n\n tk.Label(frame, text=\" - \", bg=color, fg=self.parent.bodyfc).grid(row=0, column=22)\n status = tk.Label(frame, text=\"Not complete\", bg=color, fg=self.parent.bodyfc)\n status.grid(row=0, column=23)\n\n self.statuses.append(status)\n\n frame.pack(pady=5, fill='x')\n cframe.grid(row=i, column=0, sticky='we')\n \n# self.parent.update_idletasks()\n\n\n def renderProtocol(self):\n for i in range(len(self.protocol)):\n self.renderCommand(i)\n\n\n def renderCycleTimes(self):\n #for i in len(set(self.protocol.get('cycle'))):\n times = {16: 18, 30: 88}\n \n numcycles = len({v['cycle']:v for v in self.protocol}.values())\n print(\"# cycles: \"+str(numcycles))\n \n for i in range(numcycles):\n print(\"cycle: \"+str(i))\n time = 0\n for cmd in self.protocol:\n if cmd.get('cycle') == i:\n speed = cmd.get('speed')\n #print(\"speed: \"+str(speed))\n volume = int(cmd.get('volume'))\n #print(\"volume: \"+str(volume))\n vratio = volume/1000.0\n #print(\"vratio: \"+str(vratio))\n \n t1 = times.get(speed)*vratio\n #print(str(t1))\n t2 = times.get(30)*vratio\n execdelay = 8*4\n wait = int(cmd.get('waitmins'))*60+int(cmd.get('waitsecs'))\n #print(str(t2))\n delay = execdelay+wait\n tt = 3*t1+t2+delay\n #print(str(tt))\n time += tt\n tk.Label(self.parent.cp.ctframe, text=i, bg=self.parent.darkercolor, fg=self.parent.bodyfc).grid(row=i+1, column=0, sticky='we')\n tk.Label(self.parent.cp.ctframe, text=str(time)+\"sec\", bg=self.parent.lightercolor, fg=self.parent.bodyfc).grid(row=i+1, column=1, sticky='we')\n \n def primePorts(self, portstoprime, volume, tubingtypes):\n pump = self.device_dict.get('/dev/ttyUSB0')\n\n buffertime = 2\n speed = 14\n\n for i in range(len(portstoprime)):\n if portstoprime[i].get() == 1:\n if tubingtypes[i].get() == 1:\n speed = 28\n elif tubingtypes[i].get() == 2:\n speed = 14\n v = int(volume.get()) \n port = i+1\n\n print(\"Priming port \"+str(port)+\" with \"+str(v)+\"uL at speed: \"+str(speed))\n self.parent.log.addRecord(\"Priming port \"+str(port)+\" with \"+str(v)+\"uL at speed: \"+str(speed))\n\n pump.primePort(port, v, speed, port)\n\n sleep(buffertime)\n\n print(\"Washing valve\")\n self.parent.log.addRecord(\"Washing valve\")\n pump.primePort(5, 750, speed, 9)\n\n sleep(buffertime)\n\n\n def returnPortContents(self, portstoprime):\n pump = self.device_dict.get('/dev/ttyUSB0')\n\n volume = 500\n speed = 14\n buffertime = 2\n\n pump.setSpeed(speed)\n sleep(1)\n\n for i in range(len(portstoprime)):\n if portstoprime[i].get() == 1:\n port = i+1\n \n 
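# descriptive note: pull the fixed 500 uL back from port 7 -- presumably the\n                # reaction chamber on this rig -- and dispense it to the source port, sleeping\n                # through the pump's own time estimate plus a small buffer between moves\n                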
print(\"Returning contents of port \" +str(port))\n self.parent.log.addRecord(\"Returning contents of port \" +str(port))\n esttime = pump.extract(7, volume, execute=True)\n sleep(esttime+buffertime)\n esttime = pump.dispense(port, volume, execute=True)\n sleep(esttime+buffertime)\n\n\n def calibrateOutput(self, volume):\n self.calibrationvolume = int(volume.get())\n self.parent.log.addRecord(\"Output port has been calibrated for additional \" + str(self.calibrationvolume) + \"uL \\n\")\n\n\n def executeCycle(self):\n self.updateProtocol()\n\n for cmd in self.protocol:\n \n if cmd['cycle'] == self.cyclecounter:\n self.parent.log.addRecord(\"Carrying out cycle: \" + str(self.cyclecounter))\n print(\"Found cmd with appropriate cycle\")\n self.executeCommand(self.cmdcounter)\n self.cmdcounter += 1\n \n self.cyclecounter += 1\n\n\n def executeCommand(self, index):\n self.updateProtocol()\n \n i = index\n\n status = self.statuses[i]\n status.config(bg=\"yellow\", fg=\"black\")\n status['text'] = 'Processing'\n\n pump = self.device_dict.get(self.protocol[i].get('pump'))\n\n name = str(self.protocol[i].get('name'))\n toport = int(self.protocol[i].get('toport'))\n fromport = int(self.protocol[i].get('fromport'))\n volume = int(self.protocol[i].get('volume')) + self.calibrationvolume\n speed = int(self.protocol[i].get('speed'))\n waitmins = int(self.protocol[i].get('waitmins'))\n waitsecs = int(self.protocol[i].get('waitsecs'))\n\n timebuffer = 6\n\n timewait = waitmins*60 + waitsecs\n\n waste = int(self.protocol[i].get('waste'))\n\n self.parent.log.addRecord(\"Starting command '\" + name+ \"'\")\n self.parent.log.addRecord(\"Selected pump for this command is: \" + str(pump)) \n self.parent.log.addRecord(\"Setting pump speed to \" + str(speed))\n pump.setSpeed(speed)\n sleep(2)\n \n self.parent.log.addRecord(\"Extracting \" + str(volume) + \"uL from port \" + str(fromport))\n esttime = pump.extract(fromport, volume, execute=True)\n self.parent.log.addRecord(\"Estimated time: \" + str(esttime) + \"sec\")\n sleep(esttime+timebuffer)\n\n self.parent.log.addRecord(\"Dispensing \" + str(volume) + \"uL to port \" + str(toport))\n esttime = pump.dispense(toport, volume, execute=True)\n sleep(esttime+timebuffer)\n\n self.parent.log.addRecord(\"Waiting for \" + str(waitmins) + \"min \" + str(waitsecs) + \"sec before extraction\")\n sleep(timewait)\n\n self.parent.log.addRecord(\"Setting pump speed to 30 for extraction\")\n pump.setSpeed(30)\n sleep(2)\n self.parent.log.addRecord(\"Extracting \" + str(volume) + \"uL from port \" + str(toport))\n esttime = pump.extract(toport, volume, execute=True)\n sleep(esttime+timebuffer)\n self.parent.log.addRecord(\"Setting pump speed back to \" + str(speed))\n pump.setSpeed(speed)\n sleep(2)\n if waste == 0:\n self.parent.log.addRecord(\"Dispensing \" + str(volume) + \"uL back to port \" + str(fromport))\n esttime = pump.dispense(fromport, volume, execute=True)\n sleep(esttime+timebuffer)\n elif waste == 1:\n self.parent.log.addRecord(\"Dispensing \" + str(volume) + \"uL to waste port 9\")\n esttime = pump.dispense(9, volume, execute=True)\n sleep(esttime+timebuffer)\n\n self.parent.log.addRecord(\"Command '\" + name + \"' is finished \\n\")\n\n status['text'] = 'Complete'\n status.config(bg='green', fg=\"black\")\n\n\n #if (self.cmdcounter+1) == len(self.protocol):\n # print(\"Resetting cmdcounter to 0\")\n # self.cmdcounter = 0\n # self.resetStatuses()\n #else:\n # print(\"Incrementing cmdcounter\")\n # self.cmdcounter += 1\n\n\n def resetStatuses(self):\n for i in 
range(len(self.protocol)):\n            status = self.statuses[i]\n            status['text'] = 'Not complete'\n            status.config(fg='black')\n\n\n    def resetPump(self):\n        port = self.parent.cp.selectedpump.get()\n        pump = self.device_dict.get(port)\n        self.parent.log.addRecord(\"Resetting pump \" + str(pump) + \"\\n\")\n        pump.init()\n\n\n    def findSerialPumps(self):\n        return TecanAPISerial.findSerialPumps()\n\n\n    def getSerialPumps(self):\n        pump_list = self.findSerialPumps()\n        return [(ser_port, XCaliburD(com_link=TecanAPISerial(0,ser_port, 9600))) for ser_port, _, _ in pump_list]\n\n########################################################################################################################\n\nclass SerialPort():\n\n    def __init__(self, parent, port, baud, timeout):\n        self.parent = parent\n        self.portname = port\n        self.baudrate = baud\n        self.timeout = timeout\n\n        self.read = True\n\n        self.port = Serial(self.portname, self.baudrate, timeout=self.timeout, writeTimeout=0)\n\n\n    def readPort(self, bits):\n        while self.read:\n            cmd = self.port.read(bits)\n            print(\"Cmd: \"+cmd)\n            if len(cmd) > 0:\n                self.parent.log.addRecord(\"Received serial command: \" + str(cmd))\n                if cmd == \"pump\":\n                    self.parent.protocol.executeCycle()\n\n########################################################################################################################\n\nclass SerialThread(threading.Thread):\n    def __init__(self, parent, port, baud, timeout):\n        threading.Thread.__init__(self)\n\n        self.parent = parent\n        self.port = port\n        self.baud = baud\n        self.timeout = timeout\n\n\n    def run(self):\n        s = Serial(self.port, self.baud, timeout=self.timeout)\n        while True:\n#            if s.inWaiting():\n            cmd = s.read(5)\n            print('Cmd: '+str(cmd))\n            if len(cmd) > 0:\n                self.parent.log.addRecord(\"Received serial command '\" + str(cmd) + \"'\")\n                if cmd == 'pump':\n                    self.parent.protocol.executeCycle() \n\n########################################################################################################################\n\nclass VerticalScrolledFrame(tk.Frame):\n    def __init__(self, parent, *args, **kwargs):\n        tk.Frame.__init__(self, parent, *args, **kwargs)\n\n        self.parent = parent\n\n        vscrollbar = tk.Scrollbar(self, orient='vertical', relief='flat')\n        vscrollbar.pack(fill='y', side='right', expand=False)\n\n        canvas = tk.Canvas(self, bd=0, highlightthickness=0, yscrollcommand=vscrollbar.set)\n        canvas.pack(side='left', fill='both', expand=True)\n        canvas.config(height=300)\n        \n        vscrollbar.config(command=canvas.yview)\n\n        canvas.xview_moveto(0)\n        canvas.yview_moveto(0)\n\n        self.interior = interior = tk.Frame(canvas)\n        interior_id = canvas.create_window(0, 0, window=interior, anchor='nw')\n\n        def _configure_interior(event):\n            size = (interior.winfo_reqwidth(), interior.winfo_reqheight())\n            canvas.config(scrollregion=\"0 0 %s %s\" % size)\n            if interior.winfo_reqwidth() != canvas.winfo_width():\n                canvas.config(width=interior.winfo_reqwidth())\n        interior.bind('<Configure>', _configure_interior)\n\n        def _configure_canvas(event):\n            if interior.winfo_reqwidth() != canvas.winfo_width():\n                canvas.itemconfigure(interior_id, width=canvas.winfo_width())\n        canvas.bind('<Configure>', _configure_canvas)\n\n########################################################################################################################\n\n#--------------------------- @30 1000uL = 88sec\n#--------------------------- @31 1000uL = 102sec\n#--------------------------- @32 1000uL = 122sec\n#--------------------------- @33 1000uL = 152sec\n\nif __name__ == \"__main__\":\n\n    class App(tk.Frame):\n        def 
__init__(self, parent, *args, **kwargs):\n tk.Frame.__init__(self, parent, *args, **kwargs)\n\n self.headfont = ('arial', 16, 'normal')\n\n self.logofc = \"#468ef2\"\n self.headcolor = \"#707070\"\n self.headfc = \"#e5e5e5\"\n self.wrapcolor = \"#212121\"\n self.framecolor = \"#4f4f4f\"\n self.buttoncolor = \"#468ef2\"\n self.buttonfc = \"#e5e5e5\"\n self.entrycolor = \"#e5e5e5\"\n self.entryfc = \"#1c1c1c\"\n self.bodyfc = \"#e5e5e5\"\n self.lightercolor = \"#595959\"\n self.darkercolor = \"#4a4a4a\"\n\n self.config(bg=self.wrapcolor)\n\n self.parent = parent\n\n self.c1 = tk.Frame(self, bg=self.wrapcolor).grid(row=0, column=0)\n self.c2 = tk.Frame(self, bg=self.wrapcolor).grid(row=0, column=1)\n self.c3 = tk.Frame(self, bg=self.wrapcolor).grid(row=0, column=2)\n\n tk.Label(self, text=\"CASPA\", bg=self.wrapcolor, fg=self.logofc, font=('arial', 24, 'normal')).grid(row=0, column=1)\n\n self.protocol = Protocol(self)\n self.protocol.grid(row=1, column=1, sticky='n')\n\n self.menu = MainMenu(self, self.parent)\n\n self.cp = ControlPanel(self)\n self.cp.grid(row=1, column=0, rowspan=2)\n\n self.log = Log(self)\n self.log.grid(row=2, column=1, sticky='n')\n\n thread = SerialThread(self, '/dev/ttyAMA0', 9600, 1)\n thread.start()\n\n# self.processSerial()\n\n# self.serial = SerialPort(self, \"/dev/ttyAMA0\", 9600, 1)\n\n# thread = threading.Thread(target=self.serial.readPort, args=(20,)) \n# thread.start()\n\n\n def processSerial(self):\n while self.queue.qsize():\n try:\n cmd = self.queue.get()\n print(\"Cmd: \" + str(cmd))\n if cmd == 'pump':\n self.protocol.executeCycle()\n except Queue.Empty:\n pass\n self.after(100, self.processSerial)\n\n\n root = tk.Tk()\n root.wm_title(\"Computer-Aided Syringe Pump Automation\")\n\n app = App(root)\n app.pack(padx=5, pady=5)\n\n root.config(bg=app.wrapcolor)\n\n root.mainloop()\n","sub_path":"old/example/ipythonnb/caspa2p5.py","file_name":"caspa2p5.py","file_ext":"py","file_size_in_byte":34344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"268358259","text":"import numpy as np\nimport random\nfrom vehicle import *\n\n\nclass Ant:\n\n def __init__(self, n, vehicleCapacities, numberOfVehicles):\n self.n = n # broj gradova\n self.tour_length = -1 # duljina ture\n # memorija (djelomican/konacan put mrava)\n self.tour = []\n # posjeceni gradovi (u pocetku sve False)\n self.visited = np.full(n, False)\n # napunjenost mrava\n self.antLoad = 0\n # kreiranje liste vozila\n self.vehicle = [Vehicle(vehicleCapacities[i])\n for i in range(numberOfVehicles)]\n # Postaviti prvo vozilo za korištenje\n self.vehicleInUse = self.vehicle[0]\n self.vehicleInUse.usedVehicle = True\n self.vehicleInUse.vehicleTour.append(0)\n\n def emptyAntMemory(self):\n for i in range(self.n):\n self.visited[i] = False\n self.tour.clear()\n self.antLoad = 0\n\n def placeAntInDepot(self, depot):\n self.tour.insert(depot)\n self.visited[depot] = True\n","sub_path":"ant.py","file_name":"ant.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"376193552","text":"import discord, sqlite3, asyncio\nfrom discord.ext import commands\nfrom discord_slash import cog_ext, SlashContext\nfrom discord_slash.utils.manage_commands import create_option\n\nclass Slash(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @cog_ext.cog_slash(name=\"give\", description=\"Donner des crédits à quelqu'un qui est inscrit à l'aventure ISO land !\", 
options=[\n create_option(\n name=\"membre\",\n description=\"Membre de discord à qui donner des crédits\",\n option_type=6,\n required=True\n ),\n create_option(\n name=\"argent\",\n description=\"Montant de crédits à donner (avec une taxe de 2%)\",\n option_type=4,\n required=True\n )])\n async def _give(self, ctx, membre: discord.Member, argent: int):\n connection = sqlite3.connect(\"iso_card.db\")\n cursor = connection.cursor()\n if membre.bot == True:\n await ctx.send(f\"{ctx.author.mention} Tu ne peux pas donner d'argent aux bots... :wink:\")\n if membre.bot == False:\n if membre == ctx.author:\n await ctx.send(\"Tu ne peux pas te donner de l'argent à toi-même ! :stuck_out_tongue:\")\n else:\n member_id = (f\"{membre.id}\",)\n cursor.execute('SELECT * FROM tt_iso_card WHERE user_id = ?', member_id)\n member_values = cursor.fetchone()\n author_id = (f\"{ctx.author.id}\",)\n cursor.execute('SELECT * FROM tt_iso_card WHERE user_id = ?', author_id)\n author_values = cursor.fetchone()\n if member_values == None:\n await ctx.send(f\"{ctx.author.mention} Tu ne peux pas donner d'argent à cette personne car elle ne s'est pas inscrite à l'aventure ISO land ! (Pour qu'elle inscrive : **/start**)\")\n elif author_values == None:\n await ctx.send(f\"{ctx.author.mention} Tu ne peux pas donner d'argent car tu ne t'es pas inscrit à l'aventure ISO land ! (Pour t'inscrire : **/start**)\")\n else:\n argent_de_author = author_values[5]\n if argent > argent_de_author:\n await ctx.send(f\"{ctx.author.mention} Tu ne peux pas donner autant d'argent car tu n'en as pas assez sur ta carte !\")\n else:\n if argent < 1:\n await ctx.send(f\"{ctx.author.mention} Tu ne peux pas effectuer cette transaction car le montant est trop bas (minimum 1<:aCoin:822427301488623620> ) !\")\n else:\n argent_a_donner = argent\n ancient_argent_author = argent_de_author\n taxe = argent*0.02 # le complément est la taxe de 2%\n new_argent_author = argent_de_author - argent_a_donner - taxe\n new_argent_author = round(new_argent_author, 2)\n\n ancient_argent_member = member_values[5]\n new_argent_member = ancient_argent_member + argent_a_donner\n\n updated_author = (f\"{new_argent_author}\", f\"{ctx.author.id}\",)\n cursor.execute('UPDATE tt_iso_card SET dailies = ? WHERE user_id = ?', updated_author)\n updated_member = (f\"{new_argent_member}\", f\"{membre.id}\",)\n cursor.execute('UPDATE tt_iso_card SET dailies = ? 
WHERE user_id = ?', updated_member)\n connection.commit()\n await ctx.send(embed=None, content=f\"**Transaction** effectuée par : {ctx.author.mention}\\ncréditeur : {membre.mention}\\nMontant : {argent_a_donner}<:aCoin:822427301488623620> (Montant total : {argent_a_donner + taxe}<:aCoin:822427301488623620> )\\nTaxe : {taxe}<:aCoin:822427301488623620> (2%)\")\n\n connection.close()\n\ndef setup(bot):\n bot.add_cog(Slash(bot))\n\ndef teardown(bot):\n bot.remove_cog(\"give\")","sub_path":"cogs/give.py","file_name":"give.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"403060909","text":"from typing import List\r\n\r\nfrom discord.ext.commands import Bot, Cog, command, context\r\n\r\nfrom cogs.utils.const import GameStatusConst\r\nfrom cogs.utils.werewolf_bot import WerewolfBot\r\nfrom setup_logger import setup_logger\r\n\r\nlogger = setup_logger(__name__)\r\n\r\n\r\nclass GameStatusCog(Cog):\r\n def __init__(self, bot: WerewolfBot):\r\n logger.debug(\"GameStatusCogのinit\")\r\n self.bot: WerewolfBot = bot\r\n\r\n @command(aliases=[\"cre\"])\r\n async def create(self, ctx: context) -> None:\r\n \"\"\"人狼ゲーム作成(エイリアス[cre])\"\"\"\r\n if self.bot.game.status == GameStatusConst.PLAYING.value:\r\n await ctx.send(\"現在ゲーム中です。createコマンドは使えません\")\r\n return\r\n if self.bot.game.status == GameStatusConst.WAITING.value:\r\n await ctx.send(\"現在参加者募集中です\")\r\n return\r\n\r\n self.bot.game.status = GameStatusConst.WAITING.value\r\n await ctx.send(\"参加者の募集を開始しました。\")\r\n\r\n @command(aliases=[\"sgs\"])\r\n async def show_game_status(self, ctx: context) -> None:\r\n \"\"\"コマンド:現在のゲームステータスを表示\r\n\r\n :param ctx:\r\n :return:\r\n \"\"\"\r\n await ctx.send(\"show_game_statusコマンドが実行されました\")\r\n status: str = self.bot.game.status\r\n await ctx.send(f\"現在のゲームのステータスは{status}です\")\r\n\r\n @command(aliases=[\"setgs\"])\r\n async def set_game_status(self, ctx: context, status: str = \"\") -> None:\r\n \"\"\"コマンド:ゲームステータスを引数statusに設定\r\n\r\n :param ctx:\r\n :param status:ゲームのステータス。GameStatusConst参照\r\n :return:\r\n \"\"\"\r\n status_list: List[str] = [x.value for x in GameStatusConst]\r\n\r\n if status == \"\":\r\n await ctx.send(f\"引数がありません。引数は以下からえらんでください。 {status_list}\")\r\n return\r\n\r\n if status not in status_list:\r\n await ctx.send(f\"引数が間違っています。引数は以下からえらんでください。{status_list}\")\r\n return\r\n\r\n self.bot.game.status = status\r\n await ctx.send(f\"ゲームのステータスを{status}にセットしました\")\r\n\r\n\r\ndef setup(bot: Bot) -> None:\r\n bot.add_cog(GameStatusCog(bot))\r\n","sub_path":"cogs/game_status_cog.py","file_name":"game_status_cog.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"66610584","text":"\nimport sys\nsys.path.append(\"./LegalUISRNN\")\nimport numpy as np\nimport torch\nimport glob\nimport os\nimport uisrnn\n\n\n#expects processed cases in data folder (take from Google Drive or PRINCE)\n#case_path = './LegalUISRNN/data/SCOTUS_Processed/*/*' # local path\ncase_path = '/scratch/jt2565/SCOTUS_Processed/*/*' # prince path\n\n\ncase_path = glob.glob(os.path.dirname(case_path))\ntotal_cases = len(case_path)\ntrain_cases = total_cases//10*8\nprint(\"# of training:\", train_cases)\nprint(\"# total cases:\" , total_cases)\n\ntrn_seq_lst = []\ntrn_cluster_lst = []\ntest_seq_lst = []\ntest_cluster_lst = []\n\nverbose = True\nfor i, case in enumerate(case_path):\n\n case_id = case.split('/')[-1][:-7]\n train_seq = 
np.load(case+'/'+case_id+'_sequence.npy', allow_pickle=True)\n train_clus = np.load(case+'/'+case_id+'_cluster_id.npy', allow_pickle=True)\n\n train_sequence = []\n train_cluster_id = []\n for j in range(np.shape(train_seq)[0]):\n train_sequence.append(train_seq[j])\n if i <= train_cases:\n train_cluster_id.append(train_clus[j])\n else:\n train_cluster_id.append(list(map(int, train_clus[j])))\n \n if False:\n print('Processed case:', case_id)\n print('emb shape:', np.shape(train_seq))\n print('label shape:', np.shape(train_clus))\n print('emb len:', len(train_sequence))\n print('label len:', len(train_cluster_id)) \n \n if i <= train_cases:\n trn_seq_lst.append(train_sequence)\n trn_cluster_lst.append(train_cluster_id)\n else:\n test_seq_lst.append(train_sequence)\n test_cluster_lst.append(train_cluster_id) \n\n\n\nfrom uisrnn import utils\nitem=0\n(concatenated_train_sequence, concatenated_train_cluster_id) = utils.concatenate_training_data(trn_seq_lst[item], trn_cluster_lst[item], False, False)\n\nif verbose:\n print(type(concatenated_train_sequence), type(concatenated_train_sequence[0]))\n #print(np.shape(concatenated_train_sequence))\n print(np.shape(concatenated_train_sequence))\n\n","sub_path":"SCOTUS/testing/bad_test/checkSCdata.py","file_name":"checkSCdata.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"612246260","text":"import datetime\nimport json\nimport os\nimport platform\nimport re\nfrom enum import Enum, auto\nfrom pathlib import Path\n\nimport numpy as np\nfrom PyQt5.QtWidgets import QLabel, QPushButton, QRadioButton, QStatusBar, \\\n QMessageBox\nfrom netCDF4 import Dataset\n\n\nclass PlotType(Enum):\n TIME_SERIES = auto()\n HEAT_MAP = auto()\n\n\nclass DataAction(Enum):\n EXPORT = auto()\n PLOT = auto()\n\n\nclass ExportDataType(Enum):\n CSV = \".csv\"\n ZIP = \".zip\"\n EXCEL = \".xlsx\"\n HTML = \".html\"\n\n\nclass PlotDataType(Enum):\n PDF = \"pdf\"\n PNG = \"png\"\n EPS = \"eps\"\n SVG = \"svg\"\n JPEG = \"jpeg\"\n\n\nclass FileExtension(Enum):\n NETCDF = \"*.nc\"\n NETCDF4 = \"*.nc4\"\n DATA_FILE = \".npz\"\n TMP_DATA_FILE = \"_tmp.npz\"\n META_FILE = \"metadata.json\"\n TMP_META_FILE = \"metadata_tmp.json\"\n PLOT_PNG = \".png\"\n PLOT_PDF = \".pdf\"\n\n\nclass DirectorySeparator(Enum):\n UNIX = \"/\"\n WINDOWS = \"/\"\n\n\nclass HelperFunction:\n @staticmethod\n def format_directory_path(path: str) -> str:\n separator = HelperFunction.get_dir_separator()\n reg = r\"{0}$\".format(separator)\n if not re.findall(reg, path):\n path += separator\n return path\n\n @staticmethod\n def can_read_directory(src_path: str) -> bool:\n return os.access(src_path, os.R_OK)\n\n @staticmethod\n def can_write_directory(src_path: str) -> bool:\n return os.access(src_path, os.W_OK)\n\n @staticmethod\n def get_qt_text_width(element, text: str) -> int:\n return 1.1 * element.fontMetrics().boundingRect(text).width()\n\n @staticmethod\n def replace_array_fill_value(data: np.ndarray, fill_value) -> np.ndarray:\n if len(data.shape) == 3:\n new_d = np.where(data[:, :, :] != fill_value, data[:, :, :],\n np.NaN)\n else:\n new_d = np.where(data[:, :, :, :] != fill_value, data[:, :, :, :],\n np.NaN)\n return new_d\n\n @staticmethod\n def get_long_variable_name(src_path: str, variable_name: str) -> str:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF4.value))\n if len(sorted_file_list) == 0:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF.value))\n 
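# fallback chain: prefer the newer *.nc4 files, then classic *.nc; if neither\n        # exists the check below returns an empty string instead of raising\n        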
if len(sorted_file_list) == 0:\n return \"\"\n with Dataset(sorted_file_list[0], 'r') as d:\n return HelperFunction.format_variable_name(\n d.variables[variable_name].long_name)\n\n @staticmethod\n def get_available_variables(src_path: str) -> [str]:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF4.value))\n if len(sorted_file_list) == 0:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF.value))\n var_info_list = []\n if HelperFunction.is_valid_nc_source_directory(src_path):\n with Dataset(sorted_file_list[0], 'r') as d:\n for var in d.variables.keys():\n if var != 'time' and var != 'lat' and var != 'lon' and var != 'lev':\n var_info_list.append(var)\n return var_info_list\n\n @staticmethod\n def is_valid_nc_source_directory(src_path: str) -> bool:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF4.value))\n if len(sorted_file_list) == 0:\n sorted_file_list = sorted(\n Path(src_path).glob(FileExtension.NETCDF.value))\n if len(sorted_file_list) == 0:\n return False\n return True\n\n @staticmethod\n def is_valid_npz_source_directory(src_path: str) -> bool:\n dir_separator = HelperFunction.get_dir_separator()\n reg = r\"{0}$\".format(dir_separator)\n if not re.findall(reg, src_path):\n src_path += dir_separator\n\n metadata_path = src_path + FileExtension.META_FILE.value\n\n metadata_dictionary = None\n try:\n with open(metadata_path, 'r') as f:\n metadata_dictionary = json.load(f)\n except:\n return False\n\n if metadata_dictionary is None:\n return False\n\n var_name = metadata_dictionary['name']\n data_path = src_path + var_name + FileExtension.DATA_FILE.value\n try:\n np.load(data_path)\n return True\n except:\n return False\n\n @staticmethod\n def get_dir_separator() -> str:\n system = platform.system()\n if system == 'Darwin' or system == 'Linux':\n return DirectorySeparator.UNIX.value\n elif system == 'Windows':\n return DirectorySeparator.WINDOWS.value\n\n @staticmethod\n def format_variable_name(name: str) -> str:\n n = re.sub(\"_\", \" \", name)\n return \" \".join(w.capitalize() for w in n.split())\n\n @staticmethod\n def get_data_info(src_folder: str):\n with open(src_folder + FileExtension.META_FILE.value, 'r') as f:\n return json.load(f)\n\n @staticmethod\n def round_number(number, places):\n return round(10 ** places * number) / 10 ** places\n\n @staticmethod\n def create_label(parent, text: str, x_position: int, y_position: int,\n height: int) -> QLabel:\n label = HelperFunction.create_label_with_width(parent, text,\n x_position, y_position,\n 1, height)\n label.setFixedWidth(HelperFunction.get_qt_text_width(label, text))\n return label\n\n @staticmethod\n def create_label_with_width(parent, text: str, x: int, y: int, width: int,\n height: int) -> QLabel:\n label = QLabel(parent)\n label.setText(text)\n label.setGeometry(x, y, width, height)\n return label\n\n @staticmethod\n def create_button(parent, text: str, x: int, y: int, width: int,\n height: int) -> QPushButton:\n button = QPushButton(parent)\n button.setText(text)\n button.setGeometry(x, y, width, height)\n return button\n\n @staticmethod\n def create_radio_button(parent, text: str, x: int, y: int, width: int,\n height: int) -> QRadioButton:\n radio_button = QRadioButton(parent)\n radio_button.setText(text)\n radio_button.setGeometry(x, y, width, height)\n return radio_button\n\n @staticmethod\n def create_status_bar(parent, text: str, x: int, y: int, width: int,\n height: int) -> QStatusBar:\n status_bar = QStatusBar(parent)\n 
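# positioned with explicit setGeometry coordinates below, matching the\n        # fixed-layout style of the other widget factories in this class\n        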
status_bar.showMessage(text)\n status_bar.setGeometry(x, y, width, height)\n return status_bar\n\n @staticmethod\n def show_error_message(parent, text: str):\n error = QMessageBox(parent)\n error.setWindowTitle(\"An Error Occurred!\")\n error.setText(text)\n error.exec_()\n\n @staticmethod\n def get_datetime_from_str(string: str):\n return datetime.datetime.strptime(string, \"%Y-%m-%d %H:%M\")\n\n @staticmethod\n def get_str_from_datetime(dt: datetime.datetime):\n return datetime.datetime.strftime(dt, \"%Y-%m-%d %H:%M\")\n","sub_path":"programs/gui_program/HelperFunctions.py","file_name":"HelperFunctions.py","file_ext":"py","file_size_in_byte":7214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"331137373","text":"from config.auth_config import JWT_SECRET, JWT_CLIENT_ID, ADMIN_CLIENT_ID, ADMIN_CLIENT_SECRET, \\\n AUTH_CONNECTION, AUTH_URL, USE_AUTH, NO_AUTH_EMAIL\nfrom config.portal_config import PORTAL_URL\nfrom datetime import datetime, timedelta\nfrom functools import wraps\nfrom flask import request, Response, jsonify\nimport jwt\nimport logging\nfrom membership.database.base import Session\nfrom membership.database.models import Member\nimport pkg_resources\nimport random\nimport requests\nimport string\n\nPASSWORD_CHARS = string.ascii_letters + string.digits\n\n\ndef deny(reason: str= '') -> Response:\n \"\"\"Sends a 401 response that enables basic auth\"\"\"\n response = jsonify({\n 'status': 'error',\n 'err': 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials and' + reason\n })\n response.status_code = 401\n return response\n\n\ndef requires_auth(admin=False):\n \"\"\" This defines a decorator which when added to a route function in flask requires authorization to\n view the route.\n \"\"\"\n def decorator(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if USE_AUTH:\n auth = request.headers.get('authorization')\n if not auth:\n return deny('Authorization not found.')\n token = auth.split()[1]\n try:\n token = jwt.decode(token, JWT_SECRET, audience=JWT_CLIENT_ID)\n except Exception as e:\n return deny(str(e))\n email = token.get('email')\n else:\n email = NO_AUTH_EMAIL\n session = Session()\n try:\n member = session.query(Member).filter_by(email_address=email).one()\n authenticated = False\n if admin:\n for role in member.roles:\n if role.committee_id is None and role.role == 'admin':\n authenticated = True\n else:\n authenticated = True\n if authenticated:\n kwargs['requester'] = member\n kwargs['session'] = session\n return f(*args, **kwargs)\n return deny('not enough access')\n finally:\n session.close()\n\n return decorated\n return decorator\n\n\ncurrent_token = {}\n\n\ndef get_auth0_token():\n if not current_token or datetime.now() > current_token['expiry']:\n current_token.update(generate_auth0_token())\n return current_token['token']\n\n\ndef generate_auth0_token():\n payload = {'grant_type': \"client_credentials\",\n 'client_id': ADMIN_CLIENT_ID,\n 'client_secret': ADMIN_CLIENT_SECRET,\n 'audience': AUTH_URL + 'api/v2/'}\n response = requests.post(AUTH_URL + 'oauth/token', json=payload).json()\n return {'token': response['access_token'],\n 'expiry': datetime.now() + timedelta(seconds=response['expires_in'])}\n\n\ndef create_auth0_user(email):\n if not USE_AUTH:\n return PORTAL_URL\n # create the user\n payload = {\n 'connection': AUTH_CONNECTION,\n 'email': email,\n 'password': ''.join(random.SystemRandom().choice(PASSWORD_CHARS) for _ in range(12)),\n 
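# throwaway random password: the user never sees it and instead sets their own\n        # through the password-change ticket requested further down in this function\n        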
'user_metadata': {},\n 'email_verified': False,\n 'verify_email': False\n }\n headers = {'Authorization': 'Bearer ' + get_auth0_token()}\n r = requests.post(AUTH_URL + 'api/v2/users', json=payload, headers=headers)\n if r.status_code > 299:\n logging.error(r.json())\n raise Exception('Failed to create user')\n user_id = r.json()['user_id']\n\n # get a password change URL\n payload = {\n 'result_url': PORTAL_URL,\n 'user_id': user_id\n }\n r = requests.post(AUTH_URL + 'api/v2/tickets/password-change', json=payload, headers=headers)\n if r.status_code > 299:\n logging.error(r.json())\n raise Exception('Failed to get password url')\n reset_url = r.json()['ticket']\n\n # get email verification link\n payload = {\n 'result_url': reset_url,\n 'user_id': user_id\n }\n r = requests.post(AUTH_URL + 'api/v2/tickets/email-verification', json=payload, headers=headers)\n if r.status_code > 299:\n logging.error(r.json())\n raise Exception('Failed to get verify url')\n validate_url = r.json()['ticket']\n return validate_url\n\n","sub_path":"membership/web/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"171186073","text":"import os\nimport re\nimport json\nimport threading\nimport subprocess\n\nimport sublime\nimport sublime_plugin\n\n\ndef _get_workspace_json_path(wind):\n proj_file_path = wind.project_file_name()\n if proj_file_path is not None:\n proj_file = os.path.basename(proj_file_path)\n proj_file_dir = os.path.dirname(proj_file_path)\n\n json_file = re.sub(\n r\"\\.sublime-project\",\n r\".p4basic.json\",\n proj_file)\n json_file_path = os.path.join(proj_file_dir, json_file)\n\n return json_file_path\n else:\n return None\n\n\ndef _get_setting(wind, key, default=None):\n # First check dedicated .json file in project file dir\n json_file_path = _get_workspace_json_path(wind)\n if json_file_path is not None and os.path.isfile(json_file_path):\n with open(json_file_path) as f:\n json_sett = json.load(f)\n\n if key in json_sett:\n return json_sett[key]\n\n # Next check embedded p4basic section in project file\n proj_sett = wind.project_data().get(\"p4basic\")\n if proj_sett is not None and key in proj_sett:\n return proj_sett[key]\n\n # Check user preferences\n sett = sublime.load_settings(\"p4basic.sublime-settings\")\n return sett.get(key, default=default)\n\n\ndef _get_p4_base_cmd(wind):\n cmd = _get_setting(wind, \"p4_path\", \"p4\")\n port = _get_setting(wind, \"port\")\n client = _get_setting(wind, \"client\")\n host = _get_setting(wind, \"host\")\n user = _get_setting(wind, \"user\")\n\n if port is not None:\n cmd += \" -p {}\".format(port)\n if client is not None:\n cmd += \" -c {}\".format(client)\n if host is not None:\n cmd += \" -H {}\".format(host)\n if user is not None:\n cmd += \" -u {}\".format(user)\n\n return cmd\n\n\ndef _out_msg(wind, msg):\n view = wind.create_output_panel(\"p4basic\")\n\n wind.run_command(\"show_panel\", {\"panel\": \"output.p4basic\"})\n view.run_command(\"p4basic_panel_append\", {'text': msg})\n\n\nclass P4basicPanelAppend(sublime_plugin.TextCommand):\n def run(self, edit, text):\n self.view.insert(edit, self.view.size(), text)\n\n\ndef _where_path(wind, path):\n cmd_base = _get_p4_base_cmd(wind)\n\n path_dir = os.path.dirname(path)\n\n cmd = cmd_base + ' -Ztag where \"{}\"'.format(path)\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=path_dir)\n _out_msg(wind, 
\"[COMMAND]{}\\n\".format(cmd))\n result, err = p.communicate()\n result = result.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if err != '':\n output = \"[COMMAND]{}\\n[ERROR]{}\\n\".format(cmd, err)\n _out_msg(wind, output)\n sublime.error_message(output)\n else:\n output = \"[COMMAND]{}\\n[OUTPUT]{}\\n\".format(cmd, result)\n _out_msg(wind, output)\n\n\nclass P4basicWhereSidebar(sublime_plugin.WindowCommand):\n def run(self, paths=[]):\n if len(paths) != 1:\n _out_msg(self.window, \"Only where of single path supported.\")\n return\n\n _where_path(self.window, paths[0])\n\n def is_enabled(self, paths=[]):\n return len(paths) == 1\n\n\nclass P4basicWhereText(sublime_plugin.TextCommand):\n def run(self, edit):\n _where_path(self.view.window(), self.view.file_name())\n\n\nclass P4basicEditSidebar(sublime_plugin.WindowCommand):\n def run(self, paths=[]):\n _edit_paths(self.window, paths)\n\n\nclass P4basicEditText(sublime_plugin.TextCommand):\n def run(self, edit):\n _edit_paths(self.view.window(), [self.view.file_name()])\n\n\ndef _edit_paths(wind, paths):\n cmd_base = _get_p4_base_cmd(wind)\n\n for path in paths:\n path_dir = os.path.dirname(path)\n\n if os.path.isdir(path):\n path = os.path.join(path, \"...\")\n\n cmd = cmd_base + ' edit \"{}\"'.format(path)\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=path_dir)\n\n result, err = p.communicate()\n result = result.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if err != '':\n output = \"[COMMAND]{}\\n[ERROR]{}\\n\".format(cmd, err)\n _out_msg(wind, output)\n sublime.error_message(err)\n else:\n output = \"[COMMAND]{}\\n[OUTPUT]{}\\n\".format(cmd, result)\n _out_msg(wind, output)\n\n\nclass P4basicDiffSidebar(sublime_plugin.WindowCommand):\n def run(self, files=[]):\n if len(files) != 1:\n _out_msg(self.window, \"Only diff of single file supported.\")\n return\n\n _diff_file(self.window, files[0])\n\n def is_enabled(self, files=[]):\n return len(files) == 1\n\n\nclass P4basicDiffText(sublime_plugin.TextCommand):\n def run(self, edit):\n _diff_file(self.view.window(), self.view.file_name())\n\n\ndef _diff_file(wind, file):\n cmd_base = _get_p4_base_cmd(wind)\n\n file_dir = os.path.dirname(file)\n\n env = dict(os.environ)\n\n diff_path = _get_setting(wind, \"diff_path\")\n if diff_path is not None:\n env['P4DIFF'] = diff_path\n\n cmd = cmd_base + ' diff \"{}\"'.format(file)\n def _func():\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=file_dir,\n env=env)\n _out_msg(wind, \"[COMMAND]{}\\n\".format(cmd))\n result, err = p.communicate()\n result = result.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if err != '':\n output = \"[COMMAND]{}\\n[ERROR]{}\\n\".format(cmd, err)\n _out_msg(wind, output)\n sublime.error_message(output)\n else:\n output = \"[COMMAND]{}\\n[OUTPUT]{}\\n\".format(cmd, result)\n _out_msg(wind, output)\n\n t = threading.Thread(target=_func)\n t.start() \n\n\nclass P4basicP4vSidebar(sublime_plugin.WindowCommand):\n def run(self, paths=[]):\n if len(paths) != 1:\n _out_msg(\"Only p4v on single path supported.\")\n return\n\n _open_p4v(self.window, paths[0])\n\n def is_enabled(self, paths=[]):\n return len(paths) == 1\n\n\nclass P4basicP4vText(sublime_plugin.TextCommand):\n def run(self, edit):\n _open_p4v(self.view.window(), self.view.file_name())\n\n\ndef _open_p4v(wind, path):\n path_dir = os.path.dirname(path)\n\n cmd = _get_setting(wind, \"p4v_path\", \"p4v\")\n\n port = _get_setting(wind, 
\"port\")\n client = _get_setting(wind, \"client\")\n user = _get_setting(wind, \"user\")\n\n if port is not None:\n cmd += \" -p {}\".format(port)\n if user is not None:\n cmd += \" -u {}\".format(user)\n if client is not None:\n cmd += \" -c {}\".format(client)\n\n cmd += ' -s \"{}\"'.format(path)\n\n def _func():\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True,\n cwd=path_dir)\n _out_msg(wind, \"[COMMAND]{}\\n\".format(cmd))\n result, err = p.communicate()\n result = result.decode(\"utf-8\")\n err = err.decode(\"utf-8\")\n\n if err != '':\n output = \"[COMMAND]{}\\n[ERROR]{}\\n\".format(cmd, err)\n _out_msg(wind, output)\n sublime.error_message(output)\n\n t = threading.Thread(target=_func)\n t.start()\n\n\nclass P4basicOpenWorkspaceSettings(sublime_plugin.WindowCommand):\n def run(self):\n path = _get_workspace_json_path(self.window)\n\n if path is None:\n return\n\n self.window.open_file(path)\n\n def is_enabled(self):\n return self.window.project_file_name() is not None\n","sub_path":"p4basic.py","file_name":"p4basic.py","file_ext":"py","file_size_in_byte":7621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"522237358","text":"class Solution(object):\r\n def jump(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: int\r\n The main idea is based on greedy. \r\n Every one more jump, you want to jump as far as possible. In Jump Game I, when you at position i, you care about what is the furthest position could be reached from i th position. but here in Jump Game II, instead you care about what would be the next furthest jump could be made when you could reach as far as ith position from last jump.\r\n So let's say the range of the current jump is [curBegin, curEnd], curFarthest is the farthest point that all points in [curBegin, curEnd] can reach.\r\n \"\"\"\r\n nums_len = len(nums)\r\n cur_pos = 0\r\n cnt = 0\r\n while 1:\r\n if cur_pos >= nums_len - 1:\r\n return cnt\r\n max_jump = 0\r\n max_id = -1\r\n if cur_pos + nums[cur_pos] >= nums_len - 1:\r\n # if we can reach the end, no need to consider the next two jumps.\r\n return cnt + 1\r\n for id in range(cur_pos+1, cur_pos+nums[cur_pos]+1):\r\n # consider the next two jumps\r\n if id < nums_len and max_jump <= id + nums[id]:\r\n max_jump = id + nums[id]\r\n max_id = id\r\n if max_jump == 0:\r\n return None\r\n cur_pos = max_id\r\n cnt += 1","sub_path":"src/045_JumpGameII.py","file_name":"045_JumpGameII.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"438307074","text":"import numpy as np\nfrom copy import deepcopy\nimport liegroups\nimport math\n\n\ndef rm2rpy(R):\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\n singular = sy < 1e-6\n\n if not singular:\n x = math.atan2(R[2, 1], R[2, 2])\n y = math.atan2(-R[2, 0], sy)\n z = math.atan2(R[1, 0], R[0, 0])\n else:\n x = math.atan2(-R[1, 2], R[1, 1])\n y = math.atan2(-R[2, 0], sy)\n z = 0\n # x: roll\n # y: pitch\n # z: yaw\n return np.array([x, y, z])\n\n# http://web.mit.edu/2.05/www/Handout/HO2.PDF\ndef rpy2rm(roll, pitch, yaw):\n rotation_matrix = np.array(\n [\n [np.cos(yaw) * np.cos(pitch),\n np.cos(yaw) * np.sin(pitch) * np.sin(roll) - np.sin(yaw) * np.cos(roll),\n np.cos(yaw) * np.sin(pitch) * np.cos(roll) + np.sin(yaw) * np.sin(roll)\n ],\n [np.sin(yaw) * np.cos(pitch),\n np.sin(yaw) * np.sin(pitch) * np.sin(roll) + np.cos(yaw) * np.cos(roll),\n np.sin(yaw) * 
np.sin(pitch) * np.cos(roll) - np.cos(yaw) * np.sin(roll)\n ],\n [-np.sin(pitch),\n np.cos(pitch) * np.sin(roll),\n np.cos(pitch) * np.cos(roll)\n ]\n ])\n return rotation_matrix\n\n\ndef SE3_to_se3(SE3_matrix):\n # This liegroups lib represent se3 with first 3 element as translation, which is different than us\n se3_rot_last = liegroups.SE3.log(liegroups.SE3.from_matrix(SE3_matrix, normalize=True))\n se3 = np.zeros_like(se3_rot_last)\n se3[:3] = se3_rot_last[3:]\n se3[3:] = se3_rot_last[:3]\n return se3\n\n\ndef cart2hom(pts_3d):\n \"\"\"\n Input: nx3 points in Cartesian\n Output: nx4 points in Homogeneous by pending 1\n \"\"\"\n n = pts_3d.shape[0]\n pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))\n return pts_3d_hom\n\n\ndef project_pts_to_image(pts_3d, transform_rm_3by4, R_rect, P_rect, verbose=True):\n \"\"\"\n - R_rect_xx: 3x3 rectifying rotation to make image planes co-planar\n - P_rect_xx: 3x4 projection matrix after rectification\n :param pts_3d: (?, 3) numpy array\n :param R_rect: (3, 3) numpy array\n :param P_rect: (3, 4) numpy array\n :param transform_rm_3by4: (3, 4) numpy array\n :return:\n \"\"\"\n _transform_rm_3by4 = deepcopy(transform_rm_3by4)\n _R_rect = deepcopy(R_rect)\n _P_rect = deepcopy(P_rect)\n assert transform_rm_3by4.shape == (3, 4), \"Transform RM XYZ shape is not correct!\"\n if verbose:\n print(\"----------------PROJECT Points To Image--------------------------\")\n print(\"Input Point Number: {}\".format(pts_3d.shape[0]))\n print(\"Input Point (Cartesian) Example: {}\".format(pts_3d[0]))\n pts_3d_velo = cart2hom(pts_3d)\n if verbose:\n print(\"Homogeneous Point Example: {}\".format(pts_3d_velo[0]))\n pts_3d_ref = np.dot(pts_3d_velo, np.transpose(_transform_rm_3by4))\n # viz_combined_points(pts_3d_ref[:, :3], pts_3d[:, :3])\n # pts_3d_rect = np.transpose(np.dot(R_rect, np.transpose(pts_3d_ref)))\n pts_3d_rect = np.dot(pts_3d_ref, np.transpose(_R_rect))\n if verbose:\n print(\"pts_3d_rect: {}\".format(pts_3d_rect.shape))\n pts_3d_rect = cart2hom(pts_3d_rect)\n pts_2d = np.dot(pts_3d_rect, np.transpose(_P_rect))\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n pts_on_img = pts_2d[:, 0:3]\n return pts_on_img, pts_3d_ref\n\n\ndef generate_depth_map(pts_3d_in, transform_rm_3by4, R_rect, P_rect, H, W, verbose=False):\n # Project Points to Image, still points but x, y ,z --> u, v\n # Each points include (x, y, z, intensity)\n # When do projection, we only need (x, y, z)\n pts_3d = deepcopy(pts_3d_in)\n clip_distance = 0\n transform_rm_3by4_copy = deepcopy(transform_rm_3by4)\n pts_cam_without_intensity, pts_rec_without_intensity = project_pts_to_image(\n pts_3d=pts_3d[:, :3],\n transform_rm_3by4=transform_rm_3by4_copy,\n R_rect=R_rect,\n P_rect=P_rect,\n verbose=verbose\n )\n # Get back intensity channel\n pts_cam = np.concatenate((pts_cam_without_intensity, pts_3d[:, [-1]]), axis=-1)\n\n # Filter Points not within range\n condition = (pts_cam[:, 0] < W) & \\\n (pts_cam[:, 0] >= 0) & \\\n (pts_cam[:, 1] < H) & \\\n (pts_cam[:, 1] >= 0) & \\\n (pts_3d[:, 0] > clip_distance)\n\n pts_cam_fov = pts_cam[condition]\n pts_rec_fov = pts_rec_without_intensity[condition]\n\n x, y, z, i = pts_cam_fov[:, 0], pts_cam_fov[:, 1], pts_cam_fov[:, 2], pts_cam_fov[:, 3]\n _x, _y, _z = pts_rec_fov[:, 0], pts_rec_fov[:, 1], pts_rec_fov[:, 2],\n\n phi_ = x.astype(int)\n phi_[phi_ < 0] = 0\n phi_[phi_ >= W] = W - 1\n\n theta_ = y.astype(int)\n theta_[theta_ < 0] = 0\n theta_[theta_ >= H] = H - 1\n\n depth_map = np.zeros((H, W, 5))\n\n depth_map[theta_, phi_, 0] = _x\n 
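# channel layout: 0-2 hold the rectified-frame x, y, z; channel 3 carries\n    # intensity and channel 4 a max-normalized camera depth, both filled in below\n    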
depth_map[theta_, phi_, 1] = _y\n    depth_map[theta_, phi_, 2] = _z\n    try:\n        zcam_normed = deepcopy(z) / np.max(z)\n    except ValueError:  # np.max raises on an empty array\n        zcam_normed = np.zeros_like(z)\n    depth_map[theta_, phi_, 3] = i  # (0~1)\n    depth_map[theta_, phi_, 4] = zcam_normed  # (0~1)\n\n    return depth_map\n\n\ndef update_depth_map(depth_map, transform_rm_3by4, R_rect, P_rect, H, W):\n    \"\"\"\n    Depth maps are indexed by camera-frame pixels, but their xyz channels are encoded in the rectified frame.\n    \"\"\"\n    depth_map_in = deepcopy(depth_map)\n    transform_rm_3by4_copy = deepcopy(transform_rm_3by4)\n    pad = np.ones([H, W, 1])\n\n    # Step 1: take the xyz channels of the map\n    depth_map_xyz = depth_map_in[:, :, :3]\n    # Step 2: transform them with the given 3x4 pose\n    depth_map_xyz_hom = np.concatenate([depth_map_xyz, pad], -1)\n    depth_map_xyz_transformed = np.dot(depth_map_xyz_hom, np.transpose(transform_rm_3by4_copy))\n\n    # Step 3: rectify and project onto the camera\n    depth_map_xyz_transformed_rect = np.dot(depth_map_xyz_transformed, np.transpose(R_rect))\n    depth_map_xyz_transformed_rect_hom = np.concatenate([depth_map_xyz_transformed_rect, pad], -1)\n\n    depth_map_xyz_cam = np.dot(depth_map_xyz_transformed_rect_hom, np.transpose(P_rect))\n    depth_map_xyz_cam[:, :, 0] /= depth_map_xyz_cam[:, :, 2]\n    depth_map_xyz_cam[:, :, 1] /= depth_map_xyz_cam[:, :, 2]\n\n    # Step 4: add back intensity (re-attached below via i)\n    # depth_map_transformed = np.concatenate([depth_map_xyz_transformed, depth_map_in[:, :, [-1]]], -1)\n\n    x, y, z = depth_map_xyz_cam[:, :, 0], depth_map_xyz_cam[:, :, 1], depth_map_xyz_cam[:, :, 2]\n    _x, _y, _z = depth_map_xyz_transformed[:, :, 0], depth_map_xyz_transformed[:, :, 1], depth_map_xyz_transformed[:, :, 2]\n    i = depth_map_in[:, :, -2]\n\n    depth_map_x_cam = x.astype(int)\n    depth_map_x_cam = np.clip(depth_map_x_cam, 0, W-1)\n\n    depth_map_y_cam = y.astype(int)\n    depth_map_y_cam = np.clip(depth_map_y_cam, 0, H-1)\n\n    depth_map_next = np.zeros((H, W, 5), np.float32)\n    depth_map_next[depth_map_y_cam, depth_map_x_cam, 0] = _x\n    depth_map_next[depth_map_y_cam, depth_map_x_cam, 1] = _y\n    depth_map_next[depth_map_y_cam, depth_map_x_cam, 2] = _z\n\n    try:\n        zcam_normed = deepcopy(z) / np.max(z)\n    except ValueError:  # np.max raises on an empty array\n        zcam_normed = np.zeros_like(z)\n    depth_map_next[depth_map_y_cam, depth_map_x_cam, 3] = i\n    depth_map_next[depth_map_y_cam, depth_map_x_cam, 4] = zcam_normed\n\n    return depth_map_next","sub_path":"training_data_serialization/kitti_tools/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
{"seq_id":"623454831","text":"#!/usr/bin/python\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Flatten, Dense\n\nimport input_data\n\nmnist = input_data.read_data_sets(\"Mnist_data/\", one_hot=True)\n\nX_train = np.expand_dims(mnist.train.images.reshape(-1, 28, 28), axis=3)\nY_train = mnist.train.labels\n\nX_test = np.expand_dims(mnist.test.images.reshape(-1, 28, 28), axis=3)\nY_test = mnist.test.labels\n\nMODEL_FILENAME = \"number_model2.hdf5\"\n\n# Build the neural network!\nmodel = Sequential()\n\nmodel.add(Conv2D(32, (5, 5), padding='same', input_shape=(28,28,1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\nmodel.add(Conv2D(64, (5, 5), padding='same', activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024, activation='relu'))\n\n# Output layer with 10 nodes (one for each digit we predict)\nmodel.add(Dense(10, 
activation=\"softmax\"))\n\n# Ask Keras to build the TensorFlow model behind the scenes\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\n# Train the neural network\nmodel.fit(X_train, Y_train, validation_data=(X_test, Y_test), batch_size=50, epochs=3, verbose=1)\n\n# Save the trained model to disk\nmodel.save(MODEL_FILENAME)\n","sub_path":"keras_train_deep.py","file_name":"keras_train_deep.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"286973557","text":"\nimport matplotlib.pyplot as plt\n\ndef OneCyclePolicy(LRmax, step, iterations):\n LRmin = LRmax/10;\n LRvalues = []\n for x in range(0,iterations+1):\n cycle = int(1+(x/(2*step)))\n a = abs((x/step)-(2*cycle)+1)\n LRt = LRmin + ((LRmax - LRmin)*(1-a))\n LRvalues.append(LRt)\n return LRvalues\nprint (OneCyclePolicy(1,5,30))","sub_path":"Assignment 11/OneCyclePolicy.py","file_name":"OneCyclePolicy.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"129264654","text":"from collections import OrderedDict\n\nfrom flask import Blueprint, current_app as app, url_for, redirect\n\nfrom ..domain import Image\n\n\nblueprint = Blueprint('link', __name__, url_prefix=\"/\")\n\n\n@blueprint.route(\"//\")\ndef get(**kwargs):\n data = OrderedDict()\n data['visible'] = OrderedDict()\n data['hidden'] = OrderedDict()\n for kind in Image.KINDS:\n url = url_for('image.get', kind=kind, _external=True, **kwargs)\n data['visible'][kind] = url\n code = app.link_service.encode(**kwargs)\n url = url_for('image.get_encoded', kind=kind, _external=True, code=code)\n data['hidden'][kind] = url\n return data\n\n\n@blueprint.route(\"\")\ndef get_encoded(code):\n key, top, bottom = app.link_service.decode(code)\n url = url_for('.get', key=key, top=top, bottom=bottom)\n return redirect(url)\n","sub_path":"memegen/routes/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"621845048","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2017-03-23 21:13:03\n# @Author : AlexanderZzzz (hjzhu@uvic.ca)\n# @Link : ${link}\n# @Version : v.0.9.9\n\nimport os, time, math, csv\nimport pandas as pd\n\nprint('Iutput file name in Output folder:')\nINPUT_FILENAME =str(os.getcwd() + '\\\\Input\\\\' + input())\nprint('Output file name in Output folder:')\nOUTPUT_FILENAME = str(os.getcwd() + '\\\\Output\\\\' + input())\nOUTPUT_FILENAME_TEMP = OUTPUT_FILENAME + \"_TEMP\"\n##################################\ndef str_recorder(strings, input_item):\n fir_char_flag = 0\n sec_char_flag = 0\n new_strings = input_item[0]\n strings = strings.replace('\\n', '')\n for idx, item in enumerate(strings):\n #print('item:',item,input_item[0],input_item[1])\n #print(sec_char_flag, fir_char_flag)\n if not ((item == input_item[1]) and (fir_char_flag == 1) and (sec_char_flag == 0)):\n fir_char_flag = 0\n else:\n sec_char_flag = 1\n if not ((item == input_item[0]) and (fir_char_flag == 0)):\n fir_char_flag = 0\n else:\n fir_char_flag = 1\n if not sec_char_flag == 1:\n pass\n else:\n new_strings = new_strings + item\n # print('copy')\n # new_strings = new_strings.replace(\"[\", \"\").split(',') # this is a list\n\n return new_strings, sec_char_flag\n##################################\n\n##################################\ndef 
line_counter(filename):\n\n    # find out the line number\n    with open(filename, encoding='cp1256') as csvfile:\n        count = 0\n        for i, l in enumerate(csvfile):\n            count = i\n        count = count + 1\n        csvfile.close()\n    return count\n##################################\n## function start from here.########################################\ndef processing_data(df):\n    # df: slice of rows between two consecutive GPZDA fixes\n    # GPZDA: the first row of each slice carries the UTC time and date\n    GPS = df.head(1).values.tolist()\n    GPZDA = GPS[0]\n    tim = GPZDA[1]\n\n    # lat = GPRMC[3]\n    # lat_direction = GPRMC[4]\n    # lon = GPRMC[5]\n    # lon_direction = GPRMC[6]\n    # speed = GPRMC[7]\n    date = GPZDA[4]+'-'+GPZDA[3]+'-'+GPZDA[2]\n\n    # ('T:',date, time)\n    # merge the time and date columns into one Python datetime object\n    # date_and_time = datetime.strptime(date + ' ' + time, '%d%m%y %H%M%S.%f')\n    # date_and_time = date_and_time.strftime('%y-%m-%d %H:%M:%S.%f')[:-3] # [:-3] cuts off the last three characters (trailing zeros from the fractional seconds)\n    date_m = date\n    t = pd.to_datetime(tim, format='%H%M%S.%f')  # pd.datetime.strptime is gone in newer pandas\n    time_m = t.strftime('%H:%M:%S.%f')[:-7]\n\n\n\n    # if you prefer km/h, then use 1.852 instead of 1 in the formula below:\n    # speed = int(round(float(speed) * 1, 0))\n    mydf_GPGLL = df[df.item0 == 'GPGLL']\n    lat = pd.to_numeric(mydf_GPGLL['item1'],errors='coerce', downcast='float').mean()\n    lon = pd.to_numeric(mydf_GPGLL['item3'],errors='coerce', downcast='float').mean()\n    lat = round(math.floor(float(lat) / 100) + (float(lat) % 100) / 60, 6)\n    lon = round(math.floor(float(lon) / 100) + (float(lon) % 100) / 60, 6)\n    lon = lon * -1  # W: western longitudes are negative\n\n    mydf_speed = df[df.item0 == 'GPVTG']\n    speed = pd.to_numeric(mydf_speed['item5'],errors='coerce', downcast='float').mean()\n    # item 7 :speed in km/h\n    # item 5 :speed in knots\n\n\n    time_spd_lat_lon = [date_m, time_m, lat, lon, speed]\n    # print(time_spd_lat_lon)\n    # HEHDT, true heading\n    # Column \tName \t Description \t Example Data\n    # 1 Sentence Identifier \tHeading, True \t $HEHDT\n    # 2 \t Heading, true \t Heading in degrees, true \t 064.3\n    # 3 \t True Designation \t Static Text designating the heading is in reference to true North \tT\n    # 4 \t Checksum \t 2-byte XOR sum of data to check for transmission errors \t *2E\n    mydf_HEHDT = df[df.item0 == 'HEHDT']\n    heading = pd.to_numeric(mydf_HEHDT['item1'],errors='coerce', downcast='float').mean()\n    # print('HEADING', heading)\n\n\n    # HEROT, turn rate\n    # Column \tName \t Description \t Example Data\n    # 1 \t Sentence Identifier Rate of Turn \t $HEROT\n    # 2 \t Rate of turn \t Rate of turn in degrees/min, “-” means turning to port \t0007.8\n    # 3 \t Data Valid \t Is data valid (A) or void (V) \t A\n    # 4 \t Checksum \t 2-byte XOR sum of data to check for transmission errors \t*7B\n    mydf_HEROT = df[df.item0 == 'HEROT']\n    turn_rate = pd.to_numeric(mydf_HEROT['item1'],errors='coerce', downcast='float').mean()\n    # print('TURN RATE', turn_rate)\n\n\n    # ANL01\n    mydf_ANL01 = df[df.item0 == 'ANL01']\n    ANL01_1 = pd.to_numeric(mydf_ANL01['item1'],errors='coerce', downcast='float').mean()\n    ANL01_2 = pd.to_numeric(mydf_ANL01['item2'],errors='coerce', downcast='float').mean()\n    ANL01_3 = pd.to_numeric(mydf_ANL01['item3'],errors='coerce', downcast='float').mean()\n    ANL01_4 = pd.to_numeric(mydf_ANL01['item4'],errors='coerce', downcast='float').mean()\n    ANL01_5 = pd.to_numeric(mydf_ANL01['item5'],errors='coerce', downcast='float').mean()\n    ANL01_6 = pd.to_numeric(mydf_ANL01['item6'],errors='coerce', downcast='float').mean()\n    ANL01_7 = pd.to_numeric(mydf_ANL01['item7'],errors='coerce', downcast='float').mean()\n    ANL01_8 = 
pd.to_numeric(mydf_ANL01['item8'],errors='coerce', downcast='float').mean()\n    ANL01 = [ANL01_1, ANL01_2, ANL01_3, ANL01_4, ANL01_5, ANL01_6, ANL01_7, ANL01_8]\n    # ANL01 = ANL01_1 + ANL01_2 + ANL01_3 + ANL01_4 + ANL01_5 + ANL01_6 + ANL01_7 + ANL01_8\n    # ANL02\n    mydf_ANL02 = df[df.item0 == 'ANL02']\n    ANL02_1 = pd.to_numeric(mydf_ANL02['item1'],errors='coerce', downcast='float').mean()\n    ANL02_2 = pd.to_numeric(mydf_ANL02['item2'],errors='coerce', downcast='float').mean()\n    ANL02_3 = pd.to_numeric(mydf_ANL02['item3'],errors='coerce', downcast='float').mean()\n    ANL02_4 = pd.to_numeric(mydf_ANL02['item4'],errors='coerce', downcast='float').mean()\n    ANL02_5 = pd.to_numeric(mydf_ANL02['item5'],errors='coerce', downcast='float').mean()\n    ANL02_6 = pd.to_numeric(mydf_ANL02['item6'],errors='coerce', downcast='float').mean()\n    ANL02_7 = pd.to_numeric(mydf_ANL02['item7'],errors='coerce', downcast='float').mean()\n    ANL02_8 = pd.to_numeric(mydf_ANL02['item8'],errors='coerce', downcast='float').mean()\n    # ANL02 = ANL02_1+ ANL02_2+ ANL02_3+ ANL02_4+ ANL02_5 + ANL02_6 + ANL02_7 + ANL02_8\n    ANL02 = [ANL02_1, ANL02_2, ANL02_3, ANL02_4, ANL02_5, ANL02_6, ANL02_7, ANL02_8]\n    print(ANL02)\n    # construct the output\n    output_string = time_spd_lat_lon + [heading] + [turn_rate] + ANL01 + ANL02\n    # print('Out',output_string)\n    return output_string\n\n###############################################################\nstart_time = time.time()\ndata = ['GP', 'HE', 'AN']\ncount = line_counter(INPUT_FILENAME)\ncount = float(count)\n# create temp output file object\nspamWriter = csv.writer(open(OUTPUT_FILENAME_TEMP, 'w', newline='', encoding='utf-8'))\nspamWriter.writerow(['item0', 'item1', 'item2', 'item3', 'item4', 'item5', 'item6', 'item7', 'item8', 'item9', 'item10', 'item11', 'item12', 'item13', 'item14', 'item15', 'item16', 'item17', 'item18', 'item19'])\n# process rows\nwith open(INPUT_FILENAME, encoding='cp1256') as csvfile:\n    for idx, row in enumerate(csvfile):\n        strings = ''\n        for prefix in data:\n            newstrings, flag = str_recorder(row, prefix)\n            if flag == 1:\n                strings = newstrings\n        temp = strings.replace(\"[\", \"\").split(',') # this is a list\n        if (len(temp) > 2):\n            spamWriter.writerow(temp)\n        print('\\r', \"{:5.1f}\".format(idx/count*100), '%', 'Working on line:', idx, end='', flush=True)\ncsvfile.close()\ncount2 = line_counter(OUTPUT_FILENAME_TEMP)\nelapsed_time = time.time() - start_time\ncount = int(count)\n\nprint()\nprint('******************* STAGE ONE FINISHED *******************')\nprint('Input file name and location:',INPUT_FILENAME)\nprint('Output file name and location:', OUTPUT_FILENAME_TEMP)\nprint('Processed', count, 'lines', '; Dropped', count - count2, 'lines')\nprint('******************* STAGE TWO START *******************')\n\n\n\n\n\n\n\n# create output file object\noutput_file = open(OUTPUT_FILENAME, 'wt')\nwriter = csv.writer(output_file, delimiter=',', lineterminator='\\n')\nwriter.writerow(['Date','Time', 'Latitude', 'Longitude', 'Speed in knots', 'Heading', 'Turn rate', 'ANL0_1', 'ANL0_2', 'ANL0_3', 'ANL0_4', 'ANL0_5', 'ANL0_6', 'ANL0_7', 'ANL0_8', 'ANL1_1', 'ANL1_2', 'ANL1_3', 'ANL1_4', 'ANL1_5', 'ANL1_6', 'ANL1_7', 'ANL1_8'])\n# read csv file as DataFrame\ncsv_data = pd.read_csv(OUTPUT_FILENAME_TEMP)\n# create the boundaries: each GPZDA fix starts a new averaging window\nrow_number = csv_data[csv_data.item0 == 'GPZDA']\nrow_num = row_number.index.values\nrow_length = len(row_num)\nprint('Total length:',row_length)\nfor idx, row in enumerate(row_num):\n    if idx >= 
row_length-2:\n break\n else :\n start_row = row_num[idx]\n end_row = row_num[idx+1]\n df = csv_data[start_row:end_row]\n strings = processing_data(df)\n writer.writerow(strings)\n print('\\r', \"{:5.1f}\".format(idx/row_length*100), '%', idx,end='', flush=True)\n# row_number.to_csv(OUTPUT_FILENAME)\n\n# strings_data=\nprint()\nelapsed_time = time.time() - start_time\nprint('..........................FINISHED..........................')\nprint('Input file name and location:',INPUT_FILENAME)\nprint('Output file name and location:', OUTPUT_FILENAME)\nprint('Time elapsed:','{:10.4f}'.format(elapsed_time), 'sec')\n","sub_path":"NMEA0183toCSV/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":9723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432571539","text":"from src.scripts import ZISweeper\nimport time\nfrom src.core import Script, Parameter, plotting\nfrom PySide.QtCore import Signal, QThread\nfrom collections import deque\nimport numpy as np\nfrom copy import deepcopy\n\nclass ZISweeperHighResolution(Script, QThread):\n updateProgress = Signal(int)\n\n _DEFAULT_SETTINGS = Parameter([\n Parameter('path', 'C:\\\\Users\\\\Experiment\\\\Desktop\\\\tmp_data\\\\fast', str, 'path to folder where data is saved'),\n Parameter('tag', 'some_name'),\n Parameter('save', True, bool,'check to automatically save data'),\n Parameter('high_res_df', 1000, float, 'frequency step of high res. scan'),\n Parameter('high_res_N', 21, int, 'number of data points of high res. scan'),\n ])\n\n _INSTRUMENTS = {}\n\n _SCRIPTS = {'zi sweep' : ZISweeper}\n\n def __init__(self, scripts, name = None, settings = None, log_output = None, timeout = 1000000000):\n self._recording = False\n self._timeout = timeout\n\n Script.__init__(self, name, settings, scripts = scripts, log_output = log_output)\n QThread.__init__(self)\n\n self.data = deque()\n\n # todo: clean this up! and plot data in gui!\n self._sweep_values = {'frequency' : [], 'x' : [], 'y' : [], 'phase': [], 'r':[]}.keys()\n\n\n def _receive_signal(self, progess_sub_script):\n # calculate progress of this script based on progress in subscript\n\n if self.current_subscript == 'quick scan':\n progress = int(self.weights['quick scan'] * progess_sub_script)\n elif self.current_subscript == 'high res scan':\n progress = int(self.weights['quick scan']*100 + self.weights['high res scan'] * progess_sub_script)\n else:\n progress = None\n # if calculated progress is 100 force it to 99, because we still have to save before script is finished\n if progress>= 100:\n progress = 99\n\n if progress is not None:\n self.updateProgress.emit(progress)\n\n if progess_sub_script == 100:\n self.current_subscript = None\n\n def _function(self):\n \"\"\"\n This is the actual function that will be executed. 
It uses only information that is provided in the settings property,\n        and will be overwritten in the __init__\n        \"\"\"\n\n\n\n        def calculate_weights():\n            \"\"\"\n            calculate a weight proportional to the expected duration of the two steps in the\n            script\n\n            Returns: weights as a dictionary for the two steps\n\n            \"\"\"\n            weights = {}\n\n\n            # estimate run time of step 1 (fast sweep)\n            f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n            N_samples = sweeper_script.settings['samplecount']\n            df = f_range / N_samples\n\n            t = N_samples / df\n\n            weights['quick scan'] = t\n\n            # estimate run time of step 2 (high res sweep)\n            df = self.settings['high_res_df']\n            N_samples = self.settings['high_res_N']\n\n            t = N_samples / df\n\n            weights['high res scan'] = t\n\n\n            total_time = sum([v for k, v in weights.items()])  # dict.iteritems() is Python 2 only\n\n            weights = {k: v/total_time for k, v in weights.items()}\n\n            print('weights', weights)\n\n            return weights\n\n        def run_scan(name):\n            self.current_subscript = name\n            sweeper_script.start()\n            while self.current_subscript == name:\n                time.sleep(0.1)\n\n        def calc_new_range():\n\n\n            df = self.settings['high_res_df']\n            N = self.settings['high_res_N']\n\n            r = sweeper_script.data[-1]['r']\n            freq = sweeper_script.data[-1]['frequency']\n            freq = freq[np.isfinite(r)]\n            r = r[np.isfinite(r)]\n\n            fo = freq[np.argmax(r)]\n\n            f_start, f_end = fo - N/2 * df, fo + N/2 * df\n\n\n            # make sure that we convert back to native python types (numpy float types don't pass the Parameter validation)\n            return float(f_start), float(f_end), int(N)\n\n\n        sweeper_script = self.scripts['zi sweep']\n        # save initial settings, so that we can reset at the end of the script\n        initial_settings = deepcopy(sweeper_script.settings)\n        self.weights = calculate_weights()\n\n        # take the signal from the subscript and route it to a function that takes care of it\n        sweeper_script.updateProgress.connect(self._receive_signal)\n\n        print('====== start quick scan ============')\n\n        run_scan('quick scan')\n\n        print('====== calculate new scan range ====')\n        f_start, f_stop, N = calc_new_range()\n\n        print('f_start, f_stop, N', f_start, f_stop, N)\n\n        print('====== update sweeper ==============')\n        sweeper_script.update({\n            'start' : f_start,\n            'stop' : f_stop,\n            'samplecount' : N\n        })\n\n        print('====== start high res scan =========')\n        # print(sweeper_script.sweeper.finished())\n        # print(sweeper_script.sweeper.progress())\n\n        run_scan('high res scan')\n\n        sweeper_script.updateProgress.disconnect()\n        self.data = sweeper_script.data[-1]\n\n        self._recording = False\n\n        if self.settings['save']:\n            self.save()\n\n        # set the sweeper script back to initial settings\n        sweeper_script.update(initial_settings)\n        # make sure that progress is set to 100 because we check that in the old_gui\n        self.updateProgress.emit(100)\n\n\n    def plot(self, axes):\n        if self.current_subscript == 'quick scan' and self.scripts['zi sweep'].data:\n            self.scripts['zi sweep'].plot(axes)\n        elif self.current_subscript in ('high res scan', None) and self.data:\n            r = self.data['r']\n            freq = self.data['frequency']\n            freq = freq[np.isfinite(r)]\n            r = r[np.isfinite(r)]\n            plotting.plot_psd(freq, r, axes, False)\n\n","sub_path":"src/scripts/zi_high_res_sweep.py","file_name":"zi_high_res_sweep.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"411281911","text":"import sys\n\n\ndef get_user_input():\n    try:\n        user_input = int(input(\"Type pokedex entry number: \"))\n
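    # int() raises ValueError on non-numeric input; the handler below reports it and exits\n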
    except ValueError:\n        print(\"Usage error: Please only type in integers! \\n\", file=sys.stderr)\n        sys.exit()\n    if(user_input <= 0):\n        print(\"Usage error: Please only type positive integers! \\n\", file=sys.stderr)\n        sys.exit()\n    return user_input\n\n\ndef get_box_number(pokedex_number):\n    modulo_remainder = pokedex_number % 30\n    if(modulo_remainder == 0):\n        box_number = int(pokedex_number / 30)\n    else:\n        box_number = int(pokedex_number / 30) + 1\n    print(\"Box number: \" + str(box_number) + \"\\n\")\n\n\ndef main():\n    pokedex_number = get_user_input()\n    get_box_number(pokedex_number)\n\n\nif __name__ == '__main__':\n    while(True):\n        main()\n","sub_path":"pokedexSorter/Python/pokedex.py","file_name":"pokedex.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"570212221","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 2 01:45:54 2020\n\n@author: Larry Juang\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\n\n\ndef assets_list(ext=\"\"):\n    \"Returns files with an extension\"\n    return [f[5:-4] for f in glob.glob(\"Data/\" + f\"*{ext}\")]\n\ndef backtest(daily_fluc, allocation, commission):\n    \"\"\"the decision for the investment allocation was already made on the 0th, (i-1)th day\"\"\"\n    \"\"\"the gain/loss occurs on the 1st, ith day\"\"\"\n    assert len(daily_fluc) == len(allocation)  # daily_fluc is the series of daily returns\n    weights = np.array([-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1])  # positions from -100% to +100% in 25% steps; the minus signs on -0.5 and -0.25 were missing\n    total_return = 1\n    max_return = 1\n    drawdown = []\n    return_series = []\n    for i in range(len(daily_fluc)):\n        pos = allocation[i]\n        assert(np.sum(pos) == 1)\n        transaction = 0\n        if i > 0:\n            if not np.array_equal(pos, allocation[i-1]):\n                transaction = 1\n\n        total_return = total_return*(1+np.sum(weights*pos)*daily_fluc[i]) - transaction*commission\n        max_return = max(max_return, total_return)\n        drawdown.append(total_return/max_return)\n        return_series.append(total_return)\n        total_return = max(0, total_return)\n\n\n    return total_return, np.array(drawdown), np.array(return_series)\n\ndef data_processing(ticker = None):\n    try:\n        assert ticker is not None\n        assert ticker in assets_list(\"csv\")\n        data = pd.read_csv(\"Data/\" + ticker + \".csv\")\n        data[\"Date\"] = pd.to_datetime(data[\"Date\"])\n        data[\"Adj Close Yesterday\"] = data[\"Adj Close\"].shift(1)\n        data[\"High 5\"] = data[\"Adj Close\"].rolling(5).max()\n        data[\"High 10\"] = data[\"Adj Close\"].rolling(10).max()\n        data[\"High 20\"] = data[\"Adj Close\"].rolling(20).max()\n        data[\"High 50\"] = data[\"Adj Close\"].rolling(50).max()\n        data[\"High 75\"] = data[\"Adj Close\"].rolling(75).max()\n        data[\"High 100\"] = data[\"Adj Close\"].rolling(100).max()\n        data[\"High 125\"] = data[\"Adj Close\"].rolling(125).max()\n        data[\"Low 20\"] = data[\"Adj Close\"].rolling(20).min()\n        data[\"Low 30\"] = data[\"Adj Close\"].rolling(30).min()\n        data[\"Low 40\"] = data[\"Adj Close\"].rolling(40).min()\n        data[\"Low 50\"] = data[\"Adj Close\"].rolling(50).min()\n        data[\"Low 75\"] = data[\"Adj Close\"].rolling(75).min()\n        data[\"Low 100\"] = data[\"Adj Close\"].rolling(100).min()\n        data[\"Low 125\"] = data[\"Adj Close\"].rolling(125).min()\n        data[\"Daily Fluctuation\"] = (data[\"Adj Close\"].values-data[\"Adj Close Yesterday\"].values)/data[\"Adj Close Yesterday\"].values\n        data = data.dropna()\n        data = data.reset_index(drop = True)\n\n        return data\n    except:\n        print(\"The ticker is 
incorrect, or does not exist in Data folder.\")\n\ndef action_generation(data):\n # this function takes in the expanded data set and generate the action \n # sequence based on the rules. Action sequence is [0,0,1,1,2,2,1,1,0,-1], etc\n action = [0]\n for n in range(1, len(data)):\n if (data[\"Adj Close\"][n] >= data[\"High 50\"][n]) and (action[n-1] == 0):\n position = 1\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] >= data[\"High 75\"][n]) and (action[n-1] == 1):\n position = 2\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] >= data[\"High 100\"][n]) and (action[n-1] == 2):\n position = 3\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] >= data[\"High 125\"][n]) and (action[n-1] == 3):\n position = 4\n action.append(position)\n continue \n if (data[\"Adj Close\"][n] <= data[\"Low 20\"][n]) and (action[n-1] > 0):\n position = action[n-1] - 1\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] <= data[\"Low 30\"][n]) and (action[n-1] == 0):\n position = -1\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] <= data[\"Low 40\"][n]) and (action[n-1] == -1):\n position = -2\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] <= data[\"Low 50\"][n]) and (action[n-1] == -2):\n position = -3\n action.append(position)\n continue\n if (data[\"Adj Close\"][n] <= data[\"Low 75\"][n]) and (action[n-1] == -3):\n position = -4\n action.append(position)\n continue \n if (data[\"Adj Close\"][n] >= data[\"High 5\"][n]) and (action[n-1] < 0):\n position = action[n-1] + 1\n action.append(position)\n continue \n position = action[n-1]\n action.append(position)\n return action\n\ndef action_allocation_conversion(action):\n # each action is a integer, e.g. -2 or 3.\n # This function converts the action into the 9 bits allocation array.\n allocation = []\n for n in range(len(action)):\n if action[n] == 0:\n allocation.append(np.array([0, 0, 0, 0, 1, 0, 0, 0, 0]))\n continue\n if action[n] == 1:\n allocation.append(np.array([0, 0, 0, 0, 0, 1, 0, 0, 0]))\n continue\n if action[n] == 2:\n allocation.append(np.array([0, 0, 0, 0, 0, 0, 1, 0, 0]))\n continue\n if action[n] == 3:\n allocation.append(np.array([0, 0, 0, 0, 0, 0, 0, 1, 0]))\n continue\n if action[n] == 4:\n allocation.append(np.array([0, 0, 0, 0, 0, 0, 0, 0, 1]))\n continue\n if action[n] == -1:\n allocation.append(np.array([0, 0, 0, 1, 0, 0, 0, 0, 0]))\n continue\n if action[n] == -2:\n allocation.append(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0]))\n continue\n if action[n] == -3:\n allocation.append(np.array([0, 1, 0, 0, 0, 0, 0, 0, 0]))\n continue\n if action[n] == -4:\n allocation.append(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0]))\n continue\n return allocation\n\ndef main(ticker = \"SPY\"):\n data = data_processing(ticker)\n action = action_generation(data)\n allocation = action_allocation_conversion(action)\n total_return, drawdown, return_series = backtest(data[\"Daily Fluctuation\"], allocation, 0.0003)\n # Plot the Asset's historic return\n plt.figure(1)\n plt.plot(data[\"Date\"], data[\"Adj Close\"])\n plt.title(\"Historic Adj. 
Close\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Price\")\n \n # Plot the strategy's backtested return\n plt.figure(2)\n plt.plot(data[\"Date\"], return_series)\n plt.title(\"Backtested Strategy Return\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Return\")\n \n # Plot the Strategy's backtested drawdown\n plt.figure(3)\n plt.plot(data[\"Date\"], drawdown)\n plt.title(\"Backtested Strategy Drawdown\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Drawdown\") \n \n \n # Plot the Strategy's Action over the backtested period\n plt.figure(4)\n plt.plot(data[\"Date\"], np.array(action)/4)\n plt.title(\"Backtested Strategy Action\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Allocated Portion\") \n\nif __name__ == \"__main__\":\n #main()\n main() \n \n \n \n \n\n \n\n \n \n \n \n\n\n \n\n \n\n","sub_path":"Donchian with Future Knowledge Unrealistic Script.py","file_name":"Donchian with Future Knowledge Unrealistic Script.py","file_ext":"py","file_size_in_byte":7325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"445463415","text":"\nfrom g1.arithmeticgenome import ArithmeticGenome\nfrom g1.population import Population, PopulationAndSelectionConfig\nfrom g1.multithreading import PrintThread, ConstantDiscoveryTask, TaskRunner, createAndStartPrintAndTaskQueues, goGentleIntoThatGoodNight\nfrom g1.individual import Individual\n\nimport random\nimport logging\n# import cProfile, pstats\nimport datetime, time\n\n### Setup\nrandom.seed()\n\nlogFormat = '%(asctime)-15s %(message)s'\nlogging.basicConfig(format=logFormat)\nsystemLog = logging.getLogger(__name__)\nsystemLog.setLevel(logging.WARNING)\n\nst = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')\ndataLogFileName = 'data/Log0.333.' + st + \".tsv\"\n\n# Creates queues which facilitate multi-threaded execution of populations simultaneously, and safe multi-threaded logging\ntaskQueue, printQueue = createAndStartPrintAndTaskQueues(dataLogFileName, systemLog, threads=1)\n\n### Example to discover a constant value, looping through different dna lengths, with multi-threading\n\ndef problem(dummmy):\n return 3.141\n\npopulationSize=60\niterations = 500\n\nfor dnaLength in range(10,51,5):\n for k in range(0,100):\n populationConfig = PopulationAndSelectionConfig(populationSize,0.0001, 0.33, 2, 0.16, 0.32, 0, 0.33, 1, 1, 1, 1, 0, 1, 0.25, 0.25, 0.4, 0.5, 1, 0)\n genomeConfig = {\"length\" : dnaLength}\n t = ConstantDiscoveryTask(problem, ArithmeticGenome, genomeConfig, populationConfig, iterations)\n taskQueue.put(t)\n\n\n# Wait for everything to finish, and close peacefully. 
# Wait for everything to finish, and close peacefully. Without this, the end of execution of the main thread would kill off all other threads that are probably still running\ngoGentleIntoThatGoodNight(taskQueue, printQueue)\n","sub_path":"multiThreadedConstantDiscovery.py","file_name":"multiThreadedConstantDiscovery.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"600452793","text":"# Recursively search .txt files under the current directory for a keyword\r\n\r\nimport os\r\ndef is_txt(file, key):\r\n    f = open(file)\r\n    count = 0\r\n    s = []\r\n    tem = 0\r\n    dictionary = {}\r\n\r\n    for each_line in f:\r\n        count += 1\r\n        if key in each_line:\r\n            tem = each_line.find(key, tem)\r\n            s.append(tem+1)\r\n            while tem != -1:\r\n                tem = each_line.find(key, tem+1)\r\n                if tem != -1:\r\n                    s.append(tem+1)\r\n            dictionary[count] = s  # only record lines that actually contain the keyword\r\n        s = []\r\n        tem = 0\r\n    f.close()\r\n    #print(s)\r\n    return dictionary\r\n\r\n\r\ndef find_file(key):\r\n    all_file = os.walk(os.getcwd())\r\n    for i in all_file:\r\n        for file_ in i[2]:\r\n            if os.path.splitext(file_)[1] == '.txt':\r\n                file = os.path.join(i[0], file_)\r\n                dictionary = is_txt(file, key)\r\n\r\n                print('=============================================')\r\n                print(file)\r\n                go(dictionary)\r\n\r\ndef go(dictionary):\r\n    keys = dictionary.keys()\r\n    keys = sorted(keys)\r\n    for i in keys:\r\n        print('line', i, ', positions:', str(dictionary[i]))\r\n\r\n\r\nkey = input('Keyword: ')\r\nfind_file(key)  # was find_file('x'), which ignored the entered keyword\r\ninput()\r\n","sub_path":"file/关键字curdir&txt.py","file_name":"关键字curdir&txt.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"82211507","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\opentnsim\\cli.py\n# Compiled at: 2019-07-18 03:25:05\n# Size of source mod 2**32: 775 bytes\n\"\"\"Console script for opentnsim.\"\"\"\nimport sys, click, opentnsim.server\n\n@click.group()\ndef cli(args=None):\n    \"\"\"OpenTNSim simulation.\"\"\"\n    click.echo('Replace this message by putting your code into opentnsim.cli.main')\n    click.echo('See click documentation at http://click.pocoo.org/')\n    return 0\n\n\n@cli.command()\n@click.option('--host', default='0.0.0.0')\n@click.option('--port', default=5000, type=int)\n@click.option('--debug/--no-debug', default=False)\ndef serve(host, port, debug, args=None):\n    \"\"\"Run a flask server with the backend code\"\"\"\n    app = opentnsim.server.app\n    app.run(host=host, debug=debug, port=port)\n\n\nif __name__ == '__main__':\n    sys.exit(cli())","sub_path":"pycfiles/opentnsim-0.0.1-py3.7/cli.cpython-37.py","file_name":"cli.cpython-37.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"389038622","text":"\"\"\"\nCode modified from: https://github.com/alshedivat/lola/tree/master/lola\n\"\"\"\nimport numpy as np\nfrom gym.envs.multi_agent.matrix_social_dilemma import MatrixSocialDilemma\n\ndef test_MatrixSocialDilemma():\n    n_test_games=20\n    n_test_step=5\n    # Play n games\n    for i in range(n_test_games):\n        payout_matrix = np.random.randint(-10, 10, (2,2))\n        social_dilemma = MatrixSocialDilemma(payout_matrix=payout_matrix)\n        o = social_dilemma.reset()\n\n        for agent_num in range(len(o)):\n            assert o[agent_num] == (social_dilemma.NUM_STATES -1)\n\n        # Play n steps\n        for n in range(n_test_step):\n            action = np.random.randint(0, 2, (2,)).tolist()\n
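            # one random binary action per agent in the two-player matrix game\n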
            o, r, done, info = social_dilemma.step(action=action)\n\n            # Assume 2 agents\n            for agent_num in range(len(r)):\n                current_agent_a = action[agent_num]\n                other_agent_a = action[(agent_num +1 ) % len(r)]\n                assert (r[agent_num] ==\n                        social_dilemma.payout_mat[current_agent_a][other_agent_a])\n","sub_path":"gym/envs/tests/multi_agent/test_maxtric_social_dilemma.py","file_name":"test_maxtric_social_dilemma.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"120247630","text":"a = int(input(\"Enter a number: \"))\nlist_of_tasks = []\nwhile a != 3:\n    if a == 1:\n        task = \"Task: \" + input(\"Describe the task: \") + \" Category: \" + input(\"Add a category for the task: \") + \" Date: \" + input(\"Add a time for the task: \")\n        list_of_tasks.append(task)\n    if a == 2:\n        print(list_of_tasks)\n    print('''Simple todo:\n    1. Add a task.\n    2. Show the task list.\n    3. Exit.''')\n    a = int(input(\"Enter a number: \"))\n\n","sub_path":"HW/HW_07/my_tasks.py","file_name":"my_tasks.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"240233265","text":"#!/usr/bin/env python3\n\nname = \"antenna_el_commander_pid\"\n\nimport math\nimport time\nimport rospy\nimport std_msgs.msg\n\n\nclass antenna_el_feedback(object):\n\n    speed_d = 0.0\n    pre_arcsec = 0.0\n    pre_hensa = 0.0\n    enc_before = 0.0\n    ihensa = 0.0\n    t_now = t_past = 0.0\n\n    arcsec_enc = 0.0\n\n    p_coeff = 2.2\n    i_coeff = 0.0\n    d_coeff = 0.0\n\n    lock = False\n\n    def __init__(self):\n        self.topic_to = rospy.Publisher(\n            name = \"el_speed\",\n            data_class = std_msgs.msg.Float64,\n            queue_size = 1,\n        )\n\n        topic_from1 = rospy.Subscriber(\n            name = \"el_cmd2\",\n            data_class = std_msgs.msg.Float64,\n            callback = self.antenna_el_feedback,\n            queue_size = 1,\n        )\n\n        topic_from2 = rospy.Subscriber(\n            name = \"el\",\n            data_class = std_msgs.msg.Float64,\n            callback = self.antenna_el_encoder,\n            queue_size = 1,\n        )\n\n        topic_from_pid = rospy.Subscriber(\n            name = \"el_pid\",\n            data_class = std_msgs.msg.Float32MultiArray,\n            callback = self.antenna_el_pid,\n            queue_size = 1,\n        )\n\n        topic_from_lock = rospy.Subscriber(\n            name = \"el_lock\",\n            data_class = std_msgs.msg.Bool,\n            callback = self.antenna_el_lock,\n            queue_size = 1,\n        )\n\n        pass\n\n    def antenna_el_feedback(self, command):\n        MOTOR_MAXSTEP = 1000\n        MOTOR_el_MAXRATE = 10000\n        rate_to_arcsec = (12/7)*(3600/10000)\n\n        arcsec_cmd = command.data * 3600.\n\n        # for el >= 180*3600 and el <= -180*3600\n        if self.arcsec_enc > 40*3600 and arcsec_cmd+360*3600 < 220*3600:\n            arcsec_cmd += 360*3600\n        elif self.arcsec_enc < -40*3600 and arcsec_cmd-360*3600 > -220*3600:\n            arcsec_cmd -= 360*3600\n\n        if self.t_past == 0.0:\n            self.t_past = time.time()\n        else:\n            pass\n        self.t_now = time.time()\n\n        ret = calc_pid(arcsec_cmd, self.arcsec_enc,\n                       self.pre_arcsec, self.pre_hensa, self.ihensa, self.enc_before,\n                       self.t_now, self.t_past,\n                       self.p_coeff, self.i_coeff, self.d_coeff)\n\n        speed = ret[0]\n\n
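        # calc_pid returns [rate, accumulated integral error]; keep the integral term for the next cycle\n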
        # update\n        self.pre_hensa = arcsec_cmd - self.arcsec_enc\n        self.pre_arcsec = arcsec_cmd\n        self.enc_before = self.arcsec_enc\n        self.ihensa = ret[1]\n        self.t_past = self.t_now\n\n        # limit of acceleration\n        if abs(speed - self.speed_d) < MOTOR_MAXSTEP*rate_to_arcsec:\n            self.speed_d = speed\n        else:\n            if (speed - self.speed_d) < 0:\n                a = -1\n            else:\n                a = 1\n            self.speed_d += a*MOTOR_MAXSTEP*rate_to_arcsec\n\n        # limit of max speed\n        if self.speed_d > MOTOR_el_MAXRATE*rate_to_arcsec:\n            self.speed_d = MOTOR_el_MAXRATE*rate_to_arcsec\n        if self.speed_d < -MOTOR_el_MAXRATE*rate_to_arcsec:\n            self.speed_d = -MOTOR_el_MAXRATE*rate_to_arcsec\n\n        command_speed = self.speed_d\n\n        if self.lock:\n            self.speed_d = 0.0\n            self.topic_to.publish(0.0)\n            return\n        else:\n            self.topic_to.publish(command_speed)\n            return\n\n    def antenna_el_encoder(self, status):\n        self.arcsec_enc = status.data * 3600.\n        return\n\n    def antenna_el_pid(self, status):\n        self.p_coeff = status.data[0]\n        self.i_coeff = status.data[1]\n        self.d_coeff = status.data[2]\n        return\n\n    def antenna_el_lock(self, status):\n        self.lock = status.data\n        return\n\ndef calc_pid(target_arcsec, encoder_arcsec, pre_arcsec, pre_hensa, ihensa, enc_before, t_now, t_past, p_coeff, i_coeff, d_coeff):\n    \"\"\"\n    DESCRIPTION\n    ===========\n    This function determines the el speed command for the antenna\n    \"\"\"\n\n    # calculate the position error (ichi hensa)\n    hensa = target_arcsec - encoder_arcsec\n\n    dhensa = hensa - pre_hensa\n    if math.fabs(dhensa) > 1:\n        dhensa = 0\n\n    if (encoder_arcsec - enc_before) != 0.0:\n        current_speed = (encoder_arcsec - enc_before) / (t_now-t_past)\n\n    if pre_arcsec == 0:  # for first move\n        target_speed = 0\n    else:\n        target_speed = (target_arcsec - pre_arcsec)/(t_now - t_past)\n\n    ihensa += (hensa + pre_hensa)/2\n    if math.fabs(hensa) > 50:\n        ihensa = 0.0\n\n    # PID\n    rate = target_speed + p_coeff*hensa + i_coeff*ihensa*(t_now-t_past) + d_coeff*dhensa/(t_now-t_past)\n\n    return [rate, ihensa]\n\nif __name__ == \"__main__\":\n    rospy.init_node(name)\n    feedback = antenna_el_feedback()\n    rospy.spin()\n\n","sub_path":"scripts/antenna_el_commander_pid.py","file_name":"antenna_el_commander_pid.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"16977627","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import with_metaclass\n\nfrom .utils import parse_query_parameters\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n    @abstractmethod\n    def list(self, size=100, offset=None, **filter_fields):\n        \"\"\"\n        :param size: A limit on the number of objects to be returned.\n        :type size: int\n        :param offset: A cursor used for pagination. 
offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset, **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n","sub_path":"src/kong/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"533437472","text":"# -*- coding:utf-8 -*-\n\nfrom __future__ import (absolute_import, division, print_function, unicode_literals)\n\n\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.httpclient\nimport tornado.gen\nfrom tornado.httpclient import HTTPRequest\n\ntry:\n from urllib import urlencode, quote\nexcept ImportError:\n from urllib.parse import urlencode, quote\nimport json\n\n\ntry:\n from lxml import etree\nexcept ImportError:\n import sys\n sys.exit(1)\n\n\nfrom tornado.options import define, options\ndefine(\"port\", default=8080, help=\"run on given port\", type=int)\n\n\ndef make_jw_encode(string_to_encode):\n return quote(string_to_encode.encode('gbk'))\n\n\ndef make_jw_weekdays(chinese):\n if chinese == \"一\":\n return \"Mon\"\n elif chinese == \"二\":\n return \"Tue\"\n elif chinese == \"三\":\n return \"Wed\"\n elif chinese == \"四\":\n return \"Thu\"\n elif chinese == \"五\":\n return \"Fri\"\n elif chinese == \"六\":\n return \"Sat\"\n elif chinese == \"日\":\n return \"Sun\"\n\n\ndef clean(s):\n \"\"\"Returns version of s without undesired characters in it.\"\"\"\n wanted = \"0123456789-\"\n out = \"\"\n for c in s:\n if c in wanted:\n out += c\n return out\n\n\ndef make_jw_weeks(duration, w_type):\n\n du_list = duration.split(\"-\")\n if len(du_list) == 1:\n start = 0\n end = 18\n else:\n start = int(du_list[0])\n end = int(du_list[1])\n\n return_str = \"\"\n\n for x in range(start, end + 1):\n if w_type == \"all\" or x % 2 != (0 if w_type == \"odd\" else 1):\n return_str = return_str + \",\" + str(x)\n\n return return_str[1:]\n\n\ndef make_headers(referer=\"\"):\n dic = {}\n dic[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.143 Safari/537.36\"\n dic[\"Connection\"] = \"keep-alive\"\n dic[\"Accept-Language\"] = \"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4\"\n dic[\"Accept\"] = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\"\n dic[\"Host\"] = \"222.201.132.114\"\n dic[\"Referer\"] = referer\n return dic\n\n\nclass LessonHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n xh = self.get_argument(\"xh\")\n pw = self.get_argument(\"pw\")\n\n client = tornado.httpclient.AsyncHTTPClient()\n\n jw_url = 'http://222.201.132.114/default2.aspx'\n request = HTTPRequest(jw_url, \"GET\", follow_redirects=True)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n location = response.effective_url\n hash_string = location[location.find(\"(\"):location.find(\")\") + 1]\n login_url = \"%s%s%s\" % (\n \"http://222.201.132.114/\", hash_string, \"/default2.aspx\")\n\n page = 
etree.HTML(response.body)\n        viewstate_input = page.xpath(\n            \"/html/body/form/input[@name='__VIEWSTATE']\")[0]\n        viewstate = viewstate_input.get(\"value\")\n\n        params = {\"__VIEWSTATE\": viewstate, \"txtUserName\": xh,\n                  \"TextBox2\": pw, \"txtSecretCode\": \"\", \"Button1\": \"\"}\n\n        request = HTTPRequest(\n            login_url, \"POST\",\n            None,\n            body=urlencode(params),\n            follow_redirects=True)\n        response = yield tornado.gen.Task(client.fetch,\n                                          request)\n\n        main_url = response.effective_url\n        page = etree.HTML(response.body)\n        name_span = page.xpath(\n            \"/html/body/div/div/div/form/div/ul/li/em/span[@id='xhxm']\")[0]\n        name = name_span.text[-5:-2]\n\n        lesson_url = \"%s%s%s%s%s%s%s\" % (\n            \"http://222.201.132.114/\",\n            hash_string,\n            \"/xskbcx.aspx?xh=\",\n            xh,\n            \"&xm=\",\n            make_jw_encode(name),\n            \"&gnmkdm=N121603\")\n\n        request = HTTPRequest(\n            lesson_url,\n            \"GET\",\n            follow_redirects=True,\n            headers=make_headers(main_url))\n        response = yield tornado.gen.Task(client.fetch,\n                                          request)\n\n        page = etree.HTML(response.body)\n        table_rows = page.xpath(\"/html/body/form/div/div/span/table[2]/tr\")\n\n        lesson_list = []\n        count = 0\n\n        for row in table_rows[1:]:\n            cells = row.getchildren()\n\n            for cell in cells:\n\n                cell_html = etree.tostring(cell, encoding=\"utf-8\")\n                if b'{' in cell_html:\n                    cell_html = cell_html[cell_html.find(b\">\") + 1:-5]\n\n                    count = count + 1\n\n                    # fields inside a cell are separated by <br/> tags; the literal tag was lost in extraction, and <br/> is assumed here because lxml serializes XML-style\n                    tiny_cell_list = cell_html.split(b\"<br/>
\")\n\n if len(tiny_cell_list) == 1:\n lesson_name = tiny_cell_list[0]\n lesson_teach_by = b\"\"\n lesson_classroom = b\"\"\n content = \"\"\n elif len(tiny_cell_list) == 4:\n lesson_name = tiny_cell_list[0]\n content = tiny_cell_list[1]\n lesson_teach_by = tiny_cell_list[2]\n lesson_classroom = tiny_cell_list[3]\n else:\n lesson_name = tiny_cell_list[0]\n lesson_teach_by = tiny_cell_list[2]\n content = tiny_cell_list[1]\n lesson_classroom = b\"\"\n\n content = content.decode(\n \"utf-8\")\n lesson_time = content[\n content.find(\"第\") + 1:content.find(\"节\")]\n\n lesson_day = make_jw_weekdays(content[\n content.find('周') + 1:content.find('第')])\n\n if \"单周\" in content:\n lesson_type = \"odd\"\n elif \"双周\" in content:\n lesson_type = \"even\"\n else:\n lesson_type = \"all\"\n\n lesson_week_du = clean(content[\n content.find(\"{\") + 1:content.find(\"}\")])\n\n lesson_weeks = make_jw_weeks(lesson_week_du, lesson_type)\n\n if '节/周' in content:\n continue\n\n lesson_dict = {\n \"lesson_day\": lesson_day,\n \"lesson_name\": lesson_name.decode(\"utf-8\"),\n \"lesson_teach_by\": lesson_teach_by.decode(\"utf-8\"),\n \"lesson_classroom\": lesson_classroom.decode(\"utf-8\"),\n \"lesson_time\": lesson_time,\n \"lesson_weeks\": lesson_weeks,\n \"lesson_type\": lesson_type\n }\n\n lesson_list.append(lesson_dict)\n\n return_dict = {\"count\": count, \"lessons\": lesson_list}\n json_data = json.dumps(return_dict, ensure_ascii=False)\n\n self.write(json_data)\n self.finish()\n\n def post(self):\n self.write(\"use get to enable cache\")\n\n def write_error(self, status_code, **kwargs):\n self.write(\"Gosh darnit, user! You caused a %d error.\" % status_code)\n\n\nclass ScoreHandler(tornado.web.RequestHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n xh = self.get_argument(\"xh\")\n pw = self.get_argument(\"pw\")\n if not xh or not pw:\n self.write(\"no enough parameter\")\n self.finish()\n\n client = tornado.httpclient.AsyncHTTPClient()\n\n jw_url = 'http://222.201.132.114/default2.aspx'\n request = HTTPRequest(jw_url, \"GET\", follow_redirects=True)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n location = response.effective_url\n hash_string = location[location.find(\"(\"):location.find(\")\") + 1]\n login_url = \"%s%s%s\" % (\n \"http://222.201.132.114/\", hash_string, \"/default2.aspx\")\n\n page = etree.HTML(response.body)\n viewstate_input = page.xpath(\n \"/html/body/form/input[@name='__VIEWSTATE']\")[0]\n viewstate = viewstate_input.get(\"value\")\n\n params = {\"__VIEWSTATE\": viewstate, \"txtUserName\": xh,\n \"TextBox2\": pw, \"txtSecretCode\": \"\", \"Button1\": \"\"}\n request = HTTPRequest(\n login_url,\n \"POST\",\n None,\n body=urlencode(params),\n follow_redirects=True,\n request_timeout=60)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n main_url = response.effective_url\n page = etree.HTML(response.body)\n name_span = page.xpath(\n \"/html/body/div/div/div/form/div/ul/li/em/span[@id='xhxm']\")[0]\n name = name_span.text[-5:-2]\n\n # critical\n score_url = \"%s%s%s%s%s%s%s\" % (\n \"http://222.201.132.114/\",\n hash_string,\n \"/xscjcx.aspx?xh=\",\n xh,\n \"&xm=\",\n make_jw_encode(name),\n \"&gnmkdm=N121605\")\n request = HTTPRequest(\n score_url,\n \"GET\",\n follow_redirects=True,\n headers=make_headers(main_url))\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n page = etree.HTML(response.body)\n viewstate2_span = page.xpath(\n \"/html/body/form/input[@name='__VIEWSTATE']\")[0]\n viewstate2 = 
viewstate2_span.get(\"value\")\n\n # critical\n params = {\"__VIEWSTATE\": viewstate2, \"btn_zcj\": \"历年成绩\".encode(\"utf-8\")}\n request = HTTPRequest(\n score_url,\n \"POST\",\n body=urlencode(params),\n follow_redirects=True,\n headers=make_headers(score_url),\n request_timeout=60)\n response = yield tornado.gen.Task(client.fetch,\n request)\n response\n page = etree.HTML(response.body)\n score_table = page.xpath(\"//table[@id='Datagrid1']\")[0]\n\n count = 0\n score_list = []\n for score_row in score_table.getchildren()[1:]:\n cells = score_row.getchildren()\n\n score_lesson_from_to = cells[0].text\n score_term = cells[1].text\n score_lesson_code = cells[2].text\n score_name = cells[3].text\n score_lesson_kind = cells[4].text\n score_lesson_belongs_to = cells[5].text\n score_lesson_point = cells[6].text\n score_credit = cells[7].text\n score_value = cells[8].text\n score_reexam_value = cells[10].text\n score_restudy_value = cells[11].text\n score_lesson_issue_by = cells[12].text\n score_rank = cells[15].text\n\n score_dict = {\n 'id'\t\t\t\t\t\t: count\t\t\t\t\t,\n 'score_lesson_from_to'\t\t: score_lesson_from_to\t,\n 'score_term'\t\t\t\t: score_term\t\t\t\t,\n 'score_lesson_code'\t\t\t: score_lesson_code\t\t,\n 'score_name'\t\t\t\t: score_name\t\t\t\t,\n 'score_lesson_kind'\t\t\t: score_lesson_kind\t\t,\n 'score_lesson_belongs_to' \t: score_lesson_belongs_to,\n 'score_lesson_point' \t\t: score_lesson_point \t\t,\n 'score_credit'\t\t\t\t: score_credit\t\t\t,\n 'score_value'\t\t\t\t: score_value\t\t\t\t,\n 'score_reexam_value'\t\t: score_reexam_value\t,\n 'score_restudy_value'\t\t: score_restudy_value\t\t,\n 'score_lesson_issue_by'\t\t: score_lesson_issue_by\t,\n 'score_rank'\t\t\t\t: score_rank\t\t\t\t,\n }\n\n score_list.append(score_dict)\n\n count += 1\n\n name_dict = {\n 'name': name,\n 'count': count,\n\n }\n score_list.append(name_dict)\n json_data = json.dumps(score_list, ensure_ascii=False)\n\n self.set_status(200)\n self.write(json_data)\n\n self.finish()\n\n def post(self):\n self.write(\"use get to enable cache\")\n\n def write_error(self, status_code, **kwargs):\n self.write(\"Gosh darnit, user! 
You caused a %d error.\" % status_code)\n\n\nclass GpaHandler(tornado.web.RequestHandler):\n\n @tornado.web.asynchronous\n @tornado.gen.engine\n def get(self):\n xh = self.get_argument(\"xh\")\n pw = self.get_argument(\"pw\")\n if not xh or not pw:\n self.write(\"no enough parameter\")\n self.finish()\n\n client = tornado.httpclient.AsyncHTTPClient()\n\n jw_url = 'http://222.201.132.114/default2.aspx'\n request = HTTPRequest(jw_url, \"GET\", follow_redirects=True)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n location = response.effective_url\n hash_string = location[location.find(\"(\"):location.find(\")\") + 1]\n login_url = \"%s%s%s\" % (\n \"http://222.201.132.114/\", hash_string, \"/default2.aspx\")\n\n page = etree.HTML(response.body)\n viewstate_input = page.xpath(\n \"/html/body/form/input[@name='__VIEWSTATE']\")[0]\n viewstate = viewstate_input.get(\"value\")\n\n params = {\"__VIEWSTATE\": viewstate, \"txtUserName\": xh,\n \"TextBox2\": pw, \"txtSecretCode\": \"\", \"Button1\": \"\"}\n request = HTTPRequest(\n login_url,\n \"POST\",\n None,\n body=urlencode(params),\n follow_redirects=True)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n main_url = response.effective_url\n page = etree.HTML(response.body)\n name_span = page.xpath(\n \"/html/body/div/div/div/form/div/ul/li/em/span[@id='xhxm']\")[0]\n name = name_span.text[-5:-2]\n\n # critical\n score_url = \"%s%s%s%s%s%s%s\" % (\n \"http://222.201.132.114/\",\n hash_string,\n \"/xscjcx.aspx?xh=\",\n xh,\n \"&xm=\",\n make_jw_encode(name),\n \"&gnmkdm=N121605\")\n request = HTTPRequest(\n score_url,\n \"GET\",\n follow_redirects=True,\n headers=make_headers(main_url))\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n page = etree.HTML(response.body)\n viewstate2_span = page.xpath(\n \"/html/body/form/input[@name='__VIEWSTATE']\")[0]\n viewstate2 = viewstate2_span.get(\"value\")\n\n # critical\n params = {\"__VIEWSTATE\": viewstate2, \"btn_zcj\": \"历年成绩\".encode(\"utf-8\")}\n request = HTTPRequest(\n score_url,\n \"POST\",\n body=urlencode(params),\n follow_redirects=True,\n headers=make_headers(score_url),\n request_timeout=60)\n response = yield tornado.gen.Task(client.fetch,\n request)\n\n page = etree.HTML(response.body)\n score_table = page.xpath(\"//table[@id='Datagrid1']\")[0]\n\n count = 0\n total_lesson_point = 0.0\n total_credit = 0.0\n result = {}\n\n for score_row in score_table.getchildren()[1:]:\n cells = score_row.getchildren()\n\n score_lesson_point = float(cells[6].text)\n score_credit = float(cells[7].text[1:])\n count = count + 1\n\n total_credit = total_credit + score_credit\n total_lesson_point = total_lesson_point + \\\n score_lesson_point * score_credit\n\n gpa = total_lesson_point / total_credit\n\n result[\"gpa\"] = gpa\n result[\"how_many_lessons\"] = count\n result[\"total_lesson_point\"] = total_lesson_point\n result[\"total_credit\"] = total_credit\n\n self.set_status(200)\n self.write(json.dumps(result))\n\n self.finish()\n\n def post(self):\n self.write(\"use get to enable cache\")\n\n def write_error(self, status_code, **kwargs):\n self.write(\"Gosh darnit, user! 
You caused a %d error.\" % status_code)\n\n\ndef getApplication():\n    tornado.options.parse_command_line()  # parse port\n    app = tornado.web.Application(handlers=getHandlers())\n    return app\n\n\ndef getHandlers():\n    return [\n        (r\"/Lesson/\", LessonHandler),\n        (r\"/Score/\", ScoreHandler),\n        (r\"/Gpa/\", GpaHandler),\n    ]\n\n\nif __name__ == \"__main__\":\n    tornado.options.parse_command_line()  # parse port\n    app = tornado.web.Application(handlers=getHandlers())\n    http_server = tornado.httpserver.HTTPServer(app)\n    http_server.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n","sub_path":"scuter/scuter.py","file_name":"scuter.py","file_ext":"py","file_size_in_byte":16739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"178525330","text":"#!/usr/bin/python3\n\nfrom PIL import Image\n\n\nimage = Image.open('dog.ppm')\n# PIL images have no readline(); read the PPM header line from the file itself\nwith open('dog.ppm', 'rb') as f:\n    var = f.readline()\nprint(\"this is: \", var)\n\n\nnew_image = image.resize((400, 400))\nnew_image.save('image_400.ppm')\n\nprint(image.size) # Output: (1200, 776)\nprint(new_image.size) # Output: (400, 400)\nprint(image.mode)","sub_path":"computacion2/img_3.py","file_name":"img_3.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"609478117","text":"# -*- coding:utf-8 -*-\nimport string\n\nfrom bs4 import BeautifulSoup  # module for parsing HTML structure\nfrom src.tool.ExcelManager import writeExcel\nimport urllib.request\n\ndef makeBookListInfo(url_content):\n    \"\"\"\n\tScrape the book list info\n\t\"\"\"\n    books = []  # initialize as an empty list\n    soup = BeautifulSoup(url_content, 'html.parser')  # start parsing\n    tag_list = soup.select(\"div.article div#subject_list ul.subject-list\")  # renamed from 'list' to avoid shadowing the builtin\n    listsoup = BeautifulSoup(str(tag_list), 'html.parser')  # start parsing\n    booktable1 = listsoup.findAll(\"li\")  # find all tags that hold book info\n    for book in booktable1:  # iterate over the book list\n        simplebook = book  # book is a dl tag under booktable1\n        subsoup = BeautifulSoup(str(simplebook), 'html.parser')  # parse a single book\n        book_large_img = subsoup.img['src']  # use the tag directly, then read the attribute value  # book cover:\n        # print(str(book_large_img))  # print the image link\n        img_temp = book_large_img.split('/')  # split the link on '/'\n        img_temp[len(img_temp) - 2] = 'spic'\n        message = subsoup.find('div', attrs={\"class\": \"info\"})  # book info\n        book_link = message.a['href']  # book link\n        book_name = message.a['title']  # book name\n        book_info = subsoup.find('div', attrs={\"class\": \"pub\"}).string.replace('\\n ', '').replace(' \\n', '').replace(' ', '')  # book publication info\n        try: book_star = subsoup.find('span', attrs={\"class\": \"rating_nums\"}).string  # book star rating\n        except:\n            book_star = '-'\n            pass\n        books.append([book_name, book_link, book_large_img, book_info, book_star])  # build our own record structure\n    return books  # return the book list\n\"\"\"\ndef makeBookInfo(url_content):\n\n#抓取单本书\n\n    soup = BeautifulSoup(url_content, 'html.parser') # 开始解析\n    # 查找对应的meta标签,返回的是整个标签的内容\n    book_no = soup.find('meta', attrs={'http-equiv': 'mobile-agent'})\n    # 这里是有问题的,meta标签没闭合\n    # 去除那本书的编号\n    book_no = book_no['content'].split('subject/')[1].replace('/', '')\n    # 书名,不知道为什么是有空格和换行符的,看html源文件是没有的,但是取出来就有了。\n    book_name = soup.find('h1').text.replace('\\n', '')\n    book_info = soup.find('div', attrs={\"id\": \"info\"}) # 出版信息\n    peoples = soup.find('a', attrs={\"class\", \"rating_people\"}) # 评分的人数\n    books = soup.findAll('span', attrs={\"class\", \"rating_per\"}) # 人数的比例\n\n    book_intro = soup.findAll('div', attrs={\"class\": \"intro\"}) # 书籍及作者介绍\n    book_alot = soup.findAll('div', attrs={\"class\": \"subject_show 
block5\"}) # 丛书信息 可能不存在\n\n # 使用css的选择器来获得标签。\n # 认真看了一下这部分获取的东西,html代码里面,这部分只是一些文字,用css装饰了一下。好像通过标签确实找不到\n # 而且有些分了好多层,所以才用css的选择器拉获取吧。\n\n mu_lu = soup.select('div[id*=\"dir\"]') # 表示获得所有id属性中包含dir的div\n #bookhotcomment1 = soup.select('div#wt_1 div.ctsh div.tlst div.ilst a') # 评论头像\n #bookhotcomment2 = soup.select('div#wt_1 div.ctsh div.tlst div.nlst h3 > a') # 评论详情\n #bookhotcomment3 = soup.select('div#wt_1 div.ctsh div.tlst div.clst span.starb') # 用户简介\n comments = []\n # 获取评论信息\n bookhotcomment = soup.findAll('li', attrs=['class', 'comment-item'])\n for comment in bookhotcomment:\n # print(comment)\n simple_comment = BeautifulSoup(str(comment), 'html.parser')\n book_hot_comment1 = simple_comment.find('span', attrs=['class', 'comment-info']) # 评论ID\n book_hot_comment2 = simple_comment.find('p', attrs=['class', 'comment-content']) # 评论详情\n book_hot_comment3 = simple_comment.find('span', attrs=['class', 'user-stars allstar50 rating']) # 评价等级\n try:\n book_hot_comment33 = book_hot_comment3.title()\n except:\n book_hot_comment33 = 'none'\n comments.append(\n '
'.join(['评论ID:'+book_hot_comment1.find('a').get_text(), '\\n评论内容:'+book_hot_comment2.get_text(), '\\n星级:'+book_hot_comment33.replace('\\xa0', '\\n')])) # \\xa0是不间断空白符\n mu_lu = soup.select('div[id*=\"dir\"]') # 表示获得所有id属性中包含dir的div\n\n try:\n # 获取出来的东西很多换行符和空格,不知道什么原理。这个串联有点像C++的输入输出哇。\n book_info = book_info.text.replace(' \\n', '').replace('\\n ', '').replace(' ', '')\n except:\n book_info = ''\n # 内容里面分了好多的段落,这些写得有点硬了,稍微变更一下就不能用了。\n\n try:\n bookintro1 = book_intro[0].findAll('p')\n except:\n bookintro1 = []\n try:\n bookintro2 = book_intro[1].findAll('p')\n except:\n bookintro2 = []\n book_intro = ''\n author_intro = ''\n # 相当于去除

\n for i in bookintro1:\n book_intro = book_intro + i.text + '\\n'\n for i in bookintro2:\n author_intro = author_intro + i.text + '\\n'\n try:\n bookalot = book_alot[0].text.replace('\\n', '').replace(' ', '')\n except:\n bookalot = '.>_<.无丛书信息'\n peoples = peoples.text\n try:\n mu_lu = mu_lu[0].text.replace(' ', '')\n mu_lu = mu_lu[1].text.replace(' ', '')\n except:\n mu_lu = '.>_<.未检索到目录信息'\n\n\n stars = []\n for i in books:\n stars.append(i.text)\n # 写在函数最后,这个函数就是将书本页面的内容提取出来,按自己的格式构造一下。\n # 所以,核心内容是那几个提取的函数find,findAll,select。理解这三个函数。其他部分都不重要了。\n # 构造成一个列表\n\n return [book_no, book_name, book_info, book_intro, author_intro, int(peoples.replace('人评价', '')), ' ,'.join(stars), bookalot, mu_lu,\n '
'.join(comments)]\n\"\"\" \"\"\"\n 抓取单本书\n \"\"\"\ndef makeBookInfo(url_content):\n soup = BeautifulSoup(url_content, 'html.parser') # 开始解析\n # 查找对应的meta标签,返回的是整个标签的内容\n book_no = soup.find('meta', attrs={'http-equiv': 'mobile-agent'})\n book_no = book_no['content'].split('subject/')[1].replace('/', '') # 书编号\n # print('编号:' + book_no)\n book_name = soup.find('h1').text.replace('\\n', '')\n # print('书名:' + book_name)\n book_info = soup.find('div', attrs={\"id\": \"info\"}) # 出版信息\n try: # 作者\n book_au = soup.select(\"div.article div#info a:nth-of-type(1)\")\n # print(book_au)\n soup_au = BeautifulSoup(str(book_au), 'html.parser')\n book_author = soup_au.get_text().replace(' \\n', '').replace('\\n ', '').replace(' ','') # .replace('[', '').replace(']', '')\n except:\n book_author = '未检索到作者信息'\n #print(\"作者:\" + book_author)\n book_tra = soup.select(\"div.article div#info a:nth-of-type(2)\") # 译者\n book_trans = u'-'\n try:\n # print(book_tra)\n soup_tra = BeautifulSoup(str(book_tra), 'html.parser')\n if not soup_tra.text.replace(' \\n', '').replace('\\n ', '').replace(' ', ''):\n book_trans = str(soup_tra.text.replace(' \\n', '').replace('\\n ', '').replace(' ', ''))\n except:\n book_trans = '-'\n #print('译者:' + book_trans)\n # 出版年,页数,定价,出版社\n book_message = book_info.findAll('span', attrs={'class': 'pl'})\n try:\n book_year = u'-'\n book_price = u'-'\n book_page = 0\n book_public = u'-'\n for item in book_message:\n if item.string == u\"出版年:\":\n book_year = item.nextSibling.strip().split(\"/\")[0].strip() # nextSibling查找下一个兄弟节点 ,strip()去掉空格,split用来分割\n if item.string == u\"页数:\":\n book_page = item.nextSibling.strip().split(\"/\")[0].strip()\n if item.string == u\"定价:\":\n book_price = item.nextSibling.strip().split(\"/\")[0].strip()\n if item.string == u\"出版社:\":\n book_public = item.nextSibling.strip().split(\"/\")[0].strip()\n except:\n print(\"抓取出版年和页数,定价时出错\")\n book_year = '未检索到年份信息'\n book_price = '未检索到价格'\n book_page = '未检索到页数'\n book_public = '未检索到出版社信息'\n #print('出版社:' + book_public)\n #print('出版年:' + book_year)\n #print('页数:' + book_page)\n #print('定价:' + book_price)\n\n # 评论人数:\n try:\n infoVoteNum = soup.find('span', {'property': 'v:votes'})\n votenum = infoVoteNum.get_text().strip()\n except:\n # print(\"评论人数不足\")\n votenum = '0'\n\n # print(\"评论人数:\" + votenum)\n\n # 星级评价:\n infostar = soup.find('strong', {'property': 'v:average'})\n # print(infostar)\n try:\n stars = \"0\"\n stars = infostar.get_text().strip()\n if stars == '':\n stars = \"0\"\n except:\n print(\"抓取评价出错\")\n stars = \"0\" # 使用u或者U处理unicode文本\n # print(\"星级评价:\" + stars)\n\n # 评价人数的比例\n try:\n ratio = soup.findAll('span', attrs={\"class\", \"rating_per\"})\n voteratio = []\n for i in ratio:\n voteratio.append(i.text)\n except:\n voteratio = \"0\"\n # print(\"人数比例:\")\n # print(voteratio)\n try:\n book_intro = ''\n author_intro = ''\n total = soup.findAll('h2')\n for previous in total:\n #print(previous)\n #print(previous.text.replace('·', '').replace('\\n', '').replace(' \\n', '').replace(' ', ''))\n word = previous.text.replace('·', '').replace('\\n', '').replace(' \\n', '').replace(' ', '')\n if word == '内容简介      ':\n book = previous.find_next_sibling() # 找下一个节点\n #print(book)\n soup_book = BeautifulSoup(str(book), 'html.parser')\n book_intro1 = soup_book.findAll('div', attrs={\"class\": \"intro\"})\n #print(book_intro1)\n #print(len(book_intro1))\n if len(book_intro1) == 1:\n intro1 = book_intro1[0].findAll('p')\n if len(book_intro1) == 2:\n intro1 = book_intro1[1].findAll('p')\n if word == '作者简介      
':\n                author = previous.find_next_sibling()  # find the next sibling node\n                #print(author)\n                soup_author = BeautifulSoup(str(author), 'html.parser')\n                author_intro1 = soup_author.findAll('div', attrs={\"class\": \"intro\"})\n                if len(author_intro1) == 1:\n                    intro2 = author_intro1[0].findAll('p')\n                if len(author_intro1) == 2:\n                    intro2 = author_intro1[1].findAll('p')\n        for i in intro1:\n            book_intro = book_intro + i.text + '\\n'\n        for i in intro2:\n            author_intro = author_intro + i.text + '\\n'\n        #print(book_intro)\n        #print(author_intro)\n    except:\n        book_intro = '-'\n        author_intro = '-'\n\n    # series info; may not exist\n    book_oth = soup.findAll('div', attrs={\"class\": \"subject_show block5\"})\n    try:\n        book_others = book_oth[0].text.replace('\\n', '').replace(' ', '')\n    except:\n        book_others = '-'\n    #print('丛书:' + book_others)\n    # table of contents\n    dir_divs = soup.select('div[id*=\"dir\"]')  # selects all divs whose id contains 'dir'\n    # print(dir_divs)\n    # test against the selected list itself; the old code reassigned mu_lu to a string and then checked len() of that string\n    if len(dir_divs) == 1:\n        try:\n            mu_lu = dir_divs[0].text.replace(' ', '')\n        except:\n            mu_lu = '-'\n    elif len(dir_divs) == 2:\n        try:\n            mu_lu = dir_divs[1].text.replace(' · · · · · · (收起)', '').replace(' ', '').replace('\\n', '|')\n        except:\n            mu_lu = '-'\n    else:\n        mu_lu = '-'\n\n    #print('目录:' + mu_lu)\n\n    # recommended books\n    try:\n        movie_like = soup.find('div', attrs={\"class\": \"content clearfix\"})  # related recommendations; may not exist\n        recomm = ''\n        movie = movie_like.findAll('dl')\n        for i in movie:\n            recomm += i.dd.a.string + ','\n        recommendations = recomm.strip(string.punctuation).strip()\n    except:\n        recommendations = '暂无推荐'\n    #print('推荐书籍:' + recommendations)\n\n    # fetch the comments\n    comments = []\n    bookcomment = soup.findAll('li', attrs=['class', 'comment-item'])\n    for comment in bookcomment:\n        simple_comment = BeautifulSoup(str(comment), 'html.parser')\n        book_comment1 = simple_comment.find('span', attrs=['class', 'comment-info'])  # reviewer ID\n        comment_id = book_comment1.find('a').string.replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"')\n        book_comment2 = simple_comment.find('p', attrs=['class', 'comment-content']).get_text().replace(' \\n', '').replace('\\n ', '').replace(' ', '').replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"')  # comment text\n        book_comment11 = BeautifulSoup(str(book_comment1), 'html.parser')\n        book_comment3 = book_comment11.select(\"span:nth-of-type(2)\")  # rating level\n        # print(book_comment3)\n        try:\n            book_comment33 = str(book_comment3).split('title=\"')[1].split('\">')[0]\n            if book_comment33 == '力荐':\n                book_comment33 = '★★★★★'\n            elif book_comment33 == '推荐':\n                book_comment33 = '★★★★'\n            elif book_comment33 == '还行':\n                book_comment33 = '★★★'\n            elif book_comment33 == '较差':\n                book_comment33 = '★★'\n            elif book_comment33 == '很差':\n                book_comment33 = '★'\n            else:\n                book_comment33 = '该用户未评分'\n        except:\n            book_comment33 = '该用户未评分'\n
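        # join the pieces with an HTML line break; the literal separator was eaten during extraction, so '<br>' below is an assumption\n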
        comments.append(\n            '<br>'.join(['评论ID:' + comment_id, '评论内容:' + book_comment2,\n                         '评价等级:' + book_comment33.replace('\\xa0', '\\n')])\n        )  # \\xa0 is a non-breaking space\n    # ----- when writing to Excel, every list element is written out separately, so the values should be strings -----\n    # NOTE: the '<br>' separator below is likewise an assumption; the original tag was lost in extraction\n    return [book_no, book_name, book_author, book_public, book_trans, book_year, book_page, book_price, votenum,\n            stars, ','.join(voteratio), book_intro, author_intro, book_others, mu_lu, recommendations, '<br>
'.join(comments), book_info.text.replace(' \\n', '').replace('\\n ', '').replace(' ', '').replace(\"'\", '\"')]\n\n\ndef makeBookTag(url_content, path='D:\\workplace\\pythonwork\\douban_book_catch_zzq\\darabase/bookTag.xlsx'):\n    \"\"\"\n\tScrape the tags and write them to Excel\n\t\"\"\"\n    soup = BeautifulSoup(url_content, 'html.parser')  # start parsing\n    booktag1 = soup.select('div#content div.article div div')\n    print(str(booktag1))\n    taglist = [['标签类别', '标签名', '标签链接', '点击量']]  # header row of the Excel sheet\n    for booktag2 in booktag1:\n        # print(str(booktag2))\n        soup1 = BeautifulSoup(str(booktag2), 'html.parser')  # start parsing\n        booktag2 = soup1.find('a', attrs={'class': 'tag-title-wrapper'})\n        tagType = booktag2['name']  # tag category\n        booktag3 = soup1.findAll(\"a\")\n        booktag4 = soup1.findAll(\"b\")  # number of books under this tag\n        for i in range(0, len(booktag4)):  # len(booktag4) is how many different tag entries this row holds\n            tag = booktag3[i + 1].string  # tag name\n            taglink = 'https://book.douban.com'+booktag3[i + 1]['href']  # link\n            tagnum = booktag4[i].string\n            taglist.append([tagType, tag, taglink, tagnum])  # append the row in the same order as the header\n    writeExcel(path, taglist)  # write the rows into the Excel file\n    print(\"Excel file written successfully\")\n\n\ndef testBookTag():\n    file = open('D:\\workplace\\pythonwork\\douban_book_catch_zzq\\darabase/booktag.html', 'rb')\n    content = file.read()\n    makeBookTag(content, r'D:\\workplace\\pythonwork\\douban_book_catch_zzq\\darabase/booktag.xlsx')\n\n\ndef testManyBook():\n    file = open('D:/workplace/pythonwork/douban_book_catch_zzq/web抓取/0.html', 'rb')\n    content = file.read()\n    books = makeBookListInfo(content)\n    for i in books:\n        print(i)\n\n\ndef testBookInfo():\n    print('D:/workplace/pythonwork/HelloWorld/book/流行/1000134三毛流浪记全集.html')\n    file = open('D:/workplace/pythonwork/HelloWorld/book/流行/1000134三毛流浪记全集.html', 'rb')  # read the file\n    content = file.read()\n    book = makeBookInfo(content)\n    for i in book:\n        print('*' * 50)  # separator, to make the output easier to read\n        print(i)  # print the content\n\n\nif __name__ == '__main__':\n    # testManyBook()\n    testBookInfo()  # extract the info from a book's detail page\n    # testBookTag()  # test whether tag scraping succeeds\n","sub_path":"src/tool/TagManager.py","file_name":"TagManager.py","file_ext":"py","file_size_in_byte":16944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"67920672","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.linear_model import LinearRegression\n\ndef select_feature(train_set, vbias=0.5, wbias=0.002, loop_times=200, rate=0.075):\n    '''\n    train_set: the training set, used for the data split\n    vbias: variance threshold; features whose variance falls below it are dropped\n    wbias: threshold on the linear model's coefficients; features with smaller coefficients are dropped\n    loop_times: number of rounds, to reduce the error of dropping a feature by chance\n    rate: ratio of threshold hits to rounds; above it, the feature fell below wbias too often over loop_times rounds\n\n    Tuning:\n    small vbias -> the feature is fairly flat, with no obvious variation\n    small wbias -> the feature has little influence on the target\n    large loop_times -> reduces random error\n    small rate -> loop_times * rate = the maximum number of times a coefficient may fall below wbias\n    '''\n\n    feature_names = []\n\n    # select features by their variance\n    features = train_set.columns[:-1]\n    for feature in features.values:\n        vals = train_set[feature].values\n        var = vals.var()\n        if var > vbias:\n            feature_names.append(feature)\n\n    X = train_set[feature_names].values\n    Y_= train_set[train_set.columns[-1:]].values\n\n    # select features by the linear model's coefficients\n    drop = {}\n    drop_keys = []\n    for i in range(loop_times):  # number of rounds\n        clf = LinearRegression()\n        X_train, X_test, y_train, y_test = train_test_split(X, Y_, test_size=0.2, random_state=None)\n        clf = clf.fit(X_train, y_train)\n        coef_ = clf.coef_[0]\n\n
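        # count how often each feature's coefficient falls below the wbias threshold\n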
        for w in range(len(coef_)):\n            if np.abs(coef_[w]) < wbias:  # features whose coefficient is below wbias are candidates for dropping\n                if feature_names[w] in drop.keys():\n                    drop[feature_names[w]] += 1\n                else:\n                    drop[feature_names[w]] = 0\n    # v32 v21 v34 v20 v11 v18 v15 v25 v30 v8\n    for k, v in drop.items():\n        if rate < (v/loop_times):  # drop the feature when its below-wbias count over loop_times exceeds rate\n            feature_names.remove(k)\n            drop_keys.append(k)\n    print(sorted(drop.items(), key=lambda d: d[1], reverse=True))\n    print('drop keys: \\n', drop_keys)\n    print('num of features: %d'%len(feature_names))\n    print('features:\\n', feature_names)\n    return feature_names\n\nif __name__=='__main__':\n    train_set = pd.read_csv('data/zhengqi_train.csv')\n    select_feature(train_set, 0.5)\n","sub_path":"zhengqi/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"584545774","text":"import pandas as pd\nimport pymysql\nimport time\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\nconn = pymysql.connect(host='34.64.88.80', port=3306, user='root',\n                       passwd='root', db='django_db', charset='utf8')\n\ndef contents_based_filtering(item_id, num, tag_data, ingredi):\n    curs = conn.cursor()\n\n    splited_data = ingredi.split(',')\n\n    getAll = \"select * from recipes where ingredients like \"\n\n    for idx, data in enumerate(splited_data):\n        if idx == len(splited_data)-1:\n            break\n        getAll = getAll + \"\\\"%\" + data + \"%\\\"\" + \" or ingredients like \"\n\n    getAll = getAll + \"\\\"%\" + splited_data[len(splited_data)-1] + \"%\\\"\"\n\n    print(getAll)\n\n    curs.execute(getAll)\n    temp = curs.fetchall()\n\n\n    allcontent = list(temp)\n\n    allcontent.append((999999, 'name', 'timereq', 'cookmethod', 'img', tag_data, 'category', 'ingredients'))\n\n    df = pd.DataFrame(allcontent, columns=['id', 'name', 'time_req', 'cook_method', 'img', 'tags', 'category', 'ingredients'])\n\n    tf = TfidfVectorizer(analyzer='word', stop_words='english')\n    tfidf_matrix = tf.fit_transform(df['tags'].values.astype('U'))\n    cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n    results = {}\n    for idx, row in df.iterrows():\n        similar_indices = cosine_similarities[idx].argsort()[:-100:-1]\n        similar_items = [(cosine_similarities[idx][i], df['id'][i]) for i in similar_indices]\n        results[row['id']] = similar_items[1:]\n\n    print(\"Recommending \" + str(num) + \" products similar to \" + df.loc[df['id'] == item_id]['name'].tolist()[0] + \"...\")\n    print(\"-------\")\n    recs = results[item_id][:num]\n    dict = {}\n\n    for rec in recs:\n        print(\"Recommended: \" + df.loc[df['id'] == rec[1]]['name'].tolist()[0] + \" (score:\" + str(rec[0]) + \")\")\n        dict[df.loc[df['id'] == rec[1]]['name'].tolist()[0]] = str(rec[0])\n\n    return dict\n\n\ncontents_based_filtering(item_id=999999, num=5, tag_data='#피자#이탈리안#술안주', ingredi='치즈,옥수수,고추장')\n","sub_path":"backEnd/200424_subtask_hybridSystem/algorithm/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"81785013","text":"class Solution:\n    def maxDistToClosest(self, seats):\n        max_dis = 0\n        cur_dis = 0\n        for value in seats:\n            if value == 0:\n                cur_dis += 1\n            else:\n                max_dis = max(max_dis, cur_dis)\n                cur_dis = 0\n        head_dis = seats.index(1)\n        tail_dis = cur_dis\n        max_dis = max((max_dis + 1) // 2, head_dis, tail_dis)  # was a bare tuple and referenced an undefined 'tail_dist'\n        return max_dis\n\n    def maxDistToClosest(self, seats):\n        start, end = 0, len(seats) - 1\n        while seats[start] == 0:\n            start += 1\n        while seats[end] == 0:\n            end -= 1\n        max_dist, dist = 0, 0\n
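        # scan the stretch between the first and last occupied seats, tracking runs of empty seats\n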
seats[i] == 1:\n                if dist > max_dist:\n                    max_dist = dist\n                dist = 0\n            else:\n                dist += 1\n        return max((max_dist + 1) // 2, start, len(seats) - end - 1)\n","sub_path":"849/849.maximize-distance-to-closest-person.234399605.Wrong-Answer.leetcode.py","file_name":"849.maximize-distance-to-closest-person.234399605.Wrong-Answer.leetcode.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"401333562","text":"'''\nThis script gets the output of Sean's QGIS process and flattens it so\nthat it can be used by other scripts\n'''\nimport argparse\nimport geopandas as gpd\n\n\ndef keep_rows_with_values(df, row_name):\n    df_temp = df[['id', 'damagelevel', 'geometry', row_name]].dropna()\n    df_temp.columns = ['id', 'damagelevel', 'geometry', 'image']\n    return df_temp\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input', help=\"name the geojson you want flat\")\nparser.add_argument('output', help=\"name what you want it called\")\nargs = parser.parse_args()\n\ndf = gpd.read_file('../data_training/boundingboxes/' + args.input)\n\ndates = ['files_0827', 'files_0828', 'files_0829', 'files_0830', 'files_0831',\n         'files_0901', 'files_0902', 'files_0903']\nflat_df = gpd.GeoDataFrame(columns=['id', 'damagelevel', 'geometry', 'image'])\nfor date in dates:\n    temp_df = keep_rows_with_values(df, date)\n    flat_df = flat_df.append(temp_df, ignore_index=True)\nflat_df.id = flat_df.index\n\nflat_df.to_file('../data_training/boundingboxes/' + args.output,\n                driver='GeoJSON')\n","sub_path":"scripts/pixel-conversion/flatten_geojson.py","file_name":"flatten_geojson.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"250432081","text":"#\n# voice-skill-sdk\n#\n# (C) 2021, Deutsche Telekom AG\n#\n# This file is distributed under the terms of the MIT license.\n# For details see the file LICENSE in the top directory.\n#\n\n#\n# Internationalization\n#\n\nimport re\nimport random\nimport logging\nimport subprocess\nfrom pathlib import Path\nfrom functools import reduce\nfrom types import MappingProxyType\nfrom typing import Dict, Iterable, List, Optional, Mapping, Text, Tuple, Union\n\nimport yaml\nfrom yaml.reader import ReaderError\nfrom yaml.scanner import ScannerError\nfrom babel import dates, lists, support\n\n# Place your `[lang].po` files in the `locale` directory\nLOCALE_DIR = \"locale\"\n\nPROGRAM = \"pybabel\"\nPROGRAM_NOT_FOUND = f'Failed to launch \"{PROGRAM} %s\": not found. 
Make sure \"{PROGRAM}\" is in your PATH.'\n\nRE_TRANSLATIONS = re.compile(r\"^[a-z]{2}(-[A-Z]{2})?$\")\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_locale_dir(locale_dir: Text = None) -> Path:\n \"\"\"Returns locales folder location\"\"\"\n return Path(locale_dir or LOCALE_DIR)\n\n\ndef make_lazy(func, alt=None):\n \"\"\"\n Make lazy translation function\n\n :param func: function to call\n :param alt: alternative function if translation is not set\n :return:\n \"\"\"\n\n def lazy_func(*args, **kwargs):\n \"\"\"Lazy translations wrapper\"\"\"\n\n from skill_sdk.intents import r\n\n try:\n return getattr(r.get_translation(), func)(*args, **kwargs)\n except TypeError:\n logger.error(\"Calling translation functions outside of request context.\")\n except AttributeError as e:\n logger.exception(\"%s\", repr(e))\n return alt(*args, **kwargs) if callable(alt) else None\n\n return lazy_func\n\n\n_ = make_lazy(\"gettext\", lambda m, *a, **kw: m)\n_n = make_lazy(\n \"ngettext\", lambda singular, plural, n, *a, **kw: singular if n == 1 else plural\n)\n_a = make_lazy(\"getalltexts\", lambda m, *a, **kw: [m])\n\n\nclass TranslationError(Exception):\n \"\"\"\n Exception raised when a translation could not be performed due to a missing ``.mo`` file, a missing translation\n key or if there are no suitable translations available in the text service.\n \"\"\"\n\n\nclass Message(str):\n \"\"\"String object that encapsulates formatting parameters\"\"\"\n\n # Message id\n key: Text\n\n # Message string (un-formatted)\n value: Text\n\n # Positional arguments\n args: Tuple\n\n # Keyword arguments\n kwargs: Dict\n\n def __new__(cls, value, key=None, *args, **kwargs):\n \"\"\"\n Create a message with msgstr/msgid and format parameters\n\n :return:\n \"\"\"\n message = (\n value.format(*args, **kwargs)\n if isinstance(value, str) and (args or kwargs)\n else value\n )\n string = super().__new__(cls, message)\n string.key = key or value\n string.args = args\n string.kwargs = kwargs\n string.value = value\n return string\n\n def format(self, *args, **kwargs) -> \"Message\":\n \"\"\"\n Create and return new Message object with given format parameters\n\n :return:\n \"\"\"\n message = Message(self.value, self.key, *args, **kwargs)\n return message\n\n def __add__(self, other: Union[\"Message\", Text]) -> \"Message\":\n \"\"\"\n Concatenate messages (or Message and str)\n\n @param other:\n @return:\n \"\"\"\n if isinstance(other, Message):\n value = self.value + other.value\n args = self.args + other.args\n kwargs = {**self.kwargs, **other.kwargs}\n else:\n value = self.value + other\n args, kwargs = self.args, self.kwargs\n\n return Message(value, self.key, *args, **kwargs)\n\n def join(self, iterable: Iterable[Union[\"Message\", Text]]):\n \"\"\"\n Join messages in iterable and return a concatenated Message.\n\n @param iterable:\n @return:\n \"\"\"\n return reduce(lambda x, y: x + self + y, iterable)\n\n def strip(self, __chars: Optional[Text] = None) -> \"Message\":\n \"\"\"\n Return new Message object with stripped value\n\n :return:\n \"\"\"\n message = Message(\n self.value.strip(__chars), self.key, *self.args, **self.kwargs\n )\n return message\n\n\nclass Translations(support.Translations):\n \"\"\"Lazy translations, return Message object instead of formatted string\"\"\"\n\n def __init__(self, lang: Text = None, fp=None):\n self.lang = lang\n super().__init__(fp)\n\n def gettext(self, message, *args, **kwargs) -> Message:\n return Message(super().gettext(message), message, *args, **kwargs)\n\n def 
ngettext(self, singular, plural, n, *args, **kwargs) -> Message:\n return Message(super().ngettext(singular, plural, n), singular, *args, **kwargs)\n\n def format_list(self, elements: List[Text], style=\"standard\"):\n \"\"\"\n Join list elements\n [items, item2, item3] -> 'item1, item2 and item3'\n\n :param elements:\n :param style:\n :return:\n \"\"\"\n return lists.format_list(elements, style=style, locale=self.lang)\n\n # Backward compatibility\n nl_join = format_list\n\n def nl_build(self, header: Text, elements: List[Text]) -> Text:\n \"\"\"\n Build list in natural language:\n (header, [items, item2, item3]) -> 'Header: item1, item2 and item3.'\n\n :param header: list header\n :param elements: list elements\n :return:\n \"\"\"\n return Message(\": \").join((header, self.format_list(elements)))\n\n def format_datetime(self, datetime=None, format=\"medium\", tzinfo=None) -> Text:\n \"\"\"Format datetime according to the locale\"\"\"\n return dates.format_datetime(datetime, format, tzinfo, self.lang)\n\n def format_date(self, date=None, format=\"medium\") -> Text:\n \"\"\"Format date according to the locale\"\"\"\n return dates.format_date(date, format, self.lang)\n\n def format_time(self, time=None, format=\"medium\", tzinfo=None) -> Text:\n \"\"\"Format time according to the locale\"\"\"\n return dates.format_time(time, format, tzinfo, self.lang)\n\n def format_timedelta(\n self,\n delta,\n granularity=\"second\",\n threshold=0.85,\n add_direction=False,\n format=\"long\",\n ) -> Text:\n \"\"\"Format a time delta according to the rules of the given locale\"\"\"\n return dates.format_timedelta(\n delta, granularity, threshold, add_direction, format, self.lang\n )\n\n\nclass MultiStringTranslation(Translations):\n \"\"\"Translations that allows single key to have multiple values\"\"\"\n\n def _parse(self, fp):\n \"\"\"\n Load catalogue from YAML file\n\n @param fp:\n @return:\n \"\"\"\n\n try:\n catalog = yaml.safe_load(fp)\n self._catalog = {\n k: v if isinstance(v, list) else [v] for k, v in catalog.items()\n }\n except (ReaderError, ScannerError) as ex:\n logger.exception(\n \"Could not load translations from %s: %s\", repr(fp), repr(ex)\n )\n raise RuntimeError from ex\n\n def __repr__(self):\n return f\"<{type(self).__name__}: {repr(self.files)}>\"\n\n def gettext(self, message, *args, **kwargs):\n logger.debug(\"Translating message %s to %s\", repr(message), repr(self.lang))\n try:\n candidates = self._catalog[message]\n logger.debug(\"%s candidates: %s\", len(candidates), repr(candidates))\n return Message(random.choice(candidates), message, *args, **kwargs)\n except LookupError:\n logger.warning(\"No translation for key: %s\", repr(message))\n return super().gettext(message, *args, **kwargs)\n\n def ngettext(self, singular, plural, n, *args, **kwargs):\n logger.debug(\"Translating %s/%s/%s to %s\", singular, plural, n, self.lang)\n return self.gettext(singular if n == 1 else plural, *args, **kwargs)\n\n def getalltexts(self, key, *args, **kwargs):\n logger.debug(\"Retrieving all translation messages for %s in %s\", key, self.lang)\n try:\n candidates = self._catalog[key]\n logger.debug(\"%s candidates: %s\", len(candidates), repr(candidates))\n return [Message(value, key, *args, **kwargs) for value in candidates]\n except LookupError:\n logger.warning(\"No translation for key: %s\", key)\n return [super().gettext(key, *args, **kwargs)]\n\n\ndef compile_locales(locale_dir: Text = None, force: bool = False):\n \"\"\"\n Compile all languages available in locale_dir:\n launches 
`pybabel compile` to compile .po to .mo files\n\n    :param locale_dir:\n    :param force:   force compilation even if *.mo files exist\n    :return:\n    \"\"\"\n    command = \"compile\"\n\n    for po_file in get_locale_dir(locale_dir).glob(\"*.po\"):\n\n        mo_file = po_file.with_suffix(\".mo\")\n        if mo_file.exists() and not force:\n            logger.info(\"Skipping %s: %s exists\", po_file.name, mo_file)\n            continue\n\n        logger.info(\"Compiling %s ...\", po_file.name)\n        try:\n\n            result = subprocess.check_output(\n                [\n                    PROGRAM,\n                    command,\n                    \"-i\",\n                    str(po_file),\n                    \"-o\",\n                    str(mo_file),\n                ],\n                text=True,\n                stderr=subprocess.STDOUT,\n            )\n            logger.info(result)\n\n        except FileNotFoundError:\n            logger.error(PROGRAM_NOT_FOUND, command)\n\n        except subprocess.CalledProcessError as ex:\n            logger.error(\"Failed to compile %s: %s\", po_file.name, ex.stdout)\n            raise\n\n\ndef _load_yaml(locale_dir: Text = None) -> Dict[Text, MultiStringTranslation]:\n    \"\"\"\n    Load multi-string translations from YAML files\n\n    @param locale_dir:\n    @return:\n    \"\"\"\n\n    logger.info(\"Loading YAML translations...\")\n\n    return {\n        yaml_file.stem: MultiStringTranslation(yaml_file.stem, yaml_file.open(mode=\"r\"))\n        for yaml_file in get_locale_dir(locale_dir).glob(\"*.yaml\")\n        if RE_TRANSLATIONS.match(yaml_file.stem)\n    }\n\n\ndef _load_gettext(locale_dir: Text = None) -> Dict[Text, Translations]:\n    \"\"\"\n    Load `gettext` translations from *.po/*.mo files\n\n    @param locale_dir:\n    @return:\n    \"\"\"\n\n    logger.info(\"Loading gettext translations...\")\n\n    compile_locales(locale_dir)\n    return {\n        mo_file.stem: Translations(mo_file.stem, mo_file.open(mode=\"rb\"))\n        for mo_file in get_locale_dir(locale_dir).glob(\"*.mo\")\n        if RE_TRANSLATIONS.match(mo_file.stem)\n    }\n\n\ndef load_translations(locale_dir: Text = None) -> Mapping[Text, Translations]:\n    \"\"\"\n    Load local languages available in locale_dir\n\n    :param locale_dir:\n    :return:\n    \"\"\"\n\n    translations = _load_yaml(locale_dir) or _load_gettext(locale_dir)\n\n    return MappingProxyType(translations)\n","sub_path":"skill_sdk/i18n.py","file_name":"i18n.py","file_ext":"py","file_size_in_byte":10928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"86950410","text":"def kruskal(edges, n, m, s):\n    \n    edges.sort(key=lambda x: x.weight)\n    \n    subsets = [Subset(x) for x in range(n + 1)]\n    \n    total = 0\n    e = 0\n    i = 0\n    while e < n - 1:\n        edge = edges[i]\n        i += 1\n        x = find(subsets, edge.u)\n        y = find(subsets, edge.v)\n        \n        if x != y:\n            union(subsets, x, y)\n            e += 1\n            total += edge.weight\n    \n    return total\n\ndef union(subsets, x, y):\n    # merge the components containing x and y; no-op if they already share a root\n    u = find(subsets, x)\n    v = find(subsets, y)\n    if u == v:\n        return\n    subsets[v].parent = u\n\ndef find(subsets, node):\n    subset = subsets[node]\n    while node != subset.parent:\n        node = subset.parent\n        subset = subsets[node]\n\n    return node\n    \nclass Subset(object):\n    def __init__(self, parent):\n        self.parent = parent\n        self.rank = 0\n    \n    def __repr__(self):\n        return '(' + str(self.parent) + ',' + str(self.rank) + ')'\n\nclass Edge(object):\n    def __init__(self, u, v, weight):\n        self.u = u\n        self.v = v \n        self.weight = weight\n    \n    def __str__(self):\n        return '(' + str(self.u) + ',' + str(self.v) + ',' + str(self.weight) + ')'\n    \n    def __repr__(self):\n        return '(' + str(self.u) + ',' + str(self.v) + ',' + str(self.weight) + ')'\n\nif __name__ == '__main__':\n    f = open(\"./kruskalmstrsub.txt\")\n\n    line = f.readline().rstrip('\\n').split()\n    n = int(line[0])\n    m = int(line[1])\n    \n    edges = []\n    \n    for i in 
range(m):\n line = f.readline().rstrip('\\n').split()\n x = int(line[0])\n y = int(line[1])\n r = int(line[2])\n \n edges.append(Edge(x, y, r))\n \n \n s = int(f.readline().rstrip('\\n')) \n print(kruskal(edges, n, m, s))","sub_path":"hackerrank/kruskalmstrsub.py","file_name":"kruskalmstrsub.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"371670388","text":"from Bio import SeqIO\nimport os\nfile = \"../data/megaphage_contigs.fasta\"\n#loci = [\"SRS015941_NODE_2_length_315556_cov_47.6479\", \"SRS014470_NODE_1_length_389119_cov_8.88358\"]\nloci = [\"SRS078431_NODE_11_length_258293_cov_26.0675\", \"SRS017304_NODE_5_length_258288_cov_116.449\"]\ncluster = \"VC_1419_6\"\n\nfor record in SeqIO.parse(file, \"fasta\"):\n for locus in loci:\n if record.id == locus:\n SeqIO.write(record, \"../data/\" + cluster + \"/\" + locus + \".fasta\", \"fasta\")\n","sub_path":"helper_scripts/extract_fasta_files.py","file_name":"extract_fasta_files.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"191039656","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport auc_heuristic_computation\nfrom collections import defaultdict\nfrom enum import Enum\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport re\nimport sys\n\nNUMERIC_CONST_PATTERN_TEXT = r\"\"\"\n [-+]? # optional sign\n (?: \n (?: \\d* \\. \\d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc \n | \n (?: \\d+ \\.? ) # 1. 12. 123. etc 1 12 123 etc \n ) \n # followed by optional exponent part if desired \n (?: [Ee] [+-]? \\d+ ) ? \n \"\"\"\nNUMERIC_CONST_PATTERN = re.compile(NUMERIC_CONST_PATTERN_TEXT, re.VERBOSE)\nRESULTS_DIR = sys.argv[1]\nHEURISTIC_VERSION = 1\nDPI_FOR_SAVING_PNG = 200\nDATASET_TYPES = auc_heuristic_computation.print_dataset_types()\nUSER_TYPE_TO_NAME = {\n 0: \"non-recurring\",\n# 1: \"sporadic\",\n 2: \"frequent\"#,\n# 3: \"permanent\"\n}\nDATASET_TYPE_TO_NAME = {\n 'B': 'Sustainable',\n 'C': 'Transitioning',\n 'A': 'Emerging'\n}\nACTIVITY_TYPE_TO_NAME = {\n \"posts\": \"questions\",\n \"replies\": \"answers\"\n}\n\nclass activityType(Enum):\n Q = \"posts\"\n A = \"replies\"\n\ndef get_user_type_aucs(dataset_list, activityType):\n user_type_aucs = defaultdict(list)\n for dataset in dataset_list:\n dataset_aucs = []\n with open(RESULTS_DIR + dataset + \"/auc_activity_per_cluster_{}.txt\".format(activityType.value), \"r\") as f:\n for line in f:\n if line.strip() == \"\":\n break\n dataset_aucs.append(float(NUMERIC_CONST_PATTERN.findall(line)[-1]))\n dataset_auc_sum = sum(dataset_aucs)\n normalized_aucs = [auc / dataset_auc_sum for auc in dataset_aucs]\n for auc_index, auc in enumerate(normalized_aucs):\n user_type_aucs[auc_index].append(auc)\n return user_type_aucs\n\ndataset_types_aucs = {}\nfor dataset_type, dataset_list in DATASET_TYPES.items():\n dataset_types_aucs[dataset_type + \"_\" + activityType.Q.value] = get_user_type_aucs(dataset_list, activityType.Q)\n dataset_types_aucs[dataset_type + \"_\" + activityType.A.value] = get_user_type_aucs(dataset_list, activityType.A)\n\nfor activity_type in [activityType.Q.value, activityType.A.value]:\n fig, ax_list = plt.subplots(1, 2, figsize=(9, 6))\n #fig, ax_list = plt.subplots(1, 4)\n first_plot = True\n for user_type in [0, 2]:\n array_to_plot = []\n labels_to_plot = []\n user_type_name = USER_TYPE_TO_NAME[user_type]\n ax_list_index = 0 if 
user_type == 0 else 1\n        for dataset_type in sorted(DATASET_TYPES.keys(), reverse=True):\n            dataset_type_name = DATASET_TYPE_TO_NAME[dataset_type]\n            label = dataset_type_name\n            labels_to_plot.append(label)\n            array_to_plot.append(np.array(dataset_types_aucs[dataset_type + \"_\" + activity_type][user_type]).reshape((-1, 1)))\n        ax_list[ax_list_index].boxplot(array_to_plot, labels=labels_to_plot)\n        if first_plot:\n            first_plot = False\n            ax_list[ax_list_index].set_ylabel(\"Area-Under-the-Curve (% of total)\")\n        else:\n            ax_list[ax_list_index].tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')\n        ax_list[ax_list_index].set_title(user_type_name)\n    fig.suptitle(\"Distribution of Area-Under-the-Curve Ratios per User Type of \" + ACTIVITY_TYPE_TO_NAME[activity_type].capitalize() + \"-based Activity Time Series\")\n    #fig.tight_layout(h_pad=5)\n    fig.subplots_adjust(wspace=0.3)\n    plt.savefig(\"auc_ratios_per_user_per_dataset_types_{}.png\".format(activity_type))\n","sub_path":"src/auc_boxplot.py","file_name":"auc_boxplot.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"394136443","text":"import glob, os, sys\nfrom subprocess import check_output\nimport shutil\n\ndef make_folder_if_not_exist(directory):\n    if os.path.exists(directory):\n        shutil.rmtree(directory)\n\n    os.makedirs(directory)\n\nif len(sys.argv) < 2:  # a platform argument is required\n    exit()\n\nif sys.argv[1] == \"GOG\":\n    directory=\"D:\\\\Program Files (x86)\\\\GOG Galaxy\\\\Games\\\\Pillars of Eternity II Deadfire\\\\Mods\\\\\"\nelse:\n    directory=\"D:\\\\Program Files (x86)\\\\Steam\\\\steamapps\\\\common\\\\Pillars of Eternity II\\\\Mods\\\\\"\n\nos.chdir(sys.argv[1])\nfor f in glob.glob(\"*.zip\"):\n    result = os.path.splitext(f)[0]\n\n    mod_dir = directory + result\n    make_folder_if_not_exist(mod_dir)\n\n    tempDir = \".\\\\temp\"\n    if os.path.exists(tempDir):\n        shutil.rmtree(tempDir)\n    os.makedirs(tempDir)\n\n    shutil.copyfile(f, tempDir + \"\\\\t.zip\")\n    os.chdir(tempDir)\n\n    check_output(\"izarce -e *.zip\", shell=True)\n    os.remove(\"t.zip\")\n\n    for filename in glob.glob(\"*\"):\n        shutil.copy(filename, mod_dir)\n    os.chdir(\"..\")\n\n    shutil.rmtree(tempDir)","sub_path":"handle_copy.py","file_name":"handle_copy.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"621458370","text":"import sys\nfrom dependency_injector.wiring import inject, Provide\nfrom menu.menu import Menu\nfrom app.b2c2_client import B2C2Client\nfrom dependency_injection.container import Container\n\n\n@inject\ndef main_menu(\n    b2c2_client: B2C2Client = Provide[Container.b2c2_client],\n):\n    my_menu = Menu(b2c2_client)\n    my_menu.main_menu()\n\n\nif __name__ == \"__main__\":\n    container = Container()\n    container.init_resources()\n    container.config.from_ini(\"config.ini\")\n    container.wire(modules=[sys.modules[__name__]])\n\n    main_menu()","sub_path":"cli_client.py","file_name":"cli_client.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"642374310","text":"import io\r\n\r\ndef get_traits(cur):\r\n    trait_files = ['data/00_traits.txt', 'data/01_traits.txt',\r\n                   'data/02_traits.txt', 'data/03_traits.txt']\r\n    trait_id = 1\r\n    for file in trait_files:\r\n        trait_id = add_traits(file,cur,trait_id)\r\n\r\n\r\ndef add_traits(file,cur,trait_id):\r\n    #open the file and add traits to the traitlookup 
relation\r\n with io.open(file,encoding=\"cp1252\") as f:\r\n for line in f.readlines():\r\n if line.find('=')!=-1 and line.find('{')!=-1 and line[0]!='\\t':\r\n name = line[0:line.find('=')-1]\r\n if name.find('#')!=-1: continue\r\n cur.execute('INSERT INTO traitlookup Values(?,?)',[trait_id,name])\r\n trait_id += 1\r\n return trait_id\r\n","sub_path":"get_traits.py","file_name":"get_traits.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"539895812","text":"#%%\nfrom time import time\nfrom numpy import load\nfrom scipy.spatial.distance import cosine\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.neural_network import MLPClassifier as MLP\nfrom sklearn.externals.joblib import dump\n\n#%%\n# The data is load\nM = load(\"use_data_normalized.npy\")\n\nXtrain = M.item().get('X_train')\nXtest = M.item().get('X_test')\nytrain = M.item().get('y_train')\nytest = M.item().get('y_test')\n\n#%%\n\"\"\"\nfrom sklearn.metrics import confusion_matrix\n\nypred = clf.predict(Xtest)\n\ncm = confusion_matrix(ytest, ypred)\n\nprint(f\"FMR: {100*cm[0][1]/(cm[0][1] + cm[0][0])}%\")\nprint(f\"FNMR: {100*cm[1][0]/(cm[1][0] + cm[1][1])}%\")\n\"\"\"\n\n#%%\n\nimport numpy as np\nfrom math import sqrt\nfrom numpy import linalg as LA\n\n\ndef get_d_prime(clf, Xtest, ytest):\n \n \n values_genuines = []\n values_impostors = []\n \n for pr, real in zip(Xtest, ytest):\n v1, v2 = pr[:512], pr[512:]\n v1 = v1/LA.norm(v1)\n v2 = v2/LA.norm(v2)\n distance = cosine(v1, v2)\n distance = LA.norm(distance)\n \n if real == 0:\n values_impostors.append(distance)\n else:\n values_genuines.append(distance)\n \n impostors = np.array(values_impostors)\n genuines = np.array(values_genuines)\n \n std_impostors = np.std(impostors)\n std_genuines = np.std(genuines)\n \n print(f\"std genuinos: {std_genuines}\")\n print(f\"std impostores: {std_impostors}\")\n \n mean_impostors = np.mean(impostors)\n mean_genuines = np.mean(genuines)\n \n print(f\"mean genuinos: {mean_genuines}\")\n print(f\"mean impostores: {mean_impostors}\")\n \n d_prime = abs(mean_genuines-mean_impostors)/(sqrt(0.5*(std_impostors+std_genuines)))\n \n print(d_prime)\n return d_prime\n\nget_d_prime(None, Xtest, ytest)\n \nprint(\"\\n\\n\\n\\n\\n\")","sub_path":"distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"504499537","text":"#!/usr/bin/env python3\n#'description': ''This module automatically creates API keys for every available region. 
There is an included cleanup feature to remove old \"AWSc2\" keys that are referenced by name.',\nimport datetime\nimport argparse\nfrom copy import deepcopy\nfrom botocore.exceptions import ClientError\n\ndef cleanup(awsattack_main, regions):\n print = awsattack_main.print\n for region in regions:\n client = awsattack_main.get_boto3_client('apigateway', region)\n try:\n keys = client.get_api_keys()['items']\n if len(keys) < 1:\n print(' No keys were found in {}'.format(region))\n for key in keys:\n if key['name'] == 'AWSc2':\n try:\n client.delete_api_key(apiKey=key['id'])\n print(' Key deletion successful for: {}'.format(region))\n except ClientError as error:\n if error.response['Error']['Code'] == 'AccessDeniedException':\n print(' FAILURE: ')\n print(' MISSING NEEDED PERMISSIONS')\n return False\n except ClientError as error:\n if error.response['Error']['Code'] == 'AccessDeniedException':\n print(' FAILURE: ')\n print(' MISSING NEEDED PERMISSIONS')\n return False\n return True\n\n\ndef main(args, awsattack_main):\n session = awsattack_main.get_active_session()\n\n print = awsattack_main.print\n get_regions = awsattack_main.get_regions\n regions = args.regions.split(',') if args.regions else get_regions('apigateway')\n\n summary_data = {}\n api_keys = {}\n \n if cleanup(awsattack_main, regions):\n print(' Old Keys Cleaned')\n summary_data['cleanup'] = True\n else:\n print(' Failed to Cleanup Keys')\n summary_data['cleanup'] = False\n \n\n return summary_data\n\n","sub_path":"ttp/src/api_gateway_cleanup_api_keys_src.py","file_name":"api_gateway_cleanup_api_keys_src.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"99717270","text":"#!/usr/bin/python3\n\nimport os\nimport time\nimport logging\nimport logging.config\nimport yaml\nimport click\nfrom certs_net import CertsNet\nfrom datapower_net import DataPowerNet\nfrom manager_net import ManagerNet\nfrom analytics_net import AnalyticsNet\nfrom prometheus_client import start_http_server\nimport metrics_graphite\nfrom prometheus_client import Gauge\n\n\nlogger = logging.getLogger('trawler')\n\nlogging.basicConfig(\n level=logging.getLevelName(logging.INFO),\n format=\"%(levelname)s: %(asctime)s (%(module)s:%(lineno)d): %(message)s\"\n)\n\n\nclass Trawler(object):\n \"\"\" The main trawling \"\"\"\n config = {\n 'prometheus': {'enabled': False},\n 'graphite': {'enabled': False}\n }\n # Default looping frequency\n frequency = 10\n # Default to True, but detected unless overridden in config\n use_kubeconfig = True\n # Default path for secrets in container build - override with envvar SECRETS\n secrets_path = '/app/secrets'\n graphite = None\n gauges = {}\n\n def __init__(self, config_file=None):\n self.secrets_path = os.getenv('SECRETS', self.secrets_path)\n if config_file:\n self.load_config(config_file)\n if 'logging' in self.config:\n logging.config.dictConfig(self.config['logging'])\n self.logger = logging.getLogger(__name__)\n if self.config['prometheus']['enabled']:\n port = self.config['prometheus'].get('port')\n logger.info('Starting prometheus http port at http://0.0.0.0:{}'.format(port))\n start_http_server(port)\n if self.config['graphite']['enabled']:\n self.graphite = metrics_graphite.instance(self.config['graphite'])\n\n use_kubeconfig = False\n if 'trawler' in self.config:\n use_kubeconfig = self.config['trawler'].get('use_kubeconfig')\n self.frequency = self.config['trawler'].get('frequency', self.frequency)\n\n if 
use_kubeconfig:\n            # Explicit override that we want to use kubeconfig rather than in cluster k8s comms\n            self.use_kubeconfig = True\n        else:\n            # Check for KUBERNETES_SERVICE_HOST to determine if running within kubernetes\n            if os.getenv('KUBERNETES_SERVICE_HOST'):\n                self.use_kubeconfig = False\n\n    def read_secret(self, key):\n        # Helper function to read secrets from mounted k8s secrets\n        try:\n            with open(\"{}/{}\".format(self.secrets_path, key), 'r') as secret:\n                value = secret.read().rstrip()\n            return value\n        except FileNotFoundError as e:\n            logger.exception(e)\n            return None\n\n    def load_config(self, config_file):\n        try:\n            with open(config_file, 'r') as config_yaml:\n                self.config = yaml.safe_load(config_yaml)\n        except FileNotFoundError as e:\n            logger.exception(e)\n            exit(2)\n\n    def set_gauge(self, component, target_name, value, pod_name=None):\n        logger.debug(\"Entering set_gauge - params: ({}, {}, {}, {})\".format(component, target_name, value, pod_name))\n        logger.debug(type(value))\n        if type(value) is float or type(value) is int:\n            target_name = target_name.replace('-', '_')\n            if self.config['prometheus']['enabled']:\n                prometheus_target = \"{}_{}\".format(component, target_name.replace('.', '_'))\n                if prometheus_target not in self.gauges:\n                    logger.info(\"Creating gauge {}\".format(prometheus_target))\n                    if pod_name:\n                        self.gauges[prometheus_target] = Gauge(\n                            prometheus_target,\n                            prometheus_target, ['pod'])\n                    else:\n                        self.gauges[prometheus_target] = Gauge(\n                            prometheus_target,\n                            prometheus_target)\n\n                logger.debug(\"Setting gauge {} to {}\".format(\n                    self.gauges[prometheus_target]._name, value))\n                if pod_name:\n                    self.gauges[prometheus_target].labels(pod_name).set(value)\n                else:\n                    self.gauges[prometheus_target].set(value)\n            if self.config['graphite']['enabled']:\n                if pod_name:\n                    metric_name = \"{}.{}.{}\".format(component, pod_name, target_name)\n                else: \n                    metric_name = \"{}.{}\".format(component, target_name)\n                self.graphite.stage(metric_name, value)\n\n    def trawl_metrics(self):\n        # Initialise\n        logger.info(\"Laying nets...\")\n        nets = []\n        if 'certs' in self.config['nets'] and self.config['nets']['certs'].get('enabled', True):\n            nets.append(CertsNet(self.config['nets']['certs'], self))\n        if 'datapower' in self.config['nets'] and self.config['nets']['datapower'].get('enabled', True):\n            nets.append(DataPowerNet(self.config['nets']['datapower'], self))\n        if 'manager' in self.config['nets'] and self.config['nets']['manager'].get('enabled', True):\n            nets.append(ManagerNet(self.config['nets']['manager'], self))\n        if 'analytics' in self.config['nets'] and self.config['nets']['analytics'].get('enabled', True):\n            nets.append(AnalyticsNet(self.config['nets']['analytics'], self))\n\n        while True:\n            logger.info(\"Trawling for metrics...\")\n            for net in nets:\n                net.fish()\n            if self.graphite:\n                self.graphite.store()\n            time.sleep(self.frequency)\n\n\n@click.command()\n@click.version_option()\n@click.option('-c', '--config', required=False, envvar='CONFIG',\n              help=\"Specifies an alternative config file\",\n              default=None,\n              type=click.Path())\ndef cli(config=None):\n    trawler = Trawler(config)\n    trawler.trawl_metrics()\n\n\nif __name__ == '__main__':\n    cli()\n","sub_path":"trawler.py","file_name":"trawler.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
 +{"seq_id":"527501907","text":"# -*- coding:utf-8 -*-\n\n__auth__ = 'peic'\n\n''' \nPython coroutines and asynchronous IO\nHow to use yield and yield from\nHow to use the asyncio module\n'''\n\n# -*- Coroutines -*-\n# First you need to understand yield\n# A function containing a yield statement becomes a generator: it runs on each next() call, returns when it reaches a yield statement, and resumes from that yield on the next call, much like a CPU interrupt handler\n\n\ndef consumer():\n    c_r = ''\n    while True:\n    \t# here yield receives the argument the caller passes in via send()\n    \t# in c.send(p_n), the value of p_n is delivered through yield and assigned to c_n\n    \t# send(value):The value argument becomes the result of the current yield expression. \n        # the consumer receives a message through yield, processes it, and passes the result back through yield; the received value is what send() delivered (assigned to c_n), the returned value is c_r\n        c_n = yield c_r\n        if not c_n:\n            return\n        print('[CONSUMER] Consuming %s...' % c_n)\n        c_r = '200 OK'\n\ndef produce(c):\n    c.send(None)\n    p_n = 0\n    while p_n < 5:\n        p_n = p_n + 1\n        print('[PRODUCER] Producing %s...' % p_n)\n\n\n        # calling send() on a generator (c here) passes the argument (p_n) in as the value of the generator's current yield expression (c_n)\n        # the value of the send() expression itself (assigned to p_r) is the generator's next yielded value (c_r)\n        # in this call-and-return shape, c.send() runs one step of the generator (consumer) and then returns to the caller (produce); yield acts as the interrupt-and-return point\n        p_r = c.send(p_n)\n        print('[PRODUCER] Consumer return: %s' % p_r)\n    c.close()\n\nc = consumer()\nproduce(c)\n\n\n\n\n# -*- Asynchronous IO -*-\nimport asyncio\nimport threading\n\n# @asyncio.coroutine marks a generator as a coroutine\n@asyncio.coroutine\ndef sub():\n    print('sub start: ...')\n    n = 10\n    while True:\n        print('yield start')\n        # asyncio.sleep() is itself a coroutine-type generator, so the thread is not blocked; execution moves straight on to the next loop while waiting for yield from to return\n        # roughly speaking, reaching yield starts a coroutine (similar to starting a new thread), and the next loop continues whether or not that coroutine has finished\n        # once the new coroutine starts, print('yield start') runs immediately because the loop keeps going, which you can observe in the printed output\n        r = yield from asyncio.sleep(1)\n        n = n - 1\n        print('---sub: %s, thread:%s' %(n, threading.currentThread()))\n        if n == 0:\n            break\n\n@asyncio.coroutine\ndef add():\n    print('add start: ...')\n    n = 10\n    while True:\n        print('yield start')\n        r = yield from asyncio.sleep(2)\n        n = n + 1\n        print('+++add: %s, thread:%s' %(n, threading.currentThread()))\n        if n > 20:\n            break\n\n\n# get the event loop:\nloop = asyncio.get_event_loop()\n# run the coroutines\ntasks = [add(),sub()]\nloop.run_until_complete(asyncio.wait(tasks))\nloop.close()\n","sub_path":"python-toys/learn-python/Asyncio_Coroutine.py","file_name":"Asyncio_Coroutine.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"90072507","text":"import logging\nimport socket\nimport uuid\n\nimport docker\nimport pytest\n\nfrom viz import Client\nfrom viz.instance import set_shared_chain_instance\n\nlog = logging.getLogger(\"vizapi\")\nlog.setLevel(logging.DEBUG)\n\n\n@pytest.fixture(scope=\"session\")\ndef private_keys():\n    return [\n        \"5JabcrvaLnBTCkCVFX5r4rmeGGfuJuVp4NAKRNLTey6pxhRQmf4\",\n        \"5Hw9YPABaFxa2LooiANLrhUK5TPryy8f7v9Y1rk923PuYqbYdfC\",\n        \"5J9DBCRX5D2ZUUuy9qV2ef9p5sfA3ydHsDs2G531bob7wbEigDJ\",\n    ]\n\n\n@pytest.fixture(scope=\"session\")\ndef default_account():\n    return \"viz\"\n\n\n@pytest.fixture(scope=\"session\")\ndef session_id():\n    \"\"\"\n    Generate unique session id.\n\n    This is needed in case the testsuite runs in parallel on the same server, for example if CI/CD is being used. 
CI/CD\n    infrastructure may run tests for each commit, so these tests should not influence each other.\n    \"\"\"\n    return str(uuid.uuid4())\n\n\n@pytest.fixture(scope=\"session\")\ndef unused_port():\n    \"\"\"Obtain unused port to bind some service.\"\"\"\n\n    def _unused_port():\n        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n            s.bind((\"127.0.0.1\", 0))\n            return s.getsockname()[1]\n\n    return _unused_port\n\n\n@pytest.fixture(scope=\"session\")\ndef docker_manager():\n    \"\"\"Initialize docker management client.\"\"\"\n    return docker.from_env(version=\"auto\")\n\n\n@pytest.fixture(scope=\"session\")\ndef viz_testnet(session_id, unused_port, docker_manager):\n    \"\"\"Run vizd inside local docker container.\"\"\"\n    port_http = unused_port()\n    port_ws = unused_port()\n    container = docker_manager.containers.run(\n        image=\"vizblockchain/vizd:testnet\",\n        name=\"viz-testnet-{}\".format(session_id),\n        ports={\"8090\": port_http, \"8091\": port_ws},\n        detach=True,\n    )\n    container.http_port = port_http\n    container.ws_port = port_ws\n    yield container\n    container.remove(v=True, force=True)\n\n\n@pytest.fixture(scope=\"session\")\ndef viz_instance_ws(viz_testnet, private_keys):\n    \"\"\"Initialize BitShares instance connected to a local testnet.\"\"\"\n    viz = Client(node=\"ws://127.0.0.1:{}\".format(viz_testnet.ws_port), keys=private_keys, num_retries=-1)\n    set_shared_chain_instance(viz)\n\n    return viz\n\n\n@pytest.fixture(scope=\"session\")\ndef viz(viz_instance_ws):\n    \"\"\"Shortcut to ws instance.\"\"\"\n\n    return viz_instance_ws\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"547428187","text":"import requests\nimport json\n\n\nclass RunMethod(object):\n\n    def post_main(self, url, data=None, header=None, cookies=None):\n        res = None\n        if header is not None:\n            res = requests.post(url=url, data=data, headers=header, cookies=cookies)\n            # if an https request cannot be sent, add verify=False to skip certificate verification\n        else:\n            res = requests.post(url=url, data=data, cookies=cookies)\n        return res\n\n    def get_main(self, url, params=None, header=None, cookies=None):\n        res = None\n        if header is not None:\n            res = requests.get(url=url, params=params, headers=header, cookies=cookies)\n        else:\n            res = requests.get(url=url, params=params, cookies=cookies)\n        return res\n\n    def run_main(self, method, url, data=None, header=None, params=None, cookies=None):\n        res = None\n        if method == \"post\":\n            res = self.post_main(url, data, header, cookies)\n        else:\n            res = self.get_main(url, params, header, cookies)\n        return res\n        #return json.dumps(res, ensure_ascii=False)\n\nif __name__ == \"__main__\":\n    run = RunMethod()\n    url = \"http://hubskins.zzbtest.com/front/member/login\"\n    data = {\"user_email\": \"1102055693@qq.com\",\"user_pass\": \"wu123456\"}\n    res = run.run_main(\"post\", url, data)\n    print(res)\n    print(type(res))\n\n\n\n\n","sub_path":"base/runmain.py","file_name":"runmain.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"314956956","text":"# adapted code from Niklas\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.io\nfrom matplotlib_scalebar.scalebar import ScaleBar\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom scipy import ndimage as ndi\nfrom scipy.optimize import curve_fit\nfrom skimage.feature import 
peak_local_max\nfrom skimage.filters import gaussian\nfrom skimage.morphology import flood\nfrom skimage.segmentation import watershed\nfrom skimage.transform import resize\n\nfrom util.inches import cm_to_inch\nfrom util.tum_jet import tum_jet\n\n\nclass MetalLuminescencePlotter:\n\n def __init__(self):\n mpl.rcParams['text.latex.preamble'] = [r'\\usepackage{amsmath}']\n self.min_v = 0.00e2\n self.max_v = 1.4e2\n self.map_c = tum_jet\n\n def plot(self):\n title_pad = 3\n interpolation = 'gaussian'\n fig, axs = plt.subplots(3, 3, gridspec_kw={'width_ratios': [1, 1, 0.2], 'wspace': 0.10, 'hspace': 0.3},\n figsize=(cm_to_inch(15) * 0.8, cm_to_inch(14)))\n plt.sca(axs[0, 0])\n nicer_ax2(plt.gca())\n d_ni = r'D$_\\text{Ni}$'\n d_pd = r'D$_\\text{Pd}$'\n\n d_uv = r'D$_\\text{UV}$'\n d_al_ox = r'D$_\\text{AlOx}$'\n plt.title(d_ni, fontsize=8, pad=title_pad)\n plt.tick_params(direction='inout', left=False, bottom=False)\n plt.xlim(0.5, 20)\n plt.ylim(0, 19.5)\n plt.gca().yaxis.set_ticks([])\n plt.gca().xaxis.set_ticks([])\n filepath = 'metal_luminescence/2019-06-06D45/scan.005.mat'\n mat = scipy.io.loadmat(filepath)\n x45 = mat['x'][0]\n y45 = mat['y'][0]\n z45 = mat['result'] / 1e3\n filepath = 'metal_luminescence/2019-06-08/scan.003.mat'\n mat = scipy.io.loadmat(filepath)\n x01b = mat['x'][0]\n y01b = mat['y'][0]\n z01b = mat['result'] / 1e3\n filepath = 'metal_luminescence/2019-06-12/scan.020.mat'\n mat = scipy.io.loadmat(filepath)\n x44 = mat['x'][0]\n y44 = mat['y'][0]\n z44 = mat['result'] / 1e3\n filepath = 'metal_luminescence/2019-07-9/scan.010.mat'\n mat = scipy.io.loadmat(filepath)\n x02c = mat['x'][0]\n y02c = mat['y'][0]\n z02c = mat['result'] / 1e3\n plt.imshow(z45.copy(), vmin=self.min_v, vmax=self.max_v, cmap=self.map_c, interpolation=interpolation,\n extent=[0, x45[-1] - x45[0], 0, y45[-1] - y45[0]])\n scalebar45 = ScaleBar(1E-6)\n plt.gca().add_artist(scalebar45)\n\n plt.sca(axs[0, 1])\n nicer_ax2(plt.gca())\n plt.title(d_pd, fontsize=8, pad=title_pad)\n plt.tick_params(direction='inout', right=True, top=True)\n plt.xlim(0.5, 20)\n plt.ylim(0, 19.5)\n plt.gca().yaxis.set_ticks([])\n plt.gca().xaxis.set_ticks([])\n plt.imshow(z44, vmin=self.min_v, vmax=self.max_v, cmap=self.map_c, interpolation=interpolation,\n extent=[0, x44[-1] - x44[0], 0, y44[-1] - y44[0]])\n scalebar44 = ScaleBar(1E-6)\n plt.gca().add_artist(scalebar44)\n\n plt.sca(axs[1, 0])\n nicer_ax2(plt.gca())\n plt.title(d_uv, fontsize=8, pad=title_pad)\n plt.tick_params(direction='inout', left=True, bottom=True)\n plt.xlim(0, 19.5)\n plt.ylim(0, 19.5)\n plt.gca().yaxis.set_ticks([])\n plt.gca().xaxis.set_ticks([])\n plt.imshow(z01b, vmin=self.min_v, vmax=self.max_v, cmap=self.map_c, interpolation=interpolation,\n extent=[0, x01b[-1] - x01b[0], 0, y01b[-1] - y01b[0]])\n scalebar01b = ScaleBar(1E-6)\n plt.gca().add_artist(scalebar01b)\n\n plt.sca(axs[1, 1])\n nicer_ax2(plt.gca())\n plt.title(d_al_ox, fontsize=8, pad=title_pad)\n plt.tick_params(direction='inout', right=True, top=True)\n plt.xlim(0.5, 20)\n plt.ylim(0, 19.5)\n plt.gca().yaxis.set_ticks([])\n plt.gca().xaxis.set_ticks([])\n plt.imshow(z02c, vmin=self.min_v, vmax=self.max_v, cmap=self.map_c, interpolation=interpolation,\n extent=[0, x02c[-1] - x02c[0], 0, y02c[-1] - y02c[0]])\n scalebar02c = ScaleBar(1E-6)\n plt.gca().add_artist(scalebar02c)\n\n plt.sca(axs[2, 0])\n imat = 1\n mats = [z45 - 15.29, z44 - 8.74, z01b - 10.16, z02c - 1.93]\n titles = ['Ni', 'Pd', 'UV', 'AlOx']\n thresholds = [50 - 15.29, 50 - 8.74, 40 - 10.16, 30 - 1.93]\n areas = [(4, 4), (4, 
4), (4, 4), (4, 4)]\n hist_dat_mat = []\n for mat, title, threshold, area in zip(mats, titles, thresholds, areas):\n hist_dat_mat.append(find_peaks_simple(mat, threshold=threshold, area=area, sigma=1.5))\n wid = np.mean(hist_dat_mat[imat][0][:-1] - hist_dat_mat[imat][0][1:])\n plt.bar(hist_dat_mat[imat][0], hist_dat_mat[imat][1], wid, color='xkcd:gray')\n plt.plot(hist_dat_mat[imat][0], hist_dat_mat[imat][2], lw=0.7, color='xkcd:black')\n plt.plot(hist_dat_mat[imat][0], hist_dat_mat[imat][3], lw=0.7, color='#f5ea6a', ls='--')\n plt.plot(hist_dat_mat[imat][0], hist_dat_mat[imat][4], lw=0.7, color='#f5ea6a', ls='--')\n\n plt.gca().set_aspect(165 / 45)\n plt.xlim(45, 210)\n plt.ylim(0, 45)\n plt.xlabel(r'$I^{(k)}$ (kcps)')\n plt.ylabel('\\#')\n plt.annotate('bunched', xy=(150, 24), fontsize=6, ha='center', )\n plt.annotate(\"\", xy=(138, 15), xytext=(150, 23), arrowprops=dict(arrowstyle=\"->\"), fontsize=6)\n\n plt.annotate('individual', xy=(74, 33), xytext=(110, 38.5), fontsize=6, ha='left', va='center')\n plt.annotate(\"\", xy=(74, 36), xytext=(110, 39), arrowprops=dict(arrowstyle=\"->\"), fontsize=6)\n plt.title(d_pd, fontsize=8, pad=0)\n plt.sca(axs[2, 1])\n\n yield_val = [5.612, 5.016, 3.054, 2.888]\n brightness_val = [hist_dat_mat[i][5] for i in range(len(hist_dat_mat))]\n brightness_sigma = [hist_dat_mat[i][6] for i in range(len(hist_dat_mat))]\n yield_error = [0.423, 0.385, 0.248, 0.234]\n plt.errorbar(yield_val, brightness_val, xerr=yield_error, yerr=brightness_sigma, fmt='s', ms=2, lw=1, color='k')\n plt.annotate('Ni', (yield_val[0], brightness_val[0]), xytext=(yield_val[0] - 0.07, brightness_val[0] + 2),\n ha='right', fontsize=6, va='bottom')\n plt.annotate('Pd', (yield_val[1], brightness_val[1]), xytext=(yield_val[1] + 0.1, brightness_val[1] - 4),\n ha='left', fontsize=6, va='top')\n plt.annotate('UV', (yield_val[2], brightness_val[2]), xytext=(yield_val[2] + 0.08, brightness_val[2] + 2),\n ha='left', fontsize=6, va='bottom')\n plt.annotate('AlOx', (yield_val[3], brightness_val[3]), xytext=(yield_val[3] - 0.07, brightness_val[3] - 3),\n ha='right', fontsize=6, va='top')\n nicer_ax(plt.gca())\n plt.gca().set_aspect(4 / 70)\n axs[2, 1].set_xlim(1.9, 5.9)\n axs[2, 1].set_ylim(30, 100)\n axs[2, 1].set_xlabel(r'yield $\\eta$ (\\%)')\n axs[2, 1].set_ylabel(r'$I$ (kcps)')\n axs[2, 1].set_xticks([3, 5])\n axs[2, 1].set_yticks([50, 75])\n axs[2, 1].yaxis.set_label_position(\"right\")\n axs[2, 1].yaxis.tick_right()\n axs[2, 1].spines['left'].set_linewidth(0.5)\n axs[2, 1].spines['bottom'].set_linewidth(0.5)\n axs[2, 1].spines['left'].set_linewidth(0.5)\n axs[2, 1].spines['bottom'].set_linewidth(0.5)\n plt.grid(True, lw=0.5, ls=':')\n\n axs[1, 2].axis('off')\n axs[2, 2].axis('off')\n\n gs = axs[0, 2].get_gridspec()\n # remove the underlying axes\n for ax in axs[:2, 2]:\n ax.remove()\n axcolorbar = fig.add_subplot(gs[:2, 2])\n axcolorbar.axis('off')\n plt.sca(axcolorbar)\n divider = make_axes_locatable(axcolorbar)\n cax1 = divider.append_axes(\"left\", size=\"80%\", pad=0.00)\n norm = mpl.colors.Normalize(vmin=self.min_v, vmax=self.max_v)\n mpl.colorbar.ColorbarBase(cax1, cmap=tum_jet,\n norm=norm,\n orientation='vertical',\n ticks=np.arange(0, 150, 40))\n\n cax1.set_title('kcps', loc='left', fontsize=6.5)\n\n plt.rc('scalebar', sep=1)\n plt.rc('scalebar', frameon=False)\n plt.rc('scalebar', box_alpha=0.2)\n plt.rc('scalebar', border_pad=0)\n plt.rc('scalebar', length_fraction=0.3)\n plt.rc('scalebar', label_loc='top')\n plt.rc('scalebar', location='lower right')\n plt.rc('scalebar', 
color='w')\n\n plt.figtext(0.165, 0.895, 'a)', fontsize=8)\n plt.figtext(0.52, 0.895, 'b)', fontsize=8)\n plt.figtext(0.165, 0.623, 'c)', fontsize=8)\n plt.figtext(0.52, 0.623, 'd)', fontsize=8)\n plt.figtext(0.165, 0.345, 'e)', fontsize=8)\n plt.figtext(0.52, 0.345, 'f)', fontsize=8)\n\n fig.savefig('metal_luminescence' + '/confocal_pic.jpg', dpi=600, pad_inches=0, bbox_inches='tight')\n\n\ndef nicer_ax2(ax):\n ax.spines['top'].set_visible(True)\n ax.spines['right'].set_visible(True)\n ax.spines['left'].set_linewidth(0.8)\n ax.spines['bottom'].set_linewidth(0.8)\n ax.spines['left'].set_linewidth(0.8)\n ax.spines['bottom'].set_linewidth(0.8)\n ax.yaxis.tick_left()\n\n\ndef find_peaks_simple(mat_in, threshold=5., area=(4, 4), sigma=1., borders=None):\n if borders is None:\n borders = [0.305, 1.33489786, 1.42829031, 1.80438237, 2.79750851]\n local_max, local_max_val, marks, ws_area, flood_sections, flood_values = get_flood_array(mat_in, threshold, area)\n\n resize_factor = 3\n upsampled = resize(mat_in.copy(), np.array(mat_in.shape) * resize_factor, order=0)\n upsampled = gaussian(upsampled, sigma=sigma, cval=0)\n local_max_up, local_max_val_up, marks_up, ws_area_up, flood_sections_up, flood_values_up = get_flood_array(\n upsampled, threshold, np.array(area) * resize_factor)\n\n bins = 50\n values, bin_edges = np.histogram(local_max_val, bins=bins, density=False)\n bin_mid = (bin_edges[1:] + bin_edges[:-1]) / 2\n single_value = bin_mid[np.argmax(values)]\n\n values_up, binedges_up = np.histogram(local_max_val_up, bins=120, density=False)\n bin_mid_up = (binedges_up[1:] + binedges_up[:-1]) / 2\n single_value_up = bin_mid_up[np.argmax(values_up)]\n\n values_up, binedges_up = np.histogram(local_max_val_up / single_value_up, bins=bins, density=False, range=(0, 3))\n bin_mid_up = (binedges_up[1:] + binedges_up[:-1]) / 2\n\n p0 = [0.1, 1, 0.4, 0.21, 1.7, 0.5]\n bounds = ([0, 0, 0, 0, 1.3, .38], [np.inf, 1.3, 60, np.inf, np.inf, 20])\n try:\n coeff, b = curve_fit(double_peak, bin_mid_up, values_up, p0=p0, bounds=bounds)\n except:\n coeff = p0\n\n coeff_pd = pd.DataFrame()\n count_range = []\n count_range_up = []\n for ni, (i, j) in enumerate(\n [(borders[0], borders[1]), (borders[1], borders[2]), (borders[2], borders[3]), (borders[3], borders[4]),\n (borders[4], 1000)]):\n count_mask = (local_max_val > (i) * single_value) & (local_max_val <= (j) * single_value).astype('int')\n count_mask_up = (local_max_val_up > (i) * single_value_up) & (local_max_val_up <= (j) * single_value_up).astype(\n 'int')\n count_range.append(np.count_nonzero(count_mask) * (ni + 1))\n count_range_up.append(np.count_nonzero(count_mask_up) * (ni + 1))\n coeff_pd['CountRange'] = count_range\n coeff_pd['CountRange_up'] = count_range_up\n\n rng = bin_mid_up\n peak_d = double_peak(rng, *coeff)\n peak1, peak2 = double_peak_ind(rng, *coeff)\n\n return bin_mid_up * single_value_up, values_up, peak_d, peak1, peak2, coeff[1] * single_value_up, coeff[\n 2] * single_value_up / 2, len(local_max_val_up), len(local_max_val)\n\n\ndef nicer_ax(ax):\n ax.spines['top'].set_visible(True)\n ax.spines['right'].set_visible(True)\n ax.spines['left'].set_linewidth(0.8)\n ax.spines['bottom'].set_linewidth(0.8)\n ax.spines['left'].set_linewidth(0.8)\n ax.spines['bottom'].set_linewidth(0.8)\n ax.yaxis.tick_left()\n\n\ndef get_flood_array(matIn, threshold, area):\n local_maxi = peak_local_max(matIn, indices=False, footprint=np.ones(area), threshold_abs=threshold)\n markers = ndi.label(local_maxi)[0]\n labels = watershed(-matIn, markers)\n\n 
flood_sections = np.zeros_like(labels)\n flood_sections -= 1\n local_maxi_val = []\n for iii in range(1, np.max(markers) + 1):\n local_maxiS = np.argwhere(markers == iii)\n local_maxi_val.append(matIn[tuple(local_maxiS[0])])\n mask = (labels == iii) & flood(-matIn, tuple(local_maxiS[0]),\n tolerance=0.6 * (matIn[local_maxiS[0, 0], local_maxiS[0, 1]]))\n flood_sections[mask] = iii\n\n flood_seq_values = []\n for i in range(1, np.max(labels) + 1):\n val = np.sum(matIn[(flood_sections == i)])\n flood_seq_values.append(val)\n\n return local_maxi, local_maxi_val, markers, labels, flood_sections, flood_seq_values\n\n\ndef peak(x, *p):\n a1, b1, c1 = p\n return a1 * 1 / 2 / ((c1 / 2) ** 2 + (x - b1) ** 2)\n\n\ndef double_peak(x, *p):\n return peak(x, *p[:3]) + peak(x, *p[3:])\n\n\ndef double_peak_ind(x, *p):\n return peak(x, *p[:3]), peak(x, *p[3:])\n\n\nif __name__ == '__main__':\n plotter = MetalLuminescencePlotter()\n plotter.plot()\n","sub_path":"images/chapter_6/metal_luminescence.py","file_name":"metal_luminescence.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"202944470","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nimport requests\n\n\ndef index(request):\n\n soon_url = \"https://imdb-api.com/en/API/ComingSoon/k_pftzqnp0\"\n soon = requests.get(soon_url).json()\n release_url = (\"https://imdb-api.com/en/API/InTheaters/k_pftzqnp0\")\n release = requests.get(release_url).json()\n top_url = \"https://imdb-api.com/en/API/BoxOffice/k_pftzqnp0\"\n top = requests.get(top_url).json()\n lastest_url = \"https://imdb-api.com/en/API/InTheaters/k_pftzqnp0\"\n lastest = requests.get(lastest_url).json()\n context = {\"soon\": soon, \"release\": release,\n \"top\": top, \"lastest\": lastest}\n return render(request, \"home.html\", context)\n\n\ndef movies(request):\n return render(request, \"movies.html\")\n\n\ndef login(request):\n return render(request, \"login.html\")\n\n\ndef celebrities(request):\n return render(request, \"celebrities.html\")\n\n\ndef moviedetails(request, pk):\n print(pk)\n url = 'https://imdb-api.com/en/API/Title/k_lwm5x736/'+pk+'/Trailer,Ratings,Wikipedia,'\n details = requests.get(url).json()\n context = {\"details\": details}\n\n return render(request, \"movie-details.html\", context)\n\n\ndef top_movies(request):\n return render(request, \"top-movies.html\")\n\n\ndef blog(request):\n return render(request, \"blog.html\")\n\n\ndef blog_details(request):\n return render(request, \"blog-details.html\")\n\n\ndef register_user(request):\n\n if request.method == 'POST':\n user_form = userForm(request.POST)\n user_info_form = userInfoForm(request.POST, request.FILES)\n\n if user_form.is_valid() and user_info_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n\n user_info = user_info_form.save(commit=False)\n user_info.user = user\n user_info.save()\n\n username = request.POST.get('username')\n password = request.POST.get('password')\n\n user = authenticate(username=username, password=password)\n\n if user:\n login(request, user)\n\n return redirect('home')\n\n else:\n context = {'user_form.errors': user_form.errors,\n 'user_info_form.errors': user_info_form.errors}\n return render(request, 'user/register.html', context)\n else:\n\n user_form = userForm()\n user_info_form = userInfoForm()\n\n context = {'user_form': user_form,\n 'user_info_form': user_info_form}\n\n return 
render(request, 'user/register.html', context)\n\n\ndef searchresult(request):\n\n if request.method == \"POST\":\n Query = request.POST.get(\"Query\")\n query_url = \"https://imdb-api.com/en/API/SearchMovie/k_08ug9l32/\"+Query\n query = requests.get(query_url).json()\n print(query)\n context = {\"query\": query}\n return render(request, 'result.html', context)\n","sub_path":"movieapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"205255310","text":"logo0_path = \"/home/hengkai/Desktop/file-FILE5_32/install/3/pdr3.log\"\nlogo3_path = \"/home/hengkai/Desktop/file-FILE5_32/install/0/pdr3.log\"\nlogtrace_path = \"/home/hengkai/Desktop/AFL/llvm_mode/trace/t3.log\"\nid_list = []\nbranch_list = []\nline_list = []\nlogo3_file = open(logo3_path, \"r\")\nlogo0_file = open(logo0_path, \"r\")\nlogtrace_file = open(logtrace_path, \"r\")\nfor line in logo3_file:\n if(line.split(\":\")[1] == \"No-New-Branch\\n\"):\n id_list.append(line.split(\":\")[0])\n\ncount = 0\nwhile 1:\n line = logo0_file.readline()\n target_str = \"0\" + id_list[count] + \",src\"\n if not line:\n break\n if target_str in line:\n line = logo0_file.readline()\n #branch_list.append(line.split(\":\")[0])\n end_str = \",src\"\n while(end_str not in line):\n branch_list.append(line.split(\":\")[0])\n last_line = logo0_file.tell()\n line = logo0_file.readline()\n \n if count < len(id_list) - 1:\n count += 1\n if(int(id_list[count]) == (int(id_list[count - 1]) + 1)):\n logo0_file.seek(last_line)\n\nprint(id_list)\nprint(branch_list)\nbranch_list = list(set(branch_list))\nprint(branch_list)\nfor line in logtrace_file:\n if(line.split(\":\")[1] in branch_list):\n line_list.append(line)\nprint(sorted(line_list))\nprint(len(line_list))\n","sub_path":"project/useless/id-to-line.py","file_name":"id-to-line.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"304635696","text":"\n\n#calss header\nclass _VITAMIN():\n\tdef __init__(self,): \n\t\tself.name = \"VITAMIN\"\n\t\tself.definitions = [u'any of a group of natural substances that are necessary in small amounts for the growth and good health of the body: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_vitamin.py","file_name":"_vitamin.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"280569015","text":"import inspect\nimport platform\nfrom functools import (partial,\n reduce)\nfrom itertools import chain\nfrom types import ModuleType\nfrom typing import (Any,\n List,\n Union)\n\nfrom hypothesis import strategies\n\nfrom paradigm.definitions import (is_supported,\n unsupported)\nfrom paradigm.definitions.utils import (_add,\n _to_contents,\n _update)\nfrom paradigm.hints import (MethodDescriptorType,\n WrapperDescriptorType)\nfrom tests.strategies import modules_list\n\n\ndef to_inner_callables(objects: List[Union[ModuleType, type]]) -> List[Any]:\n return list(filter(callable,\n chain.from_iterable(map(_to_contents, objects))))\n\n\nmodules_callables_list = to_inner_callables(modules_list)\nmodules_callables = strategies.sampled_from(modules_callables_list)\nclasses_list = 
list(filter(is_supported,\n filter(inspect.isclass, modules_callables_list)))\nclasses_callables_list = to_inner_callables(classes_list)\nclasses = strategies.sampled_from(classes_list)\nclasses_callables = strategies.sampled_from(classes_callables_list)\nmethods = classes_callables.filter(inspect.isfunction)\n\n\ndef is_method_descriptor(object_: Any) -> bool:\n return isinstance(object_, MethodDescriptorType)\n\n\nmethods_descriptors = (classes_callables.filter(is_method_descriptor)\n .filter(is_supported))\n\n\ndef is_wrapper_descriptor(object_: Any) -> bool:\n return isinstance(object_, WrapperDescriptorType)\n\n\nwrappers_descriptors = (classes_callables.filter(is_wrapper_descriptor)\n .filter(is_supported))\nfunctions = (modules_callables.filter(inspect.isfunction)\n .filter(is_supported))\nbuilt_in_functions = (modules_callables.filter(inspect.isbuiltin)\n .filter(is_supported))\ntop_coverage_callables = set()\n_add(top_coverage_callables, '_compression', 'BaseStream')\n_update(top_coverage_callables, 'builtins', ['dict',\n 'set.__init__', 'set.__lt__'])\n_add(top_coverage_callables, 'configparser', 'DuplicateSectionError')\n_add(top_coverage_callables, 'ctypes', 'c_byte')\n_add(top_coverage_callables, 'formatter', 'NullFormatter.pop_alignment')\n_update(top_coverage_callables, 'inspect', ['Signature.__init__',\n 'getinnerframes'])\n_add(top_coverage_callables, 'logging', 'Handler.get_name')\n_add(top_coverage_callables, 'os', 'times_result')\n_add(top_coverage_callables, 'sqlite3', 'Connection.rollback')\n_add(top_coverage_callables, 'symtable', 'Symbol.is_global')\n_add(top_coverage_callables, 'tarfile', 'EOFHeaderError')\n_add(top_coverage_callables, 'telnetlib', 'Telnet.fileno')\n_add(top_coverage_callables, 'time', 'struct_time')\n_update(top_coverage_callables, 'tkinter', ['Misc.focus_force',\n 'Wm.iconmask'])\n_add(top_coverage_callables, 'turtle', 'RawTurtle.turtlesize')\n_add(top_coverage_callables, 'weakref', 'ref')\n_add(top_coverage_callables, 'zipfile', 'error')\ntop_coverage_callables = strategies.sampled_from(list(top_coverage_callables))\ncallables = (built_in_functions\n | classes\n | functions\n | methods\n | methods_descriptors\n | wrappers_descriptors)\npartial_callables = callables.map(partial)\nif platform.python_implementation() == 'PyPy':\n overloaded_callables = strategies.nothing()\nelse:\n overloaded_callables = strategies.sampled_from([int, reduce, super, type])\nunsupported_callables = strategies.sampled_from(\n list(unsupported.built_in_functions\n | unsupported.classes\n | unsupported.methods_descriptors\n | unsupported.wrappers_descriptors))\n","sub_path":"tests/signatures_tests/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":3893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"619615366","text":"import math\nfrom datetime import datetime\nfrom multiprocessing import Pool\nfrom pathlib import Path\nfrom typing import Optional\n\nimport pandas as pd\nfrom dateutil import tz\nfrom sqlalchemy.types import Date, DateTime, Float, Integer, String, Text\n\nfrom config import config\nfrom lib import db, helpers, transformers\n\n# French timezone\nFRA = tz.gettz(\"Europe/Paris\")\n\n\ndef create_table_cis_atc():\n source = {\"pattern\": \"CIS-ATC_2021-01-04.xlsx\"}\n read_excel_config = {\n \"dtype\": {\"cis\": str},\n \"index_col\": \"cis\",\n \"usecols\": [\"cis\", \"atc\", \"nom_atc\"],\n \"names\": [\"cis\", \"atc\", \"nom_atc\"],\n }\n to_sql_config = {\n \"name\": 
\"specialite_atc\",\n \"index\": True,\n \"if_exists\": \"replace\",\n \"dtype\": {\"cis\": String(16)},\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_excel_to_df(read_excel_config, _path)\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_bdpm_cis():\n \"\"\"\n Table specialite\n \"\"\"\n source = config[\"bdpm_cis_url\"]\n tmp_path = Path(config[\"tmp_folder\"]).joinpath(\"BDPM_CIS.txt\")\n custom_date_parser = lambda x: datetime.strptime(x, \"%d/%m/%Y\")\n read_csv_config = {\n \"sep\": \"\\t\",\n \"encoding\": \"latin1\",\n \"names\": [\n \"cis\",\n \"nom\",\n \"forme_pharma\",\n \"voie_admin\",\n \"statut_amm\",\n \"type_amm\",\n \"etat_commercialisation\",\n \"date_amm\",\n \"statut_bdpm\",\n \"num_autorisation\",\n \"titulaires\",\n \"surveillance_renforcee\",\n ],\n \"header\": None,\n \"index_col\": \"cis\",\n \"parse_dates\": [\"date_amm\"],\n \"date_parser\": custom_date_parser,\n \"dtype\": {\"cis\": str},\n }\n to_sql_config = {\n \"name\": \"specialite\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"nom\": Text,\n \"forme_pharma\": Text,\n \"voie_admin\": Text,\n \"statut_amm\": Text,\n \"type_amm\": Text,\n \"etat_commercialisation\": Text,\n \"date_amm\": Date,\n \"statut_bdpm\": Text,\n \"num_autorisation\": Text,\n \"titulaires\": Text,\n \"surveillance_renforcee\": Text,\n },\n }\n _path = helpers.download_file_from_url(source, tmp_path)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n # cleaning\n helpers.serie_to_lowercase(df, read_csv_config[\"names\"][1:])\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f\"tmp file {tmp_path} not found\")\n\n\ndef create_tables_rsp_compo():\n \"\"\"\n Table substance\n \"\"\"\n source = config[\"rsp_compo_url\"]\n tmp_path = Path(config[\"tmp_folder\"]).joinpath(\"COMPO.txt\")\n read_csv_config = {\n \"sep\": \"\\t\",\n \"encoding\": \"latin1\",\n \"names\": [\n \"cis\",\n \"elem_pharma\",\n \"code\",\n \"nom\",\n \"dosage\",\n \"ref_dosage\",\n \"nature_composant\",\n \"num_lien\",\n \"v\",\n ],\n \"header\": None,\n \"index_col\": \"code\",\n \"dtype\": {\"cis\": str, \"code\": str},\n }\n to_sql_config = {\n \"name\": \"substance\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"nom\": Text,\n },\n }\n _path = helpers.download_file_from_url(source, tmp_path)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n # cleaning\n df = df[df.nature_composant == \"SA\"]\n df = df[[\"nom\"]]\n df = df[~df.index.duplicated(keep=\"first\")]\n helpers.serie_to_lowercase(df, [\"nom\"])\n db.create_table_from_df(df, to_sql_config)\n\n # table specialite_substance\n read_csv_config[\"index_col\"] = \"cis\"\n to_sql_config = {\n \"name\": \"specialite_substance\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"code_substance\": String(16),\n \"elem_pharma\": Text,\n \"dosage\": Text,\n \"ref_dosage\": Text,\n },\n }\n df = helpers.load_csv_to_df(read_csv_config, path=_path)\n # cleaning\n df = df[df.nature_composant == \"SA\"]\n df = df[[\"code\", \"elem_pharma\", \"dosage\", \"ref_dosage\"]]\n df = df.rename(columns={\"code\": \"code_substance\"})\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_atc():\n 
source = {\"pattern\": \"atc_names.json\"}\n to_sql_config = {\n \"name\": \"classes_atc\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"code\": String(16)},\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_to_df_atc(_path)\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_cis_cip_bdpm():\n \"\"\"\n Table presentation\n \"\"\"\n source = config[\"cis_cip_url\"]\n tmp_path = Path(config[\"tmp_folder\"]).joinpath(\"CIS_CIP_bdpm.txt\")\n\n read_csv_config = {\n \"sep\": \"\\t\",\n \"encoding\": \"latin1\",\n \"names\": [\n \"cis\",\n \"cip7\",\n \"libelle_presentation\",\n \"statut_admin_presentation\",\n \"etat_commercialisation\",\n \"date_declaration_commercialisation\",\n \"cip13\",\n \"agrement_collectivites\",\n \"taux_remboursement\",\n \"prix_medicament_euro\",\n \"nb_1\",\n \"nb_2\",\n \"indications_remboursement\",\n ],\n \"index_col\": 0,\n \"header\": None,\n \"dtype\": {\"cis\": str, \"cip13\": str},\n \"parse_dates\": [\"date_declaration_commercialisation\"],\n }\n to_sql_config = {\n \"name\": \"presentation\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"cis\": String(16), \"cip13\": String(16)},\n }\n _path = helpers.download_file_from_url(source, tmp_path)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n # cleaning\n df = df.drop(\n [\n \"prix_medicament_euro\",\n \"nb_1\",\n \"nb_2\",\n \"indications_remboursement\",\n ],\n axis=1,\n )\n df = df.where(pd.notnull(df), None)\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found.')\n\n\n# # ORDEI\n\n\ndef round_small_values(conso_value: int) -> Optional[int]:\n if conso_value <= 10:\n return None\n if 10 < conso_value < 50:\n return 50\n elif 50 <= conso_value < 95:\n return 100\n else:\n return round(conso_value, -int(math.log10(conso_value)))\n\n\ndef create_open_medic_tables():\n source = {\"pattern\": \"open_medic2014_2018_cis_agg.csv\"}\n read_csv_config = {\n \"sep\": \";\",\n \"dtype\": {\"cis\": str},\n \"usecols\": [\"cis\", \"age\", \"conso\", \"n_conso_an\", \"sexe\"],\n \"index_col\": \"cis\",\n \"header\": 0,\n \"names\": [\"index\", \"cis\", \"sexe\", \"age\", \"conso\", \"n_conso_an\", \"SEXE\"],\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n create_spe_exposition_table(df)\n create_spe_patients_sexe_table(df)\n create_spe_patients_age_table(df)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found.')\n\n\ndef create_spe_exposition_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"specialite_exposition\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"conso_an_trunc\": Integer,\n \"exposition\": Integer,\n },\n }\n df = df.groupby(\"cis\").agg(n_conso_an=(\"n_conso_an\", \"sum\"), conso=(\"conso\", \"sum\"))\n df[\"exposition\"] = df[\"n_conso_an\"].apply(\n helpers.get_exposition_level, type=\"specialite\"\n )\n df[\"conso_an_trunc\"] = df.n_conso_an.apply(round_small_values)\n df = df[[\"conso_an_trunc\", \"exposition\"]]\n db.create_table_from_df(df, to_sql_config)\n\n\ndef create_spe_patients_sexe_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"specialite_patient_sexe_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"sexe\": 
Integer,\n \"conso\": Integer,\n \"pourcentage_patients\": Float,\n },\n }\n conso = df.groupby([\"cis\", \"sexe\"]).conso.sum().rename(\"conso\")\n conso_pct = (\n conso.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100)\n .rename(\"pourcentage_patients\")\n )\n final_df = pd.merge(conso, conso_pct, on=[\"cis\", \"sexe\"])\n final_df.drop([\"conso\"], axis=1, inplace=True)\n final_df.reset_index(inplace=True, level=[\"sexe\"])\n db.create_table_from_df(final_df, to_sql_config)\n\n\ndef create_spe_patients_age_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"specialite_patient_age_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"age\": Text,\n \"conso\": Integer,\n \"pourcentage_patients\": Float,\n },\n }\n conso = df.groupby([\"cis\", \"age\"]).conso.sum().rename(\"conso\")\n conso_pct = (\n conso.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100)\n .rename(\"pourcentage_patients\")\n )\n final_df = pd.merge(conso, conso_pct, on=[\"cis\", \"age\"])\n final_df.drop([\"conso\"], axis=1, inplace=True)\n final_df.reset_index(inplace=True, level=[\"age\"])\n db.create_table_from_df(final_df, to_sql_config)\n\n\ndef create_substance_tables():\n source = {\"pattern\": \"bnpv_open_medic1418_sa_codex.csv\"}\n read_csv_config = {\n \"sep\": \";\",\n \"encoding\": \"ISO-8859-1\",\n \"dtype\": {\"code\": str},\n \"usecols\": [\n \"annee\",\n \"sexe\",\n \"age\",\n \"substance\",\n \"code\",\n \"conso\",\n \"cas\",\n ],\n \"index_col\": \"code\",\n \"header\": 0,\n \"names\": [\n \"index\",\n \"annee\",\n \"sexe\",\n \"age\",\n \"substance\",\n \"code\",\n \"cas\",\n \"conso\",\n ],\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n create_substance_exposition_table(df)\n create_substance_patients_sexe_table(df)\n create_substance_patients_age_table(df)\n create_substance_cas_sexe_table(df)\n create_substance_cas_age_table(df)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_substance_exposition_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_exposition\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"exposition\": Integer,\n \"cas\": Integer,\n \"taux_cas\": Float,\n \"annee\": Integer,\n \"conso_annee\": Integer,\n \"cas_annee\": Integer,\n \"conso_an_trunc\": Integer,\n },\n }\n df_by_years = df.groupby([\"code\", \"annee\"]).agg(\n conso_annee=(\"conso\", \"sum\"), cas_annee=(\"cas\", \"sum\")\n )\n df_by_code = df_by_years.groupby(\"code\").agg(\n conso=(\"conso_annee\", \"sum\"),\n cas=(\"cas_annee\", \"sum\"),\n exposition=(\n \"conso_annee\",\n lambda x: helpers.get_total_exposition_level(x, \"substance\"),\n ),\n )\n final_df = df_by_years.join(df_by_code, on=\"code\")\n final_df = helpers.filter_df_on_low_values(final_df, [\"cas\", \"cas_annee\"])\n final_df[\"taux_cas\"] = final_df.apply(\n axis=1,\n func=lambda x: x.cas * 100000 / x.conso if 10 < x.cas <= x.conso else None,\n )\n final_df[\"conso_an_trunc\"] = final_df.conso.apply(\n lambda x: round_small_values(x / 5)\n )\n\n final_df.drop([\"conso\"], inplace=True, axis=1)\n final_df.reset_index(inplace=True, level=[\"annee\"])\n db.create_table_from_df(final_df[final_df.cas.notnull()], to_sql_config)\n\n\ndef create_substance_patients_sexe_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_patient_sexe_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n 
\"dtype\": {\n \"code\": String(16),\n \"sexe\": Integer,\n \"conso\": Integer,\n \"pourcentage_patients\": Float,\n },\n }\n df_copy = df.copy(deep=True)\n df_copy.sexe = df_copy.sexe.apply(lambda x: helpers.mapSexeToCode(x))\n conso = df_copy.groupby([\"code\", \"sexe\"]).conso.sum().rename(\"conso\")\n conso = helpers.filter_serie_on_low_values(conso)\n conso_pct = (\n conso.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100 if x is not None else None)\n .rename(\"pourcentage_patients\")\n )\n df_final = pd.merge(conso, conso_pct, on=[\"code\", \"sexe\"])\n df_final.pourcentage_patients = df_final.apply(\n lambda x: x.pourcentage_patients\n if not df_final.loc[x.name[0]].pourcentage_patients.isnull().values.any()\n else None,\n axis=1,\n )\n df_final.drop([\"conso\"], inplace=True, axis=1)\n df_final.reset_index(inplace=True, level=[\"sexe\"])\n db.create_table_from_df(df_final, to_sql_config)\n\n\ndef create_substance_patients_age_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_patient_age_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"age\": Text,\n \"conso\": Integer,\n \"pourcentage_patients\": Float,\n },\n }\n conso = df.groupby([\"code\", \"age\"]).conso.sum().rename(\"conso\")\n conso = helpers.filter_serie_on_low_values(conso)\n conso_pct = (\n conso.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100 if x is not None else None)\n .rename(\"pourcentage_patients\")\n )\n final_df = pd.merge(conso, conso_pct, on=[\"code\", \"age\"])\n final_df.pourcentage_patients = final_df.apply(\n lambda x: x.pourcentage_patients\n if not final_df.loc[x.name[0]].pourcentage_patients.isnull().values.any()\n else None,\n axis=1,\n )\n final_df.reset_index(inplace=True, level=[\"age\"])\n final_df.drop([\"conso\"], inplace=True, axis=1)\n db.create_table_from_df(final_df, to_sql_config)\n\n\ndef create_substance_cas_sexe_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_cas_sexe_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"sexe\": Integer,\n \"pourcentage_cas\": Float,\n },\n }\n df_copy = df.copy(deep=True)\n df_copy.sexe = df_copy.sexe.apply(lambda x: helpers.mapSexeToCode(x))\n cas = df_copy.groupby([\"code\", \"sexe\"]).cas.sum().rename(\"cas\")\n cas = helpers.filter_serie_on_low_values(cas)\n cas_pct = (\n cas.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100 if cas is not None else None)\n .rename(\"pourcentage_cas\")\n )\n df_final = pd.merge(cas, cas_pct, on=[\"code\", \"sexe\"])\n df_final.pourcentage_cas = df_final.apply(\n lambda x: x.pourcentage_cas\n if not df_final.loc[x.name[0]].pourcentage_cas.isnull().values.any()\n else None,\n axis=1,\n )\n df_final.drop([\"cas\"], axis=1, inplace=True)\n df_final.reset_index(inplace=True, level=[\"sexe\"])\n db.create_table_from_df(df_final[df_final.pourcentage_cas.notnull()], to_sql_config)\n\n\ndef create_substance_cas_age_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_cas_age_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"age\": Text,\n \"pourcentage_cas\": Float,\n },\n }\n cas = df.groupby([\"code\", \"age\"])[\"cas\"].sum().rename(\"cas\")\n cas = helpers.filter_serie_on_low_values(cas)\n cas_pct = (\n cas.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100 if cas is not None else None)\n .rename(\"pourcentage_cas\")\n )\n final_df = pd.merge(cas, cas_pct, on=[\"code\", \"age\"])\n 
final_df.pourcentage_cas = final_df.apply(\n lambda x: x.pourcentage_cas\n if not final_df.loc[x.name[0]].pourcentage_cas.isnull().values.any()\n else None,\n axis=1,\n )\n final_df.drop([\"cas\"], axis=1, inplace=True)\n final_df.reset_index(inplace=True, level=[\"age\"])\n db.create_table_from_df(final_df[final_df.pourcentage_cas.notnull()], to_sql_config)\n\n\ndef create_notificateurs_table():\n source = {\"pattern\": \"bnpv_notif_sa_codex_snds.csv\"}\n read_csv_config = {\n \"encoding\": \"ISO-8859-1\",\n \"sep\": \";\",\n \"dtype\": {\"code\": str},\n \"usecols\": [\n \"notificateur\",\n \"substance_active\",\n \"code\",\n \"age\",\n \"sexe\",\n \"n_decla\",\n \"n_cas\",\n ],\n \"index_col\": \"code\",\n \"header\": 0,\n \"names\": [\n \"index\",\n \"notificateur\",\n \"substance_active\",\n \"code\",\n \"age\",\n \"sexe\",\n \"n_decla\",\n \"n_cas\",\n ],\n }\n to_sql_config = {\n \"name\": \"substance_notif_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"notificateur\": Text,\n \"pourcentage_notif\": Float,\n },\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n decla = df.groupby([\"code\", \"notificateur\"]).n_decla.sum()\n decla_pct = (\n decla.groupby(level=0)\n .apply(lambda x: x / x.sum() * 100)\n .rename(\"pourcentage_notif\")\n )\n final_df = pd.merge(decla, decla_pct, on=[\"code\", \"notificateur\"])\n final_df.pourcentage_notif = final_df.apply(\n lambda x: x.pourcentage_notif if x.n_decla > 10 else None, axis=1\n )\n final_df.drop([\"n_decla\"], axis=1, inplace=True)\n final_df.reset_index(inplace=True, level=[\"notificateur\"])\n db.create_table_from_df(\n final_df[final_df.pourcentage_notif.notnull()], to_sql_config\n )\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_substance_soclong_and_hlt_tables():\n source = {\"pattern\": \"bnpv_eff_soclong_sa_codex_snds.csv\"}\n read_csv_config = {\n \"encoding\": \"ISO-8859-1\",\n \"sep\": \";\",\n \"dtype\": {\"code\": str},\n \"usecols\": [\n \"substance_active\",\n \"code\",\n \"soc_long\",\n \"age\",\n \"sexe\",\n \"n_decla_eff\",\n \"n_cas\",\n ],\n \"index_col\": \"code\",\n \"header\": 0,\n \"names\": [\n \"index\",\n \"substance_active\",\n \"code\",\n \"soc_long\",\n \"age\",\n \"sexe\",\n \"n_decla_eff\",\n \"n_cas\",\n ],\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df_soclong = helpers.load_csv_to_df(read_csv_config, _path)\n create_substance_soclong_table(df_soclong)\n create_hlt_table(df_soclong)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_substance_soclong_table(df: pd.DataFrame):\n to_sql_config = {\n \"name\": \"substance_soclong_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"code\": String(16), \"soc_long\": Text, \"pourcentage_cas\": Float},\n }\n total_case_per_sex_and_age = df.groupby([\"code\", \"sexe\", \"age\"]).agg(\n {\"n_cas\": \"max\"}\n )\n total_case = total_case_per_sex_and_age.groupby(\"code\").agg({\"n_cas\": \"sum\"})\n decla_eff = (\n df.groupby([\"code\", \"soc_long\"]).n_decla_eff.sum().reset_index(level=\"soc_long\")\n )\n final_df = pd.merge(total_case, decla_eff, left_index=True, right_on=[\"code\"])\n final_df = helpers.filter_df_on_low_values(final_df, [\"n_decla_eff\", \"n_cas\"])\n final_df[\"pourcentage_cas\"] = final_df.apply(\n lambda x: x.n_decla_eff / x.n_cas * 100 if x.n_decla_eff and x.n_cas else 
None,\n axis=1,\n result_type=\"expand\",\n )\n final_df = final_df.rename(columns={\"n_decla_eff\": \"n_cas_effet\"})\n final_df.drop(\"n_cas\", inplace=True, axis=1)\n db.create_table_from_df(final_df[final_df.pourcentage_cas.notnull()], to_sql_config)\n\n\ndef create_hlt_table(df_soclong: pd.DataFrame):\n source = {\"pattern\": \"bnpv_eff_hlt_soclong_sa_codex_snds.csv\"}\n read_csv_config = {\n \"encoding\": \"ISO-8859-1\",\n \"sep\": \";\",\n \"dtype\": {\"code\": str},\n \"usecols\": [\n \"subtance_active\",\n \"code\",\n \"age\",\n \"sexe\",\n \"effet_hlt\",\n \"soc_long\",\n \"n_decla_eff_hlt\",\n ],\n \"index_col\": \"code\",\n \"header\": 0,\n \"names\": [\n \"index\",\n \"subtance_active\",\n \"code\",\n \"age\",\n \"sexe\",\n \"effet_hlt\",\n \"soc_long\",\n \"n_decla_eff_hlt\",\n ],\n }\n to_sql_config = {\n \"name\": \"substance_hlt_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"soc_long\": String(255),\n \"effet_hlt\": String(255),\n \"pourcentage_cas\": Float,\n },\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n decla_eff = df_soclong.groupby([\"code\", \"soc_long\"]).agg({\"n_decla_eff\": \"sum\"})\n hlt = df.groupby([\"code\", \"soc_long\", \"effet_hlt\"]).agg(\n {\"n_decla_eff_hlt\": \"sum\"}\n )\n hlt.reset_index([\"effet_hlt\"], inplace=True)\n tmp_df = pd.merge(decla_eff, hlt, left_index=True, right_index=True)\n soclong_hlt = (\n tmp_df.groupby([\"code\", \"soc_long\"])\n .n_decla_eff_hlt.sum()\n .rename(\"n_decla_eff_soclong\")\n )\n final_df = pd.merge(tmp_df, soclong_hlt, left_index=True, right_index=True)\n\n final_df[\"pourcentage_cas\"] = final_df.apply(\n lambda x: x.n_decla_eff_hlt / x.n_decla_eff_soclong * 100,\n axis=1,\n result_type=\"expand\",\n )\n final_df.pourcentage_cas = final_df.apply(\n lambda x: x.pourcentage_cas if x.n_decla_eff_hlt > 10 else None, axis=1\n )\n final_df.reset_index([\"soc_long\"], inplace=True)\n final_df.drop(\n [\"n_decla_eff_soclong\", \"n_decla_eff_hlt\", \"n_decla_eff\"],\n inplace=True,\n axis=1,\n )\n db.create_table_from_df(\n final_df[final_df.pourcentage_cas.notnull()], to_sql_config\n )\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef check_threshold(df: pd.DataFrame, x: pd.Series):\n dfx = df.loc[x.name]\n if (\n dfx[dfx.grave == \"oui\"].cas.values[0] > 10\n and dfx[dfx.grave == \"non\"].cas.values[0] > 10\n ):\n return x.cas\n else:\n return None\n\n\ndef create_cas_grave_table():\n source = {\"pattern\": \"bnpv_cas_grave_sa_codex_snds.csv\"}\n read_csv_config = {\n \"encoding\": \"ISO-8859-1\",\n \"sep\": \";\",\n \"dtype\": {\"code\": str},\n \"usecols\": [\n \"grave\",\n \"code\",\n \"cas\",\n ],\n \"header\": 0,\n \"names\": [\n \"grave\",\n \"subtance_active\",\n \"code\",\n \"cas\",\n ],\n }\n to_sql_config = {\n \"name\": \"substance_cas_grave_ordei\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"code\": String(16),\n \"grave\": String(16),\n \"pourcentage_cas\": Float,\n },\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_csv_to_df(read_csv_config, _path)\n df.grave = df.grave.str.lower()\n df = df.set_index(\"code\")\n df.cas = df.apply(lambda x: check_threshold(df, x), axis=1)\n df.grave = df.grave.apply(lambda x: \"Grave\" if x == \"oui\" else \"Non grave\")\n df = df.where(pd.notnull(df), None)\n df = df.sort_index()\n 
db.create_table_from_df(df[df.cas.notnull()], to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_emed():\n source = {\"pattern\": \"RqHackathon_20220225.xlsx\"}\n read_excel_config = {\n \"usecols\": \"E:L,N:Q\",\n \"names\": [\n \"lieu_erreur\",\n \"initial_erreur\",\n \"nature_erreur\",\n \"cause_erreur\",\n \"population_erreur\",\n \"qualif_erreur\",\n \"effet_indesirable\",\n \"gravite\",\n \"denomination\",\n \"dci\",\n \"atc\",\n \"voie\",\n ],\n }\n to_sql_config = {\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"cis\": String(16)},\n }\n tables = {\n # \"lieu\": \"lieu_erreur\",\n # \"cause\": \"cause_erreur\",\n \"population\": \"population_erreur\",\n \"initial\": \"initial_erreur\",\n \"nature\": \"nature_erreur\",\n \"effet_indesirable\": \"effet_indesirable\",\n \"gravite\": \"gravite\",\n }\n\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_excel_to_df(read_excel_config, _path)\n df = transformers.erreurs_med.clean_emed_df(df)\n\n df_spe = db.create_df_from_table(\"specialite\")\n df_spe = df_spe.set_index(\"cis\")\n\n # Create 'erreur_med_cis_denomination' corresp table\n df_corresp = transformers.erreurs_med.get_corresp_df(df, df_spe)\n args_corresp = {\n **{\"name\": \"erreur_med_cis_denomination\"},\n **to_sql_config,\n }\n db.create_table_from_df(df_corresp, args_corresp)\n\n # Create all tables\n for table_name, table_column in tables.items():\n df_table = transformers.erreurs_med.get_table_df(df, df_spe, table_column)\n args = {\n **{\"name\": \"erreur_med_{}\".format(table_name)},\n **to_sql_config,\n }\n db.create_table_from_df(df_table, args)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_ruptures():\n # Load 2 dependencies tables\n df_spe = db.create_df_from_table(\"specialite\").reset_index()\n df_pres = db.create_df_from_table(\"presentation\")\n\n # Old ruptures file (<= 03/05/2021)\n source = {\"pattern\": \"ListeDesRuptures_2022_3_110_59_37.xlsx\"}\n read_excel_config = {\n \"header\": 0,\n \"parse_dates\": [\n \"Date Signalement\",\n ],\n \"usecols\": [\n \"Signalement\",\n \"Date Signalement\",\n \"Laboratoire\",\n \"Spécialité\",\n \"Rupture\",\n \"Etat dossier\",\n \"ATC\",\n \"DCI\",\n \"Origine_Cause_RS\",\n ],\n }\n # Old ruptures file is not full at this time\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df_old = helpers.load_excel_to_df(read_excel_config, _path)\n df_old = transformers.ruptures.clean_old_ruptures_df(df_old, df_spe)\n\n # Create table signalement\n _create_table_causes(df_old[[\"numero\", \"cause\"]])\n\n df_old = df_old.drop([\"cause\"], axis=1)\n\n # New ruptures file (>= 04/05/2021)\n source = {\"pattern\": \"Dossier_de_rupture_100322.xlsx\"}\n read_excel_config = {\n \"header\": 0,\n \"dtype\": {\"numero\": str, \"cip13\": str},\n \"usecols\": [\n \"Numéro\",\n \"État\",\n \"Date de déclaration\",\n \"Classification\",\n \"Nom Laboratoire\",\n \"CIP\",\n \"Nom\",\n \"DCI\",\n \"Code ATC\",\n \"Presentation\",\n \"Classe Therapeutique\",\n ],\n }\n to_sql_config = {\n \"name\": \"ruptures\",\n \"if_exists\": \"replace\",\n \"dtype\": {\n \"annee\": String,\n \"cip13\": String,\n \"date\": Date,\n },\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df_new = helpers.load_excel_to_df(read_excel_config, _path)\n df_new = transformers.ruptures.clean_new_ruptures_df(df_new, df_pres)\n\n df_ruptures_final = 
transformers.ruptures.merge_new_and_old_ruptures_df(\n df_old, df_new\n )\n\n # Create table signalement\n _create_table_signalements(df_new, df_pres)\n\n # Create table ruptures\n df_ruptures_final = df_ruptures_final.set_index(\"numero\")\n db.create_table_from_df(df_ruptures_final, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef _create_table_causes(df_old: pd.DataFrame):\n df_old = transformers.causes_ruptures.clean_old_causes_df(df_old)\n\n source = {\"pattern\": \"causes_100322.xlsx\"}\n read_excel_config = {\n \"header\": 0,\n \"dtype\": {\"numero\": str, \"cip13\": str},\n \"usecols\": [\"rst_rpt_numero\", \"lov_label\"],\n }\n to_sql_config = {\"name\": \"causes\", \"if_exists\": \"replace\"}\n\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df_new = helpers.load_excel_to_df(read_excel_config, _path)\n df_new = transformers.causes_ruptures.clean_new_causes_df(df_new)\n\n df_causes = transformers.causes_ruptures.merge_new_and_old_causes_df(\n df_old, df_new\n )\n\n # We remove \"Stock inférieur au stock défini par le décret n°2021-349 du 30 mars 2021\"\n # because it is not a rupture cause\n df_causes = df_causes[\n df_causes.cause\n != \"Stock inférieur au stock défini par le décret n°2021-349 du 30 mars 2021\"\n ]\n db.create_table_from_df(df_causes, to_sql_config)\n\n\ndef _create_table_signalements(df: pd.DataFrame, df_pres: pd.DataFrame):\n # Load 2 dependencies tables\n df_classes_atc = db.create_df_from_table(\"classes_atc\")\n df_spe_atc = db.create_df_from_table(\"specialite_atc\").sort_values(by=\"atc\")\n\n to_sql_config = {\n \"name\": \"signalements\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"annee\": String,\n \"nb_signalements\": Integer,\n \"nb_presentations\": Integer,\n },\n }\n\n # Create df_pres_atc from df_pres and (df_classes_atc & df_spe_atc)\n df_spe_atc[\"atc1\"] = df_spe_atc.atc.apply(lambda x: x[:1])\n df_spe_atc = df_spe_atc.merge(\n df_classes_atc, left_on=\"atc1\", right_on=\"code\", how=\"left\"\n )\n df_pres = df_pres.merge(df_spe_atc[[\"cis\", \"atc1\", \"label\"]], on=\"cis\", how=\"left\")\n df_pres_atc = (\n df_pres.groupby(\"label\").agg(nb_presentations=(\"cip13\", \"count\")).reset_index()\n )\n\n # Create new df_sig from df, df_classes_atc and df_pres_atc\n df = df.merge(df_classes_atc, left_on=\"atc1\", right_on=\"code\", how=\"left\")\n df = df.drop_duplicates(subset=[\"numero\", \"cis\"], keep=\"first\")\n\n # Exclude décret rows\n df = df[df.classification.isin([\"rupture\", \"risque\"])]\n\n # Compute number of signalings, per year, per atc class\n df[\"annee\"] = df.date.dt.year\n\n # Remove bad data (year = NaN)\n df.annee = df.annee.fillna(0)\n df = df[df.annee != 0]\n\n # Force type to integer\n df.annee = df.annee.astype(int)\n\n df_sig = (\n df.groupby([\"annee\", \"label\"])\n .agg(nb_signalements=(\"numero\", \"count\"))\n .reset_index()\n .sort_values(by=\"nb_signalements\", ascending=False)\n )\n\n # Add number of presentations by atc class\n df_sig = df_sig.merge(df_pres_atc, on=\"label\", how=\"left\").sort_values(\n by=[\"annee\", \"nb_signalements\"], ascending=False\n )\n\n df_sig = df_sig.set_index(\"annee\")\n\n db.create_table_from_df(df_sig, to_sql_config)\n\n\ndef create_table_mesures():\n # Load dependency table\n try:\n df_ruptures = db.create_df_from_table(\"ruptures\")\n except Exception:\n print(\"Unable to import dataframe from table ruptures\")\n return\n\n source = {\"pattern\": \"Mesure_100322.xlsx\"}\n 
read_excel_config = {\n \"header\": 0,\n \"usecols\": [\n \"Etat\",\n \"Numéro Rupture\",\n \"Identifiant\",\n \"Description\",\n \"Nom Produit\",\n \"Demande de mise en place\",\n \"Date mise en place\",\n \"Date de fin prévisionnelle\",\n \"Date de clotûre\",\n \"Justification\",\n ],\n }\n to_sql_config = {\n \"name\": \"mesures\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"annee\": String,\n \"date_demande\": Date,\n \"date_mise_en_place\": Date,\n \"date_previ_fin\": Date,\n \"date_cloture\": Date,\n },\n }\n\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_excel_to_df(read_excel_config, _path)\n df = transformers.mesures.prepare_df_mesures(df, df_ruptures)\n db.create_table_from_df(df, to_sql_config)\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_icones():\n # Load table dependency\n df_spe = db.create_df_from_table(\"specialite\")\n\n to_sql_config = {\n \"name\": \"icones\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\n \"cis\": String(16),\n \"forme_pharma\": Text,\n \"icone\": Text,\n },\n }\n df_icones = transformers.icones.prepare_df_icones(df_spe)\n db.create_table_from_df(df_icones, to_sql_config)\n\n\ndef create_table_mesusage():\n # Load dependency tables\n try:\n df_spe = db.create_df_from_table(\"specialite\")\n except Exception:\n print(\"Unable to create df from table `specialite`\")\n\n source = {\"pattern\": \"20210104 - YAuffray - Mésusages depuis 2015.xlsx\"}\n read_excel_config = {\n \"sheet_name\": \"Complet\",\n \"usecols\": [\n \"Cas CRPV\",\n \"Mode Recueil\",\n \"Typ Décl\",\n \"Typ Cas\",\n \"Typ Notif\",\n \"Cadre Notif\",\n \"Sex\",\n \"Age\",\n \"Grave\",\n \"Décès\",\n \"Notif\",\n \"Médicaments\",\n \"Voie\",\n \"Début TT\",\n \"Fin TT\",\n \"Durée\",\n \"Début EI\",\n \"Fin EI\",\n \"HLT\",\n \"HLGT\",\n \"SOC long\",\n \"Evolution\",\n \"Indication\",\n ],\n }\n tables = {\n \"mesusage_global_sexe\": \"sexe\",\n \"mesusage_global_age\": \"age\",\n \"mesusage_global_gravite\": \"gravite\",\n \"mesusage_global_declarant\": \"notificateur\",\n \"mesusage_global_annee\": \"annee\",\n \"mesusage_specialite_sexe\": [\"cis\", \"sexe\"],\n \"mesusage_specialite_age\": [\"cis\", \"age\"],\n \"mesusage_specialite_soc\": [\"cis\", \"soc_long\"],\n }\n to_sql_config = {\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"cis\": String(16)},\n }\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_excel_to_df(read_excel_config, _path)\n df_mesusage = transformers.mesusage.prepare_df_mesusage(df, df_spe)\n for table_name, table_columns in tables.items():\n df_mesusage_final = transformers.mesusage.get_proporition_df(\n df_mesusage, table_columns\n )\n if not table_name.startswith(\"mesusage_global\"):\n df_mesusage_final = df_mesusage_final.set_index(\"cis\")\n db.create_table_from_df(\n df_mesusage_final, {**{\"name\": table_name}, **to_sql_config}\n )\n else:\n print(f'file with pattern {source[\"pattern\"]} not found')\n\n\ndef create_table_pv():\n \"\"\"\n Nombre de cas déclarés dans la BNPV chaque année\n \"\"\"\n to_sql_config = {\n \"name\": \"cas_pv\",\n \"if_exists\": \"replace\",\n \"index\": True,\n }\n df = pd.DataFrame(transformers.pv.NB_CAS_AN)\n db.create_table_from_df(df, to_sql_config)\n\n\ndef create_scrapping_tables():\n to_sql_description = {\n \"name\": \"description\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"cis\": 
String(16)},\n }\n to_sql_publications = {\n \"name\": \"publications\",\n \"if_exists\": \"replace\",\n \"index\": True,\n \"dtype\": {\"cis\": String(16)},\n }\n df_cis = pd.read_sql(\"specialite\", db.connect_db())\n cis_list = list(df_cis.cis.unique())\n\n flatten = lambda t: [item for sublist in t for item in sublist]\n\n with Pool(10) as p:\n print(\"start scrapping BDPM...\")\n print(\"count cis:\", len(cis_list))\n all_scraps = p.map(transformers.bdpm.scrap_bdpm, cis_list)\n\n # Table descriptions\n print(\"start creating descriptions table...\")\n descriptions_list = list(map(lambda x: x[0], all_scraps))\n df_descriptions = pd.DataFrame(descriptions_list, columns=[\"cis\", \"description\"])\n df_descriptions = df_descriptions.set_index(\"cis\").sort_index()\n db.create_table_from_df(df_descriptions, to_sql_description)\n\n # Table publications\n print(\"start creating publications table...\")\n publications_list = flatten(list(map(lambda x: x[1], all_scraps)))\n df_publications = pd.DataFrame(\n publications_list, columns=[\"cis\", \"title\", \"type\", \"link\"]\n )\n df_publications = df_publications.set_index(\"cis\").sort_index()\n db.create_table_from_df(df_publications, to_sql_publications)\n\n\ndef create_global_dec_table():\n print(\"start creating global indicators table...\")\n source = {\"pattern\": \"global_indic_v3.xlsx\"}\n read_excel_config = {\n \"header\": 0,\n \"sheet_name\": \"data\",\n \"dtype\": {\"label\": str, \"N\": int, \"pct\": float, \"commentaire\": str},\n }\n to_sql_config = {\n \"name\": \"global_ei_indicators\",\n \"if_exists\": \"replace\",\n \"dtype\": {\"label\": String, \"N\": Integer, \"pct\": Float, \"commentaire\": String},\n }\n\n _path = helpers.get_path_from_source(source)\n if isinstance(_path, Path):\n df = helpers.load_excel_to_df(read_excel_config, _path)\n\n df = df.where(pd.notnull(df), None)\n\n db.create_table_from_df(df, to_sql_config)\n\n\ndef create_table_config():\n print(\"start creating config table...\")\n to_sql_config = {\n \"name\": \"config\",\n \"if_exists\": \"replace\",\n \"dtype\": {\"populate_last_update\": DateTime},\n \"index\": False,\n }\n last_update = datetime.now(tz=FRA)\n df = pd.DataFrame([last_update], columns=[\"populate_last_update\"])\n\n db.create_table_from_df(df, to_sql_config)\n\n\nprint(\"start to populate db...\")\n\ncreate_table_bdpm_cis()\ncreate_tables_rsp_compo()\ncreate_table_cis_cip_bdpm()\ncreate_table_atc()\ncreate_table_cis_atc()\n\n# Scrapping\ncreate_scrapping_tables()\n\n# Ordei\ncreate_open_medic_tables()\ncreate_substance_tables()\ncreate_notificateurs_table()\ncreate_substance_soclong_and_hlt_tables()\ncreate_cas_grave_table()\ncreate_global_dec_table()\n\n# Erreurs médicamenteuses\ncreate_table_emed()\n\n# Ruptures\ncreate_table_ruptures()\ncreate_table_mesures()\n\n# Logos\ncreate_table_icones()\n\n# Mésusage\n# create_table_mesusage()\n\n# Pharmacovigilance\ncreate_table_pv()\n\n# Config\ncreate_table_config()\n\nprint(\"end populate db...\")\n","sub_path":"datamed/populate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":39996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"395327049","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom travelport.models.type_profile_type_7 import TypeProfileType7\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/uprofile_v37_0\"\n\n\n@dataclass\nclass TypeProfileParentHistory2:\n \"\"\"\n Parameters\n ----------\n profile_id\n 
Agency in which the field group is created.\n profile_type\n The type of profile this profile is for (e.g., branch, account,\n traveler). The profile type identifies which default\n attributes/elements (minimum data set) the system will insert.\n profile_name\n The name of the profile. Either the concatenated first name or last\n name of an Agent or Traveler or the name of the other profile.\n provisioning_code\n The Provisioning Code for this profile.\n \"\"\"\n class Meta:\n name = \"typeProfileParentHistory\"\n\n profile_id: None | int = field(\n default=None,\n metadata={\n \"name\": \"ProfileID\",\n \"type\": \"Attribute\",\n }\n )\n profile_type: None | TypeProfileType7 = field(\n default=None,\n metadata={\n \"name\": \"ProfileType\",\n \"type\": \"Attribute\",\n }\n )\n profile_name: None | str = field(\n default=None,\n metadata={\n \"name\": \"ProfileName\",\n \"type\": \"Attribute\",\n }\n )\n provisioning_code: None | str = field(\n default=None,\n metadata={\n \"name\": \"ProvisioningCode\",\n \"type\": \"Attribute\",\n \"min_length\": 1,\n \"max_length\": 25,\n }\n )\n","sub_path":"travelport/models/type_profile_parent_history_2.py","file_name":"type_profile_parent_history_2.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"601811749","text":"# Exercise\n# build models with cifar10 and cifar100\n# trainable=True, False\n# compare the FC version against the Average Pooling version\n\nfrom tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.applications import VGG16, VGG19\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import cifar100, cifar10\n\n\n# 1. data preparation\n(x_train, y_train), (x_test, y_test) = cifar100.load_data()\n\nx_train = x_train.reshape(50000, 32 * 32 * 3)\nx_test = x_test.reshape(10000, 32 * 32 * 3)\n# reshape to 2D, then restore to 4D afterwards\n# print(x_train.shape, x_test.shape) # (50000, 3072) (10000, 3072)\n\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler, QuantileTransformer, PowerTransformer\nscaler = MinMaxScaler()\n# scaler = StandardScaler()\nx_train = scaler.fit_transform(x_train) # can be done in one call; fit only on train\nx_test = scaler.transform(x_test)\n\nx_train = x_train.reshape(x_train.shape[0], 32,32, 3)\nx_test = x_test.reshape(x_test.shape[0], 32,32, 3)\n\nfrom tensorflow.keras.utils import to_categorical\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\n\n\n# 2. modeling\nvgg19 = VGG19(weights='imagenet', include_top=False, input_shape=(32, 32, 3))\n\n# model = VGG16()\n# model = VGG19()\n\nvgg19.trainable=False\n\nmodel = Sequential()\nmodel.add(vgg19)\nmodel.add(Flatten())\nmodel.add(Dense(100, activation='relu'))\n# model.add(GlobalAveragePooling2D())\nmodel.add(Dense(100, activation='softmax'))\n\n# model.summary()\n# model.trainable=False # freeze training for the whole model\n\n# 3. training and evaluation\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # for multi-class classification the loss is categorical_crossentropy\n\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nes = EarlyStopping(monitor='val_loss', patience=5, mode='min', verbose=1)\n# cp = ModelCheckpoint(monitor='val_loss', save_best_only=True, mode='auto',\n# filepath='./_save/ModelCheckPoint/keras48_MCP_cifar10.hdf5')\n\nmodel.fit(x_train, y_train, epochs=100, batch_size=1500, callbacks=[es,], validation_split=0.08, verbose=2)\n\n\nloss = model.evaluate(x_test, y_test)\nprint('loss : ', loss[0])\nprint('accuracy : ', loss[1])\n\n\n\n'''\nResults\n1. cifar 10\ntrainable = True, FC : loss=?, acc=?\nloss : 0.7416892647743225\naccuracy : 0.8048999905586243\ntrainable = True, GAP : loss=?, acc=?\nloss : 0.74931800365448\naccuracy : 0.7973999977111816\ntrainable = False, FC : loss=?, acc=?\nloss : 1.158708930015564\naccuracy : 0.6047000288963318\ntrainable = False, GAP : loss=?, acc=?\nloss : 1.2098288536071777\naccuracy : 0.579800009727478\n\n2. cifar 100\ntrainable = True, FC : loss=?, acc=?\nloss : 3.0773749351501465\naccuracy : 0.29660001397132874\ntrainable = True, GAP : loss=?, acc=?\nloss : 4.605196475982666\naccuracy : 0.009999999776482582\ntrainable = False, FC : loss=?, acc=?\nloss : 2.6485595703125\naccuracy : 0.33880001306533813\ntrainable = False, GAP : loss=?, acc=?\nloss : 2.650249719619751\naccuracy : 0.34630000591278076\n'''\n\n","sub_path":"keras2/keras72_01_cifar_VGG19.py","file_name":"keras72_01_cifar_VGG19.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372000362","text":"import json\nimport re\n\n\nsource_file=open('./source_tifu.txt', 'a')\ntarget_file=open('./target_tifu.txt', 'a')\n\n# Read entire file\nposts = []\nwith open('tifu_all_tokenized_and_filtered.json', 'r') as fp:\n for line in fp:\n posts.append(json.loads(line))\n\n# Json entries\n#print(posts[50000].keys())\n\n#print(posts[168].get('selftext_without_tldr').replace('\\n', ' '))\n# print(posts[50000].get('tldr'))\n# print(posts[50000].get('title'))\n\n# exit()\n\n\ni=0\nfor element in posts:\n\n if not element.get('tldr') is None:\n\n \n target_text=element.get('tldr')\n\n else:\n target_text='shamane'\n \n\n\n source_text=element.get('selftext_without_tldr')\n\n\n \n\n source_text=source_text.replace(\"\\n\", \" \").lstrip(' ')\n target_text=target_text.replace(\"\\n\", \" \").lstrip(' ')\n\n\n source_text=\" \".join(source_text.split())\n target_text=\" \".join(target_text.split())\n\n\n\n source_wordList = re.sub(\"[^\\w]\", \" \", source_text).split()\n target_wordList = re.sub(\"[^\\w]\", \" \", target_text).split()\n\n\n\n if len(source_wordList) < 100:\n continue\n\n if len(target_wordList)<25:\n continue\n\n \n if len(target_wordList) >= len(source_text):\n continue\n\n \n i=i+1\n\n\n\n source_file.write(source_text+'\\n')\n target_file.write(target_text+'\\n')\n\n\n # if i==170:\n \n # print(source_text)\n # exit()\n\n\n\n\n\n\n\n\n\nexit()\n# [u'title_tokenized',\n# u'permalink',\n# u'title',\n# u'url',\n# u'num_comments',\n# u'tldr', # (optional)\n# u'created_utc',\n# u'trimmed_title_tokenized',\n# u'ups',\n# u'selftext_html',\n# u'score',\n# u'upvote_ratio',\n# u'tldr_tokenized', # (optional)\n# u'selftext',\n# u'trimmed_title',\n# u'selftext_without_tldr_tokenized',\n# u'id',\n# u'selftext_without_tldr']","sub_path":"Trainning 
data/Reddit-TIFU-data-pre/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"16069429","text":"import psycopg2 as ps\nimport csv\nimport math\n\nclass DBO:\n ##Custom database object, retains information about dbname, user and pw\n def __init__(self,dbname,user,pw):\n self.user = user\n self.pw = pw\n self.dbname = dbname\n self.admin = ps.connect(database='postgres', user=self.user, password=self.pw)\n self.admin.autocommit=True\n self.checkDBName()\n self.conn = ps.connect(database=self.dbname, user=self.user, password=self.pw)\n self.conn.autocommit=True\n \n\n def checkDBName(self):\n try:\n conn = ps.connect(database=self.dbname, user=self.user, password=self.pw)\n conn.close()\n except ps.OperationalError:\n print(\"Failed to find that database\")\n test = input(\"Would you like to create that database? (y/n)\")\n if test == 'y':\n self.createNewDB()\n else:\n print(\"There is no database\")\n \n def mapTypesToPostgres(self,dataset):\n ##Here we create data to help map python types to postgres types\n dic = {}\n tempD = {}\n for i,y in enumerate(dataset[0]):##Iterate through columns as headers\n if y!='':\n inconsistentWarning = False\n for k,x in enumerate(dataset):##Iterate through rows\n if k==0:##Set list for header\n header = x\n if k>0:\n try:\n test = tempD[self.checkHeader(header[i])]\n if test!=str(type(x[i])):\n if inconsistentWarning==True:\n print(\"Error: inconsistent type in {0}\".format(y))\n inconsistentWarning = True\n tempD[self.checkHeader(header[i])] = \"\"\n except KeyError:\n tempD[self.checkHeader(header[i])] = str(type(x[i]))\n return(tempD)\n\n def checkHeader(self,val):\n val = val.lower()\n exclude = [' ','\"',\"'\",'.',',','/','?',';',':','[',']','{','}','\\\\','|',\n '!','@','#','$','%','^','&','*','(',')','~','`','-','+','=']\n for e in exclude:\n val = val.replace(e,\"_\")\n return(val)\n \n def makeCreateTableStatement(self,dataset,mapping,tableName,cur):\n ##This creates a table based on the structure of the dataset\n typeDB = {\"\":'text',\n \"\":'integer',\n \"\":'numeric'}\n createTable = 'CREATE TABLE {0} ('.format(tableName)\n for k,var in enumerate(mapping):\n #print(var, mapping[var], typeDB[mapping[var]])\n var = self.checkHeader(var)\n createTable = createTable + '\"{0}\" {1}'.format(var, typeDB[mapping[var]])\n if k<(len(mapping)-1):\n createTable = createTable+', '\n createTable = createTable+') WITH (OIDS=FALSE); ALTER TABLE {0} OWNER TO postgres;'.format(tableName)\n cur.execute(createTable)\n #print(createTable)\n\n def checkForBlanks(self,header):\n newH = []\n for h in header:\n if h!='':\n newH.append(h)\n return(newH)\n\n def writeData(self,dataset,mapping,tableName,cur):\n ##This iterates through the data and creates statements to post to db\n header = dataset[0]\n header = self.checkForBlanks(header)\n data = dataset[1:len(dataset)]\n cmd = 'INSERT INTO {0} ('.format(tableName)\n for k, h in enumerate(header):\n cmd = cmd+self.checkHeader(h)\n if k < (len(header)-1):\n cmd = cmd+\", \"\n cmd = cmd+\") VALUES (\"\n for i, row in enumerate(data):\n for k, r in enumerate(header):\n r = row[k]\n if mapping[self.checkHeader(header[k])]==\"\":\n cmd = cmd+\"'{0}'\".format(r)\n else:\n cmd = cmd+\"{0}\".format(r)\n if k < (len(header)-1):\n cmd = cmd+\", \"\n if i < (len(data)-1):\n cmd = cmd+\"),(\"\n cmd = cmd+\");\"\n #print(cmd)\n #cur.execute(cmd)\n try:\n cur.execute(cmd)\n except ps.ProgrammingError:\n 
print('========================')\n print(cmd)\n print(len(header))\n for i in data:\n print(len(i))\n print(i)\n print('========================')\n cur.execute(cmd)\n\n## def writeData(self,dataset,mapping,tableName,cur):\n## ##This iterates through the data and creates statements to post to db\n## header = dataset[0]\n## header = self.checkForBlanks(header)\n## data = dataset[1:len(dataset)]\n## cmd = 'INSERT INTO {0} ('.format(tableName)\n## for k, h in enumerate(header):\n## cmd = cmd+self.checkHeader(h)\n## if k < (len(header)-1):\n## cmd = cmd+\", \"\n## cmd = cmd+\") VALUES (\"\n## for i, row in enumerate(data):\n## for k, r in enumerate(header):\n## r = row[k]\n## if mapping[self.checkHeader(header[k])]==\"\":\n## cmd = cmd+\"'{0}'\".format(r)\n## else:\n## cmd = cmd+\"{0}\".format(r)\n## if k < (len(header)-1):\n## cmd = cmd+\", \"\n## if i < (len(data)-1):\n## cmd = cmd+\"),(\"\n## cmd = cmd+\");\"\n## #print(cmd)\n## #cur.execute(cmd)\n## try:\n## cur.execute(cmd)\n## except ps.ProgrammingError:\n## print('========================')\n## print(cmd)\n## print(len(header))\n## for i in data:\n## print(len(i))\n## print(i)\n## print('========================')\n## cur.execute(cmd)\n\n## def copyFromCSV(self,fiName,tblName,delim=\",\",overwrite=False):\n## chunker = 20.\n## tblName = self.checkHeader(tblName)\n## cur = self.conn.cursor()\n## if overwrite==True:\n## self.dropFromPostgres(tblName, objType='TABLE')\n## with open(fiName) as fi:\n## reader = csv.reader(fi,delimiter=delim,dialect='excel')\n## for k,r in enumerate(reader):\n## #print(k)\n## data = []\n## ##print(k,((float(k)/500.)*500)==math.ceil(float(k)/500.)*500)\n## if k==0:\n## header = r\n## fullData = [header]\n## else:\n## for i in r:\n## data.append(getType(i))\n## #print(data)\n## fullData.append(data)\n## if k>0 and ((float(k)/chunker)*chunker)==math.ceil(float(k)/chunker)*chunker:\n## print(\"Time to write\")\n## print(k)\n## #print(fullData)\n## try:\n## x = mapping\n## except:\n## mapping = self.mapTypesToPostgres(fullData)\n## self.makeCreateTableStatement(fullData, mapping, tblName, cur)\n## self.writeData(fullData, mapping, tblName, cur)\n## fullData = [header]\n## if len(fullData)>1:\n## self.writeData(fullData,mapping,tblName,cur)##For final run through\n\n def copyFromCSV(self,fiName,tblName,delim=\",\",overwrite=False):\n print('Reading in data')\n data = readInData(fiName,delim)\n print('Finishesd reading in data')\n cur = self.conn.cursor()\n if overwrite==True:\n self.dropFromPostgres(tblName, objType='TABLE')\n mapping = self.mapTypesToPostgres(data)\n self.makeCreateTableStatement(data, mapping, tblName, cur)\n print(\"Writing data\")\n self.writeData(data, mapping, tblName,cur)\n print(\"Finished writing data\")\n\n def createNewDB(self):\n ##This should be done if no database exists\n cur = self.admin.cursor()\n cur.execute(\"CREATE DATABASE {0} WITH OWNER = {1} ENCODING = 'UTF8' TABLESPACE = pg_default LC_COLLATE = 'English_United States.1252' LC_CTYPE = 'English_United States.1252' CONNECTION LIMIT = -1;\".format(self.dbname.lower(),self.user))\n print(\"Created new empty database - {0}\".format(self.dbname))\n\n def dropFromPostgres(self, objName, objType='TABLE'):\n if objType=='TABLE':\n cur = self.conn.cursor()\n if objType=='DATABASE':\n if objName==self.dbname:\n self.conn.close()\n cur = self.admin.cursor()\n try:\n cur.execute(\"DROP {0} {1};\".format(objType, objName))\n print(\"{0} ({1}) dropped\".format(objType,objName))\n except:\n print(\"There was no {0} to 
drop\".format(objName))\n\n def adHoc(self,query):\n cur = self.conn.cursor()\n cur.execute(query)\n out = cur.fetchall()\n print(\"Query executed:\")\n print(query)\n return(out)\n\ndef getType(i):\n try:\n i = int(i)\n try:\n i = float(i)\n except ValueError:\n pass\n except ValueError:\n i = i.replace(\"'\",\"\")\n return(i)\n \n \ndef readInData(fiName, delim=','):\n data = []\n with open(fiName) as fi:\n reader = csv.reader(fi,delimiter=delim,dialect='excel')\n for r in reader:\n tempLi = []\n for i in r:\n try:\n i = int(i)\n try:\n i = float(i)\n except ValueError:\n pass\n except ValueError:\n i = i.replace(\"'\",\"\")\n tempLi.append(i)\n data.append(tempLi)\n\n return(data)\n \n \nfiName = 'C:/Users/jarrodanderin/Documents/_RWork/_Datasets/COW_Alliance_v3.03.csv'\ndbname = 'testing2'\ntblName = 'test'\nfiLi = open('fiNames.csv')\nreader = csv.reader(fiLi)\nsaveDic = {}\nfor r in reader:\n try:\n saveDic[r[2]].append(r)\n except KeyError:\n saveDic[r[2]] = [r]\nfor ty in saveDic:\n print(ty)\n for i in saveDic[ty]:\n if i[1]=='polity_four':\n print(\"==>\"+str(i[1]))\n db = DBO(ty,'postgres','pw')\n db.copyFromCSV(i[0],i[1],overwrite=True)\n##db = DBO(dbname,'postgres','pw')\n##db.copyFromCSV(fiName,tblName,overwrite=True)\n","sub_path":"ConvertToDatabase.py","file_name":"ConvertToDatabase.py","file_ext":"py","file_size_in_byte":10590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"307441123","text":"from flask import Flask, render_template, request\nfrom script import assistant_gui\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef load_home():\n name = \"Ratnesh\"\n return render_template('home.html', name=name)\n\n\n@app.route(\"/func\", methods={'GET', 'POST'})\ndef process():\n name = request.form.get('input')\n '''name=assistant_gui.record_audio()\n assistant_gui.respond(name)'''\n assistant_gui.respond(name)\n return render_template('home.html', name=name)\n\nif __name__ == \"__main__\":\n app.run(debug=False)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"220621344","text":"import requests\nfrom bs4 import BeautifulSoup\n\nclass BTCSpider(object):\n def __init__(self):\n self.url = \"http://8btc.com/forum-61-{}.html\"\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36\"\n }\n self.data_list = []\n self.data_detai_list = []\n\n def get_data(self, url):\n response = requests.get(url, headers=self.headers)\n # 当前页面的字符集 是gbk ;\n # 一般都是 转成对应的字符串处理 但是 可能转码的时候有小问题\n # 出现问题之后, 使用原生的 bytes\n data = response.text\n return data\n\n # 批量的url\n def get_url_list(self):\n return [self.url.format(i) for i in range(1, 5)]\n\n def bs4_demo_parse_data(self, data):\n # 解析 bs4\n parse_data = BeautifulSoup(data, 'lxml')\n # 解析数据 title 和url list\n # 拿到了目标标签的 list\n a_list = parse_data.select('.xst')\n for a in a_list:\n dict = {}\n dict['text'] = a.get_text()\n dict['url'] = a.get('href')\n self.data_list.append(dict)\n\n def run(self):\n # 循环遍历 列表页面\n url_list = self.get_url_list()\n for url in url_list[:1]:\n print(url)\n data = self.get_data(url)\n self.bs4_demo_parse_data(data)\n\n # 等列表页 抓取完毕; 在抓取详情页\n for detail in self.data_list:\n detail_url = detail['url']\n print(detail_url)\n\n detail_data = self.get_data(detail_url)\n\n #解析详情页的数据\n detail_parse = 
BeautifulSoup(detail_data,'lxml')\n\n detail['result'] = detail_parse.select('.t_f')[0].get_text().replace('\\n','')\n print(self.data_list)\n\nspider = BTCSpider()\nspider.run()\n","sub_path":"requests_bs4_test.py","file_name":"requests_bs4_test.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"525274191","text":"#!/usr/bin/env python\nfrom docopt import docopt\n\nfrom poeml.graph import Graph\nfrom poeml.ginst_builder import GraphInstanceBuilder\nfrom poeml.ginst_builder import make_props\nfrom poeml.template import apply_template\n\nusage = \"\"\"ASP XML generator\nUsage:\n\taspo.py <graph> <roots>\n\"\"\"\n\ndef d_print(s):\n\tprint(s)\n\ndef aspo(graph, roots):\n\td_print('Started')\n\tnodes = len(graph.nodes)\n\ttiles = nodes // roots\n\tassert tiles*roots==nodes, 'Node count is not divisible by root count'\n\n\tdef node_id(tile, id):\n\t\treturn 'node_%d_%s' % (tile, graph.nodes[id] if type(id) is int else id)\n\t\n\tinst = GraphInstanceBuilder()\n\t\n\td_print('Generating devices...')\n\tinst.start_devices()\n\t\n\tfor tile in range(tiles):\n\t\tbase_id = tile*roots\n\t\tfor id in range(nodes):\n\t\t\troot_idx = (id - base_id) if (id>=base_id and id']),\n\t\troots=int(args['<roots>'])\n\t)\n\td_print('Done')\n\tprint(xml)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"aspo.py","file_name":"aspo.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"234957826","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n\nclass Model(object):\n def __init__(self):\n super(Model, self).__init__()\n\n self.model = nn.Sequential(\n nn.Linear(4, 64),\n nn.Sigmoid(),\n nn.Linear(64, 3)\n )\n self.loss = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.Adam(params=self.model.parameters(), lr=0.001)\n\n\ndef read_data_bak(path: str):\n label2id = {'\"setosa\"': 0, '\"versicolor\"': 1, '\"virginica\"': 2}\n\n file = open(path, \"r\", encoding=\"utf-8\")\n lines = file.readlines()\n\n features, labels = [], []\n for i in range(1, len(lines)):\n line = lines[i].strip().split()\n length1 = float(line[1])\n width1 = float(line[2])\n length2 = float(line[3])\n width2 = float(line[4])\n label = label2id[line[5]]\n\n features.append([length1, width1, length2, width2])\n labels.append(label)\n return features, labels\n\n\ndef prepare_data(features: list, labels: list, ratio: float):\n assert len(features) == len(labels)\n\n data_size = len(features)\n idx = list(range(data_size))\n np.random.shuffle(idx)\n\n train_features = np.array([features[i] for i in idx[:int(ratio * data_size)]])\n train_labels = np.array([labels[i] for i in idx[:int(ratio * data_size)]])\n test_features = np.array([features[i] for i in idx[int(ratio * data_size):]])\n test_labels = np.array([labels[i] for i in idx[int(ratio * data_size):]])\n\n train_features, train_labels, test_features, test_labels = torch.from_numpy(train_features).float(), torch.from_numpy(train_labels).long(), torch.from_numpy(test_features).float(), torch.from_numpy(test_labels).long()\n return train_features, train_labels, test_features, test_labels\n\n\ndef eval(predictions: list, labels: torch.Tensor):\n labels = labels.data.tolist()\n truth = sum([1 if predictions[i] == labels[i] else 0 for i in range(len(labels))]) * 1.0 / len(labels)\n print(truth)\n\n\ndef read_data(file_path: str):\n label2id = {\n '\"setosa\"': 0,\n '\"versicolor\"': 1,\n 
'\"virginica\"': 2\n }\n\n iris_file = open(file_path, mode=\"r\", encoding=\"utf-8\")\n iris_data = iris_file.readlines()\n\n features, labels = [], []\n iris_data = iris_data[1:]\n for per_data in iris_data:\n per_data = per_data.split()\n\n length1 = float(per_data[1])\n width1 = float(per_data[2])\n length2 = float(per_data[3])\n width2 = float(per_data[4])\n\n label = per_data[5]\n labelid = label2id[label]\n\n features.append([length1, width1, length2, width2])\n labels.append(labelid)\n\n return features, labels\n\n\ndef shuffle_data(features: list, labels: list):\n np.random.seed(0)\n index = list(range(len(features)))\n np.random.shuffle(index)\n\n data_size = len(features)\n train_data_size = int(0.8 * data_size)\n\n train_features = [features[i] for i in index[:train_data_size]]\n train_labels = [labels[i] for i in index[:train_data_size]]\n\n test_features = [features[i] for i in index[train_data_size:]]\n test_labels = [labels[i] for i in index[train_data_size:]]\n\n return train_features, train_labels, test_features, test_labels\n\nif __name__ == '__main__':\n # definition\n label2labelid = {\n '\"setosa\"': 0,\n '\"versicolor\"': 1,\n '\"virginica\"': 2\n }\n\n # data pre-process\n features, labels = [], []\n file = open(\"data/iris.txt\", \"r\", encoding=\"utf-8\")\n total_lines = file.readlines()[1:]\n for each_line in total_lines:\n line = each_line.strip().split()\n\n id = line[0]\n length1 = float(line[1])\n width1 = float(line[2])\n length2 = float(line[3])\n width2 = float(line[4])\n label = line[5]\n labelid = label2labelid[label]\n\n features.append([length1, width1, length2, width2])\n labels.append(labelid)\n\n np.random.seed(0)\n index = list(range(len(features)))\n np.random.shuffle(index)\n\n train_data_size = int(len(features) * 0.8)\n test_data_size = int(len(features) * 0.2)\n\n train_features = [features[i] for i in index[:train_data_size]]\n train_labels = [labels[i] for i in index[:train_data_size]]\n test_features = [features[i] for i in index[train_data_size:]]\n test_labels = [labels[i] for i in index[train_data_size:]]\n\n train_features = torch.from_numpy(np.array(train_features)).float()\n train_labels = torch.from_numpy(np.array(train_labels)).long()\n test_features = torch.from_numpy(np.array(test_features)).float()\n test_labels = torch.from_numpy(np.array(test_labels)).long()\n\n model = nn.Sequential(\n nn.Linear(4, 128),\n nn.Sigmoid(),\n nn.Linear(128, 3)\n )\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001)\n\n epochs = 1000\n for each_epoch in range(epochs):\n optimizer.zero_grad()\n predictions = model(train_features)\n loss_value = loss(predictions, train_labels)\n loss_value.backward()\n optimizer.step()\n print(f\"epoch: {each_epoch + 1}, loss: {loss_value.data}\")\n\n model.eval()\n predictions = model(test_features).detach().numpy()\n predictions = np.argmax(predictions, axis=-1)\n eval(predictions, test_labels)\n\n \"\"\"\n features, labels = read_data(\"data/iris.txt\")\n train_features, train_labels, test_features, test_labels = shuffle_data(features, labels)\n train_features = torch.from_numpy(np.array(train_features)).float()\n test_features = torch.from_numpy(np.array(test_features)).float()\n train_labels = torch.from_numpy(np.array(train_labels)).long()\n test_labels = torch.from_numpy(np.array(test_labels)).long()\n\n model = nn.Sequential(\n nn.Linear(4, 256),\n nn.Sigmoid(),\n nn.Linear(256, 3)\n )\n loss = nn.CrossEntropyLoss()\n optimizer = 
torch.optim.Adam(params=model.parameters(), lr=0.001)\n\n predictions = model(test_features)\n\n epochs = 1000\n for each_epoch in range(epochs):\n optimizer.zero_grad()\n predictions = model(train_features)\n loss_value = loss(predictions, train_labels)\n loss_value.backward()\n optimizer.step()\n print(f\"epoch: {each_epoch}, loss: {loss_value.data}\")\n\n model.eval()\n predictions = model(test_features).detach().numpy()\n predictions = np.argmax(predictions, axis=-1).tolist()\n eval(predictions, test_labels)\n \"\"\"\n\n \"\"\"\n features, labels = read_data_bak(\"data/iris.txt\")\n train_features, train_labels, test_features, test_labels = prepare_data(features, labels, 0.8)\n\n model = Model()\n optimizer = model.optimizer\n epochs = 1000\n for epoch in range(epochs):\n optimizer.zero_grad()\n output = model.model(train_features)\n loss = model.loss(output, train_labels)\n loss.backward()\n optimizer.step()\n print(loss)\n\n predictions = []\n for i in range(test_features.shape[0]):\n feature = test_features[i]\n label = test_labels[i]\n model.model.eval()\n prediction = model.model(feature)\n prediction = np.argmax(prediction.detach().numpy(), axis=-1)\n predictions.append(prediction)\n eval(predictions, test_labels)\n \"\"\"\n\n\n","sub_path":"nn_demo.py","file_name":"nn_demo.py","file_ext":"py","file_size_in_byte":7261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"323501426","text":"import numpy as np\nimport glob\nfrom PIL import Image\nfrom random import shuffle\nimport cv2\n\ntrain_X = []\ntrain_Y = []\ntrain_files = []\n\ntest_X = []\ntest_Y = []\ntest_files = []\n\nDATA_SET = \"found\"\n\n\ndef expand():\n\tfor path in test_files:\n\t\timage = cv2.imread(path)\n\t\texpanded = cv2.copyMakeBorder(src=image, dst=image, top=3, bottom=3, left=3, right=3, borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0])\n\t\tcv2.imwrite(path, expanded)\n\n\tfor path in train_files:\n\t\timage = cv2.imread(path)\n\t\texpanded = cv2.copyMakeBorder(src=image, dst=image, top=3, bottom=3, left=3, right=3, borderType=cv2.BORDER_CONSTANT,\n\t\t value=[0, 0, 0])\n\t\tcv2.imwrite(path, expanded)\n\n\ndef preprocess():\n\tfor path in test_files:\n\t\timage = cv2.imread(path, 0)\n\t\timage = image[3:25, 3:25]\n\n\t\tblurred = cv2.fastNlMeansDenoising(image, h=7, templateWindowSize=7, searchWindowSize=21)\n\t\tthreshold = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 7)\n\t\tthreshold = cv2.bitwise_not(threshold)\n\n\t\tcv2.imwrite(path, threshold)\n\n\tfor path in train_files:\n\t\timage = cv2.imread(path, 0)\n\n\t\timage = image[3:25, 3:25]\n\n\t\tblurred = cv2.fastNlMeansDenoising(image, h=7, templateWindowSize=7, searchWindowSize=21)\n\t\tthreshold = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 7)\n\t\tthreshold = cv2.bitwise_not(threshold)\n\n\t\tcv2.imwrite(path, threshold)\n\n\ndef resize():\n\tfor path in test_files:\n\t\timage = cv2.imread(path)\n\t\timage = cv2.resize(image, (28, 28))\n\t\tcv2.imwrite(path, image)\n\n\ndef get_train_files():\n\tglobal train_files\n\ttrain_files = glob.glob(\"data/\" + DATA_SET + \"/training/*.jpg\")\n\n\tshuffle(train_files)\n\n\ndef get_test_files():\n\tglobal test_files\n\ttest_files = glob.glob(\"data/\" + DATA_SET + \"/testing/*.jpg\")\n\n\tshuffle(test_files)\n\n\ndef load_train_x():\n\tglobal train_X\n\n\tfor path in train_files:\n\t\timg = Image.open(path).convert(\"L\")\n\t\timgarr = 
np.array(img).astype(float).flatten()\n\n\t\ttrain_X.append(imgarr / 255)\n\n\ndef load_train_y():\n\tglobal train_Y\n\n\tfor i in range(len(train_files)):\n\t\ttrain_Y.append(np.array([0 for i in range(10)]).astype(float))\n\t\tidx = int(train_files[i].split(\"_\")[0][-1])\n\t\ttrain_Y[i][idx] = 1\n\n\ndef load_test_x():\n\tglobal test_X\n\n\tfor path in test_files:\n\t\timg = Image.open(path).convert(\"L\")\n\t\timgarr = np.array(img).astype(float).flatten()\n\n\t\ttest_X.append(imgarr / 255)\n\n\ndef load_test_y():\n\tglobal test_Y\n\n\tfor i in range(len(test_files)):\n\t\ttest_Y.append(np.array([0 for i in range(10)]).astype(float))\n\t\tidx = int(test_files[i].split(\"_\")[0][-1])\n\t\ttest_Y[i][idx] = 1\n\n\ndef serialize():\n\ttrain_x_file = open(\"data/\" + DATA_SET + \"/processed/training/X.npy\", \"wb\")\n\tnp.save(train_x_file, train_X)\n\n\ttrain_y_file = open(\"data/\" + DATA_SET + \"/processed/training/Y.npy\", \"wb\")\n\tnp.save(train_y_file, train_Y)\n\n\ttest_x_file = open(\"data/\" + DATA_SET + \"/processed/testing/X.npy\", \"wb\")\n\tnp.save(test_x_file, test_X)\n\n\ttest_y_file = open(\"data/\" + DATA_SET + \"/processed/testing/Y.npy\", \"wb\")\n\tnp.save(test_y_file, test_Y)\n\n\nif __name__ == \"__main__\":\n\tget_train_files()\n\tget_test_files()\n\n\t# expand()\n\n\t# preprocess()\n\n\tload_train_x()\n\tload_train_y()\n\n\tload_test_x()\n\tload_test_y()\n\n\tserialize()\n","sub_path":"dataset-processing.py","file_name":"dataset-processing.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"161275987","text":"'''Train CIFAR10 with PyTorch.'''\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\n#from train_test import train, test\nfrom data_utils import trainloader, testloader\n\n#from utils import progress_bar\nfrom torch.autograd import Variable\nfrom cnn2layer import CNN\n\nimport numpy as np\nfrom skopt.callbacks import DeadlineStopper\nfrom skopt import gp_minimize\nfrom skopt import dump\nfrom space_division import HyperSpace\nfrom mpi4py import MPI\n\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate')\nparser.add_argument('--num_epochs', default=20, type=int, help='number of epochs')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nargs = parser.parse_args()\n\nbest_acc = 0  # best test accuracy\nstart_epoch = 0  # start from epoch 0 or last checkpoint epoch\n\ncomm = MPI.COMM_WORLD\nrank = comm.Get_rank()\nsize = comm.Get_size()\n\n\ndef objective(space):\n    kernel_size1, stride1, dropout1, kernel_size2, stride2, dropout2, learning_rate = space\n\n    # Hyper Parameters\n    num_epochs = 10\n    kernel_size1 = int(kernel_size1)\n    stride1 = int(stride1)\n    dropout1 = float(dropout1)\n    kernel_size2 = int(kernel_size2)\n    stride2 = int(stride2)\n    dropout2 = float(dropout2)\n    learning_rate = float(learning_rate)\n\n    cnn = CNN()\n    cnn.cuda()\n\n    criterion = nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)\n\n    # Train the Model\n    for epoch in range(num_epochs):\n        for i, (images, labels) in enumerate(trainloader):\n            images = Variable(images).cuda()\n            labels = 
Variable(labels).cuda()\n\n            # Forward + Backward + Optimize\n            optimizer.zero_grad()\n            outputs = cnn(images)\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n\n            if (i+1) % 100 == 0:\n                print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'\n                    %(epoch+1, num_epochs, i+1, 60000//128, loss.data[0]))\n\n    # Test the Model\n    correct = 0\n    total = 0\n    for images, labels in testloader:\n        images = Variable(images).cuda()\n        outputs = cnn(images)\n        _, predicted = torch.max(outputs.data, 1)\n        total += labels.size(0)\n        correct += (predicted.cpu() == labels).sum()\n\n    test_accuracy = 100 * correct / total\n    return loss.data[0]\n\n\ndef main():\n    if rank == 0:\n        hyperparameters = {'kernelSize1': np.arange(2,10),\n                           'stride1': np.arange(1, 5),\n                           'dropout1': np.linspace(0.0, 0.8),\n                           'kernelSize2': np.arange(2,10),\n                           'stride2': np.arange(1, 5),\n                           'dropout2': np.linspace(0.0, 0.8),\n                           'learningRate': np.linspace(0.001, 0.1)}\n\n        hyperspace = HyperSpace(hyperparameters)\n        all_intervals = hyperspace.fold_space()\n        hyperspaces = hyperspace.hyper_permute(all_intervals)\n        subspace_keys, subspace_boundaries = hyperspace.format_hyperspace(hyperspaces)\n    else:\n        subspace_keys, subspace_boundaries = None, None\n\n    space = comm.scatter(subspace_boundaries, root=0)\n\n    deadline = DeadlineStopper(18000)\n    # Gaussian process minimization (see scikit-optimize skopt module for other optimizers)\n    res_gp = gp_minimize(objective, space, n_calls=50, callback=deadline, random_state=0, verbose=True)\n    # Each worker will write their results to disk\n    dump(res_gp, '/lustre/atlas/proj-shared/csc237/ygx/safari_zone/vision/pytorch/cifar2/mobilenet/hyper_results/gp_subspace_' + str(rank))\n\n\nif __name__=='__main__':\n    main()\n","sub_path":"hyperdrive_cifar2.py","file_name":"hyperdrive_cifar2.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"290487079","text":"#===============================================================\r\n#Author: Eric Zhou\r\n#Date: Oct 25, 2017\r\n#Purpose: To create a GUI version of Number Theory\r\n#Inputs: Keyboard, 2 valid positive numbers, click \"proceed\"\r\n#Outputs: Screen, returns factorials of two numbers,\r\n#the permutations, combinations, GCD, LCM and if the two numbers\r\n#entered are relatively prime.\r\n#===============================================================\r\n\r\nfrom tkinter import *\r\nfrom tkinter import messagebox  #messagebox is a submodule and is not pulled in by the star import\r\n\r\n#==================================\r\n#Date: Oct 12, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Find the factorial of n\r\n#Inputs: Keyboard and factorial\r\n#Outputs: Return factorial\r\n#==================================\r\n\r\ndef getFactorial(number=0):\r\n    factorialNumber = 0\r\n    total = 1\r\n    while factorialNumber != (number):\r\n        factorialNumber += 1\r\n        total *= (factorialNumber + number) - number\r\n    return total\r\n\r\n#==============================================\r\n#Date: Oct 12, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Use the formula to receive an output\r\n#Inputs: Keyboard and 2 numbers\r\n#Outputs: Return the permutation of the 2 numbers\r\n#==============================================\r\n\r\ndef calcPermutations(n, r):\r\n    m = 0\r\n\r\n    if n < r:\r\n        m = r - n\r\n        m = getFactorial(r-n)\r\n        r = getFactorial(r)\r\n        n = getFactorial(n)\r\n        calcPermutations = r / (m)\r\n    else:\r\n        m = n - r\r\n        m = getFactorial(n-r)\r\n        r = getFactorial(r)\r\n        n = getFactorial(n)\r\n        calcPermutations = n / (m)\r\n    return 
calcPermutations\r\n\r\n#================================================\r\n#Date: Oct 13, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Finding the combinations of 2 numbers\r\n#Inputs: Keyboard, n and r\r\n#Outputs: Return the combinations of the 2 numbers\r\n#================================================\r\n\r\ndef calcCombinations(n, r):\r\n m = 0\r\n \r\n if n < r:\r\n m = r - n\r\n m = getFactorial(r-n)\r\n r = getFactorial(r)\r\n n = getFactorial(n)\r\n calcCombinations = r / (n * (m))\r\n else:\r\n m = n - r\r\n m = getFactorial(n-r)\r\n r = getFactorial(r)\r\n n = getFactorial(n)\r\n calcCombinations = n / (r * (m))\r\n \r\n return calcCombinations\r\n\r\n#================================================================\r\n#Date: Oct 13, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Find the greatest common divisor of 2 positive Numbers\r\n#Inputs: Keyboard, 2 positive integers\r\n#Outputs: Return the greatest common divisor\r\n#================================================================\r\n\r\ndef calcGCD(m, n):\r\n t = m % n\r\n while t != 0:\r\n m = n\r\n n = t\r\n t = m % n\r\n return n\r\n\r\n#================================================================\r\n#Date: Oct 13, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Find the least common multiple of 2 positive Numbers\r\n#Inputs: Keyboard, 2 positive integers\r\n#Outputs: Return the least common multiple\r\n#================================================================\r\n\r\ndef calcLCM(m, n, GCD):\r\n LCM = m * n / GCD\r\n return LCM\r\n\r\n#================================================================\r\n#Date: Oct 13, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Find out if the given 2 numbers are relatively prime\r\n#Inputs: Keyboard, 2 positive integers\r\n#Outputs: Return the GCD if it equals to 1 or not\r\n#================================================================\r\n\r\ndef getRelativelyPrime(GCD):\r\n if GCD == 1:\r\n relativelyPrime = \"relatively prime \"\r\n else:\r\n relativelyPrime = \"not relatively prime \"\r\n return relativelyPrime\r\n\r\n#================================================================\r\n#Date: Oct 26, 2017\r\n#Author: Eric Zhou\r\n#Purpose: Main code subprogram, sees if numbers entered are valid\r\n#Inputs: Keyboard,\r\n#Outputs: GUI statements, errors\r\n#================================================================\r\n\r\ndef buttonPressed():\r\n valid = True\r\n strNumber1 = str(firstNumber.get())\r\n strNumber2 = str(secondNumber.get())\r\n if not (strNumber1.isdigit() and strNumber2.isdigit()):\r\n messagebox.showerror(\"Error\", \"Your last inputs were not all valid positive integers. \")\r\n valid = False\r\n \r\n if valid == True:\r\n number1 = int(firstNumber.get())\r\n number2 = int(secondNumber.get())\r\n if not(number1 >0 and number2 >0 and number1 <=10 and number2 <=10):\r\n messagebox.showerror(\"Error\", \"Your last inputs were not in range 1-10. 
\")\r\n valid = False\r\n \r\n if valid == True:\r\n valFirst = getFactorial(number1)\r\n factorial1.set(value = \" The factorial of the first number is \" + str(valFirst))\r\n valSecond = getFactorial(number2)\r\n factorial2.set(value = \" The factorial of the second number is \" + str(valSecond))\r\n valPermutations = calcPermutations(number1, number2)\r\n permutations.set(value = \" The permutations is \" + str(valPermutations))\r\n valCombinations = calcCombinations(number1, number2)\r\n combinations.set(value = \" The combinations is \" + str(valCombinations))\r\n valGCD = calcGCD(number1, number2)\r\n GCD.set(value = \" The GCD is \" + str(valGCD))\r\n valLCM = calcLCM(number1, number2, valGCD)\r\n LCM.set(value = \" The LCM is \" + str(valLCM))\r\n valRelativelyPrime = getRelativelyPrime(valGCD)\r\n relativelyPrime.set(value = \"The two numbers entered is \" + str(valRelativelyPrime))\r\n\r\n#MAIN WINDOW\r\nmainWindow = Tk()\r\nmainWindow.title(\"Number Theory\")\r\n\r\n#Menu\r\nmenubar = Menu(mainWindow)\r\n\r\n#File Menu\r\nfilemenu = Menu(menubar, tearoff=0)\r\nfilemenu.add_command(label=\"Open\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\nfilemenu.add_command(label=\"Save\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\nfilemenu.add_separator()\r\nfilemenu.add_command(label=\"Exit\",\r\n command=lambda:mainWindow.destroy())\r\nmenubar.add_cascade(label=\"File\", menu=filemenu)\r\n\r\n#Edit Menu\r\neditmenu = Menu(menubar, tearoff = 0)\r\neditmenu.add_command(label=\"Cut\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\neditmenu.add_command(label=\"Copy\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\neditmenu.add_command(label=\"Paste\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\neditmenu.add_command(label=\"Redo\",\r\n command=lambda:messagebox.showerror(\"Error\", \"Not completed\"))\r\nmenubar.add_cascade(label=\"Edit\", menu=editmenu)\r\n\r\n#Help Menu\r\nhelpmenu = Menu(menubar, tearoff = 0)\r\nhelpmenu.add_command(label=\"About\",\r\n command=lambda:messagebox.showinfo(\" About \", \" This program was created by Eric Zhou :) \"))\r\nmenubar.add_cascade(label=\"Help\", menu=helpmenu)\r\n\r\n#Frames (Gray/Blue)\r\nFrame(bg = \"gray\", width = 300, height = 125).place(x=0, y=0)\r\nFrame(bg = \"white\", width= 275, height =100).place(x=13, y=13)\r\nFrame(bg = \"steelblue\", width = 300, height=225).place(x=0, y=125)\r\nFrame(bg = \"white\", width = 275, height = 195).place(x=13, y=140)\r\n\r\nmainWindow.config(menu=menubar, width= 300, height = 350)\r\n\r\n#StringVars\r\nfactorial1 = StringVar()\r\nfactorial1.set(value=\"\")\r\nfactorial2 = StringVar()\r\nfactorial2.set(value=\"\")\r\nfirstNumber = StringVar()\r\nfirstNumber.set(value = \"\")\r\nsecondNumber = StringVar()\r\nsecondNumber.set(value = \"\")\r\nnumber = StringVar()\r\nnumber.set(value = \"\")\r\npermutations = StringVar()\r\npermutations.set(value = \"\")\r\ncombinations = StringVar()\r\ncombinations.set(value = \"\")\r\nGCD = StringVar()\r\nGCD.set(value = \"\")\r\nLCM = StringVar()\r\nLCM.set(value = \"\")\r\nrelativelyPrime = StringVar()\r\nrelativelyPrime.set(value = \"\")\r\n\r\n#Gray Box\r\nLabel(mainWindow, text = ' Enter the first number: ').place(x= 15, y=25)\r\nEntry(mainWindow, textvariable = firstNumber).place(x=160, y = 25)\r\nLabel(mainWindow, text = ' Enter the second number: ').place(x= 15, y=50)\r\nEntry(mainWindow, textvariable = 
secondNumber).place(x=160, y = 50)\r\nButton(mainWindow, text= \" Proceed \", command=lambda:buttonPressed()).place(x=110, y=85)\r\n\r\n#Steel-Blue Box\r\nLabel(mainWindow, textvariable = factorial1).place(x=13, y = 150)\r\nLabel(mainWindow, textvariable = factorial2).place(x=13, y = 175)\r\nLabel(mainWindow, textvariable = permutations).place(x=13, y=200)\r\nLabel(mainWindow, textvariable = combinations).place(x=13, y=225) \r\nLabel(mainWindow, textvariable = GCD).place(x=13, y=250)\r\nLabel(mainWindow, textvariable = LCM).place(x=13, y=275)\r\nLabel(mainWindow, textvariable = relativelyPrime).place(x=13, y=300)\r\n\r\n#Main Loop\r\nmainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"PY5 - GUI -front ends- for Number Theory- Red Dog and Stars/Number Theory GUI.py","file_name":"Number Theory GUI.py","file_ext":"py","file_size_in_byte":8673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"625579658","text":"import messages as msg\nimport helpers, re\n\n# settings = helpers.get_settings()\n\ndef execute():\n\tmsg.types()\n\tlistOut = helpers.user_input(\"Full List: [y/n] \")\n\tresults = helpers.run_command_output('ag --list-file-types', False)\n\n\tif listOut == 'y':\n\t\tpat = re.compile(\"--.+\\n\")\n\t\tmatches = re.findall(pat, results)\n\t\tfor item in matches:\n\t\t\tprint('- {}'.format(item.replace('--', '')[:-1]))\n\telif listOut == 'n':\n\t\treturn","sub_path":"Type.py","file_name":"Type.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"434576582","text":"from __future__ import absolute_import, division, print_function\n\nimport stripe\nfrom tests.helper import StripeResourceTest\n\n\nclass PayoutTest(StripeResourceTest):\n\n def test_list_payouts(self):\n stripe.Payout.list()\n self.requestor_mock.request.assert_called_with(\n 'get',\n '/v1/payouts',\n {}\n )\n\n def test_cancel_payout(self):\n self.mock_response({\n 'id': 'po_cancel',\n 'status': 'canceled',\n })\n\n payout = stripe.Payout(id='po_cancel')\n\n self.assertTrue(payout is payout.cancel(idempotency_key='idem-foo'))\n self.assertEquals('canceled', payout.status)\n self.assertEquals('po_cancel', payout.id)\n\n self.requestor_mock.request.assert_called_with(\n 'post',\n '/v1/payouts/po_cancel/cancel',\n {},\n {'Idempotency-Key': 'idem-foo'}\n )\n","sub_path":"tests/api_resources/test_payout.py","file_name":"test_payout.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"432648740","text":"import json\nfrom enum import Enum\n\n'''\n ‘Thoughts are the shadows of our feelings-always darker, emptier, and simpler’. Nietzsche, The gay science p. 203. 
\n'''\n\nclass Msg(Enum):\n\tsimple_msg = 1\n\tsadness = 2\n\thappiness = 3\n\nclass Connect:\n\tdef __init__(self):\n\t\tself.file_path = 'log.json'\n\t\tself.initData()\n\t\tself.total = 0\n\t\tself.sadness = 0.0\n\t\tself.happiness = 0.0\n\n\tdef initData(self):\n\t\tdata = {}\n\t\tdata['last_msg'] = ''\n\t\tdata['last_sentiment'] = ''\n\t\tdata['sadness_degree'] = 0.5\n\t\tdata['happiness_degree'] = 0.5\n\t\tdata['msg_log'] = []\n\t\tdata['state'] = 0 # Not changed\n\t\twith open(self.file_path, 'w+') as file:\n\t\t\tf_line = file.read(1)\n\t\t\tif not f_line:\n\t\t\t\tjson.dump(data, file)\n\t\n\tdef balance(self, msg_type):\n\t\tself.total += 1\n\t\tif msg_type is Msg.happiness:\n\t\t\tself.happiness += 1\n\n\t\telif msg_type is Msg.sadness:\n\t\t\tself.sadness += 1\n\n\tdef toggleState(self, data):\n\t\tif data['state'] == 0:\n\t\t\tdata['state'] = 1 # Changed\n\t\telse:\n\t\t\tdata['state'] = 0 # Not changed\n\n\tdef write(self, msg_type, msg):\n\t\twith open(self.file_path, 'r+') as file:\n\t\t\tdata = json.load(file)\n\t\t\tif msg_type is Msg.simple_msg:\n\t\t\t\tdata['last_msg'] = msg\n\t\t\t\tdata['msg_log'].append(msg)\n\t\t\t\tprint(\"Message inserted into json\")\n\n\t\t\telif msg_type is Msg.sadness:\n\n\t\t\t\tdata['last_msg'] = msg\n\t\t\t\tdata['msg_log'].append(msg)\n\n\t\t\t\tself.balance(msg_type)\n\t\t\t\tdata['last_sentiment'] = 'neg'\n\t\t\t\tdata['sadness_degree'] = self.sadness/self.total\n\t\t\t\tdata['happiness_degree'] = self.happiness/self.total\n\t\t\t\tprint(\"Message inserted into json\")\n\n\t\t\telif msg_type is Msg.happiness:\n\n\t\t\t\tdata['last_msg'] = msg\n\t\t\t\tdata['msg_log'].append(msg)\n\n\t\t\t\tself.balance(msg_type)\n\t\t\t\tdata['last_sentiment'] = 'pos'\n\t\t\t\tdata['happiness_degree'] = self.happiness/self.total\n\t\t\t\tdata['sadness_degree'] = self.sadness/self.total\n\t\t\t\tprint(\"Message inserted into json\")\n\n\t\t\tself.toggleState(data)\n\t\t\tfile.seek(0)\n\t\t\tjson.dump(data, file)\n\t\t\tfile.truncate()\n\n","sub_path":"Project2/Modules/Connection/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"356737699","text":"import hashlib\nimport time\nfrom database import BaseModel, CharField, IntegerField  # assumption: these field types live in the same database module; they were used below without being imported\n\n\nclass Transaction(BaseModel):\n    \"\"\"\n    This model represents in a simplified way a transaction\n    \"\"\"\n    from_address = CharField(max_length=255)\n    to_address = CharField(max_length=255)\n    amount = IntegerField()\n\n    def __init__(self, from_address, to_address, amount):\n        self.from_address = from_address\n        self.to_address = to_address\n        self.amount = amount\n\n    @property\n    def serialized(self):\n        return {\n            'from_address': self.from_address,\n            'to_address': self.to_address,\n            'amount': self.amount\n        }\n\n\nclass Block:\n    \"\"\"\n    This is a simplified implementation of a bitcoin block\n    model.\n    Here is an explanation of what the nonce property is:\n    https://en.bitcoin.it/wiki/Nonce\n    \"\"\"\n    def __init__(self, transaction, prev_hash):\n        self.timestamp = time.time()\n        self.transaction = transaction\n        self.prev_hash = prev_hash\n        self.nonce = 0\n        self.hash = self.hash_block()\n\n    def hash_block(self):\n        concatenation = (\n            str(self.prev_hash).encode('utf-8') +\n            str(self.transaction).encode('utf-8') +\n            str(self.timestamp).encode('utf-8') +\n            str(self.nonce).encode('utf-8')\n        )\n        return hashlib.sha256(concatenation).hexdigest()\n\n    def mine_block(self, difficulty):\n        while 
self.hash[:difficulty] != \"0\" * difficulty:\n            self.nonce += 1\n            self.hash = self.hash_block()\n\n    @property\n    def serialized(self):\n        return {\n            'timestamp': self.timestamp,\n            'transaction': self.transaction,\n            'prev_hash': self.prev_hash,\n            'nonce': self.nonce,\n            'hash': self.hash,\n        }\n\n    def __repr__(self):\n        return \"<Block {}>\".format(self.hash)\n\n\nclass Blockchain:\n    \"\"\"\n    This is a simplified implementation of a blockchain. In this case,\n    it will just consist of an ordered sequence of blocks.\n    \"\"\"\n    def __init__(self):\n        self.chain = []\n        self.difficulty = 1\n        self._create_genesis_block()\n        self.pending_transactions = []\n        self.mining_reward = 100\n\n    def _create_genesis_block(self):\n        \"\"\"\n        In every blockchain we need a Genesis Block, this is the function\n        that creates it.\n        \"\"\"\n        tx_zero = Transaction(\"\", \"\", 0)\n        genesis_block = Block([tx_zero, ], 0)\n        self.chain.append(genesis_block)\n\n    def get_latest_block(self):\n        return self.chain[-1]\n\n    def mine_pending_transactions(self, miner_address):\n        previous_block = self.get_latest_block()\n        block = Block(self.pending_transactions, previous_block.hash)\n        block.mine_block(self.difficulty)\n        self.chain.append(block)\n\n        miner_reward = Transaction(\"\", miner_address, self.mining_reward)\n        self.pending_transactions = [miner_reward]\n\n    def create_transaction(self, transaction):\n        self.pending_transactions.append(transaction)\n\n    def get_address_balance(self, address):\n        balance = 0\n        for block in self.chain:\n            for individual_transaction in block.transaction:\n                if individual_transaction.from_address == address:\n                    balance -= individual_transaction.amount\n                if individual_transaction.to_address == address:\n                    balance += individual_transaction.amount\n        return balance\n\n    def check_if_chain_is_valid(self):\n        for block in self.chain:\n            if self.chain.index(block) == 0:\n                continue\n            current_block = block\n            previous_block = self.chain[(self.chain.index(block) - 1)]\n\n            if current_block.hash != current_block.hash_block():\n                return False\n\n            if current_block.prev_hash != previous_block.hash:\n                return False\n\n        return True\n\n    @property\n    def config_serialized(self):\n        # Here we omit the chain property because it will be what we are going\n        # to store in a separate db, this will go into a blockchain config db\n        return {\n            'difficulty': self.difficulty,\n            'pending_transactions': self.pending_transactions,\n            'mining_reward': self.mining_reward,\n        }\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"444846099","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# __author__ = 'Arthur|http://wingedwhitetiger.com/'\n\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\n\n\nclass OpenVDBExport(QtWidgets.QWidget):\n    def __init__(self, parent=None):\n        super(OpenVDBExport, self).__init__(parent)\n\n        '''init data'''\n        self.__time = [1.0, 240.0]\n        self.__step = 1.0\n\n        '''create layout'''\n        main_layout = QtWidgets.QVBoxLayout(self)\n        main_layout.setAlignment(QtCore.Qt.AlignTop)\n        main_layout.setContentsMargins(0, 0, 0, 0)\n\n        time_layout = QtWidgets.QHBoxLayout()\n        time_layout.addSpacing(5)\n        time_layout.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n        time_layout.setContentsMargins(0, 0, 0, 0)\n\n        time_label_layout = QtWidgets.QVBoxLayout()\n        time_label_layout.setAlignment(QtCore.Qt.AlignTop)\n        time_label_layout.setContentsMargins(0, 0, 0, 0)\n\n        time_options_layout = QtWidgets.QVBoxLayout()\n        
time_options_layout.setAlignment(QtCore.Qt.AlignTop)\n time_options_layout.setContentsMargins(0, 0, 0, 0)\n\n time_custom_layout = QtWidgets.QHBoxLayout()\n time_custom_layout.addSpacing(15)\n time_custom_layout.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\n time_custom_layout.setContentsMargins(0, 0, 0, 0)\n\n step_layout = QtWidgets.QHBoxLayout()\n step_layout.addSpacing(40)\n step_layout.setAlignment(QtCore.Qt.AlignLeft)\n step_layout.setContentsMargins(0, 0, 0, 0)\n\n '''create widget'''\n time_label = QtWidgets.QLabel('Frame Range:')\n self.__timeRange_btg = QtWidgets.QButtonGroup()\n self.__timeRange_btg.buttonClicked.connect(self.__toggle_time)\n current_rb = QtWidgets.QRadioButton('Current frame')\n current_rb.setObjectName('current')\n custom_rb = QtWidgets.QRadioButton('Start/End')\n custom_rb.setObjectName('custom')\n self.__timeRange_btg.addButton(current_rb)\n self.__timeRange_btg.addButton(custom_rb)\n custom_rb.setChecked(True)\n\n self.__time_custom_label = QtWidgets.QLabel('Start/End:')\n self.__custom_min = QtWidgets.QDoubleSpinBox()\n self.__custom_min.setMinimum(0.0)\n self.__custom_min.setMaximum(9998.9)\n self.__custom_min.setValue(self.__time[0])\n self.__custom_min.setDecimals(4)\n self.__custom_min.setSingleStep(1)\n self.__custom_min.valueChanged.connect(self.__set_time)\n\n self.__custom_max = QtWidgets.QDoubleSpinBox()\n self.__custom_max.setMinimum(1.0)\n self.__custom_max.setMaximum(9999.9)\n self.__custom_max.setValue(self.__time[1])\n self.__custom_max.setDecimals(4)\n self.__custom_max.setSingleStep(1)\n self.__custom_max.valueChanged.connect(self.__set_time)\n\n self.__step_label = QtWidgets.QLabel('Step:')\n self.__inc = QtWidgets.QDoubleSpinBox()\n self.__inc.setValue(1.0)\n self.__inc.setDecimals(4)\n self.__inc.setMinimum(0.01)\n self.__inc.setSingleStep(1)\n self.__inc.valueChanged.connect(self.__set_step)\n\n '''add layout'''\n main_layout.addLayout(time_layout)\n main_layout.addLayout(time_custom_layout)\n main_layout.addLayout(step_layout)\n\n time_layout.addLayout(time_label_layout)\n time_layout.addLayout(time_options_layout)\n\n '''add widget'''\n time_label_layout.addWidget(time_label)\n time_options_layout.addWidget(current_rb)\n time_options_layout.addWidget(custom_rb)\n\n time_custom_layout.addWidget(self.__time_custom_label)\n time_custom_layout.addWidget(self.__custom_min)\n time_custom_layout.addWidget(self.__custom_max)\n\n step_layout.addWidget(self.__step_label)\n step_layout.addWidget(self.__inc)\n\n def __toggle_time(self):\n option = self.__timeRange_btg.checkedButton().objectName()\n\n if option == 'custom':\n self.__time_custom_label.setEnabled(True)\n self.__custom_min.setEnabled(True)\n self.__custom_max.setEnabled(True)\n self.__step_label.setEnabled(True)\n self.__inc.setEnabled(True)\n self.__time = [self.__custom_min.value(), self.__custom_max.value()]\n else:\n self.__time_custom_label.setEnabled(False)\n self.__custom_min.setEnabled(False)\n self.__custom_max.setEnabled(False)\n self.__step_label.setEnabled(False)\n self.__inc.setEnabled(False)\n self.__time = []\n\n def __set_time(self):\n self.__time = [self.__custom_min.value(), self.__custom_max.value()]\n\n def __set_step(self):\n self.__step = self.__inc.value()\n\n def get_option(self):\n return {'time': self.__time,\n 'step': self.__step}\n\n\nif __name__ == '__main__':\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n repository = OpenVDBExport()\n repository.show()\n 
sys.exit(app.exec_())\n","sub_path":"WitRepository/Houdini/16.0/scripts/python/repositoryLib/pysideLib/houdiniOpenVDBWidget.py","file_name":"houdiniOpenVDBWidget.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"589564580","text":"import torch\nimport math\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport numpy as np\n\n\nclass MDN(nn.Module):\n \"\"\"\n Mixture Density Network\n \"\"\"\n\n def __init__(self, input_size, num_mixtures):\n super().__init__()\n self.input_size = input_size\n self.num_mixtures = num_mixtures\n\n self.fc1 = nn.Linear(self.input_size, 6 * num_mixtures + 1)\n\n def forward(self, x):\n mid = self.fc1(x)\n return mid\n\n def sample(self, output, bias=5.):\n output = output.view(1, -1)\n w, mx, my, varx, vary, rho, end = self.to_dists(output, bias=bias)\n w = w.softmax(dim=-1).data.cpu().view(-1).numpy()\n idx = np.random.choice(range(self.num_mixtures), size=2, p=w)[0]\n varx = varx.view(-1)[idx].item()\n vary = vary.view(-1)[idx].item()\n mx = mx.view(-1)[idx].item()\n my = my.view(-1)[idx].item()\n rho = rho.view(-1)[idx].item()\n mean = torch.empty(2)\n mean[0], mean[1] = mx, my\n cov = torch.Tensor([[varx, rho*math.sqrt(varx*vary)],\n [rho*math.sqrt(varx*vary), vary]])\n\n z = torch.distributions.MultivariateNormal(loc=mean,\n covariance_matrix=cov)\n t = z.sample()\n\n end = torch.sigmoid(end).view(-1).item()\n e = 1 if torch.rand(1).item() < end else 0\n inp = torch.zeros(1, 1, 3)\n inp[0, 0, 0] = t[0]\n inp[0, 0, 1] = t[1]\n inp[0, 0, 2] = e\n return inp.view(1, 3)\n\n def to_dists(self, output, bias=0.):\n lens = [self.num_mixtures for i in range(6)]\n lens.append(1)\n w, mx, my, vx, vy, rho, end = output.split(lens, dim=-1)\n\n w = w * (1 + bias)\n w = w.log_softmax(dim=-1)\n varx = torch.nn.functional.softplus(vx-bias)\n vary = torch.nn.functional.softplus(vy-bias)\n rho = rho.tanh()\n return w, mx, my, varx, vary, rho, end\n\n def loss_fn(self, output, target):\n tx, ty, te = target.chunk(3, dim=-1)\n w, mx, my, varx, vary, rho, end = self.to_dists(output)\n\n ro2 = 1. 
- rho * rho\n        dx = mx - tx\n        dy = my - ty\n        z = torch.pow(dx, 2) / varx\n        z = z + torch.pow(dy, 2) / vary\n        z = z - 2.0 * rho * dx * dy / torch.sqrt(varx*vary)\n\n        llh = -z / 2.0 / ro2\n        llh = llh - math.log(2*math.pi)\n        llh = llh - 0.5 * (varx.log() + vary.log() + ro2.log())\n        llh = torch.logsumexp(llh + w, dim=-1, keepdim=True)\n        out = -llh + \\\n            F.binary_cross_entropy_with_logits(end, te, reduction=\"none\")\n\n        return out\n\n\nclass Decoder(nn.Module):\n    def __init__(self, input_size=3, num_mixtures=20, rnn_hidden_size=128, num_rnn_layers=2, dropout=0.2):\n        super().__init__()\n        self.rnn_hidden_size = rnn_hidden_size\n        self.num_rnn_layers = num_rnn_layers\n\n        self.rnn = nn.LSTM(input_size=input_size, hidden_size=rnn_hidden_size,\n                           num_layers=num_rnn_layers, dropout=dropout)\n        self.mdn = MDN(rnn_hidden_size, num_mixtures)\n\n    def forward(self, x, hidden):\n        mid, hidden = self.rnn(x, hidden)\n        out = self.mdn(mid)\n        return out, hidden\n\n    def init_hidden_states(self, bs, device):\n        state = (torch.ones(self.num_rnn_layers, bs, self.rnn_hidden_size, device=device).data,\n                 torch.ones(self.num_rnn_layers, bs, self.rnn_hidden_size, device=device).data)\n        return state\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"643305049","text":"from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route(\"/hello\")\ndef root_page():\n    response = render_template('hello.html', greetings=\"Saludos, Amigos\")\n    return response\n\n\nif __name__ == \"__main__\":\n    app.run()\n","sub_path":"project/hello02.py","file_name":"hello02.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"445810781","text":"import socket\r\n\r\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\nserver.bind(('localhost', 43568))\r\nprint('server is waiting for requests...')\r\n\r\narp_data = {\r\n    '192.168.42.1': 'AE:D3:45:66:7B:55',\r\n    '192.168.42.2': 'AE:D3:46:6D:7C:22'\r\n}\r\n\r\n\r\ndef rarp(query):\r\n    flag = 0\r\n    for key,value in arp_data.items():\r\n\r\n        if value == query:\r\n            flag = 1\r\n            return key\r\n\r\n    if flag == 0:\r\n        return 'none'\r\n\r\n\r\nwhile True:\r\n    msg, addr = server.recvfrom(4096)\r\n    print('RARP request for ', msg.decode(), 'from', addr)\r\n    query = msg.decode()\r\n\r\n    result = rarp(query)\r\n\r\n    server.sendto(bytes(result, 'utf-8'), addr)\r\n\r\n","sub_path":"NetworksLabCode/rarp_server.py","file_name":"rarp_server.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"506377781","text":"\"\"\"\nModule for evaluating resistance to power-analysis attacks.\nThe Data class reads and stores plaintext and power-trace data.\nThe Evaluation classes implement the attack algorithms.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport math\nimport struct\nimport matplotlib.pyplot as plt\n\nfrom object_module import ObjISbox  # required by the __main__ demo below\n\n\"\"\"Class that stores data, used to read plaintext, power and similar data from files.\n\"\"\"\n\n\nclass Data:\n    \"\"\"Read and store plaintext or power data from files.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"_data is read-only and holds the data; _files is read-only and holds the list of file names read.\n        \"\"\"\n        self._data = np.array([])\n        self._files = []\n\n    def read_files(self, files, etype, ftype='c', columns=1):\n        \"\"\"Read the files in the files list and store them in an array with columns columns.\n        Keyword Arguments:\n        columns -- (default 1)\n        etype -- (default ) element type of the data in the files, e.g. int or float\n        files -- (default [])\n        ftype -- (default ) file type, e.g. binary (b) or character (c)\n        \"\"\"\n        self._files = files\n        data_dir = []\n\n        for fname in files:\n            if 'c' == 
ftype:\n                with open(fname, 'r') as file:\n                    data_file = [etype(i) for i in file.read().split()]\n            else:\n                with open(fname, 'rb') as file:\n                    check = (1023, 1023, 1023, 1023, 1023, 1023, 1023,\n                             1023, 1023, 1023, 1023, 1023, 0, 0, 0, 0)\n                    if check == struct.unpack('16H', file.read(32)):\n                        data = file.read()\n                        num_data = divmod(len(data), 2)[0]\n                        data_file = list(struct.unpack(''.join([str(num_data), 'H']), data))\n                    else:\n                        raise UserWarning(''.join(['file ', fname, ' failed verification']))\n\n            data_dir.extend(data_file)\n\n        num_total = len(data_dir)\n        num_mod = divmod(num_total, columns)[1]\n        \"\"\"Store the data as an array of columns columns and n rows; surplus data is discarded.\n        \"\"\"\n        data_dir = np.array(data_dir)[0:(num_total - num_mod)]\n        self._data = data_dir.reshape(-1, columns)\n\n\n    def read_dir(self, dirname, etype, ftype, columns=1):\n        \"\"\"Read all files in the directory and store them as an array with columns columns.\n        Keyword Arguments:\n        dirname --\n        columns -- (default -1)\n        etype -- (default 'float')\n        \"\"\"\n        path_files = [''.join([dirname, x]) for x in os.listdir(dirname)]\n        self.read_files(path_files, etype, ftype, columns)\n\n\n    @property\n    def data(self):\n        \"\"\"Make the _data attribute read-only.\n        \"\"\"\n        return self._data\n\n\n    @property\n    def files(self):\n        \"\"\"Make the _files attribute read-only.\n        \"\"\"\n        return self._files\n\n\nclass AttackCorr:\n    \"\"\"Evaluation of resistance to correlation-coefficient attacks.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"result holds the attack result data.\n        \"\"\"\n        self._mat_corr = np.array([])\n        self._result = []\n\n    def do_attack(self, obj_module, plain, power, bits_key):\n        \"\"\"Evaluate the attack resistance of the target object.\n        Keyword Arguments:\n        obj_module --\n        data --\n        power --\n        \"\"\"\n\n        self._mat_corr = np.array([self._corr_onekey(obj_module,\n                                                     power,\n                                                     plain,\n                                                     key)\n                                   for key in range(2**bits_key)])\n\n        abs_mat_corr = abs(self._mat_corr)\n        self._result = np.where(abs_mat_corr == abs_mat_corr.max())[0]\n\n\n    def plot_result(self, bits_key):\n        \"\"\"Takes the number of key bits; an 8-bit key gives 256 candidates.\n        \"\"\"\n        plt.plot(range(2**bits_key), self._mat_corr)\n\n    @staticmethod\n    def _corr_onekey(obj, power_data, plain_data, key):\n        \"\"\"Run one round of the attack against key.\n        Keyword Arguments:\n        obj_module -- the attack target\n        plain -- plaintext, an instance of Data\n        key -- a single key, 0-255\n        \"\"\"\n\n        \"\"\"Hamming distance of the ciphertext produced from the plaintext under a given key.\n        \"\"\"\n\n        def haming_distance(plain_data, obj, key):\n            \"\"\"Generate the sequence of Hamming distances of the outputs for key.\n            \"\"\"\n            cipher_current = 0\n            for plain_text in plain_data:\n                cipher_last = cipher_current\n                cipher_current = obj.gen_cipher(plain_text, key)\n                yield np.binary_repr(\n                    np.bitwise_xor(cipher_last, cipher_current)).count('1')\n\n        haming = list(haming_distance(plain_data, obj, key))\n        return np.corrcoef(haming, power_data.transpose())[0, 1:]\n\nclass EvaluationCorr:\n    \"\"\"Correlation-coefficient evaluation.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Quantiles of the standard normal distribution, keyed by confidence level.\"\"\"\n        self._table = {'0.80': 0.84162123,\n                       '0.85': 1.03643339,\n                       '0.90': 1.28155157,\n                       '0.95': 1.64485363,\n                       '0.96': 1.75068607,\n                       '0.97': 1.88079361,\n                       '0.98': 2.05374891,\n                       '0.99': 2.32634787}\n\n        self._result = {}\n\n    def do_evaluation(self, mat_corr, truekey):\n        \"\"\"\n        Keyword Arguments:\n        data_corr --\n        truekey --\n        \"\"\"\n        self._result = dict([(k, self._evaluate(mat_corr, truekey, v))\n                             for (k, v) in self._table.items()])\n\n    @staticmethod\n    def _evaluate(mat_corr, truekey, z_alpha):\n        \"\"\"\n        Keyword Arguments:\n        result_corr --\n        truekey --\n        alpha --\n        \"\"\"\n        p_truekey = abs(mat_corr[truekey]).max()\n        return 3 + 8*((z_alpha/math.log((1 + p_truekey)/(1 - p_truekey)))**2)\n\n\n\nclass AttackMean:\n    \"\"\"Resistance to difference-of-means attacks.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"result holds the attack result data.\n        \"\"\"\n        self._mat_mean = []\n        self._result = []\n\n    def do_attack(self, obj_module, plain, power, bits_key):\n        \"\"\"Evaluate the attack resistance of the target object.\n        Keyword Arguments:\n        
obj_module --\n        data --\n        power --\n        \"\"\"\n\n        self._mat_mean = np.array([self._mean_onekey(obj_module,\n                                                     power,\n                                                     plain,\n                                                     key,\n                                                     bits_key)\n                                   for key in range(2**bits_key)])\n\n        df_mat_mean = pd.DataFrame(self._mat_mean,\n                                   index=(range(2**bits_key)),\n                                   columns=range(bits_key))\n\n        \"\"\"The keys with the top five values for each bit, 40 in total (8 bits, 5 per bit).\n        \"\"\"\n        result_top5 = []\n        for i in range(bits_key):\n            result_top5.extend(list(df_mat_mean.sort_values(i, ascending=False).index[0:5]))\n\n        \"\"\"Convert the keys from numpy.int64 to str, because json does not recognize numpy.int64.\n        \"\"\"\n        result_top5 = [str(i) for i in result_top5]\n\n        \"\"\"Count how many times each key appears.\n        \"\"\"\n        result = {}\n        for i in result_top5:\n            result[i] = result.get(i, 0) + 1\n\n        \"\"\"Sort the keys by number of occurrences.\n        \"\"\"\n        self._result = sorted(result.items(), key=lambda d: d[1], reverse=True)\n\n        # abs_mat_mean = abs(self._mat_mean)\n        # self._result = np.array([np.where(i == i.max())[0] for i in self._mat_mean.transpose()]).reshape(bits_key)\n        # self._result = np.array([np.where(i == i.max())[0] for i in df_mat_mean])\n\n\n    def plot_result(self, bits_key):\n        \"\"\"Takes the number of key bits; an 8-bit key gives 256 candidates.\n        \"\"\"\n        plt.plot(range(2**bits_key), self._mat_mean[:, 2])\n\n    @staticmethod\n    def _mean_onekey(obj, power_data, plain_data, key, bits_key):\n        \"\"\"Run one round of the attack against key.\n        Keyword Arguments:\n        obj_module -- the attack target\n        plain -- plaintext, an instance of Data\n        key -- a single key, 0-255\n        \"\"\"\n\n        \"\"\"Hamming distance of the ciphertext produced from the plaintext under a given key.\n        \"\"\"\n        cipher_text = [np.binary_repr(obj.gen_cipher(plain_text, key), bits_key)\n                       for plain_text in plain_data]\n\n        df_data = pd.DataFrame(power_data, index=cipher_text)\n\n        all_cipher = [np.binary_repr(i, bits_key) for i in range(2**bits_key)]\n\n\n        result = []\n        for i in range(7, -1, -1):\n            df_data_1 = df_data.loc[[x for x in all_cipher if x[i] == '1']]\n            df_data_0 = df_data.loc[[x for x in all_cipher if x[i] == '0']]\n            df_data_diff = np.array(df_data_1.mean() - df_data_0.mean())\n            result.append(abs(df_data_diff).max())\n\n        return np.array(result)\n\n\nif __name__ == '__main__':\n\n\n    PLAIN = Data()\n    PLAIN.read_dir('./data_repo/plain/', int, 'c', 1)\n\n    POWER = Data()\n    POWER.read_dir('./data_repo/power/', float, 'c', 400)\n\n    OBJECTIVE = ObjISbox()\n\n    ATTACKCORR = AttackCorr()\n    ATTACKCORR.do_attack(OBJECTIVE, PLAIN.data[:1000], POWER.data[:1000, :], 8)\n    EVALUATION = EvaluationCorr()\n    EVALUATION.do_evaluation(ATTACKCORR._mat_corr, 198)\n    ATTACKCORR.plot_result(8)\n\n\n    ATTACKMEAN = AttackMean()\n    ATTACKMEAN.do_attack(OBJECTIVE, PLAIN.data[:2000], POWER.data[:2000, :], 8)\n    ATTACKMEAN.plot_result(8)\n\n    plt.show()\n","sub_path":"epar/epar.py","file_name":"epar.py","file_ext":"py","file_size_in_byte":9406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"324889185","text":"# -*- coding: utf-8 -*-\nfrom tipfy import Rule\n\ndef get_rules(app):\n    \"\"\"Returns a list of URL rules for the admin application.\n\n    :param app:\n        The WSGI application instance.\n    :return:\n        A list of :class:`tipfy.Rule` instances.\n    \"\"\"\n\n    rules = [\n        Rule('/', handler='apps.example_app.handlers.ExampleHandler'),\n    ]\n\n    return rules\n","sub_path":"apps/example_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"528280273","text":"# https://www.youtube.com/watch?v=OMDn66kM9Qc\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import random_split\nfrom torchvision import datasets, 
transforms\n\n# define the model\nmodel = nn.Sequential(\n nn.Linear(28 * 28, 64),\n nn.ReLU(),\n nn.Linear(64, 64),\n nn.ReLU(),\n nn.Linear(64, 10)\n).cuda()\n\n# define the optimizer\nparams = model.parameters() # under-the-hood\noptimizer = optim.SGD(params, lr=1e-2)\n\n# define the loss\nloss = nn.CrossEntropyLoss()\n\n# data, train and val split\ntrain_data = datasets.MNIST('data', train=True, download=True, transform=transforms.ToTensor())\ntrain, val = random_split(train_data, [55000, 5000])\ntrain_loader = DataLoader(train, batch_size=32)\nval_loader = DataLoader(val, batch_size=32)\n\n# training loop\nnb_epochs = 5\nfor epoch in range(nb_epochs):\n\n losses = [] # for logging\n\n for batch in train_loader:\n x, y = batch\n\n # x: b x 1 x 28 x 28\n b = x.size(0)\n x = x.view(b, -1).cuda()\n\n ### 5 steps for supervised learning ###\n # under-the-hood: gives the underlying idea, code will not work as is\n \n # 1 forward\n l = model(x) # logit\n \n # 2 compute the objective function\n J = loss(l, y.cuda())\n \n # 3 cleaning the gradient\n model.zero_grad()\n # under-the-hood:\n # params.grad._zero()\n \n # 4 accumulate the partial derivatives of J wrt params\n J.backward()\n # under-the-hood:\n # params.grad.add_(dJ/dparams)\n \n # 5 step in opposite direction of the gradient\n optimizer.step()\n # under-the-hood\n # with torch.no_grad(): params = params - eta * params.grad\n \n losses.append(J.item())\n\n print(f'Epoch {epoch+1}, training loss: {torch.tensor(losses).mean():.2f}')\n \n\n losses = [] # for logging\n\n for batch in val_loader:\n x, y = batch\n\n # x: b x 1 x 28 x 28\n b = x.size(0)\n x = x.view(b, -1).cuda()\n\n ### 5 steps for supervised learning ###\n # under-the-hood: gives the underlying idea, code will not work as is\n \n # 1 forward\n with torch.no_grad():\n l = model(x) # logit\n \n # 2 compute the objective function\n J = loss(l, y.cuda())\n \n losses.append(J.item())\n\n print(f'Epoch {epoch+1}, validation loss: {torch.tensor(losses).mean():.2f}')","sub_path":"mnist-classifier-pytorch-basic-gpu.py","file_name":"mnist-classifier-pytorch-basic-gpu.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"145210269","text":"from numpy import *\r\nimport operator\r\n\r\ndef createDataSet():\r\n group = array([[1.0, 1.1],[1.0,1.0],[0,0],[0,0.1]])\r\n labels = ['A', 'A', 'B', 'B']\r\n return group, labels\r\n\r\n\r\ndef classify0(inX, dataSet, labels, k):\r\n dataSetSize = dataSet.shape[0] # the row of dataSet(also means how many date of dataSet)\r\n # Distance calculation\r\n diffMat = tile(inX, (dataSetSize, 1)) # create a new mat have the same row number of dataSet and data is inX\r\n diffMat = diffMat - dataSet\r\n sqDiffMat = diffMat**2\r\n sqDistances = sqDiffMat.sum(axis=1) # row add\r\n distances = sqDistances**0.5\r\n # Sort Distances\r\n sortedDistIndicies = distances.argsort()\r\n # Taking a majority vote\r\n classCount = {}\r\n for i in range(k):\r\n voteIlabel = labels[sortedDistIndicies[i]]\r\n classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\r\n sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\r\n \r\n # The first metten in sortedClassCount is classified data label\r\n return sortedClassCount[0][0]","sub_path":"2_k-Nearest Neighbors/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} 
+{"seq_id":"607894115","text":"import subprocess\nimport sys\nfrom datetime import datetime\n\n\ndef log(*args):\n sys.stdout.write(*args)\n sys.stdout.flush()\n with open(\"timeCmd.log\", \"a+\") as f:\n f.write(*args)\n\n\nif __name__ == \"__main__\":\n quotes = lambda text: text if ' ' not in text else '\"{}\"'.format(text)\n arguments = sys.argv\n cmd = \"\"\n for x in range(1, len(arguments)):\n cmd += quotes(arguments[x]) + \" \"\n log(\"\\nExecuting Command : {}\\n\".format(cmd))\n startTime = datetime.now()\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n for stdout_line in iter(popen.stdout.readline, \"\"):\n log(\"\\t\" + stdout_line)\n popen.stdout.close()\n return_code = popen.wait()\n endTime = datetime.now()\n log(\"\\nTook {} seconds to execute\".format((endTime - startTime).total_seconds()))\n","sub_path":"timeCmd/timeCmd.py","file_name":"timeCmd.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"218656986","text":"import json\r\nfrom channels.generic.websocket import AsyncWebsocketConsumer\r\nfrom channels.db import database_sync_to_async\r\nfrom .models import Chat_room, Message\r\nfrom account.models import MyUser\r\n\r\nclass ChatConsumerA(AsyncWebsocketConsumer):\r\n async def connect(self):\r\n # print(self.scope['url_route']['kwargs']['room'])\r\n self.user = self.scope['user']\r\n self.room_name = self.scope['url_route']['kwargs']['room']\r\n self.room_group_name = 'chat_%s' % self.room_name\r\n print(self.room_group_name)\r\n # print(self.room_group_name) \r\n # Join room group\r\n await self.channel_layer.group_add(\r\n self.room_group_name,\r\n self.channel_name\r\n )\r\n\r\n await self.accept()\r\n\r\n async def receive(self,text_data):\r\n text_data_json = json.loads(text_data)\r\n message = text_data_json['message']\r\n sender = text_data_json['sender']\r\n # print(self)\r\n # call fx to save message to db\r\n await self.create_message(message,sender)\r\n \r\n # Send message to room group \r\n await self.channel_layer.group_send(\r\n self.room_group_name,\r\n {'type':'chat_message','message': message,'sender':sender}\r\n )\r\n \r\n async def chat_message(self,event):\r\n # print(event)\r\n message = event['message']\r\n sender = event['sender']\r\n # # Send message to WebSocket\r\n await self.send(text_data=json.dumps({\r\n 'room': self.room_name,\r\n 'message': message,\r\n 'sender':sender\r\n })) \r\n \r\n async def disconnect(self, close_code):\r\n # Leave room group\r\n await self.channel_layer.group_discard(\r\n self.room_group_name,\r\n self.channel_name\r\n )\r\n\r\n @database_sync_to_async\r\n def create_message(self,message,sender):\r\n room = Chat_room.objects.get(id=self.room_name)\r\n sender = MyUser.objects.get(username=sender)\r\n return Message.objects.create(room=room, sender=sender, content=message)\r\n\r\nclass ChatConsumerB(AsyncWebsocketConsumer):\r\n async def connect(self):\r\n await self.accept()\r\n await self.channel_layer.group_add('gossip',self.channel_name)\r\n # print(f' Added to gossip')\r\n \r\n async def disconnect(self,close_code):\r\n await self.channel_layer.group_discard('gossip',self.channel_name)\r\n # print(f' Removed from gossip')\r\n\r\n async def user_gossip(self, event):\r\n data = json.dumps(event)\r\n await self.send(data)\r\n # print(f' Got message on 
gossip')\r\n","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"541773517","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on 2017-11-05 16:40:35\nFirst-class functions.\n@author: zhoujiagen\n'''\nfrom operator import add  # the operator module\nimport random\nimport unittest\nimport functools\nfrom inspect import signature\n\ndef factorial(number):\n    \"\"\"Compute the factorial.\n\n    Args:\n    number: a number.\n\n    Returns:\n    number!.\n\n    Raises:\n    None\n    \"\"\"\n    if number < 2:\n        return 1\n    return number * factorial(number - 1)\n\n\ndef reverse(word):\n    \"\"\"The reversed spelling of a word.\n\n    Args:\n    word: a word.\n\n    Returns:\n    The reversed spelling of the word.\n\n    Raises:\n    None\n    \"\"\"\n    return word[::-1]\n\n\ndef tag(name, *content, cls=None, **attrs):\n    \"\"\"Generate one or more HTML tags.\n\n    An example of function parameters and arguments.\n\n    Args:\n    name: the tag name.\n    *content: the tag content.\n    cls: the tag's class attribute.\n    **attrs: the tag's attribute key/value pairs.\n\n    Returns:\n    An HTML tag fragment.\n\n    Raises:\n    None\n    \"\"\"\n    if cls is not None:\n        attrs['class'] = cls\n    if attrs:\n        attr_str = ''.join(' %s=\"%s\"' % (attr, value)\n                           for attr, value in sorted(attrs.items()))\n    else:\n        attr_str = ''\n\n    if content:\n        return '\\n'.join('<%s%s>%s</%s>' % (name, attr_str, c, name)\n                         for c in content)\n    return '<%s%s />' % (name, attr_str)\n\n\ndef clip_with_annotation(text:str, max_len:'int > 0'=80) -> str:\n    \"\"\"Annotated version of clip(text, max_len)\"\"\"\n    return clip(text, max_len)\n\n\ndef clip(text, max_len=80):\n    \"\"\"Truncate the text at the first space before or after max_len.\n\n    Used to demonstrate function introspection.\n\n    Args:\n    text: the text.\n    max_len: reference maximum length for the truncation.\n\n    Returns:\n    The truncated text.\n\n    Raises:\n    None\n    \"\"\"\n    end = None\n    if(len(text) > max_len):\n        space_before = text.rfind(' ', 0, max_len)\n        if space_before >= 0:\n            end = space_before\n        else:\n            space_after = text.rfind(' ', max_len)\n            if space_after >= 0:\n                end = space_after\n\n    if end is None:\n        end = len(text)\n\n    return text[:end].rstrip()\n\n\nclass BingoCage(object):\n    \"\"\"A custom callable type.\n\n    Created from an iterable; stores a randomly shuffled list internally.\n    Calling an instance picks one item.\n\n    Attributes:\n    _items: the remaining items, in random order.\n    \"\"\"\n\n    def __init__(self, items):\n        \"\"\"Create from an iterable; store a randomly shuffled list internally.\"\"\"\n        self._items = list(items)\n        random.shuffle(self._items)  # shuffle randomly\n\n    def pick(self):\n        \"\"\"Pick one item\"\"\"\n        try:\n            return self._items.pop()\n        except IndexError:\n            raise LookupError('pick from empty BingoCage')\n\n    def __call__(self):\n        return self.pick()\n\n\nclass TestFunctionAsFirstLevelObject(unittest.TestCase):\n    \"\"\"Spike unit tests for functions as first-class objects.\n\n    Attributes:\n    None\n    \"\"\"\n    def test_function_property(self):\n        \"\"\"Attributes of a function object\"\"\"\n        print(type(factorial))\n        print(factorial.__doc__)\n\n        self.assertEqual(1, 1)\n\n        class DummyClass(object):\n            \"\"\"A class for demonstration\"\"\"\n            pass\n        obj = DummyClass()\n        def func():\n            \"\"\"A function for demonstration\"\"\"\n            pass\n        print(sorted(set(dir(func)) - set(dir(obj))))\n\n    def test_function_alias(self):\n        \"\"\"Aliases of a function object\"\"\"\n        fact = factorial\n        print(fact)\n        print(fact(5))\n        self.assertEqual(120, fact(5))\n\n    def test_function_as_parameter(self):\n        \"\"\"Function objects as arguments\"\"\"\n        self.assertEqual([1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880],\n                         list(map(factorial, range(10))))\n\n    def test_functional_operator(self):\n        \"\"\"Replacements for map, filter, reduce\"\"\"\n        # using a list comprehension\n        self.assertEqual(list(map(factorial, range(6))),\n                         [factorial(n) for n in range(6)])\n        # self.assertEqual(list(map(factorial,\n        #                           filter(lambda n: n % 2, range(6)))),\n        #                  [factorial(n) for n in range(6) if n % 2])\n        # using built-in functions\n        self.assertEqual(functools.reduce(add, range(100)), 
sum(range(100)))\n\n    def test_reverse_word(self):\n        \"\"\"Test the reversed spelling of a word\"\"\"\n        word = 'testing'\n        self.assertEqual('gnitset', reverse(word))\n\n    def test_highorder_function(self):\n        \"\"\"Higher-order functions\"\"\"\n        fruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry',\n                  'banana']\n        # using the built-in len()\n        self.assertEqual(['fig', 'apple', 'cherry', 'banana', 'raspberry',\n                          'strawberry'], sorted(fruits, key=len))\n        self.assertEqual(['banana', 'apple', 'fig', 'raspberry', 'strawberry',\n                          'cherry'], sorted(fruits, key=reverse))\n\n    def test_lambda(self):\n        \"\"\"Anonymous functions\"\"\"\n        fruits = ['strawberry', 'fig', 'apple', 'cherry', 'raspberry',\n                  'banana']\n        self.assertEqual(['banana', 'apple', 'fig', 'raspberry', 'strawberry',\n                          'cherry'],\n                         sorted(fruits, key=lambda word: word[::-1]))\n\n    def test_callable(self):\n        \"\"\"Callable objects\"\"\"\n        bingo = BingoCage(range(3))\n        self.assertTrue(bingo.pick() in range(3))\n        self.assertTrue(bingo.pick() in range(3))\n        self.assertTrue(callable(bingo))  # check whether the object is callable\n\n    def test_function_parameters(self):\n        \"\"\"Function parameters and arguments\"\"\"\n        print(tag('br'))  # a single positional argument\n        print(tag('p', 'hello'))  # arguments after the first are captured by *content into a tuple\n        print(tag('p', 'hello', 'world'))\n        print(tag('p', 'hello', id=33))  # keyword arguments not named in the signature are captured by **attrs into a dict\n        print(tag('p', 'hello', 'world', cls='sidebar'))  # cls passed as a keyword argument\n        print(tag(content='testing', name='img'))  # positional parameters can also be passed as keyword arguments\n        # all items of the argument dict are passed as individual arguments;\n        # keys matching named parameters bind to them, the rest are captured by **attrs\n        tags = {'name': 'img', 'title': 'Sunset Boulevard',\n                'src': 'sunset.jpg', 'cls': 'framed'}\n        print(tag(**tags))\n\n        def f(a, *, b):\n            \"\"\"Keyword-only parameters\"\"\"\n            return a, b\n        print(f(1, b=2))\n        # print(f(1, 2, b=2)) # ERROR\n        # print(f(b=2, 1)) # ERROR\n\n        self.assertTrue(1 == 1)\n\n    def test_function_introspection(self):\n        \"\"\"Function introspection\"\"\"\n        # default values of the positional and keyword parameters\n        print(clip.__defaults__)\n        # default values of the keyword-only parameters\n        print(clip.__kwdefaults__)\n        # the function's code object attribute\n        print(clip.__code__)\n        # names and count of the function's local variables\n        print(clip.__code__.co_varnames)\n        print(clip.__code__.co_argcount)\n\n        # extract the function's signature\n        tag_sig = signature(tag)\n        print(tag_sig)\n        for name, param in tag_sig.parameters.items():\n            print(param.kind, ':', name, '=', param.default)\n        # bind the arguments\n        tags = {'name': 'img', 'title': 'Sunset Boulevard',\n                'src': 'sunset.jpg', 'cls': 'framed'}\n        bound_args = tag_sig.bind(**tags)\n        print(bound_args)\n        for name, value in bound_args.arguments.items():\n            print(name, '=', value)\n\n        self.assertTrue(1 == 1)\n\n    def test_function_annotation(self):\n        \"\"\"Function annotations\"\"\"\n        print(clip_with_annotation.__annotations__)\n\n        clip_sig = signature(clip_with_annotation)\n        for param in clip_sig.parameters.values():\n            note = repr(param.annotation).ljust(13)\n            print(note, ':', param.name, '=', param.default)\n\n        self.assertTrue(1 == 1)\n","sub_path":"python/src3/com/spike/functional/function_object_v3.py","file_name":"function_object_v3.py","file_ext":"py","file_size_in_byte":8044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"210169290","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 21:32:08 2018\n\n@author: zkapach\n\"\"\"\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\n\nimport _init_paths\nfrom core.train import get_training_roidb\nfrom core.config import cfg, cfg_from_file, cfg_from_list, get_output_dir, loadDatasetIndexDict,iconicImagesFileFormat\nfrom datasets.factory import get_repo_imdb\nfrom datasets.ds_utils import load_mixture_set,print_each_size,computeTotalAnnosFromAnnoCount,cropImageToAnnoRegion,roidbSampleHOG,roidbSampleImage\nimport os.path as 
osp\nimport datasets.imdb\nimport argparse\nimport pprint\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys,os,cv2,pickle,uuid\n# pytorch imports\nfrom datasets.pytorch_roidb_loader import RoidbDataset\nfrom numpy import transpose as npt\nfrom ntd.hog_svm import plot_confusion_matrix, extract_pyroidb_features,appendHOGtoRoidb,split_data, scale_data,train_SVM,findMaxRegions, make_confusion_matrix\nfrom utils.misc import *\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Test loading a mixture dataset')\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default=None, type=str)\n parser.add_argument('--setID', dest='setID',\n help='which 8 digit ID to read from',\n default=['11111111'],nargs='*',type=str)\n parser.add_argument('--repeat', dest='repeat',\n help='which repeat to read from',\n default=[1],nargs='*',type=int)\n parser.add_argument('--size', dest='size',\n help='which size to read from',\n default=[250],nargs='*',type=int)\n parser.add_argument('--save', dest='save',\n help='save some samples with bboxes visualized?',\n action='store_true')\n parser.add_argument('--rand', dest='randomize',\n help='randomize (do not use a fixed seed)',\n action='store_true')\n parser.add_argument('--modelRaw', dest='modelRaw',\n help='give the path to a fit model',\n default=None, type=str)\n parser.add_argument('--modelCropped', dest='modelCropped',\n help='give the path to a fit model',\n default=None, type=str)\n\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n print('Called with args:')\n print(args)\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if not args.randomize:\n np.random.seed(cfg.RNG_SEED)\n print('Using config:')\n pprint.pprint(cfg)\n\n ntdGameInfo = {}\n ntdGameInfo['trainSize'] = 5\n ntdGameInfo['testSize'] = 5\n\n cfg.DEBUG = False\n cfg.uuid = str(uuid.uuid4())\n\n setID_l = args.setID\n repeat_l = args.repeat\n size_l = args.size\n assert len(setID_l) == 1 and len(size_l) == 1,\\\n \"code only works for one size and size currently\"\n\n numEl = len(setID_l) * len(repeat_l) * len(size_l)\n rawMats = []\n croppedMats = []\n diffMats = []\n\n for setID in setID_l:\n for repeat in repeat_l:\n for size in size_l:\n ntdGameInfo['setID'] = setID\n ntdGameInfo['size'] = size\n ntdGameInfo['repeat'] = 222 # repeat\n\n \"\"\"\n convMat_fn = \"output/ntd/confMats_{}_{}_{}.pkl\".format(setID,repeat,size)\n convMat_fn = \"output/ntd/confMats_11111111_0_1000_4b142eec-0ae2-4ed5-9435-4440a8228b63.pkl\"\n convMat = pickle.load(open(convMat_fn,\"rb\"))\n cmRaw = convMat['raw']\n cmCropped = convMat['cropped']\n \"\"\"\n cmRaw = np.array([31,19,10,1,21,13,5,0,18,21,21,2,12,20,4,1,23,12,23,1,17,18,5,1,0,0,0,100,0,0,0,0,11,9,8,0,61,7,2,1,15,15,16,1,13,33,4,4,2,0,1,0,3,1,92,0,12,6,6,2,3,4,3,65]).reshape(8,8)\n cmCropped = np.array([20,14,18,9,8,12,8,11,12,20,20,8,9,14,7,11,16,19,19,8,10,13,7,9,8,8,8,49,6,6,8,8,8,10,10,4,42,12,5,8,19,16,15,7,8,17,7,11,12,8,11,18,9,7,26,9,11,13,12,9,14,11,9,21]).reshape(8,8)\n cmDiff = cmRaw - cmCropped\n plotNtdConfMats(cmRaw,cmCropped,cmDiff,ntdGameInfo)\n sys.exit()\n\n rawMats.append(cmRaw)\n croppedMats.append(cmCropped)\n diffMats.append(cmDiff)\n\n ntdGameInfo['ave'] = len(rawMats)\n ntdGameInfo['std'] = len(rawMats)\n\n rawMatsAve = np.array(rawMats).mean(axis=0)\n print(rawMatsAve.shape)\n croppedMatsAve = 
np.array(croppedMats).mean(axis=0)\n diffMatsAve = np.array(diffMats).mean(axis=0)\n\n plotNtdConfMats(rawMatsAve,croppedMatsAve,diffMatsAve,ntdGameInfo,\"ave\")\n\n rawMatsStd = np.array(rawMats).std(axis=0)\n croppedMatsStd = np.array(croppedMats).std(axis=0)\n diffMatsStd = np.array(diffMats).std(axis=0)\n\n plotNtdConfMats(rawMatsStd,croppedMatsStd,diffMatsStd,ntdGameInfo,\"std\")\n\n print(rawMatsAve.shape)\n print(\"\\n\\n -=-=-=- uuid: {} -=-=-=- \\n\\n\".format(cfg.uuid))\n\n \n\n\n\n\n","sub_path":"tools/prettyConfMat.py","file_name":"prettyConfMat.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"509053017","text":"# 1.\nclass Laptop:\n \"\"\"\n Make the class with composition.\n \"\"\"\n def __init__(self, laptop_manufacturer, hours_of_work):\n self.laptop_manufacturer = laptop_manufacturer\n self.battery = Battery(hours_of_work)\n\n def __str__(self):\n return f'{self.battery}'\n\nclass Battery:\n \"\"\"\n Make the class with composition.\n \"\"\"\n def __init__(self, hours_of_work):\n self.hours_of_work = hours_of_work\n\n def __str__(self):\n return f'{self.hours_of_work}'\n\n\nlaptop_1 = Laptop(\"Apple\", 7)\n\nprint(f'The laptop manufacturer is {laptop_1.laptop_manufacturer}, '\n f'and battery working hours are', laptop_1)\n\n\n# 2.\nclass Guitar:\n \"\"\"\n Make the class with aggregation\n \"\"\"\n def __init__(self, name, string):\n self.name = name\n self.string = string\n\n\nclass GuitarString:\n \"\"\"\n Make the class with aggregation\n \"\"\"\n def __init__(self, string_length):\n self.string_length = string_length\n \n def __str__(self):\n return f'{self.string_length}'\n\n\nstring = GuitarString(4)\nguitar = Guitar(\"Lennon\", string)\n\nprint(f'{guitar.name}, {guitar.string}')\n\n\n# 3\nclass Calc:\n \"\"\"\n Make class with one method \"add_nums\" with 3 parameters,\n which returns sum of these parameters.\n Note: this method should not take instance as first parameter.\n \"\"\"\n @staticmethod\n def add_nums(x, y, z):\n return x+y+z\n\n\nprint(Calc.add_nums(5, 3, 8))\n\n\n# 4\nclass Pasta:\n \"\"\"\n Make class which takes 1 parameter on init - list of ingredients\n and defines instance attribute ingredients.\n It should have 2 methods:\n carbonara (['forcemeat', 'tomatoes']) and bolognaise (['bacon', 'parmesan', 'eggs'])\n which should create Pasta instances with predefined list of ingredients.\n Example:\n pasta_1 = Pasta([\"tomato\", \"cucumber\"])\n pasta_1.ingredients will equal to [\"tomato\", \"cucumber\"]\n pasta_2 = Pasta.bolognaise()\n pasta_2.ingredients will equal to ['bacon', 'parmesan', 'eggs']\n \"\"\"\n\n CARBONARA = ['forcemeat', 'tomatoes']\n BOLOGNAISE = ['bacon', 'parmesan', 'eggs']\n\n def __init__(self, list_of_ingredients):\n self.list_of_ingredients = list_of_ingredients\n\n\n @classmethod\n def carbonara(cls):\n return Pasta(cls.CARBONARA)\n\n @classmethod\n def bolognaise(cls):\n return Pasta(cls.BOLOGNAISE)\n\n\npasta_1 = Pasta([\"tomato\", \"cucumber\"])\npasta_2 = Pasta.bolognaise()\npasta_3 = Pasta.carbonara()\n\nprint(pasta_1.list_of_ingredients)\nprint(pasta_2.list_of_ingredients)\nprint(pasta_3.list_of_ingredients)\n\n\n# 5*.\nclass Concert:\n \"\"\"\n Make class, which has max_visitors_num attribute and\n its instances will have visitors_count attribute.\n In case of setting visitors_count - max_visitors_num should be checked,\n if visitors_count value is bigger than max_visitors_num -\n visitors_count should be assigned with 
max_visitors_num.\n Example:\n Concert.max_visitor_num = 50\n concert = Concert()\n concert.visitors_count = 1000\n print(concert.visitors_count) # 50\n \"\"\"\n\n max_visitors_num = 22\n\n def __init__(self, visitors_count=0):\n self.visitors_count = visitors_count\n\n @property\n def visitors_count(self):\n return self._visitors_count\n\n @visitors_count.setter\n def visitors_count(self, x):\n if x < self.max_visitors_num:\n self._visitors_count = x\n else:\n self._visitors_count = self.max_visitors_num\n\n\nConcert.max_visitors_num = 50\nconcert = Concert(50)\nconcert.visitors_count = 1000\nprint(concert.visitors_count)\n\n\n#6.\nimport dataclasses\n\n\n@dataclasses.dataclass\nclass AddressBookDataClass:\n \"\"\"\n Create dataclass with 7 fields - key (int), name (str),\n phone_number (str), address (str), email (str), birthday (str), age (int)\n \"\"\"\n\n key: int\n name: str\n phone_number: str\n address: str\n email: str\n birthday: str\n age: int\n\n\ncontact_1 = AddressBookDataClass(1, 'Kiki', '8758181', 'City',\n 'jahjsHJAH@gmail.com', '11.09.1919', 101)\nprint(contact_1.address)\n\n\n# 7. Create the same class (6) but using NamedTuple\nimport collections\n\n\nAddressBookDataClass_1 = collections.namedtuple('AddressBookDataClass_1',\n ['key', 'name', 'phone_number', 'address',\n 'email', 'birthday', 'age'])\n\ncontact_1 = AddressBookDataClass_1(1, 'Kiki', '8758181', 'City',\n 'jahjsHJAH@gmail.com', '11.09.1919', 101)\n\nprint(contact_1[2])\n\n\n# 8.\nclass AddressBook:\n \"\"\"\n Create regular class taking 7 params on init - key, name, phone_number, address,\n email, birthday, age\n Make its str() representation the same as for AddressBookDataClass defined above.\n \"\"\"\n def __init__(self, key, name, phone_number, address, email, birthday, age):\n self.key = key\n self.name = name\n self.phone_number = phone_number\n self.address = address\n self.email = email\n self.birthday = birthday\n self.age = age\n\n def __str__(self):\n return f\"{__class__.__name__}(key={self.key}, name={self.name}, phone_number={self.phone_number}, \" \\\n f\"address={self.address}, email={self.email}, birthday={self.birthday}, age={self.age})\"\n\n\ncontact_2 = AddressBook(key=1, name='Pedro', phone_number='1717171', address='Tokyo',\n email='jahsgd@gmail.com', birthday='17.08.2004', age=16)\nprint(contact_2.name)\n\n\n# 9.\nclass Person:\n \"\"\"\n Change the value of the age property of the person object\n \"\"\"\n name = \"John\"\n age = 36\n country = \"USA\"\n\n\nperson_1 = Person()\n\nsetattr(person_1, 'age', '77')\n\nprint(f'{person_1.name} is {person_1.age}.')\n\n\n# 10.\nclass Student:\n \"\"\"\n Add an 'email' attribute of the object student and set its value\n Assign the new attribute to 'student_email' variable and print it by using getattr\n \"\"\"\n id = 0\n name = \"\"\n\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\n\nstudent_1 = Student(1, \"Kiki\")\nsetattr(student_1, \"email\", \"student@gmail.com\")\nstudent_email = student_1.email\nprint(getattr(student_1, \"email\"))\nprint(student_email)\n\n\n#11*.\nclass Celsius:\n \"\"\"\n By using @property convert the celsius to fahrenheit\n Hint: (temperature * 1.8) + 32)\n \"\"\"\n def __init__(self, temperature=0):\n self._temperature = temperature\n\n @property\n def convert(self):\n return (self._temperature * 1.8) + 32\n\n\ncels_fahr = 
Celsius(5)\n\nprint(cels_fahr.convert)\n","sub_path":"HW_5.py","file_name":"HW_5.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"264889557","text":"from math import log, floor\nimport time\n\ndef h_sort(arr):\n if not arr: return None\n\n n = len(arr)\n heap_height = floor(log(n, 2)) + 1\n start_ind = 2 ** (heap_height - 1) - 2\n\n for ind in range(start_ind, -1, -1):\n heapify(arr, ind, n)\n\n for i in range(n):\n swap(arr, 0, n - 1)\n n -= 1\n heapify(arr, 0, n)\n\n return arr\n\ndef heapify(arr, index, n):\n largest = index\n lc = get_child(index, n, 1)\n rc = get_child(index, n, 2)\n\n if lc != None and arr[lc] > arr[largest]:\n largest = lc\n if rc != None and arr[rc] > arr[largest]:\n largest = rc\n\n if largest != index:\n swap(arr, index, largest)\n heapify(arr, largest, n)\n\ndef get_parent(index):\n return (index - 1) // 2 if (index - 1) // 2 >= 0 else None\n\ndef get_child(index, n, val):\n \"\"\"left child: val = 1, right child: val = 2\"\"\"\n ind = (index) * 2 + val\n return ind if ind < n else None\n\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n","sub_path":"Algorithms/Sorting/Heap_Sort/HSort_2.py","file_name":"HSort_2.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"448789692","text":"from flask import Flask, request\nimport os, requests, json\nimport ChatBot\n\napp = Flask(__name__)\nACCESS_TOKEN = os.environ['ACCESS_TOKEN']\nVERIFY_TOKEN = os.environ['VERIFY_TOKEN']\n\n@app.route('/', methods = [\"GET\", \"POST\"])\ndef ReciveMessage():\n\tif request.method == 'GET':\n\t\ttoken_sent = request.args.get(\"hub.verify_token\")\n\t\treturn VerifyFBToken(token_sent)\n\telse:\n\t\tdata = request.get_json()\n\t\tentry = data[\"entry\"][-1]\n\t\tmessaging_event = entry[\"messaging\"][-1]\n\n\t\tif messaging_event.get(\"message\"):\n\t\t\tsender_id = messaging_event[\"sender\"][\"id\"]\n\t\t\trecipient_id = messaging_event[\"recipient\"][\"id\"]\n\t\t\tmessagetext = messaging_event[\"message\"].get(\"text\")\n\t\t\tmessageattachment = messaging_event['message'].get('attachments')\n\n\t\t\tmessage = \"\"\n\t\t\tlatlong = False\n\t\t\tif messagetext:\n\t\t\t\tmessage = messagetext\n\t\t\telif messageattachment and messageattachment[0][\"type\"] == \"location\":\n\t\t\t\tmessage = [messageattachment[0]['payload']['coordinates'][\"lat\"], messageattachment[0]['payload']['coordinates'][\"long\"]]\n\t\t\t\tlatlong = True\n\n\t\t\tresponses = GetReply(sender_id, message, latlong)\n\t\t\tfor response in responses:\n\t\t\t\tSendMessage(sender_id, response)\n\n\t\t\tquick_replies = {\"text\":\"Send my location\",\"quick_replies\":[{\"content_type\":\"location\"}]}\n\t\t\tr = requests.post(\"https://graph.facebook.com/v2.6/me/messages?access_token=\" + ACCESS_TOKEN,\n\t\t\t\theaders = {\"Content-Type\": \"application/json\"}, \n\t\t\t\tdata = json.dumps({\"recipient\": {\"id\": recipient_id}, \"message\": quick_replies})\n\t\t\t\t)\n\treturn \"Message Processed\"\n\ndef VerifyFBToken(token_sent):\n\tif token_sent == VERIFY_TOKEN:\n\t\treturn request.args.get(\"hub.challenge\")\n\treturn \"Invalid verification token\"\n\ndef GetReply(recipient_id, message, latlong):\n\tCB = ChatBot.Chatbot()\n\tif latlong:\n\t\treturn CB.ProcessLatitudeLongitude(recipient_id, message)\n\treturn CB.ProcessMessage(recipient_id, message)\n\ndef SendMessage(recipient_id, response):\n\tr 
= requests.post(\"https://graph.facebook.com/v2.6/me/messages?access_token=\" + ACCESS_TOKEN,\n\t\theaders = {\"Content-Type\": \"application/json\"}, \n\t\tdata = json.dumps({\"recipient\": {\"id\": recipient_id}, \"message\": {\"text\": response}})\n\t\t)\n\treturn \"Success\"\n\nif __name__ == \"__main__\":\n\tapp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"637304972","text":"import extract as ex\nimport numpy as np\nimport sys\n\ncrossword = ex.modify_crossword_edges(sys.argv[1])\nu_variables = ex.extract_variables(crossword)\nwords = ex.extract_domain(sys.argv[2], u_variables)\n\n# a_variables[i] = True if i is assigned\na_variables = np.zeros(len(u_variables), dtype=bool)\nlength = crossword.shape[0]\nwidth = crossword.shape[1]\n\n# True if a solution has been found\ndone = False\n\n# Dictionary holding the modified domains for each unassigned variable\nm_domain = {}\n\ncrossword = crossword.flatten()\n\ndef modify_domain(var):\n \"\"\"\n :param var: variable analyzed in this recursive call\n :type var int\n :return var's new domain, the state of the positions occupied by var in the crossword\n :rtype int,1D-numpy array\n \"\"\"\n\n d = words[u_variables[var].size][var]\n\n # Reduces var's domain\n for i in range(crossword[u_variables[var]].shape[0]):\n if crossword[u_variables[var]][i] != '0' and crossword[u_variables[var]][i] != '*':\n for word in d:\n if list(word)[i]!=crossword[u_variables[var]][i]:\n d = np.delete(d,np.where(d == word))\n\n return d, crossword[u_variables[var]]\n\n\ndef forward_checking(var):\n \"\"\"\n :param var: variable analyzed in this recursive call\n :type var int\n :return: False if any domain is left empty by var's assignation. True otherwise\n \"\"\"\n\n for i in range(len(u_variables)):\n intersect = np.in1d(u_variables[var], u_variables[i])\n\n # Update domain of unassigned variable if intersect\n if np.any(intersect) and not a_variables[i] and var != i:\n\n d = words[u_variables[i].size][var]\n c = crossword[u_variables[i]]\n\n for j in range(c.size):\n if c[j] != '0' and c[j] != '*':\n for word in d:\n if list(word)[j] != c[j]:\n d = np.delete(d, np.where(d == word))\n m_domain[i] = d\n if m_domain[i].size == 0:\n return False\n return True\n\n\ndef assign_next_var(var):\n \"\"\"\n\n :param var: variable analyzed in this recursive call\n :return: the variable to be analyzed in the next recursive call\n :rtype int\n \"\"\"\n\n for i in range(a_variables.size):\n if np.any(np.in1d(u_variables[var], u_variables[i])) and not a_variables[i]:\n next_var = i\n return next_var\n\n for i in range(a_variables.size):\n if not a_variables[i]:\n next_var = i\n return next_var\n\n\ndef Backtracking(var):\n \"\"\"\n\n :param var: variable analyzed in this recursive call\n :rtype int\n :return: Full completed crossword if a solution exists. 
None otherwise\n :rtype 2D-numpy array\n \"\"\"\n\n global done\n\n # If all variables have been assigned a solution has been found\n if False not in a_variables:\n done = True\n return np.reshape(crossword, (length, width))[1:-1, 1:-1]\n\n domain, pos = modify_domain(var)\n for word in domain:\n crossword[u_variables[var]] = list(word)\n if forward_checking(var):\n a_variables[var] = True\n next_var = assign_next_var(var)\n res = Backtracking(next_var)\n\n if done:\n return res\n\n crossword[u_variables[var]] = pos\n\n a_variables[var] = False\n return None\n\n\nsol = Backtracking(assign_next_var(0))\nnp.savetxt('sol.txt', sol, fmt=\"%s\")","sub_path":"csp/backtracking.py","file_name":"backtracking.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"33139935","text":"# -*- coding: utf-8 -*-\n# file: train.py\n# author: yangheng \n# Copyright (C) 2020. All Rights Reserved.\n\nimport random\n\nimport numpy\nimport torch\nfrom torch.utils.data import DataLoader\nfrom transformers import BertModel, BertTokenizer\n\nfrom modules.models import LCA_BERT, SLIDE_LCF_BERT, LCF_BERT\nfrom modules.models import BERT_BASE, BERT_SPC\nfrom modules.utils.data_utils_for_inferring import Tokenizer4Bert, ABSADataset, parse_experiments\n\n\nclass Instructor:\n def __init__(self, opt):\n self.opt = opt\n # opt.learning_rate = 2e-5\n # Use any type of BERT to initialize your model.\n # The weights of loaded BERT will be covered after loading state_dict\n # self.bert = BertModel.from_pretrained('bert-base-uncased')\n self.bert = BertModel.from_pretrained(opt.pretrained_bert_name)\n self.bert_tokenizer = BertTokenizer.from_pretrained(opt.pretrained_bert_name, do_lower_case=True)\n tokenizer = Tokenizer4Bert(self.bert_tokenizer, opt.max_seq_len)\n self.model = opt.model_class(self.bert, opt).to(opt.device)\n\n self.model.load_state_dict(torch.load(opt.state_dict_path))\n infer_set = ABSADataset(opt.infer_data, tokenizer, opt)\n self.train_data_loader = DataLoader(dataset=infer_set, batch_size=1, shuffle=False)\n\n def _infer(self):\n sentiments = {0: 'Negative', 1: \"Neutral\", 2: 'Positive', -999: ''}\n Correct = {True: 'Correct', False: 'Wrong'}\n with torch.no_grad():\n self.model.eval()\n for _, sample in enumerate(self.train_data_loader):\n print(sample['text_raw'][0])\n\n inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols]\n self.model.eval()\n outputs = self.model(inputs)\n if 'lca' in self.opt.model_name:\n sen_logits, _, _ = outputs\n else:\n sen_logits = outputs\n t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()\n sent = int(t_probs.argmax(axis=-1))\n real_sent = int(sample['polarity'])\n aspect = sample['aspect'][0]\n\n print('{} --> {}'.format(aspect, sentiments[sent])) if real_sent == -999 \\\n else print('{} --> {} Real Polarity: {} ({})'.format(aspect, sentiments[sent],\n sentiments[real_sent],\n Correct[sent == real_sent]))\n\n def run(self):\n\n _params = filter(lambda p: p.requires_grad, self.model.parameters())\n return self._infer()\n\n\ndef init_and_infer(opt):\n if opt.seed is not None:\n random.seed(opt.seed)\n numpy.random.seed(opt.seed)\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n model_classes = {\n 'bert_base': BERT_BASE,\n 'bert_spc': BERT_SPC,\n 'lca_bert': LCA_BERT,\n 'lcf_bert': LCF_BERT,\n 'lcfs_bert': LCF_BERT,\n 'slide_lcf_bert': 
SLIDE_LCF_BERT,\n        'slide_lcfs_bert': SLIDE_LCF_BERT,\n    }\n\n    initializers = {\n        'xavier_uniform_': torch.nn.init.xavier_uniform_,\n        'xavier_normal_': torch.nn.init.xavier_normal_,\n        'orthogonal_': torch.nn.init.orthogonal_\n    }\n\n    opt.model_class = model_classes[opt.model_name]\n    opt.inputs_cols = ABSADataset.input_colses[opt.model_name]\n    opt.initializer = initializers[opt.initializer]\n\n    ins = Instructor(opt)\n    return ins.run() # _reset_params in every repeat\n\n\nif __name__ == '__main__':\n\n    configs = parse_experiments('inferring_config.json')\n\n    from modules.utils.Pytorch_GPUManager import GPUManager\n\n    GM = GPUManager()\n    gpu = GM.auto_choice()\n\n    # only take the first config to infer on each run\n    opt = configs[0]\n    opt.device = 'cuda:' + str(gpu)\n    # config.device = 'cpu' # Uncomment this line to use CPU\n\n    import os\n\n    for file in os.listdir():\n        if 'state_dict' in file:\n            opt.state_dict_path = file\n        if 'inferring.dat' in file:\n            opt.infer_data = file\n        if 'config.json' in file:\n            opt.config = file\n        if 'embedding' in file:\n            opt.embedding = file.split('/')[-1]\n        if 'tokenizer' in file:\n            opt.tokenizer = file.split('/')[-1]\n\n    print('*' * 80)\n    print('Warning: Be sure the eval-config, eval-dataset, saved_state_dict, seed are compatible! ')\n    print('*' * 80)\n    opt.seed = int(opt.state_dict_path.split('seed')[1])\n    init_and_infer(opt)\n","sub_path":"batch_inferring/inferring.py","file_name":"inferring.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"488452939","text":"#!/usr/bin/python2.7\n\nimport sys\nimport json\nimport time\nimport aol_api\nfrom data_file import report_book\nfrom mysql.connector import MySQLConnection, Error\nfrom python_dbconfig import read_db_config\n\ntodaytime = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef connect():\n\n    # \"\"\"Gets AOL Data and writes them to a MySQL table\"\"\"\n    db = \"mysql_sa\"\n    report_type = \"site_additions\"\n    p_name = sys.argv[1]\n\n    # Connect To DB:\n    db_config = read_db_config(db)\n\n    try:\n        #print('Connecting to database...')\n        conn = MySQLConnection(**db_config)\n\n        if conn.is_connected():\n            #print('Connection established')\n            \n            cursor = conn.cursor()\n\n            sql = \"DROP TABLE IF EXISTS \" + p_name + \"_site_addition\"\n            cursor.execute(sql)\n\n            sql = \"CREATE TABLE \" + p_name + \"_site_addition (date varchar(50), media varchar(255), ad_revenue decimal(15, 5))\"\n            cursor.execute(sql)\n\n            # calls get_access_token function and starts script\n            logintoken = aol_api.get_access_token(p_name)\n            #print(logintoken)\n\n            for report in report_book[report_type][p_name]:\n\n                print(str(todaytime) + \" Running \" + p_name + \"_site addition with report # \" + str(report))\n                result = aol_api.run_existing_report(logintoken, str(report))\n                #print(result)\n\n                for x in json.loads(result)['data']:\n                    date = x['row'][0]\n                    media = x['row'][1]\n                    ad_revenue = x['row'][2]\n\n                    row = (date, media, ad_revenue)\n                    #print(row)\n\n                    sql = \"\"\"INSERT INTO \"\"\" + p_name + \"\"\"_site_addition VALUES (\"%s\", \"%s\", \"%s\")\"\"\" % (date, media, ad_revenue)\n                    cursor.execute(sql)\n            \n            cursor.execute('commit')\n\n        else:\n            print('Connection failed.')\n        \n    except Error as error:\n        print(error)\n\n    finally:\n        conn.close()\n        #print('Connection closed.')\n\n\nif __name__ == '__main__':\n    
connect()\n\n","sub_path":"Python/automated_processes/siteAdditions.py","file_name":"siteAdditions.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"3782956","text":"\"\"\"\nAES full key recovery attack utilizing power analysis\n\"\"\"\n\nfrom softAES import AES\nfrom softAES import _bytes_to_string\nfrom softAESr import AESr\n\nimport numpy as np\nfrom hashlib import sha256\nimport copy\n\nBlockSize = 16\n\n\ndef hamming_weight(x):\n \"\"\"\n Calculate Hamming Weight of x\n :param x: x\n :return: hamming weight of x\n \"\"\"\n hw = 0\n while x != 0:\n hw += x & 1\n x = x >> 1\n return hw\n\n\ndef generate_key_options(keys, option_list):\n \"\"\"\n Given lists of possible key segments, composes them into a list of keys\n :param keys: [[]]\n :param option_list: list of options for key segments\n :return: list of keys\n \"\"\"\n if option_list.size == 0:\n return keys\n\n new_keys = []\n for key in keys:\n for byte in option_list[0]:\n new_keys.append(key + [byte])\n return generate_key_options(new_keys, option_list[1:])\n\n\ndef simulate_power_analysis(plaintexts, key, start_round, end_round):\n \"\"\"\n Generates an array the holds hamming distances between inputs and outputs of S-boxes\n :param plaintexts: list of plaintexts\n :param key: encryption key\n :param start_round: first round to gain traces from\n :param end_round: final round to gain traces from\n :return: hamming_distances, where hamming_distances[j][r][i] holds the hamming distance corresponding to trace j,\n the r'th round, and byte i\n \"\"\"\n aes = AESr(key, end_round + 2)\n\n num_traces = len(plaintexts)\n\n num_rounds = end_round - max(start_round, 1) + 1\n hamming_distances = np.empty((num_traces, num_rounds, BlockSize), dtype=np.uint8)\n\n for j in range(num_traces):\n for r in range(max(start_round, 1), end_round + 1):\n stateround = aes.encrypt_r(plaintexts[j], r, start_round)\n for i in range(BlockSize):\n hamming_distances[j][r-1][i] = hamming_weight(stateround[i] ^ AES.S[stateround[i]])\n\n return hamming_distances\n\n\ndef guess_key_hd(plaintexts, hamming_distances, r):\n \"\"\"\n Guesses a round key based on hamming distances gleaned from power analysis\n :param plaintexts: list of plaintexts\n :param hamming_distances: hamming_distances[j][r][i] holds the hamming distance corresponding to trace j,\n the r'th round, and byte i\n :param r: round number\n :return: list of guesses for each key byte\n \"\"\"\n num_traces = len(plaintexts)\n key_list = np.empty(BlockSize, dtype=object)\n\n for i in range(BlockSize):\n guess_list = np.full(2 ** 8, True)\n\n # TODO: check\n # check what are the possible keys for this block\n # A key is possible, only if after that we use it on the plaintext we get the same hamming distance.\n for guess in range(2**8):\n for j, p in enumerate(plaintexts):\n # compute hamming distance yielded by this guess.\n input = (guess ^ p[i])\n output = AES.S[guess ^ p[i]]\n hd = hamming_weight(output ^ input)\n if hamming_distances[j][r][i] != hd:\n guess_list[guess] = False\n break\n\n key_list[i] = [guess for guess in range(2 ** 8) if guess_list[guess]]\n\n return key_list\n\n\ndef guess_k1_hd(plaintexts, hamming_distances, k0):\n \"\"\"\n Guess k1 based on hamming distances gleaned from power analysis\n :param plaintexts: list of plaintexts\n :param hamming_distances: hamming_distances[j][r][i] holds the hamming distance corresponding to trace j,\n the r'th round, and byte i\n :param k0: first 
round key\n :return: list of guesses for each key byte\n \"\"\"\n num_traces = len(plaintexts)\n\n # TODO: check\n # Calculate the first round of AES on each trace using k0 as a full 128-bit key (utilizing the AES key schedule)\n # Actually get the result of the 2 round as well, by setting a zeroed out 256 bit key with zero suffix.\n\n zeros = bytes.fromhex('00' * 16)\n combined = k0 + zeros\n aes = AESr(combined, 2)\n\n r1 = np.empty(num_traces, dtype=bytearray)\n\n for j, p in enumerate(plaintexts):\n r1[j] = aes.encrypt_r(p, 2, 0)\n\n return guess_key_hd(r1, hamming_distances, 1)\n\n\ndef recover_full_key(key_len, k0_list, k1_list):\n \"\"\"\n Given candidates for first and second round keys, generate a list of candidates for the full round key\n :param key_len: length of key in bits\n :param k0_list: list of candidates for k0\n :param k1_list: list of candidates for k1\n :return: list of candidates for the full key\n \"\"\"\n full_key_list = []\n\n for k0 in k0_list:\n for k1 in k1_list:\n full_key_list += [k0 + k1]\n\n # Remove duplicate keys\n return list(dict.fromkeys(full_key_list))\n\n\ndef power_analysis_attack(key_len, plaintexts, hamming_distances, verbose=False):\n \"\"\"\n Recover the full encryption key using hamming distances between inputs and outputs of sboxes\n :param key_len: length of key in bits\n :param plaintexts: list of plaintexts\n :param hamming_distances: hamming_distances[j][r][i] holds the hamming distance corresponding to trace j, round r,\n and byte i\n :return: list of possible keys\n \"\"\"\n if key_len not in [128, 192, 256]:\n raise Exception(\"unsupported key length\")\n\n key_list = guess_key_hd(plaintexts, hamming_distances, 0)\n\n if verbose:\n print(\"Number of k0 options: \", np.prod([len(x) for x in key_list]))\n\n k0_list = generate_key_options([[]], key_list)\n\n for i in range(len(k0_list)):\n k0_list[i] = _bytes_to_string(k0_list[i])\n\n # Remove duplicate keys\n k0_list = list(dict.fromkeys(k0_list))\n\n if key_len == 128:\n return k0_list\n\n k1_list = []\n for k0 in k0_list:\n key_list = guess_k1_hd(plaintexts, hamming_distances, k0)\n if verbose:\n print(\"Number of k1 options: \", np.prod([len(x) for x in key_list]))\n k1_list += copy.copy(generate_key_options([[]], key_list))\n\n for i in range(len(k1_list)):\n k1_list[i] = _bytes_to_string(k1_list[i])\n\n # Remove duplicate keys\n k1_list = list(dict.fromkeys(k1_list))\n\n return recover_full_key(key_len, k0_list, k1_list)\n\n\ndef check_test_vectors():\n \"\"\"\n Checks and prints test vectors on various functions\n \"\"\"\n num_traces = 10\n plaintext_seed = 0\n\n key = '000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f'\n\n # Generate random plaintexts\n plaintexts = np.empty(num_traces, dtype=bytearray)\n for i in range(num_traces):\n sha = sha256()\n sha.update(bytes([plaintext_seed]) + bytes([i]))\n plaintexts[i] = sha.digest()[:BlockSize]\n\n keyarr = bytes.fromhex(key)\n\n hamming_distances = simulate_power_analysis(plaintexts, keyarr, 0, 2)\n if np.array_equal(hamming_distances[0], np.array([[2, 4, 5, 7, 5, 6, 6, 5, 6, 5, 6, 1, 4, 2, 3, 4], [4, 6, 3, 3, 4, 3, 3, 6, 4, 2, 3, 4, 5, 2, 6, 6]])):\n print(\"simulate_power_analysis: Functional\")\n else:\n print(\"simulate_power_analysis: Not Functional\")\n\n key_list = guess_key_hd(plaintexts, hamming_distances, 0)\n\n k0_list = generate_key_options([[]], key_list)\n\n for i in range(len(k0_list)):\n k0_list[i] = _bytes_to_string(k0_list[i])\n\n # Remove duplicate keys\n k0_list = list(dict.fromkeys(k0_list))\n\n if 
set(k0_list) == {b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f'}:\n print(\"guess_key_hd: Functional\")\n else:\n print(\"guess_key_hd: Not Functional\")\n\n\nif __name__ == \"__main__\":\n num_traces = 10\n plaintext_seed = 0\n\n key = '000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f'\n\n # Generate random plaintexts\n plaintexts = np.empty(num_traces, dtype=bytearray)\n for i in range(num_traces):\n sha = sha256()\n sha.update(bytes([plaintext_seed]) + bytes([i]))\n plaintexts[i] = sha.digest()[:BlockSize]\n\n keyarr = bytes.fromhex(key)\n\n hamming_distances = simulate_power_analysis(plaintexts, keyarr, 0, 2)\n\n keys = power_analysis_attack(len(keyarr) * 8, plaintexts, hamming_distances, True)\n for k in keys:\n print(k.hex())\n","sub_path":"aes_power_analysis.py","file_name":"aes_power_analysis.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"109614697","text":"from django.contrib.auth.hashers import make_password\nfrom datetime import datetime, date\nfrom food_management.constants.constants import (\n DEFAULT_DATE_FORMAT, DEFAULT_TIME_FORMAT, BREAKFAST_START_TIME,\n BREAKFAST_END_TIME, LUNCH_START_TIME, LUNCH_END_TIME,\n DINNER_START_TIME, DINNER_END_TIME\n)\nfrom food_management.models.user import User\nfrom food_management.models.meal import Meal\nfrom food_management.models.user_rating import UserRating\nfrom food_management.models.user_feedback import UserFeedback\nfrom food_management.models.announcements import Announcements\nfrom food_management.models.items import Items\nfrom food_management.models.meal_course import MealCourse\nfrom food_management.dtos.dtos import (\n ItemAndQuantityDto, CustomeMealUpdateDto, ItemAndRatingDto,\n RatingDto, UpdateMealScheduleDto, ItemDetailsDto\n)\nfrom food_management.models.user_meal_status import UserMealStatus\nimport pytest\nfrom freezegun import freeze_time\nfrom django.utils import timezone\nfrom datetime import time\nfrom food_management.interactors.storages.dtos import (\n HomePageDto, MealCourseDto, ItemDto, MealDto, AnnouncementDtos,\n MealCourseCompleteDetailsDto, SetMealPreferenceDto, MealScheduleDto\n)\nfrom food_management.constants.enums import (\n TypeOfMeal, CategoryType, UnitType, CourseType\n)\n\n@pytest.fixture\ndef user_objs():\n\n user_dict = [\n {'username': 'user1', 'password': 'password1'},\n {'username': 'user2', 'password': 'password2'},\n {'username': 'user3', 'password': 'password3'}\n ]\n\n User.objects.bulk_create([\n User(username=user['username'], password=make_password(user['password'])\n ) for user in user_dict])\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef meal_objs(item_objs):\n datetime_obj = datetime.now()\n meal_dict = [\n {\n 'meal_type': TypeOfMeal.breakfast.value,\n 'date': datetime_obj,\n 'from_time_string': BREAKFAST_START_TIME,\n 'to_time_string': BREAKFAST_END_TIME\n },\n {\n 'meal_type': TypeOfMeal.lunch.value,\n 'date': datetime_obj,\n 'from_time_string': LUNCH_START_TIME,\n 'to_time_string': LUNCH_END_TIME\n },\n {\n 'meal_type': TypeOfMeal.dinner.value,\n 'date': datetime_obj,\n 'from_time_string': DINNER_START_TIME,\n 'to_time_string': DINNER_END_TIME\n }\n ]\n\n Meal.objects.bulk_create([\n Meal(\n meal_type=meal['meal_type'], date=meal['date'],\n from_time_string=meal['from_time_string'],\n to_time_string=meal['to_time_string']\n )\n for meal in meal_dict\n ])\n\n\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef 
meal_dtos(meal_objs, item_objs):\n meal_dtos_list = [\n MealDto(\n meal_id=1, meal_type=TypeOfMeal.breakfast.value,\n date=datetime.now(), from_time_string=BREAKFAST_START_TIME,\n to_time_string=BREAKFAST_END_TIME\n ),\n MealDto(\n meal_id=2, meal_type=TypeOfMeal.lunch.value,\n date=datetime.now(), from_time_string=LUNCH_START_TIME,\n to_time_string=LUNCH_END_TIME\n ),\n MealDto(\n meal_id=3, meal_type=TypeOfMeal.dinner.value,\n date=datetime.now(), from_time_string=DINNER_START_TIME,\n to_time_string=DINNER_END_TIME\n )\n ]\n return meal_dtos_list\n\n@pytest.fixture\ndef item_objs():\n items_dict = [\n {\n 'item': 'Idly', 'category': CategoryType.indian_bread.value,\n 'units': UnitType.pieces.value\n },\n {\n 'item': 'Poori', 'category': CategoryType.indian_bread.value,\n 'units': UnitType.pieces.value\n },\n {\n 'item': 'MasalaRice', 'category': CategoryType.rice.value,\n 'units': UnitType.laddles.value\n },\n {\n 'item': 'WhiteRice', 'category': CategoryType.rice.value,\n 'units': UnitType.laddles.value\n },\n {\n 'item': 'Dal', 'category': CategoryType.curry.value,\n 'units': UnitType.cups.value\n },\n {\n 'item': 'PotatoCurry', 'category': CategoryType.curry.value,\n 'units': UnitType.cups.value\n },\n {\n 'item': 'Curd', 'category': CategoryType.curry.value,\n 'units': UnitType.cups.value\n },\n {\n 'item': 'Roti', 'category': CategoryType.indian_bread.value,\n 'units': UnitType.pieces.value\n },\n {\n 'item': 'Rajma', 'category': CategoryType.curry.value,\n 'units': UnitType.cups.value\n },\n {\n 'item': 'FriedRice', 'category': CategoryType.rice.value,\n 'units': UnitType.laddles.value\n }\n ]\n Items.objects.bulk_create([\n Items(\n item=item['item'],\n category=item['category'],\n units=item['units']\n )\n for item in items_dict])\n\n@pytest.fixture\ndef item_dtos(item_objs, meal_objs):\n item_dtos_list = [\n ItemDto(\n item_id=1, item='Idly', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=2, item='Poori', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=3, item='MasalaRice', category=CategoryType.rice.value,\n units=UnitType.laddles.value, meal_id=1\n ),\n ItemDto(\n item_id=4, item='WhiteRice', category=CategoryType.rice.value,\n units=UnitType.laddles.value, meal_id=2\n ),\n ItemDto(\n item_id=5, item='Dal', category=CategoryType.curry.value,\n units=UnitType.cups.value, meal_id=2\n ),\n ItemDto(\n item_id=6, item='PotatoCurry', category=CategoryType.curry.value,\n units=UnitType.cups.value, meal_id=2\n ),\n ItemDto(\n item_id=7, item='Curd', category=CategoryType.curry.value,\n units=UnitType.cups.value, meal_id=2\n ),\n ItemDto(\n item_id=8, item='Roti', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=3\n ),\n ItemDto(\n item_id=9, item='Rajma', category=CategoryType.curry.value,\n units=UnitType.cups.value, meal_id=3\n ),\n ItemDto(\n item_id=10, item='FriedRice', category=CategoryType.rice.value,\n units=UnitType.laddles.value, meal_id=3\n )\n ]\n return item_dtos_list\n\n@pytest.fixture\ndef list_of_items_dtos(item_objs):\n\n items_list = [\n ItemDto(item_id=1, item='Idly', category='Indian-Bread', units='pieces', meal_id=None),\n ItemDto(item_id=2, item='Poori', category='Indian-Bread', units='pieces', meal_id=None),\n ItemDto(item_id=3, item='MasalaRice', category='Rice', units='laddles', meal_id=None),\n ItemDto(item_id=4, item='WhiteRice', category='Rice', units='laddles', meal_id=None),\n ItemDto(item_id=5, 
item='Dal', category='Curry', units='cups', meal_id=None),\n        ItemDto(item_id=6, item='PotatoCurry', category='Curry', units='cups', meal_id=None),\n        ItemDto(item_id=7, item='Curd', category='Curry', units='cups', meal_id=None),\n        ItemDto(item_id=8, item='Roti', category='Indian-Bread', units='pieces', meal_id=None),\n        ItemDto(item_id=9, item='Rajma', category='Curry', units='cups', meal_id=None),\n        ItemDto(item_id=10, item='FriedRice', category='Rice', units='laddles', meal_id=None)\n    ]\n    return items_list\n\n\n@pytest.fixture\ndef meal_course_objs(item_objs, meal_objs):\n    meal_course_list = [\n        {\n            'item_id':1, 'meal_course': 'Half-meal', 'meal_id': 1, 'quantity':2\n        },\n        {\n            'item_id':2, 'meal_course': 'Half-meal', 'meal_id': 1, 'quantity':2\n        },\n        {\n            'item_id':3, 'meal_course': 'Half-meal', 'meal_id': 1, 'quantity':2\n        },\n        {\n            'item_id':4, 'meal_course': 'Half-meal', 'meal_id': 2, 'quantity':2\n        },\n        {\n            'item_id':5, 'meal_course': 'Half-meal', 'meal_id': 2, 'quantity':2\n        },\n        {\n            'item_id':6, 'meal_course': 'Half-meal', 'meal_id': 2, 'quantity':2\n        },\n        {\n            'item_id':7, 'meal_course': 'Half-meal', 'meal_id': 2, 'quantity':2\n        },\n        {\n            'item_id':8, 'meal_course': 'Full-meal', 'meal_id': 3, 'quantity':3\n        },\n        {\n            'item_id':9, 'meal_course': 'Full-meal', 'meal_id': 3, 'quantity':2\n        },\n        {\n            'item_id':10, 'meal_course': 'Full-meal', 'meal_id': 3, 'quantity':3\n        },\n        {\n            'item_id':1, 'meal_course': 'Full-meal', 'meal_id': 1, 'quantity':3\n        },\n        {\n            'item_id':2, 'meal_course': 'Full-meal', 'meal_id': 1, 'quantity':3\n        },\n        {\n            'item_id':3, 'meal_course': 'Full-meal', 'meal_id': 1, 'quantity':3\n        }\n    ]\n    MealCourse.objects.bulk_create([\n        MealCourse(\n            meal_id=meal_course['meal_id'],\n            item_id=meal_course['item_id'],\n            meal_course=meal_course['meal_course'],\n            quantity=meal_course['quantity']\n        )\n        for meal_course in meal_course_list\n    ])\n\n\n@pytest.fixture\ndef user_meal_course_objs(meal_objs, user_objs, meal_course_objs):\n    user_meal_course_dict = [\n        {\n            'user_id': 1, 'meal_course_id': 1,\n            'meal_id': 1, 'item_id':1, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 2,\n            'meal_id': 1, 'item_id':2, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 3,\n            'meal_id': 1, 'item_id':3, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 4,\n            'meal_id': 2, 'item_id': 4, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 5,\n            'meal_id': 2, 'item_id':5, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 6,\n            'meal_id': 2, 'item_id':6, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 7,\n            'meal_id': 2, 'item_id':7, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 8,\n            'meal_id': 3, 'item_id':8, 'custom_meal_quantity':3\n        },\n        {\n            'user_id': 1, 'meal_course_id': 9,\n            'meal_id': 3, 'item_id':9, 'custom_meal_quantity':2\n        },\n        {\n            'user_id': 1, 'meal_course_id': 10,\n            'meal_id': 3, 'item_id':10, 'custom_meal_quantity':3\n        }\n    ]\n    UserMealStatus.objects.bulk_create([\n        UserMealStatus(\n            user_id=user_meal['user_id'], meal_course_id=user_meal['meal_course_id'],\n            meal_id=user_meal['meal_id'], item_id=user_meal['item_id'],\n            custom_meal_quantity=user_meal['custom_meal_quantity']\n        )\n        for user_meal in user_meal_course_dict\n    ])\n\n@pytest.fixture\ndef meal_course_dtos(user_meal_course_objs):\n    meal_course_dtos_list = [\n        MealCourseDto(\n            meal_course=CourseType.half_meal.value,\n            meal_id=1, meal_type=TypeOfMeal.breakfast.value\n        ),\n        MealCourseDto(\n            
meal_course=CourseType.half_meal.value,\n meal_id=2, meal_type=TypeOfMeal.lunch.value\n ),\n MealCourseDto(\n meal_course=CourseType.full_meal.value,\n meal_id=3, meal_type=TypeOfMeal.dinner.value\n )\n ]\n return meal_course_dtos_list\n\n@pytest.fixture\ndef home_page_dto(meal_course_dtos, meal_dtos, item_dtos):\n return HomePageDto(\n meal_course=meal_course_dtos,\n items=item_dtos,\n meal=meal_dtos\n )\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef announcement_objs():\n announcement_list = [\n {\n 'title': 'Happy Birthday',\n 'subtitle': 'Birthday Special!',\n 'description':'Here are the newly added items on the occasion of our \\\n friends birthday',\n 'image':'https://www.google.co.in',\n 'date': datetime.now()\n }\n ]\n Announcements.objects.bulk_create([\n Announcements(\n title=announcement['title'],\n subtitle=announcement['subtitle'],\n description=announcement['description'],\n image=announcement['image'],\n date=announcement['date']\n )\n for announcement in announcement_list\n ])\n\n@pytest.fixture\ndef annoncement_dtos(announcement_objs):\n announcement_dtos_list = [\n AnnouncementDtos(\n title='Happy Birthday',\n subtitle='Birthday Special!',\n description='Here are the newly added items on the occasion of our \\\n friends birthday',\n image='https://www.google.co.in'\n )\n ]\n return announcement_dtos_list\n\n\n@pytest.fixture\ndef meal_items_dtos(item_objs):\n item_dtos_list = [\n ItemDto(\n item_id=1, item='Idly', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=2, item='Poori', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=3, item='MasalaRice', category=CategoryType.rice.value,\n units=UnitType.laddles.value, meal_id=1\n )\n ]\n return item_dtos_list\n\n\n@pytest.fixture\ndef meal_course_complete_details_dtos(item_objs, meal_course_objs):\n meal_course_details_list = [\n MealCourseCompleteDetailsDto(\n item_id=1, meal_course=CourseType.half_meal.value, quantity=2\n ),\n MealCourseCompleteDetailsDto(\n item_id=2, meal_course=CourseType.half_meal.value, quantity=2\n ),\n MealCourseCompleteDetailsDto(\n item_id=3, meal_course=CourseType.half_meal.value, quantity=2\n ),\n MealCourseCompleteDetailsDto(\n item_id=1, meal_course=CourseType.full_meal.value, quantity=3\n ),\n MealCourseCompleteDetailsDto(\n item_id=2, meal_course=CourseType.full_meal.value, quantity=3\n ),\n MealCourseCompleteDetailsDto(\n item_id=3, meal_course=CourseType.full_meal.value, quantity=3\n )\n ]\n return meal_course_details_list\n\n@pytest.fixture\ndef meal_data_dtos(meal_course_complete_details_dtos, meal_items_dtos):\n return SetMealPreferenceDto(\n meal_course=meal_course_complete_details_dtos,\n items=meal_items_dtos\n )\n\n@pytest.fixture\ndef custom_meal_objs(item_objs, meal_objs):\n meal_course_list = [\n {\n 'item_id':1, 'meal_course': 'Custom-meal', 'meal_id': 1, 'quantity':0\n },\n {\n 'item_id':2, 'meal_course': 'Custom-meal', 'meal_id': 1, 'quantity':0\n },\n {\n 'item_id':3, 'meal_course': 'Custom-meal', 'meal_id': 1, 'quantity':0\n }\n ]\n MealCourse.objects.bulk_create([\n MealCourse(\n meal_id=meal_course['meal_id'],\n item_id=meal_course['item_id'],\n meal_course=meal_course['meal_course'],\n quantity=meal_course['quantity']\n )\n for meal_course in meal_course_list\n ])\n\n@pytest.fixture\ndef item_and_quantity_dtos(item_objs):\n list_of_items = [\n ItemAndQuantityDto(\n item_id=1, quantity=2,\n ),\n ItemAndQuantityDto(\n item_id=2, quantity=1,\n ),\n 
ItemAndQuantityDto(\n item_id=3, quantity=1,\n )\n ]\n return list_of_items\n\n\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef custom_meal_upadte_dto(user_objs, item_and_quantity_dtos, custom_meal_objs):\n return CustomeMealUpdateDto(\n user_id=1,\n meal_id = 1,\n meal_course = 'Custom-meal',\n items_and_quantities=item_and_quantity_dtos\n )\n\n@pytest.fixture\ndef user_rating_objs(user_objs, meal_objs):\n rating_list = [\n {\n 'user_id': 1, 'meal_id': 1, 'item_id': 1, 'taste': 4, 'quality': 3\n },\n {\n 'user_id': 1, 'meal_id': 1, 'item_id': 2, 'taste': 4, 'quality': 3\n },\n {\n 'user_id': 1, 'meal_id': 1, 'item_id': 3, 'taste': 4, 'quality': 3\n }\n ]\n UserRating.objects.bulk_create([\n UserRating(\n user_id=rating_obj['user_id'],\n meal_id=rating_obj['meal_id'],\n taste=rating_obj['taste'],\n quality=rating_obj['quality'],\n item_id=rating_obj['item_id']\n )\n for rating_obj in rating_list\n ])\n\n@pytest.fixture\ndef items_and_rating_dtos(item_objs, meal_objs):\n items_and_rating_dtos_list = [\n ItemAndRatingDto(item_id=1, quality=4, taste=4),\n ItemAndRatingDto(item_id=2, quality=4, taste=4),\n ItemAndRatingDto(item_id=3, quality=3, taste=4)\n ]\n return items_and_rating_dtos_list\n\n@pytest.fixture\ndef rating_dtos(items_and_rating_dtos, user_objs, meal_objs):\n return RatingDto(\n user_id=1,\n meal_id=1,\n description='',\n items_and_ratings=items_and_rating_dtos\n )\n\n@pytest.fixture\ndef user_feedback(user_objs, meal_objs):\n UserFeedback.objects.create(user_id=1, meal_id=1, description='')\n\n@pytest.fixture\ndef update_items_and_rating_dtos(item_objs, meal_objs):\n items_and_rating_dtos_list = [\n ItemAndRatingDto(item_id=1, quality=2, taste=2),\n ItemAndRatingDto(item_id=2, quality=2, taste=3),\n ItemAndRatingDto(item_id=3, quality=4, taste=5)\n ]\n return items_and_rating_dtos_list\n\n@pytest.fixture\ndef update_rating_dtos(\n update_items_and_rating_dtos, user_rating_objs,\n user_objs, meal_objs, user_feedback):\n return RatingDto(\n user_id=1,\n meal_id=1,\n description='',\n items_and_ratings=update_items_and_rating_dtos\n )\n\n@pytest.fixture\ndef breakfast_items():\n item_dtos = [\n ItemDto(\n item_id=1, item='Idly', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=2,item='Poori', category=CategoryType.indian_bread.value,\n units=UnitType.pieces.value, meal_id=1\n ),\n ItemDto(\n item_id=3,item='MasalaRice', category=CategoryType.rice.value,\n units=UnitType.laddles.value, meal_id=1\n )\n ]\n return item_dtos\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef meal_breakfast_dto():\n return MealDto(\n meal_id=1, meal_type=TypeOfMeal.breakfast.value,\n date=datetime.now(),\n from_time_string=BREAKFAST_START_TIME,\n to_time_string=BREAKFAST_END_TIME\n )\n\n@pytest.fixture\ndef meal_schedule_dto(breakfast_items, meal_breakfast_dto, meal_course_objs):\n return MealScheduleDto(\n items=breakfast_items,\n meal=meal_breakfast_dto\n )\n\n@pytest.fixture\ndef items_and_their_meal_course(item_objs):\n items_and_meal_course_list = [\n ItemDetailsDto(\n item_id=1,meal_course='Half-meal',quantity=2\n ),\n ItemDetailsDto(\n item_id=2,meal_course='Half-meal',quantity=2\n ),\n ItemDetailsDto(\n item_id=3,meal_course='Half-meal',quantity=1\n ),\n ItemDetailsDto(\n item_id=1,meal_course='Full-meal',quantity=3\n ),\n ItemDetailsDto(\n item_id=2,meal_course='Full-meal',quantity=3\n ),\n ItemDetailsDto(\n item_id=3,meal_course='Full-meal',quantity=2\n )\n ]\n return 
items_and_meal_course_list\n\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef update_meal_schedule_dtos(items_and_their_meal_course, meal_course_objs):\n return UpdateMealScheduleDto(\n meal_type='Breakfast',\n date=date(2020,2,12),\n items=items_and_their_meal_course\n )\n\n@pytest.fixture\n@freeze_time('2020-02-12')\ndef create_meal_schedule(items_and_their_meal_course):\n return UpdateMealScheduleDto(\n meal_type='Breakfast',\n date=date(2020,2,12),\n items=items_and_their_meal_course\n )","sub_path":"food_management/tests/storages/.~c9_invoke_wiL7Xo.py","file_name":".~c9_invoke_wiL7Xo.py","file_ext":"py","file_size_in_byte":20348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"283812039","text":"words = {\"Yeet\":\"Diabetes\",\"Neap\":\"A type of spring tide\"}\nword=\"\"\ndefinition=\"\"\nchoice=\"\"\nwhile True:\n print(\"enter quit to quit\")\n choice=input()\n if choice==\"new\":\n print(\"Please enter the word you'd wish to add.\")\n word=input()\n print(\"Please enter the definition for \"+word)\n definition=input()\n words[word]=definition\n elif choice==\"print\":\n for i in words:\n print(i)\n elif choice==\"definition\":\n print(\"Which word would you like the definition of\")\n word=input()\n definition=words[word]\n print(definition)\n elif choice==\"quit\":\n break\n else:\n print(\"Invalid option\")\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"559084394","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/hachoir_parser/image/psd.py\n# Compiled at: 2009-09-07 17:44:28\n\"\"\"\nPhotoshop parser (.psd file).\n\nCreation date: 8 january 2006\nAuthor: Victor Stinner\n\"\"\"\nfrom hachoir_parser import Parser\nfrom hachoir_core.field import FieldSet, UInt16, UInt32, String, NullBytes, Enum, RawBytes\nfrom hachoir_core.endian import BIG_ENDIAN\nfrom hachoir_parser.image.photoshop_metadata import Photoshop8BIM\n\nclass Config(FieldSet):\n __module__ = __name__\n\n def __init__(self, *args):\n FieldSet.__init__(self, *args)\n self._size = (4 + self['size'].value) * 8\n\n def createFields(self):\n yield UInt32(self, 'size')\n while not self.eof:\n yield Photoshop8BIM(self, 'item[]')\n\n\nclass PsdFile(Parser):\n __module__ = __name__\n endian = BIG_ENDIAN\n PARSER_TAGS = {'id': 'psd', 'category': 'image', 'file_ext': ('psd', ), 'mime': ('image/psd', 'image/photoshop', 'image/x-photoshop'), 'min_size': 4 * 8, 'magic': (('8BPS\\x00\\x01', 0), ), 'description': 'Photoshop (PSD) picture'}\n COLOR_MODE = {0: 'Bitmap', 1: 'Grayscale', 2: 'Indexed', 3: 'RGB color', 4: 'CMYK color', 7: 'Multichannel', 8: 'Duotone', 9: 'Lab Color'}\n COMPRESSION_NAME = {0: 'Raw data', 1: 'RLE'}\n\n def validate(self):\n if self.stream.readBytes(0, 4) != '8BPS':\n return 'Invalid signature'\n return True\n\n def createFields(self):\n yield String(self, 'signature', 4, 'PSD signature (8BPS)', charset='ASCII')\n yield UInt16(self, 'version')\n yield NullBytes(self, 'reserved[]', 6)\n yield UInt16(self, 'nb_channels')\n yield UInt32(self, 'width')\n yield UInt32(self, 'height')\n yield UInt16(self, 'depth')\n yield Enum(UInt16(self, 'color_mode'), self.COLOR_MODE)\n yield UInt32(self, 'mode_data_size')\n size = self['mode_data_size'].value\n if size:\n yield 
RawBytes(self, 'mode_data', size)\n yield Config(self, 'config')\n yield UInt32(self, 'reserved_data_size')\n size = self['reserved_data_size'].value\n if size:\n yield RawBytes(self, 'reserved_data', size)\n yield Enum(UInt16(self, 'compression'), self.COMPRESSION_NAME)\n size = (self.size - self.current_size) // 8\n if size:\n yield RawBytes(self, 'end', size)","sub_path":"pycfiles/hachoir_parser-1.3.4-py2.4/psd.py","file_name":"psd.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"364641554","text":"import math\r\nfin = open('adventdata.in','r')\r\ninstructions = [x.strip() for x in fin]\r\n#coordinates for both car and waypoint\r\ncar_x_pos = 0\r\ncar_y_pos = 0\r\nwaypoint_x_pos = 10\r\nwaypoint_y_pos = 1\r\nfor instruction in instructions:\r\n\tcommand = instruction[0]\r\n\tamount = int(instruction[1:])\r\n\t#used for calculating the rotation\r\n\tx_diff = (waypoint_x_pos - car_x_pos)\r\n\ty_diff = (waypoint_y_pos - car_y_pos)\r\n\t#moving forward is based on the difference btwn car and waypoint\r\n\tif command == 'F':\r\n\t\tx_change = (waypoint_x_pos - car_x_pos)*amount\r\n\t\ty_change =(waypoint_y_pos - car_y_pos)*amount\r\n\t\twaypoint_x_pos = waypoint_x_pos + x_change\r\n\t\twaypoint_y_pos = waypoint_y_pos + y_change\r\n\t\tcar_x_pos = car_x_pos + x_change\r\n\t\tcar_y_pos = car_y_pos + y_change\r\n\telif command == 'L': \r\n\t\t'''\r\n\t\tfor rotations (clockwise) (x,y), 90 degrees = -y,x, 180 degrees = (-x,-y) and 270 degrees = (y,-x)\r\n\t\tcounterclockwise, 90 and 270 degrees are switched\r\n\t\t'''\r\n\t\tif amount == 90: \r\n\t\t\twaypoint_x_pos = car_x_pos + -1*y_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + x_diff\r\n\t\tif amount == 180:\r\n\t\t\twaypoint_x_pos = car_x_pos + -1*x_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + -1*y_diff\r\n\t\tif amount == 270:\r\n\t\t\ttemp = waypoint_x_pos\r\n\t\t\twaypoint_x_pos = car_x_pos + y_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + -1*x_diff\r\n\telif command == 'R': \r\n\t\tif amount == 270: \r\n\t\t\twaypoint_x_pos = car_x_pos + -1*y_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + x_diff\r\n\t\tif amount == 180:\r\n\t\t\twaypoint_x_pos = car_x_pos + -1*x_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + -1*y_diff\r\n\t\tif amount == 90:\r\n\t\t\ttemp = waypoint_x_pos\r\n\t\t\twaypoint_x_pos = car_x_pos + y_diff\r\n\t\t\twaypoint_y_pos = car_y_pos + -1*x_diff\r\n\telif command == 'N': waypoint_y_pos = waypoint_y_pos + amount\r\n\telif command == 'S': waypoint_y_pos = waypoint_y_pos - amount\r\n\telif command == 'E': waypoint_x_pos = waypoint_x_pos + amount\r\n\telif command == 'W': waypoint_x_pos = waypoint_x_pos - amount\r\n\r\n#calculates manhattan distance\r\nprint(abs(car_x_pos)+abs(car_y_pos))\r\n","sub_path":"Day_12/AdventDay12-2.py","file_name":"AdventDay12-2.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"605133859","text":"from Constants import GEO_SCRIPT_FUNCS\nfrom Rom import ROM\nimport typing\n\nCMD_LENGTH = {\n 0x00: 8,\n 0x01: 4,\n 0x02: 8,\n 0x03: 4,\n 0x04: 4,\n 0x05: 4,\n 0x06: None,\n 0x07: None,\n 0x08: 12, # figure out\n 0x09: 4,\n 0x0A: lambda data: 12 if data[1] > 0 else 8,\n 0x0B: 4,\n 0x0C: 4,\n 0x0D: 8,\n 0x0E: 8,\n 0x0F: 14,\n 0x10: 16,\n 0x11: lambda data: 12 if data[1] > 0 else 8,\n 0x12: lambda data: 12 if data[1] > 0 else 8,\n 0x13: lambda data: 12, # figure out\n 0x14: lambda data: 12 if data[1] > 0 else 8,\n 0x15: 
8,\n 0x16: 8,\n 0x17: 4,\n 0x18: 8,\n 0x19: 8,\n 0x1A: 8,\n 0x1D: lambda data: 12 if data[1] & 0x80 > 0 else 8,\n 0x1E: 8,\n 0x1F: 16,\n 0x20: 4\n}\n\nclass GeoLayoutParser:\n def __init__(self, rom : 'ROM', address_start : int, address_end : int):\n self.rom = rom\n\n self.address_start = address_start\n self.address_end = address_end\n\n self.rom.file.seek(address_start, 0)\n self.data = self.rom.file.read(address_end - address_start)\n\n self.commands = []\n self.was_processed = False\n\n def process(self):\n cursor_pos = 0\n indent = 0\n while cursor_pos < len(self.data):\n # determine number of bytes to be read\n cmd = self.data[cursor_pos]\n\n if cmd is None:\n print(\"GEO Layout EOF\")\n break\n\n if cmd not in CMD_LENGTH:\n print(f'{hex(self.address_start + cursor_pos).ljust(10, \"0\")}: {hex(cmd)}')\n print('next few bytes:')\n print([hex(b) for b in self.data[cursor_pos:cursor_pos+30]])\n raise Exception(\"No idea where we are\")\n\n cmd_length = None\n if type(CMD_LENGTH[cmd]) is int:\n cmd_length = CMD_LENGTH[cmd]\n elif callable(CMD_LENGTH[cmd]):\n cmd_length = CMD_LENGTH[cmd](self.data[cursor_pos:cursor_pos+16])\n else:\n print(hex(cmd))\n raise Exception(\"No idea how long this command is\")\n\n if cmd == 0x04:\n indent = indent + 1\n if cmd == 0x05:\n indent = indent - 1\n\n #print(GEO_SCRIPT_FUNCS[cmd])\n #print([hex(b) for b in self.data[cursor_pos:cursor_pos + cmd_length]])\n self.commands.append((indent, cursor_pos, self.data[cursor_pos:cursor_pos + cmd_length]))\n cursor_pos = cursor_pos + cmd_length\n\n self.was_processed = True\n\n def replace_command_values(self, cmd, index, value, filter_lambda=lambda d: True):\n if not self.was_processed:\n raise Exception(\"GeoLayouts weren't processed. Please call 'process()' prior to manipulating the data.\")\n count = 0\n for (_, pos, command) in self.commands:\n if command[0] == cmd and command[4:8] == bytes([0x80, 0x2D, 0x5B, 0x98]) and command[2] == 1: print([hex(p) for p in command])\n if command[0] == cmd and filter_lambda(command):\n count = count + 1\n self.rom.target.seek(self.address_start + pos)\n data = self.rom.target.read(8)\n print([f'>{hex(b)}' if i == index else hex(b) for i, b in enumerate(data)])\n self.rom.target.seek(self.address_start + pos + index)\n self.rom.target.write(bytes([value]))\n\n \n return count\n\n def dump(self):\n for (indent, pos, command) in self.commands:\n print(f'{hex(self.address_start + pos).ljust(8, \"0\")}:{\" \" * 2 * indent}{[hex(c) for c in command]}')","sub_path":"GeoLayout.py","file_name":"GeoLayout.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466722571","text":"# Form / Progress\n# Use a progress bar to indicate completion status of an operation.\n# ---\nfrom h2o_wave import site, ui\n\npage = site['/demo']\n\npage['example'] = ui.form_card(\n box='1 1 4 -1',\n items=[\n ui.progress(label='Indeterminate Progress', caption='Goes on forever'),\n ui.progress(label='Standard Progress', caption='Downloading the interwebs...', value=0.25),\n ]\n)\npage.save()\n","sub_path":"py/examples/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"620498319","text":"import unittest\n\nfrom vprof import base_profile\n\n# For Python 2 and Python 3 compatibility.\ntry:\n import mock\nexcept ImportError:\n from unittest import mock\n\n\nclass 
BaseProfileUnittest(unittest.TestCase):\n def setUp(self):\n self._profile = object.__new__(base_profile.BaseProfile)\n\n def testInit(self):\n program_cmd = 'foo.py --bar --baz'\n self._profile.__init__(program_cmd)\n self.assertEqual(self._profile._program_name, 'foo.py')\n self.assertEqual(self._profile._program_args, ['foo.py', '--bar', '--baz'])\n self.assertDictEqual(self._profile._globs, {\n '__file__': 'foo.py',\n '__name__': '__main__',\n '__package__': None\n })\n\n def testRun(self):\n with self.assertRaises(NotImplementedError):\n self._profile.run()\n","sub_path":"vprof/tests/base_profile_test.py","file_name":"base_profile_test.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"601208178","text":"import sys\n\nfrom libs.audio_steganography.engine import encode, decode\n\n# Constants\nDECODE = \"decode\" # Operation mode of the software\nENCODE = \"encode\"\nWAV = \"wav\" # input type\nMP3 = \"mp3\" # input type\nRATE = 44100 # Sample rate (expected)\n\nif __name__ == '__main__':\n fileToEncode = \"./samples/a2002011001-e02.wav\"\n messageFile = './samples/message.txt'\n encodedOutputFile = 'encoded.wav'\n decodedOutputFile = 'decoded.wav'\n decodedMessageFile = 'decoded_message.txt'\n\n print(\"Encoding file: {}, adding message: {}, and write to: {}\".format(fileToEncode,\n messageFile,\n encodedOutputFile))\n try:\n encode(fileToEncode, messageFile, encodedOutputFile)\n except:\n print(\"Nope...\")\n sys.exit(1)\n\n print(\"Decoding file: {}\".format(decodedMessageFile))\n decode(encodedOutputFile, decodedMessageFile)\n with open(decodedMessageFile, 'r') as fh:\n print(\"Content of the file:\\n{}\".format(fh.read()))\n\n","sub_path":"myHackaton/text_in_audio_test.py","file_name":"text_in_audio_test.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"372678383","text":"import logging\nimport sys\nfrom datetime import date\n\nformatter = logging.Formatter('{pathname:s}:{lineno:d}:{levelname:s}: {message:s}', style='{')\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(formatter)\nhandler.setLevel(logging.DEBUG)\n\nroot = logging.getLogger()\nroot.addHandler(handler)\nroot.setLevel(logging.DEBUG)\n\nlogger = logging.getLogger(__name__)\n\nYEAR_FROM = 2000\nYEAR_TO = 3000\n\n\ndef parse(d):\n l = list(map(int, d.split('/')))\n\n possible_orderings = [\n (l[0], l[1], l[2]), # Y/m/d\n (l[2], l[1], l[0]), # d/m/Y\n (l[2], l[0], l[1]), # m/d/Y\n # Exotic formats\n (l[0], l[2], l[1]), # Y/d/m\n (l[1], l[0], l[2]), # m/Y/d\n (l[1], l[2], l[0]), # d/Y/m\n ]\n for data in possible_orderings:\n year, month, day = data\n if year + YEAR_FROM < YEAR_TO:\n year = year + YEAR_FROM\n try:\n return date(year, month, day).strftime('%Y-%m-%d')\n except ValueError as e:\n logger.debug(e)\n return '{} is illegal'.format(d)\n\n\ndef main():\n with open('data.txt') as f:\n data = f.readline()\n\n print(parse(data))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dateparser.py","file_name":"dateparser.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"255774730","text":"\"\"\"\nCopyright 2017-present, Airbnb Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License 
at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n# pylint: disable=abstract-class-instantiated,protected-access,no-self-use,abstract-method,attribute-defined-outside-init\nimport json\nfrom mock import Mock, mock_open, patch\n\nfrom boxsdk.exception import BoxException\nfrom nose.tools import assert_equal, assert_false, assert_items_equal, assert_true\nfrom requests.exceptions import ConnectionError, Timeout\n\nfrom app_integrations.apps.box import BoxApp\nfrom app_integrations.config import AppConfig\n\nfrom tests.unit.app_integrations.test_helpers import (\n get_valid_config_dict,\n MockSSMClient\n)\n\n@patch.object(BoxApp, 'type', Mock(return_value='type'))\n@patch.object(AppConfig, 'SSM_CLIENT', MockSSMClient())\nclass TestBoxApp(object):\n \"\"\"Test class for the BoxApp\"\"\"\n\n # Remove all abstractmethods so we can instantiate BoxApp for testing\n @patch.object(BoxApp, '__abstractmethods__', frozenset())\n def setup(self):\n \"\"\"Setup before each method\"\"\"\n self._app = BoxApp(AppConfig(get_valid_config_dict('box_admin_events')))\n\n def test_sleep(self):\n \"\"\"BoxApp - Sleep Seconds\"\"\"\n assert_equal(self._app._sleep_seconds(), 0)\n\n def test_required_auth_info(self):\n \"\"\"BoxApp - Required Auth Info\"\"\"\n assert_items_equal(self._app.required_auth_info().keys(), {'keyfile'})\n\n @patch('app_integrations.apps.box.JWTAuth.from_settings_dictionary',\n Mock(return_value=True))\n def test_keyfile_validator(self):\n \"\"\"BoxApp - Keyfile Validation, Success\"\"\"\n validation_function = self._app.required_auth_info()['keyfile']['format']\n data = {'test': 'keydata'}\n mocker = mock_open(read_data=json.dumps(data))\n with patch('__builtin__.open', mocker):\n loaded_keydata = validation_function('fakepath')\n assert_equal(loaded_keydata, data)\n\n @patch('app_integrations.apps.box.JWTAuth.from_settings_dictionary')\n def test_keyfile_validator_failure(self, cred_mock):\n \"\"\"BoxApp - Keyfile Validation, Failure\"\"\"\n validation_function = self._app.required_auth_info()['keyfile']['format']\n cred_mock.return_value = False\n mocker = mock_open(read_data=json.dumps({'test': 'keydata'}))\n with patch('__builtin__.open', mocker):\n assert_false(validation_function('fakepath'))\n cred_mock.assert_called()\n\n @patch('app_integrations.apps.box.JWTAuth.from_settings_dictionary')\n def test_keyfile_validator_bad_json(self, cred_mock):\n \"\"\"BoxApp - Keyfile Validation, Bad JSON\"\"\"\n validation_function = self._app.required_auth_info()['keyfile']['format']\n mocker = mock_open(read_data='invalid json')\n with patch('__builtin__.open', mocker):\n assert_false(validation_function('fakepath'))\n cred_mock.assert_not_called()\n\n @patch('app_integrations.apps.box.JWTAuth.from_settings_dictionary',\n Mock(return_value=True))\n def test_load_credentials(self):\n \"\"\"BoxApp - Load Auth, Success\"\"\"\n assert_true(self._app._load_auth('fakedata'))\n\n @patch('app_integrations.apps.box.JWTAuth.from_settings_dictionary')\n def test_load_credentials_bad(self, cred_mock):\n \"\"\"BoxApp - Load Auth, ValueError\"\"\"\n cred_mock.side_effect = ValueError('Bad things happened')\n assert_false(self._app._load_auth('fakedata'))\n\n 
@patch('app_integrations.apps.box.Client',\n Mock(return_value=True))\n @patch('app_integrations.apps.box.BoxApp._load_auth')\n def test_create_client(self, auth_mock):\n \"\"\"BoxApp - Create Client, Success\"\"\"\n assert_true(self._app._create_client())\n auth_mock.assert_called_with(self._app._config.auth['keyfile'])\n\n @patch('logging.Logger.debug')\n def test_create_client_exists(self, log_mock):\n \"\"\"BoxApp - Create Client, Exists\"\"\"\n self._app._client = True\n assert_true(self._app._create_client())\n log_mock.assert_called_with('Client already instantiated for %s', 'type')\n\n @patch('app_integrations.apps.box.BoxApp._load_auth',\n Mock(return_value=False))\n def test_create_client_fail_auth(self):\n \"\"\"BoxApp - Create Client, Auth Failure\"\"\"\n assert_false(self._app._create_client())\n\n def test_gather_logs(self):\n \"\"\"BoxApp - Gather Logs, Success\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n payload = {\n 'chunk_size': 10,\n 'next_stream_position': '1152922976252290886',\n 'entries': self._get_sample_logs(10)\n }\n client_mock.make_request.return_value.json.return_value = payload\n\n assert_equal(len(self._app._gather_logs()), 10)\n assert_equal(self._app._last_timestamp, '2017-10-27T12:31:22-07:00')\n\n @patch('app_integrations.apps.box.BoxApp._create_client',\n Mock(return_value=True))\n @patch('logging.Logger.exception')\n def test_gather_logs_box_error(self, log_mock):\n \"\"\"BoxApp - Gather Logs, BoxException\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n client_mock.make_request.side_effect = BoxException('bad error')\n assert_false(self._app._gather_logs())\n log_mock.assert_called_with('Failed to get events for %s', 'type')\n\n @patch('app_integrations.apps.box.BoxApp._create_client',\n Mock(return_value=True))\n @patch('logging.Logger.exception')\n def test_gather_logs_requests_error(self, log_mock):\n \"\"\"BoxApp - Gather Logs, ConnectionError\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n self._app._next_stream_position = 10241040195019\n client_mock.make_request.side_effect = ConnectionError(response='bad error')\n assert_false(self._app._gather_logs())\n log_mock.assert_called_with('Bad response received from host, will retry once')\n\n @patch('app_integrations.apps.box.BoxApp._create_client',\n Mock(return_value=True))\n @patch('logging.Logger.exception')\n def test_gather_logs_requests_timeout(self, log_mock):\n \"\"\"BoxApp - Gather Logs, Timeout\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n client_mock.make_request.side_effect = Timeout(response='request timed out')\n assert_false(self._app._gather_logs())\n log_mock.assert_called_with('Request timed out for %s', 'type')\n\n @patch('app_integrations.apps.box.BoxApp._load_auth',\n Mock(return_value=False))\n def test_gather_logs_no_client(self):\n \"\"\"BoxApp - Gather Logs, No Client\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n self._app._client = False\n assert_false(self._app._gather_logs())\n client_mock.make_request.assert_not_called()\n\n @patch('app_integrations.apps.box.BoxApp._create_client',\n Mock(return_value=True))\n @patch('logging.Logger.error')\n def test_gather_logs_no_results(self, log_mock):\n \"\"\"BoxApp - Gather Logs, No Results From API\"\"\"\n with patch.object(self._app, '_client') as client_mock:\n client_mock.make_request.return_value.json.return_value = None\n assert_false(self._app._gather_logs())\n log_mock.assert_called_with('No results received from the Box API 
request for %s',\n                          'type')\n\n    @patch('app_integrations.apps.box.BoxApp._create_client',\n           Mock(return_value=True))\n    @patch('logging.Logger.info')\n    def test_gather_logs_empty_items(self, log_mock):\n        \"\"\"BoxApp - Gather Logs, Empty Entries List\"\"\"\n        with patch.object(self._app, '_client') as client_mock:\n            payload = {\n                'chunk_size': 0,\n                'next_stream_position': '1152922976252290886',\n                'entries': []\n            }\n            client_mock.make_request.return_value.json.return_value = payload\n            assert_false(self._app._gather_logs())\n            log_mock.assert_called_with('No events in response from the Box API request for %s',\n                                        'type')\n\n    @staticmethod\n    def _get_sample_logs(count):\n        \"\"\"Helper function for returning sample Box admin event logs\"\"\"\n        return [{\n            'additional_details': None,\n            'created_at': '2017-10-27T12:31:22-07:00',\n            'created_by': {\n                'id': '2710218233',\n                'login': 'testemail@email.com',\n                'name': 'User Name',\n                'type': 'user'\n            },\n            'event_id': '0e0b8122-17ed-42ee-8a9d-d9a57bf8dd83',\n            'event_type': 'ADD_LOGIN_ACTIVITY_DEVICE',\n            'ip_address': '1.1.1.22',\n            'session_id': None,\n            'source': {\n                'id': '2710218233',\n                'login': 'testemail@email.com',\n                'name': 'User Name',\n                'type': 'user'\n            },\n            'type': 'event'\n        } for _ in range(count)]\n","sub_path":"tests/unit/app_integrations/test_apps/test_box.py","file_name":"test_box.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"562125317","text":"import allure\r\nfrom page_locators.search_policy_locator import SearchLocator\r\nimport time\r\nfrom page_locators.menu_locator import MenuLocator\r\nfrom page_locators.client_detail_locator import ClientDetailLocator\r\nfrom testcase.mdes_base_case import MdesBaseCase\r\nfrom config import config\r\nfrom selenium import webdriver\r\n\r\n# Proposal form review / search\r\nclass SearchPage(MdesBaseCase):\r\n    temp_dict1 = {}\r\n\r\n    @allure.step('Search the submitted proposal number and fetch the five key policy elements')\r\n    def search_policy_number(self):\r\n\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('MainInfoFrame')\r\n        # cont_no = '202010291707'\r\n        cont_no = self.get_element_attribute(SearchLocator.proposal_cont_no, 'value')  # get the submitted proposal number\r\n        print(cont_no)\r\n        self.switch_frame_to_main()\r\n        print('Clicking proposal number search to enter the search page')\r\n        # self.click_element(SearchLocator.search_cont_no)  # open the search page, approach 1: locate the element (unstable)\r\n        # self.click_element_by_js(SearchLocator.search_cont_no)  # open the search page, approach 2: locate via JS (unstable)\r\n        self.driver.get(config.get_config(\"MDES\", \"ip\") + SearchLocator.search_cont_no)  # open the search page, approach 3: navigate to the URL directly\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        # self.wait_element(SearchLocator.proposal_cont_no) --------------------------------\r\n        self.input_text(SearchLocator.cont_no_input, cont_no)  # fill in the proposal number\r\n        print('Clicking search')\r\n        self.click_element(SearchLocator.cont_no_searchBTN)\r\n        time.sleep(3)\r\n        try:\r\n            self.click_element(SearchLocator.cont_no)  # click the proposal number to open the policy detail page\r\n        except Exception as e:\r\n            print('Proposal number not found...')\r\n            raise\r\n        time.sleep(3)\r\n\r\n        \"\"\" Collect the retained info \"\"\"\r\n        # temp_list = []  # new list to store the retained info\r\n        temp_risk_list = []\r\n\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('MainInfoFrame')\r\n        # proposal_cont_no = self.get_element_attribute(SearchLocator.proposal_cont_no, 'value')  # proposal number\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('WorkFrame')\r\n\r\n        \"\"\" Read the risk (coverage) info \"\"\"\r\n        tr_elements = self.get_elements(SearchLocator.tr_locator)\r\n        for tr_element in tr_elements:\r\n            temp_risk_list.append(tr_element.text)\r\n\r\n        \"\"\" Assemble the risk info into a dict \"\"\"\r\n\r\n        temp_dict = {}\r\n\r\n        # list2 = []\r\n        # for i in range(len(temp_risk_list)):\r\n        #     list2 = temp_risk_list[i].split()\r\n        #     for j in range(len(list2)):\r\n        #         temp_dict[risk_keys_list[j] + \"_\" + str(i)] = list2[j]\r\n        #         print(risk_keys_list[j] + \"_\" + str(i))\r\n        # print(temp_dict)\r\n\r\n        risk_keys_list = ['risk_code', 'risk_name', 'year', 'pay_year', 'get_year', 'select_insured_or_premium',\r\n                          'pay_frequency', 'each_premium']\r\n        temp_dict = {(risk_keys_list[j] + \"_\" + str(i)): temp_risk_list[i].split()[j] for i in range(len(temp_risk_list)) for j in\r\n                     range(len(temp_risk_list[i].split()))}\r\n\r\n        first_premium = self.get_element_text(SearchLocator.first_premium)  # total first-term premium\r\n        temp_dict['first_premium'] = first_premium\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('NavigationFrame')\r\n        self.click_element(MenuLocator.tab_2)  # client info tab\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('MainInfoFrame')\r\n        time.sleep(1)\r\n        policy_number = self.get_element_attribute(SearchLocator.policy_number, 'value')  # policy number\r\n        self.switch_frame_to_main()\r\n        self.select_frame('frame_content')\r\n        self.select_frame(0)\r\n        self.select_frame('WorkFrame')\r\n        # appnt_name = self.get_element_attribute(ClientDetailLocator.appnt_name, 'value')  # applicant name\r\n        # appnt_sex = self.get_element_attribute(ClientDetailLocator.appnt_sex, 'value')  # applicant gender\r\n        # appnt_nationality = self.get_element_attribute(ClientDetailLocator.appnt_nationality, 'value')  # applicant nationality\r\n        # appnt_birthday = self.get_element_attribute(ClientDetailLocator.appnt_birthday, 'value')  # applicant birthday\r\n        # appnt_ID_type = self.get_element_attribute(ClientDetailLocator.appnt_id_type, 'value')  # applicant ID type\r\n        # appnt_ID = self.get_element_attribute(ClientDetailLocator.appnt_id, 'value')  # applicant ID number\r\n\r\n        \"\"\"Combine the policy info and the five key elements into a dict\"\"\"\r\n        # temp_dict['proposal_cont_no'] = proposal_cont_no\r\n        temp_dict['policy_number'] = policy_number\r\n        # temp_dict['appnt_name'] = appnt_name\r\n        # temp_dict['appnt_sex'] = appnt_sex\r\n        # temp_dict['appnt_nationality'] = appnt_nationality\r\n        # temp_dict['appnt_birthday'] = appnt_birthday\r\n        # temp_dict['appnt_ID_type'] = appnt_ID_type\r\n        # temp_dict['appnt_ID'] = appnt_ID\r\n\r\n        return temp_dict\r\n\r\nif __name__ == \"__main__\":\r\n    sp = SearchPage(webdriver.Ie())\r\n    print(config.get_config('MDES', 'ip') + SearchLocator.search_cont_no)\r\n\r\n","sub_path":"MetLife/page/search_policy_page.py","file_name":"search_policy_page.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"369989350","text":"\"\"\"\r\n(b) Extract the following fields from the jobpost column:\r\n\t1. Job Title\r\n\t2. Position Duration\r\n\t3. Position Location\r\n\t4. Job Description\r\n\t5. Job Responsibilities\r\n\t6. Required Qualifications\r\n\t7. Renumeration\r\n\t8. Application Deadline\r\n\t9. 
About Company\r\n\"\"\"\r\nimport pandas as pd\r\nimport re\r\n\r\ndef find_jobtitle(text):\r\n\ttry:\r\n\t\ttitle = re.search(\"TITLE:\\s*(.*)(\\r\\n)\",text).group(1)\r\n\t\treturn title\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_duration(text):\r\n\ttry:\r\n\t\tduration = re.search(\"DURATION:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",duration).group(0)\r\n\t\tduration = duration.replace(delete,\" \")\r\n\t\treturn duration\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_location(text):\r\n\ttry:\r\n\t\tlocation = re.search(\"LOCATION:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",location).group(0)\r\n\t\tlocation = location.replace(delete,\" \")\r\n\t\treturn location\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_description(text):\r\n\ttry:\r\n\t\tdescription = re.search(\"DESCRIPTION:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",description).group(0)\r\n\t\tdescription = description.replace(delete,\" \")\r\n\t\treturn description\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_responsibilities(text):\r\n\ttry:\r\n\t\tresp = re.search(\"RESPONSIBILITIES:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",resp).group(0)\r\n\t\tresp = resp.replace(delete,\" \")\r\n\t\treturn resp\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_qualifications(text):\r\n\ttry:\r\n\t\tqual = re.search(\"QUALIFICATIONS:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",qual).group(0)\r\n\t\tqual = qual.replace(delete,\" \")\r\n\t\treturn qual\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_renumeration(text):\r\n\ttry:\r\n\t\trenum = re.search(\"RENUMERATION:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",renum).group(0)\r\n\t\trenum = renum.replace(delete,\" \")\r\n\t\treturn renum\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_deadline(text):\r\n\ttry:\r\n\t\tdeadline = re.search(\"DEADLINE:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",deadline).group(0)\r\n\t\tdeadline = deadline.replace(delete,\" \")\r\n\t\treturn deadline\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\ndef find_about(text):\r\n\ttry:\r\n\t\tabout = re.search(\"COMPANY:\\s*(([-]*[0-9]*[\\W]*[\\s]*\\w+[\\s]*[\\W]*[0-9]*)*)\",text).group(1)\r\n\t\tdelete = re.search(\"[\\r\\n]*[\\-]{34}[\\s\\w\\W\\.]*|[\\r\\n]*[A-Z]+[\\s]*[\\W]*[A-Z]*[\\:]+[\\s\\w\\W\\.]*\",about).group(0)\r\n\t\tabout = about.replace(delete,\" \")\r\n\t\treturn 
about\r\n\texcept AttributeError:\r\n\t\treturn \"Not Mentioned\"\r\n\r\njobposts = pd.read_csv('jobposts.csv')\r\njobpostings = pd.DataFrame()\r\norder_of_cols = ['Job Titles', 'Position Duration', 'Position Location', 'Job Description', 'Job Responsibilities', 'Required Qualifications', 'Renumeration', 'Application Deadline', 'About Company']\r\n\r\nfor i, row in jobposts.iterrows():\r\n\ttitle = find_jobtitle(row['jobpost'])\r\n\tduration = find_duration(row['jobpost'])\r\n\tlocation = find_location(row['jobpost'])\r\n\tdescription = find_description(row['jobpost'])\r\n\tresponsibilities = find_responsibilities(row['jobpost'])\r\n\tqualifications = find_qualifications(row['jobpost'])\r\n\trenumeration = find_renumeration(row['jobpost'])\r\n\tdeadline = find_deadline(row['jobpost'])\r\n\tabout = find_about(row['jobpost'])\r\n\tjobpostings = jobpostings.append({'Job Titles': title, 'Position Duration': duration, 'Position Location': location, 'Job Description': description,\r\n\t\t\t\t\t\t\t\t\t 'Job Responsibilities': responsibilities, 'Required Qualifications': qualifications, 'Renumeration': renumeration, \r\n\t\t\t\t\t\t\t\t\t 'Application Deadline': deadline, 'About Company': about}, ignore_index =True)\r\n\r\n\t\r\njobpostings = jobpostings[order_of_cols]\r\nprint(jobpostings)\r\n","sub_path":"q3/q3_b.py","file_name":"q3_b.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"141262713","text":"import numpy as np\nfrom config import cifar10_dir\n\n\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n\ndef load_training_data():\n data = np.zeros((50000, 3, 32, 32), np.uint8)\n data[0 :10000, :] = unpickle(cifar10_dir + 'data_batch_1')[b'data'].reshape((-1, 3, 32, 32))\n data[10000:20000, :] = unpickle(cifar10_dir + 'data_batch_2')[b'data'].reshape((-1, 3, 32, 32))\n data[20000:30000, :] = unpickle(cifar10_dir + 'data_batch_3')[b'data'].reshape((-1, 3, 32, 32))\n data[30000:40000, :] = unpickle(cifar10_dir + 'data_batch_4')[b'data'].reshape((-1, 3, 32, 32))\n data[40000:50000, :] = unpickle(cifar10_dir + 'data_batch_5')[b'data'].reshape((-1, 3, 32, 32))\n data = np.swapaxes(data, 1, 3)\n data = np.swapaxes(data, 1, 2)\n\n mean_image = np.mean(data, axis=0)\n\n labels = np.zeros((50000,), np.uint8)\n labels[0 :10000] = np.array(unpickle(cifar10_dir + 'data_batch_1')[b'labels']).astype(np.uint8)\n labels[10000:20000] = np.array(unpickle(cifar10_dir + 'data_batch_2')[b'labels']).astype(np.uint8)\n labels[20000:30000] = np.array(unpickle(cifar10_dir + 'data_batch_3')[b'labels']).astype(np.uint8)\n labels[30000:40000] = np.array(unpickle(cifar10_dir + 'data_batch_4')[b'labels']).astype(np.uint8)\n labels[40000:50000] = np.array(unpickle(cifar10_dir + 'data_batch_5')[b'labels']).astype(np.uint8)\n\n np.save(cifar10_dir+'training_data.npy', data)\n np.save(cifar10_dir+'training_label.npy', labels)\n np.save(cifar10_dir+'mean_image.npy', mean_image)\n print(\"training data saved to npy format\")\n\n\ndef load_test_data():\n data = unpickle(cifar10_dir + 'test_batch')[b'data'].reshape((-1, 3, 32, 32))\n data = np.swapaxes(data, 1, 3)\n data = np.swapaxes(data, 1, 2)\n labels = np.array(unpickle(cifar10_dir + 'test_batch')[b'labels']).astype(np.uint8)\n np.save(cifar10_dir+'test_data.npy', data)\n np.save(cifar10_dir+'test_label.npy', labels)\n print(\"test data saved to npy format\")\n\n\ndef load_label_names():\n return 
unpickle(cifar10_dir + 'batches.meta')[b'label_names'] # list\n\n\ndef train_valid_split():\n    data = np.load(cifar10_dir+'training_data.npy')\n    train = data[:45000]  # was data[1:45000], which skipped sample 0\n    valid = data[45000:]\n    np.save(cifar10_dir+'train_data.npy', train)\n    np.save(cifar10_dir+'valid_data.npy', valid)\n\n    label = np.load(cifar10_dir + 'training_label.npy')\n    label_train = label[:45000]  # was label[1:45000], which skipped label 0\n    label_valid = label[45000:]\n    np.save(cifar10_dir+'train_label.npy', label_train)\n    np.save(cifar10_dir+'valid_label.npy', label_valid)\n\n    print(\"split done\")\n\n\nload_training_data()\nload_test_data()\ntrain_valid_split()","sub_path":"Project/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"231515487","text":"# Task 1\n# Print the name of every student in the list, each on a new line\n\nnames = ['Olya', 'Petya', 'Vasia', 'Misha']\nfor name in names:\n    print(name)\n\n\n# Task 2\n# Print the name of every student in the list, showing the number of letters in it next to the name.\n\nnames = ['Olya', 'Petya', 'Vasia', 'Misha']\nfor name in names:\n    print(name, \"-\", len(name), \"letters in name\")\n\n\n# Task 3\n# Print the name of every student in the list, with the student's gender next to the name\n\nis_male = {\n    'Olya': False,\n    'Petya': True,\n    'Vasia': True,\n    'Masha': False,\n}\nnames = ['Olya', 'Petya', 'Vasia', 'Masha']\nfor name in names:\n    if name in is_male:\n        if is_male[name] == True:\n            print(name, '-', 'gender: male')\n        else:\n            print(name, '-', 'gender: female')\n\n\n# Task 4\n# Given groups of students. Print the number of groups and, for each group, the number of students in it\n# Sample output:\n# 2 groups in total.\n# This group has 2 students.\n# This group has 3 students.\n\ngroups = [\n    ['Vasya', 'Masha'],\n    ['Olya', 'Petr', 'Grisha'],\n]\nprint('How many groups are in list -', len(groups))\nfor group in groups:\n    print('Number of group members -', len(group))\n\n\n\n# Task 5\n# For each group of students, list its members on a new line.\n# Example:\n# Group 1: Vasya, Masha\n# Group 2: Olya, Petya, Grisha\n\ngroups = [\n    ['Vasya', 'Masha'],\n    ['Olya', 'Petr', 'Grisha'],\n]\n\ngroups_counter = 0\nfor group in groups:\n    groups_counter += 1\n    print('In group number', groups_counter, 'number of members is -', len(group))\n","sub_path":"for_challenges.py","file_name":"for_challenges.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"358039690","text":"import json\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.gis.geos import *\nfrom django.http import HttpResponseRedirect, HttpResponse, \\\n    HttpResponseBadRequest, Http404\nfrom django.shortcuts import render, redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.translation import get_language\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom mezzanine.conf import settings\nfrom mezzanine.core.models import Displayable\nfrom mezzanine.pages.views import page\nfrom mezzanine.utils.cache import add_cache_bypass\nfrom mezzanine.utils.urls import home_slug\nfrom mezzanine.utils.views import paginate, render, set_cookie, is_spam\nfrom cities.models import Country, Region, City\nfrom directory.models import TagCategory, Tag, Main\nfrom directory.forms import SearchNarrowForm, SubmitMainForm, RatingForm\n# imports required by initial_validation() and rating() below\nfrom json import dumps\nfrom django.contrib.messages import error\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\n\n\nclass MainListView(ListView):\n    \"\"\"\n    List View for Main objects (home page)\n    \"\"\"\n    model = Main\n    template_name = \"pages/main_list.html\"\n    paginate_by = 2\n\n    def get_context_data(self, **kwargs):\n        context = super(MainListView, self).get_context_data(**kwargs)\n        if self.request.session.get('selected_city'):\n            selected_city = self.request.session.get('selected_city')\n            context['selected_city'] = City.objects \\\n                .filter(name=selected_city)[:1].get()\n        context['search_narrow_form'] = SearchNarrowForm()\n        return context\n\nmain_list_view = MainListView.as_view()\n\n\nclass TagCategoryListView(ListView):\n    \"\"\"\n    List View for Tags\n    \"\"\"\n    model = TagCategory\n    template_name = \"pages/tag_list.html\"\n    paginate_by = 10\n\n    def get_context_data(self, **kwargs):\n        context = super(TagCategoryListView, self).get_context_data(**kwargs)\n        if self.request.session.get('selected_city'):\n            selected_city = self.request.session.get('selected_city')\n            context['selected_city'] = City.objects \\\n                .filter(name=selected_city)[:1].get()\n        context['search_narrow_form'] = SearchNarrowForm()\n        categories_qs = TagCategory.objects.order_by('title')\n        # put categories, tags and counters in context\n        categories = []\n        for category in categories_qs:\n            tags_dict = {}\n            tags_objects = Tag.objects.filter(category=category).order_by(\n                'title')\n            tags_dict_count = {}\n            for tag in tags_objects:\n                tags_count = Main.objects.filter(tag=tag).count()\n                tags_dict_count = {tag: tags_count}\n            tags_dict = {\n                'category': category, 'tags': tags_dict_count\n            }\n            categories.append(tags_dict)\n        context['categories'] = categories\n        return context\n\ntag_category_list_view = TagCategoryListView.as_view()\n\n\nclass TagDetailView(DetailView):\n    \"\"\"\n    Detail View for Tags\n    \"\"\"\n    model = Tag\n    template_name = \"pages/tag.html\"\n\n    def get_context_data(self, **kwargs):\n        context = super(TagDetailView, self).get_context_data(**kwargs)\n        if self.request.session.get('selected_city'):\n            selected_city = self.request.session.get('selected_city')\n            context['selected_city'] = City.objects \\\n                .filter(name=selected_city)[:1].get()\n            context['main_list'] = Main.objects.filter(\n                address__city__name=selected_city, tag=self.object)\n        else:\n            context['main_list'] = Main.objects.filter(tag=self.object)\n        context['search_narrow_form'] = SearchNarrowForm()\n        return context\n\ntag_detail_view = TagDetailView.as_view()\n\n\n@login_required\ndef submit_main(request):\n    \"\"\"\n    View for submitting new Main objects\n    \"\"\"\n    selected_city = None\n    # check if selected city in session\n    if request.session.get('selected_city'):\n        selected_city = request.session.get('selected_city')\n        selected_city = City.objects \\\n            .filter(name=selected_city)[:1].get()\n    search_narrow_form = SearchNarrowForm()\n    form = SubmitMainForm(\n        request.POST or None, request.FILES or None)\n    if request.method == 'POST':\n        if form.is_valid():\n            title = form.cleaned_data['title']\n            description = form.cleaned_data['description']\n            address = form.cleaned_data['address']\n            image = form.cleaned_data['image']\n            new_main = Main(title=title, description=description,\n                            gen_description=False,\n                            address=address, image=image)\n            new_main.save()\n            return HttpResponseRedirect('/')\n    return render(request, 'includes/submit_main.html',\\\n        {'form': form, 'selected_city': selected_city,\\\n        'search_narrow_form': search_narrow_form})\n\n\ndef json_countries(request):\n    \"\"\"\n    Json view for Countries search\n    \"\"\"\n    q = 
request.GET.get('q')\n current_lang = get_language()[0:2]\n results = Country.objects.filter(alt_names__language=current_lang)\\\n .order_by('name')\n if q:\n results = results.filter(alt_names__name__icontains=q)\n flat_results = list(results.values_list('id', 'alt_names__name'))\n results = [{'id': item[0], 'text': item[1]} for item in flat_results]\n return HttpResponse(\n json.dumps(results, ensure_ascii=False),\n content_type='application/json; charset=utf-8')\n\n\ndef json_regions(request):\n \"\"\"\n Json view for Regions search\n \"\"\"\n q = request.GET.get('q')\n current_lang = get_language()[0:2]\n results = Region.objects.filter(alt_names__language=current_lang)\\\n .order_by('name')\n if q:\n results = results.filter(alt_names__name__icontains=q)\n flat_results = list(results.values_list('id', 'alt_names__name'))\n results = [{'id': item[0], 'text': item[1]} for item in flat_results]\n return HttpResponse(\n json.dumps(results, ensure_ascii=False),\n content_type='application/json; charset=utf-8')\n\n\ndef json_cities(request):\n \"\"\"\n Json view for Cities search\n \"\"\"\n q = request.GET.get('q')\n current_lang = get_language()[0:2]\n results = City.objects.filter(alt_names__language=current_lang)\\\n .order_by('name')\n if q:\n results = results.filter(alt_names__name__icontains=q)\n flat_results = list(results.values_list('id', 'alt_names__name'))\n results = [{'id': item[0], 'text': item[1]} for item in flat_results]\n return HttpResponse(\n json.dumps(results, ensure_ascii=False),\n content_type='application/json; charset=utf-8')\n\n\ndef search(request, template=\"search_results.html\", extra_context=None):\n \"\"\"\n Search view. Overrides Mezzanine's search, adding filters by Country,\n Region, City and writing the City to session if selected.\n \"\"\"\n query = request.GET.get(\"q\", \"\")\n page = request.GET.get(\"page\", 1)\n per_page = settings.SEARCH_PER_PAGE\n max_paging_links = settings.MAX_PAGING_LINKS\n country = request.GET.get(\"country\", \"\") or None\n region = request.GET.get(\"region\", \"\") or None\n city = request.GET.get(\"city\", \"\") or None\n if city:\n selected_city = City.objects.filter(id=city)[:1].get()\n request.session['selected_city'] = selected_city.name\n try:\n parts = request.GET.get(\"type\", \"\").split(\".\", 1)\n search_model = apps.get_model(*parts)\n search_model.objects.search # Attribute check\n except (ValueError, TypeError, LookupError, AttributeError):\n search_model = Displayable\n search_type = _(\"Everything\")\n else:\n search_type = search_model._meta.verbose_name_plural.capitalize()\n # default search results\n results = search_model.objects.search(query, for_user=request.user)\n # new filters, creating new list of results\n results_filtered = []\n if city or region or country:\n for i in results:\n # special filter of searching in Tags\n if isinstance(i, Tag):\n if (city and Main.objects.filter(address__city=city, tag__title=i.title).count() > 0) or\\\n (region and Main.objects.filter(address__city__region=region, tag__title=i.title).count() > 0) or\\\n (country and Main.objects.filter(address__city__country=country, tag__title=i.title).count() > 0):\n results_filtered.append(i)\n continue\n if city and i.address.filter(city__id=city).count() > 0:\n results_filtered.append(i)\n if i not in results_filtered and region and \\\n i.address.filter(city__region__id=region).count() > 0:\n results_filtered.append(i)\n if i not in results_filtered and country and \\\n i.address.filter(city__country__id=country).count() > 
0:\n results_filtered.append(i)\n else:\n results_filtered = results\n paginated = paginate(results_filtered, page, per_page, max_paging_links)\n # put new list iof result in context\n context = {\"query\": query, \"results\": paginated,\n \"search_type\": search_type}\n context.update(extra_context or {})\n # add selected city to context\n if request.session.get('selected_city'):\n selected_city = request.session.get('selected_city')\n context['selected_city'] = City.objects \\\n .filter(name=selected_city)[:1].get()\n # add search form to context\n context['search_narrow_form'] = SearchNarrowForm()\n return render(request, template, context)\n\n\ndef initial_validation(request, prefix):\n \"\"\"\n Returns the related model instance and post data to use in the\n comment/rating views below.\n Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED``\n setting. If this is ``True`` and the user is unauthenticated, we\n store their post data in their session, and redirect to login with\n the view's url (also defined by the prefix arg) as the ``next``\n param. We can then check the session data once they log in,\n and complete the action authenticated.\n On successful post, we pass the related object and post data back,\n which may have come from the session, for each of the comments and\n ratings view functions to deal with as needed.\n \"\"\"\n post_data = request.POST\n login_required_setting_name = prefix.upper() + \"S_ACCOUNT_REQUIRED\"\n posted_session_key = \"unauthenticated_\" + prefix\n redirect_url = \"\"\n if getattr(settings, login_required_setting_name, False):\n if not request.user.is_authenticated():\n request.session[posted_session_key] = request.POST\n error(request, _(\"You must be logged in. Please log in or \"\n \"sign up to complete this action.\"))\n redirect_url = \"%s?next=%s\" % (settings.LOGIN_URL, reverse(prefix))\n elif posted_session_key in request.session:\n post_data = request.session.pop(posted_session_key)\n if not redirect_url:\n model_data = post_data.get(\"content_type\", \"\").split(\".\", 1)\n if len(model_data) != 2:\n return HttpResponseBadRequest()\n try:\n model = apps.get_model(*model_data)\n obj = model.objects.get(id=post_data.get(\"object_pk\", None))\n except (TypeError, ObjectDoesNotExist, LookupError):\n redirect_url = \"/\"\n if redirect_url:\n if request.is_ajax():\n return HttpResponse(dumps({\"location\": redirect_url}))\n else:\n return redirect(redirect_url)\n return obj, post_data\n\n\ndef rating(request):\n \"\"\"\n Overrides Mezzanine's rating view.\n \"\"\"\n response = initial_validation(request, \"rating\")\n if isinstance(response, HttpResponse):\n return response\n obj, post_data = response\n url = add_cache_bypass(obj.get_absolute_url().split(\"#\")[0])\n response = redirect(url + \"#rating-%s\" % obj.id)\n rating_form = RatingForm(request, obj, post_data)\n if rating_form.is_valid():\n rating_form.save()\n if request.is_ajax():\n # Reload the object and return the rating fields as json.\n obj = obj.__class__.objects.get(id=obj.id)\n rating_name = obj.get_ratingfield_name()\n json = {}\n for f in (\"average\", \"count\", \"sum\"):\n json[\"rating_\" + f] = getattr(obj, \"%s_%s\" % (rating_name, f))\n response = HttpResponse(dumps(json))\n if rating_form.undoing:\n ratings = set(rating_form.previous) ^ set([rating_form.current])\n else:\n ratings = rating_form.previous + [rating_form.current]\n set_cookie(response, \"mezzanine-rating\", \",\".join(ratings))\n return response\n\ndef directory_page(request, slug, 
template=u\"pages/page.html\", extra_context=None):\n \"\"\"\n Overrides Mezzanine's page view, adding search form in context.\n \"\"\"\n included_search_form = {}\n included_search_form['search_narrow_form'] = SearchNarrowForm()\n return page(request, slug, template=u\"pages/page.html\", extra_context=included_search_form)\n","sub_path":"directory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"107466740","text":"#\n# Arg parser factory class\n# Probably not needed at all...\n#\nfrom argparse import ArgumentParser\n\nclass ArgParserFactory:\n \"\"\" Class for creating an Arg Parser \"\"\"\n\n @staticmethod\n def create_arg_parser():\n \"\"\" creates a CMD line argument parser with possible options \"\"\"\n parser = ArgumentParser(description='Replaces Header section of Amgen standard .SAS files.')\n #group = parser.add_mutually_exclusive_group(required=True)\n\n parser.add_argument('-d', '--dir_file',\n help='Path of the [d]irectory paths .txt file, containing the paths to that need' +\n 'headers to be replaced',\n required=True)\n\n parser.add_argument('-r', '--replacement_file',\n help='Path of the .txt file containing the text that is used as the replacement content',\n required=True)\n\n parser.add_argument('-v', '--version_history',\n help='Wipe the [v]ersion history present in the header.',\n action='store_true')\n\n return parser\n","sub_path":"header_replace_object_oriented/src/ArgParserFactory.py","file_name":"ArgParserFactory.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"643447917","text":"from WindPy import *\nimport pandas as pd\nimport numpy as np\nfrom time import sleep\nimport os\nimport csv\nimport mysql.connector\n\n\npd.set_option('expand_frame_repr', False)\nclass NumpyMySQLConverter(mysql.connector.conversion.MySQLConverter):\n \"\"\" A mysql.connector Converter that handles Numpy types \"\"\"\n\n def _float32_to_mysql(self, value):\n return float(value)\n\n def _float64_to_mysql(self, value):\n return float(value)\n\n def _int32_to_mysql(self, value):\n return int(value)\n\n def _int64_to_mysql(self, value):\n return int(value)\n\n\nconfig = {\n 'user': 'wudilianghua',\n 'host': 'east2-mysql-instance1.cwj25hshjcl1.us-east-2.rds.amazonaws.com',\n 'password': 'nbwind123!',\n 'database': 'QuantDB'\n}\n\ndef calTime(original_datetime, delta):\n return (datetime.strptime(original_datetime, '%Y-%m-%d') + timedelta(days=delta)).strftime('%Y-%m-%d')\n\ndef calTime1(original_datetime, delta):\n return (datetime.strptime(original_datetime, '%Y-%m-%d %H-%M') + timedelta(hours=delta)).strftime('%Y-%m-%d %H-%M')\n\ndef getAllStock():\n allStock = w.wset(\"sectorconstituent\", \"sectorid=a001010100000000;field=wind_code\")\n fm = pd.DataFrame(allStock.Data, index=allStock.Fields)\n fm = fm.T # Transpose index and columns\n code_list = fm['wind_code'].values\n return code_list\n # return parseStock(code_list)\ndef parseStock(code_list):\n codes = ''\n ct = 1\n for code in code_list:\n if ct<=900:\n codes += code\n codes += ','\n ct = ct+1\n codes = codes[:len(codes) - 1]\n return codes\n\ndef conWSQData(indata1):\n fm = pd.DataFrame(indata1.Data, index=indata1.Fields, columns=indata1.Codes)\n fm = fm.T # Transpose index and columns\n fm['code'] = fm.index\n fm['datetime'] = indata1.Times[0]\n return fm\ndef getStockCategyMap(path):\n stockCategory = 
{}\n for root, dirs, files in os.walk(path):\n for file in files:\n # print (file)\n table = file.split(\".\")[0]\n category = table.replace(\"-\", '_')\n with open(path + file) as csvDataFile:\n\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n stock_code = row[1]\n stockCategory.setdefault(stock_code, []).append(category)\n return stockCategory\n\nclass BanKuaiObj(object):\n zdf = 0\n zhanbi = 0\n code = \"\"\n\n\n\ndef SortByZhanbiasc(bankuaiList):\n return sorted(bankuaiList, key=lambda x: x.zhanbi, reverse=False)\n########################################################\ndef main():\n dir = \"C:/KeLiQuant/WindCategory/\"\n import logging\n logging.basicConfig(filename='Bankuai.log', level=logging.DEBUG)\n stockCategory = getStockCategyMap(dir)\n\n ##############################################\n w.start()\n codeList = getAllStock()\n print (len(codeList))\n w.stop()\n # codeLists = []\n # codeLists.append(codeList[:500])\n # codeLists.append(codeList[500:1000])\n # codeLists.append(codeList[1000:1500])\n # codeLists.append(codeList[1500:2000])\n # codeLists.append(codeList[2000:2500])\n # codeLists.append(codeList[2500:3000])\n # codeLists.append(codeList[3000:])\n\n codeLists = []\n\n eachListLength = 200\n for i in range(0, len(codeList), eachListLength):\n if i + eachListLength > len(codeList):\n codeLists.append(codeList[i:])\n else:\n codeLists.append(codeList[i:i + eachListLength])\n\n\n\n # conn = mysql.connector.connect(**config)\n # conn.set_converter_class(NumpyMySQLConverter)\n #\n # cur = conn.cursor()\n while (1):\n\n weekno = datetime.today().weekday()\n if weekno in [0, 1, 2, 3, 4]:\n curTime = datetime.today().strftime('%H-%M-%S')\n logging.debug(curTime)\n if (curTime >= '09-35-00' and curTime <= '09-35-59') or (curTime >= '10-00-00' and curTime <= '10-00-59')or (curTime >= '10-30-00' and curTime <= '10-30-59') \\\n or (curTime >= '11-00-00' and curTime <= '11-00-59')or (curTime >= '11-30-00' and curTime <= '11-30-59')or (curTime >= '13-00-00' and curTime <= '13-00-59') \\\n or (curTime >= '13-30-00' and curTime <= '13-30-59')or (curTime >= '14-00-00' and curTime <= '14-00-59')or (curTime >= '14-30-00' and curTime <= '14-30-59') \\\n or (curTime >= '15-00-00' and curTime <= '15-00-59'):\n w.start()\n conn = mysql.connector.connect(**config)\n conn.set_converter_class(NumpyMySQLConverter)\n\n\n categoryData = {}\n all_vol = 0\n curTime = datetime.today().strftime('%Y-%m-%d %H-%M')\n logging.debug(\"BanKuai--\" + curTime + \" is in processing\")\n\n for code_list in codeLists:\n parsedStocks = parseStock(code_list)\n data = conWSQData(w.wsq(parsedStocks, \"rt_vol,rt_pct_chg,rt_mkt_cap,rt_float_mkt_cap,rt_insti_activebuy_amt,rt_amt\"))\n # print (data)\n inserted = []\n ct = 1\n for row in data.itertuples():\n\n stock = row[0]\n if stock not in stockCategory:\n continue\n rt_vol = row[1]\n rt_pct_chg = row[2] * 100\n rt_mkt_cap = row[3]\n rt_float_mkt_cap = row[4]\n rt_insti_activebuy_amt = row[5]\n rt_amt = row[6]\n # zhan_bi = rt_vol/rt_float_mkt_cap\n inserted.append((curTime, stock, rt_pct_chg, rt_vol,rt_insti_activebuy_amt,rt_amt))\n\n all_vol = all_vol + rt_vol\n\n categories = stockCategory.get(stock)\n for category in categories:\n if category not in categoryData:\n categoryData[category] = [0,0,0,0,0] #rt_pct_chg*rt_float_mkt_cap(0), rt_float_mkt_cap(1), rt_pct_chg*rt_mkt_cap(2),rt_mkt_cap(3),rt_vol(4)\n catList = categoryData[category]\n catList[0] = catList[0] + (rt_pct_chg*rt_float_mkt_cap)\n catList[1] = catList[1] + rt_float_mkt_cap\n 
catList[2] = catList[2] + (rt_pct_chg * rt_mkt_cap)\n                            catList[3] = catList[3] + rt_mkt_cap\n                            catList[4] = catList[4] + rt_vol\n\n                        ct = ct+1\n                    # print (inserted)\n                    try:\n                        cur = conn.cursor()\n                    except:\n                        logging.debug(\"connection is lost 1\")\n                        conn = mysql.connector.connect(**config)\n                        conn.set_converter_class(NumpyMySQLConverter)\n                        cur = conn.cursor()\n                    query = \"\"\"INSERT INTO Stock_RT (DATA_DATETIME,STOCK_CODE,ZDF,RT_VOL,rt_insti_activebuy_amt,RT_AMT) VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\n                    try:\n                        cur.executemany(query, inserted)\n                    except:\n                        # conn.commit()\n                        logging.debug('exception in loading')\n                        continue\n\n                    # print (str(ct),\" rows into stock_RT table\")\n                    conn.commit()\n\n                bankuaiList = []\n                ct=1\n                inserted = []\n                for category in categoryData.keys():\n                    ct = ct+1\n\n                    dataList = categoryData.get(category)\n                    zdf = dataList[0]/dataList[1]\n                    zhan_bi = dataList[4]/all_vol\n                    inserted.append((curTime, category, zdf, zhan_bi,dataList[1]))\n\n                    b = BanKuaiObj()\n                    b.code = category\n                    b.zhanbi = zhan_bi\n                    bankuaiList.append(b)\n                query = \"\"\"INSERT INTO BanKuai_RT (DATA_DATETIME,BanKuai,ZDF,Volume_Zhan_Bi,RT_FLOAT_MKT_CAP) VALUES (%s,%s,%s,%s,%s)\"\"\"\n                try:\n                    cur.executemany(query,inserted)\n                except:\n                    # conn.commit()\n                    logging.debug('exception in loading bankuai')\n                    continue\n                conn.commit()\n                # print(str(ct), \" rows into BanKuai_RT table\")\n\n                zhanbiSortedList = SortByZhanbiasc(bankuaiList)\n                rank = 0\n                ct = 1\n                inserted = []\n                for bankuai in zhanbiSortedList:\n                    ct = ct+1\n                    rank = rank + 1\n                    inserted.append((curTime, bankuai.code, rank))\n                query = \"\"\"INSERT INTO BanKuai_Rank (DATA_DATETIME,BanKuai,Volume_Zhan_Bi) VALUES (%s,%s,%s)\"\"\"\n                try:\n                    cur.executemany(query, inserted)\n                except:\n                    # conn.commit()\n                    logging.debug('exception in loading bankuai ranking')\n                    continue\n                # print(str(ct), \" rows into BanKuai_Rank table\")\n                conn.commit()\n                conn.close()\n                w.stop()\n            sleep(60)\n            # conn.close()\n    logging.debug (\"done processing\")\n\n\nif __name__ == \"__main__\": main()","sub_path":"C009_realtime_ranking_v4.py","file_name":"C009_realtime_ranking_v4.py","file_ext":"py","file_size_in_byte":9417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"37288587","text":"\nfrom flask import Flask\nimport os\n\n\ndef create_app(test_config=None):\n    # create the application instance\n    # print('__name__:', __name__)\n    app = Flask(__name__, instance_relative_config=True)\n    app.config.from_mapping(SECRET_KEY='abc',\n                            DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'))\n    # load configuration from the instance directory\n    if test_config is None:\n        app.config.from_pyfile('config.py', silent=True)\n    else:\n        app.config.from_mapping(test_config)  # fixed typo: was form_mapping, which raises AttributeError\n\n    # make sure the instance directory exists\n    try:\n        # print('instance.app:', app.instance_path)\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    @app.route('/hello/')  # the trailing slash is required here; without it the route returns a 404 error\n    def hello():\n        return \"hello world!\"\n\n    from . import db\n    db.init_app(app)\n\n    from . import auth\n    app.register_blueprint(auth.bp)\n\n    from . 
import blog\n app.register_blueprint(blog.bp)\n app.add_url_rule('/', endpoint='index')\n\n print(app.url_map)\n\n return app\n\n#\n# if __name__ == '__main__':\n# app = create_app()\n# app.run()\n","sub_path":"build/lib/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"435837809","text":"import random\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom .rnncells import StackedLSTMCell, StackedGRUCell\nfrom .beam_search import Beam\nfrom .feedforward import FeedForward\nfrom utils import to_var, SOS_ID, UNK_ID, EOS_ID\nimport math\nimport pdb\nfrom queue import PriorityQueue\nimport operator\nimport numpy\n\n\n\nclass BeamSearchNode(object):\n def __init__(self, hiddenstate, previousNode, wordId, logProb, length):\n '''\n :param hiddenstate:\n :param previousNode:\n :param wordId:\n :param logProb:\n :param length:\n '''\n self.h = hiddenstate\n self.prevNode = previousNode\n self.wordid = wordId\n self.logp = logProb\n self.leng = length\n\n def eval(self, alpha=1.0):\n reward = 0\n # Add here a function for shaping a reward\n\n return self.logp / float(self.leng - 1 + 1e-6) + alpha * reward\n\nclass BaseRNNDecoder(nn.Module):\n def __init__(self):\n \"\"\"Base Decoder Class\"\"\"\n super(BaseRNNDecoder, self).__init__()\n\n @property\n def use_lstm(self):\n return isinstance(self.rnncell, StackedLSTMCell)\n\n def init_token(self, batch_size, SOS_ID=SOS_ID):\n \"\"\"Get Variable of Index (batch_size)\"\"\"\n x = to_var(torch.LongTensor([SOS_ID] * batch_size))\n return x\n\n def init_h(self, batch_size=None, zero=True, hidden=None):\n \"\"\"Return RNN initial state\"\"\"\n if hidden is not None:\n return hidden\n\n if self.use_lstm:\n # (h, c)\n return (to_var(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size)),\n to_var(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size)))\n else:\n # h\n return to_var(torch.zeros(self.num_layers,\n batch_size,\n self.hidden_size))\n\n def batch_size(self, inputs=None, h=None):\n \"\"\"\n inputs: [batch_size, seq_len]\n h: [num_layers, batch_size, hidden_size] (RNN/GRU)\n h_c: [2, num_layers, batch_size, hidden_size] (LSTMCell)\n \"\"\"\n if inputs is not None:\n batch_size = inputs.size(0)\n return batch_size\n\n else:\n if self.use_lstm:\n batch_size = h[0].size(1)\n else:\n batch_size = h.size(1)\n return batch_size\n\n def decode(self, out):\n \"\"\"\n Args:\n out: unnormalized word distribution [batch_size, vocab_size]\n Return:\n x: word_index [batch_size]\n \"\"\"\n\n # Sample next word from multinomial word distribution\n if self.sample:\n # x: [batch_size] - word index (next input)\n x = torch.multinomial(self.softmax(out / self.temperature), 1).view(-1)\n\n # Greedy sampling\n else:\n # x: [batch_size] - word index (next input)\n _, x = out.max(dim=1)\n return x\n\n def forward(self):\n \"\"\"Base forward function to inherit\"\"\"\n raise NotImplementedError\n\n def forward_step(self):\n \"\"\"Run RNN single step\"\"\"\n raise NotImplementedError\n\n def embed(self, x):\n \"\"\"word index: [batch_size] => word vectors: [batch_size, hidden_size]\"\"\"\n\n if self.training and self.word_drop > 0.0:\n if random.random() < self.word_drop:\n embed = self.embedding(to_var(x.data.new([UNK_ID] * x.size(0))))\n else:\n embed = self.embedding(x)\n else:\n embed = self.embedding(x)\n\n return embed\n'''\n def beam_decode(self,\n init_h=None,\n encoder_outputs=None, 
input_valid_length=None,\n decode=False):\n \"\"\"\n Args:\n encoder_outputs (Variable, FloatTensor): [batch_size, source_length, hidden_size]\n input_valid_length (Variable, LongTensor): [batch_size] (optional)\n init_h (variable, FloatTensor): [batch_size, hidden_size] (optional)\n Return:\n out : [batch_size, seq_len]\n \"\"\"\n batch_size = self.batch_size(h=init_h)\n\n # [batch_size x beam_size]\n x = self.init_token(batch_size * self.beam_size, SOS_ID)\n\n # [num_layers, batch_size x beam_size, hidden_size]\n h = self.init_h(batch_size, hidden=init_h).repeat(1, self.beam_size, 1)\n\n # batch_position [batch_size]\n # [0, beam_size, beam_size * 2, .., beam_size * (batch_size-1)]\n # Points where batch starts in [batch_size x beam_size] tensors\n # Ex. position_idx[5]: when 5-th batch starts\n batch_position = to_var(torch.arange(0, batch_size).long() * self.beam_size)\n\n # Initialize scores of sequence\n # [batch_size x beam_size]\n # Ex. batch_size: 5, beam_size: 3\n # [0, -inf, -inf, 0, -inf, -inf, 0, -inf, -inf, 0, -inf, -inf, 0, -inf, -inf]\n score = torch.ones(batch_size * self.beam_size) * -float('inf')\n score.index_fill_(0, torch.arange(0, batch_size).long() * self.beam_size, 0.0)\n score = to_var(score)\n\n # Initialize Beam that stores decisions for backtracking\n beam = Beam(\n batch_size,\n self.hidden_size,\n self.vocab_size,\n self.beam_size,\n self.max_unroll,\n batch_position)\n\n for i in range(self.max_unroll):\n\n # x: [batch_size x beam_size]; (token index)\n # =>\n # out: [batch_size x beam_size, vocab_size]\n # h: [num_layers, batch_size x beam_size, hidden_size]\n out, h = self.forward_step(x, h,\n encoder_outputs=encoder_outputs,\n input_valid_length=input_valid_length)\n # log_prob: [batch_size x beam_size, vocab_size]\n log_prob = F.log_softmax(out, dim=1)\n\n # [batch_size x beam_size]\n # => [batch_size x beam_size, vocab_size]\n score = score.view(-1, 1) + log_prob\n\n # Select `beam size` transitions out of `vocab size` combinations\n\n # [batch_size x beam_size, vocab_size]\n # => [batch_size, beam_size x vocab_size]\n # Cutoff and retain candidates with top-k scores\n # score: [batch_size, beam_size]\n # top_k_idx: [batch_size, beam_size]\n # each element of top_k_idx [0 ~ beam x vocab)\n\n score, top_k_idx = score.view(batch_size, -1).topk(self.beam_size, dim=1)\n\n\n # Get token ids with remainder after dividing by top_k_idx\n # Each element is among [0, vocab_size)\n # Ex. 
Index of token 3 in beam 4\n # (4 * vocab size) + 3 => 3\n # x: [batch_size x beam_size]\n x = (top_k_idx % self.vocab_size).view(-1)\n\n # top-k-pointer [batch_size x beam_size]\n # Points top-k beam that scored best at current step\n # Later used as back-pointer at backtracking\n # Each element is beam index: 0 ~ beam_size\n # + position index: 0 ~ beam_size x (batch_size-1)\n beam_idx = top_k_idx / self.vocab_size # [batch_size, beam_size]\n top_k_pointer = (beam_idx + batch_position.unsqueeze(1)).view(-1)\n\n\n # Select next h (size doesn't change)\n # [num_layers, batch_size * beam_size, hidden_size]\n h = h.index_select(1, top_k_pointer)\n\n # Update sequence scores at beam\n beam.update(score.clone(), top_k_pointer, x) # , h)\n\n # Erase scores for EOS so that they are not expanded\n # [batch_size, beam_size]\n eos_idx = x.data.eq(EOS_ID).view(batch_size, self.beam_size)\n if eos_idx.nonzero().dim() > 0:\n score.data.masked_fill_(eos_idx, -float('inf'))\n\n # prediction ([batch, k, max_unroll])\n # A list of Tensors containing predicted sequence\n # final_score [batch, k]\n # A list containing the final scores for all top-k sequences\n # length [batch, k]\n # A list specifying the length of each sequence in the top-k candidates\n # prediction, final_score, length = beam.backtrack()\n prediction, final_score, length = beam.backtrack()\n\n return prediction, final_score, length\n\n'''\nclass DecoderRNN(BaseRNNDecoder):\n def __init__(self, vocab_size, embedding_size,\n hidden_size, rnncell=StackedGRUCell, num_layers=1,\n dropout=0.0, word_drop=0.0,\n max_unroll=30, sample=True, temperature=1.0, beam_size=1):\n super(DecoderRNN, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.dropout = dropout\n self.temperature = temperature\n self.word_drop = word_drop\n self.max_unroll = max_unroll\n self.sample = sample\n self.beam_size = beam_size\n\n self.embedding = nn.Embedding(vocab_size, embedding_size)\n\n self.rnncell = rnncell(num_layers,\n embedding_size,\n hidden_size,\n dropout)\n self.out = nn.Linear(hidden_size, vocab_size)\n self.softmax = nn.Softmax(dim=1)\n\n def forward_step(self, x, h,\n encoder_outputs=None,\n input_valid_length=None):\n \"\"\"\n Single RNN Step\n 1. Input Embedding (vocab_size => hidden_size)\n 2. RNN Step (hidden_size => hidden_size)\n 3. 
Output Projection (hidden_size => vocab size)\n\n Args:\n x: [batch_size]\n h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n\n Return:\n out: [batch_size,vocab_size] (Unnormalized word distribution)\n h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n \"\"\"\n # x: [batch_size] => [batch_size, hidden_size]\n x = self.embed(x)\n # last_h: [batch_size, hidden_size] (h from Top RNN layer)\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n last_h, h = self.rnncell(x, h)\n\n if self.use_lstm:\n # last_h_c: [2, batch_size, hidden_size] (h from Top RNN layer)\n # h_c: [2, num_layers, batch_size, hidden_size] (h and c from all layers)\n last_h = last_h[0]\n\n # Unormalized word distribution\n # out: [batch_size, vocab_size]\n out = self.out(last_h)\n return out, h\n\n def forward(self, inputs, init_h=None, encoder_outputs=None, input_valid_length=None,\n decode=False, turn = None):\n \"\"\"\n Train (decode=False)\n Args:\n inputs (Variable, LongTensor): [batch_size, seq_len]\n init_h: (Variable, FloatTensor): [num_layers, batch_size, hidden_size]\n Return:\n out : [batch_size, seq_len, vocab_size]\n Test (decode=True)\n Args:\n inputs: None\n init_h: (Variable, FloatTensor): [num_layers, batch_size, hidden_size]\n Return:\n out : [batch_size, seq_len]\n \"\"\"\n batch_size = self.batch_size(inputs, init_h)\n\n # x: [batch_size]\n x = self.init_token(batch_size, SOS_ID)\n\n # h: [num_layers, batch_size, hidden_size]\n h = self.init_h(batch_size, hidden=init_h)\n\n\n if not decode:\n out_list = []\n seq_len = inputs.size(2)\n for i in range(seq_len):\n\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.forward_step(x, h)\n\n out_list.append(out)\n x = inputs[:, turn, i]\n\n # [batch_size, max_target_len, vocab_size]\n return torch.stack(out_list, dim=1)\n\n elif decode == 'F1':\n x_list = []\n for i in range(self.max_unroll):\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.decode_in_beam(x, h)\n log_prob, indexes = torch.topk(out, 1)\n decoded_t = torch.transpose(indexes, 0, 1)[0]\n # out: [batch_size, vocab_size]\n # => x: [batch_size]\n x_list.append(decoded_t)\n x = inputs[:, turn, i]\n return torch.stack(x_list, dim=1)\n\n elif decode == 'beam':\n x_list = []\n for i in range(self.max_unroll):\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.decode_in_beam(x, h)\n log_prob, indexes = torch.topk(out, 1)\n decoded_t = torch.transpose(indexes, 0, 1)[0]\n # out: [batch_size, vocab_size]\n # => x: [batch_size]\n x_list.append(decoded_t)\n x = inputs[:, turn, i]\n return torch.stack(x_list, dim=1)\n\n else:\n x_list = []\n for i in range(self.max_unroll):\n\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.forward_step(x, h)\n\n # out: [batch_size, vocab_size]\n # => x: [batch_size]\n x = self.decode(out)\n x_list.append(x)\n\n # [batch_size, max_target_len]\n return torch.stack(x_list, dim=1)\n\n def decode_in_beam(self, x, h, encoder_outputs=None):\n\n\n # x: [batch_size]\n # =>\n # out: [batch_size, vocab_size]\n # h: [num_layers, batch_size, hidden_size] (h and c from all layers)\n out, h = self.forward_step(x, h)\n out = F.log_softmax(out, dim=1)\n\n 
return out, h\n\n def beam_decode(self, inputs, decoder_hiddens=None, turn=None, encoder_outputs=None):\n \"\"\"\n :param target_tensor: target indexes tensor of shape [B, T] where B is the batch size and T is the maximum length of the output sentence\n :param decoder_hidden: input tensor of shape [1, B, H] for start of the decoding\n :param encoder_outputs: if you are using attention mechanism you can pass encoder outputs, [T, B, H] where T is the maximum length of input sentence\n :return: decoded_batch\n \"\"\"\n\n beam_width = self.beam_size\n topk = self.beam_size # how many sentences to generate\n decoded_batch = []\n\n # decoding goes sentence by sentence\n seq_len = inputs.size(0)\n for idx in range(seq_len):\n if isinstance(decoder_hiddens, tuple): # LSTM case\n decoder_hidden = (\n decoder_hiddens[0][:, idx, :].unsqueeze(0), decoder_hiddens[1][:, idx, :].unsqueeze(0))\n else:\n decoder_hidden = decoder_hiddens[:, idx, :].unsqueeze(0)\n \"\"\"\n encoder_output = encoder_outputs[:, idx, :].unsqueeze(1)\n \"\"\"\n\n # Start with the start of the sentence token\n decoder_input = to_var(torch.LongTensor([[SOS_ID]]))\n\n # Number of sentences to generate\n endnodes = []\n number_required = topk\n\n # starting node - hidden vector, previous node, word id, logp, length\n node = BeamSearchNode(decoder_hidden, None, decoder_input, 0, 1)\n nodes = PriorityQueue()\n\n # start the queue\n nodes.put((-node.eval(), node))\n qsize = 1\n # start beam search\n while True:\n # give up when decoding takes too long\n if qsize > 999999: break\n nextnodes = []\n # fetch the best node\n for _ in range(nodes.qsize()):\n score, n = nodes.get()\n decoder_input = to_var(n.wordid[0])\n\n decoder_hidden = n.h\n\n if (n.wordid.item() == EOS_ID and n.prevNode != None) or n.leng >= self.max_unroll:\n endnodes.append((score, n))\n # if we reached maximum # of sentences required\n if len(endnodes) >= number_required:\n break\n\n # decode for one step using decoder\n decoder_output, decoder_hidden = self.decode_in_beam(decoder_input, decoder_hidden) #, encoder_output)\n\n # PUT HERE REAL BEAM SEARCH OF TOP\n log_prob, indexes = torch.topk(decoder_output, beam_width)\n #nextnodes = []\n for new_k in range(beam_width):\n decoded_t = indexes[0][new_k].view(1, -1)\n log_p = log_prob[0][new_k].item()\n\n node = BeamSearchNode(decoder_hidden, n, decoded_t, n.logp + log_p, n.leng + 1)\n score = -node.eval()\n nextnodes.append((score, node))\n\n if len(endnodes) >= number_required:\n break\n nextnodes = sorted(nextnodes, key=operator.itemgetter(0), reverse=True)\n length = min(len(nextnodes), beam_width)\n for i in range(length):\n score, nn = nextnodes[i]\n nodes.put((score, nn))\n # increase qsize\n\n qsize += len(nextnodes) - 1\n\n # choose nbest paths, back trace them\n \"\"\"\n if len(endnodes) <= beam_width:\n for _ in range(beam_width - len(endnodes)):\n score, n = nodes.get()\n endnodes.append((score, node))\n \"\"\"\n\n utterances = []\n for score, n in sorted(endnodes, key=operator.itemgetter(0)):\n utterance = []\n utterance.append(n.wordid.cpu().numpy()[0][0])\n # back trace\n while n.prevNode != None:\n n = n.prevNode\n utterance.append(n.wordid.cpu().numpy()[0][0])\n\n utterance = utterance[::-1]\n utterances.append(utterance)\n\n final_utterance = utterances[0]\n if len(utterances[0]) < self.max_unroll:\n final_utterance += [3 for _ in range(self.max_unroll-len(utterances[0]))]\n decoded_batch.append(final_utterance)\n\n return decoded_batch\n'''\n","sub_path":"model/layers/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":19140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
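One caveat in `beam_decode` above: `PriorityQueue` entries of the form `(score, node)` fall back to comparing the node objects whenever two scores tie, and a plain beam-search node is not orderable, so Python 3 can raise `TypeError` mid-search. A common workaround, sketched here with a hypothetical `Node` class rather than the author's `BeamSearchNode`, adds a monotonic counter as a tie-breaker:

```python
import itertools
from queue import PriorityQueue

class Node:
    """Stand-in for BeamSearchNode; deliberately not orderable."""
    def __init__(self, word_id):
        self.word_id = word_id

nodes = PriorityQueue()
counter = itertools.count()  # strictly increasing, so Node objects are never compared

for score in [0.5, 0.5, 0.1]:  # two equal scores would break plain (score, node) tuples
    nodes.put((score, next(counter), Node(word_id=1)))

score, _, best = nodes.get()  # smallest score first: 0.1
```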
+{"seq_id":"302249251","text":"#!/usr/bin/python3\n# pamlsetup_physics.py by josh\n\nimport sys\nimport glob\nimport re\nimport subprocess\nimport os\nimport string\nimport shutil\n\njobheader = \"\"\"#!/bin/bash\n#$ -cwd # Use current working directory\n#$ -V # Verbose\n#$ -j y # Maximum output, inc errors\n#$ -r y # Condense error files into one\n#$ -l h_rt=24:0:0 # Request runtime (up to 240 hours)\n#$ -l h_vmem=4G # Request RAM per core\n#$ -m ea # Status emails\n\n\"\"\"\n\n\npamlopts = \"\"\"noisy = 0\nverbose = 1\nrunmode = 0\n\nseqtype = 1\nCodonFreq = 2\n* ndata = 10\n\nclock = 0\naaDist = 0\n\nmodel = 2\n\nNSsites = 2\n\nicode = 0\nMgene = 0\n\nfix_kappa = 0\nkappa = 2\nfix_omega = 0\nomega = 0.4\n\nfix_alpha = 1\nalpha = 0.\nMalpha = 0\nncatG = 3\n\ngetSE = 0\nRateAncestor = 1\n\nSmall_Diff = .5e-6\n* cleandata = 0 * remove sites with ambiguity data (1:yes, 0:no)?\n*fix_blength = -1 * 0: ignore, -1: random, 1: initial, 2: fixed\nmethod = 0\n\n\"\"\"\n\nprint(pamlopts)\n\nnullpamlopts = \"\"\"noisy = 0\nverbose = 1\nrunmode = 0\n\nseqtype = 1\nCodonFreq = 2\n* ndata = 10\n\nclock = 0\naaDist = 0\n\nmodel = 2\n\nNSsites = 2\n\nicode = 0\nMgene = 0\n\nfix_kappa = 0\nkappa = 2\nfix_omega = 1\nomega = 1\n\nfix_alpha = 1\nalpha = 0.\nMalpha = 0\nncatG = 3\n\ngetSE = 0\nRateAncestor = 1\n\nSmall_Diff = .5e-6\n* cleandata = 0 * remove sites with ambiguity data (1:yes, 0:no)?\n*fix_blength = -1 * 0: ignore, -1: random, 1: initial, 2: fixed\nmethod = 0\n\n\"\"\"\n\nrootdir = \"/home/hep/jpotter/phylogenetics/min35spp\"\noutdir = \"/mnt/lustre_0/data/jpotter/paml_fbs\"\nidpattern = re.compile(\"^>(\\S+)$\")\nseqpattern = re.compile(\"^[A-Z*-]+$\")\nsubprocpattern = re.compile(r\"^b\\'(\\d+)\\\\n\\'$\")\n\nif not os.path.isdir(outdir):\n os.makedirs(outdir)\n\ntreename = \"{}/rapid_35spp_100bs_20ML_bipartitions.tre\".format(rootdir)\n#treename = \"{}/phyllostomids.NEX\".format(rootdir)\ntreefile = open(treename, \"r\")\nblengthpattern1 = re.compile(r\"(\\))\\d+:\\d+\\.\\d+E-\\d+\")\nblengthpattern2 = re.compile(r\"(\\))\\d+:\\d+\\.\\d+\")\nblengthpattern3 = re.compile(r\":\\d+\\.\\d+E-\\d+|:\\d+\\.\\d+\")\nlabelpattern = re.compile (r\"\\[&!name=\\\"#(\\d+)\\\"\\]\")\n\ndef blength_remove(matchobj):\n print(matchobj.group(0))\n if matchobj.group(1) == \")\":\n print(matchobj.group(1))\n return \")\"\n else:\n return \"\"\n\ndef labelrpl(matchobj):\n print(matchobj.group(0))\n# outstr = \":\" + matchobj.group(1)\n outstr = \"\"\n return outstr\n \n\nfor line in treefile:\n print(line)\n editedline = blengthpattern1.sub(blength_remove,line)\n editedline = blengthpattern2.sub(blength_remove,editedline)\n editedline = blengthpattern3.sub(\"\",editedline)\n editedline = labelpattern.sub(labelrpl,editedline)\n print(editedline)\n print(editedline.count(\"(\"))\n print(editedline.count(\")\"))\n\nouttree = open(\"{}/mastertree.tre\".format(outdir), \"w\")\nouttree.write(editedline)\nouttree.close()\n\n\npruningscript = open(\"{}/batchprune.sh\".format(outdir), \"w\")\npruningscript.write(\"#!/bin/sh\\n\")\npruningscript.write(\"\\n\")\n\nfor alignment in glob.glob(\"{}/*_align_f.fas\".format(rootdir)):\n specieslist = []\n print(alignment)\n infile = open(alignment, \"r\")\n gene = os.path.basename(alignment).replace(\"_align_f.fas\",\"\")\n print(gene)\n pamldir = 
\"{}/{}_ALT\".format(outdir,gene)\n pamlnulldir = \"{}/{}_NULL\".format(outdir,gene)\n if not os.path.isdir(pamldir):\n os.makedirs(pamldir)\n if not os.path.isdir(pamlnulldir):\n os.makedirs(pamlnulldir)\n outfile = open(\"{}/{}_align.phy\".format(pamldir,gene), \"w\")\n grepcommand = \"\"\"grep -c \">\" {}\"\"\".format(alignment)\n grepcheck = str(subprocess.check_output(grepcommand, shell=True))\n taxacount = subprocpattern.match(grepcheck).group(1)\n print(taxacount)\n for linecount, line in enumerate(infile):\n if linecount == 0 and not idpattern.match(line):\n sys.exit(\"Error: First line is not an ID line\")\n if idpattern.match(line):\n species = idpattern.match(line).group(1)\n specieslist.append(species)\n sequence = infile.readline().rstrip()\n seqlen = str(len(sequence))\n print(seqlen)\n if linecount == 0:\n refseqlen = seqlen\n outfile.write(\"\\t\" + taxacount + \" \" + seqlen + \"\\n\")\n if not seqpattern.match(sequence):\n print(line)\n print(sequence)\n sys.exit(\"Error, sequence not in expected format\")\n if seqlen != refseqlen:\n sys.exit(\"Error, sequences are not the same length\")\n outfile.write(species + \" \" + sequence + \"\\n\")\n elif seqpattern.match(line):\n sys.exit(\"Error, sequence shouldn't match here\")\n else:\n sys.exit(\"Error, no match to line\")\n outfile.close()\n shutil.copy(\"{}/{}_align.phy\".format(pamldir,gene),\"{}/{}_align.phy\".format(pamlnulldir,gene))\n pamlctlfile = open(\"{}/codeml.ctl\".format(pamldir), \"w\")\n pamlctlfile.write(\"seqfile = {}/{}_align.phy\\n\".format(pamldir, gene))\n pamlctlfile.write(\"treefile = {}/{}_tree.tre\\n\\n\".format(pamldir, gene))\n pamlctlfile.write(\"outfile = {}/{}_out\\n\\n\".format(pamldir, gene))\n pamlctlfile.write(pamlopts)\n nullpamlctlfile = open(\"{}/codemlNULL.ctl\".format(pamlnulldir), \"w\")\n nullpamlctlfile.write(\"seqfile = {}/{}_align.phy\\n\".format(pamlnulldir, gene))\n nullpamlctlfile.write(\"treefile = {}/{}_tree.tre\\n\\n\".format(pamlnulldir, gene))\n nullpamlctlfile.write(\"outfile = {}/{}_outNULL\\n\\n\".format(pamlnulldir, gene))\n nullpamlctlfile.write(nullpamlopts)\n nullpamlctlfile.close()\n specieslist.sort()\n print(specieslist)\n print(len(specieslist))\n if len(specieslist) != int(taxacount):\n print(taxacount)\n sys.exit(\"Error, taxa counts do not match\")\n taxastr = \" \".join(specieslist)\n print(taxastr)\n listfile = open(\"{}/taxalist.txt\".format(pamldir), \"w\")\n listfile.write(taxastr + \"\\n\")\n for species in specieslist:\n listfile.write(species + \"\\n\")\n listfile.close()\n shutil.copy(\"{}/taxalist.txt\".format(pamldir),\"{}/taxalist.txt\".format(pamlnulldir))\n try:\n shutil.copy(\"{}/mastertree.tre\".format(outdir),\"{}/{}_tree_up.tre\".format(pamldir,gene))\n shutil.copy(\"{}/mastertree.tre\".format(outdir),\"{}/{}_tree_up.tre\".format(pamlnulldir,gene))\n print(\"Tree successfully copied\")\n except:\n sys.exit(\"Error\")\n pruningscript.write(\"echo \\\"{}\\\"\\n\".format(gene))\n pruningscript.write(\"/home/hep/jpotter/programs/newick-utils-1.6/src/nw_prune -v \")\n pruningscript.write(\"{}/{}_tree_up.tre \".format(pamldir,gene))\n pruningscript.write(taxastr + \" > {}/{}_tree_p.tre\\n\".format(pamldir,gene))\n pruningscript.write(\"/home/hep/jpotter/programs/newick-utils-1.6/src/nw_prune -v \")\n pruningscript.write(\"{}/{}_tree_up.tre \".format(pamlnulldir,gene))\n pruningscript.write(taxastr + \" > {}/{}_tree_p.tre\\n\\n\".format(pamlnulldir,gene))\n pamlscript = open(\"{}/{}_ALTpaml.sh\".format(pamldir,gene), \"w\")\n 
pamlscript.write(jobheader)\n pamlscript.write(\"\\n\\n\")\n pamlscript.write(\"/home/hep/jpotter/programs/paml4.8/bin/codeml \")\n pamlscript.write(\"codeml.ctl\")\n pamlscript.close()\n pamlnullscript = open(\"{}/{}_NULLpaml.sh\".format(pamlnulldir,gene), \"w\")\n pamlnullscript.write(jobheader)\n pamlnullscript.write(\"\\n\\n\")\n pamlnullscript.write(\"/home/hep/jpotter/programs/paml4.8/bin/codeml \")\n pamlnullscript.write(\"codemlNULL.ctl\")\n pamlnullscript.close()\n subprocess.call([\"chmod\",\"u+x\",\"{}/{}_ALTpaml.sh\".format(pamldir,gene)])\n subprocess.call([\"chmod\",\"u+x\",\"{}/{}_NULLpaml.sh\".format(pamlnulldir,gene)])\n\npruningscript.close()\nsubprocess.call([\"chmod\",\"u+x\",\"{}/batchprune.sh\".format(outdir)])\n\n\nquit()\n","sub_path":"pamlsetup_physics.py","file_name":"pamlsetup_physics.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"259445162","text":"def sumDigits(strs):\n \"\"\"strs is a string of characters; go through it and sum the digit characters.\"\"\"\n result = 0\n for s in strs:\n try:\n curI = int(s)\n result += curI\n except ValueError:\n pass\n return result\n\nr = sumDigits(\"a1dfdddd333333\")\nprint(str(r))\n\n\ndef findAnEvent(L):\n \"\"\"L is a list of integers; return the first even number in it, or raise an exception if there is none.\"\"\"\n firstEvent = -1\n for i in L:\n if i % 2 == 0:\n firstEvent = i\n break\n\n if firstEvent == -1:\n raise ValueError(\"not found\")\n return firstEvent\n\nfindAnEvent([1,3])\n","sub_path":"IntroductionToComputationAndProgrammingUsingPython/chapter7/except_cases.py","file_name":"except_cases.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"239730244","text":"import pickle\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom transformers import *\n\n\ndevice = 'cpu'\nif torch.cuda.is_available():\n device = 'cuda'\n\nSTART_TAG: str = '<START>'\nSTOP_TAG: str = '<STOP>'\n \n \nimport string\n\nchars = string.ascii_letters + string.digits + string.punctuation.strip()\n\nchar2idx = {\"\":0}\nfor c in chars:\n char2idx[c] = len(char2idx)\nidx2char = {v:k for k,v in char2idx.items()}\n\n\nclass BertTagger(torch.nn.Module):\n def __init__(self, bert_model_name, tag2idx, num_hidden_layers=2, max_sent_length=100, use_crf=True):\n super(BertTagger, self).__init__()\n \n if bert_model_name=='allenai/scibert_scivocab_uncased':\n self.bert = AutoModel.from_pretrained(bert_model_name)\n else:\n self.bert = BertModel.from_pretrained(bert_model_name, output_hidden_states=False, num_hidden_layers=num_hidden_layers, num_attention_heads=12)\n \n self.char_emb_dim = 50\n self.char_emb = torch.nn.Embedding(len(char2idx), self.char_emb_dim)\n self.conv1_padding = 0\n self.dilation = 1\n self.kernel_size = (3,1)\n self.stride = 1\n self.conv1 = torch.nn.Sequential(\n torch.nn.Conv1d(in_channels=1, out_channels=64, kernel_size=self.kernel_size,\n stride=self.stride, dilation=self.dilation,\n bias=True, padding=self.conv1_padding, padding_mode='zeros'),\n torch.nn.ReLU(),\n \n )\n self.pool = torch.nn.MaxPool1d(kernel_size=3, stride=3)\n self.conv1_out_dim = (self.char_emb_dim+2*self.conv1_padding-self.dilation*(self.kernel_size[0]-1))//3\n print(\"char embedding dim: \", self.conv1_out_dim)\n self.linear = torch.nn.Linear(256*2, len(tag2idx))\n self.dropout = torch.nn.Dropout(0.1)\n self.rnn = torch.nn.LSTM(768+self.conv1_out_dim, 256, batch_first=True, bidirectional=True, num_layers=1)\n self.dropout2 = torch.nn.Dropout(0.1)\n \n if use_crf:\n self.crf = CRF(len(tag2idx), tag_dict=tag2idx)\n\n if torch.cuda.is_available():\n self.cuda()\n
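A standalone sketch of the character-index encoding that `forward` (below) builds for each WordPiece token: `##` continuation prefixes are stripped and short tokens are padded with index 0. The helper name and demo vocabulary are hypothetical.

```python
def encode_token_chars(tokens, char2idx, pad_idx=0):
    """Map each (possibly '##'-prefixed) token to a fixed-width row of char ids."""
    stripped = [tk[2:] if tk.startswith('##') else tk for tk in tokens]
    width = max(len(tk) for tk in stripped)
    return [[char2idx.get(ch, pad_idx) for ch in tk] + [pad_idx] * (width - len(tk))
            for tk in stripped]

demo_vocab = {c: i + 1 for i, c in enumerate('abcdefghijklmnopqrstuvwxyz')}
print(encode_token_chars(['play', '##ing'], demo_vocab))
# [[16, 12, 1, 25], [9, 14, 7, 0]] -- 'ing' padded to width 4
```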
\n def forward(self, input, attention_mask, tokens_batch):\n self.zero_grad()\n out = self.bert(input, attention_mask=attention_mask)\n state = out[0] \n state = self.dropout(state)\n \n ############# char embedding ########\n batch_size, batch_sent_length = state.shape[0], state.shape[1]\n batch_char_embedding = torch.zeros(batch_size, batch_sent_length, self.conv1_out_dim).to(device)\n for i, tokens in enumerate(tokens_batch):\n sent_length = len(tokens)\n max_char_length = max(len(tk) if not tk.startswith('##') else len(tk)-2 for tk in tokens)\n \n onehot = [[char2idx[''] for _ in range(max_char_length)] for _ in range(sent_length)]\n for tid, tk in enumerate(tokens):\n if tk.startswith('##'):\n for cid in range(len(tk[2:])):\n onehot[tid][cid] = char2idx[tk[2+cid]]\n else:\n for cid in range(len(tk)):\n onehot[tid][cid] = char2idx[tk[cid]]\n \n onehot = torch.LongTensor(onehot).to(device)\n \n seq_char_embedding = self.char_emb(onehot)\n conv_seq = self.conv1(seq_char_embedding.unsqueeze(1))\n conv_seq = torch.mean(conv_seq, dim=1)\n pool_seq = F.max_pool1d(conv_seq, kernel_size=3)\n emb_seq = torch.max(pool_seq, dim=1)[0]\n \n batch_char_embedding[i, :sent_length] = emb_seq\n \n state = torch.cat((state, batch_char_embedding), dim=2)\n #####################################\n \n \n rnn_output, (final_hidden, _) = self.rnn(state)\n rnn_output = self.dropout2(rnn_output)\n logit = self.linear(rnn_output)\n \n return logit\n\n \n \nSTART_TAG: str = '<START>'\nSTOP_TAG: str = '<STOP>'\n\n\ndef to_scalar(var):\n return var.view(-1).data.tolist()[0]\n\n\ndef argmax(vec):\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\n\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + \\\n torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\ndef argmax_batch(vecs):\n _, idx = torch.max(vecs, 1)\n return idx\n\n\ndef log_sum_exp_batch(vecs):\n maxi = torch.max(vecs, 1)[0]\n maxi_bc = maxi[:, None].repeat(1, vecs.shape[1])\n recti_ = torch.log(torch.sum(torch.exp(vecs - maxi_bc), 1))\n return maxi + recti_\n\n\ndef pad_tensors(tensor_list, type_=torch.FloatTensor):\n ml = max([x.shape[0] for x in tensor_list])\n shape = [len(tensor_list), ml] + list(tensor_list[0].shape[1:])\n template = type_(*shape)\n template.fill_(0)\n lens_ = [x.shape[0] for x in tensor_list]\n for i, tensor in enumerate(tensor_list):\n template[i, :lens_[i]] = tensor\n\n return template, lens_\n\nclass CRF(torch.nn.Module):\n def __init__(self, tagset_size, tag_dict):\n super(CRF, self).__init__()\n self.tagset_size = tagset_size\n self.tag_dict = tag_dict\n self.transitions = torch.nn.Parameter(torch.randn(self.tagset_size, self.tagset_size))\n self.transitions.data[self.tag_dict[START_TAG], :] = -10000.\n self.transitions.data[:, self.tag_dict[STOP_TAG]] = -10000.\n\n if torch.cuda.is_available():\n self.cuda()\n\n def neg_log_likelihood(self, rnn_out, tags, tags_prob, lengths):\n\n if torch.cuda.is_available():\n tags, _ = pad_tensors(tags, torch.cuda.LongTensor)\n else:\n tags, _ = pad_tensors(tags, torch.LongTensor)\n\n forward_score = self._forward_alg(rnn_out[:len(tags), :, :], lengths)\n gold_score = self._score_sentence(rnn_out[:len(tags), :, :], tags, tags_prob, lengths)\n\n score = torch.abs(forward_score - gold_score)\n\n return score.mean()\n\n def _forward_alg(self, feats, lens_):\n init_alphas = 
torch.Tensor(self.tagset_size).fill_(-10000.)\n init_alphas[self.tag_dict[START_TAG]] = 0.\n forward_var = torch.FloatTensor(feats.shape[0], feats.shape[1] + 1, feats.shape[2]).fill_(0)\n\n forward_var[:, 0, :] = init_alphas[None, :].repeat(feats.shape[0], 1)\n if torch.cuda.is_available():\n forward_var = forward_var.cuda()\n\n transitions = self.transitions.view(\n 1, self.transitions.shape[0], self.transitions.shape[1]\n ).repeat(feats.shape[0], 1, 1)\n\n for i in range(feats.shape[1]):\n emit_score = feats[:, i, :]\n tag_var = \\\n emit_score[:, :, None].repeat(1, 1, transitions.shape[2]) + \\\n transitions + \\\n forward_var[:, i, :][:, :, None].repeat(1, 1, transitions.shape[2]).transpose(2, 1)\n\n max_tag_var, _ = torch.max(tag_var, dim=2)\n tag_var = tag_var - max_tag_var[:, :, None].repeat(1, 1, transitions.shape[2])\n\n agg_ = torch.log(torch.sum(torch.exp(tag_var), dim=2))\n\n cloned = forward_var.clone()\n cloned[:, i + 1, :] = max_tag_var + agg_\n\n forward_var = cloned\n\n forward_var = forward_var[range(forward_var.shape[0]), lens_, :]\n terminal_var = forward_var + \\\n self.transitions[self.tag_dict[STOP_TAG]][None, :].repeat(forward_var.shape[0],\n 1)\n\n alpha = log_sum_exp_batch(terminal_var)\n return alpha\n\n def _score_sentence(self, feats, tags, tags_prob, lens_):\n \n start = torch.LongTensor([self.tag_dict[START_TAG]])\n start = start[None, :].repeat(tags.shape[0], 1)\n stop = torch.LongTensor([self.tag_dict[STOP_TAG]])\n stop = stop[None, :].repeat(tags.shape[0], 1)\n if torch.cuda.is_available():\n start = start.cuda()\n stop = stop.cuda()\n\n pad_start_tags = torch.cat([start, tags], 1)\n pad_stop_tags = torch.cat([tags, stop], 1)\n\n for i in range(len(lens_)):\n pad_stop_tags[i, lens_[i]:] = self.tag_dict[STOP_TAG]\n\n score = torch.FloatTensor(feats.shape[0])\n\n if torch.cuda.is_available():\n score = score.cuda()\n\n\n start_prob, end_prob = torch.Tensor([1.0]), torch.Tensor([1.0])\n if torch.cuda.is_available():\n start_prob = start_prob.cuda()\n end_prob = end_prob.cuda()\n\n\n for i in range(feats.shape[0]):\n r = torch.LongTensor(range(lens_[i]))\n if torch.cuda.is_available():\n r = r.cuda()\n \n if tags_prob:\n feats_prob = feats[i, r, tags[i, :lens_[i]]] * tags_prob[i][r, tags[i, :lens_[i]]]\n\n pad_start_tags_prob = torch.cat((start_prob, tags_prob[i][r, tags[i, :lens_[i]]]))\n pad_end_tags_prob = torch.cat((tags_prob[i][r, tags[i, :lens_[i]]], end_prob))\n\n score[i] = \\\n torch.sum(\n self.transitions[pad_stop_tags[i, :lens_[i] + 1], pad_start_tags[i, :lens_[i] + 1]] * pad_start_tags_prob * pad_end_tags_prob\n ) + torch.sum(feats[i,:lens_[i],:] * tags_prob[i])\n# torch.sum(feats_prob)\n \n else:\n score[i] = \\\n torch.sum(\n self.transitions[pad_stop_tags[i, :lens_[i] + 1], pad_start_tags[i, :lens_[i] + 1]]\n ) + \\\n torch.sum(feats[i, r, tags[i, :lens_[i]]])\n\n return score\n\n def viterbi_decode(self, feats):\n backpointers, backscores = [], []\n\n init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.0)\n init_vvars[0][self.tag_dict[START_TAG]] = 0\n forward_var = init_vvars\n if torch.cuda.is_available():\n forward_var = forward_var.cuda()\n\n for feat in feats:\n next_tag_var = forward_var.view(1, -1).expand(self.tagset_size, self.tagset_size) + self.transitions\n _, bptrs_t = torch.max(next_tag_var, dim=1)\n # bptrs_t = bptrs_t.squeeze().data.cpu().numpy()\n # next_tag_var = next_tag_var.data.cpus().numpy()\n viterbivars_t = next_tag_var[range(len(bptrs_t)), bptrs_t]\n forward_var = viterbivars_t + feat\n 
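`log_sum_exp` and `log_sum_exp_batch` above implement the standard max-shift trick, logsumexp(v) = max(v) + log(sum(exp(v - max(v)))), which avoids overflow for large scores. A quick self-contained check against `torch.logsumexp`:

```python
import torch

def log_sum_exp_batch(vecs):
    # Max-shift trick: subtract the row maximum before exponentiating.
    maxi = torch.max(vecs, 1)[0]
    maxi_bc = maxi[:, None].repeat(1, vecs.shape[1])
    return maxi + torch.log(torch.sum(torch.exp(vecs - maxi_bc), 1))

vecs = torch.tensor([[1000.0, 1000.0], [-1000.0, -999.0]])  # exp() alone would overflow
assert torch.allclose(log_sum_exp_batch(vecs), torch.logsumexp(vecs, dim=1))
```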
backscores.append(forward_var)\n backpointers.append(bptrs_t)\n\n terminal_var = forward_var + self.transitions[self.tag_dict[STOP_TAG]]\n terminal_var.data[self.tag_dict[STOP_TAG]] = -10000.\n terminal_var.data[self.tag_dict[START_TAG]] = -10000.\n best_tag_id = argmax(terminal_var.unsqueeze(0))\n\n best_path = [best_tag_id]\n\n for bptrs_t in reversed(backpointers):\n best_tag_id = bptrs_t[best_tag_id]\n best_path.append(best_tag_id)\n best_scores = []\n for backscore in backscores:\n softmax = F.softmax(backscore, dim=0)\n _, idx = torch.max(backscore, 0)\n prediction = idx.item()\n best_scores.append(softmax[prediction].item())\n\n start = best_path.pop()\n assert start == self.tag_dict[START_TAG]\n best_path.reverse()\n return best_scores, best_path","sub_path":"code-NCBI/tagger_models.py","file_name":"tagger_models.py","file_ext":"py","file_size_in_byte":11459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"358467254","text":"import numpy as np\nfrom typing import List\nfrom learnt.regression import predict_outcome\n\n\ndef feature_derivative_ridge(errors, feature, weight, l2_penalty: float, feature_is_constant: bool):\n \"\"\"\n Computing the derivative of the regression cost function.\n Recall that the cost function is the sum over the data points of the squared difference between an observed output\n and a predicted output, plus the L2 penalty term.\n\n Parameters:\n ----------\n :param errors: ndarray\n :param feature: column of a feature.\n :param feature_is_constant: Set true when given column of a feature is a constant.\n :param weight: ndarray\n :param l2_penalty: (lambda) Regularization tuning parameter\n :return: derivation (ndarray)\n \"\"\"\n # (y-HW)ᵀ(y-HW) + λ |W|² is our cost function. to derive this; we'll get following:\n # -2Hᵀ(y-HW) + 2λW\n\n # IMPORTANT: We will not regularize the constant. Thus, in the case of the constant,\n # the derivative is just twice the sum of the errors (without the 2λw[0] term).\n # If feature_is_constant is True, derivative is twice the dot product of errors and feature\n errors = np.reshape(errors, [-1, 1]) # need error to be a n×1 vector\n derivative = np.float64(2 * np.dot(feature, errors)) # 1×n dot product n×1 gives us a scalar\n # simple form of code above:\n # derivative = feature * errors\n # derivative = 2 * sum(derivative)\n if not feature_is_constant:\n # Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight\n derivative = derivative + 2 * (l2_penalty * weight)\n # Noticed omitted -1?! 
We add it back in the weight-update step (in the ridge gradient descent function).\n return derivative\n\n\n'''\n# To test your feature derivative function, run the following:\n\nimport pandas as pd\nfrom learnt.regression import get_numpy_data\nfrom learnt.regression import predict_outcome\n\ndtype_dict = {'bathrooms': float, 'waterfront': int, 'sqft_above': int, 'sqft_living15': float, 'grade': int,\n 'yr_renovated': int, 'price': float, 'bedrooms': float, 'zipcode': str, 'long': float,\n 'sqft_lot15': float, 'sqft_living': float, 'floors': float, 'condition': int, 'lat': float, 'date': str,\n 'sqft_basement': int, 'yr_built': int, 'id': str, 'sqft_lot': int, 'view': int}\n\ndf = pd.read_csv('kc_house_data.csv', dtype=dtype_dict)\nexample_features, example_output = get_numpy_data(df, ['sqft_living'], 'price')\nmy_weights = np.array([1., 10.])\ntest_predictions = predict_outcome(example_features, my_weights)\nerrors = test_predictions - example_output # prediction errors\n\n# next two lines should print the same values\nprint(feature_derivative_ridge(errors, example_features[:, 1], my_weights[1], 1, False))\nprint(np.sum(errors * example_features[:, 1]) * 2 + 20.)\nprint('')\n\n# next two lines should print the same values\nprint(feature_derivative_ridge(errors, example_features[:, 0], my_weights[0], 1, True))\nprint(np.sum(errors) * 2.)\n'''\n\n\ndef ridge_regression_gradient_descent(feature_matrix, output, initial_weights: List[float], step_size,\n l2_penalty: float,\n max_iterations: int = 100):\n # if type(initial_weights[0]) != float:\n # # make sure auto casting, (float to int) doesn't happen at updating weights[i].\n # raise Exception('initial_weights was set with an int instead of a float')\n\n weights = np.array(initial_weights, dtype=np.float64) # make sure it's a numpy array\n while 0 < max_iterations: # while not reached maximum number of iterations:\n # compute the predictions using your predict_output() function\n predictions = predict_outcome(feature_matrix, weights)\n # compute the errors as predictions - output\n errors = predictions - output # predictions is n×1 so we need output to be n×1 too\n\n for i in range(len(weights)): # loop over each weight\n # Recall that feature_matrix[:,i] is the feature column associated with weights[i]\n is_constant: bool = False\n if i == 0: # when i is equal to 0, you are computing the derivative of the constant!\n is_constant = True\n\n # compute the derivative for weight[i]:\n derivative = feature_derivative_ridge(errors, feature_matrix[:, i], weights[i], l2_penalty, is_constant)\n # subtract the step size times the derivative from the current weight\n weights[i] = weights[i] - step_size * derivative\n max_iterations -= 1\n return weights.reshape(-1, 1)\n","sub_path":"learnt/regression/ridge.py","file_name":"ridge.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"630790671","text":"#!/usr/bin/env python\n\nimport importlib\nimport json\nimport logging\nimport argparse\n\nfrom future.backports.urllib.parse import urlparse\n\nfrom oic.utils.authn.client import BearerHeader\nfrom oic.utils.keyio import build_keyjar\n\nfrom otest import ConfigurationError\nfrom otest import NotSupported\nfrom otest import exception_trace\nfrom otest.check import OK\nfrom otest.conversation import Conversation\nfrom otest.parse_cnf import parse_yaml_conf\n\nfrom oidctest.op import make_list\nfrom oidctest.common import make_client\nfrom oidctest.common 
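The gradient-descent routine above can be cross-checked against the closed-form ridge solution w = (HᵀH + λD)⁻¹Hᵀy, where D is the identity with its intercept entry zeroed so the constant is not penalized, matching `feature_derivative_ridge`. A minimal numpy sketch, not part of the library:

```python
import numpy as np

def ridge_closed_form(H, y, l2_penalty):
    """Normal-equation ridge solution; column 0 (the constant) is not regularized."""
    D = np.eye(H.shape[1])
    D[0, 0] = 0.0  # leave the intercept unpenalized, as in the gradient code
    return np.linalg.solve(H.T @ H + l2_penalty * D, H.T @ y)

# Hypothetical usage: ridge_regression_gradient_descent(...) should approach
# ridge_closed_form(H, y, l2_penalty) as max_iterations grows.
```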
import setup_logger\nfrom oidctest.common import run_flow\nfrom oidctest.common import Trace\nfrom oidctest.io import ClIO\nfrom oidctest.session import SessionHandler\nfrom oidctest.utils import get_check\n\nfrom requests.packages import urllib3\n\nurllib3.disable_warnings()\n\n__author__ = 'roland'\n\nlogger = logging.getLogger(\"\")\n\n\ndef get_claims(client):\n resp = {}\n for src in list(client.userinfo[\"_claim_names\"].values()):\n spec = client.userinfo[\"_claim_sources\"][src]\n ht_args = BearerHeader(client).construct(**spec)\n\n try:\n part = client.http_request(spec[\"endpoint\"], \"GET\", **ht_args)\n except Exception:\n raise\n resp.update(json.loads(part.content))\n\n return resp\n\n\ndef endpoint_support(client, endpoint):\n if endpoint in client.provider_info:\n return True\n else:\n return False\n\n\ndef run_func(spec, conv, req_args):\n if isinstance(spec, tuple):\n func, args = spec\n else:\n func = spec\n args = {}\n\n try:\n req_args = func(req_args, conv, args)\n except KeyError as err:\n conv.trace.error(\"function: %s failed\" % func)\n conv.trace.error(str(err))\n raise NotSupported\n except ConfigurationError:\n raise\n else:\n return req_args\n\n\ndef run_one(test_id, flows, profile, profiles, io, sh, **kw_args):\n try:\n redirs = kw_args[\"cinfo\"][\"client\"][\"redirect_uris\"]\n except KeyError:\n redirs = kw_args[\"cinfo\"][\"registered\"][\"redirect_uris\"]\n\n io = ClIO(flows=flows, profile=profile, **kw_args)\n sh = SessionHandler(None, profile, flows, **kw_args)\n\n _flow = flows[test_id]\n _cli = make_client(**kw_args)\n conversation = Conversation(_flow, _cli, kw_args[\"msg_factory\"],\n interaction=kw_args[\"conf\"].INTERACTION,\n trace_cls=Trace, callback_uris=redirs)\n # noinspection PyTypeChecker\n try:\n run_flow(profiles, conversation, test_id, kw_args[\"conf\"],\n profile, kw_args[\"check_factory\"], io, sh)\n except Exception as err:\n exception_trace(\"\", err, logger)\n print(conversation.trace)\n\n\ndef main(flows, profile, profiles, **kw_args):\n try:\n redirs = kw_args[\"cinfo\"][\"client\"][\"redirect_uris\"]\n except KeyError:\n redirs = kw_args[\"cinfo\"][\"registered\"][\"redirect_uris\"]\n\n test_list = make_list(flows, profile, **kw_args)\n\n for tid in test_list:\n io = ClIO(flows=flows, profile=profile, **kw_args)\n sh = SessionHandler(profile, flows, **kw_args)\n\n _flow = flows[tid]\n _cli, _cliconf = make_client(**kw_args)\n conversation = Conversation(_flow, _cli, kw_args[\"msg_factory\"],\n interaction=kw_args[\"conf\"].INTERACTION,\n trace_cls=Trace, callback_uris=redirs)\n\n _cli.conv = conversation\n # noinspection PyTypeChecker\n try:\n info = run_flow(profiles, conversation, tid, kw_args[\"conf\"],\n profile, kw_args[\"check_factory\"], io, sh)\n if info['status'] == OK:\n print('+{}'.format(tid))\n else:\n print('!{}'.format(tid))\n for ev in conversation.events:\n print(ev)\n break\n except Exception as err:\n exception_trace(\"\", err, logger)\n print(conversation.trace)\n break\n\n\nif __name__ == '__main__':\n from oidctest import profiles\n from oidctest import oper\n from oidctest import func\n from oic.oic.message import factory as oic_message_factory\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', dest='flows')\n parser.add_argument('-l', dest=\"log_name\")\n parser.add_argument('-p', dest=\"profile\")\n parser.add_argument('-t', dest=\"testid\")\n parser.add_argument(dest=\"config\")\n cargs = parser.parse_args()\n\n fdef = {'Flows': {}, 'Order': [], 'Desc': []}\n cls_factories = {'': 
oper.factory}\n func_factory = func.factory\n\n spec = parse_yaml_conf(cargs.flows, cls_factories, func_factory)\n fdef['Flows'].update(spec['Flows'])\n for param in ['Order', 'Desc']:\n try:\n fdef[param].extend(spec[param])\n except KeyError:\n pass\n\n CONF = importlib.import_module(cargs.config)\n\n if cargs.log_name:\n setup_logger(logger, cargs.log_name)\n else:\n setup_logger(logger)\n\n # Add own keys for signing/encrypting JWTs\n jwks, keyjar, kidd = build_keyjar(CONF.keys)\n\n # export JWKS\n p = urlparse(CONF.KEY_EXPORT_URL)\n f = open(\".\" + p.path, \"w\")\n f.write(json.dumps(jwks))\n f.close()\n jwks_uri = p.geturl()\n\n kwargs = {\"base_url\": CONF.BASE, \"kidd\": kidd, \"keyjar\": keyjar,\n \"jwks_uri\": jwks_uri, \"flows\": fdef['Flows'], \"conf\": CONF,\n \"cinfo\": CONF.INFO, \"desc\": fdef['Desc'], 'order': fdef['Order'],\n \"profiles\": profiles, \"operations\": oper,\n \"profile\": cargs.profile, \"msg_factory\": oic_message_factory,\n 'check_factory': get_check}\n\n if cargs.testid:\n run_one(cargs.testid, **kwargs)\n else:\n main(**kwargs)\n","sub_path":"test_tool/test_op/rp/cl/cloprp.py","file_name":"cloprp.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"591890311","text":"# ------------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2014-2021 Digital Sapphire\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n# ------------------------------------------------------------------------------\nimport logging\nimport typing as t\n\ntry:\n from flask import Flask, g, request\nexcept ImportError:\n Flask = None\n g = None\n request = None\n\nlog = logging.getLogger(__name__)\n\n\nclass DSFlaskResponse:\n bad_request = 400\n conflict = 409\n created = 201\n forbidden = 403\n not_found = 404\n ok = 200\n unauthorized = 401\n\n @staticmethod\n def log_request(**kwargs):\n if Flask is None:\n raise RuntimeError('dsdev-utils[flask] is not installed')\n\n log.info(\"Path: %s\", request.path)\n log.info(\"Method: %s\", request.method)\n log.info(\"Remote Addr: %s\", request.remote_addr)\n if hasattr(g, \"user\"):\n log.info(\"User ID: %s\", g.user.get_id())\n\n parsed_data = dict()\n\n for k, v in request.headers.items():\n if \"Authorization\" in k:\n v = \"*****\"\n parsed_data[k] = v\n\n data = None\n if 'data' in kwargs.keys():\n data = kwargs['data']\n del kwargs['data']\n\n parsed_data.update(kwargs)\n\n msg = f\"Headers: {parsed_data}\"\n log.info(msg)\n if data is not None:\n DSFlaskResponse.log_request_data(data)\n\n @staticmethod\n def log_request_data(data):\n if \"password\" in data.keys():\n temp_password = data[\"password\"]\n data[\"password\"] = \"*****\"\n msg = f\"Request Data: {data}\"\n data[\"password\"] = temp_password\n else:\n msg = f\"Request Data: {data}\"\n\n log.info(msg)\n\n @staticmethod\n def resp_data(\n data: t.Union[t.Dict, t.List], status_code: int\n ) -> t.Tuple[t.Dict[str, t.Any], int]:\n return (\n {\n \"data\": data,\n },\n status_code,\n )\n\n @staticmethod\n def resp_data_created(data: t.Union[t.Dict, t.List]):\n return DSFlaskResponse.resp_data(data, DSFlaskResponse.created)\n\n @staticmethod\n def resp_data_ok(data: t.Union[t.Dict, t.List]):\n return DSFlaskResponse.resp_data(data, DSFlaskResponse.ok)\n\n @staticmethod\n def resp_message(msg, status_code) -> t.Tuple[t.Dict[str, t.Any], int]:\n return (\n {\n \"message\": msg,\n },\n status_code,\n )\n\n @staticmethod\n def resp_message_bad_request(msg=\"Bad Request\") -> t.Tuple[t.Dict[str, t.Any], int]:\n return DSFlaskResponse.resp_message(msg, DSFlaskResponse.bad_request)\n\n @staticmethod\n def resp_message_conflict(msg=\"Conflict\") -> t.Tuple[t.Dict[str, t.Any], int]:\n return DSFlaskResponse.resp_message(msg, DSFlaskResponse.conflict)\n\n @staticmethod\n def resp_message_forbidden(msg=\"Forbidden\") -> t.Tuple[t.Dict[str, t.Any], int]:\n return DSFlaskResponse.resp_message(msg, DSFlaskResponse.forbidden)\n\n @staticmethod\n def resp_message_not_found(msg=\"Not Found\") -> t.Tuple[t.Dict[str, t.Any], int]:\n return DSFlaskResponse.resp_message(msg, DSFlaskResponse.not_found)\n\n @staticmethod\n def resp_message_unauthorized(\n msg=\"Unauthorized\",\n ) -> t.Tuple[t.Dict[str, t.Any], int]:\n return DSFlaskResponse.resp_message(msg, DSFlaskResponse.unauthorized)\n","sub_path":"dsdev_utils/flask.py","file_name":"flask.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
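A sketch of how these response helpers might be wired into a route; the app, endpoint, and in-memory store are hypothetical, the import path is assumed from the module's sub_path, and Flask 1.1+ behavior (a dict body plus status tuple is serialized to JSON) is relied on:

```python
from flask import Flask

from dsdev_utils.flask import DSFlaskResponse  # assumed import path

app = Flask(__name__)
USERS = {1: {"name": "alice"}}  # hypothetical in-memory store

@app.route("/users/<int:user_id>")
def get_user(user_id):
    DSFlaskResponse.log_request()
    user = USERS.get(user_id)
    if user is None:
        return DSFlaskResponse.resp_message_not_found()  # ({"message": ...}, 404)
    return DSFlaskResponse.resp_data_ok(user)            # ({"data": ...}, 200)
```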
+{"seq_id":"5187166","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# In[2]:\n\n\n# Create test data\n# Create constant coefficients\na, b, c, d = [4, 2, 5, 7]\nnum_arr = 100\n\n# Create random values X1, X2, X3 and Y = a*X1 + b*X2 + c*X3 + d*X4, where X4 is all ones\nX1, X2, X3, X4 = [np.random.rand(100), np.random.rand(100), np.random.rand(100), np.ones(100)]\nY = a* X1 + b*X2 + c*X3 + np.random.rand(100)* X4\n\n# Import the data into a dataframe\ndataFrame = pd.DataFrame({\n 'X1' : X1,\n 'X2' : X2,\n 'X3' : X3,\n 'X4' : X4,\n 'Y' : Y\n})\n\ndataFrame\n\n\n# In[3]:\n\n\n# Improve the training function, using the matrix multiplication formula\n# Rows number in Training Data\nm = dataFrame.X1.count()\n# Columns number in Training Data\nn = len(dataFrame.columns) -1\n# Learning_rate\nlearning_rate = 0.02\n# Set random for theta\ntheta = np.random.rand(n)\n# To save values of cost function each loop\ncost_arr = []\n\ndef predict(datafrm, theta):\n col_start = 0\n col_end = 4\n # Get first 4 columns of table and transpose dimension\n get_trainData = datafrm.iloc[ : , col_start : col_end]\n return np.dot(theta, get_trainData.T)\n\ndef cost_update(datafrm, theta):\n yhat = predict(datafrm, theta)\n y = datafrm.Y\n return np.sum((yhat-y)**2)/(2*m)\n\ny = dataFrame.Y\n\nfor i in range(6000):\n yhat_train = predict(dataFrame, theta)\n for x in range(n):\n theta[x] = theta[x] - learning_rate*np.sum((yhat_train-y)*dataFrame.iloc[ : , x])/m\n cost_arr.append(cost_update(dataFrame, theta))\n \ncost_arr\nplt.plot(cost_arr)\n\n\n# In[4]:\n\n\ntheta\n\n","sub_path":"01_Machine Learning/01_Liner Regression/03_Multi Variance.py","file_name":"03_Multi Variance.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"256760358","text":"'''Procedure'''\n\n#1-4. n/a\n\n#5. The integer, float, and sometimes string types can represent 6 million\n\n#6. The first example resulted in a string because both variables were strings. \n# The second example however, had a string and an integer so it resulted in \n# an error.\n\n#7. When the integer has a negative number as the index then it will result in \n# the placement of the letter starting from the end. Additionally, if the\n# number is greater than the amount of iterables in a string, then it will \n# result in an error. The iterables also count the spaces in and if it results\n# on a space, it will output \" \"\n\n#8. Slicing: In order for slogan to return the string \"best\" I used slogan[17:]\n\n#9. n/a\n\n#10a. It results in 7 because it takes the length of the string that is set to \n# activity. Theater has a length of 7 so it resulted in 7.\n\n#10b. It first uses the string from the activity variable and outputs theate\n# because the len of activity is 7 and -1 is 6. This makes it so that it \n# would only output from the start to the 5th iterate.\n\n#11. It outputs true because the first string, 'test goo' is in the next string\n\n#12.\ndef how_eligible(essay):\n '''This function determines how many of the given characters are in the \n sentence and returns a score depending on how many there are'''\n if '?' and '!' and '\"' in essay:\n return 4\n elif '?' and '!' in essay:\n return 3\n elif '?' and '!' in essay:\n return 2\n elif ',' in essay:\n return 1\n else:\n return 0\n\n\n\n#1.3.5 Function Test\nprint(how_eligible('This? \"Yes.\" No, not really!'))\nprint(how_eligible('Really, not a compound sentence.'))\n\n#I did end up getting the correct returned values however I was unable to insert\n#the comma because it ended up returning a 4 every time I tested it.\n","sub_path":"1.3.5/Jap_1.3.5.py","file_name":"Jap_1.3.5.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
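One note on the record above: `if '?' and '!' and '"' in essay` does not test all three characters, because `and` chains the truthy string literals and only `'"' in essay` is actually evaluated; that is why the author saw 4 returned every time once a comma was added to the chain. A corrected sketch of the apparent intent (the rubric is inferred from the original):

```python
def how_eligible(essay):
    """Score an essay by how many of these punctuation marks it uses."""
    marks = ['?', '!', '"', ',']
    return sum(1 for mark in marks if mark in essay)

print(how_eligible('This? "Yes." No, not really!'))      # 4
print(how_eligible('Really, not a compound sentence.'))  # 1
```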
\"Yes.\" No, not really!'))\nprint(how_eligible('Really, not a compound sentence.'))\n\n#I did end up getting the correct returned values however I was unable to insert\n#the comma because it ended up returning a 4 everytime I tested it.\n","sub_path":"1.3.5/Jap_1.3.5.py","file_name":"Jap_1.3.5.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"384641067","text":"from django.conf.urls import patterns, url\nfrom usuarios import views\n\nurlpatterns = patterns('',\n\t#urls de compras\n\turl(r'^$', views.index, name=\"index\"),\n\turl(r'^agregar-usuario/$', views.agregarUsuario, name=\"agregarUsuario\"),\n\turl(r'^editar-usuario/$', views.editarUsuario, name=\"editarUsuario\"),\n\t#vistas ajax\n\turl(r'^desactivar-usuario/$', views.desactivarUsuario, name=\"desactivarUsuario\"),\n\turl(r'^cambiar-contrasenia/$', views.cambiarContrasenia, name=\"cambiarContrasenia\"),\n)","sub_path":"usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"265220110","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Small parser utils for smvk.\"\"\"\nimport re\nimport os\nimport pywikibot\n\nimport batchupload.common as common\nimport batchupload.helpers as helpers\n\ncleaner_pattern = None # to avoid repeated loads\n\n\ndef load_cleaner_patterns(filename=None):\n \"\"\"Load the cleaner patterns file if needed.\"\"\"\n if not filename:\n _filename = 'cleaner_patterns.json'\n filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), _filename)\n\n global cleaner_pattern\n if not cleaner_pattern:\n cleaner_pattern = common.open_and_read_file(filename, as_json=True)\n return cleaner_pattern\n\n\ndef parse_external_id(ext_id):\n \"\"\"Match an external id to a Commons formating template.\"\"\"\n if ext_id.startswith('gnm/'):\n return gnm_parser(ext_id)\n elif ext_id.startswith('SMVK'): # same image in use in a sister collection\n return smvk_parser(ext_id)\n\n # if not caught by any of the above\n pywikibot.warning('{} is not a recognized external id'.format(ext_id))\n\n\ndef smvk_parser(ext_id, label_delimiter='!'):\n \"\"\"Parser for SMVK identifiers.\"\"\"\n # Not as sensitive as build_link_template nor is it validated\n museum, type, id = ext_id.split('/', 2)\n label = None\n if label_delimiter in id: # lable is added to the id during a merge\n id, _, label = id.partition(label_delimiter)\n prefix = ''\n if museum != 'SMVK-MM': # MM has prefix as part of id\n prefix = '|{}'.format(type)\n\n if label:\n return '{{%s-link%s|%s|%s}}' % (museum, prefix, id, label)\n return '{{%s-link%s|%s}}' % (museum, prefix, id)\n\n\ndef gnm_parser(ext_id):\n \"\"\"Parser for Gothenburgh Natural Museum identifiers.\"\"\"\n if not ext_id.startswith('gnm/photo/GNM'):\n pywikibot.warning(\n 'The GNM parser needs to be extended to handle {}'.format(ext_id))\n return '{{GNM-link|%s}}' % ext_id[len('gnm/photo/GNM'):]\n\n\ndef clean_uncertain(value, keep=False):\n \"\"\"\n Handle uncertain values in the data.\n\n Process any value containing a '[?]' string.\n\n :param value: the value or list of values to process\n :param keep: whether to keep the clean value or discard it\n \"\"\"\n was_list = isinstance(value, list)\n values = common.listify(value)\n new_list = []\n for val in values:\n if '[?]' in val:\n if keep:\n new_list.append(\n val.replace('[?]', '').replace(' ', ' ').strip())\n 
else:\n new_list.append(val)\n\n # return in same format as original\n if not was_list:\n if not new_list:\n return ''\n return new_list[0]\n return new_list\n\n\ndef get_last_year(date_text):\n \"\"\"Attempt to extract the last year in a wikitext date template.\"\"\"\n hits = re.findall('\\d\\d\\d\\d', date_text)\n if hits:\n return int(hits[-1])\n\n\ndef format_description_row(label, value, delimiter=','):\n \"\"\"Format a single description line.\"\"\"\n delimiter = '{} '.format(delimiter)\n return '
\\n{}: {}'.format(\n helpers.italicize(label),\n delimiter.join(common.listify(value)))\n\n\ndef replace_repeat_character(text, char_1, target, delimiter, char_2=None):\n \"\"\"\n Replace two characters by a single one.\n\n Replaces them even if separated by space or delimiter. Also merges any\n adjacent delimiters.\n\n If char_2 is not provided then it is assumed that char_1 is repeated\n \"\"\"\n char_2 = char_2 or char_1\n patterns = (\n char_1 + char_2,\n char_1 + delimiter + char_2,\n char_1 + ' ' + char_2)\n\n text = text.replace(delimiter * 2, delimiter)\n while any(text.find(pattern) > 0 for pattern in patterns):\n for pattern in patterns:\n text = text.replace(pattern, target + delimiter)\n text = text.replace(delimiter + ' ', delimiter)\n text = text.replace(delimiter * 2, delimiter)\n return text\n\n\ndef description_cleaner(text, structured=False):\n \"\"\"\n Attempt a cleanup of SMVK descriptions.\n\n The descriptions contain a lot of info which is more of internal notes\n character. This method contains an ugly list of such strings and attempts\n to get rid of them.\n\n Outsourced to the utils file because it is ugly.\n\n :param structured: if internal structure should be kept to facilitate\n diffs.\n \"\"\"\n delimiter = '¤'\n cleaner_patterns = load_cleaner_patterns()\n\n # anything found after one of these should be removed\n for test in cleaner_patterns.get('endings'):\n if text.find(test) >= 0:\n text = text[:text.find(test)]\n # anything found before one of these should be removed\n for test in cleaner_patterns.get('starts'):\n if text.find(test) >= 0:\n text = text[text.find(test) + len(test):]\n\n # remove these blocks from inside kept text\n for test in cleaner_patterns.get('middle'):\n while text.find(test) >= 0:\n start = text.find(test)\n end = start + len(test)\n text = text[:start].rstrip() + delimiter + text[end:].lstrip()\n\n # clean out any [...], there may be many\n while text.find('[') >= 0:\n start = text.find('[')\n end = text.find(']', start)\n if end < 0:\n break\n text = text[:start].rstrip() + delimiter + text[end + 1:].lstrip()\n\n # remove repeats, even if interspersed with delimiters\n repeats = (' ', ',', '.')\n for char in repeats:\n text = replace_repeat_character(text, char, char, delimiter)\n # special case .,\n text = replace_repeat_character(text, '.', '.', delimiter, char_2=',')\n\n # merge any remaining removed blocks\n while text.find(delimiter * 2) > 0:\n text = text.replace(delimiter * 2, delimiter)\n # ignore any removed block in the end\n text = text.strip(delimiter)\n\n if structured:\n return text.split(delimiter)\n else:\n no_space_before = (',', '.', ':', ';')\n for char in no_space_before:\n text = text.replace(delimiter + char, char)\n return text.replace(delimiter, ' ')\n\n\ndef clean_all_descriptions(filename):\n \"\"\"\n Clean all descriptions in a file.\n\n Load a file with one description per row, clean each and output a visible\n diff for on-wiki consumption.\n \"\"\"\n import os.path as path\n base, ext = path.splitext(filename)\n f_in = open(filename)\n f_out = open('{}_clean{}'.format(base, ext), 'w')\n\n intro = (\n 'Preview of description cleanup for SMVK.\\n'\n '{} text is discarded, {} text is '\n 'kept, {} indicates a description '\n 'which was completely discarded.\\n\\n----\\n\\n'.format(\n helpers.bolden('Black'),\n helpers.bolden('blue'),\n helpers.bolden('red')))\n f_out.write(intro)\n\n for l in f_in:\n if not l.strip():\n f_out.write('* {}'.format(l))\n continue\n cleaned = description_cleaner(l, 
structured=True)\n if not any(block.strip() for block in cleaned):\n f_out.write('* {}\\n'.format(\n l.rstrip()))\n else:\n end = 0\n clean_l = l\n for block in cleaned:\n block = block.strip()\n if not block:\n continue\n start = clean_l.find(block, end)\n end = start + len(block)\n clean_l = '{}{}{}'.format(\n clean_l[:start], block, clean_l[end:])\n end += len('')\n f_out.write('* {}'.format(clean_l))\n f_in.close()\n f_out.close()\n","sub_path":"smvk/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"47575469","text":"import concurrent.futures\nimport time\nimport mpu\nimport geoip2.database\nimport os\nimport datetime\n\nfrom haversine import haversine, Unit\n\nreader = geoip2.database.Reader('GeoLite2-City.mmdb')\n \ndnsiplist = [] \n\nfile_gs= \"gs.txt\" \n\nwith open(file_gs) as f:\n \n for line in f:\n dnsiplist.append(line.strip()) \n \nf.close()\n\nrfilename = \"100\"\nwfilename = \"100\" + \".dis\"\n\ntry:\n os.remove(wfilename)\nexcept:\n pass\n \nfor ip in dnsiplist:\n \n distance = 100000000000000\n nearest_ip = \"\"\n \n response = reader.city(ip)\n\n print(ip)\n print(\"open \" + rfilename)\n\n counter = 0\n with open(rfilename) as rf:\n \n for line2 in rf:\n\n #reader2 = geoip2.database.Reader('GeoLite2-City.mmdb')\n\n counter = counter + 1\n #print(counter)\n\n now = datetime.datetime.now() \n\n if counter % 100000 == 0:\n print(\"[\" + str(now) + \"] \" + str(counter) + \" lines done.\" )\n\n \n try:\n response2 = reader.city(line2.strip())\n except:\n continue\n \n try:\n\n paris = (response.location.latitude, response.location.longitude) # (lat, lon)\n lyon = (response2.location.latitude, response2.location.longitude) # (lat, lon)\n\n if float(distance) > float(haversine(lyon, paris)):\n #fw = open(wfilename, 'a')\n #fw.write(str(ip)+\",\"+str(line2.strip())+\",\"+str(haversine(lyon, paris))+\"\\n\")\n #fw.close() \n\n now = datetime.datetime.now() \n print(\"[\" + str(now) + \"] UPDATE:\"+str(ip)+\",\"+str(line2.strip())+\",\"+str(haversine(lyon, paris)))\n\n nearest_ip = line2.strip()\n distance = haversine(lyon, paris) \n \n \n except:\n continue\n\n #reader2.close()\n\n\n now = datetime.datetime.now() \n\n fw = open(wfilename, 'a')\n fw.write(str(ip)+\",\"+str(nearest_ip)+\",\"+str(distance)+\"\\n\")\n fw.close()\n \n print(\"[\"+str(now)+\"]\"+\" COMMIT: \"+str(ip)+\",\"+str(nearest_ip)+\",\"+str(distance))\n \n print(\"close \" + rfilename)\n rf.close() \n\nreader.close()\n \n \n","sub_path":"geoip/conc3.py","file_name":"conc3.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"11950515","text":"# -*- coding: utf-8 -*-\nimport asyncio\nimport requests\nfrom difflib import SequenceMatcher\nimport distutils.util\nimport json\nimport os\nimport platform\n\nfrom src.trackers.COMMON import COMMON\nfrom src.console import console \n\nclass STC():\n \"\"\"\n Edit for Tracker:\n Edit BASE.torrent with announce and source\n Check for duplicates\n Set type/category IDs\n Upload\n \"\"\"\n def __init__(self, config):\n self.config = config\n self.tracker = 'STC'\n self.source_flag = 'STC'\n self.upload_url = 'https://skipthecommericals.xyz/api/torrents/upload'\n self.search_url = 'https://skipthecommericals.xyz/api/torrents/filter'\n self.signature = 
'\\n[center][url=https://skipthecommericals.xyz/pages/1]Please Seed[/url][/center]'\n self.banned_groups = [\"\"]\n pass\n \n async def upload(self, meta):\n common = COMMON(config=self.config)\n await common.edit_torrent(meta, self.tracker, self.source_flag)\n await common.unit3d_edit_desc(meta, self.tracker, self.signature)\n cat_id = await self.get_cat_id(meta['category'])\n type_id = await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', \"\"))\n resolution_id = await self.get_res_id(meta['resolution'])\n stc_name = await self.edit_name(meta)\n if meta['anon'] == 0 and bool(distutils.util.strtobool(str(self.config['TRACKERS'][self.tracker].get('anon', \"False\")))) == False:\n anon = 0\n else:\n anon = 1\n if meta['bdinfo'] != None:\n mi_dump = None\n bd_dump = open(f\"{meta['base_dir']}/tmp/{meta['uuid']}/BD_SUMMARY_00.txt\", 'r', encoding='utf-8').read()\n else:\n mi_dump = open(f\"{meta['base_dir']}/tmp/{meta['uuid']}/MEDIAINFO.txt\", 'r', encoding='utf-8').read()\n bd_dump = None\n desc = open(f\"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]DESCRIPTION.txt\", 'r').read()\n open_torrent = open(f\"{meta['base_dir']}/tmp/{meta['uuid']}/[{self.tracker}]{meta['clean_name']}.torrent\", 'rb')\n files = {'torrent': open_torrent}\n data = {\n 'name' : stc_name,\n 'description' : desc,\n 'mediainfo' : mi_dump,\n 'bdinfo' : bd_dump, \n 'category_id' : cat_id,\n 'type_id' : type_id,\n 'resolution_id' : resolution_id,\n 'tmdb' : meta['tmdb'],\n 'imdb' : meta['imdb_id'].replace('tt', ''),\n 'tvdb' : meta['tvdb_id'],\n 'mal' : meta['mal_id'],\n 'igdb' : 0,\n 'anonymous' : anon,\n 'stream' : meta['stream'],\n 'sd' : meta['sd'],\n 'keywords' : meta['keywords'],\n 'personal_release' : int(meta.get('personalrelease', False)),\n 'internal' : 0,\n 'featured' : 0,\n 'free' : 0,\n 'doubleup' : 0,\n 'sticky' : 0,\n }\n # Internal\n if self.config['TRACKERS'][self.tracker].get('internal', False) == True:\n if meta['tag'] != \"\" and (meta['tag'][1:] in self.config['TRACKERS'][self.tracker].get('internal_groups', [])):\n data['internal'] = 1\n \n if meta.get('category') == \"TV\":\n data['season_number'] = meta.get('season_int', '0')\n data['episode_number'] = meta.get('episode_int', '0')\n headers = {\n 'User-Agent': f'Upload Assistant/2.1 ({platform.system()} {platform.release()})'\n }\n params = {\n 'api_token': self.config['TRACKERS'][self.tracker]['api_key'].strip()\n }\n \n if meta['debug'] == False:\n response = requests.post(url=self.upload_url, files=files, data=data, headers=headers, params=params)\n try:\n \n console.print(response.json())\n except:\n console.print(\"It may have uploaded, go check\")\n open_torrent.close()\n return \n else:\n console.print(f\"[cyan]Request Data:\")\n console.print(data)\n open_torrent.close()\n\n\n\n async def edit_name(self, meta):\n stc_name = meta.get('name')\n return stc_name\n\n async def get_cat_id(self, category_name):\n category_id = {\n 'MOVIE': '1', \n 'TV': '2', \n }.get(category_name, '0')\n return category_id\n\n async def get_type_id(self, type, tv_pack, sd, category):\n type_id = {\n 'DISC': '1', \n 'REMUX': '2',\n 'WEBDL': '4', \n 'WEBRIP': '5', \n 'HDTV': '6',\n 'ENCODE': '3'\n }.get(type, '0')\n if tv_pack == 1:\n if sd == 1:\n # Season SD\n type_id = '14'\n if type == \"ENCODE\":\n type_id = '18'\n if sd == 0:\n # Season HD\n type_id = '13'\n if type == \"ENCODE\":\n type_id = '18'\n if type == \"DISC\" and category == \"TV\":\n if sd == 1:\n # SD-RETAIL\n type_id = '17'\n if sd == 0:\n # 
HD-RETAIL\n type_id = '18'\n return type_id\n\n async def get_res_id(self, resolution):\n resolution_id = {\n '8640p':'10', \n '4320p': '1', \n '2160p': '2', \n '1440p' : '3',\n '1080p': '3',\n '1080i':'4', \n '720p': '5', \n '576p': '6', \n '576i': '7',\n '480p': '8', \n '480i': '9'\n }.get(resolution, '10')\n return resolution_id\n\n\n\n\n\n \n\n\n async def search_existing(self, meta):\n dupes = []\n console.print(\"[yellow]Searching for existing torrents on site...\")\n params = {\n 'api_token' : self.config['TRACKERS'][self.tracker]['api_key'].strip(),\n 'tmdbId' : meta['tmdb'],\n 'categories[]' : await self.get_cat_id(meta['category']),\n 'types[]' : await self.get_type_id(meta['type'], meta.get('tv_pack', 0), meta.get('sd', 0), meta.get('category', \"\")),\n 'resolutions[]' : await self.get_res_id(meta['resolution']),\n 'name' : \"\"\n }\n if meta['category'] == 'TV':\n params['name'] = f\"{meta.get('season', '')}{meta.get('episode', '')}\"\n if meta.get('edition', \"\") != \"\":\n params['name'] + meta['edition']\n try:\n response = requests.get(url=self.search_url, params=params)\n response = response.json()\n for each in response['data']:\n result = [each][0]['attributes']['name']\n dupes.append(result)\n except:\n console.print('[bold red]Unable to search for existing torrents on site. Either the site is down or your API key is incorrect')\n await asyncio.sleep(5)\n\n return dupes","sub_path":"src/trackers/STC.py","file_name":"STC.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"349924864","text":"#!/usr/bin/env python\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport os, sys\nfrom importlib import import_module\nimport scipy as sp\nimport matplotlib.pyplot as pl\nfrom matplotlib import ticker, mlab, colors\nfrom matplotlib.cm import jet\nfrom mpl_toolkits.basemap.cm import sstanom\nimport g5lib.plotters as ptrs\nimport g5lib.domain as domain\nfrom g5lib import cmaps as g5cmaps\nfrom g5lib import g5dset\n\nvarname='S'\n# Read variable\nexp=g5dset.read_exp(sys.argv[1])\nexp.ctl=g5dset.Ctl(exp,'geosgcm_ocn3d')\n\nif exp.ctl.grid['lev'][-1] < 0.0:\n exp.ctl.grid['lev'][:]*=-1 \n\ndd=exp.ctl.domain\ndd['dates']=exp.dates\nind=domain.Domain(**dd)(exp.ctl.grid, exp.ctl.time)\nexp.am=exp.ctl.tave(varname,ind['tind']) \n\nexp.am.shiftgrid(30.)\n\nexp.lat_depth=exp.am.ave(3)\nexp.eq_depth=exp.am(lats=(-2.1,2.1)).ave(2)\n\nexp.lat_depth.name=exp.am.name+', Annual Mean'\nexp.eq_depth.name=exp.am.name+', Eq. 
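# --- editor's note: the STC tracker class above maps human-readable labels to the
# site's numeric IDs with dict lookups that fall back to a string default. An abridged
# sketch of that pattern, reusing a subset of the record's own resolution table:
def get_res_id(resolution: str) -> str:
    return {
        '2160p': '2',
        '1080p': '3',
        '720p': '5',
    }.get(resolution, '10')  # '10' acts as the catch-all "other" bucket

assert get_res_id('1080p') == '3'
assert get_res_id('144p') == '10'  # unknown labels fall through to the default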
Annual Mean'\n\n\n# Read experiment to compare\nexp1=g5dset.read_exp(exp.cmpexp)\nexp1.ctl=g5dset.Ctl(exp1,'geosgcm_ocn3d')\n\nif exp1.ctl.grid['lev'][-1] < 0.0:\n exp1.ctl.grid['lev'][:]*=-1 \n\ndd=exp1.ctl.domain\ndd['dates']=exp1.dates\nind=domain.Domain(**dd)(exp1.ctl.grid, exp1.ctl.time)\nexp1.am=exp1.ctl.tave(varname, ind['tind']) \n\nexp1.am.shiftgrid(30.)\n# If dimensions do not match, regrid\nif exp1.am.dims[2:] != exp.am.dims[2:]:\n exp1.am.regrid(exp.am.grid)\n\nexp1.lat_depth=exp1.am.ave(3)\nexp1.eq_depth=exp1.am(lats=(0,))\n\n# If levels do not match, interpolate\nif exp1.lat_depth.dims[1] != exp.lat_depth.dims[1]:\n exp1.lat_depth.vinterp(exp.lat_depth.grid,newmask=exp.lat_depth.data.mask)\n exp1.eq_depth.vinterp(exp.eq_depth.grid,newmask=exp.eq_depth.data.mask)\n\n# Read vaidation data and interpolate to exp grid\nobs=import_module('levitus')\n\nobs.am=obs.ctl('salt').ave(0)\nobs.am.shiftgrid(30.)\nobs.am.regrid(exp.am.grid)\n\nobs.lat_depth=obs.am.ave(3)\nobs.eq_depth=obs.am(lats=(0,))\n\nobs.lat_depth.vinterp(exp.lat_depth.grid,newmask=exp.lat_depth.data.mask)\nobs.eq_depth.vinterp(exp.eq_depth.grid,newmask=exp.eq_depth.data.mask)\n###################### Do plots #######################################################\n\nclevs=sp.arange(33.,36.1,0.2)\n\npath=exp.plot_path\ntry:\n os.makedirs(path)\nexcept OSError:\n pass\n \n\ndef plot_field(field, fig, clevs, cmap, fill_range=None):\n pl.figure(fig)\n pl.clf()\n n=colors.Normalize()\n n.autoscale(clevs)\n if fill_range is not None:\n m=g5cmaps.FilledCmap(cmap, fill_range=n(fill_range))\n else: \n m=cmap\n p=ptrs.Plotter2d(copts=dict(levels=clevs, cmap=m, norm=n))\n p(field)\n p.method=pl.contour\n p.copts=dict(levels=clevs[0::2], colors='black')\n p(field)\n ax=p.axis\n ax.set_ylabel('depth, m'); ax.invert_yaxis()\n return p\n\np=plot_field(exp.lat_depth, 1, clevs, jet)\nax=p.axis; ax.set_ylim(3000., 0.) \nax.xaxis.set_major_locator(ticker.MultipleLocator(30))\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'_lat_depth.png')\n\nclevs1=sp.arange(-2,2.1,0.2)\nfrange=(-0.2,0.2)\ndif=exp.lat_depth.subset(); dif.data-=exp1.lat_depth.data\ndif.name=exp.ctl.name+'-'+exp1.ctl.name+' '+varname+', Annual Mean'\np=plot_field(dif, 2, clevs1, sstanom, frange)\nax=p.axis; ax.set_ylim(3000., 0.)\nax.xaxis.set_major_locator(ticker.MultipleLocator(30))\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'_dif_lat_depth.png')\n\n\ndif=exp.lat_depth.subset(); dif.data-=obs.lat_depth.data\ndif.name=exp.ctl.name+'-'+obs.ctl.name+' '+varname+', Annual Mean'\np=plot_field(dif, 3, clevs1, sstanom, frange)\nax=p.axis; ax.set_ylim(3000., 0.)\nax.xaxis.set_major_locator(ticker.MultipleLocator(30))\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'-obs_lat_depth.png')\n\nclevs=sp.arange(34.,36.1,0.2)\n\np=plot_field(exp.eq_depth, 4, clevs, jet)\nax=p.axis; ax.set_ylim(500., 0.)\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'_eq_depth.png')\n\ndif=exp.eq_depth.subset(); dif.data-=exp1.eq_depth.data\ndif.name=exp.ctl.name+'-'+exp1.ctl.name+' '+varname+', Eq. Annual Mean'\np=plot_field(dif, 5, clevs1, sstanom, frange)\nax=p.axis; ax.set_ylim(500., 0.)\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'_dif_eq_depth.png')\n\ndif=exp.eq_depth.subset(); dif.data-=obs.eq_depth.data\ndif.name=exp.ctl.name+'-'+obs.ctl.name+' '+varname+', Eq. 
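# --- editor's note: plot_field() above layers black line contours (every other level)
# over filled contours that share one Normalize/levels set. The same idea with plain
# matplotlib on synthetic data (the g5lib Plotter2d wrapper is not required):
import numpy as np
import matplotlib.pyplot as plt

x, y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
field = 34.5 + np.sin(3 * x) * np.cos(2 * y)   # fake salinity-like field
levels = np.arange(33.0, 36.1, 0.2)

fig, ax = plt.subplots()
cf = ax.contourf(x, y, field, levels=levels, cmap='jet')
ax.contour(x, y, field, levels=levels[0::2], colors='black')  # every 2nd level
ax.invert_yaxis()                                             # depth grows downward
fig.colorbar(cf, ax=ax)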
Annual Mean'\np=plot_field(dif, 6, clevs1, sstanom, frange)\nax=p.axis; ax.set_ylim(500., 0.)\npl.grid(); pl.tight_layout(); pl.show()\npl.savefig(path+'/'+varname+'-obs_eq_depth.png')\n\n","sub_path":"GEOS_Util/coupled_diagnostics/analysis/clim/salt.py","file_name":"salt.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"36769207","text":"from bs4 import BeautifulSoup\nfrom pyhanlp import *\nimport re\n\n\nclass HanlpUnit:\n def __init__(self):\n self.CustomDictionary = JClass(\"com.hankcs.hanlp.dictionary.CustomDictionary\")\n self.added_word_list = list()\n\n def add_word_list(self, word_list):\n \"\"\"\n 添加自定义词\n :param word_list:词列表,格式[{\"word\": \"\", \"mask\": \"\"}] word为词名,mask为词性\n :return:\n \"\"\"\n try:\n for item in word_list:\n result = self.CustomDictionary.add(item[\"word\"], item[\"mask\"])\n if result is False:\n self.added_word_list.append(item)\n return True\n except Exception:\n return False\n\n def cut(self, sentence):\n \"\"\"\n 分词\n :param sentence: 要分词的句子\n :return:\n \"\"\"\n cut_result = HanLP.segment(sentence)\n for i in range(len(cut_result)):\n cut_result[i] = str(cut_result[i])\n for item in self.added_word_list:\n if cut_result[i].split(\"/\")[0] == item[\"word\"]:\n cut_result[i] = item[\"word\"] + \"/\" + item[\"mask\"]\n break\n return cut_result\n\n @staticmethod\n def get_text_from_html(text):\n \"\"\"\n 从html内容中提取文本,主要是针对爬下来的带有html标签的新闻内容\n :param text:\n :return:\n \"\"\"\n text = \"
\" + text + \"
\"\n soup = BeautifulSoup(text, \"html.parser\")\n return soup.get_text()\n\n @staticmethod\n def split_paragraph(para):\n \"\"\"\n 将段落拆成句子\n :param para:\n :return:\n \"\"\"\n para = re.sub('([。!?\\?])([^”’])', r\"\\1\\n\\2\", para) # 单字符断句符\n para = re.sub('(\\.{6})([^”’])', r\"\\1\\n\\2\", para) # 英文省略号\n para = re.sub('(\\…{2})([^”’])', r\"\\1\\n\\2\", para) # 中文省略号\n para = re.sub('([。!?\\?][”’])([^,。!?\\?])', r'\\1\\n\\2', para)\n # 如果双引号前有终止符,那么双引号才是句子的终点,把分句符\\n放到双引号后,注意前面的几句都小心保留了双引号\n para = para.rstrip() # 段尾如果有多余的\\n就去掉它\n # 很多规则中会考虑分号;,但是这里我把它忽略不计,破折号、英文双引号等同样忽略,需要的再做些简单调整即可。\n return para.split(\"\\n\")\n\n def __del__(self):\n \"\"\"\n 消除对象时撤销已添加词汇\n :return:\n \"\"\"\n for item in self.added_word_list:\n self.CustomDictionary.remove(item[\"word\"])\n","sub_path":"knowledge_map_system3.0/model/hanlpUnit.py","file_name":"hanlpUnit.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"353622584","text":"from .c_player_meta import PlayerMeta\n\n\nclass RobotMeta(PlayerMeta):\n \"\"\"Представляет базовый класс Роботов.\n\n Поля:\n claw -- клешня робота, в которой он хранит выбранные кости\n dices_for_pick -- копия списка с выпавшими костями. Ее использование\n освобождает от передачи списка dices как аргумента в\n функции take_*\n thinking -- булевая переменная, определяющая, будет ли робот думать,\n задерживая свой ход\n\n \"\"\"\n\n type = \"Robot\"\n\n def __init__(self, game_mode, screen, name, thinking=False):\n PlayerMeta.__init__(self, game_mode, screen, name)\n self.claw = []\n self.dices_for_pick = []\n self.thinking = thinking\n\n def take_range(self):\n \"\"\"Забирает в claw наибольший диапазон костей.\"\"\"\n claw = self.claw\n dices = self.dices_for_pick\n if 6 in dices:\n claw.extend(range(1, 7))\n dices.clear()\n else:\n for d in range(1, 6):\n claw.append(d)\n dices.remove(d)\n\n def take_row(self):\n \"\"\"Забирает в claw ряд(ы) костей.\"\"\"\n claw = self.claw\n dices = self.dices_for_pick[:]\n for d in dices:\n if dices.count(d) >= 3:\n claw.append(d)\n self.dices_for_pick.remove(d)\n\n def take_single(self, amount=4):\n \"\"\"Забирает в claw единичные кости, где amount - количество.\"\"\"\n claw = self.claw\n dices = self.dices_for_pick\n # По умолчанию amount = 4, т.к. 
это максимальное число единичных косей\n for i in (1, 5): # Сначала забираются кости со значением \"1\"\n for d in dices[:]:\n if amount == 0:\n break\n if d == i:\n claw.append(d)\n dices.remove(d)\n amount -= 1\n\n def rowcombo_dice(self):\n \"\"\"Возвращает значение костей, образующих комбо 'row'.\"\"\"\n dices = self.dices_for_pick\n out = []\n for d in dices:\n if dices.count(d) >= 3 and d not in out:\n out.append(d)\n return out\n","sub_path":"Classes/Players/c_robot_meta.py","file_name":"c_robot_meta.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"222745015","text":"# Module: bytecode\n# Date: 7th September 2014\n# Author: James Mills, prologic at shortcircuit dot net dot au\n\n\n\"\"\"Bytecode\"\"\"\n\n\nfrom .rstringutils import string_escape_encode\n\n\nbytecodes = [\n \"BIND\", # Bind messages on the stack into a message chain (arguments)\n \"PUSH\", # Push a new message on the stack from a message and arguments\n \"DROP\", # Discard the top of the receiver stack (arguments)\n \"LOAD\", # Load a constant value onto the stack\n \"EVAL\", # Evaluate the message on the top of the stack with arguments\n \"STOP\", # Terminate the interpreter (virtual machone)\n]\n\n\nglobals().update(dict((bytecode, i) for i, bytecode in enumerate(bytecodes)))\n\n\ndef dis_one(code):\n return bytecodes[ord(code)]\n\n\nclass ByteCode(object):\n\n _immutable_fields_ = [\"code\", \"constants[*]\"]\n\n def __init__(self, code, constants):\n self.code = code\n self.constants = constants\n\n def dump_code(self):\n lines = []\n i = 0\n for i in range(0, len(self.code), 2):\n c = self.code[i]\n c2 = self.code[i + 1]\n lines.append(bytecodes[ord(c)] + \" \" + str(ord(c2)))\n return \"\\n\".join(lines)\n\n def dump_constants(self):\n lines = []\n i = 0\n for i in range(0, len(self.constants)):\n lines.append(\n str(i) + \": \" + string_escape_encode(self.constants[i], \"'\")\n )\n return \"\\n\".join(lines)\n\n def repr(self):\n lines = []\n\n lines.append(\"Code:\")\n lines.append(self.dump_code())\n\n lines.append(\"Constants:\")\n lines.append(self.dump_constants())\n\n return \"\\n\".join(lines)\n","sub_path":"mio/bytecode.py","file_name":"bytecode.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"356894357","text":"from tkinter import *\r\nimport random\r\n\r\n# the game data for the initial game state\r\ndef init():\r\n data.playerX = 250\r\n data.playerY = 550\r\n data.circleX = 250\r\n data.circleY = 0\r\n data.gameOver = False\r\n\r\n# events updating the game data\r\ndef keyPressed(event):\r\n if event.keysym == \"Right\" and data.playerX < 550:\r\n data.playerX += 5\r\n elif event.keysym == \"Left\" and data.playerX > 0:\r\n data.playerX -= 5\r\n\r\ndef moveCircle():\r\n if not data.gameOver:\r\n data.circleY += 10\r\n\r\n# the game data updating the game state\r\ndef timerFired():\r\n moveCircle()\r\n if checkCollision(data.playerX, data.playerY,\r\n data.circleX, data.circleY,\r\n 10, 50):\r\n data.gameOver = True\r\n if data.circleY > 600:\r\n data.gameOver = True\r\n\r\ndef checkCollision(x1, y1, x2, y2, r1, r2):\r\n distance = ((x2-x1)**2 + (y2 - y1)**2)**0.5\r\n return distance <= r1 + r2\r\n\r\n# the game state updating what is drawn\r\ndef redrawAll(canvas):\r\n canvas.create_oval(data.playerX - 10, data.playerY - 10,\r\n data.playerX + 10, data.playerY + 10,\r\n fill=\"red\")\r\n 
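# --- editor's note: mio/bytecode.py above injects each opcode name into the module
# namespace as an integer via globals().update(...), so later code can write LOAD or
# STOP directly. The same trick in miniature (linters will flag the bare names, but
# at module scope they resolve fine at runtime):
bytecodes = ["BIND", "PUSH", "DROP", "LOAD", "EVAL", "STOP"]
globals().update({name: i for i, name in enumerate(bytecodes)})

assert LOAD == 3 and STOP == 5    # names now resolve to opcode numbers
assert bytecodes[EVAL] == "EVAL"  # and dis_one()-style reverse lookup still works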
canvas.create_oval(data.circleX - 50, data.circleY - 50,\r\n data.circleX + 50, data.circleY + 50,\\\r\n fill=\"yellow\")\r\n if data.gameOver:\r\n canvas.create_text(300, 250, text=\"Game Over\", font=\" Arial 20\")\r\n\r\n\r\n# animation setup code below here #\r\n\r\nclass Struct(object): pass\r\ndata = Struct()\r\n\r\ndef run(width=600, height=600):\r\n def redrawAllWrapper(canvas):\r\n canvas.delete(ALL)\r\n redrawAll(canvas)\r\n canvas.update() \r\n\r\n def keyPressedWrapper(event, canvas):\r\n keyPressed(event)\r\n redrawAllWrapper(canvas)\r\n\r\n def timerFiredWrapper(canvas):\r\n timerFired()\r\n redrawAllWrapper(canvas)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas)\r\n\r\n # Set up data and call init\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 200 # milliseconds\r\n init()\r\n # create the root and the canvas\r\n root = Tk()\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"\", lambda event:\r\n keyPressedWrapper(event, canvas))\r\n timerFiredWrapper(canvas)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun()\r\n","sub_path":"past_iterations/01_first_design/Week 3 Iteration & Graphics/Circle Clash Lab/Programming Files/basic_circle_clash.py","file_name":"basic_circle_clash.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"314416472","text":"\"\"\"\n\nUsage:\n exverify [( )] [options]\n exverify (-h | --help)\n exverify (-v | --version)\n\nOption:\n -h --help - Show this screen.\n -v --version - Show ExVerify version.\n --verbose - Print more text.\n\n - GDS file name.\n\n - Cell name to be extracted.\n - `ntrons` -> list nTron cells.\n - `jjs` -> list Josephson juction cells.\n - `vias` -> list VIA cells.\n\n - Process Data File name (.json).\n\n -s --select - Extrude selected cell to a new, seperate gds file.\n -p --plot - Only for debugging. Plot the LVS graphs.\n\n --filter - Disable the graph filtering algorithms.\n --combine - Disable combining mask graphs.\n --edges - Disable the graph edge generation. \n\n --model - Generate a 3D circuit model from the .gds file.\n --log=log - Generate a 3D circuit model from the .gds file.\n\n --viewer=all - Use the gdspy viewer for debugging.\n auron - View the geometry that will be send to the Auron package.\n ix - GDS file prepaired for InductEx.\n\n\"\"\"\n\n\nfrom docopt import docopt\nfrom itertools import count\n\nimport networkx as nx\n\nimport os\nimport sys\nimport yuna\nimport auron\nimport rikku\nimport gdspy\nimport pygmsh\n\nimport gdspy as gdscell\n\nfrom exverify import tools\nfrom exverify import version\nfrom exverify import convert\n\nfrom .tools import logging\n\nfrom termcolor import colored\nfrom collections import defaultdict\nfrom networkx.algorithms import isomorphism\n\n\n\"\"\"\nHacker: 1h3d*n\nFor: Volundr\nDocs: Algorithm 1\nDate: 31 April 2017\n\nDescription: Morph the moat layer and the wire layers.\n\n1) Get a list of all the polygons inside the GDS file.\n2) Send this list to the Clip library with the wiring\n layer number and the moat layer number as parameters.\n3) Get the union of all the wiring layer polygons that\n are connected. 
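# --- editor's note: checkCollision() in the circle-clash record above treats two
# circles as touching when their centre distance is at most r1 + r2. A worked check
# of that predicate with the game's own radii (player 10, falling circle 50):
def circles_collide(x1, y1, x2, y2, r1, r2):
    distance = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5
    return distance <= r1 + r2

# contact begins once the centres are <= 60 pixels apart
assert circles_collide(250, 550, 250, 495, 10, 50)      # 55 apart: hit
assert not circles_collide(250, 550, 250, 480, 10, 50)  # 70 apart: miss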
Update this to check for vias.\n4) Get the intersection of the moat layer with the\n wiring layer and save this in a new polygon.\n5) Get the difference of the moat layer with the\n wiring layer and save this in a new polygon.\n6) Join the intersected and difference polygons\n to form a list of atleast 3 polygons.\n7) We now know which part of the wiring layer\n goes over the moat is most probably mutually\n connected to wiring layer 2.\n8) Send this polygon structure to GMSH.\n\"\"\"\n\n\ndef _cell_accepted(args):\n \"\"\"\n Filter the unused cellreferences using the seleted cell\n as the new top-level cell.\n \"\"\"\n\n gds_file = os.getcwd() + '/' + args[''] + '.gds'\n gdsii = gdspy.GdsLibrary()\n gdsii.read_gds(gds_file, unit=1.0e-12)\n\n accept = True\n\n name = args['']\n\n if name is None:\n tools.list_layout_cells(gdsii)\n accept = False\n elif name == 'ntrons':\n tools.list_ntron_cells(gdsii)\n accept = False\n elif name == 'jjs':\n tools.list_jj_cells(gdsii)\n accept = False\n elif name == 'vias':\n tools.list_via_cells(gdsii)\n accept = False\n else:\n if name not in gdsii.cell_dict.keys():\n raise ValueError('not a valid cell name')\n\n if accept:\n gdspy.GdsLibrary(name='yuna_library')\n usercell = gdsii.extract(name)\n\n if args['--select']:\n cells = [usercell]\n for cell in usercell.get_dependencies(True):\n cells.append(cell)\n\n gdscell.write_gds(usercell.name + '.gds', \n cells,\n name='yuna_library',\n unit=1.0e-12)\n\n return gdsii, usercell\n return None, None\n\n\ndef _raise_phoenix(args):\n print('\\n----- ExVerify -----')\n\n args['--combine'] = not args['--combine']\n args['--filter'] = not args['--filter']\n args['--edges'] = not args['--edges']\n\n tools.parameter_print(args)\n\n tools.args = args\n\n if args['--log'] == 'debug':\n logging.basicConfig(level=logging.DEBUG)\n elif args['--log'] == 'info':\n logging.basicConfig(level=logging.INFO)\n\n print(colored('gdspy ', 'green'), end='')\n print('version - {}'.format(gdspy.__version__))\n print(colored('pygmsh ', 'green'), end='')\n print('version - {}'.format(pygmsh.__version__))\n print(colored('yuna ', 'green'), end='')\n print('version - {}'.format(yuna.__version__))\n print(colored('auron ', 'green'), end='')\n print('version - {}'.format(auron.__version__))\n print(colored('rikku ', 'green'), end='')\n print('version - {}'.format(rikku.__version__))\n\n\ndef phoenixdown():\n \"\"\"\n Main function of the Auron package.\n Generates a subgraph for each wirechain\n and then combines them into one graph network.\n \"\"\"\n\n args = docopt(__doc__, version=version.__version__)\n\n _raise_phoenix(args)\n\n gdsii, cell = _cell_accepted(args)\n\n if args[''] is not None:\n pdk_file = os.getcwd() + '/' + args[''] + '.json'\n\n json_devices = {'via': 1, 'ntron': 7}\n\n if cell is not None:\n yuna_geom = yuna.grand_summon(cell,\n pdk_file,\n json_devices)\n\n if args['--model']:\n model = rikku.mix(yuna_geom)\n print(model)\n else:\n layoutgraph = auron.bushido(yuna_geom,\n args['--combine'],\n args['--filter'],\n args['--edges'],\n args['--plot'])\n\n g1 = layoutgraph.g.copy()\n\n if g1 is None:\n print('... 
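# --- editor's note: exverify's final LVS step (just below) compares the extracted
# layout graph against the schematic graph with networkx's isomorphism matcher. A
# minimal sketch of that check on two hand-built graphs:
import networkx as nx
from networkx.algorithms import isomorphism

g1 = nx.Graph([("a", "b"), ("b", "c")])  # e.g. layout netlist
g2 = nx.Graph([("x", "y"), ("y", "z")])  # e.g. schematic netlist

gm = isomorphism.GraphMatcher(g1, g2)
print("match" if gm.is_isomorphic() else "mismatch")  # -> match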
no graph was generated')\n else:\n convert.to_netlist(g1, args[''])\n g2 = convert.to_graph(args[''])\n\n GM = isomorphism.GraphMatcher(g1, g2)\n if GM.is_isomorphic():\n print('\\nYES - LN and SN matches :)\\n')\n else:\n print('\\nNO - LN & SN does not match :(\\n')\n","sub_path":"exverify/overdrive.py","file_name":"overdrive.py","file_ext":"py","file_size_in_byte":6214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"217669112","text":"import cv2\nimport numpy as np\n\nvideoLeftUp = cv2.VideoCapture('./res/2_003_013.mp4')\nvideoLeftDown = cv2.VideoCapture('./res/2_003_014.mp4')\nvideoRightUp = cv2.VideoCapture('./res/2_003_015.mp4')\nvideoRightDown = cv2.VideoCapture('./res/2_003_016.mp4')\n\nfps = videoLeftUp.get(cv2.CAP_PROP_FPS)\n\nwidth = (int(videoLeftUp.get(cv2.CAP_PROP_FRAME_WIDTH)))\nheight = (int(videoLeftUp.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\n#videoWriter = cv2.VideoWriter('./out/4in1.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps, (width, height))\n\nsuccessLeftUp, frameLeftUp = videoLeftUp.read()\nsuccessLeftDown , frameLeftDown = videoLeftDown.read()\nsuccessRightUp, frameRightUp = videoRightUp.read()\nsuccessRightDown, frameRightDown = videoRightDown.read()\n\nwhile successLeftUp and successLeftDown and successRightUp and successRightDown:\n frameLeftUp = cv2.resize(frameLeftUp, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)\n frameLeftDown = cv2.resize(frameLeftDown, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)\n frameRightUp = cv2.resize(frameRightUp, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)\n frameRightDown = cv2.resize(frameRightDown, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)\n\n frameUp = np.hstack((frameLeftUp, frameRightUp))\n frameDown = np.hstack((frameLeftDown, frameRightDown))\n frame = np.vstack((frameUp, frameDown))\n\n\n if cv2.waitKey(1) == 27:\n break\n cv2.imshow('Test camera', frame)\n successLeftUp, frameLeftUp = videoLeftUp.read()\n successLeftDown, frameLeftDown = videoLeftDown.read()\n successRightUp, frameRightUp = videoRightUp.read()\n successRightDown, frameRightDown = videoRightDown.read()\n\n#videoWriter.release()\nvideoLeftUp.release()\nvideoLeftDown.release()\nvideoRightUp.release()\nvideoRightDown.release()","sub_path":"OpenCV/106.py","file_name":"106.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"598368986","text":"import lib.helper_functions as helpers\nimport uuid\nimport datetime\nfrom datetime import timedelta\n\ndef getTransactionsByDirection(transactions, direction):\n res = [t for t in transactions if t['direction'] == direction]\n if res:\n return res\n else:\n return []\n\ndef setTransactionStartEnd(transaction, start, end):\n transaction['start'] = start\n transaction['end'] = end\n return transaction\n\ndef setNewTransactionBudget(transaction, new_budget):\n \"\"\"Returns updated transaction\"\"\"\n transaction['total_budget'] = new_budget\n num_of_payments = len(transaction['payments'])\n divided_budget = new_budget/num_of_payments\n for p in transaction['payments']:\n p['amount'] = divided_budget\n return transaction\n\ndef rescalePayments(transaction, num_of_payment_per_year):\n total_budget = transaction['total_budget']\n start_date = helpers.strToDate(transaction['start'])\n end_date = helpers.strToDate(transaction['end'])\n delta = end_date - start_date\n\n num_of_payments = 
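# --- editor's note: the OpenCV record above tiles four half-size frames into one
# 2x2 canvas with np.hstack/np.vstack. The core array operation, on dummy frames:
import numpy as np

h, w = 240, 320
tl, tr, bl, br = (np.zeros((h, w, 3), np.uint8) for _ in range(4))

top = np.hstack((tl, tr))        # shape (240, 640, 3)
bottom = np.hstack((bl, br))
grid = np.vstack((top, bottom))  # shape (480, 640, 3): same size as one full frame
assert grid.shape == (2 * h, 2 * w, 3)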
round(delta.days/(int(365/num_of_payment_per_year)))\n num_of_payments = int(num_of_payments)\n if num_of_payments == 0:\n num_of_payments = 1\n\n budget_amount_per_payment = total_budget / num_of_payments\n\n interval = int(delta.days/num_of_payments)\n payments = []\n\n def generatePayments(x, add_days=0):\n p_date = start_date + timedelta(days=(interval*x)+add_days)\n return dict(\n date=p_date.strftime('%Y-%m-%d'),\n amount=budget_amount_per_payment,\n id=str(uuid.uuid4())\n )\n for x in reversed(range(num_of_payments,0,-1)):\n payments.append(generatePayments(x)) \n # if num_of_payment_per_year > 1: \n # for x in reversed(range(num_of_payments,0,-1)):\n # payments.append(generatePayments(x))\n # else:\n # for x in range(num_of_payments):\n # payments.append(generatePayments(x, add_days=35))\n \n transaction['payments'] = payments\n return transaction\n","sub_path":"lib/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"178875742","text":"from django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom cv.forms import cvForm\nfrom cv.models import cv\nfrom django.shortcuts import render, redirect\nfrom django.utils import timezone\n\n# Create your views here.\n\ndef cv_view(request):\n content = cv.objects.order_by('-updated')[:1]\n return render(request, 'cv/cv.html', {'content' : content})\n\ndef cv_edit(request):\n form = cvForm() \n if request.method == \"POST\":\n form = cvForm(request.POST, request.user)\n if form.is_valid():\n new = form.save(commit=False)\n new.auther = request.user\n new.updated = timezone.now()\n new.save()\n return redirect('/cv/')\n\n return render(request, 'cv/cv_edit.html', {'form': form})\n else:\n if len(cv.objects.order_by('-updated')) != 0:\n content = cv.objects.order_by('-updated')[:1]\n form = cvForm(instance=content[0])\n else:\n form = cvForm()\n return render(request, 'cv/cv_edit.html', {'form': form})","sub_path":"cv/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"606935509","text":"# HeatMay.py\r\n# Author: Dustin Wilson\r\n\r\nimport plotly.graph_objects as go\r\nimport csv\r\n\r\n# Creates a heat map of a given csv file\r\n# Path is the location of the csv file\r\ndef HeatMap(path):\r\n\r\n # Number of rows in the csv file\r\n numRow = 0\r\n # Number of cols\r\n numCol = 0\r\n\r\n # Opens the csv file and reads in each line\r\n # Adds each line to the array\r\n # First pass is to know how big to intialize the array\r\n with open(path, newline='') as File: \r\n reader = csv.reader(File)\r\n for row in reader:\r\n numCol = 0\r\n for indiv in row:\r\n numCol = numCol + 1\r\n numRow = numRow + 1\r\n \r\n # Intializes an array\r\n # Has zeros in all positions initially\r\n array = [[0 for i in range(numCol)] for j in range(numRow)]\r\n\r\n # Opens the csv file and reads in each line\r\n # Adds each line to the array\r\n # Second pass actually sets values\r\n with open(path, newline='') as File: \r\n reader = csv.reader(File)\r\n i = 0\r\n for row in reader:\r\n j = 0\r\n for indiv in row:\r\n array[i][j] = indiv\r\n j = j + 1\r\n i = i + 1\r\n\r\n # Creates the heat map based on the array which is built from csv file\r\n fig = go.Figure(data=go.Heatmap(z = array))\r\n # Displays 
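# --- editor's note: rescalePayments() above divides total_budget evenly over the
# number of payment dates that fit between start and end. The core arithmetic,
# isolated (values illustrative):
from datetime import date

total_budget, payments_per_year = 1200.0, 4
delta_days = (date(2021, 1, 1) - date(2020, 1, 1)).days      # 366
num_payments = max(1, round(delta_days / (365 // payments_per_year)))
assert num_payments == 4
assert total_budget / num_payments == 300.0                  # per-payment amount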
the figure\r\n fig.show()","sub_path":"Test/HeatMap.py","file_name":"HeatMap.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"413400841","text":"from google.appengine.ext import db\nfrom datetime import date, datetime\nfrom dataFile import FileObject, dataObject\nfrom timeUtilities import GMT1, GMT2, UTC\nimport os, time\nimport cgi\nimport webapp2\n\n\nclass sensorsHandler(webapp2.RequestHandler):\n\n\tdef get(self):\n\t\tutc = datetime.utcnow()\n\t\tself.response.write(\"current date \" + utc.strftime(\"%A %d %B %Y %I:%M%p\") + '
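# --- editor's note: HeatMap() above reads the CSV twice, once to size the array and
# once to fill it. A single-pass alternative that builds the rows directly and casts
# to float (same result for a rectangular numeric CSV; the plotly call is unchanged):
import csv

def read_matrix(path):
    with open(path, newline='') as f:
        return [[float(cell) for cell in row] for row in csv.reader(f)]

# usage sketch: go.Figure(data=go.Heatmap(z=read_matrix('data.csv'))).show()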
<br>')\n\n\t\tUKTime = datetime.fromtimestamp(time.mktime(utc.timetuple()), GMT1())\n\t\tRwandaTime = datetime.fromtimestamp(time.mktime(utc.timetuple()), GMT2())\n\n\t\tself.response.write(\"UK Time: \" + UKTime.ctime());\n\t\tself.response.write(\"<br>UK Timestamp \" + str(time.mktime(UKTime.timetuple())) )\n\t\tself.response.write(\"<br>Rwanda Time: \" + RwandaTime.ctime() + \"<br>\");\n\n\t\tdata_query = db.GqlQuery(\"SELECT * \"\n\t\t\t\t\t\t\t\t \"FROM dataObject \"\n\t\t\t\t\t\t\t\t \"ORDER BY tdate DESC LIMIT 1\"\n \t)\n\n\t\tself.response.out.write(\"<br>Number of rows: \" + str( data_query.count()) )\n\n\n\t\tnewObject = dataObject()\n\t\tnewObject.put()\n\n\t\tif data_query is not None:\n\n\t\t\tfor temp in data_query:\n\t\t\t\tself.response.write('<br>ACcurrent1 = %s<br>' % cgi.escape( str(temp.ac_current1) ) )\n\t\t\t\tself.response.write('<br>ACcurrent2 = %s<br>' % cgi.escape( str(temp.ac_current2) ) )\n\t\t\t\tself.response.write('<br>ACvoltage1 = %s<br>' % cgi.escape( str(temp.ac_voltage1) ) )\n\t\t\t\tself.response.write('<br>ACvoltage2 = %s<br>' % cgi.escape( str(temp.ac_voltage2) ) )\n\n\t\t\t\tself.response.write('<br>DCcurrent1 = %s<br>' % cgi.escape( str(temp.dc_current1) ) )\n\t\t\t\tself.response.write('<br>DCcurrent2 = %s<br>' % cgi.escape( str(temp.dc_current2) ) )\n\t\t\t\tself.response.write('<br>DCcurrent3 = %s<br>' % cgi.escape( str(temp.dc_current3) ) )\n\t\t\t\tself.response.write('<br>DCcurrent4 = %s<br>' % cgi.escape( str(temp.dc_current4) ) )\n\n\t\t\t\tself.response.out.write('<br>DCvoltage1 = %s<br>' % cgi.escape( str(temp.dc_voltage1) ) )\n\t\t\t\tself.response.out.write('<br>DCvoltage2 = %s<br>' % cgi.escape( str(temp.dc_voltage2) ) )\n\t\t\t\tself.response.out.write('<br>DCvoltage3 = %s<br>' % cgi.escape( str(temp.dc_voltage3) ) )\n\t\t\t\tself.response.out.write('<br>DCvoltage4 = %s<br>' % cgi.escape( str(temp.dc_voltage4) ) )\n\n\tdef post(self):\n\t\t# get data from mbed and write it to a buffer\n\t\tbuf = self.request.get('e.quinoxsensors') \n\n\t\t# convert data to an array by detecting whitespace\n\t\tarray = buf.split(' ')\n\n\t\tutc = datetime.utcnow()\n\t\tUKtime = datetime.fromtimestamp(time.mktime(utc.timetuple()), GMT1())\n\n\n\t\tdata_query = db.GqlQuery(\"SELECT * \"\n\t\t\t\t\t\t\t\t \"FROM dataObject \"\n\t\t\t\t\t\t\t\t)\n\t\tcount = data_query.count()\n\n\t\tif not (buf == ''):\n\t\t\ti = 0\n\n\t\t\tfor index, item in enumerate(array):\n\t\t\t\ti = i+1\n\n\t\t\t\tif( i%12 == 0):\n\t\t\t\t\tcount = count + 1\n\n\t\t\t\t\tnewObject = dataObject(tdate = UKtime)\n\t\t\t\t\tnewObject.sampleTime = int(array[index - 12])\n\t\t\t\t\tnewObject.ac_current1 = int(array[index - 11])\n\t\t\t\t\tnewObject.ac_current2 = int(array[index - 10])\n\t\t\t\t\tnewObject.ac_voltage1 = int(array[index - 9])\n\t\t\t\t\tnewObject.ac_voltage2 = int(array[index - 8])\n\n\t\t\t\t\tnewObject.dc_current1 = int(array[index - 7])\n\t\t\t\t\tnewObject.dc_current2 = int(array[index - 6])\n\t\t\t\t\tnewObject.dc_current3 = int(array[index - 5])\n\t\t\t\t\tnewObject.dc_current4 = int(array[index - 4])\n\n\t\t\t\t\tnewObject.dc_voltage1 = int(array[index - 3])\n\t\t\t\t\tnewObject.dc_voltage2 = int(array[index - 2])\n\t\t\t\t\tnewObject.dc_voltage3 = int(array[index - 1])\n\t\t\t\t\tnewObject.dc_voltage4 = int(array[index])\n\t\t\t\t\tnewObject.no = count\n\t\t\t\t\tnewObject.put()\n\n\t\t\ttmp = UKtime.strftime(\"%A %d %B %Y %I:%M%p\")\n\t\t\tself.response.write('Google App Engine Web server received file on ' + tmp+ ' (UKTime)')\n\nclass logHandler(webapp2.RequestHandler):\n\tdef get(self):\n\t\tquery = db.GqlQuery(\"SELECT * \"\n\t\t\t\t\t\t\t\"FROM FileObject \"\n\t\t\t\t\t\t\t\"ORDER BY tdate DESC LIMIT 1\"\n\t\t\t\t\t\t\t)\n\n\t\tif query is not None:\n\t\t\tfor tmp in query:\n\t\t\t\tself.response.write('log :<br>
' + cgi.escape(tmp.text))\n\n\tdef post(self):\n\t\tbuf = self.request.get('e.quinoxlog')\n\n\t\tif not ( buf == ''):\n\t\t\tUKtime = datetime.fromtimestamp(time.mktime(datetime.utcnow().timetuple()), GMT1())\n\t\t\tnewObject = FileObject(text = buf, tdate = UKtime)\n\t\t\tnewObject.put()\n\n\t\t\t_time = UKtime.strftime(\"%A %d %B %Y %I:%M%p\")\n\t\t\tself.response.write('Google App Engine Web server received file on ' + _time + ' (UKTime)')\n\n\n\n\n\n\n","sub_path":"2012-2013/Software/Google App Engine/datalogger2013Batima/modemUtilities.py","file_name":"modemUtilities.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"450413698","text":"# Denem Orhun\n\ndef findClosestValueInBst(tree, target) -> int:\n print(\"Target: \" , target)\n # we need a helper function to call recursively\n # tree.value starts with root\n closest = _findClosestValueInBst(tree, target, tree.value)\n print (\"This is our closest: \" ,closest)\n return closest\n \n\n# closest is a value of a node\ndef _findClosestValueInBst(tree, target, closest) -> int:\n\t\n\t# if the difference between target and current node \n\t# is less that established difference between \n\t# closest and target, update closest node\n\tdiff = abs(closest - target)\n\t\n\tif abs(target - tree.value) < diff:\n\t\tprint(\"Updating the closest value\")\n\t\tclosest = tree.value\n\telse:\n\t\tprint(\"Eliminating half of tree\")\n\t\n\t# If target value is more than current node, traverse right\n\tif target > tree.value:\n\t\tprint(\"Traversing Right tree, starting with\", tree.value)\n\t\treturn _findClosestValueInBst(tree.right, target, closest)\n\t# if target value is less than current node, traverse left\n\telif target < tree.value:\n\t\tprint(\"Traversing Left tree, starting with\", tree.value)\n\t\treturn _findClosestValueInBst(tree.left, target, closest)\n\t# If target and closest are equal \n\telse:\n\t\treturn closest\n\n\t\n# This is the class of the input tree. 
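# --- editor's note: despite the "iterative" file name, the record above solves
# closest-value-in-BST recursively. An equivalent iterative walk over the same node
# contract (value/left/right), using O(1) extra space:
def find_closest_value_iter(tree, target):
    closest = tree.value
    node = tree
    while node is not None:
        if abs(target - node.value) < abs(target - closest):
            closest = node.value
        if target > node.value:
            node = node.right   # discard the left half
        elif target < node.value:
            node = node.left    # discard the right half
        else:
            break               # exact match found
    return closest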
Do not edit\nclass BST:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n","sub_path":"Lesson 2 - Data Structures/2 Data Structures/Trees/find_closest_value_in_bst_iterative.py","file_name":"find_closest_value_in_bst_iterative.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"66483627","text":"from my_queue import Queue\n\n\ndef queue_list(my_list, my_queue):\n \"\"\"\n\n @param my_list: list\n @param my_queue: Queue\n @return: None\n \"\"\"\n\n for i in my_list:\n my_queue.add(i)\n while not my_queue.is_empty():\n top_element = my_queue.remove()\n if isinstance(top_element, list):\n for i in top_element:\n my_queue.add(i)\n else:\n print(top_element)\n\n\nif __name__ == \"__main__\":\n q = Queue()\n L = ['a', ['b', ['c', 'd'], 'f'], ['g', 'i'], 'j']\n queue_list(L, q)\n\n attempts_sum = 0\n my_int = int(input(\"Please type an int\"))\n while my_int != 148:\n attempts_sum += my_int\n q.add(my_int)\n my_int = int(input(\"Please type an int\"))\n\n print('\\n', attempts_sum)\n","sub_path":"labs/lab3/queue driver.py","file_name":"queue driver.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"180043216","text":"\"\"\"\r\nInitialize Flask app\r\n\r\n\"\"\"\r\nfrom flask import Flask, jsonify, request, make_response\r\nimport os, sys\r\nimport traceback\r\nfrom flask_debugtoolbar import DebugToolbarExtension\r\nfrom werkzeug.debug import DebuggedApplication\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom models import Plant, Supplier, update_model, GrowWeek\r\nfrom standard_models import DBEntry\r\n\r\nfrom google.appengine.api.taskqueue import taskqueue\r\n\r\n\r\napp = Flask('application')\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = DBEntry.get_connection_string('Datawarehouse')\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\r\ndb = SQLAlchemy(app)\r\n\r\nimport database\r\n\r\nif os.getenv('FLASK_CONF') == 'TEST':\r\n app.config.from_object('application.settings.Testing')\r\n\r\nelif 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'):\r\n # Development settings\r\n app.config.from_object('application.settings.Development')\r\n # Flask-DebugToolbar\r\n toolbar = DebugToolbarExtension(app)\r\n\r\n # Google app engine mini profiler\r\n # https://github.com/kamens/gae_mini_profiler\r\n app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)\r\n _temp = __import__('gae_mini_profiler', globals(), locals(), ['profiler', 'templatetags'], -1)\r\n profiler = _temp.profiler\r\n templatetags = _temp.templatetags\r\n #from gae_mini_profiler import profiler, templatetags\r\n #from flasext.gae_mini_profiler import profiler\r\n\r\n @app.context_processor\r\n def inject_profiler():\r\n return dict(profiler_includes=templatetags.profiler_includes())\r\n app.wsgi_app = profiler.ProfilerWSGIMiddleware(app.wsgi_app)\r\n \r\n Plant.dev_get_create()\r\n Supplier.dev_get_create()\r\nelse:\r\n app.config.from_object('application.settings.Production')\r\n\r\n# Enable jinja2 loop controls extension\r\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\r\n\r\n# Pull in URL dispatch routes\r\nimport urls\r\nimport restful\r\n\r\n@app.route('/rest')\r\n@app.route('/rest//',methods=['DELETE', 'GET', 'GET_METADATA', 'POST', 'PUT'])\r\ndef rest_impl(path):\r\n return restful.process_rest_request(path, 
request,make_response())\r\n\r\n\r\n@app.route('/notes/save/<pg_key>',methods=['POST'])\r\ndef save_note(pg_key):\r\n    #jin = request.form\r\n    jin = request.get_json(force=True)\r\n    return restful.notes_wrapper(plantgrow_key=pg_key, note = jin['note'], method='save')\r\n\r\n@app.route('/notes/get/<pg_key>',methods=['GET'])\r\ndef get_notes(pg_key):\r\n    return restful.notes_wrapper(plantgrow_key=pg_key, method='get')\r\n\r\n@app.route('/notes/delete/<nt_key>',methods=['GET'])\r\ndef delete_note(nt_key):\r\n    return restful.notes_wrapper(note_key=nt_key, method='delete')\r\n\r\n@app.route('/week_summary/<year>/<week_num>', methods=['GET'])\r\ndef get_summary(year, week_num):\r\n    return restful.get_week_summary(year, week_num)\r\n\r\n\r\n@app.route('/plantgrow/availability/<plantgrow>', methods=['GET'])\r\ndef get_availability(plantgrow):\r\n    try:\r\n        avail = restful.get_availability(plantgrow)\r\n        return jsonify({'availability': avail})\r\n    except:\r\n        traceback.print_exc(file=sys.stdout)\r\n        print(\"Unexpected error:\", sys.exc_info()[0])\r\n        \r\n    return jsonify({'availability':0})\r\n\r\n@app.route('/plantgrow/update/',methods=['GET','POST'])\r\ndef upd_plantgrow():\r\n    try:\r\n        jpg = request.get_json()\r\n        return restful.update_plant_grow(jpg['plant'], jpg['week'], jpg['actual'])\r\n    except Exception:\r\n        return traceback.format_exc()\r\n\r\n@app.route('/supplier_plants/update/',methods=['GET','POST'])\r\ndef update_supplier_plants():\r\n    try:\r\n        uJson = request.get_json()\r\n        return restful.update_plantweek_entry(uJson)\r\n    except Exception:\r\n        msg = traceback.format_exc()\r\n        print(msg)\r\n        return {'status':'failed','msg': msg}\r\n    \r\n@app.route('/customer_reserve/update/',methods=['GET','POST'])\r\ndef update_customer_reserve():\r\n    try:\r\n        uJson = request.get_json()\r\n        return restful.update_plantweek_entry(uJson)\r\n    except Exception:\r\n        return traceback.format_exc() \r\n\r\n@app.route('/update_info/<path:path>/',methods=['DELETE', 'GET', 'GET_METADATA', 'POST', 'PUT'])\r\ndef get_update_info(path):\r\n    return restful.get_update_info(path)\r\n\r\n@app.route('/options/<path:path>/',methods=['GET'])\r\ndef get_options(path):\r\n    r = restful.get_option_field(path, request.values)\r\n    return jsonify(r)\r\n\r\n@app.route('/log_message',methods=['GET','POST'])\r\ndef process_add_log_message():\r\n    uJson = request.get_json()\r\n    return restful.add_logging_message(uJson['message'],uJson['msg_type'])\r\n\r\n@app.route(\"/test_email\", methods=['GET'])\r\ndef send_test_email():\r\n    return restful.send_test_email()\r\n\r\n@app.route('/push_dw',methods=['GET','POST'])\r\ndef process_dw_task():\r\n    process_task = request.values.get(\"task\")\r\n    process_step = request.values.get('process')\r\n    task = taskqueue.add(\r\n        url='/run_dw_task',\r\n        target='worker',\r\n        params={'task':process_task,'process':process_step})\r\n    \r\n    return jsonify({'task_name':task.name,'task_eta':task.eta})\r\n\r\n@app.route('/run_dw_task',methods=['POST','GET'])\r\ndef run_dw_task():\r\n    runtask = request.values.get('task')\r\n    process = request.values.get(\"process\") # either prep or run\r\n    \r\n    try:\r\n        if runtask == 'newprop':\r\n            update_model(process)\r\n        elif runtask == 'fix_dates':\r\n            GrowWeek.set_mondays()\r\n        elif runtask == 'date':\r\n            if process == 'prep':\r\n                database.set_date()\r\n            else:\r\n                database.get_date()\r\n        elif runtask == 'supply':\r\n            if process == 'prep':\r\n                database.set_supply()\r\n            else:\r\n                database.get_supply()\r\n        elif runtask == 'reserve':\r\n            if process == 'prep':\r\n                database.set_reserves()\r\n            else:\r\n                database.get_reserves()\r\n        elif runtask == 
'summary':\r\n if process == 'prep':\r\n database.set_summary()\r\n else:\r\n database.get_summary()\r\n elif runtask == 'all':\r\n if process == 'prep':\r\n database.set_date()\r\n database.set_supply()\r\n database.set_reserves()\r\n database.set_summary()\r\n else:\r\n database.get_date()\r\n database.get_supply()\r\n database.get_reserves()\r\n database.get_summary()\r\n else:\r\n print(\"Got {}, not sure what to do!!1\".format(runtask))\r\n return jsonify({\"status\":\"success\"})\r\n except:\r\n traceback.print_exc(file=sys.stdout)\r\n print(\"Unexpected error:\", sys.exc_info()[0])\r\n return jsonify({\"status\":\"failed\"})\r\n \r\nif __name__ == \"__main__\":\n app.run()","sub_path":"sales-inv-corchids/application/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"417001856","text":"x=int(input('Ingrese el primer numero'))\ny=int(input('Ingrese el segundo numero'))\nlista=[]\nsuma=0\n\nif x best_acc:\n best_acc = accuracy_score(test_y, y_pred)\n best_gs = gs\n best_cls = idx\nprint('\\n Classifier with best score: %s' % grid_dict[best_cls])\n\n# Save model (local)\njoblib.dump(best_gs.best_estimator_, model_output_path+'/model.joblib')\njoblib.dump(best_gs.best_params_, model_output_path+'/params.joblib')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"APRI/event_number_model_training.py","file_name":"event_number_model_training.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"233143915","text":"\nPROJECT_ROOT=r'/home/kira/bishe/output/'\n\nstart_chain_addEth='''\n#!/bin/bash\naddEth () {\n podname=\"$1\"\n ovs_bridge=\"$2\"\n eth=\"$3\"\n ipaddress=\"$4\"\n macaddress=\"$5\"\n homenet=\"$6\"\n portname=\"$7\"\n\n containerid=`docker ps | grep \"${podname}\" |grep -v \"infrastructure\" | awk ' {print $1} '`\n #echo \"print containerid:\"$containerid\n ovs-docker add-port \"${ovs_bridge}\" \"${eth}\" \"${containerid}\" --ipaddress=\"${ipaddress}\" --portname=\"${portname}\" --macaddress=\"${macaddress}\"\n pid=`docker inspect -f '{{ .State.Pid }}' $containerid`\n mkdir -p /var/run/netns\n if [ ! 
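# --- editor's note: the model-training record above persists the winning
# GridSearchCV estimator and its params with joblib. Loading them back for inference
# uses the mirror-image API (the 'model/' directory here stands in for the record's
# model_output_path, which is not shown in full):
import joblib

model = joblib.load('model/model.joblib')    # the fitted best_estimator_
params = joblib.load('model/params.joblib')  # the best_params_ dict
# y_pred = model.predict(X_new)              # then predict as usual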
-e /var/run/netns/\"$pid\" ]; then\n ln -s /proc/\"$pid\"/ns/net /var/run/netns/\"$pid\"\n fi\n #ip netns exec ${pid} route del -net 0.0.0.0\n ip netns exec \"${pid}\" route add -net \"${homenet}\" dev \"${eth}\"\n #ip netns exec ${pid} route add default gw 172.81.0.1 eth1\n rm -f /var/run/netns/\"$pid\"\n}\n'''\n\nstart_chain_createpod='''\nkubectl create %s\n'''\n\nSTART_CHAIN_WAITPODS='''\nRUNNING=`kubectl get pod %s|grep Running|wc -l`\nwhile [ $RUNNING -ne 1 ]\ndo\n sleep 1\n RUNNING=`kubectl get pod %s|grep Running|wc -l`\ndone\n\n'''\n#addEth \"haproxy\" \"${OVS_BRIDGE}\" \"eth1\" \"${OVS_NET}3\" \"${OVS_MAC}3\" \"${OVS_NET}0/16\" \"haproxy\"\n\nstart_chain_addEthToPod='''\naddEth %s %s %s %s %s %s %s\n'''\n\ndef addEth(pod, ip, mac, eth, port):\n if '89' in ip:\n net = '172.89.0.0/16'\n bridge = 'br-out'\n else:\n net = '172.81.0.0/16'\n bridge = 'br-tun'\n s = start_chain_addEthToPod % (pod, bridge, eth, ip, mac, net, port)\n return s\n\nHTTPERF_LINK='''\n#!/bin/bash\nWORKS_NUM=`kubectl exec -it %s -- curl http://%s | grep \"It works\" |wc -l`\n\nif [ $WORKS_NUM == 2 ]; then\n echo \"The NF Chain has linked correctly.\"\n exit 0\nelse\n echo \"There is something wrong with the chain.\"\n exit 1\nfi\n'''\n\n\nTCPREPLAY_LINK='''\n#!/bin/bash\ntcpdump -nttvv -i %s_l > %s 2> /dev/null &\ntcpdump -nttvv -i %s_l > %s 2> /dev/null &\nkubectl exec %s -it tcpreplay -- --intf1=eth1 /root/test.pcap > /dev/null\n\nTCPDUMP_PIDS=`ps aux | grep tcpdump | grep -v \"grep\" | awk '{print $2}'`\nfor dump_pid in $TCPDUMP_PIDS; do\n kill $dump_pid\ndone\n\npython /home/kira/bishe/linktest.py %s %s\nif [ $? == 0 ]; then\n #echo \"The NF Chain has linked correctly.\"\n exit 0\nelse\n #echo \"There is something wrong with the chain.\"\n exit 1\nfi\n'''\n\ndef createLinkTest(nfs,chainid):\n shellscript = ''\n if 'httperf' in nfs:\n httperfpod = nfs['httperf']['podname']\n dest = nfs['httperf']['httperf_server']+':'+nfs['httperf']['httperf_port']\n shellscript = HTTPERF_LINK % (httperfpod, dest)\n\n if 'tcpreplay' in nfs:\n tcpreplaypod = nfs['tcpreplay']['podname']\n snortport = nfs['snort']['podname'][0:10]\n tcpdumport = nfs['tcpdump']['podname'][0:10]\n snortpcap = '%sdumpfile/%s.pcap' % (PROJECT_ROOT, snortport)\n tcpdumpcap = '%sdumpfile/%s.pcap' % (PROJECT_ROOT, tcpdumport)\n shellscript = TCPREPLAY_LINK % (snortport, snortpcap, tcpdumport, tcpdumpcap, tcpreplaypod, snortpcap, tcpdumpcap)\n\n f = open(PROJECT_ROOT+('linktest/%d.sh' % chainid), 'w')\n f.write(shellscript)\n f.close()\n\ndef createStartChain(nfs,chainid):\n shellscript=start_chain_addEth\n createStatement=\"\"\n num = 0\n waitpods = ''\n for nf in nfs.keys():\n if nf == 'apache':\n num = int(nfs[nf]['cases'])\n for i in range(0, num):\n createStatement += (\" -f \" + PROJECT_ROOT + 'pods/' + nfs[nf]['podname'][i] + \".yaml\")\n waitpods += START_CHAIN_WAITPODS % (nfs[nf]['podname'][i], nfs[nf]['podname'][i])\n else:\n createStatement += (\" -f \" + PROJECT_ROOT + 'pods/' + nfs[nf]['podname'] + \".yaml\")\n waitpods += START_CHAIN_WAITPODS % (nfs[nf]['podname'], nfs[nf]['podname'])\n shellscript += start_chain_createpod % createStatement\n shellscript += waitpods\n\n for nf in nfs.keys():\n if nf == 'nat':\n shellscript += addEth(nfs[nf]['podname'], nfs[nf]['srcip'],\n nfs[nf]['srcmac'], 'eth1', nfs[nf]['podname'][0:10])\n shellscript += addEth(nfs[nf]['podname'], nfs[nf]['ip2'],\n nfs[nf]['destmac'], 'eth2', nfs[nf]['podname'][0:9])\n elif nf == 'apache':\n for i in range(0, num):\n shellscript += addEth(nfs[nf]['podname'][i], 
nfs[nf]['ip'][i],\n nfs[nf]['mac'][i],'eth1', nfs[nf]['podname'][i][0:10])\n else:\n shellscript+=addEth(nfs[nf]['podname'], nfs[nf]['ip'],\n nfs[nf]['mac'] ,'eth1', nfs[nf]['podname'][0:10])\n\n if 'nat' in nfs:\n protocal = ' -p ' + nfs['nat']['nat_protocal']\n if nfs['nat']['nat_protocal'] == 'any':\n protocal = ''\n srcport = ' --dport ' + nfs['nat']['nat_srcport']\n if nfs['nat']['nat_srcport'] == '0':\n srcport = ''\n destport = ':' + nfs['nat']['nat_destport']\n if nfs['nat']['nat_destport'] == '0':\n destport = ''\n natpod=nfs['nat']['podname']\n shellscript+=(NAT_CTL % (natpod, protocal, srcport, nfs['nat']['destip'] + destport, natpod))\n\n if 'haproxy' in nfs:\n haproxypod = nfs['haproxy']['podname']\n cfgpath = haproxypod + '.cfg'\n shellscript+=(HAPROXY_CTL % (haproxypod, cfgpath))\n\n if 'apache' in nfs:\n apachepods = nfs['apache']['podname']\n for pod in apachepods:\n shellscript += (APACHE_CTL % (pod, pod))\n\n if 'snort' in nfs:\n snortpod = nfs['snort']['podname']\n shellscript += (SNORT_CTL % snortpod)\n\n if 'tcpdump' in nfs:\n tcpdumpod = nfs['tcpdump']['podname']\n shellscript += (TCPDUMP_CTL % tcpdumpod)\n\n f = open(PROJECT_ROOT + ('startchain/%d.sh' % chainid), 'w')\n f.write(shellscript)\n f.close()\n\n\nTCPREPLAY_CTL='''\nkubectl exec tcpreplay -it tcpreplay -- --intf1=eth1 /root/test.pcap > /dev/null\n'''\n\n\nNAT_CTL='''\nkubectl exec -it %s -- iptables -A PREROUTING -t nat -i eth1 %s %s -j DNAT --to %s\nkubectl exec -it %s -- iptables -t nat -A POSTROUTING -j MASQUERADE -o eth2\n'''\n\nHAPROXY_CTL='''\nkubectl exec -it %s -- haproxy -f /myetc/%s\n'''\n\nSNORT_CTL='''\nkubectl exec -it %s -- snort -i eth1 -c /etc/snort/snort.conf\n'''\n\nAPACHE_CTL='''\nkubectl exec -it %s -- mkdir /var/log/apache2\nkubectl exec -it %s -- /etc/init.d/apache2 start > /dev/null\n'''\n\nTCPDUMP_CTL='''\nkubectl exec -it %s -- tcpdump -i eth1 -nttvv\n'''\n\nRMCHAIN='''\n#!/bin/bash\nOVS_BRIDGE=\"br-tun\"\nOVS_BRIDGE_OUT=\"br-out\"\n\nkubectl delete %s\n\novs-ofctl del-flows $OVS_BRIDGE_OUT\novs-ofctl add-flow $OVS_BRIDGE_OUT table=0,priority=0,actions=NORMAL\novs-ofctl del-flows $OVS_BRIDGE\novs-ofctl add-flow $OVS_BRIDGE table=0,priority=0,actions=NORMAL\n\n'''\n\nRMPORT1='''\novs-vsctl del-port $OVS_BRIDGE %s_l\n'''\n\nRMPORT2='''\novs-vsctl del-port $OVS_BRIDGE_OUT %s_l\n'''\n\ndef createRMChainScript(nfs, chainid):\n rmStatement = ''\n rmPort= ''\n for nf in nfs.keys():\n if nf == 'apache':\n num = int(nfs[nf]['cases'])\n for i in range(0, num):\n rmStatement += (\" -f \" + PROJECT_ROOT + 'pods/' + nfs[nf]['podname'][i] + \".yaml\")\n if '89' in nfs[nf]['ip'][i]:\n rmPort += (RMPORT2 % nfs[nf]['podname'][i][0:10])\n else:\n rmPort += (RMPORT1 % nfs[nf]['podname'][i][0:10])\n elif nf == 'nat':\n rmStatement += (\" -f \" + PROJECT_ROOT + 'pods/' + nfs[nf]['podname'] + \".yaml\")\n rmPort += (RMPORT1 % nfs[nf]['podname'][0:10])\n rmPort += (RMPORT2 % nfs[nf]['podname'][0:9])\n\n else:\n rmStatement += (\" -f \" + PROJECT_ROOT + 'pods/' + nfs[nf]['podname'] + \".yaml\")\n if '89' in nfs[nf]['ip']:\n rmPort += (RMPORT2 % nfs[nf]['podname'][0:10])\n else:\n rmPort += (RMPORT1 % nfs[nf]['podname'][0:10])\n f = open(PROJECT_ROOT + ('rmchain/%d.sh' % chainid), 'w')\n f.write((RMCHAIN % rmStatement) + rmPort)\n f.close()\n\n\nTHROUGHPUT_TEST='''\n#!/bin/bash\n\nLOG_FILE=%s\nREPORT_FILE=%s\n\naddTitle () {\n logfile=$1\n LG_POD=$2\n LG_CID=`docker ps | grep ${LG_POD} |grep -v \"infrastructure\" | awk '{print $1}'`\n LG_PID=`docker inspect -f '{{ .State.Pid }}' $LG_CID`\n sed -n '1,2p' 
\"/proc/${LG_PID}/net/dev\" > $logfile\n sed -i '1s/^/Container\\t| TIME \\t|&/g' $logfile\n sed -i '2s/^/ NAME \\t| STAMP \\t|&/g' $logfile\n}\n\ngetFlow () {\n logfile=$1\n# for podname in \"httperf\" \"iptables\" \"apache\" ; do\n for podname in %s ; do\n containerid=`docker ps | grep \"${podname}\" |grep -v \"infrastructure\" | awk ' {print $1} '`\n #echo \"print containerid:\"$containerid\n pid=`docker inspect -f '{{ .State.Pid }}' $containerid`\n sed -n '/eth1/p' /proc/${pid}/net/dev >> $logfile\n time=`date +%%s.%%N`\n sed -i \"\\$s/^/$podname\\t| $time\\t|&/g\" $logfile\n %s\n done\n}\n\n\naddTitle \"${LOG_FILE}\"\n%s\ngetFlow \"${LOG_FILE}\"\necho \"------------------------------------\" >> \"${LOG_FILE}\"\nsleep 5\ngetFlow \"${LOG_FILE}\"\n%s\n#python /home/kira/bishe/throughput.py \"${LOG_FILE}\" \"${REPORT_FILE}\"\n\n'''\n\n#httperfCID=`docker ps | grep nfv/httperf | awk '{print $1}'`\n#docker exec $httperfCID httperf --server=172.81.0.3 --port=5000 --num-conns=1000000 --num-calls=10000000000000 &> /dev/null &\n\nTHROUGHPUT_TEST_LG_HTTPERF='''\nhttperfCID=`docker ps | grep %s | grep -v \"infrastructure\" |awk '{print $1}'`\ndocker exec $httperfCID httperf --server=%s --port=%s --num-conns=%s --num-calls=%s &> /dev/null &\n'''\n\n#httperfPID=`docker exec $httperfCID ps -axu | grep httperf | awk '{print $2}'`\n#docker exec $httperfCID kill $httperfPID\n\nTHROUGHPUT_TEST_KILL_HTTPERF='''\nhttperfPID=`docker exec $httperfCID ps -axu | grep httperf | awk '{print $2}'`\ndocker exec $httperfCID kill -2 $httperfPID &> /dev/null\n'''\n\nTHROUGHPUT_TEST_GETFLOWNAT='''\n if [[ $podname == \"%s\" ]] ; then\n sed -n '/eth2/p' /proc/${pid}/net/dev >> $logfile\n time=`date +%%s.%%N`\n sed -i \"\\$s/^/$podname\\t| $time\\t|&/g\" $logfile\n fi\n'''\n\nTHROUGHPUT_TEST_LG_TCPREPLAY='''\ntcpreplayCID=`docker ps | grep %s |grep -v \"infrastructure\" | awk '{print $1}'`\ntrace=\"/root/traces_len/trace_${packetsize}.pcap\"\ndocker exec $tcpreplayCID tcpreplay --intf1=eth1 -t --loop=0 ${trace} &> /dev/null &\n'''\n\nTHROUGHPUT_TEST_KILL_TCPREPLAY='''\ntcpreplayPID=`docker exec $tcpreplayCID ps -axu | grep tcpreplay | awk '{print $2}'`\ndocker exec $TCPREPLAY_CID kill $tcpreplayPID\n'''\n\ndef createThroughputTest(nfs,chainid):\n logfilePath = PROJECT_ROOT + ('log/log_chain%d' % chainid)\n reportfilePath = PROJECT_ROOT + ('report/report_chain%d' %chainid)\n podnames = ''\n for nf in nfs.keys():\n if nf == 'apache':\n num = int(nfs[nf]['cases'])\n for i in range(0, num):\n podnames += ('\"%s\" ' % nfs[nf]['podname'][i] )\n else:\n podnames += ('\"%s\" ' % nfs[nf]['podname'])\n getflowinside = ''\n if 'nat' in nfs:\n getflowinside = THROUGHPUT_TEST_GETFLOWNAT % nfs['nat']['podname']\n if 'httperf' in nfs:\n lg = THROUGHPUT_TEST_LG_HTTPERF % (nfs['httperf']['podname'], nfs['httperf']['httperf_server'], nfs['httperf']['httperf_port'], nfs['httperf']['httperf_num_conns'], nfs['httperf']['httperf_num_calls'])\n kill = THROUGHPUT_TEST_KILL_HTTPERF\n elif 'tcpreplay' in nfs:\n lg = THROUGHPUT_TEST_LG_TCPREPLAY % nfs['tcpreplay']['podname']\n kill = THROUGHPUT_TEST_KILL_TCPREPLAY\n\n wholescript = THROUGHPUT_TEST % (logfilePath, reportfilePath, podnames, getflowinside, lg, kill)\n f = open(PROJECT_ROOT + ('throughput/%d.sh' % chainid), 'w')\n f.write(wholescript)\n f.close()\n\n\nLATENCY_HTTPERFSCRIPT='''\n#!/bin/bash\nhttperfCID=`docker ps | grep %s |grep -v \"infrastructure\"| awk '{print $1}'`\ndocker exec $httperfCID httperf --server=%s --port=%s --num-conns=%s --num-calls=%s &> /dev/null &\nsleep 
5\nhttperfPID=`docker exec $httperfCID ps -axu | grep httperf | awk '{print $2}'`\ndocker exec $httperfCID kill $httperfPID\ndocker cp $httperfCID:/result %s\n'''\n\n\nLATENCY_TCPREPLAYSCRIPT='''\n#!/bin/bash\n\ntcpdump -nttvv -i %s_l > %s 2> /dev/null &\ntcpdump -nttvv -i %s_l > %s 2> /dev/null &\ntcpdump -nttvv -i %s_l > %s 2> /dev/null &\n\nsnortCID=`docker ps | grep %s |grep -v \"infrastructure\" | awk '{print $1}'`\nsink1CID=`docker ps | grep %s |grep -v \"infrastructure\" | awk '{print $1}'`\ndocker exec -it $snortCID snort -i eth1 &> /dev/null &\ndocker exec -it $sink1CID tcpdump -i eth1 &> /dev/null &\n\ntcpreplayCID=`docker ps | grep %s |grep -v \"infrastructure\"| awk '{print $1}'`\ntrace=\"/root/traces_len/trace_64.pcap\"\ndocker exec $tcpreplayCID tcpreplay --intf1=eth1 -t ${trace} &> /dev/null &\n\nsleep 5\n\nTCPDUMP_PIDS=`ps aux | grep tcpdump | grep -v \"grep\" | awk '{print $2}'`\nfor dump_pid in $TCPDUMP_PIDS; do\n kill $dump_pid &> /dev/null\ndone\n\npython /home/kira/bishe/latency.py %s %s %s\n'''\n\n\ndef createLatencyTest(nfs,chainid):\n podnames = ''\n for nf in nfs.keys():\n if nf == 'apache':\n num = int(nfs[nf]['cases'])\n for i in range(0, num):\n podnames += ('\"%s\" ' % nfs[nf]['podname'][i] )\n else:\n podnames += ('\"%s\" ' % nfs[nf]['podname'])\n\n if 'httperf' in nfs:\n logfilePath = PROJECT_ROOT + ('log/hplog_%d' % chainid)\n script = LATENCY_HTTPERFSCRIPT % (nfs['httperf']['podname'], nfs['httperf']['httperf_server'], nfs['httperf']['httperf_port'], nfs['httperf']['httperf_num_conns'], nfs['httperf']['httperf_num_calls'], logfilePath)\n elif 'tcpreplay' in nfs:\n tcpreplayport = nfs['tcpreplay']['podname'][0:10]\n snortport = nfs['snort']['podname'][0:10]\n tcpdumport = nfs['tcpdump']['podname'][0:10]\n tcpreplaypcap = PROJECT_ROOT + 'dumpfile/%s.pcap' % tcpreplayport\n snortpcap = PROJECT_ROOT + 'dumpfile/%s.pcap' % snortport\n tcpdumpcap = PROJECT_ROOT + 'dumpfile/%s.pcap' % tcpdumport\n script = LATENCY_TCPREPLAYSCRIPT % (tcpreplayport, tcpreplaypcap, snortport, snortpcap, tcpdumport, tcpdumpcap, snortport, tcpdumport, tcpreplaypcap, snortpcap, tcpdumpcap)\n\n\n f = open(PROJECT_ROOT + ('latency/%d.sh' % chainid), 'w')\n f.write(script)\n f.close()\n","sub_path":"dashboard/RunShell.py","file_name":"RunShell.py","file_ext":"py","file_size_in_byte":14266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"297002575","text":"from multiprocessing import Process, Pool, Manager\r\n\r\nm1_file = 'matrix_1.txt'\r\nm2_file = 'matrix_2.txt'\r\n\r\nmatrix_1 = []\r\nmatrix_2 = []\r\n\r\n\r\ndef matrix_product(m1,m2,num_line,glob):\r\n result = glob.result\r\n dop = []\r\n sum = 0\r\n \r\n if len(m2) != len(m1[0]):\r\n print('Ошибка.Некорректные данные')\r\n return \"ошибка\"\r\n else:\r\n columns1 = len(m1[0])\r\n lines1 = len(m1)\r\n columns2 = len(m2[0])\r\n lines2 = len(m2)\r\n\t\t\r\n for line1 in range(num_line, num_line+1):\r\n for col2 in range(0,columns2):\r\n for col1 in range(0,columns1):\r\n sum += m1[line1][col1]*m2[col1][col2]\r\n dop.append(sum)\r\n sum = 0\r\n str_res = \"[\"\r\n for el in range(0,len(dop)-1):\r\n result[num_line*len(dop)+el] = dop[el]\r\n str_res+=f'{dop[el]},'\r\n result[num_line*len(dop)+(len(dop)-1)] = dop[len(dop)-1]\r\n str_res += f'{dop[len(dop)-1]}]'\r\n with open('result.txt','a') as f:\r\n f.write(str_res + '\\n')\r\n dop = []\r\n\r\n glob.result = result\r\n\r\nwith open('result.txt','w') as f:\r\n f.write('')\r\n\r\n\r\nwith open(m1_file,'r') as f:\r\n str = 
f.read()\r\n str = str.replace('[[','')\r\n str = str.replace(']]','')\r\n str = str.split('],[')\r\n for line in range(0,len(str)):\r\n matrix_1.append([])\r\n l = str[line].split(',')\r\n for el in range(0,len(l)):\r\n matrix_1[line].append(int(l[el]))\r\n\t\t\t\r\n\t\t\t\r\nwith open(m2_file,'r') as f:\r\n str = f.read()\r\n str = str.replace('[[','')\r\n str = str.replace(']]','')\r\n str = str.split('],[')\r\n for line in range(0,len(str)):\r\n matrix_2.append([])\r\n l = str[line].split(',')\r\n for el in range(0,len(l)):\r\n matrix_2[line].append(int(l[el]))\r\n\r\nprint('-----Результат произведения матриц:')\r\n\t\r\nmat = matrix_2\r\nif len(matrix_1) > len(matrix_2):\r\n mat = matrix_1\r\nif __name__ == '__main__':\r\n manager = Manager()\r\n Global = manager.Namespace()\r\n Global.result = [0 for i in range(0,len(mat)*len(mat[0]))]\r\n \r\n pool = Pool(len(matrix_1))\r\n\t\r\n pool.starmap(matrix_product,[(matrix_1, matrix_2, lin, Global) for lin in range(0,len(matrix_1))])\r\n result = Global.result\r\n n = 0\r\n for line in range(0,len(mat)):\r\n print('[',end='')\r\n for el in range(0,len(mat[line])-1):\r\n print(f'{result[n]},',end='')\r\n n += 1\r\n print(f'{result[n]}',end='')\r\n n += 1\r\n print(']\\n',end='')\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"193960752","text":"import numpy as np\nimport tables as tb\nimport calendar as cd\nimport datetime\nimport pandas as pd\nimport time\n\n\"\"\"Data types for pytables\"\"\"\nB_dtype = np.dtype([('Ycp','float32'),('ls','float32'),('rs','float32')])\nCP_dtype = np.dtype([('cp1','float32'),('cp2','float32')])\ndmonths_dtype = np.dtype([('heat','int8'),('cool','int8')])\nmodels_dtype = np.dtype([('B',B_dtype),('CP',CP_dtype),('RMSE','float32'),('Rsqr','float32'),('CV_RMSE','float32')])\ndata_dtype = np.dtype([('Result','S4'),('dmonths',dmonths_dtype)])\nt_dtype = np.dtype([('Result','S4'),('t1','float32'),('t2','float32')])\ntest_dtype = np.dtype([('shape_test','S4'),('data',data_dtype),('T',t_dtype),('Result','S4')])\nallModels_dtype = np.dtype([('enddate','int32'),('mtype','int8'),('Ycp','float32'),('ls','float32'),('rs','float32'),('cp1','float32'),('cp2','float32'),('RMSE','float32'),('Rsqr','float32'),('CV_RMSE','float32'),('shape_test','S4'),('data_test','S4'),('heat_months','int8'),('cool_months','int8'),('t_test','S4'),('t_ls','float32'),('t_rs','float32'),('model_result','S4')])\ntimeseries_dtype = np.dtype([('enddate','int32'),('OAT','float32'),('usage','float32')])\nraw_dtype = np.dtype([('start_date','int32'),('end_date','int32'),('elec','float32'),('fuel','float32'),('OAT','float32'),('prepost','int8')])\n\n\"Test Files\"\nenergy = [\"/Volumes/MAC 1/Documents/CUNY EDL/BEMA-Project/BEMA/Data/Woodhull_Seperate.csv\",\"/Volumes/MAC 1/Documents/CUNY EDL/BEMA-Project/BEMA/Data/Woodhull_Combined.csv\"]\nweather = [\"/Volumes/MAC 1/Documents/CUNY EDL/BEMA-Project/BEMA/Data/LGAAverageDailyTemperature.csv\",\"/Volumes/MAC 1/Documents/CUNY EDL/BEMA-Project/BEMA/Data/LGAAverageDailyTemperature.csv\"]\nfac = [\"Woodhull Seperate\",\"Woodhull Combined\"]\n\nclass createHDF5(object):\n \n def __init__(self, energy, weather, fac, filename):\n \n self.fac = fac\n self.energy = energy\n self.weather = weather\n self.table = None\n self.h5_filename = filename\n \n \"\"\"Create HDF5 File\"\"\"\n self.h5 = tb.openFile(self.h5_filename,'w')\n \n def runAllFiles(self):\n \n #Create 
PyTable group for each facility\n for index,fac in enumerate(self.fac):\n self.h5.create_group(self.h5.root,fac,fac)\n self.h5.flush()\n \n #Process data\n self.runFile(self.energy[index],self.weather[index],fac)\n \n #Close h5\n self.h5.close()\n \n def runFile(self,energy,weather,fac):\n #Open up energy and average temperature files as panda dataframes\n energy_file = pd.read_csv(energy)\n temps = pd.read_csv(weather)\n \n #Store data into PyTable\n self.table = self.h5.createTable(\"/\"+fac,'rawdata', raw_dtype,\"Raw Data\")\n reading = self.table.row\n temps.index = pd.to_datetime(temps.pop('Date'))\n \n for row in energy_file.iterrows():\n reading['start_date'] = int(time.mktime(time.strptime(row[1]['Start_Date'], \"%m/%d/%y\")))\n reading['end_date'] = int(time.mktime(time.strptime(row[1]['End_Date'], \"%m/%d/%y\")))\n reading['elec'] = row[1]['Elec']\n reading['fuel'] = row[1]['Fuel']\n temperatures = temps.loc[row[1]['Start_Date']:row[1]['End_Date'],'Temp']\n reading['OAT'] = np.mean(temperatures)\n reading['prepost'] = row[1]['PrePost']\n reading.append()\n \n self.table.flush()\n self.h5.flush()\n \n #Delete panda dataframes\n del energy_file\n del temps\n \n #Electricity\n dates = np.array([x['end_date'] for x in self.table.where(\"\"\"(elec > 0)\"\"\")])\n if len(dates) > 0:\n \n last_row = dates[-1] #Last date in set\n \n #Create group in hdf5 file to store tables containing model coefficients for each model year\n self.h5.create_group(\"/\"+fac,\"elec_models\",\"Elec Models\")\n \n for reading in dates:\n startdate = datetime.datetime.fromtimestamp(reading)\n month = startdate.month\n year = startdate.year + 1\n day = startdate.day - 5\n if day < 1:\n month -= 1\n if month < 1:\n month = 12\n year = year - 1\n day = cd.monthrange(year,month)[-1]\n enddate = datetime.datetime(year,month,day)\n mcheck = month - 1\n if mcheck < 1:\n mcheck = 12\n year = year - 1\n if day > cd.monthrange(year,mcheck)[-1]:\n day = day - cd.monthrange(year,mcheck)[-1]\n mcheck += 1\n if mcheck > 12:\n mcheck = 1\n year = year + 1\n check = datetime.datetime(year,mcheck,day)\n startdate = int(startdate.strftime('%s'))\n enddate = int(enddate.strftime('%s'))\n check = int(check.strftime('%s'))\n if check > last_row:\n break\n else:\n x_elec = np.array([x['OAT'] for x in self.table.iterrows() if x['elec'] > 0 and startdate <= x['end_date'] < enddate])\n y_elec = np.array([x['elec'] for x in self.table.iterrows() if x['elec'] > 0 and startdate <= x['end_date'] < enddate])\n dates_elec = np.array([x['end_date'] for x in self.table.iterrows() if x['elec'] > 0 and startdate <= x['end_date'] < enddate])\n self.runAllModels(x_elec,y_elec,dates_elec,fac,\"elec\")\n\n #Fuel\n dates = np.array([x['end_date'] for x in self.table.where(\"\"\"(fuel > 0)\"\"\")])\n if len(dates) > 0:\n \n last_row = dates[-1] #Last date in set\n \n #Create group in hdf5 file to store tables containing model coefficients for each model year\n self.h5.create_group(\"/\"+fac,\"fuel_models\",\"Fuel Models\")\n \n for reading in dates:\n startdate = datetime.datetime.fromtimestamp(reading)\n month = startdate.month\n year = startdate.year + 1\n day = startdate.day - 5\n if day < 1:\n month -= 1\n if month < 1:\n month = 12\n year = year - 1\n day = cd.monthrange(year,month)[-1]\n enddate = datetime.datetime(year,month,day)\n mcheck = month - 1\n if mcheck < 1:\n mcheck = 12\n year = year - 1\n if day > cd.monthrange(year,mcheck)[-1]:\n day = day - cd.monthrange(year,mcheck)[-1]\n mcheck += 1\n if mcheck > 12:\n mcheck = 1\n year = year 
+ 1\n check = datetime.datetime(year,mcheck,day)\n startdate = int(startdate.strftime('%s'))\n enddate = int(enddate.strftime('%s'))\n check = int(check.strftime('%s'))\n if check > last_row:\n break\n else:\n x_fuel = np.array([x['OAT'] for x in self.table.iterrows() if x['fuel'] > 0 and startdate <= x['end_date'] < enddate])\n y_fuel = np.array([x['fuel'] for x in self.table.iterrows() if x['fuel'] > 0 and startdate <= x['end_date'] < enddate])\n dates_fuel = np.array([x['end_date'] for x in self.table.iterrows() if x['fuel'] > 0 and startdate <= x['end_date'] < enddate])\n self.runAllModels(x_fuel,y_fuel,dates_fuel,fac,\"fuel\")\n \n \"\"\"Creates X Matrix for Least Squares Equation given array of outside air temps,\n change point(s) and model type\"\"\"\n def createXMatrix(self,mtype,x,cp1 = 0.0,cp2 = 0.0):\n c1 = np.array(np.ones(len(x)))\n if mtype == 5 or mtype == 6:\n c2 = np.array(np.where(cp1 < x,0.0,x - cp1))\n if mtype == 5:\n c3 = np.array(np.where(cp1 > x,0.0,x - cp1))\n else:\n c3 = np.array(np.where(cp2 > x,0.0,x - cp2))\n X = np.mat(np.column_stack((c1,c2,c3)))\n elif mtype == 4:\n c2 = np.array(np.where(cp1 < x,0.0,x - cp1))\n X = np.mat(np.column_stack((c1,c2))) \n else:\n c2 = np.array(np.where(cp1 > x,0.0,x - cp1))\n X = np.mat(np.column_stack((c1,c2)))\n return X\n \n \"\"\"Calculates least square estimates of the model coefficients\"\"\"\n def leastSquares(self,X,Y):\n Xtrans = X.transpose()\n A = np.dot(Xtrans,X)\n G = np.dot(Xtrans,Y)\n Ainv = np.linalg.inv(A)\n B = np.dot(Ainv,G)\n return B\n \n \"\"\"Calculates RMSE of the model\"\"\"\n def stats(self,mtype,X,Y,B):\n Ytrans = Y.transpose()\n Xtrans = X.transpose()\n Btrans = B.transpose()\n A = float(np.dot(Ytrans,Y))\n V = np.dot(Xtrans,Y)\n V = float(np.dot(Btrans,V))\n SSE = float(A - V)\n Y = np.squeeze(np.asarray(Y))\n Ysst = Y - np.average(Y)\n Yssttrans = Ysst.transpose()\n SST = float(np.dot(Yssttrans,Ysst))\n if mtype == 2:\n params = 2\n elif mtype < 5:\n params = 3\n else:\n params = mtype - 1\n MSE = float(SSE / (len(Y)-params))\n Rsquared = 1 - (SSE*(len(Y)-1))/(SST*(len(Y)-params))\n RMSE = np.sqrt(MSE)\n CV_RMSE = RMSE / np.average(Y) * 100\n return [RMSE, Rsquared, CV_RMSE]\n \n \"\"\"Loops through possible change points stores the coefficients, change point(s),\n and RMSE into a structured array then returns the model with the \n lowest RMSE\"\"\"\n def createModel(self,mtype,x,y):\n Y = np.mat(y).transpose()\n RMSE1 = 0\n RMSE2 = 0\n if mtype == 2:\n models = np.zeros(1,dtype = models_dtype)\n B_temp = np.zeros(1,B_dtype)\n CP = np.zeros(1,CP_dtype)\n X = self.createXMatrix(mtype,x)\n B = self.leastSquares(X,Y)\n Perform = self.stats(mtype,X,Y,B)\n CP[0] = (0.0,0.0)\n if B[1] < 0:\n B_temp[0] = (B[0],B[1],0.0)\n else:\n B_temp[0] = (B[0],0.0,B[1])\n models[0] = (B_temp[0],CP[0],Perform[0],Perform[1],Perform[2]) \n model = models[0]\n return model\n else:\n x_sorted = np.sort(x)\n lowT = x_sorted[len(x)/4 - 1]\n maxT = x_sorted[(len(x)-len(x)/4)-1]\n step = 0.25\n models = np.zeros(1,dtype = models_dtype)\n B_temp = np.zeros(1,B_dtype)\n CP = np.zeros(1,CP_dtype)\n for cp1 in np.arange(lowT,maxT,step):\n if mtype == 6:\n models2 = np.zeros(1,dtype = models_dtype)\n for cp2 in np.arange(cp1,maxT,step):\n X = self.createXMatrix(mtype,x,cp1,cp2)\n B = self.leastSquares(X,Y)\n Perform = self.stats(mtype,X,Y,B)\n CP[0] = (cp1,cp2)\n B_temp[0] = (B[0],B[1],B[2])\n if Perform[0] < RMSE2 or RMSE2 == 0:\n models2[0] = (B_temp[0],CP[0],Perform[0],Perform[1],Perform[2])\n RMSE2 = Perform[0]\n cp2 = 
models2['CP']['cp2']\n else:\n cp2 = 0.0\n X = self.createXMatrix(mtype,x,cp1,cp2)\n B = self.leastSquares(X,Y)\n Perform = self.stats(mtype,X,Y,B)\n CP[0] = (cp1,cp2)\n if mtype < 5:\n if mtype == 3:\n B_temp[0] = (B[0],0.0,B[1])\n else:\n B_temp[0] = (B[0],B[1],0.0)\n else:\n B_temp[0] = (B[0],B[1],B[2])\n if Perform[0] < RMSE1 or RMSE1 == 0:\n models[0] = (B_temp[0],CP[0],Perform[0],Perform[1],Perform[2])\n RMSE1 = Perform[0]\n return models[0]\n \n \"\"\"Performs shape test on model (Based off of Texas A&M algorithm)\"\"\"\n def shapeTest(self,mtype,ls,rs):\n test = \"Fail\"\n if mtype == 3:\n if rs > 0.0:\n test = \"Pass\"\n elif mtype == 4:\n if ls < 0.0:\n test = \"Pass\" \n elif mtype == 5:\n if ls < 0.0 and rs > 0.0:\n test = \"Pass\"\n elif ls < 0.0 and rs < 0.0:\n test = \"Pass\"\n elif ls > 0.0 and rs > 0.0:\n test = \"Pass\"\n elif mtype == 6:\n if ls < 0.0 and rs > 0.0:\n test = \"Pass\" \n return test\n \n \"\"\"Performs data population test on model (Based off of Texas A&M algorithm)\"\"\"\n def datapopTest(self,mtype,x,cp1,cp2):\n test = \"Fail\"\n region1 = 0\n region2 = 0\n region3 = 0\n end = len(x)\n dmonths = np.zeros(1,dmonths_dtype)\n if mtype == 6:\n for i in range(end):\n if x[i] <= cp1:\n region1 += 1\n elif x[i] > cp1 and x[i] <= cp2:\n region2 += 1\n elif x[i] > cp2:\n region3 += 1\n if region1 > 2 and region2 > 2 and region3 > 2:\n test = \"Pass\"\n dmonths[0]['cool'] = region3\n dmonths[0]['heat'] = region1\n else:\n for i in range(end):\n if x[i] <= cp1:\n region1 += 1\n elif x[i] > cp1:\n region2 += 1\n if region1 > 2 and region2 > 2:\n test = \"Pass\"\n if mtype == 4:\n dmonths[0]['heat'] = region1\n elif mtype == 5:\n dmonths[0]['heat'] = region1\n dmonths[0]['cool'] = region2\n else:\n dmonths[0]['cool'] = region2\n return (test, dmonths[0])\n \n \"\"\"Calulates t-statistic. 
(equation from \"A Second Course in Statistics\n Regression Analysis\")\"\"\"\n def calcT(self,b,RMSE,XXinverse,loc):\n c = XXinverse[(loc,loc)]\n tstat = b / (RMSE * np.sqrt(c))\n return tstat\n \n \"\"\"Checks to see if t-statistic is within the acceptable range for ls and rs\"\"\"\n def tTest(self,mtype,x,cp1,cp2,ls,rs,RMSE):\n test = \"Fail\"\n value = 2.0\n tstat = 0.0\n tstat2 = 0.0\n X = self.createXMatrix(mtype,x,cp1,cp2)\n Xtrans = X.transpose()\n XtransX = np.dot(Xtrans,X)\n XXinverse = np.linalg.inv(XtransX)\n if mtype == 3:\n tstat2 = self.calcT(rs,RMSE,XXinverse,1)\n if tstat2 > value or tstat2 < (-1 * value):\n test = \"Pass\"\n elif mtype == 4: \n tstat = self.calcT(ls,RMSE,XXinverse,1)\n if tstat > value or tstat < (-1 * value):\n test = \"Pass\"\n elif mtype > 4: \n tstat = self.calcT(ls,RMSE,XXinverse,1)\n tstat2 = self.calcT(rs,RMSE,XXinverse,2)\n if (tstat > value or tstat < (-1 * value)) and (tstat2 > value or tstat2 < (-1 * value)):\n test = \"Pass\"\n return (test,tstat,tstat2)\n \n \"\"\"Checks to see if model passes all the test\n (Includes test for 2P model looking at the RMSE)\"\"\"\n def testModel(self,x,y,mtype,model):\n test_values = np.zeros(1,test_dtype)\n test_values['Result'] = \"Fail\"\n ls = model['B']['ls']\n rs = model['B']['rs']\n cp1 = model['CP']['cp1']\n cp2 = model['CP']['cp2']\n Rsqr = model['Rsqr']\n RMSE = model['RMSE']\n test_count = 0\n if mtype == 2:\n if Rsqr > .75:\n test_values[0]['Result'] = \"Pass\"\n if ls == 0:\n test_values[0]['data']['dmonths']['cool'] = len(y)\n else:\n test_values[0]['data']['dmonths']['heat'] = len(y)\n else:\n shape_test = self.shapeTest(mtype,ls,rs)\n test_values[0]['shape_test'] = str(shape_test)\n if shape_test == \"Pass\":\n test_count = test_count + 1\n dTest = self.datapopTest(mtype,x,cp1,cp2)\n test_values[0]['data']['Result'] = dTest[0]\n test_values[0]['data']['dmonths']['heat'] = dTest[1]['heat']\n test_values[0]['data']['dmonths']['cool'] = dTest[1]['cool']\n if dTest[0] == \"Pass\":\n test_count = test_count + 1\n t_Test = self.tTest(mtype,x,cp1,cp2,ls,rs,RMSE)\n test_values[0]['T']['Result'] = t_Test[0]\n test_values[0]['T']['t1'] = t_Test[1]\n test_values[0]['T']['t2'] = t_Test[2]\n if t_Test[0] == \"Pass\":\n test_count = test_count + 1\n if test_count == 3:\n test_values[0]['Result'] = \"Pass\"\n return test_values[0]\n \n \"\"\"Stores the resulting model from the createModel() function and test result\n from the testModel() function for each model type (2,3C,3H,4,5) in a PyTable\n for that year\"\"\"\n def runAllModels(self,x,y,dates,name,etype):\n enddate = datetime.datetime.fromtimestamp(dates[-1])\n enddate_str =enddate.strftime('%m_%d_%y')\n model = self.createModel(2,x,y)\n test_values = self.testModel(x,y,2,model)\n \n #Create group containing tables for all models and timeseries data\n self.h5.createGroup(\"/\"+name+\"/\"+etype+\"_models\",enddate_str, name + \" \" + enddate_str)\n \n #Table containing model coefficients and test results\n modeltable = self.h5.createTable(\"/\"+name+\"/\"+etype+\"_models\"+\"/\"+enddate_str,\"Models\", allModels_dtype,name + \" \" + enddate_str+\" Models\")\n models = modeltable.row\n models['enddate'] = int(enddate.strftime('%s'))\n models['mtype'] = 2\n models['Ycp'] = model['B']['Ycp']\n models['ls'] = model['B']['ls']\n models['rs'] = model['B']['rs']\n models['cp1'] = model['CP']['cp1']\n models['cp2'] = model['CP']['cp2']\n models['RMSE'] = model['RMSE']\n models['Rsqr'] = model['Rsqr']\n models['CV_RMSE'] = model['CV_RMSE']\n models['shape_test'] = 
test_values['shape_test']\n models['data_test'] = test_values['data']['Result']\n models['heat_months'] = test_values['data']['dmonths']['heat']\n models['cool_months'] = test_values['data']['dmonths']['cool']\n models['t_test'] = test_values['T']['Result']\n models['t_ls'] = test_values['T']['t1']\n models['t_rs'] = test_values['T']['t2']\n models['model_result'] = test_values['Result']\n models.append()\n for mtype in range(3,7):\n model = self.createModel(mtype,x,y)\n test_values = self.testModel(x,y,mtype,model)\n models['enddate'] = int(enddate.strftime('%s'))\n models['mtype'] = mtype\n models['Ycp'] = model['B']['Ycp']\n models['ls'] = model['B']['ls']\n models['rs'] = model['B']['rs']\n models['cp1'] = model['CP']['cp1']\n models['cp2'] = model['CP']['cp2']\n models['RMSE'] = model['RMSE']\n models['Rsqr'] = model['Rsqr']\n models['CV_RMSE'] = model['CV_RMSE']\n models['shape_test'] = test_values['shape_test']\n models['data_test'] = test_values['data']['Result']\n models['heat_months'] = test_values['data']['dmonths']['heat']\n models['cool_months'] = test_values['data']['dmonths']['cool']\n models['t_test'] = test_values['T']['Result']\n models['t_ls'] = test_values['T']['t1']\n models['t_rs'] = test_values['T']['t2']\n models['model_result'] = test_values['Result']\n models.append()\n modeltable.flush()\n \n #Table containing timeseries data\n timetable = self.h5.createTable(\"/\"+name+\"/\"+etype+\"_models\"+\"/\"+enddate_str,\"Time_Series\",timeseries_dtype,name + \" \" + enddate_str+\" Time Series\")\n reading = timetable.row\n for enddate,x,y in zip(dates,x,y):\n reading['enddate'] = enddate\n reading['OAT'] = x\n reading['usage'] = y\n reading.append()\n timetable.flush()\n \n self.h5.flush()","sub_path":"Python_Files/Old Files/changepoint_regression_pytables.py","file_name":"changepoint_regression_pytables.py","file_ext":"py","file_size_in_byte":20473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"496719559","text":"import logging\n\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.controllers.util import abort, redirect\n\nfrom kazhal.lib.base import BaseController, render\nfrom kazhal.lib import helpers as h\n\nfrom pylons.decorators.rest import restrict\n\nfrom kazhal.model import Session, Group, Permission\nfrom repoze.what.predicates import not_anonymous, has_permission,is_anonymous,in_group\nfrom repoze.what.plugins.pylonshq import ActionProtector\n\nimport formencode\nfrom formencode import htmlfill\nfrom pylons.decorators import validate\n\nfrom schemas import NewGroupForm,EditGroupForm\n\nlog = logging.getLogger(__name__)\nfrom pylons.i18n import set_lang,get_lang,_\n\nclass AddgroupController(BaseController):\n def __before__(self):\n h.setMenuItems(_('menus.dat'))\n self.menu_items = session[_('menus.dat')]\n\n @ActionProtector(in_group('admin'))\n def new(self): \n c.permissions = Session.query(Permission)\n c.menu_items = h.top_menu(self.menu_items,_('Customers'))\n return render('/derived/group/addgroup.html')\n \n \n @restrict('POST')\n @ActionProtector(in_group('admin'))\n @validate(schema=NewGroupForm(), form='new')\n def create(self): \n #if Session.query(Group).filter_by(group=request.POST['group']).one() != None:\n #abort(404)\n newgroup = Group(request.POST['group'])\n newgroup.permissions = self.form_result['permissions'] \n Session.add(newgroup)\n Session.commit()\n h.flash(_('Group successfully Created.'))\n redirect(url(controller='addgroup', 
action='list'))\n \n @ActionProtector(in_group('admin'))\n def edit(self,id): \n values={}\n group = Session.query(Group).filter_by(id=id).one()\n values['group']= group.group\n\n c.permissions = Session.query(Permission).all()\n for i,perm in enumerate(c.permissions):\n for permission in group.permissions:\n if permission.name == perm.name:\n values['permissions-%i.%i'%(i,perm.id)]= 1 \n\n c.menu_items = h.top_menu(self.menu_items,_('Customers'))\n html = render('/derived/group/edit.html')\n return htmlfill.render(html, defaults=values)\n\n \n @restrict('POST')\n @ActionProtector(in_group('admin'))\n @validate(schema=EditGroupForm(), form='edit') \n def save(self,id):\n if id is None:\n abort(404)\n group = self.form_result['group']\n del self.form_result['group']\n for k,v in self.form_result.items():\n if getattr(group, k) != v:\n setattr(group, k, v) \n Session.add(group)\n Session.commit()\n response.status_int = 302\n response.headers['location'] = url(controller='addgroup', action='list')\n return \"Moved temporarily\"\n\n @ActionProtector(in_group('admin'))\n def delete(self,id):\n if id is None:\n abort(404)\n group = Session.query(Group).filter_by(id=id).one()\n if group is None:\n abort(404)\n h.flash(_('Group successfully deleted.'))\n\n Session.delete(group)\n Session.commit()\n redirect(url(controller='addgroup', action='list'))\n return \"Group Deleted\"\n \n @ActionProtector(in_group('admin'))\n def list(self):\n groups = Session.query(Group).all()\n c.groups = groups\n c.menu_items = h.top_menu(self.menu_items,_('Customers'))\n return render('/derived/group/list.html')\n\n def view(self,id):\n pass\n ","sub_path":"kazhal/controllers/addgroup.py","file_name":"addgroup.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"173273290","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom doubanSpider.items import DoubanspiderItem\n\nclass MoviespiderSpider(scrapy.Spider):\n name = 'movieSpider'\n allowed_domains = ['movie.douban.com']\n url = \"https://movie.douban.com/top250?start=\"\n offset = 0\n start_urls = [url+str(offset)]\n\n def parse(self, response):\n item = DoubanspiderItem()\n movies = response.xpath('//div[@class=\"item\"]')\n for each in movies:\n item[\"movie_name\"] = each.xpath('.//div[@class=\"hd\"]//a//span[1]//text()').extract()[0]\n item[\"movie_star\"] = each.xpath('.//div[@class=\"star\"]//span[2]//text()').extract()[0]\n item[\"movie_star_person\"] = each.xpath('.//div[@class=\"star\"]//span[4]//text()').extract()[0]\n item[\"movie_summary\"] = each.xpath('.//p[@class=\"quote\"]//span[1]//text()').extract()[0]\n item[\"movie_info_url\"] = each.xpath('.//div[@class=\"pic\"]//a//@href').extract()[0]\n item[\"movie_image\"] = each.xpath('.//div[@class=\"pic\"]//a//img//@src').extract()[0]\n\n yield item\n\n if self.offset < 250:\n self.offset += 25\n yield scrapy.Request(self.url+str(self.offset), callback=self.parse)","sub_path":"doubanSpider/doubanSpider/spiders/movieSpider.py","file_name":"movieSpider.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"309979882","text":"import jpype\nfrom jpype.types import *\nimport common\n\n\nclass CollectionTestCase(common.JPypeTestCase):\n\n def setUp(self):\n super(CollectionTestCase, self).setUp()\n\n def testCollection(self):\n collection = jpype.java.util.ArrayList()\n collection.add(1)\n collection.add(2)\n 
self.assertEqual([1, 2], [i for i in collection])\n\n def testIterateHashmap(self):\n collection = jpype.java.util.HashMap()\n collection.put('A', 1)\n collection.put('B', 2)\n asdict = dict()\n for x in collection.entrySet():\n asdict[str(x.getKey())] = x.getValue().longValue()\n self.assertEqual(asdict, {'A': 1, 'B': 2})\n\n def testEnumMap(self):\n enumclass = jpype.JClass('jpype.collection.TestEnum')\n enummap = jpype.java.util.EnumMap(enumclass)\n enummap.put(enumclass.A, 'ABC')\n enummap.put(enumclass.B, 'DEF')\n asdict = dict()\n for x in enummap.entrySet():\n asdict[str(x.getKey())] = x.getValue()\n self.assertEqual({'A': 'ABC', 'B': 'DEF'}, asdict)\n\n def testMapPut(self):\n jmap = jpype.JClass(\"java.util.HashMap\")()\n jmap[\"a\"] = 1\n self.assertEqual(jmap[\"a\"], 1)\n\n def testMapPutAll(self):\n jmap = jpype.JClass(\"java.util.HashMap\")()\n dic = {\"a\": \"1\", \"b\": \"2\", \"c\": \"3\"}\n jmap.putAll(dic)\n self.assertEqual(jmap[\"a\"], \"1\")\n self.assertEqual(jmap[\"b\"], \"2\")\n self.assertEqual(jmap[\"c\"], \"3\")\n with self.assertRaises(TypeError):\n jmap.putAll([1, 2, 3])\n\n def testListGet(self):\n jlist = jpype.JClass(\"java.util.ArrayList\")()\n jlist.addAll([1, 2, 3, 4])\n self.assertEqual(jlist[0], 1)\n self.assertEqual(jlist[3], 4)\n self.assertEqual(jlist[-1], 4)\n self.assertEqual(jlist[-4], 1)\n\n def testListSlice(self):\n jlist = jpype.JClass(\"java.util.ArrayList\")()\n jlist.addAll([1, 2, 3, 4])\n jlist[1:3] = [5, 6]\n self.assertEqual(jlist[1], 5)\n self.assertEqual(jlist[2], 6)\n\n def testListDel(self):\n jlist = jpype.JClass(\"java.util.ArrayList\")()\n jlist.addAll([1, 2, 3, 4])\n del jlist[0]\n self.assertEqual(len(jlist), 3)\n self.assertEqual(jlist[0], 2)\n\n def testCollectionAddAll(self):\n l = [1, 2, 3, 4]\n l2 = ['a', 'b']\n jlist = jpype.JClass(\"java.util.ArrayList\")()\n jlist.addAll(l)\n jcollection = jpype.JObject(jlist, jpype.java.util.Collection)\n jcollection.addAll(l2)\n l.extend(l2)\n self.assertEqual(l, list(jcollection))\n\n def testListSetItemNeg(self):\n l = [1, 2, 3, 4]\n jlist = jpype.JClass(\"java.util.ArrayList\")()\n jlist.addAll([1, 2, 3, 4])\n jlist[-1] = 5\n l[-1] = 5\n self.assertEqual(l, list(jlist))\n jlist[-2] = 6\n l[-2] = 6\n self.assertEqual(l, list(jlist))\n with self.assertRaises(IndexError):\n jlist[-5] = 6\n\n def testMapKeyError(self):\n hm = JClass('java.util.HashMap')()\n with self.assertRaises(KeyError):\n hm['foo']\n hm['foo'] = None\n self.assertEqual(hm['foo'], None)\n\n def testHashMapEntryIter(self):\n hm = JClass('java.util.HashMap')()\n hm['alice'] = 'alice'\n hm['betty'] = 'betty'\n hm['catty'] = 'catty'\n for p, v in hm.entrySet():\n self.assertEqual(p, v)\n\n def testTreeMapEntryIter(self):\n hm = JClass('java.util.TreeMap')()\n hm['alice'] = 'alice'\n hm['betty'] = 'betty'\n hm['catty'] = 'catty'\n for p, v in hm.entrySet():\n self.assertEqual(p, v)\n\n def testSetDelItem(self):\n hs = JClass('java.util.HashSet')()\n hs.add('a')\n hs.add('b')\n hs.add('c')\n self.assertIn('a', hs)\n del hs['a']\n self.assertNotIn('a', hs)\n\n def testMapEntry(self):\n hm = JClass('java.util.TreeMap')()\n hm['alice'] = 'alice'\n h = hm.entrySet()\n self.assertEqual(len(h.iterator().next()), 2)\n\n def testListIter(self):\n ls = JClass('java.util.ArrayList')([0, 1, 2, 3])\n for i, j in enumerate(ls):\n self.assertEqual(i, j)\n\n def testEnumeration(self):\n st = JClass('java.util.StringTokenizer')(\"this is a test\")\n out = []\n for i in st:\n out.append(str(i))\n self.assertEqual(len(i), 4)\n 
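Taken together, the map and list tests in this record exercise jpype's Python-protocol bindings for java.util types. A minimal consolidated sketch of just those behaviors, assuming a JVM has already been started (the test harness above does this in its fixture):

    from jpype import JClass

    hm = JClass('java.util.HashMap')()
    hm['a'] = 1                  # Map.put via __setitem__
    assert hm['a'] == 1          # Map.get via __getitem__
    ls = JClass('java.util.ArrayList')()
    ls.addAll([1, 2, 3, 4])      # bulk add from a Python list
    assert ls[-1] == 4           # negative indexing wraps like a Python list
    del ls[0]                    # element removal via __delitem__
    assert list(ls) == [2, 3, 4]

Each assertion mirrors a behavior demonstrated by the tests themselves (testMapPut, testListGet, testListDel).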
self.assertEqual(\" \".join(out), \"this is a test\")\n\n def testCollectionDelItem(self):\n ja = JClass('java.util.ArrayList')(['1', '2', '3'])\n jc = JObject(ja, 'java.util.Collection')\n with self.assertRaisesRegex(TypeError, 'remove'):\n del jc[1]\n\n def testHashMapCtor(self):\n HashMap = JClass('java.util.HashMap')\n dc = dict()\n dc['fred'] = 1\n dc['george'] = 2\n dc['paul'] = 3\n hm = HashMap(dc)\n for p, v in dc.items():\n self.assertEqual(hm[p], v)\n\n def testHashMapPutAll(self):\n HashMap = JClass('java.util.HashMap')\n hm = HashMap()\n dc = dict()\n dc['fred'] = 1\n dc['george'] = 2\n dc['paul'] = 3\n hm.putAll(dc)\n for p, v in dc.items():\n self.assertEqual(hm[p], v)\n\n def testHashMapConvert(self):\n HashMap = JClass('java.util.HashMap')\n hm = HashMap()\n hm['fred'] = 1\n hm['george'] = 2\n hm['paul'] = 3\n dc = dict(hm)\n for p, v in hm.items():\n self.assertEqual(dc[p], v)\n\n def testMapABC(self):\n from collections.abc import Mapping, Sized, Iterable, Container\n hm = JClass('java.util.HashMap')()\n self.assertIsInstance(hm, Sized)\n self.assertIsInstance(hm, Iterable)\n self.assertIsInstance(hm, Container)\n self.assertIsInstance(hm, Mapping)\n\n def testUnmodifiableNext(self):\n ArrayList = JClass('java.util.ArrayList')\n Collections = JClass('java.util.Collections')\n a = ArrayList()\n a.add(\"first\")\n a.add(\"second\")\n a.add(\"third\")\n for i in a:\n pass\n\n for i in Collections.unmodifiableList(a):\n pass\n","sub_path":"test/jpypetest/test_collection.py","file_name":"test_collection.py","file_ext":"py","file_size_in_byte":6202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"280961601","text":"import sys, os, traceback, time\nfrom functools import lru_cache\nfrom math import sin, cos, sqrt, radians\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nfrom PyQt5 import QtGui\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtWidgets import QHBoxLayout, QFrame, QSplitter, QTabWidget\nfrom PyQt5.QtWidgets import QRadioButton, QButtonGroup\nfrom PyQt5 import QtCore as Qt\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtOpenGL import *\nfrom stl import mesh\nfrom dxf_loader import DXF_Loader\nfrom stl_loader import STL_loader\nfrom drawer import drawPolygon, drawText, drawText_3D, drawAxis, create_vbo, draw_vbo\nimport ctypes\nimport numpy as np\nfrom math import sqrt, pi, exp, floor\n\nwindow_title = \"Cloth SIM@PyQt5 v.0.5\"\nscreen_size = [600, 500]\nto_models = \"/Users/ryotaro/py_projects/pygame_sim/model\"\n\n##################### DXF ANALYSATION #####################\nextensor = \"model/extensor_hood_test002.dxf\"\n## スケーリング済\nextensor_reduced_scale = 1/1500\nExtensor = DXF_Loader(extensor, extensor_reduced_scale, -0.65, 2, -1.)\nstop_points_3d, particle_points_3d, poly_lines_3d = Extensor.ver_col_ind()\n## オリジナルスケール\norgExtensor = DXF_Loader(extensor, 1, 0, 0, 0, integer=True)\norg_sp_3d, org_pp_3d, org_pl_3d = orgExtensor.ver_col_ind()\n\nconst_set = []\nfor set in poly_lines_3d:\n for i in range(len(set)-1):\n const_set.append([set[i].tolist(), set[i+1].tolist()])\n\n##################### LOAD 3D MODEL #####################\nfinger = \"/Index\"\nnames_list = [\"/Metacarpal3_01.stl\",\n \"/Proximal_Phalanx3_01_org.stl\",\n \"/Middle_Phalanxh3_01_org.stl\",\n \"/Distal_Phalanxh3_01_org.stl\"]\n\nfile_name = [to_models+finger+names_list[0],\n to_models+finger+names_list[1],\n to_models+finger+names_list[2],\n to_models+finger+names_list[3]]\nbone_reduced_scale = 1/15\nMetacarpal3 = 
STL_loader(file_name[0], bone_reduced_scale)\nProximal_Phalanx3 = STL_loader(file_name[1], bone_reduced_scale)\nMiddle_Phalanxh3 = STL_loader(file_name[2], bone_reduced_scale)\nDistal_Phalanxh3 = STL_loader(file_name[3], bone_reduced_scale)\n\n## 頂点座標, カラー, 構成インデックス\nMeta_ver, Meta_col, Meta_ind = Metacarpal3.ver_col_ind()\nProP_ver, ProP_col, ProP_ind = Proximal_Phalanx3.ver_col_ind()\nMidP_ver, MidP_col, MidP_ind = Middle_Phalanxh3.ver_col_ind()\nDisP_ver, DisP_col, DisP_ind = Distal_Phalanxh3.ver_col_ind()\n\n## フレーム用カラー\nMeta_Frame_col = Metacarpal3.color(Meta_ver, _r=0, _g=0, _b=0)\nProP_Frame_col = Metacarpal3.color(ProP_ver, _r=0, _g=0, _b=0)\nMidP_Frame_col = Metacarpal3.color(MidP_ver, _r=0, _g=0, _b=0)\nDisP_Frame_col = Metacarpal3.color(DisP_ver, _r=0, _g=0, _b=0)\n\n\"\"\" 各モデルの座標の最大値(Y軸) \"\"\"\nMeta_max_index = np.argmax(np.array(Metacarpal3.all_mesh_particle)[:,1])\nMeta_max_cood = Metacarpal3.all_mesh_particle[Meta_max_index]\nProP_max_index = np.argmax(np.array(Proximal_Phalanx3.all_mesh_particle)[:,1])\nProP_max_cood = Metacarpal3.all_mesh_particle[ProP_max_index]\nMidP_max_index = np.argmax(np.array(Middle_Phalanxh3.all_mesh_particle)[:,1])\nMidP_max_cood = Metacarpal3.all_mesh_particle[MidP_max_index]\n\n###############################################################\n\ndef gaussian_function(sigma, mu, x, A=1.25):\n return A*(1/sqrt(2*pi*sigma) * exp(-1/(2*sigma*sigma)*(x-mu)**2))\n\ndef super_gaussian_function(sigma, mu, lmd, x, A=1.25):\n return A*exp(-(1/2*sigma*sigma*(x-mu)**2)**lmd)\n\ndef subtract(vec1,vec2):\n return [vec1[i]-vec2[i] for i in [0,1,2]]\n\ndef get_length(vec):\n return sum([vec[i]*vec[i] for i in [0,1,2]])**0.5\n\nBLACK = (0, 0, 0)\nWHITE = (1, 1, 1)\nRED = (1, 0, 0)\nGREEN = (0, 1, 0)\n\ndelta_t = 0.2\nNUM_ITER = 10\n\nclass Particle:\n def __init__(self, x, y, z, m=1.0):\n self.m = m\n self.init_x, self.init_y, self.init_z = x, y, z\n self.x, self.y, self.z = x, y, z\n self.oldx, self.oldy, self.oldz = x, y, z\n self.newx, self.newy, self.newz = x, y, z\n self.ax = 0\n self.ay = 0#-9.8 #0\n self.az = 0\n\n self.fixed = False\n\n def when_move(self, x, y, z):\n self.x, self.y, self.z = x, y, z\n\n def update(self, delta_t):\n if self.fixed == False:\n # Verlet Integration\n # (https://www.watanabe-lab.jp/blog/archives/1993)\n self.newx = 2.0 * self.x - self.oldx + self.ax * delta_t**2\n self.newy = 2.0 * self.y - self.oldy + self.ay * delta_t**2\n self.newz = 2.0 * self.z - self.oldz + self.az * delta_t**2\n self.oldx = self.x\n self.oldy = self.y\n self.oldz = self.z\n self.x = self.newx\n self.y = self.newy\n self.z = self.newz\n\n def set_pos(self, pos):\n self.x, self.y, self.z = pos\n\n def draw_sp(self):\n color = GREEN\n glColor3f(*color);\n glPointSize(10);\n glBegin(GL_POINTS);\n glVertex3fv(tuple((self.x, self.y, self.z)));\n glEnd();\n\n drawText_3D(str(self.x)+\", \"+str(self.y)+\", \"+str(self.z),\n self.x, self.y, self.z)\n\n def draw(self):\n DisP_cood_y = 10.61333\n MidP_cood_y = 8.88667\n #if self.fixed == True:\n #else:\n color = RED\n glColor3f(*color);\n glPointSize(10);\n glBegin(GL_POINTS);\n glVertex3fv(tuple((self.x, self.y, self.z)));\n glEnd();\n\n# パーティクルへの拘束条件\nclass Constraint:\n def __init__(self, index0, index1):\n self.index0 = index0\n self.index1 = index1\n delta_x = particles[index0].x - particles[index1].x\n delta_y = particles[index0].y - particles[index1].y\n delta_z = particles[index0].z - particles[index1].z\n self.restLength = sqrt(delta_x**2 + delta_y**2 + delta_z**2)\n self.init_d = 0\n self.d = 0\n\n def 
update(self):\n delta_x = particles[self.index1].x - particles[self.index0].x\n delta_y = particles[self.index1].y - particles[self.index0].y\n delta_z = particles[self.index1].z - particles[self.index0].z\n deltaLength = sqrt(delta_x**2 + delta_y**2 + delta_z**2)\n diff = (deltaLength - self.restLength)/(deltaLength+0.001)\n\n le = 0.5\n if particles[self.index0].fixed == False:\n particles[self.index0].x += le * diff * delta_x\n particles[self.index0].y += le * diff * delta_y\n particles[self.index0].z += le * diff * delta_z\n if particles[self.index1].fixed == False:\n particles[self.index1].x -= le * diff * delta_x\n particles[self.index1].y -= le * diff * delta_y\n particles[self.index1].z -= le * diff * delta_z\n\n def draw(self):\n ## 初期位置からパーティクル間の距離を計算\n f_x0 = particles[self.index0].init_x\n f_y0 = particles[self.index0].init_y\n f_z0 = particles[self.index0].init_z\n f_x1 = particles[self.index1].init_x\n f_y1 = particles[self.index1].init_y\n f_z1 = particles[self.index1].init_z\n self.init_d = sqrt((f_x0-f_x1)**2+(f_y0-f_y1)**2+(f_z0-f_z1)**2)\n\n x0 = particles[self.index0].x\n y0 = particles[self.index0].y\n z0 = particles[self.index0].z\n x1 = particles[self.index1].x\n y1 = particles[self.index1].y\n z1 = particles[self.index1].z\n self.d = sqrt((x0-x1)**2+(y0-y1)**2++(z0-z1)**2)\n\n #pygame.draw.line(surf, rgb(d, minimum=init_d, maximum=init_d*1.25),\n # (int(x0), int(y0)), (int(x1), int(y1)), size)\n glColor3f(1, 0, 1)\n glBegin(GL_LINES)\n glVertex3fv(tuple((x0, y0, z0)))\n glVertex3fv(tuple((x1, y1, z1)))\n glEnd()\n\n###stop_points_3d, particle_points_3d, poly_lines_3d\nparticles = []\nfor p_point in particle_points_3d:\n p = Particle(p_point[0], p_point[1], p_point[2])\n particles.append(p)\n\n### DISTAL PHALANX SP ancs[8, 1, 7] -> particles[60, 21, 59] ###\n### MIDDLE PHALANX SP ancs[6, 0, 5] -> particles[58, 22, 57] ###\nfor sp in stop_points_3d:\n try:\n anc_idx = particle_points_3d.tolist().index(sp.tolist())\n particles[anc_idx].fixed = True\n except:\n print(\"sp error : \", sp)\n\ndef flooring(x, n=2):\n return floor(x*10**n) / (10**n)\n\nconstraints = []\nfor pl in poly_lines_3d:\n top_count = len(pl)\n pl = pl.tolist()\n for i in range(top_count-1):\n try:\n if pl[i][1] == 5.8583:index0 = 31\n else:index0 = particle_points_3d.tolist().index(pl[i])\n\n if pl[i+1][1] == 5.8583:index1 = 31\n else:index1 = particle_points_3d.tolist().index(pl[i+1])\n c = Constraint(index0, index1)\n constraints.append(c)\n except:\n print(\"pl error : \",pl[i], pl[i+1])\n\nMeta_angle, Meta_AbdAdd_angle, ProP_angle, MidP_angle, DisP_angle = 0., 0., 0., 0., 0.\nMeta, PrxPh, MddPh, DisPh = False, False, False, False\nDisP_1, DisP_2, DisP_3 = [0,0,0], [0,0,0], [0,0,0]\nMidP_1, MidP_2, MidP_3 = [0,0,0], [0,0,0], [0,0,0]\nserect = None\nclass DrawWidget(QGLWidget):\n Meta_buff=np.array([None])\n ProP_buff=np.array([None])\n MidP_buff=np.array([None])\n DisP_buff=np.array([None])\n\n outMeta_buff=np.array([None])\n outProP_buff=np.array([None])\n outMidP_buff=np.array([None])\n outDisP_buff=np.array([None])\n def __init__(self, parent):\n QGLWidget.__init__(self, parent)\n self.setMinimumSize(*screen_size)\n self.camera_rot = [70,23]\n self.camera_radius = 2.5\n self.camera_center = [0.5,0.5,0.5]\n self.camera_cood = [[0.],[0.],[0.]]\n self.camera_wide_angle = 60\n self.angle_x, self.angle_y, self.angle_z = 0., 0., 0.\n self.vias_x, self.vias_y, self.vias_z = 0.,0.,0.\n self.bool_vias_x, self.bool_vias_y, self.bool_vias_z = False, False, False\n self.org = tuple((0,0,0))\n 
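The Particle/Constraint pair above implements position-based cloth dynamics. A standalone sketch of the two core steps — the Verlet position update and the distance-constraint relaxation — with the 0.5 split factor and 0.001 epsilon mirroring the values used in Particle.update and Constraint.update (function names here are illustrative):

    from math import sqrt

    def verlet_step(pos, old_pos, acc, dt):
        # next position from the last two positions; velocity stays implicit
        return [2.0 * p - q + a * dt * dt for p, q, a in zip(pos, old_pos, acc)]

    def relax_constraint(p0, p1, rest_length, le=0.5):
        # nudge both endpoints toward their rest separation
        delta = [b - a for a, b in zip(p0, p1)]
        length = sqrt(sum(d * d for d in delta))
        diff = (length - rest_length) / (length + 0.001)
        p0 = [a + le * diff * d for a, d in zip(p0, delta)]
        p1 = [b - le * diff * d for b, d in zip(p1, delta)]
        return p0, p1

Running verlet_step once per frame and relax_constraint NUM_ITER times per constraint reproduces the simulation loop used in paintGL.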
self.org_points = [[tuple((0, 0, 0)), tuple((5, 0, 0))],\n [tuple((0, 0, 0)), tuple((0, 5, 0))],\n [tuple((0, 0, 0)), tuple((0, 0, 5))]]\n self.bRGB = [.0, .0, .0]\n self.Meta, self.PrxPh, self.MddPh, self.DisPh = False, False, False, False\n self.keys_list = []\n self.all_camera_status = []\n #self.Meta_angle, self.ProP_angle, self.MidP_angle, self.DisP_angle = 0., 0., 0., 0.\n\n def mode_sp(self, mode):\n global serect\n if mode==0:serect=\"DisP_1\";\n elif mode==1:serect=\"DisP_2\";\n elif mode==2:serect=\"DisP_3\";\n elif mode==3:serect=\"MidP_1\";\n elif mode==4:serect=\"MidP_2\";\n elif mode==5:serect=\"MidP_3\";\n\n def sp_slide_listener(self, axis, val):\n global DisP_1, DisP_2, DisP_3, MidP_1, MidP_2, MidP_3\n if serect==\"DisP_1\":\n if axis==\"X\":DisP_1[0]=val\n elif axis==\"Y\":DisP_1[1]=val\n elif axis==\"Z\":DisP_1[2]=val\n print(serect, DisP_1)\n elif serect==\"DisP_2\":\n if axis==\"X\":DisP_2[0]=val\n elif axis==\"Y\":DisP_2[1]=val\n elif axis==\"Z\":DisP_2[2]=val\n print(serect, DisP_2)\n elif serect==\"DisP_3\":\n if axis==\"X\":DisP_3[0]=val\n elif axis==\"Y\":DisP_3[1]=val\n elif axis==\"Z\":DisP_3[2]=val\n print(serect, DisP_3)\n elif serect==\"MidP_1\":\n if axis==\"X\":MidP_1[0]=val\n elif axis==\"Y\":MidP_1[1]=val\n elif axis==\"Z\":MidP_1[2]=val\n print(serect, MidP_1)\n elif serect==\"MidP_2\":\n if axis==\"X\":MidP_2[0]=val\n elif axis==\"Y\":MidP_2[1]=val\n elif axis==\"Z\":MidP_2[2]=val\n print(serect, MidP_2)\n elif serect==\"MidP_3\":\n if axis==\"X\":MidP_3[0]=val\n elif axis==\"Y\":MidP_3[1]=val\n elif axis==\"Z\":MidP_3[2]=val\n print(serect, MidP_3)\n else:print(\"serect is None\")\n\n def joint_listener(self, typ, val):\n global Meta_angle, Meta_AbdAdd_angle, ProP_angle, MidP_angle, DisP_angle\n if typ==\"Meta\":Meta_angle=val\n elif typ==\"Meta_AbdAdd\":Meta_AbdAdd_angle=val\n elif typ==\"ProP\":ProP_angle=val\n elif typ==\"MidP\":MidP_angle=val\n elif typ==\"DisP\":DisP_angle=val\n\n def box_listener(self, bool_list):\n global Meta, PrxPh, MddPh, DisPh\n Meta, PrxPh, MddPh, DisPh = bool_list\n\n def key_listener(self, event):\n key = event.key()\n move_pix = 0.5\n if event.modifiers() & Qt.ShiftModifier:\n if key==Qt.Key_X : self.camera_cood[0][0] -= 0.05\n elif key==Qt.Key_Y : self.camera_cood[1][0] -= 0.05\n elif key==Qt.Key_Z : self.camera_cood[2][0] -= 0.05\n\n elif key == Qt.Key_Up : self.angle_x += move_pix\n elif key == Qt.Key_Down : self.angle_x -= move_pix\n\n elif key==Qt.Key_X : self.camera_cood[0][0] += 0.05\n elif key==Qt.Key_Y : self.camera_cood[1][0] += 0.05\n elif key==Qt.Key_Z : self.camera_cood[2][0] += 0.05\n\n elif key==Qt.Key_Left : self.angle_y += move_pix\n elif key==Qt.Key_Right : self.angle_y -= move_pix\n elif key==Qt.Key_Up : self.angle_z += move_pix\n elif key==Qt.Key_Down : self.angle_z -= move_pix\n\n def mouse_listener(self, type, event, mv_cood=[0, 0]):\n ## LEFT, MIDDLE, RIGHT, WHEEL, MOVE\n move_pix = 0.5\n if type == 'MOVE':\n self.camera_rot[0] += mv_cood[0]\n self.camera_rot[1] += mv_cood[1]\n if type == 'WHEEL':\n if event.angleDelta().y() == 120 : self.camera_radius -= move_pix\n elif event.angleDelta().y() == -120 : self.camera_radius += move_pix\n\n def cameraRESET(self):\n self.camera_rot = [70,23]\n self.camera_radius = 2.5\n self.camera_center = [0.5,0.5,0.5]\n self.camera_cood = [[0.],[0.],[0.]]\n self.angle_x, self.angle_y, self.angle_z = 0., 0., 0.\n self.vias_x, self.vias_y, self.vias_z = 0.,0.,0.\n\n def paintGL(self):\n glClearColor(*self.bRGB, 0.0)\n glClear(GL_COLOR_BUFFER_BIT | 
GL_DEPTH_BUFFER_BIT)\n\n glLoadIdentity()\n camera_pos = [self.camera_center[0]+self.camera_radius*cos(radians(self.camera_rot[0]))*cos(radians(self.camera_rot[1])),\n self.camera_center[1]+self.camera_radius*sin(radians(self.camera_rot[1])),\n self.camera_center[2]+self.camera_radius*sin(radians(self.camera_rot[0]))*cos(radians(self.camera_rot[1]))]\n gluLookAt(camera_pos[0], camera_pos[1], camera_pos[2],\n self.camera_center[0], self.camera_center[1], self.camera_center[2], 0,1,0)\n\n ## 指定軸に対して回転\n glRotated(self.angle_x, 1.0, 0.0, 0.0)\n glRotated(self.angle_y, 0.0, 1.0, 0.0)\n glRotated(self.angle_z, 0.0, 0.0, 1.0)\n ## 指定軸に対して平行移動\n glTranslatef(-self.camera_cood[0][0], -self.camera_cood[1][0], -self.camera_cood[2][0])\n\n drawText_3D(\"X\", 3., 0., 0.)\n drawText_3D(\"Y\", 0., 3., 0.)\n drawText_3D(\"Z\", 0., 0., 3.)\n drawAxis()\n\n ########################## SET EXTENSOR HOOD ##########################\n\n for i in range(len(particles)):\n particles[i].update(delta_t)\n\n for i in range(NUM_ITER):\n for ii in range(len(constraints)):\n constraints[ii].update()\n\n ############################## DRAW BONES ##############################\n glPushMatrix();\n ## 中手骨の描画\n if not Meta:\n global Meta_buff, outMeta_buff\n if self.Meta_buff.all()==None:\n Meta_buff = create_vbo(self.Meta_buff, Meta_ver, Meta_col, Meta_ind)\n outMeta_buff = create_vbo(self.outMeta_buff, Meta_ver, Meta_Frame_col, Meta_ind)\n draw_vbo(Meta_buff, Meta_ind)\n draw_vbo(outMeta_buff, Meta_ind, mode_front=GL_LINE, mode_back=GL_LINE)\n\n ## 基節骨の描画\n pos_proP = (2.4, (Meta_max_cood[1]-0.2)-Meta_angle*0.01, Meta_angle*0.002)\n glTranslatef(*pos_proP)\n if not PrxPh:\n global ProP_buff, outProP_buff\n if self.ProP_buff.all()==None:\n ProP_buff = create_vbo(self.ProP_buff, ProP_ver, ProP_col, ProP_ind)\n outProP_buff = create_vbo(self.outProP_buff, ProP_ver, ProP_Frame_col, ProP_ind)\n glRotatef(Meta_angle, 1, 0, 0)\n glTranslatef(-1.2,0,0)\n glRotated(Meta_AbdAdd_angle, 0, 0, 1)\n glTranslatef(-1.2-Meta_AbdAdd_angle*0.003,0,0)\n draw_vbo(ProP_buff, ProP_ind)\n draw_vbo(outProP_buff, ProP_ind, mode_front=GL_LINE)\n\n ## 中節骨の描画\n mddp_vias = gaussian_function(sigma=20, mu=60, x=ProP_angle, A=1.7)\n pos_midP = (0, (1.462+1.8)-ProP_angle*0.008, -ProP_angle*0.001+mddp_vias)\n glTranslatef(*pos_midP)\n if not MddPh:\n global MidP_buff, outMidP_buff\n if self.MidP_buff.all()==None:\n MidP_buff = create_vbo(self.MidP_buff, MidP_ver, MidP_col, MidP_ind)\n outMidP_buff = create_vbo(self.outMidP_buff, MidP_ver, MidP_Frame_col, MidP_ind)\n glRotatef(ProP_angle+3, 1, 0, 0)\n draw_vbo(MidP_buff, MidP_ind)\n draw_vbo(outMidP_buff, MidP_ind, mode_front=GL_LINE)\n\n #print(pos_proP[1]+pos_midP[1])\n glTranslatef(0,pos_proP[1]+pos_midP[1],0)\n particles[58].draw_sp()\n particles[58].when_move(0, (1.462+1.8)-ProP_angle*0.008, -ProP_angle*0.001+mddp_vias)\n particles[22].draw_sp()\n particles[22].when_move(0, (1.462+1.8)-ProP_angle*0.008, -ProP_angle*0.001+mddp_vias)\n particles[57].draw_sp()\n particles[57].when_move(0, (1.462+1.8)-ProP_angle*0.008, -ProP_angle*0.001+mddp_vias)\n glTranslatef(0,0,0)\n\n\n ## 末節骨の描画\n disp_vias = gaussian_function(sigma=25, mu=70, x=MidP_angle, A=1.9)\n pos_disP = (0, (2.906-0.95)-MidP_angle*0.009, -MidP_angle*0.005+disp_vias)\n glTranslatef(*pos_disP)\n if not DisPh:\n global DisP_buff, outDisP_buff\n if self.DisP_buff.all()==None:\n DisP_buff = create_vbo(self.DisP_buff, DisP_ver, DisP_col, DisP_ind)\n outDisP_buff = create_vbo(self.outDisP_buff, DisP_ver, DisP_Frame_col, DisP_ind)\n 
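The camera_pos computation at the top of paintGL converts spherical orbit coordinates (two angles plus a radius around a look-at center) into the Cartesian eye position fed to gluLookAt. The same conversion, factored out as a standalone helper with angles in degrees, matching the math above:

    from math import sin, cos, radians

    def orbit_eye(center, radius, yaw_deg, pitch_deg):
        # spherical coordinates around `center` -> eye position for gluLookAt
        yaw, pitch = radians(yaw_deg), radians(pitch_deg)
        return (center[0] + radius * cos(yaw) * cos(pitch),
                center[1] + radius * sin(pitch),
                center[2] + radius * sin(yaw) * cos(pitch))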
glRotatef(MidP_angle+3, 1, 0, 0)\n draw_vbo(DisP_buff, DisP_ind)\n draw_vbo(outDisP_buff, DisP_ind, mode_front=GL_LINE)\n\n glTranslatef(0,pos_proP[1]+pos_midP[1]+pos_disP[1],0)\n particles[60].draw_sp()\n particles[21].draw_sp()\n particles[59].draw_sp()\n glTranslatef(0,0,0)\n\n glPopMatrix();\n ##########################################################################\n\n ## 座標の表示 -self.camera_cood[0][0], -self.camera_cood[1][0], -self.camera_cood[2][0]\n drawText(\"Camera Pos : \"+str(round(camera_pos[0], 2))+\", \"\\\n +str(round(camera_pos[1], 2))+\", \"\\\n +str(round(camera_pos[2], 2)), 2, 12, *screen_size)\n\n drawText(\"Camera Axe : \"+str(round(self.camera_cood[0][0], 2))+\", \"\\\n +str(round(self.camera_cood[1][0], 2))+\", \"\\\n +str(round(self.camera_cood[2][0], 2)), 2, 2, *screen_size)\n ## 関節角度の表示\n drawText(\"Meta Angle : \" +str(float(Meta_angle))+\"°\"+\" | \"\n +\"Meta Abd & Add Angle : \"+str(float(Meta_AbdAdd_angle))+\"°\",2, screen_size[1]-10, *screen_size)\n drawText(\"ProP Angle : \" +str(float(ProP_angle))+\"°\", 2, screen_size[1]-20, *screen_size)\n drawText(\"MidP Angle : \" +str(float(MidP_angle))+\"°\", 2, screen_size[1]-30, *screen_size)\n drawText(\"DisP Angle : \"+str(float(DisP_angle))+\"°\", 2, screen_size[1]-40, *screen_size)\n\n ########################## DRAW EXTENSOR HOOD ##########################\n #\"\"\"\n glPushMatrix();\n for i in range(len(particles)):\n particles[i].draw()\n\n for i in range(len(constraints)):\n constraints[i].draw()\n glPopMatrix();\n #\"\"\"\n ##########################################################################\n ## 原点の描画\n glColor3f(1, 1, 0)\n glPointSize(30)\n glBegin(GL_POINTS)\n glVertex3fv(self.org)\n glEnd()\n\n glFlush()\n\n def resizeGL(self, w, h):\n glViewport(0, 0, w, h)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(30.0, w/h, 1.0, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n def initializeGL(self):\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\n\n glClearColor(*self.bRGB, 1.0)\n glClearDepth(1.0)\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(40.0, 1.0, .1, 100.0)\n\n##### http://penguinitis.g1.xrea.com/computer/programming/Python/PyQt5/PyQt5-memo/PyQt5-memo.html\nclass Joint_Slider(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self)\n self.gl = DrawWidget(self)\n self.Meta_lab = QLabel(\"0\")\n self.ProP_lab = QLabel(\"0\")\n self.MidP_lab = QLabel(\"0\")\n self.DisP_lab = QLabel(\"0\")\n\n self.Meta_lab.setFont(QtGui.QFont(\"Sanserif\", 10))\n self.ProP_lab.setFont(QtGui.QFont(\"Sanserif\", 10))\n self.MidP_lab.setFont(QtGui.QFont(\"Sanserif\", 10))\n self.DisP_lab.setFont(QtGui.QFont(\"Sanserif\", 10))\n\n self.initUI()\n\n def initUI(self):\n splitter1 = QSplitter(Qt.Vertical)\n splitter2 = QSplitter(Qt.Vertical)\n splitter3 = QSplitter(Qt.Vertical)\n ## Meta,ProP,MidP,DisP\n Meta_slider = QSlider(Qt.Horizontal)\n Meta_AbdAdd_slider = QSlider(Qt.Horizontal)\n ProP_slider = QSlider(Qt.Horizontal)\n MidP_slider = QSlider(Qt.Horizontal)\n DisP_slider = QSlider(Qt.Horizontal)\n\n label_Meta = QLabel(\"Angle of the Meta (Flexion / extension) / (abduction / adduction)\")\n label_ProP = QLabel(\"Angle of the ProP (Flexion / extension)\")\n label_MidP = QLabel(\"Angle of the MidP (Flexion / extension)\")\n ## 屈曲 / 伸展\n Meta_slider.setMinimum(-10)\n Meta_slider.setMaximum(90)\n Meta_slider.valueChanged.connect(lambda val: self.gl.joint_listener(\"Meta\", val))\n ## 外転 / 内転\n Meta_AbdAdd_slider.setMinimum(-20)\n 
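Each joint slider in Joint_Slider.initUI repeats the same wiring: a horizontal QSlider, a min/max range, and a valueChanged lambda routed to the shared joint_listener. A compact sketch of that pattern as a helper (a hypothetical function, not part of the original file):

    from PyQt5.QtCore import Qt
    from PyQt5.QtWidgets import QSlider

    def make_joint_slider(lo, hi, on_change):
        # horizontal slider with a fixed range, routed to one listener
        slider = QSlider(Qt.Horizontal)
        slider.setMinimum(lo)
        slider.setMaximum(hi)
        slider.valueChanged.connect(on_change)
        return slider

For example, the MCP flexion slider would become make_joint_slider(-10, 90, lambda v: self.gl.joint_listener("Meta", v)).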
Meta_AbdAdd_slider.setMaximum(20)\n Meta_AbdAdd_slider.valueChanged.connect(lambda val: self.gl.joint_listener(\"Meta_AbdAdd\",val))\n splitter1.addWidget(label_Meta)\n splitter1.addWidget(Meta_slider)\n splitter1.addWidget(Meta_AbdAdd_slider)\n splitter1.setFrameShape(QFrame.Panel)\n\n ## 屈曲 / 伸展\n ProP_slider.setMinimum(-10)\n ProP_slider.setMaximum(90)\n ProP_slider.valueChanged.connect(lambda val: self.gl.joint_listener(\"ProP\",val))\n splitter2.addWidget(label_ProP)\n splitter2.addWidget(ProP_slider)\n splitter2.setFrameShape(QFrame.Panel)\n\n ## 屈曲 / 伸展\n MidP_slider.setMinimum(-10)\n MidP_slider.setMaximum(90)\n MidP_slider.valueChanged.connect(lambda val: self.gl.joint_listener(\"MidP\",val))\n splitter3.addWidget(label_MidP)\n splitter3.addWidget(MidP_slider)\n splitter3.setFrameShape(QFrame.Panel)\n\n layout = QVBoxLayout()\n layout.addWidget(splitter1)\n layout.addWidget(splitter2)\n layout.addWidget(splitter3)\n\n self.setLayout(layout)\n\nclass Coordination_slider(QFrame):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.gl = DrawWidget(self)\n ## パーティクルの移動\n slider_x = QSlider(Qt.Horizontal)\n slider_x.setMinimum(-10)\n slider_x.setMaximum(10)\n slider_x.setTickInterval(.1)\n slider_x.valueChanged.connect(lambda val: self.gl.sp_slide_listener(\"X\", val))\n\n slider_y = QSlider(Qt.Horizontal)\n slider_y.setMinimum(-10)\n slider_y.setMaximum(10)\n slider_y.setTickInterval(.1)\n slider_y.valueChanged.connect(lambda val: self.gl.sp_slide_listener(\"Y\", val))\n\n slider_z = QSlider(Qt.Horizontal)\n slider_z.setMinimum(-10)\n slider_z.setMaximum(10)\n slider_z.setTickInterval(.1)\n slider_z.valueChanged.connect(lambda val: self.gl.sp_slide_listener(\"Z\", val))\n\n label_x = QLabel(\"Coordination X\")\n label_y = QLabel(\"Coordination Y\")\n label_z = QLabel(\"Coordination Z\")\n\n layout = QVBoxLayout()\n\n layout.addWidget(label_x)\n layout.addWidget(slider_x)\n layout.addWidget(label_y)\n layout.addWidget(slider_y)\n layout.addWidget(label_z)\n layout.addWidget(slider_z)\n\n self.setLayout(layout)\n\nclass Particle_cBox(QFrame):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.gl = DrawWidget(self)\n ## パーティクルの選択\n cBox_label = QLabel(\"Select Stop Particle\")\n self.combo = QComboBox(self)\n self.combo.addItem(\"DisP_1\")\n self.combo.addItem(\"DisP_2\")\n self.combo.addItem(\"DisP_3\")\n self.combo.addItem(\"MidP_1\")\n self.combo.addItem(\"MidP_2\")\n self.combo.addItem(\"MidP_3\")\n\n button = QPushButton(\"Check\")\n button.clicked.connect(self.buttonClicked)\n\n layout = QVBoxLayout()\n layout.addWidget(cBox_label)\n layout.addWidget(self.combo)\n layout.addWidget(button)\n\n self.setLayout(layout)\n\n def buttonClicked(self):\n self.gl.mode_sp(self.combo.currentIndex())\n\nclass AnchorPoint_Slider(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.gl = DrawWidget(self)\n\n self.initUI()\n\n def initUI(self):\n frame1 = Particle_cBox(self)\n frame1.setFrameShape(QFrame.Panel)\n\n frame2 = Coordination_slider(self)\n frame2.setFrameShape(QFrame.Panel)\n\n splitter = QSplitter(Qt.Horizontal)\n splitter.addWidget(frame1)\n splitter.addWidget(frame2)\n splitter.setHandleWidth(10)\n\n layout = QHBoxLayout()\n layout.addWidget(splitter)\n self.setLayout(layout)\n\nclass Bone_CheckBox(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.bool_list = [True, True, True, True]\n self.listCheckBox = names_list\n self.listLabel = []\n self.layout = QGridLayout()\n for label in 
range(len(self.listCheckBox)):\n            self.listLabel.append(\"\")\n        self.gl = DrawWidget(self)\n        self.initUI()\n\n    def initUI(self):\n        for i, v in enumerate(self.listCheckBox):\n            self.listCheckBox[i] = QCheckBox(v)\n            self.listLabel[i] = QLabel()\n            self.layout.addWidget(self.listCheckBox[i], i+10, 0)\n            self.layout.addWidget(self.listLabel[i], i+10, 1)\n\n        sc_button = QPushButton(\"Show / Clear\")\n        sc_button.clicked.connect(self.check_checkbox)\n        self.layout.addWidget(sc_button, 20, 0)\n\n        #rst_button = QPushButton(\"RESET CAMERA VIEW\")\n        #rst_button.clicked.connect(self.gl.cameraRESET)\n        #layout.addWidget(rst_button, 30, 0)\n        self.setLayout(self.layout)\n\n    def check_checkbox(self):\n        for i, v in enumerate(self.listCheckBox):\n            if v.checkState():\n                self.bool_list[i]=True\n            else:\n                self.bool_list[i]=False\n        self.gl.box_listener(self.bool_list)\n\nclass QTWidget(QWidget):\n    def __init__(self):\n        QWidget.__init__(self)\n        self.clicked_points = [0, 0]\n        self.gl = DrawWidget(self)\n        self.initUI()\n\n        timer = QTimer(self)\n        timer.setInterval(20) # period, in milliseconds\n        timer.timeout.connect(self.gl.updateGL)\n        timer.start()\n\n    def initUI(self):\n        gui_layout = QGridLayout()\n        self.setLayout(gui_layout)\n        gui_layout.addWidget(self.gl)\n\n        widget1 = Joint_Slider(self)\n        widget2 = Bone_CheckBox(self)\n        widget3 = AnchorPoint_Slider(self)\n\n        tab = QTabWidget()\n        tab.addTab(widget1, \"Joint slider\")\n        tab.addTab(widget2, \"Check Box (Bone)\")\n        tab.addTab(widget3, \"Stop Particle slider\")\n\n        rst_button = QPushButton(\"RESET CAMERA VIEW\")\n        rst_button.clicked.connect(self.gl.cameraRESET)\n        widget2.layout.addWidget(rst_button, 30, 0)\n\n        gui_layout.addWidget(tab)\n        self.setLayout(gui_layout)\n\n    def keyPressEvent(self, event):\n        self.gl.key_listener(event)\n\n    ## LEFT, MIDDLE, RIGHT, WHEEL, MOVE\n    def mouseButtonKind(self, buttons):\n        if buttons & Qt.LeftButton : self.gl.mouse_listener(\"LEFT\", None)\n        if buttons & Qt.MidButton : self.gl.mouse_listener(\"MIDDLE\", None)\n        if buttons & Qt.RightButton : self.gl.mouse_listener(\"RIGHT\", None)\n\n    def mousePressEvent(self, e):\n        self.mouseButtonKind(e.buttons())\n        self.clicked_points = [e.pos().x(), e.pos().y()]\n\n    def mouseReleaseEvent(self, e):\n        self.mouseButtonKind(e.buttons())\n\n    def wheelEvent(self, e):\n        self.gl.mouse_listener(\"WHEEL\", e)\n\n    def mouseMoveEvent(self, e):\n        # relative mouse movement since the press\n        mvX, mvY = self.clicked_points[0]-e.x(), self.clicked_points[1]-e.y()\n        self.gl.mouse_listener(\"MOVE\", e, mv_cood=[-mvX*0.2, -mvY*0.2])\n        self.update()\n\nif __name__=='__main__':\n    app = QApplication(sys.argv)\n    w = QTWidget()\n    w.setWindowTitle(window_title)\n    w.show()\n\n    sys.exit(app.exec_())\n","sub_path":"cloth_test_opengl_3D_05.py","file_name":"cloth_test_opengl_3D_05.py","file_ext":"py","file_size_in_byte":29680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"89855454","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'ITsystem'\n__mtime__ = '2015/12/23'\n\nimport urllib2\nimport urllib\n\n# response=urllib2.urlopen('http://192.168.109.55')\n# html=response.read()\n# print html\n\n# req=urllib2.Request('http://192.168.109.55')\n# response=urllib2.urlopen(req)\n# the_page=response.read()\n# print the_page\n#! 
/usr/bin/env python\n# -*- coding=utf-8 -*-\n# @Author pythontab.com\nimport urllib2\nurl=\"http://pythontab.com\"\nreq_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n'Accept':'text/html;q=0.9,*/*;q=0.8',\n'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n'Accept-Encoding':'gzip',\n'Connection':'close',\n'Referer':None # note: if the page still cannot be fetched, set this to the target site's host\n}\nreq_timeout = 5\nreq = urllib2.Request(url,None,req_header)\nresp = urllib2.urlopen(req,None,req_timeout)\nhtml = resp.read()\nprint(html)\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} {"seq_id":"413898853","text":"from classpython import oddeven\nfrom sys import exit\ndef lettergrade():\n    # The interoperability of code is astounding, one thing generally does work with another\n    # thing. That concept reminds me of Magika, that sweet little game where you combine\n    # different types of magic to create new magic. That's how I feel when I code, like I am\n    # conjuring \"fantastical\" creations. And this is just the beginning\n    numcent = [ input(\"Please enter your name:\\n \"), int(input(\"Please enter your exam score: \"))]\n    if numcent[1] >= 90:\n        print(f\"Your name is {numcent[0]} and you scored an A, Great job!\")\n    elif numcent[1] >= 80:\n        print(f\"Your name is {numcent[0]} and you scored a B, good job.\")\n    elif numcent[1] >= 70:\n        print(f\"Your name is {numcent[0]} and you scored a C, you passed!\")\n    elif numcent[1] >= 60:\n        print(f\"Your name is {numcent[0]} and you scored a D, D's get degrees.\")\n    else:\n        print(f\"Unfortunately {numcent[0]}, you failed. Better luck next time.\")\n\ndef profitloss():\n    revenue = int(input(\"Enter the predicted revenue: \"))\n    fixed_costs = int(input(\"Enter the fixed costs: \"))\n    variable_costs = int(input(\"Enter the variable costs: \"))\n    profit = revenue - fixed_costs - variable_costs\n    if profit > 0:\n        print(f\"Profit of {profit} is projected\")\n    else:\n        print(f\"A loss of {-profit} is projected\")\n\n\n\ntry:\n    userinput = int(input(\"What function do you want to run? [Odds/Evens(1),Letter_Grade(2),Profit_Loss(3)]\"))\nexcept:\n    print(\"ERROR\")\n    exit(0)\nif userinput == 1:\n    oddeven()\n# I wondered what the difference was in elif and if, if you change the elif to an if below it will cause the else to trigger!\n# because its in a different block I suppose. 
its quite strange though\nelif userinput == 2:\n lettergrade()\nelse:\n profitloss()\n","sub_path":"python/classpython2.py","file_name":"classpython2.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"118003632","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 8 15:57:59 2019\n\n@author: hankui\n\"\"\"\n\n# https://practice.geeksforgeeks.org/problems/find-sum-of-different-corresponding-bits-for-all-pairs/0\n\n#%%\narr = [1, 3, 5] \nn = len(arr) \n\n\n#%% sum of bit differences among all pairs\n# source: \n\ndef sumBitDifferences(arr, n): \n \n ans = 0 # Initialize result \n \n # traverse over all bits \n for i in range(0, 32): \n \n # count number of elements with i'th bit set \n count = 0\n for j in range(0,n): \n if ( (arr[j] & (1 << i)) ): \n count += 1\n \n # Add \"count * (n - count) * 2\" to the answer \n ans += (count * (n - count) * 2); \n \n return ans \n\n\n#%%\nfrom itertools import permutations\nimport numpy as np\ndef BitDiff(arr): \n \n # initialised answer\n count = 0\n \n # the length of the array\n n = len(arr)\n \n # obtain the binary representation for every number in the array\n br = [] # initialise an empty list \n for i in range(0,n):\n br_i = str(bin(arr[i]))[2:]\n br.append(br_i)\n \n # get all combinations of length 2\n comb = list(permutations(list(range(0,n)), 2))\n \n for ii in comb:\n \n str_a = br[ii[0]];\n str_b = br[ii[1]];\n # maximum length of the two strings being compared\n max_len = max(len(str_a), len(str_b))\n \n # minimum length of the two strings being compared \n min_len = min(len(str_a), len(str_b))\n \n # append zeros to the end of the shorter string\n ind = np.argmin([len(str_a), len(str_b)])\n str_short = br[ii[ind]]\n str_short = str_short.ljust(max_len, '0')\n \n for l in range(max_len):\n if str_short[l] != br[ii[1-ind]][l]:\n count += 1\n \n \n return count\n\n\n#%% one correct submission\nfor _ in range(int(input())):\n n=int(input())\n arr=list(map(int,input().split()))\n ans = 0 # Initialize result \n \n # traverse over all bits \n for i in range(0, 32): \n \n # count number of elements with i'th bit set \n count = 0\n for j in range(0,n): \n if ( (arr[j] & (1 << i)) ): \n count+=1\n \n # Add \"count * (n - count) * 2\" to the answer \n ans =(ans+ (count * (n - count) * 2))%(pow(10,9)+7); \n print(ans)\n\n\n#%%\nt=int(input())\nres = BitDiff(arr)\n \n#%%\nprint(sumBitDifferences(arr, n)) \n","sub_path":"General/SolvedFirstTime/SumOfBits.py","file_name":"SumOfBits.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"54339954","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom rq import Connection, Queue, Worker\n\nlisten = ['queue1', 'queue2', 'queue3']\n\nif __name__ == '__main__':\n # Tell rq what Redis connection to use\n with Connection():\n q = Queue()\n Worker(map(q, listen), round_robin=True).work()\n","sub_path":"examples/run_worker_round_robin.py","file_name":"run_worker_round_robin.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"99029226","text":"# coding = utf8\nimport logging\nimport multiprocessing\nimport subprocess\n\nimport pytest\nfrom airtest.core.api import *\nfrom poco.drivers.android.uiautomation import 
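# --- Why sumBitDifferences in the record above uses count * (n - count) * 2 ---
# At bit position i, a pair of numbers differs exactly when one has the bit
# set and the other does not. With `count` of the n numbers having the bit
# set, that is count * (n - count) unordered pairs, doubled to count ordered
# pairs. Brute-force cross-check of the identity:
def sum_bit_diffs_bruteforce(arr):
    total = 0
    for a in arr:
        for b in arr:
            total += bin(a ^ b).count("1")  # bits where the ordered pair differs
    return total

arr = [1, 3, 5]
n = len(arr)
fast = sum(2 * c * (n - c)
           for c in (sum((a >> i) & 1 for a in arr) for i in range(32)))
assert fast == sum_bit_diffs_bruteforce(arr) == 8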
AndroidUiautomationPoco\n\nfrom config import install_app_necessary, SERIAL_NUMBER\nfrom page.fota.fota_page import Fota_Page\nfrom page.main_page import Main_Page\nfrom page.system.system import System\nfrom toolsbar.common import test_device\nfrom toolsbar.permissionGrant import grant_permission\n\nos.path.abspath(\".\")\n\n# 过滤airtest log只打印ERROR的Log\nlogger_airtest = logging.getLogger(\"airtest\")\nlogger_airtest.setLevel(logging.ERROR)\ncur_time = time.strftime(\"%Y%m%d_%H%M%S\")\n\"\"\"\n @File:run_test.py\n @Author:Bruce\n @Date:2020/12/15\n @Description:项目运行函数,存放测试和调试函数\n\"\"\"\n\n\"\"\"\n 单个设备poco、device不需要初始化\n 多个设备poco、device都需要创建新对象poco_item\n 后续将poco_item传入使用即可,airtest相关api,使用对应device_item进行调用\n case不需要重复写\n UI 进程和底部进程不要在同一个进程中容易出问题\n\"\"\"\n\n# 多机测试进程池:兼容单机和多机运行\n\"\"\"\n @description:多进程创建进行多台设备测试\n @tip:\n Pycharm调用adb缺陷,需要使用terminal输入charm来启动pycharm,以获得dash权限\n 执行case前,手动将pocoservice.apk的contniue安装好并将授权界面点掉,防止后续错误发生\n\"\"\"\n\n\ndef start_test():\n print(\"当前设备数量:\" + str(len(SERIAL_NUMBER)))\n if len(SERIAL_NUMBER) > 1:\n for i in test_device:\n install_app_necessary(i)\n grant_permission(i)\n else:\n install_app_necessary(test_device)\n grant_permission(test_device)\n test_pool = multiprocessing.Pool(len(SERIAL_NUMBER))\n for device_ in SERIAL_NUMBER:\n test_pool.apply_async(func=fota_test_area, args=(device_,))\n sleep(10)\n test_pool.close()\n test_pool.join()\n\n\n\"\"\"\n @description:Fota checklist测试函数执行区域\n @param:\n device_:设备序列号\n\"\"\"\n\n\ndef fota_test_area(device_):\n pytest.main([\"-v\", \"-s\", \"--cmdopt={}\".format(device_), \"{}\".format(\"./test_case/test_before_fota.py\"),\n \"--reruns={}\".format(1),\n \"--alluredir={}\".format(\"./temp/need_data[{}_{}]/\".format(cur_time, device_))])\n # 设置差异化\n subprocess.Popen(\n args=[\"allure\", \"generate\", \"./temp/need_data[{}_{}]/\".format(cur_time, device_), \"-o\",\n \"./report/test_report[{}_{}]/\".format(cur_time, device_),\n \"--clean\"],\n shell=False).communicate()[0]\n updatesw(device_)\n\n # subprocess.Popen(\n # \"allure generate ./temp/need_data[{}_{}] -o ./report/test_report[{}_{}]/ --clean\".format(cur_time, device_,\n # cur_time, device_),\n # shell=True).communicate()[0]\n\n\n\"\"\"\n @description:Fota checklist测试软件升级函数执行区域\n @param:\n device_:设备序列号\n\"\"\"\n\n\ndef updatesw(device_):\n print(\"开始新版本升级\")\n try:\n device_c = connect_device(\"Android:///{}\".format(device_))\n poco = AndroidUiautomationPoco(device=device_c, use_airtest_input=False,\n screenshot_each_action=False)\n main_page = Main_Page(device_c, poco)\n system = System(main_page)\n system.unlock_screen()\n fota_page = Fota_Page(main_page)\n fota_page.start_fota_page()\n fota_page.skip_guide()\n fota_page.updatesw()\n print(\"升级结果:\" + str(fota_page.check_update_result(device_)))\n print(\"Fota升级测试结束\")\n except Exception as ex:\n print(str(ex))\n\n\n\"\"\"\n @description:Fota checklist测试函数区域\n\"\"\"\n\n\ndef fota_checklist_test_module():\n start_test()\n\n\n\"\"\"\n @description:main函数,主要运行函数\n\"\"\"\nif __name__ == '__main__':\n print(\"脚本开始测试,Fota checklist模块测试正在运行中……\")\n for i in range(5):\n print(\"这是第{}次测试该脚本\".format(i))\n fota_checklist_test_module()\n print(\"This is {} times running and time is {}\".format(str(i), time.strftime(\"%Y%m%d_%H%M%S\")))\n print(\"脚本测试结束,请检查测试结果\")\n","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"469078008","text":"def ncdump(nc_fid, 
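# --- The per-device fan-out pattern used by start_test() above ---
# One pool worker per serial number, apply_async() per device, then
# close()/join(). Stripped of the pytest/allure specifics it reduces to:
import multiprocessing
import time

def run_on_device(serial):
    time.sleep(0.1)                         # stands in for the pytest/allure run
    return "done: %s" % serial

if __name__ == "__main__":
    serials = ["dev-a", "dev-b", "dev-c"]   # illustrative serial numbers
    pool = multiprocessing.Pool(len(serials))
    results = [pool.apply_async(run_on_device, (s,)) for s in serials]
    pool.close()                            # no more tasks will be submitted
    pool.join()                             # wait for every device run to finish
    for r in results:
        print(r.get())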
verb=True):\n '''\n ncdump outputs dimensions, variables and their attribute information.\n The information is similar to that of NCAR's ncdump utility.\n ncdump requires a valid instance of Dataset.\n\n Parameters\n ----------\n nc_fid : netCDF4.Dataset\n A netCDF4 dateset object\n verb : Boolean\n whether or not nc_attrs, nc_dims, and nc_vars are printed\n\n Returns\n -------\n nc_attrs : list\n A Python list of the NetCDF file global attributes\n nc_dims : list\n A Python list of the NetCDF file dimensions\n nc_vars : list\n A Python list of the NetCDF file variables\n '''\n def print_ncattr(key):\n \"\"\"\n Prints the NetCDF file attributes for a given key\n\n Parameters\n ----------\n key : unicode\n a valid netCDF4.Dataset.variables key\n \"\"\"\n try:\n print(\"\\t\\ttype:\", repr(nc_fid.variables[key].dtype))\n for ncattr in nc_fid.variables[key].ncattrs():\n print('\\t\\t%s:' % ncattr,\\\n repr(nc_fid.variables[key].getncattr(ncattr)))\n except KeyError:\n print(\"\\t\\tWARNING: %s does not contain variable attributes\" % key)\n\n # NetCDF global attributes\n nc_attrs = nc_fid.ncattrs()\n if verb:\n print(\"NetCDF Global Attributes:\")\n for nc_attr in nc_attrs:\n print('\\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr)))\n nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions\n # Dimension shape information.\n if verb:\n print(\"NetCDF dimension information:\")\n for dim in nc_dims:\n print(\"\\tName:\", dim)\n print(\"\\t\\tsize:\", len(nc_fid.dimensions[dim]))\n print_ncattr(dim)\n # Variable information.\n nc_vars = [var for var in nc_fid.variables] # list of nc variables\n if verb:\n print(\"NetCDF variable information:\")\n for var in nc_vars:\n if var not in nc_dims:\n print('\\tName:', var)\n print(\"\\t\\tdimensions:\", nc_fid.variables[var].dimensions)\n print(\"\\t\\tsize:\", nc_fid.variables[var].size)\n print_ncattr(var)\n return nc_attrs, nc_dims, nc_vars\n\n\ndef findScaleOffset(nc_fid, var, scaleKey='SCale', offsetKey='offset',\n print_info=False):\n '''\n findScaleOffset searchs for scale and offset in the attributes, return thems\n if found, otherwise returns 1 and 0 respectively\n\n Parameters\n ----------\n nc_fid : netCDF4.Dataset\n A netCDF4 dateset object\n var : string\n var to search (tas, t2m, etc)\n scaleKey : string\n string to search for (case insensitive)\n offsetKey : string\n string to search for (case insensitive)\n\n Returns\n -------\n Found : boolean\n True if any of the scaling factors is found\n scale_factor : depends on the NetCDF (double, float, etc)\n The value of the scale factor\n add_offset : depends on the NetCDF (double, float, etc)\n The value of the offset\n '''\n\n # scale var\n found = False\n scale_factor = 1\n add_offset = 0\n\n try:\n var_attrs = nc_fid.variables[var].ncattrs()\n if print_info:\n print(\"%s attributes : %s\" % (var, var_attrs))\n # print(type(tas_attrs)) #list\n for attr in var_attrs:\n if scaleKey.lower() in attr.lower(): # lower just to make it case insensitive\n scale_factor = nc_fid.variables[var].getncattr(attr)\n print(\"Found %s: %f\" % (attr, scale_factor))\n found = True\n if offsetKey.lower() in attr.lower():\n add_offset = nc_fid.variables[var].getncattr(attr)\n print(\"Found %s: %f\" % (attr, add_offset))\n found = True\n except:\n print(\"Error, leaving to default vals\")\n # scale var\n found = False\n scale_factor = 1\n add_offset = 0\n return found, scale_factor, add_offset\n\n\ndef convertTime(cdo, nc_file_in, nc_file_out):\n cdo.setreftime(\"1850-01-01,00:00:00\", 
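# --- On the manual scale/offset handling in findScaleOffset above ---
# The helper re-implements CF-style unpacking (unpacked = scale_factor *
# packed + add_offset). Note that netCDF4-python applies those attributes
# automatically on read unless auto-scaling is switched off, so manual
# unpacking mainly matters when you want the raw packed values. Hedged
# sketch (file and variable names are placeholders):
from netCDF4 import Dataset

nc = Dataset("example.nc", "r")
var = nc.variables["tas"]
var.set_auto_maskandscale(False)       # read raw packed values from here on
raw = var[:]
scale = getattr(var, "scale_factor", 1.0)
offset = getattr(var, "add_offset", 0.0)
unpacked = scale * raw + offset        # same formula the helper above applies
nc.close()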
input=\"-setcalendar,standard \"+nc_file_in, output=nc_file_out)\n\n\ndef convertTemp(cdo, nc_file_in, nc_file_out):\n import os\n nc_file_out_aux = \"convertTemp_aux.nc\"\n cdo.subc(\"273.15\", input=nc_file_in, output=nc_file_out_aux)\n cdo.chunit(\"K,C\", input=nc_file_out_aux, output=nc_file_out)\n os.remove(nc_file_out_aux)\n\n\ndef convertPrecip(cdo, nc_file_in, nc_file_out):\n import os\n nc_file_out_aux = \"convertPrecip_aux.nc\"\n # 1 kg of rain water spread over 1 square meter of surface is 1 mm in thickness\n # there are 60X60X24=86400 seconds in one day.\n # Therefore, 1 kg/m2/s = 86400 mm/day.\n cdo.mulc(\"86400\", input=nc_file_in, output=nc_file_out_aux)\n cdo.chunit(\"'kg m-2 s-1','mm day-1'\", input=nc_file_out_aux, output=nc_file_out)\n os.remove(nc_file_out_aux)\n\n\ndef draw_map(m, scale=1):\n from itertools import chain\n import numpy as np\n # draw a shaded-relief image\n m.shadedrelief(scale=scale)\n\n # Add Coastlines, States, and Country Boundaries\n# m.drawcoastlines()\n# m.drawstates()\n m.drawcountries()\n\n # lats and longs are returned as a dictionary\n lats = m.drawparallels(np.linspace(-90, 90, 100))\n lons = m.drawmeridians(np.linspace(-180, 180, 300))\n\n # keys contain the plt.Line2D instances\n lat_lines = chain(*(tup[1][0] for tup in lats.items()))\n lon_lines = chain(*(tup[1][0] for tup in lons.items()))\n all_lines = chain(lat_lines, lon_lines)\n\n # cycle through these lines and set the desired style\n for line in all_lines:\n line.set(linestyle='-', alpha=0.3, color='b')\n\n\ndef get_subdirs(a_dir):\n '''\n Return (me da los nombres de cada directorio) the names of the directories inside the input folder,\n and the full path of each directory.\n '''\n import os\n return [[name, os.path.join(a_dir, name)] for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef get_subfiles(a_dir):\n '''\n Return the names of the files inside the input folder,\n and the full path of each file.\n '''\n import os\n return [[name, os.path.join(a_dir, name)] for name in os.listdir(a_dir)\n if os.path.isfile(os.path.join(a_dir, name))]\n\n\ndef check(dir_in):\n import os\n if os.path.exists(dir_in):\n return True\n else:\n return False\n\n\ndef check_and_create(dir_in):\n '''\n Check if a dir exists, if not, create it\n '''\n import os\n if os.path.exists(dir_in):\n pass # does nothing\n # print(\"Dir already exists\", dir_in)\n else:\n print(\"Create dir\", dir_in)\n os.mkdir(dir_in)\n\n\ndef plotRavel(file_path, param):\n '''\n flaten array to 1D and plot it\n '''\n\n from netCDF4 import Dataset\n import numpy as np\n\n import matplotlib.pyplot as plt\n\n fh = Dataset(file_path, mode='r') # file handle\n\n pr = fh.variables[param][:, :, :]\n fh.close()\n\n plt.plot(np.ravel(pr))\n plt.title(file_path)\n plt.show()\n\n\ndef moving_average(arr, win=3):\n ''' calculates non weigthed moving moving_average of array\n '''\n\n import numpy as np\n if win == 0:\n return arr\n ret = np.cumsum(arr, dtype=float)\n ret[win:] = ret[win:] - ret[:-win]\n return ret[win - 1:] / win\n\n\ndef reorderLegend(ax=None, order=None, unique=False):\n '''\n # Returns tuple of handles, labels for axis ax, after reordering them to\n conform to the label order `order`, and if unique is True,\n after removing entries with duplicate labels\n\n From https://gitlab.com/cpbl/cpblUtilities/blob/master/mathgraph.py\n '''\n import matplotlib.pyplot as plt\n import numpy as np\n\n if ax is None:\n ax = plt.gca()\n handles, labels = ax.get_legend_handles_labels()\n labels, 
handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) # sort both labels and handles by labels\n if order is not None: # Sort according to a given list (not necessarily complete)\n keys = dict(zip(order, range(len(order))))\n labels, handles = zip(*sorted(zip(labels, handles), key=lambda t, keys=keys: keys.get(t[0], np.inf)))\n if unique:\n labels, handles = zip(*unique_everseen(zip(labels, handles), key=labels)) # Keep only the first of each handle\n # ax.legend(handles, labels)\n return(handles, labels)\n\n\ndef unique_everseen(seq, key=None):\n seen = set()\n seen_add = seen.add\n return [x for x, k in zip(seq, key) if not (k in seen or seen_add(k))]\n\n\ndef draw_screen_poly(box_in, m):\n '''\n '''\n import numpy as np\n from matplotlib.patches import Polygon\n\n import matplotlib.pyplot as plt\n\n # to draw polygon\n lon0 = box_in[0]-360\n lon1 = box_in[1]-360\n lat0 = box_in[2]\n lat1 = box_in[3]\n resolution = 10\n lats_r = np.hstack((np.linspace(lat0, lat1, resolution),\n np.linspace(lat1, lat0, resolution)))\n\n lons_r = np.hstack((np.linspace(lon0, lon0, resolution),\n np.linspace(lon1, lon1, resolution)))\n\n x, y = m(lons_r, lats_r)\n xy = zip(x, y)\n poly = Polygon(list(xy), fc=(1, 0, 0, 0.0), ec=(0.8, 0, 0, 1), lw=2)\n plt.gca().add_patch(poly)\n\n\ndef plot_basemap_regions(nc_in, png_name_in, param_in, region_in, title_in, cdo, bounds_in, colors_in, over_in, under_in, poly_in=False):\n '''\n\n '''\n from netCDF4 import Dataset\n import numpy as np\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n from mpl_toolkits.basemap import Basemap\n\n boxDict = {\n \"Andes\": [283-1, 288+1, 0, 8.5+1], # long1, long2, lat1, lat2\n \"Alpine\": [5-1, 14+1, 44.5-1, 48.5+1]\n }\n box_in = boxDict[region_in]\n box = \"%d,%d,%d,%d\" % (box_in[0], box_in[1], box_in[2], box_in[3]) # box of Cdo\n\n print(box)\n print(nc_in)\n print(param_in)\n\n fh = Dataset(nc_in, 'r')\n\n lons = fh.variables['lon'][:]\n lats = fh.variables['lat'][:]\n param = fh.variables[param_in][0:, :, :]\n\n param_units = fh.variables[param_in].units\n param_name = fh.variables[param_in].long_name\n # close file\n fh.close()\n\n # Get some parameters for the Stereographic Projection\n # lon_0 = lons.mean()\n # lat_0 = lats.mean()\n\n # m = Basemap(projection='moll',lon_0=0,resolution='l')\n # m = Basemap(width=50000, height=10000,\n # resolution='l', projection='moll',\\\n # lat_ts=40, lat_0=lat_0, lon_0=lon_0) # stere=stereographic projection\n #\n # m = Basemap(projection='ortho', lat_0=5, lon_0=-60, resolution='l')\n m = Basemap(projection='cass', llcrnrlat=box_in[2]-2, urcrnrlat=box_in[3]+2,\n llcrnrlon=box_in[0]-2, urcrnrlon=box_in[1]+2, resolution='l',\n lon_0=box_in[0]+3, lat_0=box_in[2]+4)\n\n lons_dim = len(lons.shape)\n if 2 == lons_dim:\n lon = lons\n lat = lats\n elif 1 == lons_dim:\n lon, lat = np.meshgrid(lons, lats)\n else:\n print(\"Error in lon lat array dimension: %d\" % lons_dim)\n\n xi, yi = m(lon, lat)\n\n # Plot Data\n # cmap = plt.get_cmap('terrain')'' 0.7 0.6 0.5 0.4 0.3 0.2 0.1 0.0\n cmap = mpl.colors.ListedColormap(colors_in)\n cmap.set_over(over_in)\n cmap.set_under(under_in)\n\n norm = mpl.colors.BoundaryNorm(bounds_in, cmap.N)\n # cb3 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,\n # norm=norm,\n # boundaries=[-10] + bounds + [10],\n # extend='both',\n # extendfrac='auto',\n # ticks=bounds,\n # spacing='uniform',\n # orientation='horizontal')\n\n cs = m.pcolor(xi, yi, np.squeeze(param), alpha=0.7, cmap=cmap, norm=norm)\n\n # Add Grid Lines\n 
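# --- The discrete-colormap recipe used by plot_basemap_regions above ---
# ListedColormap + BoundaryNorm is the stock matplotlib pairing for a stepped
# colour scale: N colours, N + 1 boundaries, with set_under/set_over handling
# out-of-range values. Standalone sketch (colours and bounds are illustrative):
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

colors = ["#2c7bb6", "#ffffbf", "#d7191c"]
bounds = [0.0, 1.0, 2.0, 3.0]             # one more edge than colours
cmap = mpl.colors.ListedColormap(colors)
cmap.set_under("black")
cmap.set_over("white")
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)

data = np.random.uniform(-0.5, 3.5, size=(10, 10))
plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(extend="both")               # arrows show the under/over colours
plt.show()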
m.drawparallels(np.arange(-80., 81., 10.), labels=[1, 0, 0, 0], fontsize=10)\n m.drawmeridians(np.arange(-180., 181., 10.), labels=[0, 0, 0, 1], fontsize=10)\n\n # Add Coastlines, States, and Country Boundaries\n m.drawcoastlines()\n # m.drawstates()\n m.drawcountries()\n m.shadedrelief()\n\n # Add Colorbar\n cbar = m.colorbar(cs, location='bottom', pad=\"10%\")\n cbar.set_label(\"%s (%s)\" % (param_name, param_units))\n\n if (poly_in):\n draw_screen_poly(box_in, m)\n\n # Add Title\n title_region = (title_in)\n plt.title(title_region)\n plt.savefig(png_name_in, dpi=200)\n # plt.show()\n plt.close()\n\n\ndef plot_time_series(file_path_in_array, png_name_in=None, param_in=None, region=None, h_line=None):\n '''\n plot_time_series ...\n '''\n import pathlib\n from useful_functions import findScaleOffset\n from netcdftime import utime\n import matplotlib.pyplot as plt\n import matplotlib.dates as date_plt\n from useful_functions import moving_average\n from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/\n import datetime as dt # Python standard library datetime module\n from bisect import bisect_left\n import logging\n\n logger = logging.getLogger('root')\n FORMAT = \"[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s\"\n logging.basicConfig(format=FORMAT)\n logger.setLevel(logging.debug())\n\n plot_each = True\n\n # Read time and param vars\n if param_in is None:\n return\n\n date_fill = None\n rcp45_p25_fill = None\n rcp45_p75_fill = None\n\n rcp85_p25_fill = None\n rcp85_p75_fill = None\n\n histo_date_fill = None\n histo_rcp45_p25_fill = None\n histo_rcp45_p75_fill = None\n\n histo_rcp85_p25_fill = None\n histo_rcp85_p75_fill = None\n\n for file_path_in in file_path_in_array:\n\n plt.figure(region+' '+param_in, figsize=(15, 6))\n logger.debug(\"plot_time_series \"+file_path_in)\n\n data_in = Dataset(file_path_in, mode='r')\n\n time = data_in.variables['time'][:]\n param = data_in.variables[param_in][:]\n # Scale var\n [scal_req, scale_factor, add_offset] = findScaleOffset(data_in, param_in)\n param_scaled = (scale_factor*param)+add_offset\n\n # create time vector\n time_uni = data_in.variables['time'].units\n time_cal = data_in.variables['time'].calendar\n\n cdftime = utime(time_uni, calendar=time_cal)\n date = [cdftime.num2date(t) for t in time]\n\n # ############# A plot of Maximum precipitation ##############\n\n # plt.plot(date, param_scaled[:, 0, 0], label=model)\n\n days_2006 = 57160.5 # 2006 value in time:units = \"days since 1850-1-1 00:00:00\" ; time:calendar = \"standard\" ;'\n index_2006 = bisect_left(time, days_2006)\n\n half_window = 60 # 60-5\n window = half_window * 2\n date_start = half_window\n date_end = half_window - 1 # date [x:-y], where x+y = window - 1\n\n param_scaled_smoothed = moving_average(arr=param_scaled[:, 0, 0], win=window)\n\n if \"25\" in file_path_in and \"rcp45\" in file_path_in:\n rcp45_p25_fill = param_scaled_smoothed[index_2006-1-date_end-1:]\n histo_rcp45_p25_fill = param_scaled_smoothed[:index_2006-date_start]\n\n # plt.plot(date[index_2006-1:-date_end], param_scaled_smoothed[index_2006-1-date_end-1:], 'g--', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n elif \"75\" in file_path_in and \"rcp45\" in file_path_in:\n rcp45_p75_fill = param_scaled_smoothed[index_2006-1-date_end-1:]\n date_fill = date[index_2006-1:-date_end]\n histo_rcp45_p75_fill = param_scaled_smoothed[:index_2006-date_start]\n histo_date_fill = date[date_start:index_2006]\n # plt.plot(date[index_2006-1:-date_end], 
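# --- Heads-up on the logger setup inside plot_time_series above ---
# `logger.setLevel(logging.debug())` calls the module-level debug() function,
# which requires a message argument, so that line raises a TypeError at run
# time; the intended value is the DEBUG level constant. Working form:
import logging

logger = logging.getLogger("root")
logging.basicConfig(
    format="[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
logger.setLevel(logging.DEBUG)   # the constant, not a logging.debug() call
logger.debug("logger now emits debug records")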
param_scaled_smoothed[index_2006-1-date_end-1:], 'g--', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n elif \"rcp45\" in file_path_in:\n plt.plot(date[date_start : index_2006], param_scaled_smoothed[:index_2006-date_start], 'k') # label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n plt.plot(date[index_2006-1 : -date_end], param_scaled_smoothed[index_2006-1-date_end-1:], 'g', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n\n\n if \"25\" in file_path_in and \"rcp85\" in file_path_in:\n rcp85_p25_fill = param_scaled_smoothed[index_2006-1-date_end-1:]\n histo_rcp85_p25_fill = param_scaled_smoothed[:index_2006-date_start]\n # plt.plot(date[index_2006-1:-date_end], param_scaled_smoothed[index_2006-1-date_end-1:], 'r--', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n elif \"75\" in file_path_in and \"rcp85\" in file_path_in:\n rcp85_p75_fill = param_scaled_smoothed[index_2006-1-date_end-1:]\n date_fill = date[index_2006-1:-date_end]\n histo_rcp85_p75_fill = param_scaled_smoothed[:index_2006-date_start]\n histo_date_fill = date[date_start:index_2006]\n # plt.plot(date[index_2006-1:-date_end], param_scaled_smoothed[index_2006-1-date_end-1:], 'r--', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n elif \"rcp85\" in file_path_in:\n plt.plot(date[index_2006-1:-date_end], param_scaled_smoothed[index_2006-1-date_end-1:], 'r', label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n plt.plot(date[date_start:index_2006], param_scaled_smoothed[:index_2006-date_start], 'k') # label=pathlib.Path(file_path_in).stem.split(\"45\")[0])#.split(\"_histo\")[0])\n\n # plt.ylabel(\"%s Anomaly (%s)\" % (data_in.variables[param_in].long_name,\n # data_in.variables[param_in].units))\n #plt.ylabel(\"Exceedance rate (%)\") # TX90P\n plt.ylabel( data_in.variables[param_in].units) # R95P, SDII, RX5DAY, SDII\n #plt.ylabel(\"Days\") # FD\n\n plt.ticklabel_format(useOffset=False, axis='y')\n plt.xlabel(\"Year\")\n plt.title('Annual '+data_in.variables[param_in].long_name+' Anomaly '+'in the ' + region + ' region (smoothed)', fontweight='bold')\n data_in.close()\n\n ############## TO PLOT each model ############\n # if plot_each:\n # plt.legend()#loc=(0, 0), fontsize=7, frameon=True, ncol=11, bbox_to_anchor=(0, -0.5)) # Legend for smoothed\n # plt.tight_layout(rect=[0, 0, 1, 1])\n #\n # # add horizontal line at y=0\n # if h_line is not None:\n # plt.axhline(y=h_line, color='k')\n # # highligth 1961 to 1990 range\n # plt.axvspan(dt.datetime(1961, 1, 1), dt.datetime(1990, 12, 30), color='b', alpha=0.1)\n #\n # plt.grid(b=True, linestyle='--', linewidth=1)\n # plt.show()\n\n if rcp45_p25_fill is not None:\n plt.fill_between(date_fill, rcp45_p25_fill, rcp45_p75_fill,\n facecolor=\"g\", # The fill color\n # color='', # The outline color\n alpha=0.2) # Transparency of the fill\n\n if rcp85_p25_fill is not None:\n plt.fill_between(date_fill, rcp85_p25_fill, rcp85_p75_fill,\n facecolor=\"r\", # The fill color\n # color='', # The outline color\n alpha=0.2) # Transparency of the fill\n\n if histo_rcp45_p25_fill is not None:\n plt.fill_between(histo_date_fill, histo_rcp45_p25_fill, histo_rcp45_p75_fill,\n facecolor=\"k\", # The fill color\n # color='', # The outline color\n alpha=0.1)\n\n if histo_rcp85_p25_fill is not None:\n plt.fill_between(histo_date_fill, histo_rcp85_p25_fill, histo_rcp85_p75_fill,\n facecolor=\"k\", # The fill color\n # color='', # 
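# --- The percentile-envelope pattern used in the fills below ---
# fill_between() shades the band between the 25th- and 75th-percentile series
# while the central line is drawn with an ordinary plot() call on top.
# Self-contained sketch with toy data:
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(1850, 2101)
median = 0.01 * (x - 1850)                    # toy central estimate
p25, p75 = median - 0.2, median + 0.2         # toy percentile band

plt.plot(x, median, "g", label="central estimate")
plt.fill_between(x, p25, p75, facecolor="g", alpha=0.2,
                 label="25th-75th percentile")
plt.legend()
plt.show()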
The outline color\n alpha=0.1) # Transparency of the fill\n\n # plt.legend(loc=(0, 0), fontsize=7, frameon=True, ncol=11, bbox_to_anchor=(0, -0.5)) # Legend for smoothed\n plt.tight_layout(rect=[0, 0, 1, 1])\n\n # add horizontal line at y=0\n if h_line is not None:\n plt.axhline(y=h_line, color='b', alpha=0.5, linestyle='--')\n # logger.debug(clean((clean((list(plt.yticks()[0]))))))\n # plt.yticks(list(plt.yticks()[0]) + [h_line])\n # highligth 1961 to 1990 range\n plt.axvspan(dt.datetime(1961, 1, 1), dt.datetime(1990, 12, 30), color='b', alpha=0.1)\n\n plt.grid(b=True, linestyle='--', linewidth=1)\n\n # cdftime = utime(time_uni, calendar=time_cal)\n # date = [cdftime.num2date(time[140])]\n # dates = [dt.datetime(1861,1,1),\n # dt.datetime(1890,1,1),\n # dt.datetime(1961,1,1),\n # dt.datetime(1990,1,1),\n # dt.datetime(2006,1,1),\n # dt.datetime(2061,1,1),\n # dt.datetime(2090,1,1)]\n #\n # dates_plot = [date_plt.date2num(date) for date in dates]\n # plt.xticks(dates_plot)\n\n # plt.show()\n\n if png_name_in is None:\n plt.show()\n else:\n logger.debug((png_name_in))\n plt.savefig(png_name_in, dpi=150)\n","sub_path":"python3/useful_functions.py","file_name":"useful_functions.py","file_ext":"py","file_size_in_byte":21189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"642510232","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#******************************************************************************\n#\n# prepare_data.py\n# ---------------------------------------------------------\n# Wrapper script that runs other scripts. Converts all HDFs to TIFs (hdf2tif.py), mosaics them all together (merge_all.py) and reprojects to EPSG:4326 or needed.\n# More: http://github.com/nextgis/dhi\n#\n# Usage: \n# prepare_data.py dataset x:\\MCD15A2\\2003\\hdf\\ x:\\MCD15A2\\2003\\tif-fpar\\ 0.0083 epsg\n# where:\n# dataset SDS name of the dataset to process\n# input_folder input folder with HDFs (this is folder of folders)\n# output_folder where result will be stored\n# pixel_size pixel size, pixels are square\n# epsg EPSG code for output file or \"no\" for default\n# Examples:\n# python prepare_data.py 2003 MOD_Grid_MOD15A2:Fpar_1km x:\\MCD15A2\\2003\\hdf\\ x:\\MCD15A2\\2003\\tif-fpar\\before-nodata\\ 0.0083 4326\n#\n# Copyright (C) 2015 Maxim Dubinin (sim@gis-lab.info)\n#\n# This source is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your option)\n# any later version.\n#\n# This code is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# A copy of the GNU General Public License is available on the World Wide Web\n# at . 
You can also obtain it by writing\n# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,\n# MA 02111-1307, USA.\n#\n#******************************************************************************\n\nimport os\nimport glob\nimport sys\nimport calendar\n \nif __name__ == '__main__':\n dataset = sys.argv[1] #'MOD_Grid_MOD15A2:Lai_1km'\n id = sys.argv[2]\n od = sys.argv[3]\n res = sys.argv[4]\n epsg = sys.argv[5] #4326, 'no'\n \n script_path = 'e:/users/maxim/thematic/dhi/scripts/'\n os.chdir(id)\n dates = next(os.walk('.'))[1]\n \n if epsg == 'no':\n epsg = ''\n else:\n epsg = '-t_srs EPSG:' + epsg\n \n for date in dates:\n \n cmd = 'python ' + script_path + 'hdf2tif.py ' + dataset.split(':')[0] + ':' + '\\\"' + dataset.split(':')[1] + '\\\" ' + id + date + '\\\\ ' + id + date + '\\\\'\n #print(cmd)\n os.system(cmd)\n \n if not os.path.exists(date + '.vrt'):\n cmd = 'python ' + script_path + 'merge_all.py ' + date + '.vrt no 0 ' + id + date + '\\\\ ' + id + date + '\\\\'\n os.system(cmd)\n \n if not os.path.exists(od + date + '.tif'):\n cmd = 'gdalwarp ' + epsg + ' -tr ' + res + ' ' + res + ' ' + id + date + '\\\\' + date + '.vrt ' + od + date + '.tif'\n os.system(cmd)\n ","sub_path":"prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"536232108","text":"from nikola.plugin_categories import Task\n\n\nclass Deploy(Task):\n \"\"\"Deploy site. \"\"\"\n name = \"deploy\"\n is_default = False\n\n def gen_tasks(self):\n yield {\n \"basename\": self.name,\n \"actions\": self.site.config['DEPLOY_COMMANDS'],\n \"verbosity\": 2,\n }\n","sub_path":"nikola/plugins/task_deploy.py","file_name":"task_deploy.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"256665717","text":"from collections import deque\n\nn,m = map(int,input().split())\ngraph = [ [] for _ in range(n+1) ]\nin_degree = [0] * (n+1)\n\ndef topological_sort():\n res = []\n for i in range(1,n+1):\n if in_degree[i] == 0:\n que.append(i)\n\n while que:\n p = que.popleft()\n res.append(p)\n for adj in graph[p]:\n in_degree[adj] -= 1\n if in_degree[adj] == 0:\n que.append(adj)\n\n print(*res)\n\nfor _ in range(m):\n a, b = map(int,input().split())\n graph[a].append(b)\n in_degree[b] += 1\n\nque = deque() \n\n\n\ntopological_sort()\n\n\n","sub_path":"3.beakjoon/jungle/week3/2252_줄세우기.py","file_name":"2252_줄세우기.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"517503991","text":"#!/usr/bin/env pybricks-micropython\n\n\nfrom pybricks.hubs import EV3Brick\nfrom pybricks.ev3devices import Motor, InfraredSensor\nfrom pybricks.media.ev3dev import SoundFile\nfrom pybricks.parameters import Button, Direction, Port\nfrom pybricks.robotics import DriveBase\n\nfrom time import sleep\n\n\nclass Bobb3e(EV3Brick):\n \"\"\"\n CHALLENGES:\n\n Here are some challenges you can try to do in order to make BOBB3E better:\n\n - Can you make BOBB3E say sounds when he is lifting/lowering his forks?\n\n - BOBB3E does by default drive rather slow - try to see if you can\n make him go faster!\n\n - You could utilise that the remote has 4 channels and use that as\n different gears. Say, for instance, that when using Channel 1 is the same\n as driving in 1st gear; very slow. 
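# (Aside on the 2252 queue-ordering record above: that solution is Kahn's
# algorithm. Nodes with in-degree 0 seed the queue; popping a node
# conceptually removes its outgoing edges, and any neighbour whose in-degree
# drops to 0 joins the queue. Compact, self-contained version:)
from collections import deque

def topological_order(n, edges):
    graph = [[] for _ in range(n + 1)]
    in_degree = [0] * (n + 1)
    for a, b in edges:                  # edge a -> b: a must precede b
        graph[a].append(b)
        in_degree[b] += 1
    queue = deque(v for v in range(1, n + 1) if in_degree[v] == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for w in graph[v]:
            in_degree[w] -= 1
            if in_degree[w] == 0:
                queue.append(w)
    return order        # fewer than n entries means the graph has a cycle

assert topological_order(4, [(4, 2), (3, 1)]) == [3, 4, 1, 2]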
Channel 2 could make him go a little\n faster and using channel 4 would make him go very fast!\n\n - The remote control can also be used as a Beacon, which BOBB3E is able to\n detect and drive towards. Can you make him automatically find the Beacon\n and lift it, when BOBB3E comes close enough to it?\n \"\"\"\n\n WHEEL_DIAMETER = 24 # milimeters\n AXLE_TRACK = 100 # milimeters\n\n def __init__(\n self,\n left_motor_port: str = Port.B, right_motor_port: str = Port.C,\n lift_motor_port: str = Port.A,\n ir_sensor_port: str = Port.S4, ir_beacon_channel: int = 1):\n self.left_motor = Motor(port=left_motor_port,\n positive_direction=Direction.COUNTERCLOCKWISE)\n self.right_motor = Motor(port=right_motor_port,\n positive_direction=Direction.COUNTERCLOCKWISE)\n self.drive_base = DriveBase(left_motor=self.left_motor,\n right_motor=self.right_motor,\n wheel_diameter=self.WHEEL_DIAMETER,\n axle_track=self.AXLE_TRACK)\n\n self.lift_motor = Motor(port=lift_motor_port,\n positive_direction=Direction.CLOCKWISE)\n\n self.ir_sensor = InfraredSensor(port=ir_sensor_port)\n self.ir_beacon_channel = ir_beacon_channel\n\n self.reversing = False\n self.playing_sound = False\n\n \"\"\"\n BOBB3E takes advantage of running multiple subprograms;\n one for receiving the commands from the remote control and\n one for handling the reversing alarm.\n \"\"\"\n\n def drive_or_operate_forks_once_by_ir_beacon(\n self,\n speed: float = 1000, # mm/s\n turn_rate: float = 90 # rotational speed deg/s\n ):\n \"\"\"\n Read the commands from the remote control and convert them into actions\n such as go forward, lift and turn.\n \"\"\"\n ir_beacon_button_pressed = \\\n set(self.ir_sensor.buttons(channel=self.ir_beacon_channel))\n\n # lower the forks\n if ir_beacon_button_pressed == {Button.LEFT_UP, Button.LEFT_DOWN}:\n self.reversing = False\n\n self.drive_base.stop()\n\n self.lift_motor.run(speed=100)\n\n # raise the forks\n elif ir_beacon_button_pressed == {Button.RIGHT_UP, Button.RIGHT_DOWN}:\n self.reversing = False\n\n self.drive_base.stop()\n\n self.lift_motor.run(speed=-100)\n\n # forward\n elif ir_beacon_button_pressed == {Button.LEFT_UP, Button.RIGHT_UP}:\n self.reversing = False\n\n self.drive_base.drive(\n speed=speed,\n turn_rate=0)\n\n self.lift_motor.hold()\n\n # backward\n elif ir_beacon_button_pressed == {Button.LEFT_DOWN, Button.RIGHT_DOWN}:\n self.reversing = True\n\n self.drive_base.drive(\n speed=-speed,\n turn_rate=0)\n\n self.lift_motor.hold()\n\n # turn left on the spot\n elif ir_beacon_button_pressed == {Button.LEFT_UP, Button.RIGHT_DOWN}:\n self.reversing = False\n\n self.drive_base.drive(\n speed=0,\n turn_rate=-turn_rate)\n\n self.lift_motor.hold()\n\n # turn right on the spot\n elif ir_beacon_button_pressed == {Button.RIGHT_UP, Button.LEFT_DOWN}:\n self.reversing = False\n\n self.drive_base.drive(\n speed=0,\n turn_rate=turn_rate)\n\n self.lift_motor.hold()\n\n # turn left forward\n elif ir_beacon_button_pressed == {Button.LEFT_UP}:\n self.reversing = False\n\n self.drive_base.drive(\n speed=speed,\n turn_rate=-turn_rate)\n\n self.lift_motor.hold()\n\n # turn right forward\n elif ir_beacon_button_pressed == {Button.RIGHT_UP}:\n self.reversing = False\n\n self.drive_base.drive(\n speed=speed,\n turn_rate=turn_rate)\n\n self.lift_motor.hold()\n\n # turn left backward\n elif ir_beacon_button_pressed == {Button.LEFT_DOWN}:\n self.reversing = True\n\n self.drive_base.drive(\n speed=-speed,\n turn_rate=turn_rate)\n\n self.lift_motor.hold()\n\n # turn right backward\n elif ir_beacon_button_pressed == 
{Button.RIGHT_DOWN}:\n self.reversing = True\n\n self.drive_base.drive(\n speed=-speed,\n turn_rate=-turn_rate)\n\n self.lift_motor.hold()\n\n # otherwise stop\n else:\n self.reversing = False\n\n self.drive_base.stop()\n\n self.lift_motor.hold()\n\n def keep_driving_or_operating_forks_by_ir_beacon(\n self,\n speed: float = 1000 # degrees per second\n ):\n \"\"\"\n Main Loop\n \"\"\"\n while True:\n self.drive_or_operate_forks_once_by_ir_beacon(speed=speed)\n\n def sound_alarm_if_reversing(self):\n \"\"\"\n Reversing alarm sound:\n - Whenever the Reversing variable is changed to True\n the alarm starts to play.\n - When the value of the Reversing variable is set to False\n the alarm stops.\n \"\"\"\n if self.reversing:\n if not self.playing_sound:\n self.playing_sound = True\n\n self.speaker.play_file(file=SoundFile.BACKING_ALERT)\n\n elif self.playing_sound:\n self.playing_sound = False\n\n sleep(0.01)\n\n def sound_alarm_whenever_reversing(self):\n \"\"\"\n Backing Sound Loop\n \"\"\"\n while True:\n self.sound_alarm_if_reversing()\n","sub_path":"Computing-Platforms/EV3/Home-Edition/Fan-Robots/Bobb3e/bobb3e_pybricks.py","file_name":"bobb3e_pybricks.py","file_ext":"py","file_size_in_byte":6596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"5800997","text":"import models\nimport os\nimport queue\nimport shutil\nimport torch\nimport torch.nn as nn\n\nfrom dataset import TASK_SEQUENCES\nfrom models import EnsembleClassifier\n\n\nclass ModelSaver(object):\n \"\"\"Class to save and load model ckpts.\"\"\"\n def __init__(self, save_dir, iters_per_save, max_ckpts, metric_name='val_loss',\n maximize_metric=False, keep_topk=True, **kwargs):\n \"\"\"\n Args:\n save_dir: Directory to save checkpoints.\n iters_per_save: Number of iterations between each save.\n max_ckpts: Maximum number of checkpoints to keep before overwriting old ones.\n metric_name: Name of metric used to determine best model.\n maximize_metric: If true, best checkpoint is that which maximizes the metric value passed in via save.\n If false, best checkpoint minimizes the metric.\n keep_topk: Keep the top K checkpoints, rather than the most recent K checkpoints.\n \"\"\"\n super(ModelSaver, self).__init__()\n\n self.save_dir = save_dir\n self.iters_per_save = iters_per_save\n self.max_ckpts = max_ckpts\n self.metric_name = metric_name\n self.maximize_metric = maximize_metric\n self.best_metric_val = None\n self.ckpt_paths = queue.PriorityQueue()\n self.keep_topk = keep_topk\n\n def _is_best(self, metric_val):\n \"\"\"Check whether metric_val is the best one we've seen so far.\"\"\"\n if metric_val is None:\n return False\n return (self.best_metric_val is None\n or (self.maximize_metric and self.best_metric_val < metric_val)\n or (not self.maximize_metric and self.best_metric_val > metric_val))\n\n def save(self, iteration, epoch, model, optimizer, lr_scheduler,\n device, metric_val, covar_list=''):\n \"\"\"If this iteration corresponds to a save iteration, save model parameters to disk.\n\n Args:\n iteration: Iteration that just finished.\n epoch: epoch to stamp on the checkpoint\n model: Model to save.\n optimizer: Optimizer for model parameters.\n lr_scheduler: Learning rate scheduler for optimizer.\n device: Device where the model/optimizer parameters belong.\n metric_val: Value for determining whether checkpoint is best so far.\n \"\"\"\n if iteration % self.iters_per_save != 0:\n return\n\n ckpt_dict = {\n 'ckpt_info': {'epoch': epoch, 'iteration': iteration, 
self.metric_name: metric_val},\n 'model_name': model.module.__class__.__name__,\n 'task_sequence': model.module.task_sequence,\n 'model_state': model.to('cpu').state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': None if lr_scheduler is None else lr_scheduler.state_dict(),\n 'covar_list': covar_list,\n }\n model.to(device)\n\n ckpt_path = os.path.join(self.save_dir, 'iter_{}_{}_{:.2f}.pth.tar'.format(iteration, self.metric_name, metric_val))\n torch.save(ckpt_dict, ckpt_path)\n\n if self._is_best(metric_val):\n # Save the best model\n print(f\"Saving the model based on metric={self.metric_name} and \\\n maximize={self.maximize_metric} with value={metric_val}\")\n self.best_metric_val = metric_val\n best_path = os.path.join(self.save_dir, 'best.pth.tar')\n shutil.copy(ckpt_path, best_path)\n\n # Add checkpoint path to priority queue (lower priority order gets removed first\n if not self.keep_topk:\n priority_order = iteration\n elif self.maximize_metric:\n priority_order = metric_val\n else:\n priority_order = -metric_val\n\n self.ckpt_paths.put((priority_order, ckpt_path))\n\n # Remove a checkpoint if more than max_ckpts ckpts saved\n if self.ckpt_paths.qsize() > self.max_ckpts:\n _, oldest_ckpt = self.ckpt_paths.get()\n try:\n os.remove(oldest_ckpt)\n except OSError:\n pass\n\n @classmethod\n def load_model(cls, ckpt_path, gpu_ids, model_args, data_args):\n \"\"\"Load model parameters from disk.\n\n Args:\n ckpt_path: Path to checkpoint to load.\n gpu_ids: GPU IDs for DataParallel.\n\n Returns:\n Model loaded from checkpoint, dict of additional checkpoint info (e.g. epoch, metric).\n \"\"\"\n device = 'cuda:{}'.format(gpu_ids[0]) if len(gpu_ids) > 0 else 'cpu'\n ckpt_dict = torch.load(ckpt_path, map_location=device)\n\n # Build model, load parameters\n model_fn = models.__dict__[ckpt_dict['model_name']]\n original_task_sequence = ckpt_dict['task_sequence']\n task_sequence = TASK_SEQUENCES[data_args.task_sequence] if data_args.task_sequence else original_task_sequence\n\n model = model_fn(task_sequence, model_args)\n\n # Transform classifier if task_sequence for current task is\n # different than the pretrained model.\n # if model_args.transform_classifier:\n num_orign_classes = (len(original_task_sequence)\n if 'task_sequence' in ckpt_dict else model_args.n_orig_classes)\n num_origin_covars = (len(ckpt_dict['covar_list'].split(';'))\n if 'covar_list' in ckpt_dict and len(ckpt_dict['covar_list']) > 0 else 0)\n model.transform_model_shape(num_orign_classes, num_origin_covars)\n\n model = nn.DataParallel(model, gpu_ids)\n model.load_state_dict(ckpt_dict['model_state'])\n \n num_covars = len(model_args.covar_list.split(';')) if len(model_args.covar_list) > 0 else 0\n if num_origin_covars == 0:\n model.module.transform_model_shape(len(task_sequence), num_covars)\n\n return model, ckpt_dict['ckpt_info']\n\n @classmethod\n def load_ensemble(cls, ckpt_paths, gpu_ids, model_args, data_args):\n \"\"\"Load multiple models from disk.\n Args:\n ckpt_paths: List of checkpoint paths to load.\n gpu_ids: GPU IDs for DataParallel.\n Returns:\n Ensemble Model loaded from checkpoint, list of dicts of additional\n checkpoint info (e.g. 
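# --- The top-k retention trick in ModelSaver.save() above ---
# Pushing (priority, path) into a min-heap-backed PriorityQueue and popping
# whenever the size exceeds max_ckpts always evicts the LOWEST priority, i.e.
# the worst checkpoint when priority tracks the metric. Stripped-down sketch:
import queue

def keep_top_k(scored_paths, k, maximize_metric=True):
    pq = queue.PriorityQueue()
    kept = set()
    for metric_val, path in scored_paths:
        priority = metric_val if maximize_metric else -metric_val
        pq.put((priority, path))
        kept.add(path)
        if pq.qsize() > k:
            _, evicted = pq.get()       # lowest-priority entry leaves first
            kept.discard(evicted)
    return kept

assert keep_top_k([(0.1, "a"), (0.9, "b"), (0.5, "c")], k=2) == {"b", "c"}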
iters, metric).\n \"\"\"\n individual_models = []\n ckpt_dicts = []\n for ckpt_path in ckpt_paths:\n model, ckpt_info = cls.load_model(ckpt_path, gpu_ids, model_args, data_args)\n individual_models.append(model)\n ckpt_dicts.append(ckpt_info)\n\n ensemble_model = EnsembleClassifier(individual_models)\n return ensemble_model, ckpt_dicts\n\n\n @classmethod\n def load_optimizer(cls, ckpt_path, gpu_ids, optimizer, lr_scheduler=None):\n \"\"\"Load optimizer and LR scheduler state from disk.\n\n Args:\n ckpt_path: Path to checkpoint to load.\n gpu_ids: GPU IDs for loading the state dict.\n optimizer: Optimizer to initialize with parameters from the checkpoint.\n lr_scheduler: Optional learning rate scheduler to initialize with parameters from the checkpoint.\n \"\"\"\n device = 'cuda:{}'.format(gpu_ids[0]) if len(gpu_ids) > 0 else 'cpu'\n ckpt_dict = torch.load(ckpt_path, map_location=device)\n optimizer.load_state_dict(ckpt_dict['optimizer'])\n if lr_scheduler is not None:\n lr_scheduler.load_state_dict(ckpt_dict['lr_scheduler'])\n","sub_path":"saver/model_saver.py","file_name":"model_saver.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"261043955","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport sys\n\nfrom cronq.backends.mysql import Storage\nfrom cronq.models.event import Event\nfrom cronq.models.job import Job\n\nfrom sqlalchemy import between\nfrom sqlalchemy.sql.expression import asc\nfrom sqlalchemy.sql.expression import desc\n\nlogger = logging.getLogger(__name__)\n\n\ndef prune_record(event_id, max_event_id, event_range, storage):\n last = event_id + event_range\n if last > max_event_id:\n last = max_event_id\n\n stmt = Event.__table__.delete().where(between(Event.id, event_id, last))\n storage._engine.execute(stmt)\n logger.info('Pruning {0} - {1}'.format(event_id, last))\n storage.session.commit()\n return last\n\n\ndef prune(first, last, interval):\n storage = Storage(isolation_level=None)\n\n event_id = first\n while event_id <= last:\n try:\n event_id = prune_record(event_id, last, interval, storage)\n except (KeyboardInterrupt, SystemExit):\n storage.session.commit()\n return\n except Exception as e:\n logger.warning(e)\n return\n if event_id == last:\n break\n\n storage.session.commit()\n\n\ndef prune_keep_record(job_id, keep, storage):\n event = storage.session.query(Event).filter_by(job_id=job_id).\\\n order_by(asc(Event.id)).limit(1).first()\n\n min_id = None\n if event is not None:\n min_id = event.id\n\n events = storage.session.query(Event).filter_by(job_id=job_id).\\\n order_by(desc(Event.id)).limit(keep)\n event_ids = [e.id for e in events]\n if len(event_ids) == 0:\n logger.info('No events for {0}'.format(job_id))\n return\n\n max_id = min(event_ids)\n if min_id == max_id:\n logger.info('Min and max event ids for {0} are the same: {1} - {2}'.format( # noqa\n job_id, min_id, max_id))\n return\n\n if min_id > max_id:\n logger.info('Min event id for {0} is larger than max event id: {1} - {2}'.format( # noqa\n job_id, min_id, max_id))\n return\n\n logger.info('Job ID {0}, Pruning events {1} - {2}'.format(\n job_id, min_id, max_id))\n\n stmt = Event.__table__.delete()\\\n .where(between(Event.id, min_id, max_id))\\\n .where(Event.job_id == job_id)\n storage._engine.execute(stmt)\n storage.session.commit()\n\n\ndef prune_keep(keep):\n storage = Storage(isolation_level=None)\n jobs = storage.session.query(Job).order_by(asc(Job.id))\n for job in jobs:\n 
prune_keep_record(job.id, keep, storage)\n\n\ndef prune_type(args):\n is_keep = args['keep'] is not None\n is_interval = True\n interval_args = []\n for k in ['first', 'last']:\n if args[k] is None:\n is_interval = False\n interval_args.append(False)\n else:\n interval_args.append(True)\n\n type_ = None\n error = None\n if not is_interval and True in interval_args:\n error = 'If any \"range\" args are specified, all must be specified'\n\n if is_keep:\n if is_interval:\n error = 'Cannot specify both \"keep\" arg and \"range\" args'\n else:\n type_ = 'keep'\n else:\n if not is_interval:\n error = 'Must specify either \"keep\" arg or \"range\" args'\n else:\n type_ = 'range'\n\n return type_, error\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Prunes the cronq datastore')\n parser.add_argument('--keep',\n type=int,\n default=None,\n help='number of event entries to keep')\n parser.add_argument('--first',\n type=int,\n default=None,\n help='first entry to prune')\n parser.add_argument('--interval',\n type=int,\n default=100,\n help='interval to delete by')\n parser.add_argument('--last',\n type=int,\n default=None,\n help='last entry to prune')\n args = parser.parse_args()\n args = vars(args)\n\n type_, error = prune_type(args)\n if error is not None:\n logger.warning(error)\n sys.exit(1)\n\n if type_ == 'keep':\n prune_keep(args['keep'])\n else:\n prune(args['first'], args['last'], args['range'])\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cronq/pruner.py","file_name":"pruner.py","file_ext":"py","file_size_in_byte":4421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"186172225","text":"# This Python file uses the following encoding: utf-8\n\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n import os\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nfrom setuptools import setup, find_packages\n\npackage_name='cdb_query'\nsetup(\n name = package_name,\n version = \"1.3\",\n packages=[package_name],\n package_dir = {package_name: 'lib'},\n#\n# # metadata for upload to PyPI\n author = \"F. B. Laliberté, P. J. 
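# --- Two notes on the pruner's main() above ---
# 1) Likely bug: the parser defines --interval, but the final branch calls
#    prune(args['first'], args['last'], args['range']); there is no 'range'
#    key, so that path raises KeyError -- args['interval'] was presumably
#    meant.
# 2) The hand-rolled keep-vs-range validation in prune_type() is close to what
#    argparse's mutually exclusive groups provide. Hedged sketch (a group
#    cannot express "--first requires --last", which still needs a manual check):
import argparse

parser = argparse.ArgumentParser(description="prune sketch")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--keep", type=int, help="number of event entries to keep")
group.add_argument("--first", type=int, help="first entry to prune")
parser.add_argument("--last", type=int, help="last entry to prune")
parser.add_argument("--interval", type=int, default=100)

args = parser.parse_args(["--keep", "500"])
print(args.keep)    # 500; passing both --keep and --first exits with an error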
Kushner\",\n author_email = \"frederic.laliberte@utoronto.ca\",\n description = \"Simple tools to query and retrieve data from the ESGF's CMIP5 and CORDEX projects.\",\n license = \"BSD\",\n keywords = \"atmosphere climate\",\n url = \"http://proj.badc.rl.ac.uk/exarch\", # project home page, if any\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Science/Research\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Fortran\",\n \"Topic :: Scientific/Engineering :: Atmospheric Science\",\n \"Topic :: Scientific/Engineering :: Mathematics\"\n ],\n long_description=read('README'),\n install_requires = ['numpy','h5py','netCDF4','sqlalchemy','esgf-pyclient','timeaxis'],\n zip_safe=False,\n # other arguments here...\n #package_data = {package_name : ['lib/*.sh']},\n entry_points = {\n 'console_scripts': [\n 'cdb_query_CMIP5 = '+package_name+'.cdb_query_archive:main_CMIP5',\n 'cdb_query_CORDEX = '+package_name+'.cdb_query_archive:main_CORDEX',\n 'cdb_query_NMME = '+package_name+'.cdb_query_archive:main_NMME',\n 'cdb_query_LRFTIP = '+package_name+'.cdb_query_archive:main_LRFTIP'\n ],\n }\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"415449355","text":"def mainBSF():\n # Crear el grafo\n adja_list = []\n with open(\"grafo.txt\", \"r\") as file:\n n, e = file.readline().split()\n n = int(n)\n e = int(e)\n\n for i in range(n):\n adja_list.append([])\n\n for _ in range(e):\n nodo1, nodo2 = map(int, file.readline().split())\n print(nodo1,\":::\",nodo2)\n adja_list[nodo1].append(nodo2)\n adja_list[nodo2].append(nodo1)\n print(adja_list,\";;;;;;;;;\")\n print(adja_list,\"________________________________\")\n # for edge in edge_list:\n # print(edge[0], edge[1])\n\n # BFS\n Q = []\n visitado = [False] * n\n saltos_para_inicio = [0] * n\n\n visitado[0] = True\n Q.insert(0, 0)\n while len(Q) != 0:\n nodo_actual = Q.pop()\n print(\"procesando el nodo:\", nodo_actual)\n\n # Recorrer las aristas\n for w in adja_list[nodo_actual]:\n # si el nodo ya se visitó\n print(w,\"......\")\n if not visitado[w]:\n print(\"entro: \",w)\n visitado[w] = True\n Q.insert(0, w)\n saltos_para_inicio[w] = saltos_para_inicio[nodo_actual] + 1\n print(Q)\n print(saltos_para_inicio)\n\n print()\n print(adja_list)\n for i, val in enumerate(saltos_para_inicio):\n print(\"nodo\", i, \"está a\", val, \"saltos :)\")\n\n\nif __name__ == \"__main__\":\n mainBSF()\n\n\n\n\n","sub_path":"ene-jun-2020/joe tareas y parciales/practica_9/grafo_practica.py","file_name":"grafo_practica.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"281217052","text":"#!/usr/bin/env python3\n\ndef interleave(*lists):\n newList = []\n i = 0;\n zippedList = list(zip(lists))\n while i < len(lists[0]): \n #Käydään läpi alkio i joka listasta ja lisätään palautettavaan listaan\n for item in zippedList:\n y = list(item)\n x = y[0]\n newList.append(x[i])\n i += 1\n return newList\n\ndef main():\n print(interleave([1, 2, 3], [20, 30, 40], ['a', 'b', 'c']))\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"part01-e11_interleave/src/interleave.py","file_name":"interleave.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"445691845","text":"#!/usr/bin/env python\n\nimport os\nimport re\nimport sys\nimport gzip\nimport pandas as pd\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s: %(message)s')\n\n\ngtf, assembly_report = sys.argv[1:]\n\n# read in the chromosome names...\nchromosomes = pd.read_csv(assembly_report, delimiter='\\t', comment='#', header=None)\nchromosomes.columns = ['sequence_name', 'sequence_role', 'assigned_molecule', 'assigned_molecule_location_type', 'genbank', 'relationship', 'refseq', 'assembly_unit', 'sequence_length', 'ucsc']\nchromosome_translations = {row['refseq']: row['ucsc'] for index, row in chromosomes.iterrows()}\n\n# for each item in the GTF:\n# convert chromosome name\n# check the source. if it's Curated Genomic or BestRefSeq, keep\n# keep genes \n# as the name of the feature -- use 'gene' attribute\n\nHEADER = re.compile(\"^#\")\n\nwith gzip.open(gtf, 'rt') as f:\n for line in f:\n if HEADER.match(line):\n continue\n chrom, source, feature_type, start, end, score, strand, phase, info = line.rstrip().split('\\t')\n if feature_type != 'gene':\n continue\n chrom = chromosome_translations[chrom]\n info = [i.split(' ') for i in info.rstrip(';').split('\"; ')]\n info = {i[0]: i[1].replace('\"', '') for i in info}\n gene_name = info['gene']\n if chrom == 'na':\n logging.info('Skipping {} (no chromosome translation)'.format(gene_name))\n continue\n print('{chrom}\\t{start}\\t{end}\\t{gene_name}\\t.\\t{strand}'.format(**locals()))\n\n\n","sub_path":"bin/refseq-annotation-and-report-to-genic-regions.py","file_name":"refseq-annotation-and-report-to-genic-regions.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"53190726","text":"from ..systems.docker import get_dusty_containers\nfrom . 
import utils\n\ndef tail_container_logs(app_or_service_name, follow=False, lines=None):\n container = get_dusty_containers([app_or_service_name], include_exited=True)[0]\n args = ['logs']\n if follow:\n args.append('-f')\n if lines:\n args.append('--tail={}'.format(lines))\n args.append(container['Id'])\n utils.exec_docker(*args)\n","sub_path":"dusty/commands/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"422363639","text":"from setuptools import setup, find_packages\nfrom os.path import join\n\nversion = '1.0b1'\nreadme = open(\"README.txt\").read()\nhistory = open('HISTORY.txt').read()\n\nsetup(name='ely.croppableimagefield',\n version=version,\n description=\"CroppableImageField is a drop-in replacement for the Archetype field ImageField\",\n long_description = readme + '\\n' + history,\n classifiers=[\n \"Framework :: Plone\",\n 'License :: OSI Approved :: GNU General Public License (GPL)',\n ],\n author = 'Michael Dunstan',\n author_email = 'michael@elyt.com',\n url = 'http://pypi.python.org/pypi/ely.croppableimagefield',\n license = 'GPL',\n packages=find_packages(),\n namespace_packages=['ely'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n ],\n )\n","sub_path":"pypi_install_script/ely.croppableimagefield-1.0b1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"164045386","text":"#\n# @lc app=leetcode.cn id=557 lang=python3\n#\n# [557] 反转字符串中的单词 III\n#\n\n# @lc code=start\nclass Solution:\n def reverseWords(self, s: str) -> str:\n s = s.split()\n ans = []\n for i in range(len(s)):\n ans.append(s[i][::-1])\n \n return ' '.join(ans)\n# @lc code=end\n\n","sub_path":"Week09/557.反转字符串中的单词-iii.py","file_name":"557.反转字符串中的单词-iii.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"617475995","text":"import sys\n\ndef build_dictionary(file):\n\td = {}\n\twith open(file, 'r') as f:\n\t\tfor line in f:\n\t\t\tname, phone = line.strip().split()\n\t\t\td[name] = phone\n\treturn d\n\ndef main():\n\td = build_dictionary(sys.argv[1])\n\tfor line in sys.stdin:\n\t\tname = line.strip()\n\t\tprint('Name: {}'.format(name))\n\t\ttry:\n\t\t\tprint('Phone: {}'.format(d[name]))\n\t\texcept KeyError:\n\t\t\tprint('No such contact')\n\nif __name__ == '__main__':\n\tmain()","sub_path":"lab_4.1/contacts_short_041.py","file_name":"contacts_short_041.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"508337024","text":"from flask import (\n render_template,\n request,\n current_app,\n redirect,\n url_for,\n flash,\n)\nfrom flask_login import login_required, current_user\n\nfrom ...models import (\n Post,\n Category,\n Comment,\n)\nfrom ...forms import (\n PostForm,\n CategoryForm,\n SettingForm,\n)\nfrom . 
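# --- The LeetCode 557 solution above, collapsed ---
# split() already handles the word boundaries, so the loop and index
# bookkeeping can become a single generator expression:
def reverse_words(s):
    return " ".join(word[::-1] for word in s.split())

assert reverse_words("Let's take LeetCode contest") == "s'teL ekat edoCteeL tsetnoc"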
import admin_bp\nfrom ...extensions import db\nfrom ...utils import redirect_back\n\n\n@admin_bp.before_request\n@login_required\ndef login_protect():\n '''\n 为 admin 蓝本下的所有视图函数添加保护\n '''\n pass\n\n\n@admin_bp.route('/posts/manage')\ndef manage_posts():\n '''\n 管理文章视图 \n '''\n page = request.args.get('page', 1, type=int)\n pagination = Post.query.order_by(Post.timestamp.desc()).paginate(\n page, per_page=current_app.config['BLOG_MANAGE_POST_PER_PAGE'])\n posts = pagination.items\n return render_template('admin/manage_posts.html', pagination=pagination, posts=posts)\n\n\n@admin_bp.route('/post/new', methods=['GET', 'POST'])\ndef new_post():\n form = PostForm()\n if form.validate_on_submit():\n title = form.title.data\n body = form.body.data\n category = Category.query.get(form.category.data)\n post = Post(title=title, body=body, category=category)\n db.session.add(post)\n db.session.commit()\n flash('发表成功.', 'success')\n return redirect(url_for('blog.show_post', post_id=post.id))\n return render_template('admin/new_post.html', form=form)\n\n\n@admin_bp.route('/post/edit/', methods=['GET', 'POST'])\ndef edit_post(post_id):\n form = PostForm()\n post = Post.query.get_or_404(post_id)\n if form.validate_on_submit():\n post.title = form.title.data\n post.body = form.body.data\n post.category = Category.query.get(form.category.data)\n db.session.commit()\n flash('文章更新成功.', 'success')\n return redirect(url_for('blog.show_post', post_id=post_id))\n form.title.data = post.title\n form.body.data = post.body\n form.category.data = post.category_id\n return render_template('admin/edit_post.html', form=form)\n\n\n@admin_bp.route('/post/delete/', methods=['POST'])\ndef delete_post(post_id):\n post = Post.query.get_or_404(post_id)\n db.session.delete(post)\n db.session.commit()\n flash('删除文章成功', 'success')\n return redirect_back()\n\n\n@admin_bp.route('/comments/manage')\ndef manage_comments():\n filter_rule = request.args.get('filter', 'all') # 从查询字符串获取过滤规则\n page = request.args.get('page', 1, type=int)\n per_page = current_app.config['BLOG_MANAGE_COMMENT_PER_PAGE']\n if filter_rule == 'unread':\n filtered_comments = Comment.query.filter_by(reviewed=False)\n elif filter_rule == 'admin':\n filtered_comments = Comment.query.filter_by(from_admin=True)\n else:\n filtered_comments = Comment.query\n\n pagination = filtered_comments.order_by(Comment.timestamp.desc()).paginate(\n page, per_page=per_page)\n comments = pagination.items\n return render_template('admin/manage_comments.html', comments=comments,\n pagination=pagination)\n\n\n@admin_bp.route('/comment/delete/', methods=['POST'])\ndef delete_comment(comment_id):\n comment = Comment.query.get_or_404(comment_id)\n db.session.delete(comment)\n db.session.commit()\n flash('删除评论成功', 'success')\n return redirect_back()\n\n\n@admin_bp.route('/set-comment/', methods=['POST'])\ndef set_comment(post_id):\n post = Post.query.get_or_404(post_id)\n if post.can_comment:\n post.can_comment = False\n flash('当前文章设置为禁止评论', 'info')\n else:\n post.can_comment = True\n flash('当前文章设置为允许评论', 'info')\n db.session.commit()\n return redirect(url_for('blog.show_post', post_id=post_id))\n\n\n@admin_bp.route('/approve_comment/', methods=['POST'])\ndef approve_comment(comment_id):\n comment = Comment.query.get_or_404(comment_id)\n comment.reviewed = True\n db.session.commit()\n flash('评论审核通过', 'success')\n return redirect_back()\n\n\n@admin_bp.route('/categories/manage')\ndef manage_categories():\n return 
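# --- The shared pagination shape of the manage_* views above ---
# Each view reads ?page= from the query string, calls .paginate(), and hands
# pagination.items to its template. Condensed sketch of the pattern (model,
# template and config names are placeholders; the positional
# paginate(page, per_page=...) call matches the older Flask-SQLAlchemy
# releases used above -- Flask-SQLAlchemy 3.x made those arguments keyword-only):
from flask import request, render_template, current_app

def manage_things(Thing):
    page = request.args.get("page", 1, type=int)
    per_page = current_app.config.get("THINGS_PER_PAGE", 20)
    pagination = Thing.query.order_by(Thing.id.desc()).paginate(
        page, per_page=per_page)
    return render_template("manage.html",
                           pagination=pagination,
                           things=pagination.items)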
render_template('admin/manage_categories.html')\n\n\n@admin_bp.route('/category/new', methods=['GET', 'POST'])\ndef new_category():\n form = CategoryForm()\n if form.validate_on_submit():\n name = form.name.data\n category = Category(name=name)\n db.session.add(category)\n db.session.commit()\n flash('分类创建成功', 'success')\n return redirect_back()\n return render_template('admin/new_category.html', form=form)\n\n\n@admin_bp.route('/category/edit/', methods=['GET', 'POST'])\ndef edit_category(category_id):\n form = CategoryForm()\n category = Category.query.get_or_404(category_id)\n if form.validate_on_submit():\n category.name = form.name.data\n db.session.commit()\n flash('分类修改成功', 'success')\n return redirect_back()\n form.name.data = category.name\n return render_template('admin/edit_category.html', form=form)\n\n\n@admin_bp.route('/category/delete/', methods=['POST'])\ndef delete_category(category_id):\n category = Category.query.get_or_404(category_id)\n if category.id == 1:\n flash('不能删除默认分类', 'warning')\n return redirect_back()\n category.delete()\n flash('删除成功', 'success')\n return redirect_back()\n\n\n@admin_bp.route('/settings', methods=['GET', 'POST'])\ndef settings():\n form = SettingForm()\n if form.validate_on_submit():\n print('yes')\n current_user.name = form.name.data\n current_user.blog_title = form.blog_title.data\n current_user.blog_sub_title = form.blog_sub_title.data\n current_user.about = form.about.data\n db.session.commit()\n flash('博客信息更新成功', 'success')\n return redirect(url_for('blog.index'))\n form.name.data = current_user.name\n form.blog_title.data = current_user.blog_title\n form.blog_sub_title.data = current_user.blog_sub_title\n form.about.data = current_user.about\n return render_template('admin/settings.html', form=form)","sub_path":"blog/blueprints/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"495346469","text":"#! 
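Editorial aside on the blog admin record above: manage_posts and manage_comments both lean on Flask-SQLAlchemy's paginate. For readers unfamiliar with the returned Pagination object, a brief usage sketch follows (assuming Flask-SQLAlchemy 2.x, where page and per_page are passed positionally/by keyword as in those views; Post is the model imported there):

# Inside a request context:
page = request.args.get('page', 1, type=int)
pagination = Post.query.order_by(Post.timestamp.desc()).paginate(page, per_page=10)
posts = pagination.items          # the records for the current page
# pagination.total    -> total matching records
# pagination.pages    -> number of pages
# pagination.has_prev / pagination.has_next -> drive prev/next links in templates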
/usr/bin/python\n\nfIn = open(\"lab7_queries.txt\", \"r\")\nfOut = open(\"testQuestions.txt\", \"w\")\n\nfor line in fIn.readlines():\n parts = line.split(\"|\")\n if parts[0][-4:-3] == '?':\n fOut.write(\"(\\\"\" + parts[0][:-3] + \"\\\", 0)\\n\")\n\nfIn = open(\"PossibleQuestions.txt\")\nfor line in fIn.readlines():\n if line[2:3] == '.':\n fOut.write(\"(\\\"\" + line[4:-1] + \"\\\", 1)\\n\")\n else:\n parts = line.split(\"|\")\n if parts[0][-4:-3] == '?':\n fOut.write(\"(\\\"\" + parts[0][:-3] + \"\\\", 1)\\n\")\n","sub_path":"parseQuestions.py","file_name":"parseQuestions.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"173941825","text":"from flask import render_template, redirect, url_for, flash, send_file, make_response, Blueprint, request\nfrom flask_login import current_user\nimport flask_login\nfrom flask_login.utils import login_required\nfrom trial import db\nfrom trial.generalforms.forms import DefectReportForm, LeaveForm\nfrom trial.models import Leave, Post\nimport pdfkit\n\n\ngeneralforms = Blueprint('generalforms', __name__) \n\n\n#Create a route for defect form\n@generalforms.route('/defect', methods=['GET', 'POST'])\ndef defect():\n form = DefectReportForm()\n if form.validate_on_submit():\n flash(f'Defect Report Form submitted successfully', 'success') \n return redirect(url_for('generalforms.defect'))\n posts = Post.query.order_by(Post.id.desc()).all()\n return render_template('generalforms/defect_rep.html', title='Road Defects Report', form=form, posts=posts)\n\n#Create a route for leave Form \n@generalforms.route('/leave', methods=['GET', 'POST'])\n@login_required\ndef leave():\n form = LeaveForm()\n if form.validate_on_submit():\n name=request.form.get('name')\n le_ave = Leave(name=name, rank=form.rank.data, section=form.section.data, date_app=form.date_app.data,\n tele_no=form.tele_no.data, leave_cat=form.leave_cat.data, no_of_days=form.no_of_days.data, \n start_date=form.start_date.data, end_date=form.end_date.data, supp_info=form.supp_info.data,\n address=form.address.data, mobile_no=form.mobile_no.data, email=form.email.data, \n days_proceed=form.days_proceed.data, effec_date=form.effec_date.data, resump_date=form.resump_date.data,\n outs_days=form.outs_days.data, author=current_user)\n db.session.add(le_ave)\n db.session.commit()\n flash(f\"Leave form submitted successfully\", 'success')\n return redirect(url_for('generalforms.view_form', post_id=le_ave.id))\n posts = Post.query.order_by(Post.id.desc()).all() \n return render_template('generalforms/leave_form.html', title='Leave Form Report', form=form, posts=posts)\n\n\n\n#Route to view the Leave form\n@generalforms.route('/post/')\n@login_required\ndef post(post_id):\n post = Leave.query.get_or_404(post_id)\n posts = Post.query.order_by(Post.id.desc()).all()\n return render_template('generalforms/render_form.html', post=post, posts=posts)\n\n#Route for View form \n@generalforms.route('/view_form/', methods=['GET', 'POST'])\n@login_required\ndef view_form(post_id):\n post = Leave.query.get_or_404(post_id)\n posts = Post.query.order_by(Post.id.desc()).all()\n \n return render_template('generalforms/view_lv_form.html', title='Leave', post=post, posts=posts) \n\n#Generate pdf from the Leave Form\n@generalforms.route('/get_pdf/', methods=['GET','POST'])\n@login_required\ndef get_pdf(post_id):\n\n post = Leave.query.get_or_404(post_id)\n posts = Post.query.order_by(Post.id.desc()).all()\n rendered= 
render_template('generalforms/render_form.html', title=current_user.username, post=post, posts=posts)\n css = ['trial/static/css/bootstrap.min.css', 'trial/static/css/style.css']\n\n options = {'enable-local-file-access': None}\n pdf = pdfkit.from_string(rendered, False, options=options, css=css)\n response = make_response(pdf)\n \n response.headers['content-Type'] = 'application/pdf'\n response.headers['content-Disposition'] = 'inline; filename=output.pdf'\n\n return response\n\n#Render pdf format of the Leave Form\n@generalforms.route('/render/', methods=['GET', 'POST'])\ndef render(post_id):\n post = Leave.query.get_or_404(post_id)\n posts = Post.query.order_by(Post.id.desc()).all()\n return render_template('generalforms/render_form.html', post=post, title=current_user.username, posts=posts)\n\n\n#Route to view other forms \n@generalforms.route('/other_forms', methods=['GET', 'POST'])\n@login_required\ndef others():\n posts = Post.query.order_by(Post.id.desc()).all()\n return render_template('generalforms/other_forms.html', title='Other Forms', posts=posts)\n\n#Route to download Hospital Form\n@generalforms.route('/download_hospital-form')\n@login_required\ndef download_hosp():\n p = './static/other_forms/GHANA HIGHWAY AUTHORITY (HOSPITAL FORM).pdf'\n\n return send_file(p, as_attachment=True)\n\n#Route to download Accomodation Form\n@generalforms.route('/download_accomodation-form')\n@login_required \ndef download_accom():\n p = './static/other_forms/GHANA HIGHWAY AUTHORITY (REQUEST FOR ACCOMODATION).pdf'\n\n return send_file(p, as_attachment=True)","sub_path":"trial/generalforms/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"583966673","text":"import sys\nfrom pyknp import Juman\nfrom argparse import ArgumentParser\n\n\ndef parse_args():\n p = ArgumentParser(description='add label(hinshi)')\n p.add_argument('source', help='[in] input fileaname')\n p.add_argument('target', help='[out] output fileaname')\n args = p.parse_args()\n return args\n\n\n# 進捗バーを出力\ndef printProgressInfo(end, now):\n MAX_LEN = 50\n progress = 1.0 if now == end-1 else 1.0 * now / end\n BAR_LEN = MAX_LEN if now == end-1 else int(MAX_LEN * progress)\n progressbar_str = ('[' + '=' * BAR_LEN +\n ('>' if BAR_LEN < MAX_LEN else '=') +\n ' ' * (MAX_LEN - BAR_LEN) +\n '] %.1f%% (%d/%d)' % (progress * 100., now, end))\n sys.stderr.write('\\r' + progressbar_str)\n sys.stderr.flush()\n\n\nif __name__ == '__main__':\n args = parse_args()\n # 出力ファイルのリセット\n fp = open(args.target, \"w\")\n fp.close()\n\n n_line = sum(1 for line in open(args.source, \"r\"))\n\n with open(args.target, \"a\") as target_file:\n for i, line in enumerate(open(args.source, \"r\")):\n juman = Juman()\n input_sentence = line.replace(\" \", \"\")\n res = juman.analysis(input_sentence)\n\n sentence_l = [mrph.midasi+\"/\"+mrph.hinsi for mrph in res.mrph_list()]\n sentence = \" \".join(sentence_l)\n\n target_file.write(sentence+\"\\n\")\n printProgressInfo(n_line, i+1)\n\n print(\"end...\")\n","sub_path":"src/script/addLabel.py","file_name":"addLabel.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"201173631","text":"import time\nimport logging\nimport threading\nimport sys\nimport traceback\nimport platform\n\nimport graphsignal\nfrom graphsignal import statistics\nfrom graphsignal.uploader import Uploader\nfrom 
graphsignal.predictions import Prediction\nfrom graphsignal.windows import Window, Model, Metric, Event\n\nlogger = logging.getLogger('graphsignal')\n\nMAX_TAGS = 10\nMAX_EVENTS = 50\n\nMIN_WINDOW_SIZE = 50\nMIN_WINDOW_DURATION = 120\nMAX_WINDOW_DURATION = 600\n\n_session_index = {}\n_session_index_lock = threading.Lock()\n\n\nclass Session(object):\n __slots__ = [\n '_deployment_name',\n '_tags',\n '_prediction_window',\n '_event_window',\n '_window_start_time',\n '_window_size',\n '_update_lock',\n '_is_updated',\n '_upload_timer'\n ]\n\n def __init__(self, deployment_name):\n self._deployment_name = deployment_name\n self._tags = {}\n self._update_lock = threading.Lock()\n self._reset_window()\n\n def _reset_window(self):\n self._prediction_window = []\n self._event_window = []\n self._window_start_time = time.time()\n self._window_size = 0\n self._is_updated = False\n\n def _set_updated(self):\n self._is_updated = True\n if self._upload_window():\n graphsignal._get_uploader().flush_in_thread()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type and exc_val and exc_tb:\n message = traceback.format_exception_only(exc_type, exc_val)\n stack_trace = traceback.format_tb(exc_tb)\n\n attributes = {}\n if isinstance(message, list) and len(message) > 0:\n attributes['Message'] = str('\\n'.join(message))\n if isinstance(stack_trace, list) and len(stack_trace) > 0:\n attributes['Stack trace'] = str('\\n'.join(stack_trace))\n\n self.log_event(\n description='Prediction exception',\n attributes=attributes,\n is_error=True\n )\n\n def set_tag(self, name=None, value=None):\n '''\n Set model deployment tags.\n\n Args:\n name (:obj:`str`):\n Tag name.\n value (:obj:`int` or :obj:`float`):\n Tag value.\n '''\n\n if not isinstance(name, str) or len(name) > 250:\n logger.error('invalid tag name format')\n return\n if not isinstance(value, (str, int, float)) or len(str(value)) > 2500:\n logger.error(\n 'invalid tag value format for name: {0}'.format(name))\n return\n\n if len(self._tags) >= MAX_TAGS:\n logger.error(\n 'too many tags, max={0}'.format(MAX_TAGS))\n return\n\n self._tags[name] = value\n\n def log_prediction(\n self,\n input_data=None,\n output_data=None,\n actual_timestamp=None):\n '''\n Log single or batch model prediction.\n\n See `Supported Data Formats `_\n for detailed description of data types and formats.\n\n Computed data statistics are uploaded at certain intervals and on process exit. 
No raw data is uploaded.\n\n Args:\n input_data (:obj:`list` or :obj:`dict` or :obj:`numpy.ndarray` or :obj:`pandas.DataFrame`, optional):\n Input data instances.\n output_data (:obj:`list` or :obj:`dict` or :obj:`numpy.ndarray` or :obj:`pandas.DataFrame`, optional):\n Output data instances.\n actual_timestamp (:obj:`int`, optional, default is current timestamp):\n Actual timestamp of the measurement, when different from current timestamp.\n '''\n\n self._window_size += max(\n statistics.estimate_size(input_data),\n statistics.estimate_size(output_data))\n\n with self._update_lock:\n self._prediction_window.append(Prediction(\n input_data=input_data,\n output_data=output_data,\n timestamp=actual_timestamp))\n\n self._set_updated()\n\n def log_event(\n self,\n description=None,\n attributes=None,\n is_error=False,\n actual_timestamp=None):\n '''\n Log arbitrary event or exception.\n\n Args:\n description (:obj:`str`):\n Event description.\n attributes (:obj:`dict`, optional):\n Event attributes.\n is_error (:obj:`bool`, optional):\n Set error type.\n actual_timestamp (:obj:`int`, optional, default is current timestamp):\n Actual timestamp of the measurement, when different from current timestamp.\n '''\n\n if not description or not isinstance(\n description, str) or len(description) > 250:\n logger.error('invalid format for description')\n return\n\n if attributes is not None:\n if isinstance(attributes, dict):\n for name, value in attributes.items():\n if not isinstance(name, str) or len(name) > 250:\n logger.error('invalid attribute name format')\n return\n if not isinstance(value, (str, int, float)):\n logger.error(\n 'invalid attribute value format for attribute name {0}'.format(name))\n return\n else:\n logger.error('invalid attributes format, expecting dict')\n return\n\n if len(self._event_window) >= MAX_EVENTS:\n logger.error('too many events, max={0}'.format(MAX_EVENTS))\n return\n\n type_name = Event.TYPE_INFO\n if is_error:\n type_name = Event.TYPE_ERROR\n event_name = Event.NAME_ERROR\n\n with self._update_lock:\n event = Event(\n type=type_name,\n name=event_name,\n description=description,\n timestamp=actual_timestamp)\n for name, value in attributes.items():\n event.add_attribute(name, value)\n self._event_window.append(event)\n\n self._set_updated()\n\n def _upload_window(self, force=False):\n if not self._is_updated:\n return False\n\n # check if current window should be uploaded\n if not force:\n window_duration = time.time() - self._window_start_time\n if window_duration < MIN_WINDOW_DURATION:\n return False\n if (self._window_size < MIN_WINDOW_SIZE and\n window_duration < MAX_WINDOW_DURATION):\n return False\n\n # reset\n with self._update_lock:\n prediction_window = self._prediction_window\n events_window = self._event_window\n self._reset_window()\n\n # initialize window object\n window = Window()\n\n # set model\n window.model = Model(\n deployment=self._deployment_name)\n if self._tags is not None:\n for name, value in self._tags.items():\n window.model.add_tag(name, value)\n\n # add prediction count metric\n last_timestamp = max([p.timestamp for p in prediction_window if p]) if len(\n prediction_window) > 0 else None\n prediction_count_metric = Metric(\n dataset='model_statistics',\n name='prediction_count',\n aggregation=Metric.AGGREGATION_SUM,\n timestamp=last_timestamp)\n prediction_count_metric.set_gauge(len(prediction_window))\n window.add_metric(prediction_count_metric)\n\n # add computed data metrics\n try:\n data_metrics = statistics.compute_metrics(\n 
prediction_window)\n if data_metrics is not None and len(data_metrics) > 0:\n for metric in data_metrics:\n window.add_metric(metric)\n except Exception:\n logger.error(\n 'Unable to compute data statistics', exc_info=True)\n\n # add events\n for event in events_window:\n window.add_event(event)\n\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('Uploading window:')\n logger.debug(window)\n\n graphsignal._get_uploader().upload_window(window.to_dict())\n return True\n\n\ndef get_session(deployment_name):\n if not deployment_name or len(deployment_name) > 250:\n raise ValueError('invalid deployment_name format')\n\n with _session_index_lock:\n if deployment_name in _session_index:\n return _session_index[deployment_name]\n else:\n sess = Session(deployment_name)\n _session_index[deployment_name] = sess\n return sess\n\n\ndef reset_all():\n with _session_index_lock:\n _session_index.clear()\n\n\ndef upload_all(force=False):\n session_list = None\n with _session_index_lock:\n session_list = _session_index.values()\n\n uploaded = False\n for session in session_list:\n if session._upload_window(force=force):\n uploaded = True\n\n return uploaded\n","sub_path":"graphsignal/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":9286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"633389609","text":"import requests\nimport datetime\nfrom bs4 import BeautifulSoup\nimport google_calendar_lib as GCar\n\nclass Chouseisan(object):\n def __init__(self):\n calendar_id = '4hrfgm5memn5sdfgiahhsjsme8@group.calendar.google.com'\n days = 7\n self.holidays = GCar.get_holidays(calendar_id, days)\n self.week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n\n def _get_token(self):\n url_for_token = 'https://chouseisan.com'\n response = requests.get(url_for_token)\n response.raise_for_status()\n\n soup = BeautifulSoup(response.text, 'html.parser')\n\n return soup.find(id='chousei_token').get('value')\n\n def create_schedule(self, name='No_title', comment='No_comment', kouho='No_option'):\n create_url = 'https://chouseisan.com/schedule/newEvent/create'\n payload = { 'name' : name, 'comment' : comment, 'kouho' : kouho, 'chousei_token' : self._get_token() }\n response = requests.post(create_url, data=payload, allow_redirects=False)\n\n complete_url = response.headers['location']\n new_schedule_url = 'https://chouseisan.com/s?' 
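Editorial aside on the graphsignal sessions module above: taken together with get_session at the bottom of that file, typical call sites look like the sketch below. The deployment name, tag, and data values are illustrative only, and the sketch assumes the library itself has already been initialized (initialization lives outside this module):

import graphsignal.sessions as sessions

sess = sessions.get_session('my-deployment')      # hypothetical deployment name
sess.set_tag('region', 'us-east-1')

# Only windowed statistics are uploaded in the background; raw data stays local.
sess.log_prediction(input_data=[[0.1, 0.7, 0.2]], output_data=[[0.93]])

# As a context manager, the session records uncaught exceptions as error events.
with sess:
    pass  # prediction code would go here

sessions.upload_all(force=True)   # flush pending windows, e.g. at shutdown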
+ complete_url.split('?')[1]\n return new_schedule_url\n\n def get_total(self, url, number=3):\n response = requests.get(url)\n response.raise_for_status()\n soup = BeautifulSoup(response.text, 'html.parser')\n\n tables = soup.find(id='nittei')\n simple_table = []\n table_list = [table for table in tables] # faster than `tables.contents`\n table_list = table_list[1::2] # remove blank line\n\n for table in table_list:\n row = []\n for t in table:\n if \"\" == str(type(t)):\n elements = [e for e in t]\n row.append(elements[0].string if 1 == len(elements) else elements[1].contents[0].string)\n simple_table.append(row)\n\n simple_table.pop()\n simple_table.sort(key=lambda l: l.count('○'))\n simple_table.reverse()\n names = simple_table.pop()\n names.pop(0) # remove '日程'\n\n results = []\n\n for t in simple_table[0:number]:\n date = t.pop(0)\n result_row = [date]\n for i, e in enumerate(t):\n if '○' == e:\n result_row.append(names[i])\n results.append(result_row)\n\n s = ''\n\n for result in results:\n s += result[0] + '\\n'\n s += ' '.join(result[1:]) + '\\n\\n'\n\n return s[:-2]\n\n def get_days(self, num=7):\n dt_now = datetime.datetime.now()\n result = []\n for i in range(num):\n result.append((dt_now + datetime.timedelta(days = i)).date().isoformat())\n return result\n\n def get_options_str(self):\n result = ''\n for i, date in enumerate(self.get_days()):\n if date in self.holidays:\n result += date[5:] + ' (' + self.week[i] + '.) ' + 'AM' + '\\n'\n result += date[5:] + ' (' + self.week[i] + '.) ' + 'PM' + '\\n'\n else:\n result += date[5:] + ' (' + self.week[i] + '.) ' + '4限' + '\\n'\n result += date[5:] + ' (' + self.week[i] + '.) ' + '5限' + '\\n'\n result += date[5:] + ' (' + self.week[i] + '.) ' + '6限' + '\\n'\n return result\n\nif __name__ == '__main__':\n cho = Chouseisan()\n test = cho.get_options_str()\n print(cho.holidays)\n print(test)\n\n","sub_path":"chouseisanlib.py","file_name":"chouseisanlib.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"294862319","text":"# In MADDPG, each agent has its own actor and its own critic\n# original [Paper](http://arxiv.org/abs/1706.02275)\n\n# responsible for\n# - action taking\n# - model updates (policy + critic)\n# see ddpg.py for other details of one DDPG agent\n\n# hyper-parameters\n# - [policies are parameterized by a 2-layer ReLU MLP with 64 units per layer]o.p.\n# - [2 hidden layers with 400 and 300 units respectively]ddpg.p.\n# - [actions were not included until the 2nd hidden layer of Q]ddpg.p.\n\n# Features:\n# - For Critic, use Huber-loss (less sensitive to outliers than the squared error loss)\n# - quadratic for small values of [target-estimate], and linear for large values\n\nfrom ddpg import DDPGAgent\nimport torch\nfrom utilities import soft_update, transpose_to_tensor\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# device = 'cpu'\n\n\nclass MADDPG:\n def __init__(self, discount_factor=0.95, tau=0.01):\n super(MADDPG, self).__init__()\n\n # args = in_actor, hidden_in_actor, hidden_out_actor, out_actor, in_critic, hidden_in_critic, hidden_out_critic\n # critic input = obs_full + actions = 2*24+2+2=52\n self.maddpg_agent = [DDPGAgent(24, 400, 300, 2, 52, 400, 300),\n DDPGAgent(24, 400, 300, 2, 52, 400, 300)]\n # DDPGAgent(24, 16, 8, 2, 52, 32, 16)]\n\n self.discount_factor = discount_factor\n self.tau = tau\n self.iter = 0\n\n def act(self, obs_all_agents, noise=0.0):\n \"\"\"get local network 
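Editorial aside on the chouseisanlib record above: create_schedule is an instance of a common scrape-then-submit pattern, namely GET the form page, read the hidden anti-CSRF token out of the HTML, then POST the form data with that token attached. A generic sketch of the same pattern (URL, path, and field name are placeholders, not Chouseisan's real endpoints):

import requests
from bs4 import BeautifulSoup

def post_with_hidden_token(base_url, form_path, payload, token_field='csrf_token'):
    # Step 1: fetch the page that embeds the one-time token.
    page = requests.get(base_url)
    page.raise_for_status()
    soup = BeautifulSoup(page.text, 'html.parser')
    token = soup.find('input', {'name': token_field}).get('value')

    # Step 2: submit the form together with the token the server expects.
    data = dict(payload, **{token_field: token})
    resp = requests.post(base_url + form_path, data=data, allow_redirects=False)
    # Many endpoints answer with a redirect to the newly created resource.
    return resp.headers.get('location')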
actions from all agents\"\"\"\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions\n\n def target_act(self, obs_all_agents, noise=0.0):\n \"\"\"get target network actions from all the agents in the MADDPG object \"\"\"\n target_actions = [agent.target_act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions\n\n def update(self, samples, agent_number, logger):\n \"\"\"\n update critic and actor nets of one given agent\n :param samples: a list, representing the batch and containing 7 elements\n samples = [obs, obs_full, action, reward, next_obs, next_obs_full, done]\n with e.g. done = [[False, False], [False, False], False, False]] for batch_size = 3\n :param agent_number: int -- in [0, 1]\n :param logger: writer object for tensorboard\n :return: -\n \"\"\"\n # figure out whether GPU or CPU is used\n # print(\"completing Update() with device = {}\".format(device))\n\n # need to transpose each element of the samples\n # to flip obs[parallel_agent][agent_number] to\n # obs[agent_number][parallel_agent]\n obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)\n obs_full = torch.stack(obs_full).to(device)\n next_obs_full = torch.stack(next_obs_full).to(device)\n\n # only one agent it updated\n agent = self.maddpg_agent[agent_number]\n\n # ---------------------------- update local critic ---------------------------- #\n agent.critic_optimizer.zero_grad()\n # critic loss = batch-mean of [y - Q(s,a) from local network]^2\n # y = reward of this timestep + discount * Q(st+1, at+1) from target network\n\n # get predicted next-state actions and Q values from target models\n target_actions = self.target_act(next_obs)\n target_actions = torch.cat(target_actions, dim=1)\n\n # feed the concatenated (states + actions) directly into the *input* layer of the critic\n target_critic_input = torch.cat((next_obs_full.t(), target_actions), dim=1).to(device)\n with torch.no_grad():\n q_next = agent.target_critic(target_critic_input)\n\n # compute the TD-target\n y = reward[agent_number].view(-1, 1).to(device) + self.discount_factor * q_next * \\\n (1 - done[agent_number].view(-1, 1).to(device))\n\n # compute the TD-estimate\n # action = torch.cat(action, dim=1)\n action = torch.cat(action, dim=1).to(device)\n critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)\n q_estimate = agent.local_critic(critic_input)\n # print(\"q_estimate = {}\".format(q_estimate))\n\n # compute loss on [TD-target - TD-estimate]^2\n huber_loss = torch.nn.SmoothL1Loss()\n critic_loss = huber_loss(q_estimate, y.detach()) # y.detach() to prevent grads back\n\n # minimize the loss: 1)perform a backward pass and 2)update the weights\n # use autograd to compute the backward pass\n critic_loss.backward()\n\n # torch.nn.utils.clip_grad_norm_(agent.local_critic.parameters(), 0.5)\n # update the weights\n agent.critic_optimizer.step()\n\n # ---------------------------- update local actor ---------------------------- #\n # update actor local network using policy gradient\n agent.actor_optimizer.zero_grad()\n\n # each local actor (#1 and #2) gives its actions\n actions_predict = [self.maddpg_agent[i].local_actor(ob.to(device)).to(device) if i == agent_number else\n self.maddpg_agent[i].local_actor(ob.to(device)).detach().to(device) # detach() prevents grads back\n for i, ob in enumerate(obs)]\n actions_predict = torch.cat(actions_predict, dim=1).to(device)\n\n # combine all the actions and 
observations for input to local critic\n # many of the obs are redundant, and obs[1] contains all useful information already\n critic_input = torch.cat((obs_full.t(), actions_predict), dim=1)\n\n # use samples to estimate the expectation of gradient. Hence mean()\n # Deterministic Gradient Policy Theorem: gradient = expectation[Q-values]\n # pytorch by default does gradient DESCENT. Hence minus term for ASCENT\n actor_loss = -agent.local_critic(critic_input).mean()\n\n actor_loss.backward()\n # torch.nn.utils.clip_grad_norm_(agent.local_actor.parameters(),0.5)\n agent.actor_optimizer.step()\n\n # ---------------------------- Logging ---------------------------- #\n # torch.Tensor.item() to get a Python number from a tensor containing a single value\n a_l = actor_loss.cpu().detach().item() # prevent grads back\n c_l = critic_loss.cpu().detach().item()\n logger.add_scalars('agent%i/losses' % agent_number,\n {'local_critic_loss': c_l,\n 'local_actor_loss': a_l},\n self.iter) # number of network updates (local -> target)\n\n def update_targets(self):\n \"\"\"soft update of critic and actor target networks for all agents\"\"\"\n self.iter += 1\n for ddpg_agent in self.maddpg_agent:\n soft_update(ddpg_agent.target_actor, ddpg_agent.local_actor, self.tau)\n soft_update(ddpg_agent.target_critic, ddpg_agent.local_critic, self.tau)\n\n def reset(self):\n for ddpg_agent in self.maddpg_agent:\n ddpg_agent.reset()\n\n def save(self, saving_name):\n for i, ddpg_agent in enumerate(self.maddpg_agent):\n torch.save(ddpg_agent.local_actor.state_dict(), saving_name + '-' + str(i) + '.actor.pth')\n torch.save(ddpg_agent.local_critic.state_dict(), saving_name + '-' + str(i) + '.critic.pth')\n\n def load(self, loading_name):\n for i, ddpg_agent in enumerate(self.maddpg_agent):\n actor_file = torch.load(loading_name + '-' + str(i) + '.actor.pth', map_location='cpu')\n critic_file = torch.load(loading_name + '-' + str(i) + '.critic.pth', map_location='cpu')\n # same config for locals and targets\n ddpg_agent.local_actor.load_state_dict(actor_file)\n ddpg_agent.target_actor.load_state_dict(actor_file)\n ddpg_agent.local_critic.load_state_dict(critic_file)\n ddpg_agent.target_critic.load_state_dict(critic_file)\n print('Loaded: {}.actor.pth'.format(loading_name))\n print('Loaded: {}.critic.pth'.format(loading_name))\n","sub_path":"p3_collab-compet/src_draft_maddpg/maddpg.py","file_name":"maddpg.py","file_ext":"py","file_size_in_byte":8084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"563120809","text":"# coding:utf-8\n# 获取excel单元格中的内容,获取行数据,以及数据值判断\nfrom tool.operation_excel import OperationExcel\nfrom operation_data.data_config import DataConfig\nfrom tool.operation_json import OperationJson\nfrom tool.connect_db import OperationMysql\nfrom tool.common_util import CommonUtil\nimport json\n\n\nclass GetData:\n def __init__(self, file_path, sheet_id):\n self.opera_excel = OperationExcel(file_path, sheet_id)\n self.data_config = DataConfig()\n self.com_util = CommonUtil()\n\n def get_case_lines(self):\n \"\"\"\n 去获取excel行数,就是case的个数\n :return:\n \"\"\"\n return self.opera_excel.get_lines()\n\n def get_is_run(self, row):\n \"\"\"\n 获取是否可行\n :param row:\n :return:\n \"\"\"\n flag = None\n col = int(DataConfig.get_is_run())\n run_model = self.opera_excel.get_cell_value(row, col)\n if run_model == 'yes':\n flag = True\n else:\n flag = False\n return flag\n\n def is_header(self, row):\n \"\"\"\n 是否携带header\n :param row:\n :return:\n \"\"\"\n col = 
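Editorial aside on the maddpg record above: it imports soft_update from utilities without showing it. The call sites soft_update(target, local, tau) match the conventional Polyak-averaging helper sketched below (an assumption about that helper, not the repository's actual code):

import torch

def soft_update(target, source, tau):
    # target <- tau * source + (1 - tau) * target, parameter by parameter,
    # so the target networks trail the local ones and stabilize training.
    with torch.no_grad():
        for t_param, s_param in zip(target.parameters(), source.parameters()):
            t_param.data.copy_(tau * s_param.data + (1.0 - tau) * t_param.data)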
int(DataConfig.get_header())\n header = self.opera_excel.get_cell_value(row, col)\n if header != '':\n return header\n else:\n return None\n\n def get_run_method(self, row):\n \"\"\"\n 获取请求方式\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_run_method())\n request_method = self.opera_excel.get_cell_value(row, col)\n return request_method\n\n def get_request_url(self, row):\n \"\"\"\n 获取url\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_url())\n url = self.opera_excel.get_cell_value(row, col)\n return url\n\n def get_is_depend(self, row):\n \"\"\"\n 判断是否有case依赖,并返回caseid\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_case_depend())\n depend_case_id = self.opera_excel.get_cell_value(row, col)\n if depend_case_id == \"\":\n return None\n return depend_case_id\n\n def get_depend_key(self, row): # invalid func\n \"\"\"\n 获取依赖数据的key\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_depend_key())\n depend_key = self.opera_excel.get_cell_value(row, col)\n if depend_key == \"\":\n return None\n return depend_key\n\n def get_current_key(self, row): # invalid func\n \"\"\"\n 获取当前请求的key\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_current_key())\n current_key = self.opera_excel.get_cell_value(row, col)\n if current_key == '':\n return None\n return current_key\n\n def get_other_depend_key(self, row): # invalid func\n \"\"\"\n 获取其他依赖字段,不从接口中直接得到的依赖字段\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_other_depend_key())\n other_depend_key = self.opera_excel.get_cell_value(row, col)\n if other_depend_key == '':\n return None\n return other_depend_key\n\n def _get_other_depend_key_sql(self, row): # invalid func\n \"\"\"\n 获取查询other_depend_key的sql\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_other_depend_key_sql())\n other_depend_key_sql = self.opera_excel.get_cell_value(row, col)\n if other_depend_key_sql == '':\n return None\n return other_depend_key_sql\n\n def get_other_depend_key_value_from_db(self, row): # invalid func\n \"\"\"\n 获取需要依赖DB查询值的other_depend_key\n :param row:\n :return: return a sql query result\n \"\"\"\n op_mysql = OperationMysql()\n # op_mysql.connect_db()\n sql = self._get_other_depend_key_sql(row)\n result = op_mysql.get_one(op_mysql.execute_sql(sql))\n op_mysql.close_db()\n return result\n\n def get_request_data(self, row):\n \"\"\"\n 获取请求数据\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_request_data())\n data = self.opera_excel.get_cell_value(row, col)\n if data == '':\n return None\n return data # type is str\n\n def get_file_path(self, row):\n \"\"\"\n 获取上传文件的路径\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_file_path())\n file_path = self.opera_excel.get_cell_value(row, col)\n if file_path == '':\n return None\n return file_path\n\n def get_key_need_randomstr(self, row):\n \"\"\"\n 获取需要使用随机字符串的key\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_key_need_randomstr())\n key_need_randomstr = self.opera_excel.get_cell_value(row, col)\n if key_need_randomstr == '':\n return None\n return key_need_randomstr\n\n def get_random_str(self, row):\n \"\"\"\n 获取随机字符串\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_random_str())\n random_str = self.opera_excel.get_cell_value(row, col)\n if random_str == '':\n return None\n return random_str\n\n def update_key_with_randomstr(self, row):\n \"\"\"\n 使用随机字符串random_str复制给request_data中需要使用随机字符串的key,\n 如果key_need_randomstr为空,返回的是未经处理的request_data\n :param row:\n :return: 替换过key值的request_data\n \"\"\"\n 
request_data = self.get_request_data(row)\n request_data = json.loads(request_data, encoding='UTF-8')\n key_need_randomstr = self.get_key_need_randomstr(row)\n if key_need_randomstr == '':\n return request_data\n random_str = self.get_random_str(row)\n request_data[key_need_randomstr] = random_str\n return request_data\n\n def get_request_param(self, row): # invalid func\n \"\"\"\n 获取需要依赖DB查询值的request_data中的字段\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_request_param())\n request_param = self.opera_excel.get_cell_value(row, col)\n if request_param == '':\n return None\n return request_param\n\n def _get_request_param_sql(self, row): # invalid func\n \"\"\"\n 获取查询request_param值的sql\n :return:\n \"\"\"\n col = int(DataConfig.get_request_param_sql())\n request_param_sql = self.opera_excel.get_cell_value(row, col)\n if request_param_sql == '':\n return None\n return request_param_sql\n\n def get_request_param_value_from_db(self, row): # invalid func\n \"\"\"\n 查询sql返回request_param的值\n :param row:\n :return: return a sql query result\n \"\"\"\n op_mysql = OperationMysql()\n # op_mysql.connect_db()\n sql = self._get_request_param_sql(row)\n result = op_mysql.get_one(op_mysql.execute_sql(sql))\n op_mysql.close_db()\n return result\n\n def get_expect_as_fixed_value(self, row):\n \"\"\"\n 获取完整及固定值的期望结果\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_expect_as_fixed_value())\n expect_fixed_value = self.opera_excel.get_cell_value(row, col)\n if expect_fixed_value == '':\n return None\n return expect_fixed_value\n\n def get_expect_code(self, row):\n \"\"\"\n 获取期望的code值\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_expect_code())\n expect_code = self.opera_excel.get_cell_value(row, col)\n if expect_code == '':\n return None\n return int(expect_code)\n\n def get_expect_msg(self, row):\n \"\"\"\n 获取期望的接口json返回值中的msg值\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_expect_msg())\n expect_msg = self.opera_excel.get_cell_value(row, col)\n if expect_msg == '':\n return None\n return expect_msg\n\n def get_expect_data(self, row):\n \"\"\"\n 获取期望的接口json返回值中的data部分的关键key\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_expect_data())\n expect_key = self.opera_excel.get_cell_value(row, col)\n if expect_key == '':\n return None\n return expect_key\n\n def get_expect_key_sql(self, row):\n \"\"\"\n 获取sql\n :param row:\n :return:\n \"\"\"\n col = int(DataConfig.get_expect_key_sql())\n sql = self.opera_excel.get_cell_value(row, col)\n if sql == '':\n return None\n return sql\n\n def get_expect_key_value_from_db(self, row): # 无用函数\n \"\"\"\n 通过sql获取预期data中的key值\n :param row:\n :return:\n \"\"\"\n op_mysql = OperationMysql()\n # op_mysql.connect_db()\n sql = self.get_expect_key_sql(row)\n result = op_mysql.get_one(op_mysql.execute_sql(sql))\n op_mysql.close_db()\n return result\n\n def write_api_assertion_result(self, row, value):\n \"\"\"\n 将执行结果pass/fail写入excel\n :param row:\n :param value:\n :return:\n \"\"\"\n col = int(DataConfig.get_api_assertion_result())\n self.opera_excel.write_value(row, col, value)\n\n def write_response(self, row, res):\n \"\"\"\n 将接口返回写入excel\n :param row:\n :param res:\n :return:\n \"\"\"\n col = int(DataConfig.get_response_result())\n self.opera_excel.write_value(row, col, res)\n\n def get_response_result(self, row):\n col = int(DataConfig.get_response_result())\n response_result = self.opera_excel.get_cell_value(row, col)\n if response_result == '':\n return None\n return response_result\n\n def 
is_set_global_vars(self, row):\n \"\"\"\n 返回set_global_vars\n :param row:\n :return:\n \"\"\"\n col= int(DataConfig.is_set_global_vars())\n global_vars = self.opera_excel.get_cell_value(row, col)\n if global_vars == '':\n return None\n return global_vars\n\n\n # def get_field_name_for_assert_from_db(self, row):\n # col = int(GlobalVar.get_field_name_for_assert_from_db())\n # data = self.opera_excel.get_cell_value(row, col)\n # if data == \"\":\n # return None\n # else:\n # return data\n #\n # def get_field_name_for_assert_from_response(self, row):\n # col = int(GlobalVar.get_field_name_for_assert_from_response())\n # data = self.opera_excel.get_cell_value(row, col)\n # if data == \"\":\n # return None\n # else:\n # return data\n\n # def get_target_value_for_assert_from_db(self, row):\n # field_for_assert = self.get_field_name_for_assert_from_db(row)\n # expect_result_from_db = self.get_expect_data_from_mysql(row)\n # target_value_from_expect_result = []\n # self.com_util.get_target_value(field_for_assert, expect_result_from_db, target_value_from_expect_result)\n # return target_value_from_expect_result # return a list\n #\n # def get_target_value_for_assert_from_response(self, row, response):\n # field_for_assert = self.get_field_name_for_assert_from_response(row)\n # target_value_from_response = []\n # self.com_util.get_target_value(field_for_assert, response, target_value_from_response)\n # return target_value_from_response # return a list\n\n\n # 获取依赖字段是否与当前请求的字段同名\n # def is_same_field_name(self, row):\n # col = int(GlobalVar.get_is_same_field_name())\n # current_field = self.opera_excel.get_cell_value(row, col)\n # if current_field == \"\":\n # return None\n # else:\n # return current_field\n # 通过获取关键字拿到data数据 先从excel指定行中得到请求数据作为key去json文件里面读取到该key对应的value\n # value可能是登录后获取的cookie参数\n\n # def get_data_from_json(self, row): # ???没用的函数???\n # opera_json = OperationJson()\n # request_data = opera_json.get_data(self.get_request_data(row))\n # return request_data\n\n\nif __name__ == '__main__':\n demoGetData = GetData('../dataconfig/case3.xls', 0)\n # print(demoGetData.get_case_lines())\n # print(demoGetData.get_is_run(3))\n # print(demoGetData.is_header(1))\n # print(demoGetData.get_run_method(3))\n # print(demoGetData.get_request_url(2))\n # print(demoGetData.get_is_depend(3))\n # print(demoGetData.get_depend_key(3))\n # print(demoGetData.get_current_key(3))\n # print(demoGetData.get_other_depend_key(3))\n # print(demoGetData.get_other_depend_key_sql(3))\n # print(demoGetData.get_other_depend_key_value_from_db(3))\n print('-------------------------------------------分割线-----------------------------------------------------------')\n # print(demoGetData.get_request_data(3))\n print('-------------------------------------------分割线-----------------------------------------------------------')\n # print(demoGetData.get_file_path(5))\n # print(demoGetData.get_key_need_randomstr(4))\n # print(demoGetData.get_random_str(4))\n # print(demoGetData.update_key_with_randomstr(4))\n # print(demoGetData.get_request_param(4))\n # print(demoGetData.get_request_param_sql(4))\n # print(demoGetData.get_request_param_value_from_db(4))\n # print(demoGetData.get_expect_as_fixed_value(5))\n # print(demoGetData.get_expect_code(5))\n # print(demoGetData.get_expect_msg(5))\n # print(demoGetData.get_expect_data(5))\n # print(demoGetData.get_expect_key_value_from_db(5))\n # value = '{\"code\":0,\"msg\":\"iot switch off\",\"data\":null}'\n # demoGetData.write_response(5, value)\n # 
print(demoGetData.get_response_result(5))\n # demoGetData.write_api_assertion_result(5, 'pass')\n # temp = demoGetData.update_key_with_randomstr(4)\n # print(temp)\n # file_path = demoGetData.get_file_path(5)\n # print(file_path)\n # temp = demoGetData.get_expect_key_sql(1)\n # print(temp)\n # print(demoGetData.get_expect_code(1))\n # print(demoGetData.is_set_global_vars(1))\n\n\n temp = demoGetData.get_request_data(5)\n print(type(temp))\n temp2 = json.loads(temp)\n print(temp2)\n print(type(temp2))\n\n\n\n","sub_path":"operation_data/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":14760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"178640263","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nclass Solution:\n # @return a boolean\n def isMatch(self, s, p):\n string_length = len(s)\n pattern_length = len(p)\n \n table = [[False] * (pattern_length + 1) for i in range(string_length + 1)]\n table[0][0] = True\n \n for i in range(1, pattern_length + 1):\n if p[i - 1] == '*' and table[0][i - 2]:\n table[0][i] = True\n\n for i in range(1, string_length + 1):\n for j in range(1, pattern_length + 1):\n char = s[i - 1]\n pattern = p[j - 1]\n \n if pattern == '*':\n if table[i][j - 2]:\n table[i][j] = True\n elif self.match(char, p[j - 2]) and table[i - 1][j]:\n table[i][j] = True\n else:\n if self.match(char, pattern) and table[i - 1][j - 1]:\n table[i][j] = True\n \n return table[string_length][pattern_length]\n \n def match(self, char, pattern):\n return char == pattern or pattern == '.'\n","sub_path":"Algorithms/010_regular_expression_matching.py","file_name":"010_regular_expression_matching.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"442744504","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom face2face.models import MeshModel\nfrom face2face.utils.opengl import Render\nimport face2face.optimize.image as opt\nfrom face2face.utils.mesh import generateFace, generateTexture\n\nimport dlib\nimport cv2\nimport numpy as np\nfrom scipy.optimize import least_squares\nfrom skimage import img_as_float, img_as_ubyte\n\nimport os\nimport glob\nimport argparse\nimport time\nimport pandas as pd\nfrom tqdm import tqdm\n\n\n\ndef getFaceKeypoints(img, detector, predictor, maxImgSizeForDetection=320):\n imgScale = 1\n scaledImg = img\n if max(img.shape) > maxImgSizeForDetection:\n imgScale = maxImgSizeForDetection / float(max(img.shape))\n scaledImg = cv2.resize(img, (int(img.shape[1] * imgScale), int(img.shape[0] * imgScale)))\n\n dets = detector(scaledImg, 1)\n\n if len(dets) == 0:\n return None\n\n shapes2D = []\n for det in dets:\n faceRectangle = dlib.rectangle(int(det.left() / imgScale), int(det.top() / imgScale), int(det.right() / imgScale), int(det.bottom() / imgScale))\n dlibShape = predictor(img, faceRectangle)\n shape2D = np.array([[p.x, p.y] for p in dlibShape.parts()])\n shape2D = shape2D.T\n shapes2D.append(shape2D)\n\n return shapes2D\n\n\ndef loadOpenFaceKeypoints(frame_cnt, openFace_landmarks):\n shapes2D = []\n frame = openFace_landmarks[openFace_landmarks['frame'] == frame_cnt]\n\n for i in range(0, 68):\n x = frame[' x_' + str(i)].values[0]\n y = frame[' y_' + str(i)].values[0]\n shapes2D.append([x, y])\n\n return shapes2D\n\n\ndef saveImage(path, img):\n b,g,r = cv2.split(img)\n img = cv2.merge([r,g,b])\n img = img_as_ubyte(img)\n cv2.imwrite(path, img)\n\n\ndef main():\n # Set 
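Editorial aside on the regular-expression record above: table[i][j] holds whether the first i characters of s match the first j characters of p, with '*' either dropping its preceding element (table[i][j-2]) or consuming one more matching character (table[i-1][j]). A short driver showing the expected behaviour on the classic LeetCode 10 cases:

sol = Solution()
assert sol.isMatch("aa", "a") is False      # a single 'a' cannot cover both characters
assert sol.isMatch("aa", "a*") is True      # '*' repeats the preceding 'a'
assert sol.isMatch("ab", ".*") is True      # '.*' matches any sequence
assert sol.isMatch("aab", "c*a*b") is True  # 'c*' matches zero 'c' characters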
weights for the 3DMM RGB color shape, landmark shape, and regularization terms\n max_iterations = 9\n wCol = 1\n # old\n # wLan = 2.5e-5\n # wRegS = 1.25e-4\n \n # init\n # wLan = 2.9e-5\n # wRegS = 0.25e-5\n\n # dlib\n # wLan = 1.25e-5\n # wRegS = 0.25e-5\n wLan = 0\n wRegS = 2.5e-5\n\n # openFace - Test\n # wLan = 1.3e-5\n # wRegS = 0.6e-4\n\n # lsmr is numerically stable and faster\n tr_solver = 'lsmr'\n\n\n # Change directory to the folder that holds the VRN data, OpenPose landmarks, and original images (frames) from the source video\n os.chdir('./data')\n \n # Load 3DMM\n m = MeshModel('../models/bfm2017.npz')\n \n # Set an orthographic projection for the camera matrix\n cam = 'orthographic'\n\n # Landmark detector\n if FLAGS.openFace_landmarks is None:\n print('Using dlib landmarks...')\n predictor_path = \"../models/shape_predictor_68_face_landmarks.dat\"\n detector = dlib.get_frontal_face_detector()\n predictor = dlib.shape_predictor(predictor_path)\n else:\n print('Using openFace landmarks...')\n openFaceData = pd.read_csv(FLAGS.openFace_landmarks)\n\n # apply mask on faces if supplied\n if FLAGS.face_mask is not None:\n mask_id = np.load(FLAGS.face_mask)\n m.face = np.delete(m.face, mask_id, axis = 0)\n m.vertex2face = np.array([np.where(np.isin(m.face.T, vertexInd).any(axis = 0))[0] for vertexInd in range(m.numVertices)])\n\n # Load parameters\n all_param = np.load(FLAGS.parameters)\n texCoef = all_param[:m.numTex]\n shCoef = all_param[m.numTex: m.numTex + 27]\n param = all_param[m.numTex + 27:]\n idCoef = param[:m.numId]\n expCoef = param[m.numId : m.numId + m.numExp]\n\n vertexImgColor = None\n if FLAGS.img_texture is not None:\n vertexImgColor = np.load(os.path.join(FLAGS.img_texture))\n\n data_path = os.path.join(FLAGS.input_dir, '*.png')\n keyframes = glob.glob(data_path)\n\n start = time.time()\n\n for i in tqdm(range(FLAGS.start_frame, len(keyframes))):\n fNameImgOrig = os.path.join(FLAGS.input_dir, str(i) + '.png')\n\n # Load the source video frame and convert to 64-bit float\n b,g,r = cv2.split(cv2.imread(fNameImgOrig))\n img_org = cv2.merge([r,g,b])\n img_org = cv2.GaussianBlur(img_org, (5, 5), 0)\n img = img_as_float(img_org)\n\n if FLAGS.openFace_landmarks is None:\n shape2D = getFaceKeypoints(img_org, detector, predictor)\n shape2D = np.asarray(shape2D)[0].T\n else:\n shape2D = loadOpenFaceKeypoints(i + 1, openFaceData)\n shape2D = np.asarray(shape2D)\n\n lm = shape2D[m.targetLMInd, :2]\n\n if i == FLAGS.start_frame:\n vertexCoords = generateFace(np.r_[param[:-1], 0, param[-1]], m)\n # Rendering of initial 3DMM shape with mean texture model\n texParam = np.r_[texCoef, shCoef.flatten()]\n meshData = np.r_[vertexCoords.T, m.texMean.T]\n renderObj = Render(img.shape[1], img.shape[0], meshData, m.face)\n\n # Adjust Landmarks to be consistent across segments\n p1_id = 27 # nose\n p2_id = 8 # jaw\n x2 = lm[p1_id, 0]\n x1 = lm[p2_id, 0]\n y2 = lm[p1_id, 1]\n y1 = lm[p2_id, 1]\n nosejaw_dist = ((x2 - x1)**2 + (y2 - y1)**2)**(1/2)\n wLan = wLan * (225.0 / nosejaw_dist)\n\n\n # \"\"\"\n # Optimization over all experssion & SH\n # \"\"\"\n # LSMR is numerically stable combared to the default option (Exact)\n initFit = least_squares(opt.denseJointExpResiduals, np.r_[shCoef, param[m.numId:]], tr_solver = tr_solver, max_nfev = max_iterations, jac = opt.denseJointExpJacobian, args = (idCoef, texCoef, img, lm, m, renderObj, (wCol, wLan, wRegS), vertexImgColor), verbose = 0, x_scale = 'jac')\n shCoef = initFit['x'][:27]\n expCoef = initFit['x'][27:]\n param = np.r_[idCoef, 
expCoef]\n\n # # Generate 3DMM vertices from shape and similarity transform parameters\n vertexCoords = generateFace(np.r_[param[:-1], 0, param[-1]], m)\n\n # Generate the texture at the 3DMM vertices from the learned texture coefficients\n texParam = np.r_[texCoef, shCoef.flatten()]\n texture = generateTexture(vertexCoords, texParam, m, vertexImgColor)\n\n # Render the 3DMM\n renderObj.updateVertexBuffer(np.r_[vertexCoords.T, texture.T])\n renderObj.resetFramebufferObject()\n renderObj.render()\n rendering = renderObj.grabRendering()\n\n saveImage(os.path.join(FLAGS.output_dir, str(i) + \".png\"), rendering)\n np.save(os.path.join(FLAGS.output_dir, str(i) + \"_params\"), np.r_[shCoef, param])\n\n elapsed = time.time() - start\n print(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed)))\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description = 'Initialize Identity & Texture from multiple frames')\n parser.add_argument('--input_dir', help = 'Path to frames')\n parser.add_argument('--parameters', help = 'Path to parameters to start tracking')\n parser.add_argument('--output_dir', help = 'Output directory')\n parser.add_argument('--openFace_landmarks', help = 'Path to openface landmarks otherwise dlib will be used (optional)')\n parser.add_argument('--img_texture', help = 'Path to texture (vertex space) instead of PCA model (optional)')\n parser.add_argument('--face_mask', help = 'Path to face ids to mask as eyes (optional)')\n parser.add_argument('--start_frame', help = 'Frame to start tracking from',type = int, default = 0)\n\n FLAGS, unparsed = parser.parse_known_args()\n\n main()\n","sub_path":"cli/tracker.py","file_name":"tracker.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"15"} +{"seq_id":"503342080","text":"import flask\r\nfrom flask import request, jsonify\r\nfrom google.cloud import bigquery\r\nfrom google.oauth2 import service_account\r\nimport pandas as pd\r\nimport os.path\r\nimport ssl\r\nimport json\r\nfrom s2 import s2\r\napp = flask.Flask(__name__)\r\napp.config[\"DEBUG\"] = True\r\n\r\ngcp_project = \"weighty-flag-307702\"\r\nclass Gcp_connect:\r\n def __init__(self):\r\n my_path = os.path.abspath(os.path.dirname(__file__))\r\n path = os.path.join(my_path, \"../Config/weighty-flag-307702-26125390e609.json\")\r\n print(path)\r\n #self.credentials = service_account.Credentials.from_service_account_file('Config\\weighty-flag-307702-26125390e609.json')\r\n self.credentials = service_account.Credentials.from_service_account_file(path)\r\n self.client = bigquery.Client(credentials=self.credentials, project=gcp_project)\r\n def get_client(self):\r\n return self.client\r\n\r\n#Get GCP Client\r\ndef get_gcp_client():\r\n return Gcp_connect().get_client()\r\n\r\ngcp_client=get_gcp_client()\r\n\r\n@app.route('/', methods=['GET'])\r\ndef home():\r\n return \"
<h1>Gojek Taxi API.</h1>
\"\r\n\r\n#\r\n@app.route('/total_trips', methods=['GET'])\r\ndef total_trips():\r\n query_parameters = request.args\r\n start_date= query_parameters.get('start')\r\n end_date = query_parameters.get('end')\r\n print(start_date)\r\n print(end_date)\r\n query_res = gcp_client.query(\"\"\"select date, total_trips from (\r\n SELECT CAST(DATE(trip_start_timestamp) as DATE) as date, count(*) as total_trips from\r\n `bigquery-public-data.chicago_taxi_trips.taxi_trips` where CAST(DATE(trip_start_timestamp) as DATE) between\r\n '\"\"\"+start_date+\"' and '\"+end_date+\"' group by CAST(DATE(trip_start_timestamp) as DATE))a order by date\"\"\")\r\n # to store results in dataframe\r\n results = [] # empty dataframe\r\n for row in query_res:\r\n results.append({\"date\":str(row.date),\"total_trips\":row.total_trips})\r\n return {\"data\": results}\r\n\r\n@app.route('/avg_speed_24hrs', methods=['GET'])\r\n\r\ndef avg_speed_24hrs():\r\n\r\n query_parameters = request.args\r\n date= query_parameters.get('date')\r\n\r\n query_res=gcp_client.query(\"\"\"select \r\n avg(trip_miles / (TIMESTAMP_DIFF(trip_end_timestamp, trip_start_timestamp, minute) / 60)) as average_speed\r\n from `bigquery-public-data.chicago_taxi_trips.taxi_trips` where\r\n abs(DATE_DIFF(DATE '\"\"\"+date+\"', CAST(DATE(trip_end_timestamp) AS DATE), DAY)) < 1 and \" \\\r\n \"trip_end_timestamp != trip_start_timestamp\"\"\")\r\n\r\n results = [] # empty dataframe\r\n for row in query_res:\r\n results.append({\"average_speed\":row.average_speed})\r\n return {\"data\": results}\r\n\r\n@app.route('/average_fare_heatmap', methods=['GET'])\r\n\r\ndef avg_fare_heatmap():\r\n query_parameters = request.args\r\n date= query_parameters.get('date')\r\n query_res = gcp_client.query(\"\"\"select pickup_location,avg(fare) as avg_fare from `bigquery-public-data.chicago_taxi_trips.taxi_trips`\r\n where pickup_location is not null and CAST(DATE(trip_start_timestamp) AS DATE) = '\"\"\"+date+\"' group by pickup_location\"\"\")\r\n\r\n results = [] # empty dataframe\r\n for row in query_res:\r\n results.append({\"s2id\":row.pickup_location,\"fare\":row.avg_fare})\r\n return {\"data\": results}\r\n\r\ndef calculate_s2id(point,radius ):\r\n latlong = s2.LatLngFromDegrees(point.Latitude, point.Longitude)\r\n s2Point = s2.PointFromLatLng(latlong)\r\n EarthRadiusInMeter=10\r\n angle = s2.Angle(radius / EarthRadiusInMeter)\r\n sphereCap = s2.CapFromCenterAngle(s2Point, angle)\r\n region = s2.Region(sphereCap)\r\n rc = s2.RegionCoverer(MaxLevel=16, MinLevel=16)\r\n cellUnion = rc.Covering(region)\r\n stringCellIDs=[]\r\n for cellID in cellUnion:\r\n stringCellIDs.append(int(cellID))\r\n return stringCellIDs\r\n\r\n@app.errorhandler(404)\r\ndef page_not_found(e):\r\n return \"
<h1>404</h1><p>The resource could not be found.</p>
\", 404\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","sub_path":"webapp/Source/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"575127088","text":"#-------------------------------------------------------------------------------\n# Name: each_compare.py\n# Purpose: This file has the same functions in deal_compare.py but eliminates\n# the functions that do not need after the table allCombDict_new.p is generated.\n#-------------------------------------------------------------------------------\n\n# These functions are the same as in deal_compare.py\ndef highcard(a):\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n tot=0\n for i in range(5):\n tot+=dic[al[i]]\n return tot\n\ndef flush(a):\n if a[0][1]==a[1][1]==a[2][1]==a[3][1]==a[4][1]:\n val=highcard(a)\n return val,True\n else:\n return 0,False\n\ndef straight(a):\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al==['2','3','4','5','A']:\n return 1,True\n elif al==['2','3','4','5','6']:\n return 2,True\n elif al==['3','4','5','6','7']:\n return 3,True\n elif al==['4','5','6','7','8']:\n return 4,True\n elif al==['5','6','7','8','9']:\n return 5,True\n elif al==['6','7','8','9','T']:\n return 6,True\n elif al==['7','8','9','J','T']:\n return 7,True\n elif al==['8','9','J','Q','T']:\n return 8,True\n elif al==['9','J','K','Q','T']:\n return 9,True\n elif al==['A','J','K','Q','T']:\n return 10,True\n else:\n return 0,False\n\ndef straightflush(a):\n fv,fb=flush(a)\n sv,sb=straight(a)\n if fb and sb:\n return sv,True\n else:\n return 0,False\n\ndef fourkind(a):\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al[0]==al[1]==al[2]==al[3]:\n return dic[al[0]],dic[al[4]],True\n elif al[1]==al[2]==al[3]==al[4]:\n return dic[al[1]],dic[al[0]],True\n else:\n return 0,0,False\n\ndef fullhouse(a):\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al[0]==al[1]==al[2] and al[3]==al[4]:\n return dic[al[0]],dic[al[3]],True\n elif al[0]==al[1] and al[2]==al[3]==al[4]:\n return dic[al[2]],dic[al[0]],True\n else:\n return 0,0,False\n\ndef threekind(a):\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al[0]==al[1]==al[2]:\n return dic[al[0]],dic[al[3]]+dic[al[4]],True\n elif al[1]==al[2]==al[3]:\n return dic[al[1]],dic[al[0]]+dic[al[4]],True\n elif al[2]==al[3]==al[4]:\n return dic[al[2]],dic[al[0]]+dic[al[1]],True\n else:\n return 0,0,False\n\ndef twopairs(a):\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al[0]==al[1] and al[2]==al[3]:\n return dic[al[0]]+dic[al[2]],dic[al[4]],True\n elif al[0]==al[1] and al[3]==al[4]:\n return dic[al[0]]+dic[al[3]],dic[al[2]],True\n elif al[1]==al[2] and al[3]==al[4]:\n return dic[al[1]]+dic[al[3]],dic[al[0]],True\n else:\n return 0,0,False\n\ndef onepair(a):\n dic={'2':0,'3':1,'4':2,'5':4,'6':8,'7':16,'8':32,'9':64,'T':128,'J':256,'Q':512,'K':1024,'A':2048}\n 
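Editorial aside on the taxi API record above: its endpoints interpolate request arguments straight into SQL strings. google-cloud-bigquery supports query parameters, which removes both the quoting fragility and the injection risk. A sketch of the total_trips query rewritten with parameters (client as constructed by Gcp_connect above):

from google.cloud import bigquery

def total_trips_between(client, start_date, end_date):
    sql = """
        SELECT DATE(trip_start_timestamp) AS date, COUNT(*) AS total_trips
        FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
        WHERE DATE(trip_start_timestamp) BETWEEN @start AND @end
        GROUP BY date
        ORDER BY date
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            # Values travel out-of-band, so user input never touches the SQL text.
            bigquery.ScalarQueryParameter("start", "DATE", start_date),
            bigquery.ScalarQueryParameter("end", "DATE", end_date),
        ]
    )
    return [{"date": str(row.date), "total_trips": row.total_trips}
            for row in client.query(sql, job_config=job_config)]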
al=sorted([a[0][0]]+[a[1][0]]+[a[2][0]]+[a[3][0]]+[a[4][0]])\n if al[0]==al[1]:\n return dic[al[0]],dic[al[2]]+dic[al[3]]+dic[al[4]],True\n elif al[1]==al[2]:\n return dic[al[1]],dic[al[0]]+dic[al[3]]+dic[al[4]],True\n elif al[2]==al[3]:\n return dic[al[2]],dic[al[0]]+dic[al[1]]+dic[al[4]],True\n elif al[3]==al[4]:\n return dic[al[3]],dic[al[0]]+dic[al[1]]+dic[al[2]],True\n else:\n return 0,0,False\n\n\n# In[1]:\n\ndef compare(a,b):\n sfva,sfba=straightflush(a)\n sfvb,sfbb=straightflush(b)\n if sfva or sfbb:\n if sfva>sfvb:\n return a,'straightflush',0\n elif sfva==sfvb:\n return a,'straightflush',1\n else:\n return b,'straightflush',0\n\n fourv4a,fourv1a,fourba=fourkind(a)\n fourv4b,fourv1b,fourbb=fourkind(b)\n if fourba and (not fourbb):\n return a,'four-of-a-kind',0\n elif (not fourba) and fourbb:\n return b,'four-of-a-kind',0\n elif fourba and fourbb:\n if fourv4a>fourv4b:\n return a,'four-of-a-kind',0\n elif fourv4afourv1b:\n return a,'four-of-a-kind',0\n elif fourv1a==fourv1b:\n return a,'four-of-a-kind',1\n else:\n return b,'four-of-a-kind',0\n\n fhousev4a,fhousev1a,fhouseba=fullhouse(a)\n fhousev4b,fhousev1b,fhousebb=fullhouse(b)\n if fhouseba and (not fhousebb):\n return a,'full-house',0\n elif (not fhouseba) and fhousebb:\n return b,'full-house',0\n elif fhouseba and fhousebb:\n if fhousev4a>fhousev4b:\n return a,'full-house',0\n elif fhousev4afhousev1b:\n return a,'full-house',0\n elif fhousev1a==fhousev1b:\n return a,'full-house',1\n else:\n return b,'full-house',0\n\n fva,fba=flush(a)\n fvb,fbb=flush(b)\n if fba or fbb:\n if fva>fvb:\n return a,'flush',0\n elif fva==fvb:\n return a,'flush',1\n else:\n return b,'flush',0\n\n sva,sba=straight(a)\n svb,sbb=straight(b)\n if sba or sbb:\n if sva>svb:\n return a,'straight',0\n elif sva==svb:\n return a,'straight',1\n else:\n return b,'straight',0\n\n tv3a,tvra,tba=threekind(a)\n tv3b,tvrb,tbb=threekind(b)\n if tba and (not tbb):\n return a,'three-of-a-kind',0\n elif (not tba) and tbb:\n return b,'three-of-a-kind',0\n elif tba and tbb:\n if tv3a>tv3b:\n return a,'three-of-a-kind',0\n elif tv3atvrb:\n return a,'three-of-a-kind',0\n elif tvra==tvrb:\n return a,'three-of-a-kind',1\n else:\n return b,'three-of-a-kind',0\n\n tuv2a,tuvra,tuba=twopairs(a)\n tuv2b,tuvrb,tubb=twopairs(b)\n if tuba and (not tubb):\n return a,'two-pairs',0\n elif (not tuba) and tubb:\n return b,'two-pairs',0\n elif tuba and tubb:\n if tuv2a>tuv2b:\n return a,'two-pairs',0\n elif tuv2atuvrb:\n return a,'two-pairs',0\n elif tuvra==tuvrb:\n return a,'two-pairs',1\n else:\n return b,'two-pairs',0\n\n onev1a,onevra,oneba=onepair(a)\n onev1b,onevrb,onebb=onepair(b)\n if oneba and (not onebb):\n return a,'one-pair',0\n elif (not oneba) and onebb:\n return b,'one-pair',0\n elif oneba and onebb:\n if onev1a>onev1b:\n return a,'one-pair',0\n elif onev1aonevrb:\n return a,'one-pair',0\n elif onevra==onevrb:\n return a,'one-pair',1\n else:\n return b,'one-pair',0\n\n va=highcard(a)\n vb=highcard(b)\n if va>vb:\n return a,'high-card',0\n elif va==vb:\n return a,'high-card',1\n else:\n return b,'high-card',0\n\n","sub_path":"each_compare.py","file_name":"each_compare.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"562231332","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 12 20:02:13 2018\n\n@author: jagtarsingh\n\"\"\"\n\n\n\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import 
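Editorial aside on the each_compare record above: it takes each hand as five rank+suit strings ('As' is the ace of spades, 'T' stands for ten), and compare returns the winning hand, its category name, and a tie flag (1 for a split pot). A quick illustration:

flush_hand    = ['2h', '5h', '9h', 'Jh', 'Kh']   # king-high flush
straight_hand = ['5c', '6d', '7h', '8s', '9c']   # nine-high straight

winner, category, tie = compare(flush_hand, straight_hand)
print(winner, category, tie)   # flush outranks straight: flush_hand, 'flush', 0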
Variable\n\nimport torchvision\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nimport torchvision.utils as vutils\n\nfrom random import randint\nfrom matplotlib import pyplot as plt\nfrom IPython.display import Image\nfrom IPython.core.display import Image, display\n\nget_ipython().run_line_magic('load_ext', 'autoreload')\nget_ipython().run_line_magic('autoreload', '2')\n\n\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ndevice\n\n\nbs = 32\n\n\n\n\n# Load Data\ndataset = datasets.ImageFolder(root='/Users/jagtarsingh/OneDrive/UPenn/CIS680/VAE/cufs', transform=transforms.Compose([\n    transforms.Resize(64),\n    transforms.ToTensor(), \n]))\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=bs, shuffle=False)\nlen(dataset.imgs), len(dataloader)\n\n\n\nfixed_x, _ = next(iter(dataloader))\nsave_image(fixed_x, 'real_image.png')\n\nImage('real_image.png')\n\n\n\nclass Flatten(nn.Module):\n    def forward(self, input):\n        return input.view(input.size(0), -1)\n\n\n\nclass UnFlatten(nn.Module):\n    def forward(self, input, size=1024):\n        return input.view(input.size(0), size, 1, 1)\n\n\n\nclass VAE(nn.Module):\n    def __init__(self, channels=3, h_dim=1024, latent_dim=64):\n        super(VAE, self).__init__()\n        self.encoder = nn.Sequential(\n            nn.Conv2d(channels, 32, kernel_size=4, stride=2),\n            nn.BatchNorm2d(32),\n            nn.ReLU(),\n            nn.Conv2d(32, 64, kernel_size=4, stride=2),\n            nn.BatchNorm2d(64),\n            nn.ReLU(),\n            nn.Conv2d(64, 128, kernel_size=4, stride=2),\n            nn.BatchNorm2d(128),\n            nn.ReLU(),\n            nn.Conv2d(128, 256, kernel_size=4, stride=2),\n            nn.BatchNorm2d(256),\n            nn.ReLU(),\n            Flatten()\n        )\n        \n        self.fc1 = nn.Linear(h_dim, latent_dim)\n#        self.fc2 = nn.Linear(h_dim, latent_dim)\n        self.fc3 = nn.Linear(latent_dim, h_dim)\n        \n        self.decoder = nn.Sequential(\n            UnFlatten(),\n            nn.ConvTranspose2d(h_dim, 128, kernel_size=5, stride=2),\n            nn.ReLU(),\n            nn.ConvTranspose2d(128, 64, kernel_size=5, stride=2),\n            nn.ReLU(),\n            nn.ConvTranspose2d(64, 32, kernel_size=6, stride=2),\n            nn.ReLU(),\n            nn.ConvTranspose2d(32, channels, kernel_size=6, stride=2),\n            nn.Sigmoid(),\n        )\n        \n        \n\n    def forward(self, x):\n        encoded_images = self.encoder(x)\n        latent_space = self.fc1(encoded_images)\n        \n        z = self.fc3(latent_space)\n        z = self.decoder(z)\n        return z\n    \n\n\n\n\nchannels = fixed_x.size(1)\nprint(channels)\n\n\n\nmodel = VAE(channels=channels).to(device)\n\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-3) \n\n\n\ndef loss_fn(recon_x, x):\n    Recon_loss = F.mse_loss(recon_x, x, reduction='sum')\n    return Recon_loss \n\n\n\nepochs = 100\n\n\n\nitera = 0\nloss_all = []\nfor epoch in range(epochs):\n    for idx, (images, _) in enumerate(dataloader):\n        if idx!=5:\n            \n            recon_images = model(images)\n            loss= loss_fn(recon_images, images)\n            batch_s = images.size(0)\n            \n            # loss = loss_fn(recon_images, images, mu, log_sig)\n            loss_all.append(loss.item()/batch_s)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n            itera+=1\n\n        \n        if idx == 4:\n            n = min(images.size(0), 8)\n            recon_images = model(images)\n            comparison = torch.cat([images[:n],\n                                    recon_images[:n]])\n            save_image(comparison.data.cpu(),\n                       './reconstructed_AE_train/reconstruction_' + str(epoch) + '.png', nrow=n)\n            print(\"Epoch[{}/{}] Loss: {:.3f}\".format(epoch+1, epochs, loss.item()/bs))\n        if idx == 5:\n            n = min(images.size(0), 8)\n            recon_images = model(images)\n            comparison = torch.cat([images[:n],\n                                    
recon_images[:n]])\n            save_image(comparison.data.cpu(),\n                       './reconstructed_AE_test/reconstruction_' + str(epoch) + '.png', nrow=n)\n            print(\"Epoch[{}/{}] Loss: {:.3f}\".format(epoch+1, epochs, loss.item()/bs))\n    \n    \n    \n\nplt.plot(loss_all)\nplt.xlabel('Iterations')\nplt.ylabel('Loss')\nplt.title('Loss Vs Iteration for CUFS dataset')\nplt.savefig('AE_loss.png')\ntorch.save(model.state_dict(), 'AE.torch')\n\n#recon_images= model(fixed_x)\n#comparison = torch.cat([fixed_x[:],recon_images[:]])\n#save_image(comparison.data.cpu(),\n#           './reconstructed_AE/reconstruction_last' + str(epoch) + '.png', nrow=n)\n\n","sub_path":"Autoencoder.py","file_name":"Autoencoder.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"152623837","text":"#!/bin/python3\nimport random\n\ndef printtable(map):\n    for y in range(len(map)):\n        line = ''\n        for x in range(len(map[y])):\n            if map[y][x] != 0:\n                line += str(map[y][x])\n            else:\n                line += ' '\n            line += ' '\n        print(line)\n    print('---------')\n\n\ndef generateMap(modules, dimension):\n    map = [[0 for x in range(dimension)] for y in range(dimension)]\n\n    i = 0\n    while i < len(modules):\n        if i == 0:\n            y = random.randint(1, len(map) - 1)\n            x = random.randint(0, len(map[y]) - 1)\n\n            map[y][x] = 1\n\n            # print(str(y) + ' ' + str(x))\n\n            i += 1\n            # printtable(map)\n            continue\n\n        while True:\n            while True:\n                y = random.randint(0, len(map) - 1)\n                x = random.randint(0, len(map[y]) - 1)\n\n                if map[y][x] != 0:\n                    break\n\n            if map[y][x] == 1:\n                available = [0, 0, -1, 0]\n            else:\n                available = [0, 0, 0, 0]\n\n            if available[0] == 0 and y > 0 and map[y - 1][x] == 0:\n                available[0] = 1\n\n            if available[1] == 0 and x < 4 and map[y][x + 1] == 0:\n                available[1] = 1\n\n            if available[2] == 0 and y < 4 and map[y + 1][x] == 0:\n                available[2] = 1\n\n            if available[3] == 0 and x > 0 and map[y][x - 1] == 0:\n                available[3] = 1\n\n            mod = -1\n\n            if 1 in available:\n                # if available.count(1) >= 2:\n                #     print(available.count(1));\n                break\n\n        while True:\n            mod = random.randint(0, 3)\n            if available[mod] == 1:\n                break\n\n        if mod == 0:\n            y -= 1\n        elif mod == 1:\n            x += 1\n        elif mod == 2:\n            y += 1\n        elif mod == 3:\n            x -= 1\n        map[y][x] = i + 1;\n\n        i += 1\n\n    rooms = {}\n\n    for y in range(len(map)):\n        line = ''\n        for x in range(len(map[y])):\n            if (map[y][x] == 0):\n                continue\n\n            room = {}\n\n            if y > 0 and map[y - 1][x] != 0:\n                room.update({'north': modules[map[y - 1][x] - 1]})\n            if x < len(map[y]) - 1 and map[y][x + 1] != 0:\n                room.update({'east': modules[map[y][x + 1] - 1]})\n            if y < len(map) - 1 and map[y + 1][x] != 0:\n                room.update({'south': modules[map[y + 1][x] - 1]})\n            if x > 0 and map[y][x - 1] != 0:\n                room.update({'west': modules[map[y][x - 1] - 1]})\n\n            rooms.update({modules[map[y][x] - 1]: room})\n\n    items = {\n        0: 'portion',\n        1: 'key',\n        2: 'monster'\n    }\n\n    i = 0;\n    while i < 3:\n        room = random.choice(list(rooms.keys()))\n\n        if 'item' in rooms[room] or room == 'Hall' or room == 'Garden':\n            continue\n\n        rooms[room].update({'item': items[i]})\n\n        i += 1\n\n    return rooms, map;\n","sub_path":"randomGen.py","file_name":"randomGen.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"190397065","text":"import task\n\n\nclass TaskList():\n\n    def __init__(self, name, task_counter=0):\n        \"\"\"Initializes class variables.\n        \n        self.task_counter counts the number of 
tasks in the list\n        self.task_list is the list where tasks are stored\n        self.name is the name of the specific list\n        \"\"\"\n        self.task_counter = task_counter\n        self.task_list = []\n        self.name = name\n\n    def create_task(self, description):\n        \"\"\"Adds a new task to the list task_list.\n        \n        named via the description passed in as an argument\n        prints the task name and the list name\n        \"\"\"\n        self.task_counter += 1\n        self.task_list.append(task.Task(self.task_counter, description, False))\n        print(\"You have added {} to {}!\".format(description, self.name))\n\n    def mark_done(self, task_id):\n        \"\"\"Marks a task as done based on the task_id argument.\n\n        calls the mark_done() function\n        returns a boolean\n        \"\"\"\n        for current_task in self.task_list:\n            if int(task_id) == current_task.task_id:\n                current_task.mark_done()\n                return True\n        return False\n\n    def __str__(self):\n        \"\"\"Prints all tasks in task_list as a string.\"\"\"\n        if self.task_counter == 0:\n            return \"There are no tasks!!!\"\n        task_print = \"\"\n        for task in self.task_list:\n            task_print += (str(task) + \"\\n\")\n        return task_print\n    \n\n\n    \n","sub_path":"task_list.py","file_name":"task_list.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"411169132","text":"import os\nimport datetime\nimport pandas as pd\n\n# for OSI-450 validation\nYEARS_OF_INTEREST = range(1972, 2016)\n# for OSI-401 validation\n# YEARS_OF_INTEREST = [1996]\nVALIDATION_ID = 'OSI450'\nCSV_HEADER = ['reference_time', 'run_time', 'total_bias', 'ice_bias',\n              'water_bias', 'total_stddev', 'ice_stddev', 'water_stddev',\n              'within_10pct', 'within_20pct']\n\nSTART_YEAR = min(YEARS_OF_INTEREST)\nEND_YEAR = max(YEARS_OF_INTEREST)\nif END_YEAR == START_YEAR:\n    END_YEAR += 1\n\nBASE_PATH = os.path.join(os.path.expanduser('~/'), 'validation', 'data')\nINPUT_DIR = os.path.join(BASE_PATH, 'input')\n# for OSI-409 validation\nOUTPUT_DIR = os.path.join(BASE_PATH, 'output')\n# for OSI-401 validation\n# OUTPUT_DIR = os.path.join(BASE_PATH, 'output', 'OSI-401-a')\nTMP_DIR = os.path.join(BASE_PATH, 'input', 'tmp')\n\nAREAS = 'etc/areas.cfg'\nDESCRIPTION = 'Comparison of NIC ice charts and OSI-450 products for {0}' \\\n' hemisphere'\nSHORT_DESCRIPTION = 'OSI450_validation_{0}_{1}'  # hemisphere, date\nPICKLED_DATA = 'OSI450_val_data.hdf5'\n\n# for OSI-409 validation\nMETNO_DOWNL = {\n    'protocol': 'ftp://',\n    'host': 'osisaf.met.no',\n    'remote_dir_f_pattern': 'reprocessed/ice/conc/v1p2/*/*/*ease*.nc.gz',\n    'remote_date_pattern': (r'\\d{12}', '%Y%m%d%H%M'),\n    'glob_file': os.path.join(TMP_DIR, 'metno_files.json')\n}\n\n# for OSI-401 validation\n# METNO_DOWNL = {\n#     'protocol': 'ftp://',\n#     'host': 'osisaf.met.no',\n#     'remote_dir_f_pattern': 'archive/ice/conc/*/*/*_polstere-100_multi_*.nc',\n#     'remote_date_pattern': (r'\\d{12}', '%Y%m%d%H%M'),\n#     'glob_file': os.path.join(TMP_DIR, 'metno_files.json')\n# }\n\n\nNIC_BIN_DOWNL = {\n    'protocol': 'ftp://',\n    'host': 'sidads.colorado.edu',\n    'remote_dir_f_pattern':\n        'pub/DATASETS/NOAA/G02172/weekly/nic_weekly_*_tot.v0.bin',\n    'remote_date_pattern': (r'\\d{4}_\\d{2}_\\d{2}', '%Y_%m_%d'),\n    'glob_file': os.path.join(TMP_DIR, 'nic_bin_files.json')\n}\n\nNIC_SIG_DOWNL = {\n    'protocol': 'http://',\n    'host': 'wdc.aari.ru',\n    'remote_dir_f_pattern': 'datasets/d0001/south/nic/*/*.sig',\n    'remote_date_pattern': (r'\\d{6}', '%Y%W'),\n    'glob_file': os.path.join(TMP_DIR, 'nic_sig_files.json'),\n}\n\nNIC_SHP_DOWNL = {\n    
'scrape': True,\n 'protocol': 'http://',\n 'host': 'www.natice.noaa.gov',\n 'remote_html_path': {\n 'nh':\n 'products/weekly_products.html?oldarea=Arctic&area=Arctic&'\n 'oldformat=Shapefiles&format=Shapefiles&month0=Jan&day0=01&'\n 'year0=2006&month1=Jan&day1=01&year1={0}&subareas='\n 'Hemispheric'.format(datetime.datetime.now().year + 1),\n 'sh':\n 'products/weekly_products.html?oldarea=Antarctic&area=Antarctic&'\n 'oldformat=Shapefiles&format=Shapefiles&month0=Jan&day0=01&'\n 'year0=2006&month1=Jan&day1=01&year1={0}&subareas='\n 'Hemispheric'.format(datetime.datetime.now().year + 1)\n },\n 'remote_file_pattern': {\n 'nh': 'pub/weekly/arctic/{0}/shapefiles/hemispheric/arctic{1}.zip',\n 'sh': 'pub/weekly/antarctic/{0}/shapefiles/hemispheric/antarc{1}.zip'\n },\n 'remote_link_pattern': {\n 'nh': r'href\\s?=\\s?\".*arctic\\d{6}.zip',\n 'sh': r'href\\s?=\\s?\".*antarc\\d{6}.zip',\n },\n 'remote_date_pattern': (r'\\d{6}', '%y%m%d'),\n 'glob_file': os.path.join(TMP_DIR, 'nic_shp_files.json')\n}\n\n# http://thredds.met.no/thredds/dodsC/osisaf/met.no/ice/conc/2016/09/ice_conc_sh_polstere-100_multi_201609211200.nc\nMETNO_THREDDS_DOWNL = {\n 'generate':\n pd.date_range('1/1/{0} 12:00'.format(START_YEAR),\n '1/1/{0} 12:00'.format(END_YEAR), freq='D'),\n 'protocol': 'http://',\n 'host': 'thredds.met.no',\n 'remote_dir_f_pattern':\n 'thredds/dodsC/metusers/sicci_shared/v2.0draftC/{0}/{1}/' \\\n 'ice_conc_{2}h_ease2-250_cdr-v2p0_{3}1200.nc',\n 'remote_date_pattern': (r'\\d{12}', '%Y%m%d%H%M'),\n 'glob_file': os.path.join(TMP_DIR, 'metno_thredds_files.json')\n}\n\n\nif not os.path.exists(INPUT_DIR):\n os.system('mkdir -p {0}'.format(INPUT_DIR))\nif not os.path.exists(OUTPUT_DIR):\n os.system('mkdir -p {0}'.format(OUTPUT_DIR))\nif not os.path.exists(TMP_DIR):\n os.system('mkdir -p {0}'.format(TMP_DIR))\n\ntry:\n import apt\nexcept Exception as e:\n print(\"No python-apt installed, I wont check for gdal-bin and lftp...\")\nelse:\n cache = apt.Cache()\n if not cache['gdal-bin'].is_installed:\n raise Exception('You have to have \"gdal-bin\" installed. Do \"apt-get '\n 'install gdal-bin\"!')\n if not cache['lftp'].is_installed:\n raise Exception('You have to have \"lftp\" installed. 
Do \"apt-get '\n                        'install lftp\"!')\n","sub_path":"trollvalidation/validations/ice_conc_configuration.py","file_name":"ice_conc_configuration.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"62346906","text":"# Transformer-Based Sentiment Analysis Utils\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\nfrom transformers import BertTokenizer\nfrom transformers import TFBertForSequenceClassification, TFTrainer, TFTrainingArguments, BertConfig\n\ndef load_distilbert_model(model_path='./tf_model.h5', config_path='./config.json'):\n    from transformers import DistilBertConfig\n    from transformers import TFDistilBertForSequenceClassification\n\n    config = DistilBertConfig.from_json_file(config_path)\n    model_reloaded = TFDistilBertForSequenceClassification.from_pretrained(model_path, config=config)\n    return model_reloaded\n\ndef read_data(csv_path, tweet_col='text', label_col=None, shuffle=True):\n    if label_col:\n        df = pd.read_csv(csv_path, usecols=[tweet_col, label_col])\n        if shuffle:\n            df = df.sample(frac=1)\n        X = df['text'].to_list()\n        y = df[label_col].to_list()\n    else:\n        df = pd.read_csv(csv_path, usecols=[tweet_col])\n        if shuffle:\n            df = df.sample(frac=1)\n        X = df['text'].to_list()\n        y=None\n\n    return df, X, y\n\ndef preprocess_data_distilbert(X, y=None, orig_checkpoint='distilbert-base-uncased'):\n    from transformers import DistilBertTokenizerFast\n    tokenizer = DistilBertTokenizerFast.from_pretrained(orig_checkpoint, num_labels=3)\n    encodings = tokenizer(X, truncation=True, padding=True)\n    print(len(encodings))\n    if not y:\n        y = np.zeros(len(X))\n    dataset = tf.data.Dataset.from_tensor_slices((dict(encodings), y))\n    dataset_batched = dataset.batch(16)\n\n    return dataset_batched\n\n","sub_path":"SentimentUtils.py","file_name":"SentimentUtils.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"368778307","text":"class Solution(object):\n    def evalRPN(self, tokens):\n        \"\"\"\n        :type tokens: List[str]\n        :rtype: int\n        \"\"\"\n        stack = []\n        operators = set(['+', '-', '*', '/'])\n        for token in tokens:\n            if token not in operators:\n                stack.append(int(token))\n                continue\n            a = stack.pop()\n            b = stack.pop()\n            if token == '+':\n                stack.append(a+b)\n            elif token == '-':\n                stack.append(b-a)\n            elif token == '*':\n                stack.append(b*a)\n            else:\n                # here take care of the case like \"1/-22\",\n                # in Python 2.x, it returns -1, while in \n                # Leetcode it should return 0\n                if b*a < 0 and b%a != 0:\n                    stack.append(b/a+1)\n                else:\n                    stack.append(b/a)\n        return stack.pop()\n","sub_path":"python_solutions/150-evaluate-reverse-polish-notation.py","file_name":"150-evaluate-reverse-polish-notation.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"653531054","text":"from flask import Blueprint, render_template, jsonify, request, abort\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom config import (\n    Session,\n)\n\n\nfrom models.models import Book\n\nquery = Blueprint('query', __name__)\n\n#Development tool to check whether a book exists in the database\n@query.route('/query', methods=['POST'])\ndef query_book():\n    j = request.get_json()\n    if 'title' not in j or 'author' not in j:\n        abort(400)\n    title = j['title']\n    auth = j['author']\n    session = Session()\n    b = session.query(Book).filter(Book.title == title, Book.author == 
auth).all()\n if len(b) > 0:\n return jsonify({'exists': True})\n return jsonify({'exists': False})\n\n@query.route('/add', methods=['POST'])\ndef add_book():\n j = request.get_json()\n if 'title' not in j or 'author' not in j or 'owner' not in j:\n abort(400)\n title = j['title']\n author = j['author']\n owner = j['owner']\n session = Session()\n b = Book(title=title, author=author, owner_id=owner)\n session.add(b)\n session.commit()\n return jsonify({'success': True})\n","sub_path":"src/api/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"68064822","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2020-09-17 17:21\n\n@author: a002028\n\n\"\"\"\nimport os\n\nimport numpy as np\n\nfrom bawsvis.session import Session\nfrom bawsvis.readers.text import np_txt_reader\nfrom bawsvis.plotting import PlotMap, PlotIceMap\nimport matplotlib.pyplot as plt\nimport cmocean\n\n\nif __name__ == \"__main__\":\n\n s = Session()\n\n lat = np_txt_reader('..\\\\proj\\\\havgem\\\\Johannes_Johansson\\\\N_FIX\\\\python_process_data\\\\lat_baws.txt')\n lon = np_txt_reader('..\\\\proj\\\\havgem\\\\Johannes_Johansson\\\\N_FIX\\\\python_process_data\\\\lon_baws.txt')\n\n # lat = np_txt_reader('E:\\\\Johannes_exjobb\\\\import_data\\\\lat_small.txt')\n # lon = np_txt_reader('E:\\\\Johannes_exjobb\\\\import_data\\\\lon_small.txt')\n\n # wd_data = 'E:\\\\Johannes_exjobb\\\\MODIS_data\\\\outdata\\\\monthly_seasonally_cummulative_and_FCA_data\\\\Cumulative\\\\annual\\\\Cumu_%s.txt'\n\n # for year in range(2019, 2021):\n # year = str(year)\n #\n # file = wd_data % year\n # data = np_txt_reader(file)\n #\n # map_frame = {'lat_min': 52., 'lat_max': 66.,\n # 'lon_min': 7., 'lon_max': 37.5}\n #\n # plot = PlotMap(data_mat=data.astype(float),\n # lat_mat=lat,\n # lon_mat=lon,\n # cbar_label='Number of bloom days',\n # cmap_step=5,\n # max_tick=20,\n # min_tick=0,\n # use_frame=True,\n # p_color=True,\n # map_frame=map_frame,\n # resolution='h',\n # fig_title='Cyanobacterial bloom %s' % year,\n # fig_name='aggregation_%s.png' % year,\n # save_fig=True,\n # clear_fig=True,\n # )\n #\n # plot._draw_map()\n # plot._draw_mesh(p_color=True)\n # plot._save_figure(''.join((s.setting.export_directory, 'aggregation_baws_modis_%s.png' % year)))\n data = np_txt_reader('C:\\\\Utveckling\\\\BAWS-vis\\\\bawsvis\\\\export\\\\modis_aggregation_2002-2020.txt')\n data = np.where(data==0, np.nan, data)\n data = data/19.\n\n # mask = np_txt_reader('...N_FIX\\\\Result\\\\MASK_BP_GoF_GoB.txt')\n\n # map_frame = {'lat_min': 52., 'lat_max': 66.,\n # 'lon_min': 7., 'lon_max': 37.5}\n\n map_frame = {'lat_min': 52.5, 'lat_max': 66.,\n 'lon_min': 9., 'lon_max': 36.8}\n\n plot = PlotIceMap(data_mat=data.astype(float),\n lat_mat=lat,\n lon_mat=lon,\n cbar_label='Average number of bloom days per year',\n cmap=cmocean.cm.haline,\n cmap_step=2,\n max_tick=10,\n min_tick=0,\n use_frame=True,\n p_color=True,\n map_frame=map_frame,\n resolution='f',\n fig_title='Cyanobacterial bloom 2002-2020',\n fig_name='aggregation_2002_2020_2.png',\n save_fig=True,\n clear_fig=True,\n )\n\n plot._draw_map()\n plot._draw_mesh(p_color=True)\n\n save_dir = r'..\\proj\\havgem\\Johannes_Johansson\\coclime_figures\\map'\n f_name = 'aggregation_2002_2020_v4'\n plt.savefig(os.path.join(save_dir, f_name) + '.png', format='png', dpi=500)\n # plt.savefig(os.path.join(save_dir, f_name) + '.eps', format='eps')\n plt.savefig(os.path.join(save_dir, f_name) + 
'.pdf', format='pdf')\n # plot._save_figure(''.join((s.setting.export_directory, 'aggregation_2002_2020_3.png')))\n","sub_path":"bawsvis/examples/baws_plot_map_modis.py","file_name":"baws_plot_map_modis.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"396970884","text":"class BST:\n def search(self, node, data):\n current = node\n while current:\n if data < current.data:\n current = current.left\n elif data > current.data:\n current = current.right\n elif data == current.data:\n return 1\n return 0\n\nclass Node:\n def __init__(self, value):\n self.left = None\n self.data = value\n self.right = None\n\nclass Tree:\n def createNode(self, data):\n return Node(data)\n \n def insert(self, node, data):\n if node is None:\n return self.createNode(data)\n else:\n if data < node.data:\n node.left = self.insert(node.left, data)\n else:\n node.right = self.insert(node.right, data)\n return node\n\n def traverseInorder(self, root):\n if root is not None:\n print(root.data, end= \" \")\n self.traverseInorder(root.left)\n self.traverseInorder(root.right)\nif __name__=='__main__':\n t=int(input())\n for i in range(t):\n n=int(input())\n arr = input().strip().split()\n root = None\n tree = Tree()\n root = tree.insert(root, int(arr[0]))\n for j in range(1, n):\n root = tree.insert(root, int(arr[j]))\n num = int(input())\n find = BST()\n if find.search(root, num):\n print(1)\n else:\n print(0)","sub_path":"Binary search tree/search_node_in_BST.py","file_name":"search_node_in_BST.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"274400385","text":"import os\nimport luigi\nimport bingads\nimport yaml\nimport logging\nfrom suds.client import Client\nfrom datetime import date, timedelta, datetime\n\nfrom bingads.service_client import ServiceClient\nfrom bingads.authorization import *\nfrom bingads.v12.reporting import *\n\nfrom luigi.contrib.s3 import S3Client, S3Target\n\nimport bing_client_helper\n\nTHIS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)))\nwith open(os.path.join(THIS_DIR, 'config.yml'), 'r') as yml_in:\n CONFIG = yaml.load(yml_in)\n\nREPORT_FILE_FORMAT = 'Csv'\n\nauthorization_data = None\n\nDATE = datetime.now().strftime(\"%Y_%m_%d\")\nFILE_NAME = \"BingAds_Report_{0}.csv\".format(DATE)\n\n\nclass PullCampaignReportsFromAPI(luigi.Task):\n\n def parse_report_and_append(self, input_file):\n file_in = open(input_file, \"r\")\n lines = file_in.readlines()\n\n row_count = lines[8][7:-3]\n rows = lines[10: 11 + int(row_count)]\n\n with self.output().open(\"r\") as mem_file:\n mem_lines = mem_file.read()\n\n with self.output().open(\"w\") as file_out:\n file_out.write(mem_lines)\n for line in rows[1:]:\n row = line.split(\",\")\n\n # Strip quotes from numeric fields\n for i in range(4, 10):\n row[i] = row[i][1:-1]\n\n file_out.write(','.join(row))\n\n file_in.close()\n os.remove(input_file)\n\n def get_campaign_performance_report_request(self, account_id, campaign_ids):\n \"\"\"\"\n Build a campaign performance report request, including Format, ReportName,\n Time, and Columns.\n \"\"\"\n reporting_service = ServiceClient(\n 'ReportingService',\n version=12,\n authorization_data=authorization_data,\n environment=CONFIG['api']['environment'],\n )\n\n report_request = reporting_service.factory.create('CampaignPerformanceReportRequest')\n report_request.Format = REPORT_FILE_FORMAT\n 
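The traverseInorder method in the binary-search-tree snippet above prints each node before visiting its children, which is actually a preorder walk; a true inorder traversal visits the left subtree first and therefore emits a BST's keys in ascending order. A minimal sketch using the same Node fields:

```python
def traverse_inorder(root):
    # left subtree, then the node itself, then right subtree:
    # for a binary search tree this prints the keys in sorted order
    if root is not None:
        traverse_inorder(root.left)
        print(root.data, end=" ")
        traverse_inorder(root.right)
```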
report_request.ReportName = 'My Campaign Performance Report'\n report_request.ReturnOnlyCompleteData = False\n report_request.Aggregation = 'Daily'\n report_request.Language = 'English'\n\n scope = reporting_service.factory.create('AccountThroughCampaignReportScope')\n if campaign_ids is None:\n scope.AccountIds = {'long': account_id}\n scope.Campaigns = None\n else:\n scope.AccountIds = None\n campaigns = reporting_service.factory.create('ArrayOfCampaignReportScope')\n for campaign_id in campaign_ids['long']:\n campaign_report_scope = reporting_service.factory.create('CampaignReportScope')\n campaign_report_scope.AccountId = authorization_data.account_id\n campaign_report_scope.CampaignId = campaign_id\n campaigns.CampaignReportScope.append(campaign_report_scope)\n scope.Campaigns = campaigns\n\n report_request.Scope = scope\n\n # You may either use a custom date range or predefined time.\n report_time = reporting_service.factory.create('ReportTime')\n report_time.PredefinedTime = 'Yesterday'\n report_time.ReportTimeZone = 'EasternTimeUSCanada'\n report_request.Time = report_time\n\n # Specify columns to include in campaign performance report\n # column names are pulled from config.yml file\n report_columns = reporting_service.factory.create('ArrayOfCampaignPerformanceReportColumn')\n column_list = []\n\n for field in CONFIG['output_report_fields']:\n column_list.append(field)\n\n report_columns.CampaignPerformanceReportColumn.append(column_list)\n report_request.Columns = report_columns\n\n return report_request\n\n def get_reports_for_accounts(self, accounts):\n global authorization_data\n\n with self.output().open(\"w\") as output_file:\n # Pull column header names from config.yml file\n column_list = []\n for field in CONFIG['output_report_fields']:\n column_list.append(field)\n\n # Write column headers to the BingAds Report file\n output_file.write(\", \".join(column_list))\n output_file.write('\\n')\n\n for account in accounts['AdvertiserAccount']:\n\n print(account.Name + ' ' + str(account.Id))\n raw_file_name_string = 'RAW_BingAds_' + str(account.Id) + '_' + DATE\n\n report_request = self.get_campaign_performance_report_request(account.Id, None)\n\n reporting_service_manager = ReportingServiceManager(\n authorization_data=authorization_data,\n poll_interval_in_milliseconds=5000,\n environment=CONFIG['api']['environment'],\n )\n\n staging_directory = os.getcwd()\n reporting_download_parameters = ReportingDownloadParameters(\n report_request=report_request,\n result_file_directory=staging_directory,\n result_file_name=raw_file_name_string,\n overwrite_result_file=True, # Set this value true if you want to overwrite the same file.\n timeout_in_milliseconds=3600000\n )\n\n result_file_path = reporting_service_manager.download_file(reporting_download_parameters)\n\n if result_file_path is not None:\n self.parse_report_and_append(raw_file_name_string)\n\n def run(self):\n # Pulls refresh token from 'refresh.txt'\n # If token doesnt exist, user is prompted to initiate manual OAuth flow\n global authorization_data\n authorization_data = bing_client_helper.authenticate_with_oauth()\n\n customer_service = ServiceClient(\n 'CustomerManagementService',\n version=12,\n authorization_data=authorization_data,\n environment=CONFIG['api']['environment'],\n )\n\n user = customer_service.GetUser(None).User\n account_list = bing_client_helper.search_accounts_by_user_id(user.Id)\n self.get_reports_for_accounts(account_list)\n\n def output(self):\n return luigi.LocalTarget(FILE_NAME)\n\n\nclass 
PushCampaignReportToS3(luigi.Task):\n s3_client = S3Client()\n bucket = luigi.Parameter()\n\n def requires(self):\n return PullCampaignReportsFromAPI()\n\n def run(self):\n self.s3_client.put(FILE_NAME, self.s3_key_path)\n\n def output(self):\n s3_uri_template = \"{bucket}/{prefix}/{filename}\"\n s3_prefix = CONFIG[\"s3\"][\"path\"]\n self.s3_key_path = s3_uri_template.format(bucket=self.bucket,\n prefix=s3_prefix,\n filename=FILE_NAME)\n\n return S3Target(self.s3_key_path, client=self.s3_client)\n\n\nif __name__ == '__main__':\n luigi.run(main_task_cls=PushCampaignReportToS3)\n\n","sub_path":"pull_daily_campaign_reports.py","file_name":"pull_daily_campaign_reports.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"417692556","text":"from django.conf.urls import (url, handler400, handler403, handler404, handler500 )\nfrom . import views\nfrom django.views.generic import TemplateView\n\nhandler400 = 'adminapp.views.bad_request'\nhandler403 = 'adminapp.views.permission_denied'\nhandler404 = 'adminapp.views.page_not_found'\nhandler500 = 'adminapp.views.server_error'\n\nurlpatterns = [\n\turl(r'^$', views.index, name='index'),\n\turl(r'^alumnos/$', views.alumnosxgrado, name='alumnosxgrado'),\n\turl(r'^alumnos/all/$', views.alumnos_sinmat, name='alumnos_sinmat'),\n\turl(r'^alumnos/grado/(?P\\d+)/$', views.all_students, name='all_students'),\n\turl(r'^alumnos/new/$', views.new_student, name='new_student'),\n\turl(r'^alumnos/new/add/$', views.new_student_add, name='new_student_add'),\n\turl(r'^alumnos/familiar/(?P\\d+)/all/$', views.all_familiar, name='all_familiar'),\n\turl(r'^alumnos/familiar/(?P\\d+)/$', views.new_familiar, name='new_familiar'),\n\turl(r'^alumnos/familiar/add/$', views.new_familiar_add, name='new_familiar_add'),\n\turl(r'^promotores/$', views.all_promotores, name='all_promotores'),\n\turl(r'^promotores/new/$', views.new_promotor, name='new_promotor'),\n\turl(r'^promotores/new/add/$', views.new_promotor_add, name='new_promotor_add'),\n\turl(r'^promotores/edit/(?P\\d+)/$', views.edit_promotores, name='edit_promotores'),\n\turl(r'^promotores/delete/(?P\\d+)/$', views.delete_promotor, name='delete_promotor'),\n\turl(r'^facilitador/$', views.all_facilitador, name='all_facilitador'),\n\turl(r'^facilitador/new/$', views.new_facilitador, name='new_facilitador'),\n\turl(r'^facilitador/new/add/$', views.new_facilitador_add, name='new_facilitador_add'),\n\turl(r'^facilitador/delete/(?P\\d+)/$', views.delete_facilitador, name='delete_facilitador'),\n\turl(r'^facilitador/edit/(?P\\d+)/$', views.edit_facilitador, name='edit_facilitador'),\n\turl(r'^grados/$', views.all_grados, name='all_grados'),\n\turl(r'^grados/new/$', views.new_grado, name='new_grado'),\n\turl(r'^grados/new/add/$', views.new_grado_add, name='new_grado_add'),\n\turl(r'^grados/delete/(?P\\d+)/$', views.delete_grado, name='delete_grado'),\n\turl(r'^grados/edit/(?P\\d+)/$', views.edit_grado, name='edit_grado'),\n\turl(r'^centros/$', views.all_centros, name='all_centros'),\n\t\n\turl(r'^enroll/new/$', views.new_enroll, name='new_enroll'),\n\turl(r'^enroll/massive/$', views.enroll_massive, name='enroll_massive'),\n\turl(r'^enroll/massive/add/$', views.enroll_massive_add, name='enroll_massive_add'),\n\turl(r'^enroll/all/$', views.all_enroll, name='all_enroll'),\n\turl(r'^enroll/new/add/$', views.new_enroll_add, name='new_enroll_add'),\n\turl(r'^enroll/new/alumno/(?P\\d+)/$', views.matricularxalumno, 
name='matricularxalumno'),\n\turl(r'^enroll/new/alumno/add/$', views.matricularxalumno_add, name='matricularxalumno_add'),\n\turl(r'^reports/$', views.all_reports, name='all_reports'),\n\turl(r'^reports/graphics$', views.all_graphics, name='all_graphics'),\n\turl(r'^reports/fisico/$', views.reportes ,name=\"reportes\"),\n\turl(r'^notas/$', views.notas ,name=\"notas\"),\n\turl(r'^descargas/$', views.descargas ,name=\"descargas\"),\n\turl(r'^tombola/$', views.tombola ,name=\"tombola\"),\n\n]","sub_path":"adminapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"466178271","text":"'''\r\nНабор общих юнитов\r\n@author: Kosh\r\n'''\r\n\r\nimport random\r\nimport numpy as np\r\n\r\nfrom streampy.units.base.pooled import Pool, Worker as Base\r\nfrom numpy import uint8\r\nclass Worker(Base):\r\n '''\r\n Добавляем шум с яркостью, немного уводим цвет\r\n '''\r\n def process(self, inData, inMeta):\r\n config = self.config\r\n image = inData['image']\r\n size = (image.shape[0], image.shape[1])\r\n \r\n noiseRange = random.randint(1, int(float(config.get('noise', 0.1)) * 255))\r\n \r\n temp = np.random.randint(low = -noiseRange, high = noiseRange, \r\n size = (image.shape[0], image.shape[1], 3), \r\n dtype = 'int16')\r\n temp += image\r\n \r\n bias = int(float(config.get('rgbBias', 0.1))*255)\r\n bias2 = int(float(config.get('brightnessBias', 0.1))*255)\r\n bias2 = random.randint(-bias2, bias2)\r\n \r\n rgbBias = [random.randint(-bias, bias) + bias2,\r\n random.randint(-bias, bias) + bias2,\r\n random.randint(-bias, bias) + bias2]\r\n \r\n temp += rgbBias\r\n \r\n image = temp\r\n \r\n lessThen0 = image < 0\r\n moreThen255 = image > 255\r\n image[lessThen0] = 0\r\n image[moreThen255] = 255\r\n\r\n return [{'image':image.astype(uint8)}]\r\n \r\n","sub_path":"streampy/units/images/colorAugmentImage.py","file_name":"colorAugmentImage.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"595480011","text":"\"\"\"\nThis is a script for collecting truth catalog from cosmos deep debiasing run\n#TODO: \n1. filenames here need to be changed for a better structure. \n2. 
the data products here needs more description \n\"\"\"\nimport sys\nsys.path.append(\"../\")\nfrom filesystem import LegacySimData \nimport subprocess\nfrom glob import glob\nimport astropy.io.fits as fits\nfrom astropy.table import Table\nfrom SurveySource import BaseSource\nimport os\nimport numpy as np\nimport glob \nimport os\nfrom astropy.table import vstack,Table\nfrom SurveySource import BaseSource\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n#keys to be collected to generate sweep files\nsweep_keys= ['BRICKNAME','RA','DEC','TYPE','OBJID','EBV','FLUX_G','FLUX_R','FLUX_Z','FLUX_W1','FLUX_W2','FLUX_IVAR_G','FLUX_IVAR_R','FLUX_IVAR_Z','FLUX_IVAR_W1','FLUX_IVAR_W2','MW_TRANSMISSION_G','MW_TRANSMISSION_R','MW_TRANSMISSION_Z','MW_TRANSMISSION_W1','MW_TRANSMISSION_W2','NOBS_G','NOBS_R','NOBS_Z','NOBS_W1','NOBS_W2','SHAPE_R','SHAPE_E1','SHAPE_E2','FIBERFLUX_G','FIBERFLUX_R','FIBERFLUX_Z','MASKBITS','SERSIC','DCHISQ','PSFSIZE_G','PSFSIZE_R','PSFSIZE_Z','PSFDEPTH_G','PSFDEPTH_R','PSFDEPTH_Z','GALDEPTH_G','GALDEPTH_R','GALDEPTH_Z','WISEMASK_W1','WISEMASK_W2','ANYMASK_G','ANYMASK_R','ANYMASK_Z','BX','BY','GAIA_PHOT_G_MEAN_MAG','FIBERTOTFLUX_Z']\n\nclass CosmosDeep(object):\n def __init__(self):\n self.origin_outdir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr9.1.1/'\n self.dr9_outdir = '/global/project/projectdirs/cosmo/work/legacysurvey/dr9/south/'\n self.survey_dir = self.dr9_outdir\n self.outdir = self.origin_outdir\n self.savedir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/cosmos_deep/'\n self.bricklist = self._get_bricklist()\n self.ccdnum_fn = os.path.join(self.savedir,'ccd_num.fits')\n self.cut_bricklist = np.loadtxt(os.path.join(self.savedir,'bricklist_cutted.txt'),dtype = np.str)\n def _get_bricklist(self,write=True):\n #get list of deep bricks by finding bricknames in coadd files\n if os.path.isfile(os.path.join(self.savedir,'bricklist.txt')):\n bricklist = np.loadtxt(os.path.join(self.savedir,'bricklist.txt'),dtype = np.str)\n return bricklist\n fns = glob.glob(os.path.join(self.outdir, 'coadd','*','*'))\n bricklist = []\n for fn in fns:\n bricklist.append(os.path.basename(fn))\n bricklist = np.array(bricklist, dtype = np.str)\n if write:\n np.savetxt(os.path.join(self.savedir,'bricklist.txt'), bricklist, fmt=\"%s\")\n return bricklist\n def get_ccd_num(self):\n ccd1 = []\n ccd2 = []\n depthz1=[]\n depthz2=[]\n for brickname in self.bricklist:\n ccd_fn = os.path.join(self.origin_outdir, 'coadd', brickname[:3], brickname, 'legacysurvey-%s-ccds.fits'%brickname)\n depthz_fn = os.path.join(self.origin_outdir, 'coadd', brickname[:3], brickname, 'legacysurvey-%s-depth-z.fits.fz'%brickname)\n ccd_num1 = len(fits.getdata(ccd_fn))\n depthz_median = np.median(fits.getdata(depthz_fn).ravel())\n \n ccd_fn_dr9 = os.path.join(self.dr9_outdir, 'coadd', brickname[:3], brickname, 'legacysurvey-%s-ccds.fits'%brickname)\n depthz_fn_dr9 = os.path.join(self.dr9_outdir, 'coadd', brickname[:3], brickname, 'legacysurvey-%s-depth-z.fits.fz'%brickname)\n ccd_num2 = len(fits.getdata(ccd_fn_dr9))\n depthz_dr9_median = np.median(fits.getdata(depthz_fn_dr9).ravel())\n \n ccd1.append(ccd_num1)\n ccd2.append(ccd_num2)\n depthz1.append(depthz_median)\n depthz2.append(depthz_dr9_median)\n ccd1 = np.array(ccd1)\n ccd2 = np.array(ccd2)\n depthz1 = np.array(depthz1)\n depthz2 = np.array(depthz2)\n T = Table()\n T['brickname'] = self.bricklist\n T['ccd_deep'] = ccd1\n T['ccd_dr9'] = ccd2\n T['depthz_deep'] = depthz1\n T['depthz_dr9'] = depthz2\n 
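The table assembled in get_ccd_num is what cut_bricks filters on; a minimal sketch of reading it back and applying the same median-z-depth cut that run() uses (the threshold of 300 comes from run(), the file path is illustrative):

```python
from astropy.table import Table

t = Table.read('ccd_num.fits')   # per-brick CCD counts and median depths
keep = t['depthz_deep'] > 300    # rough galdepth_z cut, as in run()
print('%d -> %d bricks kept' % (len(t), keep.sum()))
```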
T.write(os.path.join(self.savedir,'ccd_num.fits'), overwrite = True)\n    def cut_bricks(self, scale = None, depthz_cut = None, write=True):\n        #cut the bricks that do not have enough ccds, so deep_ccd_num>dr9_ccd_num*scale, or use the median galdepth cut; this is a rough cut, I use galdepth_z>300\n        t = fits.getdata(self.ccdnum_fn)\n        print(t['ccd_dr9'].max(),t['ccd_dr9'].min())\n        if scale is not None:\n            sel = t['ccd_deep']>scale*t['ccd_dr9']\n        else:\n            sel = t['depthz_deep']>depthz_cut\n        print('total bricks: %d, after cut: %d'%(len(sel), sel.sum()))\n        if write:\n            np.savetxt(os.path.join(self.savedir,'bricklist_cutted.txt'),t[sel]['brickname'], fmt=\"%s\")\n        self.cut_bricklist = t[sel]['brickname']\n    def get_cosmos_repeats_lists(self):\n        #return a list of cosmos repeats bricks\n        self.reference_outdir = '/global/cscratch1/sd/dstn/dr9-cosmos-subs/'\n        fns = glob.glob(os.path.join(self.reference_outdir,'80','coadd','*','*'))\n        bricklist = []\n        for fn in fns:\n            bricklist.append(os.path.basename(fn))\n        self.repeat_bricklist = bricklist\n        return bricklist\n    def make_truth(self,TYPE='deep'):\n        #make truth inputs from cosmos deep region\n        #deep is collecting deep data, dr9 is corresponding dr9 data, both using self.cut_bricklist defined previously \n        assert(TYPE in [\"deep\",\"dr9\"])\n        if TYPE == \"deep\":\n            output_fn = \"truth.fits\"\n        if TYPE == \"dr9\":\n            output_fn = \"dr9_mirror.fits\"\n        tab = None\n        for brickname in self.cut_bricklist:\n            print(brickname)\n            if TYPE == \"deep\":\n                self.catalog = LegacySimData(survey_dir=self.survey_dir, outdir=self.outdir, brick=brickname)\n            elif TYPE == \"dr9\":\n                self.catalog = LegacySimData(survey_dir=self.survey_dir, outdir=self.dr9_outdir, brick=brickname)\n            else:\n                raise\n            tractor_fn = self.catalog.find_file('tractor')\n            dat_i = Table.read(tractor_fn)\n            tab_i = Table()\n            for key in sweep_keys:\n                tab_i[key.lower()] = dat_i[key.lower()]\n            if tab is None:\n                tab = tab_i\n            else:\n                tab = vstack((tab, tab_i))\n        tab.write(self.savedir+output_fn,overwrite=True)\n        print(\"saved %s\"%(self.savedir+output_fn))\n    def add_cards(self, filetype, obj):\n        #only add it to dr9 since w1 in deep does not work\n        assert(filetype in [\"cosmos_deep_dr9\",\"cosmos_deep\"])\n        assert(obj in [\"LRG_sv3_like\",\"LRG_sv3\"])\n        catalog_i = BaseSource(filetype=filetype, survey_dir=self.survey_dir, outdir=self.outdir,force_construct=True)\n        card = catalog_i.target_selection(obj)\n        t_truth = Table.read(catalog_i.source_fn)\n        t_truth[obj]=card\n        t_truth.write(catalog_i.source_fn, overwrite=True)\n        print(catalog_i.source_fn)\n    def match(self, filetype1, filetype2):\n        if filetype1 == \"cosmos_deep\" and filetype2 == \"cosmos_deep_dr9\":\n            #match dr9 to truth\n            prefix = \"dr9\"\n        elif filetype1 == \"cosmos_deep_dr9\" and filetype2 == \"cosmos_deep\":\n            #match truth to dr9\n            prefix = \"truth\"\n        catalog_1 = BaseSource(filetype=filetype1, survey_dir=self.survey_dir, outdir=self.outdir,force_construct=True)\n        catalog_2 = BaseSource(filetype=filetype2, survey_dir=self.survey_dir, outdir=self.outdir,force_construct=True)\n        cat1, cat2, matched = catalog_1.match_catalog(catalog_1.source, catalog_2.source)\n        T = Table()\n        T['matched'] = matched\n        input_sweep_keys = sweep_keys.copy()\n        input_sweep_keys.extend(['LRG_sv3_like','LRG_sv3'])\n        for key in input_sweep_keys:\n            T[key.lower()] = cat1[key.lower()]\n            T[\"%s_%s\"%(prefix, key.lower())] = cat2[key.lower()]\n        names = catalog_1.source.columns.names\n        for name in names:\n            if name not in input_sweep_keys:\n                T[name.lower()] = cat1[name.lower()]\n        
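The match step above, like truth_match_to_cosmos below, boils down to astropy's nearest-neighbour sky matching with a one-arcsecond tolerance; a self-contained sketch of that pattern (the coordinates are made up):

```python
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

c1 = SkyCoord(ra=np.array([150.10, 150.20]) * u.degree,
              dec=np.array([2.10, 2.20]) * u.degree)
c2 = SkyCoord(ra=np.array([150.20, 150.10]) * u.degree,
              dec=np.array([2.20, 2.10]) * u.degree)

# For each c1 entry: index of nearest c2 entry and its angular separation.
idx, d2d, _ = c1.match_to_catalog_sky(c2)
matched = d2d.value <= 1. / 3600  # within 1 arcsec
```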
T.write(catalog_1.source_fn,overwrite=True)\n    def truth_match_to_cosmos(self):\n        bricknames = self.get_cosmos_repeats_lists()\n        catalog_1 = BaseSource(filetype='cosmos_deep', survey_dir=self.survey_dir, outdir=self.outdir,force_construct=True)\n        cat1_all = catalog_1.source\n        sels = np.zeros(len(cat1_all),dtype=np.bool)\n        \n        for brickname in bricknames:\n            sel = (cat1_all['brickname']==brickname)\n            sels+=sel\n        cat1 = cat1_all[sels]\n        tot_sets = np.arange(0,10)\n        topdir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/cosmos_subsets/cosmos_all_stacked/'\n        T = Table()\n        for one_set in tot_sets:\n            print(one_set)\n            cat2 = fits.getdata('/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/cosmos_subsets/cosmos_all_stacked/cosmos_set%d.fits'%one_set)\n            c1 = SkyCoord(ra=cat1['ra']*u.degree, dec=cat1['dec']*u.degree)\n            c2 = SkyCoord(ra=cat2['ra']*u.degree, dec=cat2['dec']*u.degree)\n            idx, d2d, d3d = c1.match_to_catalog_sky(c2)\n            matched = d2d.value <= 1./3600\n            distance = d2d.value\n            cat2 = cat2[idx]\n            T['set_%d_matched'%one_set] = matched\n            T['set_%d_sv3_lrg'%one_set] = cat2['set_%d_lrg_sv3'%one_set]\n            T['set_%d_sv3_elg_hip'%one_set] = cat2['set_%d_elg_sv3_hip'%one_set]\n            for key in sweep_keys:\n                T[key.lower()] = cat1[key.lower()]\n                T[\"set%d_%s\"%(one_set, key.lower())] = cat2[key.lower()]\n        T['lrg_sv3'] = cat1['lrg_sv3']\n        T['lrg_sv3_like'] = cat1['lrg_sv3_like']\n        T.write(self.savedir+\"/truth_cosmos_repeats.fits\",overwrite=True)\n    def run(self):\n        cd = self\n        \n        cd.get_ccd_num()\n        cd.cut_bricks(depthz_cut = 300,write=True)\n        print(\"make_truth1\")\n        cd.make_truth(TYPE = \"deep\")\n        print(\"make_truth2\")\n        cd.make_truth(TYPE=\"dr9\")\n        print(\"add_cards1\")\n        cd.add_cards(filetype=\"cosmos_deep_dr9\",obj='LRG_sv3_like')\n        print(\"add_cards2\")\n        cd.add_cards(filetype=\"cosmos_deep_dr9\",obj='LRG_sv3')\n        print(\"add_cards3\")\n        cd.add_cards(filetype=\"cosmos_deep\",obj='LRG_sv3_like')\n        print(\"add_cards4\")\n        cd.add_cards(filetype=\"cosmos_deep\",obj='LRG_sv3')\n        print(\"match1\")\n        cd.match(filetype1 = \"cosmos_deep\", filetype2=\"cosmos_deep_dr9\")\n        print(\"match2\")\n        cd.match(filetype1 = \"cosmos_deep_dr9\", filetype2=\"cosmos_deep\")\n        print(\"truth_match_to_cosmos\")\n        cd.truth_match_to_cosmos()\n    def split(self, filetype, topdir, target):\n        #split the files into per brick file needed for obiwan run\n        assert(filetype in [\"cosmos_deep_dr9\",\"cosmos_deep\"])\n        assert(target in [\"LRG_sv3_like\",\"LRG_sv3\"])\n        catalog_i = BaseSource(filetype=filetype, survey_dir=self.survey_dir, outdir=self.outdir,force_construct=True)\n        source = catalog_i.source\n        ids = np.arange(len(source))\n        for brickname in self.cut_bricklist:\n            \n            print(brickname)\n            sel = (source['brickname']==brickname)&(source[target])\n            print(sel.sum())\n            T = Table()\n            T['ra'] = source[sel]['ra']\n            T['dec'] = source[sel]['dec']\n            T['e1'] = source[sel]['shape_e1']\n            T['e2'] = source[sel]['shape_e2']\n            T['n'] = source[sel]['sersic']\n            #some g band is nan, set it to a high mag\n            T['g'] = 22.5 - 2.5*np.log10(source[sel]['flux_g']/source[sel]['mw_transmission_g'])\n            idx = np.where(source[sel]['flux_g']<=0)\n            T['g'][idx] = 30\n            T['r'] = 22.5 - 2.5*np.log10(source[sel]['flux_r']/source[sel]['mw_transmission_r'])\n            T['z'] = 22.5 - 2.5*np.log10(source[sel]['flux_z']/source[sel]['mw_transmission_z'])\n            T['w1'] = 22.5 - 2.5*np.log10(source[sel]['flux_w1']/source[sel]['mw_transmission_w1'])\n            T['w2'] = np.clip(22.5 - 2.5*np.log10(source[sel]['flux_w2']/source[sel]['mw_transmission_w2']), 0, 30)\n            T['rhalf'] = 
source[sel]['shape_r']\n            T['id'] = ids[sel]\n            T.write(topdir+'/brick_%s.fits'%brickname,overwrite=True)\n            \n    def split_elgs(self, topdir):\n        fn = \"/global/cscratch1/sd/adematti/legacysim/dr9/cosmos/merged/truth_ELG_HIP.fits\"\n        source = fits.getdata(fn)\n        ids = np.arange(len(source))\n        for brickname in self.cut_bricklist:\n            \n            print(brickname)\n            sel = (source['brickname']==brickname)\n            print(sel.sum())\n            T = Table()\n            T['ra'] = source[sel]['ra']\n            T['dec'] = source[sel]['dec']\n            T['e1'] = source[sel]['shape_e1']\n            T['e2'] = source[sel]['shape_e2']\n            T['n'] = source[sel]['sersic']\n            T['g'] = 22.5 - 2.5*np.log10(source[sel]['flux_g']/source[sel]['mw_transmission_g'])\n            T['r'] = 22.5 - 2.5*np.log10(source[sel]['flux_r']/source[sel]['mw_transmission_r'])\n            T['z'] = 22.5 - 2.5*np.log10(source[sel]['flux_z']/source[sel]['mw_transmission_z'])\n            T['w1'] = 22.5*np.ones(sel.sum())\n            T['w2'] = 22.5*np.ones(sel.sum())\n            T['rhalf'] = source[sel]['shape_r']\n            T['id'] = ids[sel]\n            T.write(topdir+'/brick_%s.fits'%brickname,overwrite=True)\n    def collect_tracers(self, tracer):\n        assert(tracer in ['elg','lrg'])\n        all_fn = \"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/output/rs0/tractor/*/*\"\n        all_fns = glob.glob(all_fn)\n        elg_fns = []\n        lrg_fns = []\n        for fn in all_fns:\n            if 'elg' in fn:\n                elg_fns.append(fn)\n            else:\n                lrg_fns.append(fn)\n        if tracer == 'elg':\n            fns = elg_fns\n        else:\n            fns = lrg_fns\n        \n        samp = None\n        for fn in fns:\n            tt = fits.getdata(fn)\n            if 'current_gflux' in tt.columns.names:\n                tracer_i = Table.read(fn)\n                if samp is None:\n                    samp = tracer_i\n                else:\n                    samp = vstack((samp,tracer_i))\n        samp['bad'] = np.zeros(len(samp),dtype = np.bool)\n        sel = (samp['n']==-999)\n        samp['bad'][sel] = np.ones(sel.sum(),dtype = np.bool)\n        samp['n'] = np.clip(samp['n'],0.2,7)\n        samp.write(\"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_v2.fits\"%tracer,overwrite = True)\n        print(\"written /global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_v2.fits\"%tracer)\n        \n        if tracer=='elg':\n            topdir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/divided_randoms_elg/'\n        else:\n            topdir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/divided_randoms/'\n        final = None\n        for fn in fns:\n            tracer_i = Table.read(fn)\n            brickname = fn[-13:-5]\n            randoms_i = Table.read(topdir+\"brick_%s.fits\"%brickname)\n            assert(len(tracer_i)==len(randoms_i))\n            if len(tracer_i)>0:\n                tracer_i['bad'] = np.zeros(len(tracer_i),dtype = np.bool)\n                sel = (tracer_i['n']==-999)\n                tracer_i['bad'][sel] = np.ones(sel.sum(),dtype = np.bool)\n                sel = (~tracer_i['fitted'])|(tracer_i['bad'])\n                randoms_i['resampled_e1'] = tracer_i['e1']\n                randoms_i['resampled_e1'][sel] = randoms_i['e1'][sel]\n                \n                randoms_i['resampled_e2'] = tracer_i['e2']\n                randoms_i['resampled_e2'][sel] = randoms_i['e2'][sel]\n                \n                randoms_i['resampled_rhalf'] = tracer_i['rhalf']\n                randoms_i['resampled_rhalf'][sel] = randoms_i['rhalf'][sel]\n                \n                randoms_i['resampled_n'] = tracer_i['n']\n                randoms_i['resampled_n'][sel] = randoms_i['n'][sel]\n                \n                randoms_i['brickname'] = np.array([brickname]*len(randoms_i),dtype=np.str)\n                \n                randoms_i['bad'] = tracer_i['bad']\n                \n                \n                if final is None:\n                    final = randoms_i\n                else:\n                    final = vstack((final,randoms_i))\n        final.write(\"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_final_v2.fits\"%tracer,overwrite=True)\n        print(\"written /global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_final_v2.fits\"%tracer)\n        \n        if tracer == 'lrg':\n            seed_fn = 
'/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/cosmos_deep/truth.fits'\n if tracer == 'elg':\n seed_fn = \"/global/cscratch1/sd/adematti/legacysim/dr9/cosmos/merged/truth_ELG_HIP.fits\"\n \n fn1 = \"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_final.fits\"%tracer\n fn2 = seed_fn\n dat1 = fits.getdata(fn1)\n dat2 = fits.getdata(fn2)\n print(\"bad: %d\"%dat1['bad'].sum())\n from astropy.coordinates import SkyCoord\n from astropy import units as u\n c2 = SkyCoord(ra=dat1['ra']*u.degree, dec=dat1['dec']*u.degree)\n c1 = SkyCoord(ra=np.array(dat2['ra'])*u.degree, dec=np.array(dat2['dec'])*u.degree)\n idx, d2d, d3d = c1.match_to_catalog_sky(c2)\n matched = (d2d.value<1./3600)\n truth = Table.read(fn2)\n truth['matched'] = matched\n truth['resampled_e1'] = dat1['resampled_e1'][idx]\n truth['resampled_e2'] = dat1['resampled_e2'][idx]\n truth['resampled_rhalf'] = dat1['resampled_rhalf'][idx]\n truth['resampled_n'] = dat1['resampled_n'][idx]\n truth['bad'] = dat1['bad'][idx]\n\n truth.write(\"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_truth_v2.fits\"%tracer,overwrite = True)\n print(\"written /global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/%s_truth_v2.fits\"%tracer)\n \n def make_seed(self):\n fn = \"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/lrg_truth.fits\"\n dat = fits.getdata(fn)\n sel = dat['LRG_sv3_like']\n print(\"matched: %d/%d\"%((dat['matched']&sel).sum(),sel.sum()))\n sel = dat['matched']&dat['LRG_sv3_like']&(dat['galdepth_z']>1000)&(~dat['bad'])\n T = Table()\n T['ra'] = dat[sel]['ra']\n T['dec'] = dat[sel]['dec']\n gmag = 22.5 - 2.5*np.log10(dat[sel]['flux_g']/dat[sel]['mw_transmission_g'])\n g_sel = ~((gmag>0)&(gmag<30))\n gmag[g_sel] = 30\n rmag = 22.5 - 2.5*np.log10(dat[sel]['flux_r']/dat[sel]['mw_transmission_r'])\n zmag = 22.5 - 2.5*np.log10(dat[sel]['flux_z']/dat[sel]['mw_transmission_z'])\n w1mag = 22.5 - 2.5*np.log10(dat[sel]['flux_w1']/dat[sel]['mw_transmission_w1'])\n w2mag = 22.5 - 2.5*np.log10(dat[sel]['flux_w2']/dat[sel]['mw_transmission_w2'])\n T['g'] = gmag\n T['r'] = rmag\n T['z'] = zmag\n T['w1'] = w1mag\n T['w2'] = w2mag\n T['e1'] = dat[sel]['resampled_e1']\n T['e2'] = dat[sel]['resampled_e2']\n T['n'] = np.clip(dat[sel]['resampled_n'],0.2,7)\n T['rhalf'] = dat[sel]['resampled_rhalf']\n T['id_sample'] = np.arange(sel.sum())\n T.write(\"/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/cosmos_deep/seed.fits\",overwrite=True)\n \n \n \n \n \n \nif __name__ == '__main__':\n cd = CosmosDeep()\n cd.run()\n topdir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/divided_randoms/'\n cd.split(filetype = \"cosmos_deep\", topdir=topdir, target=\"LRG_sv3_like\")\n topdir = '/global/cscratch1/sd/huikong/Obiwan/dr9_LRG/obiwan_out/sim_deep/divided_randoms_elg/'\n cd.split_elgs(topdir)\n cd.collect_tracers('lrg')\n cd.make_seed()\n \n \n \n","sub_path":"bin/cosmos_deep.py","file_name":"cosmos_deep.py","file_ext":"py","file_size_in_byte":20020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"131403172","text":"salutation=\"Mr.\"\nname=\"George\"\n\nname=salutation + name\nprint(\"The name is\",name)\n\nnumber=\"100\"\nprint(number.isdigit())\n\nsongname=\"Hello.mp3\"\nif songname.endswith(\".mp3\"):\n print(\"We can play this audio file\")\nelse:\n print(\"Invalid audio format\")\n\n#Strings have so many built in 
functions","sub_path":"Session8D.py","file_name":"Session8D.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"45243111","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 24 08:01:03 2018\n\n@author: stem\n\"\"\"\nimport random\nplayer_hp = 60\nenemy_hp = random.randint(30, 80)\ndef enemy_atk(php):\n miss_or_hit = random.randint(1, 3)\n if miss_or_hit == 1:\n print(\"Enemy missed\")\n else:\n enemy_dmg = random.randint(3, 8)\n php = php - enemy_dmg\n print (\"Enemy dealt\", enemy_dmg, \"damage!\")\n return php\n \ndef player_atk(ehp):\n roll = random.randint(1, 6)\n if roll < 3:\n print(\"You missed!\")\n elif roll < 6:\n dmg = random.randint(1, 7)\n ehp = ehp - dmg\n print(\"You dealt\", dmg, \"damage!\")\n else:\n dmg = 10\n ehp = ehp - dmg\n print(\"You dealt\", dmg, \"damage!\")\n return ehp\n\ndef player_heal(health):\n roll = random.randint(1, 6)\n if roll == 1:\n print(\"Your heal failed!\")\n elif roll < 6:\n heal = random.randint(1, 6)\n print(\"You healed\", heal, \"hp!\")\n health = heal + health\n else:\n heal = 8\n print(\"You healed 10 hp!!\")\n health = heal + health\n return health\ndef fight(php, ehp):\n while php > 0 and ehp > 0:\n print(php)\n print(ehp)\n a = input(\"Press 1 for Attack,Press 2 for heal.\" )\n a = float(a)\n php = enemy_atk(php)\n if a == 1 and php > 0:\n ehp = player_atk(ehp)\n elif a == 2 and php > 0:\n php = player_heal(php)\n else:\n break\n return php\nplayer_hp = fight(player_hp, enemy_hp)\nif player_hp <= 0:\n print(\"GAME OVER!!!\")\nelif enemy_hp <= 0:\n print(\"YOU WIN!!!\")\n \n\n\n \n \n","sub_path":"textbasedgame.py","file_name":"textbasedgame.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"368271033","text":"# coding:utf-8\n\"\"\"\nThere are N gas stations along a circular route, where the amount of gas at station i is gas[i].\nYou have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to\\\nits next station (i+1). 
You begin the journey with an empty tank at one of the gas stations.\nReturn the starting gas station's index if you can travel around the circuit once, otherwise return -1.\nNote:\nThe solution is guaranteed to be unique.\n\"\"\"\n\n\ndef can_complete_circuit(gas, cost):\n if sum(gas) < sum(cost):\n return -1\n start_index = 0\n count = len(gas)\n sum_gas = 0\n index = 0\n while index < count:\n sum_gas += gas[index] - cost[index]\n index += 1\n if sum_gas < 0:\n sum_gas = 0\n start_index = index % count\n return start_index\n","sub_path":"greedy/gasstation.py","file_name":"gasstation.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"31710351","text":"#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n# Functions for solving ODE\n#\n# pymacrospin Python package\n# Authors: Colin Jermain, Minh-Hai Nguyen\n# Copyright: 2014-2020\n#\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\nimport numpy as np\nfrom pymacrospin.__init__ import normalize\n\n\ndef euler_step(dt, m, torque):\n \"\"\"Takes one step using the Euler method\n\n dt: time step\n m: moment unit vector\n torque: function to calculate torque from m\n \"\"\"\n t = torque(m)\n return normalize(m + dt*t)\n\n\ndef huen_step(dt, m, torque):\n \"\"\" Takes one step using Huen's method\n\n dt: time step\n m: moment unit vector\n torque: function to calculate torque from m\n \"\"\"\n k1 = torque(m)\n m1 = m + dt*k1\n k2 = torque(m1)\n m = m + dt*(k1 + k2)/2.0\n return normalize(m)\n\n\ndef rk23_step(dt, m, torque):\n \"\"\" Takes one step using the Bogacki-Shampine method (Runga-Kutta RK23)\n\n dt: time step\n m: moment unit vector\n torque: function to calculate torque from m\n \"\"\"\n k1 = torque(m)\n k2 = torque(m + dt*k1/2.0)\n k3 = torque(m + 3.0*dt*k2/2.0)\n m = m + 2.0*dt*k1/9.0 + dt*k2/3.0 + 4*dt*k3/9.0\n return normalize(m)\n\n\n# cdef inline void rk4_step(Kernel kernel):\ndef rk4_step(dt, m, torque):\n \"\"\" Takes one step using the Classic 4th order Runga-Kutta method\n\n dt: time step\n m: moment unit vector\n torque: function to calculate torque from m\n \"\"\"\n k1 = torque(m)\n k2 = torque(m + dt*k1/2.0)\n k3 = torque(m + dt*k2/2.0)\n k4 = torque(m + dt*k3)\n m = m + dt*(k1 + 2.0*k2 + 2.0*k3 + k4)/6.0\n return normalize(m)\n\n\ndef run(step_func, steps, m):\n \"\"\" Run multiple steps over time period t\n\n step_func: step run function\n steps: number of steps\n m: moment unit vector\n \"\"\"\n ms = np.zeros((steps+1,3),dtype=np.float32)\n ms[0] = m\n for i in range(steps):\n ms[i+1] = step_func(ms[i])\n return ms[1:]\n\n\ndef relax(step_func, energy_func, precision, steps, max_iters, m):\n \"\"\" Run the simulation until energy variation falls within a threshold\n\n precision: energy's relative error for halting condition\n steps: number of steps per iteration\n max_iters: maximum number of iterations\n m: moment unit vector\n \"\"\"\n ms = np.zeros((steps,3),dtype=np.float32)\n ms[-1] = m\n g1 = energy_func(m)\n for i in range(max_iters):\n g0 = g1\n ms = run(step_func, steps,ms[-1])\n g1 = energy_func(ms[-1])\n if g0-g1 < abs(g0*precision):\n # Reach local minimum within precision\n return ms[-1], i*steps\n return ms[-1], i*steps\n\n\ndef stabilize(step_func, torque_func, dm_thres, steps, max_iters, m, dt):\n \"\"\" Run until torque is below a threshold within a defined errorbar\n\n step_func: step run function\n torque_func: function to calculate torque\n 
dm_thres: halting threshold for dm\n steps: number of steps per iteration\n max_iters: maximum number of iterations\n m: moment unit vector\n dt: time step\n \"\"\"\n ms = np.zeros((steps,3),dtype=np.float32)\n ms[-1] = m\n for i in range(max_iters):\n if np.linalg.norm(torque_func(ms[-1]))*dt < dm_thres:\n return ms[-1], i*steps\n else:\n ms = run(step_func, steps, ms[-1])\n return ms[-1], i*steps\n","sub_path":"pymacrospin/core/solvers.py","file_name":"solvers.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"352622790","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the Windows Registry objects cache.\"\"\"\n\nimport unittest\n\nfrom plaso.winreg import cache\nfrom plaso.winreg import test_lib\nfrom plaso.winreg import winregistry\n\n\nclass CacheTest(test_lib.WinRegTestCase):\n \"\"\"Tests for the Windows Registry objects cache.\"\"\"\n\n def testBuildCache(self):\n \"\"\"Tests creating a Windows Registry objects cache.\"\"\"\n registry = winregistry.WinRegistry(\n winregistry.WinRegistry.BACKEND_PYREGF)\n\n test_file = self._GetTestFilePath(['SYSTEM'])\n file_entry = self._GetTestFileEntry(test_file)\n winreg_file = registry.OpenFile(file_entry, codepage='cp1252')\n\n winreg_cache = cache.WinRegistryCache()\n\n # Test if this function does not raise an exception.\n winreg_cache.BuildCache(winreg_file, 'SYSTEM')\n\n self.assertEqual(\n winreg_cache.attributes['current_control_set'], 'ControlSet001')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"plaso/winreg/cache_test.py","file_name":"cache_test.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"44296735","text":"from .vin import *\nfrom .augmentations import *\n\n\ndef get_dataset(cfgs, mode, specific_csv=None):\n if mode == \"train\":\n transform = get_augmentation(cfgs)\n else:\n transform = None\n\n dataset = VIN(cfgs, transform=transform, mode=mode)\n\n return dataset\n\n\ndef get_augmentation(cfgs):\n if cfgs[\"model\"][\"inputs\"][\"augment\"] == \"train_multi_augment12\":\n aug_fn = train_multi_augment12\n elif cfgs[\"inputs\"][\"augment\"][\"augment\"] is None:\n aug_fn = None\n\n return aug_fn\n\n\ndef get_collater():\n collate_fn = collater\n\n return collate_fn\n","sub_path":"mkdet/inputs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"78008683","text":"## -*- coding: utf-8 -*-\n##----------------------------------------------------------------------\n## ProbeConfig model\n##----------------------------------------------------------------------\n## Copyright (C) 2007-2014 The NOC Project\n## See LICENSE for details\n##----------------------------------------------------------------------\n\n## Python modules\nfrom collections import defaultdict\nimport datetime\nimport logging\nimport random\n## Django modules\nimport django.db.models.signals\n## Third-party modules\nimport mongoengine.signals\nfrom mongoengine.document import Document, EmbeddedDocument\nfrom mongoengine.fields import (\n StringField, IntField, DictField, DateTimeField, FloatField,\n ListField, EmbeddedDocumentField)\n## NOC Modules\nfrom noc import settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass CollectorAddress(EmbeddedDocument):\n proto = StringField()\n address = 
StringField()\n port = IntField()\n\n\nclass MetricCollectors(EmbeddedDocument):\n policy = StringField(default=\"prio\")\n write_concern = IntField(default=1)\n collectors = ListField(EmbeddedDocumentField(CollectorAddress))\n\n\nclass ProbeConfigMetric(EmbeddedDocument):\n metric = StringField()\n metric_type = StringField()\n thresholds = ListField()\n convert = StringField()\n scale = FloatField(default=1.0)\n collectors = EmbeddedDocumentField(MetricCollectors)\n\n\nclass ProbeConfig(Document):\n meta = {\n \"collection\": \"noc.pm.probeconfig\",\n \"indexes\": [(\"model_id\", \"object_id\"),\n (\"probe_id\", \"instance_id\"),\n (\"probe_id\", \"instance_id\", \"expire\"),\n \"uuid\", \"expire\", \"changed\", \"metrics.metric\"]\n }\n\n # Reference to model or document, like sa.ManagedObject\n model_id = StringField()\n # Object id, converted to string\n object_id = StringField()\n #\n probe_id = StringField()\n instance_id = IntField()\n #\n managed_object = IntField(required=False)\n #\n uuid = StringField()\n #\n changed = DateTimeField(default=datetime.datetime.now)\n expire = DateTimeField()\n # Configuration section\n handler = StringField()\n interval = IntField()\n config = DictField()\n metrics = ListField(EmbeddedDocumentField(ProbeConfigMetric))\n\n PROFILES = defaultdict(list) # model -> [(model, field), ...]\n MODELS = []\n TTL = settings.config.getint(\"pm\", \"config_ttl\")\n TTL_JITTER = settings.config.getfloat(\"pm\", \"config_ttl_jitter\")\n TJL = int(TTL - TTL_JITTER * TTL)\n TJH = int(TTL + TTL_JITTER * TTL)\n\n DELETE_DATE = datetime.datetime(2030, 1, 1)\n\n def __unicode__(self):\n return unicode(self.uuid)\n\n @property\n def is_deleted(self):\n return (self.changed == self.expire and\n self.expire == self.DELETE_DATE)\n\n @property\n def is_expired(self):\n return self.expire <= datetime.datetime.now()\n\n @classmethod\n def get_model_id(cls, object):\n if isinstance(object._meta, dict):\n # Document\n return u\"%s.%s\" % (object.__module__.split(\".\")[1],\n object.__class__.__name__)\n else:\n # Model\n return u\"%s.%s\" % (object._meta.app_label,\n object._meta.object_name)\n\n def get_object(self):\n return MetricSettings(\n model_id=self.model_id,\n object_id=self.object_id\n ).get_object()\n\n @classmethod\n def install(cls):\n mongoengine.signals.class_prepared.connect(cls.on_new_document)\n django.db.models.signals.class_prepared.connect(cls.on_new_model)\n\n @classmethod\n def on_new_model(cls, sender, *args, **kwargs):\n if hasattr(sender, \"get_probe_config\"):\n cls.MODELS += [sender]\n django.db.models.signals.post_save.connect(\n cls.on_change_model, sender=sender)\n django.db.models.signals.pre_delete.connect(\n cls.on_delete_model, sender=sender)\n p_field = getattr(sender, \"PROFILE_LINK\", None)\n if p_field:\n for f in sender._meta.fields:\n if f.name == p_field:\n pm = f.rel.to\n cls.PROFILES[pm] += [(sender, p_field)]\n break\n\n @classmethod\n def on_new_document(cls, sender, *args, **kwargs):\n if hasattr(sender, \"get_probe_config\"):\n cls.MODELS += [sender]\n mongoengine.signals.post_save.connect(\n cls.on_change_document, sender=sender)\n mongoengine.signals.pre_delete.connect(\n cls.on_delete_document, sender=sender)\n p_field = getattr(sender, \"PROFILE_LINK\", None)\n if p_field:\n pm = sender._fields[p_field].document_type_obj\n cls.PROFILES[pm] += [(sender, p_field)]\n\n @classmethod\n def _delete_object(cls, object):\n model_id = cls.get_model_id(object)\n object_id = str(object.id)\n # Mark probeconfig as deleted\n 
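# (soft delete: both 'changed' and 'expire' are pinned to the far-future\n # DELETE_DATE sentinel, which is exactly the condition the is_deleted\n # property checks, so probes observe a tombstone rather than a vanished document)\n 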
logger.debug(\"Marking ProbeConfig as deleted: %s:%s\",\n model_id, object_id)\n cls._get_collection().update({\n \"model_id\": model_id,\n \"object_id\": object_id\n },\n {\n \"$set\": {\n \"changed\": cls.DELETE_DATE,\n \"expire\": cls.DELETE_DATE\n }\n },\n multi=True\n )\n # wipe out metricsettings\n logger.debug(\"Deleting MetricSettings: %s:%s\",\n model_id, object_id)\n MetricSettings._get_collection().remove({\n \"model_id\": model_id,\n \"object_id\": object_id\n })\n\n @classmethod\n def get_ttl(cls):\n if not cls.TTL_JITTER:\n return cls.TTL\n else:\n return random.randint(cls.TJL, cls.TJH)\n\n @classmethod\n def _refresh_object(cls, object):\n def get_collectors(es):\n c = collectors.get(es.probe.id)\n if c:\n return c\n c = es.probe.storage.default_collector\n collectors[es.probe.id] = c\n return c\n\n def get_instance(probe, uuid):\n ni = probe.n_instances\n if ni < 1:\n return 0\n else:\n return int(str(uuid)[:8], 16) % ni\n\n def get_refresh_ops(bulk, o):\n model_id = cls.get_model_id(o)\n logger.debug(\"Bulk refresh %s %s\", model_id, o)\n # Cleanup\n bulk.find(\n {\n \"model_id\": model_id,\n \"object_id\": str(o.id)\n }\n ).update(\n {\n \"$set\": {\n \"changed\": cls.DELETE_DATE,\n \"expire\": cls.DELETE_DATE\n }\n }\n )\n for es in MetricSettings.get_effective_settings(o):\n if es.managed_object:\n mo = es.managed_object.id\n else:\n mo = None\n bulk.find(\n {\n \"uuid\": es.uuid\n }\n ).upsert().update(\n {\n \"$set\": {\n \"model_id\": es.model_id,\n \"object_id\": str(es.object.id) if es.object else None,\n \"changed\": now,\n \"expire\": now + datetime.timedelta(seconds=cls.get_ttl()),\n \"handler\": es.handler,\n \"interval\": es.interval,\n \"probe_id\": str(es.probe.id),\n \"instance_id\": get_instance(es.probe, es.uuid),\n \"config\": es.config,\n \"managed_object\": mo,\n \"metrics\": [{\n \"metric\": m.metric,\n \"metric_type\": m.metric_type.name,\n \"thresholds\": m.thresholds,\n \"convert\": m.convert,\n \"scale\": m.scale,\n \"collectors\": get_collectors(es)\n } for m in es.metrics]\n }\n }\n )\n for m, n in cls.PROFILES[o.__class__]:\n for obj in m.objects.filter(**{n: o.id}):\n get_refresh_ops(bulk, obj)\n\n logger.debug(\"Refresh object %s\", object)\n collectors = {} # Storage rule -> collector url\n # @todo: Make configurable\n now = datetime.datetime.now()\n bulk = cls._get_collection().initialize_ordered_bulk_op()\n get_refresh_ops(bulk, object)\n bulk.execute()\n\n @classmethod\n def _refresh_config(cls, object):\n def get_collectors(es):\n c = collectors.get(es.probe.id)\n if c:\n return c\n c = es.probe.storage.default_collector\n collectors[es.probe.id] = c\n return c\n\n def get_instance(probe, uuid):\n ni = probe.n_instances\n if ni < 1:\n return 0\n else:\n return int(str(uuid)[:8], 16) % ni\n\n def get_refresh_ops(bulk, o):\n model_id = cls.get_model_id(o)\n logger.debug(\"Bulk refresh %s %s\", model_id, o)\n # Cleanup\n bulk.find(\n {\n \"model_id\": \"pm.MetricConfig\",\n \"object_id\": str(o.id)\n }\n ).update(\n {\n \"$set\": {\n \"changed\": cls.DELETE_DATE,\n \"expire\": cls.DELETE_DATE\n }\n }\n )\n for es in o.get_effective_settings():\n bulk.find(\n {\n \"uuid\": es.uuid\n }\n ).upsert().update(\n {\n \"$set\": {\n \"model_id\": \"pm.MetricConfig\",\n \"object_id\": str(o.id),\n \"changed\": now,\n \"expire\": now + datetime.timedelta(seconds=cls.get_ttl()),\n \"handler\": es.handler,\n \"interval\": es.interval,\n \"probe_id\": str(es.probe.id),\n \"instance_id\": get_instance(es.probe, es.uuid),\n \"config\": es.config,\n 
\"metrics\": [{\n \"metric\": m.metric,\n \"metric_type\": m.metric_type.name,\n \"thresholds\": m.thresholds,\n \"convert\": m.convert,\n \"scale\": m.scale,\n \"collectors\": get_collectors(es)\n } for m in es.metrics]\n }\n }\n )\n\n logger.debug(\"Refresh metric config %s\", object.name)\n collectors = {} # Storage rule -> collector url\n # @todo: Make configurable\n now = datetime.datetime.now()\n bulk = cls._get_collection().initialize_ordered_bulk_op()\n get_refresh_ops(bulk, object)\n bulk.execute()\n\n @classmethod\n def on_change_model(cls, sender, instance, *args, **kwargs):\n cls._refresh_object(instance)\n\n @classmethod\n def on_change_document(cls, sender, document=None, *args, **kwargs):\n cls._refresh_object(document)\n\n @classmethod\n def on_delete_model(cls, sender, instance, *args, **kwargs):\n cls._delete_object(instance)\n # Rebuild configs for related objects\n for m, n in cls.PROFILES[sender]:\n for obj in m.objects.filter(**{n: instance.id}):\n cls._refresh_object(obj)\n\n @classmethod\n def on_delete_document(cls, sender, document, *args, **kwargs):\n cls._delete_object(document)\n # Rebuild configs for related objects\n for m, n in cls.PROFILES[sender]:\n for obj in m.objects.filter(**{n: document.id}):\n cls._refresh_object(obj)\n\n @classmethod\n def on_change_storage(cls, sender, document=None, *args, **kwargs):\n logger.debug(\"Apply changed storage '%s'\", document.name)\n for p in Probe.objects.filter(storage=document):\n logger.info(\"Applying changes to Probe '%s'\", p.name)\n for pc in ProbeConfig.objects.filter(probe_id=str(p.id)):\n pc.refresh()\n\n @classmethod\n def on_change_metric_settings(cls, sender, document=None, *args, **kwargs):\n object = document.get_object()\n logger.debug(\"Apply changed MetricSettings for '%s'\", object)\n cls._refresh_object(object)\n if not document.metric_sets:\n logger.debug(\"Delete empty MetricSettings for %s\", object)\n document.delete()\n\n @classmethod\n def on_delete_metric_settings(cls, sender, document, *args, **kwargs):\n object = document.get_object()\n logger.debug(\"Apply deleted MetricSettings for '%s'\", object)\n cls._refresh_object(object)\n\n @classmethod\n def on_change_metric_config(cls, sender, document=None, *args, **kwargs):\n logger.debug(\"Apply changed MetricConfig for '%s'\", document.name)\n cls._refresh_config(document)\n\n @classmethod\n def on_delete_metric_config(cls, sender, document, *args, **kwargs):\n logger.debug(\"Apply deleted MetricConfig for '%s'\", document.name)\n cls._delete_object(document)\n\n @classmethod\n def on_change_metric_set(cls, sender, document=None, *args, **kwargs):\n logger.info(\"Applying changes to MetricSet '%s'\", document.name)\n # Find all affected metric settings\n for ms in MetricSettings.objects.filter(\n metric_sets__metric_set=document.id):\n cls._refresh_object(ms.get_object())\n\n @classmethod\n def on_delete_metric_set(cls, sender, document, *args, **kwargs):\n logger.info(\"Deleting MetricSet '%s'\", document.name)\n for ms in MetricSettings.objects.filter(\n metric_sets__metric_set=document.id\n ):\n ms.metric_sets = [s for s in ms.metric_sets\n if s.metric_set.id != document.id]\n ms.save() # Triggers refresh_object\n\n @classmethod\n def on_change_probe(cls, sender, document=None, *args, **kwargs):\n logger.info(\"Applying changes to Probe '%s'\", document.name)\n for pc in ProbeConfig.objects.filter(probe_id=str(document.id)):\n pc.refresh()\n\n @classmethod\n def on_change_auth_profile(cls, sender, instance, *args, **kwargs):\n 
logger.info(\"Applying changes to AuthProfile '%s'\" % instance.name)\n for mo in instance.managedobject_set.all():\n cls._refresh_object(mo)\n\n @classmethod\n def on_change_object_caps(cls, sender, document=None, *args, **kwargs):\n logger.info(\"Applying changes to object capabilities '%s'\", document.object.name)\n cls.on_change_model(document.object, document.object)\n\n def refresh(self):\n logger.debug(\"Refreshing %s\", self.uuid)\n o = self.get_object()\n if not o:\n return\n if self.model_id == \"pm.MetricConfig\":\n self._refresh_config(o)\n else:\n self._refresh_object(o)\n\n @classmethod\n def rebuild(cls, model_id=None):\n pass\n\n##\nfrom metricset import MetricSet\nfrom metricsettings import MetricSettings\nfrom metricconfig import MetricConfig\nfrom probe import Probe","sub_path":"pm/models/probeconfig.py","file_name":"probeconfig.py","file_ext":"py","file_size_in_byte":15822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"503747457","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for fedjax.experimental.metrics.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nfrom fedjax.experimental import metrics\n\nimport jax.numpy as jnp\nimport numpy.testing as npt\n\n\nclass MeanStatTest(absltest.TestCase):\n\n def test_str(self):\n stat = metrics.MeanStat.new(2, 4)\n self.assertEqual(\n 'MeanStat(accum=DeviceArray(2, dtype=int32), weight=DeviceArray(4, dtype=int32)) => 0.5',\n str(stat))\n\n def test_new(self):\n stat = metrics.MeanStat.new(jnp.array([2, 3, 1]), jnp.array([1, 0, 1]))\n npt.assert_array_equal(stat.accum, [2, 0, 1])\n npt.assert_array_equal(stat.weight, [1, 0, 1])\n\n def test_result(self):\n stat = metrics.MeanStat.new(2, 5)\n self.assertEqual(stat.result(), 0.4)\n\n def test_merge(self):\n stat_0 = metrics.MeanStat.new(1, 2)\n stat_1 = metrics.MeanStat.new(2, 3)\n merged_stat = stat_0.merge(stat_1)\n self.assertEqual(merged_stat.accum, 3)\n self.assertEqual(merged_stat.weight, 5)\n\n def test_reduce(self):\n stat = metrics.MeanStat.new(jnp.array([1, 2, 4]), jnp.array([1, 1, 0]))\n reduced_stat = stat.reduce()\n self.assertEqual(reduced_stat.accum, 3)\n self.assertEqual(reduced_stat.weight, 2)\n\n\nclass SumStatTest(absltest.TestCase):\n\n def test_str(self):\n stat = metrics.SumStat.new(2)\n self.assertEqual('SumStat(accum=DeviceArray(2, dtype=int32)) => 2',\n str(stat))\n\n def test_result(self):\n stat = metrics.SumStat.new(2)\n self.assertEqual(stat.result(), 2)\n\n def test_merge(self):\n stat_0 = metrics.SumStat.new(1)\n stat_1 = metrics.SumStat.new(2)\n merged_stat = stat_0.merge(stat_1)\n self.assertEqual(merged_stat.accum, 3)\n\n def test_reduce(self):\n stat = metrics.SumStat.new(jnp.array([1, 2, 1]))\n reduced_stat = stat.reduce()\n self.assertEqual(reduced_stat.accum, 4)\n\n\nclass MetricsTest(parameterized.TestCase):\n\n def test_cross_entropy_loss(self):\n example = {'y': jnp.array(1)}\n prediction = 
jnp.array([1.2, 0.4])\n metric = metrics.CrossEntropyLoss()\n loss = metric.evaluate_example(example, prediction)\n self.assertAlmostEqual(loss.result(), 1.1711007)\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'correct',\n 'target': 2,\n 'prediction': [0, 0, 1],\n 'expected_result': 1.,\n }, {\n 'testcase_name': 'incorrect',\n 'target': 1,\n 'prediction': [1, 0, 0],\n 'expected_result': 0.,\n })\n def test_accuracy(self, target, prediction, expected_result):\n example = {'y': jnp.array(target)}\n prediction = jnp.array(prediction)\n metric = metrics.Accuracy()\n accuracy = metric.evaluate_example(example, prediction)\n self.assertEqual(accuracy.result(), expected_result)\n\n def test_sequence_token_cross_entropy_loss(self):\n example = {'y': jnp.array([1, 0, 1])}\n prediction = jnp.array([[1.2, 0.4], [2.3, 0.1], [0.3, 3.2]])\n metric = metrics.SequenceTokenCrossEntropyLoss()\n loss = metric.evaluate_example(example, prediction)\n self.assertAlmostEqual(loss.result(), 0.612331725)\n\n def test_sequence_cross_entropy_loss(self):\n example = {'y': jnp.array([1, 0, 1])}\n prediction = jnp.array([[1.2, 0.4], [2.3, 0.1], [0.3, 3.2]])\n metric = metrics.SequenceCrossEntropyLoss()\n loss = metric.evaluate_example(example, prediction)\n self.assertAlmostEqual(loss.result(), 1.2246635)\n\n def test_sequence_token_accuracy(self):\n example = {'y': jnp.array([1, 2, 2, 1, 0])}\n # prediction = [1, 0, 2, 1, 0].\n prediction = jnp.array([[0, 1, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0],\n [1, 0, 0]])\n metric = metrics.SequenceTokenAccuracy()\n accuracy = metric.evaluate_example(example, prediction)\n self.assertEqual(accuracy.result(), 0.75) # 3 / 4.\n\n def test_sequence_token_count(self):\n example = {'y': jnp.array([1, 2, 2, 3, 4, 0, 0])}\n prediction = jnp.array([]) # Unused.\n metric = metrics.SequenceTokenCount(masked_target_values=(0, 2))\n count = metric.evaluate_example(example, prediction)\n self.assertEqual(count.result(), 3)\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'untruncated',\n 'target': [1, 2, 2, 3, 4, 0, 0],\n 'expected_result': 0.,\n }, {\n 'testcase_name': 'truncated',\n 'target': [1, 2, 2, 3, 3, 3, 3],\n 'expected_result': 1.,\n })\n def test_sequence_truncation_rate(self, target, expected_result):\n example = {'y': jnp.array(target)}\n prediction = jnp.array([]) # Unused.\n metric = metrics.SequenceTruncationRate(eos_target_value=4)\n truncation_rate = metric.evaluate_example(example, prediction)\n self.assertEqual(truncation_rate.result(), expected_result)\n\n def test_sequence_token_oov_rate(self):\n example = {'y': jnp.array([1, 2, 2, 3, 4, 0, 0])}\n prediction = jnp.array([]) # Unused.\n metric = metrics.SequenceTokenOOVRate(oov_target_values=(2,))\n oov_rate = metric.evaluate_example(example, prediction)\n self.assertEqual(oov_rate.result(), 0.4) # 2 / 5.\n\n def test_sequence_length(self):\n example = {'y': jnp.array([1, 2, 3, 4, 0, 0])}\n prediction = jnp.array([]) # Unused.\n metric = metrics.SequenceLength()\n sequence_length = metric.evaluate_example(example, prediction)\n self.assertEqual(sequence_length.result(), 4.0)\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"fedjax/experimental/metrics_test.py","file_name":"metrics_test.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"581862660","text":"# Given an array with fields and a sequence of field names\n# construct a new array with just those fields copied over\ndef 
_index_fields(ary, fields):\n from multiarray import empty, dtype, array\n dt = ary.dtype\n names = [name for name in fields if name in dt.names]\n formats = [dt.fields[name][0] for name in fields if name in dt.names]\n offsets = [dt.fields[name][1] for name in fields if name in dt.names]\n view_dtype = {'names':names, 'formats':formats, 'offsets':offsets, 'itemsize':dt.itemsize}\n view = ary.view(dtype=view_dtype)\n # Return a copy for now until behavior is fully deprecated\n # in favor of returning view\n copy_dtype = {'names':view_dtype['names'], 'formats':view_dtype['formats']}\n return array(view, dtype=copy_dtype, copy=True)\n","sub_path":"LIVE/dj_demo/mysite/test_segment_base/_internal_3.py","file_name":"_internal_3.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"575817649","text":"import asyncio\nfrom logging import exception\nfrom nodewire import Message\nfrom socket_link import SocketLink\nfrom web_link import WebLink\nfrom mqtt_link import MqttLink\nfrom execution_context import ExecutionContext\nfrom execution_engine import ExecutionEngine\nimport sizeof as sizeof\nimport json\nfrom bson.objectid import ObjectId\nimport time\n\nfrom datetime import datetime, timedelta\nfrom config import mongo_client\n\n\nclass CommandProcessor:\n def __init__(self):\n self.sys_db = mongo_client.nodewire\n self.clients = []\n self.subscriptions = []\n self.messages = asyncio.Queue()\n\n self.socket = SocketLink(self.messages)\n self.socket.new = self.client_added\n self.socket.client_done = self.client_closed\n\n self.pipe = SocketLink(self.messages, port=9001)\n self.pipe.new = self.client_added\n self.pipe.client_done = self.client_closed\n self.pipe.safe = True\n\n self.web = WebLink(self.messages)\n self.web.new = self.client_added\n self.web.client_done = self.client_closed\n\n self.mqtt = MqttLink(self.messages)\n self.mqtt.new = self.client_added\n self.mqtt.client_done = self.client_closed\n\n self.send_queues = []\n for _ in range(4): # four sender tasks for outgoing (return) messages\n q = asyncio.Queue()\n self.send_queues.append(q)\n asyncio.Task(self.msg_sender(q))\n self.sq_index = 0\n\n self.execution_contexts = {} # one per gateway\n self.when_dos = [] # universal whendo list\n self.auto_execed = []\n self.exec_engine = ExecutionEngine(self.when_dos, self.execution_contexts)\n self.exec_engine.start()\n\n async def start_execution(self, client, n):\n if not client.safe and n == 0 and client.gateway not in self.execution_contexts:\n gw = await self.sys_db.instances.find_one({'instance_id': client.gateway})\n users = [u['user_instance_and_node_name'].split(':')[1] for u in gw['users'] if u['admin'] == True]\n if users:\n user = users[0]\n self.execution_contexts[client.gateway] = ExecutionContext(client.gateway, self.when_dos,\n self.exec_engine, self.messages)\n if client.gateway not in self.auto_execed:\n kk = await self.execution_contexts[client.gateway].engine_process([\"exec('auto')\"], user)\n if kk == '':\n self.auto_execed.append(client.gateway)\n await self.execution_contexts[client.gateway].engine_process([\"kill('auto')\"], user)\n await self.sys_db.live_gateways.replace_one({'instance': client.gateway},\n {'instance': client.gateway, 'count': n + 1})\n\n def client_added(self, client):\n if client in self.clients:\n self.client_closed(client)\n n = len([c for c in self.clients if c.gateway == client.gateway])\n client.seqn = n\n if n < 20:\n self.sq_index = (self.sq_index + 1) % 
len(self.send_queues)\n client.send_queue = self.send_queues[self.sq_index]\n asyncio.create_task(self.start_execution(client, n))\n self.clients.append(client)\n if client.type == 'mqtt':\n node = client.nodes[-1]\n if node not in self.execution_contexts[client.gateway].pvs and node != 'ee' and node != 'nwscript':\n self.execution_contexts[client.gateway].add_node(node, client.gateway)\n return True\n return False\n\n def client_closed(self, client):\n try:\n clients = [c for c in self.clients if c == client]\n if len(clients) != 0:\n client = clients[0]\n self.terminate_connections(clients)\n # update number of connected gateways\n n = len([c for c in self.clients if c.gateway == client.gateway])\n n_g = {'instance': client.gateway, 'count': n}\n\n # replace_one is a coroutine, so schedule it as a task; call_soon would leave it unawaited\n asyncio.get_event_loop().create_task(self.sys_db.live_gateways.replace_one({'instance': client.gateway},\n n_g))\n except Exception as ex:\n print('pass 7')\n print(ex)\n\n async def scavenger(self):\n while True:\n await asyncio.sleep(300) # wait 5 minutes\n for client in [c for c in self.clients if not c.safe if c.type!='mqtt']:\n try:\n if time.time() - client.last_seen > 900: # older than 15 minutes\n self.terminate_connections([client])\n except Exception as ex:\n print('error in scavenger')\n print(ex)\n await asyncio.sleep(0)\n\n def terminate_connections(self, clients):\n for client in clients:\n if client.type!= 'mqtt':\n client.task.cancel()\n if client in self.clients:\n self.clients.remove(client)\n for node in client.nodes:\n if client.gateway is not None and node in self.execution_contexts[client.gateway].pvs:\n try:\n self.execution_contexts[client.gateway].pvs.remove(node)\n thenode = [n for n in self.execution_contexts[client.gateway].variables['nodes'] if\n n.name == node]\n if len(thenode) != 0:\n self.execution_contexts[client.gateway].variables['nodes'].remove(thenode[0])\n self.exec_engine.pending_signals.put_nowait(['nodes', client.gateway])\n except Exception as ex:\n print(\"{}. 
couldn't remove node: {}\".format(str(ex), node))\n if client.type == 'web' and len(client.nodes)!=0:\n self.exec_engine.pending_signals.put_nowait(('session', client.nodes[0], client.session, client.gateway, client.session))\n client.close()\n\n async def check_id(self, node, gateway, Sender, id, task):\n try:\n client = None\n clients = [client for client in self.clients if client.gateway == gateway and node in client.ghosts]\n if len(clients) != 1:\n print(f'{len(clients)} other instances present')\n for c in clients:\n if node in c.nodes and c.task != task:\n print(f'{node} terminate')\n self.terminate_connections([c])\n elif node in c.ghosts and c.task == task:\n print(f'{node} select')\n if client is None or client.last_seen < c.last_seen:\n client = c\n else:\n client = clients[0]\n\n if not client is None: # and node in client['nodes']:\n d_instance = await self.sys_db.instances.find_one(\n {'instance_id': gateway}) # , {'registered_nodes', 1})\n if client.type == 'web' or len(\n [rn for rn in d_instance['registered_nodes'] if rn['name'] == node and rn['id'] == id]) != 0:\n if node in client.ghosts: client.ghosts.remove(node)\n client.nodes.append(node)\n await client.send((Sender + ' ack cp'))\n self.messages.put_nowait(('{}:{} get ports {}:ee'.format(gateway, node, gateway), None))\n if node not in self.execution_contexts[gateway].pvs and node != 'ee' and node != 'nwscript':\n self.execution_contexts[gateway].add_node(node, gateway)\n # self.messages.put_nowait(('{}:{} get ports {}:ee'.format(gateway, node, gateway), None))\n else:\n pass\n else:\n await client.send('{}:{} not_registered {}:ee'.format(gateway, node, gateway))\n\n else:\n print(\"shouldn't happen\")\n except ConnectionResetError:\n clients = [client for client in self.clients if task == client.task]\n self.terminate_connections(clients)\n except Exception as ex:\n print('checkid error', ex)\n # self.messages.put_nowait((Sender + ' error ' + str(ex).split() + ' cp', None))\n\n def is_auth(self, gateway, nodename, task):\n clients = [client for client in self.clients if task == client.task]\n if clients and clients[0].safe: return True\n if nodename == 'ee' or nodename == 'cp' or (\n gateway in self.execution_contexts and nodename in self.execution_contexts[gateway].apps): return True\n theyre = [cc for cc in self.clients if cc.gateway == gateway and nodename in cc.nodes]\n return len(theyre) != 0\n\n async def handle_subscriptions(self, msg):\n subscribers = [s for s in self.subscriptions if s['command'] == msg.command and s['target'] == msg.sender_full]\n for subscriber in subscribers:\n if subscriber['client'] is None or (subscriber['client'].type!='mqtt' and subscriber['client'].task._state == 'FINISHED'):\n self.subscriptions.remove(subscriber)\n elif msg.address_full != subscriber['subscriber']:\n raw = '{} {} {} {}'.format(subscriber['subscriber'], msg.command, ' '.join(p for p in msg.params), msg.sender_full)\n if not subscriber['client']:\n self.messages.put_nowait((raw, None))\n else:\n try:\n await subscriber['client'].send(raw)\n except:\n self.terminate_connections([subscriber['client']])\n self.subscriptions.remove(subscriber)\n\n async def handle_val(self, msg, task):\n try:\n varname = msg.sender\n for nn in [p for p in self.execution_contexts[msg.sender_instance].variables['nodes'] if p.name == varname]:\n try:\n val = json.loads(msg.params[1])\n except ValueError:\n bc = {'app': msg.address, 'module': self.execution_contexts[msg.sender_instance].themodule, 'variables': {}}\n val = await 
self.execution_contexts[msg.sender_instance].evaluate(msg.params[1], bc)\n if val != nn[msg.params[0]]:\n nn.set(msg.params[0], val)\n self.exec_engine.pending_signals.put_nowait([varname + '.' + msg.params[0], msg.sender_instance])\n if not nn.discovery_complete:\n nullports = [pp for pp in nn.ports if nn[pp] is None and pp!=msg.params[0]]\n if nullports == []:\n nn.discovery_complete = True\n self.execution_contexts[msg.sender_instance].anounce(nn.name, nn.gateway)\n self.messages.put_nowait(('{}:{} get type {}:ee'.format(msg.sender_instance, nn.name, msg.sender_instance), None))\n for p in nullports:\n response = '{}:{} get {} ee'.format(msg.sender_instance, nn.name, p)\n self.messages.put_nowait((response, None))\n break\n if varname == 'cp':\n self.execution_contexts[msg.sender_instance].variables[msg.address]['cp'].set(msg.params[0],json.loads(msg.params[1]))\n self.exec_engine.pending_signals.put_nowait([varname + '.' + msg.params[0], msg.sender_instance])\n if '@' in varname and '.' in varname:\n try:\n val = json.loads(msg.params[1])\n except:\n bc = {'app': msg.address, 'module': self.execution_contexts[msg.sender_instance].themodule, 'variables': {}}\n val = await self.execution_contexts[msg.sender_instance].evaluate(msg.params[1], bc)\n cs = [c for c in self.clients if c.task == task]\n signal = ('me.' + msg.params[0], varname, val, msg.sender, cs[0].session)\n self.exec_engine.pending_signals.put_nowait(signal)\n if varname == 'db':\n self.execution_contexts[msg.sender_instance].variables['_id'] = msg.params[1]\n except ValueError:\n print('pass 6')\n except Exception:\n print('pass 7')\n\n async def handle_ee(self, msg, task):\n gateway = msg.sender_instance\n if msg.command == 'val':\n await self.handle_val(msg, task)\n elif msg.command == 'type':\n nodename = msg.sender\n try:\n thenode = [p for p in self.execution_contexts[gateway].variables['nodes'] if p.name == nodename][0]\n thenode.settype(msg.params[0])\n except:\n pass\n elif msg.command == 'set':\n if msg.params[0] == 'scriptlet':\n # list = json.loads(Params[1])\n line = msg.params[1][1:-1]\n result = []\n try:\n if line == 'reset':\n s = 'cleared'\n self.execution_contexts[gateway].reset('')\n elif line == 'debug':\n s = None\n l_no = 0\n user =msg.sender\n for whendo in [wd for wd in self.when_dos if wd.instance==gateway and wd.app==self.execution_contexts[gateway].theapp[user]]:\n l_no+=1\n if whendo.errors != []:\n result.append('Rule {} -> {}'.format(l_no, json.dumps(whendo.errors)))\n whendo.errors = []\n else:\n user =msg.sender\n s = await self.execution_contexts[gateway].engine_process(line.splitlines(), user)\n if s != None: result.append(str(s))\n except Exception as ex:\n result.append(str(ex))\n self.messages.put_nowait(('{} val script {} {}:ee'.format(msg.sender_full, json.dumps(result), gateway), None))\n # print(result)\n elif msg.params[0] == 'script':\n result = []\n try:\n #self.execution_contexts[gateway].reset()\n user = msg.sender\n if user in self.execution_contexts[gateway].theapp:\n # Params[1] = Params[1].replace(\"'\", '\"')\n lines = msg.params[1][1:-1].splitlines()\n await self.execution_contexts[gateway].engine_process_file(lines, user)\n result.append('Running {}:{}'.format(self.execution_contexts[gateway].theapp[user], self.execution_contexts[gateway].themodule))\n except Exception as ex:\n result.append(str(ex))\n self.messages.put_nowait(('{} val script {} {}:ee'.format(msg.sender_full,json.dumps(result),gateway),None))\n elif msg.params[0].split('.')[0] in 
self.execution_contexts[gateway].variables[msg.address]['inputs'] or msg.params[0].split('[')[0] in self.execution_contexts[gateway].variables[msg.address]['inputs']:\n bc = {'app': msg.address,'module': self.execution_contexts[gateway].themodule, 'variables': {}}\n await self.execution_contexts[gateway].evaluate(msg.address + '.' + msg.params[0] + '=' + msg.params[1], bc)\n signal = [msg.params[0], gateway, msg.sender]\n if not task is None:\n cs = [c for c in self.clients if c.task==task]\n if cs[0].session is not None:\n signal.append(cs[0].session)\n self.exec_engine.pending_signals.put_nowait(signal)\n if msg.address != 'ee':\n self.messages.put_nowait(('{} val {} {} {}:{}'.format(msg.sender_full, msg.params[0], msg.params[1], gateway, msg.address_full), None))\n elif msg.command == 'nodes':\n gw = msg.sender_instance\n self.execution_contexts[gateway].nodes = [n.split(':')[1] for n in msg.params]\n self.execution_contexts[gateway].pvs = []\n self.execution_contexts[gateway].variables['nodes'] = []\n for node in self.execution_contexts[gateway].nodes:\n if node not in self.execution_contexts[gateway].pvs and node != 'ee' and node != 'nwscript':\n self.messages.put_nowait(('{}:cp subscribe {} val {}:ee'.format(gw,node,gateway),None))\n # self.messages.put_nowait(('{}:{} get ports {}:ee'.format(gw, node, gateway), None))\n self.execution_contexts[gateway].add_node(node, gateway)\n elif msg.command == 'ports':\n varname = msg.sender\n gw = msg.sender_instance\n if varname not in self.execution_contexts[gateway].pvs and varname != 'ee' and varname != 'nwscript':\n self.messages.put_nowait(('{}:cp subscribe {} val {}:ee'.format(gw, varname, gateway), None))\n self.execution_contexts[gateway].add_node(varname, gateway)\n nn = [p for p in self.execution_contexts[gateway].variables['nodes'] if p.name == varname][0]\n if len(msg.params) == 0:\n nn.discovery_complete = True\n self.execution_contexts[msg.sender_instance].anounce(nn.name, nn.gateway)\n else:\n nn.discovery_complete = False\n self.messages.put_nowait(('{} get {} {}:ee'.format(msg.sender_full, msg.params[0], gateway), None))\n for port in msg.params:\n nn.set(port, None)\n # self.messages.put_nowait(('{} get {} {}:ee'.format(Sender, port, gateway), None))\n elif msg.command == 'get':\n bc = {'app': msg.address,'module': self.execution_contexts[gateway].themodule, 'variables': {}}\n if msg.params[0] == 'ports':\n ports = [p for p in self.execution_contexts[gateway].variables[msg.address]['inputs']] # todo MUMT\n for p in self.execution_contexts[gateway].variables[msg.address]['outputs']:\n if p not in ports: ports.append(p)\n self.messages.put_nowait(('{} ports {} {}:{}'.format(msg.sender, ' '.join(p for p in ports), gateway, msg.address), None))\n elif self.execution_contexts[gateway].is_defined(msg.params[0], bc) and (\n msg.params[0] in self.execution_contexts[gateway].variables[msg.address]['inputs'] or\n msg.params[0] in self.execution_contexts[gateway].variables[msg.address]['outputs']):\n theval = self.execution_contexts[gateway].get_val(msg.params[0], bc) # todo MUMT\n self.messages.put_nowait(('{} val {} {} {}:{}'.format(msg.sender, msg.params[0],\n '\"' + theval + '\"' if isinstance(theval,str) else str(theval), gateway, msg.address), None))\n elif msg.params[0] == 'status':\n for whendo in [wd for wd in self.when_dos if wd.instance == gateway and wd.app == msg.address]:\n self.messages.put_nowait(('{} val status {} {}:ee'.format(msg.sender_full, json.dumps(whendo.errors), gateway), None))\n elif '.' 
in msg.params[0]:\n var = msg.params[0].split('.')[0]\n if self.execution_contexts[gateway].is_defined(var, bc) and (\n var in self.execution_contexts[gateway].variables[msg.address]['inputs'] or\n var in self.execution_contexts[gateway].variables[msg.address]['outputs']):\n theval = self.execution_contexts[gateway].get_val(msg.params[0], bc)\n self.messages.put_nowait(('{} val {} {} {}:{}'.format(msg.sender_full, var,'\"' + theval + '\"' if isinstance(theval,str) else str(theval), gateway, msg.address_full), None))\n\n async def handle_db(self, msg):\n gateway = msg.address_instance\n db = mongo_client[gateway]\n collection = db[msg.params[0]]\n if msg.command == 'set':\n if msg.params[1] == 'drop':\n await collection.drop()\n elif msg.params[1] == 'remove':\n query = json.loads(msg.params[2])\n if '_id' in query: query['_id'] = ObjectId(query['_id'])\n await collection.delete_many(query)\n elif msg.params[1] == 'index':\n keys = json.loads(msg.params[2])\n options = json.loads(msg.params[3]) if len(msg.params)>=4 else None\n if isinstance(keys, dict):\n keys = [(k, keys[k]) for k in keys]\n if options:\n await collection.create_index(keys, background=True, **options)\n else:\n await collection.create_index(keys, background=True)\n print('index')\n else:\n if len(msg.params) == 3:\n query = json.loads(msg.params[1])\n if '_id' in query: query['_id'] = ObjectId(query['_id'])\n if msg.params[2] == 'remove':\n await collection.delete_many(query)\n elif msg.params[2] == 'removeindex':\n await collection.drop_index(query)\n elif msg.params[2] == 'index':\n keys = query\n if isinstance(keys, dict):\n keys = [(k, keys[k]) for k in keys]\n await collection.create_index(keys, background=True)\n else:\n doc = json.loads(msg.params[2])\n if isinstance(doc, list) or '$set' in doc:\n id = await collection.update_many(query, doc) # update_many\n else:\n id = await collection.replace_one(query, doc) # replace one\n response = msg.sender + ' val ' + msg.params[0] + '_id \\\"' + str(id.modified_count) + \"\\\" db\"\n elif len(msg.params) == 4:\n if msg.params[3] == 'index':\n keys = json.loads(msg.params[1])\n options = json.loads(msg.params[2])\n if isinstance(keys, dict):\n keys = [(k, keys[k]) for k in keys]\n await collection.create_index(keys, background=True, **options)\n else:\n docs = json.loads(msg.params[1])\n if type(docs) is dict:\n docs = [docs]\n for doc in docs:\n if '_id' in doc:\n doc['_id'] = ObjectId(doc['_id'])\n await collection.replace_one({'_id': doc['_id']}, doc)\n id = doc['_id']\n else:\n result = (await collection.insert_one(doc))\n id = result.inserted_id\n response = msg.sender_full + ' val ' + msg.params[0] + '_id \\\"' + str(id) + \"\\\" db\"\n elif msg.command == 'get':\n if msg.params[0] == 'ports':\n collections = await db.collection_names(include_system_collections=False)\n response = msg.sender_full + ' ports ' + ' '.join(c for c in collections) + ' db'\n else:\n query = json.loads(msg.params[1])\n if type(query) is list:\n pipeline = query if type(query) is list else json.loads(msg.params[2])\n # if '_id' in query: query['_id'] = ObjectId(query['_id'])\n results = await collection.aggregate(pipeline).to_list(None)\n rs = []\n try:\n for result in results:\n if '_id' in result: result['_id'] = str(result['_id'])\n rs.append(result)\n response = msg.sender_full + ' val ' + msg.params[0] + ' ' + json.dumps(rs) + ' db'\n except Exception as ex:\n print('pass 4')\n else:\n if '_id' in query: query['_id'] = ObjectId(query['_id'])\n try:\n if len(msg.params) >= 4:\n options = 
json.loads(msg.params[3])\n sort = options['$sort'] if '$sort' in options else {}\n limit = options['$limit'] if '$limit' in options else {}\n skip = options['$skip'] if '$skip' in options else 0\n if sort!={} and type(sort) == dict:\n sort = list(sort.items())\n if sort and limit:\n results = await collection.find(query, json.loads(msg.params[2])).skip(skip).sort(sort).limit(limit).to_list(None)\n elif sort:\n results = await collection.find(query, json.loads(msg.params[2])).skip(skip).sort(sort).to_list(None)\n elif limit:\n results = await collection.find(query, json.loads(msg.params[2])).skip(skip).limit(limit).to_list(None)\n else:\n results = await collection.find(query, json.loads(msg.params[2])).skip(skip).to_list(None)\n elif len(msg.params)>=3:\n results = await collection.find(query, json.loads(msg.params[2])).to_list(None)\n else:\n results = await collection.find(query).to_list(None)\n rs = []\n for result in results:\n if '_id' in result: result['_id'] = str(result['_id'])\n rs.append(result)\n response = msg.sender_full + ' val ' + msg.params[0] + ' ' + json.dumps(rs) + ' db'\n except Exception as ex:\n print('pass 5')\n self.messages.put_nowait((response, None))\n\n def app_permission(self, app, context):\n if 'permission' in context.variables[app]:\n return {'name': app, 'access_permission': context.variables[app]['permission']}\n else:\n return {'name': app, 'access_permission': [2, 2, 0]}\n\n async def access_allowed(self, user, node, command):\n gu, u = user.split(':')\n g, n = node.split(':')\n\n if u in ['cp', 'ee', 'remote'] and gu == g: return True\n\n n_gateway = await self.sys_db.instances.find_one({'instance_id': g})\n d_user = await self.sys_db.users.find_one({'email': u})\n # u_gateway = await self.sys_db.instances.find_one({'instance_id': gu})\n if d_user is None:\n try:\n client = [client for client in self.clients if client.gateway == gu and u in client.nodes]\n if client != []:\n d_user = client[0].user\n elif u in self.execution_contexts[g].apps:\n email = self.execution_contexts[g].owners[u]\n d_user = await self.sys_db.users.find_one({'email': email})\n else:\n raise Exception('Node or User \"{}\" does not exist'.format(u))\n except Exception as ex:\n raise Exception('Node or User \"{}\" does not exist'.format(u))\n\n if n_gateway['owner'] == d_user['_id']: # super user\n return True\n\n i_user = [u1 for u1 in n_gateway['users'] if u1['user_instance_and_node_name'] == user]\n if i_user == []:\n userclass = 2 # 'others, unregistered users\n else:\n i_user = i_user[0]\n if i_user['admin']:\n userclass = 0 # 'admin'\n else:\n userclass = 1 # 'user'\n\n if n in self.execution_contexts[g].apps:\n i_nodes = [self.app_permission(n, self.execution_contexts[g])]\n else:\n i_nodes = [n1 for n1 in (n_gateway['registered_nodes'] +\n [\n {'name': 'cp', 'access_permission': [2, 2, 0]},\n {'name': 'ee', 'access_permission': [2, 1, 0]},\n {'name': 'db', 'access_permission': [2, 0, 0]}\n ]) if n1['name'] == n]\n if len(i_nodes) != 0:\n i_node = i_nodes[0]\n node_permission = i_node['access_permission'][userclass]\n if command == 'set' and node_permission == 2:\n return True\n elif command != 'set' and node_permission >= 1:\n return True\n return False\n \n async def handle(self, message: Message, task):\n if message.command == 'ThisIs':\n try:\n clients = [client for client in self.clients if task == client.task]\n client = clients[0]\n node = message.sender\n '''\n nodes ghosts\n --------------------------\n in in 1 shouldn't happen\n in not in 2 remove node from 
clients\n not in in 3 replace ghost if new tcp connection\n not in not in 4 handle\n '''\n if node not in client.ghosts and node not in client.nodes: # case 4\n # add to ghosts\n client.ghosts.append(node)\n if client.type == 'web':\n await self.check_id(node, message.sender_instance, message.sender, message.params[0], task)\n await client.send(message.sender + ' ack cp')\n else:\n if message.params != []:\n await self.check_id(node, message.sender_instance, message.sender, message.params[0], task)\n else:\n await client.send(message.sender + ' ack cp')\n await client.send(message.sender + ' get id cp')\n elif node in client.nodes: # and len(clients)==1: # case 2\n # pass\n if time.time() - client.last_seen > 20:\n client.nodes.remove(node) # = [n for n in client['nodes'] if n != node]\n else: # case 3\n if client.type == 'tcp':\n # terminate previous connections\n other_ghosts = [client for client in self.clients if\n node in client.ghosts and client.task != task]\n if len(other_ghosts) > 1:\n self.terminate_connections(other_ghosts)\n if message.params != []:\n await self.check_id(node, message.sender_instance, message.sender, message.params[0], task)\n else:\n await client.send(message.sender + ' ack cp')\n await client.send(message.sender + ' get id cp')\n elif client.type == 'web':\n await client.send(message.sender + ' ack cp')\n except ConnectionResetError:\n clients = [client for client in self.clients if task == client.task]\n self.terminate_connections(clients)\n except Exception as ex:\n print(f'pass 2: {ex}, node is {message.sender}')\n elif message.command == 'id':\n node = message.sender\n await self.check_id(node, message.sender_instance, message.sender, message.params[0], task)\n elif message.command == 'keepalive':\n clients = [client for client in self.clients if task == client.task]\n if clients:\n client = clients[0]\n try:\n await client.send(message.sender + ' ack cp')\n varname = message.sender\n if varname not in self.execution_contexts[message.sender_instance].pvs and varname not in ['ee','nwscript','cp','db']:\n self.terminate_connections([client])\n except ConnectionResetError:\n self.terminate_connections([client])\n else:\n print(f'CLIENT NOT CONNECTED => {message}')\n elif self.is_auth(message.sender_instance, message.sender, task):\n if message.command == 'get':\n if message.params[0] == 'nodes':\n nodeses = [[n for n in client.nodes] for client in self.clients if\n client.gateway == message.sender_instance and (\n client.type == 'tcp' or client.type == 'mqtt')]\n nodes_web = [[n for n in client.nodes[1:]] for client in self.clients if\n client.gateway == message.sender_instance and client.type == 'web']\n nodes = self.execution_contexts[\n message.sender_instance].apps if message.sender_instance in self.execution_contexts else []\n for nodegroup in nodeses: nodes = nodes + nodegroup\n for nodegroup in nodes_web: nodes = nodes + nodegroup\n response = '{} nodes {} {}:cp'.format(message.sender, ' '.join(nodes), message.sender_instance)\n self.messages.put_nowait((response, None))\n elif message.params[0] == 'ghosts':\n nodeses = [[n for n in client.ghosts] for client in self.clients if\n client.gateway == message.sender_instance]\n nodes = []\n for nodegroup in nodeses: nodes = nodes + nodegroup\n response = '{} ghosts {} {}:cp'.format(message.sender, ' '.join(nodes), message.sender_instance)\n self.messages.put_nowait((response, None))\n elif message.params[0] == 'gateways':\n def fdate(t):\n d = datetime(1970, 1, 1) + timedelta(seconds=t)\n return d.strftime('%X %x')\n\n gw = 
[{'gateway': c.gateway, 'lastseen': fdate(c.last_seen), 'nodes': c.nodes} for c in\n self.clients]\n response = f'{message.sender} val gateways {json.dumps(gw)} cp'\n self.messages.put_nowait((response, None))\n elif message.params[0] == 'users':\n users = [' '.join(u) for u in [c.nodes for c in self.clients if\n c.type == 'web' and c.gateway == message.sender_instance]]\n response = f'{message.sender} val users {json.dumps(users)} cp'\n self.messages.put_nowait((response, None))\n elif message.params[0] == 'connections':\n response = f'{message.sender} val connections {len(self.clients)} cp'\n self.messages.put_nowait((response, None))\n elif message.params[0] == 'mem':\n m = sizeof.deep_getsizeof(self.execution_contexts[message.sender_instance].variables, set())\n cc = [c for c in self.clients if c.gateway == message.sender_instance]\n n = sizeof.deep_getsizeof(cc[0], set())\n print(m, n)\n response = f'{message.sender} val mem ' + '{\"clients:\":' + str(n) + ', \"context\":' + str(\n m) + '} cp'\n self.messages.put_nowait((response, None))\n elif message.command == 'set':\n if message.params[0] == 'id': # cp set id node_name id_code new_name sender\n clients = [c for c in self.clients if message.params[1] in c.ghosts and c.gateway == message.sender_instance]\n if clients:\n client = clients[0]\n client.ghosts.remove(message.params[1])\n try:\n await client.send('{} set id {} cp'.format(message.params[1], message.params[2]))\n await client.send('{} set name {} cp'.format(message.params[1], message.params[3]))\n except:\n self.terminate_connections(clients)\n elif message.params[0] == 'reset': # reset all connections from this instance\n clients = [c for c in self.clients if c.gateway == message.sender_instance]\n self.terminate_connections(clients)\n elif message.command == 'register':\n try:\n # cp register node id pwd=password user\n d_instance = await self.sys_db.instances.find_one({'instance_id': message.sender_instance})\n i_user = \\\n [u for u in d_instance['users'] if u['user_instance_and_node_name'] == message.sender_full][0]\n if i_user['admin']: # d_user['password'] == Params[2].split('=')[1] and i_user['admin']:\n node = {'name': message.params[0], 'id': message.params[1], 'access_permission': [2, 2, 1]}\n if node['name'] not in [n['name'] for n in d_instance['registered_nodes']]:\n d_instance['registered_nodes'].append(node)\n await self.sys_db.instances.replace_one({'_id': d_instance['_id']}, d_instance)\n except Exception as ex:\n print('pass 3')\n elif message.command == 'getnode':\n nodename = message.params[0].split(':')[1] if ':' in message.params[0] else message.params[0]\n nodeinstance = message.params[0].split(':')[0] if ':' in message.params[0] else message.sender_instance\n if await self.access_allowed(message.sender_full, nodeinstance+':'+nodename, 'get'):\n nodes = [n for n in self.execution_contexts[nodeinstance].variables['nodes'] if n.name == nodename]\n if len(nodes) != 0:\n dnode = nodes[0]\n nodename = dnode.name\n nodebody = dnode.json()\n if nodes[0].type != None:\n response = '{} node {} {} {} {} cp'.format(message.sender_full, nodebody, nodename, nodes[0].gateway, nodes[0].type)\n else:\n response = '{} node {} {} {} cp'.format(message.sender_full, nodebody, nodename, nodes[0].gateway)\n self.messages.put_nowait((response, None))\n else:\n nodes = [n for n in self.execution_contexts[nodeinstance].apps if n == nodename]\n if len(nodes) != 0:\n ports = [p for p in\n self.execution_contexts[nodeinstance].variables[nodename]['inputs']]\n for p in 
self.execution_contexts[nodeinstance].variables[nodename]['outputs']:\n if p not in ports: ports.append(p)\n bc = {'app': nodename,'module': self.execution_contexts[nodeinstance].themodule, 'variables': {}}\n content = {}\n for port in ports:\n try:\n content[port] = self.execution_contexts[nodeinstance].get_val(port, bc)\n except:\n content[port] = None\n response = '{} node {} {} {} {} cp'.format(message.sender_full, json.dumps(content), nodename, nodeinstance, nodename)\n self.messages.put_nowait((response, None))\n else:\n #node not online\n self.execution_contexts[nodeinstance].nodewaiters.append({'nodename': nodename, 'waiter': message.sender_full})\n\n elif message.command == 'erase':\n # cp erase node user\n d_instance = await self.sys_db.instances.find_one({'instance_id': message.sender_instance})\n i_user = [u for u in d_instance['users'] if u['user_instance_and_node_name'] == message.sender][0]\n if i_user['admin']:\n client = \\\n [c for c in self.clients if message.params[0] in c.ghosts and c.gateway == message.sender_instance][\n 0]\n # send() is a coroutine on every link type, so it must be awaited\n await client.send('{} set reset cp'.format(message.params[0]))\n\n elif message.command == 'subscribe':\n # cp subscribe node cmd sender\n if message.sender != message.params[0]:\n clients = [client for client in self.clients if task == client.task]\n client = clients[0] if len(clients) != 0 else None\n target = message.params[0] if ':' in message.params[0] else message.sender_instance + ':' + message.params[0]\n if len([s for s in self.subscriptions if s['target'] == target and s['command'] == message.params[1] and s['subscriber'] == message.sender_full and s['client'].task==client.task]) == 0:\n self.subscriptions.append({'target': target, 'command': message.params[1], 'subscriber': message.sender_full, 'client': client})\n else:\n client = [c for c in self.clients if message.sender in c.ghosts and c.gateway == message.sender_instance]\n if client:\n await client[0].send(message.sender + ' auth_error cp')\n\n async def msg_sender(self, dqueue):\n while True:\n raw_msg, client = await dqueue.get()\n try:\n await client.send(raw_msg)\n except Exception as ex:\n print('message sender error', ex)\n self.terminate_connections([client])\n\n async def process(self):\n while True:\n #try:\n raw, sender = await self.messages.get()\n # print(f'received:{raw}<<')\n message = Message(raw)\n if message.command == 'error':\n continue\n asyncio.Task(self.handle_subscriptions(message))\n if message.address == 'cp':\n try:\n await self.handle(message, sender)\n except Exception as ex:\n print('error while handling message', message, ex)\n elif (message.command in ['get', 'set'] and (await self.access_allowed(message.sender_full, message.address_full, message.command)))\\\n or (not message.command in ['get', 'set'] and await self.access_allowed(message.address_full, message.sender_full, message.command)):\n if message.address == 'db':\n await self.handle_db(message)\n elif message.address == 'ee' or message.address in self.execution_contexts[message.address_instance].apps:\n try:\n await self.handle_ee(message, sender)\n except Exception as ex:\n print('error in ee while handling message', message, ex)\n else:\n if 'session' in message.named_params:\n clients = [c for c in self.clients if\n message.address in c.nodes and c.gateway == message.address_instance and c.session ==\n message.named_params['session']]\n else:\n clients = [c for c in self.clients if\n 
message.address in c.nodes and c.gateway == message.address_instance] + \\\n [c for c in self.clients if message.address_full in c.nodes and c.safe]\n for client in clients:\n client.send_queue.put_nowait((raw, client))\n if message.command == 'val' and message.sender!='ee':\n await self.handle_val(message, sender)\n self.messages.task_done()\n #except Exception as ex:\n # print('pass unknowns', ex)\n\n async def run_async(self):\n await asyncio.gather(\n asyncio.ensure_future(self.socket.start()),\n asyncio.ensure_future(self.pipe.start()),\n asyncio.ensure_future(self.web.start()),\n asyncio.ensure_future(self.mqtt.start()),\n asyncio.ensure_future(self.process()),\n asyncio.ensure_future(self.scavenger())\n )\n\n def run(self):\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(self.run_async())\n loop.run_forever()\n except KeyboardInterrupt:\n loop.run_until_complete(loop.shutdown_asyncgens())\n\n\nif __name__ == \"__main__\":\n the_cp = CommandProcessor()\n the_cp.run()\n","sub_path":"cp/cp.py","file_name":"cp.py","file_ext":"py","file_size_in_byte":45650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"184085246","text":"\"\"\"NVDM Tensorflow implementation by Yishu Miao, adapted to work with the Dirichlet distribution by Sophie Burkhardt\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\nimport math\nimport os\nimport utils as utils\nimport sys\nimport argparse\nimport pickle\n\nnp.random.seed(0)\ntf.set_random_seed(0)\n\nflags = tf.app.flags\nflags.DEFINE_integer('batch_size', 200, 'Batch size.')\nflags.DEFINE_integer('n_hidden', 100, 'Size of each hidden layer.')\nflags.DEFINE_boolean('test', True, 'Process test data.')\nflags.DEFINE_string('non_linearity', 'relu', 'Non-linearity of the MLP.')\nflags.DEFINE_string('summaries_dir','summaries','where to save the summaries')\nFLAGS = flags.FLAGS\n\nclass NVDM(object):\n \"\"\" Neural Variational Document Model -- BOW VAE.\n \"\"\"\n def __init__(self, \n vocab_size,\n n_hidden,\n n_topic,\n learning_rate, \n batch_size,\n non_linearity,\n adam_beta1,\n adam_beta2,\n dir_prior):\n tf.reset_default_graph()\n self.vocab_size = vocab_size\n self.n_hidden = n_hidden\n self.n_topic = n_topic\n self.n_sample = 1#n_sample\n self.non_linearity = non_linearity\n self.learning_rate = learning_rate\n self.batch_size = batch_size\n\n lda=False\n self.x = tf.placeholder(tf.float32, [None, vocab_size], name='input')\n self.mask = tf.placeholder(tf.float32, [None], name='mask') # mask paddings\n self.warm_up = tf.placeholder(tf.float32, (), name='warm_up') # warm up\n self.adam_beta1=adam_beta1\n self.adam_beta2=adam_beta2\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n self.min_alpha = tf.placeholder(tf.float32,(), name='min_alpha')\n # encoder\n with tf.variable_scope('encoder'): \n self.enc_vec = utils.mlp(self.x, [self.n_hidden], self.non_linearity)\n self.enc_vec = tf.nn.dropout(self.enc_vec,self.keep_prob)\n self.mean = tf.contrib.layers.batch_norm(utils.linear(self.enc_vec, self.n_topic, scope='mean'))\n self.alpha = tf.maximum(self.min_alpha,tf.log(1.+tf.exp(self.mean)))\n #Dirichlet prior alpha0\n self.prior = tf.ones((batch_size,self.n_topic), dtype=tf.float32, name='prior')*dir_prior\n \n \n self.analytical_kld = tf.lgamma(tf.reduce_sum(self.alpha,axis=1))-tf.lgamma(tf.reduce_sum(self.prior,axis=1))\n self.analytical_kld-=tf.reduce_sum(tf.lgamma(self.alpha),axis=1)\n 
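# The accumulations above and below assemble the closed-form KL divergence\n # between two Dirichlet densities term by term:\n # KL(Dir(alpha) || Dir(prior)) =\n # log G(sum_k alpha_k) - log G(sum_k prior_k)\n # - sum_k log G(alpha_k) + sum_k log G(prior_k)\n # + sum_k (alpha_k - prior_k) * (psi(alpha_k) - psi(sum_j alpha_j))\n # where G is the gamma function and psi the digamma function.\n 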
self.analytical_kld+=tf.reduce_sum(tf.lgamma(self.prior),axis=1)\n minus = self.alpha-self.prior\n test = tf.reduce_sum(tf.multiply(minus,tf.digamma(self.alpha)-tf.reshape(tf.digamma(tf.reduce_sum(self.alpha,1)),(batch_size,1))),1)\n self.analytical_kld+=test\n self.analytical_kld = self.mask*self.analytical_kld # mask paddings\n max_kld = tf.argmax(self.analytical_kld,0)\n\n with tf.variable_scope('decoder'):\n if self.n_sample ==1: # single sample\n u = tf.random_uniform((batch_size,self.n_topic))\n with tf.variable_scope('prob'):\n #CDF transform\n self.doc_vec = tf.pow(u*self.alpha*tf.exp(tf.lgamma(self.alpha)),1./self.alpha)\n #normalize\n self.doc_vec = tf.div(self.doc_vec,tf.reshape(tf.reduce_sum(self.doc_vec,1), (-1, 1)))\n self.doc_vec.set_shape(self.alpha.get_shape())\n #reconstruction\n if lda:\n logits = tf.log(tf.clip_by_value(utils.linear_LDA(self.doc_vec, self.vocab_size, scope='projection',no_bias=True),1e-10,1.0))\n else:\n logits = tf.nn.log_softmax(tf.contrib.layers.batch_norm(utils.linear(self.doc_vec, self.vocab_size, scope='projection',no_bias=True)))\n self.recons_loss = -tf.reduce_sum(tf.multiply(logits, self.x), 1)\n \n dir1=tf.contrib.distributions.Dirichlet(self.prior)\n dir2=tf.contrib.distributions.Dirichlet(self.alpha)\n self.kld = dir2.log_prob(self.doc_vec)-dir1.log_prob(self.doc_vec)\n max_kld_sampled = tf.arg_max(self.kld,0)\n # multiple samples\n #not implemented\n \n self.objective = self.recons_loss + self.warm_up*self.analytical_kld\n self.true_objective = self.recons_loss + self.kld\n \n self.analytical_objective = self.recons_loss+self.analytical_kld\n \n fullvars = tf.trainable_variables()\n\n enc_vars = utils.variable_parser(fullvars, 'encoder')\n dec_vars = utils.variable_parser(fullvars, 'decoder')\n \n #this is the standard gradient for the reconstruction network\n dec_grads = tf.gradients(self.objective, dec_vars)\n \n \n #####################################################\n #Now calculate the gradient for the encoding network#\n #####################################################\n \n \n kl_grad = tf.gradients(self.analytical_kld,enc_vars)\n \n g_rep = tf.gradients(self.recons_loss,enc_vars)\n \n enc_grads = [g_r+self.warm_up*g_e for g_r,g_e in zip(g_rep,kl_grad)]\n \n \n \n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,beta1=self.adam_beta1,beta2=self.adam_beta2)#,beta1=0.99\n self.optim_enc = optimizer.apply_gradients(zip(enc_grads, enc_vars))\n self.optim_dec = optimizer.apply_gradients(zip(dec_grads, dec_vars))\n self.optim_all = optimizer.apply_gradients(list(zip(enc_grads, enc_vars))+list(zip(dec_grads, dec_vars)))\n \n\n\ndef train(sess, model, \n train_url, \n test_url, \n batch_size, \n vocab_size,\n alternate_epochs=1,#10\n lexicon=[],\n result_file='test.txt',\n B=1,\n warm_up_period=100):\n \"\"\"train nvdm model.\"\"\"\n train_set, train_count = utils.data_set(train_url)\n test_set, test_count = utils.data_set(test_url)\n # hold-out development dataset\n train_size=len(train_set)\n validation_size=int(train_size*0.1)\n dev_set = train_set[:validation_size]\n dev_count = train_count[:validation_size]\n train_set = train_set[validation_size:]\n train_count = train_count[validation_size:]\n #print('sizes',train_size,validation_size,len(dev_set),len(train_set))\n optimize_jointly = True\n dev_batches = utils.create_batches(len(dev_set), batch_size, shuffle=False)\n test_batches = utils.create_batches(len(test_set), batch_size, shuffle=False)\n warm_up = 0\n min_alpha = 0.00001#\n curr_B=B\n\n 
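# Early-stopping state: training halts once the held-out (dev) perplexity has\n # not improved for early_stopping_iters consecutive epochs; the best model\n # seen so far is checkpointed and restored before the final test pass.\n 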
best_print_ana_ppx=1e10\n early_stopping_iters=30\n no_improvement_iters=0\n stopped=False\n epoch=-1\n #for epoch in range(training_epochs):\n while not stopped:\n epoch+=1\n train_batches = utils.create_batches(len(train_set), batch_size, shuffle=True)\n if warm_up<1.:\n warm_up += 1./warm_up_period\n else:\n warm_up=1.\n \n print('B',curr_B)\n #-------------------------------\n # train\n #for switch in range(0, 2):\n if optimize_jointly:\n optim = model.optim_all\n print_mode = 'updating encoder and decoder'\n elif switch == 0:\n optim = model.optim_dec\n print_mode = 'updating decoder'\n else:\n optim = model.optim_enc\n print_mode = 'updating encoder'\n for i in range(alternate_epochs):\n loss_sum = 0.0\n ana_loss_sum = 0.0\n ppx_sum = 0.0\n kld_sum_train = 0.0\n ana_kld_sum_train = 0.0\n word_count = 0\n doc_count = 0\n recon_sum=0.0\n for idx_batch in train_batches:\n data_batch, count_batch, mask = utils.fetch_data(\n train_set, train_count, idx_batch, vocab_size)\n input_feed = {model.x.name: data_batch, model.mask.name: mask,model.keep_prob.name: 0.75,model.warm_up.name: warm_up,model.min_alpha.name:min_alpha}\n _, (loss,recon, kld_train,ana_loss,ana_kld_train) = sess.run((optim, \n [model.true_objective, model.recons_loss, model.kld,model.analytical_objective,model.analytical_kld]),\n input_feed)\n loss_sum += np.sum(loss)\n ana_loss_sum += np.sum(ana_loss)\n kld_sum_train += np.sum(kld_train) / np.sum(mask) \n ana_kld_sum_train += np.sum(ana_kld_train) / np.sum(mask)\n word_count += np.sum(count_batch)\n # to avoid nan error\n count_batch = np.add(count_batch, 1e-12)\n # per document loss\n ppx_sum += np.sum(np.divide(loss, count_batch)) \n doc_count += np.sum(mask)\n recon_sum+=np.sum(recon)\n print_loss = recon_sum/len(train_batches)\n dec_vars = utils.variable_parser(tf.trainable_variables(), 'decoder')\n phi = dec_vars[0]\n phi = sess.run(phi)\n utils.print_top_words(phi, lexicon,result_file=None)\n print_ppx = np.exp(loss_sum / word_count)\n print_ana_ppx = np.exp(ana_loss_sum / word_count)\n print_ppx_perdoc = np.exp(ppx_sum / doc_count)\n print_kld_train = kld_sum_train/len(train_batches)\n print_ana_kld_train = ana_kld_sum_train/len(train_batches)\n print('| Epoch train: {:d} |'.format(epoch+1), \n print_mode, '{:d}'.format(i),\n '| Corpus ppx: {:.5f}'.format(print_ppx), # perplexity for all docs\n '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc), # perplexity for per doc\n '| KLD: {:.5}'.format(print_kld_train),\n '| Loss: {:.5}'.format(print_loss),\n '| ppx anal.: {:.5f}'.format(print_ana_ppx),\n '|KLD anal.: {:.5f}'.format(print_ana_kld_train))\n \n \n #-------------------------------\n # dev\n loss_sum = 0.0\n kld_sum_dev = 0.0\n ppx_sum = 0.0\n word_count = 0\n doc_count = 0\n recon_sum=0.0\n print_ana_ppx = 0.0\n ana_loss_sum = 0.0\n for idx_batch in dev_batches:\n data_batch, count_batch, mask = utils.fetch_data(\n dev_set, dev_count, idx_batch, vocab_size)\n input_feed = {model.x.name: data_batch, model.mask.name: mask,model.keep_prob.name: 1.0,model.warm_up.name: 1.0,model.min_alpha.name:min_alpha}\n loss,recon, kld_dev,ana_kld,ana_loss = sess.run([model.objective, model.recons_loss,model.kld, model.analytical_kld,model.analytical_objective],\n input_feed)\n loss_sum += np.sum(loss)\n ana_loss_sum += np.sum(ana_loss)\n kld_sum_dev += np.sum(kld_dev) / np.sum(mask) \n word_count += np.sum(count_batch)\n count_batch = np.add(count_batch, 1e-12)\n ppx_sum += np.sum(np.divide(loss, count_batch))\n doc_count += np.sum(mask) \n recon_sum+=np.sum(recon)\n 
print_ana_ppx = np.exp(ana_loss_sum / word_count)\n        print_ppx = np.exp(loss_sum / word_count)\n        print_ppx_perdoc = np.exp(ppx_sum / doc_count)\n        print_kld_dev = kld_sum_dev/len(dev_batches)\n        print_loss = recon_sum/len(dev_batches)\n        if print_ppx < best_print_ana_ppx:\n            best_print_ana_ppx = print_ppx\n            no_improvement_iters = 0\n            #save improved model\n            tf.train.Saver().save(sess, 'models/improved_model') \n        \n        else:\n            no_improvement_iters+=1\n            print('no_improvement_iters',no_improvement_iters,'best ppx',best_print_ana_ppx)\n            if no_improvement_iters>=early_stopping_iters:\n                #if model has not improved for 30 iterations, stop training\n                ###########STOP TRAINING############\n                stopped=True\n                print('stop training after',epoch,'iterations,no_improvement_iters',no_improvement_iters)\n                ###########LOAD BEST MODEL##########\n                print('load stored model')\n                tf.train.Saver().restore(sess,'models/improved_model')\n        print('| Epoch dev: {:d} |'.format(epoch+1), \n               '| Perplexity: {:.9f}'.format(print_ppx),\n               '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc),\n               '| KLD: {:.5}'.format(print_kld_dev) ,\n               '| Loss: {:.5}'.format(print_loss))  \n\n        #-------------------------------\n        # test\n        if FLAGS.test:\n            \n            loss_sum = 0.0\n            kld_sum_test = 0.0\n            ppx_sum = 0.0\n            word_count = 0\n            doc_count = 0\n            recon_sum = 0.0\n            ana_loss_sum = 0.0\n            ana_kld_sum_test = 0.0\n            for idx_batch in test_batches:\n                data_batch, count_batch, mask = utils.fetch_data(\n                test_set, test_count, idx_batch, vocab_size)\n                input_feed = {model.x.name: data_batch, model.mask.name: mask,model.keep_prob.name: 1.0,model.warm_up.name: 1.0,model.min_alpha.name:min_alpha}\n                loss, recon,kld_test,ana_loss,ana_kld_test = sess.run([model.objective, model.recons_loss,model.kld,model.analytical_objective,model.analytical_kld],\n                             input_feed)\n                loss_sum += np.sum(loss)\n                kld_sum_test += np.sum(kld_test)/np.sum(mask) \n                ana_loss_sum += np.sum(ana_loss)\n                ana_kld_sum_test += np.sum(ana_kld_test) / np.sum(mask)\n                word_count += np.sum(count_batch)\n                count_batch = np.add(count_batch, 1e-12)\n                ppx_sum += np.sum(np.divide(loss, count_batch))\n                doc_count += np.sum(mask) \n                recon_sum+=np.sum(recon)\n            print_loss = recon_sum/len(test_batches)\n            print_ppx = np.exp(loss_sum / word_count)\n            print_ppx_perdoc = np.exp(ppx_sum / doc_count)\n            print_kld_test = kld_sum_test/len(test_batches)\n            print_ana_ppx = np.exp(ana_loss_sum / word_count)\n            print_ana_kld_test = ana_kld_sum_test/len(train_batches)\n            print('| Epoch test: {:d} |'.format(epoch+1), \n                   '| Perplexity: {:.9f}'.format(print_ppx),\n                   '| Per doc ppx: {:.5f}'.format(print_ppx_perdoc),\n                   '| KLD: {:.5}'.format(print_kld_test),\n                   '| Loss: {:.5}'.format(print_loss),\n                   '| ppx anal.: {:.5f}'.format(print_ana_ppx),\n                   '|KLD anal.: {:.5f}'.format(print_ana_kld_test))  \n            if stopped:#epoch==training_epochs-1:\n                #only do it once in the end\n                print('calculate topic coherence (might take a few minutes)')\n                coherence=utils.topic_coherence(test_set,phi, lexicon)\n                print('topic coherence',str(coherence))\n    \n    \ndef myrelu(features):\n    return tf.maximum(features, 0.0)\n\ndef parseArgs():\n    #get line from config file\n    args = sys.argv\n    linum = int(args[1])\n    argstring=''\n    configname = 'tfconfig'\n    with open(configname,'r') as rf:\n        for i,line in enumerate(rf):\n            #print i,line\n            argstring = line\n            if i+1==linum:\n                print(line)\n                break\n    argparser = argparse.ArgumentParser()\n    #define arguments\n    argparser.add_argument('--adam_beta1',default=0.9, type=float)\n    argparser.add_argument('--adam_beta2',default=0.999, type=float)\n    argparser.add_argument('--learning_rate',default=1e-3, type=float)\n    
argparser.add_argument('--dir_prior',default=0.1, type=float)\n argparser.add_argument('--n_topic',default=50, type=int)\n argparser.add_argument('--n_sample',default=1, type=int)\n argparser.add_argument('--warm_up_period',default=100, type=int)\n argparser.add_argument('--data_dir',default='data/20news', type=str)\n return argparser.parse_args(argstring.split())\n\ndef main(argv=None):\n if FLAGS.non_linearity == 'tanh':\n non_linearity = tf.nn.tanh\n elif FLAGS.non_linearity == 'sigmoid':\n non_linearity = tf.nn.sigmoid\n else:\n non_linearity = myrelu#max(features, 1.1)#tf.nn.relu\n \n args = parseArgs()\n adam_beta1 = args.adam_beta1\n adam_beta2 = args.adam_beta2\n learning_rate = args.learning_rate\n dir_prior = args.dir_prior\n warm_up_period = args.warm_up_period\n n_sample = args.n_sample\n n_topic = args.n_topic\n lexicon=[]\n vocab_path = os.path.join(args.data_dir, 'vocab.new')\n with open(vocab_path,'r') as rf:\n for line in rf:\n word = line.split()[0]\n lexicon.append(word)\n vocab_size=len(lexicon)\n \n nvdm = NVDM(vocab_size=vocab_size,\n n_hidden=FLAGS.n_hidden,\n n_topic=n_topic, \n learning_rate=learning_rate, \n batch_size=FLAGS.batch_size,\n non_linearity=non_linearity,\n adam_beta1=adam_beta1,\n adam_beta2=adam_beta2,\n dir_prior=dir_prior)\n sess = tf.Session()\n init = tf.global_variables_initializer()\n result = sess.run(init)\n train_url = os.path.join(args.data_dir, 'train.feat')\n test_url = os.path.join(args.data_dir, 'test.feat')\n \n train(sess, nvdm, train_url, test_url, FLAGS.batch_size,vocab_size,lexicon=lexicon,\n result_file=None,\n warm_up_period = warm_up_period)\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"nvdm_dirichlet_invCDF.py","file_name":"nvdm_dirichlet_invCDF.py","file_ext":"py","file_size_in_byte":16752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"574089679","text":"#!/usr/bin/env python\n# Copyright 2013, Big Switch Networks, Inc.\n#\n# LoxiGen is licensed under the Eclipse Public License, version 1.0 (EPL), with\n# the following special exception:\n#\n# LOXI Exception\n#\n# As a special exception to the terms of the EPL, you may distribute libraries\n# generated by LoxiGen (LoxiGen Libraries) under the terms of your choice, provided\n# that copyright and licensing notices generated by LoxiGen are not altered or removed\n# from the LoxiGen Libraries and the notice provided below is (i) included in\n# the LoxiGen Libraries, if distributed in source code form and (ii) included in any\n# documentation for the LoxiGen Libraries, if distributed in binary form.\n#\n# Notice: \"Copyright 2013, Big Switch Networks, Inc. This library was generated by the LoxiGen Compiler.\"\n#\n# You may not use this file except in compliance with the EPL or LOXI Exception. You may obtain\n# a copy of the EPL at:\n#\n# http://www.eclipse.org/legal/epl-v10.html\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# EPL for the specific language governing permissions and limitations\n# under the EPL.\nimport unittest\nfrom testutil import add_datafiles_tests\n\ntry:\n import loxi\n import loxi.of12 as ofp\n from loxi.generic_util import OFReader\nexcept ImportError:\n exit(\"loxi package not found. 
Try setting PYTHONPATH.\")\n\nclass TestImports(unittest.TestCase):\n def test_toplevel(self):\n import loxi\n self.assertTrue(hasattr(loxi, \"ProtocolError\"))\n self.assertEquals(loxi.version_names[3], \"1.2\")\n ofp = loxi.protocol(3)\n self.assertEquals(ofp.OFP_VERSION, 3)\n self.assertTrue(hasattr(ofp, \"action\"))\n self.assertTrue(hasattr(ofp, \"common\"))\n self.assertTrue(hasattr(ofp, \"const\"))\n self.assertTrue(hasattr(ofp, \"message\"))\n self.assertTrue(hasattr(ofp, \"oxm\"))\n\n def test_version(self):\n import loxi\n self.assertTrue(hasattr(loxi.of12, \"ProtocolError\"))\n self.assertTrue(hasattr(loxi.of12, \"OFP_VERSION\"))\n self.assertEquals(loxi.of12.OFP_VERSION, 3)\n self.assertTrue(hasattr(loxi.of12, \"action\"))\n self.assertTrue(hasattr(loxi.of12, \"common\"))\n self.assertTrue(hasattr(loxi.of12, \"const\"))\n self.assertTrue(hasattr(loxi.of12, \"message\"))\n self.assertTrue(hasattr(loxi.of12, \"oxm\"))\n\n# The majority of the serialization tests are created here using the files in\n# the test_data directory.\nclass TestDataFiles(unittest.TestCase):\n pass\nadd_datafiles_tests(TestDataFiles, 'of12/', ofp)\n\nclass TestAllOF12(unittest.TestCase):\n \"\"\"\n Round-trips every class through serialization/deserialization.\n Not a replacement for handcoded tests because it only uses the\n default member values.\n \"\"\"\n\n def setUp(self):\n mods = [ofp.action,ofp.message,ofp.common,ofp.oxm]\n self.klasses = [klass for mod in mods\n for klass in mod.__dict__.values()\n if isinstance(klass, type) and\n issubclass(klass, loxi.OFObject) and\n hasattr(klass, 'pack')]\n self.klasses.sort(key=lambda x: str(x))\n\n def test_serialization(self):\n expected_failures = [\n ofp.action.set_field, # field defaults to None\n ]\n for klass in self.klasses:\n def fn():\n obj = klass()\n if hasattr(obj, \"xid\"): obj.xid = 42\n buf = obj.pack()\n obj2 = klass.unpack(OFReader(buf))\n self.assertEquals(obj, obj2)\n if klass in expected_failures:\n self.assertRaises(Exception, fn)\n else:\n fn()\n\n def test_parse_message(self):\n expected_failures = []\n for klass in self.klasses:\n if not issubclass(klass, ofp.message.message):\n continue\n def fn():\n obj = klass(xid=42)\n buf = obj.pack()\n obj2 = ofp.message.parse_message(buf)\n self.assertEquals(obj, obj2)\n if klass in expected_failures:\n self.assertRaises(Exception, fn)\n else:\n fn()\n\n def test_show(self):\n expected_failures = []\n for klass in self.klasses:\n def fn():\n obj = klass()\n if hasattr(obj, \"xid\"): obj.xid = 42\n obj.show()\n if klass in expected_failures:\n self.assertRaises(Exception, fn)\n else:\n fn()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"py_gen/tests/of12.py","file_name":"of12.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"127201054","text":"\"\"\"\r\n\n\nWrite a function that removes any non-letters from a string, returning a well-\nknown film title.\n\n### Examples\n\n letters_only(\"R!=:~0o0./c&}9k`60=y\") ➞ \"Rocky\"\n \n letters_only(\"^,]%4B|@56a![0{2m>b1&4i4\") ➞ \"Bambi\"\n \n letters_only(\"^U)6$22>8p).\") ➞ \"Up\"\n\n### Notes\n\nSee the **Resources** section for more information on Python string methods.\n\n\"\"\"\r\n\ndef letters_only(txt):\n newstring = ''\n chars = [char for char in txt]\n for item in chars:\n if str(item).isalpha():\n newstring += item\n else:\n continue\n return 
newstring\n\n","sub_path":"vqMFpARj3DvELLDmZ_8.py","file_name":"vqMFpARj3DvELLDmZ_8.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"257907189","text":"#!/usr/bin/env python\n\n#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom apache_ranger.model.ranger_base import RangerBase\n\n\nclass RangerSecurityZoneService:\n def __init__(self, resources=None):\n self.resources = resources if resources is not None else []\n\n def __repr__(self):\n return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n\n\nclass RangerSecurityZone(RangerBase):\n def __init__(self, id=None, guid=None, createdBy=None, updatedBy=None, createTime=None, updateTime=None,\n version=None, isEnabled=None, name=None, services=None, tagServices=None, adminUsers=None,\n adminUserGroups=None, auditUsers=None, auditUserGroups=None, description=None):\n super().__init__(id, guid, createdBy, updatedBy, createTime, updateTime, version, isEnabled)\n self.name = name\n self.services = services if services is not None else {}\n self.tagServices = tagServices if tagServices is not None else []\n self.adminUsers = adminUsers if adminUsers is not None else []\n self.adminUserGroups = adminUserGroups if adminUserGroups is not None else []\n self.auditUsers = auditUsers if auditUsers is not None else []\n self.auditUserGroups = auditUserGroups if auditUserGroups is not None else []\n self.description = description\n return\n\n def __repr__(self):\n return json.dumps(self, default=lambda x: x.__dict__, sort_keys=True, indent=4)\n","sub_path":"intg/src/main/python/apache_ranger/model/ranger_security_zone.py","file_name":"ranger_security_zone.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"369545360","text":"# Standard Library\nimport gzip\nimport logging\nfrom typing import Any, Iterable, List, Mapping, Tuple\n\n# Third Party Imports\nimport jsonschema\nfrom cityhash import CityHash64\n\n# Local Imports\nimport bel.edge.edges\nimport bel.lang.belobj\nfrom bel.Config import config\nfrom bel.utils import http_client\n\nlog = logging.getLogger(__name__)\n\n\n# TODO is this code being used? We also have bel.nanopub.validate.validate(nanopub, error_level) for validation\n\n\nclass Nanopub(object):\n \"\"\"Nanopub object to manage Nanopub processing\"\"\"\n\n def __init__(self, endpoint: str = config.get(\"api\", \"\")) -> None:\n \"\"\" Initialize Nanopub\n\n Args:\n endpoint (str): BEL.bio API endpoint uri, e.g. 
https://api.bel.bio/v1, default read from config\n \"\"\"\n self.endpoint = endpoint\n\n def validate(self, nanopub: Mapping[str, Any]) -> Tuple[bool, List[Tuple[str, str]]]:\n \"\"\"Validates using the nanopub schema\n\n Args:\n nanopub (Mapping[str, Any]): nanopub dict\n\n Returns:\n Tuple[bool, List[Tuple[str, str]]]:\n bool: Is valid? Yes = True, No = False\n List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)\n e.g. [('WARNING', \"Context ID not found\")] \"\"\"\n\n # Validate nanopub\n (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)\n if not is_valid:\n return messages\n\n # Extract BEL Version\n if nanopub[\"nanopub\"][\"type\"][\"name\"].upper() == \"BEL\":\n bel_version = nanopub[\"nanopub\"][\"type\"][\"version\"]\n else:\n is_valid = False\n return (\n is_valid,\n f\"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}\",\n )\n\n all_messages = []\n # Validate BEL Statements\n bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)\n for edge in nanopub[\"nanopub\"][\"edges\"]:\n bel_statement = f\"{edge['subject']} {edge['relation']} {edge['object']}\"\n parse_obj = bel_obj.parse(bel_statement)\n if not parse_obj.valid:\n all_messages.extend(\n (\n \"ERROR\",\n f\"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}\",\n )\n )\n\n # Validate nanopub.context\n for context in nanopub[\"nanopub\"][\"context\"]:\n (is_valid, messages) = self.validate_context(context)\n all_messages.extend(messages)\n\n is_valid = True\n for _type, msg in all_messages:\n if _type == \"ERROR\":\n is_valid = False\n\n return (is_valid, all_messages)\n\n def validate_context(self, context: Mapping[str, Any]) -> Tuple[bool, List[Tuple[str, str]]]:\n \"\"\" Validate context\n\n Args:\n context (Mapping[str, Any]): context dictionary of type, id and label\n\n Returns:\n Tuple[bool, List[Tuple[str, str]]]:\n bool: Is valid? Yes = True, No = False\n List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('ERROR|WARNING', msg)\n e.g. [('WARNING', \"Context ID not found\")]\n \"\"\"\n\n url = f'{self.endpoint}/terms/{context[\"id\"]}'\n\n res = http_client.get(url)\n if res.status_code == 200:\n return (True, [])\n else:\n return (False, [(\"WARNING\", f'Context {context[\"id\"]} not found at {url}')])\n\n def bel_edges(\n self,\n nanopub: Mapping[str, Any],\n namespace_targets: Mapping[str, List[str]] = {},\n rules: List[str] = [],\n orthologize_target: str = None,\n ) -> List[Mapping[str, Any]]:\n \"\"\"Create BEL Edges from BEL nanopub\n\n Args:\n nanopub (Mapping[str, Any]): bel nanopub\n namespace_targets (Mapping[str, List[str]]): what namespaces to canonicalize\n rules (List[str]): which computed edge rules to process, default is all,\n look at BEL Specification yaml file for computed edge signature keys,\n e.g. degradation, if any rule in list is 'skip', then skip computing edges\n just return primary_edge\n orthologize_target (str): species to convert BEL into, e.g. TAX:10090 for mouse, default option does not orthologize\n\n Returns:\n List[Mapping[str, Any]]: edge list with edge attributes (e.g. 
context)\n \"\"\"\n\n edges = bel.edge.edges.create_edges(\n nanopub,\n self.endpoint,\n namespace_targets=namespace_targets,\n rules=rules,\n orthologize_target=orthologize_target,\n )\n\n return edges\n\n\ndef validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:\n \"\"\"Validate nanopub against jsonschema for nanopub\n\n Args:\n nanopub (Mapping[str, Any]): nanopub dict\n schema (Mapping[str, Any]): nanopub schema\n\n Returns:\n Tuple[bool, List[str]]:\n bool: Is valid? Yes = True, No = False\n List[Tuple[str, str]]: Validation issues, empty if valid, tuple is ('Error|Warning', msg)\n e.g. [('ERROR', \"'subject' is a required property\")]\n \"\"\"\n\n v = jsonschema.Draft4Validator(schema)\n messages = []\n errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)\n for error in errors:\n for suberror in sorted(error.context, key=lambda e: e.schema_path):\n print(list(suberror.schema_path), suberror.message, sep=\", \")\n messages.append((\"ERROR\", suberror.message))\n\n is_valid = True\n if errors:\n is_valid = False\n\n return (is_valid, messages)\n\n\n# Following is used in nanopub-tools codebase\ndef hash_nanopub(nanopub: Mapping[str, Any]) -> str:\n \"\"\"Create CityHash64 from nanopub for duplicate check\n\n TODO - check that this hash value is consistent between C# and Python running on\n laptop and server\n\n Build string to hash\n\n Collect flat array of (all values.strip()):\n nanopub.type.name\n nanopub.type.version\n\n One of:\n nanopub.citation.database.name\n nanopub.citation.database.id\n\n OR\n\n nanopub.citation.database.uri\n\n OR\n\n nanopub.citation.database.reference\n\n Extend with sorted list of assertions (SRO as single string with space between S, R and O)\n\n Extend with sorted list of annotations (nanopub.annotations.type + ' ' + nanopub.annotations.id)\n\n Convert array to string by joining array elements separated by a space\n\n Create CityHash64(str) and return\n\n \"\"\"\n\n hash_list = []\n\n # Type\n hash_list.append(nanopub[\"nanopub\"][\"type\"].get(\"name\", \"\").strip())\n hash_list.append(nanopub[\"nanopub\"][\"type\"].get(\"version\", \"\").strip())\n\n # Citation\n if nanopub[\"nanopub\"][\"citation\"].get(\"database\", False):\n hash_list.append(nanopub[\"nanopub\"][\"citation\"][\"database\"].get(\"name\", \"\").strip())\n hash_list.append(nanopub[\"nanopub\"][\"citation\"][\"database\"].get(\"id\", \"\").strip())\n elif nanopub[\"nanopub\"][\"citation\"].get(\"uri\", False):\n hash_list.append(nanopub[\"nanopub\"][\"citation\"].get(\"uri\", \"\").strip())\n elif nanopub[\"nanopub\"][\"citation\"].get(\"reference\", False):\n hash_list.append(nanopub[\"nanopub\"][\"citation\"].get(\"reference\", \"\").strip())\n\n # Assertions\n assertions = []\n for assertion in nanopub[\"nanopub\"][\"assertions\"]:\n if assertion.get(\"relation\") is None:\n assertion[\"relation\"] = \"\"\n if assertion.get(\"object\") is None:\n assertion[\"object\"] = \"\"\n assertions.append(\n \" \".join(\n (\n assertion[\"subject\"].strip(),\n assertion.get(\"relation\", \"\").strip(),\n assertion.get(\"object\", \"\").strip(),\n )\n ).strip()\n )\n assertions = sorted(assertions)\n hash_list.extend(assertions)\n\n # Annotations\n annotations = []\n\n for anno in nanopub[\"nanopub\"][\"annotations\"]:\n annotations.append(\n \" \".join((anno.get(\"type\", \"\").strip(), anno.get(\"id\", \"\").strip())).strip()\n )\n\n annotations = sorted(annotations)\n hash_list.extend(annotations)\n\n np_string = \" \".join([l.lower() for l in hash_list])\n\n 
return \"{:x}\".format(CityHash64(np_string))\n","sub_path":"bel/nanopub/nanopubs.py","file_name":"nanopubs.py","file_ext":"py","file_size_in_byte":8476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"181589299","text":"from threading import Thread, RLock\nfrom random import randint\nimport pygame as pg\nimport json\n\nclass task_recipe_checker(Thread):\n def __init__(self, task_timer, level, score):\n self.score = score\n self.kebab_content = []\n self.target_list = []\n self.timer = task_timer\n self.player_level = level\n self.lock = RLock()\n self.end_recipe = False\n self.recipe_value = [pg.image.load(\"images/indicators/good.png\"), pg.image.load(\"images/indicators/bad.png\")]\n self.image_recipe_value = None\n #Graphical recipes\n self.grecipes = {\n \"Kebab_meat_Tomato_Salad_Cheese_Onions\" : [\"Fromage\", \"Viande\", \"Onions\", \"Salade\", \"Tomate\"],\n \"Tomato_Salad_Onions_Cheese\" : [\"Fromage\", \"Onions\", \"Salade\", \"Tomate\"],\n \"Kebab_meat_Tomato_Salad_Onions\" : [\"Viande\", \"Onions\", \"Salade\", \"Tomate\"],\n \"Tomato_Salad_Onions\" : [\"Onions\", \"Salade\", \"Tomate\"],\n \"Kebab_meat_Tomato_Cheese_Salad\" : [\"Fromage\", \"Viande\", \"Salade\", \"Tomate\"],\n \"Tomato_Cheese_Salad\" : [\"Fromage\", \"Salade\", \"Tomate\"],\n \"Kebab_meat_Tomato_Salad\" : [\"Viande\", \"Salade\", \"Tomate\"],\n \"Tomato_Salad\" : [\"Salade\", \"Tomate\"],\n \"Kebab_meat_Tomato_Cheese_Onions\" : [\"Fromage\", \"Viande\", \"Onions\", \"Tomate\"],\n \"Tomato_Cheese_Onions\" : [\"Fromage\", \"Onions\", \"Tomate\"],\n \"Kebab_meat_Tomato_Onions\" : [\"Viande\", \"Onions\", \"Tomate\"],\n \"Tomato_Onions\" : [\"Onions\", \"Tomate\"],\n \"Kebab_meat_Tomato_Cheese\" : [\"Fromage\", \"Viande\", \"Tomate\"],\n \"Tomato_Cheese\" : [\"Fromage\", \"Tomate\"],\n \"Kebab_meat_Tomato\" : [\"Viande\", \"Tomate\"],\n \"Tomato\" : [\"Tomate\"],\n \"Kebab_meat_Cheese_Onions_Salad\" : [\"Fromage\", \"Viande\", \"Onions\", \"Salade\"],\n \"Salad_Cheese_Onions\" : [\"Fromage\", \"Onions\", \"Salade\"],\n \"Kebab_meat_Onions_Salad\" : [\"Viande\", \"Onions\", \"Salade\"],\n \"Salad_Cheese\" : [\"Fromage\", \"Salade\"],\n \"Salad_Onions\" : [\"Onions\", \"Salade\"],\n \"Salad_Kebab_meat\" : [\"Viande\", \"Salade\"],\n \"Salad\" : [\"Salade\"],\n \"Kebab_meat_Cheese_Onions\" : [\"Fromage\", \"Viande\", \"Onions\"],\n \"Onions_Cheese\" : [\"Fromage\", \"Onions\"],\n \"Kebab_meat_Onions\" : [\"Viande\", \"Onions\"],\n \"Onions\" : [\"Onions\"],\n \"Kebab_meat_Cheese\" : [\"Fromage\", \"Viande\"],\n \"Cheese\" : [\"Fromage\"],\n \"Kebab_meat\" : [\"Viande\"],\n \"Kebab_meat_Salad_Cheese\" : [\"Fromage\", \"Viande\", \"Salade\"]\n }\n\n try:\n f = open(\"ressources/items.json\")\n except IOError as e:\n print(e)\n\n with f as json_file:\n self.items_list = json.load(json_file)\n\n Thread.__init__(self)\n\n def get_target_list(self):\n with self.lock:\n return self.target_list\n\n def add_kebab_item(self, i):\n with self.lock:\n self.kebab_content.append(i)\n\n def get_kebab_content(self):\n with self.lock:\n return self.kebab_content\n\n def random_item(self):\n\n try:\n items_index_level = [key for key in self.items_list\n if int(key) <= self.player_level]\n random_level = items_index_level[randint(0,len(items_index_level)-1)]\n selected_item_list = [item for item in self.items_list[random_level]]\n items_index_list = [key for key in selected_item_list[0]]\n selected_item = selected_item_list[0][\n items_index_list[randint(0, 
len(items_index_list)-1)]\n ]\n except:\n selected_item = 0\n\n return selected_item\n\n def new_recipe(self):\n self.target_list = []\n item_number = randint(2, 7)\n for i in range(0, item_number):\n random_item = self.random_item()\n if random_item != 0:\n self.target_list.append(random_item[\"name\"])\n else:\n pass\n\n #Remove duplicates items\n self.target_list = list(set(self.target_list))\n\n def get_state_image(self):\n with self.lock:\n return self.image_recipe_value\n\n def recipe_is_done(self):\n with self.lock:\n return self.end_recipe\n\n def run(self):\n self.new_recipe()\n recipe_len = len(self.target_list)\n target_in_kebab = []\n\n while self.timer.get_actual_time() > 0:\n for item in set(self.target_list).intersection(self.kebab_content):\n target_in_kebab.append(item)\n del self.target_list[self.target_list.index(item)]\n\n if len(self.kebab_content) == recipe_len:\n if len(target_in_kebab) == recipe_len:\n self.score.update_actual_score(5)\n self.image_recipe_value = self.recipe_value[0]\n else:\n self.score.update_actual_score(-5)\n self.image_recipe_value = self.recipe_value[1]\n\n self.end_recipe = True\n time_ending = self.timer.get_actual_time()\n self.new_recipe()\n recipe_len = len(self.target_list)\n target_in_kebab = []\n self.kebab_content = []\n\n #Unset value image\n if \"time_ending\" in locals():\n if self.timer.get_actual_time() <= time_ending - 2 and self.end_recipe == True:\n self.end_recipe = False\n","sub_path":"_class/task/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":6155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"579682073","text":"\"\"\"Tests for citrine.informatics.predictors serialization.\"\"\"\nfrom copy import deepcopy\nfrom uuid import UUID\n\nimport pytest\n\nfrom citrine.informatics.descriptors import RealDescriptor\nfrom citrine.informatics.predictors import ExpressionPredictor, GeneralizedMeanPropertyPredictor, \\\n GraphPredictor, Predictor, SimpleMLPredictor, IngredientsToSimpleMixturePredictor, \\\n LabelFractionsPredictor, SimpleMixturePredictor, IngredientFractionsPredictor, DeprecatedExpressionPredictor\n\n\ndef valid_serialization_output(data):\n \"\"\"Remove fields that are not preserved by serialization.\"\"\"\n return {x: y for x, y in data.items() if x not in {'status', 'status_info'}}\n\n\ndef test_simple_legacy_deserialization(valid_simple_ml_predictor_data):\n \"\"\"Ensure that a deserialized SimplePredictor looks sane.\"\"\"\n predictor: SimpleMLPredictor = SimpleMLPredictor.build(valid_simple_ml_predictor_data)\n assert predictor.name == 'ML predictor'\n assert predictor.description == 'Predicts z from input x and latent variable y'\n assert len(predictor.inputs) == 1\n assert predictor.inputs[0] == RealDescriptor(\"x\", 0, 100, \"\")\n assert len(predictor.outputs) == 1\n assert predictor.outputs[0] == RealDescriptor(\"z\", 0, 100, \"\")\n assert len(predictor.latent_variables) == 1\n assert predictor.latent_variables[0] == RealDescriptor(\"y\", 0, 100, \"\")\n assert len(predictor.training_data) == 1\n assert predictor.training_data[0].table_id == UUID('e5c51369-8e71-4ec6-b027-1f92bdc14762')\n\n\ndef test_polymorphic_legacy_deserialization(valid_simple_ml_predictor_data):\n \"\"\"Ensure that a polymorphically deserialized SimplePredictor looks sane.\"\"\"\n predictor: SimpleMLPredictor = Predictor.build(valid_simple_ml_predictor_data)\n assert predictor.name == 'ML predictor'\n assert predictor.description == 'Predicts z from input x 
and latent variable y'\n assert len(predictor.inputs) == 1\n assert predictor.inputs[0] == RealDescriptor(\"x\", 0, 100, \"\")\n assert len(predictor.outputs) == 1\n assert predictor.outputs[0] == RealDescriptor(\"z\", 0, 100, \"\")\n assert len(predictor.latent_variables) == 1\n assert predictor.latent_variables[0] == RealDescriptor(\"y\", 0, 100, \"\")\n assert len(predictor.training_data) == 1\n assert predictor.training_data[0].table_id == UUID('e5c51369-8e71-4ec6-b027-1f92bdc14762')\n\n\ndef test_legacy_serialization(valid_simple_ml_predictor_data):\n \"\"\"Ensure that a serialized SimplePredictor looks sane.\"\"\"\n predictor = SimpleMLPredictor.build(valid_simple_ml_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_simple_ml_predictor_data['id']\n assert serialized == valid_serialization_output(valid_simple_ml_predictor_data)\n\n\ndef test_graph_serialization(valid_graph_predictor_data):\n \"\"\"Ensure that a serialized GraphPredictor looks sane.\"\"\"\n graph_data_copy = deepcopy(valid_graph_predictor_data)\n predictor = GraphPredictor.build(valid_graph_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = graph_data_copy['id']\n assert serialized['config']['predictors'] == graph_data_copy['config']['predictors']\n assert serialized == valid_serialization_output(graph_data_copy)\n\n\ndef test_deprecated_expression_serialization(valid_deprecated_expression_predictor_data):\n \"\"\"Ensure that a serialized DeprecatedExpressionPredictor looks sane.\"\"\"\n predictor = DeprecatedExpressionPredictor.build(valid_deprecated_expression_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_deprecated_expression_predictor_data['id']\n assert serialized == valid_serialization_output(valid_deprecated_expression_predictor_data)\n\n\ndef test_expression_serialization(valid_expression_predictor_data):\n \"\"\"Ensure that a serialized ExpressionPredictor looks sane.\"\"\"\n predictor = ExpressionPredictor.build(valid_expression_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_expression_predictor_data['id']\n assert serialized == valid_serialization_output(valid_expression_predictor_data)\n\n\ndef test_ing_to_simple_mixture_serialization(valid_ing_to_simple_mixture_predictor_data):\n \"\"\"Ensure that a serialized IngredientsToSimpleMixturePredictor looks sane.\"\"\"\n predictor = IngredientsToSimpleMixturePredictor.build(valid_ing_to_simple_mixture_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_ing_to_simple_mixture_predictor_data['id']\n assert serialized == valid_serialization_output(valid_ing_to_simple_mixture_predictor_data)\n\n\ndef test_generalized_mean_property_serialization(valid_generalized_mean_property_predictor_data):\n \"\"\"Ensure that a serialized GeneralizedMeanPropertyPredictor looks sane.\"\"\"\n predictor = GeneralizedMeanPropertyPredictor.build(valid_generalized_mean_property_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_generalized_mean_property_predictor_data['id']\n assert serialized == valid_serialization_output(valid_generalized_mean_property_predictor_data)\n\n\ndef test_simple_mixture_predictor_serialization(valid_simple_mixture_predictor_data):\n predictor = SimpleMixturePredictor.build(valid_simple_mixture_predictor_data)\n serialized = predictor.dump()\n serialized['id'] = valid_simple_mixture_predictor_data['id']\n assert serialized == valid_serialization_output(valid_simple_mixture_predictor_data)\n\n\ndef 
test_label_fractions_serialization(valid_label_fractions_predictor_data):\n    \"\"\"Ensure that a serialized LabelFractionPredictor looks sane.\"\"\"\n    predictor = LabelFractionsPredictor.build(valid_label_fractions_predictor_data)\n    serialized = predictor.dump()\n    serialized['id'] = valid_label_fractions_predictor_data['id']\n    assert serialized == valid_serialization_output(valid_label_fractions_predictor_data)\n\n\ndef test_ingredient_fractions_serialization(valid_ingredient_fractions_predictor_data):\n    \"\"\"Ensure that a serialized IngredientsFractionsPredictor looks sane.\"\"\"\n    predictor = IngredientFractionsPredictor.build(valid_ingredient_fractions_predictor_data)\n    serialized = predictor.dump()\n    serialized[\"id\"] = valid_ingredient_fractions_predictor_data['id']\n    assert serialized == valid_serialization_output(valid_ingredient_fractions_predictor_data)\n\n\ndef test_invalid_predictor_type(invalid_predictor_data):\n    \"\"\"Ensures we raise proper exception when an invalid type is used.\"\"\"\n    with pytest.raises(ValueError):\n        Predictor.build(invalid_predictor_data)\n","sub_path":"tests/serialization/test_predictors.py","file_name":"test_predictors.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"527234468","text":"import math\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nclass NonLinearMethod(metaclass=ABCMeta):\n\n    def __init__(self, function):\n        self.function = function\n\n        # threshold used to decide whether the iteration has converged\n        self.errorRangeLimit = 0.00001\n\n\nclass SteepestDecentMethod(NonLinearMethod):\n    def __init__(self, function, dif_func, x0):\n        super().__init__(function)\n        self.dif_func = dif_func\n        self.x = x0\n        self.transition_x0 = []\n        self.transition_x1 = []\n\n    def backtrack(self, alpha=0.5, beta=0.8):\n        while True:\n            self.transition_x0.append(self.x[0])\n            self.transition_x1.append(self.x[1])\n            dx = 1.0\n            while True:\n                next_x = self.x - dx * self.dif_func(self.x)\n                armijo_rule = self.function(next_x) - self.function(self.x) + alpha * dx * pow(np.linalg.norm(self.dif_func(self.x)), 2)\n                if armijo_rule <= 0:\n                    break\n                else:\n                    dx *= beta\n            self.nextX = next_x\n\n            errorRange = math.fabs(np.linalg.norm(self.nextX - self.x))\n\n            # check whether we have converged\n            if errorRange > self.errorRangeLimit:\n                self.x = self.nextX\n            else:\n                break\n\n    def getAnswer(self, filename=\"\"):\n        if filename != \"\":\n            # draw the plot\n            plt.figure()\n            plt.title(\"Back Tracking Line Search\")\n            plt.xlabel(\"x1\")\n            plt.ylabel(\"x2\")\n            plt.xlim([-5,5])\n            plt.ylim([-5,5])\n            plt.plot(self.transition_x0, self.transition_x1)\n            plt.savefig(filename)\n        optX = self.nextX\n        optA = self.function(optX)\n        return optX, optA\n\n# the function given in the problem\ndef f(x):\n    return 10.0 * pow(x[0], 2) + pow(x[1], 2)\n\n# first derivative of the given function\ndef dif_f(x):\n    return np.array([20.0 * x[0], 2.0 * x[1]])\n\nif __name__ == \"__main__\":\n    # initial value\n    x0 = np.array([1.0,5.0])\n\n    method = SteepestDecentMethod(f,dif_f,x0)\n    method.backtrack()\n    x,a = method.getAnswer(filename=\"./backtrack.png\")\n    print(\"optimal solution: \", x)\n    print(\"optimal value: \", a)\n","sub_path":"assignments/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"417761625","text":"\nclass UnknownSuit(Exception):\n\tpass\n\nclass UnknownValue(Exception):\n\tpass\n\n\nclass Hand(object):\n\t_values = {'2':'Two', '3':'Three', '4':'Four', 
'5':'Five', '6':'Six', '7':'Seven', '8':'Eight', '9':'Nine', '10':'Ten', 'J':'Jack', 'Q':'Queen', 'K':'King', 'A':'Ace'}\n\t_suits = {'H':'Hearts', 'C':'Clubs', 'S':'Spades', 'D':'Diamonds'}\n\n\tdef __init__(self, hand):\n\t\tself.cards = hand.split()\n\t\tself.values = []\n\n\t\tfor card in self.cards:\n\t\t\tex_res = Hand.extract_value_suit(card)\n\t\t\tvalue = ex_res[0]\n\t\t\tsuit = ex_res[1]\n\t\t\tself.values.append(value)\n\n\t\tpair = self.has_pair()\n\t\tif pair:\n\t\t\tself._rank = \"PAIR: \" + pair[0] + \" \" + pair[1]\n\t\t#if self.cards[2] == '8S' and self.cards[3] == '8C':\n\t\t\t#self._rank = \"PAIR: 8S 8C\"\n\t\t#elif self.cards[2] == '8C' and self.cards[3] == '8S':\n\t\t\t#self._rank = \"PAIR: 8C 8S\"\n\t\t#elif self.cards[0] == '2C' and self.cards[1] == '2S':\n\t\t\t#self._rank = \"PAIR: 2C 2S\"\n\t\telse:\n\t\t\thigh_card = self.cards[-1];\n\t\t\tself._rank = \"HIGH CARD: \" + Hand.card_string(high_card)\n\n\n\tdef has_card(self, card):\n\t\treturn card in self.cards\n\n\t@staticmethod\n\tdef extract_value_suit(card):\n\t\tsuit = None\n\t\tvalue = None\n\t\tif card[0] == '1' and card[1] == '0':\n\t\t\tvalue = '10'\n\t\t\tsuit = card[2]\n\t\telse:\n\t\t\tvalue = card[0]\n\t\t\tsuit = card[1]\n\n\t\tif not (suit in Hand._suits):\n\t\t\traise UnknownSuit\n\t\tif not (value in Hand._values):\n\t\t\traise UnknownValue\n\n\t\treturn (value, suit)\n\n\t@staticmethod\n\tdef card_string(card):\n\t\ttemp = Hand.extract_value_suit(card)\n\t\tvalue = Hand._values[temp[0]]\n\t\tsuit =\tHand._suits[temp[1]]\n\t\treturn value + \" of \" + suit\n\n\tdef rank(self):\n\t\treturn self._rank\n\n\n\tdef has_pair(self):\n\t\tvalues = []\n\t\tfor card in self.cards:\n\t\t\tex_res = Hand.extract_value_suit(card)\n\t\t\tvalue = ex_res[0]\n\t\t\tvalues.append(value)\n\n\t\tfor i in range(0, len(values)):\n\t\t\tfor j in range(0, len(values)):\n\t\t\t\tif i == j:\n\t\t\t\t\tcontinue\n\t\t\t\tif values[i] == values[j]:\n\t\t\t\t\treturn [self.cards[i], self.cards[j]]\n\t\treturn []\n\n\n\n\n","sub_path":"homework/poker/poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"324765172","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMain script of bossfight.server package.\n\n- `bossfight.server` will run a local server on an arbitrary free port\n- `bossfight.server ip_address` will start a server on an arbitrary free port\nbound to the given IP address.\n- `bossfight.server ip_address port` will run a server on the specified port\nand IP address.\n\nIn either case the server process will give the following output on stdout\ndirectly after starting the server:\n\n`ip_address\\\\n\n\nport\\\\EOF`\n\nTo shutdown the server, write a line containing `shutdown` to the processes\n*stdin* channel.\n\"\"\"\n\nimport sys\nimport pygase.shared\nimport pygase.server\nfrom bossfight.server.game_loop import BFGameLoop\n\nSHARED_GAME_STATE = pygase.shared.GameState()\nSHARED_GAME_STATE.npcs = dict()\n\nif len(sys.argv) == 1:\n    SERVER = pygase.server.Server(\n        ip_address=\"localhost\",\n        port=0,\n        game_loop_class=BFGameLoop,\n        game_state=SHARED_GAME_STATE,\n    )\nelif len(sys.argv) == 2:\n    SERVER = pygase.server.Server(\n        ip_address=sys.argv[1],\n        port=0,\n        game_loop_class=BFGameLoop,\n        game_state=SHARED_GAME_STATE,\n    )\nelse:\n    SERVER = pygase.server.Server(\n        ip_address=sys.argv[1],\n        port=int(sys.argv[2]),\n        game_loop_class=BFGameLoop,\n        game_state=SHARED_GAME_STATE,\n    
)\n\nprint(SERVER.get_ip_address())\nprint(SERVER.get_port())\nsys.stdout.close()\n\nSERVER.start()\n\nwhile not sys.stdin.readline().__contains__(\"shutdown\"):\n pass\nSERVER.shutdown()\n","sub_path":"bossfight.server/bossfight/server/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"629557640","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Question: https://leetcode.com/problems/validate-binary-search-tree/\n#\n# Given a binary tree, determine if it is a valid binary search tree (BST).\n#\n# Assume a BST is defined as follows:\n#\n# - The left subtree of a node contains only nodes with keys less than\n# the node's key.\n# - The right subtree of a node contains only nodes with keys greater than \n# the node's key.\n# - Both the left and right subtrees must also be binary search trees.\n# Example 1:\n# \n# 2\n# / \\\n# 1 3\n# \n# Binary tree [2,1,3], return true.\n# \n# Example 2:\n# \n# 1\n# / \\\n# 2 3\n# \n# Binary tree [1,2,3], return false. \n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n# Recursive\nclass Solution:\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n def bst_validator(node, min_val, max_val):\n if not node:\n return True\n min_invalid = min_val != None and node.val <= min_val\n max_invalid = max_val != None and node.val >= max_val\n if min_invalid or max_invalid:\n return False\n return bst_validator(node.left, min_val, node.val) and \\\n bst_validator(node.right, node.val, max_val)\n return bst_validator(root, None, None)\n\n# Iterative. 
In-order.\nclass Solution:\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n if not root:\n return True\n stack = []\n prev_node = None\n while root or stack:\n while root:\n stack.append(root)\n root = root.left\n root = stack.pop()\n if prev_node and root.val <= prev_node.val:\n return False\n prev_node = root\n root = root.right\n return True\n","sub_path":"0098.validate-binary-search-tree.py","file_name":"0098.validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"329019373","text":"import datetime\nfrom django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\n\nfrom .models import Item, Price\n\n\ndef list(request, page=1):\n items = Item.objects.all()\n paginator = Paginator(items, 25)\n items = paginator.get_page(page)\n return render(request, \"pricecheck/list.html\", {\"items\": items})\n\n\ndef main(request, id):\n item = get_object_or_404(Item, pk=id)\n start_from = datetime.date.today() - datetime.timedelta(days=7)\n price_last = Price.objects.filter(item=item,\n date__gte=start_from).order_by(\"date\")\n js = []\n for price in price_last:\n js.append(\n ['new Date(\"{}T00:00\")'.format(price.date.isoformat()), price.price]\n )\n js = str(js).replace(\"'\", \"\")\n return render(request, \"pricecheck/main.html\",\n {\"item\": item, \"price\": js})\n","sub_path":"pricecheck/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"25136694","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 11:02:47 2017\n\n@author: zhengyaolin\n\"\"\"\n\ndef insertionSort(A):\n for j in range(1, len(A)):\n key = A[j]\n i = j - 1 \n while i >= 0 and A[i] > key:\n A[i + 1] = A[i]\n i -= 1\n A[i + 1] = key\n \nA = [5,2,4,6,1,3]\ninsertionSort(A)\nprint(A)","sub_path":"Algorithm/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"60572823","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.http import JsonResponse, Http404\nfrom django.views import View\nfrom django.views.generic.base import TemplateResponseMixin\n\nimport logging\nimport requests\nfrom requests_saml import HTTPSAMLAuth\nfrom requests_kerberos import HTTPKerberosAuth\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SendThroughIdP(LoginRequiredMixin, TemplateResponseMixin, View):\n \"\"\" allow the user to POST some data; then the render SP's response to it\n\n data will be exchanged between IdP and SP based on user POST:\n 1. user POST to IdP\n 2. IdP perform POST to SP\n 3. SP perform GET to IdP for more data\n 4. IdP return GET request\n 5. SP return POST request\n 6. 
user response\n \"\"\"\n SP_ENDPOINT = '{}/endpoint/'\n template_name = 'endpoint.html'\n extra_context = {\n \"known_sp_ids\": [x for x in settings.SAML_IDP_SPCONFIG],\n }\n\n def post(self, request, *args, **kwargs):\n ''' this is where we start data exchange '''\n url = self.SP_ENDPOINT.format('http://localhost:9000')\n\n user_data = request.POST\n logger.debug(f'received POST from user {request.user!r}')\n\n # do we have to set cookies?\n cookies = request.COOKIES\n\n with requests.Session() as session:\n logger.debug('performing requests lib auth config')\n k = HTTPKerberosAuth()\n s = HTTPSAMLAuth(chained_auth=k)\n\n logger.debug('starting transaction with service provider')\n response = session.post(\n url, data=user_data, cookies=cookies, auth=s)\n\n logger.debug(f'SP response status: {response.status_code}')\n\n return self.render_to_response({\n 'last_stop': True,\n 'sp_response': {\n 'header': response.headers,\n 'status_code': response.status_code,\n # 'content': str(response.text),\n 'content': response.text,\n # 'content': response.content,\n },\n })\n\n def get(self, request, *args, **kwargs):\n return self.render_to_response(self.extra_context)\n\n\nclass ProvideInfo(LoginRequiredMixin, View):\n http_method_names = ['get']\n\n def get(self, request, *args, **kwargs):\n logger.debug('providing data to SP')\n u = request.user\n logger.debug(f'user object: {u}')\n\n return JsonResponse({\n 'username': u.username if u.is_authenticated else 'ANONYMOUS',\n 'authenticated': u.is_authenticated,\n # 'META': dict(**request.META),\n # 'headers': str(request.headers),\n })\n\n\n# Views bellow are related to the alternate protocol:\n# 1. user fills a form and POST to service provider\n# 2. SP perform POST to IdP for more data\n# 3. IdP return POST request\n# 4. 
user response\n\nclass PostToSP(LoginRequiredMixin, TemplateResponseMixin, View):\n ''' allow user to POST directly to service provider '''\n SP_ENDPOINT = '{}/endpoint/direct/'\n template_name = 'send_to_sp.html'\n extra_context = {\n \"known_sp_ids\": [x for x in settings.SAML_IDP_SPCONFIG],\n \"sp_url\": SP_ENDPOINT.format('http://localhost:9000'),\n }\n\n def get(self, request, *args, **kwargs):\n return self.render_to_response(self.extra_context)\n\n\nclass AlternateProvideInfo(LoginRequiredMixin, View):\n \"\"\" provide some info based on correct data\n\n example: provide user's last login date if POST data\n (username, date_joined) are correct\n \"\"\"\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n ''' will provide JSON data if POST parameters exist in DB '''\n data = request.POST\n print(data)\n try:\n user = User.objects.get(username=data.get('username'))\n except User.DoesNotExist:\n raise Http404\n\n return JsonResponse({\n 'id': user.pk,\n 'last_login': user.last_login,\n 'date_joined': user.date_joined,\n 'is_authenticated': user.is_authenticated,\n })\n","sub_path":"idp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"185956759","text":"from admin_pages import admin_homepage\r\nfrom db_handlers import db_article\r\n\r\n\r\ndef main(result):\r\n import db_handlers\r\n\r\n print(\"Business Statistics\")\r\n print(\"1: All time | 2: By timeframe | 3: By article | 4: Back\")\r\n buttonpressed = None\r\n buttonpressed = input()\r\n select_opt_business_stats(buttonpressed,result)\r\n\r\ndef select_opt_business_stats(buttonpressed,result):\r\n if buttonpressed == '1':\r\n print(\"All time\")\r\n all_time(result)\r\n\r\n elif buttonpressed == '2':\r\n print(\"Last month\")\r\n by_timeframe(result)\r\n\r\n elif buttonpressed == '3':\r\n print(\"By article\")\r\n by_article(result)\r\n\r\n elif buttonpressed == '4':\r\n print(\"Going back\")\r\n from admin_pages import admin_homepage\r\n admin_homepage.main(result)\r\n else:\r\n print(\"bad option. 
try again.\")\r\n        main(result)\r\n\r\n\r\n\r\ndef all_time(result):\r\n    print(\"All time sales:\")\r\n    from db_handlers import db_rent\r\n    list = db_rent.get_article_rent_overview()\r\n    sum = 0\r\n    for rent in list:\r\n        # list of all sales\r\n        print(\"Purchase date: \" + str(rent[1]) + \" Price: \" + str(rent[2]))\r\n        # sum of all prices in that list\r\n        sum += rent[2]\r\n\r\n\r\n    print(\"Total revenue: \" + str(sum) + \" euros.\")\r\n\r\n    print(\"Returning to homepage.\")\r\n    admin_homepage.main(result)\r\n\r\n\r\ndef by_timeframe(result):\r\n\r\n    print(\"Introduce start day:\")\r\n    start_day = input()\r\n    print(\"Introduce start month:\")\r\n    start_month = input()\r\n    print(\"Introduce start year:\")\r\n    start_year = input()\r\n\r\n    print(\"Introduce end day:\")\r\n    end_day = input()\r\n    print(\"Introduce end month:\")\r\n    end_month = input()\r\n    print(\"Introduce end year:\")\r\n    end_year = input()\r\n\r\n\r\n    from db_handlers import db_rent\r\n    moneymade = db_rent.get_rent_by_timeframe(start_day,start_month,start_year,end_day,end_month,end_year)\r\n    print('We made ' + str(moneymade[0]) + \" euros between \" + start_day + \"-\" + start_month + \"-\" + start_year + \" and \" + end_day + \"-\" + end_month + \"-\" + end_year)\r\n\r\n    print(\"Returning to homepage.\")\r\n    admin_homepage.main(result)\r\n\r\n\r\ndef by_article(result):\r\n\r\n    print(\"Introduce article to search its stats.\")\r\n    article_id_input = input()\r\n\r\n    from db_handlers import db_rent\r\n    if db_rent.check_if_rent_exists(article_id_input):\r\n        from db_handlers import db_rent\r\n        n = db_rent.get_numberoftimes_by_article(article_id_input)\r\n        article = db_article.get_article_by_filter('id', article_id_input)\r\n\r\n        sum = n[0] * article[2]\r\n\r\n        print(\"Total revenue of that article: \" + str(sum) + \" euros.\")\r\n\r\n    else:\r\n        print(\"Couldn't find rents with that article. 
Returning to homepage.\")\r\n from admin_pages import main\r\n admin_homepage.main(result)\r\n\r\n print(\"Returning to homepage.\")\r\n admin_homepage.main(result)\r\n\r\n\r\n\r\n\r\n","sub_path":"admin_pages/business_statistics.py","file_name":"business_statistics.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"257343643","text":"# -*- coding: utf-8 -*-\n\"\"\"\nui class for the BUILD toolset\n\"\"\"\n#==============================================================================\n# imports\n#==============================================================================\nimport sys, os, warnings, tempfile, logging, configparser, datetime, time\nimport os.path\nfrom shutil import copyfile\n\n#PyQt\nfrom PyQt5 import uic, QtWidgets\nfrom PyQt5.QtWidgets import QAction, QFileDialog, QListWidget, QTableWidgetItem\n\n#===============================================================================\n# from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication, QObject \n# from qgis.PyQt.QtGui import QIcon\n#===============================================================================\n\n\nfrom qgis.core import *\nfrom qgis.analysis import *\nimport qgis.utils\nimport processing\nfrom processing.core.Processing import Processing\n\n\nimport resources\n\nimport pandas as pd\nimport numpy as np #Im assuming if pandas is fine, numpy will be fine\n\n\n#==============================================================================\n# custom imports\n#==============================================================================\n\nfrom build.rsamp import Rsamp\nfrom build.lisamp import LikeSampler\nfrom build.oth_rfda import RFDAconv\n\n\n\nfrom hlpr.plug import *\nfrom hlpr.Q import *\nfrom hlpr.basic import *\n\n# This loads your .ui file so that PyQt can populate your plugin with the elements from Qt Designer\nui_fp = os.path.join(os.path.dirname(__file__), 'build.ui')\nassert os.path.exists(ui_fp)\nFORM_CLASS, _ = uic.loadUiType(ui_fp)\n\n\nclass DataPrep_Dialog(QtWidgets.QDialog, FORM_CLASS, QprojPlug):\n \n event_name_set = [] #event names\n \n \n \n def __init__(self, iface, parent=None):\n \"\"\"these will only ini tthe first baseclass (QtWidgets.QDialog)\n \n required\"\"\"\n super(DataPrep_Dialog, self).__init__(parent)\n #super(DataPrep_Dialog, self).__init__(parent)\n self.setupUi(self)\n \n # Set up the user interface from Designer through FORM_CLASS.\n # After self.setupUi() you can access any designer object by doing\n # self., and you can use autoconnect slots - see\n # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html\n # #widgets-and-dialogs-with-auto-connect\n\n self.ras = []\n self.ras_dict = {}\n self.vec = None\n\n self.iface = iface\n \n self.qproj_setup()\n \n \n self.connect_slots()\n \n \n self.logger.info('DataPrep_Dialog initilized')\n \n\n def connect_slots(self):\n log = self.logger.getChild('connect_slots')\n #self.testit()\n #======================================================================\n # pull project data\n #======================================================================\n #pull layer info from project\n rlays_d = dict()\n vlays_d = dict()\n for layname, layer in QgsProject.instance().mapLayers().items():\n if isinstance(layer, QgsVectorLayer):\n vlays_d[layname] = layer\n elif isinstance(layer, QgsRasterLayer):\n rlays_d[layname] = layer\n else:\n self.logger.debug('%s not filtered'%layname)\n \n 
#=======================================================================\n # general----------------\n #=======================================================================\n #=======================================================================\n # def test():\n # self.logger.push('test button pushed')\n # \n # for i in range(10):\n # time.sleep(.5)\n # self.progressBar.setValue(i + 1)\n # \n # self.logger.push('finished')\n #=======================================================================\n #ok/cancel buttons\n self.buttonBox.accepted.connect(self.reject)\n self.buttonBox.rejected.connect(self.reject)\n \n \n #connect to status label\n \"\"\"\n this could be moved onto the feedback object...\n but would be a lot of work to move it off the logger\n and not sure what the benefit would be\n \n see hlpr.plug.logger._loghlp()\n \"\"\"\n self.logger.statusQlab=self.progressText\n self.logger.statusQlab.setText('BuildDialog initialized')\n \n #======================================================================\n # setup tab----------\n #======================================================================\n #populate guis\n self.comboBox_vec.setFilters(QgsMapLayerProxyModel.VectorLayer) #SS. Inventory Layer: Drop down\n self.comboBox_aoi.setFilters(QgsMapLayerProxyModel.PolygonLayer) #SS. Project AOI\n self.comboBox_SSelv.addItems(['datum', 'ground']) #ss elevation type\n \n self.comboBox_aoi.setCurrentIndex(-1) #by default, lets have this be blank\n \n #Working Directory browse\n def browse_wd():\n return self.browse_button(self.lineEdit_wd, prompt='Select Working Directory',\n qfd = QFileDialog.getExistingDirectory)\n \n self.pushButton_wd.clicked.connect(browse_wd) # SS. Working Dir. Browse\n \n #WD force open\n def open_wd():\n force_open_dir(self.lineEdit_wd.text())\n \n self.pushButton_wd_open.clicked.connect(open_wd)\n \n #======================================================================\n # #Inventory Vector Layer\n #======================================================================\n #change the 'cid' display when the finv selection changes\n def upd_cid():\n return self.mfcb_connect(\n self.mFieldComboBox_cid, self.comboBox_vec.currentLayer(),\n fn_str = 'xid' )\n \n self.comboBox_vec.layerChanged.connect(upd_cid) #SS inventory vector layer\n \n #find a good layer\n try:\n for layname, vlay in vlays_d.items():\n if layname.startswith('finv'):\n break\n \n self.logger.info('setting comboBox_vec = %s'%vlay.name())\n self.comboBox_vec.setLayer(vlay)\n except Exception as e:\n self.logger.warning('failed to set inventory layer w: \\n %s'%e)\n \n #Vulnerability Curve Set\n def browse_curves():\n return self.browse_button(self.lineEdit_curve, prompt='Select Curve Set',\n qfd = QFileDialog.getOpenFileName)\n \n self.pushButton_SScurves.clicked.connect(browse_curves)# SS. Vuln Curve Set. Browse\n \n #program controls\n self.checkBox_SSoverwrite.stateChanged.connect(self.set_overwrite) #SS overwrite data files\n \n #generate new control file \n self.pushButton_generate.clicked.connect(self.build_scenario) #SS. generate\n \n #CanFlood Control File\n def browse_cf():\n return self.browse_button(self.lineEdit_cf_fp, prompt='Select CanFlood control file',\n qfd=QFileDialog.getOpenFileName)\n \n self.pushButton_cf.clicked.connect(browse_cf)# SS. Model Control File. 
Browse\n \n #======================================================================\n # hazard sampler---------\n #======================================================================\n # Set GUI elements\n self.comboBox_ras.setFilters(QgsMapLayerProxyModel.RasterLayer)\n \"\"\"\n todo: swap this out with better selection widget\n \"\"\"\n #selection \n self.pushButton_remove.clicked.connect(self.remove_text_edit)\n self.pushButton_clear.clicked.connect(self.clear_text_edit)\n self.pushButton_add_all.clicked.connect(self.add_all_text_edit)\n \n self.comboBox_ras.currentTextChanged.connect(self.add_ras)\n \n #=======================================================================\n # inundation\n #=======================================================================\n #connect dtm layer name to display box\n def upd_dtmlayname():\n vlay = self.comboBox_dtm.currentLayer()\n if isinstance(vlay,QgsVectorLayer):\n self.label_HS_dtmln.setText(vlay.name())\n \n self.comboBox_dtm.layerChanged.connect(upd_dtmlayname)\n \n\n #=======================================================================\n # #complex\n #=======================================================================\n #display the gtype when the finv changes\n def upd_gtype():\n vlay = self.comboBox_vec.currentLayer()\n if isinstance(vlay,QgsVectorLayer):\n gtype = QgsWkbTypes().displayString(vlay.wkbType())\n self.label_HS_finvgtype.setText(gtype)\n \n self.comboBox_vec.layerChanged.connect(upd_gtype) #SS inventory vector layer\n \n #display sampling stats options to user \n def upd_stat():\n vlay = self.comboBox_vec.currentLayer()\n if isinstance(vlay,QgsVectorLayer):\n gtype = QgsWkbTypes().displayString(vlay.wkbType())\n self.comboBox_HS_stat.setCurrentIndex(-1)\n \n if 'Polygon' in gtype:\n self.comboBox_HS_stat.addItems(\n ['Mean','Median','Min','Max'])\n \n self.comboBox_vec.layerChanged.connect(upd_stat) #SS inventory vector layer\n \n \n #=======================================================================\n # #execute\n #=======================================================================\n self.pushButton_HSgenerate.clicked.connect(self.run_rsamp)\n \n #======================================================================\n # event likelihoods\n #======================================================================\n self.pushButton_ELstore.clicked.connect(self.set_event_vals)\n \n \"\"\"dev button\n self.pushButton_ELdev.clicked.connect(self._pop_el_table)\"\"\"\n \n \n #======================================================================\n # Conditional Probabilities-----------\n #======================================================================\n \"\"\"todo: rename the buttons so they align w/ the set labels\n \n todo: automatically populate the first column of boxes w/ those layers\n sampled w/ rsamp\n \"\"\"\n #list of combo box names on the likelihood sampler tab\n self.ls_cb_d = { #set {hazard raster : lpol}\n 1: (self.MLCB_LS1_event_3, self.MLCB_LS1_lpol_3),\n 2: (self.MLCB_LS1_event_4, self.MLCB_LS1_lpol_4),\n 3: (self.MLCB_LS1_event_5, self.MLCB_LS1_lpol_5),\n 4: (self.MLCB_LS1_event, self.MLCB_LS1_lpol),\n 5: (self.MLCB_LS1_event_6, self.MLCB_LS1_lpol_6),\n 6: (self.MLCB_LS1_event_7, self.MLCB_LS1_lpol_7),\n 7: (self.MLCB_LS1_event_2, self.MLCB_LS1_lpol_2),\n 8: (self.MLCB_LS1_event_8, self.MLCB_LS1_lpol_8)\n }\n \n #loop and set filteres\n first = True\n for sname, (mlcb_haz, mlcb_lpol) in self.ls_cb_d.items():\n #set drop down filters on hazard bars\n 
mlcb_haz.setFilters(QgsMapLayerProxyModel.RasterLayer)\n mlcb_haz.setAllowEmptyLayer(True)\n mlcb_haz.setCurrentIndex(-1) #set selection to none\n \n #on polygon bars\n mlcb_lpol.setFilters(QgsMapLayerProxyModel.PolygonLayer)\n mlcb_lpol.setAllowEmptyLayer(True)\n mlcb_lpol.setCurrentIndex(-1) #set selection to none\n \n if first:\n mlcb_lpol_1 = mlcb_lpol\n first = False\n\n \n #connect to update the field name box (based on the first layer)\n def upd_lfield(): #updating the field box\n return self.mfcb_connect(\n self.mFieldComboBox_LSfn, mlcb_lpol_1.currentLayer(),\n fn_str = 'fail' )\n \n \n mlcb_lpol_1.layerChanged.connect(upd_lfield)\n \n \n #connect execute\n self.pushButton_LSsample.clicked.connect(self.run_lisamp)\n \n #======================================================================\n # DTM sampler---------\n #======================================================================\n self.comboBox_dtm.setFilters(QgsMapLayerProxyModel.RasterLayer)\n self.pushButton_DTMsamp.clicked.connect(self.run_dsamp)\n \n #======================================================================\n # validator-----------\n #======================================================================\n self.pushButton_Validate.clicked.connect(self.run_validate)\n \n #======================================================================\n # other------------\n #======================================================================\n #Vulnerability Curve Set\n def browse_rfda_crv():\n return self.browse_button(self.lineEdit_wd_OthRf_cv, prompt='Select RFDA curve .xls',\n qfd = QFileDialog.getOpenFileName)\n \n self.pushButton_wd_OthRf_cv.clicked.connect(browse_rfda_crv)\n \n self.mMapLayerComboBox_OthR_rinv.setFilters(QgsMapLayerProxyModel.PointLayer)\n \n self.pushButton_OthRfda.clicked.connect(self.run_rfda)\n\n\n\n \n #======================================================================\n # defaults-----------\n #======================================================================\n \"\"\"\"\n to speed up testing.. 
manually configure the project\n \"\"\"\n\n debug_dir =os.path.join(os.path.expanduser('~'), 'CanFlood', 'build')\n self.lineEdit_cf_fp.setText(os.path.join(debug_dir, 'CanFlood_scenario1.txt'))\n self.lineEdit_wd.setText(debug_dir)\n \n if not os.path.exists(debug_dir):\n log.info('builg directory: %s'%debug_dir)\n os.makedirs(debug_dir)\n \n #=======================================================================\n # wrap\n #=======================================================================\n \n \n \n \n \n \n\n\n #==========================================================================\n # Layer Loading---------------\n #==========================================================================\n def add_ras(self):\n x = [str(self.listWidget_ras.item(i).text()) for i in range(self.listWidget_ras.count())]\n self.ras_dict.update({ (self.comboBox_ras.currentText()) : (self.comboBox_ras.currentLayer()) })\n if (self.comboBox_ras.currentText()) not in x:\n self.listWidget_ras.addItem(self.comboBox_ras.currentText())\n self.ras_dict.update({ (self.comboBox_ras.currentText()) : (self.comboBox_ras.currentLayer()) })\n \n def clear_text_edit(self):\n if len(self.ras_dict) > 0:\n self.listWidget_ras.clear()\n self.ras_dict = {}\n \n def remove_text_edit(self):\n if (self.listWidget_ras.currentItem()) is not None:\n value = self.listWidget_ras.currentItem().text()\n item = self.listWidget_ras.takeItem(self.listWidget_ras.currentRow())\n item = None\n for k in list(self.ras_dict):\n if k == value:\n self.ras_dict.pop(value)\n\n def add_all_text_edit(self):\n layers = self.iface.mapCanvas().layers()\n #layers_vec = [layer for layer in layers if layer.type() == QgsMapLayer.VectorLayer]\n layers_ras = [layer for layer in layers if layer.type() == QgsMapLayer.RasterLayer]\n x = [str(self.listWidget_ras.item(i).text()) for i in range(self.listWidget_ras.count())]\n for layer in layers_ras:\n if (layer.name()) not in x:\n self.ras_dict.update( { layer.name() : layer} )\n self.listWidget_ras.addItem(str(layer.name()))\n\n #===========================================================================\n # common methods----------\n #===========================================================================\n def slice_aoi(self, vlay):\n \n aoi_vlay = self.comboBox_aoi.currentLayer()\n log = self.logger.getChild('slice_aoi')\n \n \n #=======================================================================\n # selection\n #=======================================================================\n if self.checkBox_sels.isChecked():\n if not aoi_vlay is None: \n raise Error('only one method of aoi selection is allowed')\n \n log.info('slicing finv \\'%s\\' w/ %i selected feats'%(\n vlay.name(), vlay.selectedFeatureCount()))\n \n res_vlay = self.saveselectedfeatures(vlay, logger=log)\n #=======================================================================\n # check for no selection\n #=======================================================================\n elif aoi_vlay is None:\n log.debug('no aoi selected... 
not slicing')\n return vlay\n\n #=======================================================================\n # slice\n #=======================================================================\n else:\n vlay.removeSelection()\n log.info('slicing finv \\'%s\\' and %i feats w/ aoi \\'%s\\''%(\n vlay.name(),vlay.dataProvider().featureCount(), aoi_vlay.name()))\n \n res_vlay = self.selectbylocation(vlay, aoi_vlay, result_type='layer', logger=log)\n \n assert isinstance(res_vlay, QgsVectorLayer)\n \n vlay.removeSelection()\n \n #=======================================================================\n # wrap\n #=======================================================================\n if self.checkBox_loadres.isChecked():\n self.qproj.addMapLayer(res_vlay)\n self.logger.info('added \\'%s\\' to canvas'%res_vlay.name())\n \n \n \n return res_vlay\n \n \n\n\n def build_scenario(self): #'Generate' on the setup tab\n \"\"\"\n Generate a CanFlood project from scratch\n \n This tab facilitates the creation of a Control File from user specified parameters and inventory, \n as well as providing general file control variables for the other tools in the toolset.\n \n \n \n \"\"\"\n log = self.logger.getChild('build_scenario')\n log.info('build_scenario started')\n self.tag = self.linEdit_ScenTag.text() #set the secnario tag from user provided name\n \"\"\"\n todo: make a fresh pull of this for each tool\n \"\"\"\n \n cid = self.mFieldComboBox_cid.currentField() #user selected field\n \n self.wd = self.lineEdit_wd.text() #pull the wd filepath from the user provided in 'Browse'\n \n finv_raw = self.comboBox_vec.currentLayer()\n \n\n \n #=======================================================================\n # prechecks\n #=======================================================================\n assert isinstance(self.wd, str)\n \n assert isinstance(self.tag, str)\n assert isinstance(finv_raw, QgsVectorLayer), 'must select a VectorLayer'\n \n \n #check cid\n assert isinstance(cid, str)\n if cid == '' or cid in self.invalid_cids:\n raise Error('user selected invalid cid \\'%s\\''%cid) \n \n assert cid in [field.name() for field in finv_raw.fields()]\n \n if not os.path.exists(self.wd):\n os.makedirs(self.wd)\n log.info('built working directory: %s'%self.wd)\n \n #=======================================================================\n # aoi slice\n #=======================================================================\n finv = self.slice_aoi(finv_raw)\n \n \n #=======================================================================\n # convert finv\n #=======================================================================\n self.feedback.upd_prog(10)\n finv_fp = self.convert_finv(finv, cid) #convert the finv to csv and write to file\n #======================================================================\n # build the control file\n #======================================================================\n \n assert os.path.exists(finv_fp)\n self.feedback.upd_prog(50)\n \n #called by build_scenario()\n dirname = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n \n #get the default template from the program files\n cf_src = os.path.join(dirname, '_pars/CanFlood_control_01.txt')\n assert os.path.exists(cf_src)\n\n \n\n \n #get control file name from user provided tag\n cf_fn = 'CanFlood_%s.txt'%self.tag\n cf_path = os.path.join(self.wd, cf_fn)\n\n \n #see if this exists\n if os.path.exists(cf_path):\n msg = 'generated control file already exists. 
overwrite=%s \\n %s'%(\n self.overwrite, cf_path)\n if self.overwrite:\n log.warning(msg)\n else:\n raise Error(msg)\n \n \n #copy over the default template\n copyfile(cf_src, cf_path)\n \n\n self.feedback.upd_prog(75)\n #======================================================================\n # update the control file\n #======================================================================\n \"\"\"todo: switch over to helper function\"\"\"\n pars = configparser.ConfigParser(allow_no_value=True)\n _ = pars.read(cf_path) #read it from the new location\n \n #parameters\n pars.set('parameters', 'cid', cid) #user selected field\n pars.set('parameters', 'name', self.tag) #user selected field\n pars.set('parameters', 'felv', self.comboBox_SSelv.currentText()) #user selected field\n \n #filepaths\n pars.set('dmg_fps', 'curves', self.lineEdit_curve.text())\n pars.set('dmg_fps', 'finv', finv_fp)\n \n \n #set note\n pars.set('parameters', '#control file template created from \\'scenario setup\\' on %s'%(\n datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')\n ))\n \n #write the config file \n with open(cf_path, 'w') as configfile:\n pars.write(configfile)\n \n log.info(\"default CanFlood model config file created :\\n %s\"%cf_path)\n \n \"\"\"NO. should only populate this automatically from ModelControlFile.Browse\n self.lineEdit_curve.setText(os.path.normpath(os.path.join(self.wd, 'CanFlood - curve set 01.xls')))\"\"\"\n \n \"\"\"TODO:\n write aoi filepath to scratch file\n \"\"\"\n self.feedback.upd_prog(95)\n #======================================================================\n # wrap\n #======================================================================\n \n #display the control file in the dialog\n self.lineEdit_cf_fp.setText(cf_path)\n \n \"\"\"not sure what this is\n self.lineEdit_control_2.setText(os.path.normpath(os.path.join(self.wd, 'CanFlood_control_01.txt')))\"\"\"\n \n log.push('control file created for \"\\'%s\\''%self.tag)\n self.feedback.upd_prog(None) #set the progress bar back down to zero\n\n \n def convert_finv(self, #convert the finv vector to csv file\n vlay, \n cid): \n log = self.logger.getChild('convert_finv')\n #======================================================================\n # prechecks\n #======================================================================\n log.info('on \\'%s\\' w/ %i feats'%(\n vlay.name(), vlay.dataProvider().featureCount()))\n \n #extract data\n df = vlay_get_fdf(vlay, feedback=self.feedback)\n \n #drop geometery indexes\n for gindx in self.invalid_cids: \n df = df.drop(gindx, axis=1, errors='ignore')\n \n if not cid in df.columns:\n raise Error('cid not found in finv_df')\n \n assert df[cid].is_unique\n assert 'int' in df[cid].dtypes.name\n \n #write it as a csv\n out_fp = os.path.join(self.wd, 'finv_%s_%s.csv'%(self.tag, vlay.name()))\n df.to_csv(out_fp, index=False) \n \n log.info(\"inventory csv written to file:\\n %s\"%out_fp)\n \n return out_fp\n \n \n\n \n def run_rsamp(self): #execute rsamp\n log = self.logger.getChild('run_rsamp')\n\n log.info('user pressed \\'pushButton_HSgenerate\\'')\n \n #=======================================================================\n # assemble/prepare inputs\n #=======================================================================\n finv_raw = self.comboBox_vec.currentLayer()\n rlay_l = list(self.ras_dict.values())\n \n crs = self.qproj.crs()\n\n cf_fp = self.get_cf_fp()\n out_dir = self.lineEdit_wd.text()\n \n\n #update some parameters\n cid = self.mFieldComboBox_cid.currentField() #user 
selected field\n psmp_stat = self.comboBox_HS_stat.currentText()\n \n #inundation\n as_inun = self.checkBox_HS_in.isChecked()\n \n if as_inun:\n dthresh = self.mQgsDoubleSpinBox_HS.value()\n dtm_rlay=self.comboBox_dtm.currentLayer()\n \n assert isinstance(dthresh, float)\n assert isinstance(dtm_rlay, QgsRasterLayer)\n \n else:\n dthresh, dtm_rlay = None, None\n \n \n #=======================================================================\n # slice aoi\n #=======================================================================\n finv = self.slice_aoi(finv_raw)\n\n \n \n\n #======================================================================\n # precheck\n #======================================================================\n if finv is None:\n raise Error('got nothing for finv')\n if not isinstance(finv, QgsVectorLayer):\n raise Error('did not get a vector layer for finv')\n \n for rlay in rlay_l:\n if not isinstance(rlay, QgsRasterLayer):\n raise Error('unexpected type on raster layer')\n \n if not os.path.exists(out_dir):\n raise Error('working directory does not exist: %s'%out_dir)\n \n if cid is None or cid=='':\n raise Error('need to select a cid')\n \n if not cid in [field.name() for field in finv.fields()]:\n raise Error('requested cid field \\'%s\\' not found on the finv_raw'%cid)\n \n\n assert os.path.exists(cf_fp), 'bad control file specified'\n #======================================================================\n # execute\n #======================================================================\n\n #build the sample\n wrkr = Rsamp(logger=self.logger, \n tag = self.tag, #set by build_scenario() \n feedback = self.feedback, #let the instance build its own feedback worker\n cid=cid,crs = crs,\n out_dir = out_dir\n )\n \n \"\"\"try just passing the Dialog's feedback\n #connect the status bar to the worker's feedback\n wrkr.feedback.progressChanged.connect(self.upd_prog)\"\"\"\n \n \n \n #execute the tool\n res_vlay = wrkr.run(rlay_l, finv,\n psmp_stat=psmp_stat,\n as_inun=as_inun, dtm_rlay=dtm_rlay, dthresh=dthresh)\n \n #check it\n wrkr.check()\n \n #save csv results to file\n wrkr.write_res(res_vlay, )\n \n #update ocntrol file\n wrkr.upd_cf(cf_fp)\n \n #======================================================================\n # post---------\n #======================================================================\n \"\"\"\n the hazard sampler sets up a lot of the other tools\n \"\"\"\n #======================================================================\n # add to map\n #======================================================================\n if self.checkBox_loadres.isChecked():\n self.qproj.addMapLayer(res_vlay)\n self.logger.info('added \\'%s\\' to canvas'%res_vlay.name())\n \n #======================================================================\n # update event names\n #======================================================================\n self.event_name_set = [lay.name() for lay in rlay_l]\n \n log.info('set %i event names: \\n %s'%(len(self.event_name_set), \n self.event_name_set))\n \n #======================================================================\n # populate Event Likelihoods table\n #======================================================================\n l = self.event_name_set\n for tbl in [self.fieldsTable_EL]:\n\n tbl.setRowCount(len(l)) #add this many rows\n \n for rindx, ename in enumerate(l):\n tbl.setItem(rindx, 0, QTableWidgetItem(ename))\n \n log.info('populated tables with event names')\n \n 
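\n        #aside (hedged sketch, not in the original): the populate-lisamp loop in the next section breaks at the first unsampled slot, leaving the remaining comboboxes untouched (see its todo); clearing them instead could look like\n        #for indxr, (sname, (mlcb_h, mlcb_v)) in enumerate(self.ls_cb_d.items()):\n        #    if indxr in rlay_d: mlcb_h.setLayer(rlay_d[indxr])\n        #    else: mlcb_h.setCurrentIndex(-1) #clear instead of break\n        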
#======================================================================\n # populate lisamp\n #======================================================================\n \n #get the mlcb\n try:\n rlay_d = {indxr: rlay for indxr, rlay in enumerate(rlay_l)}\n \n for indxr, (sname, (mlcb_h, mlcb_v)) in enumerate(self.ls_cb_d.items()):\n if indxr in rlay_d:\n mlcb_h.setLayer(rlay_l[indxr])\n \n else:\n \"\"\"\n todo: clear the remaining comboboxes\n \"\"\"\n break\n\n\n except Exception as e:\n log.error('failed to populate lisamp fields w/\\n %s'%e)\n \n \n #======================================================================\n # wrap\n #======================================================================\n self.feedback.upd_prog(None) #set the progress bar back down to zero\n\n log.push('Rsamp finished')\n \n return\n \n def run_dsamp(self): #sample dtm raster\n \n self.logger.info('user pressed \\'pushButton_DTMsamp\\'')\n\n \n #=======================================================================\n # assemble/prepare inputs\n #=======================================================================\n \n finv_raw = self.comboBox_vec.currentLayer()\n rlay = self.comboBox_dtm.currentLayer()\n \n crs = self.qproj.crs()\n\n cf_fp = self.get_cf_fp()\n out_dir = self.lineEdit_wd.text()\n \n\n #update some parameters\n cid = self.mFieldComboBox_cid.currentField() #user selected field\n psmp_stat = self.comboBox_HS_stat.currentText()\n \n\n #======================================================================\n # aoi slice\n #======================================================================\n finv = self.slice_aoi(finv_raw)\n \n\n #======================================================================\n # precheck\n #======================================================================\n \n if finv is None:\n raise Error('got nothing for finv')\n if not isinstance(finv, QgsVectorLayer):\n raise Error('did not get a vector layer for finv')\n \n\n if not isinstance(rlay, QgsRasterLayer):\n raise Error('unexpected type on raster layer')\n \n if not os.path.exists(out_dir):\n raise Error('working directory does not exist: %s'%out_dir)\n \n if cid is None or cid=='':\n raise Error('need to select a cid')\n \n if not cid in [field.name() for field in finv.fields()]:\n raise Error('requested cid field \\'%s\\' not found on the finv_raw'%cid)\n \n \n #======================================================================\n # execute\n #======================================================================\n\n #build the sample\n wrkr = Rsamp(logger=self.logger, \n tag=self.tag, #set by build_scenario() \n feedback = self.feedback, #needs to be connected to progress bar\n cid=cid,crs=crs, \n out_dir = out_dir, fname='gels'\n )\n \n \n #connect the status bar\n #wrkr.feedback.progressChanged.connect(self.upd_prog)\n \n res_vlay = wrkr.run([rlay], finv, psmp_stat=psmp_stat)\n \n #check it\n wrkr.dtm_check(res_vlay)\n \n #save csv results to file\n wrkr.write_res(res_vlay, out_dir = out_dir)\n \n #update ocntrol file\n wrkr.update_cf({\n 'dmg_fps':(\n {'gels':wrkr.out_fp},\n '#\\'gels\\' file path set from rsamp.py at %s'%(datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')),\n ),\n 'parameters':(\n {'felv':'ground'}, \n )\n \n },cf_fp)\n \n #======================================================================\n # add to map\n #======================================================================\n if self.checkBox_loadres.isChecked():\n self.qproj.addMapLayer(finv)\n self.logger.info('added 
\\'%s\\' to canvas'%finv.name())\n        \n        self.feedback.upd_prog(None) #set the progress bar back down to zero\n        self.logger.push('dsamp finished')    \n        \n    def run_lisamp(self): #sample failure-likelihood polygons\n        \n        self.logger.info('user pressed \\'pushButton_LSsample\\'')\n\n        \n        #=======================================================================\n        # assemble/prepare inputs\n        #=======================================================================\n        finv_raw = self.comboBox_vec.currentLayer()\n        crs = self.qproj.crs()\n        cf_fp = self.get_cf_fp()\n        out_dir = self.lineEdit_wd.text()\n        cid = self.mFieldComboBox_cid.currentField() #user selected field\n        \n        lfield = self.mFieldComboBox_LSfn.currentField()\n        \n        #collect lpols\n        lpol_d = dict()\n        for sname, (mlcb_haz, mlcb_lpol) in self.ls_cb_d.items():\n            hlay = mlcb_haz.currentLayer()\n            \n            if not isinstance(hlay, QgsRasterLayer):\n                continue\n            \n            lpol_vlay = mlcb_lpol.currentLayer()\n            \n            if not isinstance(lpol_vlay, QgsVectorLayer):\n                raise Error('must provide a matching VectorLayer for set %s'%sname)\n\n            lpol_d[hlay.name()] = lpol_vlay \n        \n        #======================================================================\n        # aoi slice\n        #======================================================================\n        finv = self.slice_aoi(finv_raw)\n        \n\n        #======================================================================\n        # precheck\n        #======================================================================\n        \n        if finv is None:\n            raise Error('got nothing for finv')\n        if not isinstance(finv, QgsVectorLayer):\n            raise Error('did not get a vector layer for finv')\n        \n        if not os.path.exists(out_dir):\n            raise Error('working directory does not exist: %s'%out_dir)\n        \n        if cid is None or cid=='':\n            raise Error('need to select a cid')\n        \n        if lfield is None or lfield=='':\n            raise Error('must select a valid lfield')\n        \n        if not cid in [field.name() for field in finv.fields()]:\n            raise Error('requested cid field \\'%s\\' not found on the finv_raw'%cid)\n        \n        \n        \n        #======================================================================\n        # execute\n        #======================================================================\n\n        #build the sample\n        wrkr = LikeSampler(logger=self.logger, \n                           tag=self.tag, #set by build_scenario() \n                           feedback = self.feedback, #needs to be connected to progress bar\n                           crs = crs,\n                           )\n        \n        #connect the status bar\n        #wrkr.feedback.progressChanged.connect(self.upd_prog)\n        \n        res_df = wrkr.run(finv, lpol_d, cid=cid, lfield=lfield)\n        \n        #check it\n        wrkr.check()\n        \n        #save csv results to file\n        wrkr.write_res(res_df, out_dir = out_dir)\n        \n        #update control file\n        wrkr.upd_cf(cf_fp)\n        \n        #======================================================================\n        # add to map\n        #======================================================================\n        if self.checkBox_loadres.isChecked():\n            res_vlay = wrkr.vectorize(res_df)\n            self.qproj.addMapLayer(res_vlay)\n            self.logger.info('added \\'%s\\' to canvas'%res_vlay.name())\n        \n        self.feedback.upd_prog(None) #set the progress bar back down to zero\n        self.logger.push('lisamp finished')    \n        \n        return\n        \n    def _pop_el_table(self): #developing the table widget\n        \n\n        l = ['e1', 'e2', 'e3']\n        tbl = self.fieldsTable_EL\n        tbl.setRowCount(len(l)) #add this many rows\n        \n        for rindx, ename in enumerate(l):\n            tbl.setItem(rindx, 0, QTableWidgetItem(ename))\n        \n        self.logger.push('populated likelihoods table with event names')\n        \n        \n        \n    def set_event_vals(self): #saving the event likelihoods table to file\n        \"\"\"store user specified event 
variables into the 'evals' dataset\n        \n        \n        \"\"\"\n        log = self.logger.getChild('set_event_vals')\n        log.info('user pushed \\'pushButton_ELstore\\'')\n        \n\n        #======================================================================\n        # collect variables\n        #======================================================================\n        #get displayed control file path\n        cf_fp = self.get_cf_fp()\n        out_dir = self.lineEdit_wd.text()\n        \n        #likelihood parameter\n        if self.radioButton_ELari.isChecked():\n            event_probs = 'ari'\n        else:\n            event_probs = 'aep'\n        self.logger.info('\\'event_probs\\' set to \\'%s\\''%event_probs)\n        \n        \n        #======================================================================\n        # collect table data\n        #======================================================================\n\n        df = qtbl_get_df(self.fieldsTable_EL)\n        \n        self.logger.info('extracted data w/ %s \\n%s'%(str(df.shape), df))\n        \n        # check it\n        if df.iloc[:, 1].isna().any():\n            raise Error('got %i nulls in the likelihood column'%df.iloc[:,1].isna().sum())\n        \n        miss_l = set(self.event_name_set).symmetric_difference(df.iloc[:,0].values)\n        if len(miss_l)>0:\n            raise Error('event name mismatch')\n        \n        \n        #======================================================================\n        # clean it\n        #======================================================================\n        aep_df = df.set_index(df.columns[0]).iloc[:,0].to_frame().T\n        \n\n        \n        #======================================================================\n        # #write to file\n        #======================================================================\n        ofn = os.path.join(self.lineEdit_wd.text(), 'evals_%i_%s.csv'%(len(aep_df.columns), self.tag))\n        \n        from hlpr.Q import Qcoms\n        #build a shell worker for these tasks\n        wrkr = Qcoms(logger=log, tag=self.tag, feedback=self.feedback, out_dir=out_dir)\n        \n        eaep_fp = wrkr.output_df(aep_df, ofn, \n                                 overwrite=self.overwrite, write_index=False)\n        \n        \n        \n        #======================================================================\n        # update the control file\n        #======================================================================\n        wrkr.update_cf(\n            {\n            'parameters':({'event_probs':event_probs},),\n            'risk_fps':({'evals':eaep_fp}, \n                        '#evals file path set from %s.py at %s'%(\n                            __name__, datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')))\n            \n            },\n            cf_fp = cf_fp\n            )\n        \n        \n        \n        self.logger.push('generated \\'aeps\\' and set \\'event_probs\\' to control file')\n        \n    def run_validate(self):\n        #raise Error('broken')\n        \"\"\"\n        a lot of this is duplicated in model.scripts_.setup_pars\n        \n        TODO: consolidate with setup_pars\n        \n        \"\"\"\n        log = self.logger.getChild('valid')\n        log.info('user pressed \\'pushButton_Validate\\'')\n        \n        #======================================================================\n        # load the control file\n        #======================================================================\n        #get the control file path\n        cf_fp = self.get_cf_fp()\n        \n        #build/run the parser\n        log.info('validating control file: \\n    %s'%cf_fp)\n        pars = configparser.ConfigParser(inline_comment_prefixes='#', allow_no_value=True)\n        _ = pars.read(cf_fp) #read it\n        \n        self.feedback.upd_prog(10)\n        #======================================================================\n        # assemble the validation parameters\n        #======================================================================\n        #import the class objects\n        from model.dmg2 import Dmg2\n        from model.risk2 import Risk2\n        from model.risk1 import Risk1\n        \n        #populate all possible test parameters\n        \"\"\"\n        todo: finish this\n        \"\"\"\n        
vpars_pos_d = {\n            'risk1':(self.checkBox_Vr1, Risk1),\n            'dmg2':(self.checkBox_Vi2, Dmg2),\n            'risk2':(self.checkBox_Vr2, Risk2),\n            #'risk3':(self.checkBox_Vr3, (None, None, None)),\n            }\n        \n        #select based on user check boxes\n        vpars_d = dict()\n        \n        for vtag, (checkBox, model) in vpars_pos_d.items():\n            \n            if checkBox.isChecked():\n                vpars_d[vtag] = model\n                \n        if len(vpars_d) == 0:\n            raise Error('no validation options selected!')\n        \n        log.info('user selected %i validation parameter sets'%len(vpars_d))\n        \n        #======================================================================\n        # validate\n        #======================================================================\n\n        \n        vflag_d = dict()\n        for vtag, model in vpars_d.items():\n            self.feedback.upd_prog(80/len(vpars_d), method='append')\n\n            \"\"\"need to play with init sequences to get this to work\"\"\"\n\n            \n            #==================================================================\n            # set validation flag\n            #==================================================================\n            vflag_d[model.valid_par] = 'True'\n        \n        #======================================================================\n        # update control file\n        #======================================================================\n        self.update_cf(\n            {'validation':(vflag_d, )\n             },\n            cf_fp = cf_fp\n            )\n        self.feedback.upd_prog(100)\n        \n        log.push('completed %i validations'%len(vpars_d))\n        \n        self.feedback.upd_prog(None)\n        return\n        \n    def run_rfda(self): #Other.Rfda tab\n        log = self.logger.getChild('run_rfda')\n        \n        #======================================================================\n        # collect from ui\n        #======================================================================\n        rinv_vlay = self.mMapLayerComboBox_OthR_rinv.currentLayer()\n        crv_fp = self.lineEdit_wd_OthRf_cv.text()\n        bsmt_ht = self.lineEdit_OthRf_bht.text()\n        #cid = self.mFieldComboBox_cid.currentField() #user selected field\n        \n        crs = self.qproj.crs()\n        out_dir = self.lineEdit_wd.text()\n        \n        try:\n            bsmt_ht = float(bsmt_ht)\n        except Exception as e:\n            raise Error('failed to convert bsmt_ht to float w/ \\n    %s'%e)\n        \n        \n        #======================================================================\n        # input checks\n        #======================================================================\n        #======================================================================\n        # if cid is None or cid=='':\n        #     raise Error('need to select a cid')\n        #======================================================================\n        \n        wrkr = RFDAconv(logger=self.logger, out_dir=out_dir, tag=self.tag, bsmt_ht = bsmt_ht)\n        #======================================================================\n        # inventory convert\n        #======================================================================\n        if isinstance(rinv_vlay, QgsVectorLayer):\n            \n            \n            finv_vlay = wrkr.to_finv(rinv_vlay)\n            \n            self.qproj.addMapLayer(finv_vlay)\n            log.info('added \\'%s\\' to canvas'%finv_vlay.name())\n            \n        #======================================================================\n        # curve convert\n        #======================================================================\n        if os.path.exists(crv_fp):\n            df_raw = pd.read_excel(crv_fp, header=None)\n            \n            df_d = wrkr.to_curveset(df_raw, logger=log)\n            \n            basefn = os.path.splitext(os.path.split(crv_fp)[1])[0]\n            \n            ofp = wrkr.output(df_d, basefn=basefn)\n            \n        else:\n            log.info('no valid crv_fp provided')\n            \n        #======================================================================\n        # wrap\n        #======================================================================\n        
self.logger.push('finished')\n        \n","sub_path":"canflood/build/BuildDialog.py","file_name":"BuildDialog.py","file_ext":"py","file_size_in_byte":48672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"537516404","text":"import os\nfrom mario_lib import *\n\nmy_dir = os.path.dirname(os.path.realpath(__file__))\n\nbodies_file = my_dir + '/' + 'bodies.csv'\nchar_file = my_dir + '/' + 'characters.csv'\n\nbodies_dict, char_dict = read_and_combine(bodies_file, char_file)\n#print(bodies_dict)\n\n#for x in char_dict:\n#    for x_2 in bodies_dict:\n        #row = char_dict[x]\n        #speed = row[1]\n#        print(char_dict[x][1],'-----',bodies_dict[x_2][2])\n\n\noutput = best_speed(bodies_dict, char_dict)\n\nprint(output)\n\n\noutput_acc = best_acceleration(bodies_dict, char_dict)\nprint(output_acc)\n","sub_path":"marioTest.py","file_name":"marioTest.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"639314534","text":"import os\nimport requests\nfrom subprocess import call\n\n\n# yandex translation api key\napi_key = 'get your yandex api key here: https://tech.yandex.ru/translate/'\napp_name = 'notify-send-translate-selection'\n\n\ndef notify(title, message):\n    call(['notify-send', title, message, '-h', 'string:x-canonical-private-synchronous:' + app_name])\n\n\nerror_code_descriptions = {\n    401: 'Invalid API key',\n    402: 'Blocked API key',\n    404: 'Exceeded the daily limit on the amount of translated text',\n    413: 'Exceeded the maximum text size',\n    422: 'The text cannot be translated',\n    501: 'The specified translation direction is not supported'\n}\n\n\nif __name__ == '__main__':\n    text = os.popen('xsel').read()\n    payload = {'key': api_key, 'text': text, 'lang': 'ru', 'format': 'plain'}\n    url = 'https://translate.yandex.net/api/v1.5/tr.json/translate'\n    response = requests.get(url, params=payload)\n    if response.status_code == 200:\n        notify(text, response.json().get('text')[0])\n    else:\n        #the Russian strings below read 'Unable to translate' and 'Unknown problem'\n        notify('Невозможно перевести', error_code_descriptions.get(response.status_code, 'Неизвестная проблема'))\n","sub_path":"trsel.py","file_name":"trsel.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"371863175","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module provides a convenient interface \nfor building a new galaxy-halo model by swapping out features \nfrom an existing model. \n\"\"\"\n\nfrom copy import copy \nfrom ..custom_exceptions import HalotoolsError\nfrom .model_factories import HodModelFactory \nfrom warnings import warn \n\n__all__ = ['HodModelArchitect']\n\n\nclass HodModelArchitect(object):\n    \"\"\" Class used to create customized HOD-style models. \n    \"\"\"\n\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def customize_model(*args, **kwargs):\n        \"\"\" Method takes a baseline composite model as input, \n        together with an arbitrary number of new component models, \n        and swaps in the new component models to create and return a new composite model. \n\n        Parameters \n        ----------\n        baseline_model : HOD model instance \n            `~halotools.empirical_models.HodModelFactory` instance. \n\n        component_models : Halotools objects \n            Instance of any component model that you want to swap into the baseline_model. 
\n\n Returns \n --------\n new_model : HOD model instance \n `~halotools.empirical_models.HodModelFactory` instance. The ``new_model`` will \n be identical in every way to the ``baseline_model``, except the features in the \n input component_models will replace the features in the ``baseline_model``. \n\n \"\"\"\n\n try:\n baseline_model = kwargs['baseline_model']\n except KeyError:\n msg = (\"\\nThe customize_model method of HodModelArchitect \"\n \"requires a baseline_model keyword argument\\n\")\n raise HalotoolsError(msg)\n baseline_blueprint = baseline_model.model_blueprint\n new_blueprint = copy(baseline_blueprint)\n\n for new_component in args:\n try:\n gal_type = new_component.gal_type\n galprop_key = new_component.galprop_key\n except AttributeError:\n msg = (\"\\nEvery argument of the customize_model method of HodModelArchitect \"\n \"must be a model instance that has a ``gal_type`` and a ``galprop_key`` attribute.\\n\")\n raise HalotoolsError(msg)\n\n # Enforce self-consistency in the thresholds of new and old components\n if galprop_key == 'occupation':\n old_component = baseline_blueprint[gal_type][galprop_key]\n if new_component.threshold != old_component.threshold:\n msg = (\"\\n\\nYou tried to swap in a %s occupation component \\nthat has a different \" \n \"threshold than the original %s occupation component.\\n\"\n \"This is technically permissible, but in general, composite HOD-style models \\n\"\n \"must have the same threshold for all occupation components.\\n\"\n \"Thus if you do not request the HodModelArchitect to make the corresponding threshold change \\n\"\n \"for all gal_types, the resulting composite model will raise an exception and not build.\\n\")\n warn(msg % (gal_type, gal_type)) \n\n new_blueprint[gal_type][galprop_key] = new_component\n\n new_model = HodModelFactory(new_blueprint)\n return new_model\n\n\n\n\n\n\n\n\n","sub_path":"halotools/empirical_models/hod_designer.py","file_name":"hod_designer.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"} +{"seq_id":"228757013","text":"class MajorityElement:\n @staticmethod\n def find_majority_element(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # boyer-moore voting algorithm. Idea is if we cancel each item with any other item other than itself,\n # at the end the majority item will be available. This algorithm only works if majority element exists.\n # if no such item exists, this algorithm would still return a value. 
So, if the existence is not guaranteed\n        # then we have to do another O(n) pass to check that the element returned is actually the majority\n        # (a hedged sketch of that verification pass is appended after the last record below).\n        # Btw, using a hash map is a trivial solution.\n        if not nums:\n            return None\n        majority = nums[0]\n        count = 1\n        for i in range(1, len(nums)):\n            if majority == nums[i]:\n                count += 1\n            else:\n                count -= 1\n                if count == 0:\n                    majority = nums[i]\n                    count = 1\n        return majority\n","sub_path":"Python/dev/arrays/majority_element.py","file_name":"majority_element.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"467629016","text":"# -*- coding: utf-8 -*-\nfrom core.libs import *\n\nHOST = 'https://pelis24.mobi'\n\nLNG = Languages({\n    Languages.en: ['Inglés'],  #English\n    Languages.es: ['Español'],  #Spanish\n    Languages.la: ['Latino'],  #Latin American Spanish\n    Languages.sub_es: ['Subtitulado']  #Subtitled\n})\n\nQLT = Qualities({\n    Qualities.scr: ['Ts Screener', 'BR-Screener'],\n    Qualities.hd: ['HD Rip 720p'],\n    Qualities.hd_full: ['HD Real 1080p'],\n    Qualities.rip: ['Dvd Rip']\n})\n\n\ndef mainlist(item):\n    logger.trace()\n    itemlist = list()\n\n    itemlist.append(item.clone(\n        action=\"newest\",\n        label=\"Novedades\",  #\"Latest\"\n        url=HOST,\n        type=\"item\",\n        content_type='movies'))\n\n    itemlist.append(item.clone(\n        action=\"contents\",\n        label=\"Estrenos\",  #\"Premieres\"\n        url=HOST + \"/genero/estreno\",\n        type=\"item\",\n        content_type='movies'))\n\n    itemlist.append(item.clone(\n        action=\"years\",\n        label=\"Años\",  #\"Years\"\n        url=HOST,\n        type=\"item\"\n    ))\n\n    itemlist.append(item.clone(\n        action=\"generos\",\n        label=\"Géneros\",  #\"Genres\"\n        url=HOST,\n        type=\"item\"\n    ))\n\n    itemlist.append(item.clone(\n        action=\"search\",\n        label=\"Buscar\",  #\"Search\"\n        query=True,\n        type='search',\n        content_type='movies'\n    ))\n\n    return itemlist\n\n\ndef search(item):\n    logger.trace()\n\n    itemlist = contents(item.clone(\n        url=HOST + '/?s=%s' % item.query,\n        action='contents'\n    ))\n    itemlist = filter(lambda x: x.type == 'movie', itemlist)\n\n    return itemlist\n\n\ndef years(item):\n    logger.trace()\n    itemlist = list()\n\n    data = httptools.downloadpage(item.url).data\n\n    data = scrapertools.find_single_match(\n        data,\n        '
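# Appendix (hedged, not part of any record above): the extra O(n) verification
# pass mentioned in the majority_element.py record, assuming `nums` and the
# candidate returned by MajorityElement.find_majority_element:
#
#   candidate = MajorityElement.find_majority_element(nums)
#   if candidate is not None and nums.count(candidate) > len(nums) // 2:
#       majority = candidate  # confirmed: occurs more than n/2 times
#   else:
#       majority = None  # no true majority element exists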