diff --git "a/4520.jsonl" "b/4520.jsonl" new file mode 100644 --- /dev/null +++ "b/4520.jsonl" @@ -0,0 +1,469 @@ +{"seq_id":"36957215211","text":"import time\nimport math\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.ui import Select\n\ndef calc(x):\n    return str(math.log(abs(12*math.sin(int(x)))))\n\ntry:\n    link = \"http://suninjuly.github.io/file_input.html\"\n    browser = webdriver.Chrome(ChromeDriverManager().install())\n    browser.get(link)\n    \n    browser.find_element_by_css_selector(\"[name='firstname']\").send_keys(\"[name='firstname']\")\n    browser.find_element_by_css_selector(\"[name='lastname']\").send_keys(\"[name='lastname']\")\n    browser.find_element_by_css_selector(\"[name='email']\").send_keys(\"[name='email']\")\n\n    current_dir = os.path.abspath(os.path.dirname(__file__))\n    file_path = os.path.join(current_dir, 'file.txt') \n    browser.find_element_by_css_selector(\"[type='file']\").send_keys(file_path)\n    \n    button = browser.find_element_by_css_selector(\"button.btn\")\n    button.click()\n\n\nfinally:\n    time.sleep(10)\n    # close the browser after all the manipulations\n    browser.quit()\n","repo_name":"MariaAzhgikhina/selenium-testing-practice","sub_path":"first-module/test10.py","file_name":"test10.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42805162235","text":"# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    def hasCycle(self, head: ListNode) -> bool:\n        ptr=head\n        while ptr:\n            if hasattr(ptr,\"m\"):\n                return True\n            ptr.m=0\n            ptr=ptr.next\n        return False\n\nclass Solution:\n    def hasCycle(self, head: ListNode) -> bool:\n        ptr_slow=head\n        ptr_fast=head\n        while True:\n            if ptr_slow is None or ptr_fast is None or ptr_fast.next is None:\n                return False\n            ptr_slow=ptr_slow.next\n            ptr_fast=ptr_fast.next.next\n            if ptr_slow==ptr_fast:\n                return True\n        return False","repo_name":"lkwq007/leetcode-py","sub_path":"141-Linked-List-Cycle.py","file_name":"141-Linked-List-Cycle.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"17633199410","text":"#!/usr/bin/env python3\n\n\"\"\"\nCreated by Nick Turner (@nickjvturner)\n\nSuper Ugly work in progress\n\nIntended for Simulation project files\n\n\"\"\"\n\nimport zipfile\nimport json\nimport time\nimport shutil\nimport pandas as pd\nfrom pathlib import Path\nimport xlsxwriter\nfrom collections import Counter\nfrom pprint import pprint\n\n\ndef main():\n    nl = '\\n'\n\n    # Get filename and current working directory\n    print(f'{nl}{Path(__file__).name}')\n    print(f'working_directory: {Path.cwd()}{nl}')\n\n    # Get local file(s) with extension .esx\n    for file in sorted(Path.cwd().iterdir()):\n        # ignore files in directory containing _re-zip\n        if (file.suffix == '.esx') and (not('re-zip' in file.stem)):\n            proceed = input(f'Proceed with file: {str(file.name)}? 
(YES/no)')\n if proceed == 'no':\n continue\n\n print('filename:', file.name)\n\n # Define the project name\n project_name = file.stem\n print('project_name:', project_name)\n\n # Unzip the .esx project file into folder named {project_name}\n with zipfile.ZipFile(file.name, 'r') as zip_ref:\n zip_ref.extractall(project_name)\n print('project successfully unzipped')\n\n # Load the floorPlans.json file into the floorPlans Dictionary\n with open(Path(project_name) / 'floorPlans.json') as json_file:\n floorPlans = json.load(json_file)\n json_file.close()\n # pprint(floorPlans)\n\n # Create an intermediary dictionary to lookup floor names\n # populate it\n floorPlansDict = {\n floor['id']: floor['name'] for floor in floorPlans['floorPlans']\n }\n # pprint(floorPlansDict)\n\n # Create a simple list of floorPlans\n floorPlansList = []\n\n for floor, name in floorPlansDict.items():\n floorPlansList.append(name)\n # print(floorPlansList)\n\n # Load the accessPoints.json file into the accessPoints dictionary\n with open(Path(project_name) / 'accessPoints.json') as json_file:\n accessPoints = json.load(json_file)\n json_file.close()\n # pprint(accessPoints)\n\n # Load the simulatedRadios.json file into the simulatedRadios dictionary\n with open(Path(project_name) / 'simulatedRadios.json') as json_file:\n simulatedRadios = json.load(json_file)\n # pprint(simulatedRadios)\n\n # Create an intermediary dictionary to lookup simulated radio parameters and populate it\n # using dictionary comprehension\n simulatedRadioDict = {radio['accessPointId']: {x: y for x, y in radio.items()}\n for radio in simulatedRadios['simulatedRadios']}\n\n # pprint(simulatedRadioDict)\n\n processedAPdict = {}\n\n for ap in accessPoints['accessPoints']:\n processedAPdict[ap['name']] = {\n 'name': ap['name'],\n 'vendor': ap.get('vendor', ''),\n 'model': ap.get('model', ''),\n 'floor': floorPlansDict.get(ap.get('location', {}).get('floorPlanId', ''), ''),\n 'antennaTilt': simulatedRadioDict.get(ap['id'], {}).get('antennaTilt', ''),\n 'antennaMounting': simulatedRadioDict.get(ap['id'], {}).get('antennaMounting', ''),\n 'antennaHeight': simulatedRadioDict.get(ap['id'], {}).get('antennaHeight', '')\n }\n\n # pprint(processedAPdict)\n\n try:\n # Remove temporary directory containing unzipped project file\n shutil.rmtree(project_name)\n except Exception as e:\n print(e)\n\n print(f'{nl}{file.stem}')\n\n version = '1.2'\n output_filename = f'{file.stem} - BoM v{version}.xlsx'\n writer = pd.ExcelWriter(output_filename, engine='xlsxwriter')\n\n for floor in sorted(floorPlansList):\n\n floorAPs = {}\n\n print(floor)\n print('-' * len(floor))\n\n for ap, ap_details in processedAPdict.items():\n if ap_details['floor'] == floor:\n floorAPs.update({ap: processedAPdict[ap]})\n\n # Create a pandas dataframe from the data\n df = pd.DataFrame(floorAPs)\n\n # Transpose the dataframe so that the keys become columns and the values become rows\n df = df.transpose()\n\n # Sort the dataframe by the 'floor' and 'model' columns in ascending order\n df = df.sort_values(by=['name'], ascending=True)\n\n # Export the dataframe to an excel file\n df.to_excel(writer, sheet_name=floor, index=False)\n\n # Create a pandas dataframe from the data\n df = pd.DataFrame(processedAPdict)\n\n # Transpose the dataframe so that the keys become columns and the values become rows\n df = df.transpose()\n\n # Sort the dataframe by the 'floor' and 'model' columns in ascending order\n df = df.sort_values(by=['floor', 'name'], ascending=True)\n\n # Export the dataframe to an 
excel file\n    df.to_excel(writer, sheet_name='ALL APs', index=False)\n\n    # Close the ExcelWriter object\n    writer.close()\n\nif __name__ == \"__main__\":\n    start_time = time.time()\n    main()\n    run_time = time.time() - start_time\n    print(f'** Time to run: {round(run_time, 2)} seconds **')\n","repo_name":"nickjvturner/badgerwifi-ekahau","sub_path":"ESX/simple_BoM_generator/04_AP_details_BoM_generator_per_floor_xlsx_output.py","file_name":"04_AP_details_BoM_generator_per_floor_xlsx_output.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"22179893894","text":"#!/usr/bin/env python\n\nimport sys\n\ndef main(lines):\n    m = {\n        'forward': 0,\n        'up': 0,\n        'down': 0,\n        'depth': 0\n    }\n    for line in lines:\n        line = line.split()\n        if line[0] == 'forward':\n            m['depth'] = m['depth'] + (int(line[1]) * (m['down'] - m['up']))\n        m[line[0]] = m[line[0]] + int(line[1])\n\n    return m['forward'] * m['depth']\n\n\nif __name__ == '__main__':\n    fo = open(sys.argv[1], 'r')\n    lines = fo.readlines()\n    print(main(lines))\n    fo.close()\n","repo_name":"Tduncan13/adventOfCode","sub_path":"2021/day02/python/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"19915906587","text":"numbers = [1, 2, 3, 4, 5]\n\nn = 5\n\n\"\"\"\nFor example, if there are 5 positions,\nthe number of ways to exchange positions starting from the first position\n\"\"\"\n\n# Generate permutations by swapping the position of the i-th element\n# The number of possible position swaps\n\ndef perm1(i):\n    global cnt1\n    # 1. Termination condition: when i has reached the end\n    if i == n:\n        cnt1 += 1\n        print(numbers)\n        return\n\n    # 2. Recursive call\n    # Try swapping the number at the current position i with the number at every other position j\n    # Swapping i,j and j,i earlier gives the same result, so do not repeat it (duplicate handling)\n    # However, it is also possible to proceed without swapping positions.\n    # When i == j, move on to the next element without swapping positions.\n    for j in range(i, n): # start from i to handle duplicates\n        # Swap the i-th and j-th positions and proceed\n        numbers[i], numbers[j] = numbers[j], numbers[i]\n        # Go on to place the next, (i + 1)-th, element\n        perm1(i + 1)\n        # Restore the i-th and j-th positions and continue\n        numbers[i], numbers[j] = numbers[j], numbers[i]\ncnt1 = 0\nperm1(0)\nprint(cnt1)","repo_name":"Gold-Gyu/PYTHON","sub_path":"0216부분집합 순열 재귀/신민석교수님 순열.py","file_name":"신민석교수님 순열.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"38239145471","text":"import os\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nif os.environ.get(\"FLASK_ENV\") == \"development\":\n    database_url = 'sqlite:///db.sqlite3'\nelse:\n    database_url = os.environ.get('DATABASE_URL').replace(\n        'postgres', 'postgresql'\n    )\n\nengine = create_engine(database_url)\ndb_session = scoped_session(\n    sessionmaker(\n        autocommit=False,\n        autoflush=False,\n        bind=engine\n    )\n)\nBase = declarative_base()\nBase.query = db_session.query_property()\n\ndef init_db():\n    from models import Project\n    Base.metadata.create_all(bind=engine)\n","repo_name":"instruct-br/teste-python-jr-remoto-2021-06","sub_path":"template-flask/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"73"} +{"seq_id":"3949884871","text":"## Combine Population Hotspots and fasta headers ##\nfrom Bio import SeqIO\nimport glob\n\nhotspot_dir=\"/Volumes/MW_18TB/Alice_Shanfelter/LD_Files/Consensus_Seqs/Conseqs/\"\n\nhotspots = glob.glob(hotspot_dir + 
\"*coldspots_con.fa\")\n#PS_hotspot = glob.glob(hotspot_dir + \"PS*hotspots_con.fa\")\n\nfor fasta_file in hotspots:\n\n chrom = fasta_file.replace(hotspot_dir, \"\").replace(\"_coldspots_con.fa\", \"\")\n\n output = fasta_file.replace(\".fa\", \"_format.fa\")\n output = open(output, \"w\")\n fasta = SeqIO.parse(open(fasta_file), 'fasta')\n\n for line in fasta:\n header = str(line.id)\n seq = str(line.seq)\n\n #seq = format_fasta_seq(seq)\n\n header += \"_\" + chrom\n\n output.write(\">\" + header + \"\\n\" + seq + \"\\n\")\n","repo_name":"mzwink/stickleback_genomics","sub_path":"downsize_scripts/change_fasta_headers.py","file_name":"change_fasta_headers.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30129849884","text":"import numpy as np\nimport pandas as pd\nimport pytest\n\nimport xarray as xr\nfrom xarray import DataArray, Dataset, Variable\nfrom xarray.core.groupby import _consolidate_slices\n\nfrom . import assert_allclose, assert_equal, assert_identical, create_test_data\n\n\n@pytest.fixture\ndef dataset():\n ds = xr.Dataset(\n {\"foo\": ((\"x\", \"y\", \"z\"), np.random.randn(3, 4, 2))},\n {\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3, 4], \"z\": [1, 2]},\n )\n ds[\"boo\"] = ((\"z\", \"y\"), [[\"f\", \"g\", \"h\", \"j\"]] * 2)\n\n return ds\n\n\n@pytest.fixture\ndef array(dataset):\n return dataset[\"foo\"]\n\n\ndef test_consolidate_slices():\n\n assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)]\n assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)]\n assert _consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)]) == [slice(2, 6, 1)]\n\n slices = [slice(2, 3), slice(5, 6)]\n assert _consolidate_slices(slices) == slices\n\n with pytest.raises(ValueError):\n _consolidate_slices([slice(3), 4])\n\n\ndef test_groupby_dims_property(dataset):\n assert dataset.groupby(\"x\").dims == dataset.isel(x=1).dims\n assert dataset.groupby(\"y\").dims == dataset.isel(y=1).dims\n\n stacked = dataset.stack({\"xy\": (\"x\", \"y\")})\n assert stacked.groupby(\"xy\").dims == stacked.isel(xy=0).dims\n\n\ndef test_multi_index_groupby_map(dataset):\n # regression test for GH873\n ds = dataset.isel(z=1, drop=True)[[\"foo\"]]\n expected = 2 * ds\n actual = (\n ds.stack(space=[\"x\", \"y\"])\n .groupby(\"space\")\n .map(lambda x: 2 * x)\n .unstack(\"space\")\n )\n assert_equal(expected, actual)\n\n\ndef test_multi_index_groupby_sum():\n # regression test for GH873\n ds = xr.Dataset(\n {\"foo\": ((\"x\", \"y\", \"z\"), np.ones((3, 4, 2)))},\n {\"x\": [\"a\", \"b\", \"c\"], \"y\": [1, 2, 3, 4]},\n )\n expected = ds.sum(\"z\")\n actual = ds.stack(space=[\"x\", \"y\"]).groupby(\"space\").sum(\"z\").unstack(\"space\")\n assert_equal(expected, actual)\n\n\ndef test_groupby_da_datetime():\n # test groupby with a DataArray of dtype datetime for GH1132\n # create test data\n times = pd.date_range(\"2000-01-01\", periods=4)\n foo = xr.DataArray([1, 2, 3, 4], coords=dict(time=times), dims=\"time\")\n # create test index\n dd = times.to_pydatetime()\n reference_dates = [dd[0], dd[2]]\n labels = reference_dates[0:1] * 2 + reference_dates[1:2] * 2\n ind = xr.DataArray(\n labels, coords=dict(time=times), dims=\"time\", name=\"reference_date\"\n )\n g = foo.groupby(ind)\n actual = g.sum(dim=\"time\")\n expected = xr.DataArray(\n [3, 7], coords=dict(reference_date=reference_dates), dims=\"reference_date\"\n )\n assert_equal(expected, actual)\n\n\ndef 
test_groupby_duplicate_coordinate_labels():\n # fix for http://stackoverflow.com/questions/38065129\n array = xr.DataArray([1, 2, 3], [(\"x\", [1, 1, 2])])\n expected = xr.DataArray([3, 3], [(\"x\", [1, 2])])\n actual = array.groupby(\"x\").sum()\n assert_equal(expected, actual)\n\n\ndef test_groupby_input_mutation():\n # regression test for GH2153\n array = xr.DataArray([1, 2, 3], [(\"x\", [2, 2, 1])])\n array_copy = array.copy()\n expected = xr.DataArray([3, 3], [(\"x\", [1, 2])])\n actual = array.groupby(\"x\").sum()\n assert_identical(expected, actual)\n assert_identical(array, array_copy) # should not modify inputs\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n xr.DataArray([1, 2, 3, 4, 5, 6], [(\"x\", [1, 1, 1, 2, 2, 2])]),\n xr.Dataset({\"foo\": (\"x\", [1, 2, 3, 4, 5, 6])}, {\"x\": [1, 1, 1, 2, 2, 2]}),\n ],\n)\ndef test_groupby_map_shrink_groups(obj):\n expected = obj.isel(x=[0, 1, 3, 4])\n actual = obj.groupby(\"x\").map(lambda f: f.isel(x=[0, 1]))\n assert_identical(expected, actual)\n\n\n@pytest.mark.parametrize(\n \"obj\",\n [\n xr.DataArray([1, 2, 3], [(\"x\", [1, 2, 2])]),\n xr.Dataset({\"foo\": (\"x\", [1, 2, 3])}, {\"x\": [1, 2, 2]}),\n ],\n)\ndef test_groupby_map_change_group_size(obj):\n def func(group):\n if group.sizes[\"x\"] == 1:\n result = group.isel(x=[0, 0])\n else:\n result = group.isel(x=[0])\n return result\n\n expected = obj.isel(x=[0, 0, 1])\n actual = obj.groupby(\"x\").map(func)\n assert_identical(expected, actual)\n\n\ndef test_da_groupby_map_func_args():\n def func(arg1, arg2, arg3=0):\n return arg1 + arg2 + arg3\n\n array = xr.DataArray([1, 1, 1], [(\"x\", [1, 2, 3])])\n expected = xr.DataArray([3, 3, 3], [(\"x\", [1, 2, 3])])\n actual = array.groupby(\"x\").map(func, args=(1,), arg3=1)\n assert_identical(expected, actual)\n\n\ndef test_ds_groupby_map_func_args():\n def func(arg1, arg2, arg3=0):\n return arg1 + arg2 + arg3\n\n dataset = xr.Dataset({\"foo\": (\"x\", [1, 1, 1])}, {\"x\": [1, 2, 3]})\n expected = xr.Dataset({\"foo\": (\"x\", [3, 3, 3])}, {\"x\": [1, 2, 3]})\n actual = dataset.groupby(\"x\").map(func, args=(1,), arg3=1)\n assert_identical(expected, actual)\n\n\ndef test_da_groupby_empty():\n\n empty_array = xr.DataArray([], dims=\"dim\")\n\n with pytest.raises(ValueError):\n empty_array.groupby(\"dim\")\n\n\ndef test_da_groupby_quantile():\n\n array = xr.DataArray(\n data=[1, 2, 3, 4, 5, 6], coords={\"x\": [1, 1, 1, 2, 2, 2]}, dims=\"x\"\n )\n\n # Scalar quantile\n expected = xr.DataArray(\n data=[2, 5], coords={\"x\": [1, 2], \"quantile\": 0.5}, dims=\"x\"\n )\n actual = array.groupby(\"x\").quantile(0.5)\n assert_identical(expected, actual)\n\n # Vector quantile\n expected = xr.DataArray(\n data=[[1, 3], [4, 6]],\n coords={\"x\": [1, 2], \"quantile\": [0, 1]},\n dims=(\"x\", \"quantile\"),\n )\n actual = array.groupby(\"x\").quantile([0, 1])\n assert_identical(expected, actual)\n\n # Multiple dimensions\n array = xr.DataArray(\n data=[[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]],\n coords={\"x\": [1, 1, 1, 2, 2], \"y\": [0, 0, 1]},\n dims=(\"x\", \"y\"),\n )\n\n actual_x = array.groupby(\"x\").quantile(0, dim=...)\n expected_x = xr.DataArray(\n data=[1, 4], coords={\"x\": [1, 2], \"quantile\": 0}, dims=\"x\"\n )\n assert_identical(expected_x, actual_x)\n\n actual_y = array.groupby(\"y\").quantile(0, dim=...)\n expected_y = xr.DataArray(\n data=[1, 22], coords={\"y\": [0, 1], \"quantile\": 0}, dims=\"y\"\n )\n assert_identical(expected_y, actual_y)\n\n actual_xx = array.groupby(\"x\").quantile(0)\n expected_xx = 
xr.DataArray(\n data=[[1, 11, 22], [4, 15, 24]],\n coords={\"x\": [1, 2], \"y\": [0, 0, 1], \"quantile\": 0},\n dims=(\"x\", \"y\"),\n )\n assert_identical(expected_xx, actual_xx)\n\n actual_yy = array.groupby(\"y\").quantile(0)\n expected_yy = xr.DataArray(\n data=[[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]],\n coords={\"x\": [1, 1, 1, 2, 2], \"y\": [0, 1], \"quantile\": 0},\n dims=(\"x\", \"y\"),\n )\n assert_identical(expected_yy, actual_yy)\n\n times = pd.date_range(\"2000-01-01\", periods=365)\n x = [0, 1]\n foo = xr.DataArray(\n np.reshape(np.arange(365 * 2), (365, 2)),\n coords={\"time\": times, \"x\": x},\n dims=(\"time\", \"x\"),\n )\n g = foo.groupby(foo.time.dt.month)\n\n actual = g.quantile(0, dim=...)\n expected = xr.DataArray(\n data=[\n 0.0,\n 62.0,\n 120.0,\n 182.0,\n 242.0,\n 304.0,\n 364.0,\n 426.0,\n 488.0,\n 548.0,\n 610.0,\n 670.0,\n ],\n coords={\"month\": np.arange(1, 13), \"quantile\": 0},\n dims=\"month\",\n )\n assert_identical(expected, actual)\n\n actual = g.quantile(0, dim=\"time\")[:2]\n expected = xr.DataArray(\n data=[[0.0, 1], [62.0, 63]],\n coords={\"month\": [1, 2], \"x\": [0, 1], \"quantile\": 0},\n dims=(\"month\", \"x\"),\n )\n assert_identical(expected, actual)\n\n\ndef test_ds_groupby_quantile():\n ds = xr.Dataset(\n data_vars={\"a\": (\"x\", [1, 2, 3, 4, 5, 6])}, coords={\"x\": [1, 1, 1, 2, 2, 2]}\n )\n\n # Scalar quantile\n expected = xr.Dataset(\n data_vars={\"a\": (\"x\", [2, 5])}, coords={\"quantile\": 0.5, \"x\": [1, 2]}\n )\n actual = ds.groupby(\"x\").quantile(0.5)\n assert_identical(expected, actual)\n\n # Vector quantile\n expected = xr.Dataset(\n data_vars={\"a\": ((\"x\", \"quantile\"), [[1, 3], [4, 6]])},\n coords={\"x\": [1, 2], \"quantile\": [0, 1]},\n )\n actual = ds.groupby(\"x\").quantile([0, 1])\n assert_identical(expected, actual)\n\n # Multiple dimensions\n ds = xr.Dataset(\n data_vars={\n \"a\": (\n (\"x\", \"y\"),\n [[1, 11, 26], [2, 12, 22], [3, 13, 23], [4, 16, 24], [5, 15, 25]],\n )\n },\n coords={\"x\": [1, 1, 1, 2, 2], \"y\": [0, 0, 1]},\n )\n\n actual_x = ds.groupby(\"x\").quantile(0, dim=...)\n expected_x = xr.Dataset({\"a\": (\"x\", [1, 4])}, coords={\"x\": [1, 2], \"quantile\": 0})\n assert_identical(expected_x, actual_x)\n\n actual_y = ds.groupby(\"y\").quantile(0, dim=...)\n expected_y = xr.Dataset({\"a\": (\"y\", [1, 22])}, coords={\"y\": [0, 1], \"quantile\": 0})\n assert_identical(expected_y, actual_y)\n\n actual_xx = ds.groupby(\"x\").quantile(0)\n expected_xx = xr.Dataset(\n {\"a\": ((\"x\", \"y\"), [[1, 11, 22], [4, 15, 24]])},\n coords={\"x\": [1, 2], \"y\": [0, 0, 1], \"quantile\": 0},\n )\n assert_identical(expected_xx, actual_xx)\n\n actual_yy = ds.groupby(\"y\").quantile(0)\n expected_yy = xr.Dataset(\n {\"a\": ((\"x\", \"y\"), [[1, 26], [2, 22], [3, 23], [4, 24], [5, 25]])},\n coords={\"x\": [1, 1, 1, 2, 2], \"y\": [0, 1], \"quantile\": 0},\n ).transpose()\n assert_identical(expected_yy, actual_yy)\n\n times = pd.date_range(\"2000-01-01\", periods=365)\n x = [0, 1]\n foo = xr.Dataset(\n {\"a\": ((\"time\", \"x\"), np.reshape(np.arange(365 * 2), (365, 2)))},\n coords=dict(time=times, x=x),\n )\n g = foo.groupby(foo.time.dt.month)\n\n actual = g.quantile(0, dim=...)\n expected = xr.Dataset(\n {\n \"a\": (\n \"month\",\n [\n 0.0,\n 62.0,\n 120.0,\n 182.0,\n 242.0,\n 304.0,\n 364.0,\n 426.0,\n 488.0,\n 548.0,\n 610.0,\n 670.0,\n ],\n )\n },\n coords={\"month\": np.arange(1, 13), \"quantile\": 0},\n )\n assert_identical(expected, actual)\n\n actual = g.quantile(0, dim=\"time\").isel(month=slice(None, 2))\n 
expected = xr.Dataset(\n data_vars={\"a\": ((\"month\", \"x\"), [[0.0, 1], [62.0, 63]])},\n coords={\"month\": [1, 2], \"x\": [0, 1], \"quantile\": 0},\n )\n assert_identical(expected, actual)\n\n\ndef test_da_groupby_assign_coords():\n actual = xr.DataArray(\n [[3, 4, 5], [6, 7, 8]], dims=[\"y\", \"x\"], coords={\"y\": range(2), \"x\": range(3)}\n )\n actual1 = actual.groupby(\"x\").assign_coords({\"y\": [-1, -2]})\n actual2 = actual.groupby(\"x\").assign_coords(y=[-1, -2])\n expected = xr.DataArray(\n [[3, 4, 5], [6, 7, 8]], dims=[\"y\", \"x\"], coords={\"y\": [-1, -2], \"x\": range(3)}\n )\n assert_identical(expected, actual1)\n assert_identical(expected, actual2)\n\n\nrepr_da = xr.DataArray(\n np.random.randn(10, 20, 6, 24),\n dims=[\"x\", \"y\", \"z\", \"t\"],\n coords={\n \"z\": [\"a\", \"b\", \"c\", \"a\", \"b\", \"c\"],\n \"x\": [1, 1, 1, 2, 2, 3, 4, 5, 3, 4],\n \"t\": pd.date_range(\"2001-01-01\", freq=\"M\", periods=24),\n \"month\": (\"t\", list(range(1, 13)) * 2),\n },\n)\n\n\n@pytest.mark.parametrize(\"dim\", [\"x\", \"y\", \"z\", \"month\"])\n@pytest.mark.parametrize(\"obj\", [repr_da, repr_da.to_dataset(name=\"a\")])\ndef test_groupby_repr(obj, dim):\n actual = repr(obj.groupby(dim))\n expected = f\"{obj.__class__.__name__}GroupBy\"\n expected += \", grouped over %r\" % dim\n expected += \"\\n%r groups with labels \" % (len(np.unique(obj[dim])))\n if dim == \"x\":\n expected += \"1, 2, 3, 4, 5.\"\n elif dim == \"y\":\n expected += \"0, 1, 2, 3, 4, 5, ..., 15, 16, 17, 18, 19.\"\n elif dim == \"z\":\n expected += \"'a', 'b', 'c'.\"\n elif dim == \"month\":\n expected += \"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12.\"\n assert actual == expected\n\n\n@pytest.mark.parametrize(\"obj\", [repr_da, repr_da.to_dataset(name=\"a\")])\ndef test_groupby_repr_datetime(obj):\n actual = repr(obj.groupby(\"t.month\"))\n expected = f\"{obj.__class__.__name__}GroupBy\"\n expected += \", grouped over 'month'\"\n expected += \"\\n%r groups with labels \" % (len(np.unique(obj.t.dt.month)))\n expected += \"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12.\"\n assert actual == expected\n\n\ndef test_groupby_drops_nans():\n # GH2383\n # nan in 2D data variable (requires stacking)\n ds = xr.Dataset(\n {\n \"variable\": ((\"lat\", \"lon\", \"time\"), np.arange(60.0).reshape((4, 3, 5))),\n \"id\": ((\"lat\", \"lon\"), np.arange(12.0).reshape((4, 3))),\n },\n coords={\"lat\": np.arange(4), \"lon\": np.arange(3), \"time\": np.arange(5)},\n )\n\n ds[\"id\"].values[0, 0] = np.nan\n ds[\"id\"].values[3, 0] = np.nan\n ds[\"id\"].values[-1, -1] = np.nan\n\n grouped = ds.groupby(ds.id)\n\n # non reduction operation\n expected = ds.copy()\n expected.variable.values[0, 0, :] = np.nan\n expected.variable.values[-1, -1, :] = np.nan\n expected.variable.values[3, 0, :] = np.nan\n actual = grouped.map(lambda x: x).transpose(*ds.variable.dims)\n assert_identical(actual, expected)\n\n # reduction along grouped dimension\n actual = grouped.mean()\n stacked = ds.stack({\"xy\": [\"lat\", \"lon\"]})\n expected = (\n stacked.variable.where(stacked.id.notnull()).rename({\"xy\": \"id\"}).to_dataset()\n )\n expected[\"id\"] = stacked.id.values\n assert_identical(actual, expected.dropna(\"id\").transpose(*actual.dims))\n\n # reduction operation along a different dimension\n actual = grouped.mean(\"time\")\n expected = ds.mean(\"time\").where(ds.id.notnull())\n assert_identical(actual, expected)\n\n # NaN in non-dimensional coordinate\n array = xr.DataArray([1, 2, 3], [(\"x\", [1, 2, 3])])\n array[\"x1\"] = (\"x\", [1, 1, np.nan])\n expected = 
xr.DataArray(3, [(\"x1\", [1])])\n actual = array.groupby(\"x1\").sum()\n assert_equal(expected, actual)\n\n # NaT in non-dimensional coordinate\n array[\"t\"] = (\n \"x\",\n [\n np.datetime64(\"2001-01-01\"),\n np.datetime64(\"2001-01-01\"),\n np.datetime64(\"NaT\"),\n ],\n )\n expected = xr.DataArray(3, [(\"t\", [np.datetime64(\"2001-01-01\")])])\n actual = array.groupby(\"t\").sum()\n assert_equal(expected, actual)\n\n # test for repeated coordinate labels\n array = xr.DataArray([0, 1, 2, 4, 3, 4], [(\"x\", [np.nan, 1, 1, np.nan, 2, np.nan])])\n expected = xr.DataArray([3, 3], [(\"x\", [1, 2])])\n actual = array.groupby(\"x\").sum()\n assert_equal(expected, actual)\n\n\ndef test_groupby_grouping_errors():\n dataset = xr.Dataset({\"foo\": (\"x\", [1, 1, 1])}, {\"x\": [1, 2, 3]})\n with pytest.raises(\n ValueError, match=r\"None of the data falls within bins with edges\"\n ):\n dataset.groupby_bins(\"x\", bins=[0.1, 0.2, 0.3])\n\n with pytest.raises(\n ValueError, match=r\"None of the data falls within bins with edges\"\n ):\n dataset.to_array().groupby_bins(\"x\", bins=[0.1, 0.2, 0.3])\n\n with pytest.raises(ValueError, match=r\"All bin edges are NaN.\"):\n dataset.groupby_bins(\"x\", bins=[np.nan, np.nan, np.nan])\n\n with pytest.raises(ValueError, match=r\"All bin edges are NaN.\"):\n dataset.to_array().groupby_bins(\"x\", bins=[np.nan, np.nan, np.nan])\n\n with pytest.raises(ValueError, match=r\"Failed to group data.\"):\n dataset.groupby(dataset.foo * np.nan)\n\n with pytest.raises(ValueError, match=r\"Failed to group data.\"):\n dataset.to_array().groupby(dataset.foo * np.nan)\n\n\ndef test_groupby_reduce_dimension_error(array):\n grouped = array.groupby(\"y\")\n with pytest.raises(ValueError, match=r\"cannot reduce over dimensions\"):\n grouped.mean()\n\n with pytest.raises(ValueError, match=r\"cannot reduce over dimensions\"):\n grouped.mean(\"huh\")\n\n with pytest.raises(ValueError, match=r\"cannot reduce over dimensions\"):\n grouped.mean((\"x\", \"y\", \"asd\"))\n\n grouped = array.groupby(\"y\", squeeze=False)\n assert_identical(array, grouped.mean())\n\n assert_identical(array.mean(\"x\"), grouped.reduce(np.mean, \"x\"))\n assert_allclose(array.mean([\"x\", \"z\"]), grouped.reduce(np.mean, [\"x\", \"z\"]))\n\n\ndef test_groupby_multiple_string_args(array):\n with pytest.raises(TypeError):\n array.groupby(\"x\", \"y\")\n\n\ndef test_groupby_bins_timeseries():\n ds = xr.Dataset()\n ds[\"time\"] = xr.DataArray(\n pd.date_range(\"2010-08-01\", \"2010-08-15\", freq=\"15min\"), dims=\"time\"\n )\n ds[\"val\"] = xr.DataArray(np.ones(*ds[\"time\"].shape), dims=\"time\")\n time_bins = pd.date_range(start=\"2010-08-01\", end=\"2010-08-15\", freq=\"24H\")\n actual = ds.groupby_bins(\"time\", time_bins).sum()\n expected = xr.DataArray(\n 96 * np.ones((14,)),\n dims=[\"time_bins\"],\n coords={\"time_bins\": pd.cut(time_bins, time_bins).categories},\n ).to_dataset(name=\"val\")\n assert_identical(actual, expected)\n\n\ndef test_groupby_none_group_name():\n # GH158\n # xarray should not fail if a DataArray's name attribute is None\n\n data = np.arange(10) + 10\n da = xr.DataArray(data) # da.name = None\n key = xr.DataArray(np.floor_divide(data, 2))\n\n mean = da.groupby(key).mean()\n assert \"group\" in mean.dims\n\n\ndef test_groupby_getitem(dataset):\n\n assert_identical(dataset.sel(x=\"a\"), dataset.groupby(\"x\")[\"a\"])\n assert_identical(dataset.sel(z=1), dataset.groupby(\"z\")[1])\n\n assert_identical(dataset.foo.sel(x=\"a\"), dataset.foo.groupby(\"x\")[\"a\"])\n 
assert_identical(dataset.foo.sel(z=1), dataset.foo.groupby(\"z\")[1])\n\n actual = dataset.groupby(\"boo\")[\"f\"].unstack().transpose(\"x\", \"y\", \"z\")\n expected = dataset.sel(y=[1], z=[1, 2]).transpose(\"x\", \"y\", \"z\")\n assert_identical(expected, actual)\n\n\ndef test_groupby_dataset():\n data = Dataset(\n {\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))},\n {\"x\": (\"x\", list(\"abc\")), \"c\": (\"x\", [0, 1, 0]), \"y\": range(5)},\n )\n groupby = data.groupby(\"x\")\n assert len(groupby) == 3\n expected_groups = {\"a\": 0, \"b\": 1, \"c\": 2}\n assert groupby.groups == expected_groups\n expected_items = [\n (\"a\", data.isel(x=0)),\n (\"b\", data.isel(x=1)),\n (\"c\", data.isel(x=2)),\n ]\n for actual, expected in zip(groupby, expected_items):\n assert actual[0] == expected[0]\n assert_equal(actual[1], expected[1])\n\n def identity(x):\n return x\n\n for k in [\"x\", \"c\", \"y\"]:\n actual = data.groupby(k, squeeze=False).map(identity)\n assert_equal(data, actual)\n\n\ndef test_groupby_dataset_returns_new_type():\n data = Dataset({\"z\": ([\"x\", \"y\"], np.random.randn(3, 5))})\n\n actual = data.groupby(\"x\").map(lambda ds: ds[\"z\"])\n expected = data[\"z\"]\n assert_identical(expected, actual)\n\n actual = data[\"z\"].groupby(\"x\").map(lambda x: x.to_dataset())\n expected = data\n assert_identical(expected, actual)\n\n\ndef test_groupby_dataset_iter():\n data = create_test_data()\n for n, (t, sub) in enumerate(list(data.groupby(\"dim1\"))[:3]):\n assert data[\"dim1\"][n] == t\n assert_equal(data[\"var1\"][n], sub[\"var1\"])\n assert_equal(data[\"var2\"][n], sub[\"var2\"])\n assert_equal(data[\"var3\"][:, n], sub[\"var3\"])\n\n\ndef test_groupby_dataset_errors():\n data = create_test_data()\n with pytest.raises(TypeError, match=r\"`group` must be\"):\n data.groupby(np.arange(10))\n with pytest.raises(ValueError, match=r\"length does not match\"):\n data.groupby(data[\"dim1\"][:3])\n with pytest.raises(TypeError, match=r\"`group` must be\"):\n data.groupby(data.coords[\"dim1\"].to_index())\n\n\ndef test_groupby_dataset_reduce():\n data = Dataset(\n {\n \"xy\": ([\"x\", \"y\"], np.random.randn(3, 4)),\n \"xonly\": (\"x\", np.random.randn(3)),\n \"yonly\": (\"y\", np.random.randn(4)),\n \"letters\": (\"y\", [\"a\", \"a\", \"b\", \"b\"]),\n }\n )\n\n expected = data.mean(\"y\")\n expected[\"yonly\"] = expected[\"yonly\"].variable.set_dims({\"x\": 3})\n actual = data.groupby(\"x\").mean(...)\n assert_allclose(expected, actual)\n\n actual = data.groupby(\"x\").mean(\"y\")\n assert_allclose(expected, actual)\n\n letters = data[\"letters\"]\n expected = Dataset(\n {\n \"xy\": data[\"xy\"].groupby(letters).mean(...),\n \"xonly\": (data[\"xonly\"].mean().variable.set_dims({\"letters\": 2})),\n \"yonly\": data[\"yonly\"].groupby(letters).mean(),\n }\n )\n actual = data.groupby(\"letters\").mean(...)\n assert_allclose(expected, actual)\n\n\ndef test_groupby_dataset_math():\n def reorder_dims(x):\n return x.transpose(\"dim1\", \"dim2\", \"dim3\", \"time\")\n\n ds = create_test_data()\n ds[\"dim1\"] = ds[\"dim1\"]\n for squeeze in [True, False]:\n grouped = ds.groupby(\"dim1\", squeeze=squeeze)\n\n expected = reorder_dims(ds + ds.coords[\"dim1\"])\n actual = grouped + ds.coords[\"dim1\"]\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds.coords[\"dim1\"] + grouped\n assert_identical(expected, reorder_dims(actual))\n\n ds2 = 2 * ds\n expected = reorder_dims(ds + ds2)\n actual = grouped + ds2\n assert_identical(expected, reorder_dims(actual))\n\n actual = ds2 + grouped\n 
assert_identical(expected, reorder_dims(actual))\n\n    grouped = ds.groupby(\"numbers\")\n    zeros = DataArray([0, 0, 0, 0], [(\"numbers\", range(4))])\n    expected = (ds + Variable(\"dim3\", np.zeros(10))).transpose(\n        \"dim3\", \"dim1\", \"dim2\", \"time\"\n    )\n    actual = grouped + zeros\n    assert_equal(expected, actual)\n\n    actual = zeros + grouped\n    assert_equal(expected, actual)\n\n    with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n        grouped + ds\n    with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n        ds + grouped\n    with pytest.raises(TypeError, match=r\"only support binary ops\"):\n        grouped + 1\n    with pytest.raises(TypeError, match=r\"only support binary ops\"):\n        grouped + grouped\n    with pytest.raises(TypeError, match=r\"in-place operations\"):\n        ds += grouped\n\n    ds = Dataset(\n        {\n            \"x\": (\"time\", np.arange(100)),\n            \"time\": pd.date_range(\"2000-01-01\", periods=100),\n        }\n    )\n    with pytest.raises(ValueError, match=r\"incompat.* grouped binary\"):\n        ds + ds.groupby(\"time.month\")\n\n\ndef test_groupby_dataset_math_virtual():\n    ds = Dataset({\"x\": (\"t\", [1, 2, 3])}, {\"t\": pd.date_range(\"20100101\", periods=3)})\n    grouped = ds.groupby(\"t.day\")\n    actual = grouped - grouped.mean(...)\n    expected = Dataset({\"x\": (\"t\", [0, 0, 0])}, ds[[\"t\", \"t.day\"]])\n    assert_identical(actual, expected)\n\n\ndef test_groupby_dataset_nan():\n    # nan should be excluded from groupby\n    ds = Dataset({\"foo\": (\"x\", [1, 2, 3, 4])}, {\"bar\": (\"x\", [1, 1, 2, np.nan])})\n    actual = ds.groupby(\"bar\").mean(...)\n    expected = Dataset({\"foo\": (\"bar\", [1.5, 3]), \"bar\": [1, 2]})\n    assert_identical(actual, expected)\n\n\ndef test_groupby_dataset_order():\n    # groupby should preserve variables order\n    ds = Dataset()\n    for vn in [\"a\", \"b\", \"c\"]:\n        ds[vn] = DataArray(np.arange(10), dims=[\"t\"])\n    data_vars_ref = list(ds.data_vars.keys())\n    ds = ds.groupby(\"t\").mean(...)\n    data_vars = list(ds.data_vars.keys())\n    assert data_vars == data_vars_ref\n    # coords are now at the end of the list, so the test below fails\n    # all_vars = list(ds.variables.keys())\n    # all_vars_ref = list(ds.variables.keys())\n    # .assertEqual(all_vars, all_vars_ref)\n\n\n# TODO: move other groupby tests from test_dataset and test_dataarray over here\n","repo_name":"sou133688/BayesianStatics","sub_path":"lib/python3.9/site-packages/xarray/tests/test_groupby.py","file_name":"test_groupby.py","file_ext":"py","file_size_in_byte":23760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"19992157169","text":"from collections import defaultdict\nfrom itertools import combinations\n\n\ndef solution(orders, course):\n\n    cnt_dic = defaultdict(list)\n\n    for i in range(len(orders)):\n        for ii in range(i+1, len(orders)): # look at the intersection of every pair of elements\n            inter = set(str(orders[i])).intersection(set(str(orders[ii])))\n\n            if len(inter) > 1:\n                k = list(inter)\n\n                string = sorted(inter)\n                string = ''.join(string)\n\n                if i not in cnt_dic[string]:\n                    cnt_dic[string].append(i)\n\n                if ii not in cnt_dic[string]:\n                    cnt_dic[string].append(ii)\n\n                for c in range(2, len(inter)): # an intersection with 2 or more elements can itself contain further intersections\n                    comb = list(combinations(k, c))\n                    for cc in comb:\n                        string = sorted(cc)\n                        string = ''.join(string)\n\n                        if i not in cnt_dic[string]:\n                            cnt_dic[string].append(i)\n\n                        if ii not in cnt_dic[string]:\n                            cnt_dic[string].append(ii)\n    answer = []\n\n    for c in course:\n        cnt = []\n        string = []\n        for k, val in cnt_dic.items():\n            if len(k) == c:\n                cnt.append(len(val))\n                string.append(k)\n        if cnt:\n            max_val = max(cnt)\n            for i, val in enumerate(cnt):\n                if max_val == val:\n                    answer.append(string[i])\n\n    answer.sort()\n    return answer\n","repo_name":"kwakjeeyoon/coding_test","sub_path":"메뉴_리뉴얼/메뉴_리뉴얼_jiwoo.py","file_name":"메뉴_리뉴얼_jiwoo.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38764701535","text":"from typing import Any, List, Optional\n\nfrom fastapi import APIRouter, Depends, HTTPException, UploadFile, FastAPI, File\nfrom starlette.responses import JSONResponse\n\nfrom app.crud import crud_bucket\nfrom botocore.exceptions import ClientError\n\nfrom app.utils import exception\n\nrouter = APIRouter()\n\n\n@router.get(\"/{bucket_id}\", response_model=List[Any])\ndef list_objects_in_a_bucket(bucket_id: str, file: Optional[str] = None, bucket=Depends(crud_bucket.CRUDBucket)) -> Any:\n    if file:\n        try:\n            (obj, err) = bucket.load(bucket_id, file)\n        except exception.NoSuchFileExists as e:\n            return JSONResponse(status_code=404, content=str(e))\n        else:\n            return JSONResponse(obj)\n    else:\n        try:\n            objects, err = bucket.list_ids(bucket_id)\n        except exception.NoSuchBucketExists as e:\n            return JSONResponse(status_code=404, content=str(e))\n        return objects\n\n\n@router.get(\"/\", response_model=List[Any])\ndef list_all_buckets() -> Any:\n    obj = crud_bucket.list_all_buckets()\n    return JSONResponse(obj)\n\n\n@router.post(\"/{bucket_id}\", response_model=List[Any])\ndef save_object_in_a_bucket(bucket_id: str, prefix: str, file: UploadFile = File(...), bucket=Depends(crud_bucket.CRUDBucket)) -> Any:\n    print(file)\n    filename = file.filename\n    if prefix:\n        filename = prefix + \"/\" + file.filename\n    res = bucket.save(bucket_id, file, filename)\n    return JSONResponse(res)\n\n\n@router.put(\"/{bucket_name}\", response_model=List[Any])\ndef create_s3_bucket(bucket_name: str, region: str, bucket=Depends(crud_bucket.CRUDBucket)) -> Any:\n    if region:\n        try:\n            res = bucket.create(bucket_name, region)\n        except ClientError as e:\n            return JSONResponse(status_code=400, content=str(e))\n        return JSONResponse({\"result\": \"success\"})\n    else:\n        return JSONResponse(status_code=400, content=\"Region must be specified\")\n\n\n@router.delete(\"/{bucket_id}\", response_model=List[Any])\ndef delete_object_in_a_bucket(bucket_id: str, file: str = None, bucket=Depends(crud_bucket.CRUDBucket)) -> Any:\n    if not file:\n        return JSONResponse(\"file query parameter needs to be set\")\n    res = bucket.delete_obj(bucket_id, file)\n    return JSONResponse(res)\n","repo_name":"Cisco-Kosha/s3-connector","sub_path":"app/api/api_v1/endpoints/buckets.py","file_name":"buckets.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71516833195","text":"import random\nimport string\nfrom func_timeout.StoppableThread import JoinThread\nimport joblib\nimport numpy\nimport requests\nimport jieba\n\nfrom openpyxl import load_workbook\nfrom urllib import request\nfrom func_timeout import func_set_timeout\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\n\ndef write_data():\n    data = load_workbook('table.xlsx')\n    sheet = data.get_sheet_by_name('Sheet1')\n    domain = sheet['A']\n    label = sheet['B']\n    domain_text = []\n    label_text = []\n    for x in range(1, len(domain)):\n        domain_text.append(domain[x].value)\n    for x in range(1, len(label)):\n        label_text.append(label[x].value)\n    danger_text = []\n    save_text = []\n    unknow_text = []\n    for i in range(len(domain_text)):\n        if label_text[i] == '未知':\n            unknow_text.append(domain_text[i])\n        elif label_text[i] == '危险':\n            danger_text.append(domain_text[i])\n        elif label_text[i] == '安全':\n            save_text.append(domain_text[i])\n    with open('save_text.txt', 'w') as f:\n        for x in save_text:\n            f.write(x)\n    with open('unknow_text.txt', 'w') as f:\n        for x in unknow_text:\n            f.write(x)\n    with open('danger_text.txt', 'w') as f:\n        for x in danger_text:\n            f.write(x)\n\n\ndef split_domain(domain_first_char):\n    f = open(r'danger_text.txt', encoding='UTF-8')\n    danger_domain_d = []\n    for line in f.readlines():\n        if line[0] == domain_first_char:\n            danger_domain_d.append(line)\n    new_text_name = 'danger_domain_' + domain_first_char + '.txt'\n    with open(new_text_name, 'w') as f_d:\n        for x in danger_domain_d:\n            f_d.write(x)\n\n\n# pick the sequences whose first element matches the given first letter\ndef special_select(sequences, char_to_idx, first_char):\n    first_char_idx = char_to_idx[first_char]\n    seq_included = []\n    for i in sequences:\n        if i[0] == first_char_idx:\n            seq_included.append(i)\n    start = numpy.random.randint(0, len(seq_included) - 1)\n    select_pattern = seq_included[start]\n    return select_pattern\n\n\ndef extract_illegal_word():\n    f = open(r'./illegal_word_set/keywords.txt', encoding='UTF-8')\n    illegal_word_list = []\n    for line in f.readlines():\n        line = line.strip().split()\n        illegal_word_list.append(line[0])\n    with open('illegal_word.txt', 'w') as f_d:\n        for i in illegal_word_list:\n            f_d.write(i)\n            f_d.write('\\n')\n\n\ndef construct_illegal_word_type():\n    illegal_word_type = {}\n    f = open(r'./illegal_word_set/keywords.txt', encoding='UTF-8')\n    for line in f.readlines():\n        line = line.strip().split()\n        if line[0] not in illegal_word_type:\n            illegal_word_type[line[0]] = line[3]\n    joblib.dump(illegal_word_type, './illegal_word_set/illegal_word_type.pkl')\n\n\n@func_set_timeout(5)\ndef is_domain(url):\n    url_1 = 'https://' + url + '/'\n    url_2 = 'http://' + url + '/'\n    try:\n        request.urlopen(url_1)\n        return True\n    except Exception as e:\n        try:\n            request.urlopen(url_2)\n            return True\n        except Exception as e:\n            return False\n\n\ndef get_url(url):\n    url_1 = 'https://' + url + '/'\n    url_2 = 'http://' + url + '/'\n    try:\n        request.urlopen(url_1)\n        return url_1\n    except Exception as e:\n        try:\n            request.urlopen(url_2)\n            return url_2\n        except Exception as e:\n            pass\n\n\n# method_1\ndef get_html_text(url):\n    url = get_url(url)\n    print('url ', url)\n    str_html = requests.get(url)\n    html_str = str_html.text\n    # print(html_str)\n    soup = BeautifulSoup(html_str, 'lxml')\n    # print(soup)\n    text = soup.get_text()\n    my_list = text.split('\\n')\n    my_list = [x.strip() for x in my_list if x.strip() != '']\n    for ele in my_list:\n        if '\\ufeff' in my_list:\n            my_list.remove('\\ufeff')\n    return my_list\n\n\n# method_2\ndef get_context_from_url(url, index):\n    context_list = []\n    url = get_url(url)\n    print('Crawl web', index, url, 'page information')\n    chrome_options = webdriver.ChromeOptions()\n    # chrome_options.add_argument(\"--window-size=1920,1050\")\n    chrome_options.add_argument('headless')\n    try:\n        driver = webdriver.Chrome(\"/home/nslab/Domain/chromedriver\", chrome_options=chrome_options)\n        driver.get(url)\n        pageSource = driver.page_source\n        driver.close()\n        soup = BeautifulSoup(pageSource, 'lxml')\n        text = soup.get_text()\n        context_list = text.split('\\n')\n        context_list = [x.strip() for x in context_list if x.strip() != '']\n    except:\n        pass\n    return context_list\n\n\ndef generate_random_domain(len) -> 
str: \n str1 = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(len))\n str1 = ''.join([str1, '.com'])\n return str1\n\n\ndef cut_text_word(sentences):\n seg_list = jieba.cut(sentences, cut_all=True)\n return '/'.join(seg_list)\n","repo_name":"TianheWu/Domain","sub_path":"__Domain_process.py","file_name":"__Domain_process.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"21704414875","text":"from features import available_features, LSTMFeature\nfrom scipy.sparse import csr_matrix\nimport numpy as np\nfrom random import randint\n\n\nclass FeatureSet(object):\n\n def __init__(self, iterator, step_name, pos_type, config, freezed, label_func=None, vocabs=None, class_names=None):\n self.config = config\n self.name = step_name + pos_type\n self.iterator = iterator\n self.step_name = step_name\n self.pos_type = pos_type\n self.freezed = freezed\n self.vocabs = vocabs\n self.label_func = label_func\n self.class_names = class_names\n feature_names = self.config.get_value('features', lambda s: s.split(' '))\n self.binary_features = [available_features[f_name]() for f_name in feature_names]\n self.lstm_feature = LSTMFeature()\n if self.vocabs:\n self._set_vocabs()\n\n self.binary_feature_matrix = None\n self.binary_feature_width = None\n self.lstm_feature_vectors = None\n self.lstm_feature_row_width = None\n self.label_array = None\n self.number_of_instances = None\n self.num_classes = None if self.class_names is None else len(self.class_names)\n self.class_indices = None\n\n self.print('get binary feature matrix')\n binary_features_vectors = list()\n for feature in self.binary_features:\n feature_vectors = feature.get_vector_batch(self.iterator, self.freezed)\n binary_features_vectors.append(feature_vectors)\n features_length = [len(f) for f in self.binary_features]\n self.print('binary features length ' + str(features_length))\n self.binary_feature_matrix = dicts_to_sparse_matrix(binary_features_vectors, features_length, add_bias=True)\n self.binary_feature_width = self.binary_feature_matrix[0].shape[1]\n self.print('finished binary feature matrix')\n self.print('get lstm feature matrix')\n self.lstm_feature_vectors = self.lstm_feature.get_vector_batch(self.iterator, self.freezed)\n self.lstm_feature_row_width = len(self.lstm_feature)\n self.print('finished lstm feature matrix')\n\n # get the labels\n if self.label_func is not None:\n label_array_raw = self.label_func(self.iterator)\n self.class_names = list(np.unique(label_array_raw).tolist())\n self.num_classes = len(self.class_names)\n label_array = list()\n # calculate the class weights by frequency\n class_weights = [1 - (list(label_array_raw).count(c) / float(len(list(label_array_raw))))\n for c in self.class_names]\n self.class_weights = [w / min(class_weights) for w in class_weights]\n print('classes: ', self.class_names)\n\n for i, label_raw in enumerate(label_array_raw):\n label = [0 for _ in range(self.num_classes)]\n label[self.class_names.index(label_raw)] = 1\n label_array.append(label)\n\n self.label_array = np.asarray(label_array)\n\n # check if number of lstm feature instances, binary feature instance and labels are identical\n if (self.binary_feature_matrix.shape[0] != len(self.lstm_feature_vectors)) or \\\n (self.label_func and (self.binary_feature_matrix.shape[0] != len(self.label_array))):\n raise ValueError('No equal number of instances')\n self.number_of_instances = 
self.binary_feature_matrix.shape[0]\n\n def get_binary_feature_matrix(self):\n return self.binary_feature_matrix\n\n def get_lstm_features(self):\n return self.lstm_feature, self.lstm_feature_row_width\n\n def get_training_batch(self, batch_size, epoch):\n random_int = randint(0, self.number_of_instances)\n indices = [(i + random_int) % self.number_of_instances for i in range(batch_size)]\n batch_lstm_instances = list()\n batch_binary_instances = self.binary_feature_matrix[indices].toarray()\n labels = list()\n for i in indices:\n batch_lstm_instances.append(self.lstm_feature_vectors[i])\n labels.append(self.label_array[i])\n lstm_instance_time_major, sequence_lengths = self._lstm_time_major(batch_lstm_instances)\n labels = np.asarray(labels)\n batch_binary_instances = np.asarray(batch_binary_instances)\n return batch_binary_instances, lstm_instance_time_major, sequence_lengths, labels\n\n def get_prediction_instances(self, start, stop):\n lstm_instance_time_major, sequence_lengths = self._lstm_time_major(self.lstm_feature_vectors[start:stop])\n binary_instances = self.binary_feature_matrix[start:stop].toarray()\n return binary_instances, lstm_instance_time_major, sequence_lengths\n\n def get_prediction_instance(self, i):\n feature_vector = self.lstm_feature_vectors[i]\n sequence_lengths = [len(feature_vector)]\n lstm_instance_time_major = list()\n for row_index in range(len(feature_vector)):\n row = [1 if r in feature_vector[row_index] else 0 for r in range(self.lstm_feature_row_width)]\n lstm_instance_time_major.append([row])\n\n lstm_instance_time_major = np.asarray(lstm_instance_time_major, dtype=np.float32)\n sequence_lengths = np.asarray(sequence_lengths, dtype=np.int32)\n return lstm_instance_time_major, sequence_lengths\n\n def _lstm_time_major(self, lstm_feature_instances):\n # Tensorflow needs this format for sequences of different length\n sequence_lengths = [len(sequence) for sequence in lstm_feature_instances]\n max_sequence_length = max(sequence_lengths)\n\n instance_time_major = np.zeros(shape=(max_sequence_length, len(lstm_feature_instances),\n self.lstm_feature_row_width), dtype=np.float32)\n for s_id, sequence in enumerate(lstm_feature_instances):\n for r_id , row in enumerate(sequence):\n for key in row.keys():\n instance_time_major[r_id][s_id][key] = 1\n\n sequence_lengths = np.asarray(sequence_lengths, dtype=np.int32)\n return instance_time_major, sequence_lengths\n\n def get_vocabs(self):\n # get the vocabularies from the features of this step to be saved in the model object\n vocabs = {}\n for feature in self.binary_features:\n if hasattr(feature, 'get_vocab'):\n vocabs.update(feature.get_vocab())\n return vocabs\n\n def _set_vocabs(self):\n # load the vocabularies into the features to be able to reproduce one-hot-vectors of the trained model\n for feature in self.binary_features:\n if hasattr(feature, 'set_vocab'):\n feature.set_vocab(self.vocabs)\n\n def get_label_array(self):\n if self.label_func is None:\n raise ValueError('Need label function to generate labels')\n return self.label_array\n\n def print(self, s):\n print(self.step_name + ' - ' + self.pos_type + ': ' + s)\n\n\ndef dicts_to_sparse_matrix(features_vectors, features_length, add_bias=False):\n if any([len(features_vectors[0]) != len(f) for f in features_vectors]):\n raise ValueError('Every Feature needs values for every instance')\n\n shape = (len(features_vectors[0]), sum(features_length))\n row = list()\n col = list()\n data = list()\n\n for feature_idx, feature_vectors in 
enumerate(features_vectors):\n col_offset = sum(features_length[0:feature_idx])\n for row_idx, feature_vector in enumerate(feature_vectors):\n for col_idx, v in feature_vector.items():\n col_idx += col_offset\n row.append(row_idx)\n col.append(col_idx)\n data.append(v)\n if add_bias:\n for i in range(shape[0]):\n row.append(i)\n col.append(shape[1])\n data.append(1)\n shape = (shape[0], shape[1]+1)\n return csr_matrix((data, (row, col)), shape=shape)","repo_name":"raphael-sch/PythonPathLSTM","sub_path":"feature_set.py","file_name":"feature_set.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"12559493570","text":"\"\"\"\n-------------------------------------------------------\nCP164 \n-------------------------------------------------------\nAuthor: Gurkarman Reen\nID: 169030008\nEmail: reen0008@mylaurier.ca\n__updated__ = \"2023-02-10\"\n-------------------------------------------------------\n\"\"\"\n# Imports\nfrom Stack_array import Stack\nfrom Queue_array import Queue\nfrom Priority_Queue_array import Priority_Queue\nfrom List_array import List\nfrom Movie import Movie\n\n\n# Constants\n\ndef array_to_stack(stack, source):\n \"\"\"\n -------------------------------------------------------\n Pushes contents of source onto stack. At finish, source is empty.\n Last value in source is at bottom of stack,\n first value in source is on top of stack.\n Use: array_to_stack(stack, source)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n\n while source != []:\n stack.push(source.pop())\n\n return\n\n\ndef stack_to_array(stack, target):\n \"\"\"\n -------------------------------------------------------\n Pops contents of stack into target. At finish, stack is empty.\n Top value of stack is at end of target,\n bottom value of stack is at beginning of target.\n Use: stack_to_array(stack, target)\n -------------------------------------------------------\n Parameters:\n stack - a Stack object (Stack)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n\n while not stack.is_empty():\n target.insert(0, stack.pop())\n\n return\n\n\ndef stack_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests the methods of Stack for empty and\n non-empty stacks using the data in source:\n is_empty, push, pop, peek\n (Testing pop and peek while empty throws exceptions)\n Use: stack_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n\n stack = Stack()\n if len(source) == 0:\n print(\"not valid source\")\n else:\n for i in source:\n stack.push(i)\n print(f\"Is it empty: {stack.is_empty()}\")\n print(f\"push: {stack.push(i)}\")\n print(f\"pop: \\n{stack.pop()}\")\n print(f\"peek:\\n{stack.peek()}\")\n\n return\n\n\ndef array_to_queue(queue, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into queue. 
At finish, source is empty.\n Last value in source is at rear of queue,\n first value in source is at front of queue.\n Use: array_to_queue(queue, source)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n queue.insert(source.pop(0))\n\n\ndef queue_to_array(queue, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of queue into target. At finish, queue is empty.\n Front value of queue is at front of target,\n rear value of queue is at end of target.\n Use: queue_to_array(queue, target)\n -------------------------------------------------------\n Parameters:\n queue - a Queue object (Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while not queue.is_empty():\n target.append(queue.remove())\n\n\ndef queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests queue implementation.\n Tests the methods of Queue are tested for both empty and\n non-empty queues using the data in a:\n is_empty, insert, remove, peek, len\n Use: queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n q = Queue()\n\n if len(a) == 0:\n print(\"not valid\")\n else:\n for i in a:\n q.insert(i)\n print(f\"Is it empty: {q.is_empty()}\")\n print(f\"target: {q.insert(i)}\")\n print(f\"remove: \\n{q.remove()}\")\n print(f\"peek:\\n{q.peek()}\")\n\n # tests for the queue methods go here\n # print the results of the method calls and verify by hand\n\n return\n\n\ndef array_to_pq(pq, source):\n \"\"\"\n -------------------------------------------------------\n Inserts contents of source into pq. At finish, source is empty.\n Last value in source is at rear of pq,\n first value in source is at front of pq.\n Use: array_to_pq(pq, source)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while source != []:\n pq.insert(source.pop(0))\n\n\ndef pq_to_array(pq, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of pq into target. 
At finish, pq is empty.\n Highest priority value in pq is at front of target,\n lowest priority value in pq is at end of target.\n Use: pq_to_array(pq, target)\n -------------------------------------------------------\n Parameters:\n pq - a Priority_Queue object (Priority_Queue)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n while not pq.is_empty():\n target.append(pq.remove())\n\n\ndef priority_queue_test(a):\n \"\"\"\n -------------------------------------------------------\n Tests priority queue implementation.\n Test the methods of Priority_Queue are tested for both empty and\n non-empty priority queues using the data in a:\n is_empty, insert, remove, peek\n Use: priority_queue_test(a)\n -------------------------------------------------------\n Parameters:\n a - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n pq = Priority_Queue()\n\n if len(a) == 0:\n print(\"not valid\")\n else:\n for i in a:\n pq.insert(i)\n print(f\"Is it empty: {pq.is_empty()}\")\n print(f\"insert: {pq.insert(i)}\")\n print(f\"remove: \\n{pq.remove()}\")\n print(f\"peek:\\n{pq.peek()}\")\n\n # tests for the priority queue methods go here\n # print the results of the method calls and verify by hand\n\n return\n\n\ndef array_to_list(llist, source):\n \"\"\"\n -------------------------------------------------------\n Appends contests of source to llist. At finish, source is empty.\n Last element in source is at rear of llist,\n first element in source is at front of llist.\n Use: array_to_list(llist, source)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n source - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n\n while source != []:\n llist.append(source.pop(0))\n\n return\n\n\ndef list_to_array(llist, target):\n \"\"\"\n -------------------------------------------------------\n Removes contents of llist into target. 
At finish, llist is empty.\n Front element of llist is at front of target,\n rear element of llist is at rear of target.\n Use: list_to_array(llist, target)\n -------------------------------------------------------\n Parameters:\n llist - a List object (List)\n target - a Python list (list)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n\n while not llist.is_empty():\n target.append(llist.pop(0))\n\n return\n\n\ndef list_test(source):\n \"\"\"\n -------------------------------------------------------\n Tests List implementation.\n The methods of List are tested for both empty and\n non-empty lists using the data in source\n Use: list_test(source)\n -------------------------------------------------------\n Parameters:\n source - list of data (list of ?)\n Returns:\n None\n -------------------------------------------------------\n \"\"\"\n lst = List()\n\n for i in source:\n lst.append(i)\n print(f\"Is it empty: {lst.is_empty()}\")\n print(f\"count: {lst.count(i)}\")\n key = lst[0]\n print(f\"index: \\n{lst.index(key)}\")\n print()\n\n print(f\"max:\\n{lst.max()}\")\n print()\n print(f\"min:\\n{lst.min()}\")\n print()\n print(f\"insert: {lst.insert(3, key)}\")\n print()\n print(f\"remove: \\n{lst.remove(key)}\")\n print()\n print(f\"find: \\n{lst.find(key)}\")\n\n print()\n\n for i in lst:\n print(i)\n print()\n\n # tests for the List methods go here\n # print the results of the method calls and verify by hand\n\n return\n","repo_name":"karmyreen/CP164","sub_path":"reen0008_a04/reen0008_data_structures/src/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26867891395","text":"import argparse\nfrom optparse import OptionParser\n\nimport pandas\nfrom django.core.exceptions import ValidationError\nfrom django.core.management import BaseCommand\nfrom django.db import transaction\n\nfrom import_common.normalization import (\n normalize_oplaty_za_publikacje,\n normalize_rekord_id,\n)\n\nfrom bpp.models import ModelZOplataZaPublikacje, Rekord\n\n\nclass Command(BaseCommand):\n help = 'Import opłat za publikację wg \"własnego\" formatu z BPP'\n args = \" ...\"\n\n def add_arguments(self, parser: OptionParser):\n parser.add_argument(\n \"--dry\", action=argparse.BooleanOptionalAction, default=False\n )\n parser.add_argument(\"pliki\", type=argparse.FileType(\"rb\"), nargs=\"+\")\n\n @transaction.atomic\n def handle(self, dry, pliki, *args, **options):\n for plik in pliki:\n print()\n print()\n print()\n print(plik.name)\n print(\"=\" * 80)\n print()\n\n try:\n xlsx = pandas.read_excel(plik)\n except ValueError:\n print(f\"Nie umiem otworzyc pliku {plik.name}\")\n continue\n\n for wiersz, row in xlsx.iterrows():\n pk = normalize_rekord_id(row[1])\n if pk is None:\n continue\n\n rekord = Rekord.objects.get(pk=pk)\n original: ModelZOplataZaPublikacje = rekord.original\n\n try:\n row[\"Tytuł oryginalny\"]\n except KeyError:\n print(\n \"Plik nie ma kolumny 'Tytuł oryginalny', nie importuję pliku w ogóle\"\n )\n continue\n\n if rekord.tytul_oryginalny != row[\"Tytuł oryginalny\"]:\n print(\n f\"wiersz {wiersz+2} -- tytuł rekordu inny niz w bazie, nie importuję (plik: \"\n f\"{row['Tytuł oryginalny']}, baza {rekord.tytul_oryginalny})\"\n )\n print()\n continue\n\n try:\n normalize_oplaty_za_publikacje(\n original,\n # Publikacja bezkosztowa\n row[4],\n # Środki finansowe o których mowa w artykule 365\n row[5],\n # Środki 
finansowe na realizację projektu\n row[6],\n # Inne srodki finansowe\n row[7],\n # Kwota\n row[8],\n )\n except ValidationError as e:\n print(\n f\"wiersz {wiersz+2} -- problem z walidacją rekordu {rekord.tytul_oryginalny} -- \"\n f\"{e}. Zmiany nie zostały wprowadzone do bazy. \"\n )\n print()\n continue\n\n original.save()\n\n if dry:\n transaction.set_rollback(True)\n","repo_name":"iplweb/bpp","sub_path":"src/bpp/management/commands/import_oplaty_publikacje.py","file_name":"import_oplaty_publikacje.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"pl","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"33536705292","text":"\"\"\"For entities that have a condition template.\"\"\"\nfrom gemd.entity.has_dependencies import HasDependencies\nfrom gemd.entity.link_by_uid import LinkByUID\nfrom gemd.entity.setters import validate_list\nfrom gemd.entity.template.base_template import BaseTemplate\nfrom gemd.entity.template.condition_template import ConditionTemplate\nfrom gemd.entity.bounds.base_bounds import BaseBounds\n\nfrom typing import Optional, Union, Iterable, List, Tuple, Set\n\n\nclass HasConditionTemplates(HasDependencies):\n \"\"\"\n Mixin-trait for entities that include condition templates.\n\n Parameters\n ----------\n conditions: List[(ConditionTemplate, BaseBounds)]\n A list of tuples containing this entity's condition templates as well\n as any restrictions on those templates' bounds.\n\n \"\"\"\n\n def __init__(self, conditions: Iterable[Union[Union[ConditionTemplate, LinkByUID],\n Tuple[Union[ConditionTemplate, LinkByUID],\n Optional[BaseBounds]]]]):\n self._conditions = None\n self.conditions = conditions\n\n @property\n def conditions(self) -> List[Union[ConditionTemplate, LinkByUID]]:\n \"\"\"\n Get the list of condition template/bounds tuples.\n\n Returns\n -------\n List[(ConditionTemplate, bounds)]\n List of this entity's condition template/bounds pairs\n\n \"\"\"\n return self._conditions\n\n @conditions.setter\n def conditions(self, conditions: Iterable[Union[Union[ConditionTemplate, LinkByUID],\n Tuple[Union[ConditionTemplate, LinkByUID],\n Optional[BaseBounds]]]]):\n \"\"\"\n Set the list of condition templates.\n\n Parameters\n ----------\n conditions: List[(ConditionTemplate, bounds)]\n A list of tuples containing this entity's condition templates as well\n as any restrictions on those templates' bounds.\n\n Returns\n -------\n List[(ConditionTemplate, bounds)]\n List of this entity's condition template/bounds pairs\n\n \"\"\"\n if isinstance(conditions, Iterable):\n if any(isinstance(x, BaseBounds) for x in conditions):\n conditions = [conditions] # It's a template/bounds tuple (probably)\n self._conditions = validate_list(conditions,\n (ConditionTemplate, LinkByUID, list, tuple),\n trigger=BaseTemplate._homogenize_ranges\n )\n\n def validate_condition(self, condition: \"Condition\") -> bool: # noqa: F821\n \"\"\"Check if the condition is consistent w/ this template.\"\"\"\n if condition.template is not None:\n attr, bnd = next((x for x in self.conditions if condition.template == x[0]),\n (None, None))\n else:\n attr, bnd = next((x for x in self.conditions if condition.name == x[0].name),\n (None, None))\n\n if bnd is not None:\n return bnd.contains(condition.value)\n elif attr is not None and isinstance(attr, ConditionTemplate):\n return attr.bounds.contains(condition.value)\n else:\n return True # Nothing to check against\n\n def _local_dependencies(self) -> Set[Union[\"BaseEntity\", \"LinkByUID\"]]:\n 
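        # NOTE: minimal illustrative sketch, not from the original source. After
        # normalization, each entry of ``self.conditions`` is a (template, bounds)
        # pair, so the dependency set is just the first element of every pair.
        # The pair values below are hypothetical stand-ins, and the block is
        # guarded so it never executes.
        if False:
            pairs = [("condition-template-A", None), ("condition-template-B", "bounds")]
            deps = {attr[0] for attr in pairs}  # -> {"condition-template-A", "condition-template-B"}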
\"\"\"Return a set of all immediate dependencies (no recursion).\"\"\"\n return {attr[0] for attr in self.conditions}\n","repo_name":"CitrineInformatics/gemd-python","sub_path":"gemd/entity/template/has_condition_templates.py","file_name":"has_condition_templates.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"73"} +{"seq_id":"43756480662","text":"import numpy as np\r\nimport pandas as pd\r\nfrom gensim.models import Word2Vec\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib\r\nfrom DataProcess import text_process\r\n\r\n\r\ndanmaku = pd.read_csv('84887919comments1650424641.8339486.csv', encoding='gb18030').iloc[:, 5]\r\ndanmaku = danmaku.apply(text_process)\r\n\r\nwords = []\r\ncount = 0\r\nfor i in danmaku:\r\n if i:\r\n print(i)\r\n words.append(i)\r\n count += 1\r\n\r\n# 构建词向量\r\nmodel = Word2Vec(words, vector_size=30, window=3, min_count=3, epochs=20, negative=10)\r\n# 输出老番茄的词向量\r\nprint(model.wv.get_vector('老番茄'))\r\n\r\n# 对词向量进行降维,从而可以画图\r\nraw_word_vec = []\r\nword2index = {}\r\nfor i, w in enumerate(model.wv.index_to_key):\r\n raw_word_vec.append(model.wv[w])\r\n word2index[w] = i\r\nraw_word_vec = np.array(raw_word_vec)\r\nX_reduced = PCA(n_components=2).fit_transform(raw_word_vec)\r\n\r\n# 绘制星空图\r\n# 绘制所有单词向量的二维空间投影\r\nfig = plt.figure(figsize = (15, 10))\r\nax = fig.gca()\r\nax.set_facecolor('white')\r\nax.plot(X_reduced[:, 0], X_reduced[:, 1], '.', markersize = 1, alpha = 0.3, color = 'black')\r\n\r\n\r\n# 绘制几个特殊单词的向量\r\nwords = ['老番茄', '复旦之光', '某幻', '马哥','上海zoo','中国boy','猩猩','花少北','河北首富','下巴']\r\n\r\n# 设置中文字体 否则乱码\r\nzhfont1 = matplotlib.font_manager.FontProperties(fname='./华文仿宋.ttf', size=16)\r\nfor w in words:\r\n if w in word2index:\r\n ind = word2index[w]\r\n xy = X_reduced[ind]\r\n plt.plot(xy[0], xy[1], '.', alpha =1, color = 'orange',markersize=10)\r\n plt.text(xy[0], xy[1], w, fontproperties = zhfont1, alpha = 1, color = 'red')\r\nplt.show()","repo_name":"Timecollector/BiliBiliCommentsAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"73"} +{"seq_id":"9865095341","text":"#!/usr/bin/python\n# -*-coding:utf-8-*-\nimport logging\nimport os\n\nlog = logging.getLogger(__name__)\n\n\ndef send_message(mobile_number, country_code, random_num, fake=False):\n\n appid = 1400205630 # SDK AppID 是1400开头\n\n # 短信应用 SDK AppKey\n appkey = os.environ['TENCENT_APPKEY']\n\n # 需要发送短信的手机号码\n mobile_numbers = [mobile_number]\n\n # 短信模板 ID,需要在短信应用中申请\n template_id = 324017 # NOTE: 这里的模板 ID`7839`只是一个示例,真实的模板 ID 需要在短信控制台中申请\n # templateId 7839 对应的内容是\"您的验证码是: {1}\"\n # 签名\n\n sms_sign = '腾讯云' # NOTE: 签名参数使用的是`签名内容`,而不是`签名ID`。这里的签名\"腾讯云\"只是一个示例,真实的签名需要在短信控制台申请。\n\n from qcloudsms_py import SmsSingleSender\n\n ssender = SmsSingleSender(appid, appkey)\n params = [\n str(random_num),\n '5',\n ] # 当模板没有参数时,`params = []`,数组具体的元素个数和模板中变量个数必须一致,例如示例中 templateId:5678 对应一个变量,参数数组中元素个数也必须是一个\n\n if not fake:\n try:\n result = ssender.send_with_param(\n country_code,\n mobile_numbers[0],\n template_id,\n params,\n sign=sms_sign,\n extend='',\n ext='',\n ) # 签名参数未提供或者为空时,会使用默认签名发送短信\n except Exception as e:\n raise e\n else:\n result = {'sid': 'fake_sid'}\n\n log.debug('sms send result', result)\n return 
result\n","repo_name":"chinese-bbb/web-backend","sub_path":"app/services/tencent/send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"19428018516","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom Perceptron import plot_decision_regions\n\n\n\nclass AdalineGD(object):\n def __init__(self, eta=0.01, n_iter=50, random_state=1):\n self.eta = eta\n self.n_iter = n_iter\n self.random_state = random_state\n\n def fit(self, X, y):\n rgen = np.random.RandomState(self.random_state)\n self.w_ = rgen.normal(loc=0.0, scale=0.01, size=1 + X.shape[1])\n self.cost_ = []\n\n for i in range(self.n_iter):\n net_input = self.net_input(X)\n # identity activation; does nothing here\n output = self.activation(net_input)\n errors = (y - output)\n # X.T.dot(errors) is the matrix-vector product of the feature matrix and the error vector\n self.w_[1:] += self.eta * X.T.dot(errors)\n self.w_[0] += self.eta * errors.sum()\n cost = (errors ** 2).sum() / 2.0\n self.cost_.append(cost)\n return self\n\n def net_input(self, X):\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, X):\n # linear activation function\n return X\n\n def predict(self, X):\n return np.where(self.activation(self.net_input(X)) >= 0.0, 1, -1)\n\nif __name__ == \"__main__\":\n df = pd.read_csv('https://archive.ics.uci.edu/ml/'\n 'machine-learning-databases/iris/iris.data', header=None)\n\n y = df.iloc[0:100, 4].values\n y = np.where(y == 'Iris-setosa', -1, 1)\n X = df.iloc[0:100, [0, 2]].values\n\n # split the drawing area into two plots (1 row x 2 columns)\n # fig is the whole figure; ax[0] and ax[1] are the individual axes inside fig\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 4))\n\n\n ada1 = AdalineGD(n_iter=10, eta=0.01).fit(X, y)\n ax[0].plot(range(1, len(ada1.cost_) + 1), np.log10(ada1.cost_), marker='o')\n ax[0].set_xlabel('Epochs')\n ax[0].set_ylabel('log(Sum-squared-error)')\n ax[0].set_title('Adaline - Learning rate 0.01')\n\n ada2 = AdalineGD(n_iter=10, eta=0.0001).fit(X, y)\n ax[1].plot(range(1, len(ada2.cost_) + 1), ada2.cost_, marker='o')\n ax[1].set_xlabel('Epochs')\n ax[1].set_ylabel('Sum-squared-error')\n ax[1].set_title('Adaline - Learning rate 0.0001')\n\n # plt.savefig('images/02_11.png', dpi=300)\n plt.show()\n\n ## standardize the data\n X_std = np.copy(X)\n X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()\n X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()\n\n ada = AdalineGD(n_iter=15, eta=0.01)\n ada.fit(X_std, y)\n\n plot_decision_regions(X_std, y, classifier=ada)\n plt.title('Adaline - Gradient Descent')\n plt.xlabel('sepal length [standardized]')\n plt.ylabel('petal length [standardized]')\n plt.legend(loc='upper left')\n plt.tight_layout()\n\n plt.show()\n\n plt.plot(range(1, len(ada.cost_) + 1), ada.cost_, marker='o')\n plt.xlabel('Epochs')\n plt.ylabel('Sum-squared-error')\n\n plt.tight_layout()\n\n plt.show()\n","repo_name":"msykhnd/MyExcercise","sub_path":"DataScienceBook/ch2/AdaptiveLinerNeuron_GD.py","file_name":"AdaptiveLinerNeuron_GD.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33110041705","text":"from PyQt5.QtWidgets import QLabel\n# from PyQt5 import QtGui, QtCore\n# from PyQt5 import uic\nfrom menus.read_menu.info_popup import info_popup\n\nclass text_widget(QLabel):\n def __init__(self, text):\n super().__init__(text)\n\n self.setStyleSheet(\"font: 14pt; \\\n color: black;\\\n padding-right: 2px\")\n
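        # NOTE: minimal illustrative sketch, not from the original source. The
        # adjustSize() call that follows resizes the QLabel to fit its styled
        # text. A hypothetical standalone use of this widget could look like the
        # guarded block below (never executed).
        if False:
            import sys
            from PyQt5.QtWidgets import QApplication
            app = QApplication(sys.argv)
            label = text_widget("hello")  # the class defined in this record
            label.show()
            sys.exit(app.exec_())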
self.adjustSize()","repo_name":"JoaoCarlosNascimento/HandSignLanguage","sub_path":"Entrega/menus/read_menu/text_widget.py","file_name":"text_widget.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"6397634388","text":"import sys\nfrom datetime import datetime\n\nsys.path.append('src')\n\nfrom prob1 import ServerState as oldServerState\nfrom util.to_datetime import to_datetime\n\n# サーバ1つの状態を格納するクラス\nclass ServerState(oldServerState):\n def __init__(self, server_address, clash_threshold=1):\n super().__init__(server_address)\n self.clash_threshold = clash_threshold\n self.clash_count = 0\n\n def update(self, ping_datetime, ping_response):\n # pingの時刻と応答からサーバの状態を更新する\n if ping_response == '-' and self.is_working == True:\n # 動作状態にあるサーバからpingが返ってこない場合\n # サーバが故障したと判断して状態を更新する\n self.clash(ping_datetime)\n return None\n else:\n # サーバがタイムアウトしていない場合、タイムアウト回数をリセットする\n self.clash_count = 0\n\n if ping_response != '-' and self.is_working == False:\n # 故障状態にあるサーバからpingが返ってきた場合\n # サーバが復旧したと判断して状態を更新する\n # サーバの故障期間を出力する\n return self.recover(ping_datetime)\n\n # サーバの状態に変化がない\n return None\n\n def clash(self, ping_datetime):\n # タイムアウト回数を1増やす\n self.clash_count += 1\n\n # 初回のタイムアウトなら故障開始時刻を保存する\n if self.clash_count == 1:\n self.clash_from = ping_datetime\n\n # タイムアウト回数が閾値を上回った場合、故障と判定する\n if self.clash_count >= self.clash_threshold:\n # サーバが故障した時、動作フラグをFalseにして故障開始時刻を保存する\n self.is_working = False\n \n\n# サーバの監視ログリストからサーバの故障を検知する\ndef serveillance(log_list, clash_threshold):\n # 監視対象のサーバアドレスをキーとしてサーバの状態を格納する辞書を作成\n server_state_dict = dict()\n\n # 監視ログを読み込んで故障を検知する\n serveillance_result = []\n for log_line in log_list:\n # ログの文字列を各情報に分割する\n ping_datetime, server_address, ping_response = log_line.split(',')\n\n # pingの日時をstrからdatetimeに変換する\n ping_datetime = to_datetime(ping_datetime)\n\n # サーバが監視対象の辞書にない場合、新しく追加する\n if server_address not in server_state_dict:\n server_state_dict[server_address] = ServerState(server_address, clash_threshold)\n \n # サーバの状態を更新する\n result = server_state_dict[server_address].update(ping_datetime, ping_response)\n\n # サーバの状態について出力がある場合、保存する\n if result != None:\n serveillance_result.append(result)\n\n # 各サーバの監視を終了する\n for server in server_state_dict.values():\n result = server.finish()\n # サーバの状態について出力がある場合、保存する\n if result != None:\n serveillance_result.append(result)\n \n return serveillance_result\n\nif __name__ == '__main__':\n log_file_path = input('監視ログファイル : ')\n clash_threshold = int(input('故障と判定するまでのタイムアウト回数 : '))\n\n with open(log_file_path, 'r') as f:\n log_list = [line.rstrip() for line in f.readlines()]\n\n print('\\n'.join(serveillance(log_list, clash_threshold)))","repo_name":"yumanacamura/surveillance_system","sub_path":"src/prob2.py","file_name":"prob2.py","file_ext":"py","file_size_in_byte":3481,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72078740395","text":"import time\nimport random\nfrom random import randrange\nimport numpy as np\nimport numpy.linalg as npla\nimport pandas as pd\nimport sys\n\n\nclass SGDSolver():\n\n def __init__(self, path):\n \"\"\"load input dataset specified in path and split data into train and validation.\n Hint: you can store both training and testing features and target vector as\n class variables\"\"\"\n\n # read the input\n dataset = pd.read_csv(path)\n # numpy array w/ size: n*7\n self.x = dataset.iloc[:, :-1].values\n # make the first column all ones 
because b = w[0]\n self.x[:, 0] = 1\n print(\"x is \", self.x)\n # numpy array w/ size n*1\n self.y = dataset.iloc[:, -1].values\n\n # initialize the mse by setting y_hat = 0\n # self.y = y_GT\n # e_mse = 1/n * sum(y_gt - y_regress)**2\n self.mse = np.sum((self.y - 0)**2) / len(self.y)\n\n def training(self, alpha, lam, nepoch, epsilon):\n \"\"\"Training process of linear regression will happen here. User will provide\n learning rate alpha, regularization term lam, specific number of training epoches,\n and a variable epsilon to specify pre-mature end condition,\n ex. if error < epsilon, training stops. Hint: You can store both weight and\n bias as class variables, so other functions can directly use them\"\"\"\n\n # find the best w and b until error < epsilon\n\n # 2-D grid search - outer: alpha; inner: lam\n # lr: learning rate\n # rw: regularization weight\n lr = alpha[0]\n while lr <= alpha[1]:\n rw = lam[0]\n while rw <= lam[1]:\n\n # initialize class variables for weight and bias\n # w: w\n # b: w[0]\n # self.x.shape[1] = # of features + 1\n w = np.random.normal(0, 1, self.x.shape[1])\n w[0] = 0\n\n # for specified number of epochs\n for i in range(nepoch):\n\n # initialize y_regression\n y_hat = []\n\n # run through all the samples\n # in this training dataset, there are 360 of them (self.x.shape[0] = 360)\n for j in range(self.x.shape[0]):\n # model prediction\n y_tmp = self.x[j].dot(w) # y_tmp is a value\n y_hat.append(y_tmp)\n\n # # loss value\n # L = 0.5 * (self.y[j] - y_tmp)**2 + \\\n # 0.5 * rw * npla.norm(w, 2)**2\n\n # update weight vector\n for k in range(len(w)):\n # run through each element by the indexing variable k\n w[k] = w[k] + lr * self.x[j][k] * \\\n (self.y[j] - self.x[j].dot(w)) - lr * rw * w[k]\n\n # at the end of every epoch, calculate the mean squared error\n # self.y = y_GT; y_hat = y_regression\n # e_mse = 1/n * sum(y_gt - y_regress)**2\n # sigmoid(z) = w*x_test\n # y_test - y\n # cross_entropy = np.sum(-{y_test*Log(y) + (1-y_test)* log(1-y)})\n mse_new = np.sum((self.y - y_hat)**2) / len(self.y)\n\n # want to minimize the mse\n # and memorize the best w\n if mse_new < self.mse:\n self.w = w\n self.mse = mse_new\n\n # stop if error < epsilon\n if self.mse < epsilon:\n return\n\n # increment regularization weight lambda\n rw *= 10\n\n # increment learning rate alpha\n lr *= 10\n\n def testing(self, testX):\n \"\"\"Use your trained weight and bias to compute the predicted y values,\n return the n*1 y vector\"\"\"\n\n # testX dimensions\n m, n = testX.shape\n\n # add one more column\n testX_copy = np.ones((m, n+1))\n # make the first column all ones and the rest the same as testX\n testX_copy[:, 1:] = testX\n\n # compute the predicted y value\n # testX_copy: n*(# of attributes + 1)\n # self.w: (# of attributes + 1)*1\n # y: n*1\n y = testX_copy.dot(self.w).reshape(m, 1)\n\n # return the y vector\n return y\n\n\n\"\"\" Training Process: You only need to modify nepoch, epsilon of training method,\nthis is for auto-testing \"\"\"\nmodel = SGDSolver('train.csv')\n# model = SGDSolver(sys.argv[1])\n# Compute the time to do grid search on training\nstart = time.time()\n\nmodel.training([10**-10, 10], [1, 1e10], 10, 0)\n\n\n# test = np.array([[310, 108, 5, 3.5, 3.5, 8.56, 0],\n# [329, 113, 4, 4.0, 4.5, 9.1, 1]])\n# print(\"chance of admit: \", model.testing(\n# test))\n\nend = 
time.time()\n","repo_name":"ZechenM/SGD-Linear-Regressor","sub_path":"LinReg-SGD.py","file_name":"LinReg-SGD.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"2669843062","text":"from django import forms\nfrom .models import Comment\n\n\nclass CommentForm(forms.ModelForm):\n class Meta:\n model = Comment\n fields = \"__all__\"\n exclude = ['post']\n labels = {\n \"user_name\": \"Your Name\",\n \"user_email\": \"Your email address\",\n \"text\": \"Leave your comment\"\n }\n error_messages = {\n \"user_name\": {\n \"required\": \"Your name must not be empty\",\n \"max_length\": \"Please enter a shorter name!\"\n }\n }\n \n \n","repo_name":"Viachaslau/Blog","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40158242589","text":"####################################################################################################################\n####################################################################################################################\n# PERFORM A DIRECT SIMULATION WITH DASSFLOW2D AND GENERATE OBSERVED RESULTS\n# Q in\n#\n# In addition, compared to the \"lake at rest\" test case, here we generate observed data to perform a twin experiment\n# ----> w_obs=1 in input.txt and obs.txt file is provided\n####################################################################################################################\n####################################################################################################################\n\n#=======================================================#\n# Source librairies\n#=======================================================#\nimport dassflow2d as df2d\nimport numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\n#=======================================================#\n# copy existing case files\n#=======================================================#\n\n#os.chdir('/home/pagarambois/Documents/Distant/dasshydro/cases/tuto_case/2_qin')\n#dassflow_dir = \"/home/pagarambois/Documents/Distant/dassflow2d-wrap\"\n\ndassflow_dir = os.getcwd() # os.path.abspath(os.curdir)\nif dassflow_dir.split('/')[-1] != \"dasshydro\":\n os.chdir(\"../../..\")\n dassflow_dir = os.getcwd()\n\ncase_dir = os.path.join(f\"{dassflow_dir}\",\"cases/tuto_case/2_qin\")\nrun_dir = os.path.join(f\"{dassflow_dir}\",\"code/bin_A\")\n\nprint(\"Dassflow directory is understood as: \", dassflow_dir)\nprint(\"Case is copied from: \", case_dir)\nprint(\"Running directory is: \", run_dir)\n\n# delete all files in your simulation directory before starting\nos.system(f\"rm -r {dassflow_dir}/code/bin_A/*\")\n# Copy recursively the files provided in DassFlow case repository into your own simulation directory **code/bin_A/**.\nos.system(f\"cp -r {dassflow_dir}/cases/tuto_case/2_qin/bin_A/* {dassflow_dir}/code/bin_A\")\nos.chdir( f\"{dassflow_dir}/code/\")\nos.system(\"make cleanres cleanmin\")\n#=======================================================#\n# initialise + run + save results\n#=======================================================#\n\nmy_model = df2d.dassflowmodel(bin_dir = run_dir,hdf5_path = f\"{run_dir}/res/simu.hdf5\", run_type = \"direct\",clean=True) # initialise fortran/python instance\nmy_model.init_all() # allocate and initialise many fortran variables\nmy_model.run() # run 
model\nmy_model.save_all() # save simulation results in hdf5 files\n\nbathy = my_model.outputs.all_res[0.0][[\"bathy\"]]\nmy_scalar = bathy\nh = my_model.outputs.all_res[0.0][[\"h\"]]\nlabels = dict(xlabel='X [m]', ylabel='Y [m]', zlabel='')\nplotter = my_model.meshing.plot(my_scalar,\n title_scale_bar =\"Zb [m] \", \n title_plot = \"bathymetry elevation on 2D mesh grid\", \n axis_labels = labels)\n\nplotter = my_model.meshing.plot(my_scalar=h)\n\nallx =[]\nallz = []\nfor i in range(my_model.meshing.mesh_fortran.nc):\n x =my_model.meshing.mesh_fortran.cell[i].grav.x\n y = my_model.meshing.mesh_fortran.cell[i].grav.y\n print(y)\n if(y==50.0):\n allx.append(x)\n allz.append(my_model.outputs.all_res[0.0][[\"bathy\"]].iloc[i-1])\n\ntime_out = my_model.outputs.all_times[2]\nv = my_model.outputs.all_res[time_out][[\"v\"]]\n \n\n\n\n#plot en passant par autre fonction... via struct de vars...\n# my_model.\n\nfor key, value in my_model.outputs.all_res.items():\n tmp = value[\"h\"]\n\n # my_model.meshing.plot()\n\n my_model.meshing.mesh_pyvista.plot(scalars = tmp, show_edges=True, cpos= \"xy\", notebook =False)\n\ntime = 0.0\nh0 = my_model.outputs.all_res[time][[\"h\"]]\nu0 = my_model.outputs.all_res[time][[\"u\"]]\nv0 = my_model.outputs.all_res[time][[\"v\"]]\n\nplotter = my_model.meshing.plot(my_scalar = U,\n title_scale_bar =\"\", \n title_plot = f\"Initial\", \n xlabel = \"X [m]\", \n ylabel = \"Y [m]\") # for a local run remove notebook option or set notebook=False \nplotter.show() # remove jupyter_backend if needed\n\n#=======================================================#\n# Save results as observed data\n#=======================================================#\nos.system(\"rm ../../code/bin_A/obs/*\")\nos.system(\"cp ../../code/bin_A/res/obs/* bin_A/obs/\")\n","repo_name":"DassHydro-dev/dassflow2d","sub_path":"cases/tuto_case/2_qin/1_main_tuto-forward-channel_Q_in.py","file_name":"1_main_tuto-forward-channel_Q_in.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"70028556077","text":"import socket, threading, re, os, time, base64, traceback\nimport webbrowser, subprocess, json, sys, shlex\nimport urllib.request, urllib.error\nfrom urllib.parse import urlparse as urlparse\nfrom .main import UFONet\ntry:\n import pygeoip\nexcept:\n print(\"\\n[Error] [AI] Cannot import lib: pygeoip. \\n\\n To install it try:\\n\\n $ 'sudo apt-get install python3-geoip libgeoip-dev libgeoip1'\\n\")\n sys.exit(2)\n\nclass AjaxMap(object):\n def __init__(self):\n self.geo_db_mirror1 = 'https://turina.space/bordercheck/maps.tar.gz' # Turina Server\n self._geoip=None\n self._geoasn=None\n self._geoipstatus='nomap'\n self._err=''\n ufonet = UFONet()\n ufonet.create_options()\n try:\n self.zombies = ufonet.extract_zombies()\n aliens_army = ufonet.extract_aliens()\n droids_army = ufonet.extract_droids()\n ucavs_army = ufonet.extract_ucavs()\n rpcs_army = ufonet.extract_rpcs()\n self.zombies.extend(aliens_army)\n self.zombies.extend(droids_army)\n self.zombies.extend(ucavs_army)\n self.zombies.extend(rpcs_army)\n except:\n return\n\n def get_err(self):\n return self._err\n\n # check for geoip data status\n # basic lock file mechanism to avoid multiple downloads\n def get_status(self):\n if os.path.exists('maps.downloading'):\n if not os.path.exists('maps.downloadmsg'):\n f=open(\"maps.downloadmsg\",\"wb\")\n f.write(\"\".encode('utf-8'))\n f.close()\n print(\"[Info] [AI] [Control] GeoIP data download started! 
-> [OK!]\\n\")\n self._geoipstatus='downloading'\n elif os.path.isdir('maps'):\n if self._geoip == None :\n self._geoip = pygeoip.GeoIP('maps/GeoLiteCity.dat')\n if self._geoasn == None :\n self._geoasn = pygeoip.GeoIP('maps/GeoIPASNum.dat')\n if os.path.exists(\"maps.downloadmsg\") :\n os.remove(\"maps.downloadmsg\")\n self._geoipstatus='ok'\n return self._geoipstatus\n\n def retrieve(self,url,name):\n try:\n handle = urllib.request.urlopen(url)\n CHUNK = 16384\n with open(name,'wb') as fp:\n while True:\n chunk = handle.read(CHUNK)\n if not chunk:\n break\n fp.write(chunk)\n except:\n traceback.print_exc()\n\n def download_maps(self):\n # generate geolocation values on a map\n if self.get_status() != 'nomap':\n return self._geoipstatus == 'ok'\n if os.path.exists(\"maps.downloadmsg\"):\n os.remove(\"maps.downloadmsg\")\n f=open(\"maps.downloading\",'w')\n f.write(\"download started'\n if os.path.exists('/tmp/ufonet.html'):\n for x in open(r'/tmp/ufonet.html').readlines():\n stat = stat + x\n else:\n stat=\"[Info] [AI] [Control] Generating statistics... -> [Waiting!]\"\n return stat+\"\"\n if self.get_status() != \"ok\":\n dljs=\"\"\n if self.get_status() == \"nomap\":\n dljs+=\"$('#ufomsg').load('/js/ajax.js?fetchgeoip=')\\n\"\n if 'doll' in list(pGet.keys()):\n dljs+=\"$('#ufomsg').load('/js/ajax.js?fetchdoll=\"+pGet['doll']+\"')\\n\"\n dljs+=\"doll=new Doll('\"+pGet[\"doll\"]+\"')\\n\"\n return \"[Info] [AI] GeoIP data download in progress...
See console for errors+\"\n if 'zombie' in list(pGet.keys()):\n zn=base64.b64decode(pGet['zombie']).decode('utf-8')\n nzn=self.get_next_zombie(zn)\n if nzn is not None:\n zombie=self.get_js(nzn)\n return \"\"\" \"\"\"\n else:\n return \"\\n\"\n if 'fetchdoll' in list(pGet.keys()):\n tn=pGet['fetchdoll']\n target = self.geo_ip(tn)\n if target is None:\n return \"doll waiting for geoip data !\"\n return \"\"\" doll up !\"\"\"\n if 'doll' in list(pGet.keys()):\n tn=pGet['doll']\n return \"\"\"\"\"\"\n return \"\\n\"\n","repo_name":"epsylon/ufonet","sub_path":"core/ajaxmap.py","file_name":"ajaxmap.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","stars":1967,"dataset":"github-code","pt":"73"} +{"seq_id":"43146932113","text":"#!/usr/bin/python\nimport turtle\nimport random\ndef drawShape(sides, length):\n\tangle = 360.0 / sides\n\tfor side in range(sides):\n\t\tturtle.forward(length)\n\t\tturtle.right(angle)\ndef moveTurtle(x, y):\n\tturtle.penup()\n\tturtle.goto(x,y)\n\tturtle.pendown()\ndef drawSquare(length):\n\tdrawShape(4, length)\ndef drawTriangle(length):\n\tdrawShape(3, length)\ndef drawCircle(length):\n\tdrawShape(360, length)\n\n\n# drawShape(4,10)\n# moveTurtle(60, 30)\n# drawShape(3, 20)\n# moveTurtle(-10,-50)\n# drawShape(9,15)\n\n# moveTurtle(80, 40)\n# drawTriangle(4)\n# moveTurtle(100, 50)\n# drawCircle(27)\n# turtle.done();\ndef drawRandom():\n\tx = random.randrange(-200,200)\n\ty = random.randrange(-200,200)\n\tlength = random.randrange(75)\n\tshape = random.randrange(1,4)\n\tprint (length)\n\t# print \" x = \" + string(x) + \" y= \" + y + \" length \" + length + \" shape = \" + shape\n\tmoveTurtle(x,y)\n\tif shape == 1:\n\t\tdrawSquare(length)\n\telif shape == 2:\n\t\tdrawTriangle(length)\n\telif shape == 3:\n\t\tlength = length % 4\n\t\tdrawCircle(length)\n\n\nfor shape in range(100):\n\tturtle.fillcolor(\"red\")\n\tturtle.begin_fill()\n\tdrawRandom()\n\tturtle.end_fill()\nturtle.done()","repo_name":"blackteachinese/pythontestdemo","sub_path":"reusableShapes.py","file_name":"reusableShapes.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42808890575","text":"class Solution:\n def regionsBySlashes(self, grid: List[str]) -> int:\n n=len(grid)\n template=[0]*n\n visited=[template[:] for _ in range(n)]\n total=0\n upper=1\n bottom=2\n cell=3\n direction=[\n [(1,0),(-1,0),(0,1),(0,-1)],\n [(-1,0),(0,-1)],\n [(1,0),(0,1)],\n [(-1,0),(0,1)],\n [(1,0),(0,-1)]\n ]\n def possible_direction(y,x,part):\n if grid[y][x]==\" \":\n return direction[0]\n elif grid[y][x]==\"/\":\n return direction[1+(0 if part==upper else 1)]\n else:\n return direction[3+(0 if part==upper else 1)]\n def dfs(y0,x0,part):\n if visited[y0][x0]&part!=0:\n return\n if grid[y0][x0]==\" \":\n visited[y0][x0]=cell\n else:\n visited[y0][x0]|=part\n for y_offset,x_offset in possible_direction(y0,x0,part):\n y1=y0+y_offset\n x1=x0+x_offset\n if 0<=y10 else bottom)\n elif x_offset>0:\n dfs(y1,x1,upper if grid[y1][x1]==\"/\" else bottom)\n else:\n dfs(y1,x1,bottom if grid[y1][x1]==\"/\" else upper)\n for y in range(n):\n for x in range(n):\n if visited[y][x]&upper==0:\n dfs(y,x,upper)\n total+=1\n if visited[y][x]&bottom==0:\n dfs(y,x,bottom)\n total+=1\n return 
total","repo_name":"lkwq007/leetcode-py","sub_path":"959-Regions-Cut-By-Slashes.py","file_name":"959-Regions-Cut-By-Slashes.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"21045203478","text":"\"\"\"Application class of vocal-contour.\n\nInludes core functions and interfaces for frame-level vocal transcription:\nmodel training, feature pre-processing, and audio transcription.\n\nSee Also\n--------\nomnizart.base.BaseTranscription: The base class of all transcription/application classes.\n\"\"\"\n\n# pylint: disable=C0103,W0612,E0611,W0613\nimport os\nfrom os.path import join as jpath\nfrom datetime import datetime\n\nimport numpy as np\nfrom scipy.io.wavfile import write as wavwrite\nimport h5py\nimport tensorflow as tf\nfrom tensorflow.keras.utils import to_categorical\nfrom mir_eval import sonify\n\nfrom omnizart.base import BaseTranscription, BaseDatasetLoader\nfrom omnizart.setting_loaders import VocalContourSettings\nfrom omnizart.feature.wrapper_func import extract_cfp_feature\nfrom omnizart.utils import get_logger, ensure_path_exists, parallel_generator, resolve_dataset_type, aggregate_f0_info\nfrom omnizart.io import write_yaml, write_agg_f0_results\nfrom omnizart.train import train_epochs, get_train_val_feat_file_list\nfrom omnizart.callbacks import EarlyStopping, ModelCheckpoint\nfrom omnizart.vocal_contour.inference import inference\nfrom omnizart.vocal_contour import labels as lextor\nfrom omnizart.constants import datasets as d_struct\nfrom omnizart.models.u_net import semantic_segmentation\nfrom omnizart.music.losses import focal_loss\n\n\nlogger = get_logger(\"Vocal Contour\")\n\n\nclass VocalContourTranscription(BaseTranscription):\n \"\"\"Application class for vocal-contour transcription.\"\"\"\n def __init__(self, conf_path=None):\n super().__init__(VocalContourSettings, conf_path=conf_path)\n\n def transcribe(self, input_audio, model_path=None, output=\"./\"):\n \"\"\"Transcribe frame-level fundamental frequency of vocal from the given audio.\n\n Parameters\n ----------\n input_audio: Path\n Path to the wav audio file.\n model_path: Path\n Path to the trained model or the transcription mode. If given a path, should be\n the folder that contains `arch.yaml`, `weights.h5`, and `configuration.yaml`.\n output: Path (optional)\n Path for writing out the extracted vocal f0. Default to current path.\n\n Returns\n -------\n f0: txt\n The transcribed f0 of the vocal contour in Hz.\n\n See Also\n --------\n omnizart.cli.vocal_contour.transcribe: The coressponding command line entry.\n \"\"\"\n if not os.path.isfile(input_audio):\n raise FileNotFoundError(f\"The given audio path does not exist. 
Path: {input_audio}\")\n\n logger.info(\"Loading model...\")\n model, model_settings = self._load_model(model_path)\n\n logger.info(\"Extracting feature...\")\n feature = extract_cfp_feature(\n input_audio,\n hop=model_settings.feature.hop_size,\n win_size=model_settings.feature.window_size,\n down_fs=model_settings.feature.sampling_rate\n )\n\n logger.info(\"Predicting...\")\n f0 = inference(feature[:, :, 0], model, timestep=model_settings.training.timesteps)\n agg_f0 = aggregate_f0_info(f0, t_unit=model_settings.feature.hop_size)\n\n timestamp = np.arange(len(f0)) * model_settings.feature.hop_size\n wav = sonify.pitch_contour(\n timestamp, f0, model_settings.feature.sampling_rate, amplitudes=0.5 * np.ones(len(f0))\n )\n\n output = self._output_midi(output, input_audio, verbose=False)\n if output is not None:\n write_agg_f0_results(agg_f0, f\"{output}_f0.csv\")\n wavwrite(f\"{output}_trans.wav\", model_settings.feature.sampling_rate, wav)\n logger.info(\"Text and Wav files have been written to %s\", os.path.abspath(os.path.dirname(output)))\n\n logger.info(\"Transcription finished\")\n return agg_f0\n\n def generate_feature(self, dataset_path, vocalcontour_settings=None, num_threads=4):\n \"\"\"Extract the feature from the given dataset.\n\n To train the model, the first step is to pre-process the data into feature\n representations. After downloading the dataset, use this function to generate\n the feature by giving the path of the stored dataset.\n\n To specify the output path, modify the attribute\n ``vocalcontour_settings.dataset.feature_save_path`` (TODO: to confirm).\n It defaults to the folder of the stored dataset, and creates\n two folders: ``train_feature`` and ``test_feature``.\n\n Parameters\n ----------\n dataset_path: Path\n Path to the downloaded dataset.\n vocalcontour_settings: VocalContourSettings\n The configuration instance that holds all relative settings for\n the life-cycle of building a model.\n num_threads:\n Number of threads for parallel extraction of the feature.\n\n See Also\n --------\n omnizart.constants.datasets:\n The supported datasets and the corresponding training/testing splits.\n \"\"\"\n settings = self._validate_and_get_settings(vocalcontour_settings)\n\n # Resolve feature output path\n train_feat_out_path, test_feat_out_path = self._resolve_feature_output_path(dataset_path, settings)\n logger.info(\"Output training feature to %s\", train_feat_out_path)\n logger.info(\"Output testing feature to %s\", test_feat_out_path)\n\n dataset_type = resolve_dataset_type(\n dataset_path,\n keywords={\"mir-1k\": \"mir1k\", \"mir1k\": \"mir1k\", \"medleydb\": \"medleydb\"}\n )\n if dataset_type is None:\n logger.warning(\n \"The given path %s does not match any built-in processable dataset. Do nothing...\",\n dataset_path\n )\n return\n\n logger.info(\"Inferred dataset type: %s\", dataset_type)\n struct = {\n \"mir1k\": d_struct.MIR1KStructure,\n \"medleydb\": d_struct.MedleyDBStructure\n }[dataset_type]\n label_extractor = {\n \"mir1k\": lextor.MIR1KlabelExtraction,\n \"medleydb\": lextor.MedleyDBLabelExtraction\n }[dataset_type]\n\n train_data_pair = struct.get_train_data_pair(dataset_path=dataset_path)\n logger.info(\n \"Start extract training feature of the dataset. 
\"\n \"This may take time to finish and affect the computer's performance\"\n )\n _parallel_feature_extraction(\n train_data_pair, train_feat_out_path, label_extractor, settings.feature, num_threads=num_threads\n )\n\n test_data_pair = struct.get_test_data_pair(dataset_path=dataset_path)\n logger.info(\n \"Start extract testing feature of the dataset. \"\n \"This may take time to finish and affect the computer's performance\"\n )\n _parallel_feature_extraction(\n test_data_pair, test_feat_out_path, label_extractor, settings.feature, num_threads=num_threads\n )\n\n # Writing out the settings\n write_yaml(settings.to_json(), jpath(train_feat_out_path, \".success.yaml\"))\n write_yaml(settings.to_json(), jpath(test_feat_out_path, \".success.yaml\"))\n logger.info(\"All done\")\n\n def train(self, feature_folder, model_name=None, input_model_path=None, vocalcontour_settings=None):\n \"\"\"Model training.\n\n Train the model from scratch or continue training given a model checkpoint.\n\n Parameters\n ----------\n feature_folder: Path\n Path to the generated feature.\n model_name: str\n The name of the trained model. If not given, will default to the\n current timestamp.\n input_model_path: Path\n Specify the path to the model checkpoint in order to fine-tune\n the model.\n vocalcontour_settings: VocalContourSettings\n The configuration that holds all relative settings for\n the life-cycle of model building.\n \"\"\"\n settings = self._validate_and_get_settings(vocalcontour_settings)\n\n if input_model_path is not None:\n logger.info(\"Continue to train one model: %s\", input_model_path)\n model, prev_set = self._load_model(input_model_path)\n settings.training.timesteps = prev_set.training.timesteps\n settings.model.save_path = prev_set.model.save_path\n\n logger.info(\"Constructing dataset instance\")\n split = settings.training.steps / (settings.training.steps + settings.training.val_steps)\n train_feat_files, val_feat_files = get_train_val_feat_file_list(feature_folder, split=split)\n\n output_types = (tf.float32, tf.float32)\n train_dataset = VocalContourDatasetLoader(\n feature_files=train_feat_files,\n num_samples=settings.training.batch_size * settings.training.steps,\n timesteps=settings.training.timesteps\n ).get_dataset(settings.training.batch_size, output_types=output_types)\n\n val_dataset = VocalContourDatasetLoader(\n feature_files=val_feat_files,\n num_samples=settings.training.val_batch_size * settings.training.val_steps,\n timesteps=settings.training.timesteps\n ).get_dataset(settings.training.val_batch_size, output_types=output_types)\n\n if input_model_path is None:\n logger.info(\"Constructing new model\")\n # NOTE: The default value of dropout rate for ConvBlock is different\n # in VocalSeg which is 0.2.\n model = semantic_segmentation(\n multi_grid_layer_n=1, feature_num=384, ch_num=1, timesteps=settings.training.timesteps\n )\n model.compile(optimizer=\"adam\", loss=focal_loss, metrics=['accuracy'])\n\n logger.info(\"Resolving model output path\")\n if model_name is None:\n model_name = str(datetime.now()).replace(\" \", \"_\")\n if not model_name.startswith(settings.model.save_prefix):\n model_name = f\"{settings.model.save_prefix}_{model_name}\"\n\n model_save_path = jpath(settings.model.save_path, model_name)\n ensure_path_exists(model_save_path)\n write_yaml(settings.to_json(), jpath(model_save_path, \"configurations.yaml\"))\n write_yaml(model.to_yaml(), jpath(model_save_path, \"arch.yaml\"), dump=False)\n logger.info(\"Model output to: %s\", model_save_path)\n\n 
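        # NOTE: minimal illustrative sketch, not from the original source. It
        # records the per-batch shapes the two dataset loaders above yield,
        # inferred from the loader code further down in this file (features
        # reshaped to (timesteps, feature_num, 1); labels one-hot with two
        # classes). The literal sizes are illustrative defaults, not guaranteed
        # settings, and the block is guarded so it never executes.
        if False:
            import numpy as np
            batch, timesteps, feature_num = 16, 128, 384
            feat = np.zeros((batch, timesteps, feature_num, 1), dtype=np.float32)
            label = np.zeros((batch, timesteps, feature_num, 2), dtype=np.float32)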
logger.info(\"Constructing callbacks\")\n callbacks = [\n EarlyStopping(patience=settings.training.early_stop),\n ModelCheckpoint(model_save_path, save_weights_only=True)\n ]\n logger.info(\"Callback list: %s\", callbacks)\n\n logger.info(\"Start training\")\n history = train_epochs(\n model,\n train_dataset,\n validate_dataset=val_dataset,\n epochs=settings.training.epoch,\n steps=settings.training.steps,\n val_steps=settings.training.val_steps,\n callbacks=callbacks\n )\n\n return model_save_path, history\n\n\ndef _all_in_one_extract(data_pair, label_extractor, t_unit, **kwargs):\n feat = extract_cfp_feature(data_pair[0], **kwargs)\n label = label_extractor.extract_label(data_pair[1], t_unit=t_unit)\n flen = len(feat)\n llen = len(label)\n if flen > llen:\n diff = flen - llen\n label = np.pad(label, ((0, diff), (0, 0)), constant_values=0)\n elif llen > flen:\n label = label[:flen]\n return feat, label\n\n\ndef _parallel_feature_extraction(data_pair, out_path, label_extractor, feat_settings, num_threads=4):\n feat_extract_params = {\n \"hop\": feat_settings.hop_size,\n \"down_fs\": feat_settings.sampling_rate,\n \"win_size\": feat_settings.window_size\n }\n\n iters = enumerate(\n parallel_generator(\n _all_in_one_extract,\n data_pair,\n max_workers=num_threads,\n use_thread=True,\n chunk_size=num_threads,\n label_extractor=label_extractor,\n t_unit=feat_settings.hop_size,\n **feat_extract_params\n )\n )\n\n for idx, ((feature, label), audio_idx) in iters:\n audio = data_pair[audio_idx][0]\n\n print(f\"Progress: {idx + 1}/{len(data_pair)} - {audio}{' ' * 6}\", end=\"\\r\") # noqa: E226\n\n filename, _ = os.path.splitext(os.path.basename(audio))\n out_hdf = jpath(out_path, f\"{filename}.hdf\")\n saved = False\n retry_times = 5\n for retry in range(retry_times):\n if saved:\n break\n try:\n with h5py.File(out_hdf, \"w\") as out_f:\n out_f.create_dataset(\"feature\", data=feature)\n out_f.create_dataset(\"label\", data=label)\n saved = True\n except OSError as exp:\n logger.warning(\"OSError occurred, retrying %d times. Reason: %s\", retry + 1, str(exp))\n if not saved:\n logger.error(\"H5py failed to save the feature file after %d retries.\", retry_times)\n raise OSError\n print(\"\")\n\n\nclass VocalContourDatasetLoader(BaseDatasetLoader):\n \"\"\"Data loader for training the mdoel of ``vocal-contour``.\n\n Load feature and label for training.\n\n Parameters\n ----------\n feature_folder: Path\n Path to the extracted feature files in `*.hdf`.\n feature_files: list[Path]\n List of path to the feature files in`*.hdf`.\n num_samples: int\n Total number of samples to yield.\n timesteps: int\n Time length of the feature.\n channels: list[int]\n Channels to be used for training. 
Allowed values are [1, 2, 3].\n feature_num: int\n Target size of feature dimension.\n Zero padding is done to resolve mismatched input and target size.\n\n Yields\n ------\n feature:\n Input features for model training.\n label:\n Coressponding labels.\n \"\"\"\n def __init__(\n self,\n feature_folder=None,\n feature_files=None,\n num_samples=100,\n timesteps=128,\n channels=0,\n feature_num=384\n ):\n super().__init__(\n feature_folder=feature_folder, feature_files=feature_files, num_samples=num_samples, slice_hop=timesteps\n )\n\n self.feature_folder = feature_folder\n self.feature_files = feature_files\n self.num_samples = num_samples\n self.timesteps = timesteps\n self.channels = channels\n self.feature_num = feature_num\n\n self.hdf_refs = {}\n for hdf in self.hdf_files:\n ref = h5py.File(hdf, \"r\")\n self.hdf_refs[hdf] = ref\n\n def _pad(self, data):\n pad_bottom = (self.feature_num - data.shape[1]) // 2\n pad_top = self.feature_num - data.shape[1] - pad_bottom\n paddings = ((0, 0), (pad_bottom, pad_top))\n if len(data.shape) == 3:\n paddings += ((0, 0),)\n return np.pad(data, paddings)\n\n def _get_feature(self, hdf_name, slice_start):\n feat = self.hdf_refs[hdf_name][\"feature\"]\n feat = feat[:, :, self.channels]\n feat = self._pad(feat)\n feat = feat[slice_start:slice_start + self.slice_hop]\n return feat.reshape(self.timesteps, self.feature_num, 1)\n\n def _get_label(self, hdf_name, slice_start):\n label = self.hdf_refs[hdf_name][\"label\"]\n label = self._pad(label)\n label = label[slice_start:slice_start + self.slice_hop]\n return to_categorical(label, num_classes=2)\n\n def _pre_yield(self, feature, label):\n feat_len = len(feature)\n label_len = len(label)\n\n if (feat_len == self.timesteps) and (label_len == self.timesteps):\n # All normal\n return feature, label\n\n # The length of feature and label are inconsistent. 
Trim to the same size as the shorter one.\n if feat_len > label_len:\n feature = feature[:label_len]\n feat_len = len(feature)\n else:\n label = label[:feat_len]\n label_len = len(label)\n\n return feature, label\n","repo_name":"Music-and-Culture-Technology-Lab/omnizart","sub_path":"omnizart/vocal_contour/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15972,"program_lang":"python","lang":"en","doc_type":"code","stars":1488,"dataset":"github-code","pt":"73"} +{"seq_id":"11249474404","text":"# Functions used to calculate the temperature profile based on \n# Eddy diffusivity\n\nimport numpy as np\n\ndef zStar(z, T):\n \"BL depth defined to be point of maximum dT/dz\"\n dTdzMax = 0\n zS = 0\n for iz in range(len(z)-1):\n dTdz = (T[iz+1] - T[iz])/(z[iz+1] - z[iz])\n if dTdz > dTdzMax:\n zS = 0.5*(z[iz+1] + z[iz])\n dTdzMax = dTdz\n \n return zS\n\ndef wStar(g, zS, Qs, theta00):\n \" Convective vertical velocity scale\"\n return (g*zS*Qs/theta00)**(1./3.)\n\ndef K(zTilde, zS, wS):\n \"Eddy diffusivity\"\n vonKarman = 0.4\n return zS*wS*vonKarman*(39*vonKarman*zTilde)**(1./3)\\\n *zTilde*(1-zTilde)**2\n\ndef zTilde(z, z0, zS):\n \"Height normalised by BL depth and surface roughness\"\n return np.where((z+z0)/(zS + z0) < 1, (z+z0)/(zS + z0), 1)\n\n","repo_name":"AtmosFOAM/hilaryRun","sub_path":"boundaryLayer/convectiveBL/ED/EDfunctions.py","file_name":"EDfunctions.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1456480833","text":"class Restaurant:\r\n #constructor\r\n def __init__(self, nombre, categoria, precio):\r\n self.nombre = nombre\r\n self.categoria = categoria\r\n self.precio = precio\r\n\r\n def mostrar_informacion(self):\r\n print(f'\\r\\nNombre: {self.nombre}')\r\n print(f'Categoria: {self.categoria}')\r\n print(f'Precio: {self.precio}')\r\n\r\n#El constructor se ejecuta automaticamente al generar una instancia\r\n\r\nrestaurant = Restaurant('Pizzeria', 'Comida', 50)\r\nrestaurant.mostrar_informacion()\r\n\r\nrestaurant2 = Restaurant('Hamburguesas', 'Comida', 20)\r\nrestaurant2.mostrar_informacion()\r\n","repo_name":"JesusAlva10/RepositorioEjemplo","sub_path":"Curso Python/14-classes-2.py","file_name":"14-classes-2.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38697136805","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\n\nN, K = map(int, input().split())\n\narr = deque([])\nsum = N\nsize = 1\nwhile N > 0 and sum > K:\n if N % 2 != 0:\n arr.append(size)\n sum += 1\n sum = sum - N + N // 2\n N //= 2\n size *= 2\n\narr.append(size)\nanswer = 0\n\npos = 0\nwhile sum > K and len(arr) > 2:\n sum -= 1\n answer += arr[pos + 1] - arr[pos]\n arr.popleft()\n arr[0] = arr[0] * 2\nprint(answer)\n","repo_name":"pyooster/python_codingTest","sub_path":"Rappire/5월 2주차/1052.py","file_name":"1052.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"29617743893","text":"import torchvision.models as models\nfrom torchvision import transforms\nimport torch\nfrom torch import nn\nfrom torch.nn import Module\nfrom torch.nn import BCEWithLogitsLoss\n\nfrom utils import IModel\nimport config\n\n\nclass Network(Module):\n def __init__(self):\n super(Network, self).__init__()\n\n # DEFINE NETWORK HERE\n\n ###########################################\n 
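        # NOTE: minimal illustrative sketch, not from the original source. The
        # "Filters" block below builds a fixed 3x3 box blur; a per-channel
        # (depthwise) box blur in PyTorch uses groups=in_channels and a weight
        # of shape (channels, 1, k, k). Guarded so it never executes.
        if False:
            box = nn.Conv2d(3, 3, kernel_size=3, padding=1, bias=False, groups=3)
            box.weight = nn.Parameter(torch.ones((3, 1, 3, 3)) / 9.0)
            box.weight.requires_grad = False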
# Filters\n ###########################################\n self.blur = nn.Conv2d(in_channels=3, out_channels=3,\n kernel_size=3, padding=1, bias=False, stride=1, groups=3)\n # fixed 3x3 box blur; groups=3 keeps each colour channel independent\n self.blur.weight = nn.Parameter(torch.ones((3, 1, 3, 3))/9.0)\n self.blur.weight.requires_grad = False\n\n ###########################################\n # Network layers\n ###########################################\n pretrained_model = models.densenet201(pretrained=True)\n self.features = nn.ModuleList(pretrained_model.children())[:-1]\n self.features = nn.Sequential(*self.features)\n\n ###########################################\n # Classifier\n ###########################################\n in_features = pretrained_model.classifier.in_features*8*8\n # no Sigmoid here: BCEWithLogitsLoss applies the sigmoid internally\n self.fc = nn.Sequential(\n nn.Linear(in_features, 1)\n )\n\n # END NETWORK DEFINITION\n\n def forward(self, x):\n # DEFINE NETWORK FORWARD PROPAGATION OF INPUT HERE\n x = self.blur(x)\n x = self.features(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n # END FORWARD PROPAGATION\n\n x = torch.flatten(x)\n return x\n\n# DEFINE MODEL HERE\n\n\nclass Model(IModel):\n # SET MODEL ATTRIBUTES HERE:\n ###########################################\n # Training attributes\n ###########################################\n loss_func = BCEWithLogitsLoss(pos_weight=torch.tensor(\n [config.DS_WEIGHT]).to(config.DEVICE))\n optimizer_func = torch.optim.RMSprop\n epochs = 500\n batch_size = 16\n lr = 0.0000022\n\n ###########################################\n # Training preprocessing\n ###########################################\n training_transforms = transforms.Compose([\n transforms.Resize(config.IMAGE_SHAPE),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(90),\n transforms.ToTensor(),\n transforms.GaussianBlur(3)\n ])\n\n ###########################################\n # Validation preprocessing\n ###########################################\n validation_transforms = transforms.Compose([\n transforms.Resize(config.IMAGE_SHAPE),\n transforms.ToTensor(),\n ])\n\n # END MODEL ATTRIBUTES\n\n def __init__(self, network):\n super(Model, self).__init__()\n\n self.network = network\n self.optimizer = self.optimizer_func(\n self.network.parameters(),\n lr=self.lr)\n# END MODEL DEFINITION\n\n\ndef get_model() -> IModel:\n # INSTANTIATE MODEL HERE:\n network = Network()\n\n for param in network.parameters():\n param.requires_grad = True\n\n for param in network.blur.parameters():\n param.requires_grad = False\n\n model = Model(network)\n # END MODEL INSTANTIATION\n\n return model\n","repo_name":"PBonvang/fundus-image-classifier","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74717701995","text":"#!/usr/bin/env python3\n\nimport rospy\nfrom tracking.msg import TrackingResult\nfrom std_msgs.msg import Bool\nfrom std_msgs.msg import Int16\n\nfrom controller import Controller, Mode\n\ndef tracking_result_callback(data):\n global init_size, controller, motion, left_depth, right_depth\n \n if init_size is None:\n init_size = (data.x2 - data.x1) * (data.y2 - data.y1)\n \n ds = 1 - (data.x2 - data.x1) * (data.y2 - data.y1) / init_size\n dy = ((data.y1 + data.y2) / 2 - 240) / 240\n dx = ((data.x1 + data.x2) / 2 - 320) / 320\n\n # ds = 0 if(ds < 0.2) else ds\n speed = (ds + dy + abs(dx)) * 70\n # speed = ds * 70\n\n if speed < controller.MIN_SPEED: \n speed = 0\n else:\n speed = max(controller.MIN_SPEED, min(speed, 45))\n speed = int(speed)\n
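    # NOTE: minimal illustrative sketch, not from the original source. The
    # steering logic below maps a normalized horizontal offset in [-1, 1] to a
    # servo angle centred on 90 degrees with a mild power curve; the offset
    # value here is a hypothetical example. Guarded so it never executes.
    if False:
        offset = 0.5
        sign = 1 if offset >= 0 else -1
        servo = 90 + (abs(offset) ** 1.2) * 25 * sign  # ~101 degrees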
\n angle = ((data.x1 + data.x2) / 2 - 320) / 320\n # angle = ((data.x1 + data.x2) / 2 - 320) / 200\n # angle = angle + max((1000 - left_depth) / 400, 0)\n # angle = angle - max((1000 - right_depth) / 400, 0)\n # angle = angle if(abs(angle) > 0.1) else 0\n if angle > 0.2:\n speed = int(speed * 0.8)\n sign = 1 if(angle >= 0) else -1\n angle = max(-1, min(1, angle))\n angle = 90 + (abs(angle) ** 1.2) * 25 * sign\n print(angle)\n angle = max(controller.MIN_SERVO_ANGLE, min(controller.MAX_SERVO_ANGLE, angle))\n angle = int(angle)\n\n if motion and (left_depth > 900 or right_depth > 900):\n mode = Mode.FORWARD\n else:\n mode = Mode.STOP\n controller.set_motion(speed, angle, mode)\n\ndef motion_callback(data):\n global motion\n motion = data.data\n\ndef left_depth_callback(data):\n global left_depth\n left_depth = data.data\n if left_depth == 10000:\n left_depth = 622\n\ndef right_depth_callback(data):\n global right_depth\n right_depth = data.data\n if right_depth == 10000:\n right_depth = 622\n\nif __name__ == '__main__':\n rospy.init_node('control_node', anonymous=True)\n \n global init_size, controller, motion, left_depth, right_depth\n init_size = None\n controller = Controller()\n motion = False\n left_depth = 100000\n right_depth = 100000\n \n rospy.Subscriber('tracking_result', TrackingResult, tracking_result_callback)\n rospy.Subscriber('motion', Bool, motion_callback)\n rospy.Subscriber('camera/left/depth', Int16, left_depth_callback)\n rospy.Subscriber('camera/right/depth', Int16, right_depth_callback)\n rospy.spin()\n","repo_name":"xucj98/Jetson-Nano-Car","sub_path":"control/scripts/control_node.py","file_name":"control_node.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39558711605","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom turtle import width\n\nfrom gui.BasicCalc import BasicCalc\n\nclass MainGUI:\n def __init__(self):\n\n\n # Main GUI window creation\n self.root = tk.Tk()\n self.root.geometry('600x400')\n self.root.wm_title('Engineering Calculator')\n\n # Create tab control\n tabCtrl = ttk.Notebook(self.root)\n tabCtrl.pack(expand=1, fill=\"both\")\n # Create the tabs\n basicCalcTab = ttk.Frame(tabCtrl)\n tab2 = ttk.Frame(tabCtrl)\n # Add the tabs\n tabCtrl.add(basicCalcTab, text='Basic Calculator', padding=5)\n tabCtrl.add(tab2, text='Tab 2')\n\n # Add the widget for each tab\n self.basicCalc = BasicCalc(self.root, basicCalcTab)\n\n\n def start(self):\n \"\"\"Start the app GUI\"\"\"\n self.root.mainloop()","repo_name":"njdreikosen/eng-calc","sub_path":"gui/MainGUI.py","file_name":"MainGUI.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"14841694873","text":"import re, os, pydicom\nfrom postprocessing import sendSR, extractCID\n\ndef writeSR(values):\n codes = extractCID.getcodes()\n with open(\"./output/output.xml\", 'r', encoding='utf-8') as sr:\n txt = sr.read()\n codeliste = re.findall('([\\t ]*){code\\|([\\d_]+)\\|(\\w+)}', txt)\n zuweisung = {}\n for elem in codeliste:\n zuweisung[elem[2]] = (elem[1], elem[0])\n for zk, zv in zuweisung.items():\n vv = values[f\"{zk}\"]\n for ok, ov in codes[zv[0]].items():\n if vv.upper() == ov[1].upper():\n pat = '{code\\|'+zv[0]+'\\|'+zk+'}'\n code = f\"{ok}\\n\"\\\n f\"{zv[1]}\\n\"\\\n f\"{zv[1]} {ov[0]}\\n\"\\\n f\"{zv[1]}\\n\"\\\n f\"{zv[1]}{ov[1]}\"\n txt = re.sub(pat, code, txt)\n\n\n for k, v in 
values.items():\n if \"date\" in k:\n v = v.replace('-', '')\n pat = \"{\"+k+\"}\"\n txt = re.sub(pat, v, txt)\n with open(\"./output/output.xml\", 'w', encoding='utf-8') as out:\n out.write(txt)\n os.system(\"xml2dsr ./output/output.xml ./output/output.dcm\")\n with pydicom.dcmread(\"./output/output.dcm\") as ds:\n pass\n os.system(\"dsr2html +U8 ./output/output.dcm ./output/output.html\")\n sendSR.sendSR(ds)\n","repo_name":"VRLAB-HSKL/VirtualRadiologyData","sub_path":"postprocessing/writeSR.py","file_name":"writeSR.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"15827959160","text":"\n\nfrom PyQt5 import QtCore, QtWidgets\nimport pyqtgraph as pg\nfrom itertools import islice\n\nimport numpy as np \n\nfrom pyqtgraph import ScatterPlotItem\n\n\n\n\n\n\n\n\n\n\nclass PlotGraphs(pg.GraphicsWindow):\n\n dataFile = \"Psi_2.x\"\n imagenes = []\n limits = []\n\n def readData(self):\n\n try:\n datos = np.loadtxt(self.dataFile) #remember to be careful with the location of 'Phi_2.x'\n except:\n datos = np.array([[0, 1], [0, 1]])\n print(self.dataFile + \" not found\")\n\n xmin = min(datos[:,0])\n xmax = max(datos[:,0])\n index = np.where(datos == xmax)[0] #I know the start (or the end) of each block; this returns the position in datos[]\n #of every end-of-block element\n n = len(index) #the length of this array gives the total number of blocks (images)\n self.imagenes = np.split(ary=datos, indices_or_sections=n, axis=0) #split datos into each of the blocks (images)\n #axis=0 splits by rows (each data pair is one row)\n #imagenes[i] gives the image at t=ti\n #imagenes[0][:,0] gives the 'x' values of the image at t=t0\n #imagenes[0][:,1] gives the 'y' values of the image at t=t0\n ymin = min(self.imagenes[0][:,1]) #the min and max of the first image serve as the global max and min\n ymax = max(self.imagenes[0][:,1]) \n\n self.limits = [xmin, xmax, ymin, ymax]\n\n\n ind = 0\n\n \n def __init__(self, parent=None):\n\n self.readData()\n\n xmin = self.limits[0]\n xmax = self.limits[1]\n ymin = self.limits[2] \n ymax = self.limits[3]\n\n\n super().__init__(parent=parent)\n\n self.mainLayout = QtWidgets.QVBoxLayout()\n self.setLayout(self.mainLayout)\n\n self.timer = QtCore.QTimer(self)\n self.timer.setInterval(50) # in milliseconds\n self.timer.start()\n self.timer.timeout.connect(self.onNewData)\n\n self.plotItem = self.addPlot(title=\"Gravitational Wave 1 + 1 simulator\")\n\n self.plotItem.setXRange(xmin, xmax)\n self.plotItem.setYRange(ymin, ymax)\n\n self.plotDataItem = self.plotItem.plot([], pen=pg.mkPen('c', width=3))\n #self.scatter = self.plotItem.plot(size=10, pen=None, symbolBrush=(255,0,0), symbolSize=5, symbolPen=None) #brush=pg.mkBrush(255, 255, 255, 120\n\n def setData(self, x, y):\n self.plotDataItem.setData(x, y)\n #size = [elem**0.3*3 for elem in allM]\n #self.scatter.setData(x, y, symbolSize=size)\n\n def onNewData(self):\n\n try: \n allX = self.imagenes[self.ind][:,0]\n allY = self.imagenes[self.ind][:,1]\n self.ind += 1\n self.setData(allX, allY)\n except Exception as e: \n self.ind = 0\n print(e)\n #self.timer.stop()\n\n\ndef main():\n \n app = QtWidgets.QApplication([])\n\n pg.setConfigOptions(antialias=False) # True seems to work as well\n\n win = PlotGraphs()\n win.show()\n win.resize(800,600) \n win.raise_()\n app.exec_()\n \nif __name__ == \"__main__\":\n 
main()","repo_name":"ghyls/numericalRelativity","sub_path":"plotGraphs.py","file_name":"plotGraphs.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"27461517832","text":"from django.urls import path, include\r\n\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n # path('js/graph_functions.js', views.js_graph_functions, name='js_graph_functions'),\r\n # path('myview2/', views.myview2, name='myview2'),\r\n\r\n path('update_visit_info_metadata', views.update_visit_info_metadata, name='update_visit_info_metadata'),\r\n path('update_list_of_instruments', views.update_list_of_instruments, name='update_list_of_instruments'),\r\n\r\n path('create_instruments//', views.create_instruments, name='create_instruments'),\r\n path('create_instruments', views.create_instruments, name='create_instruments'),\r\n\r\n path('ignore_visits//', views.ignore_visits, name='ignore_visits'),\r\n path('ignore_visits', views.ignore_visits, name='ignore_visits'),\r\n\r\n path('delete_instruments//', views.delete_instruments, name='delete_instruments'),\r\n path('delete_instruments', views.delete_instruments, name='delete_instruments'),\r\n\r\n path('rules/edit/', views.edit_rule, name='edit_rule'),\r\n path('rules/delete/', views.delete_rule, name='delete_rule'),\r\n path('rules/new', views.create_rule, name='create_rule'),\r\n path('rules', views.manage_rules, name='manage_rules'),\r\n\r\n path('test_rules', views.test_rules, name='test_rules'),\r\n\r\n path('', views.home, name='home'),\r\n]","repo_name":"Center-for-Health-Informatics/neurobehavioral","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13831793703","text":"import numpy as np\nimport time\nfrom DP import DPmatching\n\ndef read_param(filename):\n with open(filename,\"r\") as f:\n for i, line in enumerate(f):\n line = line.replace('\\n','')\n if i == 0:\n continue\n if i == 1:\n name = line\n elif i == 2:\n number = int(line)\n a = np.zeros((number,15))\n else:\n h = map(float,line[:-1].split(\" \"))\n a[i-3] = [t for t in h]\n return name,a\ndef get_dist_map(a,b):\n \n wide = b.shape[0]\n height = a.shape[0]\n d = np.zeros((height,wide))\n for i in range(height):\n for j in range(wide):\n dist = (a[i]-b[j])**2\n sum_dist = float(np.sqrt(dist.sum()))\n\n d[i,j] = sum_dist\n return d\n\ndef read_files(filename):\n temp = []\n names = {}\n print(\"____read_file____\")\n for i in range(20): \n spel,LPC = read_param(filename+\"_{0:03d}.txt\".format(i+1))\n names[i] = spel\n temp.append(LPC)\n\n return names,temp\n\ndef main():\n tmp_names, temp = read_files(\"./city022/city022\")\n names, tmp = read_files(\"./city011/city011\")\n r = len(temp)\n scores = np.ones((r,r))\n print(\"___START_DPMATCHING___\")\n for i in range(r):\n for j in range(r):\n \n num = np.array(temp[i])\n num2 = np.array(tmp[j])\n t = get_dist_map(num,num2)\n scores[i,j] = DPmatching(t)\n print(\"__FINISH__\")\n print(scores.shape)\n tets = scores.tolist()\n acu = 0\n for n,score in enumerate(tets):\n #y = [t for t,n in enumerate(score) if n == min(score)]\n #y = np.where(score==min(score))\n y = score.index(min(score))\n if tmp_names[y]==names[n]:\n acu = acu + 1\n else:\n print(tmp_names[y],names[n])\n with open(\"score.txt\",\"a\") as f:\n f.writelines(str(score))\n #if n == y[0]:\n # acu = acu + 1\n 
print(\"acurate\",acu/(n+1))\n\n\nif __name__==\"__main__\":\n main()\n","repo_name":"hatakeyamayuta/DPmatching","sub_path":"DP_matching.py","file_name":"DP_matching.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26556319532","text":"\"\"\"\nTest module\n\"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport pygame\n\nfrom game.spirites.paddle import Paddle\n\n\nclass TestPaddle(TestCase):\n \"\"\"\n class which tests paddle\n \"\"\"\n\n def setUp(self) -> None:\n with patch.object(Paddle, \"__init__\", lambda x, y, z: None):\n paddle_mock = Paddle(None, None)\n paddle_mock._rect = pygame.Rect(300, 550, 79, 20)\n paddle_mock._area = pygame.Rect(0, 0, 600, 600)\n paddle_mock.paddle_velocity = pygame.math.Vector2()\n self.paddle = paddle_mock\n\n def test_move_left(self):\n \"\"\"\n moving left\n :return:\n \"\"\"\n expected = 293\n self.paddle.paddle_velocity.x = -7\n self.paddle.update()\n self.assertEqual(self.paddle.rect.x, expected)\n\n def test_move_right(self):\n \"\"\"\n moving right\n :return:\n \"\"\"\n expected = 307\n self.paddle.paddle_velocity.x = 7\n self.paddle.update()\n self.assertEqual(self.paddle.rect.x, expected)\n","repo_name":"lukinio/arkanoid","sub_path":"tests/test_paddle.py","file_name":"test_paddle.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30073042461","text":"# https://www.codewars.com/kata/545cedaa9943f7fe7b000048/train/python\nimport re\n\n\ndef is_pangram(s):\n print(s.lower())\n print(True if len(re.findall(r'[a-z]', s.lower())) >= 26 and len(s) > len(set(s)) else False)\n\n\nis_pangram(\"The quick, brown fox jumps over the lazy dog!\")\nis_pangram(\"1bcdefghijklmnopqrstuvwxyz\")\nis_pangram(\"Aacdefghijklmnopqrstuvwxyz\")\n","repo_name":"mustafaergul/my_python_katas","sub_path":"6kyu/is_pangram.py","file_name":"is_pangram.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"75058024235","text":"import torch as th\nfrom torch import nn\n\n\nclass Remix(nn.Module):\n \"\"\"Remix.\n Mixes different noises with clean speech within a given batch\n \"\"\"\n\n def forward(self, sources):\n noise, clean = sources\n bs, *other = noise.shape\n device = noise.device\n perm = th.argsort(th.rand(bs, device=device), dim=0)\n return th.stack([noise[perm], clean])","repo_name":"KhanhNguyen4999/cmgan_denoiser","sub_path":"src/augment.py","file_name":"augment.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16405526449","text":"import heapq\n\ndef solution(scoville, K):\n answer = 0\n heapq.heapify(scoville)\n while True:\n if len(scoville) < 2 and scoville[0]= K:\n heapq.heappush(scoville, fst)\n return answer\n else:\n scd = heapq.heappop(scoville)\n mix = fst + scd * 2\n heapq.heappush(scoville, mix)\n answer += 1\n","repo_name":"kky0426/TIL","sub_path":"Programmers/더 맵게.py","file_name":"더 맵게.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"21402715236","text":"from abc import ABC, abstractmethod\nfrom typing import Optional, Deque, List\nfrom collections import deque\nfrom datetime import datetime\n\nimport pandas as pd 
# type: ignore\nfrom logbook import Logger # type: ignore\nfrom ib_insync import IB, util, Event, Contract, BarData, BarDataList\n\nlog = Logger(__name__)\n\n\nclass BarStreamer(ABC):\n\n durationStr = \"12 D\"\n barSizeSetting = \"30 secs\"\n whatToShow = \"TRADES\"\n useRTH = False\n contract = None\n\n \"\"\" TODO: backfilling leaves one duplicated data point on joining time series\"\"\"\n\n def __call__(\n self, ib, contract: Contract, start_date: Optional[datetime] = None\n ) -> None:\n self.ib = ib\n self.contract = contract\n log.debug(f\"start_date: {start_date}\")\n if start_date:\n # 30s time-window to retrieve data\n self.durationStr = f\"{self.date_to_delta(start_date) + 30} S\"\n while True:\n log.debug(f\"Requesting bars for {self.contract.localSymbol}\")\n self.bars = self.get_bars()\n if self.bars is not None:\n break\n else:\n util.sleep(5)\n log.debug(f\"Bars received for {self.contract.localSymbol}\")\n self.subscribe()\n\n def date_to_delta(self, date: datetime) -> int:\n return (self.now - date).seconds\n\n def get_bars(self) -> BarDataList:\n log.debug(\n f\"reqHistoricalData params for {self.contract.localSymbol} \"\n f\"durationStr: {self.durationStr}\"\n )\n return self.ib.reqHistoricalData(\n self.contract,\n endDateTime=\"\",\n durationStr=self.durationStr,\n barSizeSetting=self.barSizeSetting,\n whatToShow=self.whatToShow,\n useRTH=self.useRTH,\n formatDate=1,\n keepUpToDate=True,\n timeout=0,\n )\n\n def subscribe(self) -> None:\n self.bars.updateEvent += self.onNewBar\n\n def onNewBar(self, bars: BarDataList, hasNewBar: bool) -> None:\n if hasNewBar:\n # latest bar is \"under construction\", [-2] is the latest\n # ready for consumption\n self.aggregate(bars[-2])\n\n @abstractmethod\n def aggregate(self, bar: BarData):\n pass\n\n\nclass StreamAggregator(BarStreamer):\n def __init__(self) -> None:\n self._createEvents()\n self.buffer: Deque[BarDataList] = deque()\n self.new_bars: List[BarData] = []\n self.backfill = True\n super().__init__()\n\n def _createEvents(self) -> None:\n self.newCandle = Event(\"newCandle\")\n\n def __call__(self, ib: IB, contract: Contract) -> None:\n date = self.all_bars[-1].date if contract == self.contract else None\n super().__call__(ib, contract, date)\n self.process_back_data(date)\n\n def process_back_data(self, date: Optional[datetime] = None) -> None:\n # flag needed on re-connect\n self.backfill = True\n for counter, bar in enumerate(self.bars[:-1]):\n # date given on reconnect only\n if (date and (bar.date > date)) or not date:\n self.aggregate(bar)\n # prevent from blocking too long\n if counter % 10000 == 0:\n log.debug(f\"releasing control {self.contract.localSymbol}\")\n util.sleep(0)\n log.debug(f\"startup data generated for {self.contract.localSymbol}\")\n self.backfill = False\n self.clear_buffer()\n\n def create_candle(self) -> None:\n df = util.df(self.new_bars)\n df.date = df.date.astype(\"datetime64[ns]\")\n df.set_index(\"date\", inplace=True)\n df[\"volume_weighted\"] = df.average * df.volume\n weighted_price = df.volume_weighted.sum() / df.volume.sum()\n self.newCandle.emit(\n {\n \"backfill\": self.backfill,\n \"date\": df.index[-1],\n \"open\": df.open[0],\n \"high\": df.high.max(),\n \"low\": df.low.min(),\n \"close\": df.close[-1],\n \"weighted_price\": weighted_price,\n # 'price': weighted_price,\n \"price\": df.close[-1],\n \"volume\": df.volume.sum(),\n }\n )\n\n def onNewBar(self, bars: BarDataList, hasNewBar: bool) -> None:\n if hasNewBar:\n if self.backfill:\n log.debug(f\"buffering bar for 
{self.contract.localSymbol}\")\n self.buffer.append(bars[-2])\n else:\n self.clear_buffer()\n self.aggregate(bars[-2])\n\n def clear_buffer(self) -> None:\n \"\"\"Utilize bars that have been buffered while processing back data.\"\"\"\n while self.buffer:\n log.debug(f\"clearing buffer for {self.contract.localSymbol}\")\n self.aggregate(self.buffer.popleft())\n\n\nclass VolumeStreamer(StreamAggregator):\n def __init__(\n self, volume: Optional[float] = None, avg_periods: Optional[float] = None\n ) -> None:\n super().__init__()\n self.all_bars = []\n self.volume = volume\n self.avg_periods = avg_periods\n self.aggregator = 0\n\n def __call__(self, ib: IB, contract: Contract) -> None:\n try:\n date = self.all_bars[-1].date if contract == self.contract else None\n except IndexError:\n date = None\n BarStreamer.__call__(self, ib, contract, date)\n if self.avg_periods:\n self.volume = self.reset_volume(self.avg_periods)\n else:\n self.volume = self.volume\n log.info(f\"Volume for {contract.localSymbol}: {self.volume}\")\n StreamAggregator.process_back_data(self, date)\n\n def reset_volume(self, avg_periods) -> int:\n # TODO: make span adjust to length of requested data\n bars = self.all_bars or self.bars\n if bars == self.bars:\n self.span = len(self.bars)\n df = util.df(bars)\n # last 5 days\n volume = df.iloc[-14100:].volume.rolling(avg_periods).sum().mean().round()\n log.debug(f\"volume: {volume}\")\n return volume\n\n @staticmethod\n def verify(bar: BarData) -> bool:\n \"\"\"\n Faulty bar often comes with volume = -1 or barCount = -1.\n \"\"\"\n return (bar.volume >= 0) and (bar.barCount >= 0)\n\n def aggregate(self, bar: BarData) -> None:\n if not self.verify(bar):\n log.warning(f\"Faulty bar for: {self.contract.localSymbol} {bar}\")\n return\n self.new_bars.append(bar)\n self.all_bars.append(bar)\n self.aggregator += bar.volume\n if not self.backfill:\n message = (\n f\"{bar.date} {self.aggregator}/{self.volume}\"\n f\" {self.contract.localSymbol}\"\n )\n log.debug(message)\n if self.aggregator >= self.volume:\n self.aggregator = 0\n self.create_candle()\n self.new_bars.clear()\n\n @property\n def all_bars_df(self):\n if self.all_bars:\n df = util.df(self.all_bars)\n df.date = df.date.astype(\"datetime64[ns]\")\n df.set_index(\"date\", inplace=True)\n else:\n df = pd.DataFrame()\n return df\n\n\nclass ResampledStreamer(StreamAggregator):\n def __init__(self, periods: int) -> None:\n self.periods = periods\n self.counter = 0\n super().__init__()\n\n def aggregate(self, bar) -> None:\n self.new_bars.append(bar)\n self.counter += 1\n if self.counter == self.periods:\n self.create_candle()\n self.counter = 0\n\n\nclass DirectStreamer(StreamAggregator):\n def aggregate(self, bar: BarData) -> None:\n self.new_bars.append(bar)\n self.create_candle()\n","repo_name":"t1user/ib_tools","sub_path":"streamers.py","file_name":"streamers.py","file_ext":"py","file_size_in_byte":7734,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"73"} +{"seq_id":"27963331947","text":"class Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n \"\"\"\n Uses a hash set so that membership checks are O(1). 
\n \n Runtime: 429 ms, faster than 81.01% of Python3 online submissions for Longest Consecutive Sequence.\n Memory Usage: 28.7 MB, less than 39.51% of Python3 online submissions for Longest Consecutive Sequence.\n \n \"\"\" \n nums_set = set(nums)\n max_len = 0\n \n for num in nums:\n if num not in nums_set:\n continue\n \n nums_set.remove(num)\n left = num - 1\n while left in nums_set:\n nums_set.remove(left)\n left -= 1\n \n right = num + 1\n while right in nums_set:\n nums_set.remove(right)\n right += 1\n \n max_len = max(max_len, right - left - 1)\n \n return max_len\n","repo_name":"leon1114/scsa_lc_study","sub_path":"2022/08/09/hglyou.py","file_name":"hglyou.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"32211025024","text":"import torch\nimport torch.utils.data as data\nfrom pathlib import Path\nimport csv\nimport pdb\n\nclass NMNIST(data.Dataset):\n def __init__(self, train=True, time_length=200, truncate_time_in_ms=300):\n self.time_length = time_length\n self.truncate_time_in_ms = truncate_time_in_ms\n if train:\n self.data_path_binary = Path(\"./dataset/N-MNIST/Train/\")\n else:\n self.data_path_binary = Path(\"./dataset/N-MNIST/Test/\")\n label_fname = self.data_path_binary / \"label.csv\"\n with open(label_fname) as label_file:\n reader = csv.reader(label_file)\n self.label = [int(item[0]) for item in list(reader)]\n\n def __len__(self):\n return 64\n return len(self.label)\n\n def __getitem__(self, index):\n x_data_bin = torch.zeros(self.time_length, 2, 34, 34, device='cpu')\n with open(self.data_path_binary / f\"{index+1:05d}.bin\", \"rb\") as f:\n spikes = f.read()\n f_length = len(spikes)\n assert f_length % 5 == 0\n n_spike = int(f_length / 5)\n for i in range(n_spike):\n # x = spikes[i*5]\n # y = spikes[i*5+1]\n channel = int(spikes[i*5+2] / 128)\n time = ((spikes[i*5+2] - 128 * channel) << 16) + (spikes[i*5+3] << 8) + spikes[i*5+4]\n if (time // 1000) >= self.truncate_time_in_ms:\n break\n time_step = int( time // (1000 * (self.truncate_time_in_ms / self.time_length)) )\n x_data_bin[time_step, channel, spikes[i*5], spikes[i*5+1]] = 1\n x_data = x_data_bin\n y_data = self.label[index]\n\n return x_data, y_data\n\ndef load_loader(config, num_workers, batch_size, test_batch_size, valid_size=10000, time_length=300):\n nmnist = NMNIST(train=True, time_length=time_length)\n nmnist_test = NMNIST(train=False, time_length=time_length)\n if config.multi_model:\n train_loader = []\n valid_loader = []\n for m in range(config.num_models):\n # nmnist_train, nmnist_valid = torch.utils.data.random_split(nmnist, [60000-valid_size, valid_size])\n nmnist_train, nmnist_valid = torch.utils.data.random_split(nmnist, [32, 32])\n\n train_loader.append(data.DataLoader(nmnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True))\n valid_loader.append(data.DataLoader(nmnist_valid, batch_size=test_batch_size, num_workers=num_workers, pin_memory=True))\n test_loader = data.DataLoader(nmnist_test, batch_size=test_batch_size, num_workers=num_workers)\n else:\n # nmnist_train, nmnist_valid = torch.utils.data.random_split(nmnist, [60000-valid_size, valid_size])\n nmnist_train, nmnist_valid = torch.utils.data.random_split(nmnist, [32, 32])\n train_loader = data.DataLoader(nmnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n valid_loader = data.DataLoader(nmnist_valid, batch_size=test_batch_size, num_workers=num_workers)\n test_loader = 
data.DataLoader(nmnist_test, batch_size=test_batch_size, num_workers=num_workers)\n\n return train_loader, valid_loader, test_loader\n","repo_name":"KyungsuKim42/ANTLR","sub_path":"nmnist_dataset.py","file_name":"nmnist_dataset.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"73"} +{"seq_id":"74876235435","text":"# 08 Lists\n# A list is an ordered collection of values identified by an index starting at 0,1,2,3,4.. whose members are known as elements.\n# Lists are similar to strings, which are ordered collections of characters, except that the elements of a list can be of any type.\n\n# 8.1 Values of a list\n# List with integer values\nlista = [1, 2, 3]\n\n# List with string values\ncadena = ['Aaron', 'Steven']\n\n# Nested list\nlistAnidada =[1, 2, 3, [4,5]]\n\n# Lists of consecutive numbers come from the range function, which takes two arguments: where the list starts and where it ends\nprint(range(1, 5))\n\n# range with one argument: creates a list starting from 0:\nprint(range(1))\n\n# range with 3 arguments: creates a list that starts at 1 but advances two at a time up to 10\nres = range(1,10,2)\nprint(res)\n\n# empty list\n[]\n\n# Lists can be assigned to variables \n\nnumeros = [12, 21]\n\n# Passing lists as parameters to functions\ndef tomarLista(lista):\n return lista\n\nimprimir_lista = tomarLista([1,2])\nprint(imprimir_lista)\n\n# 8.2 Accessing the elements\n# The [] operator -> the index goes inside\n\n# Negative index: fetches the last element of the list\n\n# An index variable can traverse a list with while or with for\n\nfrutas = ['pear', 'grapes']\nfor i in frutas:\n print([i])\n\n\n# 8.3 Length of a list\n\n# \n\n# It is common to traverse lists with while loops and the len() function\n\n\n# 8.4. List membership\n\n# The in operator checks whether an element exists in a list\n\nnumeros = [1,2,3]\n1 in numeros\n# The not operator combined with in checks whether an element is not a member of a list:\n\n\n# 8.5. Lists and for loops\n\n# 8.6. Operations on lists\n# The + operator concatenates lists\na = [1,2,3]\nb = [1,2,3]\n\nc = a + b\nprint(c)\n\n# The * operator repeats the elements of the list the specified number of times\n\nrepetirLista= [0] * 4\n\n# 8.7. Slices\n# Slices are accessed with the [] operator, much like strings.\n\n# 8.8. Lists are mutable\n# This means we can modify any one of the elements of the list\n\n\ncolores = ['red', \"blue\", \"blue\"]\n\ncolores[0] = 'black'\ncolores[-1] = 'black'\n\n# 8.9 Deleting from a list\n# To remove an element from the list, use del\n\n\n# 8.10. Objects and values\n# Every variable is an object and has a unique identifier\n\n\n# 8.11. Aliases (nicknames)\n\n\n#8.12. Cloning lists
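\n# (added) a minimal sketch for 8.11 and 8.12: plain assignment only creates an alias,\n# while slicing produces an independent clone\noriginal = [1, 2, 3]\nalias = original # alias: both names point at the same object\nclon = original[:] # clone: a new list with the same elements\nalias[0] = 99\nprint(original) # [99, 2, 3] -> changed through the alias\nprint(clon) # [1, 2, 3] -> the clone is unaffected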
\n","repo_name":"aontaneda2000/REACT","sub_path":"06-python/libro/08-listas.py","file_name":"08-listas.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8063381758","text":"import pandas as pd\n\nfrom bandit.arms import BernoulliArm, LogNormalArm, GaussianArm\nfrom bandit.algorithms import UCB1, UCB1Normal, Softmax, EpsilonGreedy\n\n\ndef create_line(line, algo, arms):\n\n \"\"\"\n Given an integer, a Bandit Algorithm, and a list of arms, chooses an arm and gets its reward.\n\n Inputs:\n line: int -- index for DataFrame construction\n algo: BaseBanditAlgorithm -- algorithm to test\n arms: list of BaseBanditArm -- Arm objects that will be tested with algo\n\n Output:\n (int, int, float) -- (line index, chosen arm, reward drawn from the arm).\n \"\"\"\n\n arm = algo.select_arm()\n reward = arms[arm].draw()\n algo.update(arm, reward)\n return (line, arm, reward)\n\n\ndef run_simulation(algo, arms, sim, horizon):\n\n \"\"\"\n Runs a fixed-duration Monte Carlo simulation for a given set of arms.\n\n Inputs:\n algo: BaseBanditAlgorithm -- algorithm to test\n arms: list of BaseBanditArm -- Arm objects that will be tested with algo\n sim: int -- ID of simulation being run\n horizon: int -- number of iterations on this simulation.\n\n Output:\n pd.DataFrame -- chosen arm and reward for each iteration of the simulation.\n \"\"\"\n\n algo.initialize(len(arms))\n\n aux = range(horizon)\n aux = map(lambda x: create_line(x, algo, arms), aux)\n\n res = pd.DataFrame.from_records(aux, columns=['Iteration','Arm','Reward'])\n\n res['Simulation'] = sim\n\n return res[['Simulation', 'Iteration', 'Arm', 'Reward']]\n\n\ndef test_algorithm(algo, arms, num_sims=1, horizon=1000):\n\n \"\"\"\n Runs a given number of Monte Carlo simulations for the arm set.\n\n Inputs:\n algo: BaseBanditAlgorithm -- algorithm to test\n arms: list of BaseBanditArm -- Arm objects that will be tested with algo\n num_sim: int -- number of simulations that will be run\n horizon: int -- number of iterations on each simulation\n\n Output:\n pd.DataFrame -- chosen arm and reward for each iteration of each simulation.\n \"\"\"\n\n sims = map(lambda x: run_simulation(algo, arms, x, horizon), range(num_sims))\n return pd.concat(sims, ignore_index=True)\n \n\nif __name__ == \"__main__\":\n\n # Normal Arms\n params = [(100, 10), (110, 10), (105, 10)]\n # materialized with list(): len(arms) below and the repeated sweeps need a real list, not a one-shot map iterator\n arms = list(map(lambda p: GaussianArm(p[0], p[1]), params))\n\n n_sims = 10\n n_iter = 10000\n\n algo = UCB1Normal()\n algo.initialize(len(arms))\n\n sim = test_algorithm(algo, arms, n_sims, n_iter)\n\n sim.to_csv('data/normal_ucb1_simulation_test.csv', index=False)\n","repo_name":"MarcoAlmada/bandit-panda","sub_path":"simulation_test.py","file_name":"simulation_test.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"74001529197","text":"import random\n\npeople = [\"A.B.J\", \"A.L.\", \"A.Š.\", \"A. Pe.\", \"A. Pu.\", \"A. M.\", \"D. G.\", \"D. R.\", \"E. S\", \"E. A.\", \"G. M.\",\n \"J. J.\", \"K. B.\", \"L. V.\", \"L. B.\", \"L. L.\", \"M. S.\", \"M. K.\", \"M. A.\", \"O. K.\", \"T. P.\", \"V. K.\",\n \"V. V.\", \"V. L.\"]
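\n# (added note) shuffle randomises the order in place; the loop below then deals\n# people out in chunks of five, so with 24 names the last group gets only four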
L.\"]\n\ngroups = []\nrandom.shuffle(people)\nwhile people:\n groups.append(people[:5])\n people = people[5:]\n\nprint(groups)\n","repo_name":"karina-klinkeviciute/codeAcademyPaskaitos","sub_path":"utils/groups_selector.py","file_name":"groups_selector.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40809359705","text":"# -*- coding: utf-8 -*-\n\n# 앙상블 방법론(ensemble methods)\n# 모형 결합(model combining) : 다수개의 예측기를 결합하여 \n# 하나의 예측 모델을 생성하는 방법\n# 특정한 하나의 예측 방법이 아닌 복수의 예측 모형을 결합하여 \n# 더 나은 성능의 예측을 하려는 시도로 나온 방법\n\n# 단점\n# 앙상블 방법은 일반적으로 머신러닝 모델의 계산량이 증가함\n\n# 장점\n# - 단일 모형을 사용할 때 보다 성능 분산이 \n# 감소(과최적화를 방지)\n# - 개별 모형의 성능이 안좋을 경우���는 \n# 결합 모형의 성능이 더 향상\n\n# 앙상블의 모형 결합을 위한 방법\n# 취합(aggregation), 부스팅(boosting)\n\n# 취합(aggregation) : 사용할 모형의 집합이 이미 결정되어 있는 경우\n# - 다수결 (Majority Voting), 배깅 (Bagging), 랜덤포레스트 (Random Forests)\n# 부스팅(boosting) : 사용할 모형을 점진적으로 늘려나가려는 경우\n# - 에이다부스트 (AdaBoost), 그레디언트 부스트 (Gradient Boost)\n\n# 다수결 (Majority Voting) 처리를 사용한 모델 생성 예제\n# hard voting: 단순 투표. 개별 모형의 결과 기준\n# soft voting: 가중치 투표. 개별 모형의 조건부 확률의 합 기준\n\n# VotingClassifier 클래스\n# - estimators: \n# 예측기 목록, 리스트나 named parameter 형식을 지원\n# - voting: 문자열 {hard, soft}\n# 디폴트는 hard\n\nfrom sklearn.datasets import load_breast_cancer\n\nX, y = load_breast_cancer(return_X_y=True)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(\n X, y, stratify=y, random_state=1)\n\n# 데이터 분석을 위한 개별 머신러닝 모델의 생성과 학습\nfrom sklearn.neighbors import KNeighborsClassifier\nkn_model = KNeighborsClassifier(\n n_neighbors=5).fit(X_train, y_train)\n\nfrom sklearn.linear_model import LogisticRegression\nlr_model = LogisticRegression(\n solver='lbfgs', \n max_iter=10000).fit(X_train, y_train)\n\nfrom sklearn.tree import DecisionTreeClassifier\ndt_model = DecisionTreeClassifier(\n max_depth=3, \n random_state=1).fit(X_train, y_train)\n\nprint('학습 평가(kn) : ', \n kn_model.score(X_train, y_train))\nprint('학습 평가(lr) : ', \n lr_model.score(X_train, y_train))\nprint('학습 평가(dt) : ', \n dt_model.score(X_train, y_train))\n\n# 앙상블 모형 객체 생성 및 학습\nfrom sklearn.ensemble import VotingClassifier\nensemble = VotingClassifier(\n estimators=[('kn', kn_model),\n ('lr', lr_model),\n ('dt', dt_model)],\n n_jobs=-1).fit(X_train, y_train)\n\nprint('학습 평가(ensemble) : ', \n ensemble.score(X_train, y_train))\n\n\nprint('테스트 평가(kn) : ', \n kn_model.score(X_test, y_test))\nprint('테스트 평가(lr) : ', \n lr_model.score(X_test, y_test))\nprint('테스트 평가(dt) : ', \n dt_model.score(X_test, y_test))\nprint('테스트 평가(ensemble) : ', \n ensemble.score(X_test, y_test))\n\n\n\n\n\n\n\n\n","repo_name":"SungmanHan/machineLearningStudy","sub_path":"2_scikit-learn/4_ensemble/1_Voting/ensemble_01_VotingClassifier.py","file_name":"ensemble_01_VotingClassifier.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18765022362","text":"from django.http import *\nfrom django.template import RequestContext\nfrom django.shortcuts import render, get_object_or_404, render_to_response, redirect\nfrom django.utils import timezone\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.contrib.auth import authenticate, login, logout\n\n# Create your views here.\n\ndef post_list(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return render(request, 'blog/post_list.html', {'posts': 
posts})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n #post.published_date = timezone.now()\n post.save()\n return redirect('blog.views.post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('blog.views.post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_draft_list(request):\n posts = Post.objects.filter(published_date__isnull=True).order_by('-created_date')\n return render(request, 'blog/post_draft_list.html', {'posts': posts})\n\ndef post_publish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.publish()\n return redirect('blog.views.post_detail', pk=pk)\n\ndef post_remove(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('blog.views.post_list')\n\ndef gallery(request):\n return render(request, 'blog/gallery.html')\n\ndef blmra(request):\n return render(request, 'blog/blmra.html')\n\ndef about_page(request):\n return render(request, 'blog/about_page.html')\n\ndef teamspeak(request):\n return render(request, 'blog/teamspeak.html')\n\ndef login_page(request):\n logout(request)\n username = password = ''\n if request.POST:\n username = request.POST['username']\n password = request.POST['password']\n\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect('/')\n return render_to_response('blog/login_page.html', context_instance=RequestContext(request))\n\n\n\n","repo_name":"samtanswell/tansworld","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42367157528","text":"# coding: utf-8\n# https://leetcode-cn.com/problems/dui-cheng-de-er-cha-shu-lcof/\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n # preorder\n if not root or not root.left and not root.right:\n return True\n inorder_val, inorder_right_val = [], []\n inorder_val = self.inorder(root, inorder_val)\n inorder_right_val = self.inorder_right(root, inorder_right_val)\n if len(inorder_val) != len(inorder_right_val):\n return False\n else:\n for i in range(len(inorder_val)):\n if inorder_val[i] != inorder_right_val[i]:\n return False\n return True\n\n def inorder(self, root, res):\n if not root:\n res.append(None)\n return res\n res.append(root.val)\n self.inorder(root.left, res)\n self.inorder_right(root.right, res)\n return res\n\n def inorder_right(self, root, res):\n if not root:\n res.append(None)\n return res\n res.append(root.val)\n self.inorder_right(root.right, res)\n self.inorder(root.left, res)\n return 
res\n","repo_name":"bycxw/coder","sub_path":"jianzhioffer/28isSymmetric.py","file_name":"28isSymmetric.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30052644344","text":"import numpy as np \nimport torch\nimport torch as nn \nfrom torch.autograd import Variable\n\ndtype = torch.FloatTensor\n# S: Symbol that shows starting of decoding input\n# E: Symbol that shows starting of decoding output\n# P: Symbol that will fill in blank sequence if current batch data size is short than time steps\nchar_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz']\nnum_dic = {n: i for i, n in enumerate(char_arr)}\n\nseq_data = [['man', 'women'], ['black', 'white'], ['king', 'queen'], ['girl', 'boy'], ['up', 'down'], ['high', 'low']]\n\n# Seq2Seq Parameter\nn_step = 5\nn_hidden = 128\nn_class = len(num_dic)\nbatch_size = len(seq_data)\n\ndef make_batch(seq_data):\n input_batch, output_batch, target_batch = [], [], []\n\n for seq in seq_data:\n for i in range(2):\n seq[i] = seq[i] + 'P' * (n_step - len(seq[i]))\n\n input = [num_dic[n] for n in seq[0]]\n output = [num_dic[n] for n in ('S' + seq[1])]\n target = [num_dic[n] for n in (seq[1] + 'E')]\n\n input_batch.append(np.eye(n_class)[input])\n output_batch.append(np.eye(n_class)[output])\n target_batch.append(target) # not one-hot\n\n # make tensor\n return Variable(torch.Tensor(input_batch)), Variable(torch.Tensor(output_batch)), Variable(torch.LongTensor(target_batch))\n","repo_name":"ZCcaptain/dive-into-deeplearning-pytorch","sub_path":"10_nlp/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71572989997","text":"import json\nimport os\n\nimport numpy as np\nfrom helpers import *\n\n\ndef compile_height_variation():\n current_path = os.getcwd()\n output_path = os.path.join(current_path, \"output\")\n\n heights = np.zeros(0)\n dots = np.zeros((0,0))\n i = 0\n for set_dir in os.listdir(output_path):\n output_set_path = os.path.join(output_path, set_dir)\n if not os.path.isdir(output_set_path):\n continue\n json_file_path = os.path.join(output_set_path, \"HeightVariationMetric.json\")\n with open(json_file_path, 'r') as f:\n series_data = json.load(f)['m_ave_dot_series']\n if i == 0:\n heights = np.array([item[\"Height\"] for item in series_data])\n dots = np.array([[item[\"Average Dot Product\"] for item in series_data]])\n else :\n dots = np.append(dots, [[item[\"Average Dot Product\"] for item in series_data]], axis=0)\n i = i + 1\n ave_dot = np.mean(dots, axis=0)\n\n eval_json = {}\n eval_json[\"m_ave_dot_series\"] = [{\"Height\" : heights[i], \"Average Dot Product\" : ave_dot[i]} for i in range(len(heights))]\n with open(os.path.join(output_path, \"HeightVariationMetric_compiled.json\"), 'w') as f_o:\n json.dump(eval_json, f_o)\n\n\nif __name__ == \"__main__\":\n print(\"Runing compiling height variation...\")\n compile_height_variation()\n print(\"Complete compiling height variation...\")","repo_name":"JeffreyLayton/Cloud-Shadow-Detection-Result-Generation","sub_path":"compile_height_variation.py","file_name":"compile_height_variation.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"1896115754","text":"from llnl.util.lang import union_dicts\n\nimport spack.schema.gitlab_ci\n\n# Schema for script fields\n# 
\n\n# Schema for script fields\n# List of lists and/or strings\n# This is similar to what is allowed in\n# the gitlab schema\nscript_schema = {\n \"type\": \"array\",\n \"items\": {\"anyOf\": [{\"type\": \"string\"}, {\"type\": \"array\", \"items\": {\"type\": \"string\"}}]},\n}\n\n# Schema for CI image\nimage_schema = {\n \"oneOf\": [\n {\"type\": \"string\"},\n {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"entrypoint\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n },\n },\n ]\n}\n\n# Additional attributes are allowed\n# and will be forwarded directly to the\n# CI target YAML for each job.\nattributes_schema = {\n \"type\": \"object\",\n \"additionalProperties\": True,\n \"properties\": {\n \"image\": image_schema,\n \"tags\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"variables\": {\n \"type\": \"object\",\n \"patternProperties\": {r\"[\\w\\d\\-_\\.]+\": {\"type\": \"string\"}},\n },\n \"before_script\": script_schema,\n \"script\": script_schema,\n \"after_script\": script_schema,\n },\n}\n\nsubmapping_schema = {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"required\": [\"submapping\"],\n \"properties\": {\n \"match_behavior\": {\"type\": \"string\", \"enum\": [\"first\", \"merge\"], \"default\": \"first\"},\n \"submapping\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"required\": [\"match\"],\n \"properties\": {\n \"match\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"build-job\": attributes_schema,\n \"build-job-remove\": attributes_schema,\n },\n },\n },\n },\n}\n\nnamed_attributes_schema = {\n \"oneOf\": [\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\"noop-job\": attributes_schema, \"noop-job-remove\": attributes_schema},\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\"build-job\": attributes_schema, \"build-job-remove\": attributes_schema},\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\"copy-job\": attributes_schema, \"copy-job-remove\": attributes_schema},\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"reindex-job\": attributes_schema,\n \"reindex-job-remove\": attributes_schema,\n },\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"signing-job\": attributes_schema,\n \"signing-job-remove\": attributes_schema,\n },\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\n \"cleanup-job\": attributes_schema,\n \"cleanup-job-remove\": attributes_schema,\n },\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": {\"any-job\": attributes_schema, \"any-job-remove\": attributes_schema},\n },\n ]\n}\n\npipeline_gen_schema = {\n \"type\": \"array\",\n \"items\": {\"oneOf\": [submapping_schema, named_attributes_schema]},\n}\n\ncore_shared_properties = union_dicts(\n {\n \"pipeline-gen\": pipeline_gen_schema,\n \"rebuild-index\": {\"type\": \"boolean\"},\n \"broken-specs-url\": {\"type\": \"string\"},\n \"broken-tests-packages\": {\"type\": \"array\", \"items\": {\"type\": \"string\"}},\n \"target\": {\"type\": \"string\", \"enum\": [\"gitlab\"], \"default\": \"gitlab\"},\n }\n)\n\n# TODO: Remove in Spack 0.23\nci_properties = {\n \"anyOf\": [\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n # \"required\": [\"mappings\"],\n \"properties\": union_dicts(\n 
core_shared_properties, {\"enable-artifacts-buildcache\": {\"type\": \"boolean\"}}\n ),\n },\n {\n \"type\": \"object\",\n \"additionalProperties\": False,\n # \"required\": [\"mappings\"],\n \"properties\": union_dicts(\n core_shared_properties, {\"temporary-storage-url-prefix\": {\"type\": \"string\"}}\n ),\n },\n ]\n}\n\n#: Properties for inclusion in other schemas\nproperties = {\n \"ci\": {\n \"oneOf\": [\n # TODO: Replace with core-shared-properties in Spack 0.23\n ci_properties,\n # Allow legacy format under `ci` for `config update ci`\n spack.schema.gitlab_ci.gitlab_ci_properties,\n ]\n }\n}\n\n#: Full schema with metadata\nschema = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Spack CI configuration file schema\",\n \"type\": \"object\",\n \"additionalProperties\": False,\n \"properties\": properties,\n}\n\n\ndef update(data):\n import llnl.util.tty as tty\n\n import spack.ci\n import spack.environment as ev\n\n # Warn if deprecated section is still in the environment\n ci_env = ev.active_environment()\n if ci_env:\n env_config = ci_env.manifest[ev.TOP_LEVEL_KEY]\n if \"gitlab-ci\" in env_config:\n tty.die(\"Error: `gitlab-ci` section detected with `ci`, these are not compatible\")\n\n # Detect if the ci section is using the new pipeline-gen\n # If it is, assume it has already been converted\n return spack.ci.translate_deprecated_config(data)\n","repo_name":"spack/spack","sub_path":"lib/spack/spack/schema/ci.py","file_name":"ci.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"73582171757","text":"import cv2\n\n##########################\n####CREATED BY NEHORAI L.#\n##########################\n\n\nimg = cv2.imread('./Nadia_Murad.jpg')\n\nface_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n\ndef face_detection(img):\n \n face_copy = img.copy()\n face_rects = face_cascade.detectMultiScale(face_copy)\n \n \n for(x,y,w,h) in face_rects:\n cv2.rectangle(face_copy,(x,y),(x+w,y+h),(255,255,255),10)\n return face_copy\n\nimg = face_detection(img)\n\nwhile True:\n \n cv2.imshow('face_detection',img)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \ncv2.destroyAllWindows()\n#DONE","repo_name":"Nehorai444/face-detection","sub_path":"face-detection-image.py","file_name":"face-detection-image.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71279606317","text":"# This is a quick script that adds AllUsers as READER to a JSON file\n# representing an ACL on a GCS object. 
This is a quick workaround for a bug in\n# gsutil.\nimport json\nimport sys\n\nacl = json.load(sys.stdin)\nacl.append({\n \"entity\": \"allUsers\",\n \"role\": \"READER\"\n })\njson.dump(acl, sys.stdout)\n","repo_name":"boddumanohar/kubernetes-first-commit","sub_path":"src/release/make-public-gcs-acl.py","file_name":"make-public-gcs-acl.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"22632593922","text":"from typing import List, Optional\n\nfrom .hybrid import hybrid_property\n\n\ndef find_index_by_value_at_key(items: List[dict], key, value) -> Optional[int]:\n \"\"\"\n Inside of a list of dictionaries,\n find the index of the first dictionary\n that has a matching value for the given key\n \"\"\"\n for index, item in enumerate(items):\n if item.get(key) == value:\n return index\n return None\n\n\n__all__ = [\n \"hybrid_property\",\n \"find_index_by_value_at_key\",\n]\n","repo_name":"ThaRising/dank-bank","sub_path":"src/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40044253913","text":"#!/usr/lib/python2.6 python\n\nimport sys\nfrom utilities import MRLogisticRegression\nfrom base64 import b64decode as decode\nfrom pickle import loads\n\n# Creating model\nmodel = MRLogisticRegression()\n\n# Aggregating all the gradients and Hessians\ng_H=[]\nfor line in sys.stdin:\n line = line.strip()\n key, value = line.split(\"\\t\", 1)\n g_H.append(loads(decode(value)))\n\t\n# Computing model coefficients\nbeta = model.reducer(g_H)\n\n# Printing reducer results\nfor i in range(len(beta)):\n key = i\n value = beta[i]\n print >> sys.stdout, \"%s\\t%f\" % (key, value)\n","repo_name":"AndreaBravi/MapReduce","sub_path":"reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"31567880918","text":"def go_random():\n x = random.randint(0,600)\n y = random.randint(0,600)\n move(x,y)\n return x,y\n\ndef timber_n(n, a):\n start = team.WOOD\n timber(a)\n while team.WOOD < start+n:\n print (team.WOOD)\n sleep(0.1)\n return team.WOOD - start\n\ngo_random()\nsleep(1)\nwhile not explored:\n go_random()\n sleep(0.5)\n\na = explored[-1]\nif a.isWood:\n if team.WOOD < 200:\n print('Timbering')\n print(timber_n(200, a))\n print('timbered')\n\nsleep(2)\n\n","repo_name":"operatorequals/CodesOfEmpires","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"10391496060","text":"import math\nimport pickle\nimport re\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\nfrom tqdm import tqdm\n\nimport tokenizers\n\nDIR = \"./inputs/datasets/tkm/\"\n\nMAX_LEN = 120\n# PATH = '../input/tf-roberta/'\nPATH = './inputs/datasets/roberta/tokenizer/'\ntokenizer = tokenizers.ByteLevelBPETokenizer(\n # vocab_file=PATH + 'vocab-roberta-base.json',\n # merges_file=PATH + 'merges-roberta-base.txt',\n vocab_file=PATH + 'vocab.json',\n merges_file=PATH + 'merges.txt',\n lowercase=True,\n add_prefix_space=True\n)\nSEED = 88888\nnp.random.seed(SEED)\nsentiment_id = {'positive': 1313, 'negative': 2430, 'neutral': 7974}\n\n\ndef proc(train):\n # preprocessing\n ct = train.shape[0]
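\n # (added note, hedged) this follows the usual RoBERTa span-extraction recipe:\n # each row of input_ids holds [0, sentiment_token] + text token ids + [2],\n # attention_mask flags the real tokens, and start_tokens/end_tokens are one-hot\n # targets for where the selected_text span begins and ends (shifted by 2 for\n # the two leading special tokens).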
\n input_ids = np.ones((ct, MAX_LEN), dtype='int32')\n attention_mask = np.zeros((ct, MAX_LEN), dtype='int32')\n token_type_ids = np.zeros((ct, MAX_LEN), dtype='int32')\n start_tokens = np.zeros((ct, MAX_LEN), dtype='int32')\n end_tokens = np.zeros((ct, MAX_LEN), dtype='int32')\n\n text = train['text'].values\n selected_text = train['selected_text'].values\n sentiments = train['sentiment'].values\n for k in tqdm(range(train.shape[0])):\n ss = text[k].find(selected_text[k])\n # if there are 1 or 2 spaces right before the selected text, shift to include them\n if text[k][max(ss - 2, 0):ss] == '  ':\n ss -= 2\n if ss > 0 and text[k][ss - 1] == ' ':\n ss -= 1\n\n ee = ss + len(selected_text[k])\n\n # if the text starts with exactly one space, ee -= 1\n # re.match seems to anchor at the start of the string...?\n if re.match(r' [^ ]', text[k]) is not None:\n ee -= 1\n ss = max(0, ss)\n # if there is a '  ' (double space) before the selected text,\n # the selection is longer than one character,\n # and its second-to-last character is a space, then sel = sel[:-2]\n if '  ' in text[k][:ss] and sentiments[k] != 'neutral':\n text1 = \" \".join(text[k].split())\n sel = text1[ss:ee].strip()\n if len(sel) > 1 and sel[-2] == ' ':\n sel = sel[:-2]\n\n selected_text[k] = sel\n # selected_text[k] = re.sub('[^AaIiUu] ', '', selected_text[k])\n # FIND OVERLAP\n text1 = \" \" + \" \".join(text[k].split())\n text2 = \" \".join(selected_text[k].split())\n idx = text1.find(text2)\n\n chars = np.zeros((len(text1)))\n chars[idx:idx + len(text2)] = 1\n if text1[idx - 1] == ' ':\n chars[idx - 1] = 1\n enc = tokenizer.encode(text1)\n\n # ID_OFFSETS\n offsets = enc.offsets\n\n # START END TOKENS\n toks = []\n for i, (a, b) in enumerate(offsets):\n sm = np.mean(chars[a:b])\n if sm > 0.5 and chars[a] != 0:\n toks.append(i)\n\n s_tok = sentiment_id[train.loc[k, 'sentiment']]\n input_ids[k, :len(enc.ids) + 3] = [0, s_tok] + enc.ids + [2]\n attention_mask[k, :len(enc.ids) + 3] = 1\n if len(toks) > 0:\n start_tokens[k, toks[0] + 2] = 1\n end_tokens[k, toks[-1] + 2] = 1\n train.to_csv('train_new.csv', index=False)\n return (input_ids,\n attention_mask,\n token_type_ids,\n start_tokens,\n end_tokens)\n\n\n# @jit\ndef jaccard(str1, str2):\n a = set(str1.lower().split())\n b = set(str2.lower().split())\n if (len(a) == 0) & (len(b) == 0):\n return 0.5\n c = a.intersection(b)\n return float(len(c)) / (len(a) + len(b) - len(c))\n\n\ndef load_data():\n train = pd.read_csv(\n './inputs/origin/train.csv').fillna('')\n # './input/tweet-sentiment-extraction/train.csv').fillna('')\n print(train.head())\n\n data = proc(train)\n with open('data2.pkl', 'wb') as f:\n pickle.dump(data, f, -1)\n\n\ndef main():\n train = pd.read_csv(\n './inputs/origin/train.csv').fillna('')\n # '../input/tweet-sentiment-extraction/train.csv').fillna('')\n text = train['text'].values\n selected_text = train['selected_text'].values\n\n # see proc()\n # with open('data2.pkl', 'rb') as f:\n # data = pickle.load(f)\n # (input_ids,\n # attention_mask,\n # token_type_ids,\n # start_tokens,\n # end_tokens) = data\n\n # out-of-fold CV predictions, n samples x MAX_LEN\n oof_start = np.load(DIR + 'oof_start.npy')\n oof_end = np.load(DIR + 'oof_end.npy')\n oof_all = np.load(DIR + 'oof_all.npy') # my model also predicts a segmentation mask, for what it's worth\n\n i = 0\n list_st = []\n\n all = []\n for k in range(oof_start.shape[0]):\n if 'neutral' == train.loc[k, 'sentiment']:\n st = text[k].strip().lower()\n else:\n text1 = \" \" + \" \".join(text[k].split())\n\n enc = tokenizer.encode(text1)\n\n aa = np.argmax(oof_start[k])\n bb = np.argmax(oof_end[k])\n # use the segmentation mask to recover when the predicted start/end come out inverted
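\n # (added, hedged) i.e. take the first and last positions where the predicted\n # mask clears 0.5; if nothing does, collapse the span to the mask's argmax.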
\n if aa > bb:\n idx = oof_all[k] >= 0.5\n if idx.sum() > 0:\n idx = np.arange(oof_all.shape[1])[idx]\n aa = idx[0]\n bb = idx[-1]\n else:\n aa = bb = oof_all[k].argmax()\n\n text0 = text[k]\n ss = 0 if aa - 2 == 0 else enc.offsets[aa - 2][0]\n\n # NOTE: why the +1?\n if bb - 2 >= len(enc.offsets) - 1:\n ee = enc.offsets[-1][1] + 1\n else:\n ee = enc.offsets[bb - 2][1] + 1\n\n st = text1[ss:ee].strip()\n\n ee -= text0[ss:ee].strip().count(' ')\n ee += text0[ss:ee].strip().count(' ')\n\n if '  ' in text0[:(ss + ee) // 2]:\n st = text0[ss:ee].strip()\n\n list_st.append(st)\n sc = jaccard(st, selected_text[k])\n\n all.append(sc)\n print(i, '>>>> FOLD Jaccard =', np.mean(all))\n\n\nif __name__ == '__main__':\n load_data()\n","repo_name":"guchio3/kaggle-tweet-sentiment","sub_path":"magics.py","file_name":"magics.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"41106674796","text":"\nimport pandas as pd\nimport numpy as np\nimport sys, os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ntry:\n fName = sys.argv[1]\nexcept:\n date = input('date? (e.g. 05-23): ')\n logFiles = os.listdir('./log/'+date)\n logFiles.sort(reverse=True)\n for i, logName in enumerate(logFiles):\n print(i,\":\", logName)\n fName = logFiles[int(input('log file number?: '))]\n\ndf = pd.read_csv('./log/'+date+'/'+fName, header=None, \n names=['timestamp', \n 'x1', 'y1', 'z1', \n 'x2', 'y2', 'z2', \n 'x3', 'y3', 'z3', \n 'x4', 'y4', 'z4', \n 'x5', 'y5', 'z5', ])\ndf['timestamp'] = df['timestamp'] - df['timestamp'].iloc[0]\ndf['x2'] = df['x1']+0.1\ndf['y2'] = df['y1']+0.1\ndf['z2'] = df['z1']+0.1\ndf['x3'] = df['x1']+0.3\ndf['y3'] = df['y1']+0.3\ndf['z3'] = df['z1']+0.3\ndf['x4'] = df['x1']-0.3\ndf['y4'] = df['y1']-0.3\ndf['z4'] = df['z1']-0.3\ndf['x5'] = df['y1']\ndf['y5'] = df['z1']\ndf['z5'] = df['x1']\n\n\n# AREA: Bounding box of experiment area\nfig2 = plt.figure()\nplt.subplots_adjust(left=0, right=1, bottom=0, top=1)\nax = fig2.add_subplot(111, projection='3d')\nax.plot(df['x1'], df['y1'], df['z1'], c='r')\nax.plot(df['x2'], df['y2'], df['z2'], c='g')\nax.plot(df['x3'], df['y3'], df['z3'], c='b')\nax.plot(df['x4'], df['y4'], df['z4'], c='c')\nax.plot(df['x5'], df['y5'], df['z5'], c='m')\n\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\n\n# AREA: set viewer's limit\nax.set_xlim([-4.0, 4.0])\nax.set_ylim([-4.0, 4.0])\nax.set_zlim([0, 3.0])\n\nxrange = np.linspace(-2.6, 2.6, 10)\nyrange = np.linspace(-2.6, 2.6, 10)\nzrange = np.linspace( 0.0, 2.0, 10)\nXR, YR = np.meshgrid(xrange, yrange)\nXS, ZS = np.meshgrid(xrange, zrange)\nZR = 0*XR + 0*YR + 0\nfloor = ax.plot_surface(XR, YR, ZR, alpha=0.1, color='b')\nceili = ax.plot_surface(XR, YR, ZR+2, alpha=0.1, color='b')\nwall1 = ax.plot_surface(XS, 0*XS+2.6, ZS, alpha=0.1, color='r')\nwall2 = ax.plot_surface(XS, 0*XS-2.6, ZS, alpha=0.1, color='r')\nwall3 = ax.plot_surface(0*XS+2.6, XS, ZS, alpha=0.1, color='g')\nwall4 = ax.plot_surface(0*XS-2.6, XS, ZS, alpha=0.1, color='g')\nXb = [-5.2, 5.2]\nYb = [-5.2, 5.2]\nZb = [0, 3.0]\nfor xb, yb, zb in zip(Xb, Yb, Zb):\n ax.plot([xb], [yb], [zb], 'w')\n\nplt.show()","repo_name":"DGIST-CPSec/crazyflie-swarm","sub_path":"swarm_analyze.py","file_name":"swarm_analyze.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"23495938732","text":"import logging\nimport math\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import 
DataLoader\nfrom torch.utils.data import RandomSampler\nfrom tqdm import tqdm\nfrom itertools import chain\n\nfrom model.seq2seq import TransformerSeq2Seq\nfrom model.seq2seq_vocab import Seq2seqTokenizer\nfrom .loss import LabelSmoothingLoss\nfrom .optim import Adam\nfrom .optim import NoamOpt\nfrom .utils import pad_sequence\n\nSPECIAL_TOKENS = ['', '', '', '', '', '', '',\n '', '', '.', ',', '?', '!', ':']\nMIX_IGNORE_TOKENS = ['.', ',', '!', '?', ';', ':', '-', '*', '=', ')', '(', '\\'', '\"', ]\n\n\nclass Trainer:\n def __init__(self, model, train_dataset, trainer_config, writer, logger=None, test_dataset=None, valid_dataset=None,\n n_jobs=0, label_smoothing=0, device=torch.device('cuda'), evaluate_full_sequences=False,\n ignore_idxs=[], full_input=False, max_length=511, max_y_length=80, new_dataset=False,\n best_model_path='',\n no_persona=False, mixup=False, mixup_mode='alternate', mixup_dataset=None,\n mixup_ratio=0.15, bert_mixup=False, replace=False, pointer_gen=False):\n if logger is None:\n self.logger = logging.getLogger(__file__)\n else:\n self.logger = logger\n\n self.train_batch_size = trainer_config.train_batch_size\n self.test_batch_size = trainer_config.test_batch_size\n self.lr = trainer_config.lr\n self.lr_warmup = trainer_config.lr_warmup\n self.weight_decay = trainer_config.weight_decay\n self.batch_split = trainer_config.batch_split\n self.s2s_weight = 1\n self.single_input = True\n self.clip_grad = trainer_config.clip_grad\n self.n_epochs = trainer_config.n_epochs\n self.linear_schedule = trainer_config.linear_schedule\n self.patience = trainer_config.patience\n self.model_saving_interval = trainer_config.model_saving_interval\n self.device = device\n self.no_persona = no_persona\n self.evaluate_full_sequences = evaluate_full_sequences\n self.global_step = 0\n self.full_input = full_input\n self.max_length = max_length\n self.max_y_length = max_y_length\n self.new_dataset = new_dataset\n self.best_loss = 1e5\n self.best_model_path = best_model_path\n self.model_type = 'pretrain'\n self.patience_cnt = 0\n self.stop_training = False\n self.pointer_gen = pointer_gen\n\n self.loss_lambda = trainer_config.loss_lambda\n\n self.model = model.to(device)\n\n self.criterion = LabelSmoothingLoss(n_labels=self.model.n_embeddings, smoothing=label_smoothing,\n ignore_index=self.model.padding_idx).to(device)\n\n param_optimizer = list(self.model.named_parameters())\n # Here we should remove parameters which are not used during to avoid breaking apex with None grads\n self.loss_weight = None\n no_decay = ['bias', 'loss']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay': self.weight_decay},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n\n base_optimizer = Adam(optimizer_grouped_parameters, lr=self.lr)\n\n if not self.linear_schedule:\n self.optimizer = NoamOpt(self.model.embeddings_size, self.lr_warmup, base_optimizer, lr=self.lr,\n linear_schedule=False, loss_weight=self.loss_weight)\n else:\n total_steps = len(train_dataset) * self.n_epochs // self.train_batch_size\n self.optimizer = NoamOpt(self.model.embeddings_size, self.lr_warmup, base_optimizer, linear_schedule=True,\n lr=self.lr, total_steps=total_steps, loss_weight=self.loss_weight)\n\n train_sampler = RandomSampler(train_dataset)\n self.train_dataloader = DataLoader(train_dataset, batch_size=self.train_batch_size // self.batch_split,\n sampler=train_sampler,\n 
num_workers=n_jobs, collate_fn=self.collate_func)\n self.train_dataset = train_dataset # used to sample negative examples\n if test_dataset is not None: # only do evaluation on main process\n self.test_dataloader = DataLoader(test_dataset, batch_size=self.test_batch_size, shuffle=False,\n num_workers=n_jobs, collate_fn=self.collate_func)\n if valid_dataset is not None:\n self.valid_dataloader = DataLoader(valid_dataset, batch_size=self.test_batch_size, shuffle=False,\n num_workers=n_jobs, collate_fn=self.collate_func)\n\n self.tokenizer = train_dataset.tokenizer\n self.writer = writer\n\n if isinstance(self.model, TransformerSeq2Seq):\n self.model_type = 'seq2seq'\n\n def state_dict(self):\n return {'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'global_step': self.global_step}\n\n def load_state_dict(self, state_dict):\n if state_dict.__contains__('model') and state_dict.__contains__('optimizer'):\n self.model.load_state_dict(state_dict['model'], strict=False)\n self.optimizer.load_state_dict(state_dict['optimizer'])\n self.global_step = state_dict['global_step']\n else:\n self.model.load_state_dict(state_dict, strict=False)\n\n def collate_func(self, data):\n utterances, relations, targets = zip(*data)\n\n contexts = [torch.tensor(u, dtype=torch.long) for u in utterances]\n\n decoder_index, y_out = [], []\n predicate_x_index, predicate_y_index = [], []\n for i in range(len(targets)):\n if len(targets[i]) > 0:\n decoder_index.extend([i] * len(targets[i]))\n y_out.extend([torch.tensor(d, dtype=torch.long) for d in targets[i]])\n predicate_x_index.extend([i] * len(relations[i]))\n predicate_y_index.extend(relations[i])\n y_out = pad_sequence(y_out, batch_first=True, padding_value=self.model.padding_idx)\n input_ids = pad_sequence(contexts, batch_first=True, padding_value=self.model.padding_idx)\n predicate_labels = torch.sparse_coo_tensor(torch.tensor([predicate_x_index, predicate_y_index]),\n torch.tensor([1 for _ in range(len(predicate_y_index))], dtype=torch.float32),\n [len(relations), self.tokenizer.n_relations - 1]).to_dense()\n decoder_index = torch.tensor(decoder_index, dtype=torch.long)\n return input_ids, predicate_labels, decoder_index, y_out\n\n # contexts = []\n #\n # if max(map(len, persona_info)) > 0:\n # persona_info = [torch.tensor(d, dtype=torch.long) for d in persona_info]\n # contexts.append(persona_info)\n #\n # if max(map(len, h)) > 0:\n # h = [torch.tensor(d, dtype=torch.long) for d in h]\n # contexts.append(h)\n #\n # y_out = [torch.tensor(d, dtype=torch.long) for d in y]\n #\n # if self.no_persona:\n # for c in contexts[1]:\n # c[0][0] = self.vocab.bos_id\n # y_out = [torch.cat(pieces, dim=0) for pieces in zip(*([contexts[1]] + [y_out]))]\n # lengths = [(contexts[1][i].size(0), y_out[i].size(0)) for i in range(len(y_out))]\n # contexts = lengths\n # else:\n # y_out1 = [torch.cat(pieces, dim=0) for pieces in zip(*(contexts))]\n # lengths = [(contexts[0][i].size(0) + contexts[1][i].size(0), y_out[i].size(0)) for i in\n # range(len(y_out))]\n # y_out = (y_out1, y_out)\n # contexts = lengths\n #\n # # Pad now so we pad correctly when we have only a single input (context concatenated with y)\n # if isinstance(y_out, tuple):\n # y_out = (\n # [y[-(self.max_length - 1):] for y in y_out[0]], [y[:(self.max_y_length - 1)] for y in y_out[1]])\n # else:\n # y_out = [y[-(self.max_length - 1):] for y in y_out]\n # contexts = [c if c[1] <= self.max_length - 1 else (c[0] - (c[1] - self.max_length + 1), self.max_length - 1)\n # for c in contexts]\n # 
if isinstance(y_out, tuple):\n # y_out = (pad_sequence(y_out[0], batch_first=True, padding_value=self.model.padding_idx),\n # pad_sequence(y_out[1], batch_first=True, padding_value=self.model.padding_idx))\n # else:\n # y_out = pad_sequence(y_out, batch_first=True, padding_value=self.model.padding_idx)\n #\n # return contexts, y_out\n\n def _s2s_loss(self, targets, enc_contexts, negative_samples):\n hidden_state, padding_mask = None, None\n\n nexts = targets[:, 1:].contiguous() if targets.dim() == 2 else targets[:, 1:, 0].contiguous()\n outputs, _, _ = self.model.decode(targets[:, :-1].contiguous(), enc_contexts)\n if self.full_input:\n for i in range(targets.shape[0]):\n for j in range(targets.shape[1]):\n if targets[i][j][1] == self.vocab.sent_dialog_id:\n nexts[i][: j] = self.model.padding_idx\n break\n\n outputs = outputs.view(-1, outputs.shape[-1]).float()\n nexts = nexts.view(-1)\n\n loss = self.criterion(F.log_softmax(outputs, dim=-1), nexts) if self.model.training \\\n else self.lm_criterion(outputs, nexts)\n return loss, hidden_state, padding_mask\n\n def optimizer_step(self, s2s_loss, pred_loss, full_loss):\n if self.clip_grad is not None:\n for group in self.optimizer.param_groups:\n nn.utils.clip_grad_norm_(group['params'], self.clip_grad)\n\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n global_step = max(self.global_step, 0)\n self.writer.add_scalar(\"training/s2s_loss\", s2s_loss, global_step=global_step)\n self.writer.add_scalar(\"training/pred_loss\", pred_loss, global_step=global_step)\n self.writer.add_scalar(\"training/full_loss\", full_loss, global_step=global_step)\n self.writer.add_scalar(\"training/lr\", self.optimizer.get_lr(), global_step=global_step)\n\n self.global_step += 1\n\n def _eval_train(self, epoch, risk_func=None): # add ppl and hits@1 evaluations\n self.model.train()\n\n tqdm_data = tqdm(self.train_dataloader, desc='Train (epoch #{})'.format(epoch))\n s2s_loss = 0\n pred_loss = 0\n for i, batch in enumerate(tqdm_data):\n input_ids, predicate_labels, decoder_index, targets = batch[0].to(self.device), \\\n batch[1].to(self.device), batch[2].to(self.device), batch[3].to(self.device)\n loss = self.model(input_ids, decoder_index, predicate_labels, targets)\n s2s_loss = (i * s2s_loss + loss[0].item()) / (i + 1)\n pred_loss = (i * pred_loss + loss[1].item()) / (i + 1)\n loss = (1 - self.loss_lambda) * loss[0] + self.loss_lambda * loss[1]\n full_loss = loss / self.batch_split\n tqdm_data.set_postfix({'s2s_loss': s2s_loss, 'pred_loss': pred_loss})\n\n # optimization\n full_loss = self.optimizer.backward(full_loss)\n if self.pointer_gen and (torch.isnan(self.model.generator.p_gen_linear._parameters['weight']._grad[0][0]) or \\\n torch.isinf(self.model.generator.p_gen_linear._parameters['weight']._grad[0][0])):\n self.optimizer.zero_grad()\n self.logger.info('Abnormal gradient')\n\n if (i + 1) % self.batch_split == 0:\n self.optimizer_step(s2s_loss, pred_loss, full_loss)\n if (i + 1) % self.batch_split != 0:\n self.optimizer_step(s2s_loss, pred_loss, full_loss)\n\n def _get_eval_loss(self, input_ids, decoder_index, predicate_labels, targets, metrics, index):\n results = self.model(input_ids, decoder_index, predicate_labels, targets, output_logits=True, output_encoder=True)\n\n metrics['s2s_loss'] = (metrics['s2s_loss'] * index + results[0].item()) / (index + 1)\n metrics['pred_loss'] = (metrics['pred_loss'] * index + results[1].item()) / (index + 1)\n metrics['full_loss'] = (metrics['full_loss'] * index +\n (self.loss_lambda * results[1].item() + ((1 - 
self.loss_lambda) * results[0].item()))) / (index + 1)\n        predict_predicate = (results[3] >= 0.5).float()\n        predict_predicate_num = torch.sum(predict_predicate).item()\n        predict_predicate_acc_num = torch.sum((predict_predicate == predicate_labels) * predicate_labels).item()\n        predicate_label_num = torch.sum(predicate_labels).item()\n        metrics['pred_num'] += predict_predicate_num\n        metrics['pred_acc_num'] += predict_predicate_acc_num\n        metrics['label_num'] += predicate_label_num\n        metrics['pred_acc'] = metrics['pred_acc_num'] / metrics['pred_num'] if metrics['pred_num'] != 0 else 0\n        recall = metrics['pred_acc_num'] / metrics['label_num'] if metrics['label_num'] != 0 else 0\n        f1 = 2 * metrics['pred_acc'] * recall / (metrics['pred_acc'] + recall) if metrics['pred_acc'] + recall != 0 else 0\n        metrics['pred_f1'] = f1\n        encoder_outputs_tuple = results[4:]\n        return metrics, predict_predicate, encoder_outputs_tuple\n\n    def _get_eval_predictions(self, input_ids, encoder_outputs_list, predict_predicate, decoder_index, targets):\n        references, predictions, predictions_given_predicate = \\\n            [[] for _ in range(predict_predicate.size(0))], [[] for _ in range(predict_predicate.size(0))], \\\n            [[] for _ in range(predict_predicate.size(0))]\n        ## Obtain the references for each sample, each reference is a list of tuple\n        for i in range(decoder_index.size(0)):\n            relation_string = self.tokenizer.decode([targets[i][0].item()])\n            string = self.tokenizer.decode(targets[i][1:].tolist(), skip_special_tokens=True,\n                                           clean_up_tokenization_spaces=False)\n            subject_string = string.split(';')[0].strip()\n            object_string = string.split(';')[1].strip()\n            references[decoder_index[i].item()].append((subject_string, relation_string, object_string))\n\n        ## Obtain the predictions for each sample if given the correct predicate\n        if targets.size(0) > 0:\n            start_ids = targets[:, :1]\n            model_predictions = self.model.inference(input_ids, encoder_outputs_list, decoder_index, start_ids)\n            for i in range(len(model_predictions)):\n                relation_string = self.tokenizer.decode([start_ids[i].item()])\n                string = self.tokenizer.decode(model_predictions[i], skip_special_tokens=True,\n                                               clean_up_tokenization_spaces=False)\n                spit_items = string.split(';')\n                if len(spit_items) >= 2:\n                    subject_string = spit_items[0].strip()\n                    object_string = spit_items[1].strip()\n                else:\n                    if len(string) > 0:\n                        split_items = string.split(' ')\n                        subject_string = split_items[0]\n                        object_string = ' '.join(split_items[1:])\n                    else:\n                        subject_string, object_string = '', ''\n                predictions_given_predicate[decoder_index[i].item()].append((subject_string, relation_string, object_string))\n\n        ## Obtain the predictions for each sample using the predicted predicate\n        nonzero_index = predict_predicate.nonzero()\n        if nonzero_index.size(0) > 0:\n            start_ids = nonzero_index[:, 1:] + self.tokenizer.no_relation_id + 1\n            predict_decoder_index = nonzero_index[:, 0]\n            model_predictions = self.model.inference(input_ids, encoder_outputs_list, predict_decoder_index, start_ids)\n            for i in range(len(model_predictions)):\n                relation_string = self.tokenizer.decode([start_ids[i].item()])\n                string = self.tokenizer.decode(model_predictions[i], skip_special_tokens=True,\n                                               clean_up_tokenization_spaces=False)\n                spit_items = string.split(';')\n                if len(spit_items) >= 2:\n                    subject_string = spit_items[0].strip()\n                    object_string = spit_items[1].strip()\n                else:\n                    if len(string) > 0:\n                        split_items = string.split(' ')\n                        subject_string = split_items[0]\n                        object_string = ' '.join(split_items[1:])\n                    else:\n                        
subject_string, object_string = '', ''\n predictions[predict_decoder_index[i].item()].append((subject_string, relation_string, object_string))\n\n return references, predictions, predictions_given_predicate\n\n def _eval_test(self, metric_funcs={}, external_metrics_func=None, epoch=-1, inference=False, is_best=False,\n raw_entail_data=None):\n with torch.no_grad():\n self.model.eval()\n if epoch == -1:\n tqdm_data = tqdm(self.test_dataloader, desc='Test')\n self.logger.info('Starting testing on Test dataset')\n else:\n tqdm_data = tqdm(self.valid_dataloader, desc='Test')\n self.logger.info('Starting testing on Valid dataset')\n metrics = {name: 0 for name in\n ('s2s_loss', 'pred_loss', 'full_loss', 'pred_acc', 'pred_f1', 'pred_num', 'pred_acc_num', 'label_num')\n + tuple(metric_funcs.keys())}\n full_predictions, full_references, full_predictions_given_predicate = [], [], []\n for i, batch in enumerate(tqdm_data):\n '''Get the loss, ppl for each batch'''\n input_ids, predicate_labels, decoder_index, targets = batch[0].to(self.device), \\\n batch[1].to(self.device), batch[2].to(self.device), batch[3].to(self.device)\n metrics, predict_predicate, encoder_outputs_tuple = self._get_eval_loss(input_ids, decoder_index,\n predicate_labels, targets, metrics, i)\n # full sequence loss\n cur_references, cur_predictions, cur_predictions_given_predicate = self._get_eval_predictions(\n input_ids, encoder_outputs_tuple, predict_predicate, decoder_index, targets)\n full_predictions.extend(cur_predictions)\n full_predictions_given_predicate.extend(cur_predictions_given_predicate)\n full_references.extend(cur_references)\n tqdm_data.set_postfix({'s2s_loss': metrics['s2s_loss'], 'pred_loss': metrics['pred_loss'],\n 'full_loss': metrics['full_loss'], 'pred_acc': metrics['pred_acc'],\n 'pred_f1': metrics['pred_f1']})\n\n if external_metrics_func and self.evaluate_full_sequences:\n external_metrics = external_metrics_func(full_references, full_predictions,\n full_predictions_given_predicate, epoch, is_best)\n metrics.update(external_metrics)\n\n # logging\n global_step = max(self.global_step, 0)\n if self.writer is not None:\n for key, value in metrics.items():\n self.writer.add_scalar(\"eval/{}\".format(key), value, global_step=global_step)\n self.logger.info(metrics)\n\n if epoch != -1:\n if metrics['full_loss'] < self.best_loss:\n self.logger.info('Current loss BEATS the previous best one, previous best is %.5f', self.best_loss)\n self.best_loss = metrics['full_loss']\n torch.save(self.model.state_dict(), self.best_model_path)\n self.logger.info('Best model is saved on epoch %d', epoch)\n else:\n self.patience_cnt += 1\n self.logger.info('Current ppl CANNOT BEATS the previous best one, previous best is %.5f',\n self.best_loss)\n if self.patience > 0 and self.patience_cnt > self.patience:\n self.stop_training = True\n if epoch % self.model_saving_interval == 0 and epoch >= self.model_saving_interval and \\\n self.model_type in ['seq2seq']:\n torch.save(self.model.state_dict(), self.best_model_path + '_' + str(epoch))\n\n def _clip_grad_norm(self, grads, max_norm, norm_type=2):\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n if norm_type == float('inf'):\n total_norm = max(grad.data.abs().max() for grad in grads)\n else:\n total_norm = 0\n for grad in grads:\n grad_norm = grad.data.norm(norm_type)\n total_norm += grad_norm ** norm_type\n total_norm = total_norm ** (1. 
/ norm_type)\n        clip_coef = max_norm / (total_norm + 1e-6)\n        if clip_coef < 1:\n            for grad in grads:\n                grad.data.mul_(clip_coef)\n        return total_norm\n\n    def test(self, metric_funcs={}, external_metrics_func=None, epoch=-1, inference=False, raw_entail_data=None):\n        if hasattr(self, 'valid_dataloader') or hasattr(self, 'test_dataloader'):\n            self._eval_test(metric_funcs, external_metrics_func, epoch, inference, raw_entail_data=raw_entail_data)\n            if epoch == -1 and not inference:\n                self.logger.info('Loading the best model...')\n                state_dict = torch.load(self.best_model_path, map_location=self.device)\n                if state_dict.__contains__('model'):\n                    self.model.load_state_dict(state_dict['model'], strict=False)\n                else:\n                    self.model.load_state_dict(state_dict)\n                self._eval_test(metric_funcs, external_metrics_func, epoch, inference, is_best=True)\n\n    def train(self, after_epoch_funcs=[], risk_func=None):\n        for epoch in range(1, self.n_epochs + 1):\n            self.logger.info('===============================')\n            self.logger.info('Start training on Epoch %d', epoch)\n            self._eval_train(epoch, risk_func)\n            # self._eval_test()\n\n            for func in after_epoch_funcs:\n                func(epoch)\n            self.logger.info('End training on Epoch %d', epoch)\n            self.logger.info('===============================')\n            if self.stop_training:\n                self.logger.info('Training will be STOPPED in advance due to exceeding patience number')\n                break\n\n        for func in after_epoch_funcs:\n            func(-1)\n","repo_name":"caoyu-noob/GetToKnowYou","sub_path":"model/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":23380,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"33365723507","text":"#!/home/simon/.virtualenvs/py36/bin/python\n\nimport datetime\nimport json\nimport operator\nimport os\nimport time\n\nimport zmq\n\nimport protocol\n\n\ntemplate = '''\nNode: {name}\nState: {state}\nWorkers: {workers_all} ({workers_up} up, {workers_down} down)\nCPU Util: {cpu_percent:>5.1f} %\nMem Util: {mem_percent:>5.1f} %\n'''\n\n\ncontext = zmq.Context()\n\nsink = context.socket(zmq.PULL)\nsink.bind('tcp://*:6010')\nrecv_count = 0\nlines_written = 0\noutput_file = None #'/tmp/destined.out'\noutput_buffer = []\n\ncontroller_address = 'tcp://192.168.1.106:6000'\nlast_poll = datetime.datetime.fromtimestamp(0)\nperiod_seconds = 1\n\nif output_file:\n    mode = f'-> {output_file}'\nelse:\n    mode = '(discarding results)'\n\nwhile True:\n\n    # zmq's poll() takes its timeout in milliseconds, so convert from seconds\n    waiting = sink.poll(timeout=period_seconds * 1000)\n    if waiting:\n        message = sink.recv()\n        result = protocol.decode(message)\n        output_buffer.append(json.dumps(result) + '\\n')\n        recv_count += 1\n\n    now = datetime.datetime.now()\n    if (now - last_poll) >= datetime.timedelta(seconds=period_seconds):\n        last_poll = now\n\n        if output_file:\n            with open(output_file, 'a') as outfile:\n                outfile.writelines(output_buffer)\n            lines_written += len(output_buffer)\n            assert lines_written == recv_count\n            output_buffer = []\n\n        socket = context.socket(zmq.REQ)\n        socket.connect(controller_address)\n        socket.send(protocol.msg_request_state())\n        response = socket.recv()\n\n        controller_state = protocol.decode(response)\n        nodes = sorted(controller_state['nodes'].values(), key=operator.itemgetter('name'))\n\n        os.system('clear')\n        print(f'========== {now.strftime(\"%Y-%m-%d %H:%M:%S\")} ==========')\n        print(f'\\nReceived messages: {recv_count} {mode}')\n\n        for node in nodes:\n            print(template.format(\n                workers_all=len(node['workers']),\n                workers_up=sum(worker['up'] for worker in node['workers']),\n                workers_down=sum(not 
worker['up'] for worker in node['workers']),\n                **node))\n","repo_name":"simonbowly/ubiquitous-waffle","sub_path":"destined-distributed/control/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13365422942","text":"import json\nimport boto3\nfrom botocore.exceptions import ClientError, ParamValidationError\n\ndef user_login(dynamodb, username, password):\n\tsettings = dynamodb.get_item(\n\t\tTableName = 'users',\n\t\tKey = {\n\t\t\t'username': {'S': username}\n\t\t},\n\t\tProjectionExpression = 'password'\n\t)\n\tif 'Item' not in settings:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 401,\n\t\t\t'error': 'This username does not exist'\n\t\t}\n\tfrom passlib.hash import pbkdf2_sha256\n\tif pbkdf2_sha256.verify(password, settings['Item']['password']['S']):\n\t\tfrom secrets import token_urlsafe\n\t\tsessid = token_urlsafe(24)\n\t\tdynamodb.update_item(\n\t\t\tTableName = 'users',\n\t\t\tKey = {\n\t\t\t\t'username': {'S': username}\n\t\t\t},\n\t\t\tUpdateExpression = \"ADD session_ids :value\",\n\t\t\tExpressionAttributeValues = {\n\t\t\t\t\":value\": {\n\t\t\t\t\t'SS': [sessid]\n\t\t\t\t}\n\t\t\t}\n\t\t)\n\t\treturn {\n\t\t\t'success': True,\n\t\t\t'session_id': sessid\n\t\t}\n\telse:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 401,\n\t\t\t'error': 'The password is incorrect'\n\t\t}\n\ndef message_new(dynamodb, username, title, markdown, rights, student_readable):\n\tif rights != 'teacher' and rights != 'admin':\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 401,\n\t\t\t'error': 'Only teachers and administrators can post'\n\t\t}\n\timport time\n\tfrom secrets import SystemRandom\n\ttry:\n\t\tdynamodb.put_item(\n\t\t\tTableName = 'messages',\n\t\t\tItem = {\n\t\t\t\t'id': {'N': str(SystemRandom().randint(10000, 99999))},\n\t\t\t\t'title': {'S': title},\n\t\t\t\t'poster': {'S': username},\n\t\t\t\t'posttime': {'N': str(round(time.time() * 1000))},\n\t\t\t\t'markdown': {'S': markdown},\n\t\t\t\t'student': {'BOOL': student_readable}\n\t\t\t},\n\t\t\tConditionExpression = 'attribute_not_exists(id)'\n\t\t)\n\t\treturn {\n\t\t\t'success': True\n\t\t}\n\texcept dynamodb.exceptions.ConditionalCheckFailedException:\n\t\treturn message_new(dynamodb, username, title, markdown, rights, student_readable)\n\texcept ClientError as e:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 500,\n\t\t\t'error': e.response['Error']['Message']\n\t\t}\n\ndef message_list(dynamodb, rights):\n\tif rights == 'teacher' or rights=='admin':\n\t\tmessages = dynamodb.scan(\n\t\t\tTableName='messages',\n\t\t\tProjectionExpression='id, poster, posttime, title'\n\t\t)['Items']\n\telse:\n\t\tmessages = dynamodb.scan(\n\t\t\tTableName = 'messages',\n\t\t\tFilterExpression = 'student = :true',\n\t\t\tProjectionExpression = 'id, poster, posttime, title',\n\t\t\tExpressionAttributeValues = {\n\t\t\t\t\":true\": {\n\t\t\t\t\t\"BOOL\": True\n\t\t\t\t}\n\t\t\t}\n\t\t)['Items']\n\treturn_dict = {}\n\tfor message in messages:\n\t\treturn_dict[message['id']['N']] = {\n\t\t\t'title': message['title']['S'],\n\t\t\t'time': message['posttime']['N'],\n\t\t\t'poster': message['poster']['S']\n\t\t}\n\treturn {\n\t\t'success': True,\n\t\t'messages': return_dict\n\t}\n\ndef message_view(dynamodb, username, user_rights, id):\n\ttry:\n\t\tmessage = dynamodb.get_item(\n\t\t\tTableName = 'messages',\n\t\t\tKey = {\n\t\t\t\t'id': {'N': 
str(id)}\n\t\t\t},\n\t\t\tProjectionExpression = 'markdown, student, form, attachments, responses.' + username\n\t\t)\n\t\tif 'Item' not in message:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 404,\n\t\t\t\t'error': 'This message does not exist'\n\t\t\t}\n\t\telse:\n\t\t\tmessage = message['Item']\n\n\t\tif not message['student']['BOOL'] and user_rights != 'teacher' and user_rights != 'admin':\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 404,\n\t\t\t\t'error': 'This message does not exist'\n\t\t\t}\n\n\t\treturn_dict = {\n\t\t\t'success': True,\n\t\t\t'text': message['markdown']['S'],\n\t\t\t'responses': []\n\t\t}\n\n\t\tif 'attachments' in message:\n\t\t\treturn_dict['attachments'] = []\n\t\t\tfor attachment in message['attachments']['L']:\n\t\t\t\treturn_dict['attachments'].append({\n\t\t\t\t\t'name': attachment['M']['name']['S'],\n\t\t\t\t\t'link': attachment['M']['link']['S']\n\t\t\t\t})\n\n\t\tif 'form' in message:\n\t\t\treturn_dict['form'] = []\n\t\t\tfor form in message['form']['L']:\n\t\t\t\tform_dict = {\n\t\t\t\t\t'question': form['M']['question']['S'],\n\t\t\t\t\t'type': form['M']['type']['S']\n\t\t\t\t}\n\t\t\t\tif (form['M']['type']['S'] == 'mcq'):\n\t\t\t\t\tform_dict['options'] = form['M']['options']['SS']\n\t\t\t\treturn_dict['form'].append(form_dict)\n\t\t\tif username in message['responses']['M']:\n\t\t\t\tfor response in message['responses']['M'][username]['L']:\n\t\t\t\t\treturn_dict['responses'].append(list(response.items())[0][1])\n\n\t\treturn return_dict\n\texcept ClientError as e:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 500,\n\t\t\t'error': e.response['Error']['Message']\n\t\t}\n\ndef message_respond(dynamodb, username, user_rights, id, response):\n\tmessage = dynamodb.get_item(\n\t\tTableName = 'messages',\n\t\tKey = {\n\t\t\t'id': {'N': str(id)}\n\t\t},\n\t\tProjectionExpression = 'form, student'\n\t)\n\tif 'Item' not in message:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This message does not exist'\n\t\t}\n\telse:\n\t\tmessage = message['Item']\n\n\tif not message['student']['BOOL'] and user_rights != 'teacher' and user_rights != 'admin':\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This message does not exist'\n\t\t}\n\terror_dict = {\n\t\t'success': False,\n\t\t'error_code': 400,\n\t\t'error': 'Your JSON response does not match the schema'\n\t}\n\tif len(response) != len(message['form']['L']):\n\t\treturn error_dict\n\n\tanswer_array = []\n\tfor i in range(0, len(response)):\n\t\tif message['form']['L'][i]['M']['type']['S'] == 'mcq':\n\t\t\tif int(response[i]) >= len(message['form']['L'][i]['M']['options']['SS']):\n\t\t\t\treturn error_dict\n\t\tanswer_array.append({\n\t\t\t'N' if message['form']['L'][i]['M']['type']['S'] == 'mcq' else 'S': str(response[i])\n\t\t})\n\n\tdynamodb.update_item(\n\t\tTableName = 'messages',\n\t\tKey = {\n\t\t\t'id': {'N': str(id)}\n\t\t},\n\t\tUpdateExpression = 'SET responses.#UID = :response',\n\t\tExpressionAttributeNames = {\n\t\t\t'#UID': username\n\t\t},\n\t\tExpressionAttributeValues = {\n\t\t\t\":response\": {\n\t\t\t\t\"L\": answer_array\n\t\t\t}\n\t\t}\n\t)\n\treturn {\n\t\t'success': True\n\t}\n\ndef records_get(dynamodb, username):\n\trecords = dynamodb.get_item(\n\t\tTableName = 'users',\n\t\tKey = {\n\t\t\t'username': {'S': username}\n\t\t},\n\t\tProjectionExpression = 'attendance, discipline, via, badges'\n\t)['Item']\n\treturn_obj = {\n\t\t'success': True,\n\t\t'attendance': 
[],\n\t\t'discipline': [],\n\t\t'via': [],\n\t\t'badges': []\n\t}\n\tfor i in records['attendance']['L']:\n\t\treturn_obj['attendance'].append({\n\t\t\t'date': i['M']['date']['N'],\n\t\t\t'reason': i['M']['reason']['S']\n\t\t})\n\tfor i in records['via']['L']:\n\t\treturn_obj['via'].append({\n\t\t\t'date': i['M']['date']['N'],\n\t\t\t'activity': i['M']['activity']['S'],\n\t\t\t'beneficiary': i['M']['beneficiary']['S'],\n\t\t\t'hours': i['M']['hours']['N']\n\t\t})\n\tfor i in records['discipline']['L']:\n\t\treturn_obj['discipline'].append({\n\t\t\t'date': i['M']['date']['N'],\n\t\t\t'offence': i['M']['offence']['S'],\n\t\t\t'punishment': i['M']['punishment']['S'],\n\t\t\t'teacher': i['M']['teacher']['S']\n\t\t})\n\tfor i in records['badges']['L']:\n\t\tbadge = {\n\t\t\t'award': i['M']['award']['S'],\n\t\t\t'name': i['M']['name']['S'],\n\t\t\t'badge': i['M']['badge']['S'],\n\t\t\t'tags': {},\n\t\t\t'cert': {\n\t\t\t\t'link': i['M']['cert']['M']['link']['S'],\n\t\t\t\t'name': i['M']['cert']['M']['name']['S'],\n\t\t\t},\n\t\t\t'year': i['M']['year']['N']\n\t\t}\n\t\tfor tag in i['M']['tags']['M'].keys():\n\t\t\tbadge['tags'][tag] = i['M']['tags']['M'][tag]['N']\n\t\treturn_obj['badges'].append(badge)\n\n\treturn return_obj\n\ndef learning_list(dynamodb, username):\n\tclasses = dynamodb.scan(\n\t\tTableName = 'learning',\n\t\tProjectionExpression = 'id, class_name, teacher, assignments',\n\t\tFilterExpression = 'contains(members, :username)',\n\t\tExpressionAttributeValues = {\n\t\t\t\":username\": {\n\t\t\t\t\"S\": username\n\t\t\t}\n\t\t}\n\t)\n\tif 'Items' not in classes:\n\t\treturn {\n\t\t\t'success': True\n\t\t}\n\telse:\n\t\tclasses = classes['Items']\n\treturn_dict = []\n\tfor i in classes:\n\t\tassignments = []\n\t\tfor assignment in i['assignments']['M']:\n\t\t\tassignments.append(i['assignments']['M'][assignment]['M']['name']['S'])\n\t\treturn_dict.append({\n\t\t\t'teacher': i['teacher']['S'],\n\t\t\t'name': i['class_name']['S'],\n\t\t\t'code': i['id']['S'],\n\t\t\t'assignments': assignments\n\t\t})\n\treturn return_dict\n\ndef learning_list_topics(dynamodb, username, class_id):\n\ttopics = dynamodb.get_item(\n\t\tTableName = 'learning',\n\t\tKey = {\n\t\t\t'id': {'S': str(class_id)}\n\t\t},\n\t\tProjectionExpression = 'topics, members, class_name, teacher'\n\t) \n\t\n\tif 'Item' not in topics:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This class does not exist'\n\t\t}\n\ttopics = topics['Item']\n\tif username not in topics['members']['SS']:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This class does not exist'\n\t\t}\n\ttopics_list = []\n\tfor topic in topics['topics']['M']:\n\t\ttopics_list.append({\n\t\t\t'code': topic[1:],\n\t\t\t'name': topics['topics']['M'][topic]['M']['name']['S']\n\t\t})\n\treturn {\n\t\t'name': topics['class_name']['S'],\n\t\t'teacher': topics['teacher']['S'],\n\t\t'tags': topics_list\n\t}\n\ndef learning_get_topic(dynamodb, username, class_id, topic_id):\n\tgetTopic= dynamodb.get_item(\n\t\tTableName='learning',\n\t\tKey = {\n\t\t\t'id': {'S': str(class_id)}\n\t\t}, \n\t\tProjectionExpression='topics'\n\t)\n\tif 'Item' not in getTopic:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This class does not exist'\n\t\t}\n\tgetTopic = getTopic['Item']['topics']['M']['T' + topic_id]\n\treturn_list = []\n\tfor post in getTopic['M']['posts']['L']:\n\t\tattachments=[]\n\t\tif 'attachments' in post['M']:\n\t\t\tfor attachment in 
post['M']['attachments']['L']:\n\t\t\t\tattachments.append({\n\t\t\t\t\t\"name\":attachment['M']['name']['S'],\n\t\t\t\t\t\"link\":attachment['M']['link']['S']\n\t\t\t\t})\n\n\t\treturn_list.append({\n\t\t\t\"title\":post['M']['title']['S'],\n\t\t\t\"description\":post['M']['description']['S'],\n\t\t\t\"timestamp\":post['M']['timestamp']['N'],\n\t\t\t\"attachments\":attachments\n\t\t})\n\treturn return_list\n\ndef learning_show_assignments(dynamodb, username, class_id):\n\tassignments = dynamodb.get_item(\n\t\tTableName = 'learning',\n\t\tKey = {\n\t\t\t'id': {'S': str(class_id)}\n\t\t},\n\t\tProjectionExpression = 'members, assignments, class_name, teacher'\n\t)\n\tif 'Item' not in assignments:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This class does not exist'\n\t\t}\n\tassignments = assignments['Item']\n\tif username not in assignments['members']['SS']:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This assignment does not exist'\n\t\t}\n\treturn_dict = {\n\t\t'success': True,\n\t\t'name': assignments['class_name']['S'],\n\t\t'teacher': assignments['teacher']['S'],\n\t\t'assignments': []\n\t}\n\tfor assignment_code in assignments['assignments']['M'].keys():\n\t\tassignment_dict = {\n\t\t\t'code': assignment_code[1:],\n\t\t\t'name': assignments['assignments']['M'][assignment_code]['M']['name']['S'],\n\t\t\t# tags,\n\t\t\t'due': assignments['assignments']['M'][assignment_code]['M']['due']['N']\n\t\t}\n\t\treturn_dict['assignments'].append(assignment_dict)\n\treturn return_dict\n\ndef learning_assignment(dynamodb, username, class_id, assignment_id):\n\tassignments = dynamodb.get_item(\n\t\t\tTableName = 'learning',\n\t\t\tKey = {\n\t\t\t\t'id': {'S': str(class_id)}\n\t\t\t},\n\t\t\tProjectionExpression = 'members, class_name, assignments.A' + str(assignment_id) + ', assignment_submissions.A' + str(assignment_id) + '.' 
+ username\n\t\t)\n\tif 'Item' not in assignments:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This class does not exist'\n\t\t}\n\tassignments = assignments['Item']\n\tif username not in assignments['members']['SS']:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This assignment does not exist'\n\t\t}\n\treturn_dict = {\n\t\t'name': assignments['assignments']['M']['A' + str(assignment_id)]['M']['name']['S'],\n\t\t'class': assignments['class_name']['S'],\n\t\t'questions': [],\n\t\t'answers': []\n\t}\n\tfor question in assignments['assignments']['M']['A' + str(assignment_id)]['M']['questions']['L']:\n\t\tassignment_dict = {\n\t\t\t'question': question['M']['question']['S'],\n\t\t\t'type': question['M']['type']['S'],\n\t\t\t'marks': question['M']['marks']['N']\n\t\t}\n\t\tif question['M']['type']['S'] == 'mcq':\n\t\t\tassignment_dict['options'] = []\n\t\t\tfor option in question['M']['options']['L']:\n\t\t\t\tassignment_dict['options'].append(option['S'])\n\t\tif 'image' in question['M']:\n\t\t\tassignment_dict['image'] = question['M']['image']['S']\n\t\treturn_dict['questions'].append(assignment_dict)\n\n\tif username in assignments['assignment_submissions']['M']['A' + str(assignment_id)]['M']:\n\t\tfor answer in assignments['assignment_submissions']['M']['A' + str(assignment_id)]['M'][username]['M']['answers']['L']:\n\t\t\treturn_dict['answers'].append(list(answer.items())[0][1])\n\treturn return_dict\n\ndef learning_assignment_submit(dynamodb, username, class_id, assignment_id, answers):\n\timport time\n\tassignment = dynamodb.get_item(\n\t\tTableName = 'learning',\n\t\tKey = {\n\t\t\t'id': {'S': str(class_id)}\n\t\t},\n\t\tProjectionExpression = 'members, assignments.A' + str(assignment_id) + '.questions'\n\t)\n\tif 'Item' not in assignment:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This assignment does not exist'\n\t\t}\n\telse:\n\t\tassignment = assignment['Item']\n\tif username not in assignment['members']['SS']:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 404,\n\t\t\t'error': 'This assignment does not exist'\n\t\t}\n\terror_dict = {\n\t\t'success': False,\n\t\t'error_code': 400,\n\t\t'error': 'Your JSON response does not match the schema'\n\t}\n\tif len(answers) != len(assignment['assignments']['M']['A' + str(assignment_id)]['M']['questions']['L']):\n\t\treturn error_dict\n\n\tanswer_array = []\n\tfor i in range(0, len(answers)):\n\t\tif assignment['assignments']['M']['A' + str(assignment_id)]['M']['questions']['L'][i]['M']['type']['S'] == 'mcq':\n\t\t\tif int(answers[i]) >= len(assignment['assignments']['M']['A' + str(assignment_id)]['M']['questions']['L'][i]['M']['options']['L']):\n\t\t\t\treturn error_dict\n\t\tanswer_array.append({\n\t\t\t'N' if assignment['assignments']['M']['A' + str(assignment_id)]['M']['questions']['L'][i]['M']['type']['S'] == 'mcq' else 'S': str(answers[i])\n\t\t})\n\tdynamodb.update_item(\n\t\tTableName = 'learning',\n\t\tKey = {\n\t\t\t'id': {'S': str(class_id)}\n\t\t},\n\t\tUpdateExpression = 'SET assignment_submissions.#AID.#UID = :response',\n\t\tExpressionAttributeNames = {\n\t\t\t'#AID': 'A' + str(assignment_id),\n\t\t\t'#UID': username\n\t\t},\n\t\tExpressionAttributeValues = {\n\t\t\t\":response\": {\n\t\t\t\t\"M\": {\n\t\t\t\t\t\"submission_time\": {\"N\": str(round(time.time() * 1000))},\n\t\t\t\t\t\"answers\": {\"L\": answer_array}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t)\n\treturn {\n\t\t'success': True\n\t}\n\ndef 
library_index(dynamodb, username):\n\treturn {\n\t\t'success': True,\n\t\t'school_code': dynamodb.get_item(\n\t\t\tTableName = 'users',\n\t\t\tKey = {\n\t\t\t\t'username': {'S': username}\n\t\t\t},\n\t\t\tProjectionExpression = 'library.school.school_code'\n\t\t)['Item']['library']['M']['school']['M']['school_code']['S']\n\t}\n\ndef library_books(dynamodb, username, library, library_code):\n\t# library_code is \"nlb\" or \"xxxx\" (school code)\n\tif library != 'school' and library != 'nlb':\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 400,\n\t\t\t'error': 'Library can only be \"school\" or \"nlb\"'\n\t\t}\n\tif library == 'nlb' and library_code != 'nlb':\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 400,\n\t\t\t'error': 'Library_code can only be \"nlb\"'\n\t\t}\n\tif library == 'school' and (library_code.isdigit() == False or len(library_code) != 4):\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 400,\n\t\t\t'error': 'Library_code can only be a four-digit integer (including leading zeros)'\n\t\t}\n\n\tbooks = dynamodb.get_item(\n\t\tTableName = 'users',\n\t\tKey = {\n\t\t\t'username': {'S': username}\n\t\t},\n\t\tProjectionExpression = 'library.' + library + '.borrowed'\n\t)\n\tif 'Item' not in books:\n\t\treturn {\n\t\t\t'success': True,\n\t\t\t'books': []\n\t\t}\n\telse:\n\t\tbooks = books['Item']['library']['M'][library]['M']['borrowed']['L']\n\tbooks = [i for i in books if i['M']['returned']['BOOL'] == False]\n\tid_list = []\n\tfor i in books:\n\t\tid_list.append({'id': i['M']['id']})\n\tif len(id_list) == 0:\n\t\treturn {\n\t\t\t'success': True,\n\t\t\t'books': []\n\t\t}\n\tbook_data = dynamodb.batch_get_item(\n\t\tRequestItems = {\n\t\t\t'books_' + library_code: {\n\t\t\t\t'Keys': id_list,\n\t\t\t\t'ProjectionExpression': 'id, book_name, author, synopsis'\n\t\t\t}\n\t\t}\n\t)['Responses']['books_' + library_code]\n\tbook_data_formatted = {}\n\tfor book in book_data:\n\t\tbook_data_formatted[book['id']['S']] = {\n\t\t\t'name': book['book_name']['S'],\n\t\t\t'synopsis': book['synopsis']['S'],\n\t\t\t'author': book['author']['S']\n\t\t}\n\treturn_list = []\n\tfor book in books:\n\t\treturn_list.append({\n\t\t\t'due': book['M']['due']['N'],\n\t\t\t**book_data_formatted[book['M']['id']['S']]\n\t\t})\n\n\treturn return_list\n\ndef settings_get(dynamodb, username):\n\tsettings = dynamodb.get_item(\n\t\tTableName = 'users',\n\t\tKey = {\n\t\t\t'username': {'S': username}\n\t\t},\n\t\tProjectionExpression = 'address, email, phone, theme, full_name'\n\t)['Item']\n\treturn {\n\t\t'success': True,\n\t\t'address': settings['address']['S'],\n\t\t'email': settings['email']['S'],\n\t\t'phone': settings['phone']['N'],\n\t\t'theme': settings['theme']['S'],\n\t\t'name': settings['full_name']['S'],\n\t}\n\ndef settings_update(dynamodb, username, key, value):\n\tif key not in {'address', 'email', 'phone', 'theme', 'password'}:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 400,\n\t\t\t'error': 'Only address, email, phone, theme, and password can be edited'\n\t\t}\n\tif key == 'password':\n\t\tif len(value) < 8:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 400,\n\t\t\t\t'error': 'Your password must be at least 8 characters long'\n\t\t\t}\n\t\tfrom passlib.hash import pbkdf2_sha256\n\t\tvalue = pbkdf2_sha256.hash(value, rounds = 25000, salt_size = 16)\n\tif key == 'theme' and value not in {'light', 'dark'}:\n\t\treturn {\n\t\t\t'success': False,\n\t\t\t'error_code': 400,\n\t\t\t'error': 'Only light and dark themes are 
available'\n\t\t}\n\tif key == 'phone':\n\t\tif not value.isdigit():\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 400,\n\t\t\t\t'error': 'Only numbers are allowed for phone numbers'\n\t\t\t}\n\t\tif int(value) < 80000000 or int(value) > 99999999:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 400,\n\t\t\t\t'error': 'Only 8-digit mobile phone numbers are allowed'\n\t\t\t}\n\tif key == 'email':\n\t\timport re\n\t\tif not re.match(r\"[^@]+@[^@]+\\.[^@]+\", value):\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 400,\n\t\t\t\t'error': 'Invalid email format'\n\t\t\t}\n\tdynamodb.update_item(\n\t\tTableName = 'users',\n\t\tKey = {\n\t\t\t'username': {'S': username}\n\t\t},\n\t\tUpdateExpression = \"SET #KEY = :value\",\n\t\tExpressionAttributeNames = {\n\t\t\t'#KEY': key\n\t\t},\n\t\tExpressionAttributeValues = {\n\t\t\t\":value\": {\n\t\t\t\t\"N\" if key == 'phone' else 'S': value\n\t\t\t}\n\t\t}\n\t)\n\treturn {\n\t\t'success': True\n\t}\n\ndef lambda_handler(param, context):\n\tdynamodb = boto3.client('dynamodb')\n\tif param['user']['logged_in'] == False:\n\t\tif param['request']['type'] == 'user_login':\n\t\t\treturn user_login(dynamodb, param['request']['username'], param['request']['password'])\n\t\telse:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error': 404\n\t\t\t}\n\telse:\n\t\tuser_data = dynamodb.get_item(\n\t\t\tTableName = 'users',\n\t\t\tKey = {\n \t\t\t\t'username': {'S': param['user']['username']}\n\t\t\t},\n\t\t\tProjectionExpression = 'session_ids, rights, classes'\n\t\t)\n\t\tif 'Item' not in user_data:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 401,\n\t\t\t\t'error': 'This username does not exist'\n\t\t\t}\n\t\telse:\n\t\t\tuser_data = user_data['Item']\n\n\t\tif param['user']['session_id'] in user_data['session_ids']['SS']:\n\t\t\tif param['request']['type'] == 'message_new':\n\t\t\t\treturn message_new(dynamodb, param['user']['username'], param['request']['title'], param['request']['markdown'], user_data['rights']['S'], param['request']['student'])\n\t\t\telif param['request']['type'] == 'message_list':\n\t\t\t\treturn message_list(dynamodb, user_data['rights']['S'])\n\t\t\telif param['request']['type'] == 'message_view':\n\t\t\t\treturn message_view(dynamodb, param['user']['username'], user_data['rights']['S'], param['request']['id'])\n\t\t\telif param['request']['type'] == 'message_respond':\n\t\t\t\treturn message_respond(dynamodb, param['user']['username'], user_data['rights']['S'], param['request']['id'], param['request']['response'])\n\t\t\telif param['request']['type'] == 'records_get':\n\t\t\t\treturn records_get(dynamodb, param['user']['username'])\n\t\t\telif param['request']['type'] == 'learning_list':\n\t\t\t\treturn learning_list(dynamodb, param['user']['username'])\n\t\t\telif param['request']['type'] == 'learning_list_topics':\n\t\t\t\treturn learning_list_topics(dynamodb, param['user']['username'], param['request']['id'])\n\t\t\telif param['request']['type'] == 'learning_get_topic':\n\t\t\t\treturn learning_get_topic(dynamodb, param['user']['username'], param['request']['class_id'], param['request']['topic_id'])\n\t\t\telif param['request']['type'] == 'learning_show_assignments':\n\t\t\t\treturn learning_show_assignments(dynamodb, param['user']['username'], param['request']['class_id'])\n\t\t\telif param['request']['type'] == 'learning_assignment':\n\t\t\t\treturn learning_assignment(dynamodb, param['user']['username'], param['request']['class_id'], 
param['request']['assignment_id'])\n\t\t\telif param['request']['type'] == 'learning_assignment_submit':\n\t\t\t\treturn learning_assignment_submit(dynamodb, param['user']['username'], param['request']['class_id'], param['request']['assignment_id'], param['request']['answers'])\n\t\t\telif param['request']['type'] == 'library_index':\n\t\t\t\treturn library_index(dynamodb, param['user']['username'])\n\t\t\telif param['request']['type'] == 'library_books':\n\t\t\t\treturn library_books(dynamodb, param['user']['username'], param['request']['library'], param['request']['library_code'])\n\t\t\telif param['request']['type'] == 'settings_get':\n\t\t\t\treturn settings_get(dynamodb, param['user']['username'])\n\t\t\telif param['request']['type'] == 'settings_update':\n\t\t\t\treturn settings_update(dynamodb, param['user']['username'], param['request']['key'], param['request']['value'])\n\t\t\telse:\n\t\t\t\treturn {\n\t\t\t\t\t'success': False,\n\t\t\t\t\t'error': 404\n\t\t\t\t}\n\t\telse:\n\t\t\treturn {\n\t\t\t\t'success': False,\n\t\t\t\t'error_code': 401,\n\t\t\t\t'error': 'This session ID is invalid'\n\t\t\t}","repo_name":"sheepymeh/edupass","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":21427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71778251756","text":"import os\nimport warnings\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import ElasticNet\n\nimport mlflow\nimport mlflow.sklearn\n\n\ndef train(in_alpha, in_l1_ratio):\n\n\n def eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\n\n warnings.filterwarnings(\"ignore\")\n np.random.seed(40)\n\n # Read the wine-quality csv file (make sure you're running this from the root of MLflow!)\n # Assumes wine-quality.csv is located in the same folder as the notebook\n wine_path = \"wine-quality.csv\"\n data = pd.read_csv(wine_path)\n\n # Split the data into training and test sets. 
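train_test_split defaults to test_size=0.25 when neither test_size nor train_size is given, i.e. a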
(0.75, 0.25) split.\n    train, test = train_test_split(data)\n\n    # The predicted column is \"quality\" which is a scalar from [3, 9]\n    train_x = train.drop([\"quality\"], axis=1)\n    test_x = test.drop([\"quality\"], axis=1)\n    train_y = train[[\"quality\"]]\n    test_y = test[[\"quality\"]]\n\n    # Set default values if no alpha is provided\n    if in_alpha is None:\n        alpha = 0.5\n    else:\n        alpha = float(in_alpha)\n\n    # Set default values if no l1_ratio is provided\n    if in_l1_ratio is None:\n        l1_ratio = 0.5\n    else:\n        l1_ratio = float(in_l1_ratio)\n    mlflow.set_experiment('mlflow_demo')\n    # Useful for multiple runs (only doing one run in this sample notebook) \n    with mlflow.start_run():\n        # Execute ElasticNet\n        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)\n        lr.fit(train_x, train_y)\n\n        # Evaluate Metrics\n        predicted_qualities = lr.predict(test_x)\n        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)\n\n        # Print out metrics\n        print(\"Elasticnet model (alpha=%f, l1_ratio=%f):\" % (alpha, l1_ratio))\n        print(\"  RMSE: %s\" % rmse)\n        print(\"  MAE: %s\" % mae)\n        print(\"  R2: %s\" % r2)\n\n        # Log parameter, metrics, and model to MLflow\n        mlflow.log_param(\"alpha\", alpha)\n        mlflow.log_param(\"l1_ratio\", l1_ratio)\n        mlflow.log_metric(\"rmse\", rmse)\n        mlflow.log_metric(\"r2\", r2)\n        mlflow.log_metric(\"mae\", mae)\n\n        mlflow.sklearn.log_model(lr, \"model_wine\")\n\nif __name__ == \"__main__\":\n    alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.5\n    l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.5\n    train(alpha,l1_ratio)","repo_name":"getindata/mlflow-demo","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"29617808453","text":"\"\"\"\r\nThis script gathers all samples in the provided dataset and copies them to a folder including an info file containing the image labels.\r\n\"\"\"\r\n#%% Imports\r\nimport os\r\nfrom os.path import exists\r\nimport pandas as pd\r\nimport shutil\r\nimport sys\r\n\r\nimport config\r\n\r\n#%% Helper functions\r\ndef get_image_name(path):\r\n    return os.path.split(path)[-1]\r\n\r\ndef copy_images(df, source, dest):\r\n    # check if the destination folder exists and if not create it\r\n    if exists(dest):\r\n        clean = input(f\"{dest} already exists, would you like to erase existing images before copying new images? [Y|N]: \")\r\n        \r\n        if clean.upper() == 'Y':\r\n            shutil.rmtree(dest)\r\n        else:\r\n            print(\"Exiting to avoid mixing dataset\")\r\n            sys.exit()\r\n\r\n    if not exists(dest):\r\n        os.makedirs(dest)\r\n\r\n    for row in df.values:\r\n        split = os.path.split(row[0])    \r\n        type = split[0][1:]\r\n        name = split[1]\r\n        origin = os.path.join(source, type, name)\r\n        image_dest = os.path.join(dest, name)\r\n\r\n        shutil.copy(origin, image_dest)\r\n\r\n#%% Configure output directories\r\ndest_path = input(f\"Specify output directory ({config.DATA_PATH}): \") or config.DATA_PATH\r\nif not exists(dest_path):\r\n    create_dest = input(f\"The directory {dest_path} doesn't exist, do you wish to create it? 
[Y/N]\").upper()\r\n\r\n if create_dest == \"Y\":\r\n os.makedirs(dest_path)\r\n else:\r\n sys.exit()\r\n\r\ntest_path = os.path.join(dest_path, \"test_data\")\r\ntest_info = os.path.join(dest_path, \"test.csv\")\r\n\r\nds_info = os.path.join(dest_path, \"info.txt\")\r\n\r\n#%% Determine source\r\nsource_path = input(f\"Specify source directory ({config.SOURCE_PATH}): \") or config.SOURCE_PATH\r\nif not exists(source_path):\r\n print(f\"Invalid path specified: {source_path}\")\r\n sys.exit()\r\n\r\ntrain_info_file = os.path.join(source_path, \"train.csv\")\r\nval_info_file = os.path.join(source_path, \"validation.csv\")\r\n\r\nif not exists(train_info_file) \\\r\n or not exists(val_info_file)\\\r\n or not exists(os.path.join(source_path,\"train\"))\\\r\n or not exists(os.path.join(source_path,\"validation\")):\r\n print(\"Specified source is missing some of the missing elements: train.csv, validation.csv, train/, validation/\")\r\n sys.exit()\r\n\r\n#%% Get get image labels\r\ntrain_df = pd.read_csv(train_info_file)\r\nval_df = pd.read_csv(val_info_file)\r\ndf = train_df.merge(val_df, how=\"outer\")\r\n\r\n#%% Create dataframe with type\r\ndf[\"name\"] = df[\"img_dir\"].apply(get_image_name)\r\ndf[\"type\"] = df[\"img_labels\"]\r\ndf = df.drop([\"index\",\"img_labels\"],axis=1)\r\n\r\n#%% Copy images to respective folders\r\ncopy_images(df, source_path, test_path)\r\n\r\n#%% Create information files\r\ndf = df.drop([\"img_dir\"],axis=1)\r\ndf.to_csv(test_info)\r\n\r\n#%% Dataset info \r\nprint(\"Dataset created\")\r\n\r\ninfo_text = f\"\"\"\r\nDataset stats:\r\n Size: {len(df)}\r\n\"\"\"\r\n\r\nwith open(ds_info, \"w\") as f:\r\n f.write(info_text)\r\n\r\nprint(info_text)","repo_name":"PBonvang/fundus-image-classifier","sub_path":"build_full_test_set.py","file_name":"build_full_test_set.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35269197254","text":"# carica il file di bob per la stopping power\n\nimport numpy as np\nimport lab4\n\nT, dedx_data = np.loadtxt('../bob/stopping_power_au.txt', unpack=True)\n\nrho_au = 19.320 # densità dell'oro\nrho_al = 2.699\n\ndedx = lab4.interp(T, dedx_data)\ndedx_min = np.min(T)\n\nif __name__ == '__main__':\n from matplotlib import pyplot as plt\n fig = plt.figure('dedx')\n fig.clf()\n fig.set_tight_layout(True)\n\n ax = fig.add_subplot(111)\n ax.plot(T, dedx_data * rho_au / 10000, '.k')\n x = np.linspace(np.min(T), np.max(T), 10000)\n ax.plot(x, [dedx(x) * rho_au / 10000 for x in x], '-k')\n\n ax.set_xlabel('energia [MeV]')\n ax.set_ylabel('dE/dx [MeV $\\\\mu$m$^{-1}$]')\n ax.set_title('Perdita di energia particelle $\\\\alpha$ nell\\'oro')\n ax.grid(linestyle=':')\n\n fig.show()\n","repo_name":"Gattocrucco/lab4mpr","sub_path":"esp-3-rutherford/marasciulli/sezione/dedx.py","file_name":"dedx.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"28391405254","text":"import streamlit as st\r\nfrom PIL import Image\r\nfrom model import create_model,predict_single\r\nimport torchvision.transforms as transforms\r\nimport numpy as np\r\nimport cv2\r\nfrom edge import Canny_detector\r\nfrom region import Kmeans_cluster\r\n\r\ndef load_image(image_file):\r\n\timg = Image.open(image_file)\r\n\treturn img\r\n\r\ndef convert_image(img):\r\n im = Image.open(img).convert('RGB')\r\n opencvImage = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\r\n opencvImage = 
cv2.resize(opencvImage,(300,300))\r\n    return opencvImage\r\n\r\ntransform = transforms.Compose([\r\n    transforms.ToTensor()\r\n])\r\n\r\nlabels = {\r\n    0: 'Casting is defective',\r\n    1: 'Casting is ok'\r\n}\r\n\r\ndef decode_target(target, text_labels=False, threshold=0.5):\r\n    result = []\r\n    for i, x in enumerate(target):\r\n        if (x >= threshold):\r\n            result.append(labels[i])\r\n    return ' '.join(result)\r\n\r\n\r\nst.subheader(\"Please upload a metal casting picture\")\r\nimage_file = st.file_uploader(\"Upload Here\", type=[\"png\",\"jpg\",\"jpeg\"])\r\n\r\nif image_file is not None:\r\n\r\n    st.image(load_image(image_file),width=250)\r\n\r\n    predict = st.button(\"Predict the condition\")\r\n\r\n    if predict:\r\n\r\n        status = True\r\n        status = create_model()\r\n\r\n        if(status):\r\n            #im_pil = Image.fromarray(im_np)\r\n            img_tensor = transform(load_image(image_file))\r\n            pre = decode_target(predict_single(img_tensor))\r\n            if(pre==\"Casting is defective\"):\r\n                st.warning(pre)\r\n            else:\r\n                st.success('Casting is ok')\r\n            img = convert_image(image_file)\r\n            gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)\r\n            _,thresh = cv2.threshold(gray, np.mean(gray), 255, cv2.THRESH_BINARY_INV)\r\n            edges = cv2.dilate(cv2.Canny(thresh,0,255),None)\r\n            cnt = sorted(cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2], key=cv2.contourArea)[-1]\r\n            mask = np.zeros((256,256), np.uint8)\r\n            masked = cv2.drawContours(mask, [cnt],-1, 255, -1)\r\n            edge_img = Canny_detector(img)\r\n            region_img = Kmeans_cluster(img,3)\r\n            st.image(region_img,channels=\"RGB\")\r\n            st.image(edge_img,clamp=True,channels=\"RGB\")\r\n\r\n        if(status==False):\r\n            st.spinner(\"Please wait, we are processing your request\")\r\n","repo_name":"alternativeritam/Metal_Casting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17145973159","text":"import gc\nimport time\nimport os\nimport sys\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.common.exceptions import WebDriverException,NoSuchElementException, TimeoutException\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.options import Options\n\nfrom utils.util import internet_on\n\n\nclass Scraper():\n\n    def __init__(self, page_url:str, headless:bool = False) -> None:\n        self.headless = headless\n        self._driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()) \n                            ,options=self.get_options(self.headless)) # Define the driver we are using\n        self._current_url = page_url\n        self.init_page()\n\n    \n    def _get_driver(self):\n        return self._driver\n\n    def _get_current_url(self):\n        return self._current_url\n\n    def _set_current_url(self, url):\n        self._current_url = url\n    \n    current_url = property(fget=_get_current_url, fset=_set_current_url)\n    driver:WebDriver = property(fget=_get_driver)\n\n    def get_options(self, headless:bool):\n        chrome_options = Options()\n        if headless:\n            chrome_options.add_argument('--headless')\n            chrome_options.add_argument(\"--disable-gpu\")\n            chrome_options.add_argument('--no-sandbox')\n            
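# Commonly used stability flags for headless Chrome in containers/CI (assumed intent, not project-specific): --disable-dev-shm-usage, for instance, avoids crashes when /dev/shm is small.\n            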
chrome_options.add_argument('--disable-dev-shm-usage')\n            chrome_options.add_argument(\"--disable-extensions\")\n            chrome_options.add_argument('--disable-blink-features=AutomationControlled')\n        prefs = {'profile.default_content_setting_values': {'images': 2, 'plugins': 2, 'popups': 2, 'geolocation': 2, 'notifications': 2, 'auto_select_certificate': 2, 'fullscreen': 2,\n                                                            'mouselock': 2, 'mixed_script': 2, 'media_stream': 2, 'media_stream_mic': 2, 'media_stream_camera': 2, 'protocol_handlers': 2, 'ppapi_broker': 2,\n                                                            'automatic_downloads': 2, 'midi_sysex': 2, 'push_messaging': 2, 'ssl_cert_decisions': 2, 'metro_switch_to_desktop': 2,\n                                                            'protected_media_identifier': 2, 'app_banner': 2, 'site_engagement': 2, 'durable_storage': 2}}\n        chrome_options.add_experimental_option(\"prefs\", prefs)\n        chrome_options.add_argument(\"--silent\")\n        # Without this return the options built above were silently discarded\n        return chrome_options\n\n    def init_page(self):\n        while not internet_on(): continue\n        self._driver.maximize_window()\n        self._driver.get(self.current_url)\n    \n    def ready_document(self,tries=0):\n        if tries == 4:\n            return\n        timeout = time.time() + 60\n        while time.time() <= timeout:\n            try:\n                page_state = self._driver.execute_script('return document.readyState;')\n                if page_state == 'complete':\n                    tries = 4\n                    return\n            except WebDriverException as _:\n                self.crash_refresh_page()\n\n        if tries < 4:\n            self._driver.refresh()\n            self.ready_document(tries+1)\n        print(\"The page went down\")\n        duration = 5 # seconds\n        freq = 440 # Hz\n        if sys.platform == 'linux':\n            os.system('play -nq -t alsa synth {} sine {}'.format(duration, freq))\n        exit()\n    \n    def crash_refresh_page(self):\n        while not internet_on():\n            continue\n        try:\n            self._driver.close()\n            gc.collect(2)\n        except WebDriverException:\n            pass\n        self._driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()) \n                            ,options=self.get_options(self.headless)) \n        self.init_page()\n        self.ready_document()\n    \n    def element_wait_lambda_return(self,element, script):\n        values = self.driver.execute_script(script, element)\n        date, hour = values.split()[0:2]\n        return date.replace('T00:00:00.000Z', f\" {hour[0:9]}\")\n\n    def element_wait_searh(self, time:int, by, value:str) -> WebElement:\n        return WebDriverWait(self._driver, time ).until(EC.presence_of_element_located((by, value)))\n\n    def elements_wait_searh(self, time:int, by, value:str) -> list[WebElement]:\n        return WebDriverWait(self._driver, time).until(EC.presence_of_all_elements_located((by, value)))\n    \n    def element_click_wait_searh(self, time:int, by, value:str) -> WebElement:\n        return WebDriverWait(self._driver, time).until(EC.element_to_be_clickable((by, value)))\n    \n    def click(self,element:WebElement):\n        self.driver.execute_script(\"arguments[0].scrollIntoView();\", element)\n        time.sleep(1)\n        element.click()\n    \n    def close(self):\n        self._driver.close()","repo_name":"d3lt409/scraping-bet","sub_path":"src/models/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38745962995","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 22 09:45:49 2019\n\nRUN THE FOLLOWING FIRST:\n\npython3 CANGAMetricsDriver.py -v Psi\n--ss testdata_CSne16_np4_1.nc\n--st testdata_ICO16_np4_1.nc\n--s2t testdata_ICO16_np4_1.nc\n--sm meshes/outCSne16.g_RIPPED\n--tm meshes/outICO16.g_RIPPED\n--smc 1 --tmc 1\n\nData in \"testdata\" files is not used. 
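The mesh arrays varConS/varCoordS and varConT/varCoordT used below are expected to be left in the session by that driver run.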
This run is to load the mesh data.\n\n@author: jeguerra\n\"\"\"\nimport pyshtools\nimport numpy as np\nimport math as mt\nfrom computeAreaIntegral import computeAreaIntegral\n\n'''\n#%% EVALUATE TEST = 1 DATA AND EXPAND IN SH\nNX = 720\nNY = 360\nlatgrid, longrid = np.meshgrid(np.linspace(-mt.pi, mt.pi, NY), \\\n                               np.linspace(0.0, 2.0 * mt.pi, NX), indexing='ij')\n\ntest1 = np.zeros((NY,NX))\ntest2 = np.zeros((NY,NX))\nfor ii in range(NY):\n    for jj in range(NX):\n        test1[ii,jj] = (2.0 + mt.cos(latgrid[ii,jj]) * \\\n                        mt.cos(latgrid[ii,jj]) * \\\n                        mt.cos(2.0 * longrid[ii,jj])) # TEST 1\n\n        test2[ii,jj] = (2.0 + (np.sin(2.0 * latgrid[ii,jj]))**16.0 * \\\n                        np.cos(16.0 * longrid[ii,jj])) # TEST 2\n\n# Expand CS16 in SH\ncoeffs = pyshtools.expand.SHExpandDH(test2, sampling=2)\nclm = pyshtools.SHCoeffs.from_array(coeffs)\n'''\nAREAFV_S = 0.0\nAREAFV_T = 0.0\norder = 10\n\nNC = len(varConS)\nareaFV = np.zeros((NC, 1))\nfor ii in range(NC):\n    # Compute areas by triangular quadrature on source FV mesh\n    cdex = varConS[ii, :] - 1\n    thisCell = varCoordS[:, cdex.astype(int)]\n    areaFV[ii] = computeAreaIntegral(None, thisCell, order, False, False)\n    AREAFV_S += areaFV[ii]\n\nNC = len(varConT)\nareaFV = np.zeros((NC, 1))\nfor ii in range(NC):\n    # Compute areas by triangular quadrature on target FV mesh\n    cdex = varConT[ii, :] - 1\n    thisCell = varCoordT[:, cdex.astype(int)]\n    areaFV[ii] = computeAreaIntegral(None, thisCell, order, False, False)\n    AREAFV_T += areaFV[ii]\n\n# %%\n#print(\"Unit Sphere Area, Built-in reference value: %16.15e\" %(4.0 * mt.pi))\nprint(\n    \"Test 2 Sphere Integral, computed on CS16 @ \" +\n    str(order) +\n    \"th order: %16.15e\" %\n    AREAFV_S)\nprint(\n    \"Test 2 Sphere Integral, computed on ICO16 @ \" +\n    str(order) +\n    \"th order: %16.15e\" %\n    AREAFV_T)\n","repo_name":"CANGA/MIRA","sub_path":"test/TestAreaIntegralFV.py","file_name":"TestAreaIntegralFV.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"74188182635","text":"from typing import List\n\n\nclass Solution:\n    def longestCommonPrefix(self, strs: List[str]) -> str:\n\n        if not strs:\n            return \"\"\n\n        min_len = min([len(s) for s in strs])\n        if min_len == 0:\n            return \"\"\n\n        common_prefix = \"\"\n        for i in range(min_len):\n            # I honestly can't understand what I wrote half a year ago...\n            # This approach is actually pretty clever, a bit of reverse thinking: take the i-th character of every string to form a set; if the set has size 1, this position is identical across all strings and can be appended to the common prefix.\n            if len({s[i] for s in strs}) == 1:\n                common_prefix += strs[0][i]\n            else:\n                break\n\n        return common_prefix\n\nif __name__ == \"__main__\":\n    sol = Solution()\n    print(sol.longestCommonPrefix([\"flower\",\"flow\",\"flight\"]))\n    print(sol.longestCommonPrefix([\"dog\",\"racecar\",\"car\"]))","repo_name":"iamkissg/leetcode","sub_path":"leetcode/14.longest_common_prefix.py","file_name":"14.longest_common_prefix.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1907298084","text":"from spack.package import *\n\n\nclass Mpl(CMakePackage):\n    \"\"\"A C++17 message passing library based on MPI.\"\"\"\n\n    homepage = \"https://rabauke.github.io/mpl/html/\"\n    git = \"https://github.com/rabauke/mpl.git\"\n    url = \"https://github.com/rabauke/mpl/archive/refs/tags/v0.3.0.tar.gz\"\n    maintainers(\"rabauke\")\n\n    version(\"develop\", branch=\"master\")\n    version(\"0.3.0\", tag=\"v0.3.0\", commit=\"e6bd4926914127f3609a14474aa4a9c4fabbff0b\")\n    version(\"0.2.1\", tag=\"v0.2.1\", 
commit=\"5bee297b453d7b66a803453bfc6884611a36c4d0\")\n version(\"0.2.0\", tag=\"v0.2.0\", commit=\"f322352c93627c1b91d8efb1c4ee2e4873aed016\")\n version(\"0.1\", tag=\"v0.1\", commit=\"970d0f3436ddbfcf2eba12c5bc7f4f7660e433ca\")\n\n depends_on(\"mpi\")\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/mpl/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"28124592067","text":"#!usr/bin/python\n# -*- coding:utf8 -*-\n\"\"\"\nsession的使用\n\"\"\"\nimport requests\n\n# 准备请求头\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36\"\n}\n\n# 登录的url地址\nlogin_url = \"http://www.jxmjc.com/login.php?\"\n# 登录之后访问的页面\nmy_url = \"http://www.jxmjc.com/u.php\"\n\n# 请求体数据\ndata = {\n \"lgt\": 0,\n \"pwuser\": \"itcast_test\",\n \"pwpwd\": 123456,\n \"hideid\": 0,\n \"forward\": \"\",\n \"jumpurl\": \"http://www.jxmjc.com/u.php?verify=7de38797\",\n \"m\": \"bbs\",\n \"step\": 2,\n \"cktime\": 31536000,\n}\n\n# 创建session对象\nsession = requests.session()\n\n# 使用session发送登录请求\nresponse = session.post(login_url, headers=headers, data=data)\nprint(response.cookies)\n\n# cookie_jar和cookie_dict之间的转换\ncookies_dict = requests.utils.dict_from_cookiejar(response.cookies)\nprint(cookies_dict)\ncookies_jar = requests.utils.cookiejar_from_jar(cookies_dict)\nprint(cookies_jar)\n\n# # 使用session再次去请求登陆之后的页面\n# response = session.get(my_url, headers=headers)\n#\n# print(response.content.decode(\"gbk\"))\n#\n# with open('login_html.html', 'w', encoding='gbk') as f:\n# f.write(response.content.decode('gbk'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"youaresherlock/PythonPractice","sub_path":"webspider/2.requests模块/2.3requests模块处理cookie/session的使用.py","file_name":"session的使用.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36708462848","text":"import torch\r\nimport draw_graph as draw\r\nimport os\r\n\r\n#@save\r\ndef show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(9, 16),\r\n cmap='Reds'):\r\n \"\"\"显示矩阵热图\"\"\"\r\n draw.use_svg_display()\r\n\r\n num_rows, num_cols = matrices.shape[0], matrices.shape[1]\r\n fig, axes = draw.plt.subplots(num_rows, num_cols, figsize=figsize,\r\n sharex=True, sharey=True, squeeze=False)\r\n for i, (row_axes, row_matrices) in enumerate(zip(axes, matrices)):\r\n for j, (ax, matrix) in enumerate(zip(row_axes, row_matrices)):\r\n pcm = ax.imshow(matrix.detach().numpy(), cmap=cmap)\r\n if i == num_rows - 1:\r\n ax.set_xlabel(xlabel)\r\n if j == 0:\r\n ax.set_ylabel(ylabel)\r\n if titles:\r\n ax.set_title(titles[j])\r\n fig.colorbar(pcm, ax=axes, shrink=0.6)\r\n path = '/home/lfq/workspace/classification/result'\r\n save_path = os.path.join(os.path.abspath(path), 'show_attention' + '.jpg')\r\n draw.plt.savefig(save_path)\r\n\r\nnum_layers, num_heads, num_steps = 1, 3, 12\r\n\r\n#下载测试例中对应的self-attention层中, 各个自注意力头得到的权重\r\nattention_weights = torch.load('/home/lfq/workspace/classification/attention_value.csv')\r\nattention_weights = attention_weights.reshape((num_layers, num_heads,\r\n -1, num_steps))\r\nattention_weights = attention_weights.cpu()\r\nprint(attention_weights.shape)\r\nshow_heatmaps(attention_weights, xlabel='Keys', ylabel='Queries', titles=['Head %d' % i for i in range(1, 
4)])\r\n","repo_name":"Alan-lab/cdf_classification","sub_path":"Viewer/show_attention.py","file_name":"show_attention.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"73401624875","text":"import sys\nimport socket\n\nfrom common.constants import PROXY_PING, REVERSE_PING, BACKLOG, \\\n PING_TYPE_LEN, DIRECT_PING\nfrom common.exceptions import ConnectionClosedException\nfrom common.utils import receive\nfrom server.ping import DirectPing, ReversePing, ProxyPing\n\n\nclass Server:\n\n def __init__(self, address):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind(address)\n self.sock.listen(BACKLOG)\n\n def run(self):\n while True:\n conn, addr = self.sock.accept()\n if not conn:\n print(\"Server socket closed\", file=sys.stderr)\n break\n\n try:\n ping_type = receive(conn, PING_TYPE_LEN)\n if ping_type == DIRECT_PING:\n print(\"Running Direct Ping\")\n DirectPing(conn).run()\n elif ping_type == REVERSE_PING:\n print(\"Running Reverse Ping\")\n ReversePing(conn).run()\n elif ping_type == PROXY_PING:\n print(\"Running Proxy Ping\")\n ProxyPing(conn).run()\n print(\"DONE!\")\n except ConnectionClosedException as e:\n print(f\"ERROR: {str(e)}\")\n finally:\n conn.close()\n\n self.sock.close()\n","repo_name":"leobellaera/intro-distribuidos-tp1","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24256396433","text":"import boto.ec2, boto.vpc, netaddr, os, random, time\nimport Config\nfrom tier import Tier\n\nrandom.seed()\n\nclass Vpc(boto.vpc.vpc.VPC):\n \"\"\"Container for VPC configuration and convenience methods.\"\"\"\n def __init__(self, connection=None):\n super(Vpc, self).__init__(connection)\n \n def __eq__(self, obj):\n if type(obj) == Vpc:\n return (self.id, self.cidr_block) == (obj.id, obj.cidr_block)\n elif type(obj) == str:\n if obj.split('-')[0] == 'vpc':\n return self.id == obj\n elif obj.split('-')[-1] == self.get_env():\n return self.get_name() == obj\n elif type(obj) == netaddr.IPNetwork:\n return self.cidr_block == obj\n return False\n\n def __ne__(self, obj):\n return not self == obj\n\n def get_name(self):\n return self.tags.get('Name', False)\n\n def set_name(self, data):\n self.v.add_tag('Name', data)\n\n def get_env(self):\n return self.tags.get('Environment', False)\n\n def set_env(self, data):\n self.v.add_tag('Environment', data)\n\n def get_cidr(self):\n return netaddr.IPNetwork(self.cidr_block)\n\n def _wait(self, vpc):\n try:\n while vpc.update() != 'available':\n time.sleep(1)\n except self.connection.ResponseError as e:\n self._wait(vpc)\n\n def wait(self, obj):\n try:\n while obj.state != 'available':\n time.sleep(1)\n except self.connection.ResponseError as e:\n self.wait(obj)\n\n def encode_tag(self, values):\n \"\"\"Encode a list of (key,value) tuples into a format suitable for a tag value\"\"\"\n tag_set = []\n for pair in values:\n (k, v) = pair\n tag_set.append(\"%s:%s\" % (k, v))\n return ';'.join(tag_set)\n\n def decode_tag(self, tag):\n \"\"\"Decode a tag value into a list of (key,value) tuples\"\"\"\n tag_set = []\n values = tag.split(';')\n for item in values:\n if item is not '':\n pair = tuple(item.split(':'))\n tag_set.append(pair)\n return tag_set\n\n def add_tier_tag(self, tier):\n tiers = self.get_tiers()\n tiers.append(tier)\n tag = self.encode_tag([ (t.name, 
t.cidr_block) for t in tiers ])\n self.add_tag('Tiers', tag)\n\n def del_tier_tag(self, tier):\n tiers = self.get_tiers()\n tiers.remove(tier)\n tag = self.encode_tag([ (t.name, t.cidr_block) for t in tiers ])\n self.add_tag('Tiers', tag)\n\n def get_tiers(self, name=False):\n tiertag = self.tags.get('Tiers', '')\n tiers = []\n\n for t in self.decode_tag(tiertag):\n (t_name, cidr_block) = t\n tier = Tier(name=t_name, cidr_block=netaddr.IPNetwork(cidr_block),\n connection=self.connection, vpc_id=self.id)\n tiers.append(tier)\n\n if name:\n for t in tiers:\n if t.name == name:\n return t\n else:\n return tiers\n\n def add_tier(self, tier_name, cidr_block, subnet_size=21):\n tier = Tier(name=tier_name, cidr_block=cidr_block, connection=self.connection, vpc_id=self.id)\n tier.build([z.name for z in self.region.get_zones()], subnet_size)\n self.add_tier_tag(tier)\n return tier\n\n def delete_tier(self, tier):\n tier.delete()\n self.del_tier_tag(tier)\n return True\n\n def rename_tier(self, old, new):\n tier = self.get_tiers(name=old)\n self.del_tier_tag(tier)\n tier.rename(new)\n self.add_tier_tag(tier)\n return tier\n\n def sg_rule(self, **kwargs):\n r = dict()\n r['ip_protocol'] = kwargs.get('ip_protocol', None)\n r['from_port'] = kwargs.get('from_port', None)\n r['to_port'] = kwargs.get('to_port', None)\n r['grants'] = kwargs.get('grants', [])\n r['src_group'] = kwargs.get('src_group', None)\n\n if type(r['src_group']) == str:\n r['src_group'] = self.get_secgrps(ids=r['src_group'])[0]\n\n rule = boto.ec2.securitygroup.IPPermissions()\n rule.__dict__.update(r)\n return rule\n\n\n def get_secgrps(self, **kwargs):\n \"\"\"Returns a list of sec grp objects by region, selected by name, id\"\"\"\n names = kwargs.get('names', [])\n ids = kwargs.get('ids', [])\n filters = kwargs.get('filters', {})\n filters.update({'vpc_id': self.id})\n groups = self.connection.get_all_security_groups(group_ids=ids, filters=filters)\n if names:\n groups = [ g for g in groups if g.name in names ]\n return groups\n\n def create_secgrp(self, **kwargs):\n \"\"\"Create a security group associated with this VPC\"\"\"\n name = kwargs.get('name', None)\n description = kwargs.get('description', None)\n rules = kwargs.get('rules', [])\n if name:\n group = self.connection.create_security_group(name, description, vpc_id=self.id)\n for rule in rules:\n for grant in rule.grants:\n group.authorize(ip_protocol=rule.ip_protocol, from_port=rule.from_port,\n to_port=rule.to_port, cidr_ip=grant, src_group=rule.src_group)\n return group\n\n def update_secgrp(self, **kwargs):\n \"\"\"Update a security group associated with this VPC\"\"\"\n id = kwargs.get('id', None)\n group = kwargs.get('group', None)\n authorize = kwargs.get('authorize', [])\n revoke = kwargs.get('revoke', [])\n filters = {'vpc_id': self.id}\n\n if id:\n groups = self.connection.get_all_security_groups(group_ids=[id], filters=filters)\n group = groups[0] if groups else None\n\n for rule in authorize:\n # rule 'grants' are a list of boto.ec2.securitygroup.GroupOrCIDR. 
From what\n # I can tell, its either a single member list of an object with a group_id attr,\n # or a list of one or more objs with a cidr_ip attr.\n\n if rule.grants[0].group_id:\n # grant by group id\n for grant in rule.grants:\n self.connection.authorize_security_group(group_id=group.id, \n ip_protocol=rule.ip_protocol, from_port=rule.from_port,\n to_port=rule.to_port, src_security_group_group_id=grant.group_id)\n else:\n # grant by cidr_ip\n for grant in rule.grants:\n self.connection.authorize_security_group(group_id=group.id, \n ip_protocol=rule.ip_protocol, from_port=rule.from_port,\n to_port=rule.to_port, cidr_ip=grant.cidr_ip)\n\n for rule in revoke:\n if rule.grants[0].group_id:\n # grant by group id\n for grant in rule.grants:\n self.connection.revoke_security_group(group_id=group.id, \n ip_protocol=rule.ip_protocol, from_port=rule.from_port,\n to_port=rule.to_port, src_security_group_group_id=grant.group_id)\n else:\n # grant by cidr_ip\n for grant in rule.grants:\n self.connection.revoke_security_group(group_id=group.id, \n ip_protocol=rule.ip_protocol, from_port=rule.from_port,\n to_port=rule.to_port, cidr_ip=grant.cidr_ip)\n\n return group\n\n def delete_secgrp(self, **kwargs):\n \"\"\"Delete a security group associated with this VPC\"\"\"\n id = kwargs.get('id', None)\n if id:\n self.connection.delete_security_group(group_id=id)\n return True\n\n def get_tables(self, **kwargs):\n name = kwargs.get('name', None)\n tables = self.connection.get_all_route_tables(filters=[('vpc_id', self.id)])\n\n if name:\n for t in tables:\n if t.tags.get('Name') == name:\n return t\n else:\n return tables\n\n def create_table(self, **kwargs):\n name = kwargs.get('name', None)\n\n if name:\n table = self.connection.create_route_table(self.id)\n table.add_tag('Name', name)\n return table\n\n def delete_table(self, table=None, **kwargs):\n name = kwargs.get('name', None)\n id = kwargs.get('id', None)\n\n if name:\n table = self.get_tables(name=name)\n if table:\n id = table.id\n if id:\n self.connection.delete_route_table(id)\n\n def get_igw(self):\n igws = self.connection.get_all_internet_gateways(filters={'attachment.vpc-id': self.id})\n if len(igws) > 0:\n return igws[0]\n else:\n return None\n\n def add_igw(self):\n if not self.get_igw():\n igw = self.connection.create_internet_gateway()\n self.connection.attach_internet_gateway(igw.id, self.id)\n igw.add_tag('Name', self.get_name())\n return igw\n return False\n\n def run_instances(self, **kwargs):\n image_id = kwargs.setdefault('image_id', None)\n root_type = kwargs.setdefault('root_type', 'ebs')\n min_count = kwargs.setdefault('min_count', 1)\n max_count = kwargs.setdefault('max_count', 1)\n key_name = kwargs.setdefault('key_name', self.get_name())\n user_data = kwargs.setdefault('user_data', Config.userdata)\n instance_type = kwargs.setdefault('instance_type', 't2.micro')\n monitoring_enabled = kwargs.setdefault('monitoring_enabled', True)\n subnet_id = kwargs.setdefault('subnet_id', None)\n tier = kwargs.setdefault('tier', None)\n security_group_names = kwargs.setdefault('security_group_names', ['SSH'])\n security_group_ids = kwargs.setdefault('security_group_ids', [])\n instance_profile = kwargs.setdefault('instance_profile', None)\n instance_profile_name = kwargs.setdefault('instance_profile_name', None)\n instance_profile_arn = kwargs.setdefault('instance_profile_arn', None)\n ebs_optimized = kwargs.setdefault('ebs_optimized', False)\n tags = kwargs.setdefault('tags', {})\n dry_run = kwargs.setdefault('dry_run', False)\n\n # Pick an 
image ID\n if not image_id:\n images = self.region.get_images(root_type=root_type)\n if len(images) > 0:\n kwargs['image_id'] = images[-1].id # get latest image\n kwargs.pop('root_type')\n\n # use subnet_id if set, otherwise use tier, otherwise default to backend\n if not subnet_id:\n if not tier:\n tier = self.get_tiers(name='backend')\n kwargs['subnet_id'] = random.choice(list(tier.subnets)).id\n kwargs.pop('tier')\n\n # Security group selection\n if not security_group_ids:\n for s in self.get_secgrps(names=security_group_names):\n kwargs['security_group_ids'].append(s.id)\n kwargs.pop('security_group_names')\n\n # Set default tags, if unset.\n tags.setdefault('Type', 'Generic')\n tags.setdefault('Env', self.get_env())\n tags.setdefault('Owner', os.getlogin())\n tags.setdefault('Name', \"%s-%s\" % (tags['Owner'], tags['Type']))\n kwargs.pop('tags')\n\n # Select instance profile - if by name or arn, get Profile obj\n if instance_profile:\n kwargs['instance_profile_name'] = instance_profile.name\n kwargs['instance_profile_arn'] = instance_profile.arn\n else:\n if not instance_profile_name and not instance_profile_arn:\n # Hope there's a matching profile for the type tag.\n kwargs['instance_profile_name'] = \"%s%s-%s\" % (tags['Type'], 'Instance', self.get_env())\n kwargs.pop('instance_profile')\n\n # run the instance.\n reservation = self.region.ec2conn.run_instances(**kwargs)\n\n # Add tags\n for instance in reservation.instances:\n instance.add_tags(tags)\n\n return reservation\n\n","repo_name":"mattghali/veep","sub_path":"veep/VPC/vpc.py","file_name":"vpc.py","file_ext":"py","file_size_in_byte":12057,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"3303254598","text":"# -*- coding: UTF-8 -*-\nfrom abstractplugin import AbstractPlugin\nfrom models.tweetresponse import TweetResponse\nimport random\n\n\nclass Buzzwords(AbstractPlugin):\n registered_commands = ['#donger', '#smite', '#rekt', '#dealwithit', '#swag']\n # TODO: add image check to unit test for this\n def get_response(self, tweet_wrapper):\n command = tweet_wrapper.get_tweet_command()\n operator = command.get_command_operator()\n if operator == \"#smite\":\n response_text = \"Get smitten, scrub\"\n response = TweetResponse(command, response_text)\n response.set_image_loc('resources/smite.jpg')\n return response\n\n if operator == \"#rekt\":\n response_text = self.getRektText()\n response = TweetResponse(command, response_text)\n response.set_image_loc('resources/rekt.gif')\n return response\n\n if operator == \"#dealwithit\":\n response_text = \"deal with it\"\n response = TweetResponse(command, response_text)\n response.set_image_loc('resources/dealwithit.gif')\n return response\n\n if operator == \"#swag\":\n response_text = \"swag\"\n response = TweetResponse(command, response_text)\n response.set_image_loc('resources/swag.jpg')\n return response\n\n if operator == \"#donger\":\n response_text = self.getDongerText()\n response = TweetResponse(command, response_text)\n return response\n\n if operator == \"#toucan\":\n response_text = self.getToucanText()\n response = TweetResponse(command, response_text)\n return response\n\n rekt_list = [\"REKT\", \"REKTangle\", \"SHREKT\", \"REKT-it Ralph\", \"Total REKTall\", \"The Lord of the REKT\",\n \"The Usual SusREKTs\", \"North by NorthREKT\", \"REKT to the Future\", \"Once Upon a Time in the REKT\",\n \"The Good, the Bad, and the REKT \", \"Tyrannosaurus REKT\"]\n\n def getRektText(self):\n 
random.shuffle(self.rekt_list)\n choice_one = self.rekt_list[0]\n choice_two = self.rekt_list[1]\n response_text = u\"\\n☐\"\n response_text += \" Not REKT\"\n response_text += u\"\\n☑ \"\n response_text += choice_one\n response_text += u\"\\n☐ \"\n response_text += choice_two\n return response_text\n\n\n\n donger_lines = [\n u\"༼ ºل͟º༼ ºل͟º༼ ºل͟º༼ ºل͟º ༽ºل͟º ༽ºل͟º ༽YOU CAME TO THE WRONG DONGERHOOD༼ ºل͟º༼ ºل͟º༼ ºل͟º༼ ºل͟º ༽ºل͟º ༽ºل͟º ༽ \",\n u\" ༼ ºل͟º ༼ ºل͟º ༼ ºل͟º ༽ ºل͟º ༽ ºل͟º ༽ YOU PASTARINO'D THE WRONG DONGERINO ༼ ºل͟º ༼ ºل͟º ༼ ºل͟º ༽ ºل͟º ༽ ºل͟º ༽\",\n u\"༼ ºل͟º༼ ºل͟º༽ºل͟º ༽ YOU COPERINO FRAPPUCCIONO PASTARINO'D THE WRONG DONGERINO ༼ ºل͟º༼ ºل͟º༽ºل͟º ༽\",\n u\" ༼ ºل͟º༼ ºل͟º༼ ºل͟º༼ ºل͟º ༽ºل͟º ༽ºل͟º ༽You either die a DONG, or live long enough to become the DONGER༼ ºل͟º༼ ºل͟º༼ ºل͟º༼ ºل͟º ༽ºل͟º ༽ºل͟º ༽\",\n u\"༼ ಠل͟ರೃ༼ ಠل͟ರೃ༼ ಠل͟ರೃ༼ ಠل͟ರೃ ༽ಠل͟ರೃ ༽ಠل͟ರೃ ༽ YOU ARRIVED IN THE INCORRECT DONGERHOOD, SIR༼ ಠل͟ರೃ༼ ಠل͟ರೃ༼ ಠل͟ರೃ༼ ಠل͟ರೃ ༽ಠل͟ರೃ ༽ಠل͟ರೃ ༽ \",\n u\"ᕙ༼ຈل͜ຈ༽ᕗ. ʜᴀʀᴅᴇʀ, ʙᴇᴛᴛᴇʀ, ғᴀsᴛᴇʀ, ᴅᴏɴɢᴇʀ .ᕙ༼ຈل͜ຈ༽ᕗ \",\n u\"(ง ͠° ͟ل͜ ͡°)ง ᴍᴀsᴛᴇʀ ʏᴏᴜʀ ᴅᴏɴɢᴇʀ, ᴍᴀsᴛᴇʀ ᴛʜᴇ ᴇɴᴇᴍʏ (ง ͠° ͟ل͜ ͡°)ง \",\n u\"(ง ͠° ل͜ °)ง LET ME DEMONSTRATE DONGER DIPLOMACY (ง ͠° ل͜ °)ง\",\n u\"乁( ◔ ౪◔)ㄏ\",\n u\"(\\ ( ͠° ͟ل͜ ͡°) /) OUR DONGERS ARE RAZOR SHARP (\\ ( ͠° ͟ل͜ ͡°) /) \",\n u\"ヽ༼◥▶ل͜◀◤༽ノ RO RO RAISE YOUR DONGERS ヽ༼◥▶ل͜◀◤༽ノ \",\n u\"̿̿ ̿̿ ̿'̿'̵͇̿̿з=༼ ▀̿̿Ĺ̯̿̿▀̿ ̿ ༽=ε/̵͇̿̿/’̿’̿ ̿ ̿̿[} ̿ ̿ ̿ ̿^ Stop right there criminal scum! no one RIOTs on my watch. I'm confiscating your goods. now pay your fine, or it's off to jail. \",\n u\"̿̿ ̿̿ ̿̿ ̿'̿'̵͇̿̿з=༼ ▀̿̿Ĺ̯̿̿▀̿ ̿ ༽ YOU'RE UNDER ARREST FOR BEING CASUAL. COME OUT WITH YOUR DONGERS RAISED ̿̿ ̿̿ ̿̿ ̿'̿'̵͇̿̿з=༼ ▀̿̿Ĺ̯̿̿▀̿ ̿ ༽ \",\n u\"(ง'̀-'́)ง DONG OR DIE (ง'̀-'́)ง \",\n u\"ヽ༼ຈل͜ຈ༽ノ raise your dongers ヽ༼ຈل͜ຈ༽ノ \",\n u\"ヽ༼ຈل͜ຈ༽ノ VOICE OF AN ANGEL ヽ༼ຈل͜ຈ༽ノ \",\n u\"ヽ༼ຈل͜ຈ༽ノ LETS GET DONGERATED ヽ༼ຈل͜ຈ༽ノ \",\n u\"ヽ༼ຈل͜ຈ༽ノ RAISE YOUR BARNO ヽ༼ຈل͜ຈ༽ノ \",\n u\"ヽ༼ຈل͜ຈ༽ノ OJ poured and candle lit, with this chant i summon Kripp ヽ༼ຈل͜ຈ༽ノ \",\n u\" ☑ OJ poured ☑ Candle lit ☑ Summoning the Kripp ヽ༼ຈل͜ຈ༽ノ\",\n u\"ヽ༼ຈل͜O༽ノ ʀᴀɪs ᴜʀ ᴅᴀɢᴇʀᴏ ヽ༼ຈل͜___ຈ༽ノ \",\n u\"(ง ͠° ͟ʖ ͡°)งSuccubus release Kripp or taste our rage(ง ͠° ͟ʖ ͡°)ง \",\n u\"ノ(ಠ_ಠノ ) ʟᴏᴡᴇʀ ʏᴏᴜʀ ᴅᴏɴɢᴇʀs ノ(ಠ_ಠノ)\",\n u\"ヽ༼Ὸل͜ຈ༽ノ HOIST THY DONGERS ヽ༼Ὸل͜ຈ༽ノ \",\n u\"ヽ( ͡° ͜ʖ ͡°)ノ Kripp you are kinda like my dad, except you're always there for me. ヽ( ͡° ͜ʖ ͡°)ノ \",\n u\" █▄༼ຈل͜ຈ༽▄█ yeah i work out \",\n u\"༼ ºل͟º ༽ I AM A DONG ༼ ºل͟º ༽ \",\n u\"༼ ºل͟º༽ I DIDN'T CHOOSE THE DONGLIFE, THE DONGLIFE CHOSE ME ༼ ºل͟º༽ \",\n u\"༼ ºل͟º༽ NO ONE CARED WHO I WAS UNTIL I PUT ON THE DONG ༼ ºل͟º༽ \",\n u\"༼ ºººººل͟ººººº ༽ I AM SUPER DONG ༼ ºººººل͟ººººº ༽ \",\n u\"┌∩┐༼ ºل͟º ༽┌∩┐ SUCK MY DONGER ┌∩┐༼ ºل͟º ༽┌∩┐ \",\n u\"ζ༼Ɵ͆ل͜Ɵ͆༽ᶘ FINALLY A REAL DONG ζ༼Ɵ͆ل͜Ɵ͆༽ᶘ \",\n u\"<ᴍᴇssᴀɢᴇ ᴅᴏɴɢᴇʀᴇᴅ> \",\n u\"ヽ༼ʘ̚ل͜ʘ̚༽ノIS THAT A DONGER IN YOUR POCKET?ヽ༼ʘ̚ل͜ʘ̚༽ノ \",\n u\" ༼ ͡■ل͜ ͡■༽ OPPA DONGER STYLE ༼ ͡■ل͜ ͡■༽ \",\n u\"( ° ͜ ʖ °) REGI OP ( ° ͜ ʖ °) \",\n u\"(̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ IM DONG,JAMES DONG (̿▀̿ ̿Ĺ̯̿̿▀̿ ̿)̄ \",\n u\"(ง⌐□ل͜□)ง WOULD YOU HIT A DONGER WITH GLASSES (ง⌐□ل͜□)ง \",\n u\"ʕ•ᴥ•ʔ CUDDLE UR DONGERS ʕ•ᴥ•ʔ \",\n u\"ლ(́◉◞౪◟◉‵ლ) let me hold your donger for a while ლ(́◉◞౪◟◉‵ლ) \",\n u\"ヽ༼ຈل͜ຈ༽ง MY RIGHT DONG IS ALOT STRONGER THAN MY LEFT ONE ヽ༼ຈل͜ຈ༽ง\",\n u\"(✌゚∀゚)☞ May the DONG be with you! 
☚(゚ヮ゚☚) \",\n u\"(⌐■_■)=/̵͇̿̿/'̿'̿̿̿ ̿ ̿̿ ヽ༼ຈل͜ຈ༽ノ Keep Your Dongers Where i Can See Them \",\n u\"̿'̿'\\̵͇̿̿\\з=( ͠° ͟ʖ ͡°)=ε/̵͇̿̿/'̿̿ ̿ ̿ ̿ ̿ ̿ DUDE̿̿ ̿̿ ̿'̿'\\̵͇̿̿\\з=( ͠° ͟ʖ ͡°)=ε/̵͇̿̿/'̿̿ ̿ ̿ ̿ ̿ ̿ PLEASE NO COPY PASTERONI MACORONI DONGERIN \",\n u\"( ͝° ͜ʖ͡°) Mom always said my donger was big for my age ( ͝° ͜ʖ͡°)\",\n u\"(/゚Д゚)/ WE WANT SPELUNKY (/゚Д゚)/\",\n u\"─=≡Σ((( つ◕ل͜◕)つ sᴜᴘᴇʀ ᴅᴏɴɢ \",\n u\"(✌゚∀゚)☞ POINT ME TO THE DONGERS (✌゚∀゚)☞ \",\n u\"ᕙ( ^ₒ^ c) 〇〇〇〇ᗩᗩᗩᗩᕼᕼ ᕙ( ^ₒ^ c)\",\n u\"ヽ༼ຈل͜ຈ༽ノ ArcheAge or BEES ヽ̛͟͢༼͝ຈ͢͠لຈ҉̛༽̨҉҉ノ̨\",\n u\" ୧༼ಠ益ಠ༽୨ MRGLRLRLR ୧༼ಠ益ಠ༽୨\",\n u\"┏(-_-)┓┏(-_-)┛┗(-_- )┓┗(-_-)┛┏(-_-)┓ ┏(-_-)┛┗(-_- )┓┗(-_-)┛┏(-_-)┓┏(-_-)┛┗(-_- )┓┗(-_-)┛┏(-_-)┓┏(-_-)┛┗(-_- )┓┗(-_-)┛┏(-_-)┓ \",\n u\"ヽ༼ຈل͜ຈ༽ノITS A HARD DONG LIFE ヽ༼ຈل͜ຈ༽ノ\",\n u\"ヽ༼ຈل͜ຈ༽ノMOLLYヽ༼ຈل͜ຈ༽ノ\",\n u\"༼ つ ຈل͜ຈ ༽つ GIVE MOLLY ༼ つ ຈل͜ຈ ༽つ\",\n u\" †ヽ༼ຈل͜ຈ༽ノ† By the power of donger I summon MOLLY †ヽ༼ຈل͜ຈ༽ノ† \",\n u\"ヽ༼ຈل͜ຈ༽ノTAKING A DUMPヽ༼ຈل͜ຈ༽ノ \",\n u\"ヽ༼ຈل͜ຈ༽ノ WHAT DOESNT KILL ME ONLY MAKES ME DONGER ᕙ༼ຈل͜ຈ༽ᕗ \",\n u\"ヽ༼ຈل͜ຈ༽ノ FOREVER DONG ヽ༼ຈل͜ຈ༽ノ \",\n u\"[̲̅$̲̅(̲̅ ͡° ͜ʖ ͡°̲̅)̲̅$̲̅] Mo' money, mo' Dongers [̲̅$̲̅(̲̅ ͡° ͜ʖ ͡°̲̅)̲̅$̲̅] \",\n u\"༼ᕗຈل͜ຈ༽ᕗ Drop Bows on 'em ༼ᕗຈل͜ຈ༽ᕗ \",\n u\"Ѱζ༼ᴼل͜ᴼ༽ᶘѰ HIT IT WITH THE FORK Ѱζ༼ᴼل͜ᴼ༽ᶘѰ \",\n u\"Ψ༼ຈل͜ຈ༽Ψ hit it with the fork Ψ༼ຈل͜ຈ༽Ψ\"\n ]\n\n def getDongerText(self):\n response_text = random.choice(self.donger_lines)\n return response_text\n\n toucan = [u\"\"\"░░░░░░░░▄▄▄▀▀▀▄▄███▄\n░░░░░▄▀▀░░░░░░░▐░▀██▌\n░░░▄▀░░░░▄▄███░▌▀▀░▀█\n░░▄█░░▄▀▀▒▒▒▒▒▄▐░░░░█▌\n░▐█▀▄▀▄▄▄▄▀▀▀▀▌░░░░░▐█▄\n░▌▄▄▀▀░░░░░░░░▌░░░░▄███████\"\"\"];\n\n def getToucanText(self):\n response_text = self.toucan;\n return response_text;","repo_name":"NavjotPanesar/DogeBot","sub_path":"plugins/plugin_buzzwords.py","file_name":"plugin_buzzwords.py","file_ext":"py","file_size_in_byte":9623,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"27878454537","text":"import sys\r\nimport socket\r\n\r\nHOST = '127.0.0.1' # Standard loop-back interface address (localhost)\r\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\r\n\r\nwhile True:\r\n\twith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n\t\t# Allow the address to be reused when re-binding between client sessions\r\n\t\ts.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\t\ts.bind((HOST, PORT))\r\n\t\t\r\n\t\tprint('Start listen')\r\n\t\ts.listen()\r\n\t\tconn, addr = s.accept()\r\n\r\n\t\twith conn:\r\n\t\t\tprint('Connected by', addr)\r\n\t\t\twhile True:\r\n\t\t\t\tinput_data = conn.recv(1024)\r\n\t\t\t\tif not input_data:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\t# recv() returns bytes, so decode before concatenating with strings\r\n\t\t\t\toutput_data = \"Received: \" + input_data.decode() + \" - service\" \r\n\t\t\t\t\t\r\n\t\t\t\tconn.sendall(output_data.encode())","repo_name":"mateusz-gosciniak/my-python-scripts","sub_path":"OCR Rekondis/new/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"33630524491","text":"from firestore.db import Connection\nfrom firestore.errors import (\n InvalidDocumentError,\n UnknownFieldError,\n ValidationError,\n OfflineDocumentError,\n)\nfrom google.cloud.firestore_v1 import DocumentReference\n\n\n# from firestore.datatypes.base import Base\nSTOP_WORDS = (\"the\", \"is\")\nDOT = \".\"\nSLASH = \"/\"\nUID = \"{}/{}\"\nMETADATA = (\"__module__\", \"__doc__\", \"__collection__\", \"__private__\", \"__exclude__\")\n\n\nclass Cache(dict):\n \"\"\"\n A dict subclass that makes lookups and writes\n swift and fast by allowing attribute-style access\n in place of the usual item-access notation\n \"\"\"\n\n def 
__init__(self, *args, **kwargs):\n self._pk = False\n dict.__init__(self, *args, **kwargs)\n\n def __getattr__(self, key):\n # less error prone\n return self[key]\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def add(self, key, value):\n self.__setattr__(key, value)\n\n\nclass Collection(object):\n \"\"\"Collections are recommended to be used when saving objects\n to firestore. They ensure a schema exists under which data can be\n stored i.e. reducing the error of typing name and naame across\n two different documents.\n\n They also help to group together commonly used actions across documents\n i.e. setting and saving, querying, and updating document instances.\n \"\"\"\n\n # If child documents don't specify a collection\n # then default their location to the root firestore\n # collection\n\n __collection__ = None\n __schema__ = None\n\n @classmethod\n def __autospector__(cls, *args, **kwargs):\n return {k: v for k, v in cls.__dict__.items() if k not in METADATA}\n\n def __deref__(self, doc_ref):\n \"\"\"\n Deref string based document references into classes\n upon instance assignment by looking up the doc_ref\n first in the globals of this module then walking\n up the directory tree until an instance is found\n or an error is thrown\n \"\"\"\n self.get(doc_ref)\n\n def __eq__(self, comparator):\n try:\n comparator.__loaded__\n except:\n return False\n\n if not self.__loaded__:\n return False\n a = self.__loaded__.get().to_dict() # pylint: disable=no-member\n b = comparator.__loaded__.get().to_dict()\n return a == b\n \n def __getattr__(self, key):\n return self._data[key]\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Root document holding all the utility methods\n needed for persistence to cloud firestore\n \"\"\"\n\n # This is the internal cache that holds all the field\n # values to be saved on google cloud firestore\n self._uniques = {}\n self._data = Cache()\n self._parent = self.__collection__\n self.__loaded__ = False\n self.__mutated__ = True\n\n # Similar to the ._data instance cache. 
However this\n # is a collection of all descriptor instances\n # that exist on this document class.\n # Useful for pk, unique, required and other document\n # level validation\n\n self.fields_cache = self.__autospector__()\n \n for k in kwargs:\n if k in (\"_pk\", \"_id\"):\n self._data.add(k, kwargs.get(k))\n continue\n if (\n k not in self.fields_cache.keys()\n ): # on the fly access to obviate the need for gc\n raise UnknownFieldError(\n f\"Key {k} not found in document {type(self).__name__}\"\n )\n # get the type of fields_cache and apply the rules to each\n # value in the dict\n self._data.add(k, self.fields_cache.get(k).cast(self, kwargs.get(k)))\n\n def add_field(self, field, value):\n \"\"\"\n Add a field to this instance's data for persistence after\n taking into cognizance all the validations present on the field\n \"\"\"\n self._data.add(field._name, value)\n\n @classmethod\n def bases(cls):\n _b = list(cls.__bases__)\n for __b in _b:\n _b.extend(__b.__bases__)\n return _b\n\n @property\n def collection(self):\n \"\"\"\n Return the class variable\n \"\"\"\n cls = type(self)\n if not cls.__collection__:\n return cls.__name__.lower()\n return type(self).__collection__.replace(DOT, SLASH)\n\n @collection.setter\n def collection(self, value):\n \"\"\"\n Note this changes the class variable\n \"\"\"\n type(self).__collection__ = value.replace(DOT, SLASH)\n\n @classmethod\n def count(cls, **kwargs):\n \"\"\"\n Count the number of records that exist on firestore\n up until 5000 then return 5k+ if records\n exceed that number. The implmentation of this\n method might (will!) change\n \"\"\"\n pass\n\n @property\n def dbpath(self):\n if self.__loaded__:\n return self.__loaded__.path # pylint: disable=no-member\n elif self._pk:\n return UID.format(self.collection, self._pk.value)\n else:\n raise OfflineDocumentError(\"\")\n\n def delete(self):\n \"\"\"\n Delete this account by using it's primary key\n or a unique identifier\n \"\"\"\n conn = Connection.get_connection()\n conn.delete(self)\n\n @classmethod\n def get(cls, document_id):\n \"\"\"\n Get a document by its unique identifier on firebase\n \"\"\"\n conn = Connection.get_connection()\n return conn.get(cls, UID.format(cls().collection, document_id))\n\n def get_field(self, field):\n \"\"\"\n Get a field form the internal _data store of field values\n \"\"\"\n return self._data.get(field._name)\n\n @classmethod\n def get_json_data(cls, exclude=None, minimize=True):\n \"\"\"\n Get json representation of this document.\n\n If there is data from the server then the last fetched\n data is used otherwise the untethered version of the data\n is used to create the json document. To ensure you get\n the latest data refresh the document.\n\n\n @exclude: (list) A list of key names to additionally\n exclude from the returned json data. 
This is merged\n with __exclude__ if present\n\n @minimize: (bool) A boolean (True/False) value depicting\n if references should be expanded into full blown JSON objects\n or left as uids.\n \"\"\"\n pass\n\n @classmethod\n def get_json_schema(cls):\n \"\"\"\n Get a json schema of this document with datatypes and required\n status\n \"\"\"\n if cls.__schema__:\n return cls.__schema__\n\n @classmethod\n def find(cls, *args, **kwargs):\n \"\"\"\n Find a document using the keyward value pairs and limit to\n 20 results if no limit key is passed in\n \"\"\"\n conn = Connection.get_connection()\n return conn.find(cls, *args, **kwargs)\n\n def load_json_data(self, json_data):\n pass\n\n def persist(self):\n \"\"\"Save changes made to this document and any children of this\n document to cloud firestore\n \"\"\"\n pass\n\n @property\n def pk(self):\n return self._data.get(self._data._pk)\n\n @pk.setter\n def pk(self, value):\n \"\"\"\n Sets what field is the pk\n \"\"\"\n # Only one field can have the pk attribute\n if self._data._pk:\n raise InvalidDocumentError(\n f\"Duplicate primary key `{value._name}` assigned on document \"\n f\"with existing pk `{self._data._pk}`\"\n )\n if isinstance(value, DocumentReference):\n self._data._pk = \"_id\"\n self._data.add(\"_id\", value.id)\n else:\n self._data._pk = value._name\n\n # Document instances private copy of the primary key field\n # instance for private limited use i.e. in firestore\n # lookup query to match pk name with value\n self._pk = value\n\n def _presave(self):\n \"\"\"\n Validates inputs and ensures all required fields and other\n constraints are present before the save operation is called\n \"\"\"\n\n for k in self.fields_cache:\n # get a local copy of the field instance\n f = self.fields_cache.get(k)\n\n # get the value saved in the local data cache\n v = self._data.get(k)\n\n if not v:\n if f.default:\n self._data.add(k, f.default)\n if callable(f.default):\n self._data.add(k, f.default())\n elif f.required or f.pk:\n raise ValidationError(\n f\"{f._name} is a required field of {type(self).__name__}\"\n )\n\n def save(self):\n \"\"\"\n Save changes made to document to cloud firestore.\n \"\"\"\n if not self.__mutated__:\n return\n self._presave()\n conn = Connection.get_connection()\n res = conn.post(self)\n self.__mutated__ = False\n return res\n\n @classmethod\n def search(cls, query_string, compound_search=False):\n \"\"\"\n Search for a document using text values. Note this\n is not supported locally by firebase and this library\n uses a read hack to implement text search.\n It is production ready but your mileage might vary.\n\n @param: query_string {str}\n --------------------------\n This is the text data, search text, or query to use\n as input for the actual search to be done on firestore\n\n @param: compound_search {bool}\n ------------------------------\n If compound search is enabled then the search terms\n i.e. text used for lookup will be flagged as compound\n before a search is submitted.\n This means all matching documents must have all the words\n in the search text before it returns.\n e.g. 
red car - only documents with both red and car will be\n returned, documents with only red or only car will\n be ignored\n\n @return: results {firestore.db.result.Results}\n ----------------------------------------------\n A collection of traversable result documents limited by\n the paginate field which maxes out at 100\n \"\"\"\n pass\n\n def to_firestore_dict(self):\n \"\"\"\n Convert this object into a firestore update compatible\n dict i.e. nested maps have root elements with the key\n document.nested\n \"\"\"\n pass\n\n def transaction(self):\n \"\"\"\n Perform a transaction i.e. persist all changes or roll back\n entire transaction\n \"\"\"\n pass\n\n @property\n def uniques(self):\n \"\"\"\n Unique fields only hold true if the value is not empty\n i.e. null.\n To prevent null mark the field as required, only fields\n that have a value will be used for the unique evaluation\n \"\"\"\n return self._uniques\n\n @uniques.setter\n def uniques(self, values):\n k, v = values\n self._uniques[k] = v\n","repo_name":"workenvoy/firestore","sub_path":"firestore/containers/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"1934596054","text":"import os\nimport shutil\nimport bpy\nimport zlib\nimport struct\nfrom io_scene_gltf2.blender.exp import gltf2_blender_get\n\n\ndef create_image_file(context, blender_image, dst_path, file_format):\n \"\"\"Create JPEG or PNG file from a given Blender image.\"\"\"\n # Check, if source image exists e.g. does not exist if image is packed.\n file_exists = 1\n try:\n src_path = bpy.path.abspath(blender_image.filepath, library=blender_image.library)\n file = open(src_path)\n except IOError:\n file_exists = 0\n else:\n file.close()\n\n if file_exists == 0:\n # Image does not exist on disk ...\n blender_image.filepath = dst_path\n # ... so save it.\n blender_image.save()\n\n elif file_format == blender_image.file_format:\n # Copy source image to destination, keeping original format.\n\n src_path = bpy.path.abspath(blender_image.filepath, library=blender_image.library)\n\n # Required for comapre.\n src_path = src_path.replace('\\\\', '/')\n dst_path = dst_path.replace('\\\\', '/')\n\n # Check that source and destination path are not the same using os.path.abspath\n # because bpy.path.abspath seems to not always return an absolute path\n if os.path.abspath(dst_path) != os.path.abspath(src_path):\n shutil.copyfile(src_path, dst_path)\n\n else:\n # Render a new image to destination, converting to target format.\n\n # TODO: Reusing the existing scene means settings like exposure are applied on export,\n # which we don't want, but I'm not sure how to create a new Scene object through the\n # Python API. 
See: https://github.com/KhronosGroup/glTF-Blender-Exporter/issues/184.\n\n tmp_file_format = context.scene.render.image_settings.file_format\n tmp_color_depth = context.scene.render.image_settings.color_depth\n\n context.scene.render.image_settings.file_format = file_format\n context.scene.render.image_settings.color_depth = '8'\n blender_image.save_render(dst_path, context.scene)\n\n context.scene.render.image_settings.file_format = tmp_file_format\n context.scene.render.image_settings.color_depth = tmp_color_depth\n\n\ndef create_image_data(context, export_settings, blender_image, file_format):\n \"\"\"Create JPEG or PNG byte array from a given Blender image.\"\"\"\n if blender_image is None:\n return None\n\n if file_format == 'PNG':\n return _create_png_data(blender_image)\n else:\n return _create_jpg_data(context, export_settings, blender_image)\n\n\ndef _create_jpg_data(context, export_settings, blender_image):\n \"\"\"Create a JPEG byte array from a given Blender image.\"\"\"\n uri = gltf2_blender_get.get_image_uri(export_settings, blender_image)\n path = export_settings['gltf_filedirectory'] + uri\n\n create_image_file(context, blender_image, path, 'JPEG')\n\n jpg_data = open(path, 'rb').read()\n os.remove(path)\n\n return jpg_data\n\n\ndef _create_png_data(blender_image):\n \"\"\"Create a PNG byte array from a given Blender image.\"\"\"\n width, height = blender_image.size\n\n buf = bytearray([int(channel * 255.0) for channel in blender_image.pixels])\n\n #\n # Taken from 'blender-thumbnailer.py' in Blender.\n #\n\n # reverse the vertical line order and add null bytes at the start\n width_byte_4 = width * 4\n raw_data = b\"\".join(\n b'\\x00' + buf[span:span + width_byte_4] for span in range((height - 1) * width * 4, -1, - width_byte_4))\n\n def png_pack(png_tag, data):\n chunk_head = png_tag + data\n return struct.pack(\"!I\", len(data)) + chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))\n\n return b\"\".join([\n b'\\x89PNG\\r\\n\\x1a\\n',\n png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)),\n png_pack(b'IDAT', zlib.compress(raw_data, 9)),\n png_pack(b'IEND', b'')])\n\n","repo_name":"gamer325434354/BlenderXR","sub_path":"blender/release/scripts/addons/io_scene_gltf2/blender/com/gltf2_blender_image_util.py","file_name":"gltf2_blender_image_util.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"73"} +{"seq_id":"38575067220","text":"#! 
/usr/bin/python\nimport caffe\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\ndef draw(batchfile, start, end, ext, outdir):\n batch = caffe.proto.caffe_pb2.DatumVector()\n batch.ParseFromString(open(batchfile).read())\n batch = [caffe.io.datum_to_array(b) for b in batch.datums]\n for i, b in zip(range(start,end), batch[start:end]):\n for c in range(b.shape[0]):\n plt.imshow(b[c,:,:], cmap='gray')\n plt.savefig(outdir+ '/' + str(i) + '_' + str(c) + '_' + ext + '.png')\n\nif __name__ == '__main__':\n draw(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), sys.argv[4], sys.argv[5])\n\n","repo_name":"leinxx/SAR-DNN","sub_path":"sar_dnn/visualize/draw_batch.py","file_name":"draw_batch.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16871991354","text":"\nimport urllib \nimport urllib2\nimport cookielib\nimport re\nimport threading\nimport os\nimport datetime\nfind=0\nclass SDU_Spider: \n # Declare the relevant attributes \n def __init__(self,a): \n self.loginUrl = 'http://202.115.47.141/loginAction.do' # Login URL\n self.resultUrl = 'http://202.115.47.141/xjInfoAction.do?oper=xjxx' \n self.cookieJar = cookielib.CookieJar() # Initialize a CookieJar to handle cookie data\n self.postdata=urllib.urlencode(a) # Data to POST\n \n self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookieJar))\n def sdu_init(self):\n # Initialize the connection and obtain the cookie\n myRequest = urllib2.Request(url = self.loginUrl,data = self.postdata) # Build a custom request\n result = self.opener.open(myRequest) # Visit the login page to obtain the required cookie values\n result = self.opener.open(self.resultUrl) # Visit the records page to fetch the data\n # Save the returned content if it looks like a successful login\n con=result.read()\n if len(con)>3000:\n \n f=file('F:/python2/content/'+str(self.postdata)+'.html','w')\n f.write(con)\n f.close()\n global find\n find=1\n \ndef t(xuehao,a,b):\n \n for i in range(1,32):\n i=str(i).zfill(2)\n for ii in range(a,b):\n ii=str(ii).zfill(4)\n dic={'zjh':str(xuehao),'mm':i+ii}\n mySpider=SDU_Spider(dic)\n try:\n mySpider.sdu_init()\n except:\n continue\n \n\ndef hh(xuehao):\n t1=threading.Thread (target=t,args=(xuehao,0,1000))\n t2=threading.Thread (target=t,args=(xuehao,1000,2000))\n t3=threading.Thread (target=t,args=(xuehao,2000,3000))\n t4=threading.Thread (target=t,args=(xuehao,3000,4000))\n t5=threading.Thread (target=t,args=(xuehao,4000,5000))\n t6=threading.Thread (target=t,args=(xuehao,5000,6000))\n t7=threading.Thread (target=t,args=(xuehao,6000,7000))\n t8=threading.Thread (target=t,args=(xuehao,7000,8000))\n t9=threading.Thread (target=t,args=(xuehao,8000,9000))\n t10=threading.Thread(target=t,args=(xuehao,9000,10000))\n threads=[t1,t2,t3,t4,t5,t6,t7,t8,t9,t10]\n starttime = datetime.datetime.now()\n for th in threads:\n \n th.setDaemon(True)\n th.start()\n while True :\n if find==1:\n \n endtime = datetime.datetime.now()\n print (endtime - starttime)\n return\n \nf = open(\"test.txt\", \"r\") \nwhile True: \n line = f.readline() \n if line: \n \n line=line.strip()\n \n hh(line)\n print (line+' finish')\n \n find=0\n else: \n break\n\n\n\n","repo_name":"xcm153/test","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5860425170","text":"from abc import ABC, abstractmethod\nfrom datetime import datetime\nimport logging\nfrom typing import Any, List\n\n\nlogger = logging.getLogger(\"ssf\")\n\nfrom ssf.utils import lookup_dict\nfrom ssf.version import VERSION\nfrom ssf.config import 
SSFConfig\nfrom ssf.results import SSFExceptionInternalError\n\n\nclass TemplateSymbolParser(ABC):\n @abstractmethod\n def parse(self, symbol_id: str, indent: int = 0) -> str:\n \"\"\"\n Parameters:\n symbol_id (str): Text symbol that should be replaced with some other text.\n indent (int): A count of whitespace prefixing the line containing the symbol; this can be\n used to maintain block indentation if the replacement text is multi-line.\n Returns:\n Replacement text as a string, if the symbol_id is known.\n None if symbol_id not known.\n \"\"\"\n\n\nclass ConfigSymbolParser(TemplateSymbolParser):\n def __init__(self, ssf_config: SSFConfig):\n self.ssf_config = ssf_config\n\n def parse(self, symbol_id: str, indent: int = 0) -> str:\n # Where symbols have syntax \".... {{config.api.name}} ....\"\n # Will be replaced with lookup into the config namespace.\n if symbol_id.find(\"config.\") == 0:\n return lookup_dict(self.ssf_config, symbol_id, namespaced=True)\n return None\n\n\nclass AutogeneratedSymbolParser(TemplateSymbolParser):\n def parse(self, symbol_id: str, indent: int = 0) -> str:\n # Where symbol has syntax \".... {{autogenerated}} ....\"\n # Will be replaced with comments indicating the file is auto-generated (with timestamp and version).\n if symbol_id.find(\"autogenerated\") == 0:\n timestamp = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n return (\n f\"# ** DO NOT EDIT **\\n\"\n f\"# Auto-generated by Simple Server Framework\\n\"\n f\"# Version: {VERSION}\\n\"\n f\"# Timestamp: {timestamp}\"\n )\n return None\n\n\ndef expand_template(\n ssf_config: SSFConfig,\n template_filename: str,\n expanded_filename: str,\n custom_parsers: List[TemplateSymbolParser] = [],\n):\n with open(template_filename, \"rt\") as template:\n with open(expanded_filename, \"wt\") as output:\n\n parsers = []\n\n # Add built-in/common parsers\n parsers.append(AutogeneratedSymbolParser())\n parsers.append(ConfigSymbolParser(ssf_config))\n\n # Add caller's custom parsers\n if custom_parsers:\n parsers += custom_parsers\n\n for line in template:\n\n # Parse symbols.\n while True:\n sym_begin = line.find(\"{{\")\n if sym_begin < 0:\n break\n # find(\"}}\") returns -1 when the closing brackets are missing,\n # so check it before computing sym_end.\n close = line[sym_begin + 2 :].find(\"}}\")\n if close < 0:\n raise SSFExceptionInternalError(\n f\"Failed to find closing brackets for symbol beginning at position {sym_begin} in {template_filename}, line {line}\"\n )\n sym_end = sym_begin + 2 + close + 1\n symbol = line[sym_begin + 2 : sym_end - 1]\n indent = len(line) - len(line.lstrip())\n\n logger.debug(f\"Parsing {symbol}\")\n\n insert_lines = None\n for parser in parsers:\n insert_lines = parser.parse(symbol, indent)\n if insert_lines is not None:\n break\n\n if insert_lines is None:\n raise SSFExceptionInternalError(\n f\"Failed to replace template symbol {symbol} at position {sym_begin} in {template_filename}, line {line}\"\n )\n\n logger.debug(f\"insert_lines={insert_lines}\")\n\n new_line = line[0:sym_begin] + insert_lines + line[sym_end + 1 :]\n line = new_line\n\n if len(line.strip()) == 0:\n output.write(\"\\n\")\n else:\n output.write(line)\n","repo_name":"graphcore/simple-server-framework","sub_path":"ssf/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"26997054361","text":"# -*- coding:utf-8 -*-\nimport time\n'''\nProblem: Replace Spaces\nImplement a function that replaces every space in a string with \"%20\".\nFor example, the string We Are Happy becomes We%20Are%20Happy after replacement.\n'''\n\nclass Solution(object):\n\n def func1(self, *args, **kwargs):\n '''\n 
Approach 1: split first, then join (note: split() collapses runs of whitespace)\n '''\n return '%20'.join(args[0].split())\n\n def func2(self, *args, **kwargs):\n '''\n Approach 2: replace directly\n '''\n return args[0].replace(' ','%20')\n\n def func3(self, *args, **kwargs):\n '''\n Approach 3:\n 1. Compute the length of the source character array and count the spaces.\n 2. New array length = source length + 2 * number of spaces.\n 3. Traverse the new array from back to front, copying characters with two moving indexes.\n '''\n str=args[0]\n str = list(str)\n\n count=0\n for i in str:\n if i==' ':\n count+=1\n p1=len(str)-1\n\n str.extend([None]*(count*2))\n p2=len(str)-1\n\n while p1>=0:\n if str[p1]==' ':\n for i in ['0', '2', '%']:\n str[p2] = i\n p2 -= 1\n else:\n str[p2] = str[p1]\n p2 -= 1\n p1 -= 1\n return ''.join(str)\n\n def main(self, *args, **kwargs):\n tic=time.time()\n print(self.func1(*args, **kwargs))\n toc=time.time()\n print('func 1 time:%s s'%(toc-tic))\n tic = time.time()\n print(self.func2(*args, **kwargs))\n toc = time.time()\n print('func 2 time:%s s' % (toc - tic))\n tic = time.time()\n print(self.func3(*args, **kwargs))\n toc = time.time()\n print('func 3 time:%s s' % (toc - tic))\n\nif __name__ == '__main__':\n str='We Are Happy'\n Solution().main(str)","repo_name":"DVampire/offer","sub_path":"offer_05_01.py","file_name":"offer_05_01.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12055325777","text":"import os\nimport pickle\nimport random\n\nfrom deepsnap.graph import Graph as DSGraph\nfrom deepsnap.batch import Batch\nfrom deepsnap.dataset import GraphDataset, Generator\nimport networkx as nx\nimport numpy as np\nfrom sklearn.manifold import TSNE\nimport torch\nimport torch.multiprocessing as mp\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch_geometric.data import DataLoader\nfrom torch.utils.data import DataLoader as TorchDataLoader\nfrom torch_geometric.datasets import TUDataset, PPI, QM9\nimport torch_geometric.utils as pyg_utils\nimport torch_geometric.nn as pyg_nn\nfrom tqdm import tqdm\nimport queue\nimport scipy.stats as stats\n\nfrom common import combined_syn\nfrom common import feature_preprocess\nfrom common import utils\n\ndef load_dataset(name):\n \"\"\" Load real-world datasets, available in PyTorch Geometric.\n\n Used as a helper for DiskDataSource.\n \"\"\"\n task = \"graph\"\n if name == \"enzymes\":\n dataset = TUDataset(root=\"/tmp/ENZYMES\", name=\"ENZYMES\")\n elif name == \"proteins\":\n dataset = TUDataset(root=\"/tmp/PROTEINS\", name=\"PROTEINS\")\n elif name == \"cox2\":\n dataset = TUDataset(root=\"/tmp/cox2\", name=\"COX2\")\n elif name == \"aids\":\n dataset = TUDataset(root=\"/tmp/AIDS\", name=\"AIDS\")\n elif name == \"reddit-binary\":\n dataset = TUDataset(root=\"/tmp/REDDIT-BINARY\", name=\"REDDIT-BINARY\")\n elif name == \"imdb-binary\":\n dataset = TUDataset(root=\"/tmp/IMDB-BINARY\", name=\"IMDB-BINARY\")\n elif name == \"firstmm_db\":\n dataset = TUDataset(root=\"/tmp/FIRSTMM_DB\", name=\"FIRSTMM_DB\")\n elif name == \"dblp\":\n dataset = TUDataset(root=\"/tmp/DBLP_v1\", name=\"DBLP_v1\")\n elif name == \"ppi\":\n dataset = PPI(root=\"/tmp/PPI\")\n elif name == \"qm9\":\n dataset = QM9(root=\"/tmp/QM9\")\n elif name == \"atlas\":\n dataset = [g for g in nx.graph_atlas_g()[1:] if nx.is_connected(g)]\n if task == \"graph\":\n train_len = int(0.8 * len(dataset))\n train, test = [], []\n dataset = list(dataset)\n random.shuffle(dataset)\n has_name = hasattr(dataset[0], \"name\")\n for i, graph in tqdm(enumerate(dataset)):\n if not type(graph) == nx.Graph:\n if has_name: del graph.name\n graph = 
pyg_utils.to_networkx(graph).to_undirected()\n if i < train_len:\n train.append(graph)\n else:\n test.append(graph)\n return train, test, task\n\nclass DataSource:\n def gen_batch(batch_target, batch_neg_target, batch_neg_query, train):\n raise NotImplementedError\n\nclass OTFSynDataSource(DataSource):\n \"\"\" On-the-fly generated synthetic data for training the subgraph model.\n\n At every iteration, new batch of graphs (positive and negative) are generated\n with a pre-defined generator (see combined_syn.py).\n\n DeepSNAP transforms are used to generate the positive and negative examples.\n \"\"\"\n def __init__(self, max_size=29, min_size=5, n_workers=4,\n max_queue_size=256, node_anchored=False):\n self.closed = False\n self.max_size = max_size\n self.min_size = min_size\n self.node_anchored = node_anchored\n self.generator = combined_syn.get_generator(np.arange(\n self.min_size + 1, self.max_size + 1))\n\n def gen_data_loaders(self, size, batch_size, train=True,\n use_distributed_sampling=False):\n loaders = []\n for i in range(2):\n dataset = combined_syn.get_dataset(\"graph\", size // 2,\n np.arange(self.min_size + 1, self.max_size + 1))\n sampler = torch.utils.data.distributed.DistributedSampler(\n dataset, num_replicas=hvd.size(), rank=hvd.rank()) if \\\n use_distributed_sampling else None\n loaders.append(TorchDataLoader(dataset,\n collate_fn=Batch.collate([]), batch_size=batch_size // 2 if i\n == 0 else batch_size // 2,\n sampler=sampler, shuffle=False))\n loaders.append([None]*(size // batch_size))\n return loaders\n\n def gen_batch(self, batch_target, batch_neg_target, batch_neg_query,\n train):\n def sample_subgraph(graph, offset=0, use_precomp_sizes=False,\n filter_negs=False, supersample_small_graphs=False, neg_target=None,\n hard_neg_idxs=None):\n if neg_target is not None: graph_idx = graph.G.graph[\"idx\"]\n use_hard_neg = (hard_neg_idxs is not None and graph.G.graph[\"idx\"]\n in hard_neg_idxs)\n done = False\n n_tries = 0\n while not done:\n if use_precomp_sizes:\n size = graph.G.graph[\"subgraph_size\"]\n else:\n if train and supersample_small_graphs:\n sizes = np.arange(self.min_size + offset,\n len(graph.G) + offset)\n ps = (sizes - self.min_size + 2) ** (-1.1)\n ps /= ps.sum()\n size = stats.rv_discrete(values=(sizes, ps)).rvs()\n else:\n d = 1 if train else 0\n size = random.randint(self.min_size + offset - d,\n len(graph.G) - 1 + offset)\n start_node = random.choice(list(graph.G.nodes))\n neigh = [start_node]\n frontier = list(set(graph.G.neighbors(start_node)) - set(neigh))\n visited = set([start_node])\n while len(neigh) < size:\n new_node = random.choice(list(frontier))\n assert new_node not in neigh\n neigh.append(new_node)\n visited.add(new_node)\n frontier += list(graph.G.neighbors(new_node))\n frontier = [x for x in frontier if x not in visited]\n if self.node_anchored:\n anchor = neigh[0]\n for v in graph.G.nodes:\n graph.G.nodes[v][\"node_feature\"] = (torch.ones(1) if\n anchor == v else torch.zeros(1))\n #print(v, graph.G.nodes[v][\"node_feature\"])\n neigh = graph.G.subgraph(neigh)\n if use_hard_neg and train:\n neigh = neigh.copy()\n if random.random() < 1.0 or not self.node_anchored: # add edges\n non_edges = list(nx.non_edges(neigh))\n if len(non_edges) > 0:\n for u, v in random.sample(non_edges, random.randint(1,\n min(len(non_edges), 5))):\n neigh.add_edge(u, v)\n else: # perturb anchor\n anchor = random.choice(list(neigh.nodes))\n for v in neigh.nodes:\n neigh.nodes[v][\"node_feature\"] = (torch.ones(1) if\n anchor == v else torch.zeros(1))\n\n 
if (filter_negs and train and len(neigh) <= 6 and neg_target is\n not None):\n matcher = nx.algorithms.isomorphism.GraphMatcher(\n neg_target[graph_idx], neigh)\n if not matcher.subgraph_is_isomorphic(): done = True\n else:\n done = True\n\n return graph, DSGraph(neigh)\n\n augmenter = feature_preprocess.FeatureAugment()\n\n pos_target = batch_target\n pos_target, pos_query = pos_target.apply_transform_multi(sample_subgraph)\n neg_target = batch_neg_target\n # TODO: use hard negs\n hard_neg_idxs = set(random.sample(range(len(neg_target.G)),\n int(len(neg_target.G) * 1/2)))\n #hard_neg_idxs = set()\n batch_neg_query = Batch.from_data_list(\n [DSGraph(self.generator.generate(size=len(g))\n if i not in hard_neg_idxs else g)\n for i, g in enumerate(neg_target.G)])\n for i, g in enumerate(batch_neg_query.G):\n g.graph[\"idx\"] = i\n _, neg_query = batch_neg_query.apply_transform_multi(sample_subgraph,\n hard_neg_idxs=hard_neg_idxs)\n if self.node_anchored:\n def add_anchor(g, anchors=None):\n if anchors is not None:\n anchor = anchors[g.G.graph[\"idx\"]]\n else:\n anchor = random.choice(list(g.G.nodes))\n for v in g.G.nodes:\n if \"node_feature\" not in g.G.nodes[v]:\n g.G.nodes[v][\"node_feature\"] = (torch.ones(1) if anchor == v\n else torch.zeros(1))\n return g\n neg_target = neg_target.apply_transform(add_anchor)\n pos_target = augmenter.augment(pos_target).to(utils.get_device())\n pos_query = augmenter.augment(pos_query).to(utils.get_device())\n neg_target = augmenter.augment(neg_target).to(utils.get_device())\n neg_query = augmenter.augment(neg_query).to(utils.get_device())\n #print(len(pos_target.G[0]), len(pos_query.G[0]))\n return pos_target, pos_query, neg_target, neg_query\n\nclass OTFSynImbalancedDataSource(OTFSynDataSource):\n \"\"\" Imbalanced on-the-fly synthetic data.\n\n Unlike the balanced dataset, this data source does not use 1:1 ratio for\n positive and negative examples. 
Instead, it randomly samples 2 graphs from\n the on-the-fly generator, and records the groundtruth label for the pair (subgraph or not).\n As a result, the data is imbalanced (subgraph relationships are rarer).\n This setting is a challenging model inference scenario.\n \"\"\"\n def __init__(self, max_size=29, min_size=5, n_workers=4,\n max_queue_size=256, node_anchored=False):\n super().__init__(max_size=max_size, min_size=min_size,\n n_workers=n_workers, node_anchored=node_anchored)\n self.batch_idx = 0\n\n def gen_batch(self, graphs_a, graphs_b, _, train):\n def add_anchor(g):\n anchor = random.choice(list(g.G.nodes))\n for v in g.G.nodes:\n g.G.nodes[v][\"node_feature\"] = (torch.ones(1) if anchor == v\n or not self.node_anchored else torch.zeros(1))\n return g\n pos_a, pos_b, neg_a, neg_b = [], [], [], []\n fn = \"data/cache/imbalanced-{}-{}\".format(str(self.node_anchored),\n self.batch_idx)\n if not os.path.exists(fn):\n graphs_a = graphs_a.apply_transform(add_anchor)\n graphs_b = graphs_b.apply_transform(add_anchor)\n for graph_a, graph_b in tqdm(list(zip(graphs_a.G, graphs_b.G))):\n matcher = nx.algorithms.isomorphism.GraphMatcher(graph_a, graph_b,\n node_match=(lambda a, b: (a[\"node_feature\"][0] > 0.5) ==\n (b[\"node_feature\"][0] > 0.5)) if self.node_anchored else None)\n if matcher.subgraph_is_isomorphic():\n pos_a.append(graph_a)\n pos_b.append(graph_b)\n else:\n neg_a.append(graph_a)\n neg_b.append(graph_b)\n if not os.path.exists(\"data/cache\"):\n os.makedirs(\"data/cache\")\n with open(fn, \"wb\") as f:\n pickle.dump((pos_a, pos_b, neg_a, neg_b), f)\n print(\"saved\", fn)\n else:\n with open(fn, \"rb\") as f:\n print(\"loaded\", fn)\n pos_a, pos_b, neg_a, neg_b = pickle.load(f)\n print(len(pos_a), len(neg_a))\n if pos_a:\n pos_a = utils.batch_nx_graphs(pos_a)\n pos_b = utils.batch_nx_graphs(pos_b)\n neg_a = utils.batch_nx_graphs(neg_a)\n neg_b = utils.batch_nx_graphs(neg_b)\n self.batch_idx += 1\n return pos_a, pos_b, neg_a, neg_b\n\nclass DiskDataSource(DataSource):\n \"\"\" Uses a set of graphs saved in a dataset file to train the subgraph model.\n\n At every iteration, new batch of graphs (positive and negative) are generated\n by sampling subgraphs from a given dataset.\n\n See the load_dataset function for supported datasets.\n \"\"\"\n def __init__(self, dataset_name, node_anchored=False, min_size=5,\n max_size=29):\n self.node_anchored = node_anchored\n self.dataset = load_dataset(dataset_name)\n self.min_size = min_size\n self.max_size = max_size\n\n def gen_data_loaders(self, size, batch_size, train=True,\n use_distributed_sampling=False):\n loaders = [[batch_size]*(size // batch_size) for i in range(3)]\n return loaders\n\n def gen_batch(self, a, b, c, train, max_size=15, min_size=5, seed=None,\n filter_negs=False, sample_method=\"tree-pair\"):\n batch_size = a\n train_set, test_set, task = self.dataset\n graphs = train_set if train else test_set\n if seed is not None:\n random.seed(seed)\n\n pos_a, pos_b = [], []\n pos_a_anchors, pos_b_anchors = [], []\n for i in range(batch_size // 2):\n if sample_method == \"tree-pair\":\n size = random.randint(min_size+1, max_size)\n graph, a = utils.sample_neigh(graphs, size)\n b = a[:random.randint(min_size, len(a) - 1)]\n elif sample_method == \"subgraph-tree\":\n graph = None\n while graph is None or len(graph) < min_size + 1:\n graph = random.choice(graphs)\n a = graph.nodes\n _, b = utils.sample_neigh([graph], random.randint(min_size,\n len(graph) - 1))\n if self.node_anchored:\n anchor = list(graph.nodes)[0]\n 
pos_a_anchors.append(anchor)\n pos_b_anchors.append(anchor)\n neigh_a, neigh_b = graph.subgraph(a), graph.subgraph(b)\n pos_a.append(neigh_a)\n pos_b.append(neigh_b)\n\n neg_a, neg_b = [], []\n neg_a_anchors, neg_b_anchors = [], []\n while len(neg_a) < batch_size // 2:\n if sample_method == \"tree-pair\":\n size = random.randint(min_size+1, max_size)\n graph_a, a = utils.sample_neigh(graphs, size)\n graph_b, b = utils.sample_neigh(graphs, random.randint(min_size,\n size - 1))\n elif sample_method == \"subgraph-tree\":\n graph_a = None\n while graph_a is None or len(graph_a) < min_size + 1:\n graph_a = random.choice(graphs)\n a = graph_a.nodes\n graph_b, b = utils.sample_neigh(graphs, random.randint(min_size,\n len(graph_a) - 1))\n if self.node_anchored:\n neg_a_anchors.append(list(graph_a.nodes)[0])\n neg_b_anchors.append(list(graph_b.nodes)[0])\n neigh_a, neigh_b = graph_a.subgraph(a), graph_b.subgraph(b)\n if filter_negs:\n matcher = nx.algorithms.isomorphism.GraphMatcher(neigh_a, neigh_b)\n if matcher.subgraph_is_isomorphic(): # a <= b (b is subgraph of a)\n continue\n neg_a.append(neigh_a)\n neg_b.append(neigh_b)\n\n pos_a = utils.batch_nx_graphs(pos_a, anchors=pos_a_anchors if\n self.node_anchored else None)\n pos_b = utils.batch_nx_graphs(pos_b, anchors=pos_b_anchors if\n self.node_anchored else None)\n neg_a = utils.batch_nx_graphs(neg_a, anchors=neg_a_anchors if\n self.node_anchored else None)\n neg_b = utils.batch_nx_graphs(neg_b, anchors=neg_b_anchors if\n self.node_anchored else None)\n return pos_a, pos_b, neg_a, neg_b\n\nclass DiskImbalancedDataSource(OTFSynDataSource):\n \"\"\" Imbalanced on-the-fly real data.\n\n Unlike the balanced dataset, this data source does not use 1:1 ratio for\n positive and negative examples. Instead, it randomly samples 2 graphs from\n the on-the-fly generator, and records the groundtruth label for the pair (subgraph or not).\n As a result, the data is imbalanced (subgraph relationships are rarer).\n This setting is a challenging model inference scenario.\n \"\"\"\n def __init__(self, dataset_name, max_size=29, min_size=5, n_workers=4,\n max_queue_size=256, node_anchored=False):\n super().__init__(max_size=max_size, min_size=min_size,\n n_workers=n_workers, node_anchored=node_anchored)\n self.batch_idx = 0\n self.dataset = load_dataset(dataset_name)\n self.train_set, self.test_set, _ = self.dataset\n self.dataset_name = dataset_name\n\n def gen_data_loaders(self, size, batch_size, train=True,\n use_distributed_sampling=False):\n loaders = []\n for i in range(2):\n neighs = []\n for j in range(size // 2):\n graph, neigh = utils.sample_neigh(self.train_set if train else\n self.test_set, random.randint(self.min_size, self.max_size))\n neighs.append(graph.subgraph(neigh))\n dataset = GraphDataset(neighs)\n loaders.append(TorchDataLoader(dataset,\n collate_fn=Batch.collate([]), batch_size=batch_size // 2 if i\n == 0 else batch_size // 2,\n sampler=None, shuffle=False))\n loaders.append([None]*(size // batch_size))\n return loaders\n\n def gen_batch(self, graphs_a, graphs_b, _, train):\n def add_anchor(g):\n anchor = random.choice(list(g.G.nodes))\n for v in g.G.nodes:\n g.G.nodes[v][\"node_feature\"] = (torch.ones(1) if anchor == v\n or not self.node_anchored else torch.zeros(1))\n return g\n pos_a, pos_b, neg_a, neg_b = [], [], [], []\n fn = \"data/cache/imbalanced-{}-{}-{}\".format(self.dataset_name.lower(),\n str(self.node_anchored), self.batch_idx)\n if not os.path.exists(fn):\n graphs_a = graphs_a.apply_transform(add_anchor)\n graphs_b = 
graphs_b.apply_transform(add_anchor)\n for graph_a, graph_b in tqdm(list(zip(graphs_a.G, graphs_b.G))):\n matcher = nx.algorithms.isomorphism.GraphMatcher(graph_a, graph_b,\n node_match=(lambda a, b: (a[\"node_feature\"][0] > 0.5) ==\n (b[\"node_feature\"][0] > 0.5)) if self.node_anchored else None)\n if matcher.subgraph_is_isomorphic():\n pos_a.append(graph_a)\n pos_b.append(graph_b)\n else:\n neg_a.append(graph_a)\n neg_b.append(graph_b)\n if not os.path.exists(\"data/cache\"):\n os.makedirs(\"data/cache\")\n with open(fn, \"wb\") as f:\n pickle.dump((pos_a, pos_b, neg_a, neg_b), f)\n print(\"saved\", fn)\n else:\n with open(fn, \"rb\") as f:\n print(\"loaded\", fn)\n pos_a, pos_b, neg_a, neg_b = pickle.load(f)\n print(len(pos_a), len(neg_a))\n if pos_a:\n pos_a = utils.batch_nx_graphs(pos_a)\n pos_b = utils.batch_nx_graphs(pos_b)\n neg_a = utils.batch_nx_graphs(neg_a)\n neg_b = utils.batch_nx_graphs(neg_b)\n self.batch_idx += 1\n return pos_a, pos_b, neg_a, neg_b\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n plt.rcParams.update({\"font.size\": 14})\n for name in [\"enzymes\", \"reddit-binary\", \"cox2\"]:\n data_source = DiskDataSource(name)\n train, test, _ = data_source.dataset\n i = 11\n neighs = [utils.sample_neigh(train, i) for j in range(10000)]\n clustering = [nx.average_clustering(graph.subgraph(nodes)) for graph,\n nodes in neighs]\n path_length = [nx.average_shortest_path_length(graph.subgraph(nodes))\n for graph, nodes in neighs]\n #plt.subplot(1, 2, i-9)\n plt.scatter(clustering, path_length, s=10, label=name)\n plt.legend()\n plt.savefig(\"plots/clustering-vs-path-length.png\")\n","repo_name":"snap-stanford/neural-subgraph-learning-GNN","sub_path":"common/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":20041,"program_lang":"python","lang":"en","doc_type":"code","stars":280,"dataset":"github-code","pt":"73"} +{"seq_id":"17730453662","text":"#class SystemLog:\n# def __init__(self, file, level1, level2):\n# logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n# rootLogger = logging.getLogger()\n# fileHandler = logging.FileHandler(file)\n# fileHandler.setFormatter(logFormatter)\n# fileHandler.setLevel(level1)\n# rootLogger.addHandler(fileHandler)\n# consoleHandler = logging.StreamHandler()\n# consoleHandler.setFormatter(logFormatter)\n# rootLogger.addHandler(consoleHandler)\n# rootLogger.setLevel(level2)\n# self.log = logging\n\nimport datetime\nimport logging\n\nfrom src.config import HorusConfig\n\n\nclass SysLogger:\n def __init__(self):\n self.logger = logging.getLogger('horus')\n self.conf = HorusConfig()\n if len(self.logger.handlers) == 0:\n self.logger.setLevel(logging.DEBUG)\n now = datetime.datetime.now()\n handler = logging.FileHandler(self.conf.dir_log + 'horus_' + now.strftime(\"%Y-%m-%d\") + '.log')\n formatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(formatter)\n self.logger.addHandler(consoleHandler)\n def getLog(self):\n return self.logger","repo_name":"SmartDataAnalytics/HORUS-NER","sub_path":"src/core/util/systemlog.py","file_name":"systemlog.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"73"} +{"seq_id":"11078952618","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 20 
14:51:29 2022\r\n\r\n@author: MengFt\r\n\"\"\"\r\n\r\nfrom transTool import transTool\r\nimport PySimpleGUI as sg\r\nfrom PIL import Image,ImageGrab\r\nimport time\r\n#窗口布局\r\nclass transWindow(object):\r\n def __init__(self):\r\n print(\"创建一个翻译窗口,可进行汉译英和英译汉\") \r\n self.transLayoutVertical =[\r\n [sg.Button(\"百度翻译\",key=\"BDtranslate\"),\r\n sg.Button(\"清空翻译\",key=\"clearTranslation\"),\r\n sg.Button(\"转为纵向\",key=\"turn2Horizontal\"),\r\n sg.Radio('英译汉', \"transTypeRadio\", default=True, size=(10,1),key=\"E2C\"),\r\n sg.Radio('汉译英', \"transTypeRadio\", default=True, size=(10,1), key='C2E'),],\r\n [sg.Text('原文',font='Times 10',size=(20,1)),sg.Text('译文',font='Times 10',size=(20,1)),],\r\n [sg.Multiline(size=(15,30),font='Times 16', expand_x=True, expand_y=True,key=\"textBeforeTranslate\",enable_events=True),\r\n sg.Multiline(size=(15,30),font='Times 16', expand_x=True, expand_y=True,key=\"textAfterTranslate\",enable_events=True),],\r\n ]\r\n \r\n self.transLayoutHorizontal =[\r\n [sg.Button(\"百度翻译\",key=\"BDtranslate\"),\r\n sg.Button(\"清空翻译\",key=\"clearTranslation\"),\r\n sg.Button(\"转为横向\",key=\"turn2Vertical\"),\r\n sg.Radio('英译汉', \"transTypeRadio\", default=True, size=(10,1),key=\"E2C\"),\r\n sg.Radio('汉译英', \"transTypeRadio\", default=True, size=(10,1), key='C2E'),],\r\n [sg.Text('原文',font='Times 10',size=(20,1)),\r\n sg.Multiline(size=(15,30),font='Times 16', expand_x=True, expand_y=True,key=\"textBeforeTranslate\",enable_events=True),],\r\n [sg.Text('译文',font='Times 10',size=(20,1)),\r\n sg.Multiline(size=(15,30),font='Times 16', expand_x=True, expand_y=True,key=\"textAfterTranslate\",enable_events=True),],\r\n ]\r\n self.windowDirect=0 #0默认横向\r\n self.transType=0 #0默认英译汉\r\n self.BDTransTool=transTool()\r\n\r\n def getScreenSize(self):\r\n image = ImageGrab.grab()\r\n\r\n height=image.height\r\n width=image.width\r\n return (width,height)\r\n\r\n def currentTime(self):\r\n currentTime=time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime( int(time.time())))\r\n return currentTime\r\n \r\n def getTransType(self):\r\n if self.window['E2C'].get()==True:\r\n self.transType=0\r\n elif self.window['C2E'].get()==True:\r\n self.transType=1\r\n return self.transType\r\n \r\n \r\n def windowInit(self):\r\n #选取窗口类型\r\n if self.windowDirect==0:\r\n #横向布置窗口\r\n #tempLayout=self.transLayoutVertical\r\n tempLayout=[\r\n [sg.Button(\"百度翻译\",key=\"BDtranslate\",font='Times 15'),\r\n sg.Button(\"清空翻译\",key=\"clearTranslation\",font='Times 15'),\r\n sg.Button(\"转为纵向\",key=\"turn2Horizontal\",font='Times 15'),\r\n sg.Radio('英译汉', \"transTypeRadio\", default=True, size=(10,1),key=\"E2C\",font='Times 15'),\r\n sg.Radio('汉译英', \"transTypeRadio\", default=True, size=(10,1), key='C2E',font='Times 15'),],\r\n [sg.Text('原文',font='Times 15',size=(110,1)),sg.Text('译文',font='Times 15',size=(20,1)),],\r\n [sg.Multiline(size=(15,5),font='Times 16', expand_x=True, expand_y=True,key=\"textBeforeTranslate\",enable_events=True),\r\n sg.Multiline(size=(15,5),font='Times 16', expand_x=True, expand_y=True,key=\"textAfterTranslate\",enable_events=True),],\r\n ]\r\n windowSize=self.getScreenSize()\r\n transWindowSize=(windowSize[0]-100,400)\r\n transWindowLocation=(0,windowSize[1]-transWindowSize[1]-50) \r\n\r\n else:\r\n #tempLayout=self.transLayoutHorizontal\r\n tempLayout=[\r\n [sg.Button(\"百度翻译\",key=\"BDtranslate\",font='Times 15'),\r\n sg.Button(\"清空翻译\",key=\"clearTranslation\",font='Times 15'),\r\n sg.Button(\"转为横向\",key=\"turn2Vertical\",font='Times 15'),],\r\n [sg.Radio('英译汉', \"transTypeRadio\", default=True, 
size=(10,1),key=\"E2C\",font='Times 15'),\r\n sg.Radio('汉译英', \"transTypeRadio\", default=True, size=(10,1), key='C2E',font='Times 15'),],\r\n [sg.Text('原文',font='Times 15',size=(4,1)),\r\n sg.Multiline(size=(10,20),font='Times 16', expand_x=True, expand_y=True,key=\"textBeforeTranslate\",enable_events=True),],\r\n [sg.Text('译文',font='Times 15',size=(4,1)),\r\n sg.Multiline(size=(10,20),font='Times 16', expand_x=True, expand_y=True,key=\"textAfterTranslate\",enable_events=True),],\r\n ]\r\n windowSize=self.getScreenSize()\r\n transWindowSize=(400,windowSize[1]-100)\r\n transWindowLocation=(windowSize[0]-transWindowSize[0]-50,0) \r\n \r\n #tempLayout[-1].append(sg.Sizegrip()) \r\n\r\n self.window=sg.Window(\"翻译准确,写作顺畅!!\",\r\n tempLayout,\r\n resizable=True, \r\n grab_anywhere=True,\r\n keep_on_top=True,\r\n finalize=True, \r\n location=(transWindowLocation),\r\n margins=(0,0), \r\n )\r\n self.window.set_min_size(transWindowSize)\r\n\r\n #初始化后台翻译工具\r\n while True:\r\n event,values=self.window.read(timeout=1000) \r\n \r\n if not event in (None,\"__TIMEOUT__\"): \r\n print(\"当前时间为:{},子窗口1激活事件{}\".format(self.currentTime(),event))\r\n if event in (sg.WIN_CLOSED,'关闭'):\r\n self.window.close()\r\n break\r\n if event in ('BDtranslate'):\r\n print('开启翻译')\r\n stringBeforeTrans=self.window['textBeforeTranslate'].get()\r\n if stringBeforeTrans in (\"\",None):\r\n print(\"无翻译内容\")\r\n sg.popup_auto_close(\"翻译内容为空\",auto_close_duration=1,keep_on_top=True,font='Times 15')\r\n continue\r\n else:\r\n \r\n self.BDTransTool.inputString(stringBeforeTrans,transType=self.getTransType(),deleteWrap=True)\r\n stringAfterTrans=self.BDTransTool.getTrans()\r\n stringBeforeTrans=self.BDTransTool.inputWords\r\n self.window[\"textBeforeTranslate\"].update(stringBeforeTrans)\r\n self.window[\"textAfterTranslate\"].update(stringAfterTrans)\r\n \r\n if event in ('clearTranslation'):\r\n print('清空翻译')\r\n self.window[\"textBeforeTranslate\"].update(\"\")\r\n self.window[\"textAfterTranslate\"].update(\"\")\r\n\r\n if event in ('turn2Horizontal'):\r\n self.windowDirect=1\r\n print('转为纵向')\r\n self.window.close()\r\n self.windowInit()\r\n if event in ('turn2Vertical'):\r\n self.windowDirect=0\r\n print('转为纵向')\r\n self.window.close()\r\n self.windowInit()\r\n\r\n #如果关闭子窗口,自动重新打开主窗口 \r\n\r\n#页面操作\r\n\r\nif __name__ == \"__main__\":\r\n window1=transWindow()\r\n window1.windowInit()\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MengFt-A-Learner/Convenient-translation-tool","sub_path":"TransWindow.py","file_name":"TransWindow.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"43229014771","text":"import struct\nimport sys\nimport re\nimport os\nimport math\n\ndef round_rmm(x: float) -> int:\n if x >= 0:\n return int(x + 0.5)\n else:\n return int(x - 0.5)\n\ndef entry(input_file):\n with open(input_file, 'r') as f:\n data = f.read()\n test_values = re.findall(r'NAN_BOXED\\((\\d+),', data)\n modify_function(test_values, sys.argv[1])\n\ndef int_to_binary(line: str) -> str:\n n = int(line.strip())\n binary = bin(n)[2:]\n return binary.zfill(32)\n\ndef binary_to_long(binary: str, fcsr: int) -> int:\n if binary[1:9] == '11111111':\n if binary[:1] == '1' and binary[9:] == '00000000000000000000000':\n return -pow(2,63) \n else:\n return pow(2,63) - 1\n float_value = struct.unpack('!f', struct.pack('!I', int(binary, 2)))[0]\n if fcsr == 0:\n rounded_value = round(float_value)\n if fcsr == 32:\n 
rounded_value = math.trunc(float_value)\n    if fcsr == 64:\n        rounded_value = math.floor(float_value)\n    if fcsr == 96:\n        rounded_value = math.ceil(float_value)\n    if fcsr == 128:\n        rounded_value = round_rmm(float_value)\n    long_value = int(rounded_value)\n    if (long_value > pow(2,63) - 1):\n        return pow(2,63) - 1\n    if (long_value < -pow(2,63)):\n        return -pow(2,63)\n    return long_value\n\ndef process_file(test_value: int, fcsr: int):\n    binary = int_to_binary(str(test_value))\n    long_val = binary_to_long(binary, fcsr)\n    return long_val\n\ndef modify_function(numbers: list, input2: str):\n    with open(input2, 'r') as f:\n        lines = f.readlines()\n    with open(input2, 'w') as f:\n        i = 0\n        for line in lines:\n            match = re.search(r'(TEST_FPI.*?)\\s*\\(\\s*(.*?)\\s*\\)', line)\n            if match:\n                func_name = match.group(1)\n                args = match.group(2).split(',')\n                numbers[i] = process_file(numbers[i], int(args[4].strip()))\n                args[5] = ' ' + str(numbers[i])\n                i += 1\n                new_line = re.sub(r'TEST_FPI.*?\\(.*?\\)', func_name + '(' + ','.join(args) + ')', line)\n                f.write(new_line)\n            else:\n                f.write(line)\n\nif __name__ == '__main__':\n    entry(sys.argv[1])\n","repo_name":"QQeg/gem5-testing","sub_path":"update_answer_fcvt.l.s.py","file_name":"update_answer_fcvt.l.s.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16748337353","text":"#visualizando dados\n#tabela basica\n\nimport matplotlib.pyplot as plt\n\nx1 = [1, 2, 3, 4, 5]\ny1 = [2, 4, 6, 8, 10]\n\nx2 = [2, 4, 6, 8, 10]\ny2 = [4, 8, 12, 16, 20]\n\ntitulo = \"Gráfico de dispersão (pontos)\"\neixox = \"Eixo X\"\neixoy = \"Eixo Y\"\n\n#legendas\nplt.title(titulo)\nplt.xlabel(eixox)\nplt.ylabel(eixoy)\n\nplt.scatter(x1, y1, label=\"Grupo 1\", color=\"g\", marker=\"h\", s=100)\n\nplt.plot(x1, y1)\nplt.legend()\n\nplt.savefig(\"Figura1.pdf\")\n\nplt.show()\n\n","repo_name":"christopher2306/Introdu-o-a-tratamento-de-dados-em-Python","sub_path":"grafico-linhas.py","file_name":"grafico-linhas.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26914051879","text":"#!/usr/bin/env python\n\n# This file is designed to handle moving controls from s% xx to s% ctrl% xx\n# While this replicates a lot of check_defaults.py, we want this to be standalone\n# and only depend on the python stdlib. 
This way users can easily use it to port their\n# run_star_extras.f90 files without needing to worry about python packaging.\n\n# Usage: python update_ctrls.py file1.f90 file2.f90 ....\n\n# Note this only works for s% (or s %); it does not work if you renamed the star_type variable\n# to something other than s, for instance in the binary module.\n\nimport os\nimport re\nfrom collections.abc import MutableSet\nimport functools\nimport operator\nimport sys\n\n\nMESA_DIR = os.environ[\"MESA_DIR\"]\n\nctrls_files = [ os.path.join(\"star_data\",\"private\",\"star_controls.inc\"),\n                os.path.join(\"star_data\",\"private\",\"star_controls_dev.inc\")\n            ]\n\nCRTL_NAME = 's% ctrl% '\n\n# inspiration from https://stackoverflow.com/a/27531275\nclass CaseInsensitiveSet(MutableSet):\n    def __init__(self, iterable):\n        self._values = {}\n        self._fold = str.casefold\n        for v in iterable:\n            self.add(v)\n\n    def __repr__(self):\n        return repr(self._values.values())\n\n    def __contains__(self, value):\n        return self._fold(value) in self._values\n\n    def __iter__(self):\n        return iter(self._values.values())\n\n    def __len__(self):\n        return len(self._values)\n\n    def items(self):\n        return self._values.items()\n\n    def add(self, value):\n        if isinstance(value, CaseInsensitiveSet):\n            for k,v in value.items():\n                self._values[self._fold(k)] = v\n        else:\n            self._values[self._fold(value)] = value\n\n    \n    def discard(self, value):\n        v = self._fold(value)\n        if v in self._values:\n            del self._values[v]\n\n\ndef get_options(filename, regexp):\n    \"\"\"Return a set of MESA option names\"\"\"\n    r = re.compile(regexp)\n    with open(os.path.join(MESA_DIR, filename)) as f:\n        matches = r.finditer(f.read())\n    return CaseInsensitiveSet(m.group(1) for m in matches)\n\n\ndef get_columns(filename, regexp):\n    \"\"\"Return a set of MESA column names\"\"\"\n    r = re.compile(regexp)\n    with open(os.path.join(MESA_DIR, filename)) as f:\n        lines = f.readlines()\n    matches = []\n    for line in lines:\n        m = r.match(line)\n        if m is not None:\n            matches.append(m.group(1))\n    return CaseInsensitiveSet(matches)\n\ndef get_defaults(filename):\n    # extract column names from defaults file\n\n    # these lines look like:\n    # ! initial_mass = 1\n    # ?    
^^^^^^^^^\n    # that is, they may or may not be commented out\n    # and may or may not have a( )\n    # and may or may not have space before a =\n\n    regexp = \"^[ \\t]*[ ]?(\\w+)(\\(.*\\))*[ \\t]*=\"\n\n    return get_columns(filename, regexp)\n\ndef load_file(filename):\n    with open(os.path.join(MESA_DIR, filename),\"r\") as f:\n        lines = f.readlines()\n\n    return lines\n\n\ndef get_inc(filename):\n    # extract options from an inc file\n    lines = load_file(filename)\n\n    # Remove line continuation characters\n    lines = [i.replace(\"&\",\"\").strip() for i in lines if i]\n\n    # Remove type definition (i.e. real(dp) :: x), leaving just x\n    # as well as anything that starts with a comment or has a comment embedded in it\n    for idl,line in enumerate(lines):\n        if \"::\" in line:\n            lines[idl] = line.split(\"::\")[1].strip()\n\n    lines = [i.split(\",\") for i in lines if i]\n\n    # Flatten list of lists\n    lines = functools.reduce(operator.iconcat, lines, [])\n\n    # Remove array sizes from variables\n    lines = [line.split(\"(\")[0] for line in lines if line]\n    \n    # Remove comments\n    lines = [line.split(\"!\")[0] for line in lines if line]\n\n    # Remove = x \n    lines = [line.split(\"=\")[0] for line in lines if line]\n\n    # Remove remaining empty strings\n    lines = [line.strip() for line in lines if line]\n\n    return CaseInsensitiveSet(lines)\n\n# Load controls names\ncinc = get_inc(ctrls_files[0])\n\nfor f in ctrls_files[1:]:\n    cinc.add(get_inc(f))\n\n\ndef update(filename):\n    try:\n        lines = load_file(filename)\n    except (UnicodeDecodeError, IsADirectoryError):\n        return\n    \n    # s[0 or more space] % [0 or more space] [1 or more character or number or _]\n    # This won't match when s has been renamed\n    regex_all = \"(s[ \\t]?[a-zA-Z0-9_]?%[ \\t]?[a-zA-Z0-9_]*)\"\n\n    # s [0 or more space] % [0 or more space]\n    regex_s = 's[ \\ta-zA-Z0-9_]?%[ \\t]?'\n\n    r_all = re.compile(regex_all)\n    r_s = re.compile(regex_s)\n\n    for idl,line in enumerate(lines):\n        # Split on s% something\n        line_split = re.split(regex_all,line)\n        for idm,match in enumerate(line_split):\n            # Remove the s% so we can check if the variable is a control\n            var = match.replace('s%','').strip()\n            if var in cinc:\n                # If it is a control then replace s% with CRTL_NAME\n                line_split[idm] = re.sub(regex_s,CRTL_NAME,match)\n        lines[idl] = ''.join(line_split)\n\n    with open(filename,'w') as f:\n        f.writelines(lines)\n\nif __name__ == \"__main__\":\n    for i in sys.argv[1:]:\n        update(i)\n\n# Run over MESA_DIR\n\n# python3 linters/update_ctrls.py star/test/src/*\n# python3 linters/update_ctrls.py star/work/src/*\n# python3 linters/update_ctrls.py star/job/*\n# python3 linters/update_ctrls.py star/p*/*.f90 \n# python3 linters/update_ctrls.py star/test_suite/*/src/*.f90\n# python3 linters/update_ctrls.py star/other/*.f90\n# python3 linters/update_ctrls.py */test_suite/*/src/*.inc\n# python3 linters/update_ctrls.py */test_suite/*/src/*/*.inc\n\n# python3 linters/update_ctrls.py binary/test/src/*\n# python3 linters/update_ctrls.py binary/work/src/*\n# python3 linters/update_ctrls.py binary/job/*\n# python3 linters/update_ctrls.py binary/p*/*.f90 \n# python3 linters/update_ctrls.py binary/test_suite/*/src/*.f90\n# python3 linters/update_ctrls.py binary/other/*.f90\n\n# python3 linters/update_ctrls.py astero/test/src/*\n# python3 linters/update_ctrls.py astero/work/src/*\n# python3 linters/update_ctrls.py astero/job/*\n# python3 linters/update_ctrls.py astero/p*/*.f90 \n# python3 linters/update_ctrls.py astero/test_suite/*/src/*.f90\n# python3 linters/update_ctrls.py 
astero/other/*.f90\n","repo_name":"MESAHub/mesa","sub_path":"linters/update_ctrls.py","file_name":"update_ctrls.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"73"} +{"seq_id":"4300859936","text":"import datetime\n\nimport django\nfrom django.contrib.auth.models import AnonymousUser, User\nfrom django.test import RequestFactory, TestCase\nfrom django.urls import reverse\n\nfrom SteamProphet.apps.SteamProphet.models import VotingPeriod, Week\nfrom ..views import CreatePicksView\n\n\nclass TestCreatePicksView(TestCase):\n def setUp(self):\n super().setUp()\n self.factory = RequestFactory()\n self.user = User.objects.create_user('user')\n\n def test_userNeedsToBeAuthenticated_GET(self):\n request = self.factory.get('')\n request.user = AnonymousUser()\n self.assertRedirects(CreatePicksView.as_view()(request), reverse('login'),\n fetch_redirect_response=False)\n\n def test_userNeedsToBeAuthenticated_POST(self):\n request = self.factory.post('')\n request.user = AnonymousUser()\n self.assertRedirects(CreatePicksView.as_view()(request), reverse('login'),\n fetch_redirect_response=False)\n\n def test_view_is_inaccessible_if_there_is_no_voting_period(self):\n self.assertViewIsInaccessible()\n\n def test_view_is_inaccessible_before_voting_period(self):\n now = django.utils.timezone.now()\n start = now + datetime.timedelta(days=10)\n end = start + datetime.timedelta(days=10)\n week = Week.objects.create(week=1)\n VotingPeriod.objects.create(week=week, start=start, end=end)\n self.assertViewIsInaccessible()\n\n def test_view_is_inaccessible_after_voting_period(self):\n now = django.utils.timezone.now()\n start = now - datetime.timedelta(days=10)\n end = start + datetime.timedelta(days=5)\n week = Week.objects.create(week=1)\n VotingPeriod.objects.create(week=week, start=start, end=end)\n self.assertViewIsInaccessible()\n\n def test_view_is_accessible_during_voting_period(self):\n now = django.utils.timezone.now()\n start = now - datetime.timedelta(days=10)\n end = now + datetime.timedelta(days=10)\n week = Week.objects.create(week=1)\n VotingPeriod.objects.create(week=week, start=start, end=end)\n self.assertViewIsAccessible()\n\n def assertViewIsAccessible(self):\n request = self.factory.get('')\n request.user = self.user\n response = CreatePicksView.as_view()(request)\n self.assertEqual(response.status_code, 200)\n\n def assertViewIsInaccessible(self):\n get_request = self.factory.get('')\n get_request.user = self.user\n get_response = CreatePicksView.as_view()(get_request)\n\n post_request = self.factory.post('')\n post_request.user = self.user\n post_response = CreatePicksView.as_view()(post_request)\n\n self.assertEqual(get_response.status_code, 400)\n self.assertEqual(post_response.status_code, 400)\n","repo_name":"sgarcialaguna/SteamProphet","sub_path":"SteamProphet/apps/SteamProphet/tests/testCreatePicksView.py","file_name":"testCreatePicksView.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9580509640","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nimport os\nimport shutil\nimport re\n\nTOC_URL = \"http://solomon.dkbl.alexanderstreet.com/cgi-bin/asp/philo/dkbl/volumes_toc.pl?&church=ON\"\n\ndef write_paragraph(filename, text, dirname='paragraphs'):\n completeName = os.path.join(dirname, filename+'.txt')\n print('Writing ' + completeName)\n with 
open(completeName, 'w') as f:\n f.write(text)\n\ndef get_paragraph_number(soup):\n node = soup.find('a', {'name': True})\n if node is None:\n return None\n\n loc = node.next_sibling.next_sibling\n match = re.search(r'\\d+', loc.text)\n if match:\n paragraph = int(match.group(0))\n else:\n paragraph = 0\n return paragraph\n\ndef download_links(url, link_text):\n page = requests.get(url)\n soup = BeautifulSoup(page.text)\n\n for a in soup.find_all('a'):\n if link_text in a.get_text():\n yield urljoin(page.url, a['href'])\n\ndef download_volume(vol_no, url):\n i = 0\n for url in download_links(url, 'View Text'):\n page = requests.get(url)\n soup = BeautifulSoup(page.text)\n head = soup.find('span', {'class': 'head'})\n title = head.get_text()\n\n if title is None:\n raise Exception('No found.')\n if (title == \"EDITORS' PREFACE\"):\n continue\n i += 1\n\n number = get_paragraph_number(soup)\n filename = '{:02}-{:02}-{:02}-{}'.format(\n vol_no,\n i,\n number,\n '_'.join(title.split()).lower(),\n )\n\n abstract = '\\n'.join(hibold.get_text() for hibold in soup.find_all(class_='hibold'))\n content = head.parent\n paragraph_text = content.get_text()\n\n\n write_paragraph(filename, paragraph_text)\n\ndef main(url = TOC_URL):\n\n shutil.rmtree('paragraphs')\n os.makedirs('paragraphs')\n\n volume_links = list(download_links(url, 'Table of Contents'))\n for (i, url) in enumerate(volume_links[:-2]):\n download_volume(i+1, url)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"csbailey5t/barth","sub_path":"slow.py","file_name":"slow.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"800720702","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport pandas as pd\nimport argparse\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom AnalysisModule.MPIAnalysisModule.MPIAPICategories import *\n\nFONT_SIZE_LARGE = 16\nFONT_SIZE_SMALLER = 14\n\n# plt.style.use('seaborn-v0_8-notebook')\n# plt.style.use('seaborn-v0_8-colorblind')\nplt.style.use('seaborn-v0_8')\n\nplt.rc('font', size=FONT_SIZE_LARGE) # controls default text sizes\nplt.rc('axes', labelsize=FONT_SIZE_SMALLER) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=FONT_SIZE_SMALLER) # fontsize of the tick labels\nplt.rc('ytick', labelsize=FONT_SIZE_SMALLER) # fontsize of the tick labels\n\nplt.rcParams['axes.facecolor'] = 'white'\n\n\ndef is_series_same(s):\n a = s.to_numpy() # s.values (pandas<0.24)\n return (a[0] == a).all()\n\n\ndef get_num_type_usage_category_plots(df, output_dir):\n fig = plt.figure()\n\n num_type_uses = df[df['DATATYPE'].isin(mpi_type_creation_funcs + ['inconclusive'])]\n\n pivot_df_type_use_type = pd.pivot_table(num_type_uses, values='src_location', index='Code',\n columns='DATATYPE',\n aggfunc='count', fill_value=0)\n\n plt.clf()\n pivot_df_type_use_type.plot.bar(width=1, stacked=True, ax=fig.gca())\n plt.title(f'Number of calls using derived datatypes', color='black')\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), title=\"Type was created by\")\n plt.ylabel(\"Num Calls\")\n plt.savefig(output_dir + \"/num_types_used_by_type.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.clf()\n\n pivot_df_type_use_call = pd.pivot_table(num_type_uses, values='src_location', index='Code',\n columns='call',\n aggfunc='count', fill_value=0)\n pivot_df_type_use_call.plot.bar(width=1, stacked=True, ax=fig.gca())\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), title=\"Type was 
used in\")\n plt.title(f'Number of calls using derived datatypes', color='black')\n plt.ylabel(\"Num Calls\")\n plt.savefig(output_dir + \"/num_types_used_by_call.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.clf()\n\n num_type_creates = df[df['call'].isin(mpi_type_creation_funcs)]\n pivot_df_type_create = pd.pivot_table(num_type_creates, values='src_location', index='Code',\n columns='call',\n aggfunc='count', fill_value=0)\n\n pivot_df_type_create.plot.bar(width=1, stacked=True, ax=fig.gca())\n plt.title(f'Number of calls creating derived datatypes', color='black')\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n plt.ylabel(\"Num Calls\")\n plt.savefig(output_dir + \"/num_types_created_stacked.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n # TODO pandas warning here?\n num_type_uses = df[~df['DATATYPE'].isin(predefined_mpi_dtype_consants)].copy()\n num_type_uses['DATATYPE'].loc[\n ~num_type_uses['DATATYPE'].isin(mpi_type_creation_funcs + ['inconclusive'])] = 'indecidable-non-mpi'\n\n pivot_df_type_use_type = pd.pivot_table(num_type_uses, values='src_location', index='Code',\n columns='DATATYPE',\n aggfunc='count', fill_value=0)\n\n plt.clf()\n pivot_df_type_use_type.plot.bar(width=1, stacked=True, ax=fig.gca())\n plt.title(f'Number of calls using derived datatypes', color='black')\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), title=\"Type was created by\")\n plt.ylabel(\"Num Calls\")\n plt.savefig(output_dir + \"/num_types_used_by_type_with_undefined.pdf\", bbox_extra_artists=(lgd,),\n bbox_inches='tight')\n plt.clf()\n\n\ndef plot_df(df, title, output_dir, angle=0, order=None, counter_clockwise=True, col_to_plot='plot_labels',\n use_pie=False):\n pivot_df = df.pivot_table(values='Code', index=col_to_plot, columns='call', aggfunc='count', fill_value=0)\n pivot_df_per_project = df.pivot_table(values='Code', index=col_to_plot, columns='call', aggfunc='nunique',\n fill_value=0)\n if order is None:\n order = pivot_df.index\n pivot_df = pivot_df.reindex(order)\n pivot_df_agg = pivot_df.agg(\"sum\")\n pivot_df['overall'] = pivot_df.sum(axis=1)\n num_calls = pivot_df_agg.sum()\n pivot_df_per_project['overall'] = pivot_df_per_project.sum(axis=1)\n counter_clockwise_default = True\n angle_default = 0\n\n if use_pie:\n generate_pie_plot(pivot_df, 'overall', f'overall usage of {title} ({num_calls} calls)',\n f\"{output_dir}/overall_{title}.pdf\",\n angle,\n counter_clockwise)\n else:\n generate_bar_plot(pivot_df, 'overall', f'overall usage of {title} ({num_calls} calls)',\n f\"{output_dir}/overall_{title}.pdf\")\n\n print('overall')\n pivot_df['percent'] = 100 * pivot_df['overall'] / num_calls\n print(pivot_df['percent'])\n\n for cat, member in mpi_categories.items():\n members_used = [m for m in member if m in pivot_df.columns]\n # sometimes there is no function call that have the given argument\n if len(members_used) > 0:\n num_calls = pivot_df_agg[members_used].sum()\n pivot_df[cat] = pivot_df[members_used].sum(axis=1)\n\n print(cat)\n pivot_df['percent'] = 100 * pivot_df[cat] / num_calls\n print(pivot_df['percent'])\n\n if use_pie:\n generate_pie_plot(pivot_df, cat, f'usage of {title} in {cat} ({num_calls} calls)',\n f\"{output_dir}/{cat}_{title}.pdf\",\n angle,\n counter_clockwise)\n else:\n generate_bar_plot(pivot_df, cat, f'usage of {title} in {cat} ({num_calls} calls)',\n f\"{output_dir}/{cat}_{title}.pdf\")\n\n for call in members_used:\n num_calls = pivot_df_agg[call]\n if use_pie:\n generate_pie_plot(pivot_df, call, f'usage of 
{title} in {call} ({num_calls} calls)',\n                                      f\"{output_dir}/{cat}/{call}_{title}.pdf\", angle, counter_clockwise)\n                else:\n                    generate_bar_plot(pivot_df, call, f'usage of {title} in {call} ({num_calls} calls)',\n                                      f\"{output_dir}/{cat}/{call}_{title}.pdf\")\n    plt.close('all')\n\n\ndef generate_pie_plot(pivot_df, col, title, fname, angle, counter_clockwise):\n    plt.clf()\n    fig = plt.figure()\n    plt.title(title, color='black')\n    # plot = pivot_df.plot.pie(y=col, legend=False, startangle=angle, counterclock=counter_clockwise)\n    # plot.set_ylabel(\"\")\n    sum = pivot_df[col].sum()\n    plot = (\n        pivot_df.assign(plot_this=lambda df_: 100 * df_[col] / sum)['plot_this']\n        .plot.pie(ax=fig.gca())\n    )\n    # plot.set_legend(loc='center left', bbox_to_anchor=(1.0, 0.85))\n    plt.savefig(fname, bbox_inches='tight')\n    plt.close(fig)\n\n\ndef generate_bar_plot(pivot_df, col, title, fname):\n    plt.clf()\n    fig = plt.figure(figsize=(16, 3))\n    ax = fig.gca()\n    ax.set_xlim(0.0, 100.0)\n    # plt.title(title, color='black')\n    # plot = pivot_df.plot.pie(y=col, legend=False, startangle=angle, counterclock=counter_clockwise)\n    # plot.set_ylabel(\"\")\n    sum = pivot_df[col].sum()\n\n    plot_df = pd.DataFrame(\n        pivot_df.assign(plot_this=lambda df_: 100 * df_[col] / sum)['plot_this'].rename('')).transpose()\n\n    plot = plot_df.plot.barh(stacked=True, ax=ax, legend=False, edgecolor=\"black\")\n    ax.set_ylabel('')\n    ax.set_xlabel('% Distribution')\n    # ax.legend(loc='center left', bbox_to_anchor=(1, 0.65))\n\n    y_sep = 0.1\n    above = True\n    for bar in ax.containers:\n        label = bar.get_label()\n\n        for rect in bar.patches:\n            if rect.get_width() > 0:\n                txt = ax.text(rect.get_x(), rect.get_y() + 0.2, label, weight='bold')\n                # as data coordinates\n                text_coords = ax.transData.inverted().transform(txt.get_window_extent())\n                txt_width = text_coords[1][0] - text_coords[0][0]\n                txt_height = text_coords[1][1] - text_coords[0][1]\n                center_y = rect.get_y() + rect.get_height() / 2 - txt_height / 2\n                center_x = rect.get_x() + rect.get_width() / 2 - txt_width / 2\n                if rect.get_width() > txt_width:\n                    # text fits\n                    txt.set_position((center_x, center_y))\n                else:\n                    if above:\n                        y = rect.get_y() + rect.get_height() + y_sep\n                        # manual placement for better readability\n                        if label == \"Binary or Logical Op\":\n                            y = y + y_sep / 2\n                            center_x = center_x + 10\n                        ax.plot([center_x + txt_width / 2, rect.get_x() + rect.get_width() / 2],\n                                [y, rect.get_y() + rect.get_height()], color='gray', linestyle='-', linewidth=2)\n                    else:\n                        y = rect.get_y() - y_sep - txt_height\n                        ax.plot([center_x + txt_width / 2, center_x + txt_width / 2], [y + txt_height, rect.get_y()],\n                                color='gray',\n                                linestyle='-', linewidth=2)\n                    above = not above\n                    txt.set_position((center_x, y))\n                    pass\n\n    ax.grid(which='major', axis='x', color='gray', linestyle='--', linewidth=1)\n\n    plt.savefig(fname, bbox_inches='tight')\n    plt.close(fig)\n\n\ndef get_type_usage_bar_plot(df, output_dir):\n    def get_plot_label_overview(row):\n        dtype = row['DATATYPE']\n        category = row['DATATYPE_CATEGORY']\n        if not pd.isna(dtype) and (category == 'MPI_constant'):\n            return \"Predefined\"\n        elif (category == 'handle'):\n            return 'Derived'\n        elif (category == 'by_define'):\n            return 'Define'\n        elif (category == 'function_call'):\n            return 'Function'\n        elif category == 'literal_constant' or category == 'arith_expression' or category == 'other_variable':\n            return 'VarP'\n        elif category == \"other_variable_creation_func_exist\":\n            return 'VarU'\n        else:\n            return category\n\n    def get_plot_label_predefined(row, other_detail=False):\n        dtype = 
row['DATATYPE']\n category = row['DATATYPE_CATEGORY']\n if not pd.isna(dtype) and category == 'MPI_constant':\n if dtype == 'MPI_INT' or dtype == 'MPI_INTEGER' or dtype == 'MPI_INTEGER4' or dtype == 'MPI_UNSIGNED':\n return 'INT'\n elif dtype == 'MPI_DOUBLE' or dtype == 'MPI_DOUBLE_PRECISION' or dtype == 'MPI_REAL8' or dtype == 'MPI_LONG_DOUBLE':\n return 'DOUBLE'\n elif dtype == 'MPI_REAL' or dtype == 'MPI_REAL4' or dtype == 'MPI_FLOAT':\n return 'FLOAT'\n elif dtype == 'MPI_LONG' or dtype == 'MPI_LONG_INT' or dtype == 'MPI_LONG_LONG_INT' or dtype == 'MPI_LONG_LONG' or dtype == 'MPI_UNSIGNED_LONG' or dtype == 'MPI_UNSIGNED_LONG_LONG' or dtype == 'MPI_INTEGER8' or dtype == 'MPI_UINT64_T':\n return 'LONG_INT'\n elif dtype == 'MPI_CHAR' or dtype == 'MPI_CHARACTER' or dtype == 'MPI_UNSIGNED_CHAR':\n return 'CHAR'\n elif dtype == 'MPI_BYTE':\n return 'BYTE'\n elif dtype == 'MPI_2INT' or dtype == 'MPI_2INTEGER' or dtype == 'MPI_2REAL' or dtype == 'MPI_2DOUBLE_PRECISION' or dtype == 'MPI_DOUBLE_INT' or dtype == 'MPI_FLOAT_INT' or dtype == 'MPI_COMPLEX' or dtype == 'MPI_DOUBLE_COMPLEX':\n return 'Composed'\n else:\n if other_detail:\n return dtype.replace(\"MPI_\", \"\")\n else:\n if dtype == 'MPI_2INT' or dtype == 'MPI_2INTEGER' or dtype == 'MPI_2REAL' or dtype == 'MPI_2DOUBLE_PRECISION' or dtype == 'MPI_DOUBLE_INT' or dtype == 'MPI_FLOAT_INT' or dtype == 'MPI_COMPLEX' or dtype == 'MPI_DOUBLE_COMPLEX':\n return 'Composed'\n else:\n return \"Other\"\n else:\n return pd.NA\n\n def get_plot_label_derived(row, other_detail=False):\n dtype = row['DATATYPE']\n category = row['DATATYPE_CATEGORY']\n if not pd.isna(dtype) and (category == 'handle'):\n striped_type = dtype.replace(\"MPI_Type_\", \"\").replace(\"create_\", \"\").capitalize()\n if striped_type in ['Inconclusive', 'Dup', 'Contiguous', 'Hvector', 'Indexed',\n 'Struct', 'Subarray', 'Vector']:\n return striped_type\n if other_detail:\n return striped_type\n else:\n return 'Other'\n else:\n return pd.NA\n\n other_detail = False\n # print(pivot_df.head(10))\n df_1 = (df\n .assign(plot_label_overview=lambda df_: df_.apply(get_plot_label_overview, axis=1))\n .assign(\n plot_label_predefined=lambda df_: df_.apply(get_plot_label_predefined, axis=1, args=(other_detail,)))\n .assign(plot_label_derived=lambda df_: df_.apply(get_plot_label_derived, axis=1))\n )\n\n for cat, member in mpi_categories.items():\n if cat == 'coll':\n generate_multi_bar_pot(df_1, member, cat, f\"{output_dir}/{cat}_datatypes.pdf\", use_manual_positioning=1)\n elif cat == 'pt2pt':\n generate_multi_bar_pot(df_1, member, cat, f\"{output_dir}/{cat}_datatypes.pdf\", use_manual_positioning=2)\n else:\n generate_multi_bar_pot(df_1, member, cat, f\"{output_dir}/{cat}_datatypes.pdf\")\n for m in member:\n generate_multi_bar_pot(df_1, [m], m, f\"{output_dir}/{cat}/{m}_datatypes.pdf\")\n\n\ndef generate_multi_bar_pot(df, funcs_to_use, name, fname, use_manual_positioning=False):\n pivot_df_overview = df.pivot_table(values='Code', index='plot_label_overview', columns='call', aggfunc='count',\n fill_value=0)\n funcs_to_use = [f for f in funcs_to_use if f in pivot_df_overview.columns]\n if len(funcs_to_use) == 0:\n return\n pivot_df_predefined = df.pivot_table(values='Code', index='plot_label_predefined', columns='call',\n aggfunc='count', fill_value=0)\n pivot_df_derived = df.pivot_table(values='Code', index='plot_label_derived', columns='call', aggfunc='count',\n fill_value=0)\n\n # ordering of the for better redability\n desired_order = ['BYTE', 'CHAR', 'DOUBLE', 'FLOAT', 'LONG_INT', 
'INT', 'Composed', 'Other']\n new_index = [i for i in desired_order if i in pivot_df_predefined.index]\n assert len(new_index) == len(pivot_df_predefined.index)\n pivot_df_predefined = pivot_df_predefined.reindex(new_index)\n\n desired_order = ['Derived', 'VarU', 'Function', 'Define', 'VarP', 'Predefined']\n new_index = [i for i in desired_order if i in pivot_df_overview.index]\n # dont drop data\n assert len(new_index) == len(pivot_df_overview.index)\n pivot_df_overview = pivot_df_overview.reindex(new_index)\n\n desired_order = ['Inconclusive', 'Dup']\n new_index = [i for i in desired_order if i in pivot_df_derived.index] + [i for i in pivot_df_derived.index if\n i not in desired_order]\n assert len(new_index) == len(pivot_df_derived.index)\n pivot_df_derived = pivot_df_derived.reindex(new_index)\n\n fig = plt.figure(figsize=(16, 4))\n fig.clf()\n\n ax = fig.gca()\n\n def get_percentage(pivot_df, cols):\n cols = [c for c in cols if c in pivot_df.columns]\n if len(cols) == 1:\n sum = pivot_df[cols].sum()\n return pivot_df.assign(percentage=lambda df_: 100 * df_[cols] / sum)['percentage']\n else:\n sum = pivot_df[cols].sum(axis=1).sum()\n return pivot_df.assign(percentage=lambda df_: 100 * df_[cols].sum(axis=1) / sum)['percentage']\n\n df_plot = pd.DataFrame([get_percentage(pivot_df_derived, funcs_to_use).rename(\"Derived Types\"),\n get_percentage(pivot_df_overview, funcs_to_use).rename(\"Overall\"),\n get_percentage(pivot_df_predefined, funcs_to_use).rename(\"Predefined Types\")])\n plot = df_plot.plot.barh(stacked=True, ax=ax, legend=False, edgecolor=\"black\")\n ax.set_ylabel('')\n ax.set_xlabel('% Distribution')\n ax.set_xlim(0.0, 100.0)\n # ax.set_title(f'dtype Usage in {name}')\n # ax.legend(loc='center left', bbox_to_anchor=(1, 0.65))\n # ax.grid(axis='y',color='red', linestyle='dashed', linewidth=3)\n\n label = ax.get_yticklabels()[1]\n label.set_weight(\"bold\")\n\n # get the corresponding colors\n color_pre = None\n color_deriv = None\n\n for bar in ax.containers:\n label = bar.get_label()\n if label == \"Derived\":\n for rect in bar.patches:\n if rect.get_width() > 0:\n color_deriv = rect.get_facecolor()\n elif label == \"Predefined\":\n for rect in bar.patches:\n if rect.get_width() > 0:\n color_pre = rect.get_facecolor()\n\n pos_derived_end = get_percentage(pivot_df_overview, funcs_to_use)['Derived']\n # ax.plot([pos_derived_end, 100], [0.75, 0.25], color='black', linestyle='-', linewidth=1,alpha=0.5)\n # ax.plot([0, 0], [0.75, 0.25], color='black', linestyle='-', linewidth=1,alpha=0.5)\n poly_x = [0, pos_derived_end, 100, 0]\n poly_y = [0.75, 0.75, 0.25, 0.25]\n ax.fill(poly_x, poly_y, color=color_deriv, alpha=0.5)\n\n pos_predefined_end = 100 - get_percentage(pivot_df_overview, funcs_to_use)['Predefined']\n # ax.plot([pos_predefined_end, 0], [1.25, 1.75], color='black', linestyle='-', linewidth=1,alpha=0.5)\n # ax.plot([100, 100], [1.25, 1.75], color='black', linestyle='-', linewidth=1,alpha=0.5)\n poly_x = [0, pos_predefined_end, 100, 100]\n poly_y = [1.75, 1.25, 1.25, 1.75]\n ax.fill(poly_x, poly_y, color=color_pre, alpha=0.5)\n\n above = False\n y_sep = 0.1\n for bar in ax.containers:\n label = bar.get_label()\n\n for rect in bar.patches:\n if rect.get_width() > 0:\n txt = ax.text(rect.get_x(), rect.get_y() + 0.2, label, weight='bold')\n # as data coordinates\n text_coords = ax.transData.inverted().transform(txt.get_window_extent())\n txt_width = text_coords[1][0] - text_coords[0][0]\n txt_height = text_coords[1][1] - text_coords[0][1]\n center_y = rect.get_y() + 
rect.get_height() / 2 - txt_height / 2\n center_x = rect.get_x() + rect.get_width() / 2 - txt_width / 2\n if rect.get_width() > txt_width:\n # text fits\n txt.set_position((center_x, center_y))\n else:\n if above:\n y = rect.get_y() + rect.get_height() + y_sep\n if use_manual_positioning == 1 and label == \"Other\":\n # manual adjustment for better redability\n center_x = center_x - 1\n ax.plot([center_x + txt_width / 2, rect.get_x() + rect.get_width() / 2],\n [y, rect.get_y() + rect.get_height()], color='gray', linestyle='-', linewidth=2)\n else:\n y = rect.get_y() - y_sep - txt_height\n if use_manual_positioning == 2 and label == \"Struct\":\n # manual adjustment for better redability\n center_x = center_x - 2\n ax.plot([center_x + txt_width / 2, rect.get_x() + rect.get_width() / 2],\n [y + txt_height, rect.get_y()],\n color='gray', linestyle='-', linewidth=2)\n\n txt.set_position((center_x, y))\n above = not above\n pass\n ax.grid(which='major', axis='x', color='gray', linestyle='--', linewidth=1)\n plt.savefig(fname, bbox_inches='tight')\n plt.close(fig)\n\n\ndef get_feature_usage_per_category(df, output_dir):\n pivot_df_per_project = df.pivot_table(values='Code', columns='call', aggfunc='nunique',\n fill_value=0)\n num_codes = df['Code'].nunique()\n fig = plt.figure()\n # ax=fig.gca()\n\n cat_to_use = mpi_categories\n cat_to_use['comm_creation'] = mpi_comm_creator_funcs\n cat_to_use['type_creation'] = mpi_type_creation_funcs\n cat_to_use['group_creation'] = mpi_group_creator_funcs\n for cat, member in cat_to_use.items():\n fig.clf()\n members_used = [m for m in member if m in pivot_df_per_project.columns]\n plot = (pivot_df_per_project[members_used]\n .iloc[0]\n .sort_values()\n .apply(lambda x: 100.0 * x / num_codes)\n # select top 20\n [-20:]\n .plot\n .bar(\n xlabel=\"MPI Call\",\n ylabel=\"% of Total Applications\",\n ax=fig.gca()\n )\n )\n for p in plot.patches:\n plot.annotate(f\"{p.get_height():.0f}%\", (p.get_x() * 1.005, p.get_height() * 1.005))\n\n fname = f\"{output_dir}/{cat}_usage.pdf\"\n plt.savefig(fname, bbox_inches='tight')\n plt.close(fig)\n\n\ndef get_comm_usage_mix_plots(df, output_dir):\n def get_plot_label(row):\n creator = row['call']\n if not pd.isna(creator) and creator in mpi_comm_creator_funcs:\n if creator == 'MPI_Comm_dup_with_info' or creator == 'MPI_Comm_idup':\n return 'Variants of MPI_Comm_dup'\n return creator\n else:\n return \"ERROR\"\n\n select_df = df[df['call'].isin(mpi_comm_creator_funcs)].assign(\n plot_labels=lambda df_: df_.apply(get_plot_label, axis=1))\n\n fig = plt.figure()\n plt.clf()\n num_calls = len(select_df)\n plt.title(f'usage of MPI communicator creation functions ({num_calls} calls)', color='black')\n select_df.groupby('plot_labels').size().plot.pie(ax=fig.gca())\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.1))\n\n plt.savefig(f\"{output_dir}/CommCreation.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.close(fig)\n\n def get_plot_label(row):\n comm = row['COMMUNICATOR']\n category = row['COMMUNICATOR_CATEGORY']\n if not pd.isna(comm) and (\n comm in predefined_mpi_communicator_consants or comm in mpi_comm_creator_funcs) or category == 'handle':\n if comm == 'MPI_Comm_split_type':\n return 'Comm_split'\n elif comm == 'MPI_COMM_NULL':\n # com null is basically not used anywas, so there is nothing meaningful to see in the plots\n return pd.NA\n return comm.replace(\"MPI_\", \"\").capitalize()\n else:\n if category == 'other_variable':\n return 'VarP'\n else:\n return 'VarU'\n return category\n\n 
plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"comm\", output_dir)\n\n\ndef get_rank_usage_mix_plots(df, output_dir):\n def get_plot_label(row):\n rank = row['RANK']\n category = row['RANK_CATEGORY']\n if not pd.isna(rank) and (rank in predefined_mpi_constants):\n return rank\n elif rank == \"0\":\n return \"Literal_0\"\n else:\n return category\n\n plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"rank\", output_dir)\n\n\ndef get_info_usage_mix_plots(df, output_dir):\n def get_plot_label(row):\n info = row['INFO']\n category = row['INFO_CATEGORY']\n\n if not pd.isna(info) and (info in predefined_mpi_constants):\n return info\n else:\n return category\n\n plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"info\", output_dir)\n\n\ndef get_tag_usage_mix_plots(df, output_dir):\n def get_plot_label(row):\n tag = row['TAG']\n category = row['TAG_CATEGORY']\n if not pd.isna(tag) and (tag in predefined_mpi_constants):\n return tag\n elif category == \"literal_constant\":\n # if tag in ['0','1','10','20','30','50','2','40']:\n if tag in ['0', '1', '42']:\n return tag\n else:\n return \"other literal constant\"\n else:\n return category\n\n # pivot_df = (\n # df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1))\n # .pivot_table(values='Code', index='plot_labels', columns='call', aggfunc='count',\n # fill_value=0)\n # )\n # print(pivot_df.sum(axis=1).nlargest(25))\n\n plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"tag\", output_dir)\n\n\ndef get_status_usage_mix_plots(df, output_dir):\n def get_plot_label(row):\n status = row['STATUS']\n category = row['STATUS_CATEGORY']\n if not pd.isna(status) and (status in predefined_mpi_constants):\n return status\n else:\n return 'other_variable'\n # return category\n\n plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"status\", output_dir)\n\n\ndef get_errhandler_mix_plots(df, output_dir):\n df = df[df['call'].isin(['MPI_File_set_errhandler', 'MPI_Comm_set_errhandler', 'MPI_Win_set_errhandler',\n 'MPI_Session_set_errhandler', ])]\n\n def get_plot_label(row):\n handler = row['ERRHANDLER']\n category = row['ERRHANDLER_CATEGORY']\n if not pd.isna(handler) and (handler in predefined_mpi_constants):\n return handler\n else:\n return category\n\n plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"errhandler\", output_dir)\n\n\ndef get_const_count_usage_plots(df, output_dir):\n col_to_use = 'POLYXFER_NUM_ELEM_NNI'\n\n # first pass: elimiate all calls that do not have a count arg and transforme everything else to number\n count_df = df[df[col_to_use].notnull()].copy()\n count_df[col_to_use] = pd.to_numeric(count_df[col_to_use], errors='coerce')\n\n na_count = count_df[col_to_use].isna().sum()\n num_calls = len(count_df)\n percent = na_count * 100.0 / num_calls\n\n # drop nan and convert to int (better histogram)\n count_df = count_df[count_df[col_to_use].notnull()]\n count_df[col_to_use] = count_df[col_to_use].astype(int)\n\n print(\"Constants used as count argument\")\n print(count_df[col_to_use].value_counts())\n y_max = count_df[col_to_use].value_counts().nlargest(2).iloc[1] * 2\n\n max_c_to_show = 128\n\n fig = plt.figure()\n plt.clf()\n (count_df\n .loc[count_df[col_to_use].notnull()] # boolean mask\n .astype({col_to_use: int})\n .query(f\"{col_to_use} <= {max_c_to_show}\")\n .loc[:, col_to_use] # select column\n .plot\n .hist(\n bins=max_c_to_show, ax=fig.gca(),\n 
xlabel=\"count\", ylabel=\"num of usage\",\n title=f\"Overall usage of constant count arguments {percent:.2f}% has non-constant value\",\n ylim=(0, y_max),\n ))\n plt.savefig(f\"{output_dir}/count_usage.pdf\")\n plt.close(fig)\n\n def get_plot_label(row):\n value = row['POLYXFER_NUM_ELEM_NNI']\n category = row['POLYXFER_NUM_ELEM_NNI_CATEGORY']\n if not pd.isna(value) and (value in predefined_mpi_constants):\n return value\n # elif value in ['0', '1', '2', '3', '4']:\n # return value\n else:\n return category\n\n print(\"with handle as dtype\")\n plot_df(df[df['DATATYPE_CATEGORY'] == 'handle'].assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)),\n \"count\", output_dir)\n\n print(\"with float as dtype\")\n plot_df(df[df['DATATYPE'].isin(\n ['MPI_FLOAT', 'MPI_REAL', 'MPI_DOUBLE', 'MPI_DOUBLE_PRECISION', 'MPI_REAL8', 'MPI_LONG_DOUBLE'])].assign(\n plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"count\", output_dir)\n\n print(\"with BYTE as dtype\")\n plot_df(df[df['DATATYPE'].isin(\n ['MPI_BYTE', 'MPI_CHAR', 'MPI_CHARACTER'])].assign(\n plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"count\", output_dir)\n\n print(\"with int as dtype\")\n plot_df(df[df['DATATYPE'].isin(\n ['MPI_INT', 'MPI_INTEGER', 'MPI_UNSIGNED', 'MPI_LONG', 'MPI_LONG_INT', 'MPI_LONG_LONG_INT', 'MPI_LONG_LONG',\n 'MPI_UNSIGNED_LONG', 'MPI_UNSIGNED_LONG_LONG', 'MPI_INTEGER8', 'MPI_UINT64_T'])].assign(\n plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"count\", output_dir)\n\n # plot_df(df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1)), \"count\", output_dir)\n\n\ndef get_reduce_op_usage_plots(df, output_dir):\n def get_plot_label(row):\n value = row['OPERATION']\n category = row['OPERATION_CATEGORY']\n if not pd.isna(value) and (value in predefined_mpi_constants):\n if value.startswith('MPI_B') or value.startswith('MPI_L'):\n return \"Binary or Logical Op\"\n if value == 'MPI_MINLOC' or value == 'MPI_MAXLOC':\n return \"MIN/MAXLOC\"\n # if value == 'MPI_PROD':\n # return 'other_variable'\n return value.replace(\"MPI_\", \"\").capitalize()\n else:\n if category == \"other_variable_creation_func_exist\" or category == \"function_call\":\n return 'VarU'\n elif category == 'other_variable':\n return 'VarP'\n return category\n\n new_df = df.assign(plot_labels=lambda df_: df_.apply(get_plot_label, axis=1))\n\n print(\"total number of MPI_PROD\")\n print(len(df[df['OPERATION'] == 'MPI_PROD']))\n\n plot_df(new_df, \"op\", output_dir)\n\n print(\"Are Operations created commutative?:\")\n print(df[df['call'] == 'MPI_Op_create']['LOGICAL'].value_counts())\n\n\ndef get_num_defines_resolved(df, output_dir):\n print(\"Parameters given by preprocessor Define\")\n # some_params_defined=df[df['params_by_define'] != \"[]\"]\n print(df['params_by_define'].value_counts())\n pass\n\n\ndef get_converter_funcs(df, output_dir):\n print(\"Are f2c or c2f funcs used:\")\n print(df[df['call'].isin(mpi_converter_funcs)]['call'].value_counts())\n pass\n\n\ndef get_codes_per_feature(df, output_dir):\n to_plot = ['collective', 'pt2pt', 'comm_group', 'datatype', 'error', 'file', 'info', 'arrt_cache', 'persistent',\n 'one_sided', 'process_mgmt', 'tool_iface', 'topology', 'dtype_constr', 'p2p_noreq']\n fig = plt.figure()\n fig.clf()\n table = (\n pd.pivot_table(df, values='src_location', index='Code', columns='call',\n aggfunc='count', fill_value=0)\n # onl sum up hte columns that are there (unused MPI funcs are not there)\n .assign(collective=lambda df_: 
df_[df_.columns.intersection(mpi_coll)].sum(axis=1))\n .assign(pt2pt=lambda df_: df_[df_.columns.intersection(mpi_p2p)].sum(axis=1))\n .assign(\n p2p_noreq=lambda df_: df_[df_.columns.intersection(set(mpi_scorep_p2p) - mpi_persistent - mpi_request)].sum(\n axis=1))\n .assign(comm_group=lambda df_: df_[df_.columns.intersection(mpi_comm_group)].sum(axis=1))\n .assign(\n datatype=lambda df_: df_[df_.columns.intersection(mpi_types)].sum(axis=1))\n .assign(error=lambda df_: df_[df_.columns.intersection(mpi_error)].sum(axis=1))\n .assign(file=lambda df_: df_[df_.columns.intersection(mpi_io)].sum(axis=1))\n .assign(info=lambda df_: df_[df_.columns.intersection(mpi_info)].sum(axis=1))\n .assign(arrt_cache=lambda df_: df_[df_.columns.intersection(mpi_attrib)].sum(axis=1))\n .assign(persistent=lambda df_: df_[df_.columns.intersection(mpi_persistent)].sum(axis=1))\n .assign(one_sided=lambda df_: df_[df_.columns.intersection(mpi_rma)].sum(axis=1))\n .assign(process_mgmt=lambda df_: df_[df_.columns.intersection(mpi_processm)].sum(axis=1))\n .assign(tool_iface=lambda df_: df_[df_.columns.intersection(mpi_tools)].sum(axis=1))\n .assign(topology=lambda df_: df_[df_.columns.intersection(mpi_topo)].sum(axis=1))\n .assign(requests=lambda df_: df_[df_.columns.intersection(mpi_request)].sum(axis=1))\n .assign(misc=lambda df_: df_[df_.columns.intersection(mpi_misc)].sum(axis=1))\n .assign(dtype_constr=lambda df_: df_[df_.columns.intersection(mpi_types_constructor_only)].sum(axis=1))\n [to_plot]\n )\n\n plot = (table\n # count values larger 0\n .agg(lambda col: 100.0 * (col > 0).sum() / len(col))\n .sort_values(axis=0)\n .plot\n .bar(\n xlabel=\"Unique MPI Feature\",\n ylabel=\"% of Total Applications\",\n ax=fig.gca()\n )\n )\n for p in plot.patches:\n plot.annotate(f\"{p.get_height():.0f}%\", (p.get_x() * 1.005, p.get_height() * 1.005))\n\n # plt.show()\n\n plt.savefig(f\"{output_dir}/Features_By_code.pdf\", bbox_extra_artists=(plot,), bbox_inches='tight')\n plt.close(fig)\n pass\n\n\ndef get_hybrid_codes(df, output_dir):\n fig = plt.figure()\n fig.clf()\n indicators = ['openmp', 'openacc', 'cuda_device_kernel', 'cuda_global_kernel', 'opencl_global', 'opencl_kernel']\n indicators = ['openmp', 'openacc', 'cuda', 'opencl']\n to_plot_labels = ['OpenMP', 'None', 'CUDA\\nOpenMP', 'CUDA\\nOpenCL', 'OpenMP\\nOpenACC', 'CUDA', 'CUDA\\nOpenACC']\n to_plot = ['OpenMP', 'No', 'OpenMP_CUDA', 'CUDA_OpenCL', 'OpenMP_OpenACC', 'CUDA', 'CUDA_OpenACC',\n 'OpenMP_OpenCL', 'OpenMP_CUDA_OpenACC', 'OpenMP_CUDA_OpenCL']\n\n table = (\n pd.pivot_table(df, values='src_location', index='Code', columns='call',\n aggfunc='count', fill_value=0)\n .assign(cuda=lambda df_: df_[['cuda_device_kernel', 'cuda_global_kernel']].sum(axis=1))\n .assign(opencl=lambda df_: df_[['opencl_global', 'opencl_kernel']].sum(axis=1))\n # used for the plots\n .assign(No=lambda df_: df_[indicators].sum(axis=1) == 0)\n .assign(OpenMP=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] == 0 and row['openacc'] == 0 and row[\n 'opencl'] == 0 else 0, axis=1))\n .assign(OpenMP_OpenACC=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] == 0 and row['openacc'] > 0 and row[\n 'opencl'] == 0 else 0, axis=1))\n .assign(OpenMP_CUDA=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] > 0 and row['openacc'] == 0 and row[\n 'opencl'] == 0 else 0, axis=1))\n .assign(CUDA=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] == 0 and row['cuda'] > 0 and row['openacc'] == 0 and row[\n 'opencl'] == 0 else 
0, axis=1))\n .assign(CUDA_OpenCL=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] == 0 and row['cuda'] > 0 and row['openacc'] == 0 and row[\n 'opencl'] > 0 else 0, axis=1))\n .assign(CUDA_OpenACC=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] == 0 and row['cuda'] > 0 and row['openacc'] > 0 and row[\n 'opencl'] == 0 else 0, axis=1))\n .assign(OpenMP_OpenCL=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] == 0 and row['openacc'] == 0 and row[\n 'opencl'] > 0 else 0, axis=1))\n .assign(OpenMP_CUDA_OpenACC=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] > 0 and row['openacc'] > 0 and row[\n 'opencl'] == 0 else 0, axis=1))\n .assign(OpenMP_CUDA_OpenCL=lambda df_: df_.apply(\n lambda row: 1 if row['openmp'] > 0 and row['cuda'] > 0 and row['openacc'] == 0 and row[\n 'opencl'] > 0 else 0, axis=1))\n )\n\n plot = (table\n [to_plot]\n .agg(lambda col: 100.0 * (col > 0).sum() / len(col))\n .sort_values(axis=0)\n .plot\n .bar(\n xlabel=\"X in MPI+X\",\n ylabel=\"% of Total Applications\",\n ax=fig.gca(),\n )\n )\n for p in plot.patches:\n plot.annotate(f\"{p.get_height():.0f}%\", (p.get_x() * 1.005, p.get_height() * 1.005))\n\n # does not work with sorting:\n # fig.gca().set_xticklabels(to_plot_labels)\n\n plt.savefig(f\"{output_dir}/HybridUsage.pdf\", bbox_extra_artists=(plot,), bbox_inches='tight')\n plt.close(fig)\n\n\ndef get_thread_level(df, output_dir):\n def maximum_thread_level(column):\n set_to_check = column.unique()\n if 'MPI_THREAD_MULTIPLE' in set_to_check:\n return 'MULTIPLE'\n elif 'MPI_THREAD_SERIALIZED' in set_to_check:\n return 'SERIALIZED'\n elif 'MPI_THREAD_FUNNELED' in set_to_check:\n return 'FUNNELED'\n elif 'MPI_THREAD_SINGLE' in set_to_check:\n return 'SINGLE'\n else:\n # pd.NA\n return 'other_variable'\n\n plot = (df\n .query(f'call==\"MPI_Init_thread\"')\n .groupby('Code')['THREAD_LEVEL']\n .agg(maximum_thread_level)\n .value_counts(normalize=True, sort=True)\n .apply(lambda x: 100 * x)\n .plot\n .bar())\n\n for p in plot.patches:\n plot.annotate(f\"{p.get_height():.0f}%\", (p.get_x() * 1.005, p.get_height() * 1.005))\n\n plot.set_xlabel(\"Maximum Required Thread Level\")\n plot.set_ylabel(\"Percentage of Applications using MPI_Init_thread\")\n plt.savefig(f\"{output_dir}/thread_level.pdf\", bbox_extra_artists=(plot,), bbox_inches='tight')\n\n\ndef get_version_per_code(df, output_dir):\n fig = plt.figure()\n fig.clf()\n\n plot = (\n df\n .groupby('Code')\n ['version']\n .max()\n .plot\n .hist(\n ax=fig.gca(),\n # does not work for histogram?\n xlabel=\"Minimum Required MPI version\",\n ylabel=\"Application Count\",\n )\n )\n\n print(\"classified as 4.0:\")\n print(df[df['version'] == 4.0]['call'].value_counts())\n\n plot.set_xlabel(\"Minimum Required MPI version\")\n plot.set_ylabel(\"Application Count\")\n\n plt.savefig(f\"{output_dir}/versions_by_code.pdf\", bbox_extra_artists=(plot,), bbox_inches='tight')\n plt.close(fig)\n\n\ndef get_creation_base_types(df, output_dir):\n def get_plot_label_overview(row):\n dtype = row['DATATYPE']\n category = row['DATATYPE_CATEGORY']\n if not pd.isna(dtype) and (category == 'MPI_constant' or category == 'handle'):\n if dtype == 'MPI_INT' or dtype == 'MPI_INTEGER' or dtype == 'MPI_UNSIGNED':\n return 'INT'\n elif dtype == 'MPI_DOUBLE' or dtype == 'MPI_DOUBLE_PRECISION' or dtype == 'MPI_REAL8' or dtype == 'MPI_LONG_DOUBLE':\n return 'DOUBLE'\n elif dtype == 'MPI_REAL' or dtype == 'MPI_FLOAT':\n return 'FLOAT'\n elif dtype == 'MPI_LONG' or dtype == 
'MPI_LONG_INT' or dtype == 'MPI_LONG_LONG_INT' or dtype == 'MPI_LONG_LONG' or dtype == 'MPI_UNSIGNED_LONG' or dtype == 'MPI_UNSIGNED_LONG_LONG' or dtype == 'MPI_INTEGER8' or dtype == 'MPI_UINT64_T':\n return 'LONG_INT'\n elif dtype == 'MPI_CHAR' or dtype == 'MPI_CHARACTER' or dtype == 'MPI_UNSIGNED_CHAR':\n return 'CHAR'\n elif dtype == 'MPI_BYTE':\n return 'MPI_BYTE'\n elif dtype.startswith(\"MPI_Type\") or dtype == 'inconclusive':\n # return dtype\n return 'Derived'\n # elif dtype == 'MPI_2INT' or dtype == 'MPI_2INTEGER' or dtype == 'MPI_2REAL' or dtype=='MPI_2DOUBLE_PRECISION' or dtype == 'MPI_DOUBLE_INT'or dtype == 'MPI_FLOAT_INT' or dtype == 'MPI_COMPLEX'or dtype == 'MPI_DOUBLE_COMPLEX':\n # return 'Predefined \"composed\" types'\n else:\n return \"other predefined type\"\n else:\n if category == 'literal_constant' or category == 'function_call':\n return 'other_variable'\n return category\n\n def get_plot_label_detail(row):\n dtype = row['DATATYPE']\n category = row['DATATYPE_CATEGORY']\n if not pd.isna(dtype) and (category == 'MPI_constant' or category == 'handle'):\n if dtype.startswith(\"MPI_Type\") or dtype == 'inconclusive':\n return dtype\n # return 'Derived'\n else:\n return \"predefined MPI type\"\n else:\n if category == 'literal_constant' or category == 'function_call':\n return 'other_variable'\n return category\n\n overview_table = (df[df['call'].isin(mpi_type_creation_funcs)]\n .assign(plot_labels=lambda df_: df_.apply(get_plot_label_overview, axis=1))\n .pivot_table(values='Code', index='call', columns='plot_labels', aggfunc='count', fill_value=0)\n .reset_index()\n )\n print(overview_table.to_string())\n fig = plt.figure()\n fig.clf()\n plot = (df[df['call'].isin(mpi_type_creation_funcs)]\n .assign(plot_labels=lambda df_: df_.apply(get_plot_label_overview, axis=1))\n .pivot_table(values='Code', index='call', columns='plot_labels', aggfunc='count', fill_value=0)\n .plot.bar(stacked=True, ax=fig.gca())\n )\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.85))\n plt.savefig(f\"{output_dir}/basetypes_overview.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n\n base_table = (df[df['call'].isin(mpi_type_creation_funcs)]\n .assign(plot_labels=lambda df_: df_.apply(get_plot_label_detail, axis=1))\n .pivot_table(values='Code', index='call', columns='plot_labels', aggfunc='count', fill_value=0)\n .reset_index()\n )\n print(base_table.to_string())\n fig.clf()\n plot = (df[df['call'].isin(mpi_type_creation_funcs)]\n .assign(plot_labels=lambda df_: df_.apply(get_plot_label_detail, axis=1))\n .pivot_table(values='Code', index='call', columns='plot_labels', aggfunc='count', fill_value=0)\n .plot.bar(stacked=True, ax=fig.gca())\n )\n\n lgd = plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.85))\n plt.savefig(f\"{output_dir}/basetypes_detail.pdf\", bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.close(fig)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', default='output.csv',\n help='the result data to visualize')\n parser.add_argument('--output_dir', default='visualization',\n help='directory where all the plots will be created')\n\n args = parser.parse_args()\n df = pd.read_csv(args.input, low_memory=False)\n\n # make dirs to organize all the plots\n for cat in mpi_categories:\n os.makedirs(args.output_dir + \"/\" + cat, exist_ok=True)\n\n print(\"Percentage of calls, where analysis completely failed: (e.g. 
wrong formatting)\")\n all_calls = len(df)\n failed_calls = len(df[df['analysis_successful'] == False])\n percent = 100.0 * failed_calls / all_calls\n print(\"%.2f%%\" % percent)\n df = df[df['analysis_successful'] != False]\n # if a col is missing (e.g. an openmp pragma) we still keep this record\n\n get_type_usage_bar_plot(df, args.output_dir)\n get_creation_base_types(df, args.output_dir)\n get_num_type_usage_category_plots(df, args.output_dir)\n get_reduce_op_usage_plots(df, args.output_dir)\n get_const_count_usage_plots(df, args.output_dir)\n get_comm_usage_mix_plots(df, args.output_dir)\n get_rank_usage_mix_plots(df, args.output_dir)\n get_tag_usage_mix_plots(df, args.output_dir)\n get_info_usage_mix_plots(df, args.output_dir)\n get_errhandler_mix_plots(df, args.output_dir)\n get_status_usage_mix_plots(df, args.output_dir)\n get_num_defines_resolved(df, args.output_dir)\n\n get_codes_per_feature(df, args.output_dir)\n get_version_per_code(df, args.output_dir)\n get_hybrid_codes(df, args.output_dir)\n get_converter_funcs(df, args.output_dir)\n get_thread_level(df, args.output_dir)\n\n get_feature_usage_per_category(df, args.output_dir)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tudasc/mpi-arg-usage","sub_path":"generate_plots.py","file_name":"generate_plots.py","file_ext":"py","file_size_in_byte":44521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"42542438003","text":"'''Map Professor actions to execution functions'''\nfrom xlibs import exc\nfrom xlibs.professor import start_warming\n\n\nACTION_MAP = {\n 'warm_up': start_warming,\n}\n\n\ndef get_executor(*, action: str):\n try:\n return ACTION_MAP[action]\n\n except KeyError as error:\n raise exc.XLambdaExceptionInvalidRequest(\n f'Action \"{action}\" is not recognized, expected: '\n f'{\", \".join(ACTION_MAP.keys())}.'\n ) from error\n","repo_name":"DavidARivkin/xlambda","sub_path":"xlibs/professor/action_mapper.py","file_name":"action_mapper.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"38464645412","text":"import matplotlib.pyplot as plt \nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport pylab as pl\nclass field:\n def __init__(self,V0=[],a=[],b=[],V=[]):\n self.V0=V0 \n self.a=a\n self.b=b\n for i in range(201):\n self.a.append(0) \n for j in range(50):\n self.V0.append(self.a)\n \n for i in range(50):\n self.b.append(0)\n for i in range(101):\n self.b.append(1)\n for i in range(50):\n self.b.append(0)\n for i in range(101):\n self.V0.append(self.b) \n \n for i in range(50):\n self.V0.append(self.a)\n self.V=V\n def update_V(self):\n Vnew=self.V0\n for k in range(100):\n Vold=Vnew\n Vnew=[self.a]\n for i in range(199):\n vtemp=[0]\n for j in range(199):\n vtemp.append((Vold[i][j+1]+Vold[i+2][j+1]+Vold[i+1][j]+Vold[i+1][j+2])/4)\n vtemp.append(0)\n Vnew.append(vtemp)\n Vnew.append(self.a)\n for i in range(101):\n for j in range(101):\n Vnew[i+50][j+50]=1\n\n \n for k in range(10000):\n Vold=Vnew\n Vnew=[self.a]\n for i in range(199):\n vtemp=[0]\n for j in range(199):\n vtemp.append((Vold[i][j+1]+Vold[i+2][j+1]+Vold[i+1][j]+Vold[i+1][j+2])/4)\n vtemp.append(0)\n Vnew.append(vtemp)\n Vnew.append(self.a)\n for i in range(101):\n for j in range(101):\n Vnew[i+50][j+50]=1\n deltaV=[]\n for i in range(201):\n for j in range(201):\n deltaV.append(abs(Vnew[i][j]-Vold[i][j]))\n 
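# relaxation has converged once the total change summed over the whole grid\n # drops below the tolerance scaled by the 101x101 plate size\n 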
S=sum(deltaV)\n if S<0.00001*101*101:\n self.V=Vnew\n print(k)\n break \n\nv=[]\nx=[]\ny=[]\nz=field()\nz.update_V()\nfor i in range(201):\n for j in range(201):\n v.append(z.V[i][j])\n y.append(1-0.01*i)\n x.append(-1+0.01*j)\nfig=plt.figure() \nax=fig.add_subplot(111, projection='3d') \nax.plot(x,y,v,\".\") \nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nax.set_zlabel(\"V\")\nplt.show()\n","repo_name":"woshishuishuishuishui/compuational_physics_N2014301020042","sub_path":"5.1 V(x,y).py","file_name":"5.1 V(x,y).py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"21690141814","text":"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom tigramite import data_processing as pp\nfrom tigramite import plotting as tp\nfrom tigramite.pcmci import PCMCI\nfrom tigramite.independence_tests.parcorr import ParCorr\n\nfrom numpy.random import multivariate_normal\n\ndef simulate_dependent_autocorrelated_variables(n, rho, sigma):\n # Generate random noise\n np.random.seed(123)\n epsilon = np.random.multivariate_normal([0, 0], [[sigma**2, rho*sigma**2], [rho*sigma**2, sigma**2]], n)\n\n # Initialize the variables\n x = np.zeros(n)\n y = np.zeros(n)\n\n # Generate the values for the variables\n for i in range(1, n):\n x[i] = rho * x[i-1] + epsilon[i, 0]\n y[i] = rho * y[i-1] + epsilon[i, 1]\n\n return x, y\n\n\n\n\n# Simulation parameters\nn = 1000 # Number of data points\nrho = 0.8 # Autocorrelation coefficient\nsigma = 1.0 # Standard deviation\n\n# Simulate dependent autocorrelated variables\nx, y = simulate_dependent_autocorrelated_variables(n, rho, sigma)\n\nplt.scatter(x, y)\nplt.show()\n\nplt.plot(x)\nplt.plot(y)\nplt.show()\n\ndf = pd.DataFrame({'x':x, 'y':y})\nvar_names = ['X', 'Y']\n\nx_lag = df['x'].shift(-1)\nx_lag = x_lag.dropna()\n\nnp.corrcoef(df['x'][:-1], x_lag)\n\n\nparcorr = ParCorr(significance='analytic', verbosity=3)\ndataframe1 = pp.DataFrame(df.values, datatime={0: np.arange(len(df))}, var_names=var_names)\npcmci = PCMCI(dataframe=dataframe1, cond_ind_test=parcorr, verbosity=4)\n\nresults1 = pcmci.run_pcmci(tau_max=2, tau_min=1, pc_alpha=0.05, alpha_level=0.05, fdr_method='fdr_bh')\n","repo_name":"benjamino15/Thesis-ENSO","sub_path":"pcmci_simulation_example.py","file_name":"pcmci_simulation_example.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26995908300","text":"from sklearn.datasets import load_iris\niris = load_iris()\n\n#print iris.data\nx = iris.data\ny = iris.target\n#print y\n\n\n################################\n##Using Classification Algos\n##\n##Estimator : Scikit learn algo.\n##sklearn.linear_model >> Logistic Regression\n##Load estimator\n##run estimator.fit(X,y ) >>>> \n##run predict (on test values)\n##\n##from sklearn.linear_model import LogisticRegression\n##estimator.fit(X,y)\n################################\n","repo_name":"dimpy-chhabra/ACM_Winter_Workshop","sub_path":"Day_4/Scikit-Learn/code_01.py","file_name":"code_01.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"17744731831","text":"import random\n\ndef main():\n a = [int(random.random()*100) for _ in range(50)]\n print(a)\n print(BubbleSortMax(a))\n\n# The minimum moves to the front\ndef BubbleSortMin(a):\n i = 0\n\n while i < len(a):\n j = len(a) - 1\n while j > i:\n if 
a[j] < a[j-1]:\n a[j], a[j-1] = a[j-1], a[j]\n j -= 1\n i += 1\n return a\n\n# The maximum moves to the back\ndef BubbleSortMax(a):\n i = len(a)\n\n while i > 0:\n j = 0\n while j < i - 1:\n if a[j] > a[j+1]:\n a[j], a[j+1] = a[j+1], a[j]\n j += 1\n i -= 1\n return a\n\nif __name__ == '__main__':\n main()\n","repo_name":"m-m0/Algorithmen-Datenstrukturen","sub_path":"3/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8611931663","text":"import streamlit as st\nimport pandas as pd\nimport base64\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nst.title('NBA Player Stats Explorer')\n\nst.markdown(\"\"\"\nThis app is a tool to explore the **NBA** player stats using a web scraper.\n* **Python libs :** base64, matplotlib, numpy, pandas, seaborn, streamlit\n* **Data source :** [Basketball-reference.com](https://www.basketball-reference.com/).\n\"\"\")\n\nst.sidebar.header('User Input Features')\nselected_year = st.sidebar.selectbox('Year', list(reversed(range(1950,2023))))\n\n# Web scraper to get the data\n@st.cache\ndef load_data(year):\n url = 'https://www.basketball-reference.com/leagues/NBA_' + str(year) + '_per_game.html'\n html = pd.read_html(url, header = 0)\n df = html[0]\n raw = df.drop(df[df.Age == 'Age'].index) #remove the header row\n raw = raw.fillna(0) #fill the missing values with 0\n playerstats = raw.drop(['Rk'], axis=1) #remove the Rk column\n\n return playerstats\n\nplayerstats = load_data(selected_year)\n\n#sidebar - Team Selection\nsorted_unique_teams = sorted(playerstats.Tm.unique())\nselected_team = st.sidebar.multiselect('Team', sorted_unique_teams, sorted_unique_teams)\n\n#sidebar - Position Selection\nunique_pos = ['C', 'PF', 'SF', 'PG', 'SG']\nselected_pos = st.sidebar.multiselect('Position', unique_pos, unique_pos)\n\n#filtering data\ndf_selected_team = playerstats[(playerstats.Tm.isin(selected_team)) & (playerstats.Pos.isin(selected_pos))]\n\nst.header('Display Player Stats of Selected Team(s)')\nst.write('Data Dimension :' + str(df_selected_team.shape[0]) + ' rows and ' + str(df_selected_team.shape[1]) + ' columns')\nst.dataframe(df_selected_team.astype(str))\n\n#Download the data\n# https://discuss.streamlit.io/t/how-to-download-a-file-from-a-streamlit-app/10\n\ndef filedownload(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"playerstats.csv\">Download csv file</a>'\n return href\n\nst.markdown(filedownload(df_selected_team), unsafe_allow_html=True)\n\n#Heatmap\nif st.button('Intercorrelation Heatmap'):\n st.header('Heatmap of Intercorrelation')\n df_selected_team.to_csv('playerstats.csv', index=False)\n df = pd.read_csv('playerstats.csv')\n\n corr = df.corr()\n mask = np.zeros_like(corr)\n mask[np.triu_indices_from(mask)] = True\n with sns.axes_style(\"white\"):\n f, ax = plt.subplots(figsize=(11, 9))\n ax = sns.heatmap(corr, mask=mask, vmax=1, square=True)\n st.pyplot(f)\n","repo_name":"alvinalzali/Penguins-heroku","sub_path":"dynamicStatsExplorer.py","file_name":"dynamicStatsExplorer.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"19592248201","text":"\"\"\"Dynamic programming\"\"\"\n\n\ndef fib_memoization(number: int) -> int:\n \"\"\"Fibonacci uses memoization\"\"\"\n memo = [0, 1]\n for i in range(2, number + 
1):\n memo.append(memo[i - 1] + memo[i - 2])\n return memo[number]\n\n\ndef grid_traveler(rows: int, columns: int, memoization: dict) -> int:\n \"\"\"\n Return the number of paths from the top-left to the bottom-right of the grid (rows x columns)\n \"\"\"\n matrix_key = str(rows) + \",\" + str(columns)\n if matrix_key in memoization:\n return memoization[matrix_key]\n if (rows == 0) or (columns == 0):\n return 0\n if (rows == 1) and (columns == 1):\n return 1\n memoization[matrix_key] = grid_traveler(\n rows - 1, columns, memoization\n ) + grid_traveler(rows, columns - 1, memoization)\n return memoization[matrix_key]\n","repo_name":"PhucHuynhVan/Data-Structure-And-Algorithms","sub_path":"dynamic_programming/dynamic_programming.py","file_name":"dynamic_programming.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16701509558","text":"from django.test import TestCase\nfrom oscar.apps.dashboard.menu import get_nodes\nfrom oscar.core.compat import get_user_model\n\n\nUser = get_user_model()\n\n\nclass TestCategory(TestCase):\n\n def setUp(self):\n self.staff_user = User.objects.create_user('staff', 'staff@example.com',\n 'pw1')\n self.staff_user.is_staff = True\n self.staff_user.save()\n self.non_staff_user = User.objects.create_user('nostaff',\n 'nostaff@example.com',\n 'pw2')\n self.non_staff_user.save()\n\n def test_staff_user_has_menu(self):\n menu = get_nodes(self.staff_user)\n self.assertTrue(menu)\n\n def test_non_staff_user_has_empty_menu(self):\n menu = get_nodes(self.non_staff_user)\n self.assertEqual(menu, [])\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/17299_test_nav.py","file_name":"17299_test_nav.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"8353078413","text":"from flask import Flask, request, abort\nfrom flaskext.mysql import MySQL\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\n\nimport execjs\n\nfrom datetime import datetime, timedelta\n\nimport re\n\nimport os\n\napp = Flask(__name__)\n\n# config\napp.config['MYSQL_DATABASE_USER'] = '...'\napp.config['MYSQL_DATABASE_PASSWORD'] = '...'\napp.config['MYSQL_DATABASE_DB'] = '...'\napp.config['MYSQL_DATABASE_HOST'] = '...'\n\n# Channel Access Token\nline_bot_api = LineBotApi('...')\n# Channel Secret\nhandler = WebhookHandler('...')\n\nerrString = '輸入「星期日 90」\\n會幫您紀錄為向曹賣買進的價格\\n\\n輸入「星期一 早上 90」\\n會幫您紀錄為星期一早上的價格\\n\\n輸入「下午 90」\\n會幫您紀錄為今日下午的價格\\n\\n輸入「90」\\n會幫您紀錄為現在的價格\\n\\n輸入「0」\\n會幫您清除現在的價格'\n\n# Listen for all POST requests coming to /callback\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n return 'OK'\n\n# Handle button postbacks\n@handler.add(PostbackEvent)\ndef handle_postback(event):\n data = event.postback.data\n if (data == \"buy\"):\n message = TextSendMessage(text=\"多少錢呢?\")\n line_bot_api.reply_message(event.reply_token, message)\n \n\n# Handle incoming messages\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n sourceType = event.source.type\n user_id = event.source.user_id\n profile = 
line_bot_api.get_profile(user_id)\n y = event.message.text\n print(y)\n\n############\n\n calendar = (datetime.utcnow() + timedelta(hours=8)).isocalendar()\n week = calendar[1]\n day = calendar[2]\n if (day == 7):\n week = (datetime.utcnow() + timedelta(days=1) + timedelta(hours=8)).isocalendar()[1]\n ap = time = (datetime.utcnow() + timedelta(hours=8)).strftime(\"%p\")\n\n############\n \n dataListIndex = 0\n y = y.replace(\" \", \"\")\n match = re.search('星期(.)([^0-9]*)([0-9]+)$', y)\n if (match):\n matchDay = match.group(1)\n matchAP = match.group(2)\n y = match.group(3)\n if (matchDay == '日' or matchDay == '天'):\n dataListIndex = 3\n else:\n offset = 0\n if (matchAP == '早上' or matchAP == '上午' or matchAP == '白天'):\n offset = 0\n elif (matchAP == '晚上' or matchAP == '下午'):\n offset = 1\n else:\n if (sourceType == 'user'):\n message = TextSendMessage(text=\"格式錯誤\")\n errMessage = TextSendMessage(text=errString)\n line_bot_api.reply_message(event.reply_token, [message, errMessage])\n return\n if (matchDay == '一'):\n dataListIndex = 4 + offset\n elif (matchDay == '二'):\n dataListIndex = 6 + offset\n elif (matchDay == '三'):\n dataListIndex = 8 + offset\n elif (matchDay == '四'):\n dataListIndex = 10 + offset\n elif (matchDay == '五'):\n dataListIndex = 12 + offset\n elif (matchDay == '六'):\n dataListIndex = 14 + offset\n else:\n if (sourceType == 'user'):\n message = TextSendMessage(text=\"格式錯誤\")\n errMessage = TextSendMessage(text=errString)\n line_bot_api.reply_message(event.reply_token, [message, errMessage])\n return\n else:\n match = re.search('([^0-9]*)([0-9]+)$', y)\n if (match):\n matchDay = day\n matchAP = match.group(1)\n y = match.group(2)\n \n if ('昨' in matchAP):\n matchDay = matchDay - 1\n if (matchDay == 0):\n matchDay = 7\n if (matchDay == 7):\n dataListIndex = 3\n else:\n dataListIndex = (matchDay + 1) * 2\n \n if (matchAP == ''):\n if (ap == \"PM\"):\n dataListIndex = dataListIndex + 1\n elif ('早上' in matchAP or '上午' in matchAP or '白天' in matchAP):\n dataListIndex = dataListIndex\n elif ('晚上' in matchAP or '下午' in matchAP):\n dataListIndex = dataListIndex + 1\n else:\n if (sourceType == 'user'):\n message = TextSendMessage(text=\"格式錯誤\")\n errMessage = TextSendMessage(text=errString)\n line_bot_api.reply_message(event.reply_token, [message, errMessage])\n return\n else:\n if (sourceType == 'user'):\n message = TextSendMessage(text=\"格式錯誤\")\n errMessage = TextSendMessage(text=errString)\n line_bot_api.reply_message(event.reply_token, [message, errMessage])\n return\n \n############\n \n #init MySQL\n mysql = MySQL()\n mysql.init_app(app)\n connection = mysql.connect()\n cursor = connection.cursor()\n \n############\n \n sql = \"SELECT * FROM `turnip` WHERE `uid`=%s\"\n cursor.execute(sql, (user_id,))\n data = cursor.fetchone()\n if (data):\n data = data[1:]\n if (data[1] == week - 1):\n data = [user_id, week, data[16], '', '', '', '', '', '', '', '', '', '', '', '', '', -1]\n elif (data[1] < week):\n data = [user_id, week, -1, '', '', '', '', '', '', '', '', '', '', '', '', '', -1]\n else:\n data = [user_id, week, -1, '', '', '', '', '', '', '', '', '', '', '', '', '', -1]\n sql = \"INSERT INTO `turnip` (`uid`) VALUES (%s)\"\n cursor.execute(sql, (user_id,))\n connection.commit()\n\n############\n\n dataList=list(data)\n if (y == '0'):\n dataList[dataListIndex] = ''\n else:\n dataList[dataListIndex] = y\n\n############\n \n p = [0, 0, 0, 0]\n posList = [[], [], [], []]\n with open('./predictions.js') as f:\n ctx = execjs.compile(f.read())\n dics = ctx.call('calculateOutput', 
dataList[3], [dataList[4], dataList[5], dataList[6], dataList[7], dataList[8], dataList[9], dataList[10], dataList[11], dataList[12], dataList[13], dataList[14], dataList[15]], False, dataList[2])\n index = 0\n for i in range(len(dics)):\n if (i == 0):\n continue\n index = dics[i]['pattern_number']\n p[index] = dics[i]['category_total_probability']\n posList[index].append(dics[i])\n\n for i in range(len(p)):\n if (p[i] >= 1):\n dataList[16] = i\n break\n\n############\n \n templateText = \"\"\n patterns = [\"波型\", \"三期型\", \"遞減型\", \"四期型\"]\n phases = [\"星期日\", \"星期日\", \"星期一早上\", \"星期一下午\", \"星期二早上\", \"星期二下午\", \"星期三早上\", \"星期三下午\", \"星期四早上\", \"星期四下午\", \"星期五早上\", \"星期五下午\", \"星期六早上\", \"星期六下午\"]\n currentPhase = day * 2\n if (ap == \"PM\"):\n currentPhase = currentPhase + 1\n if (day == 7):\n currentPhase = 2\n for i in range(len(p)):\n if (p[i] >= 1):\n templateText = templateText + \"%s\"%(patterns[i])\n weekMax = posList[i][0]['weekMax']\n comment = \"\\n請趕快賣出\"\n if (i == 0):\n prices = posList[i][0]['prices']\n for j in range(len(prices)):\n if (prices[j]['max'] == weekMax):\n comment = \"\\n請在%d~%d的價格時賣出\"%(prices[j]['min'], prices[j]['max'])\n break\n elif (i == 1 or i == 3):\n for j in range(len(posList[i])):\n prices = posList[i][j]['prices']\n for k in range(len(prices)):\n if (k <= currentPhase):\n continue\n if (prices[k]['max'] == weekMax):\n if (len(posList[i]) == 1):\n comment = \"\\n請在%s賣出\"%(phases[k])\n else:\n comment = \"\\n可望最早在%s有%d~%d的價格\"%(phases[k], prices[k]['min'], prices[k]['max'])\n break\n if (comment != \"\\n請趕快賣出\"):\n break\n templateText = templateText + comment\n break\n elif (p[i] != 0):\n if (len(templateText) != 0):\n templateText = templateText + \"\\n\"\n templateText = templateText + \"%s:%d%%\"%(patterns[i], (p[i]+0.005)*100)\n\n if (dataList[16] < 0):\n for i in range(len(p)):\n if (p[i] != 0):\n weekMax = posList[i][0]['weekMax']\n if (i == 1 or i == 3):\n comment = \"\"\n for j in range(len(posList[i])):\n prices = posList[i][j]['prices']\n for k in range(len(prices)):\n if (k <= currentPhase):\n continue\n if (prices[k]['max'] == weekMax):\n comment = \"\\n如果是%s,可望最早在%s有%d~%d的價格\"%(patterns[i], phases[k], prices[k]['min'], prices[k]['max'])\n break\n if (comment != \"\"):\n break\n templateText = templateText + comment\n break\n \n if (day == 7 and y.isdigit()):\n message = TextSendMessage(text=\"一顆%s鈴錢~漲價吧~漲價的話~就太好了~\"%(y))\n line_bot_api.reply_message(event.reply_token, message)\n else:\n buttons_template_message = TemplateSendMessage(\n alt_text=\"結果\",\n template=ButtonsTemplate(\n text=templateText,\n actions=[\n URIAction(\n label='View Detail',\n uri=\"https://turnipprophet.io/?prices=%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s.%s&pattern=%d\"%(dataList[3], dataList[4], dataList[5], dataList[6], dataList[7], dataList[8], dataList[9], dataList[10], dataList[11], dataList[12], dataList[13], dataList[14], dataList[15], dataList[2])\n )\n ]\n )\n ) \n line_bot_api.reply_message(event.reply_token, buttons_template_message)\n\n data = tuple(dataList)\n\n############\n\n print(data)\n sql = \"UPDATE `turnip` SET week=%s, pattern=%s, sun=%s, monA=%s, monP=%s, tueA=%s, tueP=%s, wedA=%s, wedP=%s, thuA=%s, thuP=%s, friA=%s, friP=%s, satA=%s, satP=%s, result=%s WHERE uid=%s\"\n cursor.execute(sql, (data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16], data[0]))\n connection.commit()\n\nimport os\nif __name__ == \"__main__\":\n port = 
int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n","repo_name":"iamhands0me/LINE-Bot_Daisy-Mae","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"14389266178","text":"import paho.mqtt.client as mqtt\nimport json\nimport RPi.GPIO as io\nio.setmode(io.BCM)\n\nio.setup(17, io.IN, pull_up_down=io.PUD_UP)\nio.setup(18, io.IN, pull_up_down=io.PUD_UP)\nio.setup(27, io.IN, pull_up_down=io.PUD_UP)\n\nio.add_event_detect(17, io.FALLING, bouncetime=200)\nio.add_event_detect(18, io.FALLING, bouncetime=200)\nio.add_event_detect(27, io.FALLING, bouncetime=200)\n\n# tuple object with the pin numbers\nleds = (9,10)\n\n# initialisation function for the LEDs, with a tuple as parameter\nio.setmode(io.BCM)\nio.setup(9, io.OUT)\nio.setup(10, io.OUT)\n\n# Booleans\nled1 = False\nled2 = False\nled3 = False\n\n# set the state of the LEDs, taking 2 tuples as parameters:\n# one tuple of pin numbers and one with bools for the state\ndef set_leds(leds, states):\n 
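# RPi.GPIO's output() accepts a sequence of channels and a matching\n # sequence of states, so both LEDs can be switched in a single call\n 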
io.output(leds, states)\n\n# callback for processing received messages\ndef on_message(mqttc, obj, msg):\n# try:\n # convert the payload from bytestring to string\n #\n\n # JSON is expected; the JSON string must be converted to a Python\n # dictionary for processing\n # x = json.loads(p)\n \n if msg.payload.decode() == 'led1ON': \n io.output(9,1)\n elif msg.payload.decode() == 'led1OFF':\n io.output(9,0)\n\n if msg.payload.decode() == 'led2ON':\n io.output(10,1)\n elif msg.payload.decode() == 'led2OFF':\n io.output(10,0)\n\n if msg.payload.decode() == 'ledsOFF':\n io.output(9,0)\n io.output(10,0)\n if msg.payload.decode() == 'ledsON':\n io.output(9,1)\n io.output(10,1)\n\n \n print(msg.topic)\n print(msg.payload.decode())\n\n#def manual():\n# global led1\n# global led2\n# global led3\n# try:\n# mqttc = mqtt.Client()\n# mqttc.connect(\"broker.hivemq.com\")\n# mqttc.connect(\"127.0.0.1\")\n# while True:\n# if io.event_detected(17):\n# if led1 == False:\n# mqttc.publish('home/groundfloor/kitchen/lights/light1', payload='led1OFF', qos=0, retain=False)\n# led1 = True\n# print(led1)\n# elif led1 == True:\n# mqttc.publish('home/groundfloor/kitchen/lights/light1', payload='led1ON', qos=0, retain=False)\n# led1 = False\n \n# if io.event_detected(18):\n# if led2 == False: \n# mqttc.publish('home/groundfloor/kitchen/lights/light2', payload='led2OFF', qos=0, retain=False)\n# led2 = True\n# elif led2 == True:\n# mqttc.publish('home/groundfloor/kitchen/lights/light2', payload='led2ON', qos=0, retain=False)\n# led2 = False\n\n# if io.event_detected(27):\n# if led3 == False:\n# mqttc.publish('home/groundfloor/kitchen', payload='ledsOFF', qos=0, retain=False)\n# led3 = True\n# elif led3 == True:\n# mqttc.publish('home/groundfloor/kitchen', payload='ledsON', qos=0, retain=False)\n# led3 = False\n \n# except KeyboardInterrupt:\n# pass\n\ndef main():\n global led1\n global led2\n global led3\n try:\n # initialisation of all elements\n # init_leds(leds)\n mqttc = mqtt.Client()\n# mqttc.subscribe('home/groundfloor/kitchen/lights/light1')\n# mqttc.subscribe('home/groundfloor/kitchen/lights/light2')\n\n mqttc.on_message = on_message\n# mqttc.connect(\"127.0.0.1\")\n mqttc.connect(\"broker.hivemq.com\")\n mqttc.subscribe('home/groundfloor/kitchen/#')\n# mqttc.subscribe('home/groundfloor/kitchen/lights/light2')\n\n while True:\n \n if io.event_detected(17):\n if led1 == False:\n mqttc.publish('home/groundfloor/livingroom', payload='led1OFF', qos=0, retain=False)\n led1 = True\n elif led1 == True:\n mqttc.publish('home/groundfloor/livingroom', payload='led1ON', qos=0, retain=False)\n led1 = False\n \n if io.event_detected(18):\n if led2 == False: \n mqttc.publish('home/groundfloor/livingroom', payload='led2OFF', qos=0, retain=False)\n led2 = True\n elif led2 == True:\n mqttc.publish('home/groundfloor/livingroom', payload='led2ON', qos=0, retain=False)\n led2 = False\n\n if io.event_detected(27):\n if led3 == False:\n mqttc.publish('home/groundfloor/livingroom', payload='ledsOFF', qos=0, retain=False)\n led3 = True\n elif led3 == True:\n mqttc.publish('home/groundfloor/livingroom', payload='ledsON', qos=0, retain=False)\n led3 = False\n \n mqttc.loop()\n\n except KeyboardInterrupt:\n pass\n\n finally:\n io.cleanup()\n\n# main segment\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"localdevm/pimqqt","sub_path":"Mqtt.py","file_name":"Mqtt.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"43552616039","text":"from nltk.tokenize import word_tokenize \nfrom nltk.corpus import stopwords\nfrom nltk.corpus import PlaintextCorpusReader\nfrom nltk.stem import SnowballStemmer\nfrom bs4 import BeautifulSoup\n\ndef removeSpecialCharacters(text):\n\tgood = {'\\n'}\n\tfor i in \"abcdefghijklmnopqrstuvwxyz áéíóúñü\":\n\t\tgood.add(i)\n\tans = \"\"\n\tfor c in text:\n\t\tif c in good:\n\t\t\tans += c\n\treturn ans\n\ndef cleanHTML(html):\n\treturn BeautifulSoup(html,'html.parser').get_text().lower()\n\ndef opFile(root):\n corpus = PlaintextCorpusReader(root, '.*')\n return corpus.raw()\n\ndef splitText(txt):\n\treturn txt.replace('/', ' ').replace('.', ' ').replace('-', ' ')\n\ndef lemmaDict(path):\n with open(path, encoding='latin-1') as f:\n lines = f.readlines()\n \n lemm = {}\n\n for line in lines:\n line = line.strip()\n if line != '':\n words = line.split()\n token = words[0].strip()\n token = token.replace(\"#\", \"\")\n lemma = words[-1].strip()\n lemm[token] = lemma\n return list(lemm.items())\n\ndef lemmatize(text, lemm_dir):\n lemmatized = []\n lemmas = dict(lemmaDict(lemm_dir))\n for word in text:\n if word in lemmas.keys():\n lemmatized.append(lemmas[word])\n else:\n lemmatized.append(word)\n return lemmatized\n\ndef comenzar():\n org_txt = opFile(\"c:/Users/sebas/Desktop/EXCELSIOR_100_files\")\n\n html_clean_text = cleanHTML(org_txt)\n\n html_clean_text = splitText(html_clean_text)\n\n html_clean_text = removeSpecialCharacters(html_clean_text)\n\n tokens = word_tokenize(html_clean_text,\"spanish\")\n\n normalized_tokens = [] \n\n nltk_stop_words = stopwords.words(\"spanish\")\n\n for w in tokens: \n if w not in nltk_stop_words: \n normalized_tokens.append(w)\n\n lemmatized_tokens = lemmatize(normalized_tokens, \"C:/Users/sebas/Downloads/generate.txt\")\n\ncomenzar()","repo_name":"SebastianCD/natural-language-processing","sub_path":"TextNormalization.py","file_name":"TextNormalization.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5195981440","text":"from math import gcd\nfrom itertools import combinations\nn, m = map(int, input().split())\n\npoints = list()\npo_set = set()\nans_list = list()\nline_flag = [[[0]*4 for _ in range(n-1)]for _ in range(n-1)]\n# ['right', 'down', 'rightdown', 'rightup']\nfor _ in range(m):\n x, y = map(int, input().split())\n points.append([x, y])\n po_set.add(\"{:02}{:02}\".format(x, y))\n\ndef jadge_in_po(po):\n return True if \"{:02}{:02}\".format(po[0], po[1]) in po_set else False\n\ndef add_po(po):\n points.append([po[0], po[1]])\n po_set.add(\"{:02}{:02}\".format(po[0], po[1]))\n return 0\n\ndef add_line(po1, po2):\n if po1[0] == po2[0]: # down\n const_x = po1[0]\n if po1[1] < po2[1]:\n for y in range(po1[1], po2[1]):\n line_flag[const_x][y][1] = 1\n else:\n for y in range(po2[1], po1[1]):\n line_flag[const_x][y][1] = 1\n elif po1[1] == po2[1]: # right\n const_y = po1[1]\n if po1[0] < po2[0]:\n for x in range(po1[0], po2[0]):\n line_flag[x][const_y][0] = 1\n else:\n for x in range(po2[0], po1[0]):\n line_flag[x][const_y][0] = 1\n elif (po2[0]-po1[0]) * (po2[1]-po1[1]) > 0: # rightup\n if po1[0] < po2[0]:\n for x, y in zip(range(po1[0], po2[0]), range(po1[1]+1, po2[1]+1)):\n line_flag[x][y][3] = 1\n else:\n for x, y in zip(range(po2[0], po1[0]), range(po2[1]+1, po1[1]+1)):\n line_flag[x][y][3] = 1\n else: # rightdown\n if po1[0] < po2[0]:\n for x, y in zip(range(po1[0], po2[0]), range(po1[1], po2[1])):\n line_flag[x][y][2] 
= 1\n else:\n for x, y in zip(range(po2[0], po1[0]), range(po2[1], po1[1])):\n line_flag[x][y][2] = 1\n return 0\n\ndef pos_rectangle(new_p, p0, p1, p2):\n if jadge_in_po(new_p):\n return False\n else:\n if not (line_jadge_no(p0, p1) or line_jadge_no(p1, p2) \\\n or line_jadge_no(p2, new_p) or line_jadge_no(new_p, p0)):\n return True\n else:\n return False\n\ndef line_jadge_no(po1, po2):\n # check whether this line is impossible to draw\n sx = po2[0]-po1[0]\n sy = po2[1]-po1[1]\n gc = gcd(sx, sy)\n dx = sx // gc\n dy = sy // gc\n for i in range(gc-1):\n # print('jadge [{}, {}]'.format(po1[0]+dx*(i+1), po1[1]+dy*(i+1)))\n if jadge_in_po([po1[0]+dx*(i+1), po1[1]+dy*(i+1)]):\n return True\n else:\n # return False\n if pos_line(po1, po2):\n return True\n else:\n return False\n\ndef pos_line(po1, po2):\n if po1[0] == po2[0]: # down\n const_x = po1[0]\n if po1[1] < po2[1]:\n for y in range(po1[1], po2[1]):\n if line_flag[const_x][y][1] == 1:\n return True\n else:\n for y in range(po2[1], po1[1]):\n if line_flag[const_x][y][1] == 1:\n return True\n elif po1[1] == po2[1]: # right\n const_y = po1[1]\n if po1[0] < po2[0]:\n for x in range(po1[0], po2[0]):\n if line_flag[x][const_y][0] == 1:\n return True\n else:\n for x in range(po2[0], po1[0]):\n if line_flag[x][const_y][0] == 1:\n return True\n elif (po2[0]-po1[0]) * (po2[1]-po1[1]) > 0: # rightup\n if po1[0] < po2[0]:\n for x, y in zip(range(po1[0], po2[0]), range(po1[1], po2[1])):\n if line_flag[x][y][3] == 1:\n return True\n else:\n for x, y in zip(range(po2[0], po1[0]), range(po2[1], po1[1])):\n if line_flag[x][y][3] == 1:\n return True\n else: # rightdown\n if po1[0] < po2[0]:\n for x, y in zip(range(po1[0], po2[0]), range(po1[1]-1, po2[1]-1)):\n if line_flag[x][y][2] == 1:\n return True\n else:\n for x, y in zip(range(po2[0], po1[0]), range(po2[1]-1, po1[1]-1)):\n if line_flag[x][y][2] == 1:\n return True\n return False\n\ndef main():\n def make_new_po(p0, p1, p2):\n ans_po = [-1, ]\n sum_x = p0[0]+p1[0]+p2[0]\n sum_y = p0[1]+p1[1]+p2[1]\n if ((p1[0]-p0[0])*(p2[0]-p0[0]) + (p1[1]-p0[1])*(p2[1]-p0[1]) == 0) and \\\n (abs(p1[0]-p0[0]) == abs(p1[1]-p0[1])):\n ans_po = [sum_x-2*p0[0], sum_y-2*p0[1]] # n102\n if pos_rectangle(ans_po, p1, p0, p2):\n add_po(ans_po)\n ans_list.append(list(map(str, \\\n [ans_po[0], ans_po[1], p1[0], p1[1], p0[0], p0[1], p2[0], p2[1]])))\n add_line(ans_po, p0)\n add_line(p0, p1)\n add_line(p1, p2)\n add_line(p2, ans_po)\n elif ((p0[0]-p1[0])*(p2[0]-p1[0]) + (p0[1]-p1[1])*(p2[1]-p1[1]) == 0) and \\\n (abs(p1[0]-p0[0]) == abs(p1[1]-p0[1])):\n ans_po = [sum_x-2*p1[0], sum_y-2*p1[1]] # n012\n if pos_rectangle(ans_po, p0, p1, p2):\n add_po(ans_po)\n ans_list.append(list(map(str, \\\n [ans_po[0], ans_po[1], p0[0], p0[1], p1[0], p1[1], p2[0], p2[1]])))\n add_line(ans_po, p0)\n add_line(p0, p1)\n add_line(p1, p2)\n add_line(p2, ans_po)\n elif ((p0[0]-p2[0])*(p1[0]-p2[0]) + (p0[1]-p2[1])*(p1[1]-p2[1]) == 0) and \\\n (abs(p2[0]-p0[0]) == abs(p2[1]-p0[1])):\n ans_po = [sum_x-2*p2[0], sum_y-2*p2[1]] # n021\n if pos_rectangle(ans_po, p0, p2, p1):\n add_po(ans_po)\n ans_list.append(list(map(str, \\\n [ans_po[0], ans_po[1], p0[0], p0[1], p2[0], p2[1], p1[0], p1[1]])))\n add_line(ans_po, p0)\n add_line(p0, p1)\n add_line(p1, p2)\n add_line(p2, ans_po)\n else:\n return 0\n\n if (ans_po[0] < 0) or (n-1 < ans_po[0]):\n return 0\n elif (ans_po[1] < 0) or (n-1 < ans_po[1]):\n return 0\n\n return ans_po\n\n # => make_new_po\n # => pos_rectangle => line_jadge_no\n for p0, p1, p2 in combinations(points, 3):\n tar = make_new_po(p0, p1, 
p2)\n\nmain()\nprint(len(ans_list))\n\nfor ans_raw in ans_list:\n print(\" \".join(ans_raw))\n\n\n# point_sum = 0\n# for l1 in line_flag:\n# for l2 in l1:\n# point_sum += sum(l2)\n#\n# print(point_sum)\n\n\n\n\n\n","repo_name":"corawada/Atcoder","sub_path":"ahc/ahc014/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":6492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7388627415","text":"import requests\nfrom pprint import pprint\n\n\nclass YaUploader:\n def __init__(self, token: str):\n self.token = token\n self.url = 'https://cloud-api.yandex.net/v1/disk/resources/'\n\n def _get_headers(self):\n return {\n 'Content-Type': 'application/json',\n 'Authorization': f'OAuth {self.token}'\n }\n\n def _get_upload_link(self, file_path):\n upload_url = self.url + 'upload'\n headers = self._get_headers()\n params = {'path': file_path, 'overwrite': 'true'}\n response = requests.get(upload_url, headers=headers, params=params)\n return response.json()\n\n def _add_folder(self, path):\n headers = self._get_headers()\n requests.put(f'{self.url}?path={path}', headers=headers)\n\n def _upload(self, file_path, path_to_file):\n link_dict = self._get_upload_link(file_path=file_path)\n href = link_dict['href']\n response = requests.put(href, data=open(path_to_file, 'rb'))\n response.raise_for_status()\n if response.status_code == 201:\n print('Success')\n\n def upload_files_from_a_list(self, path_to_file_list):\n for path_to_file in path_to_file_list:\n directory, file_name = path_to_file.split('/')\n self._add_folder(directory)\n self._upload(path_to_file, path_to_file)\n\n\nif __name__ == '__main__':\n\n with open('ТокенYa.txt', 'r') as f:\n token = f.read().strip()\n uploader = YaUploader(token)\n\n path_to_file_list = ['Requests/text.txt', 'Портфолио/photo.jpg', 'ООП дз/OOP.py']\n\n uploader.upload_files_from_a_list(path_to_file_list)","repo_name":"TannyKorry/Requests","sub_path":"Task#2.py","file_name":"Task#2.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18439698868","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom ckeditor_uploader.fields import RichTextUploadingField\n# Create your models here.\n\nclass CompanyType(models.Model):\n company_type = models.CharField(max_length=20,verbose_name=\"公司\",help_text=\"公司\")\n\n def __str__(self):\n return self.company_type\n\nclass Company(models.Model):\n content = RichTextUploadingField(verbose_name='文章')\n created_time = models.DateTimeField(auto_now_add=True,verbose_name=\"创建时间\")\n author = models.ForeignKey(User,on_delete=models.DO_NOTHING,verbose_name=\"作者\")\n image_url = models.ImageField(upload_to=\"company\", default='', blank=True)\n company_type = models.ForeignKey(CompanyType,on_delete=models.DO_NOTHING,verbose_name=\"君枫珠宝\")\n\n class Meta:\n ordering = [\"created_time\"]\n db_table = \"tb_company\"\n verbose_name = \"君枫珠宝\"\n verbose_name_plural = verbose_name","repo_name":"ylz1990/jfzb","sub_path":"apps/company/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"32584907850","text":"from flask import g, current_app\n\nfrom src.api import telegram\nfrom src.api.yandex import make_photo_preview_request\nfrom src.blueprints.telegram_bot._common.yandex_disk import (\n 
get_element_info,\n YandexAPIGetElementInfoError,\n YandexAPIRequestError\n)\nfrom src.blueprints.telegram_bot._common.stateful_chat import (\n set_disposable_handler\n)\nfrom src.blueprints.telegram_bot._common.command_names import (\n CommandName\n)\nfrom src.blueprints.telegram_bot.webhook.dispatcher_events import (\n DispatcherEvent\n)\nfrom ._common.responses import (\n cancel_command,\n request_absolute_path,\n send_yandex_disk_error\n)\nfrom ._common.decorators import (\n yd_access_token_required,\n get_db_data\n)\nfrom ._common.utils import (\n extract_absolute_path,\n create_element_info_html_text\n)\n\n\n@yd_access_token_required\n@get_db_data\ndef handle(*args, **kwargs):\n \"\"\"\n Handles `/element_info` command.\n \"\"\"\n message = kwargs.get(\n \"message\",\n g.telegram_message\n )\n user_id = kwargs.get(\n \"user_id\",\n g.telegram_user.id\n )\n chat_id = kwargs.get(\n \"chat_id\",\n g.telegram_chat.id\n )\n path = extract_absolute_path(\n message,\n CommandName.ELEMENT_INFO.value,\n kwargs.get(\"route_source\")\n )\n\n if not path:\n set_disposable_handler(\n user_id,\n chat_id,\n CommandName.ELEMENT_INFO.value,\n [\n DispatcherEvent.PLAIN_TEXT.value,\n DispatcherEvent.BOT_COMMAND.value,\n DispatcherEvent.EMAIL.value,\n DispatcherEvent.HASHTAG.value,\n DispatcherEvent.URL.value\n ],\n current_app.config[\"RUNTIME_DISPOSABLE_HANDLER_EXPIRE\"]\n )\n\n return request_absolute_path(chat_id)\n\n user = g.db_user\n access_token = user.yandex_disk_token.get_access_token()\n info = None\n\n try:\n info = get_element_info(\n access_token,\n path,\n get_public_info=True\n )\n except YandexAPIRequestError as error:\n cancel_command(chat_id)\n raise error\n except YandexAPIGetElementInfoError as error:\n send_yandex_disk_error(chat_id, str(error))\n\n # it is expected error and should be\n # logged only to user\n return\n\n text = create_element_info_html_text(info, True)\n params = {\n \"chat_id\": chat_id,\n \"text\": text,\n \"parse_mode\": \"HTML\",\n \"disable_web_page_preview\": True\n }\n download_url = info.get(\"file\")\n\n if download_url:\n params[\"reply_markup\"] = {\n \"inline_keyboard\": [[\n {\n \"text\": \"Download\",\n \"url\": download_url\n }\n ]]\n }\n\n telegram.send_message(**params)\n\n preview = info.get(\"preview\")\n\n if preview:\n # Yandex requires user OAuth token to get preview\n result = make_photo_preview_request(preview, access_token)\n\n if result[\"ok\"]:\n data = result[\"content\"]\n filename = info.get(\"name\", \"?\")\n\n telegram.send_photo(\n chat_id=chat_id,\n photo=(\n filename,\n data,\n \"image/jpeg\"\n ),\n disable_notification=True\n )\n","repo_name":"primeithard/yandex-disk-telegram-bot","sub_path":"src/blueprints/telegram_bot/webhook/commands/element_info.py","file_name":"element_info.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40019260848","text":"import cv2\nimport mahotas\nfrom pylab import *\nfrom skimage import color\nfrom predict.colorfeatureextractor import ColorFeatureExtractor\nfrom predict.featureextractor import FeatureExtractor\n\n\n__author__ = 'Group16'\n\n\"\"\"\n\n This class will extract moments (geometric and Zernike) from a hue image.\n\n Written by Group 16: Tim Deweert, Karsten Goossens & Gilles Vandewiele\n Commissioned by UGent, course Machine Learning\n\"\"\"\n\n\nclass ShapeFeatureExtractor(FeatureExtractor):\n\n def __init__(self, _radius):\n self.radius = _radius\n\n def 
extract_feature_vector(self, image):\n \"\"\"\n extract geometric and Zernike moments from a hue image which is first converted to a grayscale image\n :param image: a hue image (can be extracted using the Color Feature Extractor)\n :return: a feature vector containing geometric and Zernike moments\n \"\"\"\n # hue_image = ColorFeatureExtractor.extract_hue(image)\n # hue_gray_image = (np.rint(asarray(hue_image) * 255)).astype(np.uint8)\n # contour = self.calculateRimContour(hue_gray_image)\n # feature_vector = self.calculateGeometricMoments(contour)\n # feature_vector = append(feature_vector, self.extract_zernike(color.rgb2gray(image), self.radius))\n feature_vector = self.extract_zernike(color.rgb2gray(image), self.radius)\n return feature_vector\n\n @staticmethod\n def calculateRimContour(hue):\n img = resize(hue, (64, 64))\n ret, thresh = cv2.threshold(img, 127, 255, 0)\n imgg, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, 1)\n if len(contours) == 0:\n return None\n index = 0\n max_area = cv2.contourArea(contours[0])\n for i in range(0, len(contours)):\n area = cv2.contourArea(contours[i])\n if area > max_area:\n max_area = area\n index = i\n return contours[index]\n\n @staticmethod\n def calculateGeometricMoments(contour):\n if contour is None:\n return [0, 0, 0, 0]\n\n mu = cv2.moments(contour, False)\n\n # If the zeroth-order moment is zero, no shape can be predicted\n if (mu[\"m00\"] == 0):\n return [0, 0, 0, 0]\n\n # Calculate first moment invariant\n I = (mu[\"mu20\"] * mu[\"mu02\"] - mu[\"mu11\"] ** 2) / (mu[\"m00\"] ** 4)\n\n # Measure ellipticity\n E = 0\n if I < 1 / (16 * math.pi ** 2):\n E = 16 * (math.pi ** 2) * I\n else:\n E = 1 / (16 * (math.pi ** 2) * I)\n\n # Measure triangularity\n T = 0\n if I < 1 / 108:\n T = 108 * I\n else:\n T = 1 / (108 * I)\n\n # Measure octagonality\n O = 0\n if I < 1 / (15.932 * math.pi ** 2):\n O = 15.932 * (math.pi ** 2) * I\n else:\n O = 1 / (15.932 * (math.pi ** 2) * I)\n\n # Measure Rectangularity\n minRect = cv2.minAreaRect(contour)\n box = cv2.boxPoints(minRect)\n area = cv2.contourArea(contour)\n areaMinRect = cv2.contourArea(box)\n R = area / areaMinRect\n\n return [E, T, O, R]\n\n def extract_zernike(self, element, _radius):\n return mahotas.features.zernike_moments(resize(element, (64, 64)), radius=_radius, degree=10)\n\n\n","repo_name":"GillesVandewiele/TrafficSignRecognizer","sub_path":"predict/shapefeatureextractor.py","file_name":"shapefeatureextractor.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"12655294870","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lizard_efcis', '0045_remove_wns_status'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BioStatus',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('naam', models.CharField(unique=True, max_length=50)),\n ],\n options={\n 'ordering': ['naam'],\n 'verbose_name': 'biologische status',\n 'verbose_name_plural': 'biologische statussen',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='FCStatus',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('naam', models.CharField(unique=True, max_length=50)),\n ],\n options={\n 'ordering': ['naam'],\n 'verbose_name': 
'fysisch/chemische status',\n 'verbose_name_plural': 'fysisch/chemische statussen',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"lizardsystem/lizard-efcis","sub_path":"lizard_efcis/migrations/0046_biostatus_fcstatus.py","file_name":"0046_biostatus_fcstatus.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25899289294","text":"class Solution:\n def isValid(self, s):\n \"\"\"\n :type s: str\n :rtype: bool\n \"\"\"\n pairs = {'(': ')', '{': '}', '[': ']'}\n stack = []\n\n for c in s:\n if c in pairs:\n stack.append(c)\n elif c in pairs.values():\n # Also fails if stack is empty -> too many closing parens.\n if not stack or pairs[stack.pop()] != c: return False\n\n return not stack\n","repo_name":"michkim/Programming-Practice","sub_path":"Leetcode/020_valid_parentheses.py","file_name":"020_valid_parentheses.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"3107417018","text":"import torch\nimport copy\n\nfrom plnn.branch_and_bound import pick_out, add_domain, prune_domains\nfrom torch import nn\nimport random\nfrom plnn.kw_score_conv import choose_node_conv, choose_node_perturbed\nfrom exp_utils.plnn_utils import dump_domain, dom_to_branch\nimport pickle\nimport glob\nimport os\nimport numpy as np\nimport time\n\n\ndom_path = '/home/jodie/PLNN/PLNN-verification-journal/cifar_kw_m2_train_data/'\n\nclass ReLUDomain:\n '''\n Object representing a domain where the domain is specified by decision\n assigned to ReLUs.\n Comparison between instances is based on the values of\n the lower bound estimated for the instances.\n\n The domain is specified by `mask` which corresponds to a pattern of ReLUs.\n Neurons mapping to a 0 value are assumed to always have negative input (0 output slope)\n \" 1 \" positive input (1 output slope).\n \" -1 value are considered free and have no assumptions.\n\n For a MaxPooling unit, -1 indicates that we haven't picked a dominating input\n Otherwise, this indicates which one is the dominant one\n '''\n #def __init__(self, mask, lower_bound=-float('inf'), upper_bound=float('inf'), lower_all=None, upper_all = None, ub_point=None, dual_vars = None, dual_vars_other=None, primals=None):\n #def __init__(self,mask,lower_bound=-float('inf'), upper_bound=float('inf'), lower_all=None, upper_all=None, dom_name =None):\n def __init__(self, lower_bound=-float('inf'), upper_bound=float('inf'), dom_name = None ):\n #self.mask = mask\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n #self.lower_all = lower_all\n #self.upper_all = upper_all\n self.dom_name = dom_name\n #self.ub_point = ub_point\n #self.dual_vars = dual_vars\n #self.dual_vars_other = dual_vars_other\n #self.primals = primals\n \n\n\n def __lt__(self, other):\n return self.lower_bound < other.lower_bound\n\n def __le__(self, other):\n return self.lower_bound <= other.lower_bound\n\n def __eq__(self, other):\n return self.lower_bound == other.lower_bound\n\n\n\ndef relu_stab(net, domain, bounded, x, ball_eps, dump_trace, eps=1e-4, pgd_threshold = 1, sparsest_layer=0, decision_bound=None, criteria='kw', record=None):\n '''\n Uses a branch and bound algorithm to evaluate the global minimum\n of a given neural network.\n `net` : Neural Network class, defining the `get_upper_bound` and\n `get_lower_bound` functions, supporting the `mask` argument\n indicating the phase of the ReLU.\n `eps` : Maximum 
difference between the UB and LB over the minimum\n before we consider having converged\n `decision_bound`: If not None, stop the search if the UB and LB are both\n superior or both inferior to this value.\n `pgd_threshold` : Once the number of relus being fixed during the algorithm\n is above pgd_threshold percentage of the total ambiguous nodes\n at the beginning, we initiate pgd attacks to find \n a better global upper bound\n\n Returns : Lower bound and Upper bound on the global minimum,\n as well as the point where the upper bound is achieved\n '''\n nb_visited_states = 0\n #global_ub_point, global_ub = net.get_upper_bound(domain)\n global_ub, global_lb, global_ub_point, dual_vars, dual_vars_other, primals, updated_mask, lower_bounds, upper_bounds, pre_relu_indices= net.build_the_model(domain, x, ball_eps, bounded)\n print('kw ball is bounded: ', bounded)\n print(global_lb)\n if global_lb > 0:\n out = 'UNSAT'\n print('Early Stop, No need for BaB')\n return global_lb, global_ub, nb_visited_states, out\n\n dom_idx = 0\n dom_name = dom_path+f'domain_{criteria}/'+dump_trace + f'_dom_{dom_idx}'\n candidate_domain = ReLUDomain(lower_bound=global_lb, upper_bound = global_ub, dom_name = dom_name)\n domains = [candidate_domain]\n dump_domain(dom_name,\n updated_mask,\n global_lb,\n global_ub,\n lower_bounds,\n upper_bounds,\n dual_vars,\n dual_vars_other,\n global_ub_point,\n primals\n ) \n \n #import pdb; pdb.set_trace()\n prune_counter = 0\n icp_score = 0\n\n random_order = list(range(len(updated_mask)))\n try: \n random_order.remove(sparsest_layer)\n random_order = [sparsest_layer]+random_order\n except:\n pass\n\n while global_ub - global_lb > eps:\n # Pick a domain to branch over and remove that from our current list of\n # domains. Also, potentially perform some pruning on the way.\n candidate_domain = pick_out(domains, global_ub - eps)\n # Generate new, smaller domains by splitting over a ReLU\n dom_name = candidate_domain.dom_name\n with open(dom_name,\"rb\") as f:\n dc = pickle.load(f)\n mask = dc['X']['mask']\n orig_lbs = dc['X']['lower_bounds']\n orig_ubs = dc['X']['upper_bounds'] \n f.close()\n # debug choose_node_conv\n #mask[2] = torch.zeros(mask[2].shape)\n #icp_score =2\n \n #### return the best kw, a perturbed kw, and a list of choices\n kw_decision, perturbed_decision, pool_size, icp_score, score = choose_node_perturbed(orig_lbs, orig_ubs, mask, net.layers, pre_relu_indices, icp_score, random_order, sparsest_layer, gt =True)\n\n if criteria == 'kw':\n branching_decision = kw_decision\n elif criteria == 'kw_perturbed':\n branching_decision = perturbed_decision\n elif criteria.split('_')[0] =='maxmin' or criteria.split('_')[0] =='maxsum':\n selected_indices = testing_indices(mask, score)\n print('start computing ground truth')\n start = time.time()\n branching_decision, pool_size = gt_split(net, dump_trace, dom_name, nb_visited_states, mask, orig_lbs, orig_ubs, global_lb, selected_indices, criteria)\n end = time.time()\n print('one branch gt computation requires: ', end-start)\n \n print('branching decision: ', branching_decision) \n print('total available perturbed options: ', pool_size)\n\n\n for choice in [0,1]:\n # Find the upper and lower bounds on the minimum in the domain\n # defined by n_mask_i\n nb_visited_states += 1\n if (nb_visited_states % 10) == 0:\n print(f\"Running Nb states visited: {nb_visited_states}\")\n \n mask_temp = [i.clone() for i in mask]\n dom_ub,dom_lb, dom_ub_point, dom_dual_vars, dom_dual_vars_other, dom_primals, updated_mask, dom_lb_all, dom_ub_all= 
net.get_lower_bound( mask_temp, orig_lbs, orig_ubs, branching_decision, choice)\n \n if dom_ub < global_ub:\n global_ub = dom_ub\n \n\n print('dom_lb: ', dom_lb)\n print('dom_ub: ', dom_ub)\n\n if dom_lb < global_ub:\n dom_idx += 1\n dom_name = dom_path+f'domain_{criteria}/'+dump_trace + f'_dom_{dom_idx}'\n dom_to_add = ReLUDomain(lower_bound = dom_lb,\n upper_bound = dom_ub,\n dom_name = dom_name)\n \n \n dump_domain(dom_name,\n updated_mask,\n dom_lb,\n dom_ub,\n dom_lb_all,\n dom_ub_all,\n dom_dual_vars,\n dom_dual_vars_other,\n dom_ub_point,\n dom_primals\n ) \n add_domain(dom_to_add, domains)\n prune_counter += 1\n\n if nb_visited_states > 1500:\n record.write('early termination\\n')\n record.write(f'global_lb {global_lb}\\n')\n record.write(f'global_ub {global_ub}\\n')\n record.write(f'nb_states {nb_visited_states}\\n')\n record.close()\n return global_lb, global_ub, nb_visited_states, True\n \n\n domains = prune_domains(domains, global_ub - eps)\n\n prune_counter = 0\n\n\n if len(domains) > 0:\n global_lb = domains[0].lower_bound\n else:\n # If there is no more domains, we have pruned them all\n global_lb = global_ub - eps\n\n print(f\"Current: lb:{global_lb}\\t ub: {global_ub}\")\n if record is not None:\n record.write('glb {:.4f} branching decision {} choices {:d}\\n'.format(global_lb, branching_decision, pool_size))\n record.flush()\n\n\n # Stopping criterion\n if (decision_bound is not None) and (global_lb >= decision_bound):\n break\n elif global_ub < decision_bound:\n break\n \n if global_ub < 0:\n out = 'SAT'\n elif global_lb >0:\n out = 'UNSAT'\n else:\n out = 'None'\n\n if record is not None:\n record.write(f'{out}\\n')\n record.write(f'global_lb {global_lb}\\n')\n record.write(f'global_ub {global_ub}\\n')\n record.write(f'nb_states {nb_visited_states}\\n')\n record.close()\n \n dom_name = dom_path+f'domain_{criteria}/'+dump_trace + '_dom*'\n files = glob.glob(dom_name)\n for i in files:\n os.remove(i)\n return global_lb, global_ub, nb_visited_states, out\n\n\n\ndef testing_indices(mask, score):\n '''\n select a representative subset of indices of the set of all available unfixed relu choices\n 1. ensure at least 10% coverage 34+15+2\n 2. 
include the top 40 kw choices (with preference given to layer 1 and layer 2)\n    =====> only need to augment the choices on layer 0\n    '''\n    selected_indices = {}\n    for i in range(len(mask)):\n        selected_indices[i] = []\n    new_score = {}\n    new_score_l2 = {}\n    new_score_l1 = {}\n    for i in range(len(score)):\n        for j in range(len(score[i])):\n            if mask[i][j] == -1:\n                new_score[f'relu_{i}_{j}'] = score[i][j].item()\n                if (i==1):\n                    new_score_l1[f'relu_{i}_{j}'] = score[i][j].item()\n                if (i==2):\n                    new_score_l2[f'relu_{i}_{j}'] = score[i][j].item()\n\n    \n    new_score = sorted(new_score.items(), key = lambda x : x[1])\n    new_score_l1 = sorted(new_score_l1.items(), key = lambda x : x[1])\n    new_score_l2 = sorted(new_score_l2.items(), key = lambda x : x[1])\n    new_score.reverse()\n    new_score_l1.reverse()\n    new_score_l2.reverse()\n    kw_choices = new_score[:60]+new_score_l1[:20]+new_score_l2[:20]\n    for i in set(kw_choices):\n        selected_indices[int(i[0].split('_')[1])].append(int(i[0].split('_')[2]))\n\n    for relu_idx in range(len(mask)-1, -1, -1):\n        all_available_choices = ((mask[relu_idx]==-1).nonzero().view(-1)).tolist()\n        required_number = int(len(all_available_choices)*0.1)\n        done_choices = selected_indices[relu_idx]\n        required_number = required_number - len(done_choices)\n        ## DEBUG\n        # if len(done_choices) == 0:\n        if required_number <= 0:\n            # No need to add points on this layer\n            continue\n        else:\n            remained_choices = np.setdiff1d(all_available_choices, done_choices)\n            selected_choices = np.random.choice(remained_choices, required_number, replace=False)\n            selected_indices[relu_idx].extend(selected_choices)\n\n    print(selected_indices)  \n    return selected_indices\n\n\n\n\ndef kw_split(net, candidate_domain):\n    mask = candidate_domain.mask\n    orig_lbs = candidate_domain.lower_all_pa\n    orig_ubs = candidate_domain.upper_all_pa\n    decision = choose_dim(orig_lbs, orig_ubs, mask, net.layers)\n    mask_temp_1 = [i.copy() for i in mask]\n    mask_temp_1[decision[0]][decision[1]]= 0\n    mask_temp_2 = [i.copy() for i in mask]\n    mask_temp_2[decision[0]][decision[1]]= 1\n    print(f'idx: {decision}')\n    all_new_masks = [mask_temp_1, mask_temp_2]\n    return all_new_masks\n\n\ndef gt_split(net, dump_trace, dom_name, nb_visited_states, mask, lower_bounds, upper_bounds, global_lb, selected_indices, criteria):\n\n    #compute ground truths for domain_splits\n    gt_score_relu = {}\n    gt_lb_relu = {}\n\n\n    # first get all choices of interest\n    for layer in range(len(selected_indices)):\n        for index in selected_indices[layer]:\n            try:\n                mask_temp = [i.clone() for i in mask]\n                _, dom_lb, _, _, _,_,_,_,_ = net.get_lower_bound( mask_temp, lower_bounds, upper_bounds, [layer, index], 0)\n\n                mask_temp = [i.clone() for i in mask]\n                _,dom_lb1,_, _,_,_, _, _, _ = net.get_lower_bound( mask_temp, lower_bounds, upper_bounds, [layer, index], 1)\n\n                lbs = torch.Tensor([dom_lb, dom_lb1])\n                print('decision: ', layer, index)\n                if criteria.split('_')[0] == 'maxsum':\n                    lowest_lb_relu = min(0, dom_lb) + min(0, dom_lb1) - 2*global_lb\n                    gt_score_relu[f'relu_{layer}_{index}'] = lowest_lb_relu\n                    print(f'sub lower bounds [{dom_lb}, {dom_lb1}]')\n                    print(f'maxsum score: {lowest_lb_relu}')\n\n                \n                elif criteria.split('_')[0] == 'maxmin':\n                    lowest_lb_relu = min(min(0, dom_lb), min(0, dom_lb1))-global_lb\n                    gt_score_relu[f'relu_{layer}_{index}'] = lowest_lb_relu\n                    print(f'current choice [{layer}, {index}]')\n                    print(f'sub lower bounds [{dom_lb}, {dom_lb1}]')\n                    print(f'maxmin score: {lowest_lb_relu}')\n\n            except NotImplementedError:\n                continue\n\n            try:\n                gt_lb_relu[layer][index] = lbs\n            except 
KeyError:\n                gt_lb_relu[layer]={}\n                gt_lb_relu[layer][index] = lbs\n\n    # choose a required decision\n\n    gt_score_relu = sorted(gt_score_relu.items(), key = lambda x : x[1])\n    gt_score_relu.reverse()\n\n\n    if criteria.split('_')[-1] == 'perturbed':\n        candidates = []\n        best_score = gt_score_relu[0][1]\n        for i in gt_score_relu:\n            if i[1]/best_score >= 0.8:\n                candidates.append(i[0])\n            else:\n                break\n        pool_size = len(candidates)\n        temp = random.choice(candidates)\n        branching_decision = [int(temp.split('_')[1]), int(temp.split('_')[-1])]\n\n    else:\n        pool_size = -1\n        branching_decision = [int(gt_score_relu[0][0].split('_')[1]), int(gt_score_relu[0][0].split('_')[-1])]\n    \n    if dump_trace is not None:\n        trace_fname = dom_path+ f'branch_{criteria}/'+dump_trace + '_branch_{}'.format(nb_visited_states)\n        print(\"\\n\",trace_fname,\"\\n\")\n\n        dom_to_branch(trace_fname,\n                      dom_name,\n                      gt_lb_relu,\n                      branching_decision,\n                      ) \n\n    return branching_decision, pool_size  \n\n\n\ndef gt_split_kw(net, candidate_domain, kw_decision, kw_indices, nb_visited_states, dump_trace):\n    mask = candidate_domain.mask\n    orig_lbs = candidate_domain.lower_all\n    orig_ubs = candidate_domain.upper_all\n    largest_lowest_lb_dom = -float('inf')\n    largest_lowest_lb_dim_dom = None\n    largest_lowest_lb_relu = -float('inf')\n    largest_lowest_lb_index_relu = None\n    largest_lowest_lb_layer_relu = None\n    records_relu = {}\n    gt_lb_relu = {}\n\n    ##compute ground truths for relu_splits\n\n    # first get all choices of interest\n    for decision in kw_indices:\n        mask_temp = [i.clone() for i in mask]\n        dom_ub,dom_lb, dom_ub_point, updated_mask, dom_lb_all, dom_ub_all= net.get_lower_bound( mask_temp, orig_lbs, orig_ubs, decision, 0)\n\n        mask_temp = [i.clone() for i in mask]\n        dom_ub1,dom_lb1, dom_ub_point1, updated_mask1, dom_lb_all1, dom_ub_all1= net.get_lower_bound( mask_temp, orig_lbs, orig_ubs, decision, 1)\n\n        lbs = torch.Tensor([dom_lb, dom_lb1])\n        try:\n            gt_lb_relu[decision[0]][decision[1]] = lbs\n        except KeyError:\n            gt_lb_relu[decision[0]]={}\n            gt_lb_relu[decision[0]][decision[1]] = lbs\n\n        print(f'idx: {decision} solutions: {lbs}')\n\n        if decision == kw_decision:\n            records_relu[\"dom_ub\"] =[dom_ub,dom_ub1]\n            records_relu[\"dom_lb\"] = [dom_lb, dom_lb1]\n            records_relu[\"dom_ub_point\"] = [dom_ub_point, dom_ub_point1]\n            records_relu[\"mask\"] = [updated_mask, updated_mask1]\n            records_relu[\"dom_lb_all\"] = [dom_lb_all, dom_lb_all1]\n            records_relu[\"dom_ub_all\"] = [dom_ub_all, dom_ub_all1]\n\n    # dump traces\n    if dump_trace is not None:\n\n        trace_fname = dump_trace + '_branch_{}'.format(nb_visited_states)\n        print(\"\\n\",trace_fname,\"\\n\")\n\n    dec = kw_decision\n    print(f'final decision {dec}')\n\n    if dump_trace is not None:\n        dump_relu_problem(trace_fname,\n                          candidate_domain.mask,\n                          candidate_domain.lower_bound,\n                          candidate_domain.upper_bound,\n                          candidate_domain.lower_all,\n                          candidate_domain.upper_all,\n                          gt_lb_relu,\n                          dec) \n    return records_relu  \n\n\ndef relu_split(layers, mask):\n    '''\n    Given a mask that defines a domain, split it according to a non-linearity.\n\n    The non-linearity is chosen to be as early as possible in the network, but\n    this is just a heuristic.\n\n    `layers`: list of layers in the network. 
Allows us to distinguish\n Maxpooling and ReLUs\n `mask`: A list of [list of {-1, 0, 1}] where each elements corresponds to a layer,\n giving constraints on the Neuron.\n Returns: A list of masks, in the same format\n\n '''\n done_split = False\n non_lin_layer_idx = 0\n all_new_masks = []\n for layer_idx, layer in enumerate(layers):\n if type(layer) in [nn.ReLU, nn.MaxPool1d]:\n non_lin_lay_mask = mask[non_lin_layer_idx]\n if done_split:\n # We have done our split, so no need for any additional split\n # -> Pass along all of the stuff\n for new_mask in all_new_masks:\n new_mask.append(non_lin_lay_mask)\n elif all([neuron_dec != -1 for neuron_dec in non_lin_lay_mask]):\n # All the neuron in this layer have already an assumption.\n # This will just be passed along when we do our split.\n pass\n else:\n # This is the first layer we encounter that is not completely\n # assumed so we will take the first \"undecided\" neuron and\n # split on it.\n\n # Start by making two copies of everything that came before\n if type(layer) is nn.ReLU:\n all_new_masks.append([])\n all_new_masks.append([])\n elif type(layer) is nn.MaxPool1d:\n for _ in range(layer.kernel_size):\n all_new_masks.append([])\n else:\n raise NotImplementedError\n\n for prev_lay_mask in mask[:non_lin_layer_idx]:\n for new_mask in all_new_masks:\n new_mask.append(prev_lay_mask)\n\n # Now, deal with the layer that we are actually splitting\n neuron_to_flip = non_lin_lay_mask.index(-1)\n for choice, new_mask in enumerate(all_new_masks):\n # choice will be 0,1 for ReLU\n # it will be 0, .. kernel_size-1 for MaxPool1d\n mod_layer = non_lin_lay_mask[:]\n mod_layer[neuron_to_flip] = choice\n new_mask.append(mod_layer)\n\n done_split = True\n non_lin_layer_idx += 1\n for new_mask in all_new_masks:\n assert len(new_mask) == len(mask)\n if not done_split:\n all_new_masks = [mask]\n return all_new_masks\n","repo_name":"JingyueLu/PLNN-verification-journal","sub_path":"plnn/relu_stability.py","file_name":"relu_stability.py","file_ext":"py","file_size_in_byte":20379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"3523917714","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 10 21:59:06 2022\n\n@author: smithakolan\n\"\"\"\n\nimport shap\nimport top10Dapps\nimport json\nimport pandas as pd\n\n#Get NFT ID, RARITY, LAST SALE PRICE, NO. 
OF TRANSACTIONS\ndapps = top10Dapps.top_10_dapps\n\nfor dapp in dapps:\n nftFile = open('/Users/smithakolan/Documents/GitHub/metacent-rarity/cleanednfts/' + dapp +'.json')\n nftData = json.load(nftFile)\n\n tnxsdf = pd.read_csv('/Users/smithakolan/Documents/GitHub/metacent-rarity/transaction_data/'+ dapp+ '.csv',\n usecols = ['TOKEN_ID'])\n\n tnxsCount = tnxsdf.groupby(['TOKEN_ID']).size().reset_index(name='counts')\n\n dataList = []\n nftCount = len(nftData)\n for i in range(0, nftCount):\n token_id = nftData[i]['token_id']\n rarity = nftData[i]['rarity']\n \n if(nftData[i]['last_sale']):\n last_sale_price = nftData[i]['last_sale']['total_price']\n \n else:\n last_sale_price = None\n \n dataList.append([token_id, rarity, last_sale_price])\n \n df = pd.DataFrame(dataList, columns =['TOKEN_ID', 'rarity', 'last_sale_price'])\n df['TOKEN_ID'] = pd.to_numeric(df['TOKEN_ID'])\n\n newDF = pd.merge(df, tnxsCount, on='TOKEN_ID', how='outer')\n\n newDF = newDF.dropna()\n\n\n newDF.to_csv('/Users/smithakolan/Documents/GitHub/metacent-rarity/training_data/' + dapp + '.csv', index = False)\n\n \n\"\"\"\n\ndataList\n \n\n# load JS visualization code to notebook\nshap.initjs()\n\n# use Tree SHAP explainer to explain the gradient boosting tree model\n# you only need to explain and plot the first explaination\n# --- Write your code below ---\n\nexplainer = shap.TreeExplainer(gb)\nshap_values = explainer.shap_values(X_test)\n\n# visualize the first prediction's explanation (use matplotlib=True to avoid Javascript)\ndisplay(shap.force_plot(explainer.expected_value, shap_values[0,:], X_test.iloc[0,:]))\n\n# shap.force_plot(explainer.expected_value, shap_values[0,:], X_test.iloc[0,:], link=\"logit\")\nshap.summary_plot(shap_values, features=X_test, feature_names=feature_names)\n\n# Rarity, Last Sale price, \n\"\"\"\n","repo_name":"smithakolan/metacent-rarity","sub_path":"Analysis/getTrainingData.py","file_name":"getTrainingData.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43483146944","text":"from ursina import *\n\n\nclass Player(Entity):\n def __init__(self, position, model, texture, collider, scale_x, scale_y):\n super().__init__()\n self.hp = 100\n self.dmg = 0\n\n self.position = position\n self.dx = self.dy = 0\n self.dt = 1 / 60\n self.g = 9.8\n\n self.model = model\n self.texture = texture\n self.collider = collider\n self.scale_x = scale_x\n self.scale_y = scale_y\n self.always_on_top = True\n\n self.frame = 0\n\n def update(self):\n self.x += self.dx\n self.y += self.dy\n self.hp -= self.dmg\n\n def attack(self, other):\n pass\n\n def is_hit(self):\n pass\n\n def move(self, other):\n self.dx = held_keys['d'] * 15 * self.dt - held_keys['a'] * 15 * self.dt\n if self.dx < 0:\n if self.frame > 9:\n self.frame = 0\n if self.frame % 3 == 0:\n self.texture = f\"animations//char1_walk_left//{self.frame // 3}.png\"\n elif self.dx > 0:\n if self.frame > 9:\n self.frame = 0\n if self.frame % 3 == 0:\n self.texture = f\"animations//char1_walk_right//{self.frame // 3}.png\"\n self.frame += 1\n\n self.dy -= self.g * self.dt\n if self.intersects(other).hit:\n self.y = 0\n self.dy = .75 * held_keys['w']\n if self.y < -10:\n self.y = 1\n self.x = 0\n self.dy = 
0\n","repo_name":"jedben2/Python-Multiplayer-Game","sub_path":"game/game_mods/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28116217892","text":"def min_coin_count(value, coin_list):\n coin_count = 0\n \n coin_list.sort(reverse=True) #sort정렬, 기본값은 오름차순 정렬, reverse=True옵션은 내림차순 정렬\n for coin in coin_list:\n coin_count += value // coin #나누기 연산 후 나머지 버림\n value = value % coin\n \n return coin_count\n \ndef min_coin_count(value, coin_list):\n total_coin_count = 0\n details = list()\n \n coin_list.sort(reverse=True)\n for coin in coin_list:\n coin_num = value // coin\n total_coin_count += coin_num\n details.append([coin, coin_num])\n value -= coin_num * coin\n return total_coin_count, details","repo_name":"z0299/coding_test_practice","sub_path":"coin.py","file_name":"coin.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20244062013","text":"import requests\nimport json\nimport time\nimport sys\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\nimport configparser\n\nconfig = configparser.ConfigParser()\nconfig.read('settings.ini')\nnad_url = config['nad']['url']\nnad_pass = config['nad']['password']\nnad_user = config['nad']['username']\nrvison_ip = config['rvision']['ip']\nrvision_key = config['rvision']['key']\nsession = requests.Session()\nsession.trust_env = False\n\ndef auth():\n\n\tglobal session\n\turl = f\"{nad_url}/api/v2/auth/login\"\n\tpayload = json.dumps({\n\t\t\"username\": nad_user,\n\t\t\"password\": nad_pass\n\t})\n\theaders = {\n\t\t'Referer': f\"{nad_url}/\",\n\t\t'Content-Type': 'application/json'\n\t}\n\n\tresponse = session.post(url, headers=headers,verify=False, data=payload)\n\treturn response.headers['Set-Cookie'].split(\"csrftoken=\")[1].split(\";\")[0]\n\ndef get_events(csrf,src,dst,alert,time_from,time_to):\n\n\tglobal session\n\turl = f\"{nad_url}/api/v2/bql?source=2\"\n\tpayload = f\"SELECT \\\"pr\\\", \\\"msg\\\", \\\"cls\\\", \\\"success.affected\\\", \\\"ts\\\", \\\"attacker.ip\\\", \\\"attacker.geo.country\\\", \\\"victim.ip\\\", \\\"victim.geo.country\\\", \\\"sid\\\", \\\"attacker.dns\\\", \\\"victim.dns\\\", \\\"att_ck\\\", \\\"id\\\", extract_raw_object('false_positive'), (SELECT \\\"rpt.cat\\\", \\\"id\\\", \\\"start\\\", \\\"end\\\", \\\"flags\\\", \\\"state\\\", \\\"app_proto\\\", \\\"has_files\\\", \\\"rpt.color\\\", \\\"rpt.type\\\", \\\"rpt.where\\\", \\\"rpt.id\\\" FROM flow LIMIT 1)\\r\\nFROM alert\\r\\nWHERE \\\"ts\\\" >= {time_from} AND \\\"ts\\\" <= {time_to} AND EXISTS (SELECT * FROM flow WHERE \\\"end\\\" >= {time_from} AND \\\"end\\\" <= {time_to} AND (alert.pr == 1 && alert.msg == \\\"{alert}\\\" && alert.attacker.ip == {src} && alert.victim.ip == {dst}))\\r\\nORDER BY \\\"pr\\\" asc\\r\\nLIMIT 1\"\n\theaders = {\n\t\t'Referer': f\"{nad_url}/\",\n\t\t'X-Csrftoken': csrf,\n\t\t'Content-Type': 'text/plain'\n\t\t}\n\n\tresponse = session.post(url, headers=headers, verify=False, data=payload)\n\tresponse = json.loads(response.text)\n\treturn response[\"result\"]\n\ndef get_current_event_info(csrf,id,key,time_from,time_to):\n\n\tglobal session\n\turl = f\"{nad_url}/api/v2/flow/{key}/alert/{id}?end={time_to}&source=2&start={time_from}\"\n\tpayload = \"\"\n\theaders = {\n\t\t'Referer': 
f\"{nad_url}/\",\n\t\t'X-Csrftoken': csrf\n\t\t}\n\n\tresponse = session.get(url, headers=headers, verify=False, data=payload)\n\treturn json.loads(response.text)\n\ndef update_inc(id,sign,detect,name,src,dst,time,desc):\n\n\turl = f\"https://{rvison_ip}/api/v2/incidents\"\n\tpayload = {\n\t\t\t'identifier': id,\n\t\t\t'detectDescription':sign,\n\t\t\t'description':desc,\n\t\t\t'events_data': f'{{\\\"detection\\\":\\\"{detect}\\\", \\\"name\\\":\\\"{name}\\\", \\\"src\\\":\\\"{src}\\\", \\\"dst\\\":\\\"{dst}\\\", \\\"time\\\":\\\"{time}\\\",\\\"source_id\\\":\" \"}}'\n\t\t}\n\theaders = {\n\t\t'X-Token' : rvision_key\n\t}\n\n\tresponse = requests.request(\"POST\", url, headers=headers, verify = False, data=payload)\n\treturn response.text\n\ntt = int(time.time())\ntf = tt - (48*60*60)\ntt *=1000\ncsrf = auth()\n\ntry:\n\tevents = get_events(csrf,sys.argv[1],sys.argv[2],sys.argv[3],tf,tt)\n\tfor event in events:\n\t\tname = event[1]\n\t\tdetect = event[2]\n\t\ttime = event[4].replace(\"T\",\" \")\n\t\tid = event[13]\n\t\tkey = str(event[15]).split(\"'\")[1].split(\"\\'\")[0]\n\n\tevent_info = get_current_event_info(csrf,id,key,tf,tt)\n\tsign = event_info[\"signature\"]\n\tdesc = str(detect)\n\tupdate_inc(sys.argv[4],sign[\"rule\"],detect,name,sys.argv[1],sys.argv[2],time,desc)\n\nexcept Exception as ex:\n\tprint(ex)\n","repo_name":"SeregaDeveloper/rvision_connectors","sub_path":"nad_connector.py","file_name":"nad_connector.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"22500361383","text":"import numpy as np\nimport cv2\nfrom basic_fns import *\nimport control_pts as ctrl\nfrom numpy import float32\n\ndef make_delaunay_class(image, rectangle, shape):\n\n rectangle = (0, 0, image.shape[1], image.shape[0])\n delaunay = cv2.Subdiv2D(rectangle)\n for k in range(shape.num_parts):\n delaunay.insert((shape.part(k).x, shape.part(k).y))\n return delaunay\n\ndef rect_contains(rect, point) :\n if point[0] < rect[0] :\n return False\n elif point[1] < rect[1] :\n return False\n elif point[0] > rect[2] :\n return False\n elif point[1] > rect[3] :\n return False\n return True\n\ndef draw_delaunay_tri(img):\n\n rectangle, shape = ctrl.get_rect_and_68control_points(img)\n md = make_delaunay_class(img, rectangle, shape)\n triangleList = md.getTriangleList()\n triangleList = triangleList\n size = img.shape\n r = (0, 0, size[1], size[0])\n\n for t in triangleList :\n if(885 in t or -885 in t):\n color = (255, 0, 0)\n continue\n else:\n color = (0, 0, 255)\n pt1 = (t[0], t[1])\n pt2 = (t[2], t[3])\n pt3 = (t[4], t[5])\n\n if rect_contains(r, pt1) and rect_contains(r, pt2) and rect_contains(r, pt3) :\n cv2.line(img, pt1, pt2, color, 1)\n cv2.line(img, pt2, pt3, color, 1)\n cv2.line(img, pt3, pt1, color, 1)\n\n return img\n\ndef get_rect(rect):\n return (rect.left(), rect.top(), rect.right(), rect.bottom())\n\ndef warp_example_to_subject(example, subject):\n\n dsize = example.shape; rows = example.shape[0]; cols = example.shape[1]\n ex_rect, ex_shape = ctrl.get_rect_and_68control_points(example)\n ex_points_and_indices = ctrl.get_points_and_indices(ex_shape)\n ex_del = make_delaunay_class(example, ex_rect, ex_shape)\n sub_rect, sub_shape = ctrl.get_rect_and_68control_points(subject)\n sub_points_and_indices = ctrl.get_points_and_indices(sub_shape)\n sub_del = make_delaunay_class(subject, sub_rect, sub_shape)\n #we have calculated control points and delaunay triangles of both example and subject\n\n 
warped_tri = sub_del.getTriangleList()\n input_tri = ex_del.getTriangleList()\n output = np.ones(example.shape, dtype=float32)\n\n for i in range(warped_tri.shape[0]):\n # Affine Transformation\n dst = np.reshape(warped_tri[i, :], newshape=(3, 2))\n if(885 in dst or -885 in dst):\n continue\n tri2 = dst.astype(float32)\n pt_num=[]\n for pt in tri2:\n for k in sub_points_and_indices:\n if(pt[0] == k[0][0] and pt[1] == k[0][1]):\n pt_num.append(k[1])\n\n src = np.ndarray(shape=(3, 2))\n for i, num in enumerate(pt_num):\n src[i, :] = ex_points_and_indices[num][0]\n tri1 = src.astype(dtype=float32)\n\n r1 = cv2.boundingRect(tri1) #xywh\n r2 = cv2.boundingRect(tri2)\n tri1Cropped = []\n tri2Cropped = []\n for i in range(3):\n tri1Cropped.append(((tri1[i][0] - r1[0]),(tri1[i][1] - r1[1])))\n tri2Cropped.append(((tri2[i][0] - r2[0]),(tri2[i][1] - r2[1])))\n img1Cropped = example[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]\n warpMat = cv2.getAffineTransform(np.float32(tri1Cropped), np.float32(tri2Cropped))\n img2Cropped = cv2.warpAffine( img1Cropped, warpMat, (r2[2], r2[3]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101 )\n # Get mask by filling triangle\n mask = np.zeros((r2[3], r2[2], 3), dtype = np.float32)\n\n cv2.fillConvexPoly(mask, np.int32(tri2Cropped), (1.0, 1.0, 1.0), 16, 0);\n # Apply mask to cropped region\n img2Cropped = img2Cropped * mask\n\n # Copy triangular region of the rectangular patch to the output image\n output[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = output[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] * ( (1.0, 1.0, 1.0) - mask )\n output[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] = output[r2[1]:r2[1]+r2[3], r2[0]:r2[0]+r2[2]] + img2Cropped\n\n return output\n\ndef merge_example_and_image(example, subject):\n\n subject_face = warp_example_to_subject(subject, subject)\n output = subject - subject_face\n warped_example = warp_example_to_subject(example, subject)\n output = output + warped_example\n return output\n\n\n#\n# example = read_image_as_rgb('example.png')\n# ex = read_image_as_rgb('example.png')\n# subject = read_image_as_rgb('subject.png')\n# sub = np.copy(subject)\n#\n# del_ex = draw_delaunay_tri(ex)\n# write_image('del_ex.png', rgb2bgr(del_ex))\n# del_sub = draw_delaunay_tri(sub)\n# write_image('del_sub.png', rgb2bgr(del_sub))\n#\n# output = warp_example_to_subject(example, subject)\n# show_image('please', rgb2bgr(output))\n# write_image('example2subject.png', rgb2bgr(output))\n","repo_name":"Simon-zys/COL-783-Assignments","sub_path":"Assignment 2/makeup/delaunay.py","file_name":"delaunay.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73663083080","text":"# TRADUCCION DE CICLOS E IF:\npila_saltos = [] # Almacenamos los saltos del programa\n\n#TRADUCCION FUNCIONES\npila_saltos_funciones = [] # Almacena los saltos que deben ser ejecutados para revisar funciones\npila_funciones = {} # Auxiliar, no tabla de simbolos\nflag_func = True # flag que identifica la primera funcion que nos llevara a el MAIN\npila_saltos_funciones_aux = [] # Contiene las direcciones de retorno de los call\npila_end_procedure = [] # Contiene las direcciones de donde saltamos de los modulos\n\n# Variables dimensionadas\ntamano_total = 0 # Contiene el tamaño total de espacio que necesita un arreglo\n\ndef updateSimbolTable(simbolo,tipo,tabla_de_simbolos,Simbol_Index):\n tabla_de_simbolos[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n 
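# Each symbol is stored as a small nested record: its position in the table (Index),\n    # its declared type (Type) and its current value (Value, filled in later by\n    # fillSimbolTable once an assignment is seen).\n    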
tabla_de_simbolos[simbolo][\"Index\"] = Simbol_Index # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Type\"] = tipo # Guardamos en el diccionario el tipo de la variable\n tabla_de_simbolos[simbolo][\"Value\"] = None\n\n Simbol_Index = Simbol_Index + 1 # Incrementamos la posicion de la tabla de simbolos\n\n return Simbol_Index\n\ndef fillSimbolTable(simbolo,tabla_de_simbolos,valor):\n tabla_de_simbolos[simbolo][\"Value\"] = valor\n\n# CUADRUPLOS\ntemporales = [] # Variable para ingresar los temporales\ntemporalesCopy = [] # Copia de los temporales\ntemporalCounter = 1 # Variable para contar el numero de temporales (T1 inicial)\nsTemporal = \"T\" # String constante para los temporales\npila_cuadruplos = [] # Pila para guardar los cuadruplos generados \ncontador_cuadruplos = 0 # contador de los cuadruplos generados\n#temporalesCopyArr = []\n# Funcion que permite generar cuadruplos\n\ndef genTemporales():\n global temporalCounter\n temporal = sTemporal + str(temporalCounter) # Creamos un temporal tipo String\n temporales.append(temporal) # Añadimos al arreglo de temporales el string \n temporalesCopy.append(temporal) # Creamos una copia que no sera Pop() sus valores\n #temporalesCopyArr.append(temporal) # Creamos una copia que no sera Pop() sus valores\n\n temporalCounter = temporalCounter + 1 # Aumentamos en 1 nuestro contador de temporales\n\n\ndef genCuadruplos(simbolo,operation,pila_operandos):\n if (operation == \"=\"): # Validacion de Creacion / asignacion\n global contador_cuadruplos\n operand1 = pila_operandos.pop()\n result = simbolo\n if not simbolo in pila_operandos: # Verificamos si el simbolo ya habia sido declarado, para no meterlo en la pila mas veces\n pila_operandos.append(result) \n cuadruplo = [operation, operand1,None, result] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n #print(pila_operandos)\n else:\n cuadruplo = [operation, operand1,None, result] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n\n elif(operation == \"+\" or operation == \"-\" or operation == \"*\" or operation == \"/\" or operation == \"^\"): # Validacion Aritmetica\n operand2 = pila_operandos.pop()\n operand1 = pila_operandos.pop()\n genTemporales() # Generamos un temporal\n result = temporales.pop() \n pila_operandos.append(result)\n cuadruplo = [operation, operand1, operand2, result] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n #print(pila_operandos)\n \n elif(operation == \"&\" or operation == \"==\" or operation == \"!=\" or \n operation == \">\" or operation == \">=\" or operation == \"<=\" or\n operation == \"|\" or operation == \"<\"): # Validacion logica\n\n operand2 = pila_operandos.pop()\n operand1 = pila_operandos.pop()\n genTemporales() # Generamos un temporal\n result = temporales.pop() \n pila_operandos.append(result)\n cuadruplo = [operation, operand1, operand2, result] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n #print(pila_operandos)\n\n elif(operation == \"READ\"):\n operand1 = pila_operandos.pop()\n cuadruplo = [operation, operand1,None, None] # Armamos el 
cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n \n elif(operation == \"WRITE\"):\n operand1 = pila_operandos.pop()\n cuadruplo = [operation, operand1,None, None] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n \n else:\n print(operation)\n print(\"error detected! bad logic!!\")\n\n# FUNCIONES IF -TRADUCCION-\ndef genCuadruplos_if(pila_operandos):\n global contador_cuadruplos\n operand1 = pila_operandos.pop() # Recuperamos el operando\n genTemporales() # Generamos un temporal\n result = temporales.pop() # Guardamos el temporal como resultado\n pila_operandos.append(result) # Añadimos a la pila de operandos el resultado\n pila_saltos.append(contador_cuadruplos) # Añadimos el cuadruplo a saltar en su pila\n cuadruplo = [\"GOTOF\", operand1,None,None] # Armamos el cuadruplo\n pila_cuadruplos.append(cuadruplo) # Añadimos el cuadruplo a la pila de cuadruplos\n contador_cuadruplos = contador_cuadruplos + 1 # Aumentamos el contador de cuadruplos\n\ndef genCuadruplos_if_then():\n global contador_cuadruplos\n salto = pila_saltos.pop() # Obtenemos el salto\n pila_cuadruplos[salto][3] = contador_cuadruplos + 1 # Actualizamos el valor del salto de cuadruplo a saltar \n pila_saltos.append(contador_cuadruplos) #\n cuadruplo = [\"GOTO\",None,None,None] # Armamos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n\ndef endifgoto():\n global contador_cuadruplos\n salto = pila_saltos.pop()\n pila_cuadruplos[salto][3] = contador_cuadruplos\n\n# FUNCIONES FOR -TRADUCCION-\n\ndef genCuadruplos_for(pila_operandos):\n global contador_cuadruplos\n operand1 = pila_operandos.pop() # Recuperamos el operando\n genTemporales() # Generamos un temporal\n result = temporales.pop() # Guardamos el temporal como resultado\n pila_operandos.append(result) # Añadimos a la pila de operandos el resultado\n pila_saltos.append(contador_cuadruplos) # Añadimos el cuadruplo a saltar en su pila\n cuadruplo = [\"GOTOF\", operand1,None,None] # Armamos el cuadruplo\n pila_cuadruplos.append(cuadruplo) # Añadimos el cuadruplo a la pila de cuadruplos\n contador_cuadruplos = contador_cuadruplos + 1 # Aumentamos el contador de cuadruplos\n \ndef genCuadruplos_endfor():\n global contador_cuadruplos \n salto = pila_saltos.pop()\n cuadruplo = [\"GOTO\",None,None,None]\n pila_cuadruplos.append(cuadruplo)\n pila_cuadruplos[contador_cuadruplos][3] = salto - 1\n contador_cuadruplos = contador_cuadruplos + 1 # Aumentamos el contador de cuadruplos\n pila_saltos.append(salto)\n \n\ndef genCuadruplos_plus_for(aux_for,operation,pila_operandos):\n global contador_cuadruplos\n cuadruplo = [operation, aux_for, 1, aux_for] # Armamos el cuadruplo\n #print(\"Cuadruplo -> \",cuadruplo) # Imprimimos el cuadruplo\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1 \n\ndef genCuadruplos_endingfor():\n global contador_cuadruplos\n salto = pila_saltos.pop()\n pila_cuadruplos[salto][3] = contador_cuadruplos\n\n\n# FUNCIONES DO WHILE -TRADUCCION-\ndef genCuadruplos_do_while(pila_operandos):\n global contador_cuadruplos\n resultado = pila_operandos.pop()\n pila_saltos.append(contador_cuadruplos)\n cuadruplo = [\"GOTOF\",resultado,None,None]\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n print(\"PILA SE DALTOS 
DO_WHILE\",pila_saltos)\n\n\ndef genCuadruplos_while(pila_operandos):\n global contador_cuadruplos\n pila_saltos.append(contador_cuadruplos)\n print(\"PILA SE DALTOS WHILE\",pila_saltos)\n\n #contador_cuadruplos = contador_cuadruplos + 1\n# print(\"entro al while\",pila_saltos)\n# global contador_cuadruplos\n# pila_saltos.append(contador_cuadruplos) # Guardamos la posicion donde estamos actualmente y rellenar\n# result = pila_operandos.pop() # recuperamos el resultado generado por la expresion de log_exp\n# cuadruplo = [\"GOTOF\",result,None,None]\n# pila_cuadruplos.append(cuadruplo)\n# print(\"salto GOTOF -> \",pila_saltos)\n# print(pila_saltos)\n# contador_cuadruplos = contador_cuadruplos + 1\n\n\ndef genCuadruplos_whilethen():\n global contador_cuadruplos\n print(\"PILA SE DALTOS WHILETHE\",pila_saltos)\n f = pila_saltos.pop()\n retorno = pila_saltos.pop()\n cuadruplo = [\"GOTO\",None,None,retorno-1]\n pila_cuadruplos.append(cuadruplo)\n pila_cuadruplos[f][3] = contador_cuadruplos\n contador_cuadruplos = contador_cuadruplos + 1\n# global contador_cuadruplos\n# salto = pila_saltos.pop() # obtenemos donde saltamos y regresamos a donde hicimos su expresion\n# cuadruplo = [\"GOTO\",None,None,salto-1] # Generamos cuadruplo\n# pila_cuadruplos.append(cuadruplo) # Añadimos cuadruplo a nuestra pila\n# pila_saltos.append(contador_cuadruplos) # Mandamos a la pila nuestra ubicacion\n# contador_cuadruplos = contador_cuadruplos + 1 # actualizamos contador\n# pila_cuadruplos[salto][3] = contador_cuadruplos # completamos cuadruplo\n\n# pila_saltos.append(salto)\n# salto = pila_saltos.pop() # obtenemos la posición a rellenar\n# print(\"salto GOTO -> \",pila_saltos)\n# print(pila_saltos)\n# pila_cuadruplos[salto][3] = contador_cuadruplos # completamos cuadruplo\n# print(\"salio al while\",pila_saltos)\n\ndef genCuadruplos_endingwhile():\n global contador_cuadruplos\n salto = pila_saltos.pop() # obtenemos la posición a rellenar\n pila_cuadruplos[salto][3] = contador_cuadruplos # completamos cuadruplo\n\n# FUNCIONES\ndef genCuadruploinit_dec_func(simbolo):\n global contador_cuadruplos\n # Solo el la primera funcion nos genera un goto al main\n print(\"Pila Saltos funcionesc= \",len(pila_funciones))\n if(len(pila_funciones) < 1): # Verificamos si es la primera funcion que existe\n pila_saltos_funciones.append(contador_cuadruplos) # asignamos posición actual del cuadruplo a la pila de saltos\n cuadruplo = [\"GOTOS\",None,None,None] # Aun falta Llenar su espacio de destino (para la primera funcion)\n pila_cuadruplos.append(cuadruplo) \n # Guardamos datos de la funcion\n pila_funciones[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n pila_funciones[simbolo][\"Salto\"] = contador_cuadruplos # Guardamos la posicion de su cuadruplo\n\n contador_cuadruplos = contador_cuadruplos + 1\n\n \n pila_saltos_funciones.append(contador_cuadruplos) # Guardamos donde salto la funcion\n pila_funciones[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n pila_funciones[simbolo][\"Salto\"] = contador_cuadruplos # Guardamos la posicion de su cuadruplo\n\n# FUNCIONES #\n\ndef genCuadruplo_dec_func():\n global contador_cuadruplos\n pila_saltos_funciones.append(contador_cuadruplos)\n cuadruplo = [\"Endprocedure\",None,None,None] # Aun falta Llenar su espacio de destino de Main\n pila_end_procedure.append(contador_cuadruplos)\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n \ndef fill_functionsCuadruplos_main(posicionMain):\n 
pila_cuadruplos[pila_saltos_funciones[0]][3] = posicionMain # Actualizamos el valor con la posicion del main\n\ndef fill_functionsCuadruplos():\n # Llenamos las llamadas a funcion\n #for x in range (len(pila_end_procedure)):\n valuemodif = pila_saltos_funciones_aux.pop()\n cuadMod = pila_end_procedure[0]\n pila_end_procedure.pop(0)\n pila_cuadruplos[cuadMod][3] = valuemodif + 1 # Actualizamos el valor con la posicion del main\n\n\ndef gen_callCuadruplo(simbolo):\n global contador_cuadruplos\n # Lo buscamos en el diccionario de funciones\n Posicion = pila_funciones[simbolo][\"Salto\"] # Guardamos la posicion de su cuadruplo\n cuadruplo = [\"CALL\",None,None,Posicion]\n pila_cuadruplos.append(cuadruplo)\n pila_saltos_funciones.append(contador_cuadruplos)\n pila_saltos_funciones_aux.append(contador_cuadruplos)\n contador_cuadruplos = contador_cuadruplos + 1\n\ndef endProgram():\n global contador_cuadruplos\n cuadruplo = [\"Endprogram\",None,None,None]\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n\n# FUNCIONES PARA LOS ARREGLOS #\n# CUANDO SE DECLARAN INICIALMENTE #\ndef genCuad_Arr1D(pila_operandos,simbolo,tipo,tabla_de_simbolos,Simbol_Index):\n\n global contador_cuadruplos\n Resultado_Indice = pila_operandos.pop()\n print(\"Resultado indice \",Resultado_Indice)\n cuadruplo = [\"VER\",Resultado_Indice,0,Resultado_Indice]\n pila_cuadruplos.append(cuadruplo)\n contador_cuadruplos = contador_cuadruplos + 1\n #Actualizamos tabla de simbolos 1D:\n tabla_de_simbolos[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n tabla_de_simbolos[simbolo][\"Index\"] = Simbol_Index # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Type\"] = tipo # Guardamos en el diccionario el tipo de la variable\n tabla_de_simbolos[simbolo][\"Value\"] = None\n Simbol_Index = Simbol_Index + 1\n return Simbol_Index\n \n\n\ndef createSpaceInMemory(simbolo,dimension1,dimension2,dimension3,memArreglos):\n global tamano_total\n tamano_total = dimension1*dimension2*dimension3 # Obtenemos el tamaño total de espacios a utilizar \n \n print(\"Tamaño total -> \",tamano_total) \n for x in range(tamano_total): # Creamos el numero n de espacios en el arreglo\n memArreglos.append(simbolo) # Simbolicamente agregamos su asignacion en memoria\n # Generamos Informacion en tabla de simbolos func updateSimbolTable_ARR, se llama desde el main\n\ndef updateSimbolTable_ARR_1D(simbolo,tipo,tabla_de_simbolos,Simbol_Index,base,dimension1,dimension2,dimension3):\n global tamano_total\n base = Simbol_Index # aumentamos la base al tamano_total + 1\n lim_Inf = base # Generamos nuestro limite inferior \n lim_Sup = Simbol_Index + tamano_total - 1 # Generamos nuestro limite superior\n tabla_de_simbolos[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n tabla_de_simbolos[simbolo][\"1D\"] = dimension1 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Type\"] = tipo # Guardamos en el diccionario el tipo de la variable\n tabla_de_simbolos[simbolo][\"Base\"] = base # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_Inf\"] = lim_Inf # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_sup\"] = lim_Sup # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"M\"] = tamano_total # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Array\"] = True\n Simbol_Index = Simbol_Index + tamano_total # Incrementamos la posicion de la tabla de 
simbolos\n return Simbol_Index\n\ndef updateSimbolTable_ARR_2D(simbolo,tipo,tabla_de_simbolos,Simbol_Index,base,dimension1, dimension2, dimension3):\n global tamano_total\n base = Simbol_Index # aumentamos la base al tamano_total + 1\n lim_Inf = base # Generamos nuestro limite inferior \n lim_Sup = Simbol_Index + tamano_total - 1 # Generamos nuestro limite superior\n tabla_de_simbolos[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n tabla_de_simbolos[simbolo][\"1D\"] = dimension1 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"2D\"] = dimension2 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Type\"] = tipo # Guardamos en el diccionario el tipo de la variable\n tabla_de_simbolos[simbolo][\"Base\"] = base # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_Inf\"] = lim_Inf # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_sup\"] = lim_Sup # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"M\"] = tamano_total # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"m1\"] = dimension2*dimension3 # Guardamos la posicion en la tabla de simbolos \n\n tabla_de_simbolos[simbolo][\"Array\"] = True\n Simbol_Index = Simbol_Index + tamano_total # Incrementamos la posicion de la tabla de simbolos\n return Simbol_Index\n\ndef updateSimbolTable_ARR_3D(simbolo,tipo,tabla_de_simbolos,Simbol_Index,base,dimension1, dimension2, dimension3):\n global tamano_total\n base = Simbol_Index # aumentamos la base al tamano_total + 1\n lim_Inf = base # Generamos nuestro limite inferior \n lim_Sup = Simbol_Index + tamano_total - 1 # Generamos nuestro limite superior\n tabla_de_simbolos[simbolo] = {} # Nos permite crear un diccionario de tipo NESTED\n tabla_de_simbolos[simbolo][\"1D\"] = dimension1 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"2D\"] = dimension2 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"3D\"] = dimension3 # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Type\"] = tipo # Guardamos en el diccionario el tipo de la variable\n tabla_de_simbolos[simbolo][\"Base\"] = base # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_Inf\"] = lim_Inf # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"Lim_sup\"] = lim_Sup # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"M\"] = tamano_total # Guardamos la posicion en la tabla de simbolos\n tabla_de_simbolos[simbolo][\"m1\"] = dimension2*dimension3 # Guardamos la posicion en la tabla de simbolos \n tabla_de_simbolos[simbolo][\"m2\"] = dimension3*1 # Guardamos la posicion en la tabla de simbolos \n tabla_de_simbolos[simbolo][\"Array\"] = True\n Simbol_Index = Simbol_Index + tamano_total # Incrementamos la posicion de la tabla de simbolos\n return Simbol_Index\n","repo_name":"HectorPequeno/LyT_LittleCode","sub_path":"LittleTools.py","file_name":"LittleTools.py","file_ext":"py","file_size_in_byte":19638,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39880696255","text":"from DrQA.drqa import retriever\nimport json\nimport argparse\nimport os\n# from PATH import TFIDF_PATH, CURR_DIR\n\n\n\n# RETRIEVE THE CURRENT DIRECTORY =====================================================================================================\n\ndef 
read_dataset(path):\n f = open(path, \"r\")\n dataset = json.load(f)\n return dataset\n\ndef write_output(path_save, data):\n if not os.path.exists(path_save):\n os.mknod(path_save)\n # Write the combined dataset to a JSON file\n with open(path_save, 'w') as f:\n json.dump(data, f, indent=4)\n\n\n# CONVERT THE JSON FILE INTO DRQA FORM ==============================================================================================\ndef prepare_db(dataset_name, PATHS):\n \"\"\"\n Input: the orginal dataset in EQA format + the name of the data (this will be used as the main name for the TFIDF process)\n Output: db_format dataset for creating database + 2 dictionary of contexts and questions\n \n Format: -------------------\n contexts = {\n \"1\": context1,\n \"2\": context2,\n ...\n }\n\n questions = {\n question1: [context1, context2, ....],\n ...\n }\n ---------------------------\n \n The `questions` dict will be use in the last step: create a loop to find the tfidf rank of each question in the dictionary keys\n The contexts in the `questions` dict are the ground truth of the questions\n \"\"\"\n\n # Read the dataset from json ---------------------\n dataset = read_dataset(PATHS.DATA[\"dataset_path\"])\n data = dataset['data']\n\n # Create dataset in db_form and prepare ground truth context for each questions ---------------------\n ques_dict, context_dict = {}, {}\n contexts_set = set()\n db_form = []\n\n # this is used as the index of the context, which will \n # be used to pull out to check the tfidf individually if needed\n count = 0 \n \n for item in data:\n context = item[\"context\"]\n\n # If the context is not added\n if context not in contexts_set: \n # Add context into db_form for retrieving\n db_form.append({\"id\": str(count), \"text\": context}) # db format\n\n # Add new context found to the contexts dictionary\n context_dict[str(count)] = context\n \n count += 1 # increase the ID\n\n # add context to the set\n contexts_set.add(context)\n \n # Add question and corresponding ground truth context\n ques = item['question']\n if ques in ques_dict.keys():\n ques_dict[ques].add(context)\n else:\n ques_dict[ques] = set()\n ques_dict[ques].add(context)\n\n\n # Create destination for db_form dataset ---------------------\n db_form_path = PATHS.TFIDF_PATH[\"db_form\"] + dataset_name + \"_db_form.json\"\n save_folder = os.path.dirname(db_form_path)\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n outfile = open(db_form_path, \"w\")\n \n # Write output\n for i in db_form:\n outfile.write(json.dumps(i) + '\\n')\n\n\n return (db_form_path, (ques_dict, context_dict))\n\n\n\n\n# CONVERT THE TFIDF_form.JSON FORM INTO .DB DATABASE =============================================================================================\ndef convert_to_db(db_form_path, PATHS):\n db_path = PATHS.TFIDF_PATH[\"db_path\"] + PATHS.DATA[\"dataset_name\"] + \".db\" # create path to save the database\n db_buider_PATH = PATHS.CURR_DIR + \"/DrQA/scripts/retriever/build_db.py\" # evoke the db builder python file\n \n command = \"python \" + db_buider_PATH + \" \" + db_form_path + \" \" + db_path # the command for the db builder\n os.system(command)\n\n return db_path\n\n\n\n\n# CREATE THE TFIDF RANKER FORM THE DATABASE .DB INTO TFIDF RANKER ==========================================================================\ndef build_tfidf(db_path, PATHS):\n tfidf_folder_path = PATHS.TFIDF_PATH['tfidf_folder_path'] # path to save the tfidf ranker\n tfidf_buider_PATH = PATHS.CURR_DIR + 
\"/DrQA/scripts/retriever/build_tfidf.py\" # evoke the drqa tfidf builder python file\n\n command = \"python \" + tfidf_buider_PATH + \" \" + db_path + \" \" + tfidf_folder_path # the command for the tfidf builder\n os.system(command)\n\n tfidf_path = tfidf_folder_path + \"/\" + PATHS.DATA[\"dataset_name\"] + \"-tfidf-ngram=2-hash=16777216-tokenizer=simple.npz\" # return the path to the ranker\n return tfidf_path\n\n\n\n\n# USING THE TFIDF RANKER TO CREATE A JSON RANKING FILE ==================================================================================\ndef rank_doc(ranker, ques_dict, context_dict, PATHS, top_k=10, ground_truth_score=False):\n \n # tf-idf file\n tfidf_pairs = {'documents': []} \n\n # Use to calculate the score of the ground_truth\n reverse_contexts = {k: v for v, k in context_dict.items()}\n ground_truth = {}\n \n # This will be used to log the status of the TFIDF process\n total_ques = len(ques_dict)\n num_ques = 0\n\n # for each question as the key of the ques_dict dict, we look up for its tfidf\n for ques, texts in ques_dict.items():\n \n if ground_truth_score != True:\n relevant = [] # tf-idf context of the question\n\n doc_id, doc_scores, doc_texts = ranker.closest_docs(ques, k = top_k+5) # get the most 10 relevant context\n\n # Select each relevant context such that\n # <10 relevant text && the context is not ground_truth && the context is not duplicated\n for i in range(len(doc_id)):\n if (len(relevant) < top_k) and (context_dict[doc_id[i]] not in ques_dict[ques]) and (context_dict[doc_id[i]] not in relevant):\n relevant.append(context_dict[ doc_id[i] ])\n\n # Add to the table all the relevant context found\n for item in relevant:\n tfidf_pairs['documents'].append({\n \"question\" : ques,\n \"context\": item\n })\n\n # -------- Code to score GROUND_TRUTH ---------------\n else:\n index_ground_truth = list(map(lambda x: reverse_contexts[x], ques_dict[ques]))\n\n for i in range(len(index_ground_truth)):\n doc_id_gt, doc_scores_gt, doc_texts_gt = ranker.closest_docs(ques, k=1, index_ques=index_ground_truth[i])\n if ques not in ground_truth.keys():\n ground_truth[ques] = []\n ground_truth[ques].append([context_dict[doc_id_gt[0]], 0 if len(doc_scores_gt.tolist()) == 0 else doc_scores_gt.tolist()[0]])\n\n # Logging the status of the process\n num_ques += 1\n if num_ques % 500 == 0:\n print(\"Proccessing:\", num_ques, \"/\", total_ques)\n\n # Writing to sample.json\n if ground_truth_score != True:\n write_output(PATHS.TFIDF_PATH[\"relevant\"] + PATHS.DATA[\"dataset_name\"] + \"_relevant.json\", tfidf_pairs)\n else:\n write_output(PATHS.TFIDF_PATH[\"gt_score\"] + PATHS.DATA[\"dataset_name\"] + \"_gt_score.json\", ground_truth)\n\n\n# ========================================================================================================================\n# PIPELINE ===============================================================================================================\n# ========================================================================================================================\ndef ranker_pipeline(top_k, gt_score, PATHS):\n dataset_path = PATHS.DATA[\"dataset_path\"]\n dataset_name = PATHS.DATA[\"dataset_name\"]\n\n\n # CONVERTING TO DRQA DATASETS\n print(\"CONVERTING TO TFIDF DATASETS =======================================================\")\n db_form_path, (ques_dict, context_dict) = prepare_db(dataset_name, PATHS)\n print(\" **** Dataset ready:\", db_form_path, \" **** \\n\")\n\n # # CONVERTING TO DATABASE\n print(\"CONVERTING TO DATABASE 
=======================================================\")\n db_path = convert_to_db(db_form_path, PATHS)\n print(\" **** Database ready:\", db_path, \" **** \\n\")\n\n # # PREPATING TFIDF RANKER\n print(\"PREPARING TFIDF RANKER ========================================================\")\n tfidf_path = build_tfidf(db_path, PATHS)\n ranker = retriever.get_class('tfidf')(db_path=db_path, tfidf_path=tfidf_path)\n print(\" **** TFIDF ready:\", tfidf_path, \" **** \\n\")\n \n # RANKING THE ORIGINAL DOCUMENT\n print(\"RANKING THE ORIGINAL DOCUMENT ========================================================\")\n rank_doc(ranker, ques_dict, context_dict, PATHS, top_k, gt_score)\n print(\"success!\\n\")\n\n","repo_name":"sonqt/agent-unanswerable","sub_path":"src/step1/retriever_tfidf.py","file_name":"retriever_tfidf.py","file_ext":"py","file_size_in_byte":8597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"16770373558","text":"import socket\nimport threading\nimport ssl\nimport OpenSSL\nimport pytest\nfrom unittest import mock\n\nfrom mitmproxy import connections\nfrom mitmproxy import exceptions\nfrom mitmproxy.net import tcp\nfrom mitmproxy.net.http import http1\nfrom mitmproxy.test import tflow\nfrom mitmproxy.test import tutils\nfrom .net import tservers\nfrom pathod import test\n\n\nclass TestClientConnection:\n\n def test_send(self):\n c = tflow.tclient_conn()\n c.send(b'foobar')\n c.send([b'foo', b'bar'])\n with pytest.raises(TypeError):\n c.send('string')\n with pytest.raises(TypeError):\n c.send(['string', 'not'])\n assert c.wfile.getvalue() == b'foobarfoobar'\n\n def test_repr(self):\n c = tflow.tclient_conn()\n assert '127.0.0.1:22' in repr(c)\n assert 'ALPN' in repr(c)\n assert 'TLS' not in repr(c)\n\n c.alpn_proto_negotiated = None\n c.tls_established = True\n assert 'ALPN' not in repr(c)\n assert 'TLS' in repr(c)\n\n def test_tls_established_property(self):\n c = tflow.tclient_conn()\n c.tls_established = True\n assert c.ssl_established\n assert c.tls_established\n c.tls_established = False\n assert not c.ssl_established\n assert not c.tls_established\n\n def test_make_dummy(self):\n c = connections.ClientConnection.make_dummy(('foobar', 1234))\n assert c.address == ('foobar', 1234)\n\n def test_state(self):\n c = tflow.tclient_conn()\n assert connections.ClientConnection.from_state(c.get_state()).get_state() == \\\n c.get_state()\n\n c2 = tflow.tclient_conn()\n c2.address = (c2.address[0], 4242)\n assert not c == c2\n\n c2.timestamp_start = 42\n c.set_state(c2.get_state())\n assert c.timestamp_start == 42\n\n c3 = c.copy()\n assert c3.get_state() != c.get_state()\n c.id = c3.id = \"foo\"\n assert c3.get_state() == c.get_state()\n\n def test_eq(self):\n c = tflow.tclient_conn()\n c2 = c.copy()\n assert c == c\n assert c != c2\n assert c != 42\n assert hash(c) != hash(c2)\n\n\nclass TestServerConnection:\n\n def test_send(self):\n c = tflow.tserver_conn()\n c.send(b'foobar')\n c.send([b'foo', b'bar'])\n with pytest.raises(TypeError):\n c.send('string')\n with pytest.raises(TypeError):\n c.send(['string', 'not'])\n assert c.wfile.getvalue() == b'foobarfoobar'\n\n def test_repr(self):\n c = tflow.tserver_conn()\n\n c.sni = 'foobar'\n c.tls_established = True\n c.alpn_proto_negotiated = b'h2'\n assert 'address:22' in repr(c)\n assert 'ALPN' in repr(c)\n assert 'TLSv1.2: foobar' in repr(c)\n\n c.sni = None\n c.tls_established = True\n c.alpn_proto_negotiated = None\n assert 'ALPN' not in repr(c)\n assert 'TLS' in repr(c)\n\n 
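# With TLS torn down again, the repr should drop the TLS marker entirely.\n        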
c.sni = None\n        c.tls_established = False\n        assert 'TLS' not in repr(c)\n\n    def test_tls_established_property(self):\n        c = tflow.tserver_conn()\n        c.tls_established = True\n        assert c.ssl_established\n        assert c.tls_established\n        c.tls_established = False\n        assert not c.ssl_established\n        assert not c.tls_established\n\n    def test_make_dummy(self):\n        c = connections.ServerConnection.make_dummy(('foobar', 1234))\n        assert c.address == ('foobar', 1234)\n\n    def test_simple(self):\n        d = test.Daemon()\n        c = connections.ServerConnection((d.IFACE, d.port))\n        c.connect()\n        f = tflow.tflow()\n        f.server_conn = c\n        f.request.path = \"/p/200:da\"\n\n        # use this protocol just to assemble - not for actual sending\n        c.wfile.write(http1.assemble_request(f.request))\n        c.wfile.flush()\n\n        assert http1.read_response(c.rfile, f.request, 1000)\n        assert d.last_log()\n\n        c.finish()\n        c.close()\n        d.shutdown()\n\n    def test_terminate_error(self):\n        d = test.Daemon()\n        c = connections.ServerConnection((d.IFACE, d.port))\n        c.connect()\n        c.close()\n        c.connection = mock.Mock()\n        c.connection.recv = mock.Mock(return_value=False)\n        c.connection.flush = mock.Mock(side_effect=exceptions.TcpDisconnect)\n        d.shutdown()\n\n    def test_sni(self):\n        c = connections.ServerConnection(('', 1234))\n        with pytest.raises(ValueError, match='sni must be str, not '):\n            c.establish_ssl(None, b'foobar')\n\n    def test_state(self):\n        c = tflow.tserver_conn()\n        c2 = c.copy()\n        assert c2.get_state() != c.get_state()\n        c.id = c2.id = \"foo\"\n        assert c2.get_state() == c.get_state()\n\n    def test_eq(self):\n        c = tflow.tserver_conn()\n        c2 = c.copy()\n        assert c == c\n        assert c != c2\n        assert c != 42\n        assert hash(c) != hash(c2)\n\n\nclass TestClientConnectionTLS:\n\n    @pytest.mark.parametrize(\"sni\", [\n        None,\n        \"example.com\"\n    ])\n    def test_tls_with_sni(self, sni):\n        address = ('127.0.0.1', 0)\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        sock.bind(address)\n        sock.listen()\n        address = sock.getsockname()\n\n        def client_run():\n            ctx = ssl.create_default_context()\n            ctx.check_hostname = False\n            ctx.verify_mode = ssl.CERT_NONE\n            s = socket.create_connection(address)\n            s = ctx.wrap_socket(s, server_hostname=sni)\n            s.send(b'foobar')\n            s.close()\n        threading.Thread(target=client_run).start()\n\n        connection, client_address = sock.accept()\n        c = connections.ClientConnection(connection, client_address, None)\n\n        cert = tutils.test_data.path(\"mitmproxy/net/data/server.crt\")\n        with open(tutils.test_data.path(\"mitmproxy/net/data/server.key\")) as f:\n            raw_key = f.read()\n        key = OpenSSL.crypto.load_privatekey(\n            OpenSSL.crypto.FILETYPE_PEM,\n            raw_key)\n        c.convert_to_ssl(cert, key)\n        assert c.connected()\n        assert c.sni == sni\n        assert c.tls_established\n        assert c.rfile.read(6) == b'foobar'\n        c.finish()\n        sock.close()\n\n\nclass TestServerConnectionTLS(tservers.ServerTestBase):\n    ssl = True\n\n    class handler(tcp.BaseHandler):\n        def handle(self):\n            self.finish()\n\n    @pytest.mark.parametrize(\"clientcert\", [\n        None,\n        tutils.test_data.path(\"mitmproxy/data/clientcert\"),\n        tutils.test_data.path(\"mitmproxy/data/clientcert/client.pem\"),\n    ])\n    def test_tls(self, clientcert):\n        c = connections.ServerConnection((\"127.0.0.1\", self.port))\n        c.connect()\n        c.establish_ssl(clientcert, \"foo.com\")\n        assert c.connected()\n        assert c.sni == \"foo.com\"\n        assert c.tls_established\n        c.close()\n        
c.finish()\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/40338_test_connections.py","file_name":"40338_test_connections.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"24313297729","text":"import pymongo\nimport os\nimport urllib\nfrom src.main.database.user.user import User\nfrom src.main.database.expense.expense import Expense\nfrom dotenv import load_dotenv\nfrom src.main.utils.base_logger import logger\nfrom bson import ObjectId\n\nproject_folder = os.path.expanduser('./') # adjust as appropriate\nload_dotenv(os.path.join(project_folder, '.env'))\n\n#getting the db data\ndb_user = urllib.parse.quote(os.getenv(\"db_user\"))\ndb_password = urllib.parse.quote(os.getenv(\"db_password\"))\ndb_url = urllib.parse.quote(os.getenv(\"db_url\"))\n\n#creating the mongo client\nmyClient = pymongo.MongoClient(\"mongodb+srv://\"+db_user+\":\"+db_password+\"@\"+db_url)\n#getting the db and users collections\nmyDb = myClient[\"expenses\"]\nmyUsers = myDb[\"Users\"]\n\n\n\n#USER\ndef newUser(info):\n user= User(info.id,info.first_name,info.username)\n\n if(exists(user)):\n logger.info(\"User exists\")\n else:\n createUserDB(user)\n\ndef createUserDB(user):\n response= myUsers.insert_one(user.getSchema())\n logger.info(\"New user create with id:\"+str(response.inserted_id))\n\ndef exists(user):\n response = myUsers.find_one({\"id\":user.id},{\"id\":1})\n logger.info(\"find_one user: \"+str(response))\n if(response != None):\n return True\n else:\n return False\n\n\n#EXPENSES\n\ndef createExpense(userid,text):\n data = text.split(\",\")\n new_id = ObjectId()\n new_expense = Expense(new_id,data[0],data[1],data[2])\n response = myUsers.update({\"id\":userid}, {'$push': {'expenses': new_expense.getSchema()}})\n logger.info(\"elements updated: \"+str(response[\"updatedExisting\"]))\n return new_expense.toString()\n\ndef deleteExpense(userid,expense_id):\n response = myUsers.update({\"id\":userid}, {'$pull': {'expenses': {\"_id\":ObjectId(str(expense_id))}}})\n\ndef updateExpense(userid,expense_id,fild_to_update,new_value_fild):\n response = myUsers.update({\"id\":userid,'expenses._id': ObjectId(str(expense_id))},\n {'$set': {\"expenses.$.\"+str(fild_to_update):str(new_value_fild)}})\n\ndef getExpenses(userid):\n expenses = myUsers.find_one({\"id\":userid},{\"expenses\":1})\n list_expenses = \"\"\n for ex in expenses[\"expenses\"]:\n list_expenses+= \"* \"+str(ex[\"date\"])+\"| \"+str(ex[\"name\"])+\" - \"+str(ex[\"price\"])+\"€\\n\\n\"\n\n return list_expenses\n\n\ndef getExpensesSelectable(userid):\n expenses = myUsers.find_one({\"id\":userid},{\"expenses\":1})\n list_expenses = []\n i=1\n for ex in expenses[\"expenses\"]:\n list_expenses.append({\n \"_id\":ObjectId(ex[\"_id\"]),\n \"id\":i,\n \"name\":ex[\"name\"],\n \"price\":ex[\"price\"],\n \"date\":ex[\"date\"]\n })\n i+=1\n\n return list_expenses","repo_name":"calinvasileandrei/ExpensesTelegramBot","sub_path":"src/main/database/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"39032345575","text":"import random\nimport faiss\nimport torch\nimport torch.nn.functional as F\nfrom collections import Counter\n\ndef knn_search(queries, targets, k):\n dim = queries.shape[-1]\n index = faiss.IndexFlatIP(dim)\n index.add(targets.cpu().numpy())\n distances, ranks = 
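Note on the expense-bot record above: Collection.update() is deprecated in modern PyMongo; update_one takes the same $push/$pull/$set modifiers and returns a result object instead of a dict. A sketch of the push call under the assumption that mongomock is installed (any live pymongo.MongoClient behaves the same way):

import mongomock  # assumption: stands in for pymongo.MongoClient in this demo

users = mongomock.MongoClient()["expenses"]["Users"]
users.insert_one({"id": 1, "expenses": []})

result = users.update_one(
    {"id": 1},
    {"$push": {"expenses": {"name": "coffee", "price": 2.5}}},
)
print(result.matched_count, result.modified_count)  # 1 1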
index.search(queries.cpu().numpy(), k)\n return distances, ranks\n\ndef sample_queries(classes, ratio=0.5):\n sample_idxs = []\n class_samples = {}\n for idx, one_class in enumerate(classes):\n if one_class not in class_samples:\n class_samples[one_class] = []\n class_samples[one_class].append(idx)\n for one_class, class_sample in class_samples.items():\n num_sample = int(len(class_sample)*ratio)\n sample = class_sample[:num_sample]\n sample_idxs += sample \n return sample_idxs\n \ndef split_query_target(features, authors, topics):\n if len(features) == len(authors) == len(topics):\n features = features.detach()\n features = F.normalize(features)\n authors = authors.detach().tolist()\n topics = topics.detach().tolist() \n idx_list = list(range(len(features)))\n author_count = Counter(authors)\n query_idxs = sample_queries(authors)\n query_idxs = [idx for idx in query_idxs if author_count[authors[idx]] > 1]\n target_idxs = [idx for idx in idx_list if idx not in query_idxs]\n \n query_authors = [authors[idx]for idx in query_idxs]\n query_topics = [topics[idx ]for idx in query_idxs]\n \n target_authors = [authors[idx ]for idx in target_idxs]\n target_topics = [topics[idx ]for idx in target_idxs]\n \n queries = [features[idx].unsqueeze(0) for idx in query_idxs]\n targets = [features[idx].unsqueeze(0) for idx in target_idxs]\n queries = torch.cat(queries, 0)\n targets = torch.cat(targets, 0) \n else:\n raise Exception(\"sample number of text, author, topics are not the same\")\n return (queries, targets,\n query_authors, target_authors,\n query_topics, target_topics)\n\ndef accuracy_error_rate_at_k(query_authors, target_authors, query_topics, target_topics, ranks, k):\n correct = 0\n diff_topic_pred = 0\n same_topic_pred = 0\n error_count = 0\n precision = 0\n\n for idx, rank in enumerate(ranks):\n gold_author = query_authors[idx]\n pred_authors = [target_authors[i] for i in rank[:k]]\n sample_correct = False\n correct_count = 0\n for pred_author in pred_authors:\n if pred_author == gold_author:\n sample_correct = True\n correct_count += 1\n precision += correct_count/k\n if sample_correct:\n correct += 1 \n else:\n pred_topics = [target_topics[i] for i in rank[:k]]\n sample_topic = query_topics[idx]\n error_count += 1\n same_topic_count = 0\n diff_topic_count = 0\n for topic in pred_topics:\n if topic == sample_topic:\n same_topic_count+=1\n else:\n diff_topic_count+=1\n same_topic_pred += same_topic_count/k\n diff_topic_pred += diff_topic_count/k\n accuracy = correct/len(ranks)\n precision = precision/len(ranks)\n if error_count > 0:\n same_topic_error = same_topic_pred/error_count\n diff_topic_error = diff_topic_pred/error_count\n else:\n same_topic_error = 0\n diff_topic_error = 0\n \n return accuracy, precision, same_topic_error, diff_topic_error\n\ndef mrr(query_authors, target_authors, ranks):\n mrr_value = 0.0\n for idx, rank in enumerate(ranks):\n gold_author = query_authors[idx]\n pred_authors = [target_authors[i] for i in rank]\n for rank_idx, pred_author in enumerate(pred_authors):\n if pred_author == gold_author:\n mrr_value += 1.0 / (rank_idx + 1)\n break\n mrr_value /= len(ranks)\n \n return mrr_value","repo_name":"jitkapat/TopicReg","sub_path":"authorship/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"8282769884","text":"'''\nhttps://www.hackerrank.com/challenges/flipping-bits/problem\n'''\n\n#!/bin/python3\n\nimport math\nimport 
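knn_search above relies on IndexFlatIP, which scores by raw inner product; the surrounding code normalizes features (F.normalize) precisely so those scores are cosine similarities. A self-contained check of that invariant, assuming faiss and numpy are installed:

import numpy as np
import faiss

rng = np.random.default_rng(0)
targets = rng.standard_normal((100, 64)).astype("float32")
queries = rng.standard_normal((5, 64)).astype("float32")

faiss.normalize_L2(targets)  # in-place L2 normalization: IP equals cosine afterwards
faiss.normalize_L2(queries)

index = faiss.IndexFlatIP(64)
index.add(targets)
scores, ranks = index.search(queries, 10)  # (5, 10) similarities and neighbor ids
assert scores.max() <= 1.0 + 1e-5          # cosine scores are bounded by 1
print(scores.shape, ranks.shape)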
os\nimport random\nimport re\nimport sys\n\n# Complete the flippingBits function below.\ndef flippingBits(n):\n val = bin(n).split('b')[1]\n val = '0'*(32-len(val)) + val\n \n new = ''\n \n for v in val:\n if v=='0':\n new+='1'\n else:\n new+='0'\n \n return int(new, 2)\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n n = int(input())\n\n result = flippingBits(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"amitamola/Hackerrank-codes","sub_path":"Algorithms/Flipping bits.py","file_name":"Flipping bits.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"36430994526","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport time\n#\n# When you call a function that contains the \"yield\"\n# comand in it, the function returns a generator.\n#\ndef yesman0():\n print(\"yes 1\")\n yield\n print(\"yes 2\")\n yield\n print(\"I really mean yes\")\n\n# calling the function yields a generator, but nothing else\n# Notice, no printing occurs when the generator gets created\nygen = yesman0()\nprint(\"This is a generator object: {}\".format(ygen))\n\n\n# In[ ]:\n\n\n# generator object handles next() and runs until it encounters the next yield keyword\nnext(ygen) # prints yes1 and then returns at first yield\nnext(ygen) # continues from there, printing yes2 and returning at second yield\nnext(ygen) # continues from there, printing final message and then throwing StopIteration exception\n\n\n# In[ ]:\n\n\n# notice the third call does the print, but then throws the\n# StopIteration exception. Typical iteration constructs will catch that\n# and handle it gracefully.\n#\n# Since we are using explicit next() call to do iteration, you will your\n# own thing, you'll need to catch it.\n# Below we catch the end of the iteration and break out of the loop\n\nygen = yesman0()\nwhile True:\n try:\n next(ygen)\n time.sleep(1.0)\n except StopIteration:\n break\n\n\n# In[ ]:\n\n\n# By catching the StopIteration exception and using it to determine\n# to break out of the infinite loop, we end up with a graceful exit. \n\n# Also ordinary (for _ in) iteration works fine, since it stops when the\n# StopIteration occurs. This is a pretty compact.\nfor _ in yesman0():\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n# You can wrap up a generator in another function and make it\n# a generator. This generator lets you compose two generators\n# back to back. When one finishes, the other continues.\ndef yesmen0():\n yield from yesman0()\n yield from yesman0()\n\nfor _ in yesmen0():\n time.sleep(0.5)\n\n\n# In[ ]:\n\n\n# generators can generate forever\ndef always_yes():\n while True:\n print(\"yes\")\n yield\n\n# type ctrl-c to stop this if you are running\n# it in a command shell, use the square \"stop\" button\n# if you are running in jupyter notebook\nfor _ in always_yes():\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n# generators can return things that the caller can use\n# Return values using the \"yield\" command, e.g. 
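The flipping-bits solution above round-trips through a binary string; the same 32-bit complement is a single XOR against an all-ones mask, which makes the intent explicit. A quick self-check:

def flipping_bits(n):
    return n ^ 0xFFFFFFFF  # flip all 32 bits

assert flipping_bits(0) == 4294967295
assert flipping_bits(4) == 4294967291
print(flipping_bits(2147483647))  # 2147483648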
yield \ndef infinite_yeses():\n while True:\n yield \"yes\"\n\n# notice that the generator now looks like an infinite collection\n# of answers (ctrl-c or stop button will stop this)\nfor answer in infinite_yeses():\n print(answer)\n time.sleep(1)\n\n\n# In[ ]:\n\n\n# of course, you can make the generator more useful by\n# being able to change its behavior depending on how you create it.\ndef answerman(answer):\n while True:\n yield answer\n\nfor answer in answerman(\"no\"):\n print(answer)\n time.sleep(1)\n\n\n# In[ ]:\n\n\ndef oncewaffle(choices):\n for choice in choices:\n yield choice\n\n# will generate the collection of answers once (not so interesting)\nfor answer in oncewaffle([\"yes\", \"no\", \"maybe\", \"later\"]):\n print(answer)\n time.sleep(1)\n\n\n# In[ ]:\n\n\n# more interesting, perhaps is a generator that never tires of answering\ndef foreverwaffle(choices):\n while True:\n for choice in choices:\n yield choice\n\n# will generate an infinite number of choices (in rotation)\n# ctrl-c or stop button will stop this\nfor answer in foreverwaffle([\"yes\", \"no\", \"maybe\", \"later\"]):\n print(answer)\n time.sleep(1)\n\n\n# In[ ]:\n\n\n# Sometimes we would rather that our generator optionally stop generating.\n# Sometimes, you may want to limit how much time a generator works. Or\n# maybe stop if some condition occurs. Let's say that you just want it\n# to generate for 3 seconds.\n\ndef momentary_answerman(answer, duration):\n stop_time = time.time() + duration\n while time.time() <= stop_time:\n yield answer\n\nfor answer in momentary_answerman(\"maybe\", 3):\n print(answer)\n time.sleep(0.25)\n\n\n# In[ ]:\n\n\n# As we get a bit closer to doing robotics applications, we'll need parts\n# of robots to do testing. These are stubs that stand-in for real robot\n# functions, but just print messages, so we know what is happening.\n\nclass FakeDriveTrain:\n \"\"\"\n Fake drivetrain is just a stand-in for a real drivetrain.\n Used for this demonstration. It just prints out what it is doing.\n Give it a name when you make it, and it includes the name\n in the output message.\n \"\"\"\n def __init__(self, train):\n self.train = train\n \n def driveArcade(self, forward, rotate):\n if forward == 0 and rotate == 0:\n print(\"{} is STOPPED\".format(self.train))\n else:\n print(\"Driving {} forward {} and rotating {}\".format(self.train, forward, rotate))\n\n#\n# So, now you have a framework for doing a task with given arguments\n# (so far, only printing a string, but we can make it more sophisticated)\n# Once time expires, the loop ends, and the drivetrain is stopped. (usually a good thing)\n#\ndef timed_arcadedrive(duration, drivetrain, forward, rotate):\n stop_time = time.time() + duration\n while time.time() <= stop_time:\n drivetrain.driveArcade(forward, rotate)\n yield\n drivetrain.driveArcade(0, 0)\n\ndt = FakeDriveTrain(\"arcade\")\ndrive_auton = timed_arcadedrive(5, dt, 1.0, 0)\n\nfor _ in drive_auton:\n time.sleep(1.0)\n\n \n\n\n# In[ ]:\n\n\n#\n# But, then we have to have a timed_xxx() function for every kind of thing.\n# Perhaps it would be better to define a generic AutoTask object that has\n# a do_step() method. Then, as long as any subtask is an AutoTask object,\n# or at least has a do_step() method, we can ask the timed_xxxx() function to\n# call the do_step() until the time expires.\n#\nclass AutoTask:\n \"\"\"\n Nobody should create an AutoTask object. 
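foreverwaffle above is a hand-rolled version of itertools.cycle; a quick equivalence check:

import itertools

def foreverwaffle(choices):
    while True:
        for choice in choices:
            yield choice

a = foreverwaffle(["yes", "no", "maybe"])
b = itertools.cycle(["yes", "no", "maybe"])
assert [next(a) for _ in range(7)] == [next(b) for _ in range(7)]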
This is just\n a template for what derived classes should implement.\n \"\"\"\n def __init__(self):\n pass\n\n def do_step(self):\n pass\n \n\nclass AutoArcade(AutoTask):\n def __init__(self, train, forward, rotate):\n \"\"\"\n The task has to store all of the state necessary for\n the do_step method to be callable with no arguments.\n This way, the timed_xxx() function knows *nothing*\n about the subtask.\n \"\"\"\n self.train = train\n self.forward = forward\n self.rotate = rotate\n\n def do_step(self):\n \"\"\"\n Here is the meat of the generic-looking step. It\n puts all of the saved state together to do the thing.\n \"\"\"\n self.train.driveArcade(self.forward, self.rotate)\n\n#\n# Now timed_task() looks more generic. You can hand it any task\n# and it can step the task for the specified amount of time.\n#\ndef timed_task(duration, task):\n stop_time = time.time() + duration\n while time.time() <= stop_time:\n task.do_step()\n yield\n\n \n# create the drivetrain\ndt = FakeDriveTrain(\"Arcade\")\n# create the driving subtask\ndrive_task = AutoArcade(dt, 1.0, 0.1)\n# auton steps the drive_task for 7 seconds\nauton = timed_task(7, drive_task)\n\nfor _ in auton:\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n# We can assemble things compactly (with labels) so that\n# the plan is more clear and fits on a single line\n\nfor _ in timed_task(duration=7, task=AutoArcade(train=dt, forward=1.0, rotate=0.1)):\n time.sleep(3.5)\n\n\n# In[ ]:\n\n\n# if we use the \"yield from\" operator, we can compose several\n# autonomous plans together. But to get a generator out\n# of it, we have to put it into a function and call the function.\n\ndef full_auton(train):\n \"\"\"\n Chain two autonomous plans together into a single generator\n \"\"\"\n yield from timed_task(5.0, AutoArcade(train, 1.0, 0.0))\n yield from timed_task(4.0, AutoArcade(train, -1.0, 0.0))\n\nfor _ in full_auton(dt):\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n# sometimes we don't want to run the AutoTask for the full length of time.\n# For example, we might want to drive until we are within 5cm of the target\n\n# For this we need an UltraSonic sensor (why am I not surprised?)\nclass FakeUltraSonic:\n \"\"\"\n Since this is not a real sonar unit, we can load the range readings into it\n as an iterator that produces the target ranges\n \"\"\"\n def __init__(self, range_iter):\n self.range_iter = range_iter\n \n def range_cm(self):\n \"\"\"\n Ultrasonic just returns successive elements of\n the iterator that was passed to the constructor.\n When that runs out, it just returns 0. (slam!)\n \"\"\"\n try:\n cm = next(self.range_iter)\n print(\"range to target: {}cm\".format(cm))\n except StopIteration:\n cm = 0\n return cm\n\nclass AutoDriveToTarget(AutoTask):\n \"\"\"\n Need more arguments to create the more complex auto plan\n \"\"\"\n def __init__(self, train, forward, rotate, sonar, end_range_cm):\n \"\"\"\n The task has to store all of the state necessary for\n the time method to be callable with no arguments.\n \"\"\"\n self.train = train\n self.forward = forward\n self.rotate = rotate\n self.sonar = sonar\n self.end_range_cm = end_range_cm\n \n def do_step(self):\n \"\"\"\n Now we can't just do our thing, we have to tell the caller\n whether or not we are done driving. 
So, we can return a\n True if we keep going, or False if we are done\n \"\"\"\n if self.sonar.range_cm() <= self.end_range_cm:\n print(\"Within range: {}\".format(self.end_range_cm))\n self.train.driveArcade(0, 0)\n return False\n else:\n self.train.driveArcade(self.forward, self.rotate)\n return True\n\n\n# We have to upgrade the timed_task wrapper so it can interpret the return\n# value from do_step(). We keep going as long as do_step() returns true.\n# Once it returns false, we break out of the loop (and the generator ends)\ndef timed_task(duration, task):\n stop_time = time.time() + duration\n while time.time() <= stop_time:\n if task.do_step():\n yield\n else:\n break\n\n#\n# \n# readings will be 9, 8, 7, 6, 5, 4, .... 0, 0, 0 ouch, ouch, ouch\n#\nus = FakeUltraSonic(iter(range(9, 0, -1)))\ndt = FakeDriveTrain(\"FAKE-FOR-US-AUTON\")\n\n# drive until within 5cm of the target or at most for\n# AutoDriveToTarget needs a drivetrain and an ultrasonic\ndrivetask = AutoDriveToTarget(dt, 0.5, 0, us, 5.0)\n\n# do drivetask no longer than 7 seconds\nprint(\"Drive no longer than 7 seconds\")\nfor _ in timed_task(7, drivetask):\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n#\n# \n# readings will be 9, 8, 7, 6, 5, 4, .... 0\n#\nus = FakeUltraSonic(iter(range(9, 0, -1)))\ndt = FakeDriveTrain(\"FAKE-FOR-US-AUTON\")\n\n# drive until within 5cm of the target or at most for\n# AutoDriveToTarget needs a drivetrain and an ultrasonic\ndrivetask = AutoDriveToTarget(dt, 0.5, 0, us, 5.0)\n\nprint(\"Try again, but drive no longer than 3 seconds\")\nfor _ in timed_task(3, drivetask):\n time.sleep(1.0)\n\n\n# In[ ]:\n\n\n# But it is a bit awkward having the subtask require some kind of special\n# return value. What if the subtask was also a generator? Then the\n# timed_task wrapper could just \"yield from task.do_step()\" and the subtask\n# could finish generating when it is done. Then the rule for building\n# auton tasks is that they are generators, and they stop generating when\n# they reach their objective. The timed_task wrapper can stop them when\n# their alloted time expires. (in the event they don't reach their objective)\n#\n# This can really simplify the writing of autonomous tasks, but it does\n# mean the infrastructure has to be a bit more sophisticated. This is\n# because we need to evaluate a function in the subtask that contains a\n# \"yield\" in order to get a generator that the parent (timed) task can\n# yield from.\n#\n# Generally, when we define an autonomous task, it takes a certain form.\n# It needs some initialization (to grab the current time, or, to reset\n# something), it needs the actual steps, and it may need some kind of\n# stop action. If you could create a generator framework that would\n# support that, you could use it to make a rich variety of tasks.\n#\n# But, remember the function that creates the generator doesn't do anything\n# when the generator is first created, so we miss out on an initialization\n# opportunity unless the generator is always created immediately after\n# an initialization task is done. 
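The design being discussed here, subtasks as generators that simply stop yielding when their goal is met, with a timed wrapper as a hard cap, can be shown without any robot stubs. The names below are illustrative:

import time

def drive_until(readings, threshold):
    for r in readings:
        if r <= threshold:
            return        # goal reached: the generator just stops
        yield r

def timed(gen, duration):
    stop_time = time.time() + duration
    for item in gen:
        if time.time() > stop_time:
            break         # time cap reached before the goal
        yield item

for r in timed(drive_until(iter([9, 8, 7, 6, 5, 4]), 5), duration=1.0):
    print("range:", r)    # prints 9, 8, 7, 6, then stops at the 5 reading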
That may not be convenient.\n# So we add another level where we define the function generator and call/return\n# it from the run() call:\n#\ndef initialize():\n print(\"initialize\")\n\ndef finish():\n print(\"finish\")\n\ndef ideal_auton_run():\n \"\"\"\n Define the function that will create the generator.\n Return the generator to be the \"body\" of the task.\n This way, everything does an initialize(), the generator,\n and then the finish().\n \"\"\"\n def the_generator():\n for i in range(10):\n print(\"step {}\".format(i))\n yield\n finish()\n \n initialize()\n return the_generator()\n\n\nauton = ideal_auton_run() # initialize happens here now!\nfor _ in auton:\n time.sleep(1.0)\n# and finish() will always be called just as the loop completes\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"FRC1076/ContinuingEducation","sub_path":"generators-for-auton/learn-generators.py","file_name":"learn-generators.py","file_ext":"py","file_size_in_byte":13293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39484303235","text":"from typing import Optional, List\nfrom pytorch_lightning.callbacks import progress\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning import utilities as pl_utils\nfrom pytorch_lightning.trainer.trainer import Trainer\n\n\nclass LitProgressBar(progress.TQDMProgressBar):\n\n def __init__(self, custom_metrics: Optional[List[str]] = []):\n \"\"\"\n custom_metrics: list of metrics to display other than loss\n \"\"\"\n super(LitProgressBar, self).__init__()\n self.enable = True\n self.custom_metrics = custom_metrics\n\n def enable(self):\n self.enable = True\n\n def disable(self):\n self.enable = False\n \n def add_custom_metrics(self, trainer: Trainer):\n new_quantities = {}\n for met in self.custom_metrics:\n if met in trainer.callback_metrics:\n new_quantities[met] = trainer.callback_metrics[met].item()\n return new_quantities\n\n def get_metrics(self, trainer: Trainer, pl_module: LightningModule):\n ## do stuff here, do not override the method in LightningModule\n standard_metrics = pl_module.get_progress_bar_dict()\n pbar_metrics = trainer.progress_bar_metrics\n duplicates = list(standard_metrics.keys() & pbar_metrics.keys())\n if duplicates:\n pl_utils.rank_zero.rank_zero_warn(\n f\"The progress bar already tracks a metric with the name(s) '{', '.join(duplicates)}' and\"\n f\" `self.log('{duplicates[0]}', ..., prog_bar=True)` will overwrite this value. 
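Hooking the LitProgressBar record below into a run is the standard Lightning callback pattern; "val_acc" is an illustrative metric name that the LightningModule would have to log itself via self.log("val_acc", ...). A minimal usage sketch:

from pytorch_lightning import Trainer

bar = LitProgressBar(custom_metrics=["val_acc"])
trainer = Trainer(callbacks=[bar], max_epochs=1)
# trainer.fit(model)  # `model`: any LightningModule that logs "val_acc"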
\"\n \" If this is undesired, change the name or override `get_metrics()` in the progress bar callback.\",\n )\n\n return {**standard_metrics, **pbar_metrics, **self.add_custom_metrics(trainer)}\n\n","repo_name":"nvedant07/deep-learning-base","sub_path":"training/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34177204845","text":"from app import app\nfrom flask import Response, request, jsonify, redirect, url_for, send_file, render_template\n\n\nimport os\nimport base64\nimport requests\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport datetime\n\nimport torch\nimport torch.utils.data as data\nimport torchvision.datasets as datasets\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nfrom app.backbone import Backbone\nfrom app.vision.ssd.config.fd_config import define_img_size\nfrom app.vision.ssd.mb_tiny_RFB_fd import create_Mb_Tiny_RFB_fd, create_Mb_Tiny_RFB_fd_predictor\n\nfrom app.arcface_torch.backbones import get_model\nfrom app.arcface_torch.branch_util import *\n\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nprint('\\nUsing ', device, '\\n')\ninput_size=[112, 112]\ntransform = transforms.Compose(\n [\n transforms.Resize(\n [int(128 * input_size[0] / 112), int(128 * input_size[0] / 112)],\n ), # smaller side resized\n transforms.CenterCrop([input_size[0], input_size[1]]),\n # transforms.Resize([input_size[0], input_size[1]]),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ],\n)\n\nbackbone = Backbone(input_size)\nbackbone.load_state_dict(torch.load('app/ms1mv3_arcface_r50_fp16/backbone_ir50_ms1m_epoch120.pth', map_location=torch.device(\"cpu\")))\nbackbone.to(device)\nbackbone.eval()\n\n\nmodel_dream = Branch(feat_dim=512)\n# model.cuda()\ncheckpoint = torch.load('./app/arcface_torch/checkpoint_512.pth')\nmodel_dream.load_state_dict(checkpoint['state_dict'])\nmodel_dream.eval()\n\nfrom utils.TFLiteFaceAlignment import * \nfrom utils.TFLiteFaceDetector import * \nfrom utils.functions import *\n\npath = \"./\"\n\nfd = UltraLightFaceDetecion(path + \"utils/weights/RFB-320.tflite\", conf_threshold=0.98)\nfa = CoordinateAlignmentModel(path + \"utils/weights/coor_2d106.tflite\")\n\ndef loadBase64Img(uri):\n encoded_data = uri.split(',')[1]\n nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8)\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return img\n\ndef load_image(img):\n\texact_image = False; base64_img = False; url_img = False\n\n\tif type(img).__module__ == np.__name__:\n\t\texact_image = True\n\n\telif len(img) > 11 and img[0:11] == \"data:image/\":\n\t\tbase64_img = True\n\n\telif len(img) > 11 and img.startswith(\"http\"):\n\t\turl_img = True\n\n\t#---------------------------\n\n\tif base64_img == True:\n\t\timg = loadBase64Img(img)\n\n\telif url_img:\n\t\timg = np.array(Image.open(requests.get(img, stream=True).raw))\n\n\telif exact_image != True: #image path passed as input\n\t\tif os.path.isfile(img) != True:\n\t\t\traise ValueError(\"Confirm that \",img,\" exists\")\n\n\t\timg = cv2.imread(img)\n\n\treturn img\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n return render_template('index.html')\n\n@app.route(\"/dream\", methods=['POST'])\ndef facerec_DREAM():\n\n req = request.get_json()\n img_input = \"\"\n if \"img\" in list(req.keys()):\n img_input = req[\"img\"]\n\n validate_img = False\n if 
len(img_input) > 11 and img_input[0:11] == \"data:image/\":\n validate_img = True\n\n if validate_img != True:\n return jsonify({\"result\": {'message': 'Vui lòng truyền ảnh dưới dạng Base64'}}), 400\n \n orig_image = load_image(img_input)\n\n # Step 1: Get a face from current frame.\n orig_image = cv2.resize(orig_image, (600, 400), interpolation = cv2.INTER_CUBIC)\n temp_boxes, _ = fd.inference(orig_image)\n\n # Find landmarks of each face\n landmarks = fa.get_landmarks(orig_image, temp_boxes)\n feats = []\n\n for bbox_I, landmark_I in zip(temp_boxes, landmarks):\n bbox_I = [int(number) for number in bbox_I]\n x1, y1, x2, y2 = bbox_I\n now = round(datetime.datetime.now().timestamp() * 1000)\n cv2.imwrite('detectedFaces/' + str(now) + '.jpg', orig_image[y1: y2, x1: x2])\n roll, pitch, yaw, _ = estimatePose(orig_image, landmark_I)\n\n with torch.no_grad():\n embedding_I = F.normalize(backbone(transform(Image.fromarray(orig_image[y1: y2, x1: x2])).unsqueeze(0).to(device))).cpu()\n yaw = np.zeros([1, 1])\n yaw[0,0] = norm_angle(float(yaw))\n original_embedding_tensor = np.expand_dims(embedding_I.detach().cpu().numpy(), axis=0)\n feature_original = torch.autograd.Variable(torch.from_numpy(original_embedding_tensor.astype(np.float32)))\n yaw = torch.autograd.Variable(torch.from_numpy(yaw.astype(np.float32)))\n\n new_embedding = model_dream(feature_original, yaw)\n new_embedding = new_embedding.to('cpu').data.numpy()\n embedding_I = new_embedding[0, :].tolist()[0]\n feats.append(embedding_I)\n \n return jsonify({'result': {\"feats\": feats}}), 200\n\n","repo_name":"vietbacnguyen96/DREAM_APIs","sub_path":"app/apis.py","file_name":"apis.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16781184668","text":"import re\nimport datetime as dt\n\nimport pytz\nfrom pupa.scrape import Scraper, Event\n\nfrom openstates.utils import LXMLMixin\n\nurl = \"http://assembly.state.ny.us/leg/?sh=hear\"\n\n\nclass NYEventScraper(Scraper, LXMLMixin):\n _tz = pytz.timezone('US/Eastern')\n\n def lower_parse_page(self, url):\n page = self.lxmlize(url)\n tables = page.xpath(\"//table[@class='pubhrgtbl']\")\n date = None\n for table in tables:\n metainf = {}\n rows = table.xpath(\".//tr\")\n for row in rows:\n tds = row.xpath(\"./*\")\n if len(tds) < 2:\n continue\n key, value = tds\n\n if key.tag == 'th' and key.get(\"class\") == 'hrgdate':\n date = key.text_content()\n date = re.sub(r\"\\s+\", \" \", date)\n date = re.sub(\".*POSTPONED NEW DATE\", \"\", date).strip()\n\n # Due to the html structure this shouldn't be an elif\n # It needs to fire twice in the same loop iteration\n if value.tag == 'th' and value.get(\"class\") == 'commtitle':\n coms = value.xpath('.//div[contains(@class,\"comm-txt\")]/text()')\n\n elif key.tag == 'td':\n key = key.text_content().strip()\n value = value.text_content().strip()\n value = value.replace(u'\\x96', '-')\n value = re.sub(r\"\\s+\", \" \", value)\n metainf[key] = value\n\n time = metainf['Time:']\n repl = {\n \"A.M.\": \"AM\",\n \"P.M.\": \"PM\",\n }\n drepl = {\n \"Sept\": \"Sep\"\n }\n for r in repl:\n time = time.replace(r, repl[r])\n\n for r in drepl:\n date = date.replace(r, drepl[r])\n\n time = re.sub(\"-.*\", \"\", time)\n time = time.strip()\n\n year = dt.datetime.now().year\n\n date = \"%s %s %s\" % (\n date,\n year,\n time\n )\n\n if \"tbd\" in date.lower():\n continue\n\n date = date.replace(' PLEASE NOTE NEW TIME', '')\n\n # Check if the event has been 
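One detail of the face-recognition record above worth flagging: loadBase64Img uses np.fromstring, which is deprecated in NumPy; np.frombuffer is the drop-in replacement for this decode path, and cv2.imdecode accepts the read-only array it returns. A self-contained sketch:

import base64
import numpy as np
import cv2

def load_base64_img(uri):
    encoded = uri.split(",")[1]  # strip the "data:image/...;base64," prefix
    buf = np.frombuffer(base64.b64decode(encoded), dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)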
postponed.\n postponed = 'POSTPONED' in date\n if postponed:\n date = date.replace(' POSTPONED', '')\n\n date_formats = [\"%B %d %Y %I:%M %p\", \"%b. %d %Y %I:%M %p\"]\n datetime = None\n for fmt in date_formats:\n try:\n datetime = dt.datetime.strptime(date, fmt)\n except ValueError:\n pass\n\n # If the datetime can't be parsed, bail.\n if datetime is None:\n return\n\n title_key = set(metainf) & set([\n 'Public Hearing:', 'Summit:', 'Roundtable:',\n 'Public Roundtable:', 'Public Meeting:', 'Public Forum:',\n 'Meeting:'])\n assert len(title_key) == 1, \"Couldn't determine event title.\"\n title_key = list(title_key).pop()\n title = metainf[title_key]\n\n title = re.sub(\n r\"\\*\\*Click here to view public hearing notice\\*\\*\",\n \"\",\n title\n )\n\n # If event was postponed, add a warning to the title.\n if postponed:\n title = 'POSTPONED: %s' % title\n\n event = Event(\n name=title,\n start_date=self._tz.localize(datetime),\n location_name=metainf['Place:'],\n )\n event.extras = {'contact': metainf['Contact:']}\n if 'Media Contact:' in metainf:\n event.extras.update(media_contact=metainf['Media Contact:'])\n event.add_source(url)\n\n for com in coms:\n event.add_participant(\n com.strip(),\n type='committee',\n note='host',\n )\n participant = event.participants[-1]\n participant['extras'] = {'chamber': self.classify_committee(com)},\n\n yield event\n\n def scrape(self):\n yield from self.lower_parse_page(url)\n\n def classify_committee(self, name):\n chamber = 'other'\n if \"senate\" in name.lower():\n chamber = 'upper'\n if \"assembly\" in name.lower():\n chamber = 'lower'\n if \"joint\" in name.lower():\n chamber = 'joint'\n return chamber\n","repo_name":"cndn/intelligent-code-completion","sub_path":"raw_data/43956_events.py","file_name":"43956_events.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} +{"seq_id":"30011747917","text":"import requests\nimport json\nimport os\nimport mimetypes\n# #############################################\nfrom ..wis_constants import wis_service_base_url\nfrom ..wis_exception import WISException\nfrom ..auth import Token\n# #############################################\n\n\ndef create_task(token: Token, media_path: str, \n activation_faces_blur: bool=True, activation_plates_blur: bool=True,\n output_detections_url: bool=False, included_area: dict=None) -> str:\n \"\"\" Create a new process job\n\n Inputs: \n token (Token): ...\n media_path (str): ...\n activation_faces_blur (bool): ...\n activation_plates_blur (bool): ...\n output_detections_url (bool): ...\n included_area (dict): ...\n\n Returns:\n job_id (str): ...\n \"\"\"\n\n url = wis_service_base_url + \"/anonymization\"\n headers = { **token.header }\n payload = {\n 'activation_faces_blur': json.dumps(activation_faces_blur),\n 'activation_plates_blur': json.dumps(activation_plates_blur),\n 'output_detections_url': json.dumps(output_detections_url),\n 'included_area': json.dumps(included_area)\n }\n\n assert os.path.exists(media_path), 'File not found: %s' % media_path\n files = [\n ('input_media',\n (\n os.path.basename(media_path), \n open(media_path,'rb'),\n mimetypes.guess_type(media_path)[0]\n )\n )\n ]\n response = requests.request(\"POST\", url, headers=headers, data=payload, files=files)\n\n if response.status_code == 200:\n return response.json()['anonymization_job_id']\n else:\n raise WISException.from_response(response)\n# 
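The date-format fallback loop in the scraper above generalizes to a tiny helper that returns None instead of leaving a stale value when nothing matches:

import datetime as dt

def parse_first(datestr, formats):
    for fmt in formats:
        try:
            return dt.datetime.strptime(datestr, fmt)
        except ValueError:
            continue
    return None

print(parse_first("March 5 2024 10:30 AM",
                  ["%B %d %Y %I:%M %p", "%b. %d %Y %I:%M %p"]))
# 2024-03-05 10:30:00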
#############################################\n\n","repo_name":"wassafr/wis-api-samples","sub_path":"python/src/wis/anonymization/create_task.py","file_name":"create_task.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"34115555504","text":"import math\nfrom kivy.graphics import Line\nfrom kivy.vector import Vector\nfrom .path import Path\n\n\nclass ArrowPath(Path):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.arrowhead_length = 15 * self.line_width // 2\n self.arrowhead_angle = math.radians(45)\n\n barb1_points, barb2_points = self._calculate_arrowhead_points()\n\n with self.canvas:\n self._barb1 = Line(points=barb1_points, width=self.line_width)\n self._barb2 = Line(points=barb2_points, width=self.line_width)\n\n def on_touch_move(self, touch):\n super().on_touch_move(touch)\n\n if touch.button == \"left\":\n barb1_points, barb2_points = self._calculate_arrowhead_points()\n\n self._barb1.points = barb1_points\n self._barb2.points = barb2_points\n return True\n return False\n\n def _calculate_arrowhead_points(self):\n # Calculate the angle of the arrow\n xe, ye = self.end_point\n\n num_points = len(self.path.points) // 2\n last_n_points = min(num_points, 15)\n first_point_idx = -last_n_points\n xs = self.path.points[first_point_idx * 2]\n ys = self.path.points[first_point_idx * 2 + 1]\n\n angle = math.atan2(ye - ys, xe - xs)\n\n # Calculate the positions of the two lines that form the arrowhead\n x1 = xe - self.arrowhead_length * math.cos(angle + self.arrowhead_angle)\n y1 = ye - self.arrowhead_length * math.sin(angle + self.arrowhead_angle)\n x2 = xe - self.arrowhead_length * math.cos(angle - self.arrowhead_angle)\n y2 = ye - self.arrowhead_length * math.sin(angle - self.arrowhead_angle)\n\n barb1_points = [x1, y1, xe, ye]\n barb2_points = [x2, y2, xe, ye]\n\n return barb1_points, barb2_points\n\n def build_shape_preview(self, *args):\n if self.shadow:\n self.shadow.on_pos(*args)\n\n x0 = int(self.parent.x)\n y0 = int(self.parent.y)\n y_center = y0 + int(self.parent.height / 2)\n y_max = int(self.parent.height)\n x_max = int(self.parent.width)\n last_n_points = 50\n cos_x_max = x_max - last_n_points\n\n sh_points = []\n\n # Curve points\n for x in range(0, cos_x_max, 5):\n rads = (x / cos_x_max) * (2 * math.pi)\n y = y_max * abs(math.cos(rads))\n p = (x + x0, y + y0)\n sh_points.extend(p)\n\n # Just for fun ;P - Last segment: straight line to allow the arrowhead to be clearly visible\n s0 = Vector(sh_points[-8], sh_points[-7])\n s1 = Vector(sh_points[-2], sh_points[-1])\n\n m = (s1.y - s0.y) / (s1.x - s0.x)\n s_fun = lambda x: m * (x - s0.x) + s0.y\n\n for x in range(s0.x, x_max, 2):\n sh_points.extend((x, s_fun(x)))\n\n self.path.points = [*sh_points]\n\n self.start_point = Vector(sh_points[0], sh_points[1])\n self.end_point = Vector(sh_points[-2], sh_points[-1])\n\n self._barb1.points, self._barb2.points = self._calculate_arrowhead_points()\n","repo_name":"anteloc/componga","sub_path":"src/componga/shapes/arrowpath.py","file_name":"arrowpath.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"44287782354","text":"from flask import Flask, flash, redirect, render_template, request, send_from_directory, send_file\nfrom werkzeug.utils import secure_filename\nimport belat.worker as worker\nimport belat\nimport os, random, hashlib\n\napp = 
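The arrowhead math in the Kivy record above stands alone: each barb endpoint sits arrowhead_length back from the tip, rotated plus or minus arrowhead_angle off the shaft direction. A framework-free check:

import math

def arrowhead_points(xs, ys, xe, ye, length=15.0, angle=math.radians(45)):
    theta = math.atan2(ye - ys, xe - xs)              # shaft direction
    barb1 = (xe - length * math.cos(theta + angle),
             ye - length * math.sin(theta + angle))
    barb2 = (xe - length * math.cos(theta - angle),
             ye - length * math.sin(theta - angle))
    return barb1, barb2

print(arrowhead_points(0, 0, 100, 0))
# approximately ((89.39, -10.61), (89.39, 10.61)) for a horizontal shaft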
Flask(__name__)\napp.secret_key = os.urandom(24)\n\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\napp.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 * 16 # 16 mb max\napp.config[\"UPLOAD_FOLDER\"] = \"uploads\"\n\nALLOWED_EXTENSIONS = [\"txt\", \"epub\", \"fb2\"]\n\n@app.route(\"/favicon.ico\")\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, \"static\"),\n \"favicon.ico\", mimetype=\"image/vnd.microsoft.icon\")\n\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef index_page():\n text_in = \"\"\n text_out = \"\"\n sel_scheme = 0\n sel_dir = 0\n\n if request.method == \"POST\":\n text_in = request.form.get(\"text_in\")\n\n sel_scheme = int(request.form.get(\"scheme\"))\n scheme_lat = worker.Worker.get_schemes_from_json(print)[sel_scheme]\n\n sel_dir = int(request.form.get(\"dir\"))\n\n if sel_dir == 0:\n text_out = scheme_lat.cyr_to_lat(text_in)\n elif sel_dir == 1:\n text_out = scheme_lat.lat_to_cyr(text_in)\n\n return render_template(\"index.html\", schemes=worker.Worker.get_schemes_from_json(print), \n version=belat.VERSION, text_in=text_in, text_out=text_out, sel_scheme=sel_scheme, sel_dir=sel_dir)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route(\"/file\", methods=[\"POST\", \"GET\"])\ndef file_page():\n sel_enc_in = \"utf8\"\n sel_enc_out = \"utf8\"\n sel_scheme = 0\n sel_dir = 0\n sel_file_type = 0\n\n download_link = \"\"\n\n err_msg = \"\"\n\n file_types = [\"txt\", \"epub\", \"fb2\"]\n encodings = [\"utf8\", \"cp1251\", \"koi8-r\"]\n\n if request.method == \"POST\":\n if \"file\" not in request.files:\n flash(\"Не атрымалася загрузіць файл. Праверце шляхі і тыпы файлаў\")\n return redirect(request.url)\n file = request.files[\"file\"]\n if file.filename == \"\":\n flash('Вы не выбралі файл')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n\n file_in = request.files[\"file\"]\n file_in.seek(0)\n extens = filename.rsplit('.', 1)[1].lower()\n file_in_path = os.path.join(app.config['UPLOAD_FOLDER'], str( hashlib.md5( (filename+str(random.randint(0,255^2))).encode() ).hexdigest())+\".\"+extens)\n file_in.seek(0)\n file_in.save(file_in_path)\n\n sel_scheme = int(request.form.get(\"scheme\"))\n scheme_lat = worker.Worker.get_schemes_from_json(print)[sel_scheme]\n\n sel_dir = int(request.form.get(\"dir\"))\n sel_enc_in = int(request.form.get(\"enc_in\"))\n sel_enc_out = int(request.form.get(\"enc_out\"))\n sel_file_type = int(request.form.get(\"file_type\"))\n dir_work = \"\"\n\n if sel_dir == 0:\n dir_work = worker.Worker.CTL\n elif sel_dir == 1:\n dir_work = worker.Worker.LTC\n\n worker.Worker(file_in_path, file_in_path, encodings[sel_enc_in], encodings[sel_enc_out], \n dir_work, scheme_lat, file_types[sel_file_type], belat.VERSION).work()\n\n file_short_name = os.path.split(file_in_path)[-1]\n\n download_link = \"/download/\"+file_short_name\n\n return render_template(\"file_work.html\", schemes=worker.Worker.get_schemes_from_json(print), file_types=file_types,\n version=belat.VERSION, encodings=encodings, download_link=download_link, err_msg=err_msg,\n sel_scheme=sel_scheme, sel_dir=sel_dir, sel_enc_in=sel_enc_in, sel_enc_out=sel_enc_out, sel_file_type=sel_file_type)\n\n@app.route(\"/download/\")\ndef download_file(file_name):\n return send_from_directory(app.config[\"UPLOAD_FOLDER\"], secure_filename(file_name))\n\nif __name__ == \"__main__\":\n 
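A quick, self-contained check of the upload-validation helpers used by the Flask app above; allowed_file is re-inlined here so the snippet runs on its own:

from werkzeug.utils import secure_filename

ALLOWED_EXTENSIONS = ["txt", "epub", "fb2"]

def allowed_file(filename):
    return "." in filename and \
        filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS

print(secure_filename("../../etc/passwd.txt"))  # 'etc_passwd.txt'
print(allowed_file("book.EPUB"))                # True: extension check is case-insensitive
print(allowed_file("malware.exe"))              # False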
app.run()\n","repo_name":"alex-rusakevich/blsite","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43199696268","text":"import os\nimport sys\nimport click\nimport tarfile\nimport logging\nimport glob\nfrom braindataprep.utils import (\n get_tree_path,\n fileparts,\n write_tsv,\n write_from_buffer,\n copy_from_buffer,\n copy_json,\n read_json,\n write_json,\n)\nfrom .keys import allkeys, compat_keys\n\ntry:\n import openpyxl\nexcept ImportError:\n logging.error(\n 'Cannot find `openpyxl`. Did you install with [oasis2] flag? '\n 'Try `pip install braindataprep[oasis2]`.'\n )\n\n\"\"\"\nExpected input\n--------------\nOASIS-III/\n sourcedata/\n OASIS3_data_files/\n # demographics + cognitive tests\n demo.tar.gz\n dictionaries.tar.gz\n # Modality-specific BIDS json\n MRI-json.tar.gz\n CT-json.tar.gz\n PET-json.tar.gz\n # derivatives\n FS.tar.gz\n PUP.tar.gz\n # UDS\n UDSa1.tar.gz\n UDSa2.tar.gz\n UDSa3.tar.gz\n UDSa4d.tar.gz\n UDSa4g.tar.gz\n UDSa5.tar.gz\n UDSb1.tar.gz\n UDSb2.tar.gz\n UDSb3.tar.gz\n UDSb4.tar.gz\n UDSb5.tar.gz\n UDSb6.tar.gz\n UDSb7.tar.gz\n UDSb8.tar.gz\n UDSb9.tar.gz\n pychometrics.tar.gz (== UDSc1)\n UDSd1.tar.gz\n UDSd2.tar.gz\n OAS3{04d}_MR_d{04d}/\n anat{d}.tar.gz\n func{d}.tar.gz\n fmap{d}.tar.gz\n dwi{d}.tar.gz\n swi{d}.tar.gz\n OAS3{04d}_CT_d{04d}/\n CT{d}.tar.gz\n OAS3{04d}_PIB_d{04d}/\n pet{d}.tar.gz\n OAS3{04d}_AV45_d{04d}/\n pet{d}.tar.gz\n\nExpected output\n---------------\nOASIS-III/\n dataset_description.json\n participants.{tsv|json}\n sessions.json\n phenotypes/\n UDSv2_a1_demographics.{tsv|json}\n UDSv2_a2_informant.{tsv|json}\n UDSv2_a3_family_history.{tsv|json}\n UDSv2_a4_medications.{tsv|json}\n UDSv2_a5_health_history.{tsv|json}\n UDSv2_b1_physical.{tsv|json}\n UDSv2_b2_hiscvd.{tsv|json}\n UDSv2_b3_updrs.{tsv|json}\n UDSv2_b4_cdr.{tsv|json}\n UDSv2_b5_npiq.{tsv|json}\n UDSv2_b6_gds.{tsv|json}\n UDSv2_b7_fas.{tsv|json}\n UDSv2_b8_neurofind.{tsv|json}\n UDSv2_b9_symptoms.{tsv|json}\n UDSv2_c1_neuropsy.{tsv|json}\n UDSv2_d1_diagnosis.{tsv|json}\n UDSv2_d2_medical_conditions.{tsv|json}\n rawdata/\n sub-{04d}/\n sub-{04d}_sessions.tsv\n ses-{d}/\n anat/\n sub-{:04d}_ses-{:04d}_T1w.{nii.gz|json}\n sub-{:04d}_ses-{:04d}_T2w.{nii.gz|json}\n sub-{:04d}_ses-{:04d}_acq-TSE_T2w.{nii.gz|json}\n sub-{:04d}_ses-{:04d}_FLAIR.{nii.gz|json}\n sub-{:04d}_ses-{:04d}_T2star.{nii.gz|json}\n sub-{:04d}_ses-{:04d}_angio.{nii.gz|json}\n perf/\n sub-{:04d}_ses-{:04d}_pasl.nii.gz\n func/\n sub-{:04d}_ses-{:04d}_task-rest*_run-{:02d}_bold.nii.gz\n fmap/\n sub-{:04d}_ses-{:04d}_echo-1_run-01_fieldmap.nii.gz\n dwi/\n sub-{:04d}_ses-{:04d}_run-{:02d}_dwi.nii.gz\n swi/\n ...\n pet/\n sub-{:04d}_trc-PIB_pet.{nii.gz|json}\n sub-{:04d}_trc-AV45_pet.{nii.gz|json}\n\"\"\"\n\n\"\"\"Folder containing template README/JSON/...\"\"\"\nTPLDIR = os.path.join(os.path.dirname(__file__), 'templates')\n\n\n@click.command()\n@click.option(\n '--path', default=None, help='Path to tree')\n@click.option(\n '--key', multiple=True,\n type=click.Choice(allkeys),\n help='Only download these keys')\n@click.option(\n '--sub', multiple=True, type=int,\n help='Only download these subjects')\n@click.option(\n '--json-only', is_flag=True, default=False,\n help='Only write jsons (not volumes)'\n)\ndef bidsify(path, key, sub, json_only):\n logging.info('OASIS-I - bidsify')\n path = get_tree_path(path)\n keys = set(key or allkeys)\n subs = sub\n oasispath = 
os.path.join(path, 'OASIS-3')\n src = os.path.join(oasispath, 'sourcedata')\n raw = os.path.join(oasispath, 'rawdata')\n pheno = os.path.join(oasispath, 'phenotypes')\n if not os.path.exists(src):\n raise FileNotFoundError('sourcedata folder not found')\n\n # ------------------------------------------------------------------\n # Write toplevel meta-information\n # - README\n # - dataset_description.json\n # - participants.tsv\n # - participants.json\n # ------------------------------------------------------------------\n if keys.intersection(compat_keys(\"meta\")):\n\n copy_from_buffer(\n os.path.join(TPLDIR, 'README'),\n os.path.join(oasispath, 'README')\n )\n\n copy_json(\n os.path.join(TPLDIR, 'dataset_description.json'),\n os.path.join(oasispath, 'dataset_description.json')\n )\n\n copy_json(\n os.path.join(TPLDIR, 'participants.json'),\n os.path.join(oasispath, 'participants.json')\n )\n\n json_pheno = os.path.join(TPLDIR, 'phenotypes', 'UDSv2', '*.json')\n for fname in glob.glob(json_pheno):\n ofname = os.path.join(pheno, os.path.basename(fname))\n if 'a3' in os.path.basename(fname):\n obj = read_json(fname)\n for key, val in dict(obj).items():\n if \"{d}\" in key:\n assert key.startswith((\"SIB\", \"KID\", \"REL\")), key\n nb = 20 if key.startswith(\"SIB\") else 15\n del obj[key]\n desc = val.pop(\"Description\")\n for d in range(nb):\n obj[key.format(d=d)] = {\n 'Description': desc.format(d=d),\n **val\n }\n write_json(obj, ofname)\n else:\n copy_json(fname, ofname)\n\n # make_phenotypes()\n\n # make_participants(\n # os.path.join(pheno, 'UDSv2_a1_demographics.tsv'),\n # os.path.join(oasispath, 'participants.tsv'),\n # )\n\n copy_json(\n os.path.join(TPLDIR, 'sessions.json'),\n os.path.join(oasispath, 'sessions.json')\n )\n\n # we always need this to write subject-specific session files\n # session_tables = make_sessions(\n # [os.path.join(pheno, 'UDSv2_a1_demographics.tsv'),\n # os.path.join(pheno, 'UDSv2_b4_cdr.tsv'),\n # os.path.join(pheno, 'UDSv2_d1_diagnosis.tsv')],\n # os.path.join(src, 'oasis_longitudinal_demographics.xlsx')\n # )\n\n # ------------------------------------------------------------------\n # Convert raw and minimally processed data\n # ------------------------------------------------------------------\n\n all_sessions = list(sorted(glob.glob(\n os.path.join(src, 'OAS3*')\n )))\n for session in all_sessions:\n\n participant_id, modality_type, session_id \\\n = os.path.basename(session).split('_')\n participant_id = int(participant_id[4:])\n session_id = int(session_id[1:])\n\n if subs and participant_id not in subs:\n logging.info(f'skip sub-{participant_id:04d}')\n continue\n\n if modality_type == 'MR':\n if not keys.intersection(compat_keys(\"mri\")):\n continue\n elif modality_type == \"CT\":\n if not keys.intersection(compat_keys(\"ct\")):\n continue\n elif modality_type == \"FDG\":\n if not keys.intersection(compat_keys(\"fdg\")):\n continue\n elif modality_type == \"PIB\":\n if not keys.intersection(compat_keys(\"pib\")):\n continue\n elif modality_type == \"AV45\":\n if not keys.intersection(compat_keys(\"av45\")):\n continue\n else:\n continue\n\n session_path = os.path.join(\n raw, f'sub-{participant_id:04d}', f'ses-{session_id:04d}'\n )\n all_scans = list(sorted(glob.glob(\n os.path.join(session, '*.tar.gz')\n )))\n for tarpath in all_scans:\n submodality_type = fileparts(tarpath)[1][:-1]\n modality_path = os.path.join(session_path, submodality_type)\n\n if not keys.intersection(compat_keys(submodality_type)):\n continue\n\n with tarfile.open(tarpath, 
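The member scan in the loop below (first .nii.gz, first .json, and so on per archive) can be factored into a small helper in the same spirit; the suffix list mirrors the ones the script looks for:

import tarfile

def find_members(tar_path, suffixes=(".nii.gz", ".json", ".bval", ".bvec", ".tsv")):
    found = {}
    with tarfile.open(tar_path, "r:gz") as tf:
        for member in tf.getmembers():
            for suffix in suffixes:
                if suffix not in found and member.name.endswith(suffix):
                    found[suffix] = member
    return found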
'r:gz') as f:\n niimember = None\n jsonmember = None\n bvalmember = None\n bvecmember = None\n tsvmember = None\n for member in f.getmembers():\n if niimember and jsonmember:\n break\n if member.name.endswith('.nii.gz'):\n niimember = member\n if member.name.endswith('.json'):\n jsonmember = member\n if member.name.endswith('.bval'):\n bvalmember = member\n if member.name.endswith('.bvec'):\n bvecmember = member\n if member.name.endswith('.tsv'):\n tsvmember = member\n\n if niimember is None:\n logging.warning(\n f\"No nifti found in {os.path.basename(tarpath)}\"\n )\n continue\n\n find_key = None\n for key in (\"_T1w\", \"_T2w\", \"TSE\", \"_FLAIR\", \"_T2star\",\n \"angio\", \"asl\", \"bold\", \"fmap\", \"_dwi\", \"_swi\",\n \"_ct\", \"fdg\", \"pib\", \"av45\", \"av1451\"):\n if key in niimember.name.lower():\n find_key = key.split('_')[-1]\n if find_key and not keys.intersection(compat_keys(find_key)):\n logging.info(f'skip {os.path.basename(niimember.name)}')\n\n basename = niimember.name.split('/')[-1].split('.')[0]\n flags = list(basename.split('_'))\n\n # fix subdirectory for perfusion\n if basename.endswith('asl'):\n submodality_type = 'perf'\n modality_path = os.path.join(session_path, 'perf')\n\n # fix naming convention for fieldmaps\n if basename.endswith('fieldmap'):\n necho = None\n iecho = None\n irun = None\n for i, flag in enumerate(flags):\n if flag.startswith('echo-'):\n iecho = i\n necho = int(flag.split('-')[-1])\n if flag.startswith('run-'):\n irun = i\n if iecho is not None:\n flags[-1] = f'magnitude{necho:d}'\n if irun > iecho:\n del flags[irun]\n del flags[iecho]\n else:\n del flags[iecho]\n del flags[irun]\n else:\n flags[-1] = 'phasediff'\n del flags[irun]\n\n # fix naming convention for PET tracers\n if basename.endswith('pet'):\n for i, flag in enumerate(flags):\n if flag.startswith('acq-'):\n flag = 'trc-' + flag.split('-')[-1]\n flags[i] = flag\n\n # fix subject and session names\n for i, flag in enumerate(flags):\n if flag.startswith('sub-'):\n flag = f'sub-{participant_id:04d}'\n if flag.startswith(('ses-', 'sess-')):\n flag = f'ses-{session_id:04d}'\n flags[i] = flag\n\n basename = '_'.join(flags)\n\n write_from_buffer(\n f.extractfile(niimember),\n os.path.join(modality_path, f'{basename}.nii.gz')\n )\n if jsonmember is None:\n logging.warning(\n f\"No json found in {os.path.basename(tarpath)}\"\n )\n else:\n write_from_buffer(\n f.extractfile(jsonmember),\n os.path.join(modality_path, f'{basename}.json')\n )\n if submodality_type == 'dwi':\n if bvalmember is None:\n logging.warning(\n f\"No bval found in {os.path.basename(tarpath)}\"\n )\n else:\n write_from_buffer(\n f.extractfile(bvalmember),\n os.path.join(modality_path, f'{basename}.bval')\n )\n if bvecmember is None:\n logging.warning(\n f\"No bvec found in {os.path.basename(tarpath)}\"\n )\n else:\n write_from_buffer(\n f.extractfile(bvecmember),\n os.path.join(modality_path, f'{basename}.bvec')\n )\n if submodality_type == 'pet':\n if tsvmember is None:\n logging.warning(\n f\"No tsv found in {os.path.basename(tarpath)}\"\n )\n else:\n write_from_buffer(\n f.extractfile(tsvmember),\n os.path.join(modality_path, f'{basename}.tsv')\n )\n\n\ndef make_participants(path_xlsx, path_tsv):\n\n \"\"\"\n oasis_header = [\n 'SUB_ID',\n 'MRI_ID',\n 'Group',\n 'Visit',\n 'Delay',\n 'M/F',\n 'Hand',\n 'Age',\n 'Educ',\n 'SES',\n 'MMSE',\n 'CDR',\n 'eTIV',\n 'nWBV',\n 'ASF',\n ]\n \"\"\"\n\n participants_header = [\n 'participant_id',\n 'sex',\n 'handedness',\n 'age',\n ]\n\n def iter_rows():\n xlsx = 
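The subject/session renumbering applied to each basename above is plain token surgery; a compact standalone version with a worked example (the input basename is illustrative):

def fix_ids(basename, participant_id, session_id):
    flags = basename.split("_")
    for i, flag in enumerate(flags):
        if flag.startswith("sub-"):
            flags[i] = f"sub-{participant_id:04d}"
        elif flag.startswith(("ses-", "sess-")):
            flags[i] = f"ses-{session_id:04d}"
    return "_".join(flags)

print(fix_ids("sub-OAS30001_sess-d0129_T1w", 1, 129))  # sub-0001_ses-0129_T1w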
openpyxl.load_workbook(path_xlsx, data_only=True)\n xlsx = xlsx[xlsx.sheetnames[0]]\n yield participants_header\n for nrow in range(2, xlsx.max_row+1):\n id = xlsx[nrow][0].value\n visit = int(xlsx[nrow][3].value)\n if visit != 1:\n continue\n sex = xlsx[nrow][5].value\n hand = xlsx[nrow][6].value\n age = xlsx[nrow][7].value\n id = int(id.split('_')[-1])\n yield [f'sub-{id:04d}', sex, hand, age]\n\n write_tsv(iter_rows(), path_tsv)\n\n\ndef make_sessions(path_xlsx):\n\n \"\"\"\n oasis_header = [\n 'SUB_ID',\n 'MRI_ID',\n 'Group',\n 'Visit',\n 'Delay',\n 'M/F',\n 'Hand',\n 'Age',\n 'Educ',\n 'SES',\n 'MMSE',\n 'CDR',\n 'eTIV',\n 'nWBV',\n 'ASF',\n ]\n \"\"\"\n\n sessions_header = [\n 'session_id',\n 'delay',\n 'pathology',\n 'age',\n 'educ',\n 'ses',\n 'mmse',\n 'cdr',\n 'etiv',\n 'nwbv',\n 'asf',\n ]\n\n pathology_map = {\n \"Nondemented\": \"N\",\n \"Demented\": \"D\",\n \"Converted\": \"C\",\n }\n\n xlsx = openpyxl.load_workbook(path_xlsx, data_only=True)\n xlsx = xlsx[xlsx.sheetnames[0]]\n sessions_tables = {}\n for nrow in range(2, xlsx.max_row+1):\n id = int(xlsx[nrow][0].value.split('_')[-1])\n sessions_tables.setdefault(id, [sessions_header])\n sessions_tables[id].append([\n f'ses-{xlsx[nrow][3].value}', # visit\n xlsx[nrow][4].value, # delay\n pathology_map[xlsx[nrow][2].value], # pathology\n xlsx[nrow][7].value, # age\n xlsx[nrow][8].value, # educ\n xlsx[nrow][9].value, # ses\n xlsx[nrow][10].value, # mmse\n xlsx[nrow][11].value, # cdr\n xlsx[nrow][12].value, # tiv\n xlsx[nrow][13].value, # nwbv\n xlsx[nrow][14].value, # asf\n ])\n return sessions_tables\n\n\nif __name__ == '__main__':\n\n root = logging.getLogger()\n root.setLevel(0)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(0)\n formatter = logging.Formatter('%(levelname)s | %(message)s')\n handler.setFormatter(formatter)\n root.addHandler(handler)\n\n bidsify()\n","repo_name":"balbasty/braindataprep","sub_path":"braindataprep/datasets/OASIS/III/bidsify.py","file_name":"bidsify.py","file_ext":"py","file_size_in_byte":16946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28474394087","text":"#!/usr/bin/env python\nimport argparse\nimport csv\nimport os\nimport sys\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"infile\")\nparser.add_argument(\"-s\", \"--seed\", type=int, required=True)\nparser.add_argument(\"-o\", \"--outfile\", default=\"toys.csv\")\nparser.add_argument(\"-n\", \"--ntoys\", type=int, default=10)\n\nparser.add_argument(\"--workspace-name\", default=\"combined\")\nparser.add_argument(\"--model-config\", default=\"ModelConfig\")\nparser.add_argument(\"--data-name\", default=\"obsData\")\n\nparser.add_argument(\"--mu-range\", default=15., type=float)\n\nparser.add_argument(\"--optimizer-strategy\", type=int, default=1)\nparser.add_argument(\"--optimizer\", choices=[\"Minuit2\", \"Minuit\"], default=\"Minuit2\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n\nargs = parser.parse_args()\n\n# Get macro directory to load needed ROOT macros\nmacro_path = os.path.join(os.path.dirname(__file__), \"..\", \"macros\")\nmacro_path = os.path.abspath(macro_path)\n\nimport ROOT as R\nR.gROOT.SetBatch(True)\nR.gROOT.ProcessLine(\".L {}/DiscoveryTestStatToys.C++\".format(macro_path))\n\n\n# Retrieves value of a RooRealVar from RooArgSet\n# Returns None if it does not exist\ndef retrieve_arg(argset, argname):\n if isinstance(argset.find(argname), R.RooRealVar):\n return 
argset.find(argname).getVal()\n\n return None\n\n\nR.RooRandom.randomGenerator().SetSeed(10000 + args.seed)\nR.Math.MinimizerOptions.SetDefaultMinimizer(args.optimizer)\nR.Math.MinimizerOptions.SetDefaultStrategy(args.optimizer_strategy)\n\nif args.verbose:\n # Doesn't really do anything...\n R.Math.MinimizerOptions.SetDefaultPrintLevel(3)\n\nstart_time = time.time()\n\nhtr = R.DiscoveryTestStatToys(\n args.infile,\n args.workspace_name,\n args.model_config,\n args.data_name,\n args.ntoys,\n args.mu_range,\n args.verbose)\n\nend_time = time.time()\n\nresults = []\nnull_details = htr.GetNullDetailedOutput()\nfor i in range(null_details.numEntries()):\n argset = null_details.get(i)\n\n ts = retrieve_arg(argset, \"ModelConfigB_only_TS0\")\n muhat = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_SigXsecOverSM\")\n\n uncond_status = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_fitStatus\")\n uncond_minNLL = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_minNLL\")\n\n cond_status = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitCond_fitStatus\")\n cond_minNLL = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitCond_minNLL\")\n\n cond_zhf = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitCond_ATLAS_norm_Zhf\")\n uncond_zhf = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_ATLAS_norm_Zhf\")\n\n cond_ttbar = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitCond_ATLAS_norm_ttbar\")\n uncond_ttbar = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_ATLAS_norm_ttbar\")\n\n uncond_covQual = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitUncond_covQual\")\n cond_covQual = retrieve_arg(argset, \"ModelConfigB_only_TS0_fitCond_covQual\")\n\n results.append({\n \"index\": i,\n \"seed\": args.seed,\n \"q0\": 2 * ts,\n \"muhat\": muhat,\n \"uncond_status\": uncond_status,\n \"uncond_minNLL\": uncond_minNLL,\n \"cond_status\": cond_status,\n \"cond_minNLL\": cond_minNLL,\n \"zhf_norm_cond\": cond_zhf,\n \"zhf_norm_uncond\": uncond_zhf,\n \"ttbar_norm_cond\": cond_ttbar,\n \"ttbar_norm_uncond\": uncond_ttbar,\n \"uncond_covQual\": uncond_covQual,\n \"cond_covQual\": cond_covQual,\n })\n\n\ntotal_time = end_time - start_time\ntime_per_toy = total_time / args.ntoys\n\nprint(\"Total time: {:2f} s\".format(total_time))\nprint(\"Time per toy: {:2f} s/toy\".format(time_per_toy))\n\nwith open(args.outfile, \"w\") as csvfile:\n fieldnames = [\n \"q0\", \"muhat\",\n \"uncond_status\", \"uncond_minNLL\",\n \"cond_status\", \"cond_minNLL\",\n \"seed\", \"index\",\n \"avg_time\", \"mu_range\",\n \"zhf_norm_cond\", \"zhf_norm_uncond\",\n \"ttbar_norm_cond\", \"ttbar_norm_uncond\",\n \"uncond_covQual\", \"cond_covQual\"]\n\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in results:\n row[\"avg_time\"] = time_per_toy\n row[\"mu_range\"] = args.mu_range\n writer.writerow(row)\n","repo_name":"chrisdeutsch/bbtt_global_significance","sub_path":"scripts/runDiscoveryTestStatToys.py","file_name":"runDiscoveryTestStatToys.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"25749251854","text":"import riftChatBotUtils\nimport datetime, threading\n\n# Warn guildmates that CQ is about to end\ndef bot_cq(riftBot, req):\n\treq.argList = ['3m30s', 'CQ ending soon']\n\t\n\treturn bot_timers_add(riftBot, req)\n\n# Output server date\ndef bot_date(riftBot, req):\n\tdtdt = datetime.datetime\n\treq.response.append(dtdt.strftime(dtdt.utcnow(), 
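retrieve_arg above encodes a useful contract, a typed lookup that degrades to None on a miss, which is independent of ROOT; a dict-based analogue of the same idea:

def retrieve(fit_results, name, expected_type=float):
    value = fit_results.get(name)
    return value if isinstance(value, expected_type) else None

fit = {"muhat": 1.5, "status": 0, "label": "uncond"}
print(retrieve(fit, "muhat"))    # 1.5
print(retrieve(fit, "label"))    # None: wrong type
print(retrieve(fit, "missing"))  # None: absent key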
'%d/%m/%y'))\n\treturn req\n\t\t\n# Output server time\ndef bot_time(riftBot, req):\n\tdtdt = datetime.datetime\n\treq.response.append(dtdt.strftime(dtdt.utcnow(), '%X'))\n\treturn req\n\n# !timers is basically an alias for !timers list\ndef bot_timers(riftBot, req):\n\tif req.argList and req.argList[0] in ['-h', '--help']:\n\t\tfunc, opts, desc = __botFunctions__[\"timers\"]\n\t\treq.response.append(desc)\n\t\treq.response.append('Options: %s' % \",\".join(__timers_options__))\n\t\treturn req\n\t\t\n\treturn bot_timers_list(riftBot, req)\n\t\n# Register a new timer\ndef bot_timers_add(riftBot, req):\n\tif not req.argList:\n\t\treq.response.append('Usage: !timers add hh:mm[:ss]/[Ah][Bm][Cs]')\n\t\t\n\telif req.argList[0] in ['-h', '--help']:\n\t\tfunc, opts, desc = __timers_options__[\"add\"]\n\t\treq.response.append(desc)\n\t\treq.response.append('Usage: !timers add hh:mm[:ss]/[Ah][Bm][Cs]')\n\t\t\n\telse:\n\t\t# Connect to the database\n\t\tDB = riftBot.dbConnect()\n\t\tcursor = DB.cursor()\n\t\t\n\t\tdtdt = datetime.datetime\n\t\tnow = dtdt.utcnow()\n\t\t\n\t\tcountdown = ''\n\t\t\n\t\t# Process the user's input from one of two formats\n\t\ttimeStr = req.argList[0]\n\t\tif any([c in ['h','m','s'] for c in timeStr]):\n\t\t\t# HHhMMmSSs\n\t\t\tif ':' in timeStr:\n\t\t\t\treq.response.append('Syntax Error')\n\t\t\t\t\n\t\t\telse:\n\t\t\t\th,m,s = [0,0,0]\n\t\t\t\ttry:\n\t\t\t\t\tn1 = 0\n\t\t\t\t\tfor n2, c in enumerate(timeStr):\n\t\t\t\t\t\tif c in ['h', 'H']:\n\t\t\t\t\t\t\th = int(timeStr[n1:n2])\n\t\t\t\t\t\t\tn1 = n2+1\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telif c in ['m', 'M']:\n\t\t\t\t\t\t\tm = int(timeStr[n1:n2])\n\t\t\t\t\t\t\tn1 = n2+1\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telif c in ['s', 'S']:\n\t\t\t\t\t\t\ts = int(timeStr[n1:n2])\n\t\t\t\t\t\t\tn1 = n2+1\n\t\t\t\t\t\t\n\t\t\t\t\tcountdown = datetime.timedelta(hours=h, minutes=m, seconds=s)\n\t\t\t\t\ttimerTime = now + countdown\n\t\t\t\t\n\t\t\t\texcept ValueError:\n\t\t\t\t\treq.response.append('Syntax Error')\n\n\t\telif ':' in timeStr and timeStr.count(\":\") < 3:\n\t\t\t# HH:MM:SS\n\t\t\t# Initialise so a failed parse below cannot raise a NameError later\n\t\t\tfromMidnight = None\n\t\t\tif timeStr.count(\":\") == 2:\n\t\t\t\ttry:\n\t\t\t\t\tfromMidnight = dtdt.strptime(timeStr, '%H:%M:%S').time()\n\t\t\t\t\t\n\t\t\t\texcept ValueError:\n\t\t\t\t\treq.response.append('Syntax Error')\n\t\t\t\t\t\n\t\t\telif timeStr.count(\":\") == 1:\n\t\t\t\ttry:\n\t\t\t\t\tfromMidnight = dtdt.strptime(timeStr, '%H:%M').time()\n\t\t\t\t\t\n\t\t\t\texcept ValueError:\n\t\t\t\t\treq.response.append('Syntax Error')\n\t\t\t\n\t\t\t# If we successfully parsed user input, convert to time to wait\n\t\t\tif fromMidnight:\n\t\t\t\ttimerTime = dtdt.combine(now.date(), fromMidnight)\n\t\t\t\tif timerTime < now:\n\t\t\t\t\tcountdown = timerTime - now + datetime.timedelta(days=1)\n\t\t\t\telse:\n\t\t\t\t\tcountdown = timerTime - now\n\t\t\t\t\n\t\telse:\n\t\t\treq.response.append('Syntax Error')\n\t\t\n\t\t# If a time to wait was successfully parsed\n\t\tif countdown:\n\t\t\t# Get a new timer ID\n\t\t\ttimerId = cursor.execute(\"SELECT MAX(timerId) AS m FROM timers\").fetchone()\n\t\t\tif timerId and timerId['m']:\n\t\t\t\ttimerId = timerId['m'] + 1\n\t\t\telse:\n\t\t\t\ttimerId = 1\n\t\t\t\t\n\t\t\t# Register the timer in the database\n\t\t\talertToGuild = (1 if req.toGuild else 0)\n\t\t\tcursor.execute(\"INSERT INTO timers VALUES (?,?,?,?,?,?)\", (timerId, req.requester, req.requesterId, alertToGuild, ' '.join(req.argList[1:]), timerTime.strftime('%c')))\n\t\t\tDB.commit()\n\t\t\t\n\t\t\t# Set the timer and store it\n\t\t\ttimer = 
threading.Timer(countdown.total_seconds(), bot_timers_trigger, [riftBot, timerId])\n\t\t\ttimer.daemon = True\n\t\t\ttimer.start()\n\t\t\treq.response.append('timer with ID %i due in %0.0fs' % (timerId, countdown.total_seconds()))\n\t\t\tif not riftBot.appendTimer(timerId, timer):\n\t\t\t\treq.response.append('Error: this timer is uninterruptible!')\n\t\n\t\tDB.close()\n\t\t\n\treturn req\n\n# List pending timers\ndef bot_timers_list(riftBot, req):\n\tif req.argList and req.argList[0] in ['-h', '--help']:\n\t\tfunc, opts, desc = __timers_options__[\"list\"]\n\t\treq.response.append(desc)\n\t\treq.response.append('Usage: !timers list [player]')\n\t\n\telse:\n\t\t# Default is the user\n\t\tif req.argList:\n\t\t\tmain = req.argList[0].lower()\n\t\telse:\n\t\t\tmain = req.requester\n\t\t\n\t\t# Connect to the database\n\t\tDB = riftBot.dbConnect()\n\t\tcursor = DB.cursor()\n\t\t\n\t\t# Get a list of the player's timers and output them\n\t\ttimers = cursor.execute(\"SELECT timerId, message, timeStamp FROM timers WHERE player=?\", (main,)).fetchall()\n\t\tif timers:\n\t\t\tfor timer in timers:\n\t\t\t\tcountdown = datetime.datetime.strptime(timer['timeStamp'], '%c') - datetime.datetime.utcnow()\n\t\t\t\treq.response.append('Timer %i: %s due in %is' % (timer['timerId'], timer['message'], countdown.total_seconds()))\n\t\t\n\t\telse:\n\t\t\treq.response.append('%s has no pending timers' % main.title())\n\t\t\n\t\tDB.close()\n\t\t\n\treturn req\n\n# Remove a pending timer\ndef bot_timers_remove(riftBot, req):\n\tif not req.argList:\n\t\treq.response.append('Usage: !timers rem ID [ID ..]')\n\t\t\n\telif req.argList[0] in ['-h', '--help']:\n\t\tfunc, opts, desc = __timers_options__[\"rem\"]\n\t\treq.response.append(desc)\n\t\treq.response.append('Usage: !timers rem ID [ID ..]')\n\t\n\telse:\n\t\t# Load the timers database\n\t\tDB = riftBot.dbConnect()\n\t\tcursor = DB.cursor()\n\t\t\n\t\t# Get a list of timers this user owns\n\t\ttimers = cursor.execute(\"SELECT timerId FROM timers WHERE player=?\", (req.requester,)).fetchall()\n\t\tplayerTimers = [timer['timerId'] for timer in timers]\n\t\tfor arg in req.argList:\n\t\t\t# Get the timer the user specified\n\t\t\ttimer = cursor.execute(\"SELECT 1 FROM timers WHERE timerId=?\", (int(arg),)).fetchone()\n\t\t\tif timer:\n\t\t\t\ttry:\n\t\t\t\t\tif int(arg) in playerTimers or req.su:\n\t\t\t\t\t\t# Remove the timer from the database and cancel the timer function\n\t\t\t\t\t\tcursor.execute(\"DELETE FROM timers WHERE timerId=?\", (int(arg),))\n\t\t\t\t\t\tif cursor.rowcount > 0 and riftBot.removeTimer(int(arg)):\n\t\t\t\t\t\t\treq.response.append('Timer %s removed' % arg)\n\t\t\t\t\t\t\tDB.commit()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treq.response.append('Error: Removal of timer %s failed' % arg)\n\t\t\t\t\t\t\tDB.rollback()\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\treq.response.append('%s does not own timer %s' % (req.requester.title(), arg))\n\t\t\t\t\t\n\t\t\t\texcept ValueError:\n\t\t\t\t\treq.response.append('%s is not a valid timer ID' % arg)\n\t\t\t\t\t\t\n\t\t\telse:\n\t\t\t\treq.response.append('No timer pending with ID %s' % arg)\n\t\t\t\n\t\tDB.close()\n\t\t\n\treturn req\n\t\n# The function passed to threading.Timer which outputs the message in future\ndef bot_timers_trigger(riftBot, timerId):\n\t# Connect to the database\n\tDB = riftBot.dbConnect()\n\tcursor = DB.cursor()\n\t\n\t# Create a new request object to output the response\n\treq = riftChatBotUtils.riftChatRequest()\n\t\t\n\t# Look up the triggered timer\n\ttimerInfo = 
cursor.execute(\"SELECT player, playerId, sendGuild, message FROM timers WHERE timerId=?\", (timerId,)).fetchone()\n\tif timerInfo:\n\t\t# Prepare the request object\n\t\treq.requester = timerInfo['player']\n\t\treq.requesterId = timerInfo['playerId']\n\t\tif timerInfo['sendGuild'] == 1:\n\t\t\treq.toGuild = True\n\t\treq.toWhisp = True\n\t\treq.response.append(\"%s's Timer: %s\" % (timerInfo['player'].title(), timerInfo['message']))\n\t\t\n\t\t# Remove the timer from the database\n\t\tcursor.execute(\"DELETE FROM timers WHERE timerId=?\", (timerId,))\n\t\tDB.commit()\n\t\triftBot.removeTimer(timerId)\n\t\t\n\telse:\n\t\t# Something has gone awfully wrong\n\t\treq.toGuild = True\n\t\treq.toWhisp = False\n\t\treq.response.append('Error: An orphaned timer was triggered')\n\t\n\tDB.close()\n\t\t\n\triftBot.sendResponse(req)\n\n# Run on bot startup\ndef __bot_init__(riftBot):\n\t# Retrieve any previously created timers and start them again\n\tDB = riftBot.dbConnect()\n\tcursor = DB.cursor()\n\t\t\n\tcursor.execute(\"CREATE TABLE IF NOT EXISTS timers (timerId INT PRIMARY KEY, player VARCHAR(30), playerId VARCHAR(30), sendGuild INT, message VARCHAR(255), timeStamp VARCHAR(30))\")\n\tcursor.execute(\"CREATE TABLE IF NOT EXISTS admins (player VARCHAR(30) PRIMARY KEY, playerId VARCHAR(30))\")\n\t\n\t# Retrieve old timers (i.e. in case we just crashed)\n\ttimers = cursor.execute(\"SELECT * FROM timers\").fetchall()\n\tfor oldTimer in timers:\n\t\tif oldTimer['timerId'] and oldTimer['timeStamp']:\n\t\t\ttimerTime = datetime.datetime.strptime(oldTimer['timeStamp'], '%c')\n\t\t\tcountdown = timerTime - datetime.datetime.utcnow()\n\t\t\t\n\t\t\t# Have we missed it?\n\t\t\tif countdown.total_seconds < 0:\n\t\t\t\tif countdown.total_seconds > -300:\n\t\t\t\t\t# Trigger the timer if its less than 5 minutes late, no point necroing\n\t\t\t\t\tbot_timers_trigger(riftBot, oldTimer['timerId'])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tcursor.execute(\"DELETE FROM timers WHERE timerid=?\", (timer['timerId'],))\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# Set the timer and store it\n\t\t\t\ttimer = threading.Timer(countdown.total_seconds(), bot_timers_trigger, [riftBot, oldTimer['timerId']])\n\t\t\t\ttimer.daemon = True\n\t\t\t\ttimer.start()\n\t\t\t\triftBot.appendTimer(oldTimer['timerId'], timer)\n\t\t\t\t\n\t\telse:\n\t\t\tcursor.execute(\"DELETE FROM timers WHERE timerid=?\", (timer['timerId'],))\n\t\n\tDB.commit()\n\tDB.close()\n\n# A list of options for the timers function\n__timers_options__ = {\n\t'add'\t: (bot_timers_add, [], \"Add a chat alert\"),\n\t'list'\t: (bot_timers_list, [], \"List chat alerts\"),\n\t'remove': (bot_timers_remove, [], \"Remove a chat alert\")\n\t}\n\n# A list of functions contained in this module, format: (function, options, description)\n__botFunctions__ = {\n\t'cq'\t: (bot_cq, [], \"Alias for !timer 3m30s CQ ending soon\"),\n\t'date'\t: (bot_date, [], \"Print server date\"),\n\t'time'\t: (bot_time, [], \"Print server time\"),\n\t'timers': (bot_timers, __timers_options__, \"Schedule / manage chat alerts\")\n\t}\n\t","repo_name":"TapdancingRodent/RAT","sub_path":"botFunctionModules/temporal.py","file_name":"temporal.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"34117651019","text":"# groupdraw.py\nfrom graphics import *\nfrom classGraphicsGroup import GraphicsGroup\n\ndef main():\n\twin = GraphWin()\n\twin.setCoords(0,0,100,100)\n\ta = GraphicsGroup(Point(50,50))\n\tcirc = 
Circle(Point(40,40), 10)\n\tcirc.setFill(\"blue\")\n\tcirc.setOutline(\"blue\")\n\ttri = Polygon(Point(35, 35), Point(35, 50), Point(50, 35))\n\ttri.setFill(\"red\")\n\ttri.setOutline(\"red\")\n\ta.addObject(circ)\n\ta.addObject(tri)\n\ta.draw(win)\n\tp = win.getMouse()\n\ta.move(p.getX() - a.anchor.getX(), p.getY() - a.anchor.getY())\n\twin.getMouse()\n\ta.undraw()\n\twin.getMouse()\n\twin.close()\n\nmain()\n\t\n\t\n","repo_name":"alextickle/zelle-exercises","sub_path":"ch11/groupdraw.py","file_name":"groupdraw.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"9577991435","text":"\n\ndef solution():\n    num = int(input())\n    \n    # Fill a number n with 1s and 2s; order matters (the arrangement counts)\n    \n    # Counting the number of ways -> easiest to solve with DP\n    \n    # n = 1: a single 1, one way\n    # n = 2: place a 1 then solve n = 1, or place a 2\n    # n = 3: place a 1 then a 2, or a 2 then a 1\n    \n    # Top-down is usually faster\n    # When recursing, a dict makes memoization convenient\n    # answer = {1:1, 2:2}\n    # def rec(x):\n    #     if x in answer:\n    #         return answer[x]\n    #     else:\n    #         answer[x] = rec(x-2) + rec(x-1)\n    #         return answer[x]\n    # print(int(rec(num))%10007)\n    \n    # bottom-up\n    array = [0] * (num+2)\n    array[1] = 1\n    array[2] = 2\n    \n    for idx in range(3, len(array)):\n        array[idx] = array[idx-1] + array[idx-2]\n    print(int(array[num])%10007)\n\nsolution()\n    ","repo_name":"ryol8888/Study","sub_path":"Algorithm/2xN_tiling.py","file_name":"2xN_tiling.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"42196455388","text":"from setuptools import setup, find_packages\n\nversion = '0.2.2-dev'\n\nsetup(\n    name='activitystreams',\n    version=version,\n    description=\"ActivityStreams 2.0 JSON implementation\",\n    long_description=\"\"\"Activity Streams 2.0\"\"\",\n    classifiers=[],\n    keywords='activitystreams',\n    author='Oscar Eriksson',\n    author_email='oscar.eriks@gmail.com',\n    url='https://github.com/sovaa/activitystreams',\n    license='LICENSE.txt',\n    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),\n    include_package_data=True,\n    zip_safe=False,\n    install_requires=[\n        'strict-rfc3339==0.7', # parsing dates\n    ],\n    test_requires=[\n        'nose2==0.10.0'\n    ])\n","repo_name":"sovaa/activitystreams","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
{"seq_id":"41554809602","text":"from rgbmatrix import graphics, RGBMatrix, RGBMatrixOptions\nimport time\n\n# Set up the RGBMatrixOptions object with your desired matrix configuration\noptions = RGBMatrixOptions()\noptions.rows = 64\noptions.cols = 64\noptions.chain_length = 2\noptions.parallel = 1\noptions.hardware_mapping = 'adafruit-hat-pwm'\noptions.brightness = 25\n\n# Create a RGBMatrix object with the given options\nmatrix = RGBMatrix(options = options)\n\n# Create a font to use for drawing text\nfont = graphics.Font()\nfont.LoadFont(\"/home/pi/Desktop/rpi-rgb-led-matrix/fonts/5x8.bdf\")\nchar_width = 5\n# Define some colors\nwhite = graphics.Color(255, 255, 255)\n\n# Define the text to scroll\ntext = \"A very loooooooooooooooooooooooooong song title :)\"\n\n\n#def scroll_text(matrix, text, color, font_path, x_origin, y_origin)\n\n\n# Scroll the text horizontally across the matrix display\nstart_scroll = 127\npos = start_scroll\ny_border = 2\nwhile True:\n    # Clear the matrix display\n    matrix.Clear()\n\n    # Draw the scrolling text\n    graphics.DrawText(matrix, font, pos, 9, 
white, text)\n\n    # Black out the two-pixel columns at x=64/65 and x=126/127 across the text rows\n    for dy in range(10):\n        matrix.SetPixel(64, y_border + dy, 0, 0, 0)\n        matrix.SetPixel(65, y_border + dy, 0, 0, 0)\n        matrix.SetPixel(126, y_border + dy, 0, 0, 0)\n        matrix.SetPixel(127, y_border + dy, 0, 0, 0)\n\n    # Decrement the horizontal position\n    pos -= 1\n    \n    # If the entire text has scrolled off the left edge of the matrix\n    # Once the last part of the text is visible pause for 0.5 seconds\n    # and then restart the scroll.\n    if pos + len(text) * char_width < 126:\n        time.sleep(0.5)\n        # Reset the horizontal position to the right edge of the matrix\n        pos = start_scroll\n    \n    # Wait for a short period of time to slow down the scrolling speed\n    time.sleep(0.05)\n","repo_name":"3rian-exe/Widget-ExhiBiT","sub_path":"Widgets/Spotify Album Art/visual elements/scroll_text.py","file_name":"scroll_text.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"4815338572","text":"''' Shows results using Streamlit '''\r\n\r\nimport display.printing\r\nfrom common.data import Database\r\nfrom display.plotting import Plotter\r\nfrom display.streaming import Streamer\r\n \r\ndef plot_data(database, streamer):\r\n    # plot results of analysis\r\n    plotter = Plotter(database, streamer)\r\n    plotter.add_data()\r\n    plotter.plot_results()\r\n\r\ndef main():\r\n    streamer = Streamer()\r\n\r\n    # prepare database\r\n    database = Database()\r\n    \r\n    plot_data(database, streamer)\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"mfranzonello/playpaws","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"12107261454","text":"from menu import Menu, MenuItem\nfrom coffee_maker import CoffeeMaker\nfrom money_machine import MoneyMachine\n\n\nmenu = Menu()\nmachine = 
CoffeeMaker()\nmoney = MoneyMachine()\n\nmachine_on = True\nwhile machine_on:\n selection = input(f\"What would you like? {menu.get_items().title()}\")\n\n if selection == \"off\":\n machine_on = False\n elif selection == \"report\":\n machine.report()\n money.report()\n else:\n coffee = menu.find_drink(selection)\n if machine.is_resource_sufficient(coffee) and money.make_payment(coffee.cost):\n machine.make_coffee(coffee)\n\n\n\n","repo_name":"damte99/python-learning","sub_path":"coffee-machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13887990418","text":"#--------------------------------\n#TITLE: Lab 11: Functions\n#FILE: lab11.py\n#AUTHOR: Ben Charest\n#CLASS: CSC110, semester, Python for Scientists\n#CLASS MEETING TIME: MWF 11am\n#DATE: 3/29/2021\n#DATE SUBMITTED: 4/5/2021\n#DESCRIPTION: Learning about more functions\n#--------------------------------\n\n# def min(x, y, z):\n# if (x <= y and x <= z):\n# smallest = x\n# elif (y <= x and y <= z):\n# smallest = y\n# else:\n# smallest = z\n# return smallest\n# print(min(3,7,2))\n\n# def areaPerimeter(width,height):\n# \treturn (width*height, 2*(width+height));\n#\n# a,p = areaPerimeter(2,3)\n# print(\"The area is\", a, \"and the perimeter is\", p);\n\n# def areaPerimeter(width,height=3):\n# \treturn (width*height, 2*(width+height))\n#\n# a2, p2 = areaPerimeter(2);\n# print(\"The area is still\", a2, \"and the perimeter is still\", p2);\n\n##turtlePolygon.py - Draws a polygon with a turtle\n##Practice with functions, while loops and the turtle module\n\n# from turtle import *\n#\n# def drawRegularPolygon(number_sides, side_length, my_turtle=Turtle()):\n# \"\"\" Directs a turtle to draw a regular polygon \"\"\"\n# internal_angle = (number_sides - 2)*180/number_sides;\n# turn_angle = 180 - internal_angle;\n#\n# side_number = 0;\n# while side_number < number_sides :\n# my_turtle.fd(side_length);\n# my_turtle.left(turn_angle);\n# side_number += 1;\n#\n# ##Main program\n# cage = Screen();\n# cage.setup(width=400, height=400, startx=0, starty=0);\n# bernie = Turtle(shape = \"turtle\");\n# number_sides = int(input('Side number: '))\n# side_length = int(input('Side length: '))\n# drawRegularPolygon(number_sides, side_length);\n\n#trees program\nimport matplotlib.pyplot as plt\n\ndef harvestPlant(trees_initial=7000, harvest=.12, planted=600):\n after = trees_initial - (trees_initial * harvest) + planted\n return after\n\n\n\nyears = int(input('How many years will the harvesting take place?'))\n\ntrees_initial = int(input('starting #'))\nharvest = float(input('% harvested each year'))\nplanted = int(input('# planted each year'))\nx = []\nfor i in range(years - 1):\n x.append(i)\n\ny = []\n\nafter = harvestPlant(trees_initial, harvest, planted)\nprint(after)\nfor i in range(years - 1):\n after = harvestPlant(after, harvest, planted)\n print(after)\n y.append(after)\n\nplt.scatter(x, y)\nplt.title('Tree # vs. 
Years')\nplt.xlabel('Years')\nplt.ylabel('Tree #')\nplt.show()\n\n","repo_name":"8enji/SCGSSM","sub_path":"CS110/Miscellaneous/lab11.py","file_name":"lab11.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4092635172","text":"__author__ = 'pwidqssg'\n\nimport re\nimport xml.etree.ElementTree as ET\n\nfrom caddies.builder import Builder\nfrom caddies.objects import *\n\n\nclass USReader:\n def __init__(self, filepath):\n self.filepath = filepath\n self.tree = ET.parse(self.filepath)\n self.builder = Builder()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.cleanup()\n\n def cleanup(self):\n pass\n # os.unlink(self.filepath)\n\n def extractText(self, node):\n chunks = []\n for sub in node.iter():\n add_brackets = False\n if sub.tag == 'format' and sub.attrib['code'] == 'TF':\n add_brackets = True\n if sub.text is not None:\n if add_brackets:\n chunks.append('[')\n chunks.append(sub.text)\n if add_brackets:\n chunks.append(']')\n if sub.tail is not None:\n chunks.append(sub.tail)\n\n return \"\".join(chunks)\n\n def readInstance(self):\n self.instance = instance.Instance(id=1, agency='uk.us')\n self.instance.instrument = self.tree.find('.//specification//sd_properties/label').text\n\n def readQuestion(self, module, xml_q, parent=None):\n literal = xml_q.find('qt_properties/text')\n if literal != None:\n\n textid = xml_q.get('name').replace('.', '_', 1)\n\n decimal = xml_q.find('qt_properties/decimals')\n options = xml_q.findall('qt_properties/options/option')\n\n if (decimal == None and len(options) == 0) and xml_q.attrib['type'] != 'text':\n self.builder.addStatement(textid, self.extractText(literal))\n else:\n question = self.builder.newQuestion(textid, self.extractText(literal))\n\n instr = xml_q.find('qt_properties/help')\n if instr != None:\n instr = self.extractText(instr).strip()\n if instr != '':\n question.instruction = instr\n\n if xml_q.attrib['type'] == 'text':\n question.add_text('Generic text')\n\n if decimal != None:\n range = xml_q.find('qt_properties/range')\n if range != None:\n max = range.get('max')\n min = range.get('min')\n question.add_numeric(textid, 'Integer' if self.extractText(decimal) == '0' else 'Float', min,\n max)\n\n for option in options:\n opt_text = self.extractText(option.find('text'))\n if opt_text == None or opt_text.strip() == \"\":\n opt_text = self.extractText(option.find('label'))\n question.add_code(option.get('value'), opt_text.strip())\n\n self.builder.submitQuestion(question, parent)\n\n def readCondition(self, module, xml_c, parent=None):\n condition = xml_c.find('if/condition')\n if condition == None:\n logic = \"\"\n else:\n logic = self.extractText(condition)\n label = xml_c.find('if/sd_properties/label')\n if label == None:\n text = \"NO CONDITION TEXT\"\n else:\n text = self.extractText(label)\n\n try:\n if logic[:1] == '[' and logic[-1:] == ']':\n logic = logic[1:-1]\n\n logic = logic.replace('<', '<').replace('>', '>')\n\n logic_chunks = re.split('([^\\w\\.])', logic)\n\n textid = logic_chunks[0].split('.')[0]\n temp = []\n for chunk in logic_chunks:\n chunk = chunk.strip().lower()\n if re.match('[a-zA-Z]', chunk) != None:\n found = False\n for qc in self.builder.cc_question:\n if qc.textid[-1 * len(chunk):] == chunk:\n temp.append(qc.textid)\n found = True\n break\n if not found:\n temp.append(chunk.replace('or', '||').replace('and', '&&'))\n elif chunk == '':\n pass\n else:\n 
temp.append(chunk.replace('|', '||').replace(',', '||').replace('&', '&&'))\n logic_chunks = temp\n\n logic_expressions = []\n logic_expressions.append([])\n while len(logic_chunks) > 0:\n chunk = logic_chunks.pop(0)\n if chunk == '||' or chunk == '&&' or chunk == '(' or chunk == ')':\n logic_expressions.append(chunk)\n logic_expressions.append([])\n elif chunk == '<':\n if len(logic_chunks) > 0 and logic_chunks[0] == '>':\n logic_chunks.pop(0)\n logic_expressions[-1].append('!=')\n else:\n logic_expressions[-1].append(chunk)\n else:\n logic_expressions[-1].append(chunk)\n\n temp_logic_expressions = []\n for expression in logic_expressions:\n if isinstance(expression, list):\n if len(expression) > 0:\n temp_logic_expressions.append(expression)\n else:\n temp_logic_expressions.append(expression)\n logic_expressions = temp_logic_expressions\n\n for i in range(len(logic_expressions)):\n if isinstance(logic_expressions[i], list):\n if len(logic_expressions[i]) == 1 and i > 1:\n logic_expressions[i] = [logic_expressions[i - 2][0],\n logic_expressions[i - 2][1],\n logic_expressions[i][0]]\n\n logic_expressions = [x for x in logic_expressions if isinstance(x, basestring) or (\n (isinstance(x, list) and (x[0][:3] == 'qc_' or x[2][:3] == 'qc_')))]\n\n i = 0\n while i < len(logic_expressions):\n if isinstance(logic_expressions[i], basestring):\n if i == 0 or i + 1 >= len(logic_expressions):\n logic_expressions.pop(i)\n continue\n if not isinstance(logic_expressions[i - 1], list) or not isinstance(logic_expressions[i + 1], list):\n logic_expressions.pop(i)\n continue\n i += 1\n\n for expr in logic_expressions:\n if (isinstance(expr, basestring)): continue\n if (expr[0][:3] == 'qc_' and expr[2][:3] == 'qc_'): continue\n if (expr[0][:3] == 'qc_'):\n qref = expr[0]\n val = expr[2]\n else:\n qref = expr[2]\n val = expr[0]\n\n for quest in self.builder._submitted_questions:\n if 'qc_' + quest.textid == qref:\n if len(quest.codes) > 0:\n pass\n elif len([x for x in quest.rd if x['type'] == 'Text']):\n val = '\"' + val + '\"'\n elif len([x for x in quest.rd if x['type'] == 'Integer' or x['type'] == 'Float']):\n val = \"'\" + val + \"'\"\n expr[0] = qref\n expr[2] = val\n\n logic_expressions = [' '.join(x) if isinstance(x, list) else x for x in logic_expressions]\n logic = ' '.join(logic_expressions)\n\n logic = logic.replace('=', '==').replace('!==', '!=')\n except:\n logic = ''\n text = 'If ' + text + ' [' + logic + ']'\n\n cond = self.builder.addCondition(textid, text, parent)\n\n for xml_elem in xml_c.find('if/specification_elements'):\n self.readElement(module, xml_elem, cond)\n\n def readModule(self, xml_m, parent=None):\n seq = self.builder.addSequence(self.extractText(xml_m.find('rm_properties/label')), parent)\n\n for xml_elem in xml_m.find('specification_elements'):\n self.readElement(xml_m.get('name'), xml_elem, seq)\n\n def readDataout(self, module, xml_d, parent=None):\n label = xml_d.find('sd_properties/label')\n pass_down = parent\n\n if label != None:\n seq = self.builder.addSequence(self.extractText(xml_d.find('sd_properties/label')), parent)\n pass_down = seq\n\n for xml_elem in xml_d.find('specification_elements'):\n self.readElement(module, xml_elem, pass_down)\n\n def readSection(self, module, xml_s, parent=None):\n seq = self.builder.addSequence(self.extractText(xml_s.find('sd_properties/label')), parent)\n\n for xml_elem in xml_s.find('specification_elements'):\n self.readElement(module, xml_elem, seq)\n\n def readLoop(self, module, xml_l, parent=None):\n loop = 
self.builder.addLoop('default', '_var', loop_while=xml_l.get('args'), parent=parent)\n\n for xml_elem in xml_l.find('specification_elements'):\n self.readElement(module, xml_elem, loop)\n\n def readElement(self, module, element, parent=None):\n if element.tag == 'question':\n self.readQuestion(module, element, parent)\n elif element.tag == 'branch':\n self.readCondition(module, element, parent)\n elif element.tag == 'module':\n self.readModule(element, parent)\n elif element.tag == 'dataout':\n self.readDataout(module, element, parent)\n elif element.tag == 'section':\n self.readSection(module, element, parent)\n elif element.tag == 'loop':\n self.readLoop(module, element, parent)\n\n def readQRE(self):\n first_module_parent = self.tree.find('.//module/..')\n for xml_elem in list(first_module_parent):\n self.readElement('', xml_elem)\n\n module = self.tree.find('.//specification_elements/module').get('name')\n\n # for xml_elem in self.tree.find('.//module/specification_elements/dataout/specification_elements'):\n # self.readElement(module, xml_elem)\n\n return self.builder\n","repo_name":"CLOSER-Cohorts/us2caddies","sub_path":"us2caddies/usreader.py","file_name":"usreader.py","file_ext":"py","file_size_in_byte":10312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2833099162","text":"#!/usr/bin/python3\n# encoding: utf-8\n\nimport os\nimport sys\nimport getopt\nimport logging\nimport shutil\nimport psutil\nfrom modules.com_run import ComGenerator\nfrom modules.web_server import ListenServer\nfrom modules.Wlisten_server import WListenServer\nfrom modules.payload_builder_factory import PayloadBuilderFactory\nfrom common import utils, mp_session, help\nfrom common.utils import MSTypes\nfrom common.definitions import VERSION, LOGLEVEL\nif sys.platform == \"win32\":\n try:\n import win32com.client #@UnresolvedImport @UnusedImport\n except:\n print(\"Error: Could not find win32com.\")\n sys.exit(1)\nMP_TYPE=\"Pro\"\nif utils.checkModuleExist(\"pro_core\"):\n from pro_modules.utilities.dcom_run import DcomGenerator\n from pro_modules.payload_builders.containers import ContainerGenerator\n from pro_core.payload_builder_factory_pro import PayloadBuilderFactoryPro\n from pro_core import arg_mgt_pro, mp_session_pro\nelse:\n MP_TYPE=\"Community\"\n\nfrom colorama import init\nfrom termcolor import colored\n\n# use Colorama to make Termcolor work on Windows too\ninit()\n\n\n\nWORKING_DIR = \"temp\"\n\nBANNER = help.getToolPres()\n\n\ndef main(argv):\n global MP_TYPE\n logLevel = LOGLEVEL\n # initialize macro_pack session object\n working_directory = os.path.join(os.getcwd(), WORKING_DIR)\n if MP_TYPE == \"Pro\":\n mpSession = mp_session_pro.MpSessionPro(working_directory, VERSION, MP_TYPE)\n else:\n mpSession = mp_session.MpSession(working_directory, VERSION, MP_TYPE)\n\n try:\n longOptions = [\"embed=\", \"listen=\", \"port=\", \"webdav-listen=\", \"generate=\", \"quiet\", \"input-file=\", \"encode\",\n \"obfuscate\", \"obfuscate-form\", \"obfuscate-names\", \"obfuscate-declares\", \"obfuscate-strings\",\n \"obfuscate-names-charset=\", \"obfuscate-names-minlen=\", \"obfuscate-names-maxlen=\",\n \"file=\",\"template=\",\"listtemplates\",\"listformats\",\"icon=\", \"start-function=\",\"uac-bypass\",\n \"unicode-rtlo=\", \"dde\", \"print\", \"force-yes\", \"help\"]\n shortOptions= \"e:l:w:s:f:t:G:hqmop\"\n # only for Pro release\n if MP_TYPE == \"Pro\":\n longOptions.extend(arg_mgt_pro.proArgsLongOptions)\n shortOptions += 
arg_mgt_pro.proArgsShortOptions\n # Only enabled on windows\n if sys.platform == \"win32\":\n longOptions.extend([\"run=\", \"run-visible\"])\n\n opts, args = getopt.getopt(argv, shortOptions, longOptions) # @UnusedVariable\n except getopt.GetoptError:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-o\", \"--obfuscate\"):\n mpSession.obfuscateForm = True\n mpSession.obfuscateNames = True\n mpSession.obfuscateStrings = True\n mpSession.obfuscateDeclares = True\n elif opt==\"--obfuscate-form\":\n mpSession.obfuscateForm = True\n elif opt==\"--obfuscate-declares\":\n mpSession.obfuscateDeclares = True\n elif opt==\"--obfuscate-names\":\n mpSession.obfuscateNames = True\n elif opt==\"--obfuscate-names-charset\":\n try:\n mpSession.obfuscatedNamesCharset = arg\n except ValueError:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n elif opt==\"--obfuscate-names-minlen\":\n try:\n mpSession.obfuscatedNamesMinLen = int(arg)\n except ValueError:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n if mpSession.obfuscatedNamesMinLen < 4 or mpSession.obfuscatedNamesMinLen > 255:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n elif opt==\"--obfuscate-names-maxlen\":\n try:\n mpSession.obfuscatedNamesMaxLen = int(arg)\n except ValueError:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n if mpSession.obfuscatedNamesMaxLen < 4 or mpSession.obfuscatedNamesMaxLen > 255:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n elif opt==\"--obfuscate-strings\":\n mpSession.obfuscateStrings = True\n elif opt==\"-s\" or opt==\"--start-function\":\n mpSession.startFunction = arg\n elif opt==\"-l\" or opt==\"--listen\":\n mpSession.listen = True\n mpSession.listenRoot = os.path.abspath(arg)\n elif opt==\"--port\":\n mpSession.listenPort = int(arg)\n mpSession.WlistenPort = int(arg)\n elif opt==\"--icon\":\n mpSession.icon = arg\n elif opt==\"-w\" or opt==\"--webdav-listen\":\n mpSession.Wlisten = True\n mpSession.WRoot = os.path.abspath(arg)\n elif opt == \"-f\" or opt== \"--input-file\":\n mpSession.fileInput = arg\n elif opt == \"-e\" or opt== \"--embed\":\n mpSession.embeddedFilePath = os.path.abspath(arg)\n elif opt==\"-t\" or opt==\"--template\":\n mpSession.template = arg\n elif opt == \"--listtemplates\":\n help.printTemplatesUsage(BANNER, sys.argv[0])\n sys.exit(0)\n elif opt==\"-q\" or opt==\"--quiet\":\n logLevel = \"WARN\"\n mpSession.logLevel = \"WARN\"\n elif opt==\"-p\" or opt==\"--print\":\n mpSession.printFile = True\n elif opt == \"--dde\":\n if sys.platform == \"win32\":\n mpSession.ddeMode = True\n elif opt == \"--run\":\n if sys.platform == \"win32\":\n mpSession.runTarget = os.path.abspath(arg)\n elif opt == \"--run-visible\":\n if sys.platform == \"win32\":\n mpSession.runVisible = True\n elif opt == \"--force-yes\":\n mpSession.forceYes = True\n elif opt==\"--uac-bypass\":\n mpSession.uacBypass = True\n elif opt == \"--unicode-rtlo\":\n mpSession.unicodeRtlo = arg\n elif opt in (\"-G\", \"--generate\"):\n mpSession.outputFilePath = os.path.abspath(arg)\n elif opt == \"--listformats\":\n help.printAvailableFormats(BANNER)\n sys.exit(0)\n elif opt==\"-h\" or opt==\"--help\":\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n else:\n if MP_TYPE == \"Pro\":\n arg_mgt_pro.processProArg(opt, arg, mpSession, BANNER)\n else:\n help.printUsage(BANNER, sys.argv[0])\n sys.exit(0)\n\n if logLevel == \"INFO\":\n os.system('cls' if os.name == 'nt' else 'clear')\n\n # Logging\n logging.basicConfig(level=getattr(logging, 
logLevel),format=\"%(message)s\", handlers=[utils.ColorLogFiler()])\n\n\n logging.info(colored(BANNER, 'green'))\n\n if MP_TYPE == \"Pro\":\n if mpSession.communityMode:\n logging.warning(\" [!] Running in community mode (pro features not applied)\")\n MP_TYPE=\"Community\"\n else:\n arg_mgt_pro.verify(mpSession)\n # If no argument just suggest to use help\n if len(argv) == 0:\n logging.info(\" [+] Thank you for using MacroPack Pro! \")\n logging.info(\" [+] To get some help: \\n\\t %s --help \\n\\t %s --builder \\n\" % (os.path.basename(utils.getRunningApp()), os.path.basename(utils.getRunningApp())))\n sys.exit(0)\n\n logging.info(\" [+] Preparations...\")\n\n # check input args\n if mpSession.fileInput is None:\n # Argument not supplied, try to get file content from stdin\n if not os.isatty(0): # check if something is being piped\n logging.info(\" [-] Waiting for piped input feed...\")\n mpSession.stdinContent = sys.stdin.readlines()\n # Close Stdin pipe, so we can call input() later without triggering EOF\n #sys.stdin.close()\n if sys.platform == \"win32\":\n sys.stdin = open(\"conIN$\")\n else:\n sys.stdin = sys.__stdin__\n\n else:\n if not os.path.isfile(mpSession.fileInput):\n logging.error(\" [!] ERROR: Could not find %s!\" % mpSession.fileInput)\n sys.exit(2)\n else:\n logging.info(\" [-] Input file path: %s\" % mpSession.fileInput)\n\n\n \n \n # Check output file format\n if mpSession.outputFilePath:\n if not os.path.isdir(os.path.dirname(mpSession.outputFilePath)):\n logging.error(\" [!] Could not find output folder %s.\" % os.path.dirname(mpSession.outputFilePath))\n sys.exit(2)\n \n if mpSession.outputFileType == MSTypes.UNKNOWN:\n logging.error(\" [!] %s is not a supported extension. Use --listformats to view supported MacroPack formats.\" % os.path.splitext(mpSession.outputFilePath)[1])\n sys.exit(2)\n else:\n logging.info(\" [-] Target output format: %s\" % mpSession.outputFileType)\n elif not mpSession.listen and not mpSession.Wlisten and mpSession.runTarget is None and (MP_TYPE != \"Pro\" or mpSession.dcomTarget is None):\n logging.error(\" [!] You need to provide an output file! (get help using %s -h)\" % os.path.basename(utils.getRunningApp()))\n sys.exit(2)\n\n\n if not mpSession.isTrojanMode:\n # verify that output file does not already exist\n if os.path.isfile(mpSession.outputFilePath):\n logging.error(\" [!] ERROR: Output file %s already exist!\" % mpSession.outputFilePath)\n sys.exit(2)\n\n #Create temporary folder\n logging.info(\" [-] Temporary working dir: %s\" % working_directory)\n if not os.path.exists(working_directory):\n os.makedirs(working_directory)\n\n try:\n # Create temporary work file.\n if mpSession.ddeMode or mpSession.template or (mpSession.outputFileType not in MSTypes.VB_FORMATS+[MSTypes.VBA] and not mpSession.htaMacro):\n inputFile = os.path.join(working_directory, \"command.cmd\")\n else:\n inputFile = os.path.join(working_directory, utils.randomAlpha(9)) + \".vba\"\n if mpSession.stdinContent is not None:\n import time\n time.sleep(0.4) # Needed to avoid some weird race condition\n logging.debug(\" [-] Store std input in file...\")\n f = open(inputFile, 'w')\n f.writelines(mpSession.stdinContent)\n f.close()\n else:\n # Create temporary work file\n if mpSession.fileInput is not None:\n # Check there are not binary chars in input fil \n if utils.isBinaryString(open(mpSession.fileInput, 'rb').read(1024)):\n logging.error(\" [!] ERROR: Invalid format for %s. 
Input should be text format containing your VBA script.\" % mpSession.fileInput)\n logging.info(\" [+] Cleaning...\")\n if os.path.isdir(working_directory):\n shutil.rmtree(working_directory)\n sys.exit(2)\n logging.debug(\" [-] Store input file...\")\n shutil.copy2(mpSession.fileInput, inputFile)\n \n if os.path.isfile(inputFile): \n logging.debug(\" [-] Temporary input file: %s\" % inputFile)\n \n \n # Edit outputfile name to spoof extension if unicodeRtlo option is enabled\n if mpSession.unicodeRtlo:\n # Reminder; mpSession.unicodeRtlo contains the extension we want to spoof, such as \"jpg\"\n logging.info(\" [+] Inject %s false extension with unicode RTLO\" % mpSession.unicodeRtlo)\n # Separate document path and extension\n (fileName, fileExtension) = os.path.splitext(mpSession.outputFilePath)\n \n logging.info(\" [-] Extension %s \" % fileExtension)\n # Append unicode RTLO to file name\n fileName += '\\u202e' \n # Append extension to spoof in reverse order\n fileName += '\\ufeff' + mpSession.unicodeRtlo[::-1] # Prepend invisible space so filename does not end with flagged extension\n # Append file extension\n fileName += fileExtension \n mpSession.outputFilePath = fileName\n logging.info(\" [-] File name modified to: %s\" % mpSession.outputFilePath)\n \n\n # Retrieve the right payload builder\n if mpSession.outputFileType != MSTypes.UNKNOWN:\n if MP_TYPE == \"Pro\" and not mpSession.communityMode:\n payloadBuilder = PayloadBuilderFactoryPro().getPayloadBuilder(mpSession)\n else:\n payloadBuilder = PayloadBuilderFactory().getPayloadBuilder(mpSession)\n # Build payload\n if payloadBuilder is not None:\n payloadBuilder.run()\n if MP_TYPE == \"Pro\":\n generator = ContainerGenerator(mpSession)\n generator.run()\n\n #run com attack\n if mpSession.runTarget:\n generator = ComGenerator(mpSession)\n generator.run()\n\n if MP_TYPE == \"Pro\":\n #run dcom attack\n if mpSession.dcom:\n generator = DcomGenerator(mpSession)\n generator.run()\n\n # Activate Web server\n if mpSession.listen:\n listener = ListenServer(mpSession)\n listener.run()\n\n # Activate WebDav server\n if mpSession.Wlisten:\n Wlistener = WListenServer(mpSession)\n Wlistener.run()\n\n except Exception:\n logging.exception(\" [!] Exception caught!\")\n except KeyboardInterrupt:\n logging.error(\" [!] 
Keyboard interrupt caught!\")\n\n\n logging.info(\" [+] Cleaning...\")\n if os.path.isdir(working_directory):\n shutil.rmtree(working_directory)\n\n logging.info(\" Done!\\n\")\n\n\n sys.exit(0)\n\n\nif __name__ == '__main__':\n # check if running from explorer, if yes restart from cmd line\n running_from = psutil.Process(os.getpid()).parent().parent().name()\n if running_from == 'explorer.exe':\n os.system(\"cmd.exe /k \\\"%s\\\"\" % utils.getRunningApp())\n main(sys.argv[1:])\n","repo_name":"sevagas/macro_pack","sub_path":"src/macro_pack.py","file_name":"macro_pack.py","file_ext":"py","file_size_in_byte":14046,"program_lang":"python","lang":"en","doc_type":"code","stars":1970,"dataset":"github-code","pt":"73"} +{"seq_id":"21197204540","text":"import os\nimport torch\nimport PIL.Image as Image\nfrom tqdm import tqdm\nfrom model import Net\nfrom data_loader import transform\nfrom utils import load_state, load_model as ld_model\n\n\ndef load_model(model, checkpoint_path):\n state = load_state(checkpoint_path)\n ld_model(model, state)\n\n\ndef pil_loader(path):\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n return img.convert('RGB')\n\n\ndef do_test(checkpoint_path, data_path, out_path):\n model = Net()\n load_model(model, checkpoint_path)\n # model.cuda()\n model.eval()\n\n test_dir = os.path.join(data_path, 'test_images')\n output_file = open(out_path, \"w\")\n output_file.write(\"Filename,ClassId\\n\")\n for f in tqdm(os.listdir(test_dir)):\n if 'ppm' in f:\n data = transform(pil_loader(test_dir + '/' + f))\n data = data.view(1, data.size(0), data.size(1), data.size(2))\n output = model(data)\n pred = output.data.max(1, keepdim=True)[1]\n\n file_id = f[0:5]\n output_file.write(\"%s,%d\\n\" % (file_id, pred))\n\n output_file.close()\n\n\nif __name__ == '__main__':\n do_test('checkpoints/stn7/epoch_20.pth', 'data/nyucvfall2019', 'submission.csv')\n","repo_name":"endvroy/CV-hw2","sub_path":"eval_test.py","file_name":"eval_test.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"922599292","text":"# https://leetcode.cn/problems/partition-list/\n# 给你一个链表的头节点 head 和一个特定值 x ,请你对链表进行分隔,使得所有 小于 x 的节点都出现在 大于或等于 x 的节点之前。\n#\n# 你应当 保留 两个分区中每个节点的初始相对位置。\nfrom typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n if not head:\n return head\n small = ListNode(0)\n large = ListNode(0)\n small_head = small\n large_head = large\n while head:\n if head.val < x:\n small.next = head\n small = small.next\n else:\n large.next = head\n large = large.next\n head = head.next\n large.next = None\n small.next = large_head.next\n return small_head.next\n\n\nif __name__ == '__main__':\n # test\n head = ListNode(1)\n head.next = ListNode(4)\n head.next.next = ListNode(3)\n head.next.next.next = ListNode(2)\n head.next.next.next.next = ListNode(5)\n head.next.next.next.next.next = ListNode(2)\n solution = Solution()\n partitioned_list = solution.partition(head, 3)\n while partitioned_list:\n print(partitioned_list.val)\n partitioned_list = 
partitioned_list.next","repo_name":"jameskaron/LeetCode","sub_path":"labuladong算法小抄/86.分隔链表.py","file_name":"86.分隔链表.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29942389231","text":"import copy\n\nimport tensorflow.keras.initializers as init\nfrom tensorflow.keras import Sequential, Input, Model\nfrom tensorflow.keras.layers import Concatenate, Add, LayerNormalization, Dense, Input, Softmax, ReLU, Conv2D, MaxPool2D, AveragePooling2D, BatchNormalization, Flatten, LeakyReLU, Dropout\nfrom tensorflow.keras.optimizers import Adam, SGD\nfrom tensorflow.keras.losses import mean_squared_error\nimport tensorflow as tf\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.models import clone_model\n\n\nclass ValueNetwork(Sequential):\n\n def __init__(self, input_size, h_layer, reg_coef=0.01):\n super(ValueNetwork, self).__init__()\n self.add(Dense(h_layer, activation='relu', kernel_regularizer=regularizers.l2(reg_coef), input_dim=input_size))\n self.add(Dense(1, kernel_regularizer=regularizers.l2(reg_coef)))\n\n\nclass PolicyNetwork(Sequential):\n def __init__(self, input_size, h_layer, output_dim):\n super(PolicyNetwork, self).__init__()\n self.add(Dense(h_layer, activation='relu', input_dim=input_size))\n self.add(Dense(output_dim))\n\nclass ActorNetwork(Sequential):\n\n def __init__(self, input_size, h_layer, output_dim):\n super(ActorNetwork, self).__init__()\n self.add(Dense(h_layer, activation='relu', input_dim=input_size))\n self.add(Dense(output_dim))\n self.add(Softmax())\n\ndef value_network(inp_dim, h_layers, reg_coef=0.01):\n model = Sequential()\n for i in range(len(h_layers)):\n if i==0:\n model.add(Dense(h_layers[i], input_shape=(inp_dim,), kernel_regularizer=regularizers.l2(reg_coef)))\n model.add(ReLU())\n else:\n model.add(Dense(h_layers[i],kernel_regularizer=regularizers.l2(reg_coef)))\n model.add(ReLU())\n\n model.add(Dense(1, kernel_regularizer=regularizers.l2(reg_coef)))\n\n return model\n\ndef conv_value_network(inp_shape, filter_layers):\n model = Sequential()\n for i in range(len(filter_layers)):\n if i == 0:\n model.add(Conv2D(filter_layers[i], kernel_size=(3, 3), padding='same', use_bias=True))\n model.add(LeakyReLU())\n\n else:\n model.add(Conv2D(filter_layers[i], (3, 3), padding='same', use_bias=True))\n model.add(LeakyReLU())\n\n model.add(Flatten())\n model.add(Dense(32))\n model.add(LeakyReLU())\n model.add(Dense(1))\n\n\n return model\n\ndef state_to_action_net(inp_shape, filter_layers):\n model = Sequential(name='live')\n for i in range(len(filter_layers)):\n if i == 0:\n model.add(\n Conv2D(filter_layers[i], kernel_size=4, padding='same', use_bias=False, input_shape=inp_shape, dtype=tf.float64))\n model.add(ReLU())\n\n else:\n model.add(Conv2D(filter_layers[i], kernel_size=4, padding='same', use_bias=False, dtype=tf.float64))\n model.add(ReLU())\n model.add(Flatten())\n model.add(Dense(7, use_bias=True))\n\n\n return model\n\ndef state_to_action_net_zero(inp_shape, filter_layers):\n model = Sequential(name='target')\n for i in range(len(filter_layers)):\n if i == 0:\n model.add(\n Conv2D(filter_layers[i], kernel_size=4, padding='same', use_bias=False, input_shape=inp_shape))\n model.add(LayerNormalization())\n model.add(ReLU())\n\n else:\n model.add(Conv2D(filter_layers[i], kernel_size=4, padding='same', use_bias=False))\n model.add(LayerNormalization())\n model.add(ReLU())\n model.add(Flatten())\n model.add(Dropout(0.1))\n 
model.add(Dense(128, use_bias=True))\n    model.add(Dropout(0.1))\n    model.add(Dense(7, use_bias=True, kernel_initializer=init.zeros()))\n\n    return model\n\ndef res_block(inputs, filters, conv_size):\n    x = inputs\n    for i in range(len(filters)):\n        if i == len(filters) - 1:\n            x = Conv2D(filters[i], conv_size, padding='same', use_bias=False)(x)\n            x = BatchNormalization()(x)\n        else:\n            x = Conv2D(filters[i], conv_size, padding='same', use_bias=False)(x)\n            x = BatchNormalization()(x)\n            x = ReLU()(x)\n    x = Add()([x, inputs])\n    x = ReLU()(x)\n\n    return x\n\ndef res_model(initial_filters, num_res, res_struc, dense):\n    inputs = Input(shape=(6, 7, 3))\n\n    for i in range(len(initial_filters)):\n        if i == 0:\n            x = Conv2D(initial_filters[i], 3, padding='same')(inputs)\n            x = BatchNormalization()(x)\n            x = ReLU()(x)\n        else:\n            x = Conv2D(initial_filters[i], 3, padding='same')(x)\n            x = BatchNormalization()(x)\n            x = ReLU()(x)\n    for j in range(num_res):\n        x = res_block(x, res_struc, 3)\n\n    x = Conv2D(1, 1, padding='same')(x)\n    x = BatchNormalization()(x)\n    x = ReLU()(x)\n    x = Flatten()(x)\n    for k in range(len(dense)):\n        x = Dense(dense[k])(x)\n        x = ReLU()(x)\n    x = Dense(7)(x)\n    return Model(inputs, x)\n\ndef res_model_zero(initial_filters, num_res, res_struc, dense):\n    inputs = Input(shape=(6, 7, 3))\n\n    for i in range(len(initial_filters)):\n        if i == 0:\n            x = Conv2D(initial_filters[i], 3, padding='same')(inputs)\n            x = LayerNormalization()(x)\n            x = ReLU()(x)\n        else:\n            x = Conv2D(initial_filters[i], 3, padding='same')(x)\n            x = LayerNormalization()(x)\n            x = ReLU()(x)\n    for j in range(num_res):\n        x = res_block(x, res_struc, 3)\n\n    x = Conv2D(1, 1, padding='same')(x)\n    x = LayerNormalization()(x)\n    x = Flatten()(x)\n    for k in range(len(dense)):\n        x = Dense(dense[k])(x)\n        x = ReLU()(x)\n    x = Dense(7, kernel_initializer=init.zeros())(x)\n    return Model(inputs, x)\n\n\ndef feature_extraction(inputs, conv_filter_numbers, conv_filter_size):\n    x = inputs\n    for i in range(len(conv_filter_numbers)):\n        x = Conv2D(conv_filter_numbers[i], conv_filter_size, padding='same')(x)\n        x = ReLU()(x)\n\n    return x\n\ndef action_block(inputs, hidden_layers, zero=False):\n    x = inputs\n    for i in range(len(hidden_layers)):\n        x = Dense(hidden_layers[i])(x)\n        x = Dropout(0.1)(x)\n        x = ReLU()(x)\n\n    if not zero:\n        x = Dense(1)(x)\n    else:\n        x = Dense(1, kernel_initializer=init.zeros())(x)\n    return x\n\n\n\n\ndef gpu_net(feature_extraction_filters, feature_extraction_kernel_size, res_block_structure, res_block_filter_size, num_res_blocks, action_net_layers, zero=False):\n    state_input = Input(shape=(6, 7, 3))\n    action_input = Input(shape=(7,))\n    x = state_input\n    x = feature_extraction(x, feature_extraction_filters, feature_extraction_kernel_size)\n    for i in range(num_res_blocks):\n        x = res_block(x, res_block_structure, res_block_filter_size)\n    x = Conv2D(1, 1)(x)\n    x = Flatten()(x)\n    x = Concatenate()([x, action_input])\n    x = action_block(x, action_net_layers, zero=zero)\n\n    model = Model(inputs=[state_input, action_input], outputs=x)\n\n    return model\n","repo_name":"BowerJames/TicTacToe","sub_path":"Networks.py","file_name":"Networks.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"346705343","text":"title = 'Chaturbate'\nimage = 'chaturbate-icon.png'\nart = 'chaturbate-fanart.png'\norder = 11\n\n\nclass Site:\n    def __init__(self, params):\n        import re\n        from addon import Addon\n        from addondict import AddonDict\n        from BeautifulSoup import 
BeautifulSoup, SoupStrainer, Comment\n\n a = Addon()\n site = self.__module__\n mode = params['mode']\n\n base_url = 'https://chaturbate.com'\n home_url = base_url\n\n false_positives = ['#']\n\n if mode == 'main':\n item_list = [{'site': site, 'mode': 'list', 'title': a.language(30021), 'content': '',\n 'url': home_url, 'cover_url': a.image('featuredcams.png', image), 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'bygender', 'title': a.language(30017), 'content': '',\n 'cover_url': a.image('bygender.png', image), 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'byage', 'title': a.language(30018), 'content': '',\n 'cover_url': a.image('byage.png', image), 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'byregion', 'title': a.language(30019), 'content': '',\n 'cover_url': a.image('byregion.png', image), 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'bystatus', 'title': a.language(30020), 'content': '',\n 'cover_url': a.image('bystatus.png', image), 'backdrop_url': a.art(), 'type': 3}]\n item_list.extend(a.favs_hist_menu(site))\n item_list.extend(a.extended_menu())\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'bygender':\n item_list = [{'site': site, 'mode': 'list', 'title': a.language(30022), 'content': '',\n 'url': base_url + '/female-cams/', 'cover_url': a.image('femalecams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30023), 'content': '',\n 'url': base_url + '/male-cams/', 'cover_url': a.image('malecams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30024), 'content': '',\n 'url': base_url + '/couple-cams/', 'cover_url': a.image('couplecams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30025), 'content': '',\n 'url': base_url + '/transsexual-cams/', 'cover_url': a.image('transcams.png', image),\n 'backdrop_url': a.art(), 'type': 3}]\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'byage':\n item_list = [{'site': site, 'mode': 'list', 'title': a.language(30026), 'content': '',\n 'url': base_url + '/teen-cams/', 'cover_url': a.image('teencams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30027), 'content': '',\n 'url': base_url + '/18to21-cams/', 'cover_url': a.image('18to21cams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30028), 'content': '',\n 'url': base_url + '/20to30-cams/', 'cover_url': a.image('20to30cams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30029), 'content': '',\n 'url': base_url + '/30to50-cams/', 'cover_url': a.image('30to50cams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30030), 'content': '',\n 'url': base_url + '/mature-cams/', 'cover_url': a.image('maturecams.png', image),\n 'backdrop_url': a.art(), 'type': 3}]\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'byregion':\n item_list = [{'site': site, 'mode': 'list', 'title': a.language(30031), 'content': '',\n 'url': base_url + '/north-american-cams/', 'cover_url': a.image('north-americancams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30032), 'content': '',\n 'url': base_url + '/other-region-cams/', 'cover_url': 
a.image('other-regioncams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30033), 'content': '',\n 'url': base_url + '/euro-russian-cams/', 'cover_url': a.image('euro-russiancams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30034), 'content': '',\n 'url': base_url + '/philippines-cams/', 'cover_url': a.image('philippinescams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30035), 'content': '',\n 'url': base_url + '/asian-cams/', 'cover_url': a.image('asiancams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30036), 'content': '',\n 'url': base_url + '/south-american-cams/', 'cover_url': a.image('south-americancams.png', image),\n 'backdrop_url': a.art(), 'type': 3}]\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'bystatus':\n item_list = [{'site': site, 'mode': 'list', 'title': a.language(30037), 'content': '',\n 'url': base_url + '/exhibitionist-cams/', 'cover_url': a.image('exhibitionistcams.png', image),\n 'backdrop_url': a.art(), 'type': 3},\n {'site': site, 'mode': 'list', 'title': a.language(30038), 'content': '',\n 'url': base_url + '/hd-cams/', 'cover_url': a.image('hdcams.png', image),\n 'backdrop_url': a.art(), 'type': 3}]\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'list':\n if params.get('content', '') == 'goto':\n last_item = re.search('page=([0-9]+)', params['url'])\n if last_item:\n last_item = int(last_item.group(1))\n else:\n last_item = 10000\n item = a.page_input(last_item)\n if item:\n params['url'] = re.sub('page=[0-9]+', 'page=' + str(item), params['url']).replace(' ', '+')\n else:\n exit(1)\n html = a.get_page(params['url'])\n soup = BeautifulSoup(html, parseOnlyThese=SoupStrainer('div', {'class': 'c-1 endless_page_template'}))\n item_list = []\n params['mode'] = 'play'\n params['content'] = 'episodes'\n params['type'] = 0\n params['context'] = 0\n params['duration'] = ''\n params['sub_site'] = site\n if soup:\n ul = soup.find('ul', {'class': 'list'})\n if ul:\n addondict = AddonDict(0).update(params)\n for item in ul.findAll('li'):\n _dict = addondict.copy()\n clip_link = item.find('a')\n if clip_link:\n url = clip_link.get('href')\n if not url.startswith('http://'):\n url = base_url + url\n _dict['url'] = url\n ctitle = ''\n cage = ''\n cname = ''\n ccams = ''\n details = item.find('div', {'class': 'details'})\n if details:\n temp = details.find('a')\n if temp:\n cname = str(temp.contents[0])\n temp = details.find('span', {'class': re.compile('age.*')})\n if temp:\n cage = temp.string.encode('utf-8')\n temp = details.find('li', {'class': 'cams'})\n if temp:\n ccams = str(temp.contents[0])\n temp = details.find('li', {'title': True})\n if temp:\n ctitle = temp.get('title').encode('UTF-8')\n if cname:\n usetitle = '%s [%syr, %s] %s' % (cname, cage, ccams, ctitle)\n _dict['title'] = usetitle\n _dict['tvshowtitle'] = _dict['title']\n _dict['originaltitle'] = _dict['title']\n img = item.find('img')\n if img:\n img = img.get('src')\n if img.startswith('//'):\n img = 'http:' + img\n else:\n img = ''\n _dict['cover_url'] = a.image(img)\n _dict['thumb_url'] = _dict['cover_url']\n _dict['poster'] = _dict['cover_url']\n item_list.extend([_dict])\n\n pages = BeautifulSoup(html, parseOnlyThese=SoupStrainer('ul', {'class': 'paging'}))\n if pages:\n previouspage = pages.find('a', {'class': 
re.compile('prev.*')})\n nextpage = pages.find('a', {'class': re.compile('next.*')})\n lastpage = pages.find('span', {'class': 'endless_separator'})\n if lastpage:\n lastpage = lastpage.findNext('a')\n\n if previouspage:\n previouspage = previouspage.get('href').replace(' ', '+')\n if previouspage != '#':\n if not previouspage.startswith('http://'):\n previouspage = base_url + previouspage\n item_list.extend([{'site': site, 'mode': 'list', 'url': previouspage, 'content': params['content'],\n 'title': a.language(30017, True), 'cover_url': a.image('previous.png', image),\n 'backdrop_url': a.art(), 'type': 3}])\n if nextpage:\n nextpage = nextpage.get('href').replace(' ', '+')\n if nextpage != '#':\n if not nextpage.startswith('http://'):\n nextpage = base_url + nextpage\n item_list.extend([{'site': site, 'mode': 'list', 'url': nextpage, 'content': params['content'],\n 'title': a.language(30018, True), 'cover_url': a.image('next.png', image),\n 'backdrop_url': a.art(), 'type': 3}])\n if lastpage:\n lastpage = lastpage.get('href').replace(' ', '+')\n if lastpage != '#':\n if not lastpage.startswith('http://'):\n lastpage = base_url + lastpage\n item_list.extend([{'site': site, 'mode': 'list', 'url': lastpage, 'content': 'goto',\n 'title': a.language(30019, True), 'cover_url': a.image('goto.png', image),\n 'backdrop_url': a.art(), 'type': 3}])\n\n a.add_items(item_list)\n a.end_of_directory()\n\n elif mode == 'play':\n html = a.get_page(params['url'])\n link = re.search('html \\+= \"src=\\'(.+?)\\'', html)\n if link:\n from playback import Playback\n Playback().play_this(link.group(1), params['title'], params['cover_url'], a.common.usedirsources())\n else:\n a.alert(a.language(30904, True), sound=False)\n","repo_name":"yam4me/repository.openeleq","sub_path":"plugin.program.openeleq.tools/resources/xxx/plugin.video.adult.3xz/resources/lib/sites/chaturbate.py","file_name":"chaturbate.py","file_ext":"py","file_size_in_byte":13205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71807868396","text":"from flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext import restful\nfrom flask.ext.httpauth import HTTPBasicAuth\n\nimport os\nfrom config import basedir\n\n# 自定义jinja2表达式,避免跟其他脚本语言冲突\nclass CustomFlask(Flask):\n jinja_options = Flask.jinja_options.copy()\n jinja_options.update(dict(\n block_start_string='<%',\n block_end_string='%>',\n variable_start_string='%%',\n variable_end_string='%%',\n comment_start_string='<#',\n comment_end_string='#>',\n ))\n\napp = CustomFlask(__name__)\napp.config.from_object('config')\n# flask-sqlalchemy\ndb = SQLAlchemy(app)\n# flask-restful\napi = restful.Api(app)\n# flask-httpauth\nauth = HTTPBasicAuth()\n\nfrom app import views\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n return response\n\nif not app.debug:\n import logging\n from logging.handlers import RotatingFileHandler\n file_handler = RotatingFileHandler('testlog.log', 'a', 1 * 1024 * 1024, 10)\n file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))\n app.logger.setLevel(logging.INFO)\n file_handler.setLevel(logging.INFO)\n app.logger.addHandler(file_handler)\n app.logger.info('log 
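The listing and pager parsing in the Kodi site-scraper record above targets the BeautifulSoup 3 API (`parseOnlyThese=`, `findAll`), which BeautifulSoup 4 renamed. A minimal sketch of the same pagination idea against the bs4 API; the markup and base URL here are invented for illustration, only the parsing calls mirror the record:

import re
from bs4 import BeautifulSoup, SoupStrainer

# Hypothetical markup standing in for a listing page with a pager.
html = '''
<ul class="paging">
  <li><a class="prev_page" href="/cams/?page=1">prev</a></li>
  <li><a class="next_page" href="/cams/?page=3">next</a></li>
</ul>
'''

# bs4 takes the strainer via parse_only= (the record uses BeautifulSoup 3's
# parseOnlyThese= keyword) and spells findAll as find_all.
pager = BeautifulSoup(html, 'html.parser',
                      parse_only=SoupStrainer('ul', {'class': 'paging'}))

base_url = 'https://example.com'   # placeholder, not the site in the record

next_link = pager.find('a', {'class': re.compile('next.*')})
if next_link and next_link.get('href') != '#':
    href = next_link['href'].replace(' ', '+')   # same normalization as the record
    if not href.startswith('http'):
        href = base_url + href
    print(href)   # -> https://example.com/cams/?page=3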
startup')\n","repo_name":"51azxc/blogbyflask","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"13703569395","text":"\"\"\"\nTesting views in the bag app\n\"\"\"\n\nfrom django.test import TestCase\n\nfrom products.models import Product, Category\n\n\nclass TestBagViews(TestCase):\n \"\"\"\n Test views in bag app\n \"\"\"\n def setUp(self):\n \"\"\"\n Set up info needed for testing\n \"\"\"\n\n self.category1 = Category.objects.create(\n name='testCategory',\n friendly_name='TestCategory',\n )\n\n self.product1 = Product.objects.create(\n category=self.category1,\n name='testProduct',\n description='test',\n price='75',\n image='test',\n )\n\n self.quantity = 1\n\n self.bag_with_products = [{\n 'product': str(self.product1.id),\n 'quantity': int(self.quantity),\n 'total': 123\n }]\n\n def test_view_bag_view(self):\n \"\"\"\n Test that bag is viewable\n \"\"\"\n response = self.client.get('/bag/')\n self.assertTrue(response.status_code, 200)\n\n def test_add_to_bag_view(self):\n \"\"\"\n Test that item can be added to bag\n \"\"\"\n bag_data = {\n 'product': Product.objects.get(pk=self.product1.id),\n 'quantity': int(self.quantity),\n 'redirect_url': f'/products/{self.product1.id}/'\n }\n\n response = self.client.post(f'/bag/add/{self.product1.id}/', bag_data)\n self.assertEqual(response.status_code, 302)\n","repo_name":"AmyOShea/MS4-ARTstop","sub_path":"bag/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"31339623577","text":"from dotenv import load_dotenv\nfrom telegram import Update\nimport os\nimport requests\nimport json\nfrom PIL import Image\n\nuser_name = '@dsxservicosbot'\n \nload_dotenv()\n\nclass TelegramBot():\n def __init__(self):\n TOKEN = os.getenv(\"API_KEY\")\n self.url = f\"https://api.telegram.org/bot{TOKEN}/\"\n \n\n def start(self):\n print(\"Inicializando bot...\")\n update_id = None\n while True:\n update = self.get_message(update_id)\n messages = update['result']\n if messages:\n for message in messages:\n try:\n update_id = message['update_id']\n chat_id = message['message']['from']['id']\n message_text = message['message']['text']\n answer_bot = self.create_answer(message_text)\n self.send_answer(chat_id, answer_bot)\n except:\n pass\n\n def get_message(self, update_id):\n link_request = f\"{self.url}getUpdates?timeout=1000\"\n\n if update_id:\n link_request = f\"{self.url}getUpdates?timeout=1000&offset={update_id + 1}\"\n\n result = requests.get(link_request)\n return json.loads(result.content)\n \n def create_answer(self, message_text):\n if message_text in ['validar']: \n return 'Olá, aguardando o envio da planilha'\n \n def send_answer(self, chat_id, answer):\n link_to_send = f'{self.url}sendMessage?chat_id={chat_id}&text={answer}'\n requests.get(link_to_send)","repo_name":"LastHasagi/Telegram-bot","sub_path":"src/PEARL.py","file_name":"PEARL.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40319343923","text":"from ytd import *\nclass Queue:\n def __init__(self):\n self.current=None\n self.lista=[]\n self.currname=None\n self.next=None\n\n async def updateplay(self,loop):\n if self.lista:\n self.current=self.lista.pop(0)\n if self.next is not None:\n 
self.currname=self.next\n else:\n self.currname=await YTDLSource.from_url(self.current,loop=loop)\n\n else:\n self.current=None\n self.next=None\n\n async def downnex(self,loop):\n if self.lista:\n self.next=await YTDLSource.from_url(self.lista[0],loop=loop)\n\n def list(self):\n temp=''\n temp+=\"Obecnie grane jest {}\\n\".format(self.current)\n temp+=\"W kolejce jest\\n\"\n for a in self.lista:\n temp+=a+\"\\n\"\n return temp\n\n def add(self,song):\n self.lista.append(song)\n \n def remove(self,i):\n if int(i)==0:\n self.next=None\n del self.lista[int(i)]\n \n \n\n","repo_name":"Wesenheit/Szefu","sub_path":"Queue.py","file_name":"Queue.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3824531024","text":"# encoding: utf-8\n\n# 独立使用django的model\nimport sys\nimport os\n# 获取当前文件的路径,即Mxshop/db_tools\npwd = os.path.dirname(os.path.realpath(__file__))\n# 往上一级,回到根目录,即/Mxshop\nsys.path.append(pwd + \"../\")\n# django环境,需先设置,才能使用django相关\n# os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"VueDjangoFrameWorkShop.settings\")\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"Mxshop.settings\")\n\nimport django\n# 初始化django\ndjango.setup()\n\n# 这行代码必须在初始化django之后\nfrom goods.models import GoodsCategory\n\nfrom db_tools.data.category_data import row_data\n\n# 一级分类\nfor lev1_cat in row_data:\n lev1_intance = GoodsCategory()\n lev1_intance.code = lev1_cat[\"code\"]\n lev1_intance.name = lev1_cat[\"name\"]\n lev1_intance.category_type = 1\n lev1_intance.save()\n\n # 该一级分类之下的二级分类\n for lev2_cat in lev1_cat[\"sub_categorys\"]:\n lev2_intance = GoodsCategory()\n lev2_intance.code = lev2_cat[\"code\"]\n lev2_intance.name = lev2_cat[\"name\"]\n lev2_intance.category_type = 2\n lev2_intance.parent_category = lev1_intance\n lev2_intance.save()\n\n # 该二级分类之下的三级分类\n for lev3_cat in lev2_cat[\"sub_categorys\"]:\n lev3_intance = GoodsCategory()\n lev3_intance.code = lev3_cat[\"code\"]\n lev3_intance.name = lev3_cat[\"name\"]\n lev3_intance.category_type = 3\n lev3_intance.parent_category = lev2_intance\n lev3_intance.save()\n\n","repo_name":"Alnan/Mxshop","sub_path":"Mxshop/db_tools/import_category_data.py","file_name":"import_category_data.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"35587286988","text":"import pygsheets\n\nURL = 'https://docs.google.com/spreadsheets/d/1Is-Jf1gZJRj3bEw6Tlaby8uOuXriQCDL8meFgu1jZ4I/edit#gid=0'\n\nPEOPLE = {'BB': 0, \"Rachel\": 1, \"Rosie\": 2, \"Shaz\": 3, \"Nic\": 4, \"Oli\": 5}\nHEAT_LIST = [1.86, 0, 2.38 , 1.28, 1.85, 1.85]\nkWh_cst = 0.303545125229\n \nclient_var = pygsheets.authorize(service_account_file='firm-source-357906-0d2c70f00dc1.json') #allows access to drive\n\ndef sheet_id(spread_sheet_url):\n \"\"\"Returns the sheet id\"\"\"\n split_url = spread_sheet_url.split('/d/')\n url_key = split_url[1].split('/')\n return url_key[0]\n\ndef return_sheet(id_key):\n \"\"\"Opens and returns the worksheet\"\"\"\n sheet = client_var.open_by_key(id_key) #opens the sheet based on id\n wks = sheet.worksheet_by_title('Main_sheet') #reads the file as a sheet\n entire_sheet = wks.range('A1:H50', returnas='matrix') #returns only the relative rows &cols\n #cleaned_values = [[item for item in unique_list if item]for unique_list in entire_sheet] #removes empty strings #Probably dont need\n return entire_sheet, wks\n\ndef power_dict_make(work_sheet):\n \"\"\"Returns a dic 
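The Queue record above hides resolver latency by downloading the next track's source while the current one plays: `downnex` fills `self.next` from the head of the list, and `updateplay` consumes it instead of resolving again. A framework-free sketch of that handoff, with a stubbed `resolve` standing in for `YTDLSource.from_url`:

import asyncio

async def resolve(url):
    # Stand-in for YTDLSource.from_url: pretend resolution takes a moment.
    await asyncio.sleep(0.1)
    return 'source(' + url + ')'

class PrefetchQueue:
    def __init__(self):
        self.pending = []        # URLs waiting to play (the record's lista)
        self.prefetched = None   # resolved source for pending[0] (the record's next)

    async def prefetch(self):
        # cf. downnex: resolve the head of the queue ahead of time
        if self.pending:
            self.prefetched = await resolve(self.pending[0])

    async def advance(self):
        # cf. updateplay: pop the next URL, reusing the prefetched source
        if not self.pending:
            self.prefetched = None
            return None
        url = self.pending.pop(0)
        source = self.prefetched if self.prefetched is not None else await resolve(url)
        self.prefetched = None
        return source

async def demo():
    q = PrefetchQueue()
    q.pending += ['song-a', 'song-b']
    await q.prefetch()          # resolve song-a while nothing is playing
    print(await q.advance())    # instant: reuses the prefetched source
    print(await q.advance())    # no prefetch ran, so this resolves on demand

asyncio.run(demo())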
containing everyone's ind pwr usage & the FN cost\"\"\"\n cost_dict = {\"BB\": 0, \"Rachel\": 0, \"Rosie\": 0, \"Shaz\": 0, \"Nic\": 0, \"Oli\": 0}\n FN_counter = 0\n for rows in work_sheet:\n if rows[1] != \"\" and rows[0] != \"Weeks\":\n FN_counter += 1\n\n FN_power_people = work_sheet[FN_counter]\n work_sheet[FN_counter].pop(0) #removes the an unwanted date element\n \n total_pp_cost = 0 #Total power cost from everyone's heaters\n for person in cost_dict.keys(): #Incrementing through the cost dict\n for peep in PEOPLE.items(): #Incrementing through peoples IDs\n if person == peep[0]: #Checks if the person match's the ID with key\n cost_dict[person] = kWh_cst* (float(FN_power_people[peep[1]]) - (14 * HEAT_LIST[peep[1]])) #Sets persons cost\n total_pp_cost += kWh_cst* (float(FN_power_people[peep[1]]) - (14 * HEAT_LIST[peep[1]]))\n \n FN_total_cost = float(FN_power_people[6])\n remainder_cost = FN_total_cost - total_pp_cost\n \n for person, current_cost in cost_dict.items(): #Loop adds on the base power cost\n cost_dict[person] = current_cost + (remainder_cost/6)\n \n return cost_dict\n\ndef pretty_print(cost_dict):\n \"\"\"Prints everything out nicely\"\"\"\n print(\"\\n==================\")\n print(f\"THIS MONTHS POWER\")\n for person, cost in cost_dict.items():\n print(f\"{person:6}: ${cost:.2f}\")\n\n print(\"==================\\n\")\n\n\n","repo_name":"Oliver-LK/flat-power-code","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13358428871","text":"import os\nimport sys\nimport shutil\nimport argparse\nimport logging\nimport time\nimport random\nimport numpy as np\nimport pandas as pd\n\nimport torch\nimport torch.optim as optim\nfrom torchvision import transforms\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import make_grid\n\n\n# 使用benchmark以启动CUDNN_FIND自动寻找最快的操作,当计算图不会改变的时候(每次输入形状相同,模型不��变)的情况下可以提高性能,反之则降低性能\ncudnn.benchmark = False\ncudnn.deterministic = True\nrandom.seed(1337)\nnp.random.seed(1337)\ntorch.manual_seed(1337)\ntorch.cuda.manual_seed(1337)\n\n\ndef sigmoid_rampup(current, rampup_length):\n \"\"\"Exponential rampup from https://arxiv.org/abs/1610.02242\"\"\"\n if rampup_length == 0:\n return 1.0\n else:\n current = np.clip(current, 0.0, rampup_length)\n phase = 1.0 - current / rampup_length\n return float(np.exp(-5.0 * phase * phase))\n\n\ndef linear_rampup(current, rampup_length):\n \"\"\"Linear rampup\"\"\"\n assert current >= 0 and rampup_length >= 0\n if current >= rampup_length:\n return 1.0\n else:\n return current / rampup_length\n\n\ndef cosine_rampdown(current, rampdown_length):\n \"\"\"Cosine rampdown from https://arxiv.org/abs/1608.03983\"\"\"\n assert 0 <= current <= rampdown_length\n return float(.5 * (np.cos(np.pi * current / rampdown_length) + 1))\n\n\n# 获取\ndef get_current_consistency_weight(epoch):\n # Consistency ramp-up from https://arxiv.org/abs/1610.02242\n return 1.0 * sigmoid_rampup(epoch, 30)\n\n\n# ema指的是老师模型根据学生模型进行调整参数\ndef update_ema_variables(model, ema_model, alpha, global_step):\n # Use the true average until the exponential average is more correct\n alpha = min(1 - 1 / (global_step + 1), alpha)\n for ema_param, param in zip(ema_model.parameters(), model.parameters()):\n ema_param.data.mul_(alpha).add_(1 - alpha, param.data)\n\n\n# dataset-defination\nfrom torch.utils.data import Dataset\nfrom PIL import 
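The functions just defined are the two moving parts of Mean Teacher: the consistency weight ramps up as exp(-5(1-t)^2) over the first 30 epochs, and the teacher's weights trail the student's with alpha = min(1 - 1/(step+1), 0.99). A quick numeric trace of both; the values follow directly from the formulas above:

import numpy as np

def sigmoid_rampup(current, rampup_length):
    # Restated from the record so this runs standalone: exp(-5 * (1 - t)^2).
    if rampup_length == 0:
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - current / rampup_length
    return float(np.exp(-5.0 * phase * phase))

for epoch in (0, 15, 30, 50):
    # the weight crawls from ~0.0067 at epoch 0 to 1.0 at epoch 30, then saturates
    print(epoch, round(sigmoid_rampup(epoch, 30), 4))

# The EMA schedule from update_ema_variables: alpha = min(1 - 1/(step+1), 0.99).
# At step 0 alpha is 0, so the teacher starts as a copy of the student and
# then trails it with growing inertia, capped at a 0.99 EMA.
teacher, student = 0.0, 1.0
for step in range(5):
    alpha = min(1 - 1 / (step + 1), 0.99)
    teacher = alpha * teacher + (1 - alpha) * student
    print(step, round(alpha, 3), teacher)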
Image\nimport itertools\nfrom torch.utils.data.sampler import Sampler\n\nN_CLASSES_TOOTH = 2\nCLASS_NAMES_TOOTH = ['Caries', 'Health']\n\n\nclass Tooth_Dataset(Dataset):\n def __init__(self, transform=None):\n super(Tooth_Dataset, self).__init__()\n imgs = []\n labels = []\n labeled = []\n for dirname, _, filenames in os.walk('../input/tooth-project-2/dataset_origin/dataset_origin/caries'):\n for filename in filenames:\n imgs.append(str(os.path.join(dirname, filename)))\n labeled.append(str(os.path.join(dirname, filename)))\n labels.append([0, 1, 0])\n for dirname, _, filenames in os.walk('../input/tooth-project-2/dataset_origin/dataset_origin/health'):\n for filename in filenames:\n imgs.append(str(os.path.join(dirname, filename)))\n labeled.append(str(os.path.join(dirname, filename)))\n labels.append([1, 0, 0])\n # 无标签数据的引入\n for dirname, _, filenames in os.walk('../input/unlabeled/unlabeled'):\n for filename in filenames:\n imgs.append(str(os.path.join(dirname, filename)))\n labels.append([0, 0, 0])\n self.images = imgs\n self.labels = labels\n self.labeled = labeled\n self.transform = transform\n print('labeled images:{}'.format(len(self.labeled)))\n print('Total # images:{}, labels:{}'.format(len(self.images), len(self.labels)))\n\n def __getitem__(self, index):\n items = self.images[index]\n image = Image.open(items).convert('RGB')\n label = self.labels[index]\n if self.transform is not None:\n image = self.transform(image)\n # image中包含了有标签数据和无标签数据\n return items, index, image, torch.FloatTensor(label)\n\n def __len__(self):\n return len(self.images)\n\n\nclass TwoStreamBatchSampler(Sampler):\n \"\"\"Iterate two sets of indices\n\n An 'epoch' is one iteration through the primary indices.\n During the epoch, the secondary indices are iterated through\n as many times as needed.\n \"\"\"\n\n # primary_indices为已标注数据的索引的list,secondary_indices为未标注数据的索引的list\n def __init__(self, primary_indices, secondary_indices, batch_size, secondary_batch_size):\n self.primary_indices = primary_indices\n self.secondary_indices = secondary_indices\n self.secondary_batch_size = secondary_batch_size\n self.primary_batch_size = batch_size - secondary_batch_size\n\n assert len(self.primary_indices) >= self.primary_batch_size > 0\n assert len(self.secondary_indices) >= self.secondary_batch_size > 0\n\n def __iter__(self):\n # primary_iter:已标注数据的索引序列打乱顺序,每次迭代只会遍历一次已标注数据\n primary_iter = iterate_once(self.primary_indices)\n # secondary_iter:未标注数据的索引序列,每次迭代可以遍历很多次\n secondary_iter = iterate_eternally(self.secondary_indices)\n return (\n primary_batch + secondary_batch\n for (primary_batch, secondary_batch)\n # grouper指的是在两个序列当中进行采样\n in zip(grouper(primary_iter, self.primary_batch_size),\n grouper(secondary_iter, self.secondary_batch_size))\n )\n\n def __len__(self):\n return len(self.primary_indices) // self.primary_batch_size\n\n\nclass TransformTwice:\n def __init__(self, transform):\n self.transform = transform\n\n def __call__(self, inp):\n out1 = self.transform(inp)\n out2 = self.transform(inp)\n return out1, out2\n\n\ndef iterate_once(iterable):\n return np.random.permutation(iterable) # 产生一个随机序列\n\n\ndef iterate_eternally(indices): # 将多个迭代器进行高效连接,传入的是未标注数据索引的list\n def infinite_shuffles():\n while True:\n yield np.random.permutation(indices)\n\n return itertools.chain.from_iterable(infinite_shuffles())\n\n\ndef grouper(iterable, n):\n \"Collect data into fixed-length chunks or blocks\"\n # grouper('ABCDEFG', 3) --> ABC DEF\"\n args = [iter(iterable)] * n\n return zip(*args)\n\n\n# end of dataset 
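TwoStreamBatchSampler above zips a single shuffled pass over the labeled indices with an endless reshuffle of the unlabeled ones, so every batch carries a fixed labeled/unlabeled split and an epoch ends when the labeled pool is exhausted. A miniature run with toy index pools (6 labeled, 10 unlabeled, 1 labeled + 3 unlabeled per batch, echoing labeled_bs below):

import itertools
import numpy as np

def iterate_once(iterable):
    # one shuffled pass over the labeled pool (as in the record)
    return np.random.permutation(iterable)

def iterate_eternally(indices):
    # endless stream of reshuffles of the unlabeled pool
    def infinite_shuffles():
        while True:
            yield np.random.permutation(indices)
    return itertools.chain.from_iterable(infinite_shuffles())

def grouper(iterable, n):
    # grouper('ABCDEFG', 3) --> ABC DEF
    args = [iter(iterable)] * n
    return zip(*args)

labeled = list(range(6))         # toy stand-ins for the labeled items
unlabeled = list(range(6, 16))   # ... and for the unlabeled tail

primary = iterate_once(labeled)
secondary = iterate_eternally(unlabeled)

# Iteration stops after 6 labeled items: the labeled pool defines the epoch.
for lab, unlab in zip(grouper(primary, 1), grouper(secondary, 3)):
    print([int(i) for i in (*lab, *unlab)])   # first index labeled, rest unlabeled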
defination\n\n# defination of model(densenet)\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# from networks import densenet\nimport re\nimport torch.utils.model_zoo as model_zoo\nfrom collections import OrderedDict\nimport os\n\n\nclass _DenseLayer(nn.Sequential):\n def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):\n super(_DenseLayer, self).__init__()\n self.add_module('norm1', nn.BatchNorm2d(num_input_features)),\n self.add_module('relu1', nn.ReLU(inplace=True)),\n self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *\n growth_rate, kernel_size=1, stride=1, bias=False)),\n self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),\n self.add_module('relu2', nn.ReLU(inplace=True)),\n self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,\n kernel_size=3, stride=1, padding=1, bias=False)),\n self.drop_rate = drop_rate\n self.drop_layer = nn.Dropout(p=drop_rate)\n\n def forward(self, x):\n new_features = super(_DenseLayer, self).forward(x)\n # if self.drop_rate > 0:\n # print (self.drop_rate)\n # new_features = self.drop_layer(new_features)\n return torch.cat([x, new_features], 1)\n\n\nclass _DenseBlock(nn.Sequential):\n def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate):\n super(_DenseBlock, self).__init__()\n for i in range(num_layers):\n layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate)\n self.add_module('denselayer%d' % (i + 1), layer)\n\n\nclass _Transition(nn.Sequential):\n def __init__(self, num_input_features, num_output_features):\n super(_Transition, self).__init__()\n self.add_module('norm', nn.BatchNorm2d(num_input_features))\n self.add_module('relu', nn.ReLU(inplace=True))\n self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,\n kernel_size=1, stride=1, bias=False))\n self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))\n\n\nclass DenseNet(nn.Module):\n\n def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),\n num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000):\n\n super(DenseNet, self).__init__()\n\n # First convolution\n self.features = nn.Sequential(OrderedDict([\n ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),\n ('norm0', nn.BatchNorm2d(num_init_features)),\n ('relu0', nn.ReLU(inplace=True)),\n ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n ]))\n\n # Each denseblock\n num_features = num_init_features\n for i, num_layers in enumerate(block_config):\n block = _DenseBlock(num_layers=num_layers, num_input_features=num_features,\n bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)\n self.features.add_module('denseblock%d' % (i + 1), block)\n num_features = num_features + num_layers * growth_rate\n if i != len(block_config) - 1:\n trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)\n self.features.add_module('transition%d' % (i + 1), trans)\n num_features = num_features // 2\n\n # Final batch norm\n self.features.add_module('norm5', nn.BatchNorm2d(num_features))\n\n # Linear layer\n self.classifier = nn.Linear(num_features, num_classes)\n\n # Official init from torch repo.\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n 
features = self.features(x)\n out = F.relu(features, inplace=True)\n print(out.size())\n out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)\n print(out.size())\n out = self.classifier(out)\n return out\n\n\ndef densenet121(pretrained=False, **kwargs):\n model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), **kwargs)\n return model\n\n\nclass DenseNet121(nn.Module):\n def __init__(self, out_size, mode, drop_rate=0.0):\n super(DenseNet121, self).__init__()\n assert mode in ('U-Ones', 'U-Zeros', 'U-MultiClass')\n self.densenet121 = densenet121(pretrained=False, drop_rate=drop_rate)\n num_ftrs = self.densenet121.classifier.in_features\n # 加上一个全连接层\n if mode in ('U-Ones', 'U-Zeros'):\n self.densenet121.classifier = nn.Sequential(\n nn.Linear(num_ftrs, out_size),\n # nn.Sigmoid()\n )\n elif mode in ('U-MultiClass',):\n self.densenet121.classifier = None\n self.densenet121.Linear_0 = nn.Linear(num_ftrs, out_size)\n self.densenet121.Linear_1 = nn.Linear(num_ftrs, out_size)\n self.densenet121.Linear_u = nn.Linear(num_ftrs, out_size)\n\n self.mode = mode\n\n # Official init from torch repo.\n for m in self.densenet121.modules():\n if isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n self.drop_rate = drop_rate\n self.drop_layer = nn.Dropout(p=drop_rate)\n\n def forward(self, x):\n features = self.densenet121.features(x)\n out = F.relu(features, inplace=True)\n\n out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)\n\n if self.drop_rate > 0:\n out = self.drop_layer(out)\n self.activations = out\n if self.mode in ('U-Ones', 'U-Zeros'):\n out = self.densenet121.classifier(out)\n elif self.mode in ('U-MultiClass',):\n n_batch = x.size(0)\n out_0 = self.densenet121.Linear_0(out).view(n_batch, 1, -1)\n out_1 = self.densenet121.Linear_1(out).view(n_batch, 1, -1)\n out_u = self.densenet121.Linear_u(out).view(n_batch, 1, -1)\n out = torch.cat((out_0, out_1, out_u), dim=1)\n\n return self.activations, out\n\n\n# end of model defination\n\n\ndef create_model(ema=False, device=None):\n # Network definition\n net = DenseNet121(out_size=N_CLASSES_TOOTH, mode='U-Ones', drop_rate=0.2)\n # model = net.cuda()\n model = net.to(device)\n if ema:\n for param in model.parameters():\n param.detach_()\n return model\n\n\nbatch_size = 16\nbase_lr = 1e-4\n# number of labeled data per batch\nlabeled_bs = 4\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = create_model(device=device)\nema_model = create_model(ema=True, device=device)\noptimizer = torch.optim.Adam(model.parameters(), lr=base_lr, betas=(0.9, 0.999), weight_decay=5e-4)\n\n# dataset\nnormalize = transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n\ntrain_dataset = Tooth_Dataset(transform=TransformTwice(transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.RandomAffine(degrees=10, translate=(0.02, 0.02)), # 随机进行旋转\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n])))\nval_dataset = Tooth_Dataset(transform=TransformTwice(transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.RandomAffine(degrees=10, translate=(0.02, 0.02)), # 随机进行旋转\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n])))\ntest_dataset = Tooth_Dataset(transform=TransformTwice(transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.RandomAffine(degrees=10, translate=(0.02, 0.02)), # 随机进行旋转\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n])))\n\n# 已标注数据的索引\nlabeled_idxs 
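The DenseNet121 wrapper above returns both the pooled feature vector (kept as self.activations) and the logits, which is what lets the training loop feed activations into the relation-consistency loss while the logits drive classification. A quick shape check on random input, assuming the class definitions above are in scope; DenseNet-121's final feature width works out to 1024:

import torch

net = DenseNet121(out_size=2, mode='U-Ones', drop_rate=0.2)
net.eval()   # disable the dropout applied to the pooled features

x = torch.randn(4, 3, 224, 224)   # a fake batch of four RGB crops
with torch.no_grad():
    activations, logits = net(x)

print(activations.shape)   # torch.Size([4, 1024]) -- pooled DenseNet-121 features
print(logits.shape)        # torch.Size([4, 2])    -- one score per class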
= list(range(2726))\n# 未标注数据在dataset的索引\nunlabeled_idxs = list(range(2726, 22499))\n# 对有标签数据和无标签数据的分别采样和集成batch\nbatch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size - labeled_bs)\n\n\n# num_worker代表线程数目,每个worker初始化执行函数\ndef worker_init_fn(worker_id):\n random.seed(1337 + worker_id)\n\n\ntrain_dataloader = DataLoader(dataset=train_dataset, batch_sampler=batch_sampler,\n num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)\nval_dataloader = DataLoader(dataset=val_dataset, batch_size=batch_size,\n shuffle=False, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)\ntest_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size,\n shuffle=False, num_workers=4, pin_memory=True, worker_init_fn=worker_init_fn)\n\nmodel.train()\n# end of dataload\n\n# defination of loss\nimport torch\nimport torch.nn\nfrom torch.nn import functional as F\nimport numpy as np\n\n\"\"\"\nThe different uncertainty methods loss implementation.\nIncluding:\n Ignore, Zeros, Ones, SelfTrained, MultiClass\n\"\"\"\n\nMETHODS = ['U-Ignore', 'U-Zeros', 'U-Ones', 'U-SelfTrained', 'U-MultiClass']\n# CLASS_NUM = [1113, 6705, 514, 327, 1099, 115, 142]\n# CLASS_WEIGHT = torch.Tensor([10000 / i for i in CLASS_NUM]).cuda()\n\n\nclass Loss_Zeros(object):\n \"\"\"\n map all uncertainty values to 0\n \"\"\"\n\n def __init__(self):\n self.base_loss = torch.nn.BCELoss(reduction='mean')\n\n def __call__(self, output, target):\n target[target == -1] = 0\n return self.base_loss(output, target)\n\n\nclass Loss_Ones(object):\n \"\"\"\n map all uncertainty values to 1\n \"\"\"\n\n def __init__(self):\n self.base_loss = torch.nn.BCEWithLogitsLoss(reduction='mean')\n\n def __call__(self, output, target):\n target[target == -1] = 1\n return self.base_loss(output, target)\n\n\nclass cross_entropy_loss(object):\n \"\"\"\n map all uncertainty values to a unique value \"2\"\n \"\"\"\n\n def __init__(self):\n self.base_loss = torch.nn.CrossEntropyLoss(weight=None, reduction='mean')\n\n def __call__(self, output, target):\n # target[target == -1] = 2\n output_softmax = F.softmax(output, dim=1)\n target = torch.argmax(target, dim=1)\n return self.base_loss(output_softmax, target.long())\n\n\n# class weighted_cross_entropy_loss(object):\n# \"\"\"\n# map all uncertainty values to a unique value \"2\"\n# \"\"\"\n\n# def __init__(self):\n# self.base_loss = torch.nn.CrossEntropyLoss(weight=CLASS_WEIGHT, reduction='mean')\n\n# def __call__(self, output, target):\n# # target[target == -1] = 2\n# output_softmax = F.softmax(output, dim=1)\n# target = torch.argmax(target, dim=1)\n# return self.base_loss(output_softmax, target.long())\n\ndef get_UncertaintyLoss(method):\n assert method in METHODS\n\n if method == 'U-Zeros':\n return Loss_Zeros()\n\n if method == 'U-Ones':\n return Loss_Ones()\n\n\ndef dice_loss(score, target):\n target = target.float()\n smooth = 1e-5\n intersect = torch.sum(score * target)\n y_sum = torch.sum(target * target)\n z_sum = torch.sum(score * score)\n loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)\n loss = 1 - loss\n return loss\n\n\ndef dice_loss1(score, target):\n target = target.float()\n smooth = 1e-5\n intersect = torch.sum(score * target)\n y_sum = torch.sum(target)\n z_sum = torch.sum(score)\n loss = (2 * intersect + smooth) / (z_sum + y_sum + smooth)\n loss = 1 - loss\n return loss\n\n\ndef entropy_loss(p, C=2):\n ## p N*C*W*H*D\n y1 = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1) / torch.tensor(np.log(C)).cuda()\n ent = torch.mean(y1)\n\n return 
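One caveat worth flagging in cross_entropy_loss above: torch.nn.CrossEntropyLoss applies log_softmax internally and expects raw logits, so calling F.softmax first softmaxes twice. The model can still learn, but the loss values shift and the gradients are flattened. A two-line check of the discrepancy:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0, -1.0], [0.5, 1.5]])
target = torch.tensor([0, 1])
ce = torch.nn.CrossEntropyLoss()

print(ce(logits, target))                    # intended cross-entropy on raw logits
print(ce(F.softmax(logits, dim=1), target))  # double softmax: shifted, flatter loss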
ent\n\n\ndef softmax_dice_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_softmax = F.softmax(input_logits, dim=1)\n target_softmax = F.softmax(target_logits, dim=1)\n n = input_logits.shape[1]\n dice = 0\n for i in range(0, n):\n dice += dice_loss1(input_softmax[:, i], target_softmax[:, i])\n mean_dice = dice / n\n\n return mean_dice\n\n\ndef entropy_loss_map(p, C=2):\n ent = -1 * torch.sum(p * torch.log(p + 1e-6), dim=1, keepdim=True) / torch.tensor(np.log(C)).cuda()\n return ent\n\n\ndef softmax_mse_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_softmax = F.softmax(input_logits, dim=1)\n target_softmax = F.softmax(target_logits, dim=1)\n\n mse_loss = (input_softmax - target_softmax) ** 2\n return mse_loss\n\n\ndef cam_attention_map(activations, channel_weight):\n # activations 48*49*1024\n # channel_weight 48*1024\n attention = activations.permute(1, 0, 2).mul(channel_weight)\n attention = attention.permute(1, 0, 2)\n attention = torch.sum(attention, -1)\n attention = torch.reshape(attention, (48, 7, 7))\n\n return attention\n\n\ndef cam_activation(batch_feature, channel_weight):\n # batch_feature = batch_feature.permute(0,2,3,1)#48 7 7 1024\n # activations = torch.reshape(batch_feature, (batch_feature.shape[0], -1, batch_feature.shape[3]))#48*49*1024\n\n # attention = activations.permute(1,0,2)#.mul(channel_weight)#49*48*1024\n # attention = attention.permute(1,2,0)#48*1024*49\n # attention = F.softmax(attention, -1)#48*1024*49\n\n # activations2 = activations.permute(0, 2, 1) #48 1024 49\n # activations2 = activations2 * attention\n # activations2 = torch.sum(activations2, -1)#48*1024\n batch_feature = batch_feature.permute(0, 2, 3, 1)\n # 48*49*1024\n activations = torch.reshape(batch_feature, (batch_feature.shape[0], -1, batch_feature.shape[3]))\n\n # 49*48*1024\n attention = activations.permute(1, 0, 2).mul(channel_weight)\n # 48*49*1024\n attention = attention.permute(1, 0, 2)\n # 48*49\n attention = torch.sum(attention, -1)\n attention = F.softmax(attention, -1)\n\n activations2 = activations.permute(2, 0, 1) # 1024*48*49\n activations2 = activations2 * attention\n activations2 = torch.sum(activations2, -1) # 1024*48\n # 48 1024\n activations2 = activations2.permute(1, 0)\n\n return activations2\n\n\ndef relation_mse_loss_cam(activations, ema_activations, model, label):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. 
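softmax_mse_loss above deliberately returns the unreduced element-wise squared difference of the two softmax distributions; the training loop below reduces it with torch.sum(...) / batch_size. Minimal usage with a teacher that sees a slightly perturbed view (the function is restated so the snippet runs standalone):

import torch
import torch.nn.functional as F

def softmax_mse_loss(input_logits, target_logits):
    # restated from above: element-wise (softmax(student) - softmax(teacher))^2
    assert input_logits.size() == target_logits.size()
    return (F.softmax(input_logits, dim=1) - F.softmax(target_logits, dim=1)) ** 2

student_logits = torch.randn(16, 2)
teacher_logits = student_logits + 0.05 * torch.randn(16, 2)

# Same reduction as the training loop below: sum over the batch, then / batch_size.
consistency_dist = torch.sum(softmax_mse_loss(student_logits, teacher_logits)) / 16
print(consistency_dist)   # small non-negative scalar; zero only if the two agree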
Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n weight = model.module.densenet121.classifier[0].weight\n # 48*1024\n channel_weight = label.mm(weight)\n\n activations = cam_activation(activations.clone(), channel_weight)\n ema_activations = cam_activation(ema_activations.clone(), channel_weight)\n\n assert activations.size() == ema_activations.size()\n\n activations = torch.reshape(activations, (activations.shape[0], -1))\n ema_activations = torch.reshape(ema_activations, (ema_activations.shape[0], -1))\n\n similarity = activations.mm(activations.t())\n norm = torch.reshape(torch.norm(similarity, 2, 1), (-1, 1))\n norm_similarity = similarity / norm\n\n ema_similarity = ema_activations.mm(ema_activations.t())\n ema_norm = torch.reshape(torch.norm(ema_similarity, 2, 1), (-1, 1))\n ema_norm_similarity = ema_similarity / ema_norm\n\n similarity_mse_loss = (norm_similarity - ema_norm_similarity) ** 2\n return similarity_mse_loss\n\n\ndef relation_mse_loss(activations, ema_activations):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n\n assert activations.size() == ema_activations.size()\n\n activations = torch.reshape(activations, (activations.shape[0], -1))\n ema_activations = torch.reshape(ema_activations, (ema_activations.shape[0], -1))\n\n similarity = activations.mm(activations.t())\n norm = torch.reshape(torch.norm(similarity, 2, 1), (-1, 1))\n norm_similarity = similarity / norm\n\n ema_similarity = ema_activations.mm(ema_activations.t())\n ema_norm = torch.reshape(torch.norm(ema_similarity, 2, 1), (-1, 1))\n ema_norm_similarity = ema_similarity / ema_norm\n\n similarity_mse_loss = (norm_similarity - ema_norm_similarity) ** 2\n return similarity_mse_loss\n\n\ndef feature_mse_loss(activations, ema_activations):\n \"\"\"Takes softmax on both sides and returns MSE loss\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n\n assert activations.size() == ema_activations.size()\n\n activations = torch.reshape(activations, (activations.shape[0], -1))\n ema_activations = torch.reshape(ema_activations, (ema_activations.shape[0], -1))\n\n # similarity = activations.mm(activations.t())\n # norm = torch.reshape(torch.norm(similarity, 2, 1), (-1, 1))\n # norm_similarity = similarity / norm\n\n # ema_similarity = ema_activations.mm(ema_activations.t())\n # ema_norm = torch.reshape(torch.norm(ema_similarity, 2, 1), (-1, 1))\n # ema_norm_similarity = ema_similarity / ema_norm\n\n similarity_mse_loss = (activations - ema_activations) ** 2\n return similarity_mse_loss\n\n\ndef sigmoid_mse_loss(input_logits, target_logits):\n assert input_logits.size() == target_logits.size()\n input_softmax = torch.sigmoid(input_logits)\n target_softmax = torch.sigmoid(target_logits)\n\n loss_fn = torch.nn.MSELoss(reduction='mean')\n mse_loss = loss_fn(input_softmax, target_softmax)\n return mse_loss\n\n\ndef softmax_kl_loss(input_logits, target_logits):\n \"\"\"Takes softmax on both sides and returns KL divergence\n\n Note:\n - Returns the sum over all examples. 
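relation_mse_loss is the sample-relation-consistency term: each network's batch activations are turned into a row-normalized Gram matrix, and the two matrices are compared, so what gets matched is the pairwise similarity structure of the batch rather than the raw features. A minimal check (the function is restated so the snippet runs standalone):

import torch

def relation_mse_loss(activations, ema_activations):
    # Restated from above: row-normalized Gram matrices, then squared difference.
    activations = torch.reshape(activations, (activations.shape[0], -1))
    ema_activations = torch.reshape(ema_activations, (ema_activations.shape[0], -1))
    similarity = activations.mm(activations.t())
    norm = torch.reshape(torch.norm(similarity, 2, 1), (-1, 1))
    ema_similarity = ema_activations.mm(ema_activations.t())
    ema_norm = torch.reshape(torch.norm(ema_similarity, 2, 1), (-1, 1))
    return (similarity / norm - ema_similarity / ema_norm) ** 2

student_feats = torch.randn(8, 1024)
teacher_feats = student_feats.clone()

# Identical activations -> identical relation matrices -> exactly zero loss.
print(torch.sum(relation_mse_loss(student_feats, teacher_feats)))   # tensor(0.)

# Swapping two samples changes the pairwise structure, so the loss turns positive.
swapped = teacher_feats[torch.tensor([1, 0, 2, 3, 4, 5, 6, 7])]
print(torch.sum(relation_mse_loss(student_feats, swapped)) > 0)     # tensor(True)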
Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to inputs but not the targets.\n \"\"\"\n assert input_logits.size() == target_logits.size()\n input_log_softmax = F.log_softmax(input_logits, dim=1)\n target_softmax = F.softmax(target_logits, dim=1)\n\n # return F.kl_div(input_log_softmax, target_softmax)\n kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='none')\n # mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])\n return kl_div\n\n\ndef symmetric_mse_loss(input1, input2):\n \"\"\"Like F.mse_loss but sends gradients to both directions\n\n Note:\n - Returns the sum over all examples. Divide by the batch size afterwards\n if you want the mean.\n - Sends gradients to both input1 and input2.\n \"\"\"\n assert input1.size() == input2.size()\n return torch.mean((input1 - input2) ** 2)\n\n\n# end of loss defination\n\n\n# 设置分类loss函数为交叉损失熵\nloss_fn = cross_entropy_loss()\n# 超参数\niter_num = 0\nlr_ = base_lr\nema_decay = 0.99\nmodel.train()\n\n# train\nfor epoch in range(0, 100):\n time1 = time.time()\n iter_max = len(train_dataloader)\n train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()\n # label_batch指的是标签的batch,前面的()内部是image的batch\n for i, (_, _, (image_batch, ema_image_batch), label_batch) in enumerate(train_dataloader):\n time2 = time.time()\n # print('fetch data cost {}'.format(time2-time1))\n image_batch, ema_image_batch, label_batch = image_batch.to(device), ema_image_batch.to(\n device), label_batch.to(device)\n # unlabeled_image_batch = ema_image_batch[labeled_bs:]\n\n # noise1 = torch.clamp(torch.randn_like(image_batch) * 0.1, -0.1, 0.1)\n # noise2 = torch.clamp(torch.randn_like(ema_image_batch) * 0.1, -0.1, 0.1)\n ema_inputs = ema_image_batch # + noise2\n inputs = image_batch # + noise1\n\n activations, outputs = model(inputs)\n with torch.no_grad():\n ema_activations, ema_output = ema_model(ema_inputs)\n\n ## calculate the loss\n loss_classification = loss_fn(outputs[:labeled_bs], label_batch[:labeled_bs])\n loss = loss_classification\n\n ## MT loss (have no effect in the beginneing)\n consistency_weight = get_current_consistency_weight(epoch)\n consistency_dist = torch.sum(\n softmax_mse_loss(outputs, ema_output)) / batch_size # / dataset.N_CLASSES\n consistency_loss = consistency_weight * consistency_dist\n\n # consistency_relation_dist = torch.sum(losses.relation_mse_loss_cam(activations, ema_activations, model, label_batch)) / batch_size\n consistency_relation_dist = torch.sum(relation_mse_loss(activations, ema_activations)) / batch_size\n consistency_relation_loss = consistency_weight * consistency_relation_dist * 1\n\n if epoch > 20:\n loss = loss_classification + consistency_loss + consistency_relation_loss\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n update_ema_variables(model, ema_model, ema_decay, iter_num)\n with torch.no_grad():\n train_l_sum += loss.cpu().item()\n train_acc_sum += (outputs[:labeled_bs].argmax(dim=1) == label_batch[:labeled_bs].argmax(dim=1)).sum().cpu().item()\n n += label_batch[:labeled_bs].shape[0]\n batch_count += 1\n iter_num = iter_num + 1\n# test_acc = evaluate_accuracy(test_dataloader, model, device)\n print('epoch %d, loss %.4f, train acc %.5f, time %.1f sec' % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, time.time() - start))\ntorch.save(model.state_dict(), 
'model.pth')","repo_name":"Eric-jinkens/teethDease","sub_path":"models/Semi-supervised/MT_SRC.py","file_name":"MT_SRC.py","file_ext":"py","file_size_in_byte":28600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"34343342298","text":"#Numbers are divisible by 3 print'nav', by 7 print'Gurukul', by both 'NavGurukul'....\ndef func(num):\n\ti=1\n\twhile i<=num:\n\t\tif i%3==0:\n\t\t\tprint('Nav')\n\t\telif i%7==0:\n\t\t\tprint('Gurukul')\n\t\telif i%3==0 and i%7==0:\n\t\t\tprint('NavGurukul')\n\t\telse:\n\t\t\tprint(i)\n\t\ti=i+1\nnum=int(input('enter any number'))\nfunc(num)","repo_name":"subhadra-parida/python_function","sub_path":"fun,q22.py","file_name":"fun,q22.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35187087537","text":"\"\"\"Identification numbers\n\nRevision ID: 965c3826a7aa\nRevises: 984e5cba2065\nCreate Date: 2023-09-24 22:30:50.538728\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"965c3826a7aa\"\ndown_revision = \"984e5cba2065\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"identification_numbers\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"NIP\", sa.String(), nullable=True),\n sa.Column(\"REGON\", sa.String(), nullable=True),\n sa.Column(\"KRS\", sa.String(), nullable=True),\n sa.Column(\"sad\", sa.String(), nullable=True),\n sa.Column(\"created_at\", sa.DateTime(), nullable=False),\n sa.Column(\"updated_at\", sa.DateTime(), nullable=True),\n sa.Column(\"creator_id\", sa.Integer(), nullable=True),\n sa.Column(\"editor_id\", sa.Integer(), nullable=True),\n sa.Column(\"company_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"company_id\"],\n [\"companies.id\"],\n name=op.f(\"fk_identification_numbers_company_id_companies\"),\n ),\n sa.ForeignKeyConstraint(\n [\"creator_id\"],\n [\"users.id\"],\n name=op.f(\"fk_identification_numbers_creator_id_users\"),\n ondelete=\"SET NULL\",\n ),\n sa.ForeignKeyConstraint(\n [\"editor_id\"],\n [\"users.id\"],\n name=op.f(\"fk_identification_numbers_editor_id_users\"),\n ondelete=\"SET NULL\",\n ),\n sa.PrimaryKeyConstraint(\"id\", name=op.f(\"pk_identification_numbers\")),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table(\"identification_numbers\")\n # ### end Alembic commands ###\n","repo_name":"krysros/Marker","sub_path":"marker/alembic/versions/20230924_965c3826a7aa.py","file_name":"20230924_965c3826a7aa.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"23202731504","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QLabel, QMainWindow ,QPushButton , QLineEdit\nfrom PyQt5.QtGui import QPixmap, QFont\nfrom PyQt5.QtCore import Qt, QTimer, QDateTime\nfrom datetime import datetime\nfrom utilities.components import *\n\n\nclass time_slots(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.width = 480\n self.height = 800\n self.resize(480, 800)\n\n self.background_image = QLabel(self)\n self.background_image.setPixmap(QPixmap(\"images/background.png\"))\n self.background_image.setGeometry(0, 0, self.width, self.height)\n\n newSlot = imgbutton2(self, \"images/icons/newTimeSlotIcon.png\", 100, 100, [18, 99], self.openNewTimeSlot)\n\n editSlot = imgbutton2(self, \"images/icons/EditSlot.png\", 100, 100, [133, 99], self.openEditTimeSlot)\n\n self.backbtnv2 = imgbutton2(self, \"images/icons/BackIcon.png\", 30, 30, (5, 44), self.openCanteenSettings)\n\n self.slots = imgbutton2(self, \"images/icons/TimeSlot50x50.png\", 50, 50, [215, 34], self.close)\n self.slots.setEnabled(False)\n\n # Create label for date and time\n self.date_time_label = QLabel(self)\n self.date_time_label.setGeometry(5, 4, 190, 20)\n\n # Set font for date and time label\n font_small = QFont(\"inika\", 10, QFont.Normal)\n self.date_time_label.setFont(font_small)\n\n # Create label for time\n self.time_label = QLabel(self)\n self.time_label.setGeometry(195, 547, 300, 30)\n self.time_label.setFont(font_small)\n\n # Update date and time every second\n self.timer = QTimer()\n self.timer.timeout.connect(self.update_date_time)\n self.timer.start(1000)\n\n # Initial date and time display\n self.update_date_time()\n\n def openCanteenSettings(self):\n from canteenSettings import canteen_setting\n self.openCanteenSettings = canteen_setting()\n self.openCanteenSettings.show()\n self.close()\n\n def openNewTimeSlot(self):\n from NewTimeSlots import new_time_slots\n self.openNewTimeSlot = new_time_slots()\n self.openNewTimeSlot.show()\n self.close()\n\n def openEditTimeSlot(self):\n from EditTimeSlot import edit_time_slots\n self.openEditTimeSlot = edit_time_slots()\n self.openEditTimeSlot.show()\n self.close()\n\n def update_date_time(self):\n # Get current date and time\n current_datetime = datetime.now()\n\n # Format the date as \"14th June 2023\"\n formatted_date = current_datetime.strftime(\"%d{} %B %Y\").format(\n \"th\" if 10 <= current_datetime.day <= 19 else\n {1: \"st\", 2: \"nd\", 3: \"rd\"}.get(current_datetime.day % 10, \"th\")\n )\n\n # Get the current day\n current_day = current_datetime.strftime(\"%A\")\n\n # Format the time as \"hh:mm:ss\"\n formatted_time = current_datetime.strftime(\"%H:%M:%S\")\n\n # Update date and time labels\n current_datetime_str = f\"{formatted_date} - {formatted_time}\"\n self.date_time_label.setText(current_datetime_str)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = time_slots()\n window.show()\n 
sys.exit(app.exec_())\n","repo_name":"Epitage-Consulting-Pvt-Ltd/Test-Device_V0.0","sub_path":"proto3/TimeSlots.py","file_name":"TimeSlots.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28435935513","text":"def year_day():\n d1, m1, y1 = [int(x) for x in input('plz enter the first date like this: \\n1 6 1398 \\n').split()]\n d2, m2, y2 = [int(x) for x in input('plz enter the second date like this: \\n1 6 1398 \\n').split()]\n if m1 == 12:\n totald1 = d1 + (m1 * 29) + (y1 * 365) + int(m1 / 4)\n totald2 = d2 + (m2 * 29) + (y2 * 365) + int(m2 / 4)\n elif m1 <= 6:\n totald1 = d1 + (m1 * 31) + (y1 * 365)\n totald2 = d2 + (m2 * 31) + (y2 * 365)\n elif m2 >= 6:\n totald1 = d1 + (m1 * 30) + (y1 * 365)\n totald2 = d2 + (m2 * 30) + (y2 * 365)\n\n diff = totald1 - totald2 # total days\n if diff < 0:\n diff = diff * -1\n\n ydiff = diff // 365\n x1 = diff - (ydiff * 356) # total months full years excluded\n mdiff = (x1 // 29)\n ddiff = x1 - (mdiff * 29) # total days full years and months excluded\n\n print(ydiff, ' years and ', mdiff, ' months and ', ddiff, 'days')\n print('with total ', diff, ' days!')\n\n\nyear_day()\n","repo_name":"rz01m/age-subtraction","sub_path":"age_subtraction.py","file_name":"age_subtraction.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9720499927","text":"import re\r\nimport matplotlib.pyplot as plt\r\n\r\n# Pedir al usuario que introduzca la ecuación\r\nrest = [\r\n \"3x+2y<=18\",\r\n \"1x+0y<=4\",\r\n \"0x+2y<=12\",\r\n \"1x+0y>=0\",\r\n \"0x+1y>=0\"]\r\nf_objetivo = \"30000x+50000y=Z\" #se pueden incluir decimales, por ejemplo:\"2.3\" \r\ntipo = \"max\" # puede ser de tipo \"max\" o \"min\"\r\n\r\n#############################convertir ecuacion o inecuacion en una funcion############################\r\n\r\n\r\n#obtiene mediante expresiones literales los coeficientes de una funcion o restriccion con estructura especifica\r\ndef coeficientes(input):\r\n coeficientes = re.findall(r'[-]?\\d*\\.?\\d+', input)\r\n #terminos = re.findall(r'[a-zA-Z]*', input_str)\r\n return coeficientes\r\n\r\n#convierte los coeficientes extraidos de un string a entero o float.\r\ndef str_int(str):\r\n arr = (coeficientes(str))\r\n li = []\r\n for i in range(len(arr)):\r\n if(arr[i].isdigit()):\r\n li.append(int(arr[i]))\r\n else:\r\n li.append(float(arr[i]))\r\n if li[1] == 0:\r\n return [li, True]\r\n\r\n return [li, False]\r\n\r\n\r\ndef funcion(x, coe):\r\n coe1 = coe[0]\r\n imagen = []\r\n pre = []\r\n if coe[1]:\r\n for i in range(len(x)):\r\n imagen.append(x[i])\r\n pre.append((coe1[2]-(coe1[1]*x[i]))/coe1[0])\r\n #print(x, imagen)\r\n return pre, imagen\r\n for i in range(len(x)):\r\n imagen.append((coe1[2]-(coe1[0]*x[i]))/coe1[1])\r\n #print(x, imagen)\r\n return x, imagen\r\n\r\n#determina el la desigualdad\r\ndef inecuacion(sig, a, b):\r\n sign = sig[0]\r\n if sign == \"<=\":\r\n if a <= b:\r\n return True\r\n else:\r\n return False\r\n elif sign == \">=\":\r\n if a >= b:\r\n return True\r\n else:\r\n return False\r\n elif sign == \">\":\r\n if a > b:\r\n return True\r\n else:\r\n return False\r\n elif sign == \"<\":\r\n if a < b:\r\n return True\r\n else:\r\n return False\r\n elif sign == \"=\":\r\n if a == b:\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n\r\ndef region_factible(inter):\r\n rf = []\r\n cont = 0\r\n for j in range(len(inter)):\r\n for i in 
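The day-count arithmetic in year_day above is rough: it branches on only one of the two dates, applies a single 29/30/31-day month length to every month of both dates, and decomposes the difference with a 356-for-365 typo. A cleaner approach converts each date to an ordinal day count using the actual Jalali month lengths, 31 days for months 1-6, 30 for months 7-11, 29 for month 12; leap years are deliberately ignored in this sketch:

# Cumulative days before each Jalali month (index 0 unused).
DAYS_BEFORE_MONTH = [None, 0, 31, 62, 93, 124, 155, 186, 216, 246, 276, 306, 336]

def to_ordinal(day, month, year):
    # Days since an arbitrary epoch; good enough to difference two dates.
    return year * 365 + DAYS_BEFORE_MONTH[month] + day

d1 = to_ordinal(1, 6, 1398)
d2 = to_ordinal(15, 1, 1397)
diff = abs(d1 - d2)
print(diff, 'days =', diff // 365, 'years and', diff % 365, 'days')   # 506 days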
range(len(rest)):\r\n c = str_int(rest[i])\r\n signo = re.findall(r'[<>]=?|!=', rest[i])\r\n #if len(signo)==0:\r\n #signo = re.findall(r'(?<=[\\w\\d\\]>])=|(?<=<)=|!=|\\s=', rest[i])\r\n \r\n if inecuacion(signo, c[0][0]*inter[j][0]+c[0][1]*inter[j][1], c[0][2]):\r\n cont += 1\r\n if cont == len(rest):\r\n rf.append(inter[j])\r\n cont = 0\r\n return rf\r\n\r\n\r\n# retorna todas las interseccion entre rectas.\r\ndef obtener_inter():\r\n inte = []\r\n c = []\r\n for i in range(len(rest)):\r\n c.append(str_int(rest[i])[0])\r\n \r\n for i in range(len(rest)):\r\n for j in range(len(rest)-i):\r\n if encontrar(c[i], c[j+i]) != None:\r\n # inte.append(interse)\r\n inte.append(encontrar(c[i], c[j+i]))\r\n print(\"Todas las intersecciones encontradas: \", inte)\r\n return inte\r\n\r\n\r\n# Genera valores para la variable independiente x\r\nx = [0, 100]\r\n\r\n\r\ndef encontrar(co1, co2):\r\n # Convertir la primera ecuación a la forma y = mx + b\r\n a, b, c = map(int, co1)\r\n if b == 0:\r\n m1 = None\r\n b1 = c\r\n else:\r\n m1 = -a/b\r\n b1 = c/b\r\n # Convertir la segunda ecuación a la forma y = mx + b\r\n a, b, c = map(int, co2)\r\n if b == 0:\r\n m2 = None\r\n b2 = c\r\n else:\r\n m2 = -a/b\r\n b2 = c/b\r\n # Calcular la intersección de las dos rectas\r\n if m2 is None and m1 is None:\r\n return None\r\n if m2 is None:\r\n x = b2\r\n y = m1 * x + b1\r\n elif m1 is None:\r\n x = b1\r\n y = m2 * x + b2\r\n elif m1 == m2: #Las rectas son paralelas y no tienen un punto de intersección.\r\n return None\r\n else:\r\n x = (b2 - b1) / (m1 - m2)\r\n y = m1 * x + b1\r\n return x, y\r\n\r\n\r\ndef f_obj(coe, x, y):\r\n co = coe[0]\r\n result = (co[0]*x)+(co[1]*y)\r\n print(\"----------valor de f objetivo y interseccion evaluada----------\")\r\n print(result)\r\n print(x, y)\r\n return result\r\n\r\n\r\n#De todas las intersecciones dentro de la region factible obtiene la menor o la mayor dependiendo si es minimo o maximo\r\ndef obtener_optimo():\r\n temp = 0\r\n mayor = 0\r\n interseccion_optima = 0, 0\r\n i_f = region_factible(obtener_inter())\r\n print(\"intersecciones dentro de la region factible: \",i_f)\r\n ax = []\r\n ay = []\r\n for i in range(len(i_f)):\r\n xf, yf = i_f[i]\r\n if i==0:\r\n temp = (f_obj(str_int(f_objetivo), xf, yf))\r\n op = (f_obj(str_int(f_objetivo), xf, yf))\r\n if tipo==\"min\":\r\n if temp >= op:\r\n mayor = op\r\n temp = op\r\n interseccion_optima = xf, yf\r\n else:\r\n mayor = temp\r\n elif tipo==\"max\":\r\n if temp <= op:\r\n mayor = op\r\n temp = op\r\n interseccion_optima = xf, yf\r\n else:\r\n mayor = temp\r\n\r\n print(\"valor optimo: \", mayor)\r\n print(\"interseccion optima: \", interseccion_optima)\r\n return mayor, interseccion_optima, i_f\r\n\r\n\r\n# Graficar los ejes x e y\r\nplt.axhline(y=0, color='k')\r\nplt.axvline(x=0, color='k')\r\n\r\n# Agregar etiquetas a los ejes\r\nplt.xlabel('Eje x')\r\nplt.ylabel('Eje y')\r\n#grafica todas las rectas generadas a partir de las restricciones y el punto donde esta el optimo\r\ndef ordenar(orden):\r\n temp = orden[0]\r\n indice=0\r\n for i in range(len(orden)):\r\n \r\n if orden[i][0]==0 and orden[i][1]==0:\r\n return orden[i], i\r\n else:\r\n if temp[0] >= orden[i][0]:\r\n \r\n if temp[0] == orden[i][0]:\r\n if temp[1]>= orden[i][1]:\r\n menor = temp\r\n else:\r\n menor = orden[i]\r\n temp = orden[i]\r\n indice = i \r\n else:\r\n menor = temp\r\n return menor, indice\r\ndef r_objetivo(opt, x):\r\n coe_o = str_int(f_objetivo)\r\n print(coe_o)\r\n lisy =[]\r\n for i in x:\r\n y = (opt-(coe_o[0][0]*i))/coe_o[0][1]\r\n 
lisy.append(y)\r\n return x, lisy\r\ndef grafica():\r\n optimo, interseccion, i_f = obtener_optimo()\r\n #obtenemos los puntos a graficar(rectas)\r\n \r\n for i in range(len(rest)):\r\n xg, yg = funcion(x, str_int(rest[i]))\r\n plt.plot(xg, yg, label=rest[i])\r\n #graficamos el punto optimo encontrado\r\n orden = []\r\n puntos_factibles = i_f\r\n print(\"aaaaaaaaaaaaaaa\",i_f)\r\n for k in range(len(puntos_factibles)):\r\n if (puntos_factibles[k][0]==interseccion[0] and puntos_factibles[k][1]==interseccion[1]):\r\n plt.scatter(interseccion[0], interseccion[1], color='blue',zorder=10, s=10)\r\n plt.text(interseccion[0], interseccion[1], '('+str(interseccion[0])+','+str(interseccion[1])+')')\r\n else:\r\n plt.scatter(puntos_factibles[k][0], puntos_factibles[k][1], color='red', zorder=10, s=10)\r\n plt.text(puntos_factibles[k][0], puntos_factibles[k][1], '('+str(puntos_factibles[k][0])+','+str(puntos_factibles[k][1])+')')\r\n for i in range(len(i_f)):\r\n min , ind = (ordenar(i_f))\r\n del i_f[ind]\r\n orden.append(min) \r\n\r\n print(orden)\r\n ax = [] \r\n ay = []\r\n\r\n for i in range(len(orden)):\r\n ax.append(orden[i][0])\r\n ay.append(orden[i][1])\r\n xfo, yfo = r_objetivo(optimo, x)\r\n print(xfo, yfo)\r\n plt.plot(xfo, yfo, label=f_objetivo)\r\n plt.fill(ax, ay, 'g')\r\n plt.legend(loc='upper right', fontsize='large')\r\n # Mostrar la gráfica\r\n plt.show()\r\n\r\ngrafica()","repo_name":"Hexzzard/AlgoritmosV1","sub_path":"Optimo_grafico.py","file_name":"Optimo_grafico.py","file_ext":"py","file_size_in_byte":7796,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27524829744","text":"\"\"\"\nTimothy\n1119007\n\"\"\"\ndef hitung_kemiripan_by_jaccard(a,b):\n setA = a & b\n setB = a | b\n A = len(setA)\n B = len(setB)\n if(A>B):\n return B/A\n else:\n return A/B\n\ndef hitung_selisih(a,b):\n hasil=a-b\n return hasil\n\nif __name__ == \"__main__\":\n transaksi_jordie = [\"Dragon Hoops\", \"Dragon Hoops\", \"Booked\", \"The Crossover\", \"Rebound\"]\n transaksi_sarah = [\"The Oracle Code\", \"Under The Moon\", \"Overdrive\", \"The Crossover\"]\n transaksi_shannon = [\"Breaking Glass\", \"Breaking Glass\"]\n transaksi_steve = [\"Breaking Glass\", \"Dragon Hoops\", \"The Crossover\",\"Nightwalker\"]\n transaksi_manuel = [\"Booked\",\"Overdrive\",\"Dragon Hoops\",\"The Crossover\",\"The Playbook\"]\n\n set_jordie = set(transaksi_jordie)\n set_sarah = set(transaksi_sarah)\n set_shannon = set(transaksi_shannon)\n set_steve = set(transaksi_steve)\n set_manuel = set(transaksi_manuel)\n\n produk_fav = {\n \"Jordie\": set_jordie,\n \"Sarah\": set_sarah,\n \"Shannon\": set_shannon,\n \"Steve\": set_steve,\n \"Manuel\": set_manuel\n }\n\n for x in produk_fav:\n print(x,end=' ')\n print(produk_fav[x])\n print()\n\n nama1 = input(\"Masukkan nama pelanggan yang sedang belanja: \")\n\n for x in produk_fav:\n if(x!=nama1):\n hasil = hitung_kemiripan_by_jaccard(produk_fav[nama1],produk_fav[x])\n print(\"Nilai kemiripan dengan \"+x+\" = \",end='')\n print(hasil)\n print()\n\n nama2 = input(\"Berdasarkan nilai tertinggi (paling mirip), masukkan nama pelanggan untuk dasar rekomendasi: \")\n print(\"Berdasarkan daftar pembelian \"+nama1+\", rekomendasi pembelian untuk \"+nama1+\" adalah: \")\n hasil = hitung_selisih(produk_fav[nama2],produk_fav[nama1])\n print(hasil)","repo_name":"TimothyRay18/Praktikum-Matematika-Informatika-Py-","sub_path":"Week 
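The script above leans on the corner-point theorem: with linear constraints and a linear objective, the optimum sits at a vertex of the feasible region, so it suffices to intersect constraint boundaries, keep the intersections that satisfy every constraint, and evaluate the objective there. For the constraints at the top of the script that search collapses to the following check (vertices worked out by hand; the names below are fresh, not from the script):

# Corner points of {3x+2y<=18, x<=4, 2y<=12, x>=0, y>=0},
# with the objective z = 30000x + 50000y evaluated at each.
vertices = [(0, 0), (4, 0), (4, 3), (2, 6), (0, 6)]

best = max(vertices, key=lambda p: 30000 * p[0] + 50000 * p[1])
print(best, 30000 * best[0] + 50000 * best[1])   # (2, 6) 360000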
9/soal3.py","file_name":"soal3.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"21578861815","text":"import sys\nimport os\nimport time\n\nSCRIPT_DIR = os.path.dirname(sys.argv[0])\nSCRIPT_NAME = os.path.basename(sys.argv[0])\nos.chdir(SCRIPT_DIR)\n\nINPUT_FILE_NAME = SCRIPT_NAME.replace(\"py\", \"txt\")\nprint(f\"=== {SCRIPT_NAME} ===\")\n\n\ndef readInputFile(file=INPUT_FILE_NAME):\n 'read the input file'\n\n inputLines = []\n print(f\"-> read {file}\")\n with open(file, \"r\") as inputFile:\n for line in inputFile:\n line = line.rstrip(\"\\n\")\n inputLines.append(line)\n return inputLines\n\n\ng_inputLines = []\ng_data_l = []\n\n\ndef initDataStructure():\n global g_data_l\n g_data_l = []\n for line in g_inputLines:\n g_data_l.append((line[0], int(line[1:])))\n # print(g_data_l)\n\n\ndef resolve_part2():\n #print(\"resolve_part1():\", len(g_data_l), g_data_l, g_adj_l)\n\n waypoint_ew = 10 # EAST\n waypoint_ns = 1 # NORTH\n pos_ew = 0\n pos_ns = 0\n\n #print(f\" : POS({pos_ew:3},{pos_ns:3}) WP({waypoint_ew:3},{waypoint_ns:3})\")\n for cmd, arg in g_data_l:\n #print(\"->\", cmd, arg)\n if (cmd == \"L\" or cmd == \"R\"):\n if (cmd == \"L\"):\n arg = 360 - arg\n for _ in range(arg // 90):\n tmp = waypoint_ew\n waypoint_ew = waypoint_ns\n waypoint_ns = -tmp\n #print(\"90:\", waypoint_ew, waypoint_ns)\n elif (cmd == 'N'):\n waypoint_ns += arg\n elif (cmd == 'S'):\n waypoint_ns -= arg\n elif (cmd == 'E'):\n waypoint_ew += arg\n elif (cmd == 'W'):\n waypoint_ew -= arg\n elif (cmd == 'F'):\n pos_ns += arg * waypoint_ns\n pos_ew += arg * waypoint_ew\n #print(f\"{cmd} {arg:3} : POS({pos_ew:3},{pos_ns:3}) WP({waypoint_ew:3},{waypoint_ns:3})\")\n\n print(f\"POS({pos_ew:3},{pos_ns:3}) WP({waypoint_ew:3},{waypoint_ns:3})\")\n return abs(pos_ew) + abs(pos_ns)\n\n\ndef resolve_part1():\n #print(\"resolve_part1():\", len(g_data_l), g_data_l, g_adj_l)\n\n pos_ew = 0\n pos_ns = 0\n heading = 90 # EAST\n\n for cmd, arg in g_data_l:\n #print(\"->\", cmd, arg)\n if (cmd == \"L\" or cmd == \"R\"):\n if (cmd == \"L\"):\n heading += 360 - arg\n else:\n heading += arg\n heading %= 360\n elif (cmd == 'N'):\n pos_ns += arg\n elif (cmd == 'S'):\n pos_ns -= arg\n elif (cmd == 'E'):\n pos_ew += arg\n elif (cmd == 'W'):\n pos_ew -= arg\n elif (cmd == 'F'):\n if (heading == 0):\n pos_ns += arg\n elif (heading == 90):\n pos_ew += arg\n elif (heading == 180):\n pos_ns -= arg\n else:\n pos_ew -= arg\n #print(f\"{cmd} {arg:3} : ES={pos_ew:3} NS={pos_ns:3} heading={heading}\")\n\n print(f\"ES={pos_ew:3} NS={pos_ns:3} heading={heading}\")\n return abs(pos_ew) + abs(pos_ns)\n\n\n#inputLines = readInputFile(\"AoC_2020_12_sample.txt\")\ng_inputLines = readInputFile()\n\nres = -1\n\n###\n# PART 1\n###\n\nprint()\nprint(f\"### PART 1 ###\")\n\ntic = time.perf_counter()\n\ninitDataStructure()\nres = resolve_part1()\n\ntoc = time.perf_counter()\n\nprint(f\"-> result part 1 = {res}\")\nprint(f\"{toc - tic:0.4f} seconds\")\n\n###\n# PART 2\n###\n\nprint()\nprint(f\"### PART 2 ###\")\n\ntic = time.perf_counter()\n\ninitDataStructure()\nres = resolve_part2()\n\ntoc = time.perf_counter()\n\nprint(f\"-> result part 2 = {res}\")\nprint(f\"{toc - tic:0.4f} seconds\")\n","repo_name":"arzhuras/AdventOfCode","sub_path":"2020/12 - Ferry moves/Aoc_2020_12.py","file_name":"Aoc_2020_12.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
+{"seq_id":"19543475934","text":"from sys import stdin\n\nD = \"_abcdefghijklmnopqrstuvwxyz.\"\n\nfor s in (s for s in stdin if s != \"0\\n\"):\n k, s = s.split()\n k = int(k)\n res = [0] * len(s)\n for i in range(len(s)):\n res[(k * i) % len(s)] = (D.index(s[i]) + i) % 28\n\n print(''.join(list(map(lambda x: D[x], res))))\n \n","repo_name":"mkroflin/UVa","sub_path":"problems/641.py","file_name":"641.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31458093868","text":"import io\n\nimport pytest\nimport serial\n\nfrom .. import sniffer\n\n\ndef test_tty_sniffer_init():\n outfile = io.StringIO()\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n assert obj.baudrate == 12345\n with pytest.raises(ConnectionError):\n obj.port\n assert obj.out is outfile\n\n\ndef test_tty_sniffer_connect(mocker):\n outfile = io.StringIO()\n port = mocker.patch(\"serial.Serial\")\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n obj.connect()\n port.assert_called_once()\n assert obj.port == port()\n\n\ndef test_tty_sniffer_config(mocker):\n outfile = io.StringIO()\n port = mocker.patch(\"serial.Serial\")\n handle = mocker.MagicMock()\n handle.readline.side_effect = [b\"foobar\", b\"Iface 38774\"] + (3 * [b\"> \"])\n port.return_value = handle\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n obj.connect()\n obj.config()\n print(port.write.args)\n handle.readline.side_effect = None\n handle.readline.return_value = b\"\"\n with pytest.raises(AssertionError):\n obj.config()\n\n\ndef test_tty_sniffer_generate_outfile(mocker):\n outfile = io.StringIO()\n port = mocker.patch(\"serial.Serial\")\n mocker.patch(\"time.time\", return_value=12345.6789)\n handle = mocker.MagicMock()\n handle.readline.side_effect = [\n b\"rftest-rx --- len 04\\n\",\n b\"\",\n b\"01 .. 
02 03 04\\n\",\n b\"rftest-rx --- len 03\\n\",\n b\"05 06\\n\",\n b\"07 08\\n\",\n serial.SerialException,\n ]\n port.return_value = handle\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n obj.connect()\n try:\n obj.generate_outfile()\n except serial.SerialException:\n pass\n assert outfile.getvalue() == \"12345.6789:4:01020304\\n12345.6789:3:05060708\\n\"\n\n\ndef test_tty_sniffer_run1(mocker):\n outfile = io.StringIO()\n mocker.patch.object(sniffer.TTYRIOTSniffer, \"connect\")\n mocker.patch.object(sniffer.TTYRIOTSniffer, \"config\")\n generate_outfile = mocker.patch.object(sniffer.TTYRIOTSniffer, \"generate_outfile\")\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n obj.run()\n generate_outfile.assert_called_once()\n\n\ndef test_tty_sniffer_run2(mocker):\n outfile = io.StringIO()\n mocker.patch(\"serial.Serial\")\n mocker.patch(\"time.sleep\")\n mocker.patch.object(\n sniffer.TTYRIOTSniffer, \"config\", side_effect=[serial.SerialException, None]\n )\n generate_outfile = mocker.patch.object(sniffer.TTYRIOTSniffer, \"generate_outfile\")\n obj = sniffer.TTYRIOTSniffer(\n \"/dev/ttyUSB1\", channel=13, baudrate=12345, outfile=outfile\n )\n obj.run()\n generate_outfile.assert_called_once()\n","repo_name":"miri64/RIOT_playground","sub_path":"lndw22_doc/websniffer/tests/test_sniffer.py","file_name":"test_sniffer.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70199381997","text":"import torch\nfrom mmcv.ops import nms_match\nimport numpy as np\nfrom mmdet.core.bbox.builder import BBOX_SAMPLERS\nfrom mmdet.core.bbox.transforms import bbox2roi\nfrom mmdet.core.bbox.samplers.base_sampler import BaseSampler\nfrom mmdet.core.bbox.samplers.sampling_result import SamplingResult\nimport torch.nn.functional as F\n\n\n@BBOX_SAMPLERS.register_module()\nclass GNRSampler(BaseSampler):\n r\"\"\"Importance-based Sample Reweighting (ISR_N), described in `Prime Sample\n Attention in Object Detection `_.\n\n Score hierarchical local rank (HLR) differentiates with RandomSampler in\n negative part. 
It firstly computes Score-HLR in a two-step way,\n then linearly maps score hlr to the loss weights.\n\n Args:\n num (int): Total number of sampled RoIs.\n pos_fraction (float): Fraction of positive samples.\n context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.\n neg_pos_ub (int): Upper bound of the ratio of num negative to num\n positive, -1 means no upper bound.\n add_gt_as_proposals (bool): Whether to add ground truth as proposals.\n k (float): Power of the non-linear mapping.\n bias (float): Shift of the non-linear mapping.\n score_thr (float): Minimum score that a negative sample is to be\n considered as valid bbox.\n \"\"\"\n\n def __init__(self,\n num,\n pos_fraction,\n context,\n floor_thr=0.1,\n num_bins=3,\n neg_pos_ub=-1,\n add_gt_as_proposals=True,\n alpha=0.25,\n gamma=20.0,\n iou_thr=0.5,\n **kwargs):\n super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)\n\n self.floor_thr = floor_thr\n self.num_bins = num_bins\n self.context = context\n self.alpha = alpha\n self.gamma = gamma\n self.iou_thr = iou_thr\n\n\n @staticmethod\n def random_choice(gallery, num):\n \"\"\"Randomly select some elements from the gallery.\n\n If `gallery` is a Tensor, the returned indices will be a Tensor;\n If `gallery` is a ndarray or list, the returned indices will be a\n ndarray.\n\n Args:\n gallery (Tensor | ndarray | list): indices pool.\n num (int): expected sample num.\n\n Returns:\n Tensor or ndarray: sampled indices.\n \"\"\"\n assert len(gallery) >= num\n\n is_tensor = isinstance(gallery, torch.Tensor)\n if not is_tensor:\n if torch.cuda.is_available():\n #device = torch.cuda.current_device()\n device = torch.cuda.device_count() - 1\n else:\n device = 'cpu'\n gallery = torch.tensor(gallery, dtype=torch.long, device=device)\n perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]\n rand_inds = gallery[perm]\n if not is_tensor:\n rand_inds = rand_inds.cpu().numpy()\n return rand_inds\n\n def normalize(self, feats, axis=-1):\n normal_feats = 20. 
* feats / (torch.norm(feats, 2, axis, keepdim=True).expand_as(feats) + 1e-12)\n return normal_feats\n\n def sample_via_interval(self, max_overlaps, full_set, num_expected):\n \"\"\"Sample according to the iou interval.\n\n Args:\n max_overlaps (torch.Tensor): IoU between bounding boxes and ground\n truth boxes.\n full_set (set(int)): A full set of indices of boxes.\n num_expected (int): Number of expected samples.\n\n Returns:\n np.ndarray: Indices of samples\n \"\"\"\n max_iou = self.iou_thr\n iou_interval = max_iou / self.num_bins\n per_num_expected = int(num_expected / self.num_bins)\n\n sampled_inds = []\n for i in range(self.num_bins):\n start_iou = i * iou_interval # problem\n end_iou = (i + 1) * iou_interval\n tmp_set = set(\n np.where(\n np.logical_and(max_overlaps >= start_iou,\n max_overlaps < end_iou))[0])\n tmp_inds = list(tmp_set & full_set)\n if len(tmp_inds) > per_num_expected:\n tmp_sampled_set = self.random_choice(tmp_inds,\n per_num_expected)\n else:\n tmp_sampled_set = np.array(tmp_inds, dtype=np.int)\n sampled_inds.append(tmp_sampled_set)\n\n sampled_inds_list = sampled_inds\n sampled_inds = np.concatenate(sampled_inds)\n if len(sampled_inds) < num_expected:\n num_extra = num_expected - len(sampled_inds)\n extra_inds = np.array(list(full_set - set(sampled_inds)))\n if len(extra_inds) > num_extra:\n extra_inds = self.random_choice(extra_inds, num_extra)\n sampled_inds_list.append(extra_inds)\n sampled_inds = np.concatenate([sampled_inds, extra_inds])\n sampled_inds_confusion = np.concatenate([sampled_inds_list[0], sampled_inds_list[-1]])\n else:\n sampled_inds_confusion = sampled_inds_list[0]\n\n return sampled_inds, sampled_inds_confusion\n\n def sample_confusion_via_iou(self, max_overlaps, full_set):\n \"\"\"Sample the low-IoU (confusion) negatives below the floor threshold.\n\n Args:\n max_overlaps (torch.Tensor): IoU between bounding boxes and ground\n truth boxes.\n full_set (set(int)): A full set of indices of boxes.\n\n Returns:\n np.ndarray: Indices of samples\n \"\"\"\n iou_thr = self.floor_thr\n start_iou = 0\n sampled_set = set(np.where(\n np.logical_and(max_overlaps >= start_iou, max_overlaps <= iou_thr))[0])\n confusion_sampled_inds = list(sampled_set & full_set)\n return np.array(confusion_sampled_inds)\n\n def _sample_pos(self, assign_result, num_expected, **kwargs):\n \"\"\"Randomly sample some positive samples.\"\"\"\n pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()\n if pos_inds.numel() <= num_expected:\n return pos_inds\n else:\n return self.random_choice(pos_inds, num_expected)\n\n def _sample_neg(self,\n assign_result,\n num_expected,\n bboxes,\n query_feats,\n support_proto,\n feats=None,\n img_meta=None,\n **kwargs):\n \"\"\"Sample negative samples. 
\"\"\"\n neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()\n num_neg = neg_inds.size(0)\n if num_neg == 0:\n return neg_inds, None\n\n max_overlaps = assign_result.max_overlaps.cpu().numpy()\n # balance sampling for negative samples\n neg_set = set(neg_inds.cpu().numpy())\n iou_sampling_neg_inds = list(neg_set)\n\n if len(iou_sampling_neg_inds) > num_expected:\n if self.num_bins >= 2:\n iou_sampled_inds, confusion_sampled_inds = self.sample_via_interval(\n max_overlaps, neg_set, num_expected)\n else:\n iou_sampled_inds = self.random_choice(iou_sampling_neg_inds, num_expected)\n confusion_sampled_inds = self.sample_confusion_via_iou(max_overlaps, set(iou_sampled_inds))\n else:\n iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int)\n confusion_sampled_inds = self.sample_confusion_via_iou(max_overlaps, set(iou_sampled_inds))\n\n # if len(iou_sampled_inds) < num_expected:\n # num_extra = num_expected - len(iou_sampled_inds)\n # extra_inds = np.array(list(neg_set - set(iou_sampled_inds)))\n # if len(extra_inds) > num_extra:\n # extra_inds = self.random_choice(extra_inds, num_extra)\n # sampled_inds = np.concatenate((iou_sampled_inds, extra_inds))\n # else:\n sampled_inds = iou_sampled_inds\n\n inds = np.argsort(sampled_inds)\n aso = sampled_inds[inds]\n bina = np.searchsorted(aso[:-1], confusion_sampled_inds)\n inds_in_confusion_sampled_inds = np.where(confusion_sampled_inds == aso[bina])[0]\n inds_in_sampled_inds = inds[bina[inds_in_confusion_sampled_inds]]\n confusion_sample_index_tensor = torch.LongTensor(inds_in_sampled_inds).to(assign_result.gt_inds.device)\n sampled_inds = torch.from_numpy(sampled_inds).long().to(assign_result.gt_inds.device)\n confusion_sampled_inds = torch.from_numpy(confusion_sampled_inds).long().to(assign_result.gt_inds.device)\n # try:\n # confusion_sampled_inds = torch.from_numpy(confusion_sampled_inds).long().to(assign_result.gt_inds.device)\n # except:\n # print('num_neg is {}'.format(num_neg))\n # print('the len of gt_inds is {}'.format(len(assign_result.gt_inds)))\n # print(len(confusion_sampled_inds))\n\n if len(confusion_sampled_inds):\n with torch.no_grad():\n confusion_neg_bboxes = bboxes[confusion_sampled_inds]\n confusion_neg_rois = bbox2roi([confusion_neg_bboxes])\n confusion_neg_feats = self.context.extract_roi_feat([query_feats], confusion_neg_rois)\n if len(confusion_neg_feats.size()) > 2:\n confusion_neg_feats = self.context.maxpool(confusion_neg_feats).view(confusion_neg_feats.shape[0], confusion_neg_feats.shape[1])\n support_proto = support_proto.view(support_proto.shape[0], support_proto.shape[1])\n confusion_neg_feats = self.normalize(confusion_neg_feats)\n support_proto = self.normalize(support_proto)\n #cls_score_u = torch.cdist(confusion_neg_feats_nor, support_proto_nor, p=2.0)\n cls_score_cos = F.cosine_similarity(confusion_neg_feats.unsqueeze(1), support_proto.unsqueeze(0), dim=-1)\n max_score, argmax_score = cls_score_cos[:, :-1].max(-1)\n #max_score, argmax_score = cls_score_cos.softmax(-1)[:, :-1].max(-1)\n neg_label_weights = max_score.new_ones(len(sampled_inds))\n confusion_neg_label_weights = max_score.new_ones(len(max_score))\n confusion_neg_label_weights = self.alpha + (1 - self.alpha) * \\\n torch.exp(torch.exp(-self.gamma * torch.sub(confusion_neg_label_weights, max_score)))\n neg_label_weights_new = neg_label_weights.scatter(0, confusion_sample_index_tensor, confusion_neg_label_weights)\n else:\n neg_label_weights_new = sampled_inds.new_ones(len(sampled_inds))\n\n return sampled_inds, 
neg_label_weights_new\n\n\n def sample(self,\n assign_result,\n bboxes,\n gt_bboxes,\n query_feats,\n support_proto,\n gt_labels=None,\n img_meta=None,\n **kwargs):\n \"\"\"Sample positive and negative bboxes.\n\n This is a simple implementation of bbox sampling given candidates,\n assigning results and ground truth bboxes.\n\n Args:\n assign_result (:obj:`AssignResult`): Bbox assigning results.\n bboxes (Tensor): Boxes to be sampled from.\n gt_bboxes (Tensor): Ground truth bboxes.\n gt_labels (Tensor, optional): Class labels of ground truth bboxes.\n\n Returns:\n tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative\n label weights.\n \"\"\"\n bboxes = bboxes[:, :4]\n\n gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)\n if self.add_gt_as_proposals:\n bboxes = torch.cat([gt_bboxes, bboxes], dim=0)\n assign_result.add_gt_(gt_labels)\n gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n gt_flags = torch.cat([gt_ones, gt_flags])\n\n num_expected_pos = int(self.num * self.pos_fraction)\n pos_inds = self.pos_sampler._sample_pos(\n assign_result, num_expected_pos, bboxes=bboxes, **kwargs)\n num_sampled_pos = pos_inds.numel()\n num_expected_neg = self.num - num_sampled_pos\n if self.neg_pos_ub >= 0:\n _pos = max(1, num_sampled_pos)\n neg_upper_bound = int(self.neg_pos_ub * _pos)\n if num_expected_neg > neg_upper_bound:\n num_expected_neg = neg_upper_bound\n neg_inds, neg_label_weights = self.neg_sampler._sample_neg(\n assign_result,\n num_expected_neg,\n bboxes,\n query_feats,\n support_proto,\n img_meta=img_meta,\n **kwargs)\n\n return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n assign_result, gt_flags), neg_label_weights\n\n\n\n","repo_name":"Ybowei/UNP","sub_path":"mmfewshot/detection/models/utils/ngr_sampler.py","file_name":"ngr_sampler.py","file_ext":"py","file_size_in_byte":12967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1895844064","text":"import os\nimport sys\n\nfrom spack.compiler import Compiler, UnsupportedCompilerFlag\nfrom spack.version import Version\n\n\nclass Intel(Compiler):\n # Subclasses use possible names of C compiler\n cc_names = [\"icc\"]\n\n # Subclasses use possible names of C++ compiler\n cxx_names = [\"icpc\"]\n\n # Subclasses use possible names of Fortran 77 compiler\n f77_names = [\"ifort\"]\n\n # Subclasses use possible names of Fortran 90 compiler\n fc_names = [\"ifort\"]\n\n # Named wrapper links within build_env_path\n link_paths = {\n \"cc\": os.path.join(\"intel\", \"icc\"),\n \"cxx\": os.path.join(\"intel\", \"icpc\"),\n \"f77\": os.path.join(\"intel\", \"ifort\"),\n \"fc\": os.path.join(\"intel\", \"ifort\"),\n }\n\n PrgEnv = \"PrgEnv-intel\"\n PrgEnv_compiler = \"intel\"\n\n if sys.platform == \"win32\":\n version_argument = \"/QV\"\n else:\n version_argument = \"--version\"\n\n if sys.platform == \"win32\":\n version_regex = r\"([1-9][0-9]*\\.[0-9]*\\.[0-9]*)\"\n else:\n version_regex = r\"\\((?:IFORT|ICC)\\) ([^ ]+)\"\n\n @property\n def verbose_flag(self):\n return \"-v\"\n\n required_libs = [\"libirc\", \"libifcore\", \"libifcoremt\", \"libirng\"]\n\n @property\n def debug_flags(self):\n return [\"-debug\", \"-g\", \"-g0\", \"-g1\", \"-g2\", \"-g3\"]\n\n @property\n def opt_flags(self):\n return [\"-O\", \"-O0\", \"-O1\", \"-O2\", \"-O3\", \"-Ofast\", \"-Os\"]\n\n @property\n def openmp_flag(self):\n if self.real_version < Version(\"16.0\"):\n return \"-openmp\"\n else:\n return \"-qopenmp\"\n\n @property\n def 
cxx11_flag(self):\n if self.real_version < Version(\"11.1\"):\n raise UnsupportedCompilerFlag(self, \"the C++11 standard\", \"cxx11_flag\", \"< 11.1\")\n\n elif self.real_version < Version(\"13\"):\n return \"-std=c++0x\"\n else:\n return \"-std=c++11\"\n\n @property\n def cxx14_flag(self):\n # Adapted from CMake's Intel-CXX rules.\n if self.real_version < Version(\"15\"):\n raise UnsupportedCompilerFlag(self, \"the C++14 standard\", \"cxx14_flag\", \"< 15\")\n elif self.real_version < Version(\"15.0.2\"):\n return \"-std=c++1y\"\n else:\n return \"-std=c++14\"\n\n @property\n def cxx17_flag(self):\n # https://www.intel.com/content/www/us/en/developer/articles/news/c17-features-supported-by-c-compiler.html\n if self.real_version < Version(\"19\"):\n raise UnsupportedCompilerFlag(self, \"the C++17 standard\", \"cxx17_flag\", \"< 19\")\n else:\n return \"-std=c++17\"\n\n @property\n def c99_flag(self):\n if self.real_version < Version(\"12\"):\n raise UnsupportedCompilerFlag(self, \"the C99 standard\", \"c99_flag\", \"< 12\")\n else:\n return \"-std=c99\"\n\n @property\n def c11_flag(self):\n if self.real_version < Version(\"16\"):\n raise UnsupportedCompilerFlag(self, \"the C11 standard\", \"c11_flag\", \"< 16\")\n else:\n return \"-std=c1x\"\n\n @property\n def cc_pic_flag(self):\n return \"-fPIC\"\n\n @property\n def cxx_pic_flag(self):\n return \"-fPIC\"\n\n @property\n def f77_pic_flag(self):\n return \"-fPIC\"\n\n @property\n def fc_pic_flag(self):\n return \"-fPIC\"\n\n @property\n def stdcxx_libs(self):\n return (\"-cxxlib\",)\n","repo_name":"spack/spack","sub_path":"lib/spack/spack/compilers/intel.py","file_name":"intel.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"14617709334","text":"from config import *\nfrom apis.api_createAppt import *\nimport pytest\n\nclass Test_payment_creditCard():\n '''使用credit card支付'''\n\n def setup_class(self) -> None:\n self.s = requests.session()\n\n def teardown_class(self) -> None:\n self.s.close()\n\n def test_01_acreateAppt(self,set_global_data):\n groomingId = create_appt(header)\n set_global_data(\"groomingId\", groomingId)\n pup = grooming_appointment_detail_pup(header, groomingId).json()\n invoiceId = pup[\"data\"][\"invoiceId\"]\n set_global_data(\"invoiceId\", invoiceId)\n\n invoiceDetail = grooming_invoice_order_detail(header, invoiceId).json()\n amount = invoiceDetail[\"remainAmount\"]\n set_global_data(\"amount\", amount)\n\n oriAccountBalance=payment_stripe_getConnectedInfo(header)\n set_global_data(\"oriAccountBalance\", oriAccountBalance)\n\n def test_02_cc_successful(self, get_global_data):\n invoiceId = get_global_data(\"invoiceId\")\n amount = get_global_data(\"amount\")\n\n stripePaymentMethod = 1\n processingFee = payment_payment_processingFee(header, amount, stripePaymentMethod).json()[\"processingFee\"]\n paymentAmount = amount + processingFee\n payload = {\n \"amount\": amount,\n \"tipsAmount\": 0,\n \"cardNumber\": \"1111\",\n \"cardType\": \"Visa\",\n \"customerId\": customerId,\n \"description\": \"\",\n \"expMonth\": \"11\",\n \"expYear\": \"2024\",\n \"invoiceId\": invoiceId,\n \"methodId\": 1,\n \"module\": \"grooming\",\n \"paidBy\": customerName,\n \"signature\": \"\",\n \"staffId\": staffId,\n \"stripePaymentMethodId\": \"\",\n \"stripePaymentMethod\": 1,\n \"isOnline\": False,\n \"chargeToken\": \"tok_visa\",\n \"saveCard\": False,\n \"isDeposit\": 0,\n \"addProcessingFee\": True\n }\n paymentResp = 
payment_payment_createAndConfirm(header, payload, businessId).json()\n print(paymentResp)\n\n # poll the invoice status up to 5 times\n for i in range(1, 6):\n resp = grooming_invoice_order_detail(header, invoiceId).json()\n if resp[\"status\"] == 'completed':\n print(i, invoiceId, \":payment success!\")\n break\n if resp[\"status\"] == 'failed':\n print(i, invoiceId, \":payment failed!\")\n break\n elif i == 5 and resp[\"status\"] == 'processing':\n print(i, invoiceId, \"Something wrong!\")\n # check the paid amount and status returned on the invoice\n assert resp[\"paidAmount\"] == paymentAmount\n assert resp[\"convenienceFee\"] == processingFee\n assert resp[\"status\"] == \"completed\"\n\n # check whether the appointment was finished automatically\n pup = grooming_appointment_detail_pup(header, get_global_data(\"groomingId\")).json()\n assert pup[\"data\"][\"status\"] == 3 #finished\n\n\n # fetch this payment's paymentId and applicationFee\n clientPayments=payment_payment_list(header,customerId).json()\n applicationFee=clientPayments[\"data\"][\"paymentList\"][0][\"processingFee\"]\n oriAccountBalance=get_global_data(\"oriAccountBalance\")\n accountBalance=oriAccountBalance+paymentAmount-applicationFee\n accBalance='%.2f'%accountBalance\n newAccountBalance=payment_stripe_getConnectedInfo(header)\n newAccBalance = '%.2f' % newAccountBalance\n\n # try:\n # assert accBalance == newAccBalance+1\n # except:\n # print(\"Account balance error!\")\n # assert on the account balance\n assert accBalance == newAccBalance\nif __name__==\"__main__\":\n pytest.main(['-vs','test_payment_creditCard.py'])\n","repo_name":"yyangxiao/APIauto_payment_1","sub_path":"TestCase/test_payment_creditCard.py","file_name":"test_payment_creditCard.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"20893197039","text":"from collections import deque\nn,q = map(int,input().split())\nn = 2**n\narr = []\nfor i in range(n):\n arr.append(list(map(int,input().split())))\nskills = list(map(int,input().split()))\nddx = [-1, 0, 1, 0]\nddy = [0, 1, 0, -1]\ndef do_magic(s,arr):\n r = 2**s\n new_arr = [[0]*n for i in range(n)]\n for x in range(0,n,r):\n for y in range(0,n,r):\n for j in range(r):\n for k in range(r):\n new_arr[x+k][y+r-1-j] = arr[x+j][y+k]\n return new_arr\n\ndef calc():\n new_arr = [[0]*n for i in range(n)]\n for i in range(n):\n for j in range(n):\n if arr[i][j] == 0:\n continue\n cnt = 0\n for k in range(4):\n nx = i + ddx[k]\n ny = j + ddy[k]\n if 0 <= nx < n and 0 <= ny < n and arr[nx][ny]: # cell adjacent to ice\n cnt += 1\n if cnt < 3:\n new_arr[i][j] = 1\n\n for i in range(n):\n for j in range(n):\n if new_arr[i][j]:\n arr[i][j] -= 1\n\ndef bfs():\n visited = [[0]*(n) for i in range(n)]\n rtn = 0\n queue = deque([])\n for i in range(n):\n for j in range(n):\n if not visited[i][j] and arr[i][j]:\n queue.append((i,j))\n cnt = 0\n while queue:\n x,y = queue.popleft()\n for k in range(4):\n nx = x + ddx[k]\n ny = y + ddy[k]\n if 0 <= nx int:\n recorder = [1] * len(nums)\n for i_index, i in enumerate(nums):\n for j_index, j in enumerate(nums[:i_index]):\n if j < i:\n recorder[i_index] = max(recorder[j_index] + 1, recorder[i_index])\n return (max(recorder))\n# @lc code=end\n\n","repo_name":"ChinYoung/leetcode-solution","sub_path":"300.最长递增子序列.py","file_name":"300.最长递增子序列.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"23119285018","text":"import pygame\nimport random\n\n# Initialize Pygame\npygame.init()\n\n# Set screen dimensions\nWIDTH = 640\nHEIGHT = 480\n\n# Set colors\nBLACK = (0, 0, 0)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n# Set snake properties\nSNAKE_SIZE = 20\nSNAKE_SPEED = 10\n\n# Create the game window\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Snake Game\")\n\nclock = pygame.time.Clock()\n\n# Define a function to display messages on the screen\ndef display_message(text, color, size, x, y):\n font = pygame.font.SysFont(None, size)\n message = font.render(text, True, color)\n screen.blit(message, (x, y))\n\n# Define the main game loop\ndef game_loop():\n game_over = False\n game_quit = False\n\n # Set initial snake position\n snake_x = WIDTH / 2\n snake_y = HEIGHT / 2\n\n # Set initial velocity\n velocity_x = 0\n velocity_y = 0\n\n # Create the snake body\n snake_body = []\n snake_length = 1\n\n # Set initial food position\n food_x = round(random.randrange(0, WIDTH - SNAKE_SIZE) / 20) * 20\n food_y = round(random.randrange(0, HEIGHT - SNAKE_SIZE) / 20) * 20\n\n while not game_quit:\n while game_over:\n screen.fill(BLACK)\n display_message(\"Game Over! Press Q-Quit or C-Play Again\", RED, 40, WIDTH / 6, HEIGHT / 3)\n pygame.display.update()\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n game_quit = True\n game_over = False\n if event.key == pygame.K_c:\n game_loop()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n game_quit = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n velocity_x = -SNAKE_SPEED\n velocity_y = 0\n if event.key == pygame.K_RIGHT:\n velocity_x = SNAKE_SPEED\n velocity_y = 0\n if event.key == pygame.K_UP:\n velocity_y = -SNAKE_SPEED\n velocity_x = 0\n if event.key == pygame.K_DOWN:\n velocity_y = SNAKE_SPEED\n velocity_x = 0\n\n # Check for boundary collision and end the game when the snake leaves the screen\n if snake_x >= WIDTH or snake_x < 0 or snake_y >= HEIGHT or snake_y < 0:\n game_over = True\n\n # Update the snake position\n snake_x += velocity_x\n snake_y += velocity_y\n\n # Draw the game window\n screen.fill(BLACK)\n pygame.draw.rect(screen, GREEN, [food_x, food_y, SNAKE_SIZE, SNAKE_SIZE])\n\n # Update the snake body\n snake_head = []\n snake_head.append(snake_x)\n snake_head.append(snake_y)\n snake_body.append(snake_head)\n\n if len(snake_body) > snake_length:\n del snake_body[0]\n\n for segment in snake_body[:-1]:\n if segment == snake_head:\n game_over = True\n\n for segment in snake_body:\n pygame.draw.rect(screen, RED, [segment[0], segment[1], SNAKE_SIZE, SNAKE_SIZE])\n\n pygame.display.update()\n\n # Check for food collision\n if snake_x == food_x and snake_y == food_y:\n food_x = round(random.randrange(0, WIDTH - SNAKE_SIZE) / 20) * 20\n food_y = round(random.randrange(0, HEIGHT - SNAKE_SIZE) / 20) * 20\n snake_length += 1\n\n clock.tick(15)\n\n pygame.quit()\n\n# Start the game loop\ngame_loop()\n","repo_name":"AdithyaPatel2000/head1","sub_path":"Asignments/game_applicaiton.py","file_name":"game_applicaiton.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"1463453468","text":"import csv\nimport os\n\nimport pandas as pd\n\nfrom exhaustive.exhaustive import master_phil\nfrom exhaustive.exhaustive import run as exhaustive\nfrom plot import scatter_plot\nfrom exhaustive.utils import get_minimum_fofc\nfrom exhaustive.utils import u_iso_to_b_fac\n\nparams = master_phil.extract()\n\n\ndef list_files(directory, extension):\n return [f for f in os.listdir(directory) if f.endswith(\".\" + 
extension)]\n\n\ndef parse_repeat_soak_csv(params):\n input_df = pd.read_csv(params.input.csv)\n for index, row in input_df.iterrows():\n yield row[\"CrystalName\"], row[\"RefinementPDB_latest\"], row[\n \"RefinementMTZ_latest\"\n ]\n\n\n# example for a single dataset\n\n# params.input.pdb = \"/dls/labxchem/data/2018/lb18145-55/processing/analysis/initial_model/NUDT22A-x0927/refine.pdb\"\n# params.input.mtz = \"/dls/labxchem/data/2018/lb18145-55/processing/analysis/initial_model/NUDT22A-x0927/refine.mtz\"\n# params.input.xtal_name = \"NUDT22A-x0927\"\n# params.output.out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/\" \\\n# \"exhaustive_search_data/test_occ_group_states\"\n# params.output.log_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/\" \\\n# \"exhaustive_search_data/test_occ_group_states/logs\"\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"NUDT22A-x0927_test_occ_group.csv\")\n\n# params.input.pdb = \"/dls/labxchem/data/2016/lb13385-61/processing/analysis/initial_model/FALZA-x0085/refine.pdb\"\n# params.input.mtz = \"/dls/labxchem/data/2016/lb13385-61/processing/analysis/initial_model/FALZA-x0085/refine.mtz\"\n# params.input.xtal_name = \"FALZA-x0085\"\n# params.output.out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/\" \\\n# \"exhaustive_search_data/FALZA_exhaus_18_09_14\"\n# params.output.log_dir = os.path.join(params.output.out_dir, \"logs\")\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"exhaustive_search.csv\")\n\n# Running exhaustive search for covalent ratios/ titration series\n\n# start_xtal_num = 6192\n# end_xtal_num = 6251\n# #in_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/covalent_ratios\"\n# # in_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/covalent_1812_test\"\n# qsub = False\n\n# Running exhaustive search for covalent ratios dose experiements\n\n# start_xtal_num = 6192\n# end_xtal_num = 6251\n# out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/covalent_ratios_dose\"\n# prefix = \"NUDT7A-x\"\nqsub = False\n\n# # copy data to new folder\n\n# if not os.path.exists(out_dir):\n# os.mkdir(out_dir)\n# os.system('cp -a {}/. 
{}'.format(in_dir,out_dir))\n\n# Single dataset\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"exhaustive_search.csv\")\n# exhaustive(params=params)\n# scatter_plot(params.exhaustive.output.csv_name)\n# plot_protein_region(params)\n\n# FALZA exhaustive search\n\n# out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/FALZA_exhaus_18_09_18_step_0_01_low_U_iso_0/\"\n# #loop_dir= \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/repeat_soaks/2018-05-28/NUDT22_from_occ_group_with_refinement/\"\n# loop_dir = \"/dls/labxchem/data/2016/lb13385-61/processing/analysis/initial_model\"\n# if not os.path.exists(out_dir):\n# os.mkdir(out_dir)\n\n# xtals=['FALZA-x0079','FALZA-x0085','FALZA-x0172','FALZA-x0177','FALZA-x0271','FALZA-x0309','FALZA-x0402','FALZA-x0438']\n\n# for num in range(start_xtal_num, end_xtal_num + 1):\n# xtal_name = prefix + \"{0:0>4}\".format(num)\n# xtals.append(xtal_name)\n#\n# print(xtals)\n\n\nparams = master_phil.extract()\n\nparams.settings.processes = 14\nparams.exhaustive.options.step = 0.01\nparams.exhaustive.options.convex_hull = False\nparams.exhaustive.options.per_residue = True\nparams.exhaustive.options.ligand_grid_points = False\nparams.exhaustive.options.generate_mtz = False\nparams.exhaustive.options.lower_u_iso = 0.00\n\n############## DCP2B ##################################################################\n\n# out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/DCP2B_18_09_20_exhaus\"\n# params.input.database_path = \"/dls/labxchem/data/2016/lb13385-64/processing/database/soakDBDataFile.sqlite\"\n#\n# if not os.path.exists(out_dir):\n# logging.info('Creating output directory {}'.format(out_dir))\n# os.mkdir(out_dir)\n# else:\n# logging.info('Output directory {} exists and is being used'.format(params.output.out_dir))\n#\n# logging.info('Looping over all files that are \\'in refinement\\' '\n# 'or better in the supplied datafile: \\n {}'.format(params.input.database_path))\n#\n# csv_paths = []\n# for xtal_name, pdb, mtz in get_xtals_from_db(params,\n# refinement_outcomes=\"'4 - CompChem ready', \"\n# \"'5 - Deposition ready',\"\n# \"'6 - Deposited'\" ):\n#\n# logging.info(xtal_name)\n#\n# assert os.path.exists(pdb), 'PDB File does not exist: {}'.format(pdb)\n# assert os.path.exists(mtz), 'MTZ File does not exist: {}'.format(mtz)\n#\n# params.input.xtal_name = xtal_name\n# params.input.pdb = pdb\n# params.input.mtz = mtz\n# params.exhaustive.output.csv_name = \"exhaustive_search.csv\"\n# params.output.out_dir = os.path.join(out_dir, xtal_name)\n#\n# if not os.path.exists(params.output.out_dir):\n# os.mkdir(os.path.join(params.output.out_dir))\n#\n# os.chdir(os.path.join(params.output.out_dir))\n#\n# # try:\n# # exhaustive(params)\n# # except UnboundLocalError:\n# # logging.info(\"Skipping onto the next crystal\")\n# # continue\n#\n#\n# #scatter_plot(os.path.join(params.output.out_dir,params.exhaustive.output.csv_name))\n#\n# logging.info('Completed: {}'.format(xtal_name))\n# csv_paths.append(os.path.join(params.output.out_dir,\n# params.exhaustive.output.csv_name))\n\n######################## NUDT22 #########################################\n\nin_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/NUDT22_repeat_soaks\"\nloop_dir = in_dir\nout_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/NUDT22_repeat_soaks\"\nprefix = \"NUDT22A-x\"\n\ncompound_dirs = [\n os.path.join(loop_dir, compound_dir)\n for compound_dir in 
os.listdir(loop_dir)\n if os.path.isdir(os.path.join(loop_dir, compound_dir))\n]\n\nfor compound_dir in compound_dirs:\n\n compound = os.path.basename(compound_dir)\n if not compound.startswith(\"FMOP\"):\n continue\n\n xtal_dirs = [\n os.path.join(compound_dir, xtal_dir)\n for xtal_dir in os.listdir(compound_dir)\n if os.path.isdir(os.path.join(compound_dir, xtal_dir))\n ]\n\n csv_paths = []\n for xtal_dir in xtal_dirs:\n\n xtal_name = os.path.basename(xtal_dir)\n print(xtal_name)\n\n if xtal_name in xtal_dir:\n\n params.input.xtal_name = xtal_name\n params.input.pdb = os.path.join(xtal_dir, \"refine.pdb\")\n params.input.mtz = os.path.join(xtal_dir, \"refine.mtz\")\n params.output.out_dir = os.path.join(compound_dir, xtal_name)\n\n if not os.path.exists(params.output.out_dir):\n os.mkdir(params.output.out_dir)\n\n if not os.path.exists(params.input.pdb):\n print(\"input pdb doesn't exist: {}\".format(params.input.pdb))\n continue\n\n if not os.path.exists(params.input.mtz):\n print(\"input mtz doesn't exsit: {}\".format(params.input.mtz))\n continue\n\n params.exhaustive.output.csv_name = os.path.join(\n params.output.out_dir, \"exhaustive_search.csv\"\n )\n\n csv_paths.append(params.exhaustive.output.csv_name)\n\n if os.path.exists(params.exhaustive.output.csv_name):\n continue\n\n exhaustive(params=params)\n scatter_plot(params.exhaustive.output.csv_name)\n\n with open(os.path.join(compound_dir, \"es_minima.csv\"), \"wb\") as minima_csv:\n\n minima_writer = csv.writer(minima_csv, delimiter=\",\")\n\n for path in csv_paths:\n occ, u_iso, fofc = get_minimum_fofc(path)\n b_fac = u_iso_to_b_fac(u_iso)\n\n xtal_name = os.path.split(os.path.split(path)[0])[1]\n\n minima_writer.writerow([xtal_name, occ, b_fac, fofc])\n\n#### Covalent ratios ###################\n\n# in_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/covalent_ratios\"\n# loop_dir = in_dir\n# out_dir = \"/dls/science/groups/i04-1/elliot-dev/Work/exhaustive_search_data/covalent_ratios\"\n# prefix = \"NUDT7A-x\"\n#\n# xtal_dirs = [os.path.join(loop_dir, xtal_dir) for xtal_dir in os.listdir(loop_dir)\n# if os.path.isdir(os.path.join(loop_dir, xtal_dir))\n# and not xtal_dir.endswith(\"LIG_CYS\")]\n#\n# csv_paths = []\n#\n# for xtal_dir in xtal_dirs:\n#\n# xtal_name = os.path.basename(xtal_dir)\n# print(xtal_name)\n#\n#\n# if xtal_name in xtal_dir:\n#\n# params.input.xtal_name = xtal_name\n#\n# print(xtal_dir)\n# print(xtal_name)\n#\n# compounds = list_files(xtal_dir,\"cif\")\n# compound_name = (list_files(xtal_dir,\"cif\")[0]).split(\".\")[0]\n#\n# params.input.pdb = os.path.join(xtal_dir,\"refine.pdb\")\n# params.input.mtz = os.path.join(xtal_dir,\"refine.mtz\")\n# params.output.out_dir = os.path.join(out_dir, compound_name, xtal_name)\n#\n# if not os.path.exists(os.path.join(out_dir, compound_name)):\n# os.mkdir(os.path.join(out_dir, compound_name))\n#\n# if not os.path.exists(params.output.out_dir):\n# os.mkdir(params.output.out_dir)\n#\n# if not os.path.exists(params.input.pdb):\n# print(\"input pdb doesn't exist: {}\".format(params.input.pdb))\n# continue\n# if not os.path.exists(params.input.mtz):\n# print(\"input mtz doesn't exsit: {}\".format(params.input.mtz))\n# continue\n#\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"exhaustive_search.csv\")\n#\n# csv_paths.append(params.exhaustive.output.csv_name)\n#\n# if os.path.exists(params.exhaustive.output.csv_name):\n# continue\n#\n# exhaustive(params=params)\n# scatter_plot(params.exhaustive.output.csv_name)\n#\n#\n#\n# 
with open(os.path.join(out_dir,\"es_minima.csv\"),'wb') as minima_csv:\n#\n# minima_writer = csv.writer(minima_csv, delimiter=',')\n#\n# for path in csv_paths:\n# occ, u_iso, fofc = get_minimum_fofc(path)\n# b_fac = u_iso_to_b_fac(u_iso)\n#\n# xtal_name = os.path.split(os.path.split(path)[0])[1]\n#\n# minima_writer.writerow([xtal_name, occ, b_fac, fofc])\n\n# for xtal_name in xtals:\n#\n# params.input.xtal_name = xtal_name\n# params.input.pdb = os.path.join(os.path.join(in_dir, xtal_name, \"refine.pdb\"))\n# params.input.mtz = os.path.join(os.path.join(in_dir, xtal_name, \"refine.mtz\"))\n#\n# if not os.path.exists(params.input.pdb):\n# print(\"input pdb doesn't exist: {}\".format(params.input.pdb))\n# continue\n# if not os.path.exists(params.input.mtz):\n# print(\"input mtz doesn't exsit: {}\".format(params.input.mtz))\n# continue\n#\n# params.output.out_dir = os.path.join(out_dir, xtal_name)\n# params.output.log_dir = os.path.join(out_dir, xtal_name, \"logs\")\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"exhaustive_search.csv\")\n#\n# if not qsub:\n# exhaustive(params=params)\n# scatter_plot(params.exhaustive.output.csv_name)\n#\n# if qsub:\n#\n# # pickle params\n#\n# with open(os.path.join(out_dir,\n# xtal_name,\n# '{}_param.pickle'.format(xtal_name)),'wb') as handle:\n# pickle.dump(params, handle, protocol=pickle.HIGHEST_PROTOCOL)\n#\n# # write python script\n#\n# python_script = open(os.path.join(out_dir,\n# xtal_name,\n# \"{}_exhaustive.py\".format(xtal_name)),'w')\n# python_script.write(\"import pickle\\n\"\n# \"import sys\\n\"\n# \"import os\\n\"\n# \"sys.path.append(\\\"/dls/science/groups/i04-1/elliot-dev/\"\n# \"Work/exhaustive_search\\\")\\n\"\n# \"from exhaustive.exhaustive import run as exhaustive\\n\"\n# \"import libtbx.phil\\n\"\n# \"out_dir=\\\"{}\\\"\\n\".format(out_dir) +\n# \"xtal_name=\\\"{}\\\"\\n\".format(xtal_name) +\n# \"with open(os.path.join(out_dir, \" \\\n# \"xtal_name,\\'{}_param.pickle\\'.format(xtal_name)),'rb') as handle:\\n\"\n# \"\\tparams = pickle.load(handle)\\n\"\n# \"exhaustive(params)\")\n# python_script.close()\n#\n# # write bash script\n# bash_script=open(os.path.join(out_dir, xtal_name,\n# \"{}_exhaustive.sh\".format(xtal_name)),'w')\n# bash_script.write(\"source /dls/science/groups/i04-1/software/\" \\\n# \"pandda-update/ccp4/ccp4-7.0/setup-scripts/ccp4.setup-sh\\n\"\n# # \"export PYTHONPATH=\\\"${PYTHONPATH}:\" \\\n# # \"/dls/science/groups/i04-1/elliot-dev/\"\n# # \"Work/exhaustive_search/exhaustive\\\"\\n\"\n# \"ccp4-python \" + os.path.join(out_dir, xtal_name,\n# \"{}_exhaustive.py\".format(xtal_name)))\n# bash_script.close()\n# # submit job\n# os.system(\"qsub {}\".format(os.path.join(out_dir, xtal_name,xtal_name+\"_exhaustive.sh\")))\n\n\n# Get exhaustive search minima fofc\n# with open(os.path.join(out_dir,\"es_minima.csv\"),'wb') as minima_csv:\n#\n# minima_writer = csv.writer(minima_csv, delimiter=',')\n#\n# for xtal_name in xtals:\n#\n# params.output.out_dir = os.path.join(out_dir, xtal_name)\n# params.exhaustive.output.csv_name = os.path.join(params.output.out_dir, \"exhaustive_search.csv\")\n# if os.path.exists(params.exhaustive.output.csv_name):\n# os.chdir(os.path.join(out_dir, xtal_name))\n# scatter_plot(params.exhaustive.output.csv_name)\n# else:\n# continue\n#\n# if os.path.exists(params.exhaustive.output.csv_name):\n# occ, u_iso, fofc = get_minimum_fofc(params.exhaustive.output.csv_name)\n# b_fac=u_iso_to_b_fac(u_iso)\n#\n# print([xtal_name, occ, b_fac, fofc])\n#\n# 
minima_writer.writerow([xtal_name, occ, b_fac, fofc])\n\n# refine minima\n\n# with open(os.path.join(out_dir,\"refined_occs.csv\"),'wb') as minima_csv:\n#\n# minima_writer = csv.writer(minima_csv, delimiter=',')\n#\n# for xtal_name in xtals:\n#\n# if os.path.exists(os.path.join(out_dir,xtal_name,\"refine.pdb\")):\n#\n# occ = get_lig_occ(os.path.join(out_dir,xtal_name,\"refine.pdb\"))\n#\n# minima_writer.writerow([xtal_name,occ])\n# else:\n# continue\n","repo_name":"nelse003/exhaustive_search","sub_path":"exhaustive/jiffies/run_exhaustive.py","file_name":"run_exhaustive.py","file_ext":"py","file_size_in_byte":15284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39379242874","text":"import os\nimport sys\nsys.path.insert(0, os.getcwd())\nfrom engine import BaseModel\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.animation import FuncAnimation\nfrom typing import Tuple\n\n\nclass PLSA(BaseModel):\n def __init__(self, k, max_iter, tol=1e-3):\n super().__init__()\n self.nz = k\n self.max_iter = max_iter\n self.tol = tol\n self.history = []\n\n def fit(self, X):\n nw, nd = X.shape\n\n # P(w|z)\n self.p_w_z = np.random.rand(nw, self.nz)\n # P(z|d)\n self.p_z_d = np.random.rand(self.nz, nd)\n\n for _ in range(self.max_iter):\n self.history.append((self.p_w_z, self.p_z_d))\n # E step\n t = np.einsum('wz,zd->dzw', self.p_w_z, self.p_z_d)\n p_d_zw = t / np.sum(t, axis=1, keepdims=True)\n # M step\n new_p_w_z = (np.einsum('wd,dzw->wz', X, p_d_zw) / np.einsum('wd,dzw->z', X, p_d_zw))\n new_p_z_d = (np.einsum('wd,dzw->zd', X, p_d_zw) / np.sum(X, 0))\n if np.allclose(new_p_w_z, self.p_w_z, atol=self.tol):\n break\n self.p_w_z = new_p_w_z\n self.p_z_d = new_p_z_d\n self.history.append((self.p_w_z, self.p_z_d))\n return self.p_w_z, self.p_z_d\n\n def show_anime(self, save_path='gif/PLSA.gif'):\n fig: plt.Figure = plt.figure()\n fig.set_tight_layout(True)\n ax: Axes3D = fig.add_subplot(1, 1, 1, projection='3d')\n x, y = np.meshgrid(np.linspace(0, 1), np.linspace(0, 1))\n z = 1 - x - y\n z[z < 0] = np.nan\n ax.view_init(elev=25., azim=10.)\n\n def update(i):\n p_w_z, p_z_d = self.history[i]\n plt.cla()\n ax.set_title(f'iter {i}')\n ax.plot_surface(x, y, z, alpha=0.3)\n ax.set_zlim(0, 1)\n ax.scatter3D(p_z_d[0], p_z_d[1], p_z_d[2], label=r'$P(z_k|d_j)$')\n ax.scatter3D(p_w_z[:, 0], p_w_z[:, 1], p_w_z[:, 2], label=r'$P(w_i|z_k)$')\n ax.legend()\n\n anim = FuncAnimation(fig, update, frames=len(self.history))\n anim.save(save_path, writer='imagemagick', fps=6)\n plt.show()\n\n\nif __name__ == \"__main__\":\n X = np.asarray([[0, 0, 1, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 1],\n [0, 1, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 1],\n [1, 0, 0, 0, 0, 1, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 2, 0, 0, 1],\n [1, 0, 1, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 1, 0, 0, 0, 0]])\n plsa = PLSA(k=3, max_iter=100)\n p_w_z, p_z_d = plsa.fit(X)\n plsa.show_anime()\n","repo_name":"zhen8838/Statistical-Learning-Method","sub_path":"PLSA/PLSA.py","file_name":"PLSA.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"73"} +{"seq_id":"17932246786","text":"# importing required library\nimport pygame\nimport time\n\npygame.init()\nX = 600\nY = 600\nscrn = pygame.display.set_mode((X, Y))\npygame.display.set_caption('image')\n\n# trouve 
image et fait objet (woaw)\n\n\nlisteframe = []\nfor i in range(1, 36):\n listeframe.append(pygame.image.load(\"/home/ncr/boom/frame\" + str(i)+str(\".jpg\")).convert())\n\ndef animator2000(framelist,coord1,coord2):\n for i in range(len(framelist)):\n scrn.blit(framelist[i], (coord1, coord2))\n pygame.display.update()\n time.sleep(0.01)\n\nanimator2000(listeframe, 0, 0)\npygame.display.flip()\nstatus = True\nwhile (status):\n\n for i in pygame.event.get():\n\n if i.type == pygame.QUIT:\n status = False\n\npygame.quit()\n","repo_name":"Ghostytriickster/arkanoid-python","sub_path":"boom/boom.py","file_name":"boom.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"43990825469","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\n# List of news websites to scrape\nwebsites = ['https://www.nytimes.com/', 'https://www.bbc.com/', 'https://www.theguardian.com/us']\n\n# CSV file to store the collected data\nfilename = 'news_articles.csv'\n\n# Open the file for writing\nwith open(filename, 'w', newline='', encoding='utf-8') as csvfile:\n writer = csv.writer(csvfile)\n # Write the header row to the file\n writer.writerow(['title', 'author', 'date_published', 'source', 'content'])\n\n for site in websites:\n # Send a request to the website\n response = requests.get(site)\n\n # Parse the HTML content using Beautiful Soup\n soup = BeautifulSoup(response.content, 'html.parser')\n\n # Find all the articles on the page\n articles = soup.find_all('article')\n\n # Loop through each article and extract the relevant data\n for article in articles:\n # Extract the title of the article\n title = article.find('h2').text.strip()\n\n # Extract the author of the article, if available\n author = article.find('span', class_='css-1n7hynb').text.strip() if article.find('span', class_='css-1n7hynb') else ''\n\n # Extract the date the article was published, if available\n date_published = article.find('time')['datetime'] if article.find('time') else ''\n\n # Extract the source of the article\n source = site\n\n # Extract the content of the article\n content = article.find('p').text.strip() if article.find('p') else ''\n\n # Write the extracted data to the CSV file\n writer.writerow([title, author, date_published, source, content])\n","repo_name":"dullahgtt/Grover-Enhancer","sub_path":"Article Dataset/Web Scraper/bbc-nyct-guardian-web-scraper.py","file_name":"bbc-nyct-guardian-web-scraper.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42807924865","text":"class Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n if (k==1 or k==-1) and len(nums)>1:\n return True\n record={0:-1}\n acc=0\n for idx in range(len(nums)):\n item=nums[idx]\n acc+=item\n tmp=acc%k if k else acc\n if tmp in record:\n if idx-record[tmp]>1:\n return True\n else:\n record[tmp]=idx\n return False","repo_name":"lkwq007/leetcode-py","sub_path":"523-Continuous-Subarray-Sum.py","file_name":"523-Continuous-Subarray-Sum.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"29020943185","text":"class Trie:\n \n def __init__(self):\n self.children = {}\n self.isEnd = False\n \n def insert(self, word: str) -> None:\n temp = self\n for i in range(len(word)):\n if word[i] in temp.children:\n temp = 
temp.children[word[i]]\n else:\n node = Trie()\n temp.children[word[i]] = node\n temp = node\n \n temp.isEnd = True\n \n def search(self, word: str) -> bool:\n temp = self\n for i in range(len(word)):\n if word[i] in temp.children:\n temp = temp.children[word[i]]\n else:\n return False\n \n return temp.isEnd\n\n def startsWith(self, prefix: str) -> bool:\n temp = self\n for i in range(len(prefix)):\n if prefix[i] in temp.children:\n temp = temp.children[prefix[i]]\n else:\n return False\n \n return True\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)","repo_name":"matt468779/A2SV","sub_path":"0208-implement-trie-prefix-tree/0208-implement-trie-prefix-tree.py","file_name":"0208-implement-trie-prefix-tree.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9235861302","text":"# import os\n# import shutil\n# datapath = 'E:/realdata/'\n# train_data = 'E:/traindata'\n# val_data = 'E:/realvaldata'\n# data_list = os.listdir(datapath)\n# for obj in data_list:\n# path = os.path.join(datapath, obj)\n# data = os.listdir(path)\n# n = len(data)\n# for i in range(n):\n# if i< n//2:\n# rfilename = os.path.join(path, data[i])\n# dfilename = '%06d.png'%(i+1)\n# dpath = os.path.join(train_data, obj)\n# shutil.copy(rfilename, os.path.join(dpath, dfilename))\n# else:\n# rfilename = os.path.join(path, data[i])\n# dfilename = '%06d.png' % (i + 1)\n# dpath = os.path.join(val_data, obj)\n# shutil.copy(rfilename, os.path.join(dpath, dfilename))\n\nimport os\nimport shutil\ntraindata = 'E:/realvaldata'\ntraindata1 = 'E:/valdata1/images'\ndata_list = os.listdir(traindata)\ntotal = 0\nfor obj in data_list:\n path = os.path.join(traindata, obj)\n data = os.listdir(path)\n n = len(data)\n total += n\n for i in range(total-n,total):\n dfilename = '%06d.png' %(i+155)\n shutil.copy(os.path.join(path, data[i-total+n]), os.path.join(traindata1, dfilename))\n\n\nprint(total)\n\n\n\n\n\n","repo_name":"powermano/test","sub_path":"make_filename.py","file_name":"make_filename.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26090987194","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\carbon\\common\\stdlib\\testfixtures\\tests\\test_log_capture.py\nfrom testfixtures import log_capture, compare, Comparison as C, LogCapture\nfrom unittest import TestCase\nfrom logging import getLogger\nroot = getLogger()\none = getLogger('one')\ntwo = getLogger('two')\nchild = getLogger('one.child')\n\nclass TestLog_Capture(TestCase):\n\n @log_capture('two', 'one.child')\n @log_capture('one')\n @log_capture()\n def test_logging(self, l1, l2, l3):\n root.info('1')\n one.info('2')\n two.info('3')\n child.info('4')\n l1.check(('root', 'INFO', '1'), ('one', 'INFO', '2'), ('two', 'INFO', '3'), ('one.child', 'INFO', '4'))\n l2.check(('one', 'INFO', '2'), ('one.child', 'INFO', '4'))\n l3.check(('two', 'INFO', '3'), ('one.child', 'INFO', '4'))\n compare(l3.records, [C('logging.LogRecord'), C('logging.LogRecord')])\n\n def test_uninstall_properly(self):\n root = getLogger()\n child = getLogger('child')\n before_root = root.handlers[:]\n before_child = child.handlers[:]\n try:\n old_root_level = root.level\n root.setLevel(49)\n old_child_level = child.level\n child.setLevel(69)\n\n 
@log_capture('child')\n @log_capture()\n def test_method(l1, l2):\n root = getLogger()\n root.info('1')\n self.assertEqual(root.level, 1)\n child = getLogger('child')\n self.assertEqual(child.level, 1)\n child.info('2')\n l1.check(('root', 'INFO', '1'), ('child', 'INFO', '2'))\n l2.check(('child', 'INFO', '2'))\n\n test_method()\n self.assertEqual(root.level, 49)\n self.assertEqual(child.level, 69)\n self.assertEqual(root.handlers, before_root)\n self.assertEqual(child.handlers, before_child)\n finally:\n root.setLevel(old_root_level)\n child.setLevel(old_child_level)\n\n @log_capture()\n def test_decorator_returns_logcapture(self, l):\n self.failUnless(isinstance(l, LogCapture))\n\n def test_remove_existing_handlers(self):\n logger = getLogger()\n original = logger.handlers\n try:\n logger.handlers = start = [object()]\n\n @log_capture()\n def test_method(l):\n logger.info('during')\n l.check(('root', 'INFO', 'during'))\n\n test_method()\n compare(logger.handlers, start)\n finally:\n logger.handlers = original\n\n def test_clear_global_state(self):\n from logging import _handlers, _handlerList\n capture = LogCapture()\n capture.uninstall()\n self.assertFalse(capture in _handlers)\n self.assertFalse(capture in _handlerList)\n\n def test_no_propogate(self):\n logger = getLogger('child')\n compare(logger.propagate, True)\n\n @log_capture('child', propagate=False)\n def test_method(l):\n logger.info('a log message')\n l.check(('child', 'INFO', 'a log message'))\n\n with LogCapture() as global_log:\n test_method()\n global_log.check()\n compare(logger.propagate, True)\n","repo_name":"connoryang/1v1dec","sub_path":"testfixtures/tests/test_log_capture.py","file_name":"test_log_capture.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"34035105871","text":"\"\"\"Wrapper for adding an environment info to track task ID.\"\"\"\nfrom garage import Wrapper\n\n\nclass TaskNameWrapper(Wrapper):\n \"\"\"Add task_name or task_id to env infos.\n\n Args:\n env (gym.Env): The environment to wrap.\n task_name (str or None): Task name to be added, if any.\n task_id (int or None): Task ID to be added, if any.\n\n \"\"\"\n\n def __init__(self, env, *, task_name=None, task_id=None):\n super().__init__(env)\n self._task_name = task_name\n self._task_id = task_id\n\n def step(self, action):\n \"\"\"gym.Env step for the active task env.\n\n Args:\n action (np.ndarray): Action performed by the agent in the\n environment.\n\n Returns:\n tuple:\n np.ndarray: Agent's observation of the current environment.\n float: Amount of reward yielded by previous action.\n bool: True iff the episode has ended.\n dict[str, np.ndarray]: Contains auxiliary diagnostic\n information about this time-step.\n\n \"\"\"\n es = super().step(action)\n if self._task_name is not None:\n es.env_info['task_name'] = self._task_name\n if self._task_id is not None:\n es.env_info['task_id'] = self._task_id\n return es\n","repo_name":"rlworkgroup/garage","sub_path":"src/garage/envs/task_name_wrapper.py","file_name":"task_name_wrapper.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1748,"dataset":"github-code","pt":"73"} +{"seq_id":"29900395644","text":"import torch\r\nfrom torch.utils.data import TensorDataset\r\nfrom tqdm import tqdm\r\nbar_format = '{desc:<5.5}{percentage:3.0f}%|{bar:10}{r_bar}'\r\n\r\n\r\ndef get_ent_rel(path_data):\r\n with open(path_data + 'entities.txt', 'r') as f:\r\n 
ent_list = f.read().split('\\n')\r\n with open(path_data + 'relations.txt', 'r') as f:\r\n rel_list = f.read().split('\\n')\r\n ent_list = ent_list[:-1] if ent_list[-1] == '' else ent_list\r\n rel_list = rel_list[:-1] if rel_list[-1] == '' else rel_list\r\n return ent_list, rel_list\r\n\r\n\r\ndef get_facts(path_data):\r\n facts = list()\r\n for data in ['test', 'dev', 'train']:\r\n file = path_data + f'{data}.txt'\r\n with open(file, 'r') as f:\r\n facts_f = f.read().split('\\n')\r\n facts_f = facts_f[:-1] if facts_f[-1] == '' else facts_f\r\n facts += facts_f\r\n facts = [fact.split('\\t') for fact in facts]\r\n facts = [fact[:3] for fact in facts if len(fact) == 3 or fact[-1] == '1']\r\n return facts\r\n\r\n\r\ndef get_graph(path_data, entity_list, relation_list):\r\n facts = get_facts(path_data)\r\n\r\n nodes = {\r\n entity: {'out': dict(), 'in': dict()} for entity in entity_list\r\n }\r\n edges = {\r\n relation: {'in': list(), 'out': list()} for relation in relation_list\r\n }\r\n for fact in tqdm(facts, bar_format=bar_format):\r\n subject, relation, object = fact\r\n node_s = nodes[subject]\r\n node_s_out = node_s['out'].get(relation, list()) + [object]\r\n node_s['out'][relation] = node_s_out\r\n node_o = nodes[object]\r\n node_o_in = node_o['in'].get(relation, list()) + [subject]\r\n node_o['in'][relation] = node_o_in\r\n nodes[subject] = node_s\r\n nodes[object] = node_o\r\n\r\n edge_r = edges[relation]\r\n edge_out = edge_r['out']\r\n edge_out.append(object)\r\n edge_r['out'] = edge_out\r\n edge_in = edge_r['in']\r\n edge_in.append(subject)\r\n edge_r['in'] = edge_in\r\n edges[relation] = edge_r\r\n\r\n for entity, node in nodes.items():\r\n for relation, objects in node['out'].items():\r\n node['out'][relation] = list(set(objects))\r\n for relation, objects in node['in'].items():\r\n node['in'][relation] = list(set(objects))\r\n nodes[entity] = node\r\n\r\n for relation, edge in edges.items():\r\n edge['out'] = list(set(edge['out']))\r\n edge['in'] = list(set(edge['in']))\r\n edges[relation] = edge\r\n\r\n return nodes, edges\r\n\r\n\r\ndef get_positives(fact, nodes):\r\n s, R, o = fact\r\n r_list = R.split(',')\r\n entities = [s]\r\n for r in r_list:\r\n if r[:2] == '**':\r\n rel = r[2:]\r\n edge = 'in'\r\n else:\r\n rel = r\r\n edge = 'out'\r\n ent_next = list()\r\n for node in (nodes[ent] for ent in entities):\r\n ent_next += node[edge].get(rel, list())\r\n entities = list(set(ent_next))\r\n return entities\r\n\r\n\r\ndef get_negatives(fact, edges):\r\n s, R, o = fact\r\n r_list = R.split(',')\r\n r = r_list[-1]\r\n if r[:2] == '**':\r\n r_last = r[2:]\r\n edge = 'in'\r\n else:\r\n r_last = r\r\n edge = 'out'\r\n return edges[r_last][edge]\r\n\r\n\r\n# def get_dataset(data, entity_dict, relation_dict, path_data):\r\n# with open(path_data + f'{data}.txt', 'r') as f:\r\n# lines = f.read().split('\\n')\r\n# lines = lines[:-1] if lines[-1] == '' else lines\r\n# facts = [line.split('\\t') for line in lines]\r\n# facts = [fact[:3] for fact in facts if len(fact) == 3 or fact[-1] == '1']\r\n#\r\n# idx_list, s_list, R_list, o_list = list(), list(), list(), list()\r\n# for i, fact in enumerate(facts):\r\n# s, R, o = fact\r\n# idx_list.append(i)\r\n# s_list.append(entity_dict[s])\r\n# r_list = R.split(',')\r\n# R_list.append([relation_dict[r] for r in r_list])\r\n# o_list.append(entity_dict[o])\r\n#\r\n# data = TensorDataset(\r\n# *[torch.LongTensor(L) for L in [idx_list, s_list, R_list, o_list]]\r\n# )\r\n# return data, facts\r\n\r\n\r\ndef get_path_dataset(data, size, entity_dict, 
relation_dict_both, path_data):\r\n with open(path_data + f'paths/{data}', 'r') as f:\r\n lines = f.read().split('\\n')\r\n lines = lines[:-1] if lines[-1] == '' else lines\r\n facts = [line.split('\\t') for line in lines]\r\n facts_out = list()\r\n idx_list, s_list, R_list, o_list = list(), list(), list(), list()\r\n i = 0\r\n for fact in facts:\r\n s, R, o = fact\r\n r_list = R.split(',')\r\n if len(r_list) != size:\r\n continue\r\n facts_out.append(fact)\r\n idx_list.append(i)\r\n s_list.append(entity_dict[s])\r\n R_list.append([relation_dict_both[r] for r in r_list])\r\n o_list.append(entity_dict[o])\r\n i += 1\r\n data = TensorDataset(\r\n *[torch.LongTensor(L) for L in [idx_list, s_list, R_list, o_list]]\r\n )\r\n return data, facts_out\r\n","repo_name":"nec-research/dccg","sub_path":"utils/kg.py","file_name":"kg.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"32824613871","text":"\"\"\"\nETA = explode them all\nterraformers\nrolling stones\nexploding kittens\nsappers\nsplit devils\ndynamite squad\nbig bang terraformersdividers by zero\ndissasemble team / decomposition team\n\"\"\"\n\n\"\"\"\n===vex part for append classGuide if in volumesampe\nfloat vs = volumesample(1,0,v@P);\nint classGuide=prim(1,\"classGuide\",0)\nif(vs>0)\n{\n\tappend(i[]cG,classGuide);\t\n}\n\n===vex part for combining arrays in !detail mode!\nint combG[];\nfor(int i=0;ibCount:\n\t\tbCount=b\n\n#find most frequent classGuide\nfor a,b in new_vals:\n\tif b==bCount:\n\t\tmFreq=a\n\ng.addAtrib(hou.attribType.Global,\"mostFreq\",mFreq)\n\n","repo_name":"igor-si/shared","sub_path":"recipies/hou_py/hPyCheckArayChunks.py","file_name":"hPyCheckArayChunks.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"71928010156","text":"import torch\nimport matplotlib.pyplot as plt\nfrom datapipe import TrainDataset\nfrom unet import *\nfrom diffusion import *\nfrom elucidated_diffusion import *\nfrom stats import *\n\ndef plot(sample, num_sample, mode, milestone=None):\n grid = int(math.sqrt(num_sample))\n if mode == \"GT\":\n for index in range(10):\n fig, axes = plt.subplots(figsize=(18, 10),nrows=grid, ncols=grid)\n for i in range(index*num_sample, (index+1)*num_sample):\n row = (i-index*num_sample) // grid\n col = (i-index*num_sample) % grid\n im = axes[row][col].imshow(sample[i][0].cpu().detach().numpy(),interpolation='bilinear',cmap=plt.cm.jet,origin='lower', vmin=0, vmax=0.0001)\n fig.colorbar(im, ax=axes[row][col])\n fig.savefig(f\"/home/liqiny/climate/GT-CRM_QPI-{index}.png\")\n plt.close(\"all\")\n else:\n fig, axes = plt.subplots(figsize=(18, 10),nrows=grid, ncols=grid)\n for i in range(num_sample):\n row = i // grid\n col = i % grid\n im = axes[row][col].imshow(sample[i].cpu().detach().numpy(),interpolation='bilinear',cmap=plt.cm.jet,origin='lower', vmin=0, vmax=0.001)\n fig.colorbar(im, ax=axes[row][col])\n fig.savefig(f\"/home/liqiny/climate/ED_sample-1000-{milestone}-0.001.png\")\n plt.close(\"all\")\n\ndef modelSample():\n unet = Unet(\n dim = 64,\n dim_mults = (4, 8, 16, 16),\n random_fourier_features = True,\n channels = 5,\n learned_sinusoidal_dim = 64\n ).cuda()\n\n '''\n Elucidated Diffusion\n '''\n elucidated_diffusion = ElucidatedDiffusion(\n unet,\n image_size = (24, 32),\n num_sample_steps = 1000,\n channels = 5\n ).cuda()\n\n device = torch.device(\"cuda:0\")\n\n train_dataset = 
TrainDataset(\"/extra/ucibdl0/shared/data/climate/h3/*.nc\")\n ds_maxmin = train_dataset.getMaxMin()\n # milestones = [58, 73, 75, 81, 83, 89, 90, 91, 92, 98, 100]\n # for milestone in milestones:\n milestone = 200\n data = torch.load(str(f\"/home/liqiny/climate/results/model-{milestone}.pt\"), map_location=device)\n elucidated_diffusion.load_state_dict(data['model'])\n\n # for i in range(2):\n # print(f\"loop {i}\\n\")\n # sample = elucidated_diffusion.sample(batch_size=2000, ds_maxmin=ds_maxmin)\n # torch.save(sample, f\"/home/liqiny/climate/sample_200_4_2.pt\")\n # sample = torch.squeeze(sample)\n # plot(sample, 25, \"model\", milestone=milestone)\n # sample = torch.load(\"/home/liqiny/climate/sample.pt\")\n\n # sample1 = torch.load(f\"/home/liqiny/climate/sample_100_10k.pt\")\n sample = torch.load(f\"/home/liqiny/climate/sample_200_10k.pt\")\n\n for i in range(0,5):\n s = torch.load(f\"/home/liqiny/climate/sample_200_{i}_2.pt\")\n sample = torch.cat((sample, s), 0)\n torch.save(sample, f\"/home/liqiny/climate/sample_100_20k.pt\")\n\n sampleStats1(sample, milestone)\n sampleStats2(sample, milestone)\n\n\ndef groundTruth():\n train_dataset = TrainDataset(\"/extra/ucibdl0/shared/data/climate/h3/*.nc\")\n sample = train_dataset.CRM_QPI\n plot(sample, 25, \"GT\")\n\n\nif __name__ == \"__main__\":\n # modelSample()\n\n unet = Unet(\n dim = 64,\n dim_mults = (4, 8, 16, 16),\n random_fourier_features = True,\n channels = 5,\n learned_sinusoidal_dim = 64,\n condition = True\n ).cuda()\n\n x = torch.randn([100, 5, 24, 32]).cuda()\n y = torch.randn([100, 18, 24, 32]).cuda()\n t = torch.randint(0, 1000, (100,)).cuda()\n z = unet(x, t, cond=y)\n print(z.size())\n \n # groundTruth()\n\n # loss = torch.load('/home/liqiny/climate/results/CRM_QC_ED_100k/model/loss.pt')\n # plt.plot(loss.cpu().detach().numpy())\n # plt.savefig('123.png')\n # plt.close('all')","repo_name":"liqinye/climate-modeling","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3063034909","text":"import sys\nsys.path.append('.')\nimport pytest\nfrom sqlalchemy import text\nfrom app.server import create_app, db\n\nflag_delete_rows = False\n\n\n@pytest.fixture(scope='module')\ndef client(delete_database_rows=flag_delete_rows):\n app = create_app('test')\n testing_client = app.test_client()\n\n with app.app_context():\n db.create_all()\n \n yield testing_client\n \n if delete_database_rows:\n with app.app_context():\n session = db.session\n session.execute(text('DELETE FROM data'))\n session.commit()\n","repo_name":"MiqSA/data_search","sub_path":"app/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74176442157","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# System modules\nimport unittest\nimport logging\nimport os\nimport datetime\nfrom unittest.mock import patch\n\n# External modules\nimport numpy as np\n\nimport xarray as xr\n\n# Internal modules\nfrom pylawr.grid.cartesian import CartesianGrid\nfrom pylawr.grid.polar import PolarGrid\nfrom pylawr.transform.spatial.beamexpansion import TAG_BEAM_EXPANSION_CORR\nfrom pylawr.functions.fit import fit_extrapolator\nfrom pylawr.transform.temporal.extrapolation import Extrapolator\nfrom pylawr.remap.nearest import NearestNeighbor\nfrom pylawr.utilities.helpers import 
create_array\n\n\nlogging.basicConfig(level=logging.DEBUG)\n\nBASE_PATH = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestFitFunctions(unittest.TestCase):\n def setUp(self):\n # cartesian\n self.save_path = 'test_array.nc'\n len_x = 300\n len_y = 300\n self.grid = CartesianGrid(nr_points=(len_x, len_y))\n self.array = create_array(self.grid)\n self.array.lawr.add_tag(TAG_BEAM_EXPANSION_CORR)\n self.array.to_netcdf('test_array.nc')\n\n # polar\n self.save_path_polar = 'test_polar_array.nc'\n self.array_polar = create_array(PolarGrid())\n self.array_polar.lawr.add_tag(TAG_BEAM_EXPANSION_CORR)\n self.array_polar.to_netcdf('test_polar_array.nc')\n\n def tearDown(self):\n os.remove(self.save_path)\n os.remove(self.save_path_polar)\n\n def test_fit_polar_array(self):\n returned_value = fit_extrapolator(refl_array=self.array_polar,\n pre_refl_path=self.save_path_polar,\n grid_extrapolation=CartesianGrid(),\n remapper=NearestNeighbor(1))\n self.assertIsInstance(returned_value, Extrapolator)\n\n def test_fit_extrapolator_returns_extrapolator(self):\n returned_value = fit_extrapolator(refl_array=self.array,\n pre_refl_path=self.save_path,\n grid_extrapolation=self.grid,\n remapper=NearestNeighbor(1))\n self.assertIsInstance(returned_value, Extrapolator)\n\n def test_fit_extrapolator_returns_fitted_extrapolator(self):\n returned_value = fit_extrapolator(refl_array=self.array,\n pre_refl_path=self.save_path,\n grid_extrapolation=self.grid,\n remapper=NearestNeighbor(1))\n self.assertTrue(returned_value.fitted)\n\n @patch('pylawr.transform.temporal.extrapolation.Extrapolator.fit')\n def test_fit_extrapolator_passes_grid_to_fit(self, grid_mock):\n _ = fit_extrapolator(refl_array=self.array,\n pre_refl_path=self.save_path,\n grid_extrapolation=self.grid,\n remapper=NearestNeighbor(1))\n self.assertEqual(id(grid_mock.call_args[1]['grid']), id(self.grid))\n\n def test_fit_extrapolator_passes_args_kwargs_to_extrapolator(self):\n extrapolator = fit_extrapolator(self.array, self.save_path,\n grid_extrapolation=self.grid,\n remapper=NearestNeighbor(1),\n cut_percentage=0.3,\n max_timediff=42)\n self.assertEqual(extrapolator.cut_percentage, 0.3)\n self.assertEqual(extrapolator.max_timediff, 42)\n\n def test_fit_extrapolator_array_needs_grid(self):\n self.array.lawr.grid = None\n with self.assertRaises(AttributeError) as e:\n _ = fit_extrapolator(self.array, self.save_path, grid=self.grid)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ObsMod/pylawr","sub_path":"tests/functions/test_functions_fit.py","file_name":"test_functions_fit.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"7858285403","text":"# Restaurant Tab Calculation Program\r\n# This program will calculate a restaurant tab with tax\r\n\r\n# initialization\r\ntax= 0.08\r\nyes=1\r\nno=0\r\n\r\n#program greeting\r\nprint(\"Welcome to my humble abode, the best restaurant in the world!!!!\")\r\nprint(\"Type 1/0 if you want or not one of the following dishes:\")\r\n\r\n# cost of ordered items\r\n\r\nappetizer_per=print(\"Pulpo Gallega,$25\")\r\nPulpo_Gallega=int(25)\r\nPulpo=float(input(\"Buy?:\"))\r\nentree1_per=print(\"Red Snapper Fillet,$35\")\r\nRed_Snapper_Fillet=int(35)\r\nRed=float(input(\"Buy?:\"))\r\nentree2_per=print(\"Paella Peska,$30\")\r\nPaella_Peska=int(30)\r\nPaella=float(input(\"Buy?:\"))\r\nentree3_per=print(\"Main Lobster 
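# Standalone sketch of the mock pattern used in
# test_fit_extrapolator_passes_grid_to_fit above: patching a method records its
# calls, and call_args[1] is the dict of keyword arguments it received.
from unittest.mock import patch

class Model:                      # stand-in class, not part of pylawr
    def fit(self, **kwargs):
        pass

with patch.object(Model, 'fit') as fit_mock:
    Model().fit(grid='toy-grid')
    assert fit_mock.call_args[1]['grid'] == 'toy-grid'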
Tail,$35\")\r\nMain_Lobster_Tail=int(35)\r\nMain=float(input(\"Buy?:\"))\r\ndessert_per=print(\"Tiramisu,$8\")\r\nTiramisu=int(8)\r\nTira=float(input(\"Buy?:\"))\r\n\r\n#total items\r\namt_per=Pulpo_Gallega + Red_Snapper_Fillet + Paella_Peska + Main_Lobster_Tail + Tiramisu\r\n\r\n#compute tab with tax\r\nitems_cost = amt_per\r\ntab = items_cost + items_cost * tax\r\n\r\n#display amount owed\r\nprint('\\nordered items: $', format(items_cost, '.2f'))\r\nprint('ordered items: $', format(items_cost * tax, '.2f'))\r\nprint('Check: $', format(tab, '.2f'))\r\n","repo_name":"lmejia3/Simple-python-program","sub_path":"When you order in a restaurant(2).py","file_name":"When you order in a restaurant(2).py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39074390556","text":"import numpy as np\n\n\ndef _replace(x, y, idx):\n \"\"\"\n Replace the elements in 'x' with the elements in 'y' at the given indices.\n \"\"\"\n x = x.copy()\n x[idx] = y\n return x\n\n\ndef unbroadcast(target, g, broadcast_idx=0):\n \"\"\"\n 'target' is the operand that is broadcast to 'g'. We need to sum 'g' along the broadcast axes.\n In the ordinary broadcasting convention, 'target' is broadcast to 'g' by\n 1. first adding leading singleton dimensions to 'target' until it has the same number of dimensions as 'g'\n 2. then repeating 'target' along the singleton dimensions until it has the same shape as 'g'\n \"\"\"\n if broadcast_idx > 0:\n summed_axis = tuple(\n range(broadcast_idx, broadcast_idx + len(g.shape) - len(target.shape))\n )\n else:\n summed_axis = tuple(\n range(broadcast_idx, broadcast_idx - len(g.shape) + len(target.shape), -1)\n )\n if summed_axis:\n g = np.sum(g, axis=summed_axis)\n\n summed_axis = tuple(\n idx for idx, size in enumerate(target.shape) if size == 1 and g.shape[idx] != 1\n )\n if summed_axis:\n g = np.sum(g, axis=summed_axis, keepdims=True)\n return g\n\n\ndef sum_vjp(g, x, **kwargs):\n axis = kwargs.get(\"axis\", None)\n keepdims = kwargs.get(\"keepdims\", False)\n if axis is None:\n assert not keepdims, f\"Cannot keep dimension because axis=None.\"\n assert not g.shape or g.shape == (\n 1,\n ), f\"Invalid shape for the out-gradient tensor. Got {g.shape}.\"\n axis = tuple(range(len(x.shape)))\n elif isinstance(axis, int):\n axis = (axis,)\n if not keepdims:\n g = np.expand_dims(g, axis=axis)\n for a in axis:\n assert (\n g.shape[a] == 1\n ), f\"Invalid dimension {a} on the out-gradient tensor. 
Got {g.shape[a]}.\"\n g = np.repeat(g, repeats=x.shape[a], axis=a)\n return (g,)\n\n\ndef matmul_vjp(g, x, y, **kwargs):\n def dot_lhs(g, lhs, rhs):\n if len(rhs.shape) == 0:\n return np.sum(np.multiply(rhs, g))\n if len(lhs.shape) == 1 and len(rhs.shape) == 1:\n return np.multiply(g, rhs)\n if len(lhs.shape) == 2 and len(rhs.shape) == 1:\n return np.multiply(np.expand_dims(g, axis=-1), rhs)\n if len(lhs.shape) == 1 and len(rhs.shape) == 2:\n return np.matmul(rhs, g)\n ndim = len(rhs.shape)\n return np.matmul(\n g, np.transpose(rhs, axes=tuple(range(ndim - 2)) + (ndim - 1, ndim - 2))\n )\n\n def dot_rhs(g, lhs, rhs):\n if len(rhs.shape) == 0:\n return np.sum(np.multiply(lhs, g))\n if len(lhs.shape) == 1 and len(rhs.shape) == 1:\n return np.multiply(g, lhs)\n if len(lhs.shape) == 2 and len(rhs.shape) == 1:\n return np.matmul(g, lhs)\n if len(lhs.shape) == 1 and len(rhs.shape) == 2:\n return np.multiply(np.expand_dims(lhs, axis=-1), g)\n ndim = len(lhs.shape)\n return np.matmul(\n np.transpose(lhs, axes=tuple(range(ndim - 2)) + (ndim - 1, ndim - 2)), g\n )\n\n return dot_lhs(g, x, y), dot_rhs(g, x, y)\n\n\ndef squeeze_vjp(g, x, **kwargs):\n \"\"\"\n The only problem with the vjp of 'np.squeeze' is when axis=None. We take special care of this case.\n \"\"\"\n if kwargs.get(\"axis\", None) is None:\n return np.expand_dims(\n g, axis=tuple(idx for idx, size in enumerate(x.shape) if size == 1)\n )\n else:\n return (np.expand_dims(g, axis=kwargs[\"axis\"]),)\n\n\ndef repeat_vjp(g, x, **kwargs):\n \"\"\"\n The vjp of 'np.repeat' is to sum the vector along the repeated axis.\n We put 'repeats' into keyword arguments because it is non-differentiable.\n Now there are two cases:\n 1. if repeats is an 'int', the case is easy, and we just sum across the corresponding axis.\n 2. 
if the repeats is an array, we need to sum over the axis with extra care.\n In addition, if axis=None, 'np.repeat' flattens the array and vjp reconstructs the original shape.\n \"\"\"\n axis = kwargs.get(\"axis\", None)\n assert axis is None or isinstance(\n axis, int\n ), f\"Unsupported type for 'axis': {type(axis)}.\"\n repeats = kwargs[\"repeats\"]\n if isinstance(repeats, int):\n if axis is None:\n return (np.sum(np.reshape(g, (*x.shape, repeats)), axis=-1),)\n else:\n # [aabbccddee] gets reshaped into [aa,bb,cc,dd,ee] and the repeated dim is summed over.\n return (\n np.sum(\n np.reshape(\n g,\n x.shape[:axis] + (x.shape[axis], repeats) + x.shape[axis + 1 :],\n ),\n axis=axis + 1,\n ),\n )\n elif isinstance(repeats, np.ndarray):\n # TODO: implement this\n raise NotImplementedError\n else:\n raise RuntimeError(f\"Unsupported type for 'repeats': {type(repeats)}.\")\n","repo_name":"honglu2875/jag","sub_path":"src/jag/ops/_ops/_funcs.py","file_name":"_funcs.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25667720918","text":"import streamlit as st\n\nimages_dir = \"./images\"\nfavicon_path = f\"{images_dir}/favicon.ico\"\n\ncol1, col2, col3 = st.columns([2,4,2])\n\nwith col1:\n st.markdown(\"## Me ##\")\n st.image(\"./images/favicon.png\")#, width=200)\n st.markdown(\"Lead Plasma Research Physicist\")\n st.write(\"/ Senior Director of Nuclear Power and Artificial Intelligence\")\n\n\nwith col2:\n st.markdown(\"## MiniMe ##\")\n st.image(\"./images/tommy_boi.webp\", width=200)\n st.write(\"Plasma Research Non-Physicist\")\n\nwith col3:\n st.write(\"## MiniMe-MiniMe / MiniYou ##\")\n st.image(\"./images/slack.png\")#, width=200)\n st.text(\"Plasma Research Physicist **Intern**\")\n","repo_name":"takotime808/plasma_bois","sub_path":"pages/02_✨_About_Us_v2.py","file_name":"02_✨_About_Us_v2.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"11450344699","text":"import json\n\nimport aiohttp\nimport discord\n\n\nasync def convertcurrency(cmd, message, args):\n if args:\n if len(args) == 4:\n amount = args[0]\n from_curr = args[1].upper()\n to_curr = args[3].upper()\n try:\n amount = float(amount)\n except ValueError:\n amount = None\n if amount:\n response = None\n start_response = discord.Embed(color=0x3B88C3, title='🏧 Contacting our banks...')\n start_message = await message.channel.send(embed=start_response)\n api_url = f'http://free.currencyconverterapi.com/api/v3/convert?q={from_curr}_{to_curr}&compact=ultra'\n async with aiohttp.ClientSession() as session:\n async with session.get(api_url) as data:\n data = await data.read()\n data = json.loads(data)\n if data:\n curr_key = list(data.keys())[0]\n multi = data[curr_key]\n out_amount = round(amount * multi, 5)\n title = f'🏧 {amount} {from_curr} = {out_amount} {to_curr}'\n end_response = discord.Embed(color=0x3B88C3, title=title)\n else:\n end_response = discord.Embed(color=0xBE1931, title='❗ Invalid currency.')\n await start_message.edit(embed=end_response)\n else:\n response = discord.Embed(color=0xBE1931, title='❗ Invalid amount.')\n else:\n response = discord.Embed(color=0xBE1931, title='❗ Bad number of arguments.')\n else:\n response = discord.Embed(color=0xBE1931, title='❗ Nothing inputted.')\n if response:\n await 
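# Numeric sanity check for unbroadcast above (a sketch under its default
# broadcast_idx=0): a (3,)-shaped target broadcast against a (2, 3) gradient
# gets the gradient summed over the added leading axis.
import numpy as np

target = np.zeros(3)
g = np.arange(6.0).reshape(2, 3)          # [[0, 1, 2], [3, 4, 5]]
# unbroadcast(target, g) -> np.sum(g, axis=(0,)) == array([3., 5., 7.])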
message.channel.send(embed=response)\n","repo_name":"noli01/SigmaBot","sub_path":"sigma/modules/utilities/tools/convertcurrency.py","file_name":"convertcurrency.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"38458864603","text":"from typing import List\n\n\ndef getKey(s: str):\n counter = [0] * 26\n for c in s:\n i = ord(c) - ord('a')\n counter[i] += 1\n k = \"\"\n for i, c in enumerate(counter):\n if c == 0:\n continue\n k += f\"{c}{chr(ord('a') + i)}\"\n return k\n\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n group_map: dict[str, str] = {}\n for s in strs:\n k = getKey(s)\n if group_map.get(k) is None:\n group_map[k] = []\n group_map[k].append(s)\n return [group_map[k] for k in group_map]\n # Time: O(n.max(m)), n: length of strs, m: length of strs[i]\n # Space: O(n), in worst case: each key assign each strs[i]\n\n\n# General cases\nstrs = [\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]\n# Expected: [[\"bat\"],[\"nat\",\"tan\"],[\"ate\",\"eat\",\"tea\"]]\n# group_map = {\n# \"1a1e1t\": [\"eat\", \"tea\", \"ate\"]\n# \"1a1n1t\": [\"tan\", \"nat\"]\n# \"1a1b1t\": [\"bat\"]\n# }\nresult = Solution().groupAnagrams(strs)\nprint(result)\n\n# Corner cases\n# strs has 1 item with empty string\n# Expected = [[\"\"]]\nstrs = [\"\"]\n# group_map = {\n# \"\": [\"\"]\n# }\nresult = Solution().groupAnagrams(strs)\nprint(result)\n\n# strs has duplicate items\nstrs = [\"\", \"\", \"\"]\n# Expected = [[\"\", \"\", \"\"]]\nresult = Solution().groupAnagrams(strs)\nprint(result)\n\nstrs = [\"bdddddddddd\", \"bbbbbbbbbbc\"]\n# group_map = {\n# \"1b10d\": [\"bdddddddddd\"]\n# \"10b1c\": [\"bbbbbbbbbbc\"]\n# }\nresult = Solution().groupAnagrams(strs)\nprint(result)\n","repo_name":"tuandq-cs/coding-pratice","sub_path":"leetcode/week1/string/group-anagram/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"10510197845","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"miscore\",\n version=\"0.0.2\",\n author=\"Sebastian Proost\",\n author_email=\"sebastian.proost@gmail.com\",\n description=\"Package to manage high scores in JSON format\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/sepro/MiScore\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/sepro/MiScore/issues\",\n },\n install_requires=[\"click>=8.1.3\", \"pydantic>=1.9.2,<2.0.0\"],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n packages=[\"miscore\"],\n python_requires=\">=3.6\",\n)\n","repo_name":"sepro/MiScore","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"25811036396","text":"#Exercício 20:\r\n#Escrva uma função num_divisores que recebe\r\n#um n e devolve o número de divisores de n.\r\n #O programa deverá correr sempre até o utilizador\r\n # introduzir o número 9999\r\n\r\n\r\ndef num_divisores(num):\r\n x = 1\r\n for i in range(1, num//2+1):\r\n if num % i == 0:\r\n x += 1\r\n return x\r\n\r\nx = int(input(\"Introduza um nº inteiro: 
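# Quick check of getKey above: anagrams share one count-based signature, e.g.
#   getKey("eat") == getKey("tea") == getKey("ate") == "1a1e1t"
# A common alternative key with the same grouping effect is the sorted string,
# "".join(sorted(s)).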
\"))\r\nwhile True:\r\n if x == 9999:\r\n break\r\n print(\"Número de divisões: \", num_divisores(x))\r\n x = int(input(\"Introduzir um nº inteiro: \"))\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"JuhOliveira1202/Python17","sub_path":"Exercise 20 - Dividers Function.py","file_name":"Exercise 20 - Dividers Function.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40091536164","text":"import logging\nimport os\n# Third-Party\n# N/A\n# Custom Modules\nimport splunk_appinspect\n\nlogger = logging.getLogger(__name__)\n\nreport_display_order = 40\n\n\n@splunk_appinspect.tags('cloud', 'manual')\n@splunk_appinspect.cert_version(min='1.1.22')\ndef check_for_python_files(app, reporter):\n \"\"\"Check if the app contains python scripts.\"\"\"\n application_files = list(app.iterate_files(types=[\".py\"]))\n if application_files:\n for directory, file, ext in application_files:\n current_file_relative_path = os.path.join(directory, file)\n reporter_output = (\"python script found.\"\n \" File: {}\").format(current_file_relative_path)\n reporter.manual_check(reporter_output, current_file_relative_path)\n\n else:\n reporter_output = \"No python scripts found in app.\"\n reporter.not_applicable(reporter_output)\n\n\n@splunk_appinspect.tags('splunk_appinspect', 'cloud')\n@splunk_appinspect.cert_version(min='1.0.0')\ndef check_for_compiled_python(app, reporter):\n \"\"\"Check that there are no `.pyc` or `.pyo` files included in the app.\"\"\"\n for dir, file, ext in app.iterate_files(types=['.pyc', '.pyo']):\n current_file_relative_path = os.path.join(dir, file)\n reporter_output = (\"A Compiled Python file was detected. File: {}\"\n ).format(current_file_relative_path)\n reporter.fail(reporter_output, current_file_relative_path)\n\n\n@splunk_appinspect.tags(\"cloud\", \"manual\")\n@splunk_appinspect.cert_version(min='1.1.17')\ndef check_for_possible_threading(app, reporter):\n \"\"\"Check for the use of threading, and multiprocesses. 
Threading must be\n used with discretion and not negatively affect the Splunk installation as a\n whole.\n \"\"\"\n questionable_statements_regex = [\"from\\s+os\\s+import\\s+(?:.*,)?\\s*fork(?!\\w+)\",\n \"from\\s+os\\s+import\\s+(?:.*,)?\\s*forkpty(?!\\w+)\",\n \"os\\s*\\.\\s*fork\",\n \"from\\s+os\\s+import\\s+(?:.*,)?\\s*spawn\",\n \"os\\s*\\.\\s*spawn\",\n \"from\\s+os\\s+import\\s+(?:.*,)?\\s*setsid(?!\\w+)\",\n \"os\\s*\\.\\s*setsid\",\n \"from\\s+distutils\\s+import\\s+(?:.*,)?\\s*spawn(?!\\w+)\",\n \"distutils\\s*\\.\\s*spawn\"]\n matches = app.search_for_patterns(questionable_statements_regex,\n types=['.py'])\n python_files = list(app.iterate_files(types=['.py']))\n\n if python_files:\n for (fileref_output, match) in matches:\n filename, line_number = fileref_output.rsplit(\":\", 1)\n reporter_output = (\"The following line will be inspected during code review.\"\n \" Match: {}\"\n \" File: {}\"\n \" Line: {}\"\n ).format(match.group(), filename, line_number)\n reporter.manual_check(reporter_output, filename, line_number)\n else:\n reporter_output = (\"No python files found.\")\n reporter.not_applicable(reporter_output)\n\n\n@splunk_appinspect.tags(\"splunk_appinspect\", \"cloud\", \"security\", \"manual\")\n@splunk_appinspect.cert_version(min=\"1.5.1\")\ndef check_built_in_import_function(app, reporter):\n \"\"\"Check that the python `__import__` method is not used in a way that\n can be exploited (e.g., __import__(conf_setting) is at risk of code \n injection).\n \"\"\"\n # This method shouldn't be used because imports should be explicit to \n # prevent execution of unintended code. If you're dynamically loading \n # libraries via strings there is some concern\n # https://docs.python.org/2/library/functions.html#__import__\n # Nice SO dicussion on this here:\n # http://stackoverflow.com/questions/28231738/import-vs-import-vs-importlib-import-module\n # http://stackoverflow.com/questions/2724260/why-does-pythons-import-require-fromlist\n # https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html\n all_python_files = list(app.iterate_files(types=[\".py\"]))\n\n import_patterns = [\"__import__\"]\n matches = app.search_for_patterns(import_patterns,\n types=[\".py\"])\n if len(all_python_files) > 0:\n for (fileref_output, match) in matches:\n filepath, line_number = fileref_output.rsplit(\":\", 1)\n reporter_output = (\"The `__import__` function was detected being\"\n \" used. 
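# Standalone sanity check (sketch) that one of the questionable-statement
# regexes above really fires on a fork call; only the standard re module is
# assumed here, not the appinspect search_for_patterns machinery.
import re

assert re.search(r"os\s*\.\s*fork", "pid = os.fork()") is not None
assert re.search(r"os\s*\.\s*fork", "import os  # no fork call here") is None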
Please use the `import` keyword instead.\"\n \" Third-Party libraries are exempt from this\"\n \" requirement.\"\n \" File: {}\"\n \" Line: {}\").format(filepath, line_number)\n file_dirname = os.path.dirname(filepath)\n # Check for dynamic imports that could be exploited for command injection\n reporter.manual_check(reporter_output, filepath, line_number)\n else:\n reporter_output = (\"No python files detected.\")\n reporter.not_applicable(reporter_output)\n","repo_name":"splunkdevabhi/appinspect","sub_path":"splunk_appinspect/checks/check_python_files.py","file_name":"check_python_files.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"29333868394","text":"import pygame as pg\nimport random\nimport time\n\n\nclass App:\n\n def __init__(self): \n print(\"App loaded...\")\n self.screen = pg.display.set_mode((720, 720))\n self.bars = [180+(x*8) for x in range(60)]\n random.shuffle(self.bars)\n self.idx = 0\n\n def update(self):\n pg.display.flip()\n\n def draw(self):\n self.screen.fill((25,25,25))\n for i in range(60):\n pg.draw.rect(self.screen, (200, 200, 250), pg.Rect(i*12,0,12,self.bars[i]), 1)\n\n def bubble_sort(self):\n for i in range(len(self.bars)):\n for j in range(0, len(self.bars)-i-1):\n if self.bars[j] > self.bars[j+1]:\n self.bars[j], self.bars[j+1] = self.bars[j+1], self.bars[j]\n self.draw()\n self.update()\n time.sleep(0.005)\n\n def run(self):\n self.bubble_sort()\n self.draw()\n self.update()","repo_name":"pybiscuit/sorting","sub_path":"classes/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72006992271","text":"inputPrices = (10, 20, 5, 70, 90)\n\nhighestDifference = 0\ndiff = 0\nlength = len(inputPrices)\n\n# find the difference in forward fashion and always store the highest difference\nfor index in range(length):\n for innerIndex in range(length):\n if innerIndex != len(inputPrices)-1:\n diff = inputPrices[index] - inputPrices[innerIndex+1]\n if diff > highestDifference:\n highestDifference = diff\nprint(highestDifference)\n\n","repo_name":"AmarRagipindi/pythonpractice","sub_path":"day4_tuples_loops_ar.py","file_name":"day4_tuples_loops_ar.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"2534762221","text":"name = \"Muhsan Tech Now\"\nname1 = \" / Tech Now\"\nvar = 3\nvarf1 = 24\nvarf2 = 24\n# print(type(var1))\n# print(var + var1)\n# print(name + name1)\n# print(100 * str(int(varf1) + int(varf2)))\n# print(10 * f\"{name}\\n\")\n\"\"\"\nstr()\nint()\nfloat()\n\n\"\"\"\n# print(int(varf1) + int(varf1))\n\"\"\"\nQuiz - Solved in the video\nExercise - Next Video\nProject - \n\"\"\"\nprint(\"Enter your Number\")\n# imm = input()\n# print(\"you entered \", int(imm)+10)\n\nprint(\"Enter 1nd Number\")\nimm1 = input()\nprint(\"Enter 2nd Number\")\nimm2 = input()\n\nprint(\"You are Sum is: \", int(imm1) + int(imm2))","repo_name":"muhsan-javed/Python-Practice-Code","sub_path":"Python Classes for Beginner/MT_07_Variables_Datatypes_Typecasting_python.py","file_name":"MT_07_Variables_Datatypes_Typecasting_python.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3833792812","text":"\"\"\"텐서플로 estimator 사용 테스트\n error 발생함!!!\n\"\"\"\nimport 
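# Single-pass alternative to the O(n^2) double loop in the price snippet above
# (a sketch): track the minimum price seen so far and the best sell-after-buy
# difference, which also guarantees the smaller element comes first.
# For (10, 20, 5, 70, 90) this yields 85 (buy at 5, sell at 90).
def highest_forward_difference(prices):
    best, lowest = 0, prices[0]
    for price in prices[1:]:
        best = max(best, price - lowest)
        lowest = min(lowest, price)
    return best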
cv2\nimport os\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom IPython.display import display, Image, HTML\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\nimport time\n\nimport cv2\n\n# Use a custom OpenCV function to read the image, instead of the standard\n# TensorFlow `tf.read_file()` operation.\ndef _read_py_function(filename, label):\n image_decoded = cv2.imread(filename.decode(), cv2.IMREAD_GRAYSCALE)\n return image_decoded, label\n\n# Use standard TensorFlow operations to resize the image to a fixed shape.\ndef _resize_function(image_decoded, label):\n image_decoded.set_shape([None, None, None])\n image_resized = tf.image.resize_images(image_decoded, [28, 28])\n return image_resized, label\n\ndef _parse_function (filename, label):\n #print('filename: ', filename)\n image_string = tf.read_file(filename)\n image_decoded = tf.image.decode_jpeg(image_string)\n #image_resized = tf.image.resize_images(image_decoded, [150, 150])\n image_decoded.set_shape([150, 150, 3])\n return image_decoded, label\n\n\ndef input_fn(filenames, labels, batch_size = 16):\n dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n #dataset = dataset.map(_parse_function)\n dataset = dataset.map(lambda filename, label: tuple(tf.py_func(\n _read_py_function, [filename, label], [tf.uint8, label.dtype])))\n\n return dataset\n\n\ndef model_fn(features, labels, mode):\n training = True\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n training = True\n elif mode == tf.estimator.ModeKeys.EVAL:\n training = False\n elif mode == tf.estimator.ModeKeys.PREDICT:\n training = False\n\n train_op = None\n loss = None\n eval_metric_ops = None\n\n conv1_1 = tf.layers.conv2d(inputs=features, filters=64, kernel_size=[3, 3], padding=\"SAME\", activation=tf.nn.relu)\n pool1 = tf.layers.max_pooling2d(inputs=conv1_1, pool_size=[2, 2], padding=\"SAME\", strides=2)\n\n conv2_1 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=[3, 3], padding=\"SAME\", activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2_1, pool_size=[2, 2], padding=\"SAME\", strides=2)\n\n conv3_1 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=[3, 3], padding=\"SAME\", activation=tf.nn.relu)\n pool3 = tf.layers.max_pooling2d(inputs=conv3_1, pool_size=[2, 2], padding=\"SAME\", strides=2)\n\n initializer = tf.contrib.layers.xavier_initializer()\n\n # 150 -> 75 -> 38 -> 19 -> 10 -> 5\n # Dense Layer with Relu\n flat6 = tf.layers.flatten(pool3) # tf.reshape(pool3, [-1, 256 * 19 * 19])\n fc6 = tf.layers.dense(inputs=flat6, units=1000, activation=tf.nn.relu, kernel_initializer=initializer)\n dropout6 = tf.layers.dropout(inputs=fc6, rate=0.5, training=training)\n\n # flat7 = tf.reshape(dropout6, [-1, 1000])\n fc7 = tf.layers.dense(inputs=dropout6, units=500, activation=tf.nn.relu, kernel_initializer=initializer)\n dropout7 = tf.layers.dropout(inputs=fc7, rate=0.5, training=training)\n\n logits = tf.layers.dense(inputs=dropout7, units=2)\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n \"prob\":tf.nn.softmax(logits)})\n else:\n global_step = tf.train.get_global_step()\n loss = tf.losses.softmax_cross_entropy(labels, logits)\n train_op = tf.train.AdamOptimizer(0.0001).minimize(loss, global_step)\n\n accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), tf.argmax(tf.nn.softmax(logits), axis=1))\n eval_metric_ops = {\"acc\": 
accuracy}\n return tf.estimator.EstimatorSpec(\n mode=mode,\n train_op=train_op,\n loss=loss,\n eval_metric_ops=eval_metric_ops)\n\n\nif __name__ == \"__main__\":\n est = tf.estimator.Estimator(model_fn)\n\n DATA_DIR = \"../data/\"\n TRAIN_DIR = DATA_DIR + \"train_resize/\"\n\n train_images = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)]\n train_dogs = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR) if 'dog' in i]\n train_cats = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR) if 'cat' in i]\n\n print(\"train_dogs: \", len(train_dogs))\n print(\"train_cats: \", len(train_cats))\n\n train_dogs = train_dogs[:100]\n train_cats = train_cats[:100]\n\n train_dog_cat = train_dogs + train_cats\n\n label_dog = [0 for i in range(len(train_dogs))]\n label_cat = [1 for i in range(len(train_cats))]\n label = label_dog + label_cat\n label_one_hot = np.eye(2)[label]\n\n x_train, x_valid, y_train, y_valid = train_test_split (\n train_dog_cat, label_one_hot, test_size=0.3, random_state=42)\n\n print(\"train: \", len(x_train))\n print(\"valid: \", len(x_valid))\n\n\n for epoch in range(10):\n est.train(input_fn(x_train, y_train))\n est.evaluate(input_fn(x_train, y_train))\n\n '''\n pred_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": np.array([list(range(10))], np.float32)},\n num_epochs=1,\n shuffle=False)\n\n predictions = est.predict(pred_input_fn)\n for i in predictions:\n print(i[\"prob\"])\n '''","repo_name":"inoray/ML_DL_Tensorflow_study","sub_path":"season_02_Kaggle/02_Dogs_vs_Cats_Redux/김성헌/tf_estimator_test.py","file_name":"tf_estimator_test.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74516496912","text":"import re\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom privateviews.decorators import PublicView\n\n\nclass LoginRequiredMiddleware(object):\n\n def __init__(self):\n self.public_patterns = []\n self.public_views = []\n\n if hasattr(settings, 'PUBLIC_VIEWS'):\n for view_path in settings.PUBLIC_VIEWS:\n view = self.get_view(view_path)\n self.public_views.append(view)\n if hasattr(settings, 'PUBLIC_PATHS'):\n for public_path in settings.PUBLIC_PATHS:\n self.public_patterns.append(re.compile(public_path))\n if hasattr(settings, 'LOGIN_URL'):\n pattern = re.compile(r'^%s$' % re.escape(settings.LOGIN_URL))\n self.public_patterns.append(pattern)\n\n def get_view(self, view_path):\n i = view_path.rfind('.')\n module_path, view_name = view_path[:i], view_path[i + 1:]\n module = __import__(module_path, globals(), locals(), [view_name])\n return getattr(module, view_name)\n\n def matches_public_view(self, view):\n if self.public_views:\n for public_view in self.public_views:\n if view == public_view:\n return True\n return False\n\n def matches_public_path(self, path):\n if self.public_patterns:\n for pattern in self.public_patterns:\n if pattern.match(path) is not None:\n return True\n return False\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n if (request.user.is_authenticated()\n or isinstance(view_func, PublicView)\n or self.matches_public_path(request.path)\n or self.matches_public_view(view_func)):\n return None\n else:\n return login_required(view_func)(request, *view_args, 
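# The module docstring above is Korean for "TensorFlow estimator usage test /
# an error occurs!!!". One likely culprit: tf.estimator.Estimator.train and
# .evaluate expect input_fn to be a *callable* that builds the dataset, not the
# dataset object itself. A hedged fix for the training loop above (step counts
# illustrative; evaluation moved to the held-out split):
for epoch in range(10):
    est.train(lambda: input_fn(x_train, y_train), steps=100)
    est.evaluate(lambda: input_fn(x_valid, y_valid), steps=10)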
**view_kwargs)\n","repo_name":"dabapps/django-private-views","sub_path":"privateviews/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"83"} +{"seq_id":"32714894581","text":"# Zadanie 5. Napisać program, który wczytuje wprowadzany z klawiatury ciąg liczb naturalnych zakończonych zerem\n# stanowiącym wyłącznie znacznik końca danych. Program powinien wypisać 10 co do wielkości\n# wartość, jaka wystąpiła w ciągu. Można założyć, że w ciągu znajduje się wystarczająca liczba elementów.\n\nresult = int(input())\ncount = 1\nwhile True:\n n = int(input())\n if n == 0:\n break\n\n if count >= 10:\n if n > result:\n result = n\n else:\n if n < result:\n result = n\n count += 1\n\nprint(result)\n\n","repo_name":"Mikosztyla/UniversityProgramms","sub_path":"WDI_sets/Arrays/5_10_biggest_number.py","file_name":"5_10_biggest_number.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"519083355","text":"import instaloader\n\ndef insta_collector(request):\n L = instaloader.Instaloader()\n\n if request_args and 'handle' in request_args and 'lastname' in request_args:\n handle = request_args['handle']\n PROFILE = handle\n profile = instaloader.Profile.from_username(L.context, PROFILE)\n\n posts_sorted_by_likes = sorted(profile.get_posts(), key=lambda post: post.likes, reverse=True)\n\n for post in posts_sorted_by_likes:\n L.download_post(post, PROFILE)","repo_name":"jakeybourne/instagram-video-creator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73899790351","text":"\"\"\"\nDisaster Resoponse Project\n(Train Classifier)\n\nScript Execution\n> python models/train_classifier.py data/DisasterResponse.db models/classifier.pkl\n\nINPUTS - \n 1) SQLite db path (containing pre-processed data)\n 2) pickle file name to save ML model\n\"\"\"\n\n# Import Libraries\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\nimport re\nimport pickle\nimport nltk\nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger', 'stopwords'])\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.metrics import classification_report, make_scorer\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef load_data(database_filepath):\n \n \"\"\" Loads data from SQL Database.\n \n INPUTS - \n database_filepath (str): SQL database filepath\n\n OUTPUTS - \n X: Dataframe of features dataset\n Y: Dataframe of target labels dataset.\n category_names: List of target labels\n \"\"\"\n # Load data from database\n engine = create_engine('sqlite:///{}'.format(database_filepath))\n df = pd.read_sql_table('DisasterResponse', con=engine)\n \n # Create X and Y datasets\n X = df['message']\n Y = df.iloc[:,4:]\n category_names = Y.columns\n\n return X, Y, category_names \n\n\ndef tokenize(text):\n \n \"\"\" Tokenizes text data\n \n INPUT - \n text (str): 
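# The task above (translated from Polish): read natural numbers until a 0
# sentinel and print the 10th-largest value entered. The min/max juggling above
# does not actually keep the ten largest values; a bounded min-heap does
# (sketch; the task guarantees at least ten values appear):
import heapq

def tenth_largest():
    top = []                      # min-heap of the 10 largest seen so far
    while True:
        n = int(input())
        if n == 0:
            break
        if len(top) < 10:
            heapq.heappush(top, n)
        elif n > top[0]:
            heapq.heapreplace(top, n)
    return top[0]                 # smallest of the ten largest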
Messages for processing\n \n OUTPUT - \n clean_words (list): Processed text after normalizing, tokenizing and lemmatizing\n \"\"\"\n # Normalize text\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n \n # tokenize text\n words = word_tokenize(text)\n \n # remove stop words\n stopwords_ = stopwords.words(\"english\")\n words = [word for word in words if word not in stopwords_]\n \n # extract root form of words\n clean_words = [WordNetLemmatizer().lemmatize(word, pos='v') for word in words]\n\n return clean_words\n\n\ndef build_model():\n \n \"\"\" Build model with GridSearchCV\n \n OUTPUT - \n model: Trained model after performing grid search\n \"\"\"\n # create pipeline\n pipeline = Pipeline([('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n\n # hyper-parameter grid\n parameters = {'clf__estimator__n_estimators': [10], \n 'clf__estimator__min_samples_split': [2]}\n \n # create model\n model = GridSearchCV(estimator=pipeline,\n param_grid=parameters,\n verbose=3,\n cv=3)\n \n return model\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \n \"\"\" Shows model's performance on test data\n \n INPUTS - \n model: trained model\n X_test: Test features\n Y_test: Test targets labels\n category_names: List of target labels\n \"\"\"\n\n # predict\n y_pred = model.predict(X_test)\n\n # print classification report\n print(classification_report(Y_test.values, y_pred, target_names=category_names))\n\n # print accuracy score\n print('Accuracy: {}'.format(np.mean(Y_test.values == y_pred)))\n\n\ndef save_model(model, model_filepath):\n \n \"\"\" Saves the model to a Python pickle file \n \n INPUT - \n model: Trained model\n model_filepath: Filepath to save the model\n \"\"\"\n\n # save model to pickle file\n pickle.dump(model, open(model_filepath, 'wb'))\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
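# Rough illustration of tokenize() above (exact output hedged, since lemmas
# depend on the installed WordNet data): punctuation is stripped, text is
# lower-cased, stopwords are dropped, and verbs are reduced to root form.
# tokenize("Cities are flooding after the storm!")
#   -> ['cities', 'flood', 'storm']   (approximately)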
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","repo_name":"ade-mola/disaster-response-pipeline","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71799002510","text":"#!/usr/bin/env python3\n\n\"\"\"Prime number sieve.\"\"\"\n\ndef sieve(k) :\n \"\"\"Prime number sieve for all primes up to and including K.\"\"\"\n\n pin = list(range(2,k))\n pout = []\n while pin :\n pout.append(pin[0])\n pin = pin[1:]\n if not pin :\n break\n for p in [q for q in pout if q*q <= pin[-1]] :\n pin = [x for x in pin if (x%p) != 0]\n return pout\n\nif __name__ == '__main__' :\n print ('Run sievetest.py for unit tests.')\n","repo_name":"xprime480/projects","sub_path":"lib/python/sieve.py","file_name":"sieve.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"21947341065","text":"def parse(in_file):\n with open(in_file) as f:\n (notes, my_ticket, nearby_tickets) = f.read().split(\"\\n\\n\")\n my_ticket = [int(n) for n in my_ticket.split(\"\\n\")[1].split(\",\")]\n new_notes = {}\n for note in notes.split(\"\\n\"):\n (k, v) = note.split(\":\")\n v = v.split(\"or\")\n (a, b) = v\n new_notes[k] = ([int(n) for n in a.split(\"-\")], [int(n) for n in b.split(\"-\")])\n nearby_tickets = [\n [int(n) for n in ticket.split(\",\")]\n for ticket in nearby_tickets.split(\"\\n\")[1:-1]\n ]\n return new_notes, my_ticket, nearby_tickets\n\n\ndef check_number(number, notes):\n for field in notes:\n (first, second) = notes[field]\n (a, b) = first\n (c, d) = second\n if number in range(a, b + 1) or number in range(c, d + 1):\n return True\n return False\n\n\ndef solve(in_file):\n notes, my_ticket, nearby_tickets = parse(in_file)\n error = 0\n for ticket in nearby_tickets:\n for number in ticket:\n if not check_number(number, notes):\n error += number\n return error\n\n\n# print(solve('sample.txt')) # 71\nprint(solve(\"input.txt\")) # 28882\n","repo_name":"twsh/Advent-of-Code","sub_path":"2020/16/1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6810589849","text":"# Análise de Banco de Dados de Bolsistas da CAPES\n# Marcus Filipe C. P. Quirino\n# 23/03/22\n# Importar a biblioteca pandas utilizada pra ler e interpretar as informações no arquivo .csv\nimport pandas as pd\nimport sys\n# Atribuindo o banco de dados a variavel df (DataFrame)\ndf = pd.read_csv(\"capes_bolsistas.csv\")\n\n\n# Conjunto de funções necessárias\n# Função meio gambiarra pra limpar a tela. 
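# Quick check of sieve() above: sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].
# Note the docstring promises primes "up to and including K", but range(2, k)
# stops at k - 1, so sieve(29) would omit 29; range(2, k + 1) would match the
# docstring.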
Printa 100 linhas em branco\ndef clear():\n print(100 * \"\\n\")\n\n\n# Função usada para retirar os espaços na função buscar_bolsista()\ndef remove(string):\n return \"\".join(string.split())\n\n\n# Função usada para encriptar o nome do bolsista na função buscar_bolsita\ndef encriptar_nome(nome_descriptado):\n # Lista com as letras do alfabeto para aplicar a cifra de cesar\n alfabeto = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',\n 'V', 'W', 'X', 'Y', 'Z']\n # Pegar primeira letra do nome do bolsista\n primeira_letra = nome_descriptado[0]\n # Pegar a ultima letra do nome do bolsista\n ultima_letra = nome_descriptado[-1]\n # Variavel vazia\n meio = \"\"\n # For loop pra retirar o meio do nome do bolsista\n for vezes in range(1, len(nome_descriptado) - 1):\n meio += nome_descriptado[vezes]\n # Primeira parte da criptografia que consistia em inverter as letras do nome menos a última e a primeira\n nome_pre_encriptado = primeira_letra + meio[::-1] + ultima_letra\n nome_encriptado = \"\"\n # For loop necessário por aplicar a cifra de cesar\n for letra in nome_pre_encriptado.upper():\n # Verificar a posicão da letra no alfabeto\n posicao = alfabeto.index(letra)\n # Mudar a posição da letra em 1\n nova_posicao = posicao + 1\n nova_letra = alfabeto[nova_posicao]\n nome_encriptado += nova_letra\n # Retorna o nome encriptado\n return nome_encriptado\n\n\n# lógica do menu\ndef selecao_interface():\n option = int(input(\"> \"))\n if option == 1:\n # Chamar função de consultar bolsa zero/ano\n consultar_bolsa_ano()\n elif option == 2:\n # Chamar função de pesquisar por bolsista e encriptar nome\n buscar_bolsista()\n elif option == 3:\n # Chamar função de consultar média anual\n media_anual()\n elif option == 4:\n # Chamar função de rankear as 3 maiores e menores bolsas de todos os tempos\n ranking_bolsas()\n elif option == 0:\n # Fechar programa\n sys.exit()\n else:\n # Código responsavel por impedir input inválido\n while option != (1, 2, 3, 4, 0):\n print(\"DIGITE UM NÚMERO VÁLIDO: (1, 2, 3, 4 OU 0)\\n\")\n option = int(input(\"> \"))\n if option == 1:\n consultar_bolsa_ano()\n elif option == 2:\n buscar_bolsista()\n elif option == 3:\n media_anual()\n elif option == 4:\n ranking_bolsas()\n elif option == 0:\n sys.exit()\n\n\n# interface inicial\ndef interface_inicial():\n # Limpa a tela\n clear()\n # Interface grafica do menu\n print(53 * \"#\")\n print(\"====ANÁLIZE DE BANDO DE DADOS DOS BOLSISTAS CAPES====\\n\")\n print(\"-[1]Consultar bolsa zero/Ano\")\n print(\"-[2]Buscar Bolsista\")\n print(\"-[3]Consultar média anual\")\n print(\"-[4]Ranking dos valores das bolsas\")\n print(\"-[0]sair\")\n print(\"\\nSelecione a opção digitando o número correspondente\")\n print(53 * \"#\")\n # Chamar função de lógica do menu (linha: 53)\n selecao_interface()\n\n\n# consultar bolsa zero/ano\ndef consultar_bolsa_ano():\n clear()\n print(52 * \"#\")\n print(\"======PRIMEIRO BOLSITA DO SEU RESPECTIVO ANO========\\n\")\n # Checar se o ano é válido\n ano = input(\"Digite o ano:\\n> \")\n if int(ano) != 2013 and int(ano) != 2014 and int(ano) != 2015 and int(ano) != 2016:\n while int(ano) != 2013 and int(ano) != 2014 and int(ano) != 2015 and int(ano) != 2016:\n ano = input(\"Digite o ano:\\n> \")\n lista_ano = df.loc[df[\"AN_REFERENCIA\"] == int(ano)]\n lista_organizada = lista_ano.sort_index(ascending=False)\n 
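# Worked example of encriptar_nome above on a hypothetical name: "MARCUS".
#   keep the first and last letters, reverse the middle:
#       "M" + "UCRA" + "S" -> "MUCRAS"
#   then shift every letter one place (the doubled alphabet list makes Z wrap
#   to A): "MUCRAS" -> "NVDSBT"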
dados_bolsista = lista_organizada[[\"NM_BOLSISTA\", \"CPF_BOLSISTA\", \"NM_ENTIDADE_ENSINO\", \"CD_MOEDA\",\n \"VL_BOLSISTA_PAGAMENTO\"]]\n print(dados_bolsista.iloc[0])\n # Tentar de novo ou voltar ao menu principal\n print(52 * \"#\")\n option = int(input(\"Digite 1 pra tentar de novo ou 0 pra voltar ao menu principal\\n> \"))\n if option == 1:\n # Chamar função de novo\n consultar_bolsa_ano()\n elif option == 0:\n # Voltar ao menu principal\n interface_inicial()\n else:\n # Código responsavel por impedir input inválido\n while option != (1, 0):\n option = int(input(\"DIGITE UM NÚMERO VÁLIDO! (1 OU 0)\\n> \"))\n if option == 1:\n consultar_bolsa_ano()\n elif option == 0:\n interface_inicial()\n\n\n# buscar bolsista\ndef buscar_bolsista():\n # Limpar tela\n clear()\n # Interface gráfica\n print(52 * \"#\")\n print(\"=================BUSCAR BOLSISTA====================\\n\")\n nome_bolsista = str(input(\"Digite o nome inteiro do bolsista:\\n> \"))\n # Localiza o nome do bolsista no banco de dados\n bolsita = df.loc[df[\"NM_BOLSISTA\"] == nome_bolsista.upper()]\n # Atribui o nome do bolsista a variavel string\n string = bolsita.iloc[0, 0]\n # remove espaços do nome\n nome_bolsista_formatado = remove(string)\n # Encripta o nome com a função encriptar_nome() (linha: 22)\n encriptar_nome(nome_bolsista_formatado)\n # Filtra as informações relevantes no banco de dados\n info = bolsita[[\"AN_REFERENCIA\", \"NM_ENTIDADE_ENSINO\", \"CD_MOEDA\", \"VL_BOLSISTA_PAGAMENTO\"]]\n # Mostra o resultado\n print(encriptar_nome(nome_bolsista_formatado))\n print(info)\n # Tentar de novo ou voltar ao menu principal\n print(52 * \"#\")\n option = int(input(\"Digite 1 pra tentar de novo ou 0 pra voltar ao menu principal\\n> \"))\n if option == 1:\n # Chamar função de novo\n buscar_bolsista()\n elif option == 0:\n # Voltar ao menu principal\n interface_inicial()\n else:\n # Código responsavel por impedir input inválido\n while option != (1, 0):\n option = int(input(\"DIGITE UM NÚMERO VÁLIDO! (1 OU 0)\\n> \"))\n if option == 1:\n consultar_bolsa_ano()\n elif option == 0:\n interface_inicial()\n\n\n# media anual\ndef media_anual():\n # Limpa a tela\n clear()\n # Interface gráfica\n print(52 * \"#\")\n print(\"======MÉDIA ANUAL DAS BOLSAS========\\n\")\n ano = input(\"Digite o ano:\\n> \")\n # Checar se o ano é válido\n if int(ano) != 2013 and int(ano) != 2014 and int(ano) != 2015 and int(ano) != 2016:\n while int(ano) != 2013 and int(ano) != 2014 and int(ano) != 2015 and int(ano) != 2016:\n ano = input(\"Digite o ano:\\n> \")\n # Localiza o ano pedido\n lista_ano = df.loc[df[\"AN_REFERENCIA\"] == int(ano)]\n # Filtra as informações relevantes no banco de dados\n lista_ano_formatada = lista_ano[[\"AN_REFERENCIA\", \"CD_MOEDA\", \"VL_BOLSISTA_PAGAMENTO\"]]\n # Mostra o resultado da média\n print(lista_ano_formatada.groupby([\"AN_REFERENCIA\"]).mean())\n # Tentar de novo ou voltar ao menu principal\n print(52 * \"#\")\n option = int(input(\"Digite 1 pra tentar de novo ou 0 pra voltar ao menu principal\\n> \"))\n if option == 1:\n # Chamar função de novo\n media_anual()\n elif option == 0:\n # Voltar ao menu principal\n interface_inicial()\n else:\n # Código responsavel por impedir input inválido\n while option != (1, 0):\n option = int(input(\"DIGITE UM NÚMERO VÁLIDO! 
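# Note on the retry loops above and below: `option != (1, 0)` compares an int
# to a tuple, which is always True, so the guard never actually validates the
# input. The intended membership test (sketch):
#     while option not in (1, 0):
#         option = int(input("DIGITE UM NÚMERO VÁLIDO! (1 OU 0)\n> "))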
(1 OU 0)\\n> \"))\n if option == 1:\n consultar_bolsa_ano()\n elif option == 0:\n interface_inicial()\n\n\n# top 3 maiores e menores bolsas\ndef ranking_bolsas():\n # Limpa a tela\n clear()\n # Interface gráfica\n print(52 * \"#\")\n print(\"================RANKING DAS BOLSAS==================\\n\")\n print(\"TOP 3 MAIORES BOLSAS:\\n\")\n # Organiza a lista pelo maiores valores e pega os primeiros 3\n lista_top3 = df.sort_values(\"VL_BOLSISTA_PAGAMENTO\", ascending=False)\n print(lista_top3[[\"NM_BOLSISTA\", \"CD_MOEDA\", \"VL_BOLSISTA_PAGAMENTO\"]][0:3])\n print(\"\\n\")\n print(\"TOP 3 MENORES BOLSAS:\\n\")\n # Oraganiza a lista pelos menores valores e pega os 3 menores\n lista_botton3 = df.sort_values(\"VL_BOLSISTA_PAGAMENTO\")\n print(lista_botton3[[\"NM_BOLSISTA\", \"CD_MOEDA\", \"VL_BOLSISTA_PAGAMENTO\"]][0:3])\n # Tentar de novo ou voltar ao menu principal\n print(52 * \"#\")\n option = int(input(\"Digite 1 pra tentar de novo ou 0 pra voltar ao menu principal\\n> \"))\n if option == 1:\n # Chamar função de novo\n ranking_bolsas()\n elif option == 0:\n # Voltar ao menu principal\n interface_inicial()\n else:\n # Código responsavel por impedir input inválido\n while option != (1, 0):\n option = int(input(\"DIGITE UM NÚMERO VÁLIDO! (1 OU 0)\\n> \"))\n if option == 1:\n consultar_bolsa_ano()\n elif option == 0:\n interface_inicial()\n\n\n# Função inicial principal\ninterface_inicial()\n","repo_name":"MarcusQuirino/Projeto-Capes","sub_path":"capes_db.py","file_name":"capes_db.py","file_ext":"py","file_size_in_byte":9521,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14438628309","text":"import logging\nfrom os import EX_CANTCREAT\nimport re\nfrom utils.data_process import Data\nfrom re import L, sub\n\n\nclass Bool_Search(object):\n def __init__(self, inverted_index, dictionary):\n self.ii = inverted_index\n self.dict = dictionary\n\n def search(self, query):\n l_bracket_stack = []\n query = Bool_Search._preprocess_(query)\n index = 0\n # resolve priority provided by brackets\n # and pass plain expression to self._process_\n while index < len(query):\n if query[index] == '(':\n l_bracket_stack.append(index)\n index += 1\n elif query[index] == ')':\n # match last left bracket\n l_bracket_index = l_bracket_stack.pop()\n # pop left bracket\n query.pop(l_bracket_index)\n # pop expression in between\n # replace right bracket with procssed single inverted index\n to_process = [query.pop(l_bracket_index)\n for i in range(0, index-l_bracket_index-1)]\n query[l_bracket_index] = Bool_Search.process(to_process)\n index = l_bracket_index + 1\n # pass bool keyword to process to handle, ignore\n elif query[index] == 'and' or query[index] == 'not' or query[index] == 'or':\n index += 1\n # replace word with corresponding inverted index\n else:\n if query[index] in self.dict:\n query[index] = self.ii[self.dict[query[index]]]\n else:\n query[index] = None\n index += 1\n\n if index > 1:\n query[0] = Bool_Search.process(query)\n return query[0]\n\n @staticmethod\n def intersection(iia, iib):\n # handle circumstances in which one or more word is not found\n if iia == None or iib == None:\n return None\n i = 0\n j = 0\n res = []\n while i < len(iia) and j < len(iib):\n if iia[i] > iib[j]:\n j += 1\n elif iia[i] < iib[j]:\n i += 1\n else:\n res.append(iia[i])\n i += 1\n j += 1\n return res\n\n @staticmethod\n def strip(iia, iib):\n # handle circumstances in which one or more word is not found\n if iia == None:\n return None\n elif iib == None:\n 
return iia\n i = 0\n j = 0\n res = iia\n while i < len(res) and j < len(iib):\n if res[i] > iib[j]:\n j += 1\n elif res[i] < iib[j]:\n i += 1\n else:\n res.pop(i)\n j += 1\n return res\n\n @staticmethod\n def complement(iia, iib):\n # handle circumstances in which one or more word is not found\n if iia == None and iib == None:\n return None\n elif iia == None:\n return iib\n elif iib == None:\n return iia\n i = 0\n j = 0\n res = []\n while i < len(iia) and j < len(iib):\n if iia[i] > iib[j]:\n res.append(iib[j])\n j += 1\n elif iia[i] < iib[j]:\n res.append(iia[i])\n i += 1\n else:\n res.append(iia[i])\n i += 1\n j += 1\n if i < len(iia):\n res.extend(iia[i:])\n if j < len(iib):\n res.extend(iib[j:])\n return res\n\n # generate key word list from query string\n def _preprocess_(query):\n # add space before & after bracket for split\n query = sub('\\(', ' ( ', query)\n query = sub('\\)', ' ) ', query)\n # remove continuous spaces\n query = sub(' {2,}', ' ', query)\n query = query.lower()\n query = query.split()\n query = Data.lemma(query)\n return query\n # calculate target inverted index by operator\n\n @staticmethod\n def process(ii_list):\n while len(ii_list) > 1:\n iia, op, iib = ii_list.pop(0), ii_list.pop(0), ii_list[0]\n if op == 'and':\n ii_list[0] = Bool_Search.intersection(iia, iib)\n elif op == 'or':\n ii_list[0] = Bool_Search.complement(iia, iib)\n elif op == 'not':\n ii_list[0] = Bool_Search.strip(iia, iib)\n return ii_list[0]\n\n\ndef load(path = 'output'):\n import zstd\n import pickle\n with open(f'{path}/inverted_index.zstd', 'rb') as f:\n ii = zstd.decompress(f.read())\n ii = pickle.loads(ii)\n f.close()\n with open(f'{path}/dictionary.zstd', 'rb') as f:\n dictionary = zstd.decompress(f.read())\n dictionary = pickle.loads(dictionary)\n f.close()\n with open(f'{path}/metadata.zstd', 'rb') as f:\n metadata = zstd.decompress(f.read())\n metadata = pickle.loads(metadata)\n f.close()\n return ii, dictionary, metadata\n\n\nif __name__ == '__main__':\n # Beware that python API limit data size to 2GB\n # Coz all source files' size = 1.9GB so we can ignore it safely\n logging.info(\"Loading data from file\")\n ii, dictionary, metadata = load()\n bs = Bool_Search(ii, dictionary)\n print(\"Ctrl + C to exit\")\n while True:\n query = input(\"Enter expression for bool search: \")\n try:\n res = bs.search(query)\n except:\n continue\n if res != None:\n for docid in range(len(res)):\n print('{}\\t{}'.format(docid+1, metadata[res[docid]]['title']))\n else:\n print('Not found')\n","repo_name":"Catoverflow/WebSearch","sub_path":"bool_search.py","file_name":"bool_search.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"10089318037","text":"from flask import Blueprint, request, jsonify\nfrom flask_login import login_required\nfrom models import db, File\nimport shutil\nimport os\n\ndelete_file = Blueprint(\"delete_file\", __name__)\n\n@delete_file.route(\"/delete-file\", methods=['POST'])\n@login_required\ndef file_delete(): \n try:\n news_Id = request.form[\"edit\"]\n files = File.query.filter_by(news_Id=news_Id).all()\n list = [[i.id, i.name] for i in files]\n return jsonify(list)\n except:\n file_id = request.form[\"file_id\"]\n file_del = File.query.filter_by(id=file_id).first()\n try:\n shutil.rmtree(os.path.join('static/uploads/', str(file_del.id)))\n except:\n pass\n File.query.filter(File.id == file_id).delete(synchronize_session=False)\n db.session.commit()\n return 
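# Worked example for the sorted-posting-list merges above (doc ids are
# illustrative): with iia = [1, 3, 5] and iib = [3, 4, 5]:
#   Bool_Search.intersection(iia, iib) -> [3, 5]        (AND)
#   Bool_Search.complement(iia, iib)   -> [1, 3, 4, 5]  (OR, despite the name)
#   Bool_Search.strip(iia, iib)        -> [1]           (AND NOT; note it pops
#                                          elements from iia in place)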
jsonify({})","repo_name":"johnny031/taiict_backend","sub_path":"views/delete_file.py","file_name":"delete_file.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27339184972","text":"import os\nimport math\nimport time\nimport datetime\nfrom multiprocessing import Process\nfrom multiprocessing import Queue\nfrom skimage.measure import compare_ssim as ssim\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport imageio\n\nimport torch\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lrs\nimport nibabel\nimport glob\nclass timer():\n def __init__(self):\n self.acc = 0\n self.tic()\n\n def tic(self):\n self.t0 = time.time()\n\n def toc(self, restart=False):\n diff = time.time() - self.t0\n if restart: self.t0 = time.time()\n return diff\n\n def hold(self):\n self.acc += self.toc()\n\n def release(self):\n ret = self.acc\n self.acc = 0\n\n return ret\n\n def reset(self):\n self.acc = 0\n\nclass checkpoint():\n def __init__(self, args):\n self.args = args\n self.ok = True\n self.ssimlog = torch.Tensor()\n self.log = torch.Tensor()\n self.log2 = torch.Tensor()\n now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')\n\n if not args.load:\n if not args.save:\n args.save = now\n# self.dir = os.path.join('..', 'experiment', args.save)\n self.dir = os.path.join('.', 'experiment', args.save)\n else:\n self.dir = os.path.join('.', 'experiment', args.load)\n if os.path.exists(self.dir):\n self.log = torch.load(self.get_path('psnr_log.pt'))\n self.log2 = torch.load(self.get_path('train_psnr_log.pt'))\n print('Continue from epoch {}...'.format(len(self.log)))\n else:\n args.load = ''\n\n if args.reset:\n os.system('rm -rf ' + self.dir)\n args.load = ''\n\n os.makedirs(self.dir, exist_ok=True)\n os.makedirs(self.get_path('model'), exist_ok=True)\n #for d in args.data_test:\n os.makedirs(self.get_path('results-{}'.format(args.data_test)), exist_ok=True)\n\n open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'\n self.log_file = open(self.get_path('log.txt'), open_type)\n with open(self.get_path('config.txt'), open_type) as f:\n f.write(now + '\\n\\n')\n for arg in vars(args):\n f.write('{}: {}\\n'.format(arg, getattr(args, arg)))\n f.write('\\n')\n\n self.n_processes = 8\n\n def get_path(self, *subdir):\n return os.path.join(self.dir, *subdir)\n\n def save(self,trainer, epoch, is_best=False):\n trainer.model.save(self.get_path('model'), epoch, is_best=is_best)\n trainer.loss.save(self.dir)\n trainer.loss.plot_loss(self.dir, epoch)\n\n self.plot_psnr(epoch)\n #self.plot_psnr(epoch,'test')\n #trainer.optimizer.save(self.dir)\n torch.save(trainer.optimizer.state_dict(), os.path.join(self.dir, 'optimizer.pt'))\n torch.save(self.log, self.get_path('psnr_log.pt'))\n torch.save(self.log2, self.get_path('train_psnr_log.pt'))\n\n def add_log(self, log, train=False):\n if train:\n self.log2 = torch.cat([self.log2, log])\n else:\n self.log = torch.cat([self.log, log])\n self.ssimlog=torch.cat([self.ssimlog, log])\n\n def write_log(self, log, refresh=False):\n print(log)\n self.log_file.write(log + '\\n')\n if refresh:\n self.log_file.close()\n self.log_file = open(self.get_path('log.txt'), 'a')\n\n def done(self):\n self.log_file.close()\n\n def plot_psnr(self, epoch):\n axis = np.linspace(1, epoch, epoch)\n \n label = 'SR on {}'.format(self.args.data_train)\n fig = plt.figure()\n ax = fig.add_axes([0,0,1,1])\n ax.text(0.75, 0.25, 
'Scale {}'.format(self.args.scale[0]),\n horizontalalignment='right',\n verticalalignment='bottom',\n transform=ax.transAxes)\n plt.title(label)\n \n plt.plot(\taxis,\n self.log2[:, idx_scale].numpy(),\n label='Training'\n )\n plt.plot(\n axis,\n self.log[:, idx_scale].numpy(),\n label='Validation'\n )\n plt.legend()\n plt.xlabel('Epochs')\n plt.ylabel('PSNR')\n plt.grid(True)\n\n plt.savefig(self.get_path('PSNR.pdf'))\n plt.close(fig)\n \n\n def begin_background(self):\n self.queue = Queue()\n\n def bg_target(queue):\n while True:\n if not queue.empty():\n filename, tensor = queue.get()\n if filename is None: break\n imageio.imwrite(filename, tensor.numpy())\n \n self.process = [\n Process(target=bg_target, args=(self.queue,)) \\\n for _ in range(self.n_processes)\n ]\n \n for p in self.process: p.start()\n\n def end_background(self):\n for _ in range(self.n_processes): self.queue.put((None, None))\n while not self.queue.empty(): time.sleep(1)\n for p in self.process: p.join()\n\n def save_results(self, dataset, filename, save_list, scale):\n if self.args.save_results:\n filename = self.get_path(\n 'results-{}'.format(dataset),\n '{}'.format(filename)\n )\n\n postfix = ('SR', 'LR', 'HR')\n for v, p in zip(save_list, postfix):\n normalized = v[0].mul(255 / self.args.rgb_range)\n tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()\n self.queue.put(('{}.png'.format(filename), tensor_cpu))\n def aggregateandcalcpsnr(self,patches_sr,patches_hr,n_slices,subject_path_hr,ds):\n i=0\n patch=self.args.patch_size\n overlapping=patch//2\n filename = self.get_path(\n 'results-{}'.format(self.args.data_test),\n '{}'.format(subject_path_hr.split('/')[-1])\n )\n os.makedirs(filename, exist_ok=True)\n whole_sr=torch.zeros(512,512,n_slices,device=torch.device('cpu'))\n whole_hr=torch.zeros(512,512,n_slices,device=torch.device('cpu'))\n if self.args.model== 'NDFSRCNN':\n for n_slice in range(n_slices):\n whole_sr[:-1,:-1,n_slice]=torch.from_numpy(patches_sr[n_slice])\n whole_hr[:-1,:-1,n_slice]=torch.from_numpy(patches_hr[n_slice])\n elif self.args.model== 'MDFSRCNN':\n# patch_r=torch.zeros(128,128,device=torch.device('cpu'))\n overlapping=256\n patch=256\n for depth in range(0,n_slices,64):\n for height in range(0,512,overlapping):\n for width in range(0,512,overlapping):\n start_x,start_y,start_z=width,height,depth\n end_x,end_y,end_z=width+patch-1,height+patch-1,depth+64-1\n if (end_x<=512) and (end_y<=512) and (end_z<=n_slices) :\n # whole_sr[start_x:end_x,start_y:end_y,start_z:end_z]+=torch.from_numpy(patches_sr[i])\n \n \n \n whole_hr[start_x:end_x,start_y:end_y,start_z:end_z]=torch.from_numpy(patches_hr[i])\n whole_sr[start_x:end_x,start_y:end_y,start_z:end_z]=torch.from_numpy(patches_sr[i])\n i+=1\n \n else:\n \n for depth in range(0,n_slices,overlapping):\n for height in range(0,512,overlapping):\n for width in range(0,512,overlapping):\n start_x,start_y,start_z=width,height,depth\n end_x,end_y,end_z=width+patch,height+patch,depth+patch\n if (end_x<=512) and (end_y<=512) and (end_z<=n_slices) :\n # whole_sr[start_x:end_x,start_y:end_y,start_z:end_z]+=torch.from_numpy(patches_sr[i])\n \n \n \n whole_hr[start_x:end_x,start_y:end_y,start_z:end_z]=torch.from_numpy(patches_hr[i])\n whole_sr[start_x:end_x,start_y:end_y,start_z:end_z]=torch.from_numpy(patches_sr[i])\n # if (not(start_x==0) and not(start_y==0)and not(start_z==0)):\n # whole_sr[start_x:end_x-32,start_y:end_y-32,start_z:end_z-32]/=2\n # elif (not(start_x==0) and not(start_y==0)):\n # 
whole_sr[start_x:end_x-32,start_y:end_y-32,start_z:end_z]/=2\n # elif (not(start_x==0) and not(start_z==0)):\n # whole_sr[start_x:end_x-32,start_y:end_y,start_z:end_z-32]/=2\n # elif (not(start_y==0) and not(start_z==0)):\n # whole_sr[start_x:end_x,start_y:end_y-32,start_z:end_z-32]/=2\n # elif not(start_x==0):\n # whole_sr[start_x:end_x-32,start_y:end_y,start_z:end_z]/=2\n # elif not(start_y==0):\n # whole_sr[start_x:end_x,start_y:end_y-32,start_z:end_z]/=2\n # elif not(start_z==0):\n # whole_sr[start_x:end_x,start_y:end_y,start_z:end_z-32]/=2\n i+=1\n #whole_sr = quantize(whole_sr, 32767)\n #whole_hr = quantize(whole_hr, 32767)\n #whole_hr=torch.from_numpy(np.ascontiguousarray(np.array(np.load(glob.glob(subject_path_hr+'/full*.npy')[0]))).transpose(1,2,0)).float().cpu()\n psnr=calc_psnr(whole_sr,whole_hr)\n ssim=calc_ssim(whole_sr.numpy(),whole_hr.numpy())\n #print(whole_hr.shape)\n for i in range(n_slices):\n ds.PixelData=np.ascontiguousarray(np.around(whole_sr[:,:,i].numpy()),dtype=np.int16)\n ds.save_as(filename+'/'+str(i)+'_sr.dcm')\n ds.PixelData=np.ascontiguousarray(np.around(whole_hr[:,:,i].numpy()),dtype=np.int16)\n ds.save_as(filename+'/'+str(i)+'_hr.dcm')\n #nibabel.save(nibabel.Nifti1Image(whole_sr.numpy(),np.diag([1, 2, 3, 1])),filename+'_sr.nii')\n #nibabel.save(nibabel.Nifti1Image(whole_hr.numpy(),np.diag([1, 2, 3, 1])),filename+'_hr.nii')\n #print(whole_hr[:,:,i])\n return psnr, ssim\n \ndef quantize(img, rgb_range):\n \n pixel_range = 32767 / rgb_range\n return img.mul(pixel_range).clamp(-2048, 6916).round().div(pixel_range)\ndef calc_psnr(sr, hr, dataset=None):\n if hr.nelement() == 1: return 0\n# hr_=hr-torch.min(hr)\n# imax=torch.max(hr_)\n# hr_grey=(hr_/imax)*255\n# sr_=sr-torch.min(sr)\n# imax=torch.max(sr_)\n# sr_grey=(sr_/imax)*255\n# squared_error = (hr_grey- sr_grey).pow(2)\n# mse = squared_error.mean()\n squared_error = (hr- sr).pow(2)\n mse = squared_error.mean()\n amax=torch.max(hr)\n psnr = 10 * math.log10(amax**2/mse)\n \n \n \n \n \n return psnr\n\ndef calc_ssim(sr, hr, dataset=None):\n #if hr.nelement() == 1: return 0\n \n ssim_=ssim(hr, sr, gradient=False, data_range=None, multichannel=False, gaussian_weights=True, full=False)\n \n \n \n \n \n return ssim_\n","repo_name":"ShwetaShanbhag/SuperResolution","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"27860711575","text":"import csv\nfrom src.track_orders import TrackOrders\n\n\ndef get_orders(path_to_file: str):\n try:\n with open(path_to_file, \"r\") as file:\n file_data = csv.reader(file)\n orders = [*file_data]\n except FileNotFoundError:\n validExtension = \"csv\"\n prefix_msg = (\n \"Extensão inválida:\"\n if not path_to_file.endswith(validExtension)\n else \"Arquivo inexistente: \"\n )\n raise FileNotFoundError(f\"{prefix_msg}'{path_to_file}'\")\n else:\n return orders\n\n\ndef create_track(orders):\n tracker_orders = TrackOrders()\n\n for name, order, day in orders:\n tracker_orders.add_new_order(name, order, day)\n\n return tracker_orders\n\n\ndef generate_log_file(log_file, path_to_save):\n with open(path_to_save, \"w\") as file:\n file.write(log_file)\n\n\ndef analyze_log(path_to_file):\n orders = get_orders(path_to_file)\n\n orders_trackables = create_track(orders)\n\n maria_most_ordered = orders_trackables.get_most_ordered_dish_per_customer(\n \"maria\"\n )\n arnaldo_order_hamburguer = (\n orders_trackables.get_order_quantity_per_customer(\n 
\"arnaldo\", \"hamburguer\"\n )\n )\n joao_never_ask = orders_trackables.get_never_ordered_per_customer(\"joao\")\n joao_never_went = orders_trackables.get_days_never_visited_per_customer(\n \"joao\"\n )\n\n data_to_save = (\n f\"{maria_most_ordered}\\n\"\n f\"{arnaldo_order_hamburguer}\\n\"\n f\"{joao_never_ask}\\n\"\n f\"{joao_never_went}\\n\"\n )\n\n generate_log_file(data_to_save, \"data/mkt_campaign.txt\")\n","repo_name":"davidrogger/trybe-project-restaurant-orders","sub_path":"src/analyze_log.py","file_name":"analyze_log.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42425869745","text":"from util import *\n\n\n@apply\ndef apply(given, index=None):\n et, f = given.of(Infer)\n eqs = et.of(And)\n if index is None:\n for index, eq in enumerate(eqs):\n if eq.is_Equal:\n break\n\n eq = eqs[index]\n old, new = eq.of(Equal)\n\n return Infer(et, f._subs(old, new))\n\n\n@prove\ndef prove(Eq):\n from axiom import algebra\n\n x, y = Symbol(integer=True)\n t, f, g = Function(integer=True)\n Eq << apply(Infer(Equal(t(x), y) & (f(x) > y), Equal(f(t(x), y), g(x))))\n\n Eq << algebra.infer.given.infer.et.apply(Eq[0])\n\n Eq << Eq[-1].this.rhs.apply(algebra.et.given.et.subs.eq, index=2)\n\n\nif __name__ == '__main__':\n run()\n# created on 2018-06-11\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/algebra/infer_et/given/infer/subs.py","file_name":"subs.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"37317201975","text":"import numpy as np\nimport pycsi\nimport csitest\nimport os\nimport matplotlib.pyplot as plt\n\nloader = csitest.MyTest.npzloader\n\nnpzpath = 'npsave/1030/csi/'\n\ncal = {'0': \"1030B12\",\n '30': \"1030B13\",\n '60': \"1030B14\",\n '90': \"1030B15\",\n '120': \"1030B16\",\n '150': \"1030B17\",\n '180': \"1030B18\",\n '210': \"1030B19\",\n '240': \"1030B20\",\n '270': \"1030B21\",\n '300': \"1030B22\",\n '330': \"1030B23\",\n }\n\ndiffs = []\nfor key, value in cal.items():\n ref = loader(value, npzpath)\n if ref.data.remove_inf_values() == 'bad':\n diffs.append([np.nan, np.nan, np.nan])\n continue\n\n ref_angle = eval(key)\n\n ref_csi = ref.data.amp * np.exp(1.j * ref.data.phase)\n ref_diff = np.mean(ref_csi * ref_csi[:, :, 0, :][:, :, np.newaxis, :].conj(), axis=(0, 1))\n true_diff = np.exp([-1.j * np.pi * antenna * np.sin(ref_angle * np.pi / 180) for antenna in range(3)]).reshape(-1, 1)\n #true_diff = np.exp(0)\n\n diffs.append(np.squeeze(np.angle(ref_diff.reshape(-1, 1) * true_diff.conj())).tolist())\n\nprint(diffs)\ndiffs = np.array(diffs)\n\nprint(np.mean(diffs, axis=0))\nx = list(range(0, 360, 30))\nplt.scatter(x, diffs[:, 0], c='r', label='0-0')\nplt.scatter(x, diffs[:, 1], c='b', label='1-0')\nplt.scatter(x, diffs[:, 2], c='g', label='2-0')\nplt.title(\"Initial Phase Offsets\")\n\nplt.xlabel('Position / $deg$')\nplt.ylabel('Phase Difference / $rad$')\nplt.legend()\nplt.show()\n","repo_name":"MilkywayCocktail/CSI-python","sub_path":"ipo.py","file_name":"ipo.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"73365515471","text":"from django.db import models\nimport random\nfrom datetime import date, timedelta\nfrom django_countries.fields import CountryField\n\nfrom django.urls import reverse \n\n# Create your models here.\n\nclass 
AutorManager(models.Manager):\n def crear_nacionalidad(self):\n numero = random.randint(1, 3)\n return (\"Argentina\" if numero == 2 else (\"Chile\" if numero == 1 else \"Uruguay\"))\n \n def crear_fecha(self, ene):\n return date.today() + timedelta(days=ene)\n \n def crear_autor(self, cantidad):\n for i in range(cantidad):\n self.create(nombre=\"Autor \" + str(i), nacionalidad = self.crear_nacionalidad(), fecha_nacimiento = self.crear_fecha(i))\n \nclass Autor(models.Model):\n foto = models.ImageField(upload_to=\"autores\", default='autores/nofoto.jpg')\n objects = AutorManager()\n nombre = models.CharField(max_length=100)\n nacionalidad = CountryField()\n fecha_nacimiento = models.DateField(default='--')\n def get_absolute_url(self):\n return reverse(\"autores-list\")\n \n \nclass LibroManager(models.Manager):\n def crear_libro(self,cantidad):\n for i in range(cantidad):\n aleatorio = random.randint(1,6)\n # filter retorna QuerySet y get retorna Objeto\n autor_aleatorio = Autor.objects.all()[aleatorio:aleatorio+1]\n libro = self.create(\n isbn = 11112223 + i,\n titulo = \"Titulo \" + str(i),\n editorial = \"Editorial \" + str(i),\n anio = 1988 + random.randint(1,6),\n tipo_libro = 'Novela' if (aleatorio % 4) == 0 else \n ('Teatro' if (aleatorio % 4) == 1 else \n ('Poesía' if (aleatorio % 4) == 2 else 'Ensayo'))\n )\n libro.autor.add(autor_aleatorio[0].id) # Necesito pasarle el id del autor de alguna manera \n\nclass Libro(models.Model):\n objects=LibroManager()\n TIPO_LIBRO_CHOICES = [\n (\"NOVELA\", 'Novela'),\n (\"TEATRO\", 'Teatro'), \n (\"POESIA\", 'Poesía'), \n (\"ENSAYO\", 'Ensayo'),\n ]\n titulo = models.CharField(max_length= 100, default='--')\n tipo_libro = models.CharField(\n max_length = 20, \n choices = TIPO_LIBRO_CHOICES,\n default = \"POESIA\",\n ) \n editorial = models.CharField(max_length=100, default='--')\n anio = models.IntegerField(default='--')\n isbn = models.IntegerField(default='--')\n # Se declara una relacion muchos a muchos con Autor, pero solo aquí porque está declarado arriba Autor\n autor = models.ManyToManyField(Autor)\n \nclass Copia(models.Model):\n TIPO_ESTADO_CHOICES = [\n ('PRESTADA', \"Prestada\"), \n (\"BIBLIOTECA\", 'En biblioteca'), \n ('RETRASO', 'Retraso'), \n ('REPARACION', 'Repatración'),\n ]\n tipo_estado = models.CharField(\n max_length = 20,\n choices = TIPO_ESTADO_CHOICES,\n default = 'BIBLIOTECA',\n )\n \nclass Lector(models.Model):\n ESTADO_LECTOR_CHOICES = [\n ('HABILITADO', 'Habilitado'), \n ('MULTADO', 'Multado'), \n ]\n estado_lector = models.CharField(\n max_length = 20,\n choices = ESTADO_LECTOR_CHOICES,\n default = 'HABILITADO',\n ) \n \nclass Prestamo(models.Model):\n f_entrega = models.DateField\n f_devolucion = models.DateField\n multa = models.BooleanField\n \n","repo_name":"matiasgel/laboratorio_2023","sub_path":"projecto_libreria/libreria/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25042409168","text":"# a = 98 \n# b = 77\n\n\"\"\"\na : 98 : 0110 0010\nb : 77 : 0100 1101\na : ^ : 0010 1111\n\na : : 0010 1111\nb : 77 : 0100 1101\nb : ^ : 0110 0010\n\na : : 0010 1111\nb : : 0110 0010\na : ^ : 0100 1101\n\"\"\"\n# a = a^b\n# b = a^b\n# a = a^b\n\n# for loop\n\n# fruits = [\"banana\", \"apple\", \"mango\", \"orange\", \"grapes\", \"kiwi\", \"guava\", \"sapota\"]\n\"\"\"\ni = 0\nwhile i < len(fruits): # i = 0, 1, 2, 3, 4, 5, 6, 7\n print(fruits[i])\n i += 1\n\"\"\"\n# for i in fruits: # i = 
\"banana\", \"apple\", \"mango\", \"orange\", \"grapes\", \"kiwi\", \"guava\", \"sapota\"\n# print(i)\n\"\"\"\nfor fruit in fruits:\n if fruit == \"orange\":\n # break\n # continue\n pass\n print(fruit)\nprint(\"Thanks!\")\n\"\"\"\n\"\"\"\nchoice = int(input(\"Enter Choice: \"))\nif choice == 1:\n print(\"Your balance\")\nelif choice == 2:\n print(\"Your Due\")\nelif choice == 3:\n print(\"Loan emi\")\nelif choice == 4:\n print(\"Interest amount\")\nelif choice == 5:\n pass\nelif choice == 6:\n pass\nelif choice == 7:\n pass\nelif choice == 8:\n pass\nelif choice == 9:\n print(\"Your Call is being diverted to customer care executive.\")\nelse:\n print(\"Invalid option\")\n\"\"\"\n# India = (\"New Delhi\", \"Mumbai\", \"Chennai\", \"Kolkata\", \"Bangluru\", \"Ahmedabad\")\n# for city in India:\n# print(city)\n\n# India = {\"New Delhi\", \"Mumbai\", \"Chennai\", \"Kolkata\", \"Bangluru\", \"Ahmedabad\"}\n\"\"\"\nmyName = \"Python Rossum\"\nfor character in myName:\n print(character)\n\"\"\"\n\n\"\"\"\nfor(i = 0; i < 20; i = i + 2)\n{\n\n}\n\"\"\"\n# range(5) = 0,1,2,3,4\n# for i in range(10):\n# print(i)\n\n# for i in range(5, 15):\n# print(fruits[i])\n\n# for i in range(5, 15, 2):\n# print(fruits[i])\n\n# User defined list:\n\"\"\"\nmyList = []\nprint(\"Enter the elements to be added in the list:\")\nwhile True:\n quit = input(\"Press 'q' to quit, 'Enter' to enter member: \").lower()\n if quit == \"q\":\n break\n member = input()\n if member.isnumeric():\n member = float(member)\n myList.append(member)\nprint(myList)\n\"\"\"\n# counting number of digits in a given number\n\"\"\"\nnumber = int(input(\"Enter the number: \")) # 5672\ndigits = len(str(number))\nprint(\"number of digits:\", digits)\n\"\"\"\n# list comprehension:\n\n# numbers = []\n# for i in range(1, 101):\n# numbers.append(i)\n\"\"\"\nnumbers = [i for i in range(1, 101)]\nprint(numbers)\n\"\"\"\n\"\"\"\nPerfect numbers:\n30 = 1, 2, 3, 5, 6, 10, 15\n28 = \n\nArmstrong numbers:\n1634\nif (1^4)+(6^4)+(3^4)+(4^4) == 1634: armstrong\n\"\"\"\n\"\"\"\nn = int(input(\"Enter n: \"))\nflag = 1\nfor i in range(2, n):\n if n % i == 0:\n flag = 0\n print(\"Not Prime.\")\n break\nif flag == 1:\n print(\"Prime.\")\n\"\"\"\n# break - else\nn = int(input(\"Enter n: \"))\nfor i in range(2, n):\n if n % i == 0:\n print(\"Not Prime.\")\n break\nelse:\n print(\"Prime.\")","repo_name":"Kunal-63/Python-Tutorial","sub_path":"29apr.py","file_name":"29apr.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"72285242832","text":"import os\n\n\nclass ProjectTree:\n def __init__(self, project_tree, root_dir=None):\n self.project_tree = project_tree\n self.root_dir = root_dir\n\n def create_project_tree(self, tree: dict, root_name=None):\n if not root_name:\n root_name = os.getcwd()\n\n os.makedirs(f\"{root_name}\", exist_ok=True)\n if tree.get(\"children\"):\n for i in tree.get(\"children\"):\n if i.get(\"type\") == \"directory\":\n self.create_project_tree(i, root_name + \"/\" +\n i.get(\"name\"))\n else:\n with open(os.path.join(root_name,\n i.get(\"name\")), 'a') as temp_file:\n if i.get(\"content\"):\n temp_file.write(i.get(\"content\"))\n else:\n temp_file.write(\"\")\n\n\ndef print_dir_tree(dirname, path=os.path.pathsep):\n data = []\n for name in os.listdir(dirname):\n dct = {'name': name}\n\n full_path = os.path.join(dirname, name)\n if os.path.isfile(full_path):\n dct['type'] = 'file'\n elif os.path.isdir(full_path):\n dct['type'] = 'folder'\n 
dct['children'] = print_dir_tree(\n                full_path, path=path + name + os.path.pathsep)\n        data.append(dct)\n    return data\n","repo_name":"Sergii22/py-init-structure","sub_path":"py_init_structure/common_components/trees/project_trees.py","file_name":"project_trees.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"4863027236","text":"import math\nequation = input('Which equation?: ')\n\ndef numba1():\n    flag = 0\n    vxs = input('vx: ')\n    if vxs != '':\n        vx = float(vxs)\n    else:\n        flag += 4\n    vx0s = input('vx0: ')\n    if vx0s != '':\n        vx0 = float(vx0s)\n    else:\n        flag += 1\n    axs = input('ax: ')\n    if axs != '':\n        ax = float(axs)\n    else:\n        flag += 2\n    ts = input('t: ')\n    if ts != '':\n        t = float(ts)\n    else:\n        flag += 3\n    if flag == 1:\n        vx0 = vx - ( ax * t )\n    elif flag == 2:\n        if t == 0:\n            print(\"it's over\")\n        else:\n            ax = (vx - vx0) / t\n    elif flag == 3:\n        t = (vx - vx0) / ax\n    elif flag == 4:\n        vx = vx0 + ax * t\n    else:\n        print('Need more variables')\n\n    print('vx: ', vx, 'vx0: ', vx0, 'ax: ', ax, 't: ', t)\n    \ndef numba2():\n    flag = 0\n    xs = input('x: ')\n    if xs != '':\n        x = float(xs)\n    else:\n        flag += 1\n    x0s = input('x0: ')\n    if x0s != '':\n        x0 = float(x0s)\n    else:\n        flag += 2\n    vx0s = input('vx0: ')\n    if vx0s != '':\n        vx0 = float(vx0s)\n    else:\n        flag += 3\n    axs = input('ax: ')\n    if axs != '':\n        ax = float(axs)\n    else:\n        flag += 4\n    ts = input('t: ')\n    if ts != '':\n        t1 = float(ts)\n        t2 = 'N/A'\n    else:\n        flag += 5\n    if flag == 1:\n        x = x0 + vx0 * t1 + 0.5 * ax * t1**2\n    elif flag == 2:\n        x0 = -(ax * t1**2) / 2 - t1 * vx0 + x\n    elif flag == 3:\n        vx0 = -(ax * t1**2 + 2 * x0 - 2 * x) / (2 * t1)\n    elif flag == 4:\n        if t1 == 0:\n            print(\"it's over\")\n        else:\n            ax = -2 * (x0 + t1 * vx0 - x) / t1**2\n    elif flag == 5:\n        evil = vx0 ** 2 - 2 * ax * (x0 - x)\n        if evil >= 0:\n            t1 = (-vx0 + math.sqrt(evil)) / ax\n            t2 = (-vx0 - math.sqrt(evil)) / ax\n        else:\n            t1 = \"evil\"\n            t2 = 'evil'\n    \n    print('x: ', x, 'x0: ', x0, 'vx0: ', vx0, 'ax: ', ax, 't(1): ', t1, 't2: ', t2)\n    \ndef numba3():\n    flag = 0\n    xs = input('x: ')\n    if xs != '':\n        x = float(xs)\n    else:\n        flag += 1\n    x0s = input('x0: ')\n    if x0s != '':\n        x0 = float(x0s)\n    else:\n        flag += 2\n    vxs = input('vx: ')\n    if vxs != '':\n        vx = float(vxs)\n    else:\n        flag += 3\n    vx0s = input('vx0: ')\n    if vx0s != '':\n        vx0 = float(vx0s)\n    else:\n        flag += 4\n    axs = input('ax: ')\n    if axs != '':\n        ax = float(axs)\n    else:\n        flag += 5\n    if flag == 1:\n        x = (vx ** 2 - vx0 ** 2) / (2 * ax + x0)\n    elif flag == 2:\n        x0 = (x - vx ** 2 - vx0 ** 2) / (2 * ax)\n    elif flag == 3:\n        v2 = vx0 ** 2 + 2 * ax * (x - x0)\n        vx = math.sqrt(v2)\n    elif flag == 4:\n        vx02 = vx ** 2 - 2 * ax * (x - x0)\n        vx0 = math.sqrt(vx02)\n    elif flag == 5:\n        ax = (vx ** 2 - vx0 ** 2) / (2 * (x - x0))\n    s = x - x0\n    print('s: ', s, 'x: ', x, 'x0: ', x0, 'vx: ', vx, 'vx0: ', vx0, 'ax: ', ax, )\n    \n    \nif equation == '1':\n    numba1()\n    \nelif equation == '2':\n    numba2()\n\nelif equation == '3':\n    numba3()\n\nelse:\n    print('Not a valid equation')\n    ","repo_name":"BurntButtons/NSpire-Physics-Calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32848197530","text":"from flask import Flask,request\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n# Members API Route\n# get request\n@app.route(\"/members\")\ndef members():\n    return{\"members\":[\"Member1\" , \"Member2\" , 
\"Member3\"]}\n# post request\n@app.route(\"/tryPost\", methods=[\"POST\"])\ndef post_request():\n posted_data = request.json.get(\"data\")\n \n # Do something with the member posted_data, for example add it to a database\n print(posted_data)\n return {\"message\": f\"{posted_data} added successfully\"}\n\nif __name__ == \"__main__\":\n app.run(debug = True)\n","repo_name":"yousef-alaa-mostafa/flask-react-project","sub_path":"flask-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7716783767","text":"# Definition for Node.\n# class Node:\n# def __init__(self, val=0, left=None, right=None, random=None):\n# self.val = val\n# self.left = left\n# self.right = right\n# self.random = random\nclass NodeCopy:\n def __init__(self, val=0, left=None, right=None, random=None):\n self.val = val\n self.left = left\n self.right = right\n self.random = random\n \nclass Solution:\n def __init__(self):\n self.v = {}\n \n def copyRandomBinaryTree(self, root: 'Node') -> 'NodeCopy':\n if not root:\n return None\n if root in self.v:\n return self.v[root]\n colone = NodeCopy(root.val)\n self.v[root] = colone\n colone.left = self.copyRandomBinaryTree(root.left)\n colone.right = self.copyRandomBinaryTree(root.right)\n colone.random = self.copyRandomBinaryTree(root.random)\n return self.v[root]","repo_name":"JaylenZhang19/Leetcode","sub_path":"1485. Clone Binary Tree With Random Pointer.py","file_name":"1485. Clone Binary Tree With Random Pointer.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72550755151","text":"from DoublyLinkedList import DoublyLinkedList\n# def remove_duplicates2(self):\n# cur = self.head\n# l1=[]\n\n# while cur:\n \n# if cur.data in l1:\n# nxt = cur.next\n# self.delete_node(cur.data)\n# cur = nxt\n# else:\n# l1.append(cur.data)\n# cur = cur.next\n\n\n \n \n# def delete_node(self, node):\n# cur = self.head\n# while cur:\n# if cur == node and cur == self.head:\n# # Case 1:\n# if not cur.next:\n# cur = None \n# self.head = None\n# return\n\n# # Case 2:\n# else:\n# nxt = cur.next\n# cur.next = None \n# nxt.prev = None\n# cur = None\n# self.head = nxt\n# return \n\n# elif cur == node:\n# # Case 3:\n# if cur.next:\n# nxt = cur.next \n# prev = cur.prev\n# prev.next = nxt\n# nxt.prev = prev\n# cur.next = None \n# cur.prev = None\n# cur = None\n# return\n\n# # Case 4:\n# else:\n# prev = cur.prev \n# prev.next = None \n# cur.prev = None \n# cur = None \n# return \n# cur = cur.next\n\n\nif __name__ == '__main__':\n dlist = DoublyLinkedList()\n dlist.append(11)\n dlist.append(2)\n dlist.append(12)\n dlist.append(33)\n dlist.append(2)\n dlist.append(12)\n dlist.append(3)\n dlist.append(2)\n dlist.append(12)\n\n\n dlist.print_list()\n dlist.remove_duplicates()\n # dlist.remove_duplicates2()\n dlist.print_list()\n","repo_name":"harish2222/Data-Structures","sub_path":"Doubly_Linked_list/remove_dup.py","file_name":"remove_dup.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11260108165","text":"from collections import deque\nimport sys\n\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\ngraph = [[] for _ in range(n + 1)]\nvisited = [False] * (n + 1)\nfor _ in range(m):\n i, j = map(int, input().split())\n graph[i].append(j)\n 
graph[j].append(i)\n\n\ndef bfs(v):\n queue = deque()\n queue.append(v)\n visited[v] = True\n while queue:\n x = queue.popleft()\n for k in graph[x]:\n if not visited[k]:\n queue.append(k)\n visited[k] = True\n\n\ncnt = 0\nfor i in range(1, n + 1):\n if not visited[i]:\n bfs(i)\n cnt += 1\n\nprint(cnt)","repo_name":"sbtiffanykim/problem-solving","sub_path":"BOJ/11724_연결요소의개수.py","file_name":"11724_연결요소의개수.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32440525339","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.urls import reverse\n\nfrom .models import IotDevice, Whitelist, Blacklist\nfrom .find_devices import get_devices\n\nimport dns.resolver\nimport ipaddress\n# Create your views here.\n\noption1 = True\noption2 = True\noption3 = False\n\ndef load_file(file_name):\n starts = []\n with open(file_name, 'r') as file:\n for line in file:\n line = line[:-1]\n starts.append(line)\n return starts\n\n\ndef get_ips(inputip):\n ips = dns.resolver.resolve(inputip, 'A')\n ips_form_dns = []\n\n for ip in ips:\n ips_form_dns.append(ip.to_text())\n return ips_form_dns\n\ndef is_valid_ip(ip):\n try:\n ipaddress.ip_address(ip)\n return True\n except:\n return False\n \n\ndef index(response,id):\n device = IotDevice.objects.get(id=id)\n \n if response.method == \"POST\":\n if response.POST.get(\"newItemWhitelist\"):\n txt = response.POST.get(\"new\") \n listip = [] \n if is_valid_ip(txt):\n device.whitelist_set.create(dst_ip=txt,size=0)\n else:\n try:\n listIps = get_ips(txt)\n if(listIps != []):\n for ip in listIps:\n device.whitelist_set.create(dst_ip=ip,size=0)\n else:\n print(\"invalid\")\n except:\n print(\"invalid\")\n \n elif response.POST.get(\"newItemBlacklist\"):\n #TODO: провери дали го има в whitelist\n txt = response.POST.get(\"new\") \n listip = [] \n if is_valid_ip(txt):\n device.blacklist_set.create(dst_ip=txt,size=0)\n else:\n try:\n listIps = get_ips(txt)\n if(listIps != []):\n for ip in listIps:\n device.blacklist_set.create(dst_ip=ip,size=0)\n else:\n print(\"invalid\")\n except:\n print(\"invalid\")\n \n elif response.POST.get(\"whiteToBlacklist\"): \n id = int(response.POST.get(\"whiteToBlacklist\"))\n currentDevice = device.whitelist_set.all().filter(id=id)\n tempip = currentDevice[0].dst_ip\n currentDevice.delete()\n device.blacklist_set.create(dst_ip=tempip,size=0)\n \n elif response.POST.get(\"blackToWhitelist\"):\n id = int(response.POST.get(\"blackToWhitelist\"))\n currentDevice = device.blacklist_set.all().filter(id=id)\n tempip = currentDevice[0].dst_ip\n currentDevice.delete()\n device.whitelist_set.create(dst_ip=tempip,size=0)\n \n elif response.POST.get(\"removeFromWhiteList\"):\n print(\"remove from whitelist\")\n id = int(response.POST.get(\"removeFromWhiteList\"))\n currentDevice = device.whitelist_set.all().filter(id=id)\n currentDevice.delete()\n \n elif response.POST.get(\"removeFromBlackList\"):\n print(\"removeFromBlackList\")\n id = int(response.POST.get(\"removeFromBlackList\"))\n currentDevice = device.blacklist_set.all().filter(id=id)\n currentDevice.delete()\n \n #TODO: make this button:\n # elif response.POST.get(\"removeAllFromBlackList\"):\n # print(\"removeAllFromBlackList\")\n # id = int(response.POST.get(\"removeAllFromBlackList\"))\n # currentDevice = device.blacklist_set.all().filter(id=id)\n # currentDevice.delete() \n \n return 
render(response,\"main/list.html\",{\"device\":device})\n\n\ndef home(response):\n devices = IotDevice.objects.all()\n \n if response.method == \"POST\":\n if response.POST.get(\"getDevices\"):\n print(\"Getting devices\")\n get_devices(option1,option2,option3) \n elif response.POST.get(\"removeDevice\"):\n print(\"removeDevice\")\n id = int(response.POST.get(\"removeDevice\"))\n currentDevice = IotDevice.objects.filter(id=id)\n currentDevice.delete()\n \n return render(response,\"main/home.html\",{\"devices\":devices})\n\ndef settings(request):\n global option1, option2, option3\n checkboxes = [{\"name\":\"Packet count\", \"enabled\": option1},\n {\"name\":\"Max 5 whitelisted\", \"enabled\": option2},\n {\"name\":\"Packet size\", \"enabled\": option3}]\n if request.method == \"POST\":\n options = request.POST.getlist(\"options\")\n if(options.__contains__(\"Packet count\")):\n option1 = True\n else:\n option1 = False\n \n if(options.__contains__(\"Max 5 whitelisted\")):\n option2 = True\n else:\n option2 = False\n \n if(options.__contains__(\"Packet size\")):\n option3 = True\n else:\n option3 = False\n print(option1,option2,option3)\n return redirect(\"/\")\n return render(request,\"main/settings.html\",{\"options\":checkboxes}) ","repo_name":"y0608/TtTHackTUES9","sub_path":"Web/TtTsite/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"43401640417","text":"import os\nimport sys\nimport datetime\nimport logging\nimport configparser\nfrom time import sleep\nfrom argparse import ArgumentParser\nfrom source.utilities.scribbles import Scribbles\nfrom source.utilities.sys_utils import Utilities\n\nclass Base:\n \"\"\"\n Cornerstone class for the application.\n \"\"\"\n \n init_timestamp = datetime.datetime.now().strftime('%m_%d_%Y_%I_%M%p')\n\n #logging variables\n logs_main_directory = 'logs'\n logs_directory = f\"{logs_main_directory}/{init_timestamp}\"\n log_file_name = None # override ex: script_1.log\n\n # etc\n error_received = None # override ex: True if error is reported\n\n\n def __init__(self) -> None:\n \"\"\"\n Initializes an instance of the Base class.\n\n @params: none\n @return: none\n \"\"\"\n\n self.script_name = os.path.basename(sys.modules[self.__module__].__file__)\n self.utilities = Utilities()\n self.argparser = ArgumentParser()\n\n self.init_config()\n self.create_directories()\n\n\n def add_args(self) -> None:\n \"\"\"\n Processes and passes arguments.\n\n @params: none\n @return: none\n \"\"\"\n\n self.argparser.add_argument('--run',\n action='store_true', default=False,\n help='run the application')\n\n self.argparser.add_argument('--verbose',\n action=\"store_true\", default=False,\n help=\"adds debug statements to logs\")\n\n\n def parse_args(self) -> None:\n \"\"\"\n Parses passed arguments.\n\n @params: none\n @return: none\n \"\"\"\n\n self.args, unknown = self.argparser.parse_known_args()\n\n if len(sys.argv) <= 1:\n self.argparser.print_help()\n quit()\n\n\n def init_config(self) -> None:\n \"\"\"\n Initializes the config file settings.\n\n @params: none\n @return: none\n \"\"\"\n\n self.config_parser = configparser.ConfigParser()\n self.config_parser.read('app.config')\n\n # General\n self.app_name = self.config_parser['General'].get('app_name', 'Unknown')\n\n\n def init_logging(self) -> None:\n \"\"\"\n Initializes logging functions for this Base class or child.\n Logs will be named after the running script.\n\n @params: 
none\n @return: none\n \"\"\"\n\n self.debug_level = logging.INFO\n\n if self.args.verbose:\n self.debug_level = logging.DEBUG\n\n self.log_filename = f\"{self.utilities.basename_of(path=self.script_name)}.log\"\n self.log_path = f\"{self.logs_directory}/{self.log_filename}\"\n\n #INIT scribbles\n self.scribbles = Scribbles(self.script_name, self.debug_level, self.log_path)\n\n\n def create_directories(self) -> None:\n \"\"\"\n Creates any directories needed by the app, or this specific run.\n\n @params: none\n @return: none\n \"\"\"\n\n self.utilities.create_directory(self.logs_directory)\n","repo_name":"jconway4/PythonSkelly","sub_path":"source/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15248673387","text":"import fastapi\nimport starlette\nfrom fastapi import FastAPI, UploadFile\nfrom starlette.datastructures import FormData\n\napp = FastAPI()\n\n\n@app.post(\"/files\")\nasync def create_files(request: fastapi.Request):\n form = await request.form()\n\n dict_to_return: dict = {}\n\n # primeiro campo\n a_optional = form.get('a_optional')\n if a_optional is not None:\n dict_to_return[\"a_optional_size\"] = a_optional.size\n\n # segundo campo\n b_optional = form.get('b_optional')\n if b_optional is not None:\n dict_to_return[\"b_optional_size\"] = b_optional.size\n\n # terceiro campo, mas considerando como lista\n # atenção aqui, pois é 'getlist'\n list_repeat_optional = form.getlist('repeat_optional')\n\n if list_repeat_optional is not None:\n counter: int = 0\n for item in list_repeat_optional:\n if isinstance(item, starlette.datastructures.UploadFile):\n arquivo: UploadFile = item\n print(arquivo.size)\n dict_to_return[f\"repeat_optional_{counter}_size\"] = arquivo.size\n counter += 1\n\n return dict_to_return\n","repo_name":"nettooe/python-fastapi-upload-files","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5697658917","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport subprocess\nimport string\nfrom analyze import *\nfrom write_batch import write_batch as wb\nimport config as cfg\nimport time\n\ndef bash_command(cmd):\n subprocess.Popen(cmd, shell=True, executable='/bin/bash')\n\n#Function for running equilibrations\n#Equilibrations require 2 coordinates, one flat bottom and one flat bottom high\n#Function needs to insert a range (+-0.25nm) into pull coord init parameters\ndef run_eq(domain: string, iter: int):\n file_name = 'pull_eq_' + str(domain) + str(iter)\n mdp_file = 'pull_eq.mdp'\n\n #delete last 2 lines of mdp file\n lines = open(mdp_file, 'r').readlines()\n del lines[-2:]\n\n # get init from config.py\n global init\n current_coord = init\n range_high = current_coord + 0.25\n range_low = current_coord - 0.25\n\n #insert new lines into mdp file\n lines.append(\"pull_coord1_init = \" + str(range_high))\n lines.append(\"\\npull_coord2_init = \" + str(range_low))\n open(mdp_file, 'w').writelines(lines)\n\n bash_command(\"gmx_mpi grompp -f pull_eq_{}.mdp -o pull_eq_{}.tpr -c {} -r {} -p topol.top -n {} -maxwarn 1\".format(domain, file_name, cfg.gro, cfg.gro, cfg.ndx))\n wb.write_batch(file_name)\n bash_command(\"sbatch {}\".format(file_name))\n print(\"Equilibration {} submitted\".format(file_name))\n\n ##waiting\n\n command=\"gmx_mpi rms -s {}.tpr -f {}.xtc -o {}_rmsd.xvg -tu 
ns\".format(file_name, file_name, file_name)\n subprocess.run([command], input=\"4 4\", text=True, shell=True, executable='/bin/bash')\n\n rmsd_xvg_file = file_name + '_rmsd.xvg'\n # from analyze.py use function analyze\n result=analyze(rmsd_xvg_file, domain)\n print(\"Result: \", result)\n if result == 0:\n print(\"Running equilibration again with longer wall time\")\n wb.wall_time()\n run_eq(domain, iter)\n else:\n print(\"Equilibration was successful\")\n\n\n## Testing\n\n# First see if mdp file modification works\ndomain_dict=cfg.domains[0]\nglobal init\ninit = domain_dict['start']\n#works\n\n# next rmsd file \n# needs pull_eq_TK2.tpr and pull_eq_TK2.xtc\n# works\n\n# next analyze\n\nrun_eq('TK', 2)","repo_name":"molecularmachinist/pathfinder","sub_path":"pull_auto/Python_versions/run_eq.py","file_name":"run_eq.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"48828666924","text":"import numpy as np\nimport cvxpy as cvx\nimport time\nfrom functools import wraps\nfrom scipy.optimize import minimize\n\n\ndef timeit(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.perf_counter()\n result = func(*args, **kwargs)\n end = time.perf_counter()\n # print(f'Function: {func.__name__}\\n Time: {end - start:.4f}')\n return result\n \n return wrapper\n\n\n\nclass SupportVectorClustering():\n def __init__(self, p, q, C = None, kernel = None) -> None:\n self.p = p\n self.q = q\n self.C = C\n self.kernel = self._gaussian_kernel if kernel is None else kernel\n\n self._N = None\n self._beta = None\n self._x = None\n self._km = None\n\n self.svs_indx = 0\n self.bsv_indx = 0\n self.ovs_indx = 0\n \n\n def _gaussian_kernel(self, x1, x2):\n return np.exp(-self.q * np.linalg.norm(x1 - x2)**2)\n\n\n def _calc_kernel_matrix(self):\n km=np.zeros((self._N, self._N))\n \n for i in range(self._N):\n for j in range(self._N):\n km[i,j] = self.kernel(self._x[i], self._x[j])\n return km \n \n def fit(self, x):\n self._x = x\n self._N = len(x)\n self.C = 1 / (self.p * self._N) if self.C is None else self.C \n \n self._km = cvx.psd_wrap(self._calc_kernel_matrix())\n beta = cvx.Variable(self._N)\n \n objective = cvx.Maximize(cvx.sum(cvx.diag(self._km) @ beta)- cvx.quad_form(beta, self._km)) # 1 - cvx.quad_form(beta, km)\n constraints = [0 <= beta, beta<=self.C, cvx.sum(beta)==1] # 1 - self._beta.T @self._km @ self._beta\n result = cvx.Problem(objective, constraints).solve()\n \n self._beta = beta.value\n\n\n def _line_segment_adj(self,x1,x2,R,n=10):\n res = all([self.r_func(x1 + (x2 - x1)/(n +1) * i) < R for i in range(n)])\n return res\n \n @timeit\n def _get_adj_mtx(self, EPS = 10**-8):\n\n self.bsv_indx = np.where(self._beta >= (self.C - EPS))[0]\n self.svs_indx = np.where((self._beta < (self.C - EPS)) & (self._beta > EPS))[0]\n self.ovs_indx = np.where(self._beta < EPS)[0]\n\n R = np.mean([self.r_func(self._x[i]) for i in self.svs_indx])\n\n adj_mtx = np.zeros((self._N, self._N))\n\n for i in range(self._N):\n if i not in self.bsv_indx:\n for j in range(i, self._N):\n if j not in self.bsv_indx:\n adj_mtx[i,j]=adj_mtx[j,i]=self._line_segment_adj(self._x[i],self._x[j], R)\n\n return adj_mtx \n\n\n def get_clusters(self):\n adj_mtx = self._get_adj_mtx()\n indices = list(range(self._N))\n clusters = {}\n num_clusters = -1\n \n while indices:\n num_clusters+=1\n clusters[num_clusters]=[]\n curr_id = indices.pop(0)\n queue = [curr_id]\n\n while queue:\n cid = queue.pop(0)\n for i in indices:\n if 
adj_mtx[i,cid]:\n queue.append(i)\n indices.remove(i)\n clusters[num_clusters].append(cid)\n \n return clusters\n \n\n def r_func(self, x):\n return self.kernel(x,x)-2*np.sum([self._beta[i]*self.kernel(self._x[i], x) for i in range(self._N)]) + (self._beta.T @self._km @ self._beta).value\n \n\n @staticmethod\n def get_begin_p(x):\n return 1 / (len(x))\n \n @staticmethod\n def get_begin_q(x):\n return 1 / max([np.linalg.norm(x[i] - x[j])**2 for i in range(len(x)) for j in range(len(x))])\n \n \n\n\n # def another_fit(self, x):\n # self._x = x\n # self._N = len(x)\n # self.C = 1 / (self.p * self._N) if self.C is None else self.C \n\n # self._km = self._calc_kernel_matrix()\n # def func(x):\n # return np.sum(x) - x.T @ self._km @ x\n\n # con1 = {'type': 'eq', 'fun': lambda b: sum(b) - 1}\n # con2 = [{'type': 'ineq', 'fun': lambda b: -1 * (b[i] - self.C)} for i in range(self._N)]\n # con3 = [{'type': 'ineq', 'fun': lambda b: b[i] - 0} for i in range(self._N)]\n # result = minimize(lambda x: -1 * func(x), np.ones(self._N), constraints=[con1, *con2, *con3])\n # self._beta = result.x\n\n\n # r_func if use another_fit\n # return self.kernel(x,x)-2*np.sum([self._beta[i]*self.kernel(self._x[i], x) for i in range(self._N)]) + self._beta.T @self._km @ self._beta","repo_name":"DmitryjVeselyj/Stat_Labs","sub_path":"3_Lab/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"12900158051","text":"import copy\nimport datetime\nfrom calendar import monthrange\nfrom itertools import chain\n\nfrom hscommon.util import nonone\nfrom hscommon.trans import tr\n\nfrom .date import (inc_day, inc_week, inc_month, inc_year, inc_weekday_in_month,\n inc_last_weekday_in_month)\nfrom .transaction import Transaction\n\nclass RepeatType:\n Daily = 'daily'\n Weekly = 'weekly'\n Monthly = 'monthly'\n Yearly = 'yearly'\n Weekday = 'weekday'\n WeekdayLast = 'weekday_last'\n\nRTYPE2INCFUNC = {\n RepeatType.Daily: inc_day,\n RepeatType.Weekly: inc_week,\n RepeatType.Monthly: inc_month,\n RepeatType.Yearly: inc_year,\n RepeatType.Weekday: inc_weekday_in_month,\n RepeatType.WeekdayLast: inc_last_weekday_in_month,\n}\n\nONE_DAY = datetime.timedelta(1)\n\nclass DateCounter:\n def __init__(self, base_date, repeat_type, repeat_every, end):\n self.base_date = base_date\n self.end = end\n self.inccount = 0\n self.incfunc = RTYPE2INCFUNC[repeat_type]\n self.incsize = repeat_every\n self.current_date = None\n \n def __iter__(self):\n return self\n \n def __next__(self):\n # It's possible for a DateCounter to be created with an end date smaller than its start\n # date. 
In this case, simply never yield any date.\n if self.base_date > self.end:\n raise StopIteration()\n if self.current_date is None: # first date of the iteration is base_date\n self.current_date = self.base_date\n return self.current_date\n new_date = None\n while new_date is None:\n self.inccount += self.incsize\n new_date = self.incfunc(self.base_date, self.inccount)\n if new_date <= self.current_date or new_date > self.end:\n raise StopIteration()\n self.current_date = new_date\n return new_date\n \n\nclass Spawn(Transaction):\n def __init__(self, recurrence, ref, recurrence_date, date=None):\n date = date or recurrence_date\n Transaction.__init__(self, date, ref.description, ref.payee, ref.checkno)\n self.recurrence_date = recurrence_date\n self.ref = ref\n self.recurrence = recurrence\n self.set_splits(ref.splits)\n for split in self.splits:\n split.reconciliation_date = None\n self.balance()\n \n\nclass Recurrence:\n def __init__(self, ref, repeat_type, repeat_every):\n if repeat_type not in RTYPE2INCFUNC:\n # invalid repeat type, default to monthly\n repeat_type = RepeatType.Monthly\n self.ref = ref\n self._repeat_type = repeat_type\n self._repeat_every = repeat_every\n self.stop_date = None\n self.date2exception = {}\n self.date2globalchange = {}\n self.date2instances = {}\n self.rtype2desc = {\n RepeatType.Daily: tr('Daily'),\n RepeatType.Weekly: tr('Weekly'),\n RepeatType.Monthly: tr('Monthly'),\n RepeatType.Yearly: tr('Yearly'),\n RepeatType.Weekday: '', # dynamic\n RepeatType.WeekdayLast: '', # dynamic\n }\n self._update_rtype_descs()\n \n def __repr__(self):\n return '' % (self.repeat_type, self.repeat_every)\n \n #--- Private\n def _all_exceptions(self):\n exceptions = chain(self.date2exception.values(), self.date2globalchange.values())\n return (e for e in exceptions if e is not None)\n \n def _create_spawn(self, ref, date):\n return Spawn(self, ref, date)\n \n def _update_ref(self):\n # Go through our recurrence dates and see if we should either move our start date due to\n # deleted spawns or to update or ref transaction due to a global change that end up being\n # on our first recurrence date.\n date_counter = DateCounter(self.start_date, self.repeat_type, self.repeat_every, datetime.date.max)\n for d in date_counter:\n if d in self.date2exception and self.date2exception[d] is None:\n continue\n if d in self.date2globalchange:\n self.ref = self.date2globalchange[d].replicate()\n else:\n self.ref.date = d\n break\n self.date2exception = {d: ex for d, ex in self.date2exception.items() if d > self.start_date}\n self.date2globalchange = {d: ex for d, ex in self.date2globalchange.items() if d > self.start_date}\n self.reset_spawn_cache()\n self._update_rtype_descs()\n \n def _update_rtype_descs(self):\n date = self.start_date\n weekday_name = date.strftime('%A')\n week_no = (date.day - 1) // 7\n position = [tr('first'), tr('second'), tr('third'), tr('fourth'), tr('fifth')][week_no]\n self.rtype2desc[RepeatType.Weekday] = tr('Every %s %s of the month') % (position, weekday_name)\n _, days_in_month = monthrange(date.year, date.month)\n if days_in_month - date.day < 7:\n self.rtype2desc[RepeatType.WeekdayLast] = tr('Every last %s of the month') % weekday_name\n else:\n self.rtype2desc[RepeatType.WeekdayLast] = ''\n \n #--- Public\n def affected_accounts(self):\n result = self.ref.affected_accounts()\n for exception in self._all_exceptions():\n result |= exception.affected_accounts()\n return result\n \n def change_globally(self, spawn):\n for date in 
list(self.date2globalchange.keys()):\n if date >= spawn.recurrence_date:\n del self.date2globalchange[date]\n for date, exception in list(self.date2exception.items()):\n # we don't want to remove local deletions\n if exception is not None and date >= spawn.recurrence_date:\n del self.date2exception[date]\n self.date2globalchange[spawn.recurrence_date] = spawn\n self._update_ref()\n \n def delete(self, spawn):\n self.delete_at(spawn.recurrence_date)\n \n def delete_at(self, date):\n self.date2exception[date] = None\n self._update_ref()\n \n def get_spawns(self, end):\n # END DATE ADJUSTMENT\n # if a changed date end up being smaller than the \"spawn date\", it's possible that a spawn\n # that should have been spawned for the date range is not spawned. Therefore, we always\n # spawn at least until the date of the last exception. For global changes, it's even more\n # complicated. If the global date delta is negative enough, we can end up with a spawn that\n # doesn't go far enough, so we must adjust our max date by this delta.\n if self.date2exception:\n end = max(end, max(self.date2exception.keys()))\n if self.date2globalchange:\n min_date_delta = min(ref.date-date for date, ref in self.date2globalchange.items())\n if min_date_delta < datetime.timedelta(days=0):\n end += -min_date_delta\n end = min(end, nonone(self.stop_date, datetime.date.max))\n \n date_counter = DateCounter(self.start_date, self.repeat_type, self.repeat_every, end)\n result = []\n global_date_delta = datetime.timedelta(days=0)\n current_ref = self.ref\n for current_date in date_counter:\n if current_date in self.date2globalchange:\n current_ref = self.date2globalchange[current_date]\n global_date_delta = current_ref.date - current_date\n if current_date in self.date2exception:\n exception = self.date2exception[current_date]\n if exception is not None:\n result.append(exception)\n else:\n if current_date not in self.date2instances:\n spawn = self._create_spawn(current_ref, current_date)\n if global_date_delta:\n # Only muck with spawn.date if we have a delta. 
otherwise we're breaking\n # budgets.\n spawn.date = current_date + global_date_delta\n self.date2instances[current_date] = spawn\n result.append(self.date2instances[current_date])\n return result\n \n def reassign_account(self, account, reassign_to=None):\n self.ref.reassign_account(account, reassign_to)\n for exception in self._all_exceptions():\n exception.reassign_account(account, reassign_to)\n self.reset_spawn_cache()\n \n def replicate(self):\n result = copy.copy(self)\n result.date2exception = copy.copy(self.date2exception)\n result.date2globalchange = copy.copy(self.date2globalchange)\n result.date2instances = {}\n result.ref = self.ref.replicate()\n return result\n \n def reset_exceptions(self):\n self.date2exception = {}\n self.date2globalchange = {}\n \n def reset_spawn_cache(self):\n self.date2instances = {}\n \n def stop_at(self, spawn):\n self.stop_date = spawn.recurrence_date\n \n def stop_before(self, spawn):\n self.stop_date = spawn.recurrence_date - ONE_DAY\n \n #--- Properties\n @property\n def is_alive(self):\n \"\"\"Returns whether get_spawns() can ever return anything given the start and stop date\"\"\"\n if self.stop_date is None:\n return True\n return bool(self.get_spawns(self.stop_date))\n \n @property\n def repeat_every(self):\n return self._repeat_every\n \n @repeat_every.setter\n def repeat_every(self, value):\n if value == self._repeat_every:\n return\n self._repeat_every = value\n self.reset_exceptions()\n \n @property\n def repeat_type(self):\n return self._repeat_type\n \n @repeat_type.setter\n def repeat_type(self, value):\n if value == self._repeat_type:\n return\n self._repeat_type = value\n self.reset_exceptions()\n \n @property\n def repeat_type_desc(self):\n return self.rtype2desc[self._repeat_type]\n \n @property\n def start_date(self):\n return self.ref.date\n \n @start_date.setter\n def start_date(self, value):\n if value == self.ref.date:\n return\n self.ref.date = value\n self.reset_exceptions()\n self._update_rtype_descs()\n \n","repo_name":"Mouchnino/moneyguru","sub_path":"core/model/recurrence.py","file_name":"recurrence.py","file_ext":"py","file_size_in_byte":10250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71025743631","text":"# !/usr/bin/env python\n\"\"\"Define the unit tests for the :mod:`colour.io.luts.cinespace_csp` module.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport shutil\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nfrom colour.io import LUT1D, LUT3x1D, read_LUT_Cinespace, write_LUT_Cinespace\nfrom colour.utilities import tstack\n\n__author__ = \"Colour Developers\"\n__copyright__ = \"Copyright 2013 Colour Developers\"\n__license__ = \"BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause\"\n__maintainer__ = \"Colour Developers\"\n__email__ = \"colour-developers@colour-science.org\"\n__status__ = \"Production\"\n\n__all__ = [\n \"ROOT_LUTS\",\n \"TestReadLUTCinespace\",\n \"TestWriteLUTCinespace\",\n]\n\nROOT_LUTS: str = os.path.join(\n os.path.dirname(__file__), \"resources\", \"cinespace\"\n)\n\n\nclass TestReadLUTCinespace(unittest.TestCase):\n \"\"\"\n Define :func:`colour.io.luts.cinespace_csp.read_LUT_Cinespace` definition\n unit tests methods.\n \"\"\"\n\n def test_read_LUT_Cinespace(self):\n \"\"\"\n Test :func:`colour.io.luts.cinespace_csp.read_LUT_Cinespace`\n definition.\n \"\"\"\n\n LUT_1 = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"ACES_Proxy_10_to_ACES.csp\")\n )\n\n np.testing.assert_array_almost_equal(\n 
LUT_1.table,\n np.array(\n [\n [4.88300000e-04, 4.88300000e-04, 4.88300000e-04],\n [7.71400000e-04, 7.71400000e-04, 7.71400000e-04],\n [1.21900000e-03, 1.21900000e-03, 1.21900000e-03],\n [1.92600000e-03, 1.92600000e-03, 1.92600000e-03],\n [3.04400000e-03, 3.04400000e-03, 3.04400000e-03],\n [4.80900000e-03, 4.80900000e-03, 4.80900000e-03],\n [7.59900000e-03, 7.59900000e-03, 7.59900000e-03],\n [1.20100000e-02, 1.20100000e-02, 1.20100000e-02],\n [1.89700000e-02, 1.89700000e-02, 1.89700000e-02],\n [2.99800000e-02, 2.99800000e-02, 2.99800000e-02],\n [4.73700000e-02, 4.73700000e-02, 4.73700000e-02],\n [7.48400000e-02, 7.48400000e-02, 7.48400000e-02],\n [1.18300000e-01, 1.18300000e-01, 1.18300000e-01],\n [1.86900000e-01, 1.86900000e-01, 1.86900000e-01],\n [2.95200000e-01, 2.95200000e-01, 2.95200000e-01],\n [4.66500000e-01, 4.66500000e-01, 4.66500000e-01],\n [7.37100000e-01, 7.37100000e-01, 7.37100000e-01],\n [1.16500000e00, 1.16500000e00, 1.16500000e00],\n [1.84000000e00, 1.84000000e00, 1.84000000e00],\n [2.90800000e00, 2.90800000e00, 2.90800000e00],\n [4.59500000e00, 4.59500000e00, 4.59500000e00],\n [7.26000000e00, 7.26000000e00, 7.26000000e00],\n [1.14700000e01, 1.14700000e01, 1.14700000e01],\n [1.81300000e01, 1.81300000e01, 1.81300000e01],\n [2.86400000e01, 2.86400000e01, 2.86400000e01],\n [4.52500000e01, 4.52500000e01, 4.52500000e01],\n [7.15100000e01, 7.15100000e01, 7.15100000e01],\n [1.13000000e02, 1.13000000e02, 1.13000000e02],\n [1.78500000e02, 1.78500000e02, 1.78500000e02],\n [2.82100000e02, 2.82100000e02, 2.82100000e02],\n [4.45700000e02, 4.45700000e02, 4.45700000e02],\n [7.04300000e02, 7.04300000e02, 7.04300000e02],\n ]\n ),\n )\n self.assertEqual(LUT_1.name, \"ACES Proxy 10 to ACES\")\n self.assertEqual(LUT_1.dimensions, 2)\n np.testing.assert_array_equal(\n LUT_1.domain, np.array([[0, 0, 0], [1, 1, 1]])\n )\n self.assertEqual(LUT_1.size, 32)\n self.assertListEqual(LUT_1.comments, [])\n\n LUT_2 = read_LUT_Cinespace(os.path.join(ROOT_LUTS, \"Demo.csp\"))\n self.assertListEqual(\n LUT_2.comments, [\"Comments are ignored by most parsers\"]\n )\n np.testing.assert_array_equal(\n LUT_2.domain, np.array([[0, 0, 0], [1, 2, 3]])\n )\n\n LUT_3 = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table.csp\")\n )\n self.assertEqual(LUT_3.dimensions, 3)\n self.assertEqual(LUT_3.size, 2)\n\n LUT_4 = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Explicit_Domain.csp\")\n )\n self.assertEqual(LUT_4[0].is_domain_explicit(), True)\n self.assertEqual(LUT_4[1].table.shape, (2, 3, 4, 3))\n\n LUT_5 = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Uncommon_3x1D_With_Pre_Lut.csp\")\n )\n self.assertIsInstance(LUT_5[0], LUT3x1D)\n self.assertIsInstance(LUT_5[1], LUT3x1D)\n\n\nclass TestWriteLUTCinespace(unittest.TestCase):\n \"\"\"\n Define :func:`colour.io.luts.cinespace_csp.write_LUT_Cinespace` definition\n unit tests methods.\n \"\"\"\n\n def setUp(self):\n \"\"\"Initialise the common tests attributes.\"\"\"\n\n self._temporary_directory = tempfile.mkdtemp()\n\n def tearDown(self):\n \"\"\"After tests actions.\"\"\"\n\n shutil.rmtree(self._temporary_directory)\n\n def test_write_LUT_Cinespace(self):\n \"\"\"\n Test :func:`colour.io.luts.cinespace_csp.write_LUT_Cinespace`\n definition.\n \"\"\"\n\n LUT_1_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"ACES_Proxy_10_to_ACES.csp\")\n )\n write_LUT_Cinespace(\n LUT_1_r,\n os.path.join(\n self._temporary_directory, \"ACES_Proxy_10_to_ACES.csp\"\n ),\n )\n LUT_1_t = read_LUT_Cinespace(\n os.path.join(\n self._temporary_directory, 
\"ACES_Proxy_10_to_ACES.csp\"\n )\n )\n self.assertEqual(LUT_1_r, LUT_1_t)\n self.assertEqual(LUT_1_r, LUT_1_t)\n\n LUT_2_r = read_LUT_Cinespace(os.path.join(ROOT_LUTS, \"Demo.csp\"))\n write_LUT_Cinespace(\n LUT_2_r, os.path.join(self._temporary_directory, \"Demo.csp\")\n )\n LUT_2_t = read_LUT_Cinespace(\n os.path.join(self._temporary_directory, \"Demo.csp\")\n )\n self.assertEqual(LUT_2_r, LUT_2_t)\n self.assertListEqual(LUT_2_r.comments, LUT_2_t.comments)\n\n LUT_3_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table.csp\")\n )\n write_LUT_Cinespace(\n LUT_3_r,\n os.path.join(\n self._temporary_directory, \"Three_Dimensional_Table.csp\"\n ),\n )\n LUT_3_t = read_LUT_Cinespace(\n os.path.join(\n self._temporary_directory, \"Three_Dimensional_Table.csp\"\n )\n )\n self.assertEqual(LUT_3_r, LUT_3_t)\n\n domain = tstack(\n (\n np.array([0.0, 0.1, 0.2, 0.4, 0.8, 1.2]),\n np.array([-0.1, 0.5, 1.0, np.nan, np.nan, np.nan]),\n np.array([-1.0, -0.5, 0.0, 0.5, 1.0, np.nan]),\n )\n )\n LUT_4_t = LUT3x1D(\n domain=domain, table=domain * 2, name=\"Ragged Domain\"\n )\n write_LUT_Cinespace(\n LUT_4_t,\n os.path.join(self._temporary_directory, \"Ragged_Domain.csp\"),\n )\n LUT_4_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Ragged_Domain.csp\")\n )\n np.testing.assert_array_almost_equal(LUT_4_t.domain, LUT_4_r.domain)\n np.testing.assert_array_almost_equal(\n LUT_4_t.table, LUT_4_r.table, decimal=6\n )\n\n LUT_5_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table_With_Shaper.csp\")\n )\n LUT_5_r.sequence[0] = LUT_5_r.sequence[0].convert(\n LUT1D, force_conversion=True\n )\n write_LUT_Cinespace(\n LUT_5_r,\n os.path.join(\n self._temporary_directory,\n \"Three_Dimensional_Table_With_Shaper.csp\",\n ),\n )\n LUT_5_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table_With_Shaper.csp\")\n )\n LUT_5_t = read_LUT_Cinespace(\n os.path.join(\n self._temporary_directory,\n \"Three_Dimensional_Table_With_Shaper.csp\",\n )\n )\n self.assertEqual(LUT_5_r, LUT_5_t)\n\n LUT_6_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table_With_Shaper.csp\")\n )\n LUT_6_r.sequence[0] = LUT_6_r.sequence[0].convert(\n LUT3x1D, force_conversion=True\n )\n write_LUT_Cinespace(\n LUT_6_r,\n os.path.join(\n self._temporary_directory,\n \"Three_Dimensional_Table_With_Shaper.csp\",\n ),\n )\n LUT_6_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"Three_Dimensional_Table_With_Shaper.csp\")\n )\n LUT_6_t = read_LUT_Cinespace(\n os.path.join(\n self._temporary_directory,\n \"Three_Dimensional_Table_With_Shaper.csp\",\n )\n )\n self.assertEqual(LUT_6_r, LUT_6_t)\n\n LUT_7_r = read_LUT_Cinespace(\n os.path.join(ROOT_LUTS, \"ACES_Proxy_10_to_ACES.csp\")\n )\n write_LUT_Cinespace(\n LUT_7_r.convert(LUT1D, force_conversion=True),\n os.path.join(\n self._temporary_directory, \"ACES_Proxy_10_to_ACES.csp\"\n ),\n )\n LUT_7_t = read_LUT_Cinespace(\n os.path.join(\n self._temporary_directory, \"ACES_Proxy_10_to_ACES.csp\"\n )\n )\n self.assertEqual(LUT_7_r, LUT_7_t)\n\n def test_raise_exception_write_LUT_Cinespace(self):\n \"\"\"\n Test :func:`colour.io.luts.cinespace_csp.write_LUT_Cinespace`\n definition raised exception.\n \"\"\"\n\n self.assertRaises(TypeError, write_LUT_Cinespace, object(), \"\")\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"colour-science/colour","sub_path":"colour/io/luts/tests/test_cinespace_csp.py","file_name":"test_cinespace_csp.py","file_ext":"py","file_size_in_byte":10025,"program_lang":"python","lang":"en","doc_type":"code","stars":1843,"dataset":"github-code","pt":"83"} +{"seq_id":"34514582432","text":"# -*- coding: utf-8 -*-\n\n\nimport django.core.validators\nfrom django.db import migrations\nfrom django.db import models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = []\n\n operations = [\n migrations.CreateModel(\n name='RedisServer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('label', models.CharField(blank=True, max_length=50, verbose_name='Label', null=True)),\n (\n 'hostname',\n models.CharField(\n verbose_name='Hostname', max_length=250, help_text='This can also be the absolute path to a redis socket'\n ),\n ),\n (\n 'port',\n models.IntegerField(\n default=6379,\n validators=[django.core.validators.MaxValueValidator(65535), django.core.validators.MinValueValidator(1)],\n verbose_name='Port',\n blank=True,\n null=True,\n ),\n ),\n ('password', models.CharField(blank=True, max_length=250, verbose_name='Password', null=True)),\n (\n 'sampling_threshold',\n models.IntegerField(\n default=1000,\n verbose_name='Sampling threshold',\n help_text='Number of keys after which only a sample (of random keys) is shown on the inspect page.',\n ),\n ),\n (\n 'sampling_size',\n models.IntegerField(\n default=200,\n verbose_name='Sampling size',\n help_text='Number of random keys shown when sampling is used. Note that each key translates to a RANDOMKEY call'\n ' in redis.',\n ),\n ),\n ],\n options={\n 'verbose_name_plural': 'Redis Servers',\n 'verbose_name': 'Redis Server',\n 'permissions': (('can_inspect', 'Can inspect redis servers'),),\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='redisserver',\n unique_together=set([('hostname', 'port')]),\n ),\n ]\n","repo_name":"ionelmc/django-redisboard","sub_path":"src/redisboard/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"83"} +{"seq_id":"10107389293","text":"\"\"\" Wrapper around wtforms_alchemy and flask_wtf. \"\"\"\nfrom flask import abort, redirect, render_template, request, url_for\nfrom flask_security import current_user\nfrom flask_wtf import FlaskForm as Form\nfrom sqlalchemy.exc import SQLAlchemyError\nfrom wtforms_alchemy import model_form_factory, ClassMap, FormGenerator\nfrom wtforms import fields, widgets\nfrom barrel import db\n\n########################################\n\n\nclass MoneyWidget(widgets.Input):\n \"\"\" Widget to show money \"\"\"\n currency = '€'\n\n def __init__(self):\n self.input_type = 'number'\n\n def __call__(self, field, **kwargs):\n kwargs.setdefault('step', 0.01)\n return '
%s%s
'\\\n % (self.currency, super(MoneyWidget, self).__call__(field, **kwargs))\n\n\nclass MoneyField(fields.Field):\n \"\"\" Form field to show money. \"\"\"\n widget = MoneyWidget\n\n########################################\n\n\nclass PercentageWidget(widgets.Input):\n \"\"\" Widget to show percentages \"\"\"\n\n def __init__(self):\n self.input_type = 'number'\n\n def __call__(self, field, **kwargs):\n kwargs.setdefault('step', 0.01)\n return ('
%s%%
'\n % (super(PercentageWidget, self).__call__(field, **kwargs)))\n\n\nclass PercentageField(fields.Field):\n widget = PercentageWidget\n\n########################################\n\n\nclass FormModelMixin(object):\n \"\"\" Extend models with the get_form method below. \"\"\"\n\n @classmethod\n def get_form(cls):\n \"\"\" Get a basic form class for this model. \"\"\"\n class FormClass(BarrelForms.ModelForm):\n class Meta:\n model = cls\n return FormClass\n\n\nclass BarrelForms(object):\n \"\"\" A base form with additional methods. \"\"\"\n\n FormModelMixin = FormModelMixin\n\n ########################################\n\n def __init__(self, app, messages=None, lang='en',\n date_format='%Y-%m-%d', datetime_format='%Y-%m-%d %H:%M'):\n \"\"\"\n Args:\n app: Flask app\n messsages (dict of strings): texts for flash messages (\"updated\", \"created\", \"error\" and \"illegal\")\n lang (\"en\" or \"nl\"): selects a default set of messages\n date_format (string): format for parsing and showing dates\n datetime_format (string): format for parsing and showing datetimes\n \"\"\"\n self.app = app\n self.messages = messages or dict(\n nl=dict(\n updated='Gegevens aangepast',\n created='Nieuwe gegevens opgeslagen',\n error='%s (zie log voor details)',\n illegal='Gegevens onjuist'\n ),\n en=dict(\n updated='Data updated',\n created='New data stored',\n error='%s (see log for detaiils)',\n illegal='Data incorrect'\n )\n )[lang]\n\n class ModelForm(model_form_factory(Form,\n date_format=date_format,\n datetime_format=datetime_format)):\n\n # this gets money in the wtforms type map, but not on the screen\n # class Meta:\n # type_map = ClassMap({db.MoneyType: MoneyField})\n\n def __iter__(self):\n ''' make the 'only' attribute order-sensitive '''\n field_order = getattr(self, 'only', None)\n if field_order:\n temp_fields = []\n for name in field_order:\n if name == '*':\n temp_fields.extend([f for f in self._unbound_fields\n if f[0] not in field_order])\n else:\n temp_fields.append([f for f in self._unbound_fields if f[0] == name][0])\n self._unbound_fields = temp_fields\n return super(Form, self).__iter__()\n\n BarrelForms.ModelForm = ModelForm\n\n ########################################\n\n def handle_form(self, model_class, form_class=None, model=None, **kwargs):\n \"\"\" Validate form and create or update object.\n\n Automatically rolls back in case of errors.\n Errors are logged and sent to the user as flash messages.\n\n Args:\n model_class: class of model\n form_class: by default form class from model_class\n model: current model (leads to update)\n kwargs: will be forwarded to create/update\n \"\"\"\n form_class = form_class or model_class.get_form()\n form = form_class(request.form, obj=model)\n if form.validate_on_submit():\n self.app.logger.report('Submit: %s' % request.full_path)\n kwargs.update(form.data)\n try:\n if model:\n model.update(**kwargs)\n self.app.logger.flash(self.messages['updated'], 'success')\n else:\n # print kwargs\n model_class.create(**kwargs)\n self.app.logger.flash(self.messages['created'], 'success')\n except SQLAlchemyError as e:\n self.app.db.session.rollback()\n self.app.logger.flash(self.messages['error'] % e, 'error')\n else:\n if request.method == 'POST':\n self.app.logger.flash(self.messages['illegal'], 'error', form.errors)\n else:\n self.app.logger.report('Form: %s' % request.full_path)\n\n return form\n\n ########################################\n\n @staticmethod\n def breadcrums(obj):\n bc = []\n parent = obj.parent()\n if parent:\n bc = 
BarrelForms.breadcrums(parent)\n bc.append(dict(route=obj.__class__.__name__.lower(), id=obj.id, name=str(obj)))\n return bc\n\n ########################################\n\n def render_page(self, id, model_class, template='lists/base.jinja2',\n form_class=None, next_page=None, columns='', **kwargs):\n ''' Render a list with a form modal.\n\n Args:\n id: identity of model (if 0, a new model may be created)\n model_class: class of model\n template (string): template used for list\n form_class: class of form to be used (form class of model_class by default)\n next_page (url): page to go to after this page (current page by default)\n columns (list of strings): names of culumns to show in list\n kwargs: forwarded to :py:func:`handle_form`\n '''\n form_class = form_class or model_class.get_form()\n model = model_class.get(id) if id else None\n api = model_class.get_api()\n\n show_form = (id == 0)\n if api in request.form: # submit\n form = self.handle_form(model_class, form_class, model=model, **kwargs)\n show_form = bool(form.errors)\n elif model: # edit\n form = form_class(None, obj=model)\n show_form = True\n else: # new\n form = form_class()\n\n perm = current_user.get_permission(model)\n if self.app.config['DEBUG']:\n if perm == '-':\n abort(403)\n self.app.logger.report('%s template %s for %s (%s form, %s) with %s' %\n (request.method, template, api, 'show' if show_form else 'hide',\n perm, pformat(request.form)))\n if form.errors:\n self.app.logger.report('Form errors: %s' % form.errors)\n if not next_page or form.errors:\n return render_template(template,\n api=api,\n columns=columns.split(),\n form=form,\n model=model,\n readonly=perm == 'ro',\n show_form=show_form,\n **kwargs)\n else:\n return redirect(next_page)\n\n ########################################\n\n def child_form(self, id, model_class, parent_id, parent_class, **kwargs):\n ''' Render a form of a model with a child.\n\n Args:\n id: identity of model (if 0, a new child model is created for this parent)\n model_class: class of model\n parent_id: identity of parent model (if 0, a new model may be created)\n model_class: class of parent model\n kwargs: forwarded to :py:func:`render_page`\n '''\n api = model_class.get_api()\n parent_api = parent_class.get_api()\n if not id:\n kwargs['%s_id' % parent_api] = parent_id\n obj = model_class.create(naam=\"Nieuwe %s\" % api, **kwargs)\n return redirect(url_for(api, id=obj.id))\n else:\n obj = model_class.get(id)\n parent_id = getattr(obj, '%s_id' % parent_api)\n next_page = url_for(parent_api, id=parent_id) if request.method == 'POST' else None\n kwargs[parent_api] = parent_class.get(parent_id)\n return self.render_page(id, model_class,\n template='%s.jinja2' % api,\n next_page=next_page,\n **kwargs)\n\n########################################\n\n\ndef enable(app, **kwargs):\n \"\"\" Enable this module.\n\n Available as app.forms\n\n Args:\n app: Flask app\n kwargs: forwarded to BarrelForms\n Returns:\n BarrelForms object\n \"\"\"\n app.logger.info('Enabling forms')\n app.forms = BarrelForms(app, **kwargs)\n return app.forms\n\n########################################\n","repo_name":"otech-nl/barrel","sub_path":"barrel/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"39969568868","text":"\"\"\"added vendor code\n\nRevision ID: e55fdce10f9b\nRevises: d1a586c540d3\nCreate Date: 2022-07-07 19:45:19.349781\n\n\"\"\"\nfrom alembic import op\nimport 
sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'e55fdce10f9b'\ndown_revision = 'd1a586c540d3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('product', schema=None) as batch_op:\n batch_op.add_column(sa.Column('vendor_code', sa.BigInteger(), nullable=False))\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('product', schema=None) as batch_op:\n batch_op.drop_column('vendor_code')\n\n # ### end Alembic commands ###\n","repo_name":"wparty/online-shop","sub_path":"server/migrations/versions/e55fdce10f9b_added_vendor_code.py","file_name":"e55fdce10f9b_added_vendor_code.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18673186041","text":"'''\n1. Sorteie 10 inteiros entre 1 e 100 para uma lista e descubra o maior e o menor valor, sem usar \nas funções max e min.\n'''\nimport random\n\nlista = []\nc = 1\nmaior = 1\nmenor = 100\nwhile c <= 10:\n x = random.randint(1,100)\n lista.append(x)\n if x >= maior:\n maior = x\n if x <= menor:\n menor = x\n c += 1\n\nprint('Na lista %s o maior nº: %d menor nº: %d' %(lista,maior,menor))\n","repo_name":"akira2nd/CODES","sub_path":"Algoritmos e Lógica de Programação/lista 4 akira/questao01.py","file_name":"questao01.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"36588072087","text":"'''\nタグを検索して表示するプログラム\nこのプログラムではtitle, h2, liのタグを検索して表示する\n'''\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n# webページの取得と解析\nload_url = \"https://www.ymori.com/books/python2nen/test1.html\"\nhtml = requests.get(load_url)\nsoup = BeautifulSoup(html.content, \"html.parser\")\n\n# title,h2,liタグを検索して表示する\nprint(soup.find(\"title\"))\nprint(soup.find(\"h2\"))\nprint(soup.find(\"li\"))\n\n","repo_name":"takatoshi0905/Python2nen_study","sub_path":"ore_chap02/02_02_BS2.py","file_name":"02_02_BS2.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"36510710379","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.patches import Rectangle\nimport requests\nimport os\nimport uuid\nfrom PIL import Image\n\n\ndef bounding_box(img: Image, bboxes: list):\n (x, y) = img.size\n for bbox in bboxes:\n [ymin, xmin, ymax, xmax] = [\n y * bbox[0],\n x * bbox[1],\n y * bbox[2],\n x * bbox[3]\n ]\n\n plt.imshow(img)\n rect = Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, linewidth=1, edgecolor='r', facecolor='none')\n plt.gca().add_patch(rect)\n plt.show()\n\n\ndef im_get(url):\n resp = requests.get(url)\n temp_file_name = os.path.join(\"/zeppelin/data/\", str(uuid.uuid4()))\n with open(temp_file_name, 'wb') as fptr:\n fptr.write(resp.content)\n\n img = Image.open(temp_file_name)\n os.remove(temp_file_name)\n return img\n","repo_name":"wylswz/twitter_pride_vanity","sub_path":"algorithms/archive/APISample/Visualization.py","file_name":"Visualization.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"38264431668","text":"# 调用__init__模块\nfrom routes import *\n# 调用User 数据模型\nfrom models.user import User\n\n# 
用蓝图把user 变成一个模块\nmain = Blueprint('user', __name__)\n\n\n# 把index()视图函数注册为user模块的路由'/'\n@main.route('/')\ndef index():\n u = current_user()\n if u is not None:\n return redirect('/weibo')\n return render_template('user_login.html')\n # 如果用户存在(已登陆或注册),重定向到'/weibo'路由所在视图函数\n # 如果用户不存在,则打开注册登陆界面\n\n\n# 把register()视图函数注册为user模块的路由'/user/register'\n@main.route('/user/register', methods=['POST'])\ndef register():\n form = request.form\n u = User(form)\n if u.valid_register():\n u.save()\n print('注册成功')\n session['user_id'] = u.id\n return redirect('/weibo')\n else:\n return redirect(url_for('.index'))\n # 如果注册验证成功,把用户的id 存进session\n # 如果验证不成功,则重定向到注册登录界面\n\n\n# 把switch()视图函数注册为user模块的路由'/user/switch'\n@main.route('/user/switch')\ndef switch():\n return render_template('user_register.html')\n\n\n# 把login()视图函数注册为user模块的路由'/user/login'\n@main.route('/user/login', methods=['POST'])\ndef login():\n form = request.form\n u = User(form)\n user = User.query.filter_by(username=u.username).first()\n if user is not None and user.valid_login(u):\n print('登录成功')\n session['user_id'] = user.id\n return redirect('/weibo')\n else:\n print('登录失败')\n return redirect(url_for('.index'))\n # 如果用户输入的用户名和密码在数据库中存在且验证正确\n # 就把用户的id存进session中,切换到weibo页面\n # 否则重定向到注册登录页面\n\n\n# 把update()视图函数注册为user模块的路由'/user/update'\n@main.route('/user/update', methods=['POST'])\ndef update():\n u = current_user()\n password = request.form.get('password', '888')\n if u.change_password(password):\n print('修改成功')\n else:\n print('用户密码修改失败')\n return redirect('/user/profile')\n # 如果修改密码验证失败,重定向到用户个人界面\n\n\n@main.route('/user/profile', methods=['GET'])\ndef profile():\n u = current_user()\n if u is not None:\n return render_template('profile.html', user=u)\n else:\n abort(400)\n\n\n@main.route('/user/logout')\ndef logout():\n u = current_user()\n session.pop('user_id', None)\n return redirect(url_for('.index'))\n\n\n\n","repo_name":"ChandlerTJW/dove","sub_path":"routes/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22897064368","text":"\"\"\"\nAll Paths(i.e, also called as End-Points) are defined here.\n\"\"\"\nimport json\nimport requests\nfrom model import models\nfrom controller import crud\nfrom fastapi import Depends\nfrom connect import cnf_dict\nfrom sqlalchemy.orm import Session\nfrom schema_mapping import schemas\nfrom authentication.ouath2 import get_current_user\nfrom fastapi import APIRouter, HTTPException, status\nfrom infrastructure.database import get_db_connection, session\n\n\nengine = get_db_connection().engine # created object/instance\n\nrouter = APIRouter(\n prefix='/do-select',\n tags=['Do-Select']\n)\n\nmodels.Base.metadata.create_all(bind=engine)\n\n\n# Dependency\ndef get_db():\n try:\n yield session\n finally:\n session.close()\n\n\n@router.get('/')\ndef read():\n do_select_url = cnf_dict['DO_SELECT']['do_select_url']\n header = {\n 'DoSelect-Api-Key': cnf_dict['DO_SELECT']['do_select_api_key'],\n 'DoSelect-Api-Secret': cnf_dict['DO_SELECT']['do_select_api_secret']\n }\n data = requests.get(url=do_select_url, headers=header)\n if not data:\n return {'response': 'No Data Found'}\n return data.json()\n\n\n@router.post(\"/assessment\", response_model=schemas.Assessment)\ndef create_assessment(\n assessment: schemas.AssessmentCreate,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return 
crud.create_assessment(assessment, db=db)\n\n\n@router.get(\"/assessment/{slug_id}\", response_model=schemas.Assessment)\ndef read_slug(\n slug_id: int,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(models.Assessment).filter(models.Assessment.id == slug_id).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Slug Id Not Found')\n return data\n\n\n@router.post(\"/tenant\", response_model=schemas.Tenant)\ndef create_tenant(\n tenant: schemas.Tenant,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_tenant(tenant, db)\n\n\n@router.get(\"/tenant/{tenant_name}\", response_model=schemas.Tenant)\ndef read_tenant(\n tenant_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(models.Tenant).filter(models.Tenant.tenant_name == tenant_name).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Tenant Not Found')\n return data\n\n\n@router.post(\"/skill_master\", response_model=schemas.SkillMaster)\ndef create_skill(\n skill: schemas.SkillMaster,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_skill(skill, db)\n\n\n@router.get(\"/skill_master/{skill_name}\", response_model=schemas.SkillMaster)\ndef read_skill(\n skill_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(models.SkillMaster).filter(models.SkillMaster.skill_name == skill_name).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Skill Not Found')\n return data\n\n\n@router.post(\"/partner_type\", response_model=schemas.Partner)\ndef create_partner(\n partner: schemas.Partner,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_partner(partner, db)\n\n\n@router.get(\"/partner_type/{partner_name}\", response_model=schemas.Partner)\ndef read_partner(\n partner_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(models.PartnerType).filter(models.PartnerType.partner_name == partner_name).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Skill Not Found')\n return data\n\n\n@router.post(\"/application\", response_model=schemas.Application)\ndef create_application(\n application: schemas.Application,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_application(application, db)\n\n\n@router.get(\"/application/{application_name}\", response_model=schemas.Application)\ndef read_application(\n application_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.ApplicationMaster).filter(\n models.ApplicationMaster.application_name == 
application_name\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Skill Not Found')\n return data\n\n\n@router.put(\"/application/{application_name}\", response_model=schemas.Application)\ndef update_application(\n application_name: str,\n application: schemas.Application,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.update_application(application_name, application, db)\n\n\n@router.delete(\"/application/{application_name}\", response_model=schemas.Application)\ndef delete_application(\n application_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.ApplicationMaster).filter(\n models.ApplicationMaster.application_name == application_name\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Application Name Not Found!')\n return crud.delete_application(application_name, db)\n\n\n@router.post(\"/category\", response_model=schemas.CategoryMaster)\ndef create_category(\n category: schemas.CategoryMaster,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_category(category, db)\n\n\n@router.get(\"/category/{category_type}\", response_model=schemas.CategoryMaster)\ndef read_category(\n category_type: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.CategoryMaster).filter(\n models.CategoryMaster.category_type == category_type\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Category Type Not Found!')\n return data\n\n\n@router.put(\"/category/{category_type}\", response_model=schemas.CategoryMaster)\ndef update_category(\n category_type: str,\n category: schemas.CategoryMaster,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.update_category(category_type, category, db)\n\n\n@router.delete(\"/category/{category_type}\", response_model=schemas.CategoryMaster)\ndef delete_category(\n category_type: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.CategoryMaster).filter(\n models.CategoryMaster.category_type == category_type\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Category Type Not Found!')\n return crud.delete_category(category_type, db)\n\n\n@router.post(\"/assessment_partner\", response_model=schemas.AssessmentPartner)\ndef create_assessment_partner(\n assessment_partner: schemas.AssessmentPartner,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_assessment_partner(assessment_partner, db)\n\n\n@router.get(\"/assessment_partner/{partner_name}\", response_model=schemas.AssessmentPartner)\ndef read_assessment_partner(\n partner_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use 
credentials.\n data = db.query(\n models.AssessmentPartner).filter(\n models.AssessmentPartner.partner_name == partner_name\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Category Type Not Found!')\n return data\n\n\n@router.put(\"/assessment_partner/{partner_name}\", response_model=schemas.AssessmentPartner)\ndef update_assessment_partner(\n partner_name: str,\n partner: schemas.AssessmentPartner,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.update_assessment_partner(partner_name, partner, db)\n\n\n@router.delete(\"/assessment_partner/{partner_name}\", response_model=schemas.AssessmentPartner)\ndef delete_assessment_partner(\n partner_name: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.AssessmentPartner).filter(\n models.AssessmentPartner.partner_name == partner_name\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Category Type Not Found!')\n return crud.delete_assessment_partner(partner_name, db)\n\n\n@router.post(\"/subscription\", response_model=schemas.SubscriptionMaster)\ndef create_subscription(\n subscription: schemas.SubscriptionMaster,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_subscription(subscription, db)\n\n\n@router.get(\"/subscription/{subscription_id}\", response_model=schemas.SubscriptionMaster)\ndef read_assessment_partner(\n subscription_id: int,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.SubscriptionMaster).filter(\n models.SubscriptionMaster.id == subscription_id\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Subscription ID Not Found!')\n return data\n\n\n@router.post(\"/assessment_mapping\", response_model=schemas.AssessmentMapping)\ndef create_mapping(\n mapping: schemas.AssessmentMapping,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n return crud.create_mapping(mapping, db)\n\n\n@router.get(\"/assessment_mapping\", response_model=schemas.AssessmentMapping)\ndef read_assessment_partner(\n mapping_id: int,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n data = db.query(\n models.AssessmentMapping).filter(\n models.AssessmentMapping.id == mapping_id\n ).first()\n if not data:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Assessment Mapping Not Found!')\n return data\n\n\n@router.get(\"/assessment_integration/{client_name}/{application_name}/{skill_name}/{category_type}\")\ndef read_assessment_integration(\n client_name: str,\n application_name: str,\n skill_name: str,\n category_type: str,\n db: Session = Depends(get_db),\n current_user: schemas.SignUp = Depends(get_current_user)): # This is line will allow user to use credentials.\n\n client = db.query(models.Tenant).filter(models.Tenant.tenant_name == client_name).first()\n if not client:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Client 
Not Found!')\n\n if client.status:\n subscription = db.query(models.SubscriptionMaster).filter(\n models.SubscriptionMaster.tenant_id == client.id,\n models.SubscriptionMaster.subscription is True\n )\n if not subscription:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Subscription Not Found for this Client!')\n\n application = db.query(\n models.ApplicationMaster).filter(models.ApplicationMaster.application_name == application_name).first()\n if not application:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail='Assessment Not Mapped')\n application_id = application.id\n\n skill = db.query(models.SkillMaster).filter(models.SkillMaster.skill_name == skill_name).first()\n if not skill:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Skill Not Found!')\n\n category = db.query(models.CategoryMaster).filter(models.CategoryMaster.category_type == category_type).first()\n if not category:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Category Not Found!')\n\n application_mapping = db.query(models.AssessmentMapping). \\\n filter(models.AssessmentMapping.skill_master_id == skill.id,\n models.AssessmentMapping.assessment_partner_id == client.id,\n models.AssessmentMapping.category_master_id == category.id).all()\n\n if not application_mapping:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Skill Not Found!')\n final_dict = {}\n result = []\n for app_details in application_mapping:\n partner_subscription = db.query(models.AssessmentPartner). \\\n filter(models.AssessmentPartner.id == app_details.assessment_partner_id).first()\n if not partner_subscription:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Assessment Partner Not Found!')\n\n assessment_partner_name = partner_subscription.partner_name\n test_url = app_details.test_url\n test_slug = app_details.test_id\n\n do_select_api_key = cnf_dict['DO_SELECT']['do_select_api_key']\n do_select_api_secret = cnf_dict['DO_SELECT']['do_select_api_secret']\n do_select_url = cnf_dict['DO_SELECT']['do_select_url'] + '/' + test_slug\n header = {\n 'DoSelect-Api-Key': do_select_api_key,\n 'DoSelect-Api-Secret': do_select_api_secret\n }\n response = requests.get(do_select_url, headers=header)\n result.append(json.loads(response.text))\n\n final_dict['total_candidates'] = result[0].get('total_candidates', 0)\n final_dict['duration'] = result[0].get('duration', 0)\n final_dict['public_access_url'] = result[0].get('public_access_url', 0)\n final_dict['test_url'] = test_url\n final_dict['test_slug'] = test_slug\n final_dict['assessment_partner_name'] = assessment_partner_name\n return final_dict\n\n\n","repo_name":"Kaoushikkumarr/fast-api","sub_path":"routes/do_select.py","file_name":"do_select.py","file_ext":"py","file_size_in_byte":15712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"2381144530","text":"import pandas as pd\nfrom pandas import *\n\n\ndef DfInitTable():\n dfInitTable = DataFrame(pd.read_csv(\"./test.csv\"))\n return dfInitTable\n\ndef DfNameAndAlias(dfInitArg):\n dfNameAndAlias = dfInitArg [[\"NAME\",\"All_Alias\"]]\n return dfNameAndAlias\ndef Result(dfInitTalbe, dfNameAndAlias): #进行合并去重操作\n stateDic = {}\n dfColumns = dfInitTable.columns.values\n dfNewTable = DataFrame(columns=[dfColumns[i] for i in range(len(dfColumns))])\n try:\n for i in range(len(dfInitTable)):\n if not (\"ixState%d\"%i in stateDic.keys() and stateDic[\"ixState%d\"%i] == \"true\"):\n 
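                # Merge criterion used throughout Result(): two rows describe the
                # same entity when their NAME matches or their "###"-separated
                # alias sets intersect; illustrative check:
                #   set("a###b".split("###")) & set("b###c".split("###"))  # -> {"b"}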
stateDic[\"ixState%d\"%i] = \"true\"\n dfNamei = dfNameAndAlias.loc[i, \"NAME\"]\n dfAliasi = dfNameAndAlias.loc[i, \"All_Alias\"]\n dfAliasiSet = set(dfAliasi.split(\"###\"))\n replIndexi = dfInitTable.loc[i]\n falgMerge = False\n for j in range (i+1,len(dfInitTable)):\n dfNamej = dfNameAndAlias.loc[j, \"NAME\"]\n dfAliasj = dfNameAndAlias.loc[j, \"All_Alias\"]\n dfAliasjSet = set(dfAliasj.split(\"###\"))\n boolState = \"ixState%d\"%j not in stateDic\n if boolState:\n stateDic[\"ixState%d\"%j] = \"false\"\n else:\n pass \n if (stateDic[\"ixState%d\"%j] == \"false\") and (dfNamei == dfNamej or len(dfAliasiSet & dfAliasjSet) != 0):\n stateDic[\"ixState%d\"%j] = \"true\"\n falgMerge = True\n seriesMerge = mergeThreeCol(dfInitTable, i, j)\n dfNewTable.loc[i] = seriesMerge\n else:\n break\n if not falgMerge:\n dfNewTable.loc[i] = replIndexi\n else:\n continue \n except e:\n print (\"错误信息%s\"%e)\n return dfNewTable\n \ndef mergeThreeCol(dfInitTable, i, j): #合并要求的三列数据\n setAliasi = set(dfInitTable.loc[i, \"All_Alias\"].split(\"###\"))\n setAliasj = set(dfInitTable.loc[j, \"All_Alias\"].split(\"###\"))\n setCodei = set(dfInitTable.loc[i, \"CODE\"].split(\"###\"))\n setCodej = set(dfInitTable.loc[j, \"CODE\"].split(\"###\"))\n setSourcei = set(dfInitTable.loc[i, \"SOURCE\"].split(\"###\"))\n setSourcej = set(dfInitTable.loc[j, \"SOURCE\"].split(\"###\"))\n strAliasij = \"###\".join(setAliasi | setAliasj)\n strCodeij = \"###\".join(setCodei | setCodej)\n strSourceij = \"###\".join(setSourcei | setSourcej)\n strSourceij = sortedStrSource(strSourceij)\n replIndexi = dfInitTable.loc[i]\n replIndexi = replIndexi.replace([replIndexi[\"All_Alias\"],replIndexi[\"CODE\"],replIndexi[\"SOURCE\"]],\n [strAliasij,strCodeij,strSourceij])\n dfInitTable.loc[i] = replIndexi\n return replIndexi\n \ndef sortedStrSource(strSourceij):\n listSorted = []\n if \"###\" in strSourceij:\n listStrSourceij = strSourceij.split(\"###\")\n if \"卫计委\" in listStrSourceij:\n listSorted.append(\"卫计委\")\n if \"MOH\" in listStrSourceij:\n listSorted.append(\"MOH\") \n if \"CPA\" in listStrSourceij:\n listSorted.append(\"CPA\") \n if \"MUNDI\" in listStrSourceij:\n listSorted.append(\"MUNDI\") \n if \"MERCK\" in listStrSourceij:\n listSorted.append(\"MERCK\") \n if \"挂号\" in listStrSourceij:\n listSorted.append(\"挂号\") \n if \"PFIZER\" in listStrSourceij:\n listSorted.append(\"PFIZER\") \n elif \"Haodf\" in listStrSourceij:\n listSorted.append(\"Haodf\") \n return \"###\".join(listSorted)\n else:\n return strSourceij\n \n\n\ndfInitTable = DfInitTable() #加载初始DataFrame 使用EditPlus保存编码格式为UTF-8\nprint (dfInitTable)\ndfNameAndAlias = DfNameAndAlias(dfInitTable) #由Name和Alias组成的DataFrame\ndfNewFrameTable = Result(dfInitTable,dfNameAndAlias) #得到一个新的dataframe\ndfNewFrameTable.to_csv('./result.csv',index=False) #输出结果会在result.csv里面\n","repo_name":"west789/clean-data-by-pandas","sub_path":"Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"18183966481","text":"import copy,pickle,json\nimport sys\nfrom collections import namedtuple\n\n\n'''\n\n序列化与反序列化(json、pickle、shelve)\n\n 我们把变量从内存中变成可存储或传输的过程称之为序列化,在Python中叫pickling\n 反过来,把变量内容从序列化的对象重新读到内存里称之为反序列化,即unpickling\n \n https://www.cnblogs.com/gcgc/p/10973418.html\n \n'''\n\ndir = sys.path[0]\n\noutput_file1 = dir+'/pickle.txt'\noutput_file2 = dir+'/data.json'\n\n\nprint(\"===== pickle 序列化&反序列化 =====\")\n\n\n# 对象序列化到文件对象,就是存入文件\ndata1 = {'a': [1, 2.0, 3, 4+6j],\n 'b': 
('string', u'Unicode string'),\n 'c': None}\ndata2 = [1, 2, 3]\noutput = open(output_file1, 'wb')\npickle.dump(data1, output)\npickle.dump(data2, output)\noutput.close()\n\n\n# 对象反序列化,从文件读取数据\npkl_file = open(output_file1, 'rb')\ndata1 = pickle.load(pkl_file) # 一次只能读取一行\nprint(data1)\npkl_file.close()\n\n# 读取所有内容\ndef func():\n with open(output_file1, 'rb') as input_file: # 自动关闭文件流\n try:\n while True:\n data = pickle.load(input_file)\n print(data)\n except EOFError:\n input_file.close()\n\nfunc()\n\npython_data = {\"a\",\"b\",\"c\"}\n\npickle_data = b'\\x80\\x03cbuiltins\\nset\\nq\\x00]q\\x01(X\\x01\\x00\\x00\\x00cq\\x02X\\x01\\x00\\x00\\x00aq\\x03X\\x01\\x00\\x00\\x00bq\\x04e\\x85q\\x05Rq\\x06.'\n\nprint(pickle.dumps(python_data)) # python数据序列化成二进制格式\n\nprint(pickle.loads(pickle_data)) # 二进制格式反序列化成python数据\n\n\nprint(\"===== json 序列化&反序列化 =====\")\n\n\npython_data = {'a':'str', 'c': True, 'e': 10, 'b': 11.1, 'd': None, 'f': [1, 2, 3], 'g':(4, 5, 6)}\n\njson_data = '{\"default\":{\"a\":\"test\"}, \"c\": true, \"b\": 11.1, \"e\": 10, \"d\": null, \"g\": [4, 5, 6], \"f\": [1, 2, 3]}'\n\nprint(json.dumps(python_data,sort_keys=True)) # python数据转json格式\n\nprint(json.loads(json_data)) # json数据转python格式\n\n\nwith open(output_file2,'w') as output_file:\n json.dump(python_data,output_file) # 序列化python数据到json文件\n\nwith open(output_file2,'r') as input_file:\n data = json.load(input_file) # 反序列化json文件到python数据\n print(data,type(data))\n\n\n'重点注意:json 序列化&反序列实例对象'\n\n'序列化:Python对象 --> dict --> JSON object'\n\n\nclass Teacher:\n\n def __init__(self):\n self.name = \"\"\n self.age = 0\n self.sex = \"\"\n self.score = 0\n\n\nt1 = Teacher()\nt1.name= \"teacher1\"\nt1.age= 30\nt1.sex = \"男\"\n\nt2 = copy.deepcopy(t1)\nt2.name = \"teacher2\"\nt2.sex = \"女\"\nt2.score = 90\nt2.temp = \"test\"\n\n\n\ntemp1 = json.dumps(t1,ensure_ascii=False,default=lambda obj:obj.__dict__) # 传入实例对象属性 t1.__dict__\n\ntemp2 = json.dumps(t2,ensure_ascii=False,default=lambda obj:obj.__dict__)\n\nprint(temp1)\nprint(temp2)\n\n\n'反序列化的过程是“JSON object -> dict --> Python对象'\n\n\njson_data = '{\"name\":\"test\",\"age\":20,\"sex\":\"男\",\"grade\":[1,2,3],\"score\":{\"math\":90,\"computer\":95},\"books\":[{\"name\":\"math\",\"type\":\"study\"},{\"name\":\"The Little Prince\",\"type\":\"literature\"}]}'\n\nprint(\"====== 使用元祖解析json数据 =====\")\n\nBook = namedtuple('Book', ['name', 'type'])\nScore = namedtuple('Score', ['math', 'computer'])\nUser = namedtuple('User', ['name', 'age', 'sex','grade','score','books'])\n\ndata = json.loads(json_data)\n\nu = User(**data) # 必须保证json数据中的key值与对象属性名个数、名称一致\ns = Score(**u.score)\nprint(u)\nprint(s)\nbook_list = u.books\nfor book in book_list:\n b = Book(**book)\n print(b)\n\n\nprint(\"====== 使用字典解析json数据 =====\")\n\n\nclass Dict:\n\n def __init__(self, data):\n self.__dict__ = data\n\nd = json.loads(json_data,object_hook=Dict)\n\nprint(d)\nprint(d.name)\nprint(d.score)\nprint(d.books)\n","repo_name":"liuk2008/PythonDemo","sub_path":"python/13、序列化和反序列化.py","file_name":"13、序列化和反序列化.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"40706502487","text":"import os\nimport torch\nfrom torch.utils.ffi import create_extension\n\n\nsources = ['src/ext_lib.c']\nheaders = ['src/ext_lib.h']\ndefines = []\nwith_cuda = False\n\nif torch.cuda.is_available():\n print('Including CUDA code.')\n sources += ['src/ext_lib_cuda.c']\n headers += ['src/ext_lib_cuda.h']\n defines += [('WITH_CUDA', None)]\n with_cuda = 
True\n\nffi = create_extension(\n '_ext.ext_lib',\n headers=headers,\n sources=sources,\n define_macros=defines,\n relative_to=__file__,\n with_cuda=with_cuda\n)\n\nif __name__ == '__main__':\n ffi.build()\n","repo_name":"DingKe/pytorch_workplace","sub_path":"cffi/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"80"} +{"seq_id":"34866622774","text":"#written for python 3\n#0-1 First version\n#0-1b New handshake listener system\n#display version\n\nimport tkinter as tk #user interface library\nimport os \nimport serial #pyserial library\nfrom psigraph import * #change for release\n\nclass trimmedUI:\n #You can use this if you prefer a minimal UI.\n def __init__(self):\n self.logType=1\n self.master=tk.Tk()\n self.frame=tk.Frame(self.master)\n self.frame.pack(expand=1,fill=tk.BOTH)\n def log(self,s,sw=0):\n #sw=0 for in, 1 for out, 2 for mode indicator\n if self.logType==1:\n print(s)\n \ndef alphahexToByte(s):\n #returns an integer equal to the 2-digit alphahex string (a=0, p=f)\n return ((ord(s[0])-97)*16+(ord(s[1])-97))\n\ndef byteToAlphahex(b):\n #returns a 2-digit alphahex string from the byte value b.\n return chr((b&15)+97)+chr((b&240)//16+97)\ndef alphahexToNumber(s,n):\n #returns the unsigned integer conversion from an n digit alphahex string s\n out=0\n for i in range(0,n-1):\n out+=(ord(s[i])-97)*16**(n-i-1)\n return out\ndef pad(s,n):\n #appends zeros the front of a string until it is n characters long.\n out=s\n while len(out)self.measureInterval/4:\n self.openSerial()\n self.measureCount=0\n if self.mode==1:\n #mode 1 is for when the serial port is connected but a handshake\n #is not established. if it stays in this state indefinitely then\n #possibly the arduino isn't responding correctly.\n self.gui.log(\"Serial open, awaiting handshake\",2)\n if self.measureCount>self.measureInterval/4:\n self.feelSerial()\n self.measureCount=0\n if self.mode==2:\n #mode 2 is the main mode for communication back and forth.\n self.gui.log(\"Connected to board, ID \"+self.instrumentVersion,2) #check whether you are passing a function handle or a function's response. Brackets matter.\n self.main()\n self.measureCount+=1\n self.gui.master.after(self.updatems,self.loop) #return control to gui.\n def main(self):\n #The main function contains program actions, like measurements.\n #The getSerial function is used throughout to sniff for packets.\n #it will return a string containing packet contents, if any\n #are delivered. 
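        # (Framing sketch, illustrative: putSerial("M0") writes b"M0\r" to the
        # port; getSerial then accumulates bytes until the trailing chr(13)
        # and appends the decoded payload to self.received.)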
Packets have a maximum length of bufferLength.\n if self.firstFlag==1: #things to do on the first call of main()\n self.firstFlag=0\n #self.setM41T83(13,11,10,9,8,255,50)\n if self.measureCount>self.measureInterval:\n #print(\"Starting measurement\")\n self.measureDict[self.measureID]()\n self.measureID+=1\n if self.measureID>=len(self.measureDict):\n self.measureID=0\n self.measureCount=0\n self.getSerial()\n self.processSerial() #call every tick\n def processSerial(self):\n while len(self.received)>0:\n s=self.getReceived()\n if s==\"\":\n return\n if len(s)<2:\n print(\"Did not process malformed packet\")\n tag=s[0:2]\n packet=s[2:]\n print(\"Processing packet \"+packet+\" with tag \"+tag)\n #s should be a trimmed serial string\n try:\n self.packetDict[tag](packet)\n return\n except KeyError:\n print(\"Tag not recognized, packet discarded\")\n return\n def processCC2D25(self,packet):\n b0=alphahexToByte(packet[-4:-2])\n b1=alphahexToByte(packet[-2:])\n tenperature=(b0*256+b1)/16384*165-40 #TENperature\n print(\"Temperature in degrees C \"+str(tenperature))\n return\n def processM41T83(self,packet):\n print(str(alphahexToByte(packet[0:2])))\n return\n def processMeasurement(self,packet):\n #print(alphahexToNumber(packet[0:3],3))\n v=[]\n for i in range(0,8):\n v.append(alphahexToNumber(packet[i*3:(i*3+3)],3)/204.8)\n try:\n #print(packet[0:3]+\",\"+packet[3:6]+\",\"+packet[6:9]+\",\"+packet[9:12]+\",\"+packet[12:15]+\",\"+packet[15:18]+\",\"+packet[18:21]+\",\"+packet[21:24])\n out=\"Measured V:\"\n out+=str(alphahexToNumber(packet[0:3],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[3:6],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[6:9],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[9:12],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[12:15],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[15:18],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[18:21],3)/204.8)+\", \"\n out+=str(alphahexToNumber(packet[21:24],3)/204.8)\n print(out)\n except:\n print(\"Error\")\n #handle the bar graph\n self.gui.bars.set_values(v)\n return\n def getReceived(self):\n if len(self.received)>0:\n s=self.received[0]\n if len(self.received)>1:\n self.received=self.received[1:]\n else:\n self.received=[]\n return s\n else:\n return \"\"\n def getCC2D25(self):\n self.putSerial(\"HT\")\n def setM41T83(self,YY,MM,DD,HH,MN,SS,MS):\n out=\"tS\"+chr(YY+20)+chr(MM+20)+chr(DD+20)+chr(HH+20)+chr(MN+20)+chr(SS+20)+chr(MS+20)\n self.putSerial(out)\n #print(out)\n def getM41T83(self):\n self.putSerial(\"tG\")\n def getMeasurement(self):\n self.putSerial(\"M0\")\n def openSerial(self):\n #This code scans com ports and opens serial connections. 
If you\n #are planning on multiple serial objects, this code will need revising\n #as it assumes there is only an arduino attached to the computer, an\n #assumption that will have to be fixed.\n \n #if you know the specific port of the arduino, set it here first.\n try:\n self.ch=serial.Serial(self.arduinoPort,9600,timeout=(self.updatesec)/100) #serial channel\n self.mode=1\n print(\"Opened connection to \"+self.arduinoPort)\n return\n except serial.serialutil.SerialException as err:\n self.mode=0\n i=0\n #windows code\n for i in range(0,256):\n try:\n self.ch=serial.Serial(\"COM\"+str(i),9600,timeout=(self.updatesec)) #serial channel\n self.mode=1\n print(\"Opened connection to COM\"+str(i))\n #self.feelSerial()\n return\n except serial.serialutil.SerialException as err:\n self.mode=0\n #linux code\n for i in range(0,255):\n try:\n self.ch=serial.Serial(\"/dev/ttyUSB\"+str(i),9600,timeout=(self.updatesec)) #serial channel\n self.mode=1\n print(\"Opened connection to /dev/ttyUSB\"+str(i))\n #self.feelSerial()\n return\n except serial.serialutil.SerialException as err:\n self.mode=0\n def feelSerial(self):\n #This code feels for a handshake.\n self.putSerial('VV')\n self.getSerial()\n out=self.getReceived()\n if out[0:2]=='VV':\n #retrieve board version\n self.instrumentVersion=out[2:]\n print(\"Handshake received from board \"+self.instrumentVersion)\n self.mode=2\n def closeSerial(self):\n #neatly closing the serial channel is important especially on windows,\n #in case of exceptions. I'm not sure that this code is unbugged.\n try:\n self.ch.close()\n print(\"closed connection\")\n self.mode=0\n except serial.serialutil.SerialException as err:\n self.mode=0\n print(\"Cannot close serial port, error: \"+format(err.strerror))\n except:\n print(\"Cannot close serial port.\")\n def putSerial(self,s):\n #This function should be called when sending serial commands!\n #it encapsulates the packets properly.\n out=s+str(chr(13))\n #s#=str(chr(2))+s+str(chr(13))\n try:\n self.ch.write(out.encode())\n except serial.serialutil.SerialException as err:\n self.mode=0\n print('connection terminated')\n self.gui.log('Serial out >'+out,1)\n def getSerial(self):\n #waits for a packeted (starts with chr(2), ends with chr(13)) to be\n #sent over serial. unpacketed data is added to self.buffer, completed\n #packets are added to the list self.received.\n snag=0\n begun=0\n try:\n while(snag==0):# and len(buffer)0:\n #the way python handles strings and characters is flat-out\n #fucking insane. 
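                    # (A gentler alternative to slicing str(c_buffer): decode the
                    # byte instead, e.g. c_buffer.decode("ascii", errors="replace") == "\r"
                    # compares the received byte to a carriage return directly.)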
Just so you know.\n c_buffer=self.ch.read(1)\n s_buffer=str(c_buffer)\n #if s_buffer[2:6]==\"\\\\x02\":\n begun=1\n if begun==1:\n if s_buffer[2:4]==\"\\\\r\":\n #self.buffer=self.buffer[1:]#snip\n print(\"packet received >\"+self.buffer)\n self.received.append(self.buffer)\n self.buffer=\"\"\n self.begun=0\n else:\n self.buffer=self.buffer+s_buffer[2:3]\n #if there's a char error (ie a non printed character), you'll just get a slash.\n else:\n snag=1\n except IOError:\n self.gui.log(\"Connection lost\",2)\n mode=0\n''' def getSerial(self):\n #hopefully just adds a line to received.\n s=self.ch.readline()\n if s!=\"\":\n self.received.append(s)\n #self.getSerial()'''\n\nclass App:\n def __init__(self):\n self.com=lairCom()\n self.com.closeSerial()\n\n#print(alphahexToNumber(\"aba\",3))\nrun=App()\nprint(\"Beendet\")\n","repo_name":"philspaceindustries/SOGS","sub_path":"python/lair0-2.py","file_name":"lair0-2.py","file_ext":"py","file_size_in_byte":11402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"8438510561","text":"import sys\nimport os\nimport numpy as np\nimport linecache\nimport csv\nimport json\nfrom matplotlib import pyplot as plt\ntry:\n import tikzplotlib\nexcept ImportError:\n print('Warning: No module tikzplotlib found. Not necessary on car but for development.')\n\n# custom modules\nvel_opt_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(vel_opt_path)\n\n\ndef read_ltpl_raw(csv_name: str) -> tuple:\n\n \"\"\"\n Python version: 3.5\n Created by: Thomas Herrmann (thomas.herrmann@tum.de)\n Created on: 01.02.2020\n\n Documentation: Read log data from TUM trajectory planner module\n\n Inputs:\n csv_name: Path and file name to log file\n\n Outputs:\n s_glob_arr: array filled with global s coordinate values [m]\n v_arr: array containing the driven velocity [m/s]\n ax_arr: array containing the driven longitudinal acceleration [m/s^2]\n ay_arr: array containing the driven lateral acceleration [m/s^2]\n \"\"\"\n\n # [m]\n s_glob_arr = []\n # [m/s]\n v_arr = []\n # [m/s^2]\n ax_arr = []\n # [rad/m]\n kappa_arr = []\n\n with open(csv_name) as csvfile:\n row_count = sum(1 for row in\n csv.reader(csvfile,\n delimiter=';',\n lineterminator='\\n'))\n\n for i in range(3, row_count):\n row_lc = linecache.getline(csv_name, i)\n row_lc = row_lc[:-1].rsplit(';')\n\n s_glob = json.loads(row_lc[1])\n s_glob_arr.append(s_glob)\n v = json.loads(row_lc[8])['straight'][0][0]\n v_arr.append(v)\n ax = json.loads(row_lc[9])['straight'][0][0]\n ax_arr.append(ax)\n kappa = json.loads(row_lc[11])['straight'][0][0]\n kappa_arr.append(kappa)\n\n ################################################################################################################\n # Calculations with LTPL log data\n ################################################################################################################\n ay_arr = kappa_arr * np.square(v_arr)\n\n plt.figure()\n plt.subplot(2, 1, 1)\n plt.plot(s_glob_arr, v_arr)\n plt.xlabel(r's in m')\n plt.ylabel(r'v in m/s')\n plt.subplot(2, 1, 2)\n plt.plot(s_glob_arr, ay_arr)\n plt.xlabel(r's in m')\n plt.ylabel(r'a_y in m/s2')\n\n tikzplotlib.save('ltpl.tex')\n plt.show()\n\n return s_glob_arr, v_arr, ax_arr, ay_arr\n\n\nif __name__ == '__main__':\n\n csv_name = vel_opt_path + '/../logs/ltpl/2020_04_01/18_47_51_data.csv'\n\n 
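    # read_ltpl_raw derives lateral acceleration from the logged curvature and
    # velocity arrays as a_y = kappa * v**2; e.g. kappa = 0.01 1/m at
    # v = 20 m/s gives a_y = 4.0 m/s^2 (values illustrative only):
    assert abs(0.01 * 20.0 ** 2 - 4.0) < 1e-9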
read_ltpl_raw(csv_name=csv_name)\n","repo_name":"TUMFTM/velocity_optimization","sub_path":"velocity_optimization/opt_postproc/src/read_ltpl_raw.py","file_name":"read_ltpl_raw.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"80"} +{"seq_id":"15883096411","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom OptionJudgment import OptionJudgment\nfrom threading import Thread\nimport pythoncom\n\n\nclass ProgaramMainScreen:\n\n def __init__(self):\n\n self.window = Tk()\n self.window.title(\"Auto SQL\") # 메인 화면 타이틀 제목 설정\n self.window.geometry(\"800x500\") # 메인 화면 크기 설정\n self.window.resizable(False, False) # 가로값 x 세로값 변경 불가\n self.window.configure(background ='#e5e5e5') # rgb코드로 색상 변경\n\n\n # 라벨 생성\n\n # 메인 화면 라벨 설정\n mainLabel = Label(self.window, text=\"로그 데이터를 처리중 입니다. 잠시만 기다려 주세요\", bg = '#e5e5e5') # 데이터 베이스 IP 라벨\n mainLabel.place(x=260, y=60) # mainLabel의 위치\n\n bottomLabel = Label(self.window, text=\" ※ 로그 데이터를 처리가 완료되면 프로그램이 자동으로 종료 됩니다 \", bg='#e5e5e5') # 데이터 베이스 IP 라벨\n bottomLabel.place(x=220, y=400) # mainLabel의 위치\n\n\n # gif 파일 재생\n frameCnt = 30\n frames = [PhotoImage(file='C:/Users/cusoft/Desktop/LoadingMark/bufferMark2.gif', format='gif -index %i' % (i)) for i in\n range(frameCnt)]\n\n # frame을 after메서드를 사용해서 업데이트\n label = Label(self.window, bg = '#e5e5e5')\n label.place(x=300, y=140)\n\n\n\n def update(ind):\n\n frame = frames[ind]\n ind += 1\n if ind == frameCnt:\n ind = 0\n label.configure(image=frame)\n self.window.after(50, update, ind)\n\n def main_event_hangdling():\n\n pythoncom.CoInitialize()\n\n oj = OptionJudgment()\n oj.option_jud()\n pythoncom.CoInitialize()\n self.window.destroy()\n\n th1 = Thread(target=main_event_hangdling)\n th1.start()\n self.window.after(0, update, 0)\n self.window.mainloop()\n\n\n\n\n#메인 화면 호출\nms = ProgaramMainScreen()\n","repo_name":"sagara12/StartAutoLogProgram","sub_path":"ProgramMainScreen.py","file_name":"ProgramMainScreen.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"23887272108","text":"\"\"\"\nFunctions regarding the class Committee, managing a committee of models\n\"\"\"\nfrom nn_regression import create_datasets, normalize, build_model, retransform\nimport numpy as np\nfrom analysis import pash_to_dataframe, plot_contour, plot_points\nimport matplotlib.pyplot as plt\nfrom hyperparameters import calculate_mse\nimport os\nimport shutil\nimport tensorflow as tf\n\nclass Committee:\n \"\"\"\n A committee of identical models, initialized with the number of models,\n The models are automatically built, and fit when the appropriate functions are called\n \"\"\"\n def __init__(self, models_number):\n \"\"\"\n Initialzations\n :param models_number: (int) number of models in the committee\n \"\"\"\n self.models_number = models_number\n self.models = []\n def build_model(self, normalizer, layers, optimizer=\"adam\"):\n \"\"\"\n Build and compile the models of the committee\n :param normalizer: the first normalizer layer of the models\n :param layers: (int list) [# neurons in layer1, ... 
# neurons in layern]\n containing the shape of the networks\n :param optimizer: (str) Optimizer of the backpropagation\n :return: None\n \"\"\"\n for i in range (self.models_number):\n self.models += [build_model(normalizer, layers, optimizer=optimizer)]\n\n def fit(self, train_features, train_labels, epochs,batch_size=30, verbose = 1, split_train = False, bootstrap = None):\n \"\"\"\n Fit the models with the train_features over some epochs epochs\n :param train_features: DataFrame, [DataFrame1, DataFrame2....]\n If a list (of len n_model) is given, each element is given\n to the respective model\n Else has to be a dataframe\n The input training data\n :param train_labels: As above\n The expected labels from the training data\n :param epochs: int, Number of epochs of training\n :param batch_size: int, batch size\n :param verbose: Bool, if 1, prints the loss function at each epoch\n if 0, only prints when each model starts the training\n :param split_train: Bool, if True, the training set is divided into n_models parts\n as possible\n Overwritten by a float bootstrap\n Overwritten by a list of train features\n :param bootstrap: None, float, if float, each model trains on a sample of the training set\n Overwrites split_train\n Overwritten by a list of train features\n :return: history_list: list of the output of each fit\n \"\"\"\n history_list = []\n if split_train is True:\n n = len(train_features)//self.models_number\n i = 0\n for j in range(len(self.models)):\n model = self.models[j]\n print(\"train model \"+str(i+1)+\"/\"+str(self.models_number))\n if type(train_features) is list:\n # Case where the training set is already split\n train_features_spec = train_features[j]\n train_labels_spec = train_labels[j]\n elif type(bootstrap) is float:\n # Bootstrap method: each model takes a sample\n train_features_spec = train_features.sample(frac=bootstrap)\n indexes = train_features_spec.index\n train_labels_spec = train_labels[indexes]\n elif split_train:\n # Case where we have to split the training set in equal parts\n train_features_spec = train_features.sample(frac=1./(self.models_number-i))\n indexes = train_features_spec.index\n train_labels_spec = train_labels[indexes]\n train_features = train_features.drop(indexes)\n train_labels = train_labels.drop(indexes)\n else:\n # Each model trains on the same training set\n train_features_spec = train_features\n train_labels_spec = train_labels\n history_list += [model.fit(train_features_spec, train_labels_spec, batch_size=batch_size, epochs=epochs, verbose=verbose)]\n i += 1\n return history_list\n def predict (self, data_features):\n \"\"\"\n Predict the output from the input features\n :param data_features: dataframe containing the input\n :return: list of arrays containing the predictions from each model\n \"\"\"\n list_prediction = []\n for model in self.models:\n list_prediction += [model.predict(data_features)]\n return list_prediction\n\ndef get_mean_var(list_prediction):\n \"\"\"\n Get the mean prediction and the variance over it from the output of committee.predict\n :param list_prediction: list of arrays, output of committee.predict\n :return: mean_list: list of the mean prediction from all models\n :return: var_list: list of the standard on the prediction from all models\n \"\"\"\n mean_list = np.mean(list_prediction, 0)\n var_list = np.var(list_prediction, 0)\n return mean_list, var_list\n\ndef plot_histo (committee, point_features, expected_value, ax=plt.gca()):\n \"\"\"\n plot the histogram of the prediction on one point by each 
model of a committee\n Adds a vertical bar for the mean and for the expected value\n :param committee: Committee object\n :param point_features: single entry dataframe\n :param expected_value: expected value on the point\n :param ax: ax on which to plot the histogram\n :return:\n \"\"\"\n predicted_list = np.ravel(committee.predict(point_features))\n n, b, p= ax.hist(predicted_list)\n ax.vlines(expected_value, 0, max(n), colors=\"r\")\n ax.vlines(np.mean(predicted_list), 0, max(n), colors=\"g\")\n ax.set_xlabel('Barrier')\n\n\nclass HistOnClick:\n \"\"\"\n This class makes the histogram of the values for a point, predicted\n by different models of the committee when the point is clicked.\n It adds the mean predicted value and the expected value.\n \"\"\"\n def __init__(self, ax1, ax2, committee, data, features, target, in_order = True):\n \"\"\"\n :param ax1: ax where the points can be clicked\n :param ax2: ax on which the histogram is plotted\n :param committee: a Committee object\n :param data: data from which the expected target comes\n :param features: list of str, list of keys\n :param target: str, key of the target\n :param in_order: say True if the dataset is ordered in a grid pattern. It makes the function quicker\n \"\"\"\n self.committee = committee\n self.ax1 = ax1\n self.ax2 = ax2\n self.ax1.figure.canvas.mpl_connect('button_press_event', self.on_click)\n\n self.in_order = in_order\n self.data = data\n self.features = features\n self.target = target\n # If the data is in agrid pattern , determine the step, min and max in both dimensions\n if in_order is True:\n index = 0\n diff = 1\n while diff > 0:\n diff = data.loc[index+1, features[1]]-data.loc[index, features[1]]\n index += 1\n lent = len(data)\n self.min1 = data.loc[0, features[0]]\n max1 = data.loc[lent-1, features[0]]\n self.min2 = data.loc[0, features[1]]\n len2 = index\n len1 = lent // index\n self.len2 = len2\n max2 = data.loc[len2 - 1, features[1]]\n self.step1 = (max1 - self.min1 ) / len1\n self.step2 = (max2 - self.min2 ) / len2\n def on_click (self, event):\n \"\"\"\n On a click, plot the histogram corresponding to the point\n :param event: from the click event\n \"\"\"\n features = self.features\n target = self.target\n data = self.data\n # Get the point coordinates\n coord = np.array([event.xdata, event.ydata])\n # If not in order, find the closest point in data with brute force\n if self.in_order is False:\n index = 0\n norm = 1000\n for i in range(len(data)):\n point_features = data.loc[[i], features]\n newnorm = np.linalg.norm(point_features-coord)\n if newnorm < norm :\n norm = newnorm\n index = i\n # if it is ordered, get its position in data\n else:\n pos1 = round((coord[0] - self.min1 )/self.step1 - 1)\n pos2 = round((coord[1] - self.min2 )/self.step2 - 1)\n index = int(pos1*self.len2 + pos2)\n self.ax2.cla()\n self.ax1.set_data(data.loc[[index], features[0]], data.loc[[index], features[1]])\n point_features = data.loc[[index], features]\n expected_value = data.loc[[index], target]\n plot_histo(self.committee, point_features, expected_value, self.ax2)\n self.ax2.figure.canvas.draw()\n\ndef interactive_plots(committee, features, target, data, train_dataset, plot_train=True):\n \"\"\"\n Plots many interesting plots for a committee: Variance, histogram, predicted and absolute error\n :param committee: Committee object\n :param features: list of str, keys\n :param target: str, key\n :param data: dataframe to test\n :param train_dataset: dataframe, train dataset will be plotted if plot_train is True\n :param 
plot_train: bool, if True, plot the training dataset\n :return:\n \"\"\"\n list_prediction = committee.predict(data[features])\n predicted_target, variance = get_mean_var(list_prediction)\n predicted_target = retransform(data[features], predicted_target)\n variance = retransform(data[features], variance)\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n\n plot_contour(variance, features[0], features[1], target, colorbar=True, levels=40, ax=ax1, bar_name=\"Variance\")\n plot_contour(predicted_target, features[0], features[1], target, colorbar=True, levels=40, ax=ax3,\n bar_name=\"Predicted Barrier\")\n\n diff = np.abs(data[target] - predicted_target[target])\n data_diff = retransform(data[features], diff)\n plot_contour(data_diff, features[0], features[1], target, colorbar=True, ax=ax4, levels=40,\n bar_name=\"Barrier difference with expected\")\n\n if plot_train:\n plot_points(train_dataset, features, ax1)\n plot_points(train_dataset, features, ax3)\n plot_points(train_dataset, features, ax4)\n\n pointer, = ax1.plot([0], [0], \"+\")\n object = HistOnClick(pointer, ax2, committee, data, features, target)\n\n plt.show()\n return object\n\ndef save_committee(dir_name, committee):\n \"\"\"\n Save a committee\n :param dir_name: str, name of the committee\n :param committee: Committee object\n :return: None\n \"\"\"\n os.chdir(\"data/models/\")\n if dir_name in os.listdir():\n shutil.rmtree(dir_name)\n os.mkdir(dir_name)\n os.chdir(dir_name)\n i = 0\n for model in committee.models:\n model.save(\"model\"+str(i))\n i += 1\n os.chdir(\"../../..\")\n\ndef load_committee(dir_name):\n \"\"\"\n Load a committee from a file\n :param dir_name: str, name of the committee\n :return: Committee object\n \"\"\"\n models = []\n n_models = 0\n os.chdir(\"data/models/\"+dir_name)\n for filename in os.listdir():\n models += [tf.keras.models.load_model(filename)]\n n_models += 1\n committee = Committee(n_models)\n committee.models = models\n os.chdir(\"../../..\")\n return committee\n\nif __name__ == \"__main__\":\n # Get the data\n features = [\"epsilon\", \"a3\"]\n data = pash_to_dataframe(\"data/pash/large_pash.dat\")\n train_dataset, test_dataset, \\\n train_features, train_labels, \\\n test_features, test_labels \\\n = create_datasets(data, features, \"Barrier\", frac=0.1)\n print(\"Loaded data\")\n # Create a committee\n \"\"\"\n committee = Committee(50)\n normalizer = normalize(train_features)\n committee.build_model(normalizer, [150, 150, 150], optimizer=\"adamax\")\n # Fit on the data\n committee.fit(train_features, train_labels, epochs=2000, verbose=0, bootstrap=.1)\n # Example on saving and loading a committee\n print(\"Saving model\")\n save_committee(\"testing\", committee)\n \"\"\"\n print(\"Loading model\")\n committee = load_committee(\"testing\")\n # Compute the MSE\n list_prediction = committee.predict(data[features])\n predicted_target, variance = get_mean_var(list_prediction)\n predicted_target = np.ravel(predicted_target)\n rmse = np.sqrt(calculate_mse(predicted_target, data[\"Barrier\"]))\n print(\"rmse \", rmse, \"MeV\")\n # Plot the variance, the prediction, difference with expected values and the histograms\n o = interactive_plots(committee, features, \"Barrier\", data, train_dataset, plot_train=False)\n\n","repo_name":"CorentinvdBdO/sruthi-co-pes","sub_path":"src/committee.py","file_name":"committee.py","file_ext":"py","file_size_in_byte":12950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} 
+{"seq_id":"1469678196","text":"\"\"\"\n\nModule for the DDM class.\n\nThe DDM class is an internal representation used by DomainMatrix. The letters\nDDM stand for Dense Domain Matrix. A DDM instance represents a matrix using\nelements from a polynomial Domain (e.g. ZZ, QQ, ...) in a dense-matrix\nrepresentation.\n\nBasic usage:\n\n >>> from sympy import ZZ, QQ\n >>> from sympy.polys.matrices.ddm import DDM\n >>> A = DDM([[ZZ(0), ZZ(1)], [ZZ(-1), ZZ(0)]], (2, 2), ZZ)\n >>> A.shape\n (2, 2)\n >>> A\n [[0, 1], [-1, 0]]\n >>> type(A)\n \n >>> A @ A\n [[-1, 0], [0, -1]]\n\nThe ddm_* functions are designed to operate on DDM as well as on an ordinary\nlist of lists:\n\n >>> from sympy.polys.matrices.dense import ddm_idet\n >>> ddm_idet(A, QQ)\n 1\n >>> ddm_idet([[0, 1], [-1, 0]], QQ)\n 1\n >>> A\n [[-1, 0], [0, 1]]\n\nNote that ddm_idet modifies the input matrix in-place. It is recommended to\nuse the DDM.det method as a friendlier interface to this instead which takes\ncare of copying the matrix:\n\n >>> B = DDM([[ZZ(0), ZZ(1)], [ZZ(-1), ZZ(0)]], (2, 2), ZZ)\n >>> B.det()\n 1\n\nNormally DDM would not be used directly and is just part of the internal\nrepresentation of DomainMatrix which adds further functionality including e.g.\nunifying domains.\n\nThe dense format used by DDM is a list of lists of elements e.g. the 2x2\nidentity matrix is like [[1, 0], [0, 1]]. The DDM class itself is a subclass\nof list and its list items are plain lists. Elements are accessed as e.g.\nddm[i][j] where ddm[i] gives the ith row and ddm[i][j] gets the element in the\njth column of that row. Subclassing list makes e.g. iteration and indexing\nvery efficient. We do not override __getitem__ because it would lose that\nbenefit.\n\nThe core routines are implemented by the ddm_* functions defined in dense.py.\nThose functions are intended to be able to operate on a raw list-of-lists\nrepresentation of matrices with most functions operating in-place. The DDM\nclass takes care of copying etc and also stores a Domain object associated\nwith its elements. 
This makes it possible to implement things like A + B with\ndomain checking and also shape checking so that the list of lists\nrepresentation is friendlier.\n\n\"\"\"\nfrom .exceptions import DDMBadInputError, DDMShapeError, DDMDomainError\n\nfrom .dense import (\n ddm_iadd,\n ddm_isub,\n ddm_ineg,\n ddm_imul,\n ddm_imatmul,\n ddm_irref,\n ddm_idet,\n ddm_iinv,\n ddm_ilu_split,\n ddm_ilu_solve,\n ddm_berk,\n )\n\n\nclass DDM(list):\n \"\"\"Dense matrix based on polys domain elements\n\n This is a list subclass and is a wrapper for a list of lists that supports\n basic matrix arithmetic +, -, *, **.\n \"\"\"\n\n fmt = 'dense'\n\n def __init__(self, rowslist, shape, domain):\n super().__init__(rowslist)\n self.shape = self.rows, self.cols = m, n = shape\n self.domain = domain\n\n if not (len(self) == m and all(len(row) == n for row in self)):\n raise DDMBadInputError(\"Inconsistent row-list/shape\")\n\n def to_list(self):\n return list(self)\n\n def to_ddm(self):\n return self\n\n def to_sdm(self):\n return SDM.from_list(self, self.shape, self.domain)\n\n def convert_to(self, K):\n Kold = self.domain\n if K == Kold:\n return self.copy()\n rows = ([K.convert_from(e, Kold) for e in row] for row in self)\n return DDM(rows, self.shape, K)\n\n def __str__(self):\n rowsstr = ['[%s]' % ', '.join(map(str, row)) for row in self]\n return '[%s]' % ', '.join(rowsstr)\n\n def __repr__(self):\n cls = type(self).__name__\n rows = list.__repr__(self)\n return '%s(%s, %s, %s)' % (cls, rows, self.shape, self.domain)\n\n def __eq__(self, other):\n if not isinstance(other, DDM):\n return False\n return (super().__eq__(other) and self.domain == other.domain)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @classmethod\n def zeros(cls, shape, domain):\n z = domain.zero\n m, n = shape\n rowslist = ([z] * n for _ in range(m))\n return DDM(rowslist, shape, domain)\n\n @classmethod\n def eye(cls, size, domain):\n one = domain.one\n ddm = cls.zeros((size, size), domain)\n for i in range(size):\n ddm[i][i] = one\n return ddm\n\n def copy(self):\n copyrows = (row[:] for row in self)\n return DDM(copyrows, self.shape, self.domain)\n\n def __add__(a, b):\n if not isinstance(b, DDM):\n return NotImplemented\n return a.add(b)\n\n def __sub__(a, b):\n if not isinstance(b, DDM):\n return NotImplemented\n return a.sub(b)\n\n def __neg__(a):\n return a.neg()\n\n def __mul__(a, b):\n if b in a.domain:\n return a.mul(b)\n else:\n return NotImplemented\n\n def __rmul__(a, b):\n if b in a.domain:\n return a.mul(b)\n else:\n return NotImplemented\n\n def __matmul__(a, b):\n if isinstance(b, DDM):\n return a.matmul(b)\n else:\n return NotImplemented\n\n @classmethod\n def _check(cls, a, op, b, ashape, bshape):\n if a.domain != b.domain:\n msg = \"Domain mismatch: %s %s %s\" % (a.domain, op, b.domain)\n raise DDMDomainError(msg)\n if ashape != bshape:\n msg = \"Shape mismatch: %s %s %s\" % (a.shape, op, b.shape)\n raise DDMShapeError(msg)\n\n def add(a, b):\n \"\"\"a + b\"\"\"\n a._check(a, '+', b, a.shape, b.shape)\n c = a.copy()\n ddm_iadd(c, b)\n return c\n\n def sub(a, b):\n \"\"\"a - b\"\"\"\n a._check(a, '-', b, a.shape, b.shape)\n c = a.copy()\n ddm_isub(c, b)\n return c\n\n def neg(a):\n \"\"\"-a\"\"\"\n b = a.copy()\n ddm_ineg(b)\n return b\n\n def mul(a, b):\n c = a.copy()\n ddm_imul(c, b)\n return c\n\n def matmul(a, b):\n \"\"\"a @ b (matrix product)\"\"\"\n m, o = a.shape\n o2, n = b.shape\n a._check(a, '*', b, o, o2)\n c = a.zeros((m, n), a.domain)\n ddm_imatmul(c, a, b)\n return c\n\n def hstack(A, 
B):\n Anew = list(A.copy())\n rows, cols = A.shape\n domain = A.domain\n\n Brows, Bcols = B.shape\n assert Brows == rows\n assert B.domain == domain\n\n cols += Bcols\n\n for i, Bi in enumerate(B):\n Anew[i].extend(Bi)\n\n return DDM(Anew, (rows, cols), A.domain)\n\n def rref(a):\n \"\"\"Reduced-row echelon form of a and list of pivots\"\"\"\n b = a.copy()\n pivots = ddm_irref(b)\n return b, pivots\n\n def nullspace(a):\n rref, pivots = a.rref()\n rows, cols = a.shape\n domain = a.domain\n\n basis = []\n nonpivots = []\n for i in range(cols):\n if i in pivots:\n continue\n nonpivots.append(i)\n vec = [domain.one if i == j else domain.zero for j in range(cols)]\n for ii, jj in enumerate(pivots):\n vec[jj] -= rref[ii][i]\n basis.append(vec)\n\n return DDM(basis, (len(basis), cols), domain), nonpivots\n\n def det(a):\n \"\"\"Determinant of a\"\"\"\n m, n = a.shape\n if m != n:\n raise DDMShapeError(\"Determinant of non-square matrix\")\n b = a.copy()\n K = b.domain\n deta = ddm_idet(b, K)\n return deta\n\n def inv(a):\n \"\"\"Inverse of a\"\"\"\n m, n = a.shape\n if m != n:\n raise DDMShapeError(\"Determinant of non-square matrix\")\n ainv = a.copy()\n K = a.domain\n ddm_iinv(ainv, a, K)\n return ainv\n\n def lu(a):\n \"\"\"L, U decomposition of a\"\"\"\n m, n = a.shape\n K = a.domain\n\n U = a.copy()\n L = a.eye(m, K)\n swaps = ddm_ilu_split(L, U, K)\n\n return L, U, swaps\n\n def lu_solve(a, b):\n \"\"\"x where a*x = b\"\"\"\n m, n = a.shape\n m2, o = b.shape\n a._check(a, 'lu_solve', b, m, m2)\n\n L, U, swaps = a.lu()\n x = a.zeros((n, o), a.domain)\n ddm_ilu_solve(x, L, U, swaps, b)\n return x\n\n def charpoly(a):\n \"\"\"Coefficients of characteristic polynomial of a\"\"\"\n K = a.domain\n m, n = a.shape\n if m != n:\n raise DDMShapeError(\"Charpoly of non-square matrix\")\n vec = ddm_berk(a, K)\n coeffs = [vec[i][0] for i in range(n+1)]\n return coeffs\n\n\nfrom .sdm import SDM\n","repo_name":"thebaselab/codeapp","sub_path":"LanguageResources/Library/lib/python3.9/site-packages/sympy/polys/matrices/ddm.py","file_name":"ddm.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","stars":2422,"dataset":"github-code","pt":"80"} +{"seq_id":"27957887710","text":"from math import log, ceil\nfrom collections import Counter, defaultdict\n\nclass InvertedIndex:\n def __init__(self,documents):\n ## Parameters...\n self.documents = documents\n self.ndocs = len(self.documents.values())\n self.inverted_index = self.index_documents()\n\n def index_documents(self):\n Tokens_dict = defaultdict(list)\n tf_score = defaultdict(list)\n Posting_dict = defaultdict(list)\n\n for doc_id, doc in self.documents.items():\n tokens = doc.split()\n for tok in tokens:\n Tokens_dict[doc_id].append(tok)\n Tokens_counter = {doc_id: Counter(doc) for doc_id, doc in Tokens_dict.items()}\n\n for doc_id, counter in Tokens_counter.items():\n for token, tf in counter.items():\n tf_score[token].append((doc_id, tf))\n\n for token in tf_score.keys():\n df = len(tf_score[token])\n for doc_id, tf in tf_score[token]:\n tfidf_value = ceil((1.0 + log(1.0 + log(tf)))) * ceil((1.0 + log(self.ndocs / (1 + df))))\n Posting_dict[token].append((doc_id, tfidf_value))\n\n for token in tf_score:\n Posting_dict[token].sort()\n\n return Posting_dict\n\n def get_inverted_index(self):\n return 
self.inverted_index\n\n","repo_name":"jienhui9407/WAND-Algorithm","sub_path":"Inv_Index.py","file_name":"Inv_Index.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"80"} +{"seq_id":"4503686247","text":"import torch\nfrom torch import nn\nfrom typing import Tuple\n\nfrom networks.basic_nn import BasicNN\nimport networks.common_layers as cl\n\nVGG_11 = (\n    (1, 64), (1, 128), (2, 256), (2, 512), (2, 512)\n)\n\n\nclass VGG(BasicNN):\n    required_shape = (224, 224)\n\n    def __init__(self, in_channels: int, out_feature: int,\n                 conv_arch: Tuple[int, int] = VGG_11,\n                 device: torch.device = 'cpu') -> None:\n        conv_blks = [\n            cl.Reshape(VGG.required_shape),\n            nn.BatchNorm2d(in_channels)\n        ]\n        for (num_convs, out_channels) in conv_arch:\n            conv_blks += [\n                cl.VGGBlock(num_convs, in_channels, out_channels),\n            ]\n            in_channels = out_channels\n        # suitable for Vortex\n        conv_blks += [\n            nn.Flatten(),\n            # nn.BatchNorm1d(in_channels * 7 * 7),\n            nn.Linear(in_channels * 7 * 7, 4096), nn.ReLU(),\n            nn.Dropout(0.5),\n            nn.Linear(4096, 4096), nn.ReLU(),\n            nn.Dropout(0.5),\n            nn.Linear(4096, out_feature),\n            nn.Softmax(dim=1)\n        ]\n        super().__init__(device, *conv_blks)\n","repo_name":"StoneInHisWorld/d2l-learning","sub_path":"networks/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"40619072016","text":"from tg import TGController\nfrom tgext.crud import EasyCrudRestController\nfrom .base import CrudTest, Movie, DBSession, metadata, Genre, Actor\n\nimport transaction\n\n\nclass TestRestJsonReadDictified(CrudTest):\n    \"\"\"\n    Tests for GET requests with dictification enabled; these rely on the\n    sprox provider dictify function to also resolve relationships.\n    \"\"\"\n\n    def controller_factory(self):\n        class MovieController(EasyCrudRestController):\n            model = Movie\n            pagination = {'items_per_page': 3}\n            json_dictify = True\n\n        class ActorController(EasyCrudRestController):\n            model = Actor\n            json_dictify = True\n\n        class RestJsonController(TGController):\n            movies = MovieController(DBSession)\n            actors = ActorController(DBSession)\n\n        return RestJsonController()\n\n    def setUp(self):\n        super(TestRestJsonReadDictified, self).setUp()\n        genre = Genre(name='action')\n        DBSession.add(genre)\n\n        actors = [Actor(name='James Who'), Actor(name='John Doe'), Actor(name='Man Alone')]\n        list(map(DBSession.add, actors))\n\n        DBSession.add(Movie(title='First Movie', genre=genre, actors=actors[:2]))\n        DBSession.add(Movie(title='Second Movie', genre=genre))\n        DBSession.add(Movie(title='Third Movie', genre=genre))\n        DBSession.add(Movie(title='Fourth Movie', genre=genre))\n        DBSession.add(Movie(title='Fifth Movie'))\n        DBSession.add(Movie(title='Sixth Movie'))\n        DBSession.flush()\n        transaction.commit()\n\n    def test_get_all(self):\n        result = self.app.get('/movies.json?order_by=movie_id')\n        result = result.json['value_list']\n        assert result['total'] == 6, result\n        assert result['page'] == 1, result\n        assert result['entries'][0]['title'] == 'First Movie', result\n        assert len(result['entries'][0]['actors']) == 2, result\n\n        result = self.app.get('/movies.json?page=2&order_by=movie_id')\n        result = result.json['value_list']\n        assert result['total'] == 6, result\n        assert result['page'] == 2, result\n        assert result['entries'][0]['title'] == 'Fourth Movie', result\n\n    def test_get_all_filter(self):\n        actor = DBSession.query(Actor).first()\n\n        result = 
self.app.get('/actors.json?movie_id=%s' % actor.movie_id)\n result = result.json['value_list']\n assert result['total'] == 2, result\n\n def test_get_all___json__(self):\n actor = DBSession.query(Actor).filter(Actor.movie_id!=None).first()\n movie_title = actor.movie.title\n\n result = self.app.get('/actors.json?movie_id=%s' % actor.movie_id)\n result = result.json['value_list']\n assert result['total'] > 0, result\n\n for entry in result['entries']:\n assert entry['movie_title'] == movie_title\n\n def test_get_one(self):\n movie = DBSession.query(Movie).first()\n movie_actors_count = len(movie.actors)\n\n result = self.app.get('/movies/%s.json' % movie.movie_id)\n result = result.json\n assert result['model'] == 'Movie', result\n assert result['value']['title'] == movie.title\n assert result['value']['movie_id'] == movie.movie_id\n assert len(result['value']['actors']) == movie_actors_count\n\n def test_get_one___json__(self):\n actor = DBSession.query(Actor).filter(Actor.movie_id!=None).first()\n movie_title = actor.movie.title\n\n result = self.app.get('/actors/%s.json' % actor.actor_id)\n result = result.json\n assert result['model'] == 'Actor', result\n assert result['value']['name'] == actor.name\n assert result['value']['movie_title'] == movie_title\n","repo_name":"TurboGears/tgext.crud","sub_path":"tests/test_rest_json_dictified.py","file_name":"test_rest_json_dictified.py","file_ext":"py","file_size_in_byte":3717,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"80"} +{"seq_id":"361427966","text":"from numpy import log, exp, max, where, abs, arange, pi, sqrt, random, array, inf, diag\nfrom scipy.optimize import leastsq\n\ndef pVoigt(x, p):\n maximum = p[0]\n pos = p[1]\n FWHM = p[2]\n eta = p[3]\n a = p[4]\n b = p[5]\n gauss = maximum * exp(-log(2.) * ((x-pos)/(0.5*FWHM))**2)\n lorentz = maximum / (1. + ((x - pos)/(0.5*FWHM))**2)\n return eta*lorentz + (1-eta)*gauss + a*x + b\n\ndef guess_param(x,y):\n a = 0\n b = y.min()\n maximum = y.max()\n pos = x[y==maximum][0]\n d=y-(maximum/2.) - b/2.\n indexes = where(d > 0)[0]\n FWHM = abs(x[indexes[-1]] - x[indexes[0]])\n eta = 0.5\n return array([maximum-b, pos, FWHM, eta, a, b])\n\ndef pVoigt_area(p):\n maximum = p[0]\n FWHM = p[2]\n eta = p[3]\n beta = (eta*pi*FWHM/2.) + (1-eta)*(FWHM/2.)*sqrt(pi/log(2))\n return beta*maximum\n\ndef pVoigt_area_err(p, perr):\n maximum = p[0]\n dmax = perr[0]\n FWHM = p[2]\n dFWHM = perr[2]\n eta = p[3]\n deta = perr[3]\n\n dbeta2 = ((eta*pi*dFWHM/2.)**2) + ((deta*pi*FWHM/2.)**2) + (((1-eta)*(dFWHM/2.)*sqrt(pi/log(2)))**2) + ((deta*(FWHM/2.)*sqrt(pi/log(2)))**2)\n\n beta = (eta*pi*FWHM/2.) 
+ (1-eta)*(FWHM/2.)*sqrt(pi/log(2))\n    area = beta*maximum\n    err_area = area*sqrt((dmax/maximum)**2 + (dbeta2/(beta**2)))\n\n    return err_area\n\ndef pVfit_param(x,y):\n    errfunc = lambda p, x, y: (pVoigt(x, p) - y)#/(y**0.5)\n    p0 = guess_param(x,y)\n    p1 = leastsq(errfunc, p0[:], args=(x, y))[0]\n    return p1\n\ndef pVfit_param_err(x,y):\n    errfunc = lambda p, x, y: (pVoigt(x, p) - y)#/(y**0.5)\n    p0 = guess_param(x,y)\n    p1, pcov, infodict, errmsg, success = leastsq(errfunc, p0[:], args=(x, y), full_output=1)\n\n    #compute esd from fit\n    if (len(y) > len(p0)) and pcov is not None:\n        s_sq = (errfunc(p1, x, y)**2).sum()/(len(y)-len(p0))\n        pcov = pcov * s_sq\n    else:\n        pcov = inf\n\n    error = []\n    for i in range(len(p1)):\n        try:\n            error.append(abs(pcov[i,i])**0.5)\n        except:\n            error.append( 0.00 )\n\n    return p1, error","repo_name":"aboulle/DxTools","sub_path":"misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"80"} +{"seq_id":"72234525378","text":"# import required module\nfrom pathlib import Path\nimport os.path\n\n \n# get the path/directory\n#folder_dir = 'D:\\Git\\Salmon\\Dataset\\Autodetect\\'\nfolder_dir = Path('D:\\\\Git\\\\Salmon\\\\Dataset\\\\Autodetect\\\\')\n \n# iterate over files in\n# that directory\nxmls = Path(folder_dir).glob('*.xml')\nfor xml in xmls:\n    #print(xml)\n    jpgfile = str(Path(xml).parent.absolute())+\"\\\\\"+str(Path(xml).stem)+\".jpg\"\n    #print(jpgfile)\n    if not os.path.exists(jpgfile):\n        print(\"Warning\")\n        Path.unlink(xml) \n\njpgs = Path(folder_dir).glob('*.jpg')\nfor jpg in jpgs:\n    #print(jpg)\n    xmlfile = str(Path(jpg).parent.absolute())+\"\\\\\"+str(Path(jpg).stem)+\".xml\"\n    print(xmlfile)\n    if not os.path.exists(xmlfile):\n        print(\"Warning\")\n        print(xmlfile)\n\n    # MOVE THE FILE Path.unlink(xml) \n\n","repo_name":"jarleven/Salmon","sub_path":"Dataset/Autodetect/cleanxml.py","file_name":"cleanxml.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"16457618542","text":"import string\n\n\nwith open(\"input.txt\", \"r\") as file:\n    res = []\n    for line in file.readlines():\n        length = len(line)\n        a, b = line[: length // 2], line[length // 2 :]\n\n        for char in a:\n            if char in b:\n                res.append(char)\n                break\n\n    print(sum(map(lambda x: string.ascii_letters.index(x) + 1, res)))\n\nwith open(\"input.txt\", \"r\") as file:\n    res = []\n    lines = file.readlines()\n    for i in range(0, len(lines), 3):\n        block = list(map(lambda x: x.strip(), lines[i : i + 3]))\n        for char in block[0]:\n            if char in block[1] and char in block[2]:\n                res.append(char)\n                break\n\n    print(sum(map(lambda x: string.ascii_letters.index(x) + 1, res)))\n","repo_name":"Pizza989/Advent-of-Code","sub_path":"2022/day3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"5467061422","text":"from .types import (\n    OrganizationIntegrationRepository,\n)\nfrom db_model import (\n    TABLE,\n)\nfrom db_model.organizations.utils import (\n    remove_org_id_prefix,\n)\nfrom db_model.utils import (\n    get_as_utc_iso_format,\n)\nfrom dynamodb import (\n    keys,\n    operations,\n)\n\n\nasync def update_unreliable_repositories(\n    *,\n    repository: OrganizationIntegrationRepository,\n) -> None:\n    organization_id = remove_org_id_prefix(repository.organization_id)\n    key_structure = TABLE.primary_key\n    
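# Build the composite partition/sort key from the organization id, repository hash and branch.\n    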
primary_key = keys.build_key(\n facet=TABLE.facets[\"organization_unreliable_integration_repository\"],\n values={\n \"id\": organization_id,\n \"hash\": repository.id,\n \"branch\": repository.branch.lower(),\n },\n )\n item = {\n key_structure.partition_key: primary_key.partition_key,\n key_structure.sort_key: primary_key.sort_key,\n \"branch\": repository.branch,\n \"last_commit_date\": get_as_utc_iso_format(repository.last_commit_date)\n if repository.last_commit_date\n else None,\n \"url\": repository.url,\n }\n\n await operations.put_item(\n facet=TABLE.facets[\"organization_unreliable_integration_repository\"],\n item=item,\n table=TABLE,\n )\n","repo_name":"jeanbaptistemora/fluidattacks-universe2","sub_path":"integrates/back/src/db_model/integration_repositories/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"73527767617","text":"class RNN_plus_v1_cell(tf.keras.layers.LSTMCell):\n def __init__(self, units, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', dropout=0., recurrent_dropout=0., use_bias=True, **kwargs):\n if units < 0:\n raise ValueError(f'Received an invalid value for argument `units`, '\n f'expected a positive integer, got {units}.')\n # By default use cached variable under v2 mode, see b/143699808.\n if tf.compat.v1.executing_eagerly_outside_functions():\n self._enable_caching_device = kwargs.pop('enable_caching_device', True)\n else:\n self._enable_caching_device = kwargs.pop('enable_caching_device', False)\n super(RNN_plus_v1_cell, self).__init__(units, **kwargs)\n self.units = units\n self.state_size = self.units\n self.output_size = self.units\n \n self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self.recurrent_initializer = tf.keras.initializers.get(recurrent_initializer)\n self.bias_initializer = tf.keras.initializers.get(bias_initializer)\n \n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_size = [self.units, self.units]\n self.output_size = self.units\n self.use_bias = True\n \n def build(self, input_shape):\n input_dim = input_shape[-1]\n self.kernel = self.add_weight(shape=(input_dim, self.units), name='w_input', initializer=self.kernel_initializer, regularizer=None, constraint=None)\n self.recurrent_kernel = self.add_weight(shape=(self.units, self.units), name='w_otherpeeps', initializer=self.recurrent_initializer, regularizer=None, constraint=None)\n self.outputs_kernel = self.add_weight(shape=(self.units, self.units), name='w_outputs', initializer=self.recurrent_initializer, regularizer=None, constraint=None)\n self.bias = self.add_weight( shape=(self.units,), name='b', initializer=self.bias_initializer, regularizer=None, constraint=None) if self.use_bias else None\n self.built = True\n \n def call(self, inputs, states, training=None):\n prev_output, state0 = states[0], states[1]\n \n # w_in = tf.linalg.set_diag(self.kernel, np.zeros((self.units,), dtype=int))\n w_in = self.kernel\n w_out = tf.linalg.set_diag(self.outputs_kernel, np.zeros((self.units,), dtype=int))\n w_state = tf.linalg.set_diag(self.recurrent_kernel, np.zeros((self.units,), dtype=int))\n \n inputs = tf.keras.backend.dot(inputs, w_in)\n if self.bias is not None:\n inputs = tf.keras.backend.bias_add(inputs, self.bias)\n \n prev_output = tf.keras.backend.dot(prev_output, w_out)\n op0 = 
tf.keras.backend.dot(state0, w_state)\n \n output = srelu(prev_output - tf.nn.relu(inputs * state0))\n state0 = srelu(tf.nn.relu(inputs) - op0)\n return output, [output, state0]\n \n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return list(_generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype))","repo_name":"AchillesProject/EvolvedNeuron_for_HAR","sub_path":"pythons/best7.py","file_name":"best7.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"44761841668","text":"r\"\"\"\nCGLS Solver\n===========\n\nThis example shows how to use the :py:func:`pylops.optimization.leastsquares.cgls`\nsolver to minimize the following cost function:\n\n.. math::\n J = || \\mathbf{y} - \\mathbf{Ax} ||_2^2 + \\epsilon || \\mathbf{x} ||_2^2\n\n\"\"\"\n\nimport warnings\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport pylops\n\nplt.close('all')\nwarnings.filterwarnings('ignore')\n\n###############################################################################\n# Let's define a matrix :math:`\\mathbf{A}` or size (``N`` and ``M``) and\n# fill the matrix with random numbers\n\nN, M = 20, 10\nA = np.random.normal(0, 1, (N, M))\nAop = pylops.MatrixMult(A, dtype='float64')\n\nx = np.ones(M)\n\n###############################################################################\n# We can now use the cgls solver to invert this matrix\n\ny = Aop * x\nxest, istop, nit, r1norm, r2norm, cost = \\\n pylops.optimization.solver.cgls(Aop, y, x0=np.zeros_like(x),\n niter=10, tol=1e-10, show=True)\n\nprint('x= %s' % x)\nprint('cgls solution xest= %s' % xest)\n\nplt.figure(figsize=(12, 3))\nplt.plot(cost, 'k', lw=2)\nplt.title('Cost function')\n\n###############################################################################\n# Note that while we used a dense matrix here, any other linear operator\n# can be fed to cgls as is the case for any other PyLops solver.\n","repo_name":"hazwanh/pylops","sub_path":"examples/plot_cgls.py","file_name":"plot_cgls.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"80"} +{"seq_id":"33062383286","text":"# -*- coding: utf-8 -*-\nfrom Plugins.Extensions.MediaPortal.plugin import _\nfrom Plugins.Extensions.MediaPortal.resources.imports import *\n\nclass txxxcrypt:\n\n\tdef getVideoPage(self, data):\n\t\ttry:\n\t\t\timport execjs\n\t\t\tnode = execjs.get(\"Node\")\n\t\texcept:\n\t\t\tprintl('nodejs not found',self,'E')\n\t\t\tself.session.open(MessageBoxExt, _(\"This plugin requires packages python-pyexecjs and nodejs.\"), MessageBoxExt.TYPE_INFO)\n\t\t\treturn\n\t\tdecoder = \"decrypt=function(_0xf4bdx6) {\"\\\n\t\t\t\"var _0xf4bdx7 = '',\"\\\n\t\t\t\" _0xf4bdx8 = 0;\"\\\n\t\t\t\"/[^\\u0410\\u0412\\u0421\\u0415\\u041cA-Za-z0-9\\.\\,\\~]/g ['exec'](_0xf4bdx6) && console['log']('error decoding url');\"\\\n\t\t\t\"_0xf4bdx6 = _0xf4bdx6['replace'](/[^\\u0410\\u0412\\u0421\\u0415\\u041cA-Za-z0-9\\.\\,\\~]/g, '');\"\\\n\t\t\t\"do {\"\\\n\t\t\t\"var _0xf4bdx9 = '\\u0410\\u0412\\u0421D\\u0415FGHIJKL\\u041CNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' ['indexOf'](_0xf4bdx6['charAt'](_0xf4bdx8++)),\"\\\n\t\t\t\"_0xf4bdxa = '\\u0410\\u0412\\u0421D\\u0415FGHIJKL\\u041CNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' ['indexOf'](_0xf4bdx6['charAt'](_0xf4bdx8++)),\"\\\n\t\t\t\"_0xf4bdxb = 
'\\u0410\\u0412\\u0421D\\u0415FGHIJKL\\u041CNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' ['indexOf'](_0xf4bdx6['charAt'](_0xf4bdx8++)),\"\\\n\t\t\t\"_0xf4bdxc = '\\u0410\\u0412\\u0421D\\u0415FGHIJKL\\u041CNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,~' ['indexOf'](_0xf4bdx6['charAt'](_0xf4bdx8++)),\"\\\n\t\t\t\"_0xf4bdx9 = _0xf4bdx9 << 2 | _0xf4bdxa >> 4,\"\\\n\t\t\t\"_0xf4bdxa = (_0xf4bdxa & 15) << 4 | _0xf4bdxb >> 2,\"\\\n\t\t\t\"_0xf4bdxd = (_0xf4bdxb & 3) << 6 | _0xf4bdxc,\"\\\n\t\t\t\"_0xf4bdx7 = _0xf4bdx7 + String['fromCharCode'](_0xf4bdx9);\"\\\n\t\t\t\"64 != _0xf4bdxb && (_0xf4bdx7 += String['fromCharCode'](_0xf4bdxa));\"\\\n\t\t\t\"64 != _0xf4bdxc && (_0xf4bdx7 += String['fromCharCode'](_0xf4bdxd))\"\\\n\t\t\t\"} while (_0xf4bdx8 < _0xf4bdx6['length']);;\"\\\n\t\t\t\"return unescape(_0xf4bdx7)\"\\\n\t\t\t\"};\"\n\t\tvideo_url = re.findall('var video_url\\s{0,1}={0,1}(.*?);', data, re.S)\n\t\thash = re.findall('video_url\\s{0,1}\\+\\=\\s{0,1}(?:\\\"|\\')\\|\\|/get_file/(\\d+/[a-f0-9]+)/', data, re.S)\n\t\thash2 = re.findall('video_url\\s{0,1}\\+\\=\\s{0,1}(?:\\\"|\\')\\|\\|/get_file/(\\d+/[a-f0-9]+)/\\|\\|(.*?)\\|\\|(.*?)(?:\\\"|\\');', data, re.S)\n\t\tjs = decoder + \"\\n\" + 'video_url=decrypt('+video_url[0]+');' + \"return video_url;\"\n\t\turl = str(node.exec_(js))\n\t\tif hash:\n\t\t\tmainurl = url.split('get_file/')[0]\n\t\t\ttokenurl = url.split('get_file/')[1]\n\t\t\ttokenurl = tokenurl.replace(tokenurl.split('/')[0]+'/'+tokenurl.split('/')[1],hash[0])\n\t\t\turl = mainurl + \"get_file/\" + tokenurl\n\t\tif hash2:\n\t\t\turl = url + '&lip=' + hash2[0][1] +'<=' + hash2[0][2]\n\t\turl = url.replace('https','http')\n\t\tself.playVideo(url)","repo_name":"schleichdi2/OpenNfr_E2_Gui-6.4","sub_path":"lib/python/Plugins/Extensions/MediaPortal/resources/txxxcrypt.py","file_name":"txxxcrypt.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"22565774731","text":"# _*_ coding:utf-8 _*_\nimport copy\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom layers.decoders.pytorch_crf import CRF\nfrom layers.encoders.rnns.stacked_rnn import StackedBRNN\n\nwarnings.filterwarnings(\"ignore\")\n\n\nclass SentenceEncoder(nn.Module):\n def __init__(self, args, embed_size):\n super(SentenceEncoder, self).__init__()\n rnn_type = nn.LSTM if args.rnn_encoder == 'lstm' else nn.GRU\n self.encoder = StackedBRNN(\n input_size=embed_size,\n hidden_size=args.hidden_size,\n num_layers=args.num_layers,\n dropout_rate=args.dropout,\n dropout_output=True,\n concat_layers=False,\n rnn_type=rnn_type,\n padding=True\n )\n\n def forward(self, input, mask):\n return self.encoder(input, mask)\n\n\nclass NERNet(nn.Module):\n \"\"\"\n NERNet : Lstm+CRF\n \"\"\"\n\n def __init__(self, args, model_conf):\n super(NERNet, self).__init__()\n char_emb = model_conf['char_emb']\n bichar_emb = model_conf['bichar_emb']\n embed_size = args.char_emb_dim\n if char_emb is not None:\n # self.char_emb = nn.Embedding.from_pretrained(char_emb, freeze=False, padding_idx=0)\n\n self.char_emb = nn.Embedding(num_embeddings=char_emb.shape[0], embedding_dim=char_emb.shape[1],\n padding_idx=0, _weight=char_emb)\n self.char_emb.weight.requires_grad = True\n embed_size = char_emb.size()[1]\n else:\n vocab_size = len(model_conf['char_vocab'])\n self.char_emb = nn.Embedding(num_embeddings=vocab_size, embedding_dim=args.char_emb_dim,\n padding_idx=0)\n self.bichar_emb = None\n if 
bichar_emb is not None:\n            # self.bichar_emb = nn.Embedding.from_pretrained(bichar_emb, freeze=False, padding_idx=0)\n            self.bichar_emb = nn.Embedding(num_embeddings=bichar_emb.shape[0], embedding_dim=bichar_emb.shape[1],\n                                           padding_idx=0, _weight=bichar_emb)\n            self.bichar_emb.weight.requires_grad = True\n\n            embed_size += bichar_emb.size()[1]\n\n        self.drop = nn.Dropout(p=0.5)\n        # self.sentence_encoder = SentenceEncoder(args, embed_size)\n        self.sentence_encoder = nn.LSTM(embed_size, args.hidden_size, num_layers=1, batch_first=True,\n                                        bidirectional=True)\n        self.emission = nn.Linear(args.hidden_size * 2, len(model_conf['entity_type']))\n        self.crf = CRF(len(model_conf['entity_type']), batch_first=True)\n\n    def forward(self, char_id, bichar_id, label_id=None, is_eval=False):\n        # use anti-mask for answers-locator\n        mask = char_id.eq(0)\n        chars = self.char_emb(char_id)\n\n        if self.bichar_emb is not None:\n            bichars = self.bichar_emb(bichar_id)\n            chars = torch.cat([chars, bichars], dim=-1)\n        chars = self.drop(chars)\n\n        # sen_encoded = self.sentence_encoder(chars, mask)\n        sen_encoded, _ = self.sentence_encoder(chars)\n        sen_encoded = self.drop(sen_encoded)\n\n        bio_mask = char_id != 0\n        emission = self.emission(sen_encoded)\n        emission = F.log_softmax(emission, dim=-1)\n\n        if not is_eval:\n            crf_loss = -self.crf(emission, label_id, mask=bio_mask, reduction='mean')\n            return crf_loss\n        else:\n            pred = self.crf.decode(emissions=emission, mask=bio_mask)\n\n            # TODO:check\n            max_len = char_id.size(1)\n            temp_tag = copy.deepcopy(pred)\n            for line in temp_tag:\n                line.extend([0] * (max_len - len(line)))\n            ent_pre = torch.tensor(temp_tag).to(emission.device)\n            return ent_pre\n","repo_name":"loujie0822/DeepIE","sub_path":"models/ner_net/lstm_crf.py","file_name":"lstm_crf.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":1885,"dataset":"github-code","pt":"80"} +{"seq_id":"30212029792","text":"import tkinter\nfrom tkinter import ttk, END\n\nroot = tkinter.Tk()\nroot.title('Metric Helper')\nroot.iconbitmap('ruler.ico')\nroot.resizable(False, False)\n\n# define fonts and colors\nfield_font = ('Arial', 10)\nbg_color = \"#c75c5c\"\nbutton_color = \"#f5cf87\"\nroot.config(bg=bg_color)\n\n# define functions\ndef convert():\n    metric_values = {\n        'femto': 10**-15,\n        'pico': 10**-12,\n        'nano': 10**-9,\n        'micro': 10**-6,\n        'milli': 10**-3,\n        'centi': 10**-2,\n        'deci': 10**-1,\n        'base value': 10**0,\n        'deca': 10**1,\n        'hecto': 10**2,\n        'kilo': 10**3,\n        'mega': 10**6,\n        'giga': 10**9,\n        'tera': 10**12,\n        'peta': 10**15\n    }\n    start_value = float(input_field.get())\n    start_prefix = input_combobox.get()\n    end_prefix = output_combobox.get()\n\n    # convert to the base unit first\n    base_value = start_value * metric_values[start_prefix]\n    end_value = base_value/metric_values[end_prefix]\n\n    output_field.delete(0, END)\n    output_field.insert(0, str(end_value))\n\n\n# define layout\ninput_field = tkinter.Entry(root, width=20, font=field_font, borderwidth=3)\noutput_field = tkinter.Entry(root, width=20, font=field_font, borderwidth=3)\nequal_label = tkinter.Label(root, text=\"=\", font=field_font, bg=bg_color)\n\ninput_field.grid(row=0, column=0, padx=10, pady=10)\nequal_label.grid(row=0, column=1, padx=10, pady=10)\noutput_field.grid(row=0, column=2, padx=10, pady=10)\n\ninput_field.insert(0, 'Enter your quantity')\n\n# create combobox for metric values\nto_label = tkinter.Label(root, text=\" to \", font=field_font, bg=bg_color)\nmetric_list = [\"femto\", \"pico\", \"nano\", \"micro\", 
\"milli\", \"centi\", \"deci\", \"base value\", \"deca\", \"hecto\", \"kilo\", \"mega\", \"giga\", \"tera\", \"peta\"]\ninput_combobox = ttk.Combobox(root, value=metric_list, font=field_font, justify='center')\noutput_combobox = ttk.Combobox(root, value=metric_list, font=field_font, justify='center')\ninput_combobox.grid(row=1, column=0, padx=10, pady=10)\nto_label.grid(row=1, column=1, padx=10, pady=10)\noutput_combobox.grid(row=1, column=2, padx=10, pady=10)\n\ninput_combobox.set(\"base value\")\noutput_combobox.set(\"base value\")\n\n\n\n\n# create conversion buttons\nconvert_button = tkinter.Button(root, text='Convert', font=field_font, bg=button_color, command=convert)\nconvert_button.grid(row=2, column=0, columnspan=3, padx=10, pady=10, ipadx=50)\n\n\n\nroot.mainloop()","repo_name":"DariuszOkonski/metric-helper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"1420964988","text":"\"\"\"\n\"\"\"\n\nimport logging\nimport pathlib\nfrom unittest.mock import Mock\n\nimport pytest\n\nfrom squid_py.ocean.asset import Asset\nfrom squid_py.ddo import DDO\nimport squid_py.ocean.ocean as ocean\n\n# Disable low level loggers\nfrom squid_py.service_agreement.service_factory import ServiceDescriptor\n\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\nlogging.getLogger(\"web3\").setLevel(logging.WARNING)\n\n\ndef test_create_asset_ddo_file():\n # An asset can be created directly from a DDO .json file\n sample_ddo_path = pathlib.Path.cwd() / 'tests/resources/ddo' / 'ddo_sample1.json'\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n asset1 = Asset.from_ddo_json_file(sample_ddo_path)\n\n assert isinstance(asset1.ddo, DDO)\n assert asset1.ddo.is_valid\n\n assert asset1.has_metadata\n print(asset1.metadata)\n\n\ndef test_register_data_asset_market(publisher_ocean_instance, consumer_ocean_instance):\n \"\"\"\n Setup accounts and asset, register this asset in Keeper node (On-chain only)\n \"\"\"\n pub_ocn = publisher_ocean_instance\n cons_ocn = consumer_ocean_instance\n logging.debug(\"\".format())\n asset_price = 100\n sample_ddo_path = pathlib.Path.cwd() / 'tests/resources/ddo' / 'ddo_sample1.json'\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n ##########################################################\n # Setup 2 accounts\n ##########################################################\n aquarius_acct = pub_ocn.main_account\n consumer_acct = cons_ocn.main_account\n\n # ensure Ocean token balance\n if aquarius_acct.ocean_balance == 0:\n rcpt = aquarius_acct.request_tokens(200)\n pub_ocn._web3.eth.waitForTransactionReceipt(rcpt)\n if consumer_acct.ocean_balance == 0:\n rcpt = consumer_acct.request_tokens(200)\n cons_ocn._web3.eth.waitForTransactionReceipt(rcpt)\n\n # You will need some token to make this transfer!\n assert aquarius_acct.ocean_balance > 0\n assert consumer_acct.ocean_balance > 0\n\n ##########################################################\n # Create an Asset with valid metadata\n ##########################################################\n\n asset = Asset.from_ddo_json_file(sample_ddo_path)\n\n ##########################################################\n # Register\n ##########################################################\n # The asset requires an ID before registration!\n # Hack, clear the did to allow generating a new one\n asset.ddo._did = None\n 
asset.generate_did()\n\n # Call the Register function\n result = pub_ocn.keeper.market.register_asset(asset, asset_price, aquarius_acct.address)\n\n # Check exists\n chain_asset_exists = pub_ocn.keeper.market.check_asset(asset.asset_id)\n logging.info(\"check_asset = {}\".format(chain_asset_exists))\n assert chain_asset_exists\n\n # Check price\n chain_asset_price = pub_ocn.keeper.market.get_asset_price(asset.asset_id)\n assert asset_price == chain_asset_price\n logging.info(\"chain_asset_price = {}\".format(chain_asset_price))\n\n\ndef test_publish_data_asset_aquarius(publisher_ocean_instance, consumer_ocean_instance):\n \"\"\"\n Setup accounts and asset, register this asset on Aquarius (MetaData store)\n \"\"\"\n pub_ocn = publisher_ocean_instance\n cons_ocn = consumer_ocean_instance\n\n logging.debug(\"\".format())\n sample_ddo_path = pathlib.Path.cwd() / 'tests/resources/ddo' / 'ddo_sample1.json'\n assert sample_ddo_path.exists(), \"{} does not exist!\".format(sample_ddo_path)\n\n ##########################################################\n # Setup 2 accounts\n ##########################################################\n aquarius_acct = pub_ocn.main_account\n consumer_acct = cons_ocn.main_account\n\n # ensure Ocean token balance\n if aquarius_acct.ocean_balance == 0:\n rcpt = aquarius_acct.request_tokens(200)\n pub_ocn._web3.eth.waitForTransactionReceipt(rcpt)\n if consumer_acct.ocean_balance == 0:\n rcpt = consumer_acct.request_tokens(200)\n cons_ocn._web3.eth.waitForTransactionReceipt(rcpt)\n\n # You will need some token to make this transfer!\n assert aquarius_acct.ocean_balance > 0\n assert consumer_acct.ocean_balance > 0\n\n ##########################################################\n # Create an Asset with valid metadata\n ##########################################################\n asset = Asset.from_ddo_json_file(sample_ddo_path)\n\n ##########################################################\n # List currently published assets\n ##########################################################\n meta_data_assets = pub_ocn.metadata_store.list_assets()\n if meta_data_assets:\n print(\"Currently registered assets:\")\n print(meta_data_assets)\n\n if asset.did in meta_data_assets:\n pub_ocn.metadata_store.get_asset_metadata(asset.did)\n pub_ocn.metadata_store.retire_asset_metadata(asset.did)\n # Publish the metadata\n this_metadata = pub_ocn.metadata_store.publish_asset_metadata(asset.ddo)\n\n print(\"Publishing again should raise error\")\n with pytest.raises(ValueError):\n this_metadata = pub_ocn.metadata_store.publish_asset_metadata(asset.ddo)\n\n # TODO: Ensure returned metadata equals sent!\n # get_asset_metadata only returns 'base' key, is this correct?\n published_metadata = cons_ocn.metadata_store.get_asset_metadata(asset.ddo.did)\n\n assert published_metadata\n # only compare top level keys\n # assert sorted(list(asset.metadata['base'].keys())) == sorted(list(published_metadata['base'].keys()))\n # asset.metadata == published_metadata\n","repo_name":"shekhar-shubhendu/squid-py","sub_path":"tests/test_assets.py","file_name":"test_assets.py","file_ext":"py","file_size_in_byte":5666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"72919753218","text":"from odoo import fields, models,api,_\nfrom uuid import uuid4\nimport qrcode\nimport base64\nimport logging\n\nfrom lxml import etree\n\n\n\n\nclass AccountMove(models.Model):\n _inherit = \"account.move\"\n\n work_order = fields.Char()\n date_of_supply = fields.Date()\n 
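# Additional supply and reference details stored on the invoice record.\n    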
place_of_supply = fields.Char()\n po_number = fields.Char()\n delivery_no = fields.Char()\n\n def _ubl_add_attachments(self, parent_node, ns, version=\"2.1\"):\n self.ensure_one()\n self.billing_refence(parent_node, ns, version=\"2.1\")\n # if self.decoded_data:\n self.testing()\n self.qr_code(parent_node, ns, version=\"2.1\")\n self.qr_1code(parent_node, ns, version=\"2.1\")\n self.pih_code(parent_node, ns, version=\"2.1\")\n\n # self.signature_refence(parent_node, ns, version=\"2.1\")\n # if self.company_id.embed_pdf_in_ubl_xml_invoice and not self.env.context.get(\n # \"no_embedded_pdf\"\n # ):\n # self.signature_refence(parent_node, ns, version=\"2.1\")\n filename = \"Invoice-\" + self.name + \".pdf\"\n docu_reference = etree.SubElement(\n parent_node, ns[\"cac\"] + \"AdditionalDocumentReference\"\n )\n docu_reference_id = etree.SubElement(docu_reference, ns[\"cbc\"] + \"ID\")\n docu_reference_id.text = filename\n attach_node = etree.SubElement(docu_reference, ns[\"cac\"] + \"Attachment\")\n binary_node = etree.SubElement(\n attach_node,\n ns[\"cbc\"] + \"EmbeddedDocumentBinaryObject\",\n mimeCode=\"application/pdf\",\n filename=filename,\n )\n ctx = dict()\n ctx[\"no_embedded_ubl_xml\"] = True\n ctx[\"force_report_rendering\"] = True\n # pdf_inv = (\n # self.with_context(ctx)\n # .env.ref(\"account.account_invoices\")\n # ._render_qweb_pdf(self.ids)[0]\n # )\n ########changed########################\n pdf_inv = self.with_context(ctx).env.ref(\n 'account_invoice_ubl.account_invoices_1')._render_qweb_pdf(self.ids)[0]\n pdf_inv = self.with_context(ctx).env.ref(\n 'account_invoice_ubl.account_invoices_b2b')._render_qweb_pdf(self.ids)[0]\n pdf_inv = self.with_context(ctx).env.ref(\n 'account_invoice_ubl.account_invoices_b2b_credit')._render_qweb_pdf(self.ids)[0]\n # pdf_inv = self.with_context(ctx).env.ref(\n # 'account_invoice_ubl.account_invoices_b2b_debit')._render_qweb_pdf(self.ids)[0]\n pdf_inv = self.with_context(ctx).env.ref(\n 'account_invoice_ubl.account_invoices_b2c')._render_qweb_pdf(self.ids)[0]\n pdf_inv = self.with_context(ctx).env.ref(\n 'account_invoice_ubl.account_invoices_b2c_credit')._render_qweb_pdf(self.ids)[0]\n # +++++++++++++++++++++++++++++++OUR CUSTOMES ADD HERE+++++++++++++++++++++++++++++++++++++\n # pdf_inv = self.with_context(ctx).env.ref(\n pdf_inv = \\\n self.with_context(ctx).env.ref('sbm_e_invoice.sbm_invoice_report')._render_qweb_pdf(\n self.ids)[0]\n\n # -----------------------------aboveeeeeeee---------------------------------\n\n binary_node.text = base64.b64encode(pdf_inv)\n # self.qr3_code(parent_node, ns, version=\"2.1\")\n\n # filename = \"ICV\"\n # icv_reference = etree.SubElement(\n # parent_node, ns[\"cac\"] + \"AdditionalDocumentReference\"\n # )\n # icv_reference_id = etree.SubElement(icv_reference, ns[\"cbc\"] + \"ID\")\n # icv_reference_id.text = filename\n # icv_reference_node = etree.SubElement(icv_reference, ns[\"cac\"] + \"UUID\")\n # icv_reference_node.text = self.name\n\n @api.model\n def _get_invoice_report_names(self):\n return [\n \"account.report_invoice\",\n \"account.report_invoice_with_payments\",\n \"account_invoice_ubl.report_invoice_1\",\n \"account_invoice_ubl.report_invoice_b2b\",\n \"account_invoice_ubl.report_invoice_b2b_credit\",\n # \"account_invoice_ubl.report_invoice_b2b_debit\",\n \"account_invoice_ubl.report_invoice_b2c\",\n \"account_invoice_ubl.report_invoice_b2c_credit\",\n # \"account_invoice_ubl.report_invoice_b2c_debit\",\n \"sbm_e_invoice.sbm_invoice_report\",\n\n ]\n\n\n\n # def disc(self):\n # for line in 
self.invoice_line_ids:\n # disct=line.discount\n # return disct\n\n check_amount_in_words = fields.Char()\n\n # # @api.depends('price_unit')\n # def ar_invoice_date(self):\n # m = str(self.invoice_date)\n # if m.split('-'):\n # interger_part_arabic = ''\n # for each in m.split('-'):\n # if interger_part_arabic:\n # interger_part_arabic = interger_part_arabic + '-'\n # interger_part_arabic += convert_numbers.english_to_arabic(int(each))\n #\n # print('interger_part_arabic')\n # return interger_part_arabic\n # # else:\n # # line.ar_invoice_date = \"-\"\n #\n #\n #\n #\n # def total_price(self):\n # for line in self.invoice_line_ids:\n # day_book = self.env['account.tax'].search(\n # [('name', '=', line.tax_ids.name), ('type_tax_use', '=', line.tax_ids.type_tax_use)])\n # print(day_book)\n # # total=sum(day_book.children_tax_ids.mapped('amount'))\n # # print(total)\n # # return total\n # amount = 0\n #\n # total = 0.0\n # tot = 0\n # amount = line.price_unit\n # total = total + day_book.amount / 100\n #\n # tot = amount * total\n #\n # amount=amount + tot\n # return amount\n\n\n\n\n\n\n def amount_words(self):\n # amount_total_words = self.currency_id.amount_to_text(self.amount_total)\n return self.currency_id.amount_to_text(self.amount_total)\n\n def calculate_discount(self):\n disct = 0\n for line in self.invoice_line_ids:\n disct = line.discount\n return disct\n\n def testing(self):\n # data = \"\\n BROTHERS GROUP55\\n\"+\"\\n300090000000003\\n\"+\"\\n2021-11-05T13:45:38\\n\"+\"\\n9.00'+'\\n1.18\"\n # data = \" BROTHERS GROUP 300090000000003 2021-11-08T13:45:38 9.00 1.18\"\n # data = \"\u0001\u0011\"+\"BROTHERS GROUP \"+\"\u0002\u000F\"+\"300090000000003\"+\"\u0003\u0013\"+\"2021-11-08\"+\"T\"+\"13:45:38\"+\"\u0004\u0004\"+\"9.00\"+\"\u0005\u0004\"+\"1.18\"\n leng = len(self.company_id.name)\n company_name = self.company_id.name\n if 17 > leng:\n for r in range(17 - leng):\n if len(company_name) != 17:\n company_name += ' '\n else:\n break\n else:\n if 17 < leng:\n company_name = company_name[:17]\n vat_leng = len(self.company_id.vat)\n vat_name = self.company_id.vat\n if 17 > vat_leng:\n for r in range(15 - vat_leng):\n if len(vat_name) != 15:\n vat_name += ' '\n else:\n break\n else:\n if 17 < leng:\n vat_name = vat_name[:17]\n\n amount_total = str(round(self.amount_total))\n amount_leng = len(str(round(self.amount_total)))\n if len(amount_total) < 17:\n for r in range(17 - amount_leng):\n if len(amount_total) != 17:\n amount_total += ' '\n else:\n break\n\n tax_leng = len(str(round(self.amount_tax)))\n amount_tax_total = str(round(self.amount_tax))\n if len(amount_tax_total) < 17:\n for r in range(17 - tax_leng):\n if len(amount_tax_total) != 17:\n amount_tax_total += ' '\n else:\n break\n\n # print(\"The number of digits in the number are:\", amount_total)\n\n # data = \"\u0001\u0011\"+'Salah Hospital'+\"\u0002\u000F\"+'31012239350000311123'+\"\u0003\u0013\"+'2023-01-01'+\"T\"+str(self.datetime_field.time())+\"\u0004\u0004\"+str(200.00)+\"\u0005\u0004\"+str(-125.00)\n # data = \"\u0001\u0011\"+str(company_name)+\"\u0002\u000F\"+str(self.company_id.vat)+\"\u0003\u0013\"+str(self.invoice_date)+\"T\"+str(self.datetime_field.time())+\"\u0004\u0004\"+str(self.amount_total)+\"\u0005\u0004\"+str(self.amount_tax)\n # data = 
\"\u0001\u0011\"+str(company_name)+\"\u0002\u000F\"+str(self.company_id.vat)+\"\u0003\u0014\"+str(self.invoice_date)+\"T\"+str(self.datetime_field.time())+\"\u0006\"+str(self.amount_total)+\"\u0005\u0004\"+str(self.amount_tax)+\"\u0006\"+'nMkXME2tSovykLKU6VUnIq8667SMCoc6A7tKcMKpY0 ='+\"\u0007\"+\"3056301006072\"\n # data = \"\u0001\u0011\"+str(company_name)+\"\u0002\u000F\"+str(self.company_id.vat)+\"\u0003\u0014\"+str(self.invoice_date)+\"T\"+str(self.datetime_field.time())+\"Z\"+\"\u0004\u0006\"+str(self.amount_total)+\"\u0005\u0006\"+str(self.amount_tax)\n\n # data = \"\u0001\u0011\"+str(company_name)+\"\u0002\u000F\"+str(self.company_id.vat)+\"\u0003\u0014\"+str(self.invoice_date)+\"T\"+str(self.datetime_field.time())+\"Z\"+\"\u0004\u0011\"+amount_total+\"\u0005\u0011\"+amount_tax_total\n\n data = \"\u0001\u0011\" + str(company_name) + \"\u0002\u000F\" + str(vat_name) + \"\u0003\u0014\" + str(self.invoice_date) + \"T\" + str(\n self.datetime_field.time()) + \"Z\" + \"\u0004\u0011\" + amount_total + \"\u0005\u0011\" + amount_tax_total\n import base64\n mou = base64.b64encode(bytes(data, 'utf-8'))\n # print(str(mou),'888888888888')\n # print(mou.decode(),'111888888888888')\n # mou =\n #\n # qr_image = base64.b64encode(data)\n # # self.qr_code_image = qr_image\n # # print(base64.b64decode(data),'jjjjjjjjjjjjjjjjjj')\n # print(self.qr_code_image.decode())\n # print(base64.b64decode(data))\n self.decoded_data = str(mou.decode())\n # test =mou.decode('ascii')\n # print(self.decoded_data,'decoded_data')\n # print(test,'test')\n\n ####below 3 working\n # return 'AQpGaXJzdCBTaG9wAg8zMTAxODkzNzU5MjAwMDMDFDIwMjEtMDEtMDVUMDk6MzI6NDBaBAYyNTAuMDAFBDEwLjAwBkA4YjBhNWY5OWFkNjIxM2Y1ZmRiYTNmMmRiOGY5ODlmYjk5MmMwYWI0ODZhMjkyMmIyMjFiMTViYzg2Mzg5ZDVh'\n # return 'ARFGaXJzdCBTaG9wICAgICAgIAIPMzEwMTg5Mzc1OTIwMDAzAxQyMDIxLTAxLTA1VDA5OjMyOjQwWgQGMjUwLjAwBQQxMC4wMAZAOGIwYTVmOTlhZDYyMTNmNWZkYmEzZjJkYjhmOTg5ZmI5OTJjMGFiNDg2YTI5MjJiMjIxYjE1YmM4NjM4OWQ1YQ=='\n # return 'ARFGaXJzdCBTaG9wICAgICAgIAIPMzEwMTg5Mzc1OTIwMDAzAxQyMDIxLTAxLTA1VDA5OjMyOjQwWgQGMTI1OTAwBQY2MDAwMDAGQDhiMGE1Zjk5YWQ2MjEzZjVmZGJhM2YyZGI4Zjk4OWZiOTkyYzBhYjQ4NmEyOTIyYjIyMWIxNWJjODYzODlkNWE='\n\n #\n #\n # \u0001\u0011First\n # Shop \u0002\u000F310189375920003\u0003\u00142021 - 01 - 05\n # T09: 32:40\n # Z\u0004\u0006250.00\u0005\u000610.00\u0006\n #\n # @8\n #\n # b0a5f99ad6213f5fdba3f2db8f989fb992c0ab486a2922b221b15bc86389d5a\n\n return str(mou.decode())\n\n # return 'ARFCUk9USEVSUyBHUk9VUCAgIAIPMzAwMDkwMDAwMDAwMDAzAxMyMDIxLTExLTA4VDEzOjQ1OjM4BAQ5LjAwBQQxLjE4'\n\n # @api.depends('amount_total')\n # def compute_total_words(self):\n # for invoice in self:\n # amount_total_words = invoice.currency_id.amount_to_text(invoice.amount_total)\n # print(amount_total_words)\n # return amount_total_words\n\n\n # def amount_to_words(self):\n # amount_total_words = self.currency_id.amount_to_text(self.amount_total)\n # # print(invoice.currency_id,\"currency\")\n # print(amount_total_words)\n # return amount_total_words\n\n\n\n\n\n\n datetime_field = fields.Datetime(string=\"Create Date\", default=lambda self: fields.Datetime.now())\n decoded_data = fields.Char(string=\"Decoded Data\")\n\n # def total_amount_to_words_natcom(self):\n # for invoice in self:\n # print(invoice.amount_total)\n # print(invoice.currency_id)\n # # amount_total_words = invoice.currency_id.amount_to_text(invoice.amount_total)\n # amount_total_words = invoice.currency_id.amount_to_text(invoice.amount_untaxed)\n # print(amount_total_words)\n # return 
invoice.currency_id.amount_to_text(invoice.amount_untaxed)\n\n\n # def total_amount_to_words(self):\n # self.check_amount_in_words = self.currency_id.amount_to_text(self.amount_untaxed)\n # print(self.check_amount_in_words)\n # # return self.check_amount_in_words\n # return self.currency_id.amount_to_text(self.amount_untaxed)\n\n # @api.onchange('\n\n\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n def tax(self):\n day_book = self.env['account.tax'].search([('name', '=', self.tax_ids.name),('type_tax_use','=',self.tax_ids.type_tax_use)])\n # total=sum(day_book.children_tax_ids.mapped('amount'))\n # print(total)\n # return total\n total = 0.0\n # for line in day_book.children_tax_ids:\n amount = self.price_unit\n total=total + day_book.amount/100\n\n tot=amount * total\n # # amount=line.mapped('amount')\n # # amount = sum(m.mapped('amount'))\n # return amount\n return tot\n\n def compute_amount_tax(self):\n amount = 0\n for line in self.tax_ids:\n amount = amount + line.amount\n return (self.price_subtotal * amount)/100\n\n def compute_amount_total(self):\n amount = 0\n for line in self.tax_ids:\n amount = amount + line.amount\n return ((self.price_subtotal * amount) / 100) + self.price_subtotal\n\n def total_tax(self):\n for line in self.invoice_line_ids:\n\n day_book = self.env['account.tax'].search([('name', '=', line.tax_ids.name),('type_tax_use','=',line.tax_ids.type_tax_use)])\n # total=sum(day_book.children_tax_ids.mapped('amount'))\n # print(total)\n # return total\n # amount = 0\n\n # total = 0.0\n # tot = 0\n # amount = line.price_unit\n # amount = line.price_subtotal\n # total+= amount * day_book.amount/100\n #\n # tot=amount * total\n # print(total)\n # # amount=line.mapped('amount')\n # # amount = sum(m.mapped('amount'))\n # return amount\n # return tot\n\n\n def price(self):\n day_book = self.env['account.tax'].search(\n [('name', '=', self.tax_ids.name), ('type_tax_use', '=', self.tax_ids.type_tax_use)])\n total = 0.0\n # for line in day_book.children_tax_ids:\n amount = self.price_unit\n total = total + day_book.amount / 100\n\n tot = amount * total\n\n amount=amount + tot\n return amount\n\n # # day_book = self.env['account.tax'].search([('name', '=', self.tax_ids.name)])\n # # print(day_book)\n # # for m in day_book.children_tax_ids:\n # # amount=m.mapped('amount')\n # # return amount\n #\n #\n #\n\n # datetime_field = fields.Datetime(string=\"Create Date\", default=lambda self: fields.Datetime.now())\n # decoded_data = fields.Char(string=\"Decoded Data\")\n\n\n # def ghju(self):\n\n\n # datetime_field=fields.Datetime()\n\n\n\n\n\nclass IrActionsReport(models.Model):\n _inherit = \"ir.actions.report\"\n\n @classmethod\n def _get_invoice_reports_ubl(cls):\n return [\n \"account.report_invoice\",\n 'account_invoice_ubl.report_invoice_1',\n 'account_invoice_ubl.report_invoice_b2b',\n 'account_invoice_ubl.report_invoice_b2b_credit',\n 'account_invoice_ubl.report_invoice_b2b_debit',\n 'account_invoice_ubl.report_invoice_b2c',\n 'account_invoice_ubl.report_invoice_b2c_credit',\n 'account_invoice_ubl.report_invoice_b2c_debit',\n \"account.report_invoice_with_payments\",\n \"account.account_invoice_report_duplicate_main\",\n \"sbm_e_invoice.invoice_format_view4\",\n\n ]\n","repo_name":"yarmiztech/sbm_e_invoice","sub_path":"models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":15464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"74075835777","text":"\nfrom 
django.conf.urls import url, include\nfrom . import views, views2\n\napp_name='[axf]'\n\nurlpatterns = [\n    url(r'^home/$', views.home, name=\"home\"),\n    url(r'^market/(\\d+)/$', views.market, name=\"market\"),\n    url(r'^cart/$', views.cart, name=\"cart\"),\n    # modify the shopping cart\n    url(r'^changecart/(\\d+)/$', views.changecart, name=\"changecart\"),\n\n    url(r'^mine/$', views.mine, name=\"mine\"),\n    url(r'^verifycode', views2.verifycode),\n\n    url(r'^login/$', views.login),\n    # sign up\n    url(r'^register/$', views.register),\n    # check whether the account is already registered\n    url(r'checkuserid', views.checkuserid),\n    # log out\n    url(r'^quit/$', views.quit, name=\"quit\"),\n    # create an order\n    url(r'^saveorder/$', views.saveorder),\n    # orders awaiting payment\n    url(r'^myorder/$', views.myorder),\n\n]\n","repo_name":"hedanhe/axf","sub_path":"axf/axf/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"28122529251","text":"###############################################\n## Name: Creating K stacks in a single array ##\n## Owner: Darshit Pandya                     ##\n## Purpose: Data Structures Practice         ##\n###############################################\n\nclass KStacks():\n\n\t'''This class is all about creating k stacks in a single array '''\n\tdef __init__(self, stacks_count, size_):\n\n\t\tself.k = stacks_count\n\t\tself.n = size_\n\n\t\tself.top_tracker = [-1] * self.k\n\t\tself.array_ = [None] * self.n\n\n\t\tself.free = 0\n\t\tself.next = [i + 1 for i in range(self.n)] ## we need this as we are not dividing the whole array into fixed-size chunks\n\t\tself.next[self.n - 1] = -1\n\n\n\tdef is_empty(self,stack_number):\n\t\t## Checks if the stack with the given stack number is empty\n\t\tif self.top_tracker[stack_number] == -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\t\t\n\n\tdef is_full(self):\n\t\t## Checks whether the shared array has run out of free slots\n\t\tif self.free == -1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef print_stack(self, stack_number):\n\t\t## Given the stack number, it checks the top of the stack and\n\t\t## accesses the next list to find the linked elements of that \n\t\t## stack\n\n\t\tif not self.is_empty(stack_number):\n\t\t\t\n\t\t\ttemp = self.top_tracker[stack_number]\n\t\t\twhile temp != -1:\n\t\t\t\tprint(self.array_[temp])\n\t\t\t\ttemp = self.next[temp]\n\t\telse:\n\t\t\tprint(\"Stack is Empty. 
Can't print anything\")\n\t\t\texit(1)\n\n\tdef push_(self, stack_number, element):\n\t\t## This function pushes the element in the given stack\n\t\t## and updates the top of the stack in that element\n\t\tif not self.is_full():\n\t\t# Step 1: Check which variable is free\n\t\t\tinsert_at = self.free\n\n\t\t\t# Step 2: Insert the element at the free space\n\t\t\tself.array_[insert_at] = element\n\t\t\tself.free = self.next[self.free]\n\t\t\t# Step 3: For the particular stack, change the pointer\n\t\t\tself.next[insert_at] = self.top_tracker[stack_number]\n\n\t\t\t# Step 4: Set the next free variable and the top of the stack for the particular stack\n\t\t\tself.top_tracker[stack_number] = insert_at\n\t\t\t\t\n\t\telse:\n\t\t\tprint(\"Stack Overflow\")\n\t\t\texit(1)\n\n\tdef pop_(self, stack_number):\n\t\t## Pops the top of the given stack_number stack\n\t\t\n\t\tif not self.is_empty(stack_number):\n\n\t\t\t## Step 1: Access the top of the stack of the given stack\n\t\t\ttop_of_stack = self.top_tracker[stack_number]\n\n\t\t\t## Step 2: Remove the element from the stack(array)\n\t\t\tvalue_tos = self.array_[top_of_stack]\n\n\t\t\t## Step 3: Change the top of the stack to the next(current)\n\t\t\tself.top_tracker[stack_number] = self.next[top_of_stack]\n\n\t\t\t## Step 4: Change the next(current) with -1\n\t\t\tself.next[top_of_stack] = self.free \n\t\t\tself.free = top_of_stack\n\t\t\treturn value_tos\n\n\t\telse:\n\n\t\t\tprint(\"Stack is empty\")\n\t\t\texit(1)\n\nif __name__ =='__main__':\n\n\t## Create an object of the class\n\tstack_obj = KStacks(3, 10)\n\tstack_obj.push_(0, 10)\n\tstack_obj.push_(0, 11)\n\tstack_obj.push_(0, 17)\n\tstack_obj.push_(0, 18)\n\n\tstack_obj.push_(1, 12)\n\tstack_obj.push_(1, 13)\n\tstack_obj.push_(1, 19)\n\tstack_obj.push_(1, 20)\n\n\tstack_obj.push_(2, 14)\n\tstack_obj.push_(2, 15)\n\t#stack_obj.print_stack(1)\n\tstack_obj.pop_(2)\n\tstack_obj.pop_(2)\n\tstack_obj.push_(2, 21)\n\t#print(stack_obj.pop_(2))\n\n\n\n\n","repo_name":"darshitpandya18/Data-Structures","sub_path":"Stacks/KStacksUsingArrays.py","file_name":"KStacksUsingArrays.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"5355157234","text":"from __future__ import print_function\nimport argparse\nimport random\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nimport numpy as np\nfrom warpctc_pytorch import CTCLoss\nimport os\nimport utils\nimport dataset\nfrom dataset import *\nimport models.crnn as crnn\nimport re\nimport test_params\nimport logging\nimport os\nimport time\nimport sys\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport mmcv\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--trainroot', required=True, help='path to dataset')\nparser.add_argument('--valroot', required=True, help='path to dataset')\nparser.add_argument('--cuda', type=bool, default=True, help='enables cuda')\nparser.add_argument('--GPU_ID', type=int, default=None, help='GPU_ID')\n\nopt = parser.parse_args()\nval_epoch = 0\n\ndef get_log_dir():\n run_id = test_params.name+f'_lr_{test_params.lr:.5f}_batchSize_{test_params.batchSize:d}_time_%s_'%time.strftime('%m%d%H%M%S')+'/'\n log_dir = os.path.join(test_params.log_dir, run_id)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n return log_dir\n\ndef get_logger(log_dir, name, 
log_filename='info.log', level=logging.INFO):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n # Add file handler and stdout handler\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler(os.path.join(log_dir, log_filename))\n file_handler.setFormatter(formatter)\n # Add console handler.\n console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(console_formatter)\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n # Add google cloud log handler\n logger.info('Log directory: %s', log_dir)\n return logger\n \n# custom weights initialization called on crnn\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\ndef val(net, dataset, criterion, epoch, step, max_iter=100000):\n logger.info('Start val')\n # for p in crnn.parameters():\n # p.requires_grad = False\n net.eval()\n data_loader = torch.utils.data.DataLoader(\n dataset, shuffle=False, batch_size=1, num_workers=int(test_params.workers), collate_fn=alignCollate(imgH=test_params.imgH, imgW=test_params.imgW, keep_ratio=test_params.keep_ratio))\n val_iter = iter(data_loader)\n i = 0\n n_correct = 0\n loss_avg = utils.averager()\n max_iter = len(data_loader)\n record_dir = log_dir + 'epoch_%d_step_%d_data.txt'%(epoch, step)\n r = 1\n f = open(record_dir, \"a\")\n num_label, num_pred = test_params.total_num, 0\n\n start = time.time()\n prog_bar = mmcv.ProgressBar(max_iter)\n for i in range(max_iter):\n data = val_iter.next()\n i += 1\n cpu_images, cpu_texts = data\n batch_size = cpu_images.size(0)\n utils.loadData(image, cpu_images)\n t, l = converter.encode(cpu_texts)\n utils.loadData(text, t)\n utils.loadData(length, l)\n\n with torch.no_grad():\n preds = crnn(image)\n\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\n cost = criterion(preds, text, preds_size, length) / batch_size\n loss_avg.add(cost)\n _, preds = preds.max(2)\n preds = preds.transpose(1, 0).contiguous().view(-1)\n print(preds)\n sim_preds = converter.decode(preds.data, preds_size.data, raw=False)\n if not isinstance(sim_preds, list):\n sim_preds = [sim_preds]\n for pred in sim_preds:\n f.write(str(r).zfill(6)+\".jpg \"+pred+\"\\n\")\n r += 1\n list_1 = []\n for i in cpu_texts:\n string = i.decode('utf-8', 'strict')\n list_1.append(string) \n for pred, target in zip(sim_preds, list_1):\n if pred == target:\n n_correct += 1\n\n num_pred += len(sim_preds)\n\n prog_bar.update()\n print(\"\")\n f.close()\n\n \n raw_preds = converter.decode(preds.data, preds_size.data, raw=True)[:test_params.n_test_disp]\n for raw_pred, pred, gt in zip(raw_preds, sim_preds, list_1):\n logger.info('%-20s => %-20s, gt: %-20s' % (raw_pred, pred, gt))\n\n logger.info('correct_num: %d'%(n_correct))\n logger.info('Total_num: %d'%(max_iter*test_params.batchSize))\n accuracy = float(n_correct) / num_pred\n recall = float(n_correct) / num_label\n logger.info('Test loss: %f, accuray: %f, recall: %f, F1 score: %f, Cost : %.4fs per img'\n % (loss_avg.val(), accuracy, recall, 2*accuracy*recall/(accuracy+recall+1e-2), (time.time()-start)/max_iter))\n\n global val_epoch\n writer.add_scalar(\"val/loss\", loss_avg.val(), val_epoch)\n writer.add_scalar(\"val/acc\", accuracy, val_epoch)\n 
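# NOTE: the F1 score logged below adds 1e-2 to the denominator, so it is a\n    # smoothed approximation of 2*P*R/(P+R) that avoids division by zero.\n    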
writer.add_scalar(\"val/recall\", recall, val_epoch)\n writer.add_scalar(\"val/F1\", 2*accuracy*recall/(accuracy+recall+1e-2), val_epoch)\n val_epoch += 1\n\n\ndef trainBatch(net, criterion, optimizer, train_iter):\n data = train_iter.next()\n cpu_images, cpu_texts = data\n batch_size = cpu_images.size(0)\n utils.loadData(image, cpu_images)\n t, l = converter.encode(cpu_texts)\n utils.loadData(text, t)\n utils.loadData(length, l)\n preds = crnn(image)\n preds_size = Variable(torch.IntTensor([preds.size(0)] * batch_size))\n cost = criterion(preds, text, preds_size, length) / batch_size\n crnn.zero_grad()\n cost.backward()\n optimizer.step()\n return cost\n \ndef training():\n for total_steps in range(test_params.niter):\n train_iter = iter(train_loader)\n i = 0\n logger.info('length of train_data: %d'%(len(train_loader)))\n\n eval_time = 0.0\n prog_bar = mmcv.ProgressBar(test_params.displayInterval)\n while i < len(train_loader):\n torch.cuda.empty_cache()\n for p in crnn.parameters():\n p.requires_grad = True\n crnn.train()\n val(crnn, test_dataset, criterion, total_steps, i)\n return\n start = time.time()\n cost = trainBatch(crnn, criterion, optimizer, train_iter)\n eval_time += time.time()-start\n\n loss_avg.add(cost)\n i += 1\n prog_bar.update()\n\n\n if i % test_params.tbInterval == 0:\n print(\"\\n>>>> Tensorboard Log\")\n writer.add_scalar('train/loss', loss_avg.val(), int(i+total_steps*len(train_loader))) # record to tb\n\n if i % test_params.displayInterval == 0:\n sys.stdout.write(\"\\r%100s\\r\"%' ')\n sys.stdout.flush()\n logger.info('[%d/%d][%d/%d] Loss: %f, Cost: %.4fs per batch' %\n (total_steps, test_params.niter, i, len(train_loader), loss_avg.val(), eval_time/i))\n\n if eval_time/i < 0.2: test_params.displayInterval = 1000\n elif eval_time/i < 0.5: test_params.displayInterval = 400\n elif eval_time/i < 1.0: test_params.displayInterval = 200\n prog_bar = mmcv.ProgressBar(test_params.displayInterval) # new interval\n\n loss_avg.reset()\n \n \n\n val(crnn, test_dataset, criterion, total_steps, i)\n torch.cuda.empty_cache()\n if (total_steps+1) % test_params.saveInterval == 0:\n string = \"model save to {0}crnn_Rec_done_epoch_{1}.pth\".format(log_dir, total_steps)\n logger.info(string)\n torch.save(crnn.state_dict(), '{0}crnn_Rec_done_epoch_{1}.pth'.format(log_dir, total_steps))\n\nif __name__ == '__main__':\n \n manualSeed = random.randint(1, 10000) # fix seed\n random.seed(manualSeed)\n np.random.seed(manualSeed)\n torch.manual_seed(manualSeed)\n cudnn.benchmark = True\n log_dir = get_log_dir()\n logger = get_logger(log_dir, test_params.name, test_params.name+'_info.log')\n logger.info(opt)\n\n # tensorboardX\n writer = SummaryWriter(os.path.join(log_dir, 'tb_logs'))\n\n # store model path\n if not os.path.exists('./expr'):\n os.mkdir('./expr')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.GPU_ID)\n # read train set\n train_dataset = dataset.lmdbDataset(root=opt.trainroot, rgb=test_params.rgb, rand_hcrop=True)\n assert train_dataset\n if test_params.random_sample:\n sampler = dataset.randomSequentialSampler(train_dataset, test_params.batchSize)\n else:\n sampler = None\n\n # images will be resize to 32*160\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=test_params.batchSize,\n shuffle=False, sampler=sampler,\n num_workers=int(test_params.workers),\n collate_fn=dataset.alignCollate(imgH=test_params.imgH, imgW=test_params.imgW, keep_ratio=test_params.keep_ratio))\n \n # read test set\n # images will be resize to 32*160\n test_dataset = 
dataset.lmdbDataset(\n root=opt.valroot, rgb=test_params.rgb)\n\n nclass = len(test_params.alphabet) + 1\n nc = 1\n\n converter = utils.strLabelConverter(test_params.alphabet)\n criterion = CTCLoss()\n\n # cnn and rnn\n image = torch.FloatTensor(test_params.batchSize, 1, test_params.imgH, test_params.imgH)\n text = torch.IntTensor(test_params.batchSize * 5)\n length = torch.IntTensor(test_params.batchSize)\n\n crnn = crnn.CRNN(test_params.imgH, nc, nclass, test_params.nh)\n if opt.cuda:\n crnn.cuda()\n image = image.cuda()\n criterion = criterion.cuda()\n\n crnn.apply(weights_init)\n if test_params.crnn != '':\n logger.info('loading pretrained model from %s' % test_params.crnn)\n if test_params.without_fully:\n pretrained_dict = torch.load(test_params.crnn)\n model_dict=crnn.state_dict()\n pretrained_dict.pop('rnn.1.embedding.weight')\n pretrained_dict.pop('rnn.1.embedding.bias') \n crnn.load_state_dict(pretrained_dict, strict=False) \n else: \n crnn.load_state_dict(torch.load(test_params.crnn), strict=False)\n\n image = Variable(image)\n text = Variable(text)\n length = Variable(length)\n\n # loss averager\n loss_avg = utils.averager()\n\n # setup optimizer\n if test_params.adam:\n optimizer = optim.Adam(crnn.parameters(), lr=test_params.lr,\n betas=(test_params.beta1, 0.999))\n elif test_params.adadelta:\n optimizer = optim.Adadelta(crnn.parameters(), lr=test_params.lr)\n else:\n optimizer = optim.RMSprop(crnn.parameters(), lr=test_params.lr)\n #optimizer = lr_scheduler.StepLR(optimizer, step_size=25*len(train_loader), gamma=0.1) \n\n training()\n","repo_name":"DataFountainCode/Huawei_Handwriting","sub_path":"代码/crnn_chinese_characters_rec/test_crnn_main.py","file_name":"test_crnn_main.py","file_ext":"py","file_size_in_byte":10928,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"80"} +{"seq_id":"27182503190","text":"#\n# Shared methods and classes for testing\n#\nimport pybamm\n\n\nclass VarsForTesting(object):\n def __init__(self, t=None, c=None, e=None, j=None):\n self.t = t\n self.c = c\n self.e = e\n self.j = j\n\n\ndef pdes_io(model):\n y = model.initial_conditions()\n vars = pybamm.Variables(model)\n vars.update(0, y)\n dydt = model.pdes_rhs(vars)\n return y, dydt\n","repo_name":"galvanic653960572/PyBaMM","sub_path":"tests/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"80"} +{"seq_id":"43387493682","text":"#!/usr/bin/python\r\n\r\n'''\r\nPlot photometry data\r\n'''\r\n\r\ndef read_file(file):\r\n df = pd.read_csv(file)\r\n if 'norm' not in df.columns:\r\n df = df.melt(id_vars='TIMErel', var_name='sample', value_name='norm')\r\n return df\r\n\r\ndef add_group_info(df, group_file):\r\n groups = pd.read_csv(group_file)\r\n df = df.join(groups.set_index('sample'), on='sample')\r\n return df\r\n\r\ndef round_time(df):\r\n freq = 1/((df['TIMErel'].max()-df['TIMErel'].min())/\\\r\n df.groupby('sample')['TIMErel'].count()[0])\r\n freq = int(round(freq, 0))\r\n decimals = len(str(1/freq).split('.')[1])\r\n df['TIMErel'] = df['TIMErel'].apply(lambda x: round(x, decimals))\r\n return df\r\n\r\ndef plot(df, plottype, fname, grouping_var=None):\r\n print(f\"Plotting {plottype} and saving as {fname}. 
Be patient, this takes time.\")\r\n    plt.axhline(y=0, color='black')\r\n    plt.axvline(x=0, color='black')\r\n    if plottype == 'lineplot':\r\n        sns.lineplot(x='TIMErel', y='norm', hue=grouping_var, data=df)\r\n        plt.ylabel('\u0394F/F')\r\n    elif plottype == 'heatmap':\r\n        df = df.pivot(index='TIMErel', columns='sample', values='norm')\r\n        sns.heatmap(df.transpose(), cmap=\"PiYG\", cbar_kws={'label': '\u0394F/F'})\r\n    plt.xlabel('Time (s)')\r\n    plt.gca().xaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}'))\r\n    plt.savefig(fname, dpi=600, pad_inches=0)\r\n\r\n# - run -----------------------------------------------------------------------\r\nif __name__ == '__main__':\r\n    import argparse\r\n    import numpy as np\r\n    import pandas as pd\r\n    from matplotlib import pyplot as plt\r\n    from matplotlib.ticker import StrMethodFormatter\r\n    import seaborn as sns\r\n\r\n    # parse arguments\r\n    parser = argparse.ArgumentParser(description='Plot data')\r\n    parser.add_argument('file', help='data file from combine.py', type=str)\r\n    parser.add_argument('plottype', help='plot type: \"lineplot\" or \"heatmap\"',\r\n                        type=str)\r\n    parser.add_argument('-xmin', help='x-axis min (example: -xmin -10)',\r\n                        default=None)\r\n    parser.add_argument('-xmax', help='x-axis max (example: -xmax 100)',\r\n                        default=None)\r\n    parser.add_argument('-width', help='figure width in inches (example: -width 5)',\r\n                        default=6)\r\n    parser.add_argument('-height', help='figure height in inches (example: -height 5)',\r\n                        default=4)\r\n    parser.add_argument('-filename', help='file name (example: -filename heatmap.png)',\r\n                        default='plot.png')\r\n    parser.add_argument('-groups', help='grouping file (*.csv)', default=None)\r\n    args = parser.parse_args()\r\n\r\n    # configure plot universals\r\n    plt.rcParams[\"figure.figsize\"] = [args.width, args.height]\r\n    plt.rcParams['font.sans-serif'] = ['Helvetica', 'Arial', 'Unica', 'Imago',\r\n                                       'Rail Alphabet', 'Tahoma', 'DejaVu Sans']\r\n    plt.rcParams['font.family'] = \"sans-serif\"\r\n    grouping_var=None\r\n\r\n    # read in data\r\n    df = read_file(args.file)\r\n\r\n    # filter data by plotting axis min and max\r\n    if args.xmin is not None:\r\n        df = df[df['TIMErel'] >= int(args.xmin)]\r\n    if args.xmax is not None:\r\n        df = df[df['TIMErel'] <= int(args.xmax)]\r\n\r\n    # round data for easier viewing during plotting\r\n    df = round_time(df)\r\n\r\n    # add group info if supplied\r\n    if args.groups is not None:\r\n        df = add_group_info(df, args.groups)\r\n        grouping_var='group'\r\n\r\n    # plot\r\n    plot(df, args.plottype, args.filename, grouping_var)\r\n","repo_name":"alanrupp/photometry","sub_path":"python/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"3745176276","text":"import re\n\nmyList = []\ndata1, data2 = {}, {}\n\ninFile = open(\"input/day07.txt\")\n\n# Enter data into dictionary\nfor line in inFile:\n    entry = line.replace(\"\\\n\",\"\").replace(\".\", \"\").replace(\" bags\", \"\").replace(\" bag\", \"\")\n    values = re.split(\" contain |, \", entry)\n    if values[1] != \"no other\": # Don't bother putting bags into the list that don't have anything in it\n        data1[values[0]], data2[values[0]] = [], []\n        for x in range(1, len(values)):\n            data1[values[0]].append(values[x][2:])\n            data2[values[0]].append([values[x][0], values[x][2:]])\n\nprint(\"===== PART 1 =====\")\ntoCheck = [\"shiny gold\"]\nchecked = []\n\n# Check through all the bags that contain shiny gold or contain a bag 
that contains shiny gold etc.\n# Pretty inefficient, but it does the job for a small input file\nwhile len(toCheck) > 0:\n    for item in data1:\n        if toCheck[0] in data1[item]:\n            if item not in checked:\n                toCheck.append(item)\n                checked.append(item)\n    toCheck.pop(0)\n\nprint(\"Outer Bags: \" + str(len(checked)))\n\nprint(\"===== PART 2 =====\")\n\n# DFS (sort of) starting from \"shiny gold\"\ndef nextBag(toCheck, data):\n    total = 1\n    if toCheck not in data: # Empty Bag isn't in the dictionary, end of bag sequence\n        return total\n    else :\n        for bags in data[toCheck]: # Check each bag within the current bag, then check that bag, then ...\n            total += int(bags[0]) * nextBag(bags[1], data)\n        return total\n\nprint(\"The shiny gold bag contains \" + str(nextBag(\"shiny gold\", data2) - 1) + \" bags\")","repo_name":"sophieKaelin/adventofcode2020","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"72341798979","text":"#!/usr/local/bin/python3\n\nfrom cgitb import enable\nenable()\n\nfrom cgi import FieldStorage\nimport pymysql as db\nfrom hashlib import sha256\nfrom shelve import open\nfrom http.cookies import SimpleCookie\nfrom os import environ\nfrom html import escape\n\ncookie = SimpleCookie()\nhttp_cookie_header = environ.get('HTTP_COOKIE')\nform_data = FieldStorage()\nevent_id = ''\n\nevent_id = str(escape(form_data.getfirst('delete-button')))\n\n#if cookie present\nif http_cookie_header:\n    cookie.load(http_cookie_header)\n    #if sid cookie\n    if 'sid' in cookie:\n        sid = cookie['sid'].value\n        session_store = open('sess_' + sid, writeback=False)\n        #if authenticated, only teacher accounts may delete events\n        if session_store['authenticated']:\n            if session_store['account_type'] == \"2\":\n                try:\n                    connection = db.connect('cs1.ucc.ie', 'rjf1', 'ahf1Aeho', '2021_rjf1')\n                    cursor = connection.cursor(db.cursors.DictCursor)\n                    # parameterised query so event_id cannot inject SQL\n                    cursor.execute(\"\"\"DELETE FROM `calendar` WHERE `id` = %s;\"\"\", (event_id,))\n                    connection.commit()\n                    cursor.close()\n                    connection.close()\n                except db.Error:\n                    result = \"
Sorry! We are experiencing problems at the moment. Please call back later.
\"\n\nprint('Location: teacher.py#schedule')\n\nprint('Content-Type: text/html')\nprint()\n","repo_name":"cian-strolla/Schoolify","sub_path":"src/deleteEvent.py","file_name":"deleteEvent.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"7892744623","text":"import logging\nfrom typing import Any, Callable, Optional, TypeVar, cast\nfrom mognet.tasks.task_registry import TaskRegistry, task_registry\n\n_log = logging.getLogger(__name__)\n\n\n_T = TypeVar(\"_T\")\n\n\ndef task(*, name: Optional[str] = None):\n \"\"\"\n Register a function as a task that can be run.\n\n The name argument is recommended, but not required. It is used as an identifier\n for which task to run when creating Request objects.\n\n If the name is not provided, the function's full name (module + name) is used instead.\n Bear in mind that this means that if you rename the module or the function, things may break\n during rolling upgrades.\n \"\"\"\n\n def task_decorator(t: _T) -> _T:\n reg = task_registry.get(None)\n\n if reg is None:\n _log.debug(\"No global task registry set. Creating one\")\n\n reg = TaskRegistry()\n reg.register_globally()\n\n reg.add_task_function(cast(Callable, t), name=name)\n\n return t\n\n return task_decorator\n","repo_name":"DS4SD/project-mognet","sub_path":"mognet/decorators/task_decorator.py","file_name":"task_decorator.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"80"} +{"seq_id":"4427327821","text":"import numpy as np\r\nimport random \r\n\r\n#Variables glabales\r\ntabla = np.array([[' ', ' ', ' ',' ',' '], [' ', ' ', ' ',' ',' '], [' ', ' ', ' ',' ',' '],[' ', ' ', ' ',' ',' ']])\r\nlotes_seleccionados = []\r\nclientes = []\r\n\r\n#definicion para mostrar los lotes\r\ndef mostrar_lotes(): \r\n print(\" 1. 2. 3. 4. 5. \")\r\n print(\"1. {} | {} | {} | {} | {} \".format(tabla[0][0], tabla[0][1], tabla[0][2], tabla[0][3], tabla[0][4]))\r\n print(\" -------------------------\")\r\n print(\"2. {} | {} | {} | {} | {} \".format(tabla[1][0], tabla[1][1], tabla[1][2], tabla[1][3], tabla[1][4]))\r\n print(\" -------------------------\")\r\n print(\"3. {} | {} | {} | {} | {} \".format(tabla[2][0], tabla[2][1], tabla[2][2], tabla[2][3], tabla[2][4]))\r\n print(\" --------------------------\")\r\n print(\"4. {} | {} | {} | {} | {} \".format(tabla[3][0], tabla[3][1], tabla[3][2], tabla[3][3], tabla[3][4]))\r\n print(\" -------------------------\")\r\n\r\n\r\n#definicion para mostrar los lotes dispobiles\r\ndef mostrar_lotes_disponibles():\r\n print(\"Lotes Disponibles: \")\r\n mostrar_lotes()\r\n\r\n#definicion para seleccionar lote/confirmacion o no\r\ndef seleccion_lote():\r\n mostrar_lotes_disponibles()\r\n fila = int(input(\"Ingrese el número de fila del lote (1-4): \"))\r\n columna = int(input(\"Ingrese el número de columna del lote (1-5): \"))\r\n \r\n if fila < 1 or fila > len(tabla) or columna < 1 or columna > len(tabla[0]):\r\n print(\"Coordenadas inválidas. Inténtelo de nuevo.\")\r\n return\r\n \r\n if tabla[fila-1][columna-1] == ' ':\r\n confirmacion = int(input(\"Desea confirmar la selección y compra del lote? 1. SI - 2. NO: \"))\r\n if confirmacion == 1:\r\n lote = {\r\n 'fila': fila,\r\n 'columna': columna,\r\n }\r\n \r\n lotes_seleccionados.append(lote)\r\n tabla[fila-1][columna-1] = 'X'\r\n \r\n ver_detalle = int(input(\"Desea ver los detalles del lote seleccionado? 1. SI - 2. 
NO: \"))\r\n if ver_detalle == 1:\r\n mostrar_detalles_lote()\r\n else: \r\n (\"\")\r\n print(\"Por favor ingrese sus datos\")\r\n rut = input(\"Ingrese su RUT: \")\r\n nombre = input(\"Ingrese su nombre completo: \")\r\n telefono = input(\"Ingrese su teléfono: \")\r\n email = input(\"Ingrese su correo electrónico: \")\r\n\r\n cliente = {\r\n 'rut': rut,\r\n 'nombre': nombre,\r\n 'telefono': telefono,\r\n 'email': email\r\n }\r\n\r\n clientes.append(cliente)\r\n \r\n print(\"Lote seleccionado exitosamente.\")\r\n else:\r\n print(\"Selección de lote cancelada.\")\r\n else:\r\n print(\"El lote seleccionado no está disponible.\")\r\n\r\n#definicion para mostrar los detalles del lote comprado\r\ndef mostrar_detalles_lote():\r\n if lotes_seleccionados:\r\n for lote in lotes_seleccionados:\r\n print(\"Lote seleccionado: Fila {}, Columna {}\".format(lote['fila'], lote['columna']))\r\n print(\"Detalles del lote:\")\r\n print(\"Medidas: {} Metros Cuadrados\".format(random.randint(1500, 2500)))\r\n print(\"Características: {}\".format(obtener_caracteristicas_aleatorias()))\r\n print(\"Precio: {} CLP\".format(obtener_precio_lote()))\r\n print()\r\n else:\r\n print(\"No se han seleccionado lotes.\")\r\n\r\n#definicion para generar caracteristicas del lote\r\ndef obtener_caracteristicas_aleatorias():\r\n caracteristicas = [\"Salida a entrada Princpal\", \"Desnivel Leve\", \"Desnivel en pendiente\", \"Acceso a Rio Blanco\", \"Lote con Conexion a Luz y Agua\"]\r\n num_caracteristicas = random.randint(1, 3)\r\n return random.sample(caracteristicas, num_caracteristicas)\r\n\r\ndef obtener_precio_lote():\r\n precios_lote = [\"35.000.000\", \"45.000.000\", \"50.000.000\", \"55.000.000\", \"60.000.000\", \"65.000.000\", \"70.000.000\"]\r\n return random.sample(precios_lote,1)\r\n\r\n#definicion para mostrar los clientes que han comprado lote\r\ndef mostrar_clientes():\r\n if clientes:\r\n print(\"Clientes:\")\r\n for cliente in clientes:\r\n print(\"RUT: {}\".format(cliente['rut']))\r\n print(\"Nombre: {}\".format(cliente['nombre']))\r\n print(\"Teléfono: {}\".format(cliente['telefono']))\r\n print(\"Correo electrónico: {}\".format(cliente['email']))\r\n print()\r\n else:\r\n print(\"No hay clientes registrados.\")\r\n\r\n#definicion de menu principal de compra\r\ndef Menu_loteDuoc():\r\n while True:\r\n print(\"=== MENÚ ===\")\r\n print(\"1. Ver disponibilidad de lotes\")\r\n print(\"2. Seleccionar un lote\")\r\n print(\"3. Ver detalles del lote seleccionado\")\r\n print(\"4. Ver clientes\")\r\n print(\"5. Salir\")\r\n \r\n opcion = int(input(\"Seleccione una opción: \"))\r\n \r\n if opcion == 1:\r\n mostrar_lotes_disponibles()\r\n elif opcion == 2:\r\n seleccion_lote()\r\n elif opcion == 3:\r\n mostrar_detalles_lote()\r\n elif opcion == 4:\r\n mostrar_clientes()\r\n elif opcion == 5:\r\n print(\"Gracias por usar el sistema. ¡Hasta luego!\")\r\n break\r\n else:\r\n print(\"Opción inválida. 
Inténtelo de nuevo.\")\r\nMenu_loteDuoc()\r\n","repo_name":"aguarumo/AndresGuarumo_PGY1121_007_V","sub_path":"AndresGuarumo_PGY1121_007_V.py.py","file_name":"AndresGuarumo_PGY1121_007_V.py.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"24965561296","text":"from unittest import TestCase\n\nfrom pyspark.sql.functions import date_format\nfrom pyspark.sql.types import StructType, StructField, StringType, TimestampType, DateType, DoubleType\nfrom src.Builders.SparkBuilder import create_spark_session\nfrom src.Builders.SparkLogger import Log4j\nfrom src.Processing.BigQuerySparkProcessing import BigQuerySparkProcessing\n\n\nclass BigQuerySparkProcessingTest(TestCase):\n spark = create_spark_session()\n logger = Log4j(spark)\n\n def test_process_data(self):\n commit_table_schema = StructType([\n StructField(\"repo_name\", StringType(), True),\n StructField(\"time_sec\", StringType(), True),\n StructField(\"date\", TimestampType(), True)\n ])\n\n languages_table_schema = StructType([\n StructField(\"repo_name\", StringType(), True),\n StructField(\"name\", StringType(), True)\n ])\n\n expected_df_schema = StructType([\n StructField(\"language\", StringType(), True),\n StructField(\"date_commit\", DateType(), True),\n StructField(\"avg_time\", DoubleType(), True)\n ])\n\n commit_table = self.spark.read.schema(commit_table_schema) \\\n .option(\"multiline\", \"true\") \\\n .format(\"json\") \\\n .load(\"data/commit_table.json\")\n\n languages_table = self.spark.read.schema(languages_table_schema) \\\n .option(\"multiline\", \"true\") \\\n .format(\"json\") \\\n .load(\"data/languages_table.json\")\n\n expected_df = self.spark.read.schema(expected_df_schema) \\\n .option(\"multiline\", \"true\") \\\n .format(\"json\") \\\n .load(\"data/expected_df.json\") \\\n .withColumn(\"date_commit\", date_format(\"date_commit\", 'yyyy-MM-dd')) # fix the date type\n\n bigquery_processor = BigQuerySparkProcessing(self.spark, self.logger)\n result_df = bigquery_processor.process_data(commit_table, languages_table)\n\n self.assertEqual(result_df.dtypes, expected_df.dtypes,\n \"Expected Dataframe should contains the same (columns,types) as those of the result Dataframe\")\n\n self.assertEqual(result_df.collect(), expected_df.collect(),\n \"Expected Dataframe should be equal to result Dataframe\")\n","repo_name":"reda-mouffok/BigQuery-Github-Analysis","sub_path":"test/BigQuerySparkProcessingTest.py","file_name":"BigQuerySparkProcessingTest.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"18692618576","text":"import numpy as np\n\n\ndef S_ARMA(f, phis, thetas, sigma2):\n \"\"\"\n Compute spectral density for ARMA(len(phis),len(thetas)).\n\n If p = 0, q > 0, i.e. you pass in an empty array for phis, \n then it will compute the spectrum of an MA(q) process. \n If p > 0, q = 0, i.e. 
you pass in an empty array for thetas, \n it will compute the spectrum of an AR(p) process.\n\n :param f: n-dim numpy array, freqs to be evaluated at.\n :param phis: p-dim numpy array, [phi1,p, ..., phip,p].\n :param thetas: q-dim numpy array, [theta1,q, ..., thetaq,q]\n :param sigma2: float, variance of the white noise.\n\n :return sf: n-dim numpy array, sdf evaluated at each f\n \"\"\"\n\n def Gsq(param):\n total = 1\n for i in range(len(param)):\n total -= param[i] * np.exp(-2j * np.pi * f * (i+1))\n return total.conjugate() * total\n \n return sigma2 * Gsq(thetas) / Gsq(phis)\n\n\ndef ARMA22_sim(phis, thetas, sigma2, N):\n \"\"\"\n Simulate N values of a Gaussian ARMA(2,2) process.\n \n :param phis: 2-dim array, phi values for AR part.\n :param thetas: 2-dim array, theta values for MA part.\n :param sigma2: float, variance of the white noise.\n :param N: integer, number of values to be simulated\n\n :return x: N-dim array, simulated values\n \"\"\"\n sd = np.sqrt(sigma2) # standard deviation\n x = np.array([0, 0]) # initialise burn in method\n err = np.random.normal(scale=sd, size=2) # first two error\n\n for i in range(98+N):\n nexterr = np.random.normal(scale=sd)\n nextx = phis[0]*x[-1] + phis[1]*x[-2] + nexterr \\\n - thetas[0]*err[-1] - thetas[1]*err[-2]\n x = np.append(x, nextx)\n err = err[1], nexterr\n \n return x[-N:]\n\n\ndef periodogram(X):\n \"\"\"\n Compute the periodogram for the time series X.\n\n :param X: n-dim array, time series\n :return spf: n-dim array, periodogram at fourier frequencies\n \"\"\"\n\n Ak = np.fft.fft(X)\n spf = (Ak.conjugate() * Ak) / len(X)\n\n return spf\n\n\ndef direct(X,p):\n \"\"\"\n Compute the direct spectral estimate for time series X,\n using the p x 100% cosine taper.\n\n :param X: n-dim array, time series\n :param p: float, 0 <= p <= 1 for tapering\n :return sdf: n-dim array, direct spectral est at fourier freqs\n \"\"\"\n N = X.shape[-1]\n a = int(np.floor(p*N/2))\n b= np.floor(p*N)\n ht = np.zeros(N)\n for i in range(a):\n ht[i] = 0.5 * (1 - np.cos(2 * np.pi * i / (b+1)))\n for i in range(a, N-a):\n ht[i] = 1\n for i in range(N-a,N):\n ht[i] = 0.5 * (1 - np.cos(2 * np.pi * (N+1-i) / (b+1)))\n C = np.sqrt(np.sum(ht**2))\n ht = ht / C\n\n taperX = ht*X\n Ak = np.fft.fft(taperX)\n sdf = Ak.conjugate() * Ak\n\n return sdf\n\n\ndef calc_phis(r, f=12/128):\n \"\"\"\n Calculate phis for AR(2) process that exhibits pseudo cylical behaviour\n and have roots at z1 = 1/r*e^(i2f pi) and z2 = 1/r*e^(-i2f pi)\n\n :param r: float\n :param f: float, frequency to be calculated at\n :return phis: 2-dim array, phis defining the AR(2) process \n \"\"\"\n\n phis = np.array([2*r*np.cos(2*np.pi*f), -r**2])\n return phis\n","repo_name":"azb119/MATH60046CW","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"37546424991","text":"from bwp.sites import site\nfrom bwp.models import ModelBWP\nfrom models import Document, Report\n\n\nclass DocumentBWP(ModelBWP):\n list_display = (\n 'title',\n 'content_type',\n 'qualifier',\n 'template_name',\n 'id',\n )\n ordering = ['content_type']\n\n\nsite.register(Document, DocumentBWP)\n\n\nclass ReportBWP(ModelBWP):\n list_display = ('document', 'created', 'user', 'url', 'id')\n search_fields = ['document__title']\n\n\nsite.register(Report, 
ReportBWP)\n","repo_name":"rosix-ru/django-bwp","sub_path":"bwp/contrib/reports/__bwp__.py","file_name":"__bwp__.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"74763861378","text":"import streamlit as st\n\n\ndef predict_maths(live_vars, math_features, math_pipeline, math_label_map):\n\n # subset feature variables\n math_live_vars = live_vars.filter(math_features)\n\n # Make prediction\n math_prediction = math_pipeline.predict(math_live_vars)\n math_prediction_probability = math_pipeline.predict_proba(math_live_vars)\n\n # display results\n probability = math_prediction_probability[0, math_prediction][0]*100\n math_classes = math_label_map[math_prediction[0]]\n\n if math_prediction != 1:\n statement = (\n f\"There is a {probability.round(2)}% probability that the\"\n f\" student will score **{math_classes}** in their math exam.\"\n )\n\n else:\n statement = (\n f\"The model predicts that the student will score \"\n f\" **{math_classes}** in their math exam.\"\n )\n\n st.write(statement)\n\n\ndef predict_reading(live_vars,\n reading_features,\n reading_pipeline,\n reading_label_map):\n\n # subset feature variables\n reading_live_vars = live_vars.filter(reading_features)\n\n # Make prediction\n reading_prediction = reading_pipeline.predict(reading_live_vars)\n reading_prediction_probability = (\n reading_pipeline.predict_proba(reading_live_vars))\n\n # display results\n probability = reading_prediction_probability[0, reading_prediction][0]*100\n reading_classes = reading_label_map[reading_prediction[0]]\n\n if reading_prediction != 1:\n statement = (\n f\"There is a {probability.round(2)}% probability that the\"\n f\" student will score **{reading_classes}** in their\"\n \" reading exam\"\n )\n\n else:\n statement = (\n f\"The model predicts that the student will score \"\n f\" **{reading_classes}** in their reading exam.\"\n )\n\n st.write(statement)\n\n\ndef predict_writing(live_vars,\n writing_features,\n writing_pipeline,\n writing_label_map):\n\n # subset feature variables\n writing_live_vars = live_vars.filter(writing_features)\n\n # Make prediction\n writing_prediction = writing_pipeline.predict(writing_live_vars)\n writing_prediction_probability = (\n writing_pipeline.predict_proba(writing_live_vars))\n\n # display results\n probability = writing_prediction_probability[0, writing_prediction][0]*100\n writing_classes = writing_label_map[writing_prediction[0]]\n\n if writing_prediction != 1:\n statement = (\n f\"There is a {probability.round(2)}% probability that the\"\n f\" student will score **{writing_classes}** in their\"\n \" writing exam\"\n )\n\n else:\n statement = (\n f\"The model predicts that the student will score\"\n f\" **{writing_classes}** in their writing exam.\"\n )\n\n st.write(statement)\n","repo_name":"AdamBoley/Exam-Scores-Analysis","sub_path":"src/machine_learning/predictive_analysis_interface.py","file_name":"predictive_analysis_interface.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"10133412403","text":"from torch import nn\nfrom torchinfo import summary\n\n# from utilities import get_config\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1):\n super().__init__()\n\n # Parameters\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n self.activation = 
nn.ReLU(inplace=True)\n\n # Convolutional blocks\n self.blocks = nn.Identity()\n\n # Match dimensions of block's input and output for summation\n self.shortcut = nn.Sequential(\n nn.Conv1d(in_channels, out_channels, 1, stride=stride, bias=False),\n nn.BatchNorm1d(out_channels)\n )\n\n def conv_block(self, in_channels, out_channels, kernel_size, last=False, **kwargs):\n layers = [\n nn.Conv1d(in_channels, out_channels, kernel_size, **kwargs),\n nn.BatchNorm1d(out_channels),\n ]\n\n # Activate all but the last hidden layer so that the block\n # output and residual can be summed\n if last == False:\n layers.append(nn.ReLU(inplace=True))\n \n return nn.Sequential(*layers)\n\n def forward(self, x):\n residual = self.shortcut(x) if self.should_apply_shortcut else x\n out = self.blocks(x)\n out = self.activation(out + residual)\n return out\n\n @property\n def should_apply_shortcut(self):\n return self.in_channels != self.out_channels or self.stride != 1\n\n\nclass BasicBlock(ResidualBlock):\n def __init__(self, in_channels, out_channels, stride=1):\n super().__init__(in_channels, out_channels, stride)\n\n self.blocks = nn.Sequential(\n self.conv_block(in_channels, out_channels, 3, stride=stride, padding=1, bias=False),\n self.conv_block(out_channels, out_channels, 3, last=True, padding=1, bias=False)\n )\n\n\nclass BottleneckBlock(ResidualBlock):\n def __init__(self, in_channels, out_channels, reduction=4, stride=1):\n super().__init__(in_channels, out_channels, stride)\n\n self.channels = out_channels // reduction\n\n self.blocks = nn.Sequential(\n self.conv_block(in_channels, self.channels, 1, bias=False),\n self.conv_block(self.channels, self.channels, 3, stride=stride, padding=1, bias=False), # Downsample here as per line 107 https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py\n self.conv_block(self.channels, out_channels, 1, last=True, bias=False)\n )\n\n\nclass ResNet(nn.Module):\n def __init__(self, c):\n super(ResNet, self).__init__()\n self.in_channels = c.channels[0]\n\n # Feature extractor layer\n self.conv_block = nn.Sequential(\n nn.Conv1d(1, self.in_channels, c.kernel, padding=c.padding, stride=c.stride),\n nn.BatchNorm1d(self.in_channels),\n nn.ReLU(inplace=True),\n nn.MaxPool1d(2, stride=2, padding=1)\n )\n\n # Residual layers\n block = BottleneckBlock if c.block == 'bottleneck' else BasicBlock\n layers = []\n for i in range(c.n_layers):\n stride = 1 if i == 0 else 2\n layers.append(self._make_layer(block, c.channels[i], c.blocks[i], stride))\n self.layers = nn.ModuleList(layers)\n\n # Classifier\n self.decoder = nn.Sequential(\n nn.AdaptiveAvgPool1d(1), # Converts each channel into a single value\n nn.Flatten(1), # Concatenates channels\n nn.Linear(c.channels[-1], c.n_classes)\n )\n\n # Initialise weights and biases\n self._init_weights()\n\n def forward(self, x):\n x = x.unsqueeze(1)\n x = self.conv_block(x)\n for layer in self.layers:\n x = layer(x)\n x = self.decoder(x)\n return x\n\n def _make_layer(self, block, out_channels, n_blocks, stride):\n # First residual block in layer may downsample\n blocks = [block(self.in_channels, out_channels, stride=stride)]\n\n # In channels for next layer will be this layer's out channels\n self.in_channels = out_channels\n\n # Remaining residual blocks in layer\n for _ in range(1, n_blocks):\n blocks.append(block(self.in_channels, out_channels))\n\n return nn.Sequential(*blocks)\n \n def _init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', 
nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm1d)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\n# def main():\n# config = get_config('config-resnet.yaml')\n# model = ResNet(config.resnet)\n# summary(model, input_size=(64, 12048))\n\n\n# if __name__ == \"__main__\":\n# main()\n","repo_name":"comprna/riser","sub_path":"riser/nets/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"80"} +{"seq_id":"26107614250","text":"# encoding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import ExtractorError\n\n\nclass SohuIE(InfoExtractor):\n _VALID_URL = r'https?://(?Pmy\\.)?tv\\.sohu\\.com/.+?/(?(mytv)|n)(?P\\d+)\\.shtml.*?'\n\n _TEST = {\n 'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',\n 'md5': 'bde8d9a6ffd82c63a1eefaef4eeefec7',\n 'info_dict': {\n 'id': '382479172',\n 'ext': 'mp4',\n 'title': 'MV:Far East Movement《The Illest》',\n },\n 'skip': 'Only available from China',\n }\n\n def _real_extract(self, url):\n\n def _fetch_data(vid_id, mytv=False):\n if mytv:\n base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='\n else:\n base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='\n data_url = base_data_url + str(vid_id)\n data_json = self._download_webpage(\n data_url, video_id,\n note='Downloading JSON data for ' + str(vid_id))\n return json.loads(data_json)\n\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n mytv = mobj.group('mytv') is not None\n\n webpage = self._download_webpage(url, video_id)\n raw_title = self._html_search_regex(r'(?s)(.+?)',\n webpage, 'video title')\n title = raw_title.partition('-')[0].strip()\n\n vid = self._html_search_regex(r'var vid ?= ?[\"\\'](\\d+)[\"\\']', webpage,\n 'video path')\n data = _fetch_data(vid, mytv)\n\n QUALITIES = ('ori', 'super', 'high', 'nor')\n vid_ids = [data['data'][q + 'Vid']\n for q in QUALITIES\n if data['data'][q + 'Vid'] != 0]\n if not vid_ids:\n raise ExtractorError('No formats available for this video')\n\n # For now, we just pick the highest available quality\n vid_id = vid_ids[-1]\n\n format_data = data if vid == vid_id else _fetch_data(vid_id, mytv)\n part_count = format_data['data']['totalBlocks']\n allot = format_data['allot']\n prot = format_data['prot']\n clipsURL = format_data['data']['clipsURL']\n su = format_data['data']['su']\n\n playlist = []\n for i in range(part_count):\n part_url = ('http://%s/?prot=%s&file=%s&new=%s' %\n (allot, prot, clipsURL[i], su[i]))\n part_str = self._download_webpage(\n part_url, video_id,\n note='Downloading part %d of %d' % (i + 1, part_count))\n\n part_info = part_str.split('|')\n video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])\n\n video_info = {\n 'id': '%s_part%02d' % (video_id, i + 1),\n 'title': title,\n 'url': video_url,\n 'ext': 'mp4',\n }\n playlist.append(video_info)\n\n if len(playlist) == 1:\n info = playlist[0]\n info['id'] = video_id\n else:\n info = {\n '_type': 'playlist',\n 'entries': playlist,\n 'id': video_id,\n }\n\n return info\n","repo_name":"vivekaxl/LexisNexis","sub_path":"ExtractFeatures/Data/rameshkopparapu/sohu.py","file_name":"sohu.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"80"} +{"seq_id":"21468760355","text":"\"\"\"Extracts details about trades.\"\"\"\n\nimport logging\nfrom decimal 
import Decimal\nfrom typing import Tuple\n\nfrom ibkr_report.definitions import (\n AssetCategory,\n Field,\n ReportOptions,\n RowData,\n TradeDetails,\n)\nfrom ibkr_report.exchangerates import ExchangeRates\nfrom ibkr_report.tools import (\n add_years,\n date_without_time,\n decimal_cleanup,\n get_date,\n)\n\nlog = logging.getLogger(__name__)\n\n\nclass Trade:\n \"\"\"Trade which might be related to several ClosedLot rows.\"\"\"\n\n fee: Decimal = Decimal(0)\n closed_quantity: Decimal = Decimal(0)\n total_selling_price: Decimal = Decimal(0)\n data: RowData\n options: ReportOptions\n rates: ExchangeRates\n\n def __init__(\n self, items: Tuple[str, ...], options: ReportOptions, rates: ExchangeRates\n ) -> None:\n \"\"\"Initializes the Trade and calculates the total selling price from it.\"\"\"\n self.options = options\n self.rates = rates\n self.data = self._row_data(items)\n\n fee = decimal_cleanup(items[self.options.fields[Field.COMMISSION_AND_FEES]])\n self.fee = fee / self.data.rate\n\n # Sold stocks have a negative value in the \"Quantity\" column\n if self.data.quantity < Decimal(0):\n proceeds = decimal_cleanup(items[self.options.fields[Field.PROCEEDS]])\n self.total_selling_price = proceeds / self.data.rate\n log.debug(\n 'Trade: \"%s\" \"%s\" %.2f',\n self.data.date_str,\n self.data.symbol,\n self.data.quantity,\n )\n\n def details_from_closed_lot(self, items: Tuple[str, ...]) -> TradeDetails:\n \"\"\"Calculates the realized gains or losses from the ClosedLot related to the Trade.\"\"\"\n lot_data = self._row_data(items)\n self._validate_lot(lot_data)\n\n sell_date = date_without_time(self.data.date_str)\n unit_sell_price = self.data.price_per_share\n buy_date = date_without_time(lot_data.date_str)\n unit_buy_price = lot_data.price_per_share\n\n # Swap if closing a short position\n if lot_data.quantity < Decimal(0):\n sell_date, buy_date = buy_date, sell_date\n unit_sell_price, unit_buy_price = unit_buy_price, unit_sell_price\n\n # One option represents 100 shares of the underlying stock\n multiplier = (\n 100\n if items[self.options.fields[Field.ASSET_CATEGORY]] == AssetCategory.OPTIONS\n else 1\n )\n lot_sell_price = abs(lot_data.quantity) * unit_sell_price * multiplier\n lot_buy_price = abs(lot_data.quantity) * unit_buy_price * multiplier\n lot_fee = lot_data.quantity * self.fee / self.data.quantity\n realized = lot_sell_price - lot_buy_price - lot_fee\n if self.options.deemed_acquisition_cost:\n deemed_profit = self.deemed_profit(lot_sell_price, buy_date, sell_date)\n realized = min(realized, deemed_profit)\n\n log.info(\n \"Symbol: %s, Quantity: %.2f, Buy date: %s, Sell date: %s, \"\n \"Selling price: %.2f, Gains/Losses: %.2f\",\n lot_data.symbol,\n abs(lot_data.quantity),\n buy_date,\n sell_date,\n lot_sell_price,\n realized,\n )\n self.closed_quantity += lot_data.quantity\n if self.closed_quantity + self.data.quantity == Decimal(0):\n log.debug(\"All lots closed\")\n return TradeDetails(\n symbol=lot_data.symbol,\n quantity=abs(lot_data.quantity),\n buy_date=buy_date,\n sell_date=sell_date,\n price=lot_sell_price,\n realized=realized,\n )\n\n def _row_data(self, items: Tuple[str, ...]) -> RowData:\n symbol = items[self.options.fields[Field.SYMBOL]]\n date_str = items[self.options.fields[Field.DATE_TIME]]\n rate = self.rates.get_rate(\n currency_from=self.options.report_currency,\n currency_to=items[self.options.fields[Field.CURRENCY]],\n date_str=date_str,\n )\n original_price_per_share = items[self.options.fields[Field.TRANSACTION_PRICE]]\n price_per_share = 
decimal_cleanup(original_price_per_share) / rate\n quantity = decimal_cleanup(items[self.options.fields[Field.QUANTITY]])\n return RowData(symbol, date_str, rate, price_per_share, quantity)\n\n def _validate_lot(self, lot_data: RowData) -> None:\n error_msg = \"\"\n\n if self.data.symbol != lot_data.symbol:\n error_msg = (\n f\"Symbol mismatch! Date: {lot_data.date_str}, \"\n f\"Trade: {self.data.symbol}, ClosedLot: {lot_data.symbol}\"\n )\n elif abs(self.data.quantity + lot_data.quantity) > abs(self.data.quantity):\n error_msg = (\n 'Invalid data. \"Trade\" and \"ClosedLot\" quantities do not match. '\n f\"Date: {lot_data.date_str}, Symbol: {lot_data.symbol}\"\n )\n if error_msg:\n log.debug(lot_data)\n raise ValueError(error_msg)\n\n @staticmethod\n def deemed_profit(sell_price: Decimal, buy_date: str, sell_date: str) -> Decimal:\n \"\"\"If you have owned the shares you sell for less than 10 years, the deemed\n acquisition cost is 20% of the selling price of the shares.\n If you have owned the shares you sell for at least 10 years, the deemed\n acquisition cost is 40% of the selling price of the shares.\n\n https://www.vero.fi/en/individuals/property/investments/selling-shares/\n \"\"\"\n multiplier = Decimal(0.8)\n if get_date(buy_date) <= add_years(get_date(sell_date), -10):\n multiplier = Decimal(0.6)\n return multiplier * sell_price\n","repo_name":"oittaa/ibkr-report-parser","sub_path":"ibkr_report/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"80"} +{"seq_id":"37490661285","text":"import frappe\nfrom frappe import auth\nfrom frappe import _\n\n\n@frappe.whitelist()\ndef get_sales_invoice(name) :\n\n try :\n doc = frappe.get_doc(\"Sales Invoice\",name)\n # doc = frappe.get_last_doc(\"Sales Invoice\", filters={\"name\": name}, order_by=\"name desc\")\n # sales_invoice=frappe.db.sql(f\"\"\" SELECT name \"\"\")\n except Exception as e :\n doc = None\n frappe.throw(_(\" Sales Invoice Doctype Not Found\"))\n return doc\n\n\n@frappe.whitelist()\ndef get_sales_order(name) :\n\n try :\n doc = frappe.get_doc(\"Sales Order\",name)\n except Exception as e :\n doc = None\n frappe.throw(_(\" Sales Order Doctype Not Found\"))\n return doc\n\n@frappe.whitelist()\ndef get_customer_shipment(name) :\n\n try :\n doc = frappe.get_doc(\"Customer Shipment\",name)\n except Exception as e :\n doc = None\n frappe.throw(_(\"Customer Shipment Doctype Not Found\"))\n return doc\n\n# to get get catalogs that author made\n@frappe.whitelist(allow_guest=True)\ndef get_author_catalog(author) :\n\n try :\n catalogs=frappe.db.sql(f\"\"\" SELECT name, status FROM `tabCatalog` WHERE author='{author}' \"\"\",as_dict=True)\n except Exception as e :\n catalogs = None\n frappe.throw(_(\"Catalogs Not Found\"))\n return catalogs\n\n\n@frappe.whitelist( allow_guest=True)\ndef login(usr, pwd):\n try:\n login_manager = frappe.auth.LoginManager()\n login_manager.authenticate(user=usr, pwd=pwd)\n login_manager.post_login()\n except frappe.exceptions.AuthenticationError:\n frappe.clear_messages()\n frappe.local.response[\"message\"] = {\n \"success_key\":0,\n \"message\":\"Authentication Error!\"\n }\n\n return\n\n api_generate = generate_keys(frappe.session.user)\n user = frappe.get_doc('User', frappe.session.user)\n\n frappe.response[\"message\"] = {\n \"success_key\":1,\n \"message\":\"Authentication success\",\n \"sid\":frappe.session.sid,\n \"api_key\":user.api_key,\n \"api_secret\":api_generate,\n 
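# generate_keys() creates a fresh api_secret on each login and saves it on the\n        # User doc, so the client must store this value; only api_key is reused.\n        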
\"username\":user.username,\n \"email\":user.email\n }\n\n\n\ndef generate_keys(user):\n user_details = frappe.get_doc('User', user)\n api_secret = frappe.generate_hash(length=15)\n\n if not user_details.api_key:\n api_key = frappe.generate_hash(length=15)\n user_details.api_key = api_key\n\n user_details.api_secret = api_secret\n user_details.save()\n\n return api_secret","repo_name":"ahmedaksam78/erpnext","sub_path":"erpnext/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"27784958459","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom grabber import get_links, get_links_from_text\nfrom concurrent.futures import ProcessPoolExecutor\nfrom threading import active_count\nfrom sys import argv\nfrom time import sleep\nfrom blacklist import blacklisted_terms\nimport json\nimport requests\nimport redis\n\nmax_threads=10\nsleep_time = 0\nfile_type=\".mp3\"\nquery_string = 'intitle:\"index of\" +\"last modified\" +\"parent directory\"+ ('+file_type+') %s'\nindexof_identifiers=[\"Index of\", \"Name\", \"Last modified\", \"Size\", \"Description\", \"Parent Directory\"]\nverbose=True\nthreaded=True\n\ndef db_connect():\n return redis.StrictRedis(host=\"2.lp1.eu\", port=6379, db=0)\n\ndef usage():\n print(\"usage : %s query\" %argv[0])\n return 1\n\ndef is_indexof(text):\n for identifier in indexof_identifiers:\n if identifier not in text:\n if verbose:\n print(\"\\t[Identifier %s not found]\" %(identifier))\n return False\n return True\n\ndef fetch_file_links(link):\n r = requests.get(\"http://\"+link)\n if verbose:\n print(\"fetching files on %s\" %link)\n if not is_indexof(r.text):\n if verbose:\n print(\"\\t[%s not an open dirlisting]\" %link)\n return []\n links = []\n splits = r.text.split('a href=\"')\n for split in splits:\n find = split.find('\"')\n if split.find('<') < find:\n find = split.find('<')\n if len(split[:find]) > 0:\n links.append(split[:find].replace(\"&prev=search\", \"\"))\n return links\n\ndef get_matching_links(origin_url, links, query):\n matching_links = []\n query_terms = query.split(\" \")\n for link in links:\n match = 0\n if type(link) == str and link[-4:].lower() == file_type:\n for term in query_terms:\n try:\n if term.replace(\" \", \"\").lower() in link.lower():\n if verbose:\n print(\"\\t\\t[\\033[0;32m%s FOUND in %s match +1\\033[0;0m]\" %(term, link)) \n match += 1\n except UnicodeEncodeError as e:\n print(e)\n if match == len(query_terms):\n matching_links.append(origin_url+link)\n return matching_links\n\ndef is_blacklisted(link):\n for term in blacklisted_terms:\n if term in link:\n return True\n return False\n\ndef handle_link(query, link):\n if not is_blacklisted(link):\n file_links = fetch_file_links(link)\n matching_links = get_matching_links(link, file_links, query)\n for link in matching_links:\n save_in_db(query, link)\n else:\n print(\"[%s] is blacklisted\" %link)\n \ndef save_in_db(query, link):\n r = db_connect()\n links_array = r.get(query)\n if links_array is None:\n links_array = \"[]\"\n else:\n links_array = links_array.decode(\"utf-8\")\n links_array = json.loads(links_array)\n if \"http://\"+link not in links_array:\n links_array.append(\"http://\"+link)\n r.set(query, json.dumps(links_array))\n print(\"Getting value in redis for %s : [%s]\" %(query, r.get(query)))\n return\n print(\"[%s was already in DB]\" %link)\n\ndef get_matching_weight(terms, text):\n weight = 0\n for term in terms:\n if 
term.lower() in text.lower():\n weight += 1\n return weight\n \ndef check_in_db(query):\n query_terms = query.split(\" \")\n r = db_connect()\n results = {}\n found_terms = 0\n for term in query_terms:\n for key in r.keys(\"*\"):\n if term in key.decode(\"utf-8\"):\n found_terms += 1\n try:\n link_list = json.loads(r.get(key).decode(\"utf-8\"))\n results[key.decode(\"utf-8\")] = link_list, get_matching_weight(query_terms, key.decode(\"utf-8\")) \n except Exception as e:\n print(e)\n return results, True\n return results, False\n\ndef fetch_links(query):\n query = query.replace(\"-\", \"\")\n links = get_links(query_string %query, 0)\n for link in links:\n if threaded:\n with ProcessPoolExecutor(max_workers=max_threads) as e:\n thread = [e.submit(handle_link, query, link)]\n sleep(sleep_time)\n else:\n handle_link(query, link)\n r = db_connect()\n if r.get(query) is None:\n return False, \"Query returned no results :(\"\n return True, r.get(query).decode(\"utf-8\")\n\nif __name__ == \"__main__\":\n # entry point: search for the query given on the command line\n if len(argv) < 2:\n exit(usage())\n print(fetch_links(\" \".join(argv[1:])))\n","repo_name":"lp1dev/google_links_grabber","sub_path":"music_finder.py","file_name":"music_finder.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"26814137462","text":"words = []\nnumber = int(input('Enter the number of words: '))\ni = 0\n\nwhile(i != number):\n word = input('Word: ')\n while ' ' in word:\n word = input('Enter a single word: ')\n else:\n words.append(word)\n i+=1\n\n\nfor word in words:\n print('%s, '%(word),end = '')\n\n","repo_name":"Omarlopezw/Algoritmo-y-estructura-de-datos-3-2023","sub_path":"Unidad_2_Python_Parte X2.3_Funciones_Argumentos/listOfWords.py","file_name":"listOfWords.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"4196149398","text":"import os\nimport numpy as np\n\n\ndef parse_vertex(s):\n tokens = s.split()\n return np.array([\n float(tokens[0]),\n float(tokens[1]),\n float(tokens[2]),\n float(tokens[3]),\n float(tokens[4])\n ])\n\n\ndef parse_face(s):\n ret = {}\n tokens = s.split()\n ret['index1'] = int(tokens[1])\n ret['index2'] = int(tokens[2])\n ret['index3'] = int(tokens[3])\n\n return ret\n\n\ndef parse_PLY():\n vertices = []\n face = []\n #path = os.path.join(\".\", \"Sphere.ply\", \"Sphere.ply\")\n path = os.path.join(\".\", \"bunny.ply\", \"bunny.ply\")\n with open(path, \"r\") as f:\n for i in range(13): # 13 header lines (10 for Sphere.ply)\n line = f.readline().strip(chr(10))\n\n lx = ly = lz = 100.0\n rx = ry = rz = -100.0\n for i in range(35947): # 35947 vertices (642 for Sphere.ply)\n line = f.readline().strip(chr(10))\n vertex = parse_vertex(line)\n vertices.append(vertex)\n lx = min(lx, vertex[0])\n rx = max(rx, vertex[0])\n ly = min(ly, vertex[1])\n ry = max(ry, vertex[1])\n lz = min(lz, vertex[2])\n rz = max(rz, vertex[2])\n\n for i in range(69451): # 69451 faces (1280 for Sphere.ply)\n line = f.readline().strip(chr(10))\n index = parse_face(line)\n face.append(index)\n\n print(lx, \", \", rx)\n print(ly, \", \", ry)\n print(lz, \", \", rz)\n print(\"center x : \", (lx + rx) / 2)\n print(\"center y : \", (ly + ry) / 2)\n print(\"center z : \", (lz + rz) / 2)\n return [vertices, face]\n","repo_name":"Nameresu/bvhtree","sub_path":"parserPLY.py","file_name":"parserPLY.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"11785981620","text":"from eunjeon import Mecab\n\n\ndef validate(msg):\n # 
print(len(msg.split()))\n # if len(msg.split()) == 1:\n # return ['PASS', 'ONEWORD', \"\"]\n\n \"\"\"\n Using eunjeon.Mecab, run wordpop() when the nouns and verbs that form the\n backbone of the input sentence outnumber the remaining morphemes.\n\n This makes the bot answer inputs only selectively, which keeps the chat\n window from being flooded.\n \"\"\"\n tagger = Mecab('/usr/local/lib/mecab/dic/mecab-ko-dic')\n Pos = tagger.pos(msg)\n numWord = 0 # verb count - taken from the number of VV tags after morpheme analysis.\n realword = [] # words that carry real meaning\n target = [\"NNG\", \"NNP\", \"NNB\", \"NNBC\", \"NP\", \"VV\", \"VA\", \"VX\", \"XSV\", \"XR\", \"MAG\",\n \"IC\"] # POS tag chart : https://bit.ly/2KOA1ua\n for i in Pos:\n if i[1] in target:\n print(i[0])\n numWord += 1\n realword.append(i[0]) # filtered so that only meaningful words are passed on.\n if Pos[-1][1] == \"SF\": # attach sentence punctuation only once, at the very end.\n realword.append(Pos[-1][0])\n\n numPos = len(Pos) # total number of morphemes\n print(Pos)\n print(numWord, numPos - numWord)\n if numWord >= (numPos - numWord): # when meaningful elements are at least half of the sentence (ratio is adjustable)\n return wordpop(realword)\n else:\n return [\"PASS\", numWord, numPos]\n\n\ndef wordpop(msg):\n res = []\n print(\"input : \", msg)\n\n for i in msg:\n res.append(i[0])\n print(res)\n return res\n","repo_name":"CXZ7720/Slackbot-Shorty","sub_path":"shorty.py","file_name":"shorty.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"15563927816","text":"\"\"\"add on_menu to categories\n\nRevision ID: 67c92b445434\nRevises: 91da931cbf24\nCreate Date: 2022-06-07 20:31:23.508474\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '67c92b445434'\ndown_revision = '91da931cbf24'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('categories', sa.Column('on_menu', sa.Boolean(), nullable=True))\n op.add_column('users', sa.Column('name', sa.Unicode(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('users', 'name')\n op.drop_column('categories', 'on_menu')\n # ### end Alembic commands ###\n","repo_name":"Jefferson682/delivery","sub_path":"migrations/versions/67c92b445434_add_on_menu_to_categories.py","file_name":"67c92b445434_add_on_menu_to_categories.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"69892501378","text":"# Copyright Kairos03 2018. 
All Rights Reserved.\n\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\nfrom data import process\n\n\nclass Dataset:\n def __init__(self, batch_size, data, label, is_shuffle=False, is_valid=False):\n self.data = data\n self.label = label\n\n self.valid_data = None\n self.valid_label = None\n\n self.data_size = self.data.shape[0]\n print(self.data_size)\n self.batch_size = batch_size\n # ceil, so an exactly divisible dataset does not produce an empty extra batch\n self.total_batch = int(np.ceil(self.data_size / self.batch_size))\n self.batch_cnt = 0\n\n self.is_shuffle = is_shuffle\n self.is_valid = is_valid\n\n if is_valid:\n self.data, self.valid_data, self.label, self.valid_label = train_test_split(self.data,\n self.label,\n test_size=0.33,\n random_state=486)\n\n self.data_size = self.data.shape[0]\n self.valid_size = self.valid_data.shape[0]\n\n self.total_batch = int(np.ceil(self.data_size / self.batch_size))\n self.valid_total_batch = int(np.ceil(self.valid_size / self.batch_size))\n\n def next_batch(self, seed, valid_set=False):\n\n if valid_set:\n data = self.valid_data\n label = self.valid_label\n total_batch = self.valid_total_batch\n else:\n data = self.data\n label = self.label\n total_batch = self.total_batch\n\n # shuffle data and label in step by reseeding before each shuffle\n if self.is_shuffle and self.batch_cnt == 0:\n np.random.seed(seed)\n np.random.shuffle(data)\n np.random.seed(seed)\n np.random.shuffle(label)\n\n start = self.batch_cnt * self.batch_size\n self.batch_cnt += 1\n\n if self.batch_cnt == total_batch:\n end = None\n else:\n end = self.batch_cnt * self.batch_size\n\n xs = data[start:end]\n ys = label[start:end]\n\n if self.batch_cnt >= total_batch:\n self.batch_cnt = 0\n\n return xs, ys\n\n # def get_test(self):\n # return self.test[:][0], self.test[:][1]\n\n\ndef get_dataset(batch_size, data, label, is_shuffle, is_valid):\n return Dataset(batch_size=batch_size, data=data, label=label, is_shuffle=is_shuffle, is_valid=is_valid)\n\n\nif __name__ == '__main__':\n\n # deprecated\n # x, y, idx = load_data()\n\n x, y = process.load_image_train_dataset()\n RANDOM_SEED = 128\n\n dd = get_dataset(500, x, y, is_shuffle=False, is_valid=True)\n\n print(dd.valid_data.shape)\n print(dd.data.shape)\n\n for e in range(2):\n for b in range(dd.total_batch):\n xss, yss = dd.next_batch(RANDOM_SEED, valid_set=False)\n print(e, b, xss.shape, yss.shape)\n\n for e in range(2):\n for b in range(dd.valid_total_batch):\n xss, yss = dd.next_batch(RANDOM_SEED, valid_set=True)\n print(e, b, xss.shape, yss.shape)\n","repo_name":"kairos03/RODC","sub_path":"data/data_input.py","file_name":"data_input.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"40827845611","text":"import cv2\nimport glob\nimport os \nfrom PIL import Image\n\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\n\nfrom videotransforms import video_transforms, volume_transforms\n\nclass MdfDataLoader(Dataset):\n def __init__(self, isize, nfr, path_li, transforms=None):\n # set self\n self.isize = isize\n self.nfr = nfr\n self.paths = path_li\n self.transforms = transforms\n self.mask_transforms = video_transforms.Compose([\n video_transforms.Resize((self.isize, self.isize)),\n volume_transforms.ClipToTensor(channel_nb=1)\n ])\n \n # Set index\n self.data_path_li, self.real_path_li, self.mask_path_li= self.path_reader(self.paths) #video path list\n nframe_li = self.count_frame(self.mask_path_li) #num of frame list\n div_nfr_li = [ i // 
self.nfr for i in nframe_li] #num of nfrsize list\n # div_nfr_li -> data index\n self.total_div_nfr = div_nfr_li\n for i in range(len(div_nfr_li)):\n if i != 0: self.total_div_nfr[i] += self.total_div_nfr[i-1]\n \n def path_reader(self, path_list):\n data_path = [line.rstrip() for line in open(path_list)]\n mask_path = []\n real_path = []\n for video in data_path:\n root = video.rsplit(\"/\", 1)[:-1]\n name = root[0].rsplit(\"/\", 1)[-1]\n mask_path.append( os.path.join(root[0], \"[Mask]\" + name + \".mp4\") )\n real_path.append( os.path.join(root[0], \"[Original]\" + name + \".mp4\") )\n return data_path, real_path, mask_path\n\n def count_frame(self, path):\n nframe_li = []\n for i, p in enumerate(path):\n cap = cv2.VideoCapture(p)\n nframe_li.append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n cap.release()\n return nframe_li\n \n def __getitem__(self, index):\n # get start point\n video_id, ff = self.get_first_frame(index)\n # read video data\n frsize_data = self.video_reader(self.data_path_li[video_id], ff)\n\n if \"Fake\" in self.data_path_li[video_id]:\n frsize_real = self.video_reader(self.real_path_li[video_id], ff)\n frsize_mask = self.video_reader(self.mask_path_li[video_id], ff, mask=True)\n transdata = frsize_data + frsize_real + frsize_mask\n \n if self.transforms: \n transdata = self.transforms(transdata)\n frsize_data, frsize_real, frsize_mask = torch.split(transdata, self.nfr, dim=1)\n frsize_lb = torch.ones(self.nfr)\n\n\n elif \"Original\" in self.data_path_li[video_id]:\n frsize_mask = torch.zeros((1, self.nfr, self.isize, self.isize))\n if self.transforms:\n frsize_data = self.transforms(frsize_data)\n frsize_real = frsize_data\n frsize_lb = torch.zeros(self.nfr)\n \n return frsize_data*2-1, frsize_real*2-1, torch.unsqueeze(frsize_mask[0],dim=0), frsize_lb\n #return frsize_data*2-1, frsize_real*2-1, frsize_mask, frsize_lb\n\n def __len__(self):\n return self.total_div_nfr[-1]\n \n def get_first_frame(self, index):\n for i, v in enumerate(self.total_div_nfr):\n if v >= index:\n if i==0: first_frame = (index-1) * self.nfr\n else: first_frame = (index-self.total_div_nfr[i-1] - 1 ) * self.nfr\n return i, first_frame\n \n\n def video_reader(self, video_path, ff, mask=False):\n \n data = []\n assert os.path.exists(video_path), \"File not exist == {}\".format(video_path)\n cap = cv2.VideoCapture(video_path)\n cap.set(cv2.CAP_PROP_POS_FRAMES, ff)\n #print(\"video == {}\".format(video_path))\n \n for i in range(self.nfr):\n ret, frame = cap.read()\n if mask == True:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.bitwise_not(frame)\n frame = cv2.Canny(frame, 100, 200)\n else:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(np.uint8(frame)) \n data.append(frame)\n cap.release()\n return data\n \n\nclass DataLoader(object):\n def __init__(self, args):\n # Initialozation\n self.args = args\n self.isize = args.isize\n self.nfr = self.args.nfr\n self.plist = {'train': args.tr_plist, 'test': args.ts_plist}\n\n # set transforms\n train_transforms = video_transforms.Compose([\n video_transforms.Resize((int(self.isize*1.1),int(self.isize*1.1))),\n video_transforms.RandomRotation(10),\n video_transforms.RandomCrop((self.args.isize, self.args.isize)),\n video_transforms.RandomHorizontalFlip(),\n #video_transforms.ColorJitter(),\n video_transforms.Resize((self.isize, self.isize)),\n volume_transforms.ClipToTensor()\n ])\n test_transforms = video_transforms.Compose([\n video_transforms.Resize((self.isize, self.isize)),\n 
volume_transforms.ClipToTensor()\n ])\n self.transforms = {'train': train_transforms, 'test':test_transforms}\n \n def load_data(self):\n \n print(\"load Data\")\n splits = ['train', 'test']\n shuffle = {'train': True, 'test': True}\n\n # dataset\n dataset = {}\n loader = lambda x: MdfDataLoader(self.isize, self.nfr, \n self.plist[x], transforms=self.transforms[x])\n\n dataset['train'] = loader('train')\n dataset['test'] = loader('test')\n \n # dataloader\n dataloader = { x: torch.utils.data.DataLoader( \n dataset=dataset[x],\n batch_size = self.args.batchsize,\n drop_last=True,\n shuffle=shuffle[x],\n num_workers=self.args.workers\n )\n for x in splits }\n return dataloader\n \n\n","repo_name":"umaionigiri/vfd_gan","sub_path":"lib/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"39891042439","text":"def test():\n t=int(input())\n for _ in range(0,t):\n mn=input().split()\n m=int(mn[0])\n n=int(mn[1])\n res=[]\n for i in range(m,n+1):\n if isPrime(i):\n res.append(i)\n res=' '.join(list(map(str,res)))\n print(res)\n\ndef isPrime(n):\n if n<=1:\n return False\n else:\n for i in range(2,n):\n if n%i==0:\n return False\n return True\n\ntest()","repo_name":"AdamZhouSE/pythonHomework","sub_path":"Code/CodeRecords/2194/60698/273761.py","file_name":"273761.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"30640880254","text":"import numpy as np\n\nclass MCMCSet(object):\n \"\"\"Class for storage and management of multiple MCMC objects representing\n repeated runs of the MCMC.\"\"\"\n\n def __init__(self, name):\n \"\"\"Create the MCMCSet object and assign a name.\n\n Assigns a name to the set and initializes an empty list\n of MCMC objects.\n\n Parameters\n ----------\n name : string\n The string describing the model/name/data/mcmc parameters that\n is used to identify the set of chains.\n \"\"\"\n\n self.name = name\n \"\"\"The name associated with this set of chains (e.g., the model\n name, fit parameters, etc.)\"\"\"\n self.chains = []\n \"\"\"The list of chains in the MCMC set.\"\"\"\n self.pooled_positions = None\n \"\"\"numpy array of the pooled positions (if ``pool_positions`` is\n called).\"\"\"\n\n def add_chain(self, chain):\n \"\"\"Add an MCMC chain to the set.\"\"\"\n self.chains.append(chain)\n\n def prune_all_chains(self, burn, thin=1):\n \"\"\"Iterates over all the chains and prunes each one with the\n specified arguments.\n \"\"\"\n for chain in self.chains:\n chain.prune(burn, thin)\n\n # If any chains are empty after pruning (i.e., there were no accepts)\n # then remove them from the list\n for chain in self.chains:\n if len(chain.positions) == 0:\n # TODO: Should this be an exception?\n print(\"WARNING: Chain had no steps after pruning \" \\\n \"(probably because no moves were accepted) \" \\\n \"and is being removed.\")\n self.chains.remove(chain)\n\n def all_pruned(self):\n \"\"\"Indicates whether all chains have been pruned already.\n \"\"\"\n\n if not self.chains:\n raise Exception(\"There are no chains in the MCMCSet.\")\n\n for chain in self.chains:\n if not chain.pruned:\n return False\n return True\n\n def pool_chains(self):\n \"\"\"Pool the chains into a single set of pooled positions stored along\n with the MCMCSet.\n \"\"\"\n\n if not self.chains:\n raise Exception(\"There are no chains in the MCMCSet.\")\n\n # First, count the 
total number of steps after pruning and make sure\n # all chains have been pruned.\n total_positions = 0\n for chain in self.chains:\n if not chain.pruned:\n raise Exception(\"The chains have not yet been pruned.\")\n else:\n total_positions += len(chain.positions)\n\n # Allocate enough space for the pooled positions\n self.pooled_positions = np.zeros((total_positions,\n self.chains[0].num_estimate))\n\n # Iterate again, filling in the pooled positions\n start_index = 0\n for chain in self.chains:\n last_index = start_index + len(chain.positions)\n self.pooled_positions[start_index:last_index,:] = chain.positions\n start_index = last_index\n\n def get_sample_position(self):\n \"\"\"Returns a position sampled at random from the pooled chains.\n\n Requires that the chains have already been pooled. \n \"\"\"\n if not self.chains:\n raise Exception(\"There are no chains in the MCMCSet.\")\n\n if self.pooled_positions is None:\n raise Exception(\"Cannot get a sample position until the chains \" \\\n \"have been pooled.\")\n\n if len(self.pooled_positions) == 0:\n raise NoPositionsException('There are no positions in the combined '\n 'pool of positions.')\n\n rand_index = np.random.randint(len(self.pooled_positions))\n return self.pooled_positions[rand_index]\n\n def get_sample_simulation(self, observables=True):\n \"\"\"Uses the model in the first chain in the set to run a simulation for\n a randomly sampled position from the pooled chains.\n \"\"\"\n\n position = self.get_sample_position()\n return self.chains[0].simulate(position=position, observables=True)\n\n def initialize_and_pool(self, chains, burn, thin=1):\n \"\"\"Adds the chains to the MCMCSet and prunes and pools them.\"\"\"\n for chain in chains:\n self.add_chain(chain)\n\n self.prune_all_chains(burn, thin)\n self.pool_chains()\n\n def maximum_likelihood(self):\n \"\"\"Returns the maximum log likelihood (minimum negative log likelihood)\n from the set of chains, along with the position giving the maximum\n likelihood.\n \"\"\"\n if not self.chains:\n raise Exception(\"There are no chains in the MCMCSet.\")\n\n max_likelihood = np.inf\n max_likelihood_position = None\n for chain in self.chains:\n # Make sure the chain is not empty!\n if len(chain.likelihoods) > 0:\n chain_max_likelihood_index = np.nanargmin(chain.likelihoods)\n chain_max_likelihood = \\\n chain.likelihoods[chain_max_likelihood_index]\n if chain_max_likelihood < max_likelihood:\n max_likelihood = chain_max_likelihood\n max_likelihood_position = \\\n chain.positions[chain_max_likelihood_index]\n\n # Check if there are no positions\n if max_likelihood_position is None:\n raise NoPositionsException('The maximum likelihood could not be '\n 'determined because there are no accepted positions.')\n return (max_likelihood, max_likelihood_position)\n\n def maximum_posterior(self):\n \"\"\"Returns the maximum log posterior (minimum negative log posterior)\n from the set of chains, along with the position giving the maximum\n posterior.\n \"\"\"\n if not self.chains:\n raise Exception(\"There are no chains in the MCMCSet.\")\n\n max_posterior = np.inf\n max_posterior_position = None\n for chain in self.chains:\n # Make sure the chain is not empty!\n if len(chain.posteriors) > 0:\n chain_max_posterior_index = np.nanargmin(chain.posteriors)\n chain_max_posterior = \\\n chain.posteriors[chain_max_posterior_index]\n if chain_max_posterior < max_posterior:\n max_posterior = chain_max_posterior\n max_posterior_position = \\\n chain.positions[chain_max_posterior_index]\n\n # Check if 
there are no positions\n if max_posterior_position is None:\n raise NoPositionsException('The maximum posterior could not be determined '\n 'because there are no accepted positions.')\n\n return (max_posterior, max_posterior_position)\n\nclass NoPositionsException(Exception):\n pass\n","repo_name":"jmuhlich/bayessb","sub_path":"bayessb/multichain.py","file_name":"multichain.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"80"} +{"seq_id":"39996778269","text":"string = input()\nalist = list(map(int, string.lstrip(\"[\").rstrip(\"]\").split(\",\")))\nlength = len(alist)\nn = int(input())\nalist = sorted(alist)\nm = []\nfor i in range(length - 1):\n for j in range(i + 1, length):\n m.append(alist[j] - alist[i])\nm = sorted(m)\nprint(m[n - 1])\n\n\n","repo_name":"AdamZhouSE/pythonHomework","sub_path":"Code/CodeRecords/2603/60730/270498.py","file_name":"270498.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"30360345627","text":"from os.path import dirname, join, abspath\nimport numpy as np\nfrom pyrep import PyRep\nfrom pyrep.robots.arms.panda import Panda\nfrom pyrep.objects.dummy import Dummy\nfrom pyrep.objects.shape import Shape\nfrom pyrep.objects.proximity_sensor import ProximitySensor\nfrom pyrep.errors import ConfigurationPathError\nfrom pyrep.robots.end_effectors.panda_gripper import PandaGripper\n\nDIR_PATH = dirname(abspath(__file__))\nTTT_FILE = 'three_obstacles.ttt'\n\n\nclass Robot(object): # Robot structure\n def __init__(self, my_robot_arm, my_robot_gripper, my_robot_tip):\n self.arm = my_robot_arm\n self.gripper = my_robot_gripper\n self.tip = my_robot_tip\n self.pos = self.arm.get_position()\n\n\nclass InitTask(object): # Obstacle structure\n def __init__(self):\n self.initial_pos = Dummy('target0')\n self.final_pos = Dummy('target1')\n self.obstacle0 = Shape('Cylinder')\n self.obstacle1 = Shape('Cylinder0')\n self.obstacle2 = Shape('Cylinder1')\n self.sensor = ProximitySensor('Panda_sensor')\n\n\nclass Lists(object):\n def __init__(self):\n self.list_of_parameters = []\n self.list_of_rewards = []\n\n\nclass ThreeObstacles(object):\n def __init__(self, headless_mode: bool):\n self.pyrep = PyRep()\n self.pyrep.launch(join(DIR_PATH, TTT_FILE), headless=headless_mode)\n self.robot = Robot(Panda(), PandaGripper(), Dummy('Panda_tip'))\n self.task = InitTask()\n self.lists = Lists()\n\n def avoidance_with_waypoints(self, wp_params: np.array):\n waypoint1, waypoint2 = self.get_waypoints_esf(wp_params)\n\n # Trajectory definition\n tray = [self.task.initial_pos, waypoint1, waypoint2, self.task.final_pos]\n\n d_tray_1 = self.task.initial_pos.check_distance(waypoint1)\n d_tray_2 = waypoint1.check_distance(waypoint2)\n d_tray_3 = waypoint2.check_distance(self.task.final_pos)\n d_tray = d_tray_1 + d_tray_2 + d_tray_3\n\n # Trajectory execution\n self.pyrep.start()\n reward_long = - 4 * d_tray ** 2\n reward_dist = 0.0\n\n for pos in tray:\n try:\n path = self.robot.arm.get_linear_path(position=pos.get_position(),\n euler=[0.0, np.radians(180), 0.0])\n # Step the simulation and advance the agent along the path\n done = False\n while not done:\n done = path.step()\n self.pyrep.step()\n\n distance_obstacle0 = self.robot.gripper.check_distance(self.task.obstacle0)\n distance_obstacle1 = self.robot.gripper.check_distance(self.task.obstacle1)\n distance_obstacle2 = 
self.robot.gripper.check_distance(self.task.obstacle2)\n\n reward_dist -= (20 * np.exp(-300 * distance_obstacle0) +\n 20 * np.exp(-300 * distance_obstacle1) +\n 20 * np.exp(-300 * distance_obstacle2))\n except ConfigurationPathError:\n reward = -400.0\n self.pyrep.stop()\n self.lists.list_of_parameters.append(list(wp_params))\n self.lists.list_of_rewards.append(reward)\n return -reward\n\n reward = reward_long + reward_dist\n\n self.pyrep.stop()\n self.lists.list_of_parameters.append(list(wp_params))\n self.lists.list_of_rewards.append(reward)\n return -reward\n\n def shutdown(self):\n self.pyrep.shutdown() # Close the application\n\n def clean_lists(self):\n self.lists = Lists()\n\n def return_lists(self):\n return self.lists\n\n def get_waypoints_esf(self, wp_params: np.array):\n radio1 = wp_params[0]\n tita1 = wp_params[1]\n pos1_rel = np.array([radio1 * np.sin(tita1),\n radio1 * np.cos(tita1),\n 0])\n pos1_abs = pos1_rel + self.task.initial_pos.get_position()\n waypoint1 = Dummy.create()\n waypoint1.set_position(pos1_abs)\n\n radio2 = wp_params[2]\n tita2 = wp_params[3]\n pos2_rel = np.array([radio2 * np.sin(tita2),\n radio2 * np.cos(tita2),\n 0])\n pos2_abs = pos2_rel + pos1_abs\n waypoint2 = Dummy.create()\n waypoint2.set_position(pos2_abs)\n\n return waypoint1, waypoint2\n\n\ndef calc_distance(vector1: np.array, vector2: np.array):\n distance_3d = np.array(vector1 - vector2)\n distance = np.linalg.norm(distance_3d)\n return distance\n","repo_name":"deedaniel/TFM","sub_path":"three_obstacles/three_obstacles_fun.py","file_name":"three_obstacles_fun.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"10486953717","text":"\"\"\"\nDefines class Order, creating a list of sandwiches\nand its methods\n\"\"\"\n\nfrom sandwich import Sandwich\n\nclass Order:\n \"\"\"\n One object of class Order stores a number of Sandwich objects\n in a list\n \"\"\"\n def __init__(self):\n \"\"\"\n initializes a new Order object\n \"\"\"\n self.orderList = []\n\n def addSandwich(self, newSandwich):\n \"\"\"\n adds newSandwich to the Order\n \"\"\"\n self.orderList.append(newSandwich)\n\n def price(self):\n \"\"\"\n Adds the prices of all sandwich opjects in the order\n \"\"\"\n orderTotal = 0\n for item in self.orderList:\n orderTotal = orderTotal + item.getPrice()\n return orderTotal\n\n def __str__(self):\n \"\"\"\n returns a string containing all the sandwiches in the Order\n \"\"\"\n returnedString = \"\"\n for item in self.orderList:\n returnedString = returnedString + \"\\n\" + str(item)\n return returnedString\n\n# tests class Order\n\nif __name__ == \"__main__\":\n s1 = Sandwich(\"Joe\")\n s1.setMeat(\"steak\")\n s1.addCondiment(\"Lettuce\")\n print(s1)\n print(s1.getPrice())\n\n s2 = Sandwich(\"Mary\")\n s2.setCheese(\"cheddar\")\n s2.addCondiment(\"Mayo\")\n print(s2)\n print(s2.getPrice())\n\n\n s3 = Sandwich(\"Elizabeth\")\n s3.setBread(\"sourdough\")\n s3.setMeat(\"ham\")\n s3.setCheese(\"swiss\")\n s3.addCondiment(\"mayo\")\n s3.addCondiment(\"mustard\")\n s3.setToasted(True)\n print(s3)\n print(s3.getPrice())\n\n order = Order()\n print(order)\n order.addSandwich(s1)\n print(order)\n print(order.price())\n order.addSandwich(s2)\n print(order)\n print(order.price())\n order.addSandwich(s3)\n print(order)\n 
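# hypothetical sanity check, an illustrative assumption rather than part of the original test: the order total should equal the sum of the individual sandwich prices\n assert order.price() == s1.getPrice() + s2.getPrice() + s3.getPrice()\n 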
print(order.price())\n","repo_name":"jorgefpont/CSF021A","sub_path":"csf021aWork/module9/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"35778629711","text":"import cardgame\n\n# Initialization\nplayer_a_deck = cardgame.make_empty_deck()\nplayer_a_memo = cardgame.make_empty_deck()\n\nplayer_b_deck = cardgame.make_empty_deck()\nplayer_b_memo = cardgame.make_empty_deck()\n\nmain_deck = cardgame.make_shuffled_deck()\n\n# Deal the cards to the players\nturn = 0\nwhile main_deck.nb > 0:\n\tdrawed_card = main_deck.draw_above()\n\n\tif turn == 0:\n\t\tplayer_a_deck.add_card_above(drawed_card)\n\n\telse:\n\t\tplayer_b_deck.add_card_above(drawed_card)\n\n\tturn = (turn + 1) % 2\n\n# Display the drawn card\nprint(\"\\tDrawn Card :\")\nprint(drawed_card.display_symbol())\n\n# Display player A's deck and memo\nprint(f\"\\tA ({player_a_deck.nb}) :\")\nplayer_a_deck.show_deck_symbols()\nprint(f\"\\tA Memo ({player_a_memo.nb}) :\")\nplayer_a_memo.show_deck_symbols()\n\n# Display player B's deck and memo\nprint(f\"\\tB ({player_b_deck.nb}) :\")\nplayer_b_deck.show_deck_symbols()\nprint(f\"\\tB Memo ({player_b_memo.nb}) :\")\nplayer_b_memo.show_deck_symbols()\n# Display the main deck\nprint(f\"\\tMain ({main_deck.nb}) :\")\nmain_deck.show_deck_symbols()","repo_name":"rekky1aws/python-game-lib","sub_path":"cardgame_tester.py","file_name":"cardgame_tester.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"12508042176","text":"import pickle\nimport random\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\n\nclass ParallelLanguageDataset(Dataset):\n def __init__(self, data_path_1, data_path_2, tokenizer, max_len):\n self.data_1, self.data_2 = self.load_data(data_path_1, data_path_2)\n self.max_len = max_len\n self.tokenizer = tokenizer\n\n def __len__(self):\n return len(self.data_1)\n\n def __getitem__(self, item_idx):\n sent1 = str(self.data_1[item_idx])\n sent2 = str(self.data_2[item_idx])\n encoded_output_sent1 = self.tokenizer.encode_plus(\n sent1,\n add_special_tokens=True,\n max_length=self.max_len,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n return_tensors=\"pt\",\n )\n encoded_output_sent2 = self.tokenizer.encode_plus(\n sent2,\n add_special_tokens=True,\n max_length=self.max_len,\n pad_to_max_length=True,\n return_attention_mask=True,\n return_token_type_ids=True,\n return_tensors=\"pt\",\n )\n\n # invert the HuggingFace attention mask (1 = token, 0 = pad) into a boolean padding mask (True = pad)\n encoded_output_sent1[\"attention_mask\"][\n encoded_output_sent1[\"attention_mask\"] == 1\n ] = 2\n encoded_output_sent1[\"attention_mask\"][\n encoded_output_sent1[\"attention_mask\"] == 0\n ] = True\n encoded_output_sent1[\"attention_mask\"][\n encoded_output_sent1[\"attention_mask\"] == 2\n ] = False\n encoded_output_sent1[\"attention_mask\"] = encoded_output_sent1[\n \"attention_mask\"\n ].type(torch.bool)\n\n encoded_output_sent2[\"attention_mask\"][\n encoded_output_sent2[\"attention_mask\"] == 1\n ] = 2\n encoded_output_sent2[\"attention_mask\"][\n encoded_output_sent2[\"attention_mask\"] == 0\n ] = True\n encoded_output_sent2[\"attention_mask\"][\n encoded_output_sent2[\"attention_mask\"] == 2\n ] = False\n encoded_output_sent2[\"attention_mask\"] = encoded_output_sent2[\n \"attention_mask\"\n ].type(torch.bool)\n\n return_dict = {\n \"ids1\": 
encoded_output_sent1[\"input_ids\"].flatten(),\n \"ids2\": encoded_output_sent2[\"input_ids\"].flatten(),\n \"masks_sent1\": encoded_output_sent1[\"attention_mask\"].flatten(),\n \"masks_sent2\": encoded_output_sent2[\"attention_mask\"].flatten(),\n }\n return return_dict\n\n def load_data(self, data_path_1, data_path_2):\n with open(data_path_1, \"r\") as f:\n data_1 = f.read().splitlines()[1:200]\n with open(data_path_2, \"r\") as f:\n data_2 = f.read().splitlines()[1:200]\n return data_1, data_2\n","repo_name":"abhisheksgumadi/machine-translation-transformers","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"80"} +{"seq_id":"75010365698","text":"import threading\nimport time\n\ndef is_prime_num(my_num):\n if my_num <= 1:\n return False\n if my_num <= 3:\n return True\n if my_num % 2 == 0:\n return False\n\n for divisor in range(3, int((my_num**0.5)+1), 2):\n if my_num % divisor == 0:\n return False\n return True\n\n\ndef find_prime_in_range(thread_num, start, end, output_prime_numbers):\n prime_numbers = []\n for num in range(start, end):\n if is_prime_num(num):\n prime_numbers.append(num)\n # using delay to see the progress of multiprocessing\n # print(f\"\\nThread # {thread_num+1} : {num}\")\n # #time.sleep(1)\n output_prime_numbers.extend(prime_numbers)\n \n # Without the progress\n # prime_numbers = [num for num in range(start, end) if is_prime_num(num)]\n # output_prime_numbers.extend(prime_numbers)\n\n\ndef main():\n threads = []\n output_prime_numbers = []\n num_of_threads = 4\n start_num = 1\n end_num = 100000\n chunk_size_per_thread = (end_num - start_num) // num_of_threads\n\n start_time = time.time() # start time record\n\n for i in range(num_of_threads):\n thread_start = start_num + i * chunk_size_per_thread\n thread_end = thread_start + chunk_size_per_thread\n thread = threading.Thread(target=find_prime_in_range, args=(i, thread_start, thread_end, output_prime_numbers))\n threads.append(thread)\n thread.start()\n\n # main thread waits for all the sub-thread\n for thread in threads:\n thread.join()\n\n end_time = time.time() # end time record\n execution_time = end_time - start_time\n\n print(\"\\n\\nArray of prime numbers:\\n\", sorted(output_prime_numbers), end=\"\\n\\n\")\n print(f\"Execution time: {execution_time} seconds\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"PeezzaPy/prime-num-threading","sub_path":"threading_prime.py","file_name":"threading_prime.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"31433787259","text":"\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\ndef main():\n\n project = '/Users/deirdre/Documents/VA-ML/Project/EMS-Prediction/' # path to main directory\n file_name = 'data/Myo-Video-accuracy.csv'\n watch_data = pd.read_csv(project + file_name, header=0, sep=',', index_col=False)\n predictions = np.array(watch_data['pred'])\n true_clasess = np.array(watch_data['gt'])\n freq = {}\n\n # loop through predicted classes\n u_classes = np.unique(true_clasess)\n for pc in u_classes:\n\n idx = np.where(predictions == pc)\n true_class = true_clasess[idx]\n freq[pc] = {}\n\n # find true classes dist\n for tc in u_classes:\n jdx = np.where(true_class == tc)\n freq[pc][tc] = len(jdx[0])\n # freq[pc][tc] = int(len(jdx[0]) / 2)\n # freq[pc][tc] = int(len(jdx[0]) / 3)\n\n 
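# for illustration, freq maps predicted class -> {true class: count}; with hypothetical values it could look like {0: {0: 41, 1: 3}, 1: {0: 5, 1: 38}}\n 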
print(freq)\n\n\nmain()\n","repo_name":"dmscul11/EMS-Prediction","sub_path":"code/count-freq-predictions.py","file_name":"count-freq-predictions.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"29748125744","text":"import numpy as np\nimport uproot\nfrom tqdm import tqdm\nfrom src.invariant_mass import find_invariant_mass\n\nfrom src.selection_rule_jpsi import selection_rule_iterator\nfrom src.selection_rule_jpsi import assign_kaon_iterator\nfrom src.selection_rule_jpsi import charge_rule_iterator\nfrom src.selection_rule_jpsi import num_muons\nfrom src.two_body_resonance import find_resonance\n\nfrom utils.constants import MASS_MUON\n\n\n\n# build the branch names H1_PX ... H3_isMuon without exec\nlist_of_interesting_keys = []\nfor i in range(1, 4):\n for suffix in ('PX', 'PY', 'PZ', 'Charge', 'ProbK', 'ProbPi', 'isMuon'):\n list_of_interesting_keys.append(f'H{i}_{suffix}')\n\n\ndef two_body_muon_reconstruction(momentum_1 = [0, 0, 0],\n momentum_2 = [0, 0, 0],\n momentum_3 = [0, 0, 0],\n muon_prob = [1, 1, 1]): \n cleaned_momentum = []\n total_momentum_array = [momentum_1, momentum_2, momentum_3]\n\n for i in range(3):\n if muon_prob[i] == 1:\n cleaned_momentum.append(total_momentum_array[i])\n del total_momentum_array\n momentum_1 = cleaned_momentum[0]\n momentum_2 = cleaned_momentum[1]\n\n p1_x, p1_y, p1_z = momentum_1\n p2_x, p2_y, p2_z = momentum_2\n\n E1 = np.sqrt(p1_x**2 + p1_y**2 + p1_z**2 + MASS_MUON**2)\n E2 = np.sqrt(p2_x**2 + p2_y**2 + p2_z**2 + MASS_MUON**2)\n\n energy_squared = np.square(E1 + E2)\n total_momentum_x = p1_x + p2_x\n total_momentum_y = p1_y + p2_y\n total_momentum_z = p1_z + p2_z\n\n inv_mass_squared = energy_squared - (total_momentum_x**2 + total_momentum_y**2 + total_momentum_z**2)\n\n return np.sqrt(inv_mass_squared)\n\n\n\ndef read_file(path_name=\"\", MAX_EVENTS=5000, mode=1, keys = list_of_interesting_keys,\n selection=False, output=\"\", interest=None):\n # if interest == \"B+\" or interest == \"B-\" then we apply the selection rule\n if not path_name:\n path = 'data/' # set this to '' to run on the GitHub version\n else:\n path = path_name\n\n #temporarily \n event_counter = 0\n\n jpsi_reconstructed = []\n is_kaon_H1 = []\n is_kaon_H2 = []\n is_kaon_H3 = []\n\n print(f\" Selecting {interest} events\")\n\n print(\"Input data variables: \")\n if mode == 0:\n events_test = uproot.open(path+'example_file.root')\n print(\"Test mode\")\n trees = [events_test['PhaseSpaceTree;1']] # Test mode\n# return [[0], [0], [0], [0]]\n print(events_test.keys())\n\n elif mode == 1:\n events_sim = uproot.open(path+'PhaseSpaceSimulation.root')\n trees = [events_sim['PhaseSpaceTree']] # Simulation\n print(\"Phase space simulation\")\n print(events_sim.keys())\n elif mode == 2:\n # Magnet down data\n events_down = uproot.open(path+'B2HHH_MagnetDown.root')\n trees = [events_down['DecayTree']]\n print(\"Magnet down data\")\n print(events_down.keys())\n elif mode == 3:\n # Magnet up data\n events_up = uproot.open(path+'B2HHH_MagnetUp.root')\n trees = [events_up['DecayTree']]\n print(\"Magnet up data\")\n print(events_up.keys())\n elif mode == 4:\n events_up = uproot.open(path+'B2HHH_MagnetUp.root')\n events_down = 
uproot.open(path+'B2HHH_MagnetDown.root')\n trees = [events_down['DecayTree'], events_up['DecayTree']]\n print(\"Magnet up and down data\")\n print(events_down.keys())\n else:\n print(\"Mode not recognised\")\n return [[0], [0], [0], [0]]\n print(\"Variables read\")\n print()\n for tree in trees:\n # This outer loop is a technical loop of uproot over chunks of events\n for data in tree.iterate(keys):\n num_elem = MAX_EVENTS if MAX_EVENTS < len(data['H1_PZ']) else len(data['H1_PZ'])\n\n for i in tqdm(range(0, num_elem)):\n event_counter += 1\n if 0 < MAX_EVENTS and MAX_EVENTS < event_counter:\n break\n\n if (data['H1_PZ'][i] < 0) or (data['H2_PZ'][i] < 0) or (data['H3_PZ'][i] < 0):\n continue\n\n\n if selection:\n probabilities_itr = [[data['H1_ProbPi'][i], data['H1_ProbK'][i]],\n [data['H2_ProbPi'][i], data['H2_ProbK'][i]],\n [data['H3_ProbPi'][i], data['H3_ProbK'][i]]]\n \n charges_itr = [data['H1_Charge'][i], data['H2_Charge'][i], data['H3_Charge'][i]]\n \n muon_prob = [data['H1_isMuon'][i], data['H2_isMuon'][i], data['H3_isMuon'][i]]\n\n\n # we need exactly two muon candidates, otherwise skip the event\n if muon_prob.count(1) != 2:\n continue\n \n if selection_rule_iterator(probabilities_itr, charges_itr) is False:\n continue\n\n if assign_kaon_iterator(probabilities_itr, charges_itr, muon_prob) is False:\n #now we only have 2 muons and one kaon\n continue\n \n if interest == \"B+\" or interest == \"B-\":\n if interest == \"B+\":\n if charge_rule_iterator(charges_itr, +1) is False:\n continue\n elif interest == \"B-\":\n if charge_rule_iterator(charges_itr, -1) is False:\n continue\n #now we have 2 muons and one kaon and the correct charge (e.g. B+ -> K+ mu+ mu-)\n kaon_place = assign_kaon_iterator(probabilities_itr, charges_itr, muon_prob)\n is_kaon_H1.append(kaon_place[0])\n is_kaon_H2.append(kaon_place[1])\n is_kaon_H3.append(kaon_place[2])\n\n #now we have an array of [0, 0, 1] where there is a kaon in the third place\n # we need to find the index\n \n \n # Your invariant mass calculation should go here\n p1_array = [data['H1_PX'][i], data['H1_PY'][i], data['H1_PZ'][i]]\n p2_array = [data['H2_PX'][i], data['H2_PY'][i], data['H2_PZ'][i]]\n p3_array = [data['H3_PX'][i], data['H3_PY'][i], data['H3_PZ'][i]]\n\n jpsi_inv_mass = two_body_muon_reconstruction(p1_array, p2_array, p3_array, muon_prob)\n jpsi_reconstructed.append(jpsi_inv_mass)\n\n return jpsi_reconstructed\n \n\n\nif __name__ == \"__main__\":\n read_file()\n","repo_name":"aavashsubedi/labs_yr3","sub_path":"matter_antimatter/src/file_read_jpsi.py","file_name":"file_read_jpsi.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"8389334762","text":"import json\nfrom itertools import chain\nimport copy\n\nseason = \"1920\"\ndataLoc = \"../data/\" + season + \"/\"\n\nclass Player:\n def __init__(self, fName, lName, role):\n self.fName = fName\n self.lName = lName\n self.role = role\n\n def setTeam(self, team):\n self.team = team\n\n def getTeam(self):\n return self.team\n\n def getName(self):\n return self.fName + \" \" + self.lName \n\n def getRole(self):\n return self.role\n\n def getFileName(self):\n return dataLoc + \"players/\" + self.fName + '_' + self.lName + '.txt'\n\nclass Team:\n def __init__(self, name, id):\n self.name = name\n self.id = id\n self.players = []\n\n def add_player(self, player):\n self.players.append(player)\n\n\nteams_list = {}\nroles_list = {}\n\nwith open(dataLoc + \"allData/\" + season + \".txt\") as GD_json_file:\n 
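# assumption: the season file is a Fantasy-Premier-League-style dump exposing 'teams', 'element_types' (roles) and 'elements' (players), as used below\n 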
general_data = json.load(GD_json_file)\n teamsList = general_data['teams']\n\n\n roles = general_data['element_types']\n for role in roles:\n roles_list[role['id']] = role['singular_name_short']\n\n for team in teamsList:\n t = Team (team['name'], team['id'])\n teams_list[team['id']] = t\n \n playersList = general_data['elements']\n for player in playersList:\n p = Player(player['first_name'], player['second_name'], roles_list[player['element_type']])\n p.setTeam(teams_list[player['team']])\n teams_list[player['team']].add_player(p)\n\n#roundRanges = range(0, 38)\nroundRanges = chain(range(0, 29), range(38,47))\n\nroundsStr = \"\" \nfor i in copy.deepcopy(roundRanges):\n roundsStr = roundsStr + str(i+1) + \", \"\n\nwith open(dataLoc + \"out_rounds.csv\", \"w\") as outFile:\n with open(dataLoc + \"out_rounds_sum.csv\", \"w\") as outSumFile:\n with open(dataLoc + \"out_rounds_sum_from_here.csv\", \"w\") as outSumFromHereFile:\n \n outFile.write(\"team, role, player, \" + roundsStr + \"\\n\")\n outSumFile.write(\"team, role, player, \" + roundsStr + \"\\n\")\n outSumFromHereFile.write(\"team, role, player, 0, \" + roundsStr + \"\\n\")\n \n for key, value in teams_list.items():\n for p in value.players:\n rounds = {}\n with open(p.getFileName()) as player_json_file:\n \n total_games = 0\n player_data = json.load(player_json_file)\n for round in player_data['history']:\n if round['round'] in rounds:\n rounds[round['round']] = rounds[round['round']] + round['total_points']\n else:\n rounds[round['round']] = round['total_points']\n\n if round['minutes']>0:\n total_games = total_games+1 \n\n\n total = 0\n round_str = p.getTeam().name + \", \" + p.getRole() + ', ' + p.getName() + \", \"\n round_sum_str = p.getTeam().name + \", \" + p.getRole() + ', ' + p.getName() + \", \"\n \n for i in copy.deepcopy(roundRanges):\n if i+1 in rounds:\n total = total + rounds[i+1]\n round_str = round_str + str(rounds[i+1]) + ', '\n else:\n round_str = round_str + '-, '\n\n round_sum_str = round_sum_str + str(total) + ', '\n\n if total_games == 0:\n total_games = 1\n \n outFile.write(round_str + str(total) + \",\"+str(total_games)+\",\"+str(total/total_games)+\"\\n\")\n outSumFile.write(round_sum_str + \"\\n\")\n\n total_from_here_sum = p.getTeam().name + \", \" + p.getRole() + ', ' + p.getName() + \", \" + str(total) + \", \"\n\n for i in copy.deepcopy(roundRanges):\n if i+1 in rounds:\n total = total - rounds[i+1]\n\n total_from_here_sum = total_from_here_sum + str(total) + ', '\n \n outSumFromHereFile.write(total_from_here_sum + \"\\n\")\n \n #data = json.load(json_file)\n #x = 12\n #data['history']\n #x = 3","repo_name":"Avnam/fantaZ","sub_path":"code/toCsv.py","file_name":"toCsv.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"1063090165","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom lib.runner.logger import get_logger\n\nfrom lib.runner.registry import EVALUATOR \nimport json\nimport os\nimport cv2\n\nfrom .lane import LaneEval\n\nimport os.path as osp\nimport numpy as np\n#import cv2\nimport torchvision\nimport lib.utils_resa.transforms as tf\nfrom lib.Panel.DrivingAssist import DrivingAssistant\nfrom lib.Panel.LaneCorrect import LaneCorrector\ndef split_path(path):\n \"\"\"split path tree into list\"\"\"\n folders = []\n while True:\n path, folder = os.path.split(path)\n if folder != \"\":\n folders.insert(0, folder)\n else:\n if path != \"\":\n folders.insert(0, 
path)\n break\n return folders\n\n\n@EVALUATOR.register_module\nclass Tusimple(nn.Module):\n def __init__(self, cfg):\n super(Tusimple, self).__init__()\n self.cfg = cfg\n print(\"cfg = \",cfg) \n exp_dir = os.path.join(self.cfg.work_dir, \"output\")\n if not os.path.exists(exp_dir):\n os.mkdir(exp_dir)\n self.out_path = os.path.join(exp_dir, \"coord_output\")\n if not os.path.exists(self.out_path):\n os.mkdir(self.out_path)\n self.dump_to_json = [] \n self.thresh = cfg.evaluator.thresh\n self.logger = get_logger('resa')\n if cfg.view:\n self.view_dir = os.path.join(self.cfg.work_dir, 'vis')\n\n def evaluate_pred(self, dataset, seg_pred, exist_pred, batch):\n img_name = batch['meta']['img_name']\n img_path = batch['meta']['full_img_path']\n \n for b in range(len(seg_pred)):\n #print(\"seg.shape = \",seg_pred.shape)\n seg = seg_pred[b]\n \n exist = [1 if exist_pred[b, i] >\n 0.5 else 0 for i in range(self.cfg.num_classes-1)]\n lane_coords = dataset.probmap2lane(seg, exist, thresh = self.thresh)\n for i in range(len(lane_coords)):\n lane_coords[i] = sorted(\n lane_coords[i], key=lambda pair: pair[1])\n\n path_tree = split_path(img_name[b])\n save_dir, save_name = path_tree[-3:-1], path_tree[-1]\n save_dir = os.path.join(self.out_path, *save_dir)\n save_name = save_name[:-3] + \"lines.txt\"\n save_name = os.path.join(save_dir, save_name)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n with open(save_name, \"w\") as f:\n for l in lane_coords:\n for (x, y) in l:\n print(\"{} {}\".format(x, y), end=\" \", file=f)\n print(file=f)\n\n json_dict = {}\n json_dict['lanes'] = []\n json_dict['h_sample'] = []\n json_dict['raw_file'] = os.path.join(*path_tree[-4:])\n json_dict['run_time'] = 0\n for l in lane_coords:\n if len(l) == 0:\n continue\n json_dict['lanes'].append([])\n for (x, y) in l:\n json_dict['lanes'][-1].append(int(x))\n for (x, y) in lane_coords[0]:\n json_dict['h_sample'].append(y)\n self.dump_to_json.append(json.dumps(json_dict))\n if self.cfg.view:\n img = cv2.imread(img_path[b])\n new_img_name = img_name[b].replace('/', '_')\n save_dir = os.path.join(self.view_dir, new_img_name)\n dataset.view(img, lane_coords, save_dir)\n def demo_pred(self, path,seg_pred,exist_pred, batch, ori_img = 0):\n #print(\"demo_pred\")\n try:\n img_path = batch['meta']\n img_path = img_path.replace(\"\\\\\",\"/\")\n #print(\"img_path = \",img_path)\n img_name = img_path.split('/')[-1]\n except :\n pass\n #print(\"img_name = \",img_name)\n for b in range(len(seg_pred)):\n #print(\"seg.shape = \",seg_pred.shape)\n seg = seg_pred[b]\n #print(\"---1---\")\n exist = [1 if exist_pred[b, i] >\n 0.5 else 0 for i in range(self.cfg.num_classes-1)]\n #print(\"---2---\")\n demo_data = TuSimple_Demo(path)\n lane_coords = demo_data.demo_probmap2lane(seg, exist, thresh = self.thresh)\n #print(\"---3---\")\n\n for i in range(len(lane_coords)):\n lane_coords[i] = sorted(\n lane_coords[i], key=lambda pair: pair[1])\n #print(\"---4---\")\n \"\"\"\n \n path_tree = split_path(img_name[b])\n save_dir, save_name = path_tree[-3:-1], path_tree[-1]\n save_dir = os.path.join(self.out_path, *save_dir)\n save_name = save_name[:-3] + \"lines.txt\"\n save_name = os.path.join(save_dir, save_name)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n\n with open(save_name, \"w\") as f:\n for l in lane_coords:\n for (x, y) in l:\n print(\"{} {}\".format(x, y), end=\" \", file=f)\n print(file=f)\n \n json_dict = {}\n json_dict['lanes'] = []\n json_dict['h_sample'] = []\n 
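# build one TuSimple-format prediction record: 'lanes' holds the per-lane x coordinates sampled at the row positions collected in 'h_sample'\n 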
json_dict['raw_file'] = os.path.join(*path_tree[-4:])\n json_dict['run_time'] = 0\n\n for l in lane_coords:\n if len(l) == 0:\n continue\n json_dict['lanes'].append([])\n for (x, y) in l:\n json_dict['lanes'][-1].append(int(x))\n for (x, y) in lane_coords[0]:\n json_dict['h_sample'].append(y)\n self.dump_to_json.append(json.dumps(json_dict))\n \"\"\"\n \n #print(\"img_path = \",img_path)\n if type(ori_img) == type(int(0)):\n img = cv2.imread(img_path)\n new_img_name = img_name[b].replace('/', '_')\n save_dir = \"./inference/ResaNet_output_vx3/\"\n demo_data.view(img, lane_coords, img_path)\n else:\n img = ori_img\n img = demo_data.view(img, lane_coords, img)\n return img\n def evaluate(self, dataset, output, batch):\n seg_pred, exist_pred = output['seg'], output['exist']\n seg_pred = F.softmax(seg_pred, dim=1)\n \n seg_pred = seg_pred.detach().cpu().numpy()\n exist_pred = exist_pred.detach().cpu().numpy()\n self.evaluate_pred(dataset, seg_pred, exist_pred, batch)\n def demo(self,path, output, batch,ori_image = 0):\n seg_pred, exist_pred = output['seg'], output['exist']\n seg_pred = F.softmax(seg_pred, dim=1)\n \n seg_pred = seg_pred.detach().cpu().numpy()\n exist_pred = exist_pred.detach().cpu().numpy()\n if type(ori_image) == type(int(0)) :\n self.demo_pred(path,seg_pred,exist_pred, batch)\n else:\n img = self.demo_pred(path,seg_pred,exist_pred, batch, ori_img = ori_image)\n return img\n def summarize(self):\n best_acc = 0\n output_file = os.path.join(self.out_path, 'predict_test.json')\n with open(output_file, \"w+\") as f:\n for line in self.dump_to_json:\n print(line, end=\"\\n\", file=f)\n\n eval_result, acc = LaneEval.bench_one_submit(output_file,\n self.cfg.test_json_file)\n\n self.logger.info(eval_result)\n self.dump_to_json = []\n best_acc = max(acc, best_acc)\n return best_acc\n\n\n\n\nclass TuSimple_Demo():\n def __init__(self,path):\n #print(\"TuSimple_Demo\")\n self.path = path\n\n def fix_gap(self, coordinate):\n if any(x > 0 for x in coordinate):\n start = [i for i, x in enumerate(coordinate) if x > 0][0]\n end = [i for i, x in reversed(list(enumerate(coordinate))) if x > 0][0]\n lane = coordinate[start:end+1]\n if any(x < 0 for x in lane):\n gap_start = [i for i, x in enumerate(\n lane[:-1]) if x > 0 and lane[i+1] < 0]\n gap_end = [i+1 for i,\n x in enumerate(lane[:-1]) if x < 0 and lane[i+1] > 0]\n gap_id = [i for i, x in enumerate(lane) if x < 0]\n if len(gap_start) == 0 or len(gap_end) == 0:\n return coordinate\n for id in gap_id:\n for i in range(len(gap_start)):\n if i >= len(gap_end):\n return coordinate\n if id > gap_start[i] and id < gap_end[i]:\n gap_width = float(gap_end[i] - gap_start[i])\n lane[id] = int((id - gap_start[i]) / gap_width * lane[gap_end[i]] + (\n gap_end[i] - id) / gap_width * lane[gap_start[i]])\n if not all(x > 0 for x in lane):\n print(\"Gaps still exist!\")\n coordinate[start:end+1] = lane\n return coordinate\n\n def is_short(self, lane):\n start = [i for i, x in enumerate(lane) if x > 0]\n if not start:\n return 1\n else:\n return 0\n\n def get_lane(self, prob_map, y_px_gap, pts, thresh, resize_shape=None):\n \"\"\"\n Arguments:\n ----------\n prob_map: prob map for single lane, np array size (h, w)\n resize_shape: reshape size target, (H, W)\n \n Return:\n ----------\n coords: x coords bottom up every y_px_gap px, 0 for non-exist, in resized shape\n \"\"\"\n if resize_shape is None:\n resize_shape = prob_map.shape\n h, w = prob_map.shape\n H, W = resize_shape\n H -= 160\n \n coords = np.zeros(pts)\n coords[:] = -1.0\n for i in range(pts):\n y 
= int((H - 10 - i * y_px_gap) * h / H)\n if y < 0:\n break\n line = prob_map[y, :]\n id = np.argmax(line)\n if line[id] > thresh:\n coords[i] = int(id / w * W)\n if (coords > 0).sum() < 2:\n coords = np.zeros(pts)\n self.fix_gap(coords)\n #print(coords.shape)\n\n return coords\n\n def demo_probmap2lane(self, seg_pred, exist, resize_shape=(720, 1280), smooth=True, y_px_gap=10, pts=56, thresh=0.6):\n \"\"\"\n Arguments:\n ----------\n seg_pred: np.array size (5, h, w)\n resize_shape: reshape size target, (H, W)\n exist: list of existence, e.g. [0, 1, 1, 0]\n smooth: whether to smooth the probability or not\n y_px_gap: y pixel gap for sampling\n pts: how many points for one lane\n thresh: probability threshold\n \n Return:\n ----------\n coordinates: [x, y] list of lanes, e.g.: [ [[9, 569], [50, 549]] ,[[630, 569], [647, 549]] ]\n \"\"\"\n\n if resize_shape is None:\n resize_shape = seg_pred.shape[1:] # seg_pred (5, h, w)\n _, h, w = seg_pred.shape\n H, W = resize_shape\n coordinates = []\n #print(\"prp1\")\n for i in range(6):\n prob_map = seg_pred[i + 1]\n if smooth:\n prob_map = cv2.blur(prob_map, (9, 9), borderType=cv2.BORDER_REPLICATE)\n coords = self.get_lane(prob_map, y_px_gap, pts, thresh, resize_shape)\n if self.is_short(coords):\n continue\n coordinates.append(\n [[coords[j], H - 10 - j * y_px_gap] if coords[j] > 0 else [-1, H - 10 - j * y_px_gap] for j in\n range(pts)])\n \n #print(\"prp2\")\n if len(coordinates) == 0:\n coords = np.zeros(pts)\n coordinates.append(\n [[coords[j], H - 10 - j * y_px_gap] if coords[j] > 0 else [-1, H - 10 - j * y_px_gap] for j in\n range(pts)])\n #print(coordinates)\n #print(\"prp3\")\n return coordinates\n def view(self, img, coords,image_path = 0):\n \"\"\"\n img: the original image\n coords: the predicted lane lines, in the same format as TuSimple's lanes\n \"\"\"\n\n Self_Correction_System = LaneCorrector(coords) # Salmon's code, add another argument if you need\n coords = Self_Correction_System.Salmon_Fliter() # Salmon's code is written here\n\n center_x = 1280/2\n leftlane_standard = 1280/2\n coord_index = 0\n \n coords = self.sort_key(coords)\n\n\n #print(\"coords.len = \",len(coords))\n \"\"\"\n Identify which two lane lines bound the lane the car is currently driving in\n \"\"\"\n for coord in coords:\n total_pt_number = 0\n x_Sum = 0\n for i in range(56):\n if coord[i][0]>0 and coord[i][1]>=300:\n total_pt_number+=1\n x_Sum+= coord[i][0]\n if int(x_Sum/total_pt_number) <= leftlane_standard:\n coord_index+=1\n else:\n break\n #print(\"LeftRight\")\n \n \"\"\"\n left stores the left boundary line of the current driving lane, right stores the right one.\n Format: [lane line index, lane points]\n e.g.: [1, [[5, 160], [6, 170], [9, 180].......]]\n This is used to draw the center arrow\n \"\"\"\n\n left = [] # [lane_index(int),lane(list)]\n right = [] # [lane_index(int),lane(list)]\n if len(coords)==0: # no lane lines detected\n left.append(-1)\n right.append(-1)\n elif coord_index == 0: # no lane line on the left\n left.append(-1)\n right.append(0)\n right.append(coords[0])\n elif coord_index == len(coords) and len(coords) != 0: # no lane line on the right\n right.append(-1)\n left.append(len(coords)-1)\n left.append(coords[-1])\n else : # lane lines on both sides (only then can the arrow be drawn)\n right.append(coord_index)\n right.append(coords[coord_index])\n left.append(coord_index-1)\n left.append(coords[coord_index-1])\n #y_sam = [x for x in range(360,720,10)].reverse()\n #print(\"draw.....\")\n color = [\n (255,0,0),(0,255,0),(0,0,255),(255,255,0),(255,0,255),(255,255,0)\n ]\n color_index = 0\n lane_index = 0\n \n # this is where the dots or lines are drawn\n for coord in coords:\n if lane_index == left[0] or lane_index ==right[0]: # if this line bounds the driving lane, draw it as a line \n drawline = False\n for x, y in coord:\n if x <= 0 or y <= 0:\n continue\n x, y = int(x), int(y)\n if not 
drawline:\n x1 = x\n y1 = y\n drawline = True\n else:\n x2 = x\n y2 = y\n cv2.line(img,(x1,y1),(x2,y2),color[color_index],5)\n x1 = x2\n y1 = y2\n i+=1\n else: # not the driving lane, draw circles\n for x, y in coord:\n if x <= 0 or y <= 0:\n continue\n x, y = int(x), int(y)\n cv2.circle(img, (x, y), 4, color[color_index], 2) \n color_index += 1\n lane_index += 1\n \n\n # arr_end_x is the x coordinate of the bottom of the arrow, arr_start_x is the x coordinate of the arrow tip\n assistant = DrivingAssistant(img)\n arr_end_coor = assistant.CenterArrowedLine(left,right)\n assistant.KeepCenter(left,right,arr_end_coor)\n\n\n\n #print(img_save_name)\n if type(image_path) == type(\"string\"):\n cv2.imwrite(image_path,img)\n return assistant.road_image\n def SelfCorrection(self,coords):\n \"\"\"\n Arguments:\n ----------\n coords:coords of lane that haven't been corrected\n Return:\n ----------\n coords:coords of lane that have been corrected\n \"\"\"\n return coords\n def KeepCenter(self,img,left,right,arr_end_coor):\n \"\"\"\n Arguments:\n ----------\n left:The left lane coords\n right:The right lane coords\n arr_end_coor: the start of the arrow\n Return:\n ----------\n img:image that is added CenterPoint and KeepCenter Message\n \"\"\"\n \n # add your code here\n center_coor = int(1280/2-1)\n cv2.circle(img, (center_coor,720-10), 5, (0,0,255), -2)\n cv2.line(img,(center_coor,710),(center_coor,650),(255,0,255),3)\n cv2.line(img,(center_coor,710),arr_end_coor,(0,0,255),2)\n if right[0] == -1 or left[0]==-1: \n return img\n \"\"\"\n right = np.array(right[1])\n left = np.array(left[1])\n \n idx_r = np.where(right[:,0]>-1)[0][-1]\n idx_l = np.where(left[:,0]>-1)[0][-1]\n if idx_r > idx_l:\n idx_r = idx_l\n else:\n idx_l = idx_r\n lane_center = right[idx_r,0] + left[idx_l,0]\n \n lane_center = lane_center//2\n \n if lane_center - center_coor > 10:\n flag = 'KeepRight'\n elif lane_center - center_coor < -10:\n flag = 'KeepLeft'\n else:\n flag = 'In the center'\n \"\"\"\n lane_center = arr_end_coor[0]\n if lane_center - center_coor > 10:\n flag = 'KeepRight'\n elif lane_center - center_coor < -10:\n flag = 'KeepLeft'\n else:\n flag = 'In the center'\n\n # flag is the KeepCenter message\n if flag == 'KeepRight':\n cv2.arrowedLine(img,(10,30),(70,30),(255,255,255),3,tipLength=0.5)\n elif flag == 'KeepLeft':\n cv2.arrowedLine(img,(70,30),(10,30),(255,255,255),3,tipLength=0.5)\n else:\n img = cv2.circle(img, (40,30), 25, (255,255,255), 2)\n cv2.putText(img, flag, (center_coor-100,50), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),1, cv2.LINE_AA)\n #cv2.imshow('img',img)\n #cv2.waitKey(1)\n\n return img \n def sort_key(self, coords):\n \n for i in range(56):\n a = 0\n for coord in coords:\n if coord[i][0] >0 : # this lane has a point at row i\n a+=1\n if a == len(coords):\n break\n \n \n coords.sort(key = lambda x: x[i][0] ) \n #print(\"coords = \",coords)\n #print(\"coords len = \",len(coords))\n return coords\n","repo_name":"ecoyukino/YOLOP_RESA","sub_path":"lib/runner/evaluator/tusimple/tusimple.py","file_name":"tusimple.py","file_ext":"py","file_size_in_byte":18605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"27283386254","text":"import os\nimport json\nimport soundfile as sf\nimport argparse\nimport glob\nimport re\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser(\"parsing tac dataset\")\nparser.add_argument(\"--in_dir\", type=str)\nparser.add_argument(\"--out_json\", type=str)\n\n\ndef parse_dataset(in_dir, out_json):\n\n examples = []\n for n_mic_f in glob.glob(os.path.join(in_dir, \"*\")):\n for sample_dir in glob.glob(os.path.join(n_mic_f, 
\"*\")):\n c_ex = {}\n for wav in glob.glob(os.path.join(sample_dir, \"*.wav\")):\n\n source_or_mix = Path(wav).stem.split(\"_\")[0]\n n_mic = int(re.findall(\"\\d+\", Path(wav).stem.split(\"_\")[-1])[0])\n length = len(sf.SoundFile(wav))\n\n if n_mic not in c_ex.keys():\n c_ex[n_mic] = {source_or_mix: wav, \"length\": length}\n else:\n assert c_ex[n_mic][\"length\"] == length\n c_ex[n_mic][source_or_mix] = wav\n examples.append(c_ex)\n\n os.makedirs(Path(out_json).parent, exist_ok=True)\n\n with open(out_json, \"w\") as f:\n json.dump(examples, f, indent=4)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n parse_dataset(args.in_dir, args.out_json)\n","repo_name":"asteroid-team/asteroid","sub_path":"egs/TAC/local/parse_data.py","file_name":"parse_data.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":1961,"dataset":"github-code","pt":"80"} +{"seq_id":"4208717075","text":"import random\n\nfrom players import player\nfrom properties import property\n\nclass event:\n def __init__(self, name, ID):\n self.name = name\n self.ID = ID\n \n def __repr__(self):\n return \"Event : {}, ID : {}\".format(self.name, self.ID)\n \n def diction():\n def go(p):\n print('Collect $400!')\n p = player.bal_plus(p, 400)\n return p\n \n def jail(current_player):\n if current_player.is_jailed == 0:\n print(\"You're just visiting!\")\n return current_player\n \n else:\n \n def jail_pay(current_player):\n current_player = player.bal_minus(current_player, 50)\n current_player.is_jailed = 0\n print('Much obliged, you\\'re free to go!')\n return current_player\n def jail_roll(current_player):\n print('Good luck getting those doubles!') \n die1 = random.randint(1,6)\n die2 = random.randint(1,6)\n if die1 == die2:\n print(f'Double {die1}!\\nYou\\'re free to go!')\n current_player.is_jailed = False\n else:\n print(f'Rolled a {die1} and a {die2}\\nYou have {4-current_player.is_jailed} more turns left in your sentence!') #Should add 3 turn rule by turning is_jailed into an int\n current_player.is_jailed += 1\n if current_player.is_jailed == 4:\n current_player.is_jailed = 0\n print('You\\'ve served your sentence, you\\'re free to go!')\n return current_player\n \n prison_options = {\n 'PAY' : jail_pay,\n 'ROLL' : jail_roll\n }\n print('Pay the fine or serve your sentence!!')\n user_in = input('Type in PAY or ROLL :')\n while True:\n if user_in.upper in prison_options:\n current_player = prison_options[user_in.upper](current_player)\n break\n else:\n print('Please input PAY or ROLL')\n return current_player\n \n \n def free_prk(current_player):\n print('Woo! Free Parking!')\n return current_player\n \n def go_2_jail(current_player):\n current_player = event.jailer()\n return current_player\n def in_tax(current_player):\n print('Pay $200 in income taxes!')\n current_player = player.bal_minus(current_player, 200)\n return current_player\n def lib_tax(current_player):\n print('Pay $100 in taxes!')\n current_player = player.bal_minus(current_player, 100)\n return current_player\n event_index = {\n 0 : go,\n 4 : in_tax,\n 38 : lib_tax,\n 10 : jail,\n 20 : free_prk,\n 30 : go_2_jail\n }\n return event_index\n \n def jailer(p):\n print('Go straight to jail! 
Do not pass go and do not collect $200!')\n p.space = 10\n p.is_jailed = 1\n return p\n\nclass rail_event:\n def __init__(self, name, ID, value, owner, rent, num_of_houses):\n self.name = name\n self.ID = ID\n self.value = value\n self.owner = owner\n self.rent = rent\n self.num_of_houses = num_of_houses\n \n def __repr__(self):\n return \"Property : {}\\nPrice ${}, Rent {}\\nHouses = {}, Owner = Player {}\".format(self.name, self.value, self.rent, self.num_of_houses, self.owner)\n \n def rail_call(p, board, player_list):\n if board[p.space].owner == 0:\n if board[p.space].value <= p.balance:\n \n def buy(p, board, player_list):\n print(f'Balance = ${p.balance} - ${board[p.space].value} = ${p.balance - board[p.space].value}\\nCongratulations! You\\'ve bought {board[p.space].name}')\n p.balance -= board[p.space].value\n board[p.space].owner = p.num\n return p, board, player_list\n def no_buy(p, board, player_list):\n print('Maybe next time!')\n return p, board, player_list\n \n d = {\n 'YES' : buy,\n 'NO' : no_buy\n }\n \n user_in = input(f'This property is unowned! Would you like to purchase it for ${board[p.space].value}?\\nYES or NO : ')\n while True:\n if user_in.upper() in d:\n return d[user_in.upper()](p, board, player_list)\n else:\n user_in = input('Please enter a YES or NO :')\n else:\n print('Looks like you don\\'t have enough for this property! Maybe next time')\n elif board[p.space].owner == p.num:\n print('You own this! Safe and sound, for now!')\n elif board[p.space].owner != p.num:\n p, board, player_list = property.owned(p, board, player_list)\n else:\n print('Owner value: Invalid')\n return p, board, player_list\n \n \n\nclass util_event:\n def __init__(self, name, ID, value, owner, num_of_houses):\n self.name = name\n self.ID = ID\n self.value = value\n self.owner = owner\n self.num_of_houses = num_of_houses\n \n def __repr__(self):\n return \"Property : {}\\nPrice ${}\\nHouses = {}, Owner = Player {}\".format(self.name, self.value, self.num_of_houses, self.owner)\n \n def util_call(p, board, player_list, roll):\n p_space = p.space\n p_num = p.num\n if board[p_space].owner == 0:\n p, board = property.unowned(p, board)\n if board[p_space].owner:\n util_owned = 0\n util_list = [board[12], board[28]]\n for x in util_list:\n if x.owner == p_num:\n util_owned += 1\n for x in util_list:\n x.num_of_houses = util_owned\n else:\n pass\n elif board[p_space].owner == p_num:\n print('You own this!')\n elif board[p_space].owner != p_num:\n owner_name = player_list[board[p.space].owner].name\n if board[p.space].num_of_houses == 1:\n multiplier = 4\n elif board[p.space].num_of_houses == 2:\n multiplier = 10\n rent = multiplier * roll\n print(f'Uh oh! Looks like it\\'s owned by {owner_name}')\n print(f'With a roll of {roll} and a multiplier of {multiplier}x, pay rent of ${rent} to {owner_name}!')\n if p.balance < rent:\n print('Looks like you need some more funds! Lets see if there\\'s anything for you to mortgage')\n owed = p.balance - rent\n player_bal = p.balance\n p, board, owed = player.broke(p, board, owed)\n if p.balance is None:\n rent = player_bal + (rent-owed)\n player_list[board[p.space].owner] = player.bal_plus(player_list[board[p.space].owner], rent)\n print(f'Player {p.num} : {p.name} is out!')\n for x in board:\n if x.owner == p.num:\n x.owner, x.num_of_houses = 0, 0\n print(f'{x.name} is now unowned!')\n else:\n print('They owned no properties.')\n pass\n del player_list[p.num]\n else:\n print('You\\'ve managed to pay off your debts! 
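# util_call above charges utility rent as a dice-roll multiplier: 4x when the owner
# holds one utility, 10x with both. The rule in isolation (the constants follow the
# code above; the helper name is an illustrative assumption):
def utility_rent(roll, utilities_owned):
    multiplier = 4 if utilities_owned == 1 else 10
    return multiplier * roll

print(utility_rent(7, 1))  # 28
print(utility_rent(7, 2))  # 70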
For now...')\n p = player.bal_minus(p, rent)\n player_list[board[p.space].owner] = player.bal_plus(player_list[board[p.space].owner], rent)\n else:\n p = player.bal_minus(p, rent)\n player_list[board[p.space].owner] = player.bal_plus(player_list[board[p.space].owner], rent)\n return p, board, player_list\n \n def util_check(current_player, board):\n num_of_telecom = 0\n l = [board[12], board[28]]\n for x in l:\n if x.owner == current_player.num:\n num_of_telecom += 1\n else:\n pass\n for x in l:\n if x.owner == current_player.num:\n x.num_of_houses = num_of_telecom\n print(f'Num telecom = {x.num_of_houses} on {x.name}')\n else:\n pass\n return current_player, board\n \n ","repo_name":"JoshuaDotExe/monopoly","sub_path":"events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"70439483459","text":"import pandas as pd\nfrom os import listdir\nimport string\n\nfiles = listdir(\"../labelData\")\nfiles = [file for file in files if \"csv\" in file]\n\ninput_companies = []\nfor file in files:\n df_comps = pd.read_csv(\"../labelData/\" + file, index_col=None, header=None)\n\n companyTupleList = []\n def buildTupleList(row):\n companyTuple = (row[0], row[1])\n companyTupleList.append(companyTuple)\n\n df_comps.apply(buildTupleList, axis=1)\n\n for company, related in companyTupleList:\n companyDict = {}\n companyDict['name'] = company\n companyDict['query'] = \"{} product\".format(company)\n companyDict['related'] = related\n\n exclude = set(string.punctuation)\n companyName = ''.join(p for p in company if p not in exclude)\n companyName = companyName.replace(\" \", \"_\").lower() ##Build self.companyName\n companyDict['filename'] = companyName\n\n companyDict['targetCompany'] = file.replace(\".csv\", \"\")\n input_companies.append(companyDict)\n\npd.DataFrame(input_companies).to_csv(\"CompanyStats.csv\")","repo_name":"GoatWang/newBingCrawler_LabeledCompany","sub_path":"companyStats/companyStats.py","file_name":"companyStats.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"43861799768","text":"import torch\nfrom transformers import AutoModelForTokenClassification\nimport optparse\nfrom constants import CUDA_VISIBLE_DEVICES\nfrom constants import RESULT_PATH\nimport os.path\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = CUDA_VISIBLE_DEVICES\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef main():\n \n parser = optparse.OptionParser()\n \n \n parser.add_option('-n', '--name',\n action=\"store\", dest=\"name\",\n help=\"model name\", default=\"NER_Pretrained_phoBERT\")\n \n parser.add_option('-c', '--classes',\n action=\"store\", dest=\"classes\",\n help=\"number of classes\", default=0)\n \n \n options, args = parser.parse_args()\n \n if (options.name):\n print(RESULT_PATH + f\"/{options.name}.pt\")\n assert os.path.exists(RESULT_PATH + f\"/{options.name}.pt\")\n \n num_classes = int(options.classes)\n model = AutoModelForTokenClassification.from_pretrained(\"vinai/phobert-base\", \n num_labels=num_classes).to(device)\n model.load_state_dict(torch.load(RESULT_PATH + f\"/{options.name}.pt\", map_location=device))\n \n quantized_model = torch.quantization.quantize_dynamic(model, \n {torch.nn.Linear}, \n dtype=torch.qint8)\n torch.save(quantized_model.state_dict(), RESULT_PATH + f\"/quantized_{options.name}.pt\")\n print('Quantize model 
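# The quantization script above boils down to one call: post-training dynamic
# quantization of the Linear layers to int8. A stripped-down sketch that runs without
# the phoBERT checkpoint (the toy model and shapes are placeholders):
import torch

model = torch.nn.Sequential(torch.nn.Linear(16, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
quantized = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
print(quantized(torch.randn(1, 16)).shape)  # torch.Size([1, 2]), now with int8 Linear kernels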
successfully!')\n \n \nif __name__ == \"__main__\":\n main()","repo_name":"HKAB/nlp-model-compression","sub_path":"quantization/quantize_tc_model.py","file_name":"quantize_tc_model.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"4821533958","text":"a# Definition for singly-linked list.\n'''\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n self.visted = False\n''' \n\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n visited = set()\n while head:\n visited.add(head)\n if head.next is None:\n return False\n elif head.next in visited:\n return True\n else:\n head = head.next\n return False\n","repo_name":"cuijiaxun/LeetCodePythonPractice","sub_path":"141_Linked_List_Cycle.py","file_name":"141_Linked_List_Cycle.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"20863069104","text":"import subprocess\nimport os\n\ndef encrypt_file(file_path, recipient=None):\n if not os.path.isfile(file_path):\n print(f\"Error: '{file_path}' is not a valid file.\")\n return\n\n if recipient:\n command = f\"gpg --encrypt --recipient {recipient} {file_path}\"\n else:\n command = f\"gpg --symmetric {file_path}\"\n\n\n try:\n subprocess.run(command, shell=True, check=True)\n print(f\"File '{file_path}' successfully encrypted.\")\n except subprocess.CalledProcessError as e:\n print(f\"Error: {e}\")\n\ndef decrypt_file(file_path):\n if not os.path.isfile(file_path):\n print(f\"Error: '{file_path}' is not a valid file.\")\n return\n\n command = f\"gpg --decrypt {file_path}\"\n\n try:\n subprocess.run(command, shell=True, check=True)\n print(f\"File '{file_path}' successfully decrypted.\")\n except subprocess.CalledProcessError as e:\n print(f\"Error: {e}\")\n\nif __name__ == \"__main__\":\n choice = input(\"Encrypt (e) or decrypt (d) a file? \").lower()\n\n if choice == \"e\":\n file_path = input(\"Enter the path to the file you want to encrypt: \")\n recipient = input(\"Enter the recipient's key ID (if using asymmetric encryption, leave blank for symmetric encryption): \")\n encrypt_file(file_path, recipient)\n elif choice == \"d\":\n file_path = input(\"Enter the path to the file you want to decrypt: \")\n decrypt_file(file_path)\n else:\n print(\"Invalid choice. Please choose 'e' or 'd'.\")\n","repo_name":"ENIGMA02/File-Encryption-Decryption","sub_path":"File_encrypt/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"74836705218","text":"\n\n# To consolidate our knowledge of the map() function, we are going to use it to implement \n# our own custom zip() function. 
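# Note that encrypt_file/decrypt_file above interpolate a user-supplied path into a
# shell=True command string, which is open to shell injection (a filename like
# "x; rm -rf ~" would be executed). Passing an argument list avoids the shell
# entirely; a hedged sketch of the safer pattern, not a drop-in patch:
import subprocess

def encrypt_file_safe(file_path, recipient=None):
    if recipient:
        cmd = ["gpg", "--encrypt", "--recipient", recipient, file_path]
    else:
        cmd = ["gpg", "--symmetric", file_path]
    subprocess.run(cmd, check=True)  # arguments are passed verbatim, no shell parsing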
The zip() function is a function that takes a number of \n# iterables and then creates a tuple containing each of the elements in the iterables.\n# -------------------------------------\n# my_strings = ['a', 'b', 'c', 'd', 'e']\n# my_numbers = [1, 2, 3, 4, 5]\n# results = list(zip(my_strings, my_numbers))\n# print(results)\n# print(type(results[1]))\n# -------------------------------------\n# Python 3\n\nmy_strings = ['a', 'b', 'c', 'd', 'e']\nmy_numbers = [1, 2, 3, 4, 5]\n\nresults = list(map(lambda x, y: (x, y), my_strings, my_numbers))\n\nprint(results)","repo_name":"thuantt0101/PY_HELLO-WORLD","sub_path":"zip.py","file_name":"zip.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"73836189698","text":"from .context import solvers\nfrom solvers import Alphametic\nimport unittest\n\nclass AlphameticTest(unittest.TestCase):\n \"\"\"Tests for the Alphametic solver\"\"\"\n\n def testDivision(self):\n a = Alphametic()\n a.AddDivision(dividend=\"FHPOHSKF\", divisor=\"ITSSKR\", quotient=\"HIF\")\n a.AddDivision(dividend=\"FHPOHS\", divisor=\"ITSSKR\", quotient=\"H\", remainder=\"TPRPI\")\n a.AddProduct(result=\"FISSHK\", initial_value=\"ITSSKR\", multiplier=\"H\")\n a.AddSubtraction(result=\"TPRPI\", initial_value=\"FHPOHS\", reduction=\"FISSHK\")\n a.AddDivision(dividend=\"TPRPIK\", divisor=\"ITSSKR\", quotient=\"I\", remainder=\"RRPCI\")\n a.AddProduct(result=\"ITSSKR\", initial_value=\"ITSSKR\", multiplier=\"I\")\n a.AddSubtraction(result=\"RRPCI\", initial_value=\"TPRPIK\", reduction=\"ITSSKR\")\n a.AddDivision(dividend=\"RRPCIF\", divisor=\"ITSSKR\", quotient=\"F\", remainder=\"ITPCKP\")\n a.AddProduct(result=\"OHSSCF\", initial_value=\"ITSSKR\", multiplier=\"F\")\n a.AddSubtraction(result=\"ITPCKP\", initial_value=\"RRPCIF\", reduction=\"OHSSCF\")\n expectedSolution = {\"P\":0, \"I\": 1, \"T\": 2, \"C\": 3, \"H\": 4, \"F\": 5, \"O\": 6, \"R\": 7, \"K\": 8, \"S\": 9}\n self.assertEqual(a.Solution(), expectedSolution)\n\n def testKnownLetters(self):\n a = Alphametic()\n a.AddSum(initial_value=\"CEYLON\", addition=\"BLACK\", result=\"KETTLE\")\n a.AddKnownLetter(\"N\", 8)\n a.AddKnownLetter(\"Y\", 2)\n expectedSolution = {\"A\":3, \"O\":1, \"B\":9, \"T\":0, \"L\":7, \"K\":6, \"E\":4, \"C\":5, \"Y\":2, \"N\":8}\n self.assertEqual(a.Solution(), expectedSolution)\n","repo_name":"beckbria/logic-solvers","sub_path":"tests/test_alphametic.py","file_name":"test_alphametic.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"34061379261","text":"from operator import truediv\nimport os\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'django-insecure-g2@*anp+iek@!npj%o6&4qny8^cqu)sh34f4jo)&8*rky*&f-x'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['127.0.0.1']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n\n 'allauth',\n 
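# The map/lambda version above is fixed to exactly two iterables; a starred lambda
# reproduces zip() for any argument count, including truncation at the shortest input
# (map stops when one iterable is exhausted in Python 3):
def my_zip(*iterables):
    return map(lambda *args: args, *iterables)

print(list(my_zip('abc', [1, 2, 3])))        # [('a', 1), ('b', 2), ('c', 3)]
print(list(my_zip('ab', [1, 2, 3], 'xyz')))  # [('a', 1, 'x'), ('b', 2, 'y')]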
'allauth.account',\n 'allauth.socialaccount',\n\n 'ckeditor_uploader',\n 'ckeditor',\n\n 'django_filters',\n 'myboard',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'FinalProject.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [BASE_DIR / 'templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'django.contrib.auth.context_processors.auth',\n \n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'FinalProject.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/4.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# CELERY SETTINGS\nCELERY_BROKER_URL = 'redis://localhost:6379'\nCELERY_RESULT_BACKEND = 'redis://localhost:6379'\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\n\n# ACCOUNT\nAUTHENTICATION_BACKENDS = (\n \"django.contrib.auth.backends.ModelBackend\",\n \"allauth.account.auth_backends.AuthenticationBackend\",\n)\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/accounts/login/'\nLOGOUT_REDIRECT_URL = '/accounts/login/'\n\nACCOUNT_EMAIL_CONFIGURATION_EXPIRE_DAYS = 1\nACCOUNT_USERNAME_MIN_LENGTH = 4\n\nACCOUNT_EMAIL_VRIFICATION = 'mandatory'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_AUTHENTIFICATION_METHOD = 'email'\n\n# EMAIL\n\nEMAIL_HOST = 'smtp.mail.ru'\nEMAIL_PORT = 465 \nEMAIL_HOST_USER = 'dmasar'\nEMAIL_HOST_PASSWORD = '********' #пароль скрыт \nEMAIL_USE_SSL = True\nEMAIL_USE_TLS = False\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER + '@mail.ru'\n\nEMAIL_ADMIN = 'dmasar@mail.ru'\n\nSERVER_EMAIL = 'dmasar@mail.ru' \nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = False\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_CONFIRM_EMAIL_ON_GET = True\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/4.0/topics/i18n/\n\nLANGUAGE_CODE = 'ru-RU'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = False\nTIME_ZONE = 'Europe/Moscow'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/4.0/howto/static-files/\n\nSTATICFILES_DIRS = [\n BASE_DIR / \"static\"\n]\nSTATIC_URL = '/static/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\nMEDIA_URL = '/media/'\nCKEDITOR_UPLOAD_PATH = 'uploads/'\n\nCKEDITOR_UPLOAD_SLUGIFY_FILENAME = False\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_BROWSE_SHOW_DIRS = 
True\nCKEDITOR_RESTRICT_BY_DATE = False\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nSITE_ID = 1","repo_name":"DmitrySarykov/FinalProject","sub_path":"FinalProject/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"2526167625","text":"import tensorflow as tf\nfrom tqdm import tqdm\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../input/mnist_data/MNIST_data\", one_hot = True)\ndef init_weights(shape):\n init_random_dist = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(init_random_dist)\n\ndef init_bias(shape):\n init_random_bias = tf.constant(0.1, shape=shape)\n return tf.Variable(init_random_bias)\n\ndef conv2d(x,W):\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding=\"SAME\")\n\ndef max_pool_2by2(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1] , strides = [1,2,2,1], padding=\"SAME\")\n\ndef convolutional_layer(input_x, shape):\n W = init_weights(shape)\n b = init_bias([shape[3]])\n return tf.nn.relu(conv2d(input_x, W) + b)\n\ndef normal_full_layer(input_layer, size):\n input_size = int(input_layer.get_shape()[1])\n W = init_weights([input_size, size])\n b = init_bias([size])\n return tf.matmul(input_layer, W) + b\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny_true = tf.placeholder(tf.float32, shape=[None,10])\n\nx_image = tf.reshape(x, shape=[-1,28,28,1]) # 784 = 28*28\n\nconvo1 = convolutional_layer(x_image, shape = [5,5,1,32])\nconvo_1_pooling = max_pool_2by2(convo1)\n\nconvo_2 = convolutional_layer(convo_1_pooling,shape=[6,6,32,64])\nconvo_2_pooling = max_pool_2by2(convo_2)\n\n\nconvo_2_flat = tf.reshape(convo_2_pooling,[-1,7*7*64])\nfull_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat,1024))\n#1024 is the nos. 
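# The reshape to 7*7*64 above follows from two SAME-padded, stride-2 2x2 max-pools on
# a 28x28 MNIST image (the stride-1 SAME convolutions keep the spatial size), with 64
# feature maps out of the second convolution. Checking that arithmetic:
size = 28
for _ in range(2):             # two max_pool_2by2 layers, each halving height and width
    size //= 2
print(size, size * size * 64)  # 7 3136, i.e. 7 * 7 * 64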
of neurons we want in our fully connected layer\n\n\nhold_prob = tf.placeholder(tf.float32)\nfull_one_dropout = tf.nn.dropout(full_layer_one,keep_prob=hold_prob)\n\ny_pred = normal_full_layer(full_one_dropout,10)\n\nprint(\"input Size: \", x.get_shape())\nprint(\"After reshaping, input Size: \", x_image.get_shape())\nprint(\"After first conolution: \", convo1.get_shape())\nprint(\"After first Pooling: \", convo_1_pooling.get_shape())\nprint(\"After second conolution: \", convo_2.get_shape())\nprint(\"After second Pooling: \", convo_2_pooling.get_shape())\nprint(\"After flatening: \",convo_2_flat.get_shape())\nprint(\"After first fully dense NN: \",full_layer_one.get_shape())\nprint(\"After first dropout: \",full_one_dropout.get_shape())\nprint(\"Prediction: \", y_pred.get_shape())\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))\n\noptimizer = tf.train.AdamOptimizer(learning_rate=0.0001)\ntrain = optimizer.minimize(cross_entropy)\n\ninit = tf.global_variables_initializer()\n\nsess = tf.Session()\n\nsess.run(init)\n\nepochs = 50000\nfor i in tqdm(range(epochs)):\n batch_x , batch_y = mnist.train.next_batch(50)\n sess.run(train,feed_dict={x:batch_x,y_true:batch_y,hold_prob:0.5})\n\nsaver = tf.train.Saver()\n\n# To restore the previous weights run the next command\nsaver.save(sess, \"./CNN Model/CNN Model\")\nsaver.restore(sess, \"./CNN Model/CNN Model\")\nmatches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1))\nacc = tf.reduce_mean(tf.cast(matches,tf.float32))\nprint(sess.run(acc,feed_dict={x:mnist.test.images,y_true:mnist.test.labels,hold_prob:1.0}))\nsess.close()","repo_name":"pateljainilanilbhai/pythonprograms","sub_path":"mnisttt.py","file_name":"mnisttt.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"36709065558","text":"import logging\n\nfrom clisy.searcher.searcher_impl.amazon_searcher import AmazonSearcher\nfrom clisy.searcher.searcher_impl.creative_commons_image_searcher import CreativeCommonsImageSearcher\nfrom clisy.searcher.searcher_impl.duck_duck_go_searcher import DuckDuckGoSearcher\nfrom clisy.searcher.searcher_impl.google_searcher import GoogleSearcher\nfrom clisy.searcher.searcher_impl.imdb_searcher import IMDbSearcher\nfrom clisy.searcher.searcher_impl.swiggy_searcher import SwiggySearcher\nfrom clisy.searcher.searcher_impl.wikipedia_searcher import WikipediaSearcher\nfrom clisy.searcher.searcher_impl.wirecutter_searcher import WirecutterSearcher\nfrom clisy.searcher.searchoptions import SearchOptions\n\n\nclass ClisyFactory:\n _logger = logging.getLogger(__name__)\n searchers = {\n SearchOptions.DUCKDUCKGO: DuckDuckGoSearcher,\n SearchOptions.GOOGLE: GoogleSearcher,\n SearchOptions.WIKIPEDIA: WikipediaSearcher,\n SearchOptions.WIRECUTTER: WirecutterSearcher,\n SearchOptions.AMAZON: AmazonSearcher,\n SearchOptions.CREATIVE_COMMONS_IMAGE: CreativeCommonsImageSearcher,\n SearchOptions.IMDB: IMDbSearcher,\n SearchOptions.SWIGGY: SwiggySearcher\n }\n\n def __init__(self):\n self.search_option = SearchOptions.DUCKDUCKGO\n\n def __init__(self, search_option=str):\n self.search_option = search_option\n\n def get_searcher(self):\n try:\n return self.searchers[self.search_option]()\n except Exception as ex:\n self._logger.error(self,\n \"This search is not yet supported. Please refer to documentation to understand what \"\n \"all options are supported at present. 
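# ClisyFactory above resolves a searcher class from a dict keyed by search option.
# Two things to watch: the second __init__ definition silently replaces the first,
# and the logger.error call passes `self` where the format string should go, so the
# log line will render oddly on a miss. A minimal dict-factory with explicit miss
# handling (the registry contents here are illustrative stand-ins):
class Factory:
    registry = {"echo": str.upper}

    def get(self, name):
        try:
            return self.registry[name]
        except KeyError:
            raise ValueError(f"unsupported option {name!r}; known: {sorted(self.registry)}")

print(Factory().get("echo")("hi"))  # HI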
Following are the search options supported so \"\n \"far : %s . Here is the error %r\",\n SearchOptions, ex)\n","repo_name":"nilukush/clisy","sub_path":"src/clisy/searcher/clisyfactory.py","file_name":"clisyfactory.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"40022395139","text":"def ifainb(a,b):\n if a==\"\":\n return True\n elif(len(b)-len(a)!=1):\n return False\n for i in a:\n if i not in b:\n return False\n return True\ndef wordchain(preword,words):\n ans=[]\n if(len(words)==0):\n return preword\n if(words[0][0]!=len(preword[-1])+1 and preword[-1]!=\"\"):\n return preword\n for word in words[0][1]:\n if(ifainb(preword[-1],word)):\n temp=wordchain(preword[:]+[word],words[1:])\n if(len(ans) 0:\n data = []\n for label in labels:\n label_data = [x for x in self.data if x['label'] == label]\n label_data = random.sample(label_data, labeled_examples_per_class)\n data += label_data\n random.shuffle(data)\n self.data = data\n print(f'Loaded {len(self.data)} images equally ditributed from {len(labels)} classes.')\n \n if num_expanded is not None and num_expanded > 0:\n indexes = self._expansion_indexes(len(self.data), num_expanded)\n self.data = [self.data[i] for i in indexes]\n print(f'Loaded {len(self.data)} images after expansion.')\n print(f'Final data split as: {Counter([x[\"label\"] for x in self.data])}')\n print()\n \n self.img_dir = img_path\n self.tokenizer = tokenizer\n self.args = args\n self.vocab = vocab\n self.n_classes = len(args.labels)\n self.text_start_token = [\"[CLS]\"] if args.model != \"mmbt\" else [\"[SEP]\"]\n\n with numpy_seed(0):\n for row in self.data:\n if np.random.random() < args.drop_img_percent:\n row[\"image\"] = None\n\n self.max_seq_len = args.max_seq_len\n if args.model == \"mmbt\":\n self.max_seq_len -= args.num_image_embeds\n\n self.transforms = transforms\n self.errors = 0\n self.text_aug0 = text_aug0\n self.text_aug = text_aug\n\n \n def __len__(self):\n return len(self.data)\n \n def _expansion_indexes(self, initial_size, size):\n expand_count = size // initial_size\n indexes = np.arange(initial_size)\n expanded = np.hstack([indexes for _ in range(expand_count)]) if expand_count else np.array([], dtype=int)\n \n if expanded.shape[0] < size:\n diff = size - expanded.shape[0]\n expanded = np.hstack([expanded, np.random.choice(indexes, diff)])\n \n assert expanded.shape[0] == size\n return expanded\n \n def get_classic_item(self, index):\n try:\n image = Image.open(\n os.path.join(self.img_dir, self.data[index][\"image\"])\n ).convert(\"RGB\")\n except:\n image = Image.fromarray(128 * np.ones((256, 256, 3), dtype=np.uint8))\n image = self.transforms(image)\n \n label = self.args.labels.index(self.data[index][\"label\"])\n\n return image, label\n \n \n def get_multimodal_item_orig(self, index):\n if self.args.task == \"vsnli\":\n sent1 = self.tokenizer(self.data[index][\"sentence1\"])\n sent2 = self.tokenizer(self.data[index][\"sentence2\"])\n truncate_seq_pair(sent1, sent2, self.args.max_seq_len - 3)\n sentence = self.text_start_token + sent1 + [\"[SEP]\"] + sent2 + [\"[SEP]\"]\n segment = torch.cat(\n [torch.zeros(2 + len(sent1)), torch.ones(len(sent2) + 1)]\n )\n else:\n sentence = (\n self.text_start_token\n + self.tokenizer(self.data[index][\"text\"])[\n : (self.args.max_seq_len - 1)\n ]\n )\n segment = torch.zeros(len(sentence))\n\n sentence = torch.LongTensor(\n [\n self.vocab.stoi[w] if w in self.vocab.stoi else self.vocab.stoi[\"[UNK]\"]\n 
for w in sentence\n ]\n )\n\n if self.args.task_type == \"multilabel\":\n label = torch.zeros(self.n_classes)\n label[\n [self.args.labels.index(tgt) for tgt in self.data[index][\"label\"]]\n ] = 1\n else:\n label = torch.LongTensor(\n [self.args.labels.index(self.data[index][\"label\"])]\n )\n\n image = None\n try:\n if self.args.model in [\"img\", \"concatbow\", \"concatbert\", \"mmbt\"]:\n if self.data[index][\"image\"]:\n image = Image.open(\n os.path.join(self.img_dir, self.data[index][\"image\"])\n ).convert(\"RGB\")\n else:\n image = Image.fromarray(128 * np.ones((256, 256, 3), dtype=np.uint8))\n image = self.transforms(image)\n except:\n self.errors += 1\n if self.errors > len(self.data) / 100:\n raise ValueError('wrong paths')\n image = Image.fromarray(128 * np.ones((256, 256, 3), dtype=np.uint8))\n image = self.transforms(image)\n if self.args.model == \"mmbt\":\n # The first SEP is part of Image Token.\n segment = segment[1:]\n sentence = sentence[1:]\n # The first segment (0) is of images.\n segment += 1\n\n return sentence, segment, image, label\n\n def get_mmbt_sentence_and_segment(self, text):\n sentence = (\n self.text_start_token\n + self.tokenizer(text)[\n : (self.args.max_seq_len - 1)\n ]\n )\n segment = torch.zeros(len(sentence))\n\n sentence = torch.LongTensor(\n [\n self.vocab.stoi[w] if w in self.vocab.stoi else self.vocab.stoi[\"[UNK]\"]\n for w in sentence\n ]\n )\n if self.args.model == \"mmbt\":\n # The first SEP is part of Image Token.\n segment = segment[1:]\n sentence = sentence[1:]\n # The first segment (0) is of images.\n segment += 1\n\n return sentence, segment\n\n def get_mmbt_label(self, label):\n if self.args.task_type == \"multilabel\":\n label = torch.zeros(self.n_classes)\n label[\n [self.args.labels.index(tgt) for tgt in label]\n ] = 1\n else:\n label_idx = self.args.labels.index(label) if label != 'unknown' else -1\n label = torch.LongTensor([label_idx])\n return label\n\n def get_mmbt_image(self, img_path):\n image = None\n try:\n if self.args.model in [\"img\", \"concatbow\", \"concatbert\", \"mmbt\"]:\n if img_path:\n image = Image.open(\n os.path.join(self.img_dir, img_path)\n ).convert(\"RGB\")\n else:\n image = Image.fromarray(128 * np.ones((256, 256, 3), dtype=np.uint8))\n image = self.transforms(image)\n except:\n self.errors += 1\n if self.errors > len(self.data) / 100:\n raise ValueError('wrong paths')\n image = Image.fromarray(128 * np.ones((256, 256, 3), dtype=np.uint8))\n image = self.transforms(image)\n return image\n\n def _get_text(self, index, aug):\n if aug == 'none':\n text = self.data[index][\"text\"]\n else:\n text = random.choice(self.data[index][aug])\n\n sentence, segment = self.get_mmbt_sentence_and_segment(text)\n return sentence, segment\n\n\n def get_multimodal_item(self, index):\n sentence, segment = self._get_text(index, self.text_aug0)\n\n label = self.get_mmbt_label(self.data[index][\"label\"])\n image = self.get_mmbt_image(self.data[index][\"image\"])\n \n if self.text_aug is None:\n return sentence, segment, image, label\n else:\n sentence_aug, segment_aug = self._get_text(index, self.text_aug)\n return sentence, segment, image, label, sentence_aug, segment_aug\n \n \n def __getitem__(self, index):\n if self.args.my_format == 'classic':\n return self.get_classic_item(index)\n elif self.args.my_format =='multimodal':\n return self.get_multimodal_item(index)\n else:\n raise ValueError(f'Unrecognized args.my_format 
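# _get_text above either returns the raw text of a row or draws one of its
# precomputed augmentations at random. The dispatch in isolation (field names are
# illustrative; real rows carry whatever keys the augmentation step produced):
import random

def pick_text(row, aug="none"):
    return row["text"] if aug == "none" else random.choice(row[aug])

row = {"text": "flood in the city", "eda": ["city flood", "flood in town"]}
print(pick_text(row))         # always the raw text
print(pick_text(row, "eda"))  # one of the two augmented variants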
{self.args.my_format}')\n","repo_name":"iustinsirbu13/multimodal-ssl-for-disaster-tweet-classification","sub_path":"code/FixMatch-pytorch/data/myDataset.py","file_name":"myDataset.py","file_ext":"py","file_size_in_byte":9112,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"80"} +{"seq_id":"14061587210","text":"# [출력 예시]\n\n# [오늘의 날씨]\n# 비, 어제보다 3˚ 낮아요\n# 현재 00C ( 최저 00도 / 최고 00도)\n\n# 미세먼지 00 좋음\n# 초미세먼지 00 좋음\n# 오존지수 00좋음\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\ndef create_soup(url):\n headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.101 Safari/537.36'}\n res = requests.get(url,headers=headers)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, \"lxml\")\n return soup\n\ndef print_news(index, title, link):\n print(\"{}. {}\".format(index+1, title))\n print(\"링크 : {}\".format(link))\n \n \ndef scrape_weather():\n print(\"[오늘의 날씨]\")\n url = \"https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query=%EB%8C%80%EA%B5%AC+%EB%82%A0%EC%94%A8&oquery=%EB%8C%80%EA%B5%AC%EB%82%A0%EC%94%A8&tqi=hKlhIdprvh8ssCzIB94ssssstTl-119752\"\n soup = create_soup(url)\n cast_txt = soup.find(\"p\", attrs={\"class\":\"cast_txt\"}).get_text() #날씨 설명\n\n today_temp = soup.find(\"p\", attrs={\"class\":\"info_temperature\"}).get_text().replace(\"도씨\",\"\")\n low_temp = soup.find(\"span\", attrs={\"class\":\"min\"}).get_text()\n high_temp = soup.find(\"span\", attrs={\"class\":\"max\"}).get_text()\n\n #또 다른 방법\n #today_temp = soup.find(\"span\", attrs={\"class\":\"todaytemp\"}).get_text() #오늘 날씨\n #temp_mark = soup.find(\"span\", attrs={\"class\":\"tempmark\"}).get_text()[-1:] #℃\n #min_to_max = soup.find_all(\"span\", attrs={\"class\":\"num\"})\n #low_temp = min_to_max[0].get_text() # 오늘 최저기온\n #high_temp = min_to_max[1].get_text() # 오늘 최고기온\n\n #미세먼지 정보\n dust = soup.find(\"dl\", attrs={\"class\":\"indicator\"}) \n pm10 = dust.find_all(\"dd\")[0].get_text() #미세먼지\n pm25 = dust.find_all(\"dd\")[1].get_text() #초미세먼지\n \n \n #출력\n print(cast_txt)\n print(\"현재 \"+today_temp+\"( 최저 \"+low_temp+\" / 최고 \"+high_temp+\" )\")\n print()\n print(\"미세먼지 {}\".format(pm10))\n print(\"초미세먼지 {}\".format(pm25))\n print()\n\n\ndef scrape_headline_news():\n print(\"[헤드라인 뉴스]\")\n url = \"https://news.naver.com/\"\n soup = create_soup(url)\n news_list = soup.find(\"ul\", attrs={\"class\":\"hdline_article_list\"}).find_all(\"li\", limit=3) # 3개까지 찾는 것\n #print(news_list)\n for index, news in enumerate(news_list):\n title = news.find(\"a\").get_text().strip()\n link = url + news.find(\"a\")[\"href\"]\n print_news(index, title, link)\n print()\n\n #또 다른 방법\n #titles = soup.find_all(\"div\", attrs={\"class\":\"hdline_article_tit\"})\n #for i in range(3):\n # link = titles[i].a[\"href\"]\n # print(\"{}. 
\".format(i+1)+titles[i].get_text().strip())\n # print(\"( 링크 : \"+url+link+\" )\")\n # print()\n \n\ndef scrape_it_news():\n print(\"[IT 뉴스]\")\n url = \"https://news.naver.com/main/list.nhn?mode=LS2D&mid=shm&sid1=105&sid2=230\"\n soup = create_soup(url)\n news_list = soup.find(\"ul\", attrs={\"class\":\"type06_headline\"}).find_all(\"li\", limit=3) #li태그 3개만 불러오기\n\n for index, news in enumerate(news_list):\n a_idx = 0\n img = news.find(\"img\")\n if img:\n a_idx = 1 # img 태그가 있으면 1번째 a 태그의 정보를 사용\n title = news.find_all(\"a\")[a_idx].get_text().strip()\n link = news.find_all(\"a\")[a_idx][\"href\"]\n print_news(index, title, link)\n print()\n \n #또 다른 방법\n #class_title = soup.find_all(\"dt\", attrs={\"class\":\"photo\"})\n #titles = soup.find_all(\"dt\")\n #results = [x for x in titles if x not in class_title] # 전체리스트 - 클래스있는 리스트\n #for i in range(3):\n # link = results[i].a[\"href\"]\n # print(\"{}. \".format(i+1)+results[i].a.get_text().strip())\n # print(\"( 링크 : \"+link+\" )\")\n # print()\n\n\n\ndef scrape_english():\n print(\"[오늘의 영어 회화]\")\n url = \"https://www.hackers.co.kr/?c=s_eng/eng_contents/I_others_english&keywd=haceng_submain_lnb_eng_I_others_english&logger_kw=haceng_submain_lnb_eng_I_others_english#;\"\n soup = create_soup(url)\n today_expression = soup.find_all(\"b\", attrs={\"class\":\"conv_txtTitle\"})\n\n sentences = soup.find_all(\"div\", attrs={\"id\":re.compile(\"^conv_kor_t\")})\n print(\" (영어 지문) \")\n for sentence in sentences[len(sentences)//2:]: #8문장이 있다고 가정할 때, 5~8까지 짤라서 가져\n print(sentence.get_text().strip())\n \n print()\n print(\" (한글 지문) \")\n for sentence in sentences[:len(sentences)//2]: #8문장이 있다고 가정할 때, 5~8까지 짤라서 가져\n print(sentence.get_text().strip())\n print()\n # 또 다른 방법\n #kor = today_expression[0]\n #eng = today_expression[1]\n #contents = soup.find_all(\"span\", attrs={\"class\":\"conv_sub\"}) # 0~3 : 한국어, 4~7 : 영어\n #print(\"## 한글 지문 ##\")\n #print(\"오늘의 표현 : \" + kor.get_text()+\"\\n\")\n #for i in range(4):\n # content = contents[i].get_text()\n # print(content)\n # print()\n #print(\"## 영어 지문 ##\")\n #print(\"오늘의 표현 : \" + eng.get_text()+\"\\n\")\n #for i in range(4,8):\n #content = contents[i].get_text()\n #print(content)\n #print()\n\n\n\nif __name__ == \"__main__\": # 직접 실행할때는 아래의 함수들이 실행되지만, 다른 파일에서 실행하게 되면 실행x\n scrape_weather() #오늘의 날씨 정보 가져오기\n scrape_headline_news() # 헤드라인 뉴스 정보 가져오기\n scrape_it_news() # IT 뉴스 정보 가져오기\n scrape_english() # 오늘의 영어회화 가져오기\n ","repo_name":"jinsu9758/python_study","sub_path":"selenium/project1_upgrade.py","file_name":"project1_upgrade.py","file_ext":"py","file_size_in_byte":5723,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"29970728626","text":"from collections import deque\nfood, stamina = deque([int(i) for i in input().split(\", \")]), deque([int(i) for i in input().split(\", \")])\nclimbed_peaks = []\n\npeaks = deque([(\"Vihren\", 80), (\"Kutelo\", 90), (\"Banski Suhodol\", 100), (\"Polezhan\",\t60), (\"Kamenitza\", 70)])\n \nday = 1\nwhile food and stamina:\n sum_food_stamina = food.pop() + stamina.popleft()\n if sum_food_stamina >= peaks[0][1]:\n climbed_peaks.append(peaks[0][0])\n peaks.popleft()\n\n if not peaks:\n print(\"Alex did it! He climbed all top five Pirin peaks in one week -> @FIVEinAWEEK\")\n break \n if not food or not stamina or day == 7:\n print(\"Alex failed! 
He has to organize his journey better next time -> @PIRINWINS\")\n break\n \n\n day += 1\nif climbed_peaks:\n print(\"Conquered peaks:\")\n for peak in climbed_peaks:\n print(peak)\n","repo_name":"xaoccc/python","sub_path":"Python Advanced/Exams/Python Advanced Retake Exam - 14 December 2022/01. Climb The Peaks.py","file_name":"01. Climb The Peaks.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"80"} +{"seq_id":"26800724935","text":"from user import User\nfrom reply import *\n\n\nclass SocialNetwork:\n def __init__(self):\n self.users = {}\n self.users_id = {}\n self.posts_id = {}\n self.current_user = None\n self.post_id = 0\n\n def signup(self, username, password):\n user_id = len(self.users) + 1\n new_user = User(user_id, username, password)\n self.users[username] = new_user\n self.users_id[user_id] = username\n print(f\"User '{username}' signed up successfully.\")\n\n def login(self, username, password):\n if username in self.users and self.users[username].password == password:\n self.current_user = self.users[username]\n print(f\"Welcome, {self.current_user.username}!\")\n else:\n print(\"Invalid username or password.\")\n\n def display_news_feed(self, sorting_option):\n if not self.current_user:\n print(\"Please login first to view the news feed.\")\n return\n\n sorted_feed = self.sort_news_feed(sorting_option)\n for post in sorted_feed:\n\n username = self.users_id.get(post.user_id)\n\n user = self.users.get(username)\n print(f\"User: {username}, User Id: {post.user_id}, Post Id: {post.post_id}, Post: {post.content}, \"\n f\"Upvotes: {post.upvotes}, Downvotes: {post.downvotes}, \"\n f\"Comments Number: {comments_count.get(post.post_id,0)}, Time: {post.get_time_ago()}\")\n\n if comments.get(post.post_id,0) != 0:\n\n for comment in comments.get(post.post_id):\n print(f\"Comments : \")\n print(f\"User: {comment.user_id}, {comment.content}\")\n\n def post_feed_item(self, content):\n if not self.current_user:\n print(\"Please login first to post.\")\n return\n\n self.current_user.post_feed_item(self.post_id, content)\n self.post_id += 1\n self.posts_id[self.post_id] = self.current_user.user_id\n\n print(\"Posted successfully.\")\n\n def follow_user(self, username_to_follow):\n if not self.current_user:\n print(\"Please login first to follow users.\")\n return\n\n if username_to_follow in self.users:\n user_to_follow = self.users[username_to_follow]\n self.current_user.follow(user_to_follow)\n print(f\"You are now following '{username_to_follow}'.\")\n else:\n print(f\"User '{username_to_follow}' not found.\")\n\n def reply_to_post(self, post_id, content):\n if not self.current_user:\n print(\"Please login first to reply.\")\n return\n\n user_id = self.posts_id.get(post_id)\n username = self.users_id.get(user_id)\n user = self.users.get(username)\n\n for post in user.posts:\n if post.post_id == post_id:\n self.current_user.reply(post_id, content)\n print(\"Replied successfully.\")\n return\n\n print(f\"Post with ID {post_id} not found.\")\n\n def upvote_post(self, post_id):\n if not self.current_user:\n print(\"Please login first to upvote.\")\n return\n\n user_id = self.posts_id.get(post_id)\n username = self.users_id.get(user_id)\n user = self.users.get(username)\n\n for post in user.posts:\n if post.post_id == post_id:\n self.current_user.upvote(post)\n print(\"Upvoted successfully.\")\n return\n\n print(f\"Post with ID {post_id} not found.\")\n\n def downvote_post(self, post_id):\n if not 
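# The climbing loop above consumes food from the right end of one deque and stamina
# from the left end of another; that two-ended access is the point of using deque:
from collections import deque

food, stamina = deque([1, 2, 3]), deque([10, 20, 30])
print(food.pop() + stamina.popleft())  # 3 + 10 = 13
print(food.pop() + stamina.popleft())  # 2 + 20 = 22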
self.current_user:\n print(\"Please login first to downvote.\")\n return\n\n for post in self.current_user.posts:\n if post.post_id == post_id:\n self.current_user.downvote(post)\n print(\"Downvoted successfully.\")\n return\n\n print(f\"Post with ID {post_id} not found.\")\n\n def sort_news_feed(self, sorting_option):\n if not self.current_user:\n return []\n\n if sorting_option == \"followed_users\":\n sorted_feed = sorted(self.current_user.posts + self.get_followed_users_posts(),\n key=lambda post: (post.user_id in self.current_user.followed_users, post.timestamp),\n reverse=True)\n elif sorting_option == \"score\":\n sorted_feed = sorted(self.current_user.posts + self.get_followed_users_posts(),\n key=lambda post: (post.upvotes - post.downvotes, post.timestamp),\n reverse=True)\n elif sorting_option == \"comments\":\n sorted_feed = sorted(self.current_user.posts + self.get_followed_users_posts(),\n key=lambda post: (len(post.comments), post.timestamp),\n reverse=True)\n else:\n sorted_feed = sorted(self.current_user.posts + self.get_followed_users_posts(),\n key=lambda post: post.timestamp, reverse=True)\n\n return sorted_feed\n\n def get_followed_users_posts(self):\n followed_posts = []\n for user in self.users.values():\n if user.user_id in self.current_user.followed_users:\n followed_posts.extend(user.posts)\n return followed_posts\n\n def get_users(self):\n if not self.current_user:\n print(\"Please login first to view usernames\")\n return\n for user in self.users.values():\n print(user.username)\n","repo_name":"bhuvneshsaini/Newsfeed","sub_path":"social_network.py","file_name":"social_network.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"23613010374","text":"#This program reads a file, takes the average, minimum, and maximum, of tickets\r\n#CISI1400\r\n#Python File IO HW\r\n#Cory Schneider\r\nimport os\r\n\r\nuserchoice = input(\"Enter a filename: \").strip()\r\n\r\nif os.path.isfile(userchoice):\r\n infile = open(userchoice, \"r\")\r\n s = infile.read()\r\n tickets = [eval('x') for x in s.split()]\r\n sum_a = []\r\n for i in tickets:\r\n try:\r\n sum_a.append(float(i))\r\n except ValueError:\r\n next\r\n print(\"**********************************\")\r\n print(\" TICKET REPORT \")\r\n print(\"**********************************\")\r\n print(\"\\nThere are \",len(tickets),\" tickets in the database.\\n\")\r\n print(\"Maximum Ticket price is $\",max(sum_a))\r\n print(\"Minimum Ticket price is $\",min(sum_a))\r\n print(\"Average Ticket price is $\",\"{:.2f}\".format(sum(sum_a)/len(tickets)),\"\\n\")\r\n print(\"Thank you for using our ticket system!\\n\")\r\n print(\"**********************************\\n\")\r\n infile.close()\r\nelse:\r\n print(\"Failed to open file\")\r\n \r\noutput_file = open(\"output.txt\", \"w\")\r\n\r\nfor line in userchoice:\r\n output_file.write(line + \"\\n\")\r\n output_file.write(\"hello\\n\")\r\n\r\noutput_file.close()\r\n","repo_name":"CorySchneider/Python-I-O","sub_path":"Pythong IO Homework.py","file_name":"Pythong IO Homework.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"25955524339","text":"import os\n\n# print(os.environ['PYTHONPATH'].split(os.pathsep))\n\n\nimport kigame\n\nACCELERATION_OF_GRAVITY = -0.1\n\nimage = kigame.Sprite(source='alienBlue_walk1.png', pos=(20, 20))\nimage.add_texture('alienBlue_walk2.png')\n\nspeed = 
10\n\n\ndef update():\n global speed\n\n if kigame.key.spacebar:\n speed += 0.5\n\n image.y += speed\n speed += ACCELERATION_OF_GRAVITY\n\n if speed > 0:\n image.next_texture()\n\n\nkigame.GameApp().run()\n","repo_name":"Arigotoma/kigame","sub_path":"examples/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"37237163761","text":"from django.contrib.auth.models import User\r\n\r\nusers = [\r\n 'adent',\r\n 'bbolling',\r\n 'hprynne',\r\n 'mtulliver',\r\n 'ymakioka',\r\n 'ctinajero',\r\n 'jgatz',\r\n 'dbrooks',\r\n 'ebovary',\r\n 'swestern',\r\n 'narcher',\r\n 'mpierce',\r\n 'ctrask',\r\n 'jeyre',\r\n 'jlfinch',\r\n 'scarton',\r\n 'mwormwood',\r\n 'earroway',\r\n 'dschwartz',\r\n 'jbrown'\r\n]\r\n\r\nfor username in users:\r\n user=User.objects.create_user(username, password='1234')\r\n user.is_superuser=False\r\n user.is_staff=False\r\n user.save()\r\n","repo_name":"leavescalculator/LeavesCalculator","sub_path":"scripts/create_users.py","file_name":"create_users.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"25641464240","text":"import math\n\n\nclass PhotoAlbum:\n def __init__(self, pages):\n self.pages = pages\n self.photos = [[] for _ in range(self.pages)]\n\n @classmethod\n def from_photos_count(cls, photos_count):\n total_pages = math.ceil(photos_count / 4)\n return cls(total_pages)\n\n def add_photo(self, label):\n added = False\n page_number, slot_number = (0, 0)\n for page in range(len(self.photos)):\n if not self.photos[page]:\n self.photos[page] = [[], [], [], []]\n for slot in range(4):\n if not self.photos[page][slot]:\n self.photos[page][slot] = label\n page_number, slot_number = (page, slot)\n added = True\n break\n if added:\n break\n\n if added:\n return f\"{label} photo added successfully on page {page_number + 1} slot {slot_number + 1}\"\n else:\n return \"No more free slots\"\n\n def display(self):\n result = \"-\" * 11\n\n for page in range(len(self.photos)):\n result += \"\\n\"\n for photo in range(len(self.photos[page])):\n if not self.photos[page][photo] == []:\n if photo == len(self.photos[page]) - 1:\n result += \"[]\"\n else:\n result += \"[] \"\n result += \"\\n\"\n result += \"-\" * 11\n\n return result\n","repo_name":"antondelchev/Python-OOP","sub_path":"Static and Class Methods/photo_album.py","file_name":"photo_album.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"72285033539","text":"import tkinter as tk\n\n\nclass OneRepMaxCalculator:\n \"\"\"\n A class used to calculate the estimated one-repetition maximum (1RM) for a weightlifting exercise.\n\n Attributes\n ----------\n window : tk.Tk\n The main window of the application.\n weight_entry : tk.Entry\n Entry field to input the weight lifted.\n rep_entry : tk.Entry\n Entry field to input the number of reps performed.\n result_value_label : tk.Label\n Label to display the calculated 1RM.\n\n Methods\n -------\n calculate_1rm():\n Calculates the estimated 1RM based on the Epley formula.\n display_result():\n Displays the calculated 1RM in the application window.\n run():\n Runs the application.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes the OneRepMaxCalculator with a window and widgets.\"\"\"\n self.window = tk.Tk()\n self.window.title(\"One-Rep Max 
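# PhotoAlbum.from_photos_count above sizes the album at four slots per page via
# math.ceil; the boundary cases make the rounding direction concrete:
import math

for n in (1, 4, 5, 9):
    print(n, "photos ->", math.ceil(n / 4), "pages")  # 1, 1, 2, 3 pages respectively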
Calculator\")\n self.window.geometry(\"300x150\")\n\n # Create and pack widgets\n tk.Label(self.window, text=\"Enter the weight you lifted (in kg):\").pack()\n self.weight_entry = tk.Entry(self.window)\n self.weight_entry.pack()\n\n tk.Label(self.window, text=\"Enter the number of reps you performed:\").pack()\n self.rep_entry = tk.Entry(self.window)\n self.rep_entry.pack()\n\n tk.Button(self.window, text=\"Calculate\", command=self.display_result).pack()\n\n tk.Label(self.window, text=\"Your estimated one-rep max (1RM):\").pack()\n self.result_value_label = tk.Label(self.window)\n self.result_value_label.pack()\n\n def calculate_1rm(self):\n \"\"\"Calculates and returns the estimated 1RM.\"\"\"\n weight = int(self.weight_entry.get())\n reps = int(self.rep_entry.get())\n return (weight * reps * 0.0333) + weight\n\n def display_result(self):\n \"\"\"Calculates the 1RM and updates result_value_label with it.\"\"\"\n one_rep_max = self.calculate_1rm()\n self.result_value_label.config(text=f\"{one_rep_max} kg\")\n\n def run(self):\n \"\"\"Runs the Tkinter event loop.\"\"\"\n self.window.mainloop()\n\n\n# Usage\nif __name__ == \"__main__\":\n calculator = OneRepMaxCalculator()\n calculator.run()\n\n# Improve the program.\n# Make the fonts, bigger.\n# - Use text formatting...\n# Use dark mode.\n# Have an option to use dark mode and light mode.\n","repo_name":"geekcomputers/Python","sub_path":"nitkarshchourasia/one_rep_max_calculator/one_rep_max_calculator_gui.py","file_name":"one_rep_max_calculator_gui.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":28675,"dataset":"github-code","pt":"80"} +{"seq_id":"5521482624","text":"from string import printable as chars\nfrom random import randint, choice\n\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nimport simplejson as json\n\nfrom teams.models import Task\nfrom videos.models import Video\nfrom utils import test_factories\nfrom utils.multi_query_set import MultiQuerySet\nfrom utils.compress import compress, decompress\nfrom utils.chunkediter import chunkediter\n\nclass MultiQuerySetTest(TestCase):\n fixtures = ['test.json']\n\n def test_full(self):\n self.assertEqual(list(Video.objects.all()),\n list(MultiQuerySet(Video.objects.all())),\n \"Full, single MQS didn't match full QS.\")\n\n self.assertEqual(list(Video.objects.all()),\n list(MultiQuerySet(Video.objects.none(),\n Video.objects.all(),\n Video.objects.none())),\n \"Full MQS with blanks didn't match full QS.\")\n\n self.assertEqual(list(Video.objects.all()) + list(Video.objects.all()),\n list(MultiQuerySet(Video.objects.none(),\n Video.objects.all(),\n Video.objects.none(),\n Video.objects.all())),\n \"Double MQS with blanks didn't match double full QS.\")\n\n def test_slice(self):\n qs = Video.objects.all()\n mqs = MultiQuerySet(Video.objects.all())\n\n self.assertEqual(list(qs[0:1]),\n list(mqs[0:1]),\n \"MQS[:1] failed.\")\n\n self.assertEqual(list(qs[0:2]),\n list(mqs[0:2]),\n \"MQS[:2] failed.\")\n\n self.assertEqual(list(qs[0:3]),\n list(mqs[0:3]),\n \"MQS[:3] (out-of-bounds endpoint) failed.\")\n\n self.assertEqual(list(qs[1:3]),\n list(mqs[1:3]),\n \"MQS[1:3] failed.\")\n\n self.assertEqual(list(qs[2:3]),\n list(mqs[2:3]),\n \"MQS[2:3] failed.\")\n\n self.assertEqual(list(qs[1:1]),\n list(mqs[1:1]),\n \"MQS[1:1] (empty slice) failed.\")\n\n def test_slice_multiple(self):\n qs = list(Video.objects.all())\n qs = qs + qs + qs\n mqs = MultiQuerySet(Video.objects.all(),\n Video.objects.all(),\n 
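# calculate_1rm above is the Epley estimate; the 0.0333 factor is an approximation of
# 1/30, so the code agrees with the textbook form 1RM = w * (1 + r/30) to within
# rounding:
weight, reps = 100, 5
epley_exact = weight * (1 + reps / 30)          # 116.67
epley_code = (weight * reps * 0.0333) + weight  # 116.65
print(abs(epley_exact - epley_code) < 0.05)     # True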
Video.objects.all())\n\n self.assertEqual(qs[0:3],\n list(mqs[0:3]),\n \"MQS[:3] failed.\")\n\n self.assertEqual(qs[0:6],\n list(mqs[0:6]),\n \"MQS[:6] (entire range) failed.\")\n\n self.assertEqual(qs[0:7],\n list(mqs[0:7]),\n \"MQS[:7] (out-of-bounds endpoint) failed.\")\n\n self.assertEqual(qs[1:3],\n list(mqs[1:3]),\n \"MQS[1:3] failed.\")\n\n self.assertEqual(qs[1:6],\n list(mqs[1:6]),\n \"MQS[1:6] (entire range) failed.\")\n\n self.assertEqual(qs[1:7],\n list(mqs[1:7]),\n \"MQS[1:7] (out-of-bounds endpoint) failed.\")\n\n self.assertEqual(qs[3:3],\n list(mqs[3:3]),\n \"MQS[3:3] failed.\")\n\n self.assertEqual(qs[3:6],\n list(mqs[3:6]),\n \"MQS[3:6] (entire range) failed.\")\n\n self.assertEqual(qs[3:7],\n list(mqs[3:7]),\n \"MQS[3:7] (out-of-bounds endpoint) failed.\")\n\n\nclass CompressTest(TestCase):\n def test_compression(self):\n # Make sure the empty string is handled.\n self.assertEqual('', decompress(compress('')))\n\n # Make sure a bunch of random ASCII data compresses correctly.\n for _ in xrange(100):\n l = randint(1, 4096)\n data = ''.join(choice(chars) for _ in xrange(l))\n self.assertEqual(data, decompress(compress(data)))\n\n # Make sure a bunch of random bytes compress correctly.\n for _ in xrange(100):\n l = randint(1, 4096)\n data = ''.join(chr(randint(0, 255)) for _ in xrange(l))\n self.assertEqual(data, decompress(compress(data)))\n\n # Make sure a bunch of random Unicode data compresses correctly.\n for _ in xrange(100):\n l = randint(1, 1024)\n data = ''.join(choice(u'☃ಠ_ಠ✿☺☻☹♣♠♥♦⌘⌥✔★☆™※±×~≈÷≠π'\n u'αßÁáÀàÅåÄ䯿ÇçÉéÈèÊêÍíÌìÎîÑñ'\n u'ÓóÒòÔôÖöØøÚúÙùÜüŽž')\n for _ in xrange(l))\n\n encoded_data = data.encode('utf-8')\n round_tripped = decompress(compress(encoded_data)).decode('utf-8')\n\n self.assertEqual(data, round_tripped)\n\n\n# TODO: Test chunking somehow.\nclass ChunkedIterTest(TestCase):\n def test_iterate(self):\n data = [1, 10, 100, 1000, 10000]\n\n sum = 0\n for i in chunkediter(data):\n sum += i\n self.assertEqual(sum, 11111)\n\n sum = 0\n for i in chunkediter(data, 2):\n sum += i\n self.assertEqual(sum, 11111)\n\n sum = 0\n for i in chunkediter(data, 1):\n sum += i\n self.assertEqual(sum, 11111)\n\n def test_empty(self):\n data = []\n\n sum = 0\n for i in chunkediter(data):\n sum += i\n self.assertEqual(sum, 0)\n\n sum = 0\n for i in chunkediter(data, 1):\n sum += i\n self.assertEqual(sum, 0)\n\n\nclass BleachSanityTest(TestCase):\n\n def test_weird_input(self):\n import bleach\n html = \"hello\"\n value = bleach.clean(html, strip=True, tags=[], attributes=[])\n self.assertEquals(u\"hello\", value)\n\n html = \"\"\n value = bleach.clean(html, strip=True, tags=[], attributes=[])\n self.assertEquals(u\"\", value)\n\n html = '
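# CompressTest above checks that random ASCII, raw bytes, and UTF-8 text survive a
# compress/decompress round trip. The same property, demonstrated with the standard
# library's zlib (an assumption -- the project's helpers may wrap a different codec):
import os
import zlib

for _ in range(100):
    data = os.urandom(256)
    assert zlib.decompress(zlib.compress(data)) == data
print("round-trip ok")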
'\n value = bleach.clean(html, strip=True, tags=[], attributes=[])\n self.assertEquals(u\"\", value)\n\nclass TestEditor(object):\n \"\"\"Simulates the editor widget for unit tests\"\"\"\n def __init__(self, client, video, original_language_code=None,\n base_language_code=None, mode=None):\n \"\"\"Construct a TestEditor\n\n :param client: django TestClient object for HTTP requests\n :param video: Video object to edit\n :param original_language_code: language code for the video audio.\n Should be set if and only if the primary_audio_language_code hasn't\n been set for the video.\n :param base_language_code: base language code for to use for\n translation tasks.\n :param mode: one of (\"review\", \"approve\" or None)\n \"\"\"\n self.client = client\n self.video = video\n self.base_language_code = base_language_code\n if original_language_code is None:\n self.original_language_code = video.primary_audio_language_code\n else:\n if video.primary_audio_language_code is not None:\n raise AssertionError(\n \"primary_audio_language_code is set (%r)\" %\n video.primary_audio_language_code)\n self.original_language_code = original_language_code\n self.mode = mode\n self.task_approved = None\n self.task_id = None\n self.task_notes = None\n self.task_type = None\n\n def set_task_data(self, task, approved, notes):\n \"\"\"Set data for the task that this edit is for.\n\n :param task: Task object\n :param approved: did the user approve the task. Should be one of the\n values of Task.APPROVED_IDS.\n :param notes: String to set for notes\n \"\"\"\n type_map = {\n 10: 'subtitle',\n 20: 'translate',\n 30: 'review',\n 40: 'approve',\n }\n self.task_id = task.id\n self.task_type = type_map[task.type]\n self.task_notes = notes\n self.task_approved = approved\n\n def _submit_widget_rpc(self, method, **data):\n \"\"\"POST data to the widget:rpc view.\"\"\"\n\n url = reverse('widget:rpc', args=(method,))\n post_data = dict((k, json.dumps(v)) for k, v in data.items())\n response = self.client.post(url, post_data)\n response_data = json.loads(response.content)\n if 'error' in response_data:\n raise AssertionError(\"Error calling widget rpc method %s:\\n%s\" %\n (method, response_data['error']))\n return response_data\n\n def run(self, language_code, completed=True, save_for_later=False):\n \"\"\"Make the HTTP requests to simulate the editor\n\n We will use test_factories.dxfp_sample() for the subtitle data.\n\n :param language_code: code for the language of these subtitles\n :param completed: simulate the completed checkbox being set\n :param save_for_later: simulate the save for later button\n \"\"\"\n\n self._submit_widget_rpc('fetch_start_dialog_contents',\n video_id=self.video.video_id)\n existing_language = self.video.subtitle_language(language_code)\n if existing_language is not None:\n subtitle_language_pk = existing_language.pk\n else:\n subtitle_language_pk = None\n\n response_data = self._submit_widget_rpc(\n 'start_editing',\n video_id=self.video.video_id,\n language_code=language_code,\n original_language_code=self.original_language_code,\n base_language_code=self.base_language_code,\n mode=self.mode,\n subtitle_language_pk=subtitle_language_pk)\n session_pk = response_data['session_pk']\n\n self._submit_widget_rpc('finished_subtitles',\n completed=completed,\n save_for_later=save_for_later,\n session_pk=session_pk,\n subtitles=test_factories.dxfp_sample('en'),\n task_approved=self.task_approved,\n task_id=self.task_id,\n task_notes=self.task_notes,\n 
task_type=self.task_type)\n","repo_name":"Magnil/unisubs","sub_path":"utils/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"80"}
+{"seq_id":"29487940786","text":"import logging\n\nlogger = logging.getLogger()\n\nimport merc\nimport interp\n\n\ndef do_worth(ch, argument):\n    if ch.is_npc():\n        ch.send(\"You have %ld gold and %ld silver.\\n\" % (ch.gold, ch.silver))\n        return\n    ch.send(\"You have %ld gold, %ld silver, and %d experience (%d exp to level).\\n\" % (\n        ch.gold, ch.silver, ch.exp, (ch.level + 1) * ch.exp_per_level(ch.points) - ch.exp))\n\n\ninterp.register_command(interp.cmd_type('worth', do_worth, merc.POS_SLEEPING, 0, merc.LOG_NORMAL, 1))\n","repo_name":"quixadhal/PyRom","sub_path":"Rom24/pysrc/commands/do_worth.py","file_name":"do_worth.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"80"}
+{"seq_id":"34531941047","text":"from sys import path\npath.append('..')\n\nfrom player import *\nfrom basics import *\n\nMAX_SHARES_PER_TURN = 3\n\nclass SmallestAntiStrategy(Player):\n    \"\"\"\n    A strategy that chooses the smallest tile according to tile<=?\n    and as many shares as possible in anti-alphabetical order.\n    \"\"\"\n\n    #Returns a TileMove\n    def place_tile(self):\n        \"\"\"\n        Places the smallest tile determined by tile<=?\n        \"\"\"\n        # sort by tile<=? (list.sort() returns None, so use sorted())\n        sorted_tiles = sorted(self.tiles)\n        tile_move = TileMove()\n        for tile in sorted_tiles:\n            if self.board.can_place_tile(tile):\n                tile_move.tile = tile\n                if self.board.is_founding(tile):\n                    tile_move.hotel = sorted(self.board.not_placed_hotels())[0]\n                elif self.board.is_merging(tile):\n                    tile_move.hotel = sorted(self.board.merge_acquirers(tile))[0]\n        return tile_move\n\n    #Returns a BuyMove\n    def buy_stock(self):\n        \"\"\"\n        Purchases up to the max number of stocks in anti-alphabetical order\n        \"\"\"\n        shares_to_buy = list()\n        for i in range(MAX_SHARES_PER_TURN):\n            #Sorted in anti-alphabetical order\n            for hotel, remaining in sorted(self.available_shares.items(), key=lambda i: i[0], reverse=True):\n                if remaining > 0 and self.money > calculate_stock_price(hotel, len(self.board[hotel])):\n                    shares_to_buy.append(hotel)\n                    break\n        return BuyMove(shares_to_buy)\n","repo_name":"shargoj-hw/acquire","sub_path":"player/smallest_anti.py","file_name":"smallest_anti.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"}
+{"seq_id":"24513945267","text":"import numpy as np\n\nclass MatrisClass():\n    def __init__(self,satir,sutun):\n        self.satir = satir\n        self.sutun = sutun\n        self.matris = []\n\n        # dimensioning of matrix 1\n        for i in range(2):\n            for j in range(2):\n                eleman1= int(input(\"Enter dimension {} row {}: \".format(i,j)))\n                self.matris.append(eleman1)\n        self.matris = np.array(self.matris).reshape(2,2)\n\n    def indekse_gore_guncelle(self):\n        self.matris_yazdir()\n        print(\"Which row do you want to edit?\")\n        x = int(input(\"Which row? \"))\n        y = int(input(\"Which column? \"))\n        print(self.matris[x,y])\n        yeni_deger = int(input(\"Enter the new value: \"))\n        self.matris[x,y] = yeni_deger\n\n    def deger_getir(self,x,y):\n        print(\"Requested matrix value:\",self.matris[x,y])\n\n    def satir_getir(self):\n        print(self.satir)\n    \n    def sutun_getir(self):\n        print(self.sutun)\n\n    def matris_yazdir(self):\n        print(self.matris)\n\n    def matris_ile_nesne_olustur(satir,sutun):\n        # build a 2x2 matrix directly from the given rows\n        matris_nesne = np.array([satir, sutun]).reshape(2,2)\n        return matris_nesne\n\ndef iki_matrisi_carpma_ve_yazdir(x,y):\n    sonuc = np.multiply(x,y)\n    print(sonuc)\n\nmatris = MatrisClass([],[])\nmatris.matris_yazdir()\nmatris.deger_getir(1,0)\n\nmatris2 = MatrisClass([],[])\nmatris2.matris_yazdir()\n\nmatris.indekse_gore_guncelle()\nprint(matris.matris)\n\n# multiply the underlying arrays, not the wrapper objects\niki_matrisi_carpma_ve_yazdir(matris.matris,matris2.matris)","repo_name":"ErenElagz/Archives","sub_path":"Python/Bil207/Matris Çarpımı.py","file_name":"Matris Çarpımı.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"}
+{"seq_id":"27657605701","text":"import numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\nfrom transformers import *\nimport torch.utils.data as Data\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\nfrom FastAutoAugment.nlp_augmentations import synonym_replacement_transform, random_insertion_transform, random_swap_transform, random_deletion_transform\nimport spacy_sentence_bert\nfrom tqdm import tqdm\nfrom advaug import advaug;\nimport pickle\nfrom sentence_transformers import SentenceTransformer\nimport scipy\nfrom os import path\n\ndef read_csv_and_return_x_y(data_path, dataset_type='ag_news'):\n    train_df = pd.read_csv(data_path, header=None)\n    train_df = train_df.dropna()\n    # Here we only use the bodies and removed titles to do the classifications\n    if dataset_type==\"imdb\":\n        assert False\n    elif dataset_type==\"ag_news\":\n        # Converting 1-4 to 0-3\n        y = np.array(train_df[0].values) - 1\n        X = np.array(train_df[2].values)\n    elif dataset_type==\"yahoo_answers\":\n        y = np.array(train_df[0].values) - 1\n        X = np.array(train_df[2].values)\n    return X, y\n\ndef get_datasets(data_path,\n                 max_seq_len=256,\n                 model='distilbert-base-uncased',\n                 train_aug=False,\n                 dataset_type='ag_news',\n                 stratified_split_k=5,\n                 percentage_of_val_in_each_split = 0.2):\n    \"\"\"\n    Read data, split the dataset, and build dataset for dataloaders.\n    \"\"\"\n    # Load the tokenizer for bert\n    tokenizer = AutoTokenizer.from_pretrained(model)\n\n    train_df = pd.read_csv(data_path, header=None)\n    train_df = train_df.dropna()\n\n    # Here we only use the bodies and removed titles to do the classifications\n    n_labels = 0\n    if dataset_type==\"imdb\":\n        y = train_df[1].values\n        y = np.array([1 if val=='positive' else 0 for val in y])\n        X = np.array(train_df[0].values)\n    elif dataset_type==\"ag_news\":\n        # Converting 1-4 to 0-3\n        y = train_df[0].values\n        y = np.array(y) - 1\n        X = np.array(train_df[1].values)\n        n_labels = 4\n\n    # Split the labeled training set, unlabeled training set, development set\n    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)\n\n    # Build the dataset class for each set\n    train_dataset = create_dataset(X_train, y_train, tokenizer, max_seq_len)\n    val_dataset = create_dataset(X_val, y_val, tokenizer, max_seq_len)\n\n    return train_dataset, val_dataset, n_labels\n\n# def get_closest_neighbors(dataset_text):\n#     all_similarities = []\n#     
sentence_bert_model = spacy_sentence_bert.load_model('en_bert_base_nli_mean_tokens')\n# for i in tqdm(range(len(dataset_text))):\n# similarities_for_i = []\n# for j in range(len(dataset_text)):\n# if i==j:\n# continue\n# string_1 = sentence_bert_model(str(dataset_text[i]))\n# string_2 = sentence_bert_model(str(dataset_text[j]))\n# similarity = string_1.similarity(string_2)\n# similarities_for_i.append((similarity, j))\n# similarities_for_i.sort(reverse=True)\n# similarities_for_i = [item[1] for item in similarities_for_i]\n# all_similarities.append(similarities_for_i)\n# return all_similarities\n\ndef get_closest_neighbors(dataset_text):\n model = SentenceTransformer('bert-base-nli-mean-tokens')\n sentence_embeddings = model.encode(dataset_text)\n all_similarities = []\n for i in tqdm(range(len(dataset_text))):\n query_embedding = sentence_embeddings[i]\n distances = scipy.spatial.distance.cdist([query_embedding], sentence_embeddings, \"cosine\")[0]\n results = zip(range(len(distances)), distances)\n results = sorted(results, key=lambda x: x[1])\n results = [item[0] for item in results]\n assert results[0] == i\n results = results[1:]\n all_similarities.append(results)\n return all_similarities\n\nclass create_dataset(Dataset):\n def __init__(self, dataset_text, dataset_label,\n tokenizer_type, max_seq_len=256, mix=None, num_classes=10, alpha=-1, knn_lada=3, mu_lada=0.5, translation_loss = 0.2, sampling_ratio = 0.25, probability_of_application = 1, dataset_identifier='train_10'):\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_type)\n self.text = dataset_text\n self.labels = dataset_label\n self.max_seq_len = max_seq_len\n self.mix = mix\n self.num_classes = num_classes\n self.probability_of_application = probability_of_application\n self.dataset_identifier = dataset_identifier\n\n if mix in ['TMix_with_EDA', 'TMix_with_EDA_synonym_replacement', 'TMix_with_EDA_random_insertion', 'TMix_with_EDA_random_swap', 'TMix_with_EDA_random_deletion']:\n assert alpha != -1, 'Assign alpha with TMix_with_EDA'\n self.alpha = alpha\n if mix == 'TMix_with_EDA':\n self.augmentations = [synonym_replacement_transform, random_insertion_transform, random_swap_transform, random_deletion_transform]\n elif mix == 'TMix_with_EDA_synonym_replacement':\n self.augmentations = [synonym_replacement_transform]\n elif mix == 'TMix_with_EDA_random_insertion':\n self.augmentations = [random_insertion_transform]\n elif mix == 'TMix_with_EDA_random_swap':\n self.augmentations = [random_swap_transform]\n elif mix == 'TMix_with_EDA_random_deletion':\n self.augmentations = [random_deletion_transform]\n else:\n assert False\n\n if mix == 'Inter_LADA':\n similarity_file = f'data/computed_data/Intra_LADA_for_10_per_class_yahoo_{self.dataset_identifier}.pkl'\n if dataset_identifier == 'sst2_train_1_percent':\n similarity_file = f'data/computed_data/Inter_LADA_for_{self.dataset_identifier}.pkl'\n if dataset_identifier == 'sst2_val_10_samples':\n similarity_file = f'data/computed_data/Inter_LADA_for_{self.dataset_identifier}.pkl'\n if path.exists(similarity_file):\n print(\"Using precomputed close neighbors\")\n self.close_neighbors = pickle.load(open(similarity_file, 'rb'))\n else:\n print(\"Creating close neighbors\")\n self.close_neighbors = get_closest_neighbors(dataset_text)\n pickle.dump(self.close_neighbors, open(similarity_file, 'wb'))\n self.knn_lada = knn_lada\n self.mu_lada = mu_lada\n\n if mix == 'TMix_with_AdvAug':\n adv_aug_sentences = 
f'data/computed_data/adv_aug_{translation_loss}_{sampling_ratio}_{self.dataset_identifier}.pkl'\n self.translation_loss = translation_loss\n self.sampling_ratio = sampling_ratio\n if path.exists(adv_aug_sentences):\n print(\"Using precomputed adv augmentations\")\n self.adverserial_sentences = pickle.load(open(adv_aug_sentences, 'rb'))\n else:\n print(\"Creating adv augmentations\")\n # print(type(dataset_text))\n self.adverserial_sentences = advaug(dataset_text.tolist(), translation_loss, sampling_ratio)\n assert len(self.adverserial_sentences) == len(dataset_text)\n pickle.dump(self.adverserial_sentences, open(adv_aug_sentences, 'wb'))\n\n def __len__(self):\n return len(self.labels)\n\n def encode_text(self, text):\n tokens = self.tokenizer.tokenize(text)\n if len(tokens) > self.max_seq_len:\n tokens = tokens[:self.max_seq_len]\n length = len(tokens)\n encode_result = self.tokenizer.convert_tokens_to_ids(tokens)\n padding = [0] * (self.max_seq_len - len(encode_result))\n encode_result += padding\n return encode_result, length\n\n def prepare_data(self, idx):\n text = self.text[idx]\n # tokens = self.tokenizer.tokenize(text)\n # if len(tokens) > self.max_seq_len:\n # tokens = tokens[:self.max_seq_len]\n # length = len(tokens)\n # encode_result = self.tokenizer.convert_tokens_to_ids(tokens)\n # padding = [0] * (self.max_seq_len - len(encode_result))\n # encode_result += padding\n encode_result, length = self.encode_text(text)\n attention_mask = [1] * length + [0] * (self.max_seq_len - length)\n return (torch.tensor(encode_result),\n torch.tensor(attention_mask),\n self.labels[idx],\n length)\n\n def __getitem__(self, idx):\n data_for_idx = self.prepare_data(idx)\n if self.mix is None:\n return data_for_idx\n\n if self.mix == 'duplicate' or np.random.rand() > self.probability_of_application:\n # Probability of applying the augmentation, useful for FastAutoAugment\n encoded_1 = data_for_idx[0]\n label_1 = [0]*self.num_classes\n label_1[data_for_idx[2]] = 1\n return (encoded_1, encoded_1, torch.Tensor(label_1), torch.Tensor(label_1))\n\n if self.mix == 'TMix':\n random_index = np.random.randint(0, len(self.labels))\n data_for_random_idx = self.prepare_data(random_index)\n\n # Combine both\n label_1 = [0]*self.num_classes\n label_1[data_for_idx[2]] = 1\n label_2 = [0]*self.num_classes\n label_2[data_for_random_idx[2]] = 1\n\n encoded_1 = data_for_idx[0]\n encoded_2 = data_for_random_idx[0]\n # length = max(data_for_idx[3], data_for_random_idx[3])\n # attention_mask_1 = data_for_idx[1]\n # attention_mask_2 = data_for_random_idx[1]\n return (encoded_1, encoded_2, torch.Tensor(label_1), torch.Tensor(label_2))\n\n if self.mix in ['TMix_with_EDA', 'TMix_with_EDA_synonym_replacement', 'TMix_with_EDA_random_insertion', 'TMix_with_EDA_random_swap', 'TMix_with_EDA_random_deletion']:\n transform_index = np.random.randint(0, len(self.augmentations))\n transform = self.augmentations[transform_index]\n augmented_sentence = transform(self.text[idx], self.alpha, 1)[0]\n encoded_1 = data_for_idx[0]\n encoded_2, _ = self.encode_text(augmented_sentence)\n label_1 = [0]*self.num_classes\n label_1[data_for_idx[2]] = 1\n # label_2 = label_1.copy()\n return (encoded_1, torch.tensor(encoded_2), torch.Tensor(label_1), torch.Tensor(label_1))\n\n if self.mix == 'Intra_LADA':\n # Permute the sentence\n sentence_split = np.array(self.text[idx].split(' '))\n permutation = np.random.permutation(range(len(sentence_split)))\n sentence_split_new = sentence_split[permutation]\n augmented_sentence = ' 
'.join(sentence_split_new)\n encoded_1 = data_for_idx[0]\n encoded_2, _ = self.encode_text(augmented_sentence)\n label_1 = [0]*self.num_classes\n label_1[data_for_idx[2]] = 1\n return (encoded_1, torch.tensor(encoded_2), torch.Tensor(label_1), torch.Tensor(label_1))\n\n if self.mix == 'Inter_LADA':\n random_index = None\n similar_indices = self.close_neighbors[idx]\n if np.random.rand() < self.mu_lada:\n random_index = np.random.choice(similar_indices[:self.knn_lada])\n else:\n random_index = np.random.choice(similar_indices[self.knn_lada:])\n\n data_for_random_idx = self.prepare_data(random_index)\n\n # Combine both\n label_1 = [0]*self.num_classes\n label_1[data_for_idx[2]] = 1\n label_2 = [0]*self.num_classes\n label_2[data_for_random_idx[2]] = 1\n\n encoded_1 = data_for_idx[0]\n encoded_2 = data_for_random_idx[0]\n # length = max(data_for_idx[3], data_for_random_idx[3])\n # attention_mask_1 = data_for_idx[1]\n # attention_mask_2 = data_for_random_idx[1]\n return (encoded_1, encoded_2, torch.Tensor(label_1), torch.Tensor(label_2))\n\n if self.mix == 'TMix_with_AdvAug':\n augmented_sentence = self.adverserial_sentences[idx]\n encoded_1 = data_for_idx[0]\n encoded_2, _ = self.encode_text(augmented_sentence)\n label_1 = [0] * self.num_classes\n label_1[data_for_idx[2]] = 1\n # label_2 = label_1.copy()\n return (encoded_1, torch.tensor(encoded_2), torch.Tensor(label_1), torch.Tensor(label_1))\n assert False, self.mix\n\n\nif __name__ == '__main__':\n pass\n # train_dataset, val_dataset, n_labels = get_datasets('/home/gondi/Documents/MSCS/Research/fast-autoaugment/data/IMDB_Dataset.csv', imdb=True)\n # train_dataset, val_dataset, n_labels = get_datasets('/home/gondi/Documents/MSCS/Research/fast-autoaugment/data/ag_news_csv/train.csv')\n # print(len(train_dataset))\n # print(train_dataset[10])\n","repo_name":"sgondala/Automix","sub_path":"FastAutoAugment/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":13022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"80"} +{"seq_id":"42870854874","text":"from sentence_transformers import SentenceTransformer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\n\nembedding_model = None\n\ndef init_embedding_model(device=None):\n global embedding_model\n embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')\n if device is not None:\n embedding_model = embedding_model.to(device)\n \ndef get_similarity_scores(sentences):\n if embedding_model is None:\n raise Exception(\"embedding model not initialized\")\n embeddings = embedding_model.encode(sentences)\n similarity_scores = cosine_similarity(embeddings)\n return similarity_scores \n \ndef get_sentencepairs(articles):\n sentence_pairs = []\n sentences = set([])\n sentence2urls = {}\n for i, a1 in enumerate(articles):\n url1 = a1['url']\n for j, a2 in enumerate(articles):\n url2 = a2['url']\n if i == j:\n continue\n \n for s1 in a1['claims']:\n c1 = s1['claim']\n if c1 in sentence2urls:\n sentence2urls[c1].add(url1)\n else:\n sentence2urls[c1] = set([url1])\n \n for s2 in a2['claims']:\n c2 = s2['claim']\n sentence_pairs.append((c1, c2))\n sentences.add(c1)\n sentences.add(c2)\n \n if c2 in sentence2urls:\n sentence2urls[c2].add(url2)\n else:\n sentence2urls[c2] = set([url2])\n \n sentences = [x for x in list(sorted(sentences)) if len(x) > 0]\n return sentences, sentence_pairs, 
sentence2urls","repo_name":"jmilbauer/NewsSense","sub_path":"src/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"18270926105","text":"import boto3\r\nimport base64\r\nfrom botocore.exceptions import NoCredentialsError\r\nimport io\r\nimport os\r\n\r\nfrom dotenv import load_dotenv\r\nload_dotenv(verbose=True)\r\n\r\nbucket_name = \"cloud1-project-bucket\"\r\n\r\ndef download_file_from_bucket(s3_key):\r\n \r\n \r\n s3 = boto3.resource('s3',\r\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\r\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\r\n region_name=os.getenv('REGION_NAME'))\r\n\r\n obj = s3.Object(bucket_name, s3_key)\r\n io_stream = io.BytesIO()\r\n obj.download_fileobj(io_stream)\r\n\r\n io_stream.seek(0)\r\n data = base64.b64encode(io_stream.read()).decode(\"utf-8\")\r\n\r\n print (\"Download Successful\")\r\n\r\n return data\r\n\r\ndef delete_file_from_bucket(s3_key):\r\n\r\n s3 = boto3.client('s3',\r\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\r\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\r\n region_name=os.getenv('REGION_NAME'))\r\n\r\n s3.delete_object(\r\n Bucket= bucket_name,\r\n Key= s3_key,\r\n ) \r\n\r\ndef delete_all_from_bucket ():\r\n\r\n s3 = boto3.resource('s3',\r\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\r\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\r\n region_name=os.getenv('REGION_NAME'))\r\n\r\n bucket = s3.Bucket(bucket_name)\r\n\r\n bucket.objects.all().delete()\r\n print (\"S3 Bucket Dumped!\")\r\n\r\n# s3.download_file(Key=s3_key, Bucket=\"cloud1-project-bucket\", Filename=dst_path)\r\n\r\n# download_file_from_bucket('tci-s3-demo', 'children_download.csv')\r\n# with open('children_download.csv') as fo:\r\n# print(fo.read())\r\n\r\n\r\n\r\n\r\ndef upload_to_aws(local_file, s3_key):\r\n\r\n \r\n s3 = boto3.client('s3',\r\n aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),\r\n aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),\r\n region_name=os.getenv('REGION_NAME'))\r\n\r\n try:\r\n s3.upload_fileobj(Fileobj=local_file, Bucket=\"cloud1-project-bucket\", Key=s3_key)\r\n print(\"Upload Successful\")\r\n return True\r\n except FileNotFoundError:\r\n print(\"The file was not found\")\r\n return False\r\n except NoCredentialsError:\r\n print(\"Credentials not available\")\r\n return False\r\n\r\n\r\n# upload_to_aws('local_file', 's3_file_name')\r\n\r\n\r\n# for bucket in s3.buckets.all():\r\n# print(bucket.name)\r\n\r\n# s3.download_file(\r\n# Bucket= s3, Key=\"train.csv\", Filename=\"data/downloaded_from_s3.csv\"\r\n# )\r\n\r\n\r\n\r\n","repo_name":"CloakedReaper27/Flask-AWS-Project","sub_path":"Cloud/S3.py","file_name":"S3.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"71184864897","text":"import numpy as numpy\nimport math\nfrom . 
import Tools\nfrom .Tools import setAttribute as setatt\nfrom .NonLinearModel import NonLinearModel\nfrom .LorentzModel import LorentzModel\nfrom .GaussModel import GaussModel\n\n__author__ = \"Do Kester\"\n__year__ = 2020\n__license__ = \"GPL3\"\n__version__ = \"2.5.3\"\n__url__ = \"https://www.bayesicfitting.nl\"\n__status__ = \"Perpetual Beta\"\n\n# * This file is part of the BayesicFitting package.\n# *\n# * BayesicFitting is free software: you can redistribute it and/or modify\n# * it under the terms of the GNU Lesser General Public License as\n# * published by the Free Software Foundation, either version 3 of\n# * the License, or ( at your option ) any later version.\n# *\n# * BayesicFitting is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU Lesser General Public License for more details.\n# *\n# * The GPL3 license can be found at .\n# *\n# * 2016 - 2020 Do Kester\n\nclass PseudoVoigtModel( NonLinearModel ):\n \"\"\"\n Approximation of VoigtModel as the sum of a GaussModel and a LorentzModel\n\n F(x:p) = p_3 * L(x:p) + ( 1 - p_3 ) * G(x:p)\n\n where L() and G() are the LorentzModel and the GaussModel, resp. and p_3\n is the fractional contribution of them. 0 < p_3 < 1.\n\n The models takes 4 parameters: amplitude, center frequency, half-width and\n the balance between the models\n.\n These are initialised to [1, 0, 1, 0.5].\n Parameter 2 (width) is always kept positive ( >=0 ).\n\n Examples\n --------\n >>> voigt = PseudoVoigtModel( )\n >>> voigt.setParameters( [5, 4, 1, 0.7] )\n >>> print( voigt( numpy.arange( 41 , dtype=float ) / 5 ) ) # from [0,8]\n\n Attributes\n ----------\n gauss : GaussModel\n to construct the gauss parts\n lorentz : LorentzModel\n to construct the lorentz parts\n\n Attributes from Model\n ---------------------\n npchain, parameters, stdevs, xUnit, yUnit\n\n Attributes from FixedModel\n --------------------------\n npmax, fixed, parlist, mlist\n\n Attributes from BaseModel\n --------------------------\n npbase, ndim, priors, posIndex, nonZero, tiny, deltaP, parNames\n\n \"\"\"\n\n def __init__( self, copy=None, **kwargs ):\n \"\"\"\n PseudoVoigt model.\n
\n Number of parameters is 4.\n\n Parameters\n ----------\n copy : PseudoVoigtModel\n to be copied\n fixed : None or dictionary of {int:float|Model}\n int index of parameter to fix permanently.\n float|Model values for the fixed parameters.\n Attribute fixed can only be set in the constructor.\n See: @FixedModel\n\n \"\"\"\n param = [1.0, 0.0, 1.0, 0.5]\n names = [\"amplitude\",\"center\",\"width\",\"balance\"]\n\n super( PseudoVoigtModel, self ).__init__( 4, copy=copy, params=param,\n names=names, **kwargs )\n\n setatt( self, \"gauss\", GaussModel() )\n setatt( self, \"lorentz\", LorentzModel() )\n if copy is None :\n self.posIndex = [2]\n\n def copy( self ):\n \"\"\" Copy method. \"\"\"\n return PseudoVoigtModel( copy=self )\n\n def __setattr__( self, name, value ):\n \"\"\"\n Set attributes.\n\n Parameters\n ----------\n name : string\n name of the attribute\n value :\n value of the attribute\n\n \"\"\"\n if name == \"gauss\" :\n setatt( self, name, value, type=GaussModel )\n elif name == \"lorentz\" :\n setatt( self, name, value, type=LorentzModel )\n else :\n super( PseudoVoigtModel, self ).__setattr__( name, value )\n\n\n def baseResult( self, xdata, params ):\n \"\"\"\n Returns the result of the model function.\n\n Note:\n 1. The \"balance\" parameter (item 3) should be kept between [0..1]\n 2. The \"width\" parameter (item 2)\n the width in the parameter array ( items 2 & 3 ) are kept\n strictly positive. I.e. they are changed when upon xdata they are negative.\n\n Parameters\n ----------\n xdata : array_like\n values at which to calculate the result\n params : array_like\n values for the parameters.\n\n \"\"\"\n return ( params[3] * self.lorentz.baseResult( xdata, params[:3] ) +\n ( 1 - params[3] ) * self.gauss.baseResult( xdata, params[:3] ) )\n\n def basePartial( self, xdata, params, parlist=None ):\n \"\"\"\n Returns the partials at the xdata value.\n\n Parameters\n ----------\n xdata : array_like\n values at which to calculate the partials\n params : array_like\n values for the parameters.\n parlist : array_like\n list of indices active parameters (or None for all)\n\n \"\"\"\n pl = parlist\n if parlist is not None and 3 in pl :\n pl = parlist.copy()\n pl.remove( 3 )\n\n pars = params[:3]\n partial = ( params[3] * self.lorentz.basePartial( xdata, pars, parlist=pl ) +\n ( 1 - params[3] ) * self.gauss.basePartial( xdata, pars, parlist=pl ) )\n\n if parlist is None or 3 in parlist :\n part3 = self.lorentz.baseResult( xdata, pars ) - self.gauss.baseResult( xdata, pars )\n (n0,n1) = partial.shape\n shp = ( n0, n1+1 )\n pp = numpy.zeros( shp, dtype=float )\n pp[:,:n1] = partial\n pp[:,n1] = part3\n partial = pp\n\n return partial\n\n def baseDerivative( self, xdata, params ) :\n \"\"\"\n Return the derivative df/dx at each xdata (=x).\n\n Parameters\n ----------\n xdata : array_like\n values at which to calculate the derivative\n params : array_like\n values for the parameters.\n\n \"\"\"\n return ( params[3] * self.lorentz.baseDerivative( xdata, params[:3] ) +\n ( 1 - params[3] ) * self.gauss.baseDerivative( xdata, params[:3] ) )\n\n\n def baseName( self ):\n \"\"\"\n Returns a string representation of the model.\n \"\"\"\n return str( \"PseudoVoigt: p_3 * Lorentz + ( 1 - p_3 ) * Gauss\" )\n\n def baseParameterUnit( self, k ):\n \"\"\"\n Return the name of a parameter.\n\n Parameters\n ---------\n k : int\n parameter number.\n\n \"\"\"\n if k == 0:\n return self.yUnit\n return 
self.xUnit\n\n\n","repo_name":"dokester/BayesicFitting","sub_path":"BayesicFitting/source/PseudoVoigtModel.py","file_name":"PseudoVoigtModel.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"80"} +{"seq_id":"10808446379","text":"#/usr/bin/env python\n\nimport sys\nimport string\nimport os\n\ndef usage():\n print ('Convert_label_to_cntk.py -in [fea_list file] [label_list_file] delay_number')\n\ndef createDir(d):\n if not os.path.isdir(d):\n os.makedirs(d)\n\nif len(sys.argv) != 5:\n usage()\nelse:\n fr = open(sys.argv[2], 'r')\n lines = [x.rstrip() for x in fr]\n fr.close()\n \n fr = open (sys.argv[3], 'r')\n delay = int(sys.argv[4])\n lablines = [x.rstrip() for x in fr]\n fr.close()\n print (\"#!MLF!#\")\n frnum=0\n for line in lines:\n linenew = line.split('.')\n tmp = linenew[0].split('/')\n print (\"\\\"\"+tmp[-1]+\"\\\"\")\n fr = open (lablines[frnum], 'r')\n labs = [x.rstrip() for x in fr]\n fr.close()\n i = 0\n for lab in labs:\n j = i+1\n k = i-delay\n if i-delay < 0:\n k = 0\n print (i, j, labs[k], labs[k])\n i = i+1\n print (\".\")\n frnum = frnum + 1\n\n","repo_name":"microsoft/CNTK","sub_path":"Examples/Speech/Miscellaneous/AMI/scripts/Convert_label_to_cntk.py","file_name":"Convert_label_to_cntk.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":17415,"dataset":"github-code","pt":"80"} +{"seq_id":"18853249243","text":"from flask import Flask, request, jsonify\nfrom flask_pymongo import PyMongo, ObjectId\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.config['MONGO_URI'] = 'mongodb://localhost/postrespedrodb'\nmongo = PyMongo(app)\n\n@app.route(\"/postres\", methods=[\"POST\"])\ndef createPostre():\n id = mongo.db.postres.insert({\n \"name\": request.json[\"name\"],\n \"flavor\": request.json[\"flavor\"],\n \"price\": request.json[\"price\"],\n })\n print(request.json)\n return jsonify(str(ObjectId(id)))\n\n\n@app.route(\"/postres\", methods=[\"GET\"])\ndef getPostres():\n postres = []\n for document in mongo.db.postres.find():\n postres.append({\n '_id': str(ObjectId(document['_id'])),\n 'name': document['name'],\n 'price': document['price'],\n 'flavor': document['flavor']\n })\n return jsonify(postres)\n\n\n@app.route(\"/postres/\", methods=[\"DELETE\"])\ndef deletePostre(id):\n mongo.db.postres.delete_one({\n '_id': ObjectId(id)\n })\n return 'deleted'\n\n\n@app.route(\"/postre/\", methods=[\"GET\"])\ndef getPostre(id):\n postre = mongo.db.postres.find_one({\n '_id': ObjectId(id)\n })\n return jsonify({\n '_id': str(ObjectId(postre['_id'])),\n 'name': postre['name'],\n 'price': postre['price'],\n 'flavor': postre['flavor']\n })\n\n@app.route(\"/postre/\", methods=[\"PUT\"])\ndef updatePostre(id):\n print(id)\n mongo.db.postres.update_one({'_id': ObjectId(id)},{'$set':\n {\n 'name': request.json['name'],\n 'price': request.json['price'],\n 'flavor': request.json['flavor']\n }})\n return 'updated'\n\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"imsebarz/postres-backend","sub_path":"backend/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"29258917062","text":"import logging\nimport random\nfrom argparse import ArgumentParser\nimport sys\n\nimport numpy\n\nimport os\nfrom data import data\nfrom downstream import classify\nfrom model import node2vec\nfrom config import 
Config\nfrom logging import getLogger\nfrom utils import get_downstream, load_embeddings, ensure_dir, get_local_time, get_model\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('--dataset', required=True, help='Dataset file',\n choices=[\n 'wiki', 'blogCatalog', 'bj_roadmap_edge'\n ])\n parser.add_argument('--config_file', required=False, help='Config file', default=None)\n parser.add_argument('--output_dim', required=False, help='Output dim', default=None)\n parser.add_argument('--method', required=True, help='The learning method',\n choices=[\n 'node2vec',\n 'deepWalk',\n 'HRNR'\n ])\n parser.add_argument('--downstream', required=True, help='The downstream task',\n choices=[\n 'classify',\n 'rnn'\n ])\n parser.add_argument('--exp_id', required=False, help='Experiment ID', default=None)\n parser.add_argument('--cached_embedding', required=False, help='Cached Embedding', default=None)\n args = parser.parse_args()\n return Config(vars(args))\n\n\ndef run_model(config):\n logger = getLogger()\n logger.setLevel(logging.INFO)\n\n log_dir = './output/log'\n ensure_dir(log_dir)\n log_filename = '{}-{}-{}-{}.log'.format(config['exp_id'],\n config['model'], config['dataset'], get_local_time())\n log_file_name = os.path.join(log_dir, log_filename)\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n file_handler = logging.FileHandler(log_file_name)\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(console_formatter)\n logger.addHandler(console_handler)\n\n logger.info(\"Begin pipeline, method={}, dataset={}, downstream={}, exp_id={}\"\n .format(config[\"method\"], config[\"dataset\"],\n config[\"downstream\"], config[\"exp_id\"]))\n logger.info(config.config)\n logger.info(\"Start reading dataset...\")\n dataset = data(config, logger)\n logger.info(\"Dataset read.\")\n\n cached_embedding = config[\"cached_embedding\"]\n if cached_embedding is not None:\n vectors = load_embeddings(cached_embedding)\n else:\n logger.info(\"Start building method...\")\n m = get_model(config, dataset, logger)\n logger.info(\"Model built.\")\n\n logger.info(\"Saving embeddings...\")\n m.save_embeddings()\n logger.info(\"Embeddings saved.\")\n vectors = m.vectors\n\n train_data, valid_data, test_data = dataset.get_data()\n data_feature = dataset.get_data_feature()\n downstream = get_downstream(config, data_feature, vectors, logger)\n\n logger.info(\"Start training downstream task...\")\n downstream.train(train_data, valid_data)\n logger.info(\"Start Evaluating...\")\n downstream.evaluate(test_data)\n\n\nif __name__ == '__main__':\n random.seed(32)\n numpy.random.seed(32)\n run_model(parse_args())\n","repo_name":"PotassiumWings/NET","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"15573829705","text":"import json\nfrom typing import List\n\nfrom autoinfo.data.plain import Model\n\n\nclass ModelsResponseParser:\n def __init__(self, logger):\n self.__logger = logger\n\n def __call__(self, decoded_response_text: str, *args, **kwargs):\n models_data = json.loads(f\"[{decoded_response_text}]\")\n models_list_item = self.__parse_models(models_data)\n\n return models_list_item\n\n # noinspection PyMethodMayBeStatic\n def __parse_models(self, 
models_data):\n handled_models = set()\n models: List[Model] = []\n\n for code, name in models_data:\n if name not in ['- Popular Models -', '- All Models -'] and code not in handled_models:\n handled_models.add(code)\n models.append(Model(model_code=code, model_name=name))\n\n return models\n","repo_name":"JeyKip/autoinfo-scrapper","sub_path":"src/scrapper/scrapper/spiders/parsers/models_response_parser.py","file_name":"models_response_parser.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"25790414915","text":"import os\nimport pygame\nimport sys\nimport time\nimport Tkinter as tk\nfrom PIL import Image, ImageTk\n\n\nclass Game:\n\n def __init__(self):\n self.parent = tk.Tk()\n self.screen_width = self.parent.winfo_screenwidth()\n self.screen_height = self.parent.winfo_screenheight()\n\n self.canvas = tk.Canvas(self.parent, width=500, height=500)\n self.x = 250\n self.y=250\n self.r = 20\n self.circle = ImageTk.PhotoImage(Image.open(\"resources/images/circle.jpg\"))\n self.canvas.create_image(self.x, self.y, image=self.circle)\n self.canvas.pack()\n self.parent.update_idletasks()\n\n\n def moveThing(self, direction):\n moveconst = 25\n if direction == 0 :\n self.y -= moveconst\n if direction == 1:\n self.x += moveconst\n if direction == 2:\n self.y += moveconst\n if direction == 3:\n self.x -= moveconst\n self.canvas.delete('all')\n self.canvas.create_image(self.x, self.y, image=self.circle)\n self.canvas.pack()\n self.parent.update_idletasks()\n\n def resetPosition(self):\n self.x = 250\n self.y=250\n self.canvas.delete('all')\n self.canvas.create_image(self.x, self.y, image=self.circle)\n self.canvas.pack()\n self.parent.update_idletasks()\n\n\n#def moveThing(self, direction):\n # #direction: 0=up 1=right 2=down 3=left\n # #initialize force variables\n # moveconst = 1\n #\n # if direction == 0 :\n # #apply upward force (-y)\n # self.move_u = moveconst\n # if direction == 1:\n # #apply rightward force (+x)\n # self.move_r = moveconst\n # if direction == 2:\n # #apply downward force (+y)\n # self.move_d = moveconst\n # if direction == 3:\n # #apply leftward force (-x)\n # self.move_l = moveconst\n # #\n # # if event.key == pygame.K_UP or event.key == pygame.K_w:\n # # self.move_u = 0\n # # if event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n # # self.move_r = 0\n # # if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n # # self.move_d = 0\n # # if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n # # self.move_l = 0\n #\n # if(self.move_u == 1 or self.move_d == 1):\n # if(self.move_u == moveconst and self.move_d == moveconst):\n # self.move_y = 0\n # elif(self.move_u == moveconst):\n # self.move_y = -moveconst\n # else:\n # self.move_y = moveconst\n # else:\n # self.move_y = 0\n #\n # if(self.move_r == moveconst or self.move_l == moveconst):\n # if(self.move_r == moveconst and self.move_l == moveconst):\n # self.move_x = 0\n # elif(self.move_r == moveconst):\n # self.move_x = moveconst\n # else:\n # self.move_x = -moveconst\n # else:\n # self.move_x = 0\n #\n # diff_x = self.usr_entity.x - self.anchor_x\n # diff_y = self.usr_entity.y - self.anchor_y\n #\n # #set entity velocity\n # if(self.move_x == 0):\n # #reset x to anchor\n # if(abs(diff_x) > 120 / self.FRAMERATE):\n # if(diff_x < 0):\n # return_v_x = 120 / self.FRAMERATE\n # else:\n # return_v_x = -120 / self.FRAMERATE\n # else:\n # return_v_x = -1 * (diff_x % (120 / self.FRAMERATE))\n # self.usr_entity.v_x 
= return_v_x\n # else:\n # self.usr_entity.v_x = (self.move_x * 60 / self.FRAMERATE)\n # #do boundary check\n # if(self.move_r == moveconst and diff_x > self.WIN_WIDTH // 2 - self.WIN_WIDTH // 8 + self.usr_entity.size // 2):\n # self.usr_entity.v_x = 0\n # if(self.move_l == moveconst and diff_x < (-1 * (self.WIN_WIDTH // 2 - self.WIN_WIDTH // 8) - self.usr_entity.size // 2)):\n # self.usr_entity.v_x = 0\n # if(self.move_y == 0):\n # #reset x to anchor\n # if(abs(diff_y) > 120 / self.FRAMERATE):\n # if(diff_y < 0):\n # return_v_y = 120 / self.FRAMERATE\n # else:\n # return_v_y = -120 / self.FRAMERATE\n # else:\n # return_v_y = -1 * (diff_y % (120 / self.FRAMERATE))\n # self.usr_entity.v_y = return_v_y\n # else:\n # self.usr_entity.v_y = (self.move_y * 60 / self.FRAMERATE)\n # #do boundary check\n # if(self.move_d == moveconst and diff_y > self.WIN_WIDTH // 2 - self.WIN_WIDTH // 8 + self.usr_entity.size // 2):\n # self.usr_entity.v_y = 0\n # if(self.move_u == moveconst and diff_y < (-1 * (self.WIN_WIDTH // 2 - self.WIN_WIDTH // 8) - self.usr_entity.size // 2)):\n # self.usr_entity.v_y = 0\n #\n #\n # self.usr_entity.move()\n #\n # #display entity\n # self.usr_entity.clear(self.screen)\n # self.usr_entity.display(self.screen)\n #\n # pygame.display.flip()\n # self.move_u = 0\n # self.move_r = 0\n # self.move_d = 0\n # self.move_l = 0\n # self.move_x = 0\n # self.move_y = 0\n #\n # self.return_v_x = 0\n # self.return_v_y = 0\n","repo_name":"samrobison/NeuralKeyboard","sub_path":"MovingCircle.py","file_name":"MovingCircle.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"9064228763","text":"# Defining a file \r\n\r\n# Creating a file using Write mode \r\n# In write mode , if a file with same name is alerady present , it wil delete the file and create new one \r\nfile_name = input('Enter file name: ')\r\nfileA=open(file_name,'w') \r\nfileA.write('Hello Everyone !! , We have successfully cerated the file')\r\nfileA.close()\r\nprint()\r\n\r\n# File writing from list of content\r\nfile_name=input('Enter your file name: ')\r\nnames=['Yashwanth is a good boy \\n','Rahul is a good boy \\n','Musk is a good boy \\n']\r\nfileA=open(file_name,'w')\r\nfileA.write('The file is successfully created !! 
\\n')\nfileA.writelines(names)\n\n# Reading the file back\nfileA=open('names.txt','r')\nlineonly=fileA.readline()\nfileA.seek(0)\ndata=fileA.read()\nprint(lineonly)\nprint(data)\nfileA.close()\nprint(type(data))\n\n# Moving a file in Python\nimport shutil\n\nsource='names.txt'\ndestination='C:/Users/yashw/OneDrive/Desktop/names.txt'\noutput=shutil.move(source,destination)\nprint(output)\n","repo_name":"YashwanthRaj/TechieNest-Internship","sub_path":"Course-Files/File Interaction.py","file_name":"File Interaction.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}
+{"seq_id":"26087632931","text":"from openpyxl import Workbook, load_workbook\nfrom io import BytesIO\n\n# Creating a spreadsheet file\n\nwb = Workbook()\nws = wb.active\n\ndados = [\n    ('Nome', 'Idade', 'Sexo'),\n    ('Eric', '22', 'M'),\n    ('Guilherme', '22', 'M'),\n    ('Leonardo', '22', 'M'),\n]\n\nfor dado in dados:\n    ws.append(dado)\n\nwb.save('sheet.xlsx')\n\n# Writing the spreadsheet to a byte stream\n\nbytestream = BytesIO()\nwb.save(bytestream)\nbytestream.seek(0)\nwith open('bytes_sheet.xlsx', 'wb') as f:\n    f.write(bytestream.read())\n\n# Reading the spreadsheet file\n\nwb = load_workbook('sheet.xlsx')\nws = wb.active\n\nfor row in ws.values:\n    print(row)\n\n# Building a list of dictionaries\n\ndados = []\nvalues = list(ws.values)\nheaders = values[0]\nfor row in values:\n    dados.append({col: row[i] for i, col in enumerate(headers)})\n\nfor row in dados:\n    print(row)\n\n# Inserting rows\n\nwb = load_workbook('sheet.xlsx')\nws = wb.active\n\nws.insert_rows(3)\nws.delete_cols(3)\n\nwb.save('insert_sheet.xlsx')","repo_name":"satoshi-eric/my-workspace","sub_path":"openpyxl_test/sheet.py","file_name":"sheet.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}
+{"seq_id":"42704669456","text":"import socket\nimport os\nimport time\n\nUDP_IP = '222.107.177.42'\nUDP_PORT = 6667\n\npath = './img'\nimg_list = os.listdir(path)\nimg_list.sort()\n\nclientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nfor file_name in img_list:\n    clientSock.sendto(file_name.encode(), (UDP_IP, UDP_PORT))\n    # print(file_name)\n    data_transferred = 0\n    with open('./img/' + file_name, 'rb') as f:\n        print(file_name)\n        try:\n            data = f.read(1024)\n            while data:\n                data_transferred += clientSock.sendto(data, (UDP_IP, UDP_PORT))\n                data = f.read(1024)\n                time.sleep(0.0001)\n                print(\"sleep\")\n        except Exception as e:\n            print(e)\n    clientSock.sendto('end'.encode(), (UDP_IP, UDP_PORT))\n    print('end')\n\n","repo_name":"Kim-SuBin/2020_winter_Intern","sub_path":"Project/udp_socket/client_SenTerm.py","file_name":"client_SenTerm.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}
+{"seq_id":"40008350649","text":"num = input()\n\nlst =[]\nfor i in range(int(num)):\n    lst.append(input())\n\nfor i in range(len(lst)):\n    temp = lst[i].split(' ')\n    n = int(temp[0])\n    l = int(temp[1])\n    r = int(temp[2])\n    res = ''\n    binary = list(bin(n))\n    del binary[0:2]\n    binary =list(map(int,binary))\n\n    for j in range(len(binary)-r,len(binary)-l+1):\n        if binary[j] == 0:\n            binary[j] = 1\n        else:\n            binary[j] = 0\n    res = ''.join(list(map(str,binary)))\n    res = '0b'+res\n    print(int(res,2))","repo_name":"AdamZhouSE/pythonHomework","sub_path":"Code/CodeRecords/2649/61046/272264.py","file_name":"272264.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"}
+{"seq_id":"36335260561","text":"import gym \nimport random \nimport numpy as np \nfrom deepq import DeepQ\n\nenv = gym.make(\"CartPole-v0\")\n\ntime = 20\nsteps = 10000\nexploration_rate = 1\nlearning_rate = 0.0025\ndiscount_factor = 0.99\nlearn_start = 128\nmemory_size = 1000000\n\ndeepQ = DeepQ(4, 2, memory_size, discount_factor, learning_rate, learn_start)\ndeepQ.loadModel()\navg_steps = []\n\nfor epoch in range(time):\n    observation = env.reset()\n    for step in range(steps):\n        env.render()\n        qValues = deepQ.getQValues(observation)\n\n        action = deepQ.selectAction(qValues, exploration_rate)\n\n        new_observation, reward, done, info = env.step(action)\n        observation = new_observation\n        if done: \n            avg_steps.append(step)\n            print(\"Done after {} Steps.\".format(step))\n            break\n    exploration_rate *= 0.995\n    # explorationRate -= (2.0/epochs)\n    exploration_rate = max(0.05, exploration_rate)\nenv.close()","repo_name":"lurayy/Cartpole-balancer","sub_path":"Other methods/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}
+{"seq_id":"19289489050","text":"# k consecutive plates starting from an arbitrary position -> discount\n# a coupon for one sushi type is issued -> that type is served for free\n\nimport sys\ninput = sys.stdin.readline\n\nN, d,k,c = map(int,input().split())\nsushi = []\nkind = {}\nkind[c] = 1 # the sushi matching the coupon number\n\nL,R = 0, k-1\n\nfor _ in range(N) :\n    sushi.append(int(input()))\n\n# store per-type counts for the first window of size k\nfor i in range(R+1) :\n    if sushi[i] in kind : \n        kind[sushi[i]] += 1\n    else :\n        kind[sushi[i]] = 1\n\nanswer = 0\nwhile L < N :\n    answer = max(len(kind), answer)\n\n    kind[sushi[L]] -= 1\n    if kind[sushi[L]] == 0 :\n        del kind[sushi[L]]\n    L+=1\n    R+=1\n    if sushi[R%N] in kind :\n        kind[sushi[R%N]] += 1\n    else :\n        kind[sushi[R%N]] = 1\n\nprint(answer)\n\n","repo_name":"seminss/python-algorithm-study","sub_path":"hyejinkwon/Two Pointer/회전 초밥.py","file_name":"회전 초밥.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"}
+{"seq_id":"27427392805","text":"\"\"\"\n    import_magic python interface\n\"\"\"\n\nimport fileinput\nimport json\nimport os\nimport sys\nfrom tempfile import gettempdir\n\ntry:\n    from itertools import accumulate\nexcept ImportError:\n    import operator\n\n    def accumulate(iterable, func=operator.add):\n        it = iter(iterable)\n        try:\n            total = next(it)\n        except StopIteration:\n            return\n        yield total\n        for element in it:\n            total = func(total, element)\n            yield total\n\n\ntry:\n    import importmagic\nexcept ImportError:\n    importmagic = None\n\ntry:\n    import isort\nexcept ImportError:\n    isort = None\n\n\ndef relativize(module, root, path):\n    if not path or not path.startswith(root):\n        return module\n    path = os.path.relpath(path, root)\n    path_parts = path.split(os.path.sep)\n    mod_parts = module.split('.')\n    common_parts = 0\n    for mod_part, path_part in zip(mod_parts, path_parts):\n        if mod_part == path_part:\n            common_parts += 1\n        else:\n            break\n    if not common_parts:\n        return module\n    path_parts = path_parts[common_parts:]\n    mod_parts = mod_parts[common_parts:]\n    module = '.' 
* len(path_parts) + '.'.join(mod_parts)\n return module\n\n\nclass Commands(object):\n def __init__(self):\n if isort is None or importmagic is None:\n self.write(\n notification='error',\n message='importmagic/isort not found',\n detail='You must install isort and importmagic for %s' %\n sys.executable\n )\n sys.exit(0)\n\n self.data = self.read()\n self.cmd = self.data.pop('cmd', None)\n self.cwd = self.data.pop('cwd', gettempdir())\n\n self.index_file = os.path.join(self.cwd, '.magicindex.json')\n self._tmp_index_file = os.path.join(self.cwd, '.magicindex.json.tmp')\n self.index = self.read_index() if os.path.exists(\n self.index_file\n ) else None\n\n def run(self):\n fun = getattr(self, self.cmd, None)\n if self.index is None and self.cmd in ('file_import_magic',\n 'add_import',\n 'list_possible_imports'):\n if os.path.exists(self._tmp_index_file):\n return\n self.create_index()\n self.index = self.read_index()\n\n if not fun:\n self.write(\n notification='error',\n message=self.cmd,\n detail='Unknown command %s' % self.cmd\n )\n return\n\n fun(**self.data)\n\n def read_index(self):\n with open(self.index_file) as fd:\n return importmagic.SymbolIndex.deserialize(fd)\n\n def create_index(self):\n index = importmagic.SymbolIndex()\n index.build_index(sys.path + [self.cwd])\n with open(self._tmp_index_file, 'w') as fd:\n index.serialize(fd)\n # Prevent multiple access\n if os.path.exists(self._tmp_index_file):\n os.rename(self._tmp_index_file, self.index_file)\n\n def read(self):\n return json.loads(''.join(fileinput.input()))\n\n def write(self, **dct):\n sys.stdout.write(json.dumps(dct))\n sys.stdout.flush()\n\n def reindex(self):\n self.create_index()\n self.write(\n notification='success',\n message='Reindex',\n detail='%s reindexed.' % self.index_file\n )\n sys.exit(0)\n\n def clean_imports(self, source):\n scope = importmagic.Scope.from_source(source)\n unresolved, unreferenced = (\n scope.find_unresolved_and_unreferenced_symbols()\n )\n imports = importmagic.Imports(self.index, source)\n imports.remove(unreferenced)\n source = ''.join(imports.update_source())\n\n source = isort.code(source)\n self.write(file=source, unresolved=list(unresolved))\n\n def add_import(self, source, new_import):\n source = isort.code(\n source, config=isort.settings.Config(add_imports=(new_import, ))\n )\n self.write(file=source)\n\n def list_possible_imports(self, prefix, source, path=None, relative=False):\n scores = self.index.symbol_scores(prefix)\n imports = []\n for _, module, variable in scores:\n if relative:\n module = relativize(module, self.cwd, path)\n if variable is None:\n imports.append('import %s' % module)\n else:\n imports.append('from %s import %s' % (module, variable))\n self.write(imports=imports)\n\n def init(self):\n if self.index is None:\n self.create_index()\n self.write(message='%s indexed.' % self.index_file)\n else:\n self.write(message='Using index %s.' 
% self.index_file)\n\n\nif __name__ == '__main__':\n    Commands().run()\n","repo_name":"paradoxxxzero/atom-python-import-magic","sub_path":"lib/import_magic_interface.py","file_name":"import_magic_interface.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"80"}
+{"seq_id":"74786405378","text":"# 2020-12-30 10:14\ndef unique(data, n):\n    if n == 0:  # base case\n        return True\n    else:\n        if data[n] == data[n - 1]:  # check the condition\n            return False\n        else:\n            return unique(data, n - 1)  # move toward the base case\n\n\n# correct algorithm\ndef unique2(S):\n    \"\"\"Return True if there are no duplicate elements in sequence S.\"\"\"\n    temp = sorted(S)  # create a sorted copy of S\n    for j in range(1, len(temp)):\n        if temp[j - 1] == temp[j]:\n            return False  # found duplicate pair\n    return True  # if we reach this, elements were unique\n\n\nif __name__ == '__main__':\n    import numpy as np\n\n    for i in range(100):\n        data = np.random.randint(0, 9, size=(100,))\n        if unique(data, len(data) - 1) == unique2(data):\n            pass\n        else:\n            print(\"calculation error\")\n","repo_name":"LK007CX/Data-Structures-and-Algorithms-in-Python","sub_path":"unit/unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"80"}
+{"seq_id":"2749564149","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2018/9/9\n@Author : huanggangyu\n\"\"\"\n\nfrom bi.core.common import statistic_cube, statistic_groups, statistic_dimension\nfrom pyspark.sql import functions as F\nfrom bi.core.filter import filter_age\nfrom bi.core.alias import get_position_alias, get_position_industry\n\n\ndef statistic_salary(df, mysql_url):\n    \"\"\"\n    Salary analysis\n    :param df:\n    :param mysql_url:\n    :return:\n    \"\"\"\n\n    parameters = [\n        # analyses along the major dimension\n        ([\"major\"], \"degree\", (\"major__gender\", \"gender\")),\n        ([\"major\"], \"degree\", (\"major__address\", \"address\")),\n        ([\"major\"], \"degree\", (\"major__company\", \"company_name\")),\n        ([\"major\"], \"degree\", (\"major__industry\", \"industry\")),\n\n        # analyses along the school dimension\n        ([\"school_name\"], \"degree\", (\"school__gender\", \"gender\")),\n        ([\"school_name\"], \"degree\", (\"school__address\", \"address\")),\n        ([\"school_name\"], \"degree\", (\"school__company\", \"company_name\")),\n        ([\"school_name\"], \"degree\", (\"school__industry\", \"industry\")),\n\n        # analyses along the school + major dimensions\n        ([\"school_name\", \"major\"], \"degree\", (\"school__major__position\", \"position_name\", True, True)),\n        ([\"school_name\", \"major\"], \"degree\", (\"school__major__industry\", \"industry\", True, True)),\n        ([\"school_name\", \"major\"], \"degree\", (\"school__major__company\", \"company_name\", True, True)),\n        ([\"school_name\", \"major\"], \"degree\", (\"school__major__flow\", \"company_name,industry\", True, True)),\n\n        ([\"industry\"], \"address\", (\"industry__address__compare\", None, True, True)),\n        ([\"industry\"], \"address\", (\"industry__address__age\", \"age_range\")),\n        ([\"industry\"], \"address\", (\"industry__address__degree\", \"degree\")),\n\n        # analyses along the position + location dimensions\n        ([\"position_name\"], \"address\", (\"position__address__compare\", None, True, True)),\n        ([\"position_name\"], \"address\", (\"position__address__gender\", \"gender\")),\n        ([\"position_name\"], \"address\", (\"position__address__work_year_range\", \"work_year_range\")),\n        ([\"position_name\"], \"address\", (\"position__address__degree\", \"degree\")),\n        ([\"address\"], \"gender\", (\"person__rank\", \"age\", True, True, True)),\n\n        # with position aliases\n        ([\"major\"], \"degree\", (\"major__position\", \"position_name\", True, False, False, True)),\n        ([\"school_name\"], \"degree\", (\"school__position\", \"position_name\", True, False, False, True))\n    ]\n\n    statistic_dimension(df, mysql_url, parameters, statistic_salary_dimension)\n\n\ndef statistic_salary_dimension(df, groups, na_field, fields, is_sa=True, is_cube=False, is_age=False, need_alias=False):\n    \"\"\"\n    Salary analysis\n    :param df:\n    :return:\n    \"\"\"\n    na_field = [na_field] if na_field else []\n    fields = fields.split(',') if fields else []\n    all_fields = groups + na_field + fields\n    for f in all_fields:\n        df = df.filter(df[f].isNotNull())\n    if is_age:\n        df = filter_age(df).withColumn(\"age\", F.udf(str)(df.age))\n    if is_cube:\n        md_df = statistic_cube(df, groups, na_field, fields, is_sa)\n    else:\n        md_df = statistic_groups(df, groups, na_field, fields, is_sa)\n\n    if need_alias:\n        md_df = md_df.join(get_position_industry(df), \"position_name\")\n        md_df = md_df.join(get_position_alias(df), \"position_name\")\n    return md_df\n","repo_name":"reganzm/ai","sub_path":"大数据/pyspark/bi-analysis/bi/statistic/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"}
+{"seq_id":"10533689779","text":"from datetime import date, timedelta\n\nfrom ichnaea.content.models import (\n    Score,\n    User,\n    Stat,\n    STAT_TYPE,\n)\nfrom ichnaea.models import (\n    Cell,\n    RADIO_TYPE,\n)\nfrom ichnaea.tests.base import DBTestCase\nfrom ichnaea import util\n\n\nclass TestStats(DBTestCase):\n\n    def test_global_stats(self):\n        from ichnaea.content.stats import global_stats\n        session = self.db_master_session\n        day = util.utcnow().date() - timedelta(1)\n        stats = [\n            Stat(key=STAT_TYPE['cell'], time=day, value=6100000),\n            Stat(key=STAT_TYPE['wifi'], time=day, value=3212000),\n            Stat(key=STAT_TYPE['unique_cell'], time=day, value=3289900),\n            Stat(key=STAT_TYPE['unique_wifi'], time=day, value=2009000),\n        ]\n        session.add_all(stats)\n        session.commit()\n\n        result = global_stats(session)\n        self.assertDictEqual(\n            result, {\n                'cell': '6.10', 'unique_cell': '3.28',\n                'wifi': '3.21', 'unique_wifi': '2.00'\n            })\n\n    def test_global_stats_missing_today(self):\n        from ichnaea.content.stats import global_stats\n        session = self.db_master_session\n        day = util.utcnow().date() - timedelta(1)\n        yesterday = day - timedelta(days=1)\n        stats = [\n            Stat(key=STAT_TYPE['cell'], time=yesterday, value=5000000),\n            Stat(key=STAT_TYPE['cell'], time=day, value=6000000),\n            Stat(key=STAT_TYPE['wifi'], time=day, value=3000000),\n            Stat(key=STAT_TYPE['unique_cell'], time=yesterday, value=4000000),\n        ]\n        session.add_all(stats)\n        session.commit()\n\n        result = global_stats(session)\n        self.assertDictEqual(\n            result, {\n                'cell': '6.00', 'unique_cell': '4.00',\n                'wifi': '3.00', 'unique_wifi': '0.00'\n            })\n\n    def test_histogram(self):\n        from ichnaea.content.stats import histogram\n        session = self.db_master_session\n        today = util.utcnow().date()\n        one_day = today - timedelta(days=1)\n        two_days = today - timedelta(days=2)\n        one_month = today - timedelta(days=35)\n        two_months = today - timedelta(days=70)\n        long_ago = today - timedelta(days=100)\n        stats = [\n            Stat(name='cell', time=long_ago, value=40),\n            Stat(name='cell', time=two_months, value=50),\n            Stat(name='cell', time=one_month, value=60),\n            Stat(name='cell', time=two_days, value=70),\n            Stat(name='cell', time=one_day, value=80),\n            Stat(name='cell', time=today, value=90),\n        ]\n        session.add_all(stats)\n        
session.commit()\n result = histogram(session, 'cell', days=90)\n self.assertTrue(\n {'num': 80, 'day': one_day.strftime('%Y-%m-%d')} in result)\n\n if two_months.month == 12:\n expected = date(two_months.year + 1, 1, 1)\n else:\n expected = date(two_months.year, two_months.month + 1, 1)\n self.assertTrue(\n {'num': 50, 'day': expected.strftime('%Y-%m-%d')} in result)\n\n def test_histogram_different_stat_name(self):\n from ichnaea.content.stats import histogram\n session = self.db_master_session\n day = util.utcnow().date() - timedelta(days=1)\n stat = Stat(time=day, value=9)\n stat.name = 'unique_cell'\n session.add(stat)\n session.commit()\n result = histogram(session, 'unique_cell')\n self.assertEqual(result, [{'num': 9, 'day': day.strftime('%Y-%m-%d')}])\n\n def test_leaders(self):\n from ichnaea.content.stats import leaders\n session = self.db_master_session\n test_data = []\n for i in range(20):\n test_data.append((u'nick-%s' % i, 7))\n highest = u'nick-high-too-long_'\n highest += (128 - len(highest)) * u'x'\n test_data.append((highest, 10))\n lowest = u'nick-low'\n test_data.append((lowest, 5))\n for nick, value in test_data:\n user = User(nickname=nick)\n session.add(user)\n session.flush()\n score = Score(userid=user.id, value=value)\n score.name = 'location'\n session.add(score)\n session.commit()\n # check the result\n result = leaders(session)\n self.assertEqual(len(result), 22)\n self.assertEqual(result[0]['nickname'], highest[:24] + u'...')\n self.assertEqual(result[0]['num'], 10)\n self.assertTrue(lowest in [r['nickname'] for r in result])\n\n def test_leaders_weekly(self):\n from ichnaea.content.stats import leaders_weekly\n session = self.db_master_session\n test_data = []\n for i in range(1, 11):\n test_data.append((u'nick-%s' % i, i))\n for nick, value in test_data:\n user = User(nickname=nick)\n session.add(user)\n session.flush()\n score = Score(userid=user.id, value=value)\n score.name = 'new_cell'\n session.add(score)\n score = Score(userid=user.id, value=21 - value)\n score.name = 'new_wifi'\n session.add(score)\n session.commit()\n\n # check the result\n result = leaders_weekly(session, batch=5)\n self.assertEqual(len(result), 2)\n self.assertEqual(set(result.keys()), set(['new_cell', 'new_wifi']))\n\n # check the cell scores\n scores = result['new_cell']\n self.assertEqual(len(scores), 5)\n self.assertEqual(scores[0]['nickname'], 'nick-10')\n self.assertEqual(scores[0]['num'], 10)\n self.assertEqual(scores[-1]['nickname'], 'nick-6')\n self.assertEqual(scores[-1]['num'], 6)\n\n # check the wifi scores\n scores = result['new_wifi']\n self.assertEqual(len(scores), 5)\n self.assertEqual(scores[0]['nickname'], 'nick-1')\n self.assertEqual(scores[0]['num'], 20)\n self.assertEqual(scores[-1]['nickname'], 'nick-5')\n self.assertEqual(scores[-1]['num'], 16)\n\n def test_countries(self):\n from ichnaea.content.stats import countries\n session = self.db_master_session\n test_data = [\n Cell(radio=RADIO_TYPE[''], mcc=208, mnc=1),\n Cell(radio=RADIO_TYPE['gsm'], mcc=1, mnc=1),\n Cell(radio=RADIO_TYPE['lte'], mcc=262, mnc=1),\n Cell(radio=RADIO_TYPE['gsm'], mcc=310, mnc=1),\n Cell(radio=RADIO_TYPE['gsm'], mcc=310, mnc=2),\n Cell(radio=RADIO_TYPE['gsm'], mcc=313, mnc=1),\n Cell(radio=RADIO_TYPE['cdma'], mcc=310, mnc=1),\n Cell(radio=RADIO_TYPE['umts'], mcc=425, mnc=1),\n Cell(radio=RADIO_TYPE['lte'], mcc=425, mnc=1),\n ]\n session.add_all(test_data)\n session.commit()\n\n # check the result\n expected = set(['BMU', 'DEU', 'GUM', 'ISR', 'PRI', 'PSE', 'USA'])\n result = 
countries(session)\n        self.assertEqual(len(result), len(expected))\n        self.assertEqual(set([r['code'] for r in result]), expected)\n\n        countries = {}\n        for r in result:\n            code = r['code']\n            countries[code] = r\n            del countries[code]['code']\n            del countries[code]['name']\n\n        # a simple case with a 1:1 mapping of mcc to ISO country code\n        self.assertEqual(countries['DEU'], {'cdma': 0, 'gsm': 0, 'lte': 1,\n                                            'total': 1, 'umts': 0, 'multiple': False})\n\n        # mcc 310 is valid for both GUM/USA, 313 only for USA\n        self.assertEqual(countries['USA'], {'cdma': 1, 'gsm': 3, 'lte': 0,\n                                            'total': 4, 'umts': 0, 'multiple': True})\n        self.assertEqual(countries['GUM'], {'cdma': 1, 'gsm': 2, 'lte': 0,\n                                            'total': 3, 'umts': 0, 'multiple': True})\n\n        # These two countries share a mcc, so we report the same data\n        # for both of them\n        self.assertEqual(countries['ISR'], {'cdma': 0, 'gsm': 0, 'lte': 1,\n                                            'total': 2, 'umts': 1, 'multiple': True})\n        self.assertEqual(countries['ISR'], countries['PSE'])\n","repo_name":"elkos/ichnaea","sub_path":"ichnaea/content/tests/test_stats.py","file_name":"test_stats.py","file_ext":"py","file_size_in_byte":8073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"80"} +{"seq_id":"4951587476","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    def lowestCommonAncestor(self, root: 'TreeNode', nodes: 'List[TreeNode]') -> 'TreeNode':\n        def LCAofTwoNodes(root,a,b):\n            \n            if not root:\n                return None\n            \n            if root in (a,b):\n                return root\n            \n            leftLCA=LCAofTwoNodes(root.left,a,b)\n            rightLCA=LCAofTwoNodes(root.right,a,b)\n            \n            if leftLCA and rightLCA:\n                return root\n            if rightLCA:\n                return rightLCA\n            return leftLCA\n        \n        a=nodes[0]\n        for i in range(1,len(nodes)):\n            b=nodes[i]\n            a=LCAofTwoNodes(root,a,b)\n        return a","repo_name":"sonalsrivas/Leetcode-Solutions---Python---Java","sub_path":"1676-lowest-common-ancestor-of-a-binary-tree-iv/1676-lowest-common-ancestor-of-a-binary-tree-iv.py","file_name":"1676-lowest-common-ancestor-of-a-binary-tree-iv.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"32996494433","text":"# Exercise: combine two different images from the images folder\n\nimport cv2\nimport numpy as np\n\nimageA = cv2.imread('../../imagens/gato.jpg')\nimageB = cv2.imread('../../imagens/tigre.jpg')\n\n# Resize image B to match image A\nheight, width, _ = imageA.shape\nimageB_resize = cv2.resize(imageB, (width, height), interpolation = cv2.INTER_AREA)\n\nprint(imageA.shape)\nprint(imageB_resize.shape)\n\n# Show images A and B after resizing\ncv2.imshow(\"Input A\", imageA)\ncv2.imshow(\"Input B\", imageB_resize)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"victorrodrigues20/PDI_2sem2017","sub_path":"Python/005-Operações_com_Imagens/02_Exercicio_Operacoes_Aritmeticas.py","file_name":"02_Exercicio_Operacoes_Aritmeticas.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"27143821171","text":"from alfred.dl.tf.common import mute_tf\nmute_tf()\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nimport numpy as np\n\nfrom alfred.utils.log import logger as logging\nimport os\nimport sys\n\n\ntarget_size = 224\n# use_keras_fit = True\nuse_keras_fit = False\nckpt_path = 
'./checkpoints/flowers_mbv2-{epoch}.ckpt'\n\n\ndef preprocess(x):\n    \"\"\"\n    minus mean pixel or normalize?\n    \"\"\"\n    x['image'] = tf.image.resize(x['image'], (target_size, target_size))\n    x['image'] /= 255.\n    x['image'] = 2*x['image'] - 1\n    return x['image'], x['label']\n\ndef build_model():\n    mobilenetv2 = tf.keras.applications.MobileNetV2(\n        input_shape=(target_size, target_size, 3),\n        weights='imagenet',\n        include_top=False)\n    mobilenetv2.trainable=True\n    model = tf.keras.Sequential([\n        mobilenetv2,\n        tf.keras.layers.GlobalAveragePooling2D(),\n        tf.keras.layers.Dense(5)\n    ])\n    return model\n\ndef train():\n    # using mobilenetv2 classify tf_flowers dataset\n    dataset, _ = tfds.load('tf_flowers', with_info=True)\n    train_dataset = dataset['train']\n    train_dataset = train_dataset.shuffle(100).map(preprocess).batch(4).repeat()\n\n    # init model\n    model = build_model()\n    # model.summary()\n    logging.info('model loaded.')\n    \n    start_epoch = 0\n    latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(ckpt_path))\n    if latest_ckpt:\n        start_epoch = int(latest_ckpt.split('-')[1].split('.')[0])\n        model.load_weights(latest_ckpt)\n        logging.info('model resumed from: {}, start at epoch: {}'.format(latest_ckpt, start_epoch))\n    else:\n        logging.info('passing resume since weights not there. training from scratch')\n\n    if use_keras_fit:\n        model.compile(\n            optimizer='adam',\n            loss='sparse_categorical_crossentropy',\n            metrics=['accuracy'])\n        model.fit(train_dataset, epochs=50)\n        model.save(ckpt_path.format(epoch='final'))\n        # logging.error('it is not currently possible to fit a generator into keras.model.')\n    else:\n        loss_object = tf.losses.SparseCategoricalCrossentropy()\n        optimizer = tf.optimizers.RMSprop()\n\n        train_loss = tf.metrics.Mean(name='train_loss')\n        train_accuracy = tf.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n        for epoch in range(start_epoch, 120):\n            try:\n                for batch, data in enumerate(train_dataset):\n                    images, labels = data\n                    with tf.GradientTape() as tape:\n                        predictions = model(images)\n                        loss = loss_object(labels, predictions)\n                    gradients = tape.gradient(loss, model.trainable_variables)\n                    optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n                    train_loss(loss)\n                    train_accuracy(labels, predictions)\n                    if batch % 50 == 0:\n                        logging.info('Epoch: {}, iter: {}, loss: {}, train_accuracy: {}'.format(\n                            epoch, batch, train_loss.result(), train_accuracy.result()))\n            except KeyboardInterrupt:\n                logging.info('interrupted.')\n                model.save_weights(ckpt_path.format(epoch=epoch))\n                logging.info('model saved into: {}'.format(ckpt_path.format(epoch=epoch)))\n                exit(0)\n\n\n\nif __name__ == \"__main__\":\n    train()\n","repo_name":"lucasjinreal/tfboys","sub_path":"tf2.0/image_classify/train_flowers_mbv2_finetune.py","file_name":"train_flowers_mbv2_finetune.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"80"} +{"seq_id":"2823560580","text":"import os\nimport mimetypes\nfrom urllib.parse import quote, unquote # can we use this? 
Is it worth it?\nimport socket\n\n# Base directory served by the server\nbase_directory = '/home/evaldo'\n\n# Port the server will listen on\nport = 8001\n\n# Build the HTTP response\ndef generate_response(status, content_type, content):\n    response = f\"HTTP/1.1 {status}\\r\\n\"\n    response += f\"Content-Type: {content_type}; charset=utf-8\\r\\n\"\n    response += f\"Content-Length: {len(content)}\\r\\n\"\n    response += \"\\r\\n\"\n    # sometimes the content arrives as bytes, sometimes as str...\n    # we need to treat anything under text/ differently from the rest\n    if isinstance(content, str):\n        response += content\n    elif content_type.__contains__(\"text\"): \n        if content_type.__contains__(\"csv\"):\n            response = response.encode() + content\n            return response\n        else:\n            # if it is text delivered as bytes, decode it and concatenate\n            response += content.decode()\n    else:\n        # images and the like? they come as bytes\n        response = response.encode() + content\n        return response\n\n    # this encode makes some things work and breaks others...\n    return response.encode()\n\n# List the files and folders of a directory\ndef list_files(directory):\n    entries = os.listdir(directory)\n    links = []\n    for entry in entries:\n        entry_path = os.path.join(directory, entry)\n        if os.path.isfile(entry_path):\n            link = f'<a href=\"{entry}\">{entry}</a>'\n            links.append(link)\n        elif os.path.isdir(entry_path):\n            link = f'<a href=\"{entry}/\">{entry}/</a>'\n            links.append(link)\n    \n    arquivos = \"<br>\".join(links)\n    titulo = f'<html><head><title>TP PARA O FLÁVIO</title></head><body><h1>TP PARA O FLÁVIO</h1>'\n    pagina = titulo + arquivos\n    return pagina\n\n# Handle HTTP requests\ndef handle_request(client_socket, request):\n    request_lines = request.split(\"\\r\\n\")\n\n    # Check whether the request is complete\n    if not request_lines[-1]:\n        print(\"Processing request \", request_lines[0])\n        request_method, request_path, _ = request_lines[0].split(\" \")\n\n        if request_method == \"GET\":\n            if request_path == \"/HEADER\":\n                # Return the request's own HTTP header\n                response = generate_response(\"200 OK\", \"text/plain\", str(request).encode()) # keep this encode?\n            else:\n                file_path = os.path.join(base_directory, unquote(request_path[1:]))\n\n                if os.path.isdir(file_path):\n                    # List the directory contents\n                    content = list_files(file_path)\n                    response = generate_response(\"200 OK\", \"text/html\", unquote(content))\n                elif os.path.isfile(file_path):\n                    # Check the file's MIME type\n                    content_type, _ = mimetypes.guess_type(file_path)\n                    \n                    if content_type == 'text/plain':\n                        # TXT files get special treatment\n                        with open(file_path, 'r') as file:\n                            content = file.read()\n                        \n                        response = generate_response(\"200 OK\", content_type, content)\n                    else:\n                        with open(file_path, 'rb') as file:\n                            content = file.read()\n                        \n                        response = generate_response(\"200 OK\", content_type, content)\n                else:\n                    # File or directory not found\n                    response = generate_response(\"404 Not Found\", \"text/html\", \"<html><body><h1>404 Not Found</h1></body></html>\")\n        else:\n            # Method not supported\n            response = generate_response(\"405 Method Not Allowed\", \"text/html\", \"<html><body><h1>405 Method Not Allowed</h1></body></html>\")\n    else:\n        # Incomplete request, wait for the next part\n        print(\"Incomplete request!\")\n        response = b\"\" # Return an empty response for now\n\n    # Send the response to the client\n    client_socket.sendall(response)\n    client_socket.close()\n\n# Create the server socket\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind((\"localhost\", port))\nserver_socket.listen(1)\nprint(f\"Server running on port {port}\")\n\nwhile True:\n    # Accept a client connection\n    client_socket, client_address = server_socket.accept()\n\n    # Receive the client's request\n    request = b\"\"\n    while True:\n        data = client_socket.recv(1024)\n        request += data\n        if not request:\n            print(\"empty request\")\n            break\n        if b\"\\r\\n\\r\\n\" in request:\n            break\n\n    # Handle the request and send the response\n    if request:\n        handle_request(client_socket, request.decode())\n","repo_name":"EvaldoSouza/cliente_servidor_web","sub_path":"server_8.py","file_name":"server_8.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"18647759955","text":"import pathlib\nimport shutil\nimport subprocess\n\nROOT_PATH = pathlib.Path(__file__).parent.parent.absolute()\nSRC_PATH = ROOT_PATH / 'ports/linux'\n\nsubprocess.run('cmake . -Bbuild -GNinja -DCMAKE_BUILD_TYPE=Release', cwd=SRC_PATH, shell=True)\nsubprocess.run('cmake --build build', cwd=SRC_PATH, shell=True)\n\n# Remove this part and use cmake install prefix\nshutil.copy(\n    SRC_PATH / 'build/libconnx.so',\n    ROOT_PATH / 'pyconnx/connx/libconnx.so')\n","repo_name":"tsnlab/connx","sub_path":"pyconnx/build_package.py","file_name":"build_package.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"80"} +{"seq_id":"7638897476","text":"from discord.ext import commands\n\nclass Error(commands.Cog):\n    def __init__(self,bot):\n        self.bot = bot\n\n    \"\"\"An Error Handler for commands\"\"\"\n\n    @commands.Cog.listener()\n    async def on_command_error(self,ctx,error):\n        if isinstance(error, commands.CommandOnCooldown):\n            await ctx.send('**This command is on cooldown for %.2fs **' % error.retry_after)\n        if isinstance(error,commands.CommandNotFound):\n            return\n        if isinstance(error,commands.BotMissingPermissions):\n            await ctx.send('It looks like I do not have permission to execute this command. 
Please check my role position and permissions')\n        else:\n            print(error)\n\ndef setup(bot):\n    bot.add_cog(Error(bot))","repo_name":"oxy-Op/oxin","sub_path":"events/on_command_error.py","file_name":"on_command_error.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"12452843465","text":"\"\"\"\n\nfile:\t\tcommands.py\nauthor:\t\tAmir Heinisch \nversion:\t16/08/2021\n\n\"\"\"\n\nimport enum\nimport re\nimport select\nfrom os import getcwd, listdir\nfrom os.path import join, isfile, getsize\nfrom utils import TIMEOUT, DIR, fid, recv_until\n\n## Requests ##\n\ndef list_files(sock, printout=True):\n\t# Send command.\n\tsent = sock.sendall(SERVER_COMMAND.LIST_FILES.command.name.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Receive response.\n\tdata = []\n\twhile True:\n\t\treceived_bytes = b''\n\n\t\t# NOTE:\tSince there can be a huge and variable number of files\n\t\t# \t\tin a directory, we receive until a given timeout.\n\t\tready = select.select([sock], [], [], TIMEOUT)\n\t\tif ready[0]:\n\t\t\treceived_bytes = sock.recv(4096)\n\t\t\tif received_bytes == b'': # Connection broke.\n\t\t\t\tbreak\n\t\telif data:\n\t\t\tbreak\n\n\t\tdata.append(received_bytes)\n\n\tresponse = (b''.join(data)).decode('ascii')\n\n\t# Check format of received data.\n\tpattern = re.compile(\n\t\t'^([a-fA-F0-9]{32}[;][a-zA-Z0-9._]{3,255}[;][0-9]+[\\r\\n]*)+$')\n\tif not 'No files available at the moment.' in response and \\\n\t\tnot pattern.match(response):\n\t\traise RuntimeError('Protocol violation!')\n\n\tif printout:\n\t\tprint(response)\n\telse:\n\t\treturn response\n\ndef download(sock, filename=None):\n\t# Execute LIST_FILES command before to get filenames, sizes and ids.\n\tcommand = SERVER_COMMAND.LIST_FILES\n\tfiles = command.request(sock, False)\n\n\tmsg = 'No files available at the moment.\\n'\n\tif msg == files:\n\t\tprint(msg)\n\t\treturn\n\n\t# NOTE: Parsing should work..string matched regex before.\n\tfiles = list(map(lambda x: x.split(';'), files.split('\\n')[:-1]))\n\n\t# Execute download command.\n\tsent = sock.sendall(SERVER_COMMAND.DOWNLOAD.command.name.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle ask for file id.\n\tmsg = 'file_id'\n\tif not recv_until(sock, len(msg), [msg]):\n\t\traise RuntimeError('Protocol violation!')\n\n\t# Get file id and size.\n\tfile_id = None\n\tfile_size = None\n\tif not filename:\n\t\tfilename = input('Filename: ')\n\tfiles = list(filter(lambda x: filename == x[1], files))\n\tif len(files) > 0:\n\t\tfile_id = files[0][0]\n\t\tfile_size = int(files[0][2])\n\telse:\n\t\traise RuntimeError('File does not exist!')\n\n\t# Send file id.\n\tsent = sock.sendall(file_id.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle file byte stream.\n\tresponse = recv_until(sock, file_size, decoding=None)\n\tif not response:\n\t\traise RuntimeError('Protocol violation!')\n\tprint('-> Receiving ', end='')\n\tprint(response[:20], end='')\n\tprint('...')\n\n\tif isfile(filename): # NOTE: not specified.\n\t\tfilename = filename + '.download'\n\n\twith open(filename, 'wb') as f:\n\t\tf.write(response)\n\n\tif file_id == fid(filename):\n\t\tprint('SUCCESS! File is not corrupted..')\n\telse:\n\t\tprint('ERROR! 
File is corrupted..')\n\ndef upload(sock, filename=None):\n\t# Send command.\n\tsent = sock.sendall(SERVER_COMMAND.UPLOAD.command.name.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle ask for filename and filesize.\n\tmsg = 'filename_filesize'\n\tif not recv_until(sock, len(msg), [msg]):\n\t\traise RuntimeError('Protocol violation!')\n\n\t# Send file name and size.\n\tif not filename:\n\t\tfilename = input('Filename: ')\n\tfilesize = getsize(filename)\n\tsent = sock.sendall((filename + ';' + str(filesize)).encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle upload begin.\n\tmsg = 'ready to receive a file'\n\tif not recv_until(sock, len(msg), [msg]):\n\t\traise RuntimeError('Protocol violation!')\n\n\t# Upload\n\twith open(filename, 'rb') as f:\n\t\tprint(f'-> Uploading file ({filename})')\n\t\tsock.sendall(f.read())\n\n\t# Verify upload.\n\tfile_id = recv_until(sock, 32) # md5 -> 128 bit = 32 hex.\n\tif file_id == fid(filename):\n\t\tprint('SUCCESS! File is not corrupted..')\n\telse:\n\t\tprint('ERROR! File is corrupted..')\n\n\n## Responses ##\n\ndef response_list_files(sock):\n\tresponse = ('\\n'.join(\n\t\t[\n\t\t\tfid(f) + ';' + f + ';' + str(getsize(f)) for f in listdir(DIR)\n\t\t\tif isfile(join(DIR, f))\n\t\t]\n\t) or 'No files available at the moment.' ) + '\\n'\n\n\tsock.sendall(response.encode('ascii'))\n\ndef response_download(sock):\n\t# Ask for file id.\n\tsock.sendall('file_id'.encode('ascii'))\n\n\tfile_id = recv_until(sock, 32, '^[a-zA-Z0-9]{32}$')\n\tif not file_id:\n\t\traise RuntimeError('Protocol violation!')\n\n\tfor p in listdir(DIR):\n\t\tpath = join(DIR, p)\n\t\tif isfile(path) and fid(path) == file_id:\n\t\t\twith open(path, 'rb') as f:\n\t\t\t\tprint(f'-> Sending file ({path})')\n\t\t\t\tsock.sendall(f.read())\n\t\t\t\treturn\n\n\traise RuntimeError('Invalid file id!')\n\n\ndef response_upload(sock):\n\t# Send question for filename and filesize.\n\tsent = sock.sendall('filename_filesize'.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle ask for filename and filesize.\n\t# TODO: handle path traversal attacks.\n\tresponse = recv_until(sock, 300, '^[a-zA-Z0-9._]{3,255}[;][0-9]+$')\n\tif not response:\n\t\traise RuntimeError('Protocol violation!')\n\tfilename, filesize = response.split(';')\n\tfilesize = int(filesize)\n\n\t# Send ready to receive.\n\tsent = sock.sendall('ready to receive a file'.encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\t# Handle file byte stream.\n\tresponse = recv_until(sock, filesize, decoding=None)\n\tif not response:\n\t\traise RuntimeError('Protocol violation!')\n\tprint('-> Receiving ', end='')\n\tprint(response[:20], end='')\n\tprint('...')\n\n\tif isfile(filename): # NOTE: not specified.\n\t\tfilename = filename + '.upload'\n\n\t# Write file.\n\twith open(filename, 'wb') as f:\n\t\tf.write(response)\n\n\t# Send file id.\n\tsent = sock.sendall(fid(filename).encode('ascii'))\n\tif sent == 0:\n\t\traise RuntimeError('Socket connection broken')\n\n\n## Command abstraction ##\n\nclass Command:\n\tdef __init__(self, name, request, response):\n\t\tself.name = name\n\t\tself.request = request\n\t\tself.respond = response\n\n\nclass SERVER_COMMAND(enum.Enum):\n\tLIST_FILES = Command('LIST_FILES', list_files, response_list_files)\n\tDOWNLOAD = Command('DOWNLOAD', download, response_download)\n\tUPLOAD = Command('UPLOAD', upload, response_upload)\n\n\tdef 
__new__(cls, command):\n\t\tnext_idx = len(cls.__members__) + 1\n\t\tobj = object.__new__(cls)\n\t\tobj._value_ = next_idx\n\t\tobj.command = command\n\t\treturn obj\n\n\tdef request(self, *args):\n\t\treturn self.command.request(*args)\n\n\tdef respond(self, *args):\n\t\treturn self.command.respond(*args)\n\n\tdef complete(self, text, state):\n\t\t\"\"\" Readline tab completion \"\"\"\n\t\tresults = [ c.name for c in SERVER_COMMAND if c.name.startswith(text) ]\n\t\tresults += [None]\n\t\treturn results[state]\n\n\t@staticmethod\n\tdef print_commands():\n\t\tline = [ f'[{x.value}] {x.name}' for x in SERVER_COMMAND ]\n\t\tprint('\\n'.join(line))\n\n\t@staticmethod\n\tdef values():\n\t\treturn [ v.name for v in SERVER_COMMAND]\n\n\t@staticmethod\n\tdef max_len_command():\n\t\tmax_command_len = 0\n\t\tfor val in SERVER_COMMAND:\n\t\t\tif len(val.name) > max_command_len:\n\t\t\t\tmax_command_len = len(val.name)\n\t\treturn max_command_len\n\n","repo_name":"amir-heinisch/snippets","sub_path":"python/file_sharing/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"8786569653","text":"# Gym uniforms (체육복)\n\ndef solution(n, lost, reserve):\n    a = [0] + [1] * n\n    for x in lost:\n        a[x] -= 1\n    for x in reserve:\n        a[x] += 1\n    for i in range(1, n+1):\n        if a[i] == 0:\n            if a[i-1] == 2:\n                a[i-1] -= 1\n                a[i] += 1\n            elif i != n and a[i+1] == 2:\n                a[i+1] -= 1\n                a[i] += 1\n    answer = 0\n    for x in a:\n        if x != 0:\n            answer += 1\n    return answer\n\n\nif __name__ == '__main__':\n    print(solution(3, [3], [1]))\n","repo_name":"sjpark-dev/python-practice","sub_path":"problems/programmers/level_1_to_2/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"18075516255","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport glob\r\nimport sys\r\nsys.path.append(\"src/\")\r\nimport tracks_import as ti\r\nfrom loguru import logger\r\n\r\nbase_path = \"dataset/\"\r\ntracks_files = sorted(glob.glob(base_path + \"*_tracks.csv\"))\r\nstatic_tracks_files = sorted(glob.glob(base_path + \"*_tracksMeta.csv\"))\r\nrecording_meta_files = sorted(glob.glob(base_path + \"*_recordingMeta.csv\"))\r\n\r\nrecording_id = 0\r\nstart_id = 33\r\nfor track_file, static_tracks_file, recording_meta_file in zip(tracks_files,\r\n                                                               static_tracks_files,\r\n                                                               recording_meta_files):\r\n\r\n\r\n    logger.info(\"Loading csv files {}, {} and {}\", track_file, static_tracks_file, recording_meta_file)\r\n\r\n    # tracks, info, meta\r\n    tracks, static_info, meta_info = ti.read_from_csv(track_file, static_tracks_file, recording_meta_file)\r\n\r\n    # create folder for frame_images\r\n    filepath = base_path + str(recording_id)\r\n    if not os.path.exists(filepath): os.mkdir(filepath)\r\n\r\n    # scale factor inD=12 rounD=10\r\n    scale_factor = 12\r\n    if recording_id > 32:\r\n        scale_factor = 10\r\n\r\n    object_list = {}\r\n    for i in range(len(tracks)):\r\n        object_list[i] = tracks[i]['xCenter'].shape\r\n\r\n    maximum_frames = np.max([static_info[track[\"trackId\"]][\"finalFrame\"] for track in tracks])\r\n\r\n\r\n    # Save ids for each frame\r\n    ids_for_frame = {}\r\n    for i_frame in range(maximum_frames):\r\n        indices = [i_track for i_track, track in enumerate(tracks)\r\n                   if\r\n                   static_info[track[\"trackId\"]][\"initialFrame\"] <= i_frame <= static_info[track[\"trackId\"]][\r\n                       \"finalFrame\"]]\r\n        ids_for_frame[i_frame] = 
indices\r\n\r\n\r\n def get_color(type=None):\r\n if(type == 'car') or (type == 'van'): return 27\r\n elif (type == 'truck_bus') or (type == 'truck') or (type == 'bus') or (type == 'trailer'): return 28\r\n elif (type == 'motorcycle'): return 34\r\n elif (type == 'bicycle'): return 32\r\n elif (type == 'pedestrian'): return 42\r\n\r\n if recording_id >= start_id:\r\n for current_frame in range(maximum_frames):\r\n # load semantic image\r\n image = cv2.imread(str(base_path) + 'semantic_' + str(recording_id) + '.png', cv2.IMREAD_GRAYSCALE)\r\n\r\n\r\n for track_ind in ids_for_frame[current_frame]:\r\n track = tracks[track_ind]\r\n\r\n track_id = track[\"trackId\"]\r\n static_track_information = static_info[track_id]\r\n initial_frame = static_track_information[\"initialFrame\"]\r\n current_index = current_frame - initial_frame\r\n\r\n object_class = static_track_information[\"class\"]\r\n is_vehicle = object_class in [\"car\", \"truck_bus\", \"motorcycle\", \"bicycle\"]\r\n bounding_box = track[\"bboxVis\"][current_index] / scale_factor\r\n center_point = track[\"centerVis\"][current_index] / scale_factor\r\n\r\n\r\n # pedestrian 10x10 pixel/ one pixel 4x4 cm\r\n image = cv2.circle(image, center=(int(center_point[0]), int(center_point[1])),\r\n radius=5, color=get_color(object_class), thickness=-1)\r\n\r\n if is_vehicle:\r\n # Polygon corner points coordinates\r\n pts = np.array(bounding_box, np.int32)\r\n image = cv2.polylines(image, [pts], True, get_color(object_class), thickness=1)\r\n\r\n # add direction\r\n triangle_factor = 0.75\r\n a_x = bounding_box[3, 0] + ((bounding_box[2, 0] - bounding_box[3, 0]) * triangle_factor)\r\n b_x = bounding_box[0, 0] + ((bounding_box[1, 0] - bounding_box[0, 0]) * triangle_factor)\r\n c_x = bounding_box[2, 0] + ((bounding_box[1, 0] - bounding_box[2, 0]) * 0.5)\r\n\r\n a_y = bounding_box[3, 1] + ((bounding_box[2, 1] - bounding_box[3, 1]) * triangle_factor)\r\n b_y = bounding_box[0, 1] + ((bounding_box[1, 1] - bounding_box[0, 1]) * triangle_factor)\r\n c_y = bounding_box[2, 1] + ((bounding_box[1, 1] - bounding_box[2, 1]) * 0.5)\r\n\r\n pts = np.array([[a_x, a_y], [b_x, b_y], [c_x, c_y]], np.int32)\r\n image = cv2.polylines(image, [pts], True, get_color(object_class), thickness=1)\r\n\r\n\r\n #cv2.imshow(\"cropped\", image)\r\n #cv2.waitKey(0)\r\n\r\n image = cv2.imwrite(str(filepath) + '/image_frames_' + str(recording_id) + '_' + str(current_frame) + '.png', image)\r\n logger.info(\"Image write recording id {}/34 frame {}/{}\", recording_id, current_frame, maximum_frames)\r\n\r\n # count track id\r\n recording_id += 1\r\n #show last image\r\n #cv2.imshow(\"image\", image)\r\n #cv2.waitKey(0)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sassi83/trajectory-prediction","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"41059545","text":"from django.db import models\nfrom user.models import Account\nfrom .signals import saveSlug\n\nclass Blog(models.Model):\n title = models.CharField(max_length=150)\n subtitle = models.CharField(max_length=400)\n slug = models.SlugField(blank=True,null=True)\n body = models.TextField()\n date = models.DateField(auto_now_add=True)\n time = models.TimeField(auto_now_add=True)\n photo = models.ImageField(upload_to='blog/')\n views = models.PositiveIntegerField(default=0)\n\n class Meta:\n ordering = ['date']\n verbose_name_plural = \"Blog\"\n\n def __str__(self):\n return 
f'{self.title}'\n\nclass Comment(models.Model):\n    blog = models.ForeignKey(Blog, on_delete=models.SET_NULL, null=True)\n    user = models.ForeignKey(Account, on_delete=models.CASCADE)\n    date_comment = models.DateTimeField(auto_now_add=True)\n    comment = models.TextField()\n\n    class Meta:\n        ordering = ['date_comment']\n        verbose_name_plural = 'Comments'\n\n    def __str__(self):\n        return f'{self.blog.title} - {self.comment}'\n\nclass NewsletterFollowers(models.Model):\n    email = models.EmailField(unique=True)\n    followers = models.ManyToManyField(Account)\n\n    class Meta:\n        verbose_name_plural = \"Newsletter subscribers\"\n\n    def __str__(self):\n        return self.email\n\nclass Contact(models.Model):\n    user = models.ForeignKey(Account, on_delete=models.SET_NULL, null=True, blank=True)\n    name = models.CharField(max_length=120, blank=True)\n    email = models.EmailField(blank=True)\n    message = models.TextField()\n\n    class Meta:\n        verbose_name_plural = 'Contact'\n\n    def __str__(self):\n        if not self.user:\n            return self.name\n        return self.user.username\n\nclass Describe(models.Model):\n    text = models.TextField()\n    create_date = models.DateTimeField(auto_now_add=True)\n\n    class Meta:\n        verbose_name_plural = \"Information about the school\"\n\n    def __str__(self):\n        return self.text\n\n\nsaveSlug(instance=Blog)","repo_name":"ravshanbekio/18-maktab","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"34754057039","text":"import hashlib\nimport logging\n\nfrom stevedore import driver\nfrom pecan import conf\n\nfrom cauth.service import base\nfrom cauth.model import db as auth_map\nfrom cauth.utils import transaction\nfrom cauth.utils import exceptions\n\n\ndef differentiate(login, domain, uid):\n    suffix = hashlib.sha1((domain + '/' + str(uid)).encode()).hexdigest()\n    return login + '_' + suffix[:6]\n\n\nclass UserDetailsCreator(transaction.TransactionLogger):\n    log = logging.getLogger(\"cauth.UserDetailsCreator\")\n\n    def __init__(self, conf):\n        self.services = []\n        for service in conf.services:\n            try:\n                plugin = driver.DriverManager(\n                    namespace='cauth.service',\n                    name=service,\n                    invoke_on_load=True,\n                    invoke_args=(conf,)).driver\n                self.services.append(plugin)\n            except base.ServiceConfigurationError as e:\n                self.logger.error(str(e))\n\n    def create_user(self, user):\n        external_info = user.get('external_auth', {})\n        transactionID = user.get('transactionID', '')\n        c_id = -1\n        # skip if authenticating with an API key\n        if external_info:\n            if external_info.get('domain') == 'CAUTH_API_KEY':\n                c_id = external_info['external_id']\n            else:\n                external_info['username'] = user['login']\n                try:\n                    c_id = auth_map.get_or_create_authenticated_user(\n                        **external_info)\n                except exceptions.UsernameConflictException as e:\n                    strategy = conf.auth.get('login_collision_strategy')\n                    if strategy == 'DIFFERENTIATE':\n                        old_login = user['login']\n                        user['login'] = differentiate(\n                            user['login'],\n                            external_info.get('domain', ''),\n                            external_info['external_id'])\n                        external_info['username'] = user['login']\n                        self.tinfo(\n                            \"Login \\\"%s\\\" already registered for domain \"\n                            \"%s, uid %s, differentiating login as %s\",\n                            transactionID, old_login,\n                            e.external_auth_details['domain'],\n                            e.external_auth_details['external_id'],\n                            user['login'])\n                        c_id = auth_map.get_or_create_authenticated_user(\n                            **external_info)\n                    elif strategy == 'FORBID':\n                        raise\n                    else:\n                        self.terror(\"Incorrect login collision 
strategy \"\n \"\\\"%s\\\", defaulting to \\\"FORBID\\\"\",\n transactionID, strategy)\n raise\n del user['external_auth']\n user['external_id'] = c_id\n for service in self.services:\n try:\n service.register_new_user(user)\n except base.UserRegistrationError:\n self.texception('Error when adding user %s (ID %s)',\n transactionID, user['login'], c_id)\n return c_id\n","repo_name":"softwarefactory-project/cauth","sub_path":"cauth/utils/userdetails.py","file_name":"userdetails.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"12766317239","text":"import numpy as np\nfrom flask import Flask, request, jsonify, render_template\nimport joblib\n\napp = Flask(__name__,template_folder='templates')\nmodel = joblib.load('finalized_model.sav')\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\ndef predict():\n feature_list = request.form.to_dict()\n feature_list = list(feature_list.values())\n feature_list = list(map(int, feature_list))\n final_features = np.array(feature_list).reshape(1, 27)\n prediction = model.predict(final_features)\n\n output = prediction[0]\n\n if output == 0:\n text = \"It's Green , Play !\"\n else:\n text = \"It's Not Green , Don't Play!\"\n\n return render_template('index.html', prediction_text='{}'.format(text))\n \nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"iheb-bibani/Gambling","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"42962734514","text":"import numpy as np\n\nfrom repository import CardRepository as cr\n\n\ndef assign_cards_to_user(user, cards):\n already_assigned = np.empty(shape=0)\n assigned_to_user = np.empty(shape=0)\n for card in cards:\n if card.user_id is None:\n card.user_id = user.id\n assigned_to_user = np.append(assigned_to_user, card)\n else:\n already_assigned = np.append(already_assigned, int(card.id))\n cr.save_all(assigned_to_user)\n return already_assigned\n\n\ndef get_user_with_id_from_list_of_users(user_id, users_list):\n possible_users = list(filter(lambda user: user.id == user_id, users_list))\n if len(possible_users) == 1:\n return possible_users[0]\n elif len(possible_users) > 1:\n raise ValueError(f\"More than one user with id {user_id} in list\")\n else:\n raise ValueError(f\"User with id {user_id} not present.\")","repo_name":"PaoloRuggirello/bingo-be","sub_path":"helper/UserHelper.py","file_name":"UserHelper.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"24219277285","text":"import pandas as pd\n# import dict\nimport numpy as np\nimport dbPointer\nimport json\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nimport operator\nimport copy\n\n# GLOBAL VARIABLES\nDICT_SIZE = 3000\nMAX_LENGTH = 50\n\ndef is_ascii(s):\n return all(ord(c) < 128 for c in s)\n\ndef read_data():\n google_sheet_id = \"1Osl3p1MVKL7NAF3TAr4V3EoUmn-GTTUXTz1yP8vF99E\"\n sheet_name = \"CombinedSheet\"\n google_sheet_url = \"https://docs.google.com/spreadsheets/d/{}/gviz/tq?tqx=out:csv&sheet={}\".format(google_sheet_id, sheet_name)\n df = pd.read_csv(google_sheet_url)\n return df\n\ndef fill_standard_slots():\n dict_mobile = {}\n dict_laptop = {}\n dict_camera = {}\n\n dict_mobile[\"ram\"] = \"Not mentioned\"\n dict_mobile[\"camera\"] = \"Not mentioned\"\n 
dict_mobile[\"cost\"] = \"Not mentioned\"\n dict_mobile[\"battery\"] = \"Not mentioned\"\n dict_mobile[\"model\"] = \"Not mentioned\"\n dict_mobile[\"processor\"] = \"Not mentioned\"\n dict_mobile[\"color\"] = \"Not mentioned\"\n dict_mobile[\"brand\"] = \"Not mentioned\"\n dict_mobile[\"os\"] = \"Not mentioned\"\n\n dict_laptop[\"ram\"] = \"Not mentioned\"\n dict_laptop[\"brand\"] = \"Not mentioned\"\n dict_laptop[\"model\"] = \"Not mentioned\"\n dict_laptop[\"processor\"] = \"Not mentioned\"\n dict_laptop[\"color\"] = \"Not mentioned\"\n dict_laptop[\"cost\"] = \"Not mentioned\"\n dict_laptop[\"battery\"] = \"Not mentioned\"\n dict_laptop[\"os\"] = \"Not mentioned\"\n\n dict_camera[\"brand\"] = \"Not mentioned\"\n dict_camera[\"cost\"] = \"Not mentioned\"\n dict_camera[\"model\"] = \"Not mentioned\"\n\n dict_combined = {\"smartphone_tablet\": dict_mobile, \"laptop\": dict_laptop, \"camera\": dict_camera}\n return dict_combined\n\n\ndef addDBPointer(agent_metadata):\n \"\"\"Create database pointer for all related domains.\"\"\"\n domains = ['smartphone_tablet', 'laptop', 'camera']\n pointer_vector = np.zeros(6 * len(domains))\n for domain in domains:\n num_entities = dbPointer.queryResult(domain, agent_metadata)\n pointer_vector = dbPointer.oneHotVector(num_entities, domain, pointer_vector)\n\n return pointer_vector\n\ndef create_data(df):\n \"\"\"\n Creating the dictionary for delexicalizing the data\n \"\"\"\n delex_data = {}\n dialogue = []\n dialogue_number = 1\n current_domain = \"No Domain\"\n user_log = {}\n agent_log = {}\n mobile_tags = []\n laptop_tags = []\n camera_tags = []\n ids = []\n agent_metadata = fill_standard_slots()\n\n '''\n Building sets to find out different slots in different domains\n '''\n smartphone_tablet_set = set()\n laptop_set = set()\n camera_set = set()\n\n\n for i in df.index:\n print(dialogue_number)\n user_utterance = df[\"USER\"][i]\n agent_utterance = df[\"AGENT\"][i]\n if user_utterance == (str)(dialogue_number):\n if dialogue_number != 1:\n delex_data[dialogue_number-1] = dialogue\n dialogue = []\n current_domain = \"No Domain\"\n # if dialogue_number != 1:\n # print(\"hello\")\n dialogue_number += 1\n agent_metadata = fill_standard_slots()\n continue\n\n user_utterance = user_utterance.lower()\n agent_utterance = agent_utterance.lower()\n\n slot_tags = df[\"Tag\"][i]\n if current_domain == \"No Domain\":\n tmp = df[\"Task Info\"][i]\n if tmp == tmp:\n arr = tmp.split(',')\n if len(arr) == 1:\n current_domain = arr[0]\n if len(arr) > 1:\n current_domain = \"MultiDomain\"\n ids.append(dialogue_number-1)\n if current_domain == \"MultiDomain\":\n continue\n # agent_log[\"metadata\"] = agent_metadata\n\n # print(slot_tags)\n if slot_tags != slot_tags:\n pointer_vector = addDBPointer(agent_metadata)\n user_log[\"text\"] = user_utterance\n agent_log[\"text\"] = agent_utterance\n user_log[\"db_pointer\"] = pointer_vector.tolist()\n agent_log[\"metadata\"] = copy.deepcopy(agent_metadata)\n dialogue.append(copy.deepcopy(user_log))\n dialogue.append(copy.deepcopy(agent_log))\n continue\n slots = slot_tags.split(',')\n # print(slots)\n for slot in slots:\n arr = slot.split('-')\n if len(arr) >= 2:\n slot_name = arr[0].strip().lower()\n slot_val = arr[1].strip().lower()\n if current_domain == \"Smartphone\" or current_domain == \"Tablet\":\n tag_name = '[smartphone_tablet_' + slot_name + ']'\n smartphone_tablet_set.add(slot_name)\n # mobile_tags.append(slot_name)\n if agent_metadata[\"smartphone_tablet\"].__contains__(slot_name):\n 
agent_metadata[\"smartphone_tablet\"][slot_name] = slot_val\n elif current_domain == \"Laptop\":\n tag_name = '[laptop_' + slot_name + ']'\n laptop_set.add(slot_name)\n # laptop_tags.append(slot_name)\n if agent_metadata[\"laptop\"].__contains__(slot_name):\n agent_metadata[\"laptop\"][slot_name] = slot_val\n elif current_domain == \"Camera\":\n tag_name = '[camera_' + slot_name + ']'\n camera_set.add(slot_name)\n # camera_tags.append(slot_name)\n if agent_metadata[\"camera\"].__contains__(slot_name):\n agent_metadata[\"camera\"][slot_name] = slot_val\n user_utterance = user_utterance.replace(slot_val, tag_name)\n agent_utterance = agent_utterance.replace(slot_val, tag_name)\n\n pointer_vector = addDBPointer(agent_metadata)\n user_log[\"text\"] = user_utterance\n agent_log[\"text\"] = agent_utterance\n user_log[\"db_pointer\"] = pointer_vector.tolist()\n agent_log[\"metadata\"] = copy.deepcopy(agent_metadata)\n dialogue.append(copy.deepcopy(user_log))\n dialogue.append(copy.deepcopy(agent_log))\n\n\n # mobile_set = list(set(mobile_tags))\n # print(mobile_set)\n # laptop_set = list(set(laptop_tags))\n # print(laptop_set)\n # camera_set = list(set(camera_tags))\n # print(camera_set)\n print(\"Smartphone_tablet_Set size = \", len(smartphone_tablet_set))\n print(smartphone_tablet_set)\n\n print(\"Laptop_Set size = \", len(laptop_set))\n print(laptop_set)\n\n print(\"Camera_Set size = \", len(camera_set))\n print(camera_set)\n\n with open('data/btpData.json', 'w') as f:\n json.dump(delex_data, f)\n\n return delex_data\n\ndef get_summary_bstate(bstate):\n domains = ['smartphone_tablet', 'laptop', 'camera']\n summary_bstate = []\n summary_bstate += [0]\n for domain in domains:\n domain_active = False\n\n for slot in bstate[domain]:\n if bstate[domain][slot] == \"Not mentioned\":\n summary_bstate.append(0)\n else:\n domain_active = True\n summary_bstate.append(1)\n\n # quasi domain-tracker\n if domain_active:\n summary_bstate += [1]\n else:\n summary_bstate += [0]\n\n print(len(summary_bstate))\n assert len(summary_bstate) == 24\n return summary_bstate\n\ndef get_belief_state(bstate):\n domains = ['smartphone_tablet', 'laptop', 'camera']\n raw_bstate = {}\n for domain in domains:\n for slot, value in bstate[domain].items():\n if value != \"Not mentioned\":\n raw_bstate['[' + domain + slot + ']'] = value\n\n return raw_bstate\n\ndef analyze_dialogue(dialogue, max_len):\n \"\"\"Cleaning procedure for all kinds of errors in text and annotation.\"\"\"\n d = dialogue\n # do all the necessary postprocessing\n if len(d) % 2 != 0:\n # print path\n print('odd # of turns')\n return None # odd number of turns, wrong dialogue\n\n if len(d) == 0:\n # print path\n print('Empty Dialogue')\n return None\n\n d_pp = {}\n usr_turns = []\n sys_turns = []\n for i in range(len(d)):\n if len(d[i]['text'].split()) > max_len:\n print(\"too long\")\n return None\n if i % 2 == 0: #user turn\n if 'db_pointer' not in d[i]:\n print(\"No DB\")\n return None\n text = d[i]['text']\n if not is_ascii(text):\n print(\"Not ASCII\")\n return None\n usr_turns.append(d[i])\n else:\n text = d[i]['text']\n if not is_ascii(text):\n print(\"Not ASCII\")\n return None\n belief_summary = get_summary_bstate(d[i]['metadata'])\n d[i]['belief_summary'] = belief_summary\n\n # get raw belief state\n belief_state = get_belief_state(d[i]['metadata'])\n d[i]['belief_state'] = belief_state\n sys_turns.append(d[i])\n\n d_pp['usr_log'] = usr_turns\n d_pp['sys_log'] = sys_turns\n\n return d_pp\n\n\ndef get_dial(dialogue):\n \"\"\"Extract a dialogue from 
the file\"\"\"\n dial = []\n d_orig = analyze_dialogue(dialogue, MAX_LENGTH)\n if d_orig is None:\n return None\n\n usr = [t['text'] for t in d_orig['usr_log']]\n db = [t['db_pointer'] for t in d_orig['usr_log']]\n bs = [t['belief_summary'] for t in d_orig['sys_log']]\n belief_state = [t['belief_state'] for t in d_orig['sys_log']]\n sys = [t['text'] for t in d_orig['sys_log']]\n for u, d, s, b, bstate in zip(usr, db, sys, bs, belief_state):\n dial.append((u, s, d, b, bstate))\n\n return dial\n\ndef divideData(data, persona_data):\n train_dials = {}\n val_dials = {}\n\n # dictionaries\n word_freqs_usr = OrderedDict()\n word_freqs_sys = OrderedDict()\n\n dialogue_number = 1\n dialogue_len = 0\n\n for dialogue_name in data:\n dial = get_dial(data[dialogue_name])\n print(\"dialogue number : \", dialogue_number)\n if dial:\n dialogue= {}\n dialogue['personality'] = []\n dialogue['sentiment'] = []\n dialogue['usr'] = []\n dialogue['sys'] = []\n dialogue['db'] = []\n dialogue['bs'] = []\n dialogue['bstate'] = []\n idx = 0\n for turn in dial:\n dialogue['personality'].append([persona_data[dialogue_name][idx]['persona']])\n dialogue['sentiment'].append([persona_data[dialogue_name][idx]['sentiment']])\n dialogue['usr'].append(turn[0])\n dialogue['sys'].append(turn[1])\n dialogue['db'].append(turn[2])\n dialogue['bs'].append(turn[3])\n dialogue['bstate'].append(turn[4])\n idx += 1\n train_dials[dialogue_name] = dialogue\n if dialogue_number > 430:\n val_dials[dialogue_name] = dialogue\n\n for turn in dial:\n line = turn[0]\n words_in = line.strip().split(' ')\n for w in words_in:\n if w not in word_freqs_usr:\n word_freqs_usr[w] = 0\n word_freqs_usr[w] += 1\n\n line = turn[1]\n words_in = line.strip().split(' ')\n for w in words_in:\n if w not in word_freqs_sys:\n word_freqs_sys[w] = 0\n word_freqs_sys[w] += 1\n dialogue_number += 1\n\n with open('data/val_dials.json', 'w') as f:\n json.dump(val_dials, f, indent=4)\n\n with open('data/train_dials.json', 'w') as f:\n json.dump(train_dials, f, indent=4)\n\n return word_freqs_usr, word_freqs_sys\n\ndef createDict(word_freqs):\n\n word_freqs = sorted(word_freqs.items(), key=operator.itemgetter(1), reverse=True)\n sorted_words = []\n for i in word_freqs:\n sorted_words.append(i[0])\n # for key, val in word_freqs.items():\n # print(key)\n # print(val)\n #\n # words = word_freqs.keys()\n # freqs = word_freqs.values()\n #\n # sorted_idx = np.argsort(freqs)\n # sorted_words = []\n # for ii in sorted_idx[::-1]:\n # sorted_words.append(words[ii])\n # sorted_words = [words[ii] for ii in sorted_idx[::-1]]\n\n # Extra vocabulary symbols\n _GO = '_GO'\n EOS = '_EOS'\n UNK = '_UNK'\n PAD = '_PAD'\n extra_tokens = [_GO, EOS, UNK, PAD]\n\n worddict = OrderedDict()\n for ii, ww in enumerate(extra_tokens):\n worddict[ww] = ii\n for ii, ww in enumerate(sorted_words):\n worddict[ww] = ii + len(extra_tokens)\n\n\n\n delete_keys = [key for key, idx in worddict.items() if idx >= DICT_SIZE]\n\n for key in delete_keys:\n del worddict[key]\n\n vocab_len = len(worddict)\n print(\"vocab length : \", vocab_len)\n\n # for key, idx in worddict.items():\n # if idx >= DICT_SIZE:\n # del worddict[key]\n\n return worddict\n\ndef buildDictionaries(word_freqs_usr, word_freqs_sys):\n \"\"\"Build dictionaries for both user and system sides.\n You can specify the size of the dictionary through DICT_SIZE variable.\"\"\"\n dicts = []\n worddict_usr = createDict(word_freqs_usr)\n dicts.append(worddict_usr)\n worddict_sys = createDict(word_freqs_sys)\n dicts.append(worddict_sys)\n\n # reverse 
dictionaries\n idx2words = []\n for dictionary in dicts:\n dic = {}\n for k,v in dictionary.items():\n dic[v] = k\n idx2words.append(dic)\n\n with open('data/input_lang.index2word.json', 'w') as f:\n json.dump(idx2words[0], f, indent=2)\n with open('data/input_lang.word2index.json', 'w') as f:\n json.dump(dicts[0], f,indent=2)\n with open('data/output_lang.index2word.json', 'w') as f:\n json.dump(idx2words[1], f,indent=2)\n with open('data/output_lang.word2index.json', 'w') as f:\n json.dump(dicts[1], f,indent=2)\n\n\ndef main():\n \"\"\"\"\n Reading the data from Google Sheet in a data-frame\n \"\"\"\n # df = read_data()\n\n\n # delex_data = create_data(df)\n with open('data/btpData.json') as f:\n delex_data = json.load(f)\n\n with open('data/personaData.json') as f:\n persona_data = json.load(f)\n\n print('Divide dialogues for separate bits - usr, sys, db, bs')\n word_freqs_usr, word_freqs_sys = divideData(delex_data, persona_data)\n\n print('Building dictionaries')\n buildDictionaries(word_freqs_usr, word_freqs_sys)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Abhijeet289/PersonaBasedGeneration","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":14420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"10567500299","text":"import copy\nimport random\n\n#construct the color list of balls.\nclass Hat:\n def __init__(self, **kwargs):\n self.contents = []\n for key, value in kwargs.items():\n for i in range(value):\n self.contents.append(key)\n\n #use random to draw balls.\n def draw(self, n):\n n = min(n, len(self.contents))\n return [self.contents.pop(random.randrange(len(self.contents))) for _ in range(n)]\n \n#use copy and calculate the probability.\ndef experiment(hat, expected_balls, num_balls_drawn, num_experiments):\n done = 0\n for i in range(num_experiments):\n another_hat = copy.deepcopy(hat)\n balls_drawn = another_hat.draw(num_balls_drawn)\n balls_req = sum([1 for key, value in expected_balls.items() if balls_drawn.count(key) >= value])\n done += 1 if balls_req == len(expected_balls) else 0\n\n return done / num_experiments\n \n","repo_name":"coskuncancoban/scientific_computing_freecodecamp","sub_path":"probability-calculator/prob_calculator.py","file_name":"prob_calculator.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"10217601331","text":"\"\"\"health URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom . 
import views\n# #update by naitik\n# from doctor import views as doc_views\n# from patient import views as pat_views\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('patient/',include('patient.urls')),\n path('doctor/',include('doctor.urls')),\n path('hospital/',include('hospital.urls')),\n path('admin/', admin.site.urls),\n path('aboutus', views.aboutus_view),\n path('contactus', views.contactus_view),\n path('seedocs', views.seedocs),\n]\n","repo_name":"jashjain21/Doc-on-Call","sub_path":"medical/health/health/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"40205406511","text":"from testsuite_support.builder_and_runner import (\n BuilderAndRunner,\n GPRLS,\n GPRCLEAN,\n)\n\nbnr = BuilderAndRunner()\n\nall_tools = [GPRLS, GPRCLEAN]\n\n\ndef execute(cmd, check=False):\n print(\"-------------------------\")\n print(\" \".join(cmd))\n print(\"-------------------------\")\n if check:\n bnr.check_call(cmd)\n else:\n p = bnr.call(cmd)\n if p.status != 0:\n print(f\"STATUS: {p.status}\")\n print(\"\")\n\n\n# check gprtool\n\nfor t in all_tools:\n execute([t, \"-Pdemo\"], True)\n\n# check gprtool -ws\n\nfor t in all_tools:\n execute([t, \"-Pdemo\", \"-ws\"], True)\n","repo_name":"AdaCore/gpr","sub_path":"testsuite/tests/disable_warnings/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"80"} +{"seq_id":"7166614176","text":"import json\nimport time\n\nimport numpy as np\n\n\nclass Task:\n\n def __init__(self, identifier, size):\n self.identifier = identifier\n self.size = size\n self.a = np.random.rand(size, size)\n self.b = np.random.rand(size)\n self.time = 0\n self.x = np.zeros(size)\n\n def work(self):\n start = time.perf_counter()\n self.x = np.linalg.solve(self.a, self.b)\n self.time = time.perf_counter() - start\n\n def to_json(self) -> str:\n data = {\n \"identifier\": self.identifier,\n \"size\": self.size,\n \"a\": self.a.tolist(),\n \"b\": self.b.tolist(),\n \"x\": self.x.tolist(),\n \"time\": self.time,\n }\n return json.dumps(data)\n\n @classmethod\n def from_json(cls, text: str) -> \"Task\":\n data = json.loads(text)\n\n task = cls(identifier=data[\"identifier\"], size=data[\"size\"])\n task.a = np.array(data[\"a\"])\n task.b = np.array(data[\"b\"])\n task.x = np.array(data[\"x\"])\n task.time = data[\"time\"]\n\n return task\n\n def __eq__(self, other: \"Task\") -> bool:\n if not isinstance(other, Task):\n return False\n\n if not (self.identifier == other.identifier\n and self.size == other.size\n and self.time == other.time):\n return False\n\n return (np.array_equal(self.a, other.a)\n and np.array_equal(self.b, other.b)\n and np.array_equal(self.x, other.x))\n\n\nif __name__ == '__main__':\n a = Task(1, 100)\n txt = a.to_json()\n b = Task.from_json(txt)\n print(a == b)\n","repo_name":"PeterDezy/multithreading","sub_path":"src/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"1446611539","text":"INPUT = r'D:\\Repositories\\NameProposal\\names_clear_57k.txt'\nOUTPUT = r'names_superclear.txt'\n\ndef destroy_sign(c):\n if c.isalpha() or c.isdigit():\n return c\n else:\n return ' '\n\ndef destroy_signs(line):\n norm_line = [destroy_sign(c) for c in line]\n norm_line = ''.join(norm_line)\n 
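# strip the edge spaces introduced where punctuation was replaced above\n    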
norm_line = norm_line.strip()\n    return norm_line\n\ndef normalize(line):\n    line = line.lower()\n    line = destroy_signs(line)\n    return line\n\ndef is_nice_name(name, freq):\n    if len(name) < 4:\n        return False\n    if freq > 1:\n        return False\n    return True\n\nif __name__ == '__main__':\n    histogram = {}\n    with open(INPUT, 'r') as f:\n        for line in f:\n            norm_line = normalize(line)\n            items = norm_line.split(' ')\n            for item in items:\n                if item in histogram:\n                    histogram[item] += 1\n                else:\n                    histogram[item] = 1\n\n    result = []\n    keys = sorted(histogram.keys())\n    for startup_name in keys:\n        freq = histogram[startup_name]\n        if is_nice_name(startup_name, freq):\n            result.append(startup_name)\n\n    with open(OUTPUT, 'w') as f:\n        for item in result:\n            f.write(item + '\\n')","repo_name":"tinnulion/markov_startup_name_generator","sub_path":"data_cleaner.py","file_name":"data_cleaner.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"26221000342","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport threading\nimport time\nimport os\n \ndef Get_WallHaven(getUrl, dsctUrl, num):\n    global gimagelist\n    global gCondition\n    for i in range(22, num):\n        print(i)\n        url = getUrl+str(i)\n        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n        content = requests.get(url, headers=headers, stream=True)\n        # collect the image links\n        soup = BeautifulSoup(content.text, 'html.parser').findAll('img', class_='lazyload')\n        for img in soup:\n            imgUrl = 'https://alpha.wallhaven.cc/wallpapers/full/wallhaven-' + img['data-src'][-10:]\n            imgInfo = requests.get(imgUrl, headers=headers, stream=True)\n            if imgInfo.status_code == 200:\n                gimagelist.append(imgUrl) # append to the list of image URLs to download\n                # path = dsctUrl+img['data-src'][-10:] # build the file name\n                # open(path, 'wb').write(imgInfo.content) # save the image\n    \n\ndef download_pic(url):\n    path = '/Users/yemingming/Desktop/Crawler/CrawlerWallhaven/wallhaven/'+url[-10:]\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n    mgInfo = requests.get(url, headers=headers, stream=True)\n    open(path, 'wb').write(mgInfo.content)\n    print('Image downloaded: %s ' % path[-10:], time.strftime(\"%Y-%m-%d %H:%M:%S\")) # chunk_size = 1024 # maximum size per request\n    \nclass Producer(threading.Thread):\n    def run(self):\n        print('%s started' % threading.current_thread())\n        global gimagelist\n        global gCondition\n        for j in range(1): # loops once by default; increase as you like\n            searchurl = ''\n            if searchurl not in visited: # if this page has not been visited yet\n                gCondition.acquire() # acquire the lock\n                gCondition.notify_all() # wake all waiting consumers\n                gCondition.release() # release the lock\n                visited.add(searchurl) # mark it as visited\n\n\nclass Consumer(threading.Thread):\n    def run(self):\n        print('%s started' % threading.current_thread())\n        while True:\n            global gimagelist\n            global gCondition\n            gCondition.acquire() # acquire the lock\n            while len(gimagelist)==0:\n                gCondition.wait() # wait while the list is empty\n            url=gimagelist.pop()\n            gCondition.release()\n            download_pic(url) # download the image\n    \n    \nif __name__ == '__main__':\n    # URLs of the images to download\n    gimagelist = []\n    # URLs of pages that have already been searched\n    visited = set()\n    # threading primitives\n    gCondition = threading.Condition()\n    # site to crawl\n    getUrl = 'https://alpha.wallhaven.cc/random?page=' # 'random' = random pages, 'toplist' = by rank\n    # destination folder for the files\n    dsctUrl = '/Users/yemingming/Desktop/Crawler/CrawlerWallhaven/wallhaven/'\n    # number of pages\n    num =25\n    Get_WallHaven(getUrl, dsctUrl, num)\n    Producer().start()\n    for i in range(10): # ten consumer threads\n        
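# each Consumer thread pops image URLs from the shared list and downloads them\n        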
Consumer().start()","repo_name":"TonyITing/learngit","sub_path":"crawler/CrawlerWallhaven/GetWallhaven.py","file_name":"GetWallhaven.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"40661958776","text":"from classes import Role, User, ActionType, Resource\n\nclass First():\n role1 = Role()\n role1.create_role(\"Admin\", {\"Resource1\": [1, 2, 3], \"Resource2\": [1, 2]})\n role2 = Role()\n role2.create_role(\"Customer\", {\"Resource1\": [1], \"Resource3\": [1, 2]})\n role3 = Role()\n role3.create_role(\"Partner\", {\"Resource2\": [1, 2]})\n\n user = User()\n user.create_user(\"Admin\")\n user.assign_role(role1)\n user.display_user()\n\n user1 = User()\n user1.create_user(\"User1\")\n user1.assign_role(role2)\n user1.display_user()\n\n user2 = User()\n user2.create_user(\"User2\")\n user2.assign_role(role3)\n user2.display_user()","repo_name":"aditikr158/Role-Based-Access-Control","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"32926002976","text":"import wx\nimport os\nfrom CalendarDialog import CalendarDialog\n\nclass DateTimeInputGroup(wx.Panel):\n def __init__(self, parent, ID=-1, inputLabel=\"\", topFrame=None):\n self.topFrame = topFrame\n wx.Panel.__init__(self, parent, ID)\n\n inputGroupSizer = wx.GridBagSizer(hgap=5, vgap=5)\n inputLabel = wx.StaticText(self, wx.ID_ANY, inputLabel)\n self.inputText = wx.TextCtrl(self, wx.ID_ANY)\n png = wx.Image(os.path.join(os.path.dirname(__file__), \"img\", \"Calendar.png\"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()\n inputButton = wx.BitmapButton(self, wx.ID_ANY, png)\n inputGroupSizer.Add(inputLabel, pos=(0, 0))\n inputGroupSizer.Add(self.inputText, pos=(1, 0), flag=wx.EXPAND)\n inputGroupSizer.Add(inputButton, pos=(1, 1))\n inputGroupSizer.AddGrowableCol(0)\n\n self.Bind(wx.EVT_BUTTON, self.openCalendar, inputButton)\n\n self.SetSizer(inputGroupSizer)\n\n self.calendar = CalendarDialog()\n\n def openCalendar(self, event):\n self.topFrame.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n if self.calendar.ShowModal() == wx.ID_OK:\n self.inputText.SetValue(self.calendar.dateTime.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n self.topFrame.ToggleWindowStyle(wx.STAY_ON_TOP)\n\n @property\n def value(self):\n return self.calendar.dateTime","repo_name":"UGAROY/arcpy-playground","sub_path":"pythonaddins/addins/IntersectionManager/Install/components/DateTimeInputGroup.py","file_name":"DateTimeInputGroup.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"19598707419","text":"# LOGGER CLASS\n# The data storage is separated from the rocket class and integrated via the Simulation class\n# There is a basic version that only stores a csv file, under Logger, and there a few additional print statements under FeedbackLogger\n\nimport numpy as np\nimport pandas as pd\n\nfrom copy import deepcopy, copy\n\nfrom lib.presetObject import PresetObject\nfrom lib.data import nested_dictionary_lookup, force_save\nfrom lib.general import magnitude\nfrom lib.logging.logger_features import Feature, feature_time\n\n\nclass Logger(PresetObject):\n \"\"\"\n Logs only the src.data.\n Should be hooked into the Simulation object, but a reference also has to be set in the Rocket object.\n \n Stores an array of objects, since I think that is slightly 
better than appending to a dataframe.\n \"\"\"\n\n def __init__(self, simulation, **kwargs):\n # The logger has a reference to the simulation, since that is where it gets the data from.\n # However, the simulation controls the logger in pretty much all other ways.\n self.simulation = simulation\n self.splitting_arrays = True\n self.features: set[Feature] = set([feature_time])\n # This should probably be overridden in custom subclasses, like one for rocket and motor.\n self.full_path = \"./output.csv\"\n\n super().overwrite_defaults(**kwargs)\n\n self.rows = []\n self.current_row = {}\n\n def copy(self):\n # Hopefully this is being called from the simulation and the rocket I am about to make gets overridden (This comment exists from a time when I was working on the Goddard Problem Optimization)\n return deepcopy(self)\n\n def add_items(self, data):\n \"\"\"\n Update the current row of data, which should eventually be saved by save_row\n \"\"\"\n if self.splitting_arrays:\n k = list(data.keys())[0]\n v = list(data.values())[0]\n\n\n if isinstance(v, np.ndarray):\n if len(v) != 0:\n data = {}\n index = 1\n for item in v:\n data[k + str(index)] = item\n\n index += 1\n\n self.current_row.update(data)\n\n def save_row(self):\n self.rows.append(self.current_row)\n self.current_row = {}\n\n def handle_frame(self):\n \"\"\"\n This is the only thing that needs to be run for the logger to work in the Simulation class (also the save_to_csv)\n \"\"\"\n for feature in self.features:\n try:\n value = copy(nested_dictionary_lookup(self.simulation, feature.path))\n except Exception as e:\n value = None\n print(f\"Could not find value for {feature}\")\n print(e)\n \n self.add_items({feature.get_label(): value})\n\n self.save_row()\n\n def get_dataframe(self):\n df = pd.DataFrame(self.rows)\n\n try:\n # Rather than using the index (0, 1, 2, 3, 4...), I will index the rows by the time the row is recorded at\n df.set_index(feature_time.get_label(), inplace=True)\n except KeyError as e:\n print(\"Attempted to create dataframe, but there was no time index. 
Likely, the simulation did not make it past one frame, and no time was ever logged.")\n        \n        return df\n\n    def save_to_csv(self):\n        df = self.get_dataframe()\n        force_save(df, self.full_path)\n    \n    \n\n    def reset(self):\n        """\n        Reinitialize the Logger object\n        """\n        # features must be passed as a keyword argument; __init__ only accepts **kwargs\n        self.__init__(self.simulation, features=self.features)\n\nclass FeedbackLogger(Logger):\n    def print(self, statement):\n        if self.verbose:\n            print(statement)\n\n    def __init__(self, logging_object, **kwargs):\n        self.verbose = True\n\n        super().__init__(logging_object)\n\n        self.partial_debugging = True\n        self.debug_every = 10 # seconds\n        self.last_debugged = 0\n\n        self.overwrite_defaults(**kwargs)\n\n        self.print("Logger is prepared to run simulation")\n\n    def display_partial_data(self):\n        print(f"We are {self.simulation.time} seconds through the simulation")\n        self.last_debugged += self.debug_every\n\n    def handle_frame(self):\n        if self.partial_debugging and self.simulation.time > self.last_debugged + self.debug_every:\n            self.display_partial_data()\n\n\n        return super().handle_frame()\n\n    \n    def save_to_csv(self):\n        super().save_to_csv()\n\n        self.print(f"Saved the trial to csv at {self.full_path}")\n\n","repo_name":"rkuhlf/rocket-simulation","sub_path":"lib/logging/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"80"} +{"seq_id":"2191354273","text":"from classes.ninja import Ninja\nfrom classes.dolphin import Dolphin\n\nmike = Ninja(\"Mike\")\n\ngerald = Dolphin(\"Gerald\", 8, 100)\ntimothy = Dolphin(\"Timothy\", 4, 40)\n\nfinn = Dolphin(\"Finn\", 30, 150)\n\n\ndef battle(ninja, dolphin):\n    # print(\"Battling!\")\n    dolphin.show_stats()\n    ninja.show_stats()\n    while dolphin.health > 0 and ninja.health > 0 :\n        print(f\"{ninja.name} is attacking {dolphin.name}!\")\n        ninja.attack(dolphin)\n        print(f\"{dolphin.name}'s Health: {dolphin.health}\")\n        if dolphin.health > 0:\n            print(f\"{dolphin.name} is attacking {ninja.name}!\")\n            dolphin.attack(ninja)\n            print(f\"{ninja.name}'s Health: {ninja.health}\")\n    \n    if dolphin.health>ninja.health:\n        dolphin.show_stats()\n        print(f\"{dolphin.name} wins!\")\n    \n    else: \n        ninja.show_stats()\n        print(f\"{ninja.name} wins!\")\n    \n\nbattle(mike, timothy)\nbattle(mike, gerald)\nbattle(mike, finn)\n\n","repo_name":"JKwon-37/Ninjas-vs-Dolphins","sub_path":"group_game.py","file_name":"group_game.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"8614667989","text":"\"\"\"\nCreated on Nov 22, 2017\n\n@author: nhan.nguyen\n\nContaining classes to catch the log on console and write it to a file.\n\"\"\"\n\nimport sys\nimport os\nimport time\nimport errno\nimport logging\nfrom .result import Status\nfrom .constant import Colors\n\n\nclass Printer(object):\n    \"\"\"\n    Class that writes content to several files.\n    Use this class when you want to write the log not only to the console but also to some other files.\n    \"\"\"\n    def __init__(self, *files):\n        self.files = files\n\n    def write(self, content):\n        \"\"\"\n        Write a content into several files.\n        :param content: (optional) content you want to write.\n        \"\"\"\n        for f in self.files:\n            f.write(content)\n            f.flush()  # flush so the content is displayed immediately in the file\n\n    def flush(self):\n        \"\"\"\n        Make the content in the buffer display immediately on files.\n        \"\"\"\n        for f in self.files:\n            f.flush()\n\n\nclass Logger:\n    \"\"\"\n    Catch the log written by 
Python on console.\n    \"\"\"\n    __log_dir = os.path.join(os.path.dirname(__file__), \"..\") + \"/test_output/log_files/\"\n    __KEEP_LOG_FLAG = \"-l\"\n    __LOG_LVL = logging.DEBUG\n\n    def __init__(self, test_name: str):\n        Logger.__init_log_folder()\n        self.__log_file_path = \"{}{}_{}.log\".format(Logger.__log_dir, test_name,\n                                                    str(time.strftime(\"%Y-%m-%d_%H-%M-%S\")))\n        self.__log = open(self.__log_file_path, \"w\")\n        self.__original_stdout = sys.stdout\n        sys.stdout = Printer(sys.stdout, self.__log)\n        logging.basicConfig(stream=sys.stdout, level=Logger.__LOG_LVL)\n\n    def save_log(self, test_status: str = Status.FAILED):\n        \"\"\"\n        If \"-l\" exists in sys.argv or test_status is Failed, then the log file is kept.\n        If test_status is Passed and \"-l\" is missing from sys.argv, then the log file is deleted.\n\n        :param test_status: Passed or Failed.\n        \"\"\"\n        self.__log.close()\n        sys.stdout = self.__original_stdout\n        if test_status == Status.PASSED and Logger.__KEEP_LOG_FLAG not in sys.argv:\n            if os.path.isfile(self.__log_file_path):\n                os.remove(self.__log_file_path)\n                print(Colors.OKBLUE + \"\\nLog file has been removed\\n\" + Colors.ENDC)\n            return\n\n        if os.path.isfile(self.__log_file_path):\n            print(Colors.OKBLUE + \"\\nLog file has been kept at: {}\\n\".format(self.__log_file_path) + Colors.ENDC)\n\n    @staticmethod\n    def __init_log_folder():\n        \"\"\"\n        Create the log_files folder if it does not exist.\n\n        :raise OSError.\n        \"\"\"\n        try:\n            os.makedirs(Logger.__log_dir)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise e\n","repo_name":"NgoAnhKhoi/pull-request","sub_path":"indy-tests/libraries/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"10328862418","text":"from Malt.GL import Texture\nfrom Malt.GL.GL import *\n\nTEXTURES = {}\n\ndef load_texture(msg):\n    name = msg['name']\n    data = msg['buffer'].buffer()\n    resolution = msg['resolution']\n    channels = msg['channels']\n    sRGB = msg['sRGB']\n\n    internal_formats = [\n        GL_R32F,\n        GL_RG32F,\n        GL_RGB32F,\n        GL_RGBA32F,\n    ]\n    pixel_formats = [\n        GL_RED,\n        GL_RG,\n        GL_RGB,\n        GL_RGBA\n    ]\n    internal_format = internal_formats[channels-1]\n    pixel_format = pixel_formats[channels-1]\n    \n    if sRGB:\n        if channels == 4:\n            internal_format = GL_SRGB_ALPHA\n        else:\n            internal_format = GL_SRGB\n\n    #Nearest + Anisotropy seems to yield the best results with temporal super sampling\n    TEXTURES[name] = Texture.Texture(resolution, internal_format, GL_FLOAT, data, pixel_format=pixel_format, \n        wrap=GL_REPEAT, min_filter=GL_NEAREST_MIPMAP_NEAREST, build_mipmaps=True, anisotropy=True)\n\nGRADIENTS = {}\n\ndef load_gradient(name, pixels, nearest):\n    GRADIENTS[name] = Texture.Gradient(pixels, len(pixels)/4, nearest_interpolation=nearest)\n","repo_name":"bnpr/Malt","sub_path":"Bridge/Texture.py","file_name":"Texture.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":889,"dataset":"github-code","pt":"80"} +{"seq_id":"70177042179","text":"\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nimport psycopg2\n\nfrom bmstu_lab_m.models import Cargo\n#from bmstu_lab_m.models import CargoOrder\n#from bmstu_lab_m.models import DeliveryOrders\n#from bmstu_lab_m.models import Users\n\n\n\n'''Requests to deliver cargo to Mars on a Starship.\nServices are the goods delivered to Mars on a Starship;\n requests are orders for a specific volume of goods\n'''
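# --- Editor's note: an illustrative alternative (an assumption, not code from
# this project) to the manual substring loop in GetAllCargo below: Django can
# apply the title filter inside the query itself. ---
def _search_cargo(term):
    # hypothetical helper: case-insensitive substring match done by the database
    return Cargo.objects.filter(is_deleted=False, title__icontains=term)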
\n\n\ndef GetAllCargo(request):\n    \n    res=[]\n    input_text = request.GET.get(\"good_item\")\n    data = Cargo.objects.filter(is_deleted=False)\n    \n    if input_text is not None:\n        for elem in data:\n            \n            if input_text in elem.title:\n                res.append(elem)\n            #print(elem)\n        return render(\n            request,'all_cargo.html', {'data' : {\n                'items' : res,\n                'input' : input_text\n            } }\n        )\n        \n    \n    return render(\n        request,'all_cargo.html', {\n            'data' :\n            {\n                'items' : data\n            }\n        }\n        \n    )\n\ndef GetCurrentCargo(request, id):\n    data = Cargo.objects.filter(id_cargo=id)\n    \n    # result ={}\n    # for i in arr:\n    #     if i['id'] == id:\n    #         result = i\n    return render(request, 'current_cargo.html', \n                {'data' : {\n                    'item' : data[0]\n                }}\n            )\n\n\nfrom django.urls import reverse\n\n@csrf_exempt\ndef DeleteCurrentCargo(request):\n    if request.method == 'POST':\n        \n        id_del = request.POST.get('id_del') # works; just need to hook up the DB in all_cargo\n\n\n        conn = psycopg2.connect(dbname=\"starship_delivery\", host=\"127.0.0.1\", user=\"postgres\", password=\"1111\", port=\"5432\")\n        cursor = conn.cursor()\n        cursor.execute(f\"update cargo set is_deleted = true where id_cargo = {id_del}\")\n        conn.commit() # actually executes the sql commands\n        cursor.close()\n        conn.close()\n\n        redirect_url = reverse('all_cargo') \n        return HttpResponseRedirect(redirect_url)\n    \n\n","repo_name":"Summit2/WEB_5_semester","sub_path":"bmstu_lab_m/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"80"} +{"seq_id":"13527164905","text":"from collections import defaultdict\n\nimport numpy as np\n\nfrom src.routing_algorithms.BASE_routing import BASE_routing\nfrom src.routing_algorithms.georouting import GeoRouting\nimport random\nimport src.utilities.utilities as util\n\n\nclass AIPath(BASE_routing):\n    exploration = 0\n    exploitation = 0\n\n    def __init__(self, drone, simulator):\n        BASE_routing.__init__(self, drone, simulator)\n        # random generator\n        self.rnd_for_routing_ai = np.random.RandomState(self.simulator.seed)\n        self.taken_actions = {}  # id event : (old_action)\n        self.epsilon = 0.05\n        self.n = [0, 0]\n        self.q_value = [0, 0]\n        self.dictionary = defaultdict(list)  # {id_event : [drones already rewarded]}\n        self.expired_packets = defaultdict(list)  # {id_event : [drones that expired packet id_event]}\n\n\n    def feedback(self, drone, id_event, delay, outcome):\n        \"\"\" return a possible feedback, if the destination drone has received the packet \"\"\"\n\n        if id_event not in self.taken_actions:\n            return None\n\n        # reward_m is a multiplier for the reward\n        action, reward_m = self.taken_actions[id_event]\n        reward = (self.simulator.event_duration - delay) * reward_m\n        self.n[action] += 1\n        self.q_value[action] = self.q_value[action] + ( (reward - self.q_value[action]) / self.n[action] )\n\n    def relay_selection(self, opt_neighbors, pkd):\n        # epsilon-greedy selection of the action\n        # 1) case epsilon: we take a random action\n        if random.uniform(0, 1) < self.epsilon:\n            action = random.randint(0, 1)\n        # 2) case 1 - epsilon: we take the greatest q-value\n        else:\n            action = np.argmax(self.q_value) # numpy's argmax gives the index of the max of the q_value array\n\n        drone_to_send = None\n\n        # action 1: keep the packet\n        if action == 1:\n            AIPath.exploitation += 1\n            drone_to_send = None\n        # action 0: hand the packet off with GeoRouting and check whether packets expired on the chosen drone
\n        else:\n            AIPath.exploration += 1\n            drone_to_send = GeoRouting.relay_selection(self, opt_neighbors, pkd)\n\n        if drone_to_send is not None:\n            # If the recipient is a ferry\n            if drone_to_send.identifier <= 2:\n                # If it's headed to the depot\n                if len(drone_to_send.waypoint_history) > 0 and drone_to_send.waypoint_history[-1] != self.simulator.depot_coordinates:\n                    reward_m = 10\n                else:\n                    reward_m = 8\n\n            # If the recipient is a routing drone\n            elif drone_to_send.identifier > 2: \n                # If a ferry sends a packet to a routing drone\n                if self.drone.identifier <= 2: \n                    # If it's headed to the depot\n                    if len(drone_to_send.waypoint_history) > 0 and drone_to_send.waypoint_history[-1] == drone_to_send.path[-2]:\n                        reward_m = 10\n                    else:\n                        reward_m = 1\n                # If a routing drone sends a packet to another routing drone\n                elif self.drone.identifier > 2: \n                    # If it's headed to the depot\n                    if len(drone_to_send.waypoint_history) > 0 and drone_to_send.waypoint_history[-1] == drone_to_send.path[-2]:\n                        reward_m = 10\n                        # The recipient drone must return to the depot first\n                        if len(self.drone.path) - len(self.drone.waypoint_history) > len(drone_to_send.path) - len(drone_to_send.waypoint_history):\n                            reward_m = 5\n                    else:\n                        reward_m = 1 \n        else:\n            n_step_sender = 0\n            # Number of steps the drone that held the packet needs to reach the depot\n            if self.drone.identifier > 2:\n                n_step_sender = self.n_step(self.drone)\n            # Number of steps for the recipients\n            min_n_step = float('inf')\n            for pkd_id, drone in opt_neighbors:\n                n_step_recipient = self.n_step(drone)\n                if n_step_recipient < min_n_step:\n                    min_n_step = n_step_recipient\n            \n            if n_step_sender > min_n_step:\n                reward_m = 10\n            else:\n                reward_m = 1\n\n        self.taken_actions[pkd.event_ref.identifier] = (action, reward_m)\n\n        return drone_to_send\n\n\n\n    def n_step(self, drone):\n        # IF I AM A ROUTING DRONE\n        if drone.identifier > 2:\n            # Number of steps required to reach the depot\n            # If it is still the first pass over the path\n            if len(drone.waypoint_history) <= len(drone.path):\n                indx = len(drone.waypoint_history)\n                n_step = len(drone.path) - indx\n            # If it's not the first round of the path\n            else:\n                n_iter = len(drone.waypoint_history) % len(drone.path) # MODULO\n                indx = int(len(drone.waypoint_history)/n_iter)\n                n_step = len(drone.path) - indx\n        # IF I AM A FERRY\n        else:\n            n_step = 1\n            # It is heading for the depot\n            if drone.waypoint_history[-1] != self.simulator.depot_coordinates:\n                n_step = 0\n\n        return n_step\n\n\n    def print(self):\n        \"\"\"\n        This method is called at the end of the simulation; it can be useful to print some\n        metrics about the learning process\n        \"\"\"\n        print(\"How many times did we explore and exploit?\")\n        print(\"Exploration -> \", AIPath.exploration)\n        print(\"Exploitation -> \", AIPath.exploitation)\n","repo_name":"AndreaBe99/autonoumus-networking-homework","sub_path":"HW_1/ai_routing.py","file_name":"ai_routing.py","file_ext":"py","file_size_in_byte":5813,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"80"} +{"seq_id":"40048708478","text":"def solution(skill, skill_trees):\n    ans = 0\n    set_s = set(skill)\n    for st in skill_trees:\n        idx = 0\n        for s in st:\n            if s not in set_s: continue\n            if idx == len(skill): # all required skills learned; a later occurrence must not index skill[idx]\n                break\n            if s == skill[idx]: \n                idx += 1\n            else:\n                idx = -1\n                break \n        if idx != -1:\n            ans += 1\n    return ans\n\n'''\nidx : position of the next required skill to match, initialized to skill[0]\nwhile scanning each skill tree\n- if a skill is not contained in skill, continue\n- if a skill differs from skill[idx], the skill order is violated, so break\n'''","repo_name":"Seunghwa-Han/School","sub_path":"Coding Test/programmers/스킬트리.py","file_name":"스킬트리.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
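# --- Editor's note: a quick, self-contained sanity check (not part of the
# original solution file) of the subsequence logic above, using the well-known
# sample case for this problem: only "CBADF" and "AECB" visit C, B, D in the
# required order. ---
assert solution("CBD", ["BACDE", "CBADF", "AECB", "BDA"]) == 2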
 +{"seq_id":"22710238157","text":"from xml.dom import minidom\nfrom Nodo import *\nclass Colocar :\n    letras=\"\"\n    lista5 = Lista()\n    numerod=Lista()\n    def ordenarMatriz(self,datow):\n        n =\"\";\n        h=0;\n        h2=0\n        po=0\n        r=0\n        contador=0\n        m=\"\"\n        mini = minidom.parse(datow)\n        nombres = mini.getElementsByTagName(\"matriz\")\n        datos = mini.getElementsByTagName(\"dato\")\n        nombres2 = mini.getElementsByTagName(\"matriz\")\n        for nom in range(len(nombres)):\n            n = n+nombres[nom].attributes[\"nombre\"].value+\",\"\n        d = n[:-1]\n        y = d.split(\",\")\n        y = list(dict.fromkeys(y))\n\n        for u in y :\n            r=r+1\n            \n            self.lista5.insertar(u)\n            for nomd in nombres:\n                if(u == nomd.attributes[\"nombre\"].value):\n                    if(h == 0):\n                        m=m+str(h2)+\",\"\n                        po=po+1\n                        h=h+1;\n                    else:\n                        h2=h2+1\n            \n            h=0\n        h2=0\n        i=0;\n        letras = m[:-1].split(\",\")\n        self.numerod.insertar(r)\n        r=0\n        return letras\n    \n    def nombres(self):\n        return self.lista5 \n\n    def cantidad(self):\n        return self.numerod\n    ","repo_name":"Kevin-Contreras/-IPC2_Proyecto1_201900157","sub_path":"COLOCAR.PY","file_name":"COLOCAR.PY","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
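# --- Editor's note: a small, self-contained aside (not part of either
# neighboring file) illustrating the two approaches contrasted in the next
# record: itertools.permutations yields tuples, which is why the solution
# optionally maps them to lists. ---
import itertools
assert list(itertools.permutations([1, 2])) == [(1, 2), (2, 1)]
assert list(map(list, itertools.permutations([1, 2]))) == [[1, 2], [2, 1]]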
 +{"seq_id":"6880365643","text":"import itertools\nfrom typing import *\n# https://leetcode.com/problems/permutations/\n\n\"\"\"\nInput: nums = [1,2,3]\nOutput: [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]\n\nConstraints:\n\n1 <= nums.length <= 6\n-10 <= nums[i] <= 10\nAll the integers of nums are unique.\n\"\"\"\nclass Solution:\n    def permute(self, nums: List[int]) -> List[List[int]]:\n        # sol_2-book: use the itertools module\n        return list(itertools.permutations(nums))\n\n        # sol_2-b_mine: \n        # each elem of the output below is a tuple, but LeetCode accepts it as correct\n        return list(itertools.permutations(nums))\n        # return list(map(list, itertools.permutations(nums)))\n        \n        # sol_1-book: generate permutations with DFS\n        results = []\n        prev_elements = []\n        \n        def dfs(elements):\n            # at a leaf node, add the result\n            if len(elements) == 0:\n                ### without copying via [:], this stores a reference; later changes to prev_elements would affect the elements of results\n                results.append(prev_elements[:])\n\n            # recursive call to generate permutations\n            for e in elements:\n                next_elements = elements[:]\n                next_elements.remove(e)\n\n                prev_elements.append(e)\n                dfs(next_elements)\n                prev_elements.pop()\n        dfs(nums)\n        return results\n\n        # mine: brute-force for now\n        res: List[List[int]] = []\n        def btr(nums:List[int], curr:List[int]=[]):\n            if not nums:\n                res.append(curr)\n                return\n            for i in nums:\n                btr([n for n in nums if n != i], [*curr, i])\n        btr(nums)\n        return res","repo_name":"vanism2091/Algorithm_py","sub_path":"book/Python_Alg_Interview/ch12.graph/34_46.py","file_name":"34_46.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32302493011","text":"import numpy as np\n\n\ndef ndarray():\n    # create a 51x51 array filled with zeros\n    arr = np.zeros((51, 51))\n    # create an array with the elements 0 through 50\n    val = np.arange(51)\n    # fill the diagonal of the matrix with the values provided in the array val\n    np.fill_diagonal(arr, val)\n    return arr\n","repo_name":"emirand20/R_M07_UF02_practica9","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70297571437","text":"def decompose_from_string(str_number):\n    for i, digit in enumerate(list(str_number.strip()), start=1):\n        print(f\"digit {i} equals {digit}\")\n\n\ndef decompose_from_number(int_number):\n    digits = []\n    base = 10\n    while int_number:\n        digits.insert(0, int_number % base)\n        int_number //= base\n    if not digits:\n        digits.append(0)\n    for i, digit in enumerate(digits, start=1):\n        print(f\"digit {i} equals {digit}\")\n\n\nwhile True:\n    str_number = input('Enter a number: ')\n    try:\n        int_number = int(str_number)\n    except ValueError:\n        print('Invalid format of number, please try again')\n        continue\n    break\ndecompose_from_string(str_number)\nprint('***************')\ndecompose_from_number(int_number)\n","repo_name":"IlyaOrlov/PythonCourse","sub_path":"Practice/a_gubin/PyLec_4/DecomposeNumber.py","file_name":"DecomposeNumber.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"39689157698","text":"import numpy as np\n\ndef Gauss_Jordan_Elimination(X):\n    # declare dimension\n    N = 3\n\n    # k iterates over the pivot rows\n    for k in range(N):\n\n        # j iterates over the columns\n        for j in range(k + 1, N + 1):\n            X[k][j] = X[k][j] / X[k][k]\n\n            # i iterates over the rows\n            # zero out column k except the [k][k] element\n            for i in range(N):\n                if i != k:\n                    X[i][j] = X[i][j] - X[k][j] * X[i][k]\n    return X\n\nif __name__ == '__main__':\n\n    #A = np.array([[2, 3, 4], [3, 5, 2], [4, 3, 30]], dtype=float)\n    #B = np.array([[6], [5], [32]], dtype=float)\n    A = np.array([[7, 1, 2], [1, 8, 3], [2, 3, 9]], dtype=float)\n    B = np.array([[10], [8], [6]], dtype=float)\n\n    # prepare the augmented matrix for calculation\n    X = np.hstack([A, B])\n    print(X)\n\n    X = Gauss_Jordan_Elimination(X)\n\n    print(f'x1 = {X[0][3]}, x2 = {X[1][3]}, x3 = {X[2][3]}')\n    print(X)\n","repo_name":"Yuu-taremayu/school","sub_path":"simuration/class_7_10/Gauss-Jordan_Elimination.py","file_name":"Gauss-Jordan_Elimination.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"41452918142","text":"import os\nimport os.path as osp\nimport logging\nimport 
time\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom model import Resnet as ResNet20\nfrom model import MobileNetV2_FaceNet as MobileFaceNet\nfrom metric import ArcFace,DenseClassifier,NormLinear\nfrom loss import FocalLoss\nfrom dataset import load_data\nfrom config import Config as conf\nimport test\n\nprint(\"Import OK\")\n\ndef add_weight_decay(net,weight_decay, skip_list=()):\n decay = []\n no_decay = []\n for name ,param in net.named_parameters():\n if not param.requires_grad: continue # skip frozen weights\n if len(param.shape) == 1 or name in skip_list:\n #print(name)\n no_decay.append(param)\n else:\n decay.append(param)\n return [{'params' : no_decay, 'weight_decay' : 0.0},\n {'params' : decay, 'weight_decay' : weight_decay}]\n\n\ndef get_logger(filename, verbosity=1, name=None):\n level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}\n formatter = logging.Formatter(\n \"[%(asctime)s][%(filename)s] %(message)s\"\n )\n logger = logging.getLogger(name)\n logger.setLevel(level_dict[verbosity])\n\n fh = logging.FileHandler(filename, \"w\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n\n return logger\n\nclass Train():\n def __init__(self, ConfigTable):\n \"\"\" Train on pytorch:\n Step1: Create a dataloader to prepare train data.\n Step2: Create a logger to write train imfomartion to your .log.\n Step3: Choose your network backbone, metric function, loss function.\n Step4: In pytorch the weight_decay will be applied to bias, batchnorm layer, change weight_decay to 0 at these params.\n Step5: Define the optimizer eg. SGD, Adam\n Step6: Now, let's start your train\n \"\"\"\n self.config = ConfigTable # Config table for train including lr_step, checkpoints\n os.makedirs(self.config.checkpoints_path, exist_ok = True)\n nowtime = time.strftime(\"%Y-%m-%d-%H:%M:%S\", time.localtime())\n self.logger = get_logger(\"./\"+ nowtime +\"-train.log\")\n self.logger.info('Successfully create train log file at \\'./{}-train.log\\''.format(nowtime))\n # dataloader\n self.dataloader, self.class_num = load_data(self.config, training=True)\n self.embedding_size = self.config.embedding_size\n self.device = self.config.device\n self.logger.info('Successfully create DataLoader. 
In data the total of class numbers is {} and embedding size is {}'.format(self.class_num,self.embedding_size))\n        # generate network\n        if self.config.backbone == 'myfmobile':\n            self.net = MobileFaceNet(self.embedding_size).to(self.device) # Create net and copy net tensor to the GPU, do it before loading data\n            self.logger.info(\"Network backbone is {}\".format(self.config.backbone))\n            self.logger.info(\"{}\".format(self.net))\n        elif self.config.backbone == 'resnet20':\n            self.net = ResNet20().to(self.device)\n            self.logger.info(\"Network backbone is {}\".format(self.config.backbone))\n            self.logger.info(\"{}\".format(self.net))\n\n        if self.config.metric == 'arcface':\n            self.metric = ArcFace(self.embedding_size, self.class_num).to(self.device)\n            self.logger.info(\"Metric function is {}\".format(self.config.metric))\n            self.logger.info(\"{}\".format(self.metric))\n\n        elif self.config.metric == 'softmax':\n            self.metric = DenseClassifier(self.embedding_size, self.class_num).to(self.device)\n            self.logger.info(\"Metric function is {}\".format(self.config.metric))\n            self.logger.info(\"{}\".format(self.metric))\n\n        elif self.config.metric == 'normlinear':\n            self.metric = NormLinear(512, self.class_num).to(self.device)\n            self.logger.info(\"Metric function is {}\".format(self.config.metric))\n            self.logger.info(\"{}\".format(self.metric))\n        else:\n            self.logger.info(\"Please specify a metric\")\n            exit(0)\n        \n        self._weight_init()\n        # Send data to multiple gpu\n        self.net = nn.DataParallel(self.net)\n        self.metric = nn.DataParallel(self.metric)\n        # Remove weight_decay in batchnorm and convolution bias, refer to https://arxiv.org/abs/1706.05350\n        net_params = add_weight_decay(self.net,self.config.weight_decay)\n        metric_params = add_weight_decay(self.metric,self.config.class_wd)\n        self.parameters = net_params + metric_params\n\n        if self.config.loss == 'focal_loss':\n            self.criterion = FocalLoss(gamma = 2)\n            self.logger.info(\"Loss function is FocalLoss\")\n        else:\n            self.criterion = nn.CrossEntropyLoss()\n            self.logger.info(\"Loss function is CrossEntropyLoss\")\n        \n        if self.config.optimizer == 'sgd':\n            self.optimizer = optim.SGD(self.parameters, lr = self.config.lr, momentum=self.config.momentum, weight_decay = self.config.weight_decay)\n            self.logger.info(\"Optimizer is SGD\")\n            self.logger.info(\"{}\".format(self.optimizer))\n        else:\n            self.optimizer = optim.Adam(self.parameters,lr = self.config.lr, weight_decay=self.config.weight_decay)\n            self.logger.info(\"Optimizer is Adam\")\n            self.logger.info(\"{}\".format(self.optimizer))\n        \n    def _schedule_lr(self,optimizer):\n        # there is a bug in optim.lr_scheduler.StepLR: when the current epoch equals lr_step, the lr is not current_lr * gamma ** (current_epoch // lr_step)\n        for params in optimizer.param_groups:\n            params['lr'] = params['lr'] / self.config.lr_gamma\n        return optimizer.param_groups[0]['lr']\n\n    def _weight_init(self):\n        for op in self.net.modules():\n            if isinstance(op, nn.Conv2d):\n                nn.init.kaiming_uniform_(op.weight.data,nonlinearity=\"relu\")\n                #nn.init.kaiming_uniform_(op.weight.bias,val=0)\n            elif isinstance(op, nn.Linear):\n                nn.init.normal_(op.weight.data) # default mean=0, std =1\n                #nn.init.constant_(op.weight.bias,val=0)\n
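    # --- Editor's note: an illustrative, self-contained check (not from this
    # repo; `_probe` and `_groups` are invented names) of what add_weight_decay
    # above produces: 1-D parameters (biases, BatchNorm weights) land in the
    # zero-decay group, everything else keeps the decay. ---
    _probe = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    _groups = add_weight_decay(_probe, 5e-4)
    assert [len(g['params']) for g in _groups] == [3, 1]  # [no-decay, decay]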
    def _get_accuracy(self, output, target, topk=(1,)):\n        maxk = max(topk)\n        batch_size = target.size(0)\n\n        _, pred = output.topk(maxk, 1, True, True)\n        pred = pred.t()\n        correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n        res = []\n        for k in topk:\n            correct_k = correct[:k].view(-1).float().sum(0)\n            res.append(correct_k.mul_(100.0 / batch_size))\n        return res\n\n    def _learner(self, optimizer, model, metric, epoch, dataloader):\n        model.train()\n        for batch_idx, (inputs, labels) in enumerate(dataloader):\n            iteration = epoch * len(dataloader) + batch_idx\n            # net start to forward\n            inputs = inputs.to(self.device)\n            labels = labels.to(self.device)\n            x = model(inputs) # embedding features\n            if self.config.metric == 'arcface': \n                thetas, origin_theta = metric(x, labels)\n            else : \n                thetas = metric(x)\n            loss = self.criterion(thetas, labels) # loss function ce\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n            if (batch_idx % self.config.step_show) == 0:\n                current_lr = optimizer.param_groups[0]['lr']\n                prec1, prec5 = self._get_accuracy(origin_theta.data, labels, topk=(1,5)) if self.config.metric == 'arcface' else self._get_accuracy(thetas.data, labels, topk=(1,5))\n                self.logger.info('Epoch:[{}/{}] Iteration:{}\\t loss = {:.4f}\\t accuracy = {:.3f}\\t lr = {:f}'.format(\n                    epoch ,self.config.MAX_EPOCH, iteration, loss, prec1, current_lr))\n\n\n    def train(self,start_epoch):\n        optimizer = self.optimizer\n        model = self.net\n        metric = self.metric\n        dataloader = self.dataloader\n        test_best_acc = 0\n        best_epoch = 0\n        for epoch in range(start_epoch, self.config.MAX_EPOCH):\n            if (epoch % self.config.lr_step) == 0 and epoch !=0:\n                self._schedule_lr(optimizer)\n            \n            self._learner(optimizer, model, metric, epoch, dataloader)\n\n            if epoch >= self.config.test_step:\n                accuracy, threshold = test.test(self.config, model)\n                self.logger.info('Start to test, accuracy = {}, threshold = {}'.format(accuracy, threshold))\n                if accuracy > test_best_acc :\n                    model_name = 'checkpoint_{}.pth'.format(epoch)\n                    test_best_acc = accuracy\n                    #best_epoch = epoch\n                    checkpoint = {\n                        \"epoch\" : epoch,\n                        \"lr\" : optimizer.param_groups[0]['lr'],\n                        \"net\" : model.state_dict(),\n                        \"metric\" : metric.state_dict(),\n                        \"acc\" : test_best_acc\n                    }\n                    torch.save(checkpoint, model_name)\n                    self.logger.info('Save model to \\'{}\\''.format(model_name))\n        \n    def _resume_train(self, model_path, lr): # only for arcface, change 'metric' in config to arcface to make _learner() work\n        checkpoint = torch.load(model_path)\n        model = MobileFaceNet(self.config.embedding_size).to(self.device)\n        metric = ArcFace(self.embedding_size, self.class_num).to(self.device)\n        model = nn.DataParallel(model)\n        metric = nn.DataParallel(metric)\n\n        model.load_state_dict(checkpoint['net'])\n        \n        net_params = add_weight_decay(model,self.config.weight_decay)\n        metric_params = add_weight_decay(metric,self.config.class_wd)\n        parameters = net_params + metric_params\n        optimizer = optim.SGD(parameters, lr = lr, momentum = self.config.momentum, weight_decay = self.config.weight_decay)\n        current_lr = optimizer.param_groups[0]['lr']\n        self.logger.info('Resume train {}, metric state is arcface, basic learning rate is {:f}'.format(model_path, current_lr))\n        accuracy,threshold = test.test(self.config, model) #f\"./{self.config.checkpoints_path}/epoch{e}_{batch_idx}.pth\"\n        self.logger.info('Loading model from \\'{}\\',\\t test accuracy = {:.4f}, threshold = {:.4f}\\n'.format(model_path,accuracy, threshold))\n        self._learner(optimizer, model, metric, 0, self.dataloader) # the epoch and dataloader arguments were missing; resume from epoch 0\n\nif __name__ == \"__main__\":\n    resume_train_model = './softmax_loss_checkpoints/210000.pth'\n    #train = Train(conf)\n    #train.resume_train(conf,'./checkpoints/210000.pth','arcface',0.001)\n    #train.train(0)\n    
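    # --- Editor's note: a tiny, self-contained illustration (not in the
    # original repo; the tensor values are invented) of the top-k accuracy
    # logic used by Train._get_accuracy above. ---
    import torch
    _logits = torch.tensor([[0.1, 0.7, 0.2], [0.8, 0.1, 0.1]])  # 2 samples, 3 classes
    _labels = torch.tensor([1, 2])                              # second sample is misclassified
    _, _pred = _logits.topk(1, 1, True, True)                   # top-1 index per row
    _correct = _pred.t().eq(_labels.view(1, -1).expand_as(_pred.t()))
    print(_correct.reshape(-1).float().sum(0).item() * 100.0 / 2)  # 50.0 -> top-1 accuracy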
torch","repo_name":"Jaso0n/FaceRecognize","sub_path":"Pytorch-faceRecognize/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"34615599734","text":"import re\n\nclass Opcode:\n def __init__(self, opcode, textOld=None, textNew=None):\n self.oc = opcode\n self.textOld = textOld\n self.textNew = textNew\n\n self.type = opcode[0]\n self.startOld = opcode[1]\n self.endOld = opcode[2]\n self.startNew = opcode[3]\n self.endNew = opcode[4]\n\n def copy(self):\n return Opcode(self.oc, self.textOld, self.textNew)\n\n def getTextNew(self):\n return self.textNew[self.startNew:self.endNew]\n \n def getTextOld(self):\n return self.textOld[self.startOld:self.endOld]\n\n def joinIfNeighbor(self, otherOpcode):\n if self.isNeighboring(otherOpcode):\n return self.join(otherOpcode)\n else:\n return False\n\n def join(self, otherOpcode):\n if self.type != otherOpcode.type:\n return False\n\n self.startOld = min(self.startOld, otherOpcode.startOld)\n self.endOld = max(self.endOld, otherOpcode.endOld)\n\n self.startNew = min(self.startNew, otherOpcode.startNew)\n self.endNew = max(self.endNew, otherOpcode.endNew)\n\n return True\n\n def isNeighboring(self, otherOpcode):\n if self.type != otherOpcode.type:\n return False\n\n if self.type == 'delete':\n # empty space in new text same?\n if self.startNew == otherOpcode.startNew and self.endNew == otherOpcode.endNew:\n return True\n elif self.type == 'insert':\n # insertion point the same?\n if self.startOld == otherOpcode.startOld and self.endOld == otherOpcode.endOld:\n return True\n elif self.type == 'replace':\n # neighboring, if max. 1 space contained between both\n betweenStart = min(self.endOld, otherOpcode.endOld)\n betweenEnd = max(self.startOld, otherOpcode.startOld)\n betweenString = self.textOld[betweenStart:betweenEnd]\n\n if re.fullmatch(\"[a-zA-Z]* ?[a-zA-Z]*\", betweenString):\n return True\n\n elif self.type == 'equal':\n if self.endNew == otherOpcode.startNew or otherOpcode.endNew == self.startNew:\n return True\n\n return False\n\n def expandReplace(self):\n if self.type != 'replace':\n return\n \n while self.startNew >= 1 and self.textNew[self.startNew-1].isalpha():\n self.startNew -= 1\n self.startOld -= 1\n\n while self.endNew < len(self.textNew) and self.textNew[self.endNew].isalpha():\n self.endNew += 1\n self.endOld += 1\n\n def expandToTag(self):\n startsInTag, tagStart = self._startsInTag()\n endsInTag, tagEnd = self._endsInTag()\n\n # entirely within a tag\n if endsInTag and startsInTag:\n print(\"Dropping opcode <%s>, because it is within a tag\" % str(self))\n return []\n # contains a complete tag\n elif self._containsTag():\n print(\"Opcode contains tag: <%s>\" % str(self))\n newOpcodes = []\n tagPattern = re.compile(\"<.*>\")\n tagMatch = re.search(tagPattern, self.textNew[self.startNew:self.endNew])\n while tagMatch:\n partCode = self.copy()\n partCode.endNew = self.startNew + tagMatch.start(0)\n self.startNew = self.startNew + tagMatch.end(0)\n for pc in partCode.expandToTag():\n newOpcodes.append(pc)\n tagMatch = re.search(tagPattern, self.textNew[self.startNew:self.endNew])\n newOpcodes.append(self)\n print(\"Split up an opcode into %i, because it contained tags\" % len(newOpcodes))\n return newOpcodes\n\n elif '<' in self.getTextNew():\n text = self.getTextNew()\n index = text.find('<')\n if index == -1:\n print(\"Error: expected to find tag start in text '%s'\", text)\n return [self]\n else:\n 
self.endNew = self.startNew + index\n print(\"Clipped opcode <%s>, because it ended in a tag\" % str(self))\n return [self]\n\n elif '>' in self.getTextNew():\n text = self.getTextNew()\n index = text.find('>')\n if index == -1:\n print(\"Error: expected to find tag end in text '%s'\", text)\n return [self]\n else:\n self.startNew = self.startNew + index + 1\n print(\"Clipped opcode <%s>, because it ended in a tag\" % str(self))\n return [self]\n elif not endsInTag and not startsInTag:\n return [self]\n\n # TODO check if tag is within it and split?\n\n def _containsTag(self):\n print(self)\n text = self.textNew[self.startNew:self.endNew]\n if ('<' in text) and ('>' in text):\n return True\n else:\n return False\n\n def _startsInTag(self):\n # find tag begin or end before start\n index = self.startNew\n while index > 0:\n if self.textNew[index - 1] == '<':\n return True, index\n elif self.textNew[index - 1] == '>':\n return False, index\n index -= 1\n return False, -1\n\n def _endsInTag(self):\n index = self.endNew\n while index < len(self.textNew):\n if self.textNew[index] == '>':\n return True, index\n elif self.textNew[index] == '<':\n return False, index\n index += 1\n return False, -1\n\n def __getitem__(self, item):\n return self.oc[item]\n\n def __repr__(self) -> str:\n return self.type + \" [\" + self.textOld[self.startOld:self.endOld] + \"] -> [\" +self.textNew[self.startNew:self.endNew] + \"]\"\n\n @staticmethod\n def getOpcodeArray(opcodes, textOld=None, textNew=None):\n opcodeObjects = []\n for oc in opcodes:\n opcodeObjects.append(Opcode(oc, textOld, textNew))\n return opcodeObjects","repo_name":"Tomp0801/PolicyCheck","sub_path":"src/policyManagement/Opcodes.py","file_name":"Opcodes.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9004623523","text":"from app import app\nfrom app.item_search import search_item as search_item_in_index\nfrom flask import request, jsonify\n\n\n@app.route('/api/item')\ndef search_item():\n query_args = request.args.getlist('q')\n\n if len(query_args) == 0:\n error_message = \"\"\"\n No 'q' query args sent.\n Usage: send several search words separated by commas, like this:\n /api/item?q=word1,word2,word3\n \"\"\"\n response = jsonify(error=error_message)\n response.status_code = 400\n return response\n\n joined_words = query_args[0]\n words = joined_words.split(',')\n items = search_item_in_index(words)\n\n return jsonify([item.as_dict() for item in items])\n","repo_name":"ziggylineous/udacity-full-stack-dev","sub_path":"item_catalog/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"17891245651","text":"from application.db.people import employers as employers\n\n\nclass Employer:\n position = ''\n name = ''\n\n def __init__(self, name):\n for workers in employers:\n if name in workers['name']:\n self.position = workers['position']\n self.name = name\n self.salary = workers['salary']\n\n def give_salary(self):\n print('Зарплата начислена сотруднику {} в размере {} руб.'.format(self.name, self.salary))\n\n","repo_name":"Gruant/advanced_python","sub_path":"accounting/application/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3256717676","text":"import matplotlib.pyplot as 
plt\nimport numpy as np\n\n\ndef ranking(features, population, rank_threshold=20):\n    # sort population dict by energy\n    population_list = sorted(population.items(), key=lambda item: item[1][0])[:rank_threshold]\n    feat_indices = [list(key) for key, val in population_list]\n    frequencies, _ = np.histogram(np.array(feat_indices).flatten(), bins=len(features), range=(0, len(features)))\n\n    argsorted = np.argsort(frequencies)[::-1]\n\n    plt.plot(frequencies[argsorted], marker='x', ls='', ms=7)\n    sorted_features = list(np.asarray(features)[argsorted])\n    plt.xticks(range(len(features)), sorted_features, rotation=90)\n    plt.ylabel('frequency')\n    plt.ylim(0, None)\n    plt.grid(axis='x')\n\n    plt.gca().set_axisbelow(True)\n    plt.minorticks_off()\n\n    return sorted_features\n","repo_name":"feichtip/selanneal","sub_path":"src/selanneal/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"17552319258","text":"from csv import DictWriter\n\nimport requests\n\nurl = \"https://api.publicapis.org/entries\"\nprint(f\"Loading data from {url}\")\nresponse = requests.get(url)\ndata = response.json()\n\nfieldnames = data['entries'][0].keys()\nwith open('apis.csv', 'w') as file:\n    writer = DictWriter(file, fieldnames=fieldnames)\n    writer.writeheader()\n    writer.writerows(data['entries'])\n","repo_name":"sjeyendran24/python-foundations-3-weeks","sub_path":"problems/solutions/problem_6_data_from_api.py","file_name":"problem_6_data_from_api.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"73"} +{"seq_id":"20940301470","text":"\"\"\"\nGiven a string s containing only the characters '(', ')', '{', '}', '[' and ']', determine whether the string is valid. A valid string must satisfy:\n1. An opening bracket must be closed by a bracket of the same type.\n2. Opening brackets must be closed in the correct order.\n\"\"\"\nfrom data_structure.stack.stacks import Stack\n\n\nclass Solution:\n\n    def is_valid(self, strings):\n        stack = Stack()\n        for string in strings:\n            if string in \"([{\":\n                stack.push(string)\n            else:\n                if stack.is_empty():\n                    return False\n                elif self._match(stack.peek(), string):\n                    stack.pop()\n                else:\n                    return False\n        else:\n            if stack.is_empty():\n                return True\n            else:\n                return False\n\n    def _match(self, left_bracket, right_bracket):\n        return \"([{\".index(left_bracket) == \")]}\".index(right_bracket)\n","repo_name":"jiangpancjy/algorithm","sub_path":"data_structure/stack/questions/multiple_bracket_match.py","file_name":"multiple_bracket_match.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"1747078996","text":"#!/usr/bin/env python3\n\"\"\"/***************************************************************************\n *\n * Authors:    Ruben Sanchez Garcia\n *\n * CSIC\n *\n * This program is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 2 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with this program; if not, write to the Free Software\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n * 02111-1307 USA\n *\n * All comments concerning this program package may be sent to the\n * e-mail address 'xmipp@cnb.csic.es'\n ***************************************************************************/\n\"\"\"\n\nimport sys, os\n\nfrom subprocess import check_call\n\nfrom joblib import delayed, Parallel\nimport numpy as np\nfrom xmipp_base import *\n\nclass ScriptPreproMics(XmippScript):\n    def __init__(self):\n        XmippScript.__init__(self)\n    \n    def defineParams(self):\n        self.addUsageLine('Preprocess all mics in directory')\n        ## params\n        \n        self.addParamsLine('-i : A file that contains the paths of the input micrographs and possibly CTFs')\n\n        self.addParamsLine('-s : sampling rate of the micrographs Angstroms/pixel')\n        \n        self.addParamsLine('-d : Downsampling factor')\n\n        self.addParamsLine('-o : A path to the directory where preprocessed micrographs will be saved')\n        \n        self.addParamsLine('[--invert_contrast ] : Invert micrograph contrast')\n        \n        self.addParamsLine('[ --phase_flip ] : Apply phase flipping to the micrographs')\n        \n        self.addParamsLine('[ -t ] : Number of threads')\n\n        ## examples\n        self.addExampleLine(' xmipp_preprocess_mics -i path/to/inputs/file.txt -s 1.6 -d 4 -t 2 -o path/to/outDir')\n        self.addExampleLine(' path/to/inputs/file.txt:\\n'\n                            '#mic ctfparams\\n'\n                            'Runs/004986_XmippProtScreenDeepConsensus/extra/preProcMics/010_movie_aligned.mrc Runs/004986_XmippProtScreenDeepConsensus/tmp/010_movie_aligned.mrc.ctfParam\\n'\n                            'Runs/004986_XmippProtScreenDeepConsensus/extra/preProcMics/100_movie_aligned.mrc Runs/004986_XmippProtScreenDeepConsensus/tmp/100_movie_aligned.mrc.ctfParam\\n'\n                            'Runs/004986_XmippProtScreenDeepConsensus/extra/preProcMics/107_movie_aligned.mrc Runs/004986_XmippProtScreenDeepConsensus/tmp/107_movie_aligned.mrc.ctfParam\\n'\n                            )\n\n\n    def run(self):\n        \n        numberOfThreads= self.getIntParam('-t')\n        inputFile= self.getParam('-i')\n        samplingRate= self.getDoubleParam('-s')\n        downFactor= self.getDoubleParam('-d')\n        outDir= self.getParam('-o')\n        invert_contrast= self.checkParam('--invert_contrast')\n        phase_flip= self.checkParam('--phase_flip')\n\n        argsList=[]\n        with open(inputFile) as f:\n            for line in f:\n                if line.startswith(\"#\"):\n                    continue\n                else:\n                    lineArray= line.split()\n                    if len(lineArray)<1: continue\n                    mic_fname= lineArray[0]\n                    mic_basename= os.path.basename(mic_fname)\n                    out_name= os.path.join(outDir, mic_basename)\n                    ctf_fname= None\n                    if phase_flip:\n                        if len(lineArray)!=2:\n                            raise ValueError(\"Error, input file bad format. 
If -c option, it must have 2 cols: 'micFname ctfFname'\")\n else:\n ctf_fname= lineArray[1]\n argsList+=[ (mic_fname, samplingRate, out_name, ctf_fname, \n invert_contrast, phase_flip, downFactor)]\n Parallel(n_jobs= numberOfThreads, backend=\"multiprocessing\", verbose=1)(\n delayed(preproOneMic)(*arg) for arg in argsList)\n\n \ndef preproOneMic(mic_fname, samplingRate, out_name, ctf_fname=None, invert_contrast=False, phase_flip=False, downFactor=1):\n \"\"\" Preprocess one micrograph\n \"\"\"\n if os.path.isfile(out_name): return\n out_name_tmp= out_name+\".tmp.mrc\"\n if downFactor != 1:\n cmd = \"xmipp_transform_downsample -i %s -o %s --step %f --method fourier\" % (mic_fname, out_name_tmp, downFactor)\n print(cmd)\n check_call(cmd, shell=True)\n mic_fname = out_name_tmp\n \n if phase_flip:\n cmd = \"xmipp_ctf_phase_flip -i %s -o %s --ctf %s --sampling %f\"% (mic_fname, out_name_tmp, ctf_fname, samplingRate*downFactor)\n print(cmd)\n check_call(cmd, shell=True)\n mic_fname = out_name_tmp\n \n cmd = \"xmipp_transform_normalize -i %s -o %s --method OldXmipp\" % (mic_fname, out_name)\n if invert_contrast:\n cmd += \" --invert\"\n print(cmd)\n check_call(cmd, shell=True)\n try:\n os.remove(out_name_tmp)\n except:\n pass\nif __name__ == '__main__':\n\n exitCode=ScriptPreproMics().tryRun()\n sys.exit(exitCode)\n","repo_name":"I2PC/xmipp","sub_path":"src/xmipp/applications/scripts/preprocess_mics/preprocess_mics.py","file_name":"preprocess_mics.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"73"} +{"seq_id":"74825969835","text":"import unittest\nfrom main import Solution\n\n\nclass Test(unittest.TestCase):\n\n test_cases = [\n (\"aabcccccaaa\", \"a2b1c5a3\"),\n (\"abcdef\", \"abcdef\")\n ]\n\n\n def test_compress_string_by_brute_force(self):\n sol = Solution()\n for text, expected in self.test_cases:\n actual = sol.compress_string_by_brute_force(text)\n self.assertEqual(actual, expected)\n\n \n def test_compress_string(self):\n sol = Solution()\n for text, expected in self.test_cases:\n actual = sol.compress_string(text)\n self.assertEqual(actual, expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"aiien61/100-days-of-coding-challenges","sub_path":"Arrays_Strings/string_compression/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73371560877","text":"#!/usr/local/bin/python3 Python\n\nimport json\n\nimport sort\n\nclass Person(object):\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __repr__(self):\n return 'Person Object name : {name}, age: {age}'.format(name=self.name, age = self.age)\n\ndef object2dict(object):\n d = {}\n d['__class__'] = object.__class__.__name__\n d['__module__'] = object.__module__\n d.update(object.__dict__)\n return d\n\ndef dict2object(d):\n try:\n if '__class__' in d:\n class_name = d.pop('__class__')\n module_name = d.pop('__module__')\n module = __import__(module_name)\n class_ = getattr(module, class_name)\n args = dict((key, value) for key, value in d.items())\n inst = class_(**args)\n else:\n inst = d\n except TypeError as e:\n inst = d\n return inst\n\ndef openFile(fileName, mode):\n f = open(fileName,mode=mode)\n print('文件内容 : {fileContent}'.format(fileContent=f.read()))\n # f.truncate()\n f.write('4')\n f.close()\n\nif __name__ == '__main__':\n l = [1,2,3]\n sort.bubble(l)\n 
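    # --- Editor's note: a hedged usage sketch (not in the original file) of
    # the object2dict/dict2object pair defined above; they plug into
    # json.dumps/json.loads as `default` and `object_hook` to round-trip a
    # Person instance. ---
    p = Person('Tom', 30)
    s = json.dumps(p, default=object2dict)        # includes "__class__"/"__module__"
    restored = json.loads(s, object_hook=dict2object)
    print(restored)  # Person Object name : Tom, age: 30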
print(l)","repo_name":"AngryLi/python-note","sub_path":"server/Project/PythonDemo/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4873488372","text":"import argparse\nimport os, json\nfrom pycocotools.coco import COCO\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--coco_path\", type=str, required=True)\n parser.add_argument(\"--out_fp\", type=str, default='sub.json')\n parser.add_argument(\"--n_samples\", type=int, required=True)\n\n args = parser.parse_args()\n return args\n\ndef create_coco_subset(coco, img_ids):\n # has 3 keys: 'images', 'annotations' and 'categories'\n sub_coco = {\n 'images': [],\n 'annotations' : [],\n 'categories' : [],\n }\n\n # doesn't change format from ori json, it's just how coco returns the value in dict\n sub_coco['categories'] = [cat for key,cat in coco.cats.items()]\n\n # parse and append annotations\n for img_id in img_ids:\n img_info = coco.imgs[img_id]\n anns_ids = coco.getAnnIds(img_id)\n img_anns = coco.loadAnns(anns_ids) # returns list of dict\n\n sub_coco['images'].append(img_info)\n for img_ann in img_anns:\n sub_coco['annotations'].append(img_ann)\n \n return sub_coco\n\nif __name__=='__main__':\n # argparse\n args = parse_args()\n print(args.coco_path, args.n_samples, args.out_fp)\n print(os.getcwd())\n coco = COCO(args.coco_path)\n\n # grab first n samples\n sub_image_ids = sorted(coco.getImgIds()[:args.n_samples])\n sub_coco = create_coco_subset(coco, sub_image_ids)\n print(sub_coco.keys())\n\n with open(args.out_fp, 'w') as f:\n json.dump(sub_coco, f)","repo_name":"sandhi-artha/ex-dl","sub_path":"6/scripts/coco_subset.py","file_name":"coco_subset.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12551377765","text":"#!/usr/bin/env python3\nimport subprocess\nimport sys\nimport os\nabout = \"This script uses obspy FDSN and IRIS FetchData to download event waveforms.\"\nusage = '''\nUSAGE: python3 getWaveforms.py \n\n : a text file with 2 columns:\n 1)network 2)station\n\n : a text file with at least 7 columns:\n 1)evtDate 2)evtTime 3)evla 4)evlo 5)evdp 6)mag 7)magType\n\n is optional, in username:password format for downloading restricted data\n\n Similar to jweed event list:\n evtDate: yyyy-mm-dd\n evtTime: hh:mm:ss.sss\n'''\n# Coded by omid.bagherpur@gmail.com\n# UPDATE: 10 June 2020\n#=====Adjustable Parameters=====#\nduration = 2*60*60 # in sec\nchannels = [\"HHE\", \"HHN\", \"HHZ\"] # list of channels to download\n# list of station location codes to download; [\"\"] for no location code\nlocations = [\"\", \"00\", \"10\"]\nshift = -500 # in sec; e.g. 
if set to -1000, sac file begin time will be 1000 sec earlier than event origin time\n\nSAC = \"/usr/local/sac/bin/sac\" # path to SAC software\n# path to IRIS 'FetchData' perl script\nFetchData_Script = './FetchData-2018.337'\n# a list of data centres (see https://docs.obspy.org/packages/obspy.clients.fdsn.html)\nFDSN_data_centres = [\"IRIS\"]\n# a list in [minLat, maxLat] format (limiting station location)\nlongitude_range = [-180, 180]\n# a list in [minLon, maxLon] format (limiting station location)\nlatitude_range = [-90, 90]\n\n# Processing parameters:\n# Notes from Omid:\n# 1) rtrend command in SAC is equivalent to detrend_method='demean' here.\n# 2) taper command in SAC is equivalent to max_taper=0 here.\n# 3) 'spline' method gives the best detrending results (visually), but results highly depend on dspline parameter.\n# My tests indicate dspline=duration*10 gives the best results (hardwired in the code)\n# between 0 and 0.5; I recommend a very small number (0.001-0.01)\nmax_taper = 0.005\n# options: (1) 'spline', (2) 'polynomial' (3) 'demean', (4) 'linear'; I recommend 'spline' method\ndetrend_method = 'spline'\n# utilised only if detrend_method is either set to 'spline' or 'polynomial', I recommend 3-5\ndetrend_order = 4\n#===============================#\nos.system('clear')\nprint(about)\n\nif len(sys.argv) < 4:\n print(f\"\\nError! This Script at least requires 3 inputs.\\n{usage}\")\n exit()\nelse:\n stalist = os.path.abspath(sys.argv[1])\n eventlist = os.path.abspath(sys.argv[2])\n outdir = os.path.abspath(sys.argv[3])\n\nauthentication = False\nif len(sys.argv) == 5:\n authentication = True\n try:\n username = str(sys.argv[4]).split(':')[0]\n password = str(sys.argv[4]).split(':')[1]\n except:\n print(f\"\\nError reading !\\n{usage}\")\n exit()\n\nif not os.path.isfile(FetchData_Script):\n print(f\"Error!\\n Could not find IRIS 'FetchData' perl script\\n\\nVisit http://service.iris.edu/clients/ to download the script.\\n\\n\")\n exit()\nelse:\n FetchData_Script = os.path.abspath(FetchData_Script)\n\nif not os.path.isfile(SAC):\n print(f\"Error! Path to SAC software does not exist!\\nCheck 'Adjustable Parameters'\\n\\n\")\n exit()\n\ntry:\n import obspy\n import re\n import shutil\n import numpy as np\n from obspy import UTCDateTime\n from obspy.clients.fdsn.client import Client\n from obspy.clients.fdsn.mass_downloader import RectangularDomain, Restrictions, MassDownloader\nexcept ImportError as e:\n print(f'\\nError! {e}\\n')\n exit()\n\nif not os.path.isfile(stalist):\n print(f\"\\nError! file does not exist.\\n{usage}\")\n exit()\nelse:\n stations = []\n networks = []\n with open(stalist, 'r') as stalist:\n for line in stalist:\n try:\n networks.append(line.split()[0])\n stations.append(line.split()[1])\n except:\n print(f\"\\nError! format is not correct.\\n{usage}\")\n exit()\n\nuniq_networks = []\nfor x in networks:\n if x not in uniq_networks:\n uniq_networks.append(x)\n\n\nif not os.path.isfile(eventlist):\n print(f\"\\nError! 
file does not exist.\\n{usage}\")\n exit()\nelse:\n event_date = []\n event_time = []\n event_datetime = []\n event_origin = []\n event_timestamp = []\n event_lat = []\n event_lon = []\n event_dep = []\n event_mag = []\n event_magType = []\n with open(eventlist, 'r') as eventlist:\n for line in eventlist:\n try:\n event_date.append(f\"{line.split()[0]}\")\n event_time.append(f\"{line.split()[1]}\")\n datetime = f\"{line.split()[0]}T{line.split()[1]}\"\n utcdatetime = UTCDateTime(datetime)\n event_origin.append(utcdatetime)\n utcdatetime += shift\n event_datetime.append(utcdatetime)\n event_timestamp.append(int(event_datetime[-1].timestamp))\n event_lat.append(float(line.split()[2]))\n event_lon.append(float(line.split()[3]))\n event_dep.append(float(line.split()[4]))\n event_mag.append(float(line.split()[5]))\n event_magType.append(line.split()[6])\n except Exception as e:\n print(f\"{e}\\nError! format is not correct.\\n{usage}\")\n exit()\n\nif not os.path.isdir(outdir):\n print(f\"\\nError! directory does not exist.\\n{usage}\")\n exit()\n\n#=====FUNCTIONS=====#\n\n\n# returns 2 lists of start and end times [t1, t2] to be used in FetchData and FDSN methods\ndef event_timeRange(events, duration):\n timeRange = []\n utcTimeRange = []\n for eventStart in events:\n eventEnd = eventStart+duration\n t1 = f\"%4s-%02d-%02d,%02d:%02d:%02d.0000\" % (str(eventStart.year), eventStart.month, eventStart.day, eventStart.hour, eventStart.minute, eventStart.second)\n t2 = f\"%4s-%02d-%02d,%02d:%02d:%02d.0000\" % (str(eventEnd.year), eventEnd.month, eventEnd.day, eventEnd.hour, eventEnd.minute, eventEnd.second)\n t3 = f\"%4s-%02d-%02dT%02d:%02d:%02d.0000\" % (str(eventStart.year), eventStart.month, eventStart.day, eventStart.hour, eventStart.minute, eventStart.second)\n t4 = f\"%4s-%02d-%02dT%02d:%02d:%02d.0000\" % (str(eventEnd.year), eventEnd.month, eventEnd.day, eventEnd.hour, eventEnd.minute, eventEnd.second)\n timeRange.append([t1, t2])\n utcTimeRange.append([t3, t4])\n return timeRange, utcTimeRange\n\n\ndef getxml(sta, net, chn, outfile):\n if authentication:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -a {username}:{password} -X {outfile} -v\\n\"\n else:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -X {outfile} -v\\n\"\n subprocess.call(shell_cmd, shell=True)\n\n\ndef check_IRIS_availability(sta, net, chn, loc, t1, t2, longitude_range, latitude_range):\n # a trick to find out if data is available: If metafile is created, data is available!\n metafile = os.path.join(outdir, 'meta.tmp')\n if os.path.isfile(metafile):\n os.remove(metafile)\n if authentication:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -L {loc} -s {t1} -e {t2} --lon {longitude_range[0]}:{longitude_range[1]} --lat {latitude_range[0]}:{latitude_range[1]} -a {username}:{password} -m {metafile} -q\\n\"\n else:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -L {loc} -s {t1} -e {t2} --lon {longitude_range[0]}:{longitude_range[1]} --lat {latitude_range[0]}:{latitude_range[1]} -m {metafile} -q\\n\"\n subprocess.call(shell_cmd, shell=True)\n if os.path.isfile(metafile):\n os.remove(metafile)\n return True\n else:\n return False\n\n\ndef get_IRIS_data(sta, net, chn, loc, t1, t2, outfile, longitude_range, latitude_range):\n if authentication:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -L {loc} -s {t1} -e {t2} --lon {longitude_range[0]}:{longitude_range[1]} --lat {latitude_range[0]}:{latitude_range[1]} -a {username}:{password} 
-o {outfile} -v\\n\"\n else:\n shell_cmd = f\"perl {FetchData_Script} -S {sta} -N {net} -C {chn} -L {loc} -s {t1} -e {t2} --lon {longitude_range[0]}:{longitude_range[1]} --lat {latitude_range[0]}:{latitude_range[1]} -o {outfile} -v\\n\"\n subprocess.call(shell_cmd, shell=True)\n\n\ndef write_sac_headers(evla, evlo, evdp, stla, stlo, stel, cmpaz, cmpinc, sacfile):\n shell_cmd = [\"export SAC_DISPLAY_COPYRIGHT=0\", f\"{SAC}< 1:\n st.resample(np.round(np.unique(fs).min()))\n \n st.merge(method=1, fill_value=0)\n\n tr = st[0]\n\n # find and add event info\n for j in range(len(event_timestamp)):\n delta = abs(event_timestamp[j]-int(tr.stats.starttime.timestamp))\n if delta < 0.1*duration:\n event_index = j\n break\n\n evtDateTime = event_datetime[event_index]-shift\n event_yy = f\"%s\" % (str(evtDateTime.year)[2:])\n event_jjj = f\"%03d\" % (evtDateTime.julday)\n event_hh = f\"%02d\" % (evtDateTime.hour)\n event_mm = f\"%02d\" % (evtDateTime.minute)\n event_ss = f\"%02d\" % (evtDateTime.second)\n event_name = f\"{event_yy}{event_jjj}{event_hh}{event_mm}{event_ss}\"\n event_dir = os.path.join(sacfiles_dir, event_name)\n if not os.path.isdir(event_dir):\n os.mkdir(event_dir)\n\n tr.stats.sac = obspy.core.AttribDict()\n #tr.stats.sac.evla = np.float(event_lat[event_index])\n #tr.stats.sac.evlo = np.float(event_lon[event_index])\n #tr.stats.sac.evdp = np.float(event_dep[event_index])\n tr.stats.sac.mag = event_mag[event_index]\n tr.stats.sac.b = shift\n tr.stats.sac.o = 0\n tr.stats.sac.iztype = 11\n\n if event_magType[event_index].lower() == 'mb':\n magtype = 52\n elif event_magType[event_index].lower() == 'ms':\n magtype = 53\n elif event_magType[event_index].lower() == 'ml':\n magtype = 54\n elif event_magType[event_index].lower() == 'mw':\n magtype = 55\n elif event_magType[event_index].lower() == 'md':\n magtype = 56\n else:\n magtype = 57\n\n tr.stats.sac.imagtyp = magtype\n\n # find and add station info\n tr_stxml = f\"getWaveforms_stationxml_{tr.stats.channel}/{tr.stats.network}.{tr.stats.station}.{tr.stats.channel}\"\n stationxml_index = stationxmls.index(tr_stxml)\n inv = obspy.read_inventory(\n stationxmls[stationxml_index], format=\"STATIONXML\")\n\n if loc == \"\":\n sacfile = f\"{event_dir}/{event_name}_{tr.stats.station}.{tr.stats.channel}\"\n else:\n sacfile = f\"{event_dir}/{event_name}_{tr.stats.station}.{loc}.{tr.stats.channel}\"\n\n tr.write(sacfile, format=\"sac\")\n # write sac headers using SAC and not obspy! 
-> There were issues with saclst reading BAZ ...\n            evla = np.float(event_lat[event_index])\n            evlo = np.float(event_lon[event_index])\n            evdp = np.float(event_dep[event_index])\n            stla = np.float(inv[0][0].latitude)\n            stlo = np.float(inv[0][0].longitude)\n            stel = np.float(inv[0][0].elevation)\n            cmpaz = np.float(inv[0][0][0].azimuth)\n            cmpinc = np.float(inv[0][0][0].dip)+90\n            write_sac_headers(evla, evlo, evdp, stla, stlo,\n                              stel, cmpaz, cmpinc, sacfile)\n\nprint(\"\\n\\nDone!\\n\\n\")\n","repo_name":"omid-b/phd_thesis_codes","sub_path":"chapter1/getWaveforms/getWaveforms.py","file_name":"getWaveforms.py","file_ext":"py","file_size_in_byte":17613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"28660563884","text":"import logging\nimport pymongo\nimport os\nimport smart_forms_types\n\nclass Database:\n    __instance__ = None\n\n    def __init__(self):\n        if Database.__instance__ is not None:\n            raise Exception(\"Database instantiated twice!\")\n        Database.__instance__ = self\n\n        self.client = pymongo.MongoClient(\n            f\"mongodb+srv://\" +\n            f\"{os.environ['MONGO_USER']}:{os.environ['MONGO_PASSWORD']}\" +\n            f\"@{os.environ['MONGO_CLUSTER']}\"\n        )\n\n        self.database = self.client.get_database(os.environ[\"MONGO_DB_NAME\"])\n        logging.info(\"Connected to mongo cloud.\")\n    \n    \ndef get_database() -> Database:\n    \"\"\"\n    Returns a Database object.\n    \"\"\"\n    if Database.__instance__ is None:\n        Database()\n    return Database.__instance__\n\nFORMS = \"Forms\"\nENTRIES = \"Entries\"\nUSERS = \"Users\"\n# stores all the characters we received as feedback from the users\nCHARACTERS_DATASET = \"Characters-Datasets\"\n# stores characters used for inference\nINFERENCE_CHARACTERS = \"Inference-Characters\"\n\n\ndef get_collection(collection: str):\n    return get_database().database.get_collection(collection)\n\ndef get_form_by_id(form_id: str) -> smart_forms_types.PdfForm:\n    \"\"\"\n    Returns a form description for a given id.\n    Throws an exception if no form is found.\n    \"\"\"\n    form_dict = get_collection(FORMS).find_one({ \"formId\": form_id })\n\n    # unable to find form\n    if form_dict is None:\n        raise Exception(f\"Unable to find form {form_id} on mongo cloud!\")\n\n    form = smart_forms_types.PdfForm.from_dict(form_dict)\n    return form\n\ndef get_entry_by_id(entry_id: str) -> smart_forms_types.FormAnswer:\n    \"\"\"\n    Returns a form answer (entry) for a given id.\n    Throws an exception if no entry is found.\n    \"\"\"\n    entry_dict = get_collection(ENTRIES).find_one({ \"answerId\": entry_id })\n\n    # unable to find entry\n    if entry_dict is None:\n        raise Exception(f\"Unable to find entry {entry_id} on mongo cloud!\")\n\n    entry = smart_forms_types.FormAnswer(**entry_dict)\n    return entry\n\ndef get_user_by_email(email: str) -> smart_forms_types.User:\n    \"\"\"\n    Returns the user saved in the database for a given email.\n    Throws an exception if no user is found.\n    \"\"\"\n    user_dict = get_collection(USERS).find_one({ \"email\": email })\n\n    # unable to find user\n    if user_dict is None:\n        raise Exception(f\"Unable to find {email} in the DB\")\n\n    user = smart_forms_types.User(**user_dict)\n    return user\n\ndef update_user(user: smart_forms_types.User, create=True):\n    \"\"\"\n    Updates the user, and creates it if it doesn't exist\n    \"\"\"\n    if create:\n        get_collection(USERS).insert_one(user.dict())\n    else:\n        get_collection(USERS).replace_one({ \"email\": user.email }, 
user.dict())\n","repo_name":"TeamUnibuc/SmartForms","sub_path":"backend/sources/database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"69815968877","text":"# -*- coding:utf-8 _*-\n# coding=utf-8\n\"\"\"\n@Time: 2023/8/13 14:44\n@Auth: 王浩鹏\n@File :driver.py\n\"\"\"\nimport os\nfrom selenium import webdriver\nfrom configparser import ConfigParser\n\n\ndef get_driver():\n    config = ConfigParser()\n    config.read(os.path.join(os.path.dirname(__file__), '..\\\\config\\\\config.ini'))\n\n    browser_name = config.get('Browser', 'browser_name')\n    if browser_name.lower() == 'chrome':\n        return webdriver.Chrome()\n    # elif browser_name.lower() == 'firefox':\n    #     return webdriver.Firefox()\n    # elif browser_name.lower() == 'edge':\n    #     return webdriver.Edge()\n    else:\n        raise ValueError('Unsupported browser: {}'.format(browser_name))","repo_name":"TheOneBuff/uitest","sub_path":"utils/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"31483683845","text":"\"\"\"25.\tWAP to print the Income, Tax & Surcharge of an Employee. The Tax and Surcharge are based on the following conditions:\r\nIncome\t\t\t\t Tax %\t\tSurcharge\r\n< Rs. 15000\t\t\t 15%\t\t  7%\r\nRs. 15001 to Rs. 20000\t18% \t\t 11%\r\nAbove Rs. 21000\t\t 20%\t\t 13%\r\n[Total Income = Income - Tax - Surcharge] [Tax = (Income*Tax%)/100] Display all information like Income, Tax & Surcharge.\r\n\"\"\"\r\ns=int(input(\"enter value of sale:\"))\r\ndiscount=0\r\nsubcharge=0\r\nif s<5000:\r\n    discount=5\r\n    subcharge=5\r\nelif s<10000:\r\n    discount=10\r\n    subcharge=15\r\nelse:\r\n    discount=15\r\n    subcharge=20\r\ndc=s/100*discount\r\nsc=s/100*subcharge\r\ntotal=s-dc-sc\r\nprint(\"amount is\",s)\r\nprint(\"discount is \",dc)\r\nprint(\"subcharge is \",sc)\r\nprint(\"total amount is \",total)\r\n","repo_name":"Madhav2108/Python-","sub_path":"basic programs/abhi.py","file_name":"abhi.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"75089667755","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n        self.prev = None\n\nclass LinkedList:\n    def __init__(self, head=None):\n        self.head = head\n\n    def __str__(self):\n\n        res = \"\"\n        ptr = self.head\n\n        while ptr:\n            res += str(ptr.data) + \", \"\n            ptr = ptr.next\n\n        res = res.strip(\", \")\n\n        if len(res):\n            return \"[\" + res + \"]\"\n        else:\n            return \"[]\"\n\n    def insertion_sorted_list(self, data):\n        new_node = Node(data)\n\n        if self.head is None:\n            self.head = new_node\n        elif self.head.data >= new_node.data:\n            new_node.next = self.head\n            new_node.next.prev = new_node\n            self.head = new_node\n\n        else:\n            ptr = self.head\n\n            while ptr.next and ptr.next.data < new_node.data:\n                ptr = ptr.next\n\n            new_node.next = ptr.next\n            if ptr.next is not None:\n                new_node.next.prev = new_node\n\n            ptr.next = new_node\n            new_node.prev = ptr\n\n\n\nif __name__ == \"__main__\":\n\n    ll1 = LinkedList()\n\n    node1 = Node(10)\n    node2 = Node(20)\n    node3 = Node(30)\n\n    ll1.head = node1\n    node1.next = node2\n    node2.prev = node1\n    node2.next = node3\n    node3.prev = node2\n\n\n    print(ll1)\n\n    ll1.insertion_sorted_list(15)\n\n    
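# Two extra inserts (hypothetical values, a usage sketch rather than part\n    # of the original test): 5 lands before the current head and 40 after\n    # the tail, exercising both edge cases of insertion_sorted_list().\n    ll1.insertion_sorted_list(5)\n    ll1.insertion_sorted_list(40)\n\n    # Assuming the method behaves as written above, the print below should\n    # show [5, 10, 15, 20, 30, 40].\n    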
print(ll1)\n","repo_name":"deepanshimarsha/problem-solving","sub_path":"Hackerrank-Solutions/DataStructure/LinkedList/insertion_sortedDLL.py","file_name":"insertion_sortedDLL.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"27037326350","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 17 16:10:37 2021\n\n@author: Alejandro\n\"\"\"\n\nfrom scipy import signal\nfrom sympy import *\nimport matplotlib.pyplot as plt\nimport math as m\nfrom splane import pzmap, grpDelay, bodePlot\nimport numpy as np\nimport matplotlib as mpl\n\n## DATOS\nf = 900e6\nw = f*2*m.pi\nL = 4.7e-9\nQ = 140\nCD = 5.78e-12\nC3 = 1e-12\nC4 = 1e-12\nR1 = 25e3\nICQ = 170.72e-6\n\nformatter = mpl.ticker.EngFormatter()\nw_f = formatter(w)\nprint(\"El valor de W es\", w_f, \"1/s\")\n\nY21 = 40*ICQ\nY21_f = formatter(Y21)\nprint(\"Y21 equivalente a gm es: \",Y21_f, \"S\")\n\nRp = w*L/Q\nRp_f = formatter(Rp)\nprint(\"R pérdida serie: \", Rp_f, \"Ω\")\n\nYL = (1/(L*w))\n\nYL_f = formatter(YL)\nprint(\"El valor de Admitancia del inductor\", YL_f, \"S\")\n\nXL = 1/YL\n\nYC = (w*CD)\nXC = 1/YC\nYC_f = formatter(YC)\nprint(\"El valor de Admitancia del capacitor\", YC_f, \"S\")\n\nYt = YC - YL\nX = -(1/Yt)\n\n\n\nYt_f = formatter(Yt)\nprint(\"El valor de Admitancia paralelo total es\", Yt_f, \"S\")\n\n\n\nXL_f = formatter(XL)\nprint(\"XL @900Mhz\", XL_f, \"Ω\")\n\nXC_f = formatter(XC)\nprint(\"XC @900Mhz\", XC_f, \"Ω\")\n\nX_f = formatter(X)\nprint(\"Xtotal @900Mhz\", X_f, \"Ω\")\n\n\n\nbc3 = w*C3\nbc3_f = formatter(bc3)\nprint(\"Bc3 equivale a: \",bc3_f, \"S\")\n\n\nbc4 = w*C4\nbc4_f = formatter(bc4)\nprint(\"Bc4 equivale a: \",bc4_f, \"S\")\n\ng1 = 1/R1\ng1_f = formatter(g1)\nprint(\"g1 por R1 es \", g1_f, \"S\")\ng11 = 182.7e-6\ng11_f = formatter(g11)\nprint(\"g11 es\",g11_f, \"S\")\ng22 = 9.479e-6\ng22_f = formatter(g22)\nprint(\"g22 es\",g22_f, \"S\")\nb11 = 5.259e-3\nb11_f = formatter(b11)\nprint(\"b11 es\",b11_f, \"S\")\nb22 = 1.357e-3\nb22_f = formatter(b22)\nprint(\"b22 es\",b22_f, \"S\") \n\nG11 = g11 \nG22 = g22 + g1\nB11 = b11 + bc3\nB22 = b22 + bc4\n\nG11_f = formatter(G11)\nprint(\"G11 total es\", G11_f, \"S\")\nG22_f = formatter(G22)\nprint(\"G22 total es\", G22_f, \"S\")\nB11_f = formatter(B11)\nprint(\"B11 total es\", B11_f, \"S\")\nB22_f = formatter(B22)\nprint(\"B22 total es\", B22_f, \"S\")\n\n\narranque = G11 + G22 + ((G11*G22-B11*B22)*Rp) - ((G11*B22-G22*B11)*X)\narranque_f = formatter(arranque)\nprint(\"Condición de arranque:\", arranque_f, \"S\")\n\n\n\nfosci = B11+B22+(G11*G22-B11*B22)*X - (G11*B22-G22*B11)*Rp\nfosci_f = formatter(fosci)\nprint(\"La condición de oscilación arroja: \", fosci_f,\"S\")\n\n#formatter = mpl.ticker.EngFormatter()\n\n\nLx = symbols('Lx')\nL_equation = Eq(B11+B22+(G11*G22-B11*B22)*(1/(-(YC)+(1/(Lx*w)))) - (G11*B22-G22*B11)*Rp)\n\nsolu = solve(L_equation, force = True)\nLx_value = solu[0]\nlx_f = formatter(Lx_value)\nprint(\"El inductor equivalente es: \",lx_f, \"Hy\")\n\n\nwx = symbols('wx')\nw_equation = Eq(B11+B22+(G11*G22-B11*B22)*(1/(-(YC)+(1/(Lx_value*wx)))) - (G11*B22-G22*B11)*Rp)\n\nsolu = solve(w_equation, force = True)\nwx_value = solu[0]/(2*m.pi)\nwx_f = formatter(wx_value)\nprint(\"La frecuencia recalculada equivale a \",wx_f, \"Hz\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"AlejandroSobral/Proyecto-EA3","sub_path":"Python/Calculos Collpits.py","file_name":"Calculos 
Collpits.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35523485647","text":"ec=0\noc=0\nfor i in range(10):\n num=eval(input())\n if num%2==0:\n ec=ec+1\n else:\n oc=oc+1\n \nprint('Even numbers:', ec)\nprint('Odd numbers:', oc)","repo_name":"GMO517/practise","sub_path":"GMO/python/PYA/PYA408.py","file_name":"PYA408.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27233653307","text":"# -*- coding: utf-8 -*-\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass AlexNet(nn.Module):\n\n def __init__(self, embedding_size=1000, **kwargs):\n super(AlexNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(192),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(384),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(256),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.BatchNorm2d(256),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(4096),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(4096),\n nn.Linear(4096, embedding_size),\n )\n\n def forward_once(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n x = F.normalize(x, p=2, dim=1)\n return x\n\n def forward(self, x):\n return self.forward_once(x)\n\n\ndef get_network():\n return AlexNet\n","repo_name":"cenkbircanoglu/triplet-network-pytorch","sub_path":"models/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"37103486734","text":"import pandas as pd\r\nimport plotly.express as px\r\nimport streamlit as st \r\n\r\n\r\n# emojiwebsite: https://www.webfx.com/tools/emoji-cheat-sheet/\r\nst.set_page_config(\r\n page_title='Sales Dashboard',\r\n page_icon=':bar_chart:',\r\n layout='wide'\r\n)\r\n@st.cache_data\r\ndef get_data_from_excel():\r\n data = pd.read_excel(\r\n io='supermarkt_sales.xlsx',\r\n engine='openpyxl',\r\n sheet_name='Sales',\r\n skiprows=3,\r\n usecols='B:R',\r\n nrows=1000,\r\n )\r\n # Add 'hour' column to dataframe\r\n data[\"hour\"] = pd.to_datetime(data[\"Time\"], format=\"%H:%M:%S\").dt.hour\r\n return data\r\ndata = get_data_from_excel()\r\n# st.dataframe(data)\r\n\r\n\r\n# --- SIDEBAR ----\r\nst.sidebar.header('Silahkan pilih disini: ')\r\ncity = st.sidebar.multiselect(\r\n \"Pilih Kota: \",\r\n options=data['City'].unique(),\r\n default=data['City'].unique()\r\n)\r\n\r\ncostumer_type = st.sidebar.multiselect(\r\n \"Pilih Customer: \",\r\n options=data['Customer_type'].unique(),\r\n default=data['Customer_type'].unique()\r\n)\r\n\r\ngender = st.sidebar.multiselect(\r\n \"Pilih Jenis kelamin: \",\r\n options=data['Gender'].unique(),\r\n default=data['Gender'].unique()\r\n)\r\n\r\ndata_selection = data.query(\r\n \"City == @city & Customer_type == @costumer_type & Gender == 
@gender\"\r\n)\r\n\r\nst.dataframe(data_selection)\r\n\r\n# --- MAINPAGE ---\r\nst.title(':bar_chart: Sales Dashboard')\r\nst.markdown('##')\r\n\r\n#TOP KPI's\r\ntotal_sales = int(data_selection['Total'].sum())\r\navarage_rating = round(data_selection['Rating'].mean(), 1)\r\nstar_rating = \":star:\" * int(round(avarage_rating,0))\r\navarage_sale_by_transaction = round(data_selection['Total'].mean(),2)\r\n\r\nleft_column, middle_column, right_column = st.columns(3)\r\n\r\nwith left_column:\r\n st.subheader(\"Total Sales: \")\r\n st.subheader(f'IDR RP.{total_sales:,}')\r\nwith middle_column:\r\n st.subheader('Average Rating: ')\r\n st.subheader(f'{avarage_rating}{star_rating}')\r\nwith right_column:\r\n st.subheader('Avarage Sales Per Transaction: ')\r\n st.subheader(f'{avarage_sale_by_transaction}')\r\n \r\n# st.markdown(\"---\")\r\n \r\n# SALES BY PRODUCT LINE [BAR CHART]\r\nsales_by_product_line = data_selection.groupby(by=[\"Product line\"])[[\"Total\"]].sum().sort_values(by=\"Total\")\r\nfig_product_sales = px.bar(\r\n sales_by_product_line,\r\n x=\"Total\",\r\n y=sales_by_product_line.index,\r\n orientation=\"h\",\r\n title=\"Sales by Product Line\",\r\n color_discrete_sequence=[\"#0083B8\"] * len(sales_by_product_line),\r\n template=\"plotly_white\",\r\n)\r\n\r\nfig_product_sales.update_layout(\r\n plot_bgcolor=\"rgba(0,0,0,0)\",\r\n xaxis=(dict(showgrid=False))\r\n)\r\n# st.plotly_chart(fig_product_sales)\r\n\r\n\r\n# SALES BY HOUR [BAR CHART]\r\nsales_by_hour = data_selection.groupby(by=[\"hour\"])[[\"Total\"]].sum()\r\nfig_hourly_sales = px.bar(\r\n sales_by_hour,\r\n x=sales_by_hour.index,\r\n y=\"Total\",\r\n title=\"Sales by hour\",\r\n color_discrete_sequence=[\"#0083B8\"] * len(sales_by_hour),\r\n template=\"plotly_white\",\r\n)\r\n\r\nleft_column, right_column = st.columns(2)\r\nleft_column.plotly_chart(fig_hourly_sales, use_container_width=True)\r\nright_column.plotly_chart(fig_product_sales, use_container_width=True)\r\n\r\n# st.plotly_chart(fig_hourly_sales)\r\n\r\n\r\n# ---- HIDE STREAMLIT STYLE ----\r\nhide_st_style = \"\"\"\r\n \r\n \"\"\"\r\nst.markdown(hide_st_style, unsafe_allow_html=True)","repo_name":"Jalil98/INTERACTIVE-DASHBOARD-WITH-PYTHON-STREAMLIT","sub_path":"sales dashboard/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"15721160779","text":"import csv\nimport io\nimport os\nfrom unittest.mock import patch\nfrom uuid import uuid4\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.management import call_command\n\nfrom openwisp_users.tests.utils import TestOrganizationMixin\n\nfrom ..utils import load_model\n\n# it's 21 of April on UTC, this date is fabricated on purpose\n# to test possible timezone related bugs in the date filtering\n_TEST_DATE = '2019-04-20T22:14:09-04:00'\n_RADACCT = {\n 'username': 'bob',\n 'nas_ip_address': '127.0.0.1',\n 'start_time': '2017-06-10 10:50:00',\n 'authentication': 'RADIUS',\n 'connection_info_start': 'f',\n 'connection_info_stop': 'hgh',\n 'input_octets': '1',\n 'output_octets': '4',\n 'session_id': uuid4().int,\n}\n_CALLED_STATION_IDS = {\n 'test-org': {\n 'openvpn_config': [\n {'host': '127.0.0.1', 'port': 7505, 'password': 'somepassword'}\n ],\n 'unconverted_ids': ['AA-AA-AA-AA-AA-0A'],\n }\n}\n\nNas = load_model('Nas')\nRadiusAccounting = load_model('RadiusAccounting')\nRadiusBatch = 
load_model('RadiusBatch')\nRadiusReply = load_model('RadiusReply')\nRadiusToken = load_model('RadiusToken')\nRadiusCheck = load_model('RadiusCheck')\nRadiusGroup = load_model('RadiusGroup')\nRadiusPostAuth = load_model('RadiusPostAuth')\nRadiusUserGroup = load_model('RadiusUserGroup')\nRadiusGroupCheck = load_model('RadiusGroupCheck')\nRadiusGroupReply = load_model('RadiusGroupReply')\nOrganizationRadiusSettings = load_model('OrganizationRadiusSettings')\nUser = get_user_model()\n\n\nclass CreateRadiusObjectsMixin(TestOrganizationMixin):\n def _get_org(self, org_name='test org'):\n organization = super()._get_org(org_name)\n OrganizationRadiusSettings.objects.get_or_create(\n organization_id=organization.pk\n )\n return organization\n\n def _get_user_with_org(self):\n # Used where User model instance is required\n # but User shall be a member of 'default' org.\n self._get_org_user()\n return self._get_user()\n\n def _get_defaults(self, opts, model=None):\n options = {}\n if not model or hasattr(model, 'organization'):\n options.update({'organization': self._get_org()})\n options.update(opts)\n return options\n\n def _create_radius_check(self, **kwargs):\n options = self._get_defaults(kwargs)\n rc = RadiusCheck(**options)\n rc.full_clean()\n rc.save()\n return rc\n\n def _create_radius_accounting(self, **kwargs):\n options = self._get_defaults(kwargs)\n ra = RadiusAccounting(**options)\n ra.full_clean()\n ra.save()\n return ra\n\n def _create_radius_reply(self, **kwargs):\n options = self._get_defaults(kwargs)\n rr = RadiusReply(**options)\n rr.full_clean()\n rr.save()\n return rr\n\n def _create_nas(self, **kwargs):\n options = self._get_defaults(kwargs)\n n = Nas(**options)\n n.full_clean()\n n.save()\n return n\n\n def _create_radius_group(self, **kwargs):\n options = self._get_defaults(kwargs)\n rg = RadiusGroup(**options)\n rg.full_clean()\n rg.save()\n return rg\n\n def _create_radius_groupcheck(self, **kwargs):\n options = self._get_defaults(kwargs, model=RadiusGroupCheck)\n c = RadiusGroupCheck(**options)\n c.full_clean()\n c.save()\n return c\n\n def _create_radius_groupreply(self, **kwargs):\n options = self._get_defaults(kwargs, model=RadiusGroupReply)\n r = RadiusGroupReply(**options)\n r.full_clean()\n r.save()\n return r\n\n def _create_radius_usergroup(self, **kwargs):\n options = self._get_defaults(kwargs, model=RadiusUserGroup)\n ug = RadiusUserGroup(**options)\n ug.full_clean()\n ug.save()\n return ug\n\n def _create_radius_postauth(self, **kwargs):\n options = self._get_defaults(kwargs)\n rp = RadiusPostAuth(**options)\n rp.full_clean()\n rp.save()\n return rp\n\n def _create_radius_batch(self, **kwargs):\n options = self._get_defaults(kwargs)\n rb = RadiusBatch(**options)\n rb.full_clean()\n rb.save()\n return rb\n\n def _create_radius_token(self, **kwargs):\n options = {'user': self._get_user(), 'can_auth': True, 'key': '1234'}\n options.update(self._get_defaults(kwargs))\n radtoken = RadiusToken(**options)\n radtoken.full_clean()\n radtoken.save()\n return radtoken\n\n\nclass PostParamsMixin(object):\n def _get_post_defaults(self, opts, model=None):\n options = {}\n options.update(**opts)\n return options\n\n def _get_postauth_params(self, **kwargs):\n params = {\n 'username': 'molly',\n 'password': 'barbar',\n 'reply': 'Access-Accept',\n 'called_station_id': '00-11-22-33-44-55:hostname',\n 'calling_station_id': '00:26:b9:20:5f:10',\n }\n params.update(kwargs)\n return self._get_post_defaults(params)\n\n def _get_accounting_params(self, **kwargs):\n return 
self._get_post_defaults(kwargs)\n\n\nclass FileMixin(object):\n def _get_path(self, file):\n d = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(d, file)\n\n def _get_csvfile(self, rows):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)\n for row in rows:\n writer.writerow(row)\n return SimpleUploadedFile(\n 'test.csv',\n bytes(output.getvalue(), encoding='utf8'),\n content_type='text/csv',\n )\n\n def _get_openvpn_status(self):\n with open(self._get_path('static/openvpn.status')) as file:\n status = file.read()\n return status\n\n def _get_openvpn_status_mock(self):\n return patch(\n 'openwisp_radius.management.commands.base.convert_called_station_id'\n '.BaseConvertCalledStationIdCommand._get_raw_management_info',\n return_value=self._get_openvpn_status(),\n )\n\n\nclass CallCommandMixin(object):\n def _call_command(self, command, **kwargs):\n call_command(command, **kwargs)\n","repo_name":"openwisp/openwisp-radius","sub_path":"openwisp_radius/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","stars":324,"dataset":"github-code","pt":"73"} +{"seq_id":"25462253499","text":"from math import sqrt\nimport numpy as np\nfrom copy import deepcopy\nfrom os.path import join\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom astropy.wcs import WCS\nfrom matplotlib.path import Path\nfrom startrail import paths\nimport matplotlib.pyplot as plt\n\ndef time_to_seconds(time):\n h = int(time[:2])\n m = int(time[3:5])\n s = float(time[6:])\n t = 3600 * h + 60 * m + s\n # so that survey is ordered correctly despite being through midnight\n if t < 40000:\n t += 24 * 3600\n return t\n\ndef lazy_property(fn):\n attr_name = '_lazy_' + fn.__name__\n\n @property\n def _lazy_property(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n return _lazy_property\n\n# singleton\nclass Survey:\n valid_table = Table.read(paths.valid_table)\n adjust_table = Table.read(paths.adjust_table)\n data = Table.read(paths.summary_table)\n data['seconds'] = [time_to_seconds(x) for x in data['TIME-OBS']]\n data.sort('seconds')\n science_mask = np.array(['2019-07' in x for x in data['DATE-OBS']])\n _instance = None\n\n def __new__(cls):\n if cls._instance is None:\n print('Creating Survey singleton.')\n cls._instance = super(Survey, cls).__new__(cls)\n mask = Survey.science_mask\n data = Survey.data[mask]\n sequences = []\n for row in data:\n inp = [row[x] for x in ('fname', 'TELSTAT', 'EXPTIME', 'seconds', 'BAND', 'CENTRA', 'CENTDEC')]\n inp[0] = join(paths.data_dir, inp[0])\n if row['TELSTAT'] == 'Track':\n exp_ind = 0\n exp = StaticExposure(exp_ind, *inp)\n seq_ind = len(sequences)\n seq = Sequence(seq_ind, exp)\n sequences.append(seq)\n \n else:\n adj_sub = Survey.adjust_subtable(seq_ind, exp_ind)\n val_sub = Survey.valid_subtable(seq_ind, exp_ind)\n exp = StarTrailExposure(exp_ind, *inp, adjust_table=adj_sub, valid_table=val_sub)\n seq.add_exposure(exp)\n exp_ind += 1\n\n cls._instance.sequences = sequences\n cls._instance.seconds_to_seq = dict()\n for seq in cls._instance.sequences:\n cls._instance.seconds_to_seq[int(seq.seconds)] = seq\n\n return cls._instance\n\n @staticmethod\n def valid_subtable(seq_ind, exp_ind):\n sub = Survey.valid_table[(Survey.valid_table['seq'] == seq_ind) * (Survey.valid_table['exp'] == exp_ind)]\n return sub[['ccd', 'valid']]\n\n @staticmethod\n def adjust_subtable(seq_ind, 
exp_ind):\n sub = Survey.adjust_table[(Survey.adjust_table['seq'] == seq_ind) * (Survey.adjust_table['exp'] == exp_ind)]\n return sub\n\n def contains(self, x, y):\n for seq in Survey._instance.sequences:\n if seq.contains(x, y):\n return True\n return False\n\n def __len__(self):\n return len(Survey._instance.sequences)\n\nclass Sequence:\n def __init__(self, index, keyExposure):\n self.index = index\n self.exposures = [keyExposure]\n self.seconds = keyExposure.seconds\n self.band = keyExposure.band\n self.ra = keyExposure.ra\n self.dec = keyExposure.dec\n self.registration = Registration(self.index)\n\n def add_exposure(self, exposure):\n self.exposures.append(exposure)\n self.exposures.sort(key=lambda exp: exp.seconds)\n\n def extract(self, exp_ind, ccd_ind, cutoff):\n # TODO: return iterator\n raise NotImplementedError()\n\n def contains(self, x, y):\n cutoff = 2\n r = sqrt((x - self.ra) ** 2 + (y - self.dec) ** 2)\n if r > cutoff:\n return False\n\n for exp in self.exposures:\n if exp.contains(x,y):\n return True\n return False\n\n def __getitem__(self, n):\n return self.exposures[n]\n\n def __len__(self):\n return len(self.exposures)\n\nclass Exposure:\n def __init__(self, index, fname, tracking, exptime, seconds, band, ra, dec):\n self.index = index\n self.fname = fname\n self.tracking = tracking\n self.exptime = exptime\n self.seconds = seconds\n self.band = band\n self.ra = ra\n self.dec = dec\n\n @lazy_property\n def header(self):\n return fits.open(self.fname)[0].header\n\n @lazy_property\n def ccds(self):\n hdus = fits.open(self.fname)\n return [CCD(i,hdu,True) for i,hdu in enumerate(hdus[1:])]\n\n @lazy_property\n def dqmasks(self):\n hdus = fits.open(self.fname.replace('oki', 'ood'))\n # Doesn't match up with https://www.noao.edu/meetings/decam/media/DECam_Data_Handbook.pdf\n # 1 = bad pixel\n # 3,4 = something to do with saturation\n # 5 = cosmic ray\n return [hdu.data for i,hdu in enumerate(hdus[1:])] \n\n def contains(self, x, y):\n cutoff = 1.2\n r = sqrt((x - self.ra) ** 2 + (y - self.dec) ** 2)\n if r > cutoff:\n return False\n \n return any((ccd.contains(x, y) for ccd in self.ccds))\n\nclass StarTrailExposure(Exposure):\n MID_CCD = 30\n\n def __init__(self, index, fname, tracking, exptime, time_obs, band, ra, dec, adjust_table=None, valid_table=None):\n self.adjust_table = adjust_table\n self.valid_table = valid_table\n if adjust_table and len(adjust_table) > 0:\n self.adjust_table.sort('ccd')\n idx = np.where(self.adjust_table['ccd'] == StarTrailExposure.MID_CCD)[0][0]\n row = self.adjust_table[idx]\n ra = row['CENTRA'] + row['ra'] \n dec = row['CENTDEC'] + row['dec'] \n super(StarTrailExposure, self).__init__(index, fname, tracking, exptime, time_obs, band, ra, dec)\n if valid_table:\n self.valid_table.sort('ccd')\n\n @lazy_property\n def header(self):\n header = fits.open(self.fname)[0].header\n \n if self.adjust_table:\n idx = np.where(self.adjust_table['ccd'] == StarTrailExposure.MID_CCD)[0][0]\n row = self.adjust_table[idx]\n d_ra = row['ra'] \n d_dec = row['dec'] \n for key in ['CENTRA', 'CORN1RA', 'CORN2RA', 'CORN3RA', 'CORN4RA']:\n header[key] = row[key] + d_ra\n for key in ['CENTDEC', 'CORN1DEC', 'CORN2DEC', 'CORN3DEC', 'CORN4DEC']:\n header[key] = row[key] + d_dec\n return header\n\n @lazy_property\n def __hdus(self):\n hdus = fits.open(self.fname)\n \n if self.adjust_table:\n for ccd_ind, hdu in enumerate(hdus[1:]):\n idx = np.where(self.adjust_table['ccd'] == ccd_ind)[0][0]\n row = self.adjust_table[idx]\n d_ra = row['ra']\n d_dec = row['dec']\n for key in 
['CRVAL1', 'CENRA1', 'COR1RA1', 'COR2RA1', 'COR3RA1', 'COR4RA1']:\n hdu.header[key] = row[key] + d_ra\n for key in ['CRVAL2', 'CENDEC1', 'COR1DEC1', 'COR2DEC1', 'COR3DEC1', 'COR4DEC1']:\n hdu.header[key] = row[key] + d_dec\n for key in ['PV1_7','PV2_8','PV2_9','CD1_1','PV2_0','PV2_1','PV2_2','PV2_3',\\\n 'PV2_4','PV2_5','PV2_6','PV2_7','PV1_6','PV2_10','PV1_4','PV1_3','PV1_2','PV1_1',\\\n 'PV1_0','PV1_9','PV1_8','CD1_2','PV1_5','CD2_1','CD2_2','PV1_10']:\n hdu.header[key] = row[key]\n\n return hdus\n\n\n @lazy_property\n def ccds(self, onlyValid=False):\n if self.valid_table:\n return [CCD(i,hdu,valid) for i,hdu,valid in zip(range(61), self.__hdus[1:], self.valid_table['valid'])]\n return [CCD(i,hdu,True) for i,hdu in zip(range(61), self.__hdus[1:])]\n\n\n @lazy_property\n def valid_ccds(self):\n if self.valid_table:\n return [CCD(i,hdu,valid) for i,hdu,valid in zip(range(61), self.__hdus[1:], self.valid_table['valid']) if valid]\n return [CCD(i,hdu,True) for i,hdu in zip(range(61), self.__hdus[1:])]\n\nclass StaticExposure(Exposure):\n def __init__(self, index, fname, tracking, exptime, time_obs, band, ra, dec):\n super(StaticExposure, self).__init__(index, fname, tracking, exptime, time_obs, band, ra, dec)\n\nclass Box:\n def __init__(self, corners_x, corners_y):\n if len(corners_x) != 4 or len(corners_y) !=4:\n raise RuntimeError('Need 4 corners for Box.')\n self.corners_x = corners_x\n self.corners_y = corners_y\n self.polygon = Path(np.vstack([corners_x, corners_y]).T)\n\n def contains(self, x, y):\n return self.polygon.contains_point((x,y))\n\nclass CCD(Box):\n def __init__(self, index, hdu, valid):\n corners_x = [hdu.header['COR{}RA1'.format(i)] for i in [1,2,4,3]]\n corners_y = [hdu.header['COR{}DEC1'.format(i)] for i in [1,2,4,3]]\n super(CCD, self).__init__(corners_x, corners_y)\n self.index = index\n self.wcs = None\n self.hdu = hdu\n self.valid = valid\n\n @lazy_property\n def image(self):\n return self.hdu.data\n\n @lazy_property\n def header(self):\n return self.hdu.header\n\n def plot(self, vmin=0, vmax=100, cmap='gray', origin='lower'):\n fig, ax = plt.subplots()\n ax.set_title(f'CCD: {ccd_ind}')\n ax.imshow(self.image.T, vmin=vmin, vmax=vmax, cmap=cmap, origin=origin)\n return fig, ax\n\n def pix_to_world(self, pix_x, pix_y):\n if self.wcs is None:\n self.wcs = WCS(self.header)\n return self.wcs.all_pix2world(np.array([pix_x, pix_y]).T, 1)\n\n def world_to_pix(self, ra, dec):\n if self.wcs is None:\n self.wcs = WCS(self.header)\n return self.wcs.all_world2pix(np.array([ra, dec]).T, 1)\n\nclass Registration:\n def __init__(self, index):\n self.full_catalog_file = join(paths.registration_dir, f'registration_{index}_500.csv')\n self.sparse_catalog_file = join(paths.registration_dir, f'registration_{index}_3000.csv')\n\n def get_sources_in(self, polygon, cutoff=3000):\n if isinstance(polygon, Box):\n polygon = polygon.polygon\n if cutoff >= 3000:\n sub = self.sparse_catalog[[self.sparse_catalog.columns[3] >= cutoff]]\n else:\n sub = self.full_catalog[[self.full_catalog.columns[3] >= cutoff]]\n mask = [polygon.contains_point((x[1], x[2])) for x in sub]\n return sub[[mask]]\n\n def get_sources_around(self, polygon, cutoff=3000):\n if isinstance(polygon, Box):\n polygon = polygon.polygon\n if cutoff >= 3000:\n sub = self.sparse_catalog[[self.sparse_catalog.columns[3] >= cutoff]]\n else:\n sub = self.full_catalog[[self.full_catalog.columns[3] >= cutoff]]\n\n cpy = deepcopy(polygon)\n # for stars that trail in\n sidereal_buffer = 0.0625\n cpy.vertices[:2,0] -= sidereal_buffer \n\n 
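# Worked example of the widening above (illustrative numbers; assumes\n        # vertices[:2] are the low-RA corners, as the shift implies): with\n        # sidereal_buffer = 0.0625, a chip edge at ra = 30.0 deg moves out to\n        # 29.9375 deg, so a source at ra = 29.95 deg that drifts onto the\n        # chip during the exposure is still matched.\n\n        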
# for trails right on the edge\n edge_buffer = 0.005\n cpy.vertices[1:3,1] -= edge_buffer\n cpy.vertices[0,1] += edge_buffer\n cpy.vertices[3,1] += edge_buffer\n\n mask = [cpy.contains_point((x[1], x[2])) for x in sub]\n return sub[[mask]]\n\n @lazy_property\n def full_catalog(self):\n '''\n gaia mean flux > 500\n '''\n return Table.read(self.full_catalog_file)\n\n @lazy_property\n def sparse_catalog(self):\n '''\n gaia mean flux > 3000\n '''\n return Table.read(self.sparse_catalog_file)\n \nclass Trail:\n def __init__(self, gaiaId, ra, dec, flux, img, start, end):\n self.id = gaiaId\n self.ra = ra\n self.dec = dec\n self.flux = flux\n self.image = image\n self.start = start\n self.end = end\n\n def plot(self, vmin=0, vmax=100, cmap='gray', origin='lower'):\n fig, ax = plt.subplots()\n ax.set_title(f'CCD: {ccd_ind}')\n ax.imshow(self.image.T, vmin=vmin, vmax=vmax, cmap=cmap, origin=origin)\n return fig, ax\n","repo_name":"davidthomas5412/StarTrailPipeline","sub_path":"startrail/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40665323724","text":"import torch\nfrom torch.utils.data import DataLoader\n\nfrom config import device, batch_size, model_params, embed_params, encoder_params, data_params, training_params, paths\n\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score\n\nfrom modules.layers.embeddings import LookUp, ELMoForManyLangs\nfrom modules.common import ConcatVocabulary\nfrom modules.common.preprocessing import Preprocessing\nfrom modules.common.utils import class_weigths\nfrom modules.common.dataloading import load_data_file, load_data_file_without_split, split_data, collate_fn_cf\nfrom modules.trainers import ClassificationTrainer\nfrom modules.datasets.classification_dataset import ClassificationDataset\n\nfrom utils.model import RNNClassifier\nfrom utils.data import remap_data_to_3, remap_data_to_3_from_text, load_vectors\n\n# vectors = load_vectors('./data/prim-6.1-public-all.shuffled.300cbow.bin')\n# vectors.save_word2vec_format('./data/prim-6.1-public-all.shuffled.300cbow.txt', binary=False)\n\nprint('Loading dataset...')\npreprocessing = Preprocessing()\n\ndata = load_data_file_without_split(**data_params)\n# data = remap_data_to_3(data)\ndata = remap_data_to_3_from_text(data)\n\nprint(get_histogram_data(data[:, 2]))\n\ntrain_data, valid_data, test_data = split_data(data)\nx_column, y_column = data_params['x_column'], data_params['y_column']\n\ntrain_set = ClassificationDataset(train_data[:, x_column], train_data[:, y_column], preprocessing=preprocessing.process_text)\nvalid_set = ClassificationDataset(valid_data[:, x_column], valid_data[:, y_column], preprocessing=preprocessing.process_text)\ntest_set = ClassificationDataset(test_data[:, x_column], test_data[:, y_column], preprocessing=preprocessing.process_text)\n\nvocab = ConcatVocabulary([train_set.vocab, valid_set.vocab, test_set.vocab])\n\ntrain_loader = DataLoader(train_set, batch_size, shuffle=True, collate_fn=collate_fn_cf)\nvalid_loader = DataLoader(valid_set, batch_size, shuffle=True, collate_fn=collate_fn_cf)\ntest_loader = DataLoader(test_set, batch_size, collate_fn=collate_fn_cf)\n\nprint(get_histogram_data(train_set.labels))\nprint(get_histogram_data(valid_set.labels))\nprint(get_histogram_data(test_set.labels))\n\nprint('Creating model...')\n\n# embeddings = LookUp(vocab=vocab, file_name='data/cc.sk.300.vec', type='word2vec', trainable=True, 
embedding_dim=300, embedding_dropout=.0)\nembeddings = LookUp(vocab=vocab, file_name='data/prim-6.1-public-all.shuffled.300cbow.txt', type='word2vec', trainable=True, embedding_dim=300, embedding_dropout=.0)\n# embeddings = LookUp(vocab=vocab, file_name=None, trainable=True, embedding_dim=300, embedding_dropout=.0)\n\nmodel = RNNClassifier(embeddings, encoder_params, **model_params).to(device)\n\noptimizer = torch.optim.Adam(model.parameters())\n\nweights = class_weigths(train_set.labels).to(device)\ncriterion = torch.nn.NLLLoss(weight=weights)\n\ntrainer = ClassificationTrainer(model, criterion, optimizer, device)\n\nprint('Training...')\nbest_macro_f1 = None\ngold_labels = test_set.labels.astype(int)\n\nfor epoch in range(training_params['n_epochs']):\n\n train_loss = trainer.train_model(train_loader)\n\n valid_loss, predicted, model_predictions, labels = trainer.evaluate_model(valid_loader)\n\n print('| Epoch: {} | Train Loss: {:2.5f} | Val. Loss: {:2.5f} | Val. Acc: {:2.5f} | Val. Macro F1: {:2.5f} | Val. Micro F1: {:2.5f} |'\n .format(epoch + 1, train_loss, valid_loss, accuracy_score(labels, predicted),\n f1_score(labels, predicted, average='macro'), f1_score(labels, predicted, average='micro')))\n\n macro_f1 = f1_score(labels, predicted, average='macro')\n\n test_loss, predicted, model_predictions, labels = trainer.evaluate_model(test_loader)\n print('----------------------------------------------------Test results----------------------------------------------------')\n print('| Loss: {} | Acc: {}% |'.format(test_loss, accuracy_score(labels, predicted)))\n print('| Macro Precision: {} | Micro Precision: {} |'.format(precision_score(gold_labels, predicted, average='macro'), precision_score(gold_labels, predicted, average='micro')))\n print('| Macro Recall: {} | Micro Recall: {} |'.format(recall_score(gold_labels, predicted, average='macro'), recall_score(gold_labels, predicted, average='micro')))\n print('| Macro F1: {} | Micro F1: {}|'.format(f1_score(gold_labels, predicted, average='macro'), f1_score(gold_labels, predicted, average='micro')))\n print('--------------------------------------------------------------------------------------------------------------------')\n\n if not best_macro_f1 or macro_f1 > best_macro_f1:\n print('saving...')\n best_macro_f1 = macro_f1\n torch.save(model, paths['f1_score']['model_path'])\n","repo_name":"SamuelPecar/Slovak-sentiment-analysis","sub_path":"train_lookup.py","file_name":"train_lookup.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9033024618","text":"import json\nfrom pprint import pprint\n\n\n\ndef get_list_from_json():\n list_of_lists = []\n\n with open('metadata_5-15.json', 'r') as f:\n data = json.load(f)\n\n i=0\n while True:\n try:\n #Get extension, filename, filesize\n extension = data['files'][i]['system']['extension']\n filename = data['files'][i]['system']['file']\n filesize = data['files'][i]['system']['size']\n mimetype = data['files'][i]['system']['mime_type']\n\n try:\n headers = data['files'][i]['headers']\n except:\n headers=[]\n\n ### Create URI ###\n #Begin by extracting tskluzac path\n raw_path = data['files'][i]['system']['path'].split('/')\n #print(raw_path)\n\n globus_url = \"globus:45a53408-c797-11e6-9c33-22000a1e3b52/cdiac/cdiac.ornl.gov/pub8old\"\n\n #print(raw_path)\n\n #Now turn this into CDIAC pub8 path\n j=0 #0(''), 1(home), 2(tskluzac), 3(pub8)--- we may keep 3.\n for item in raw_path:\n if j<3:\n 
j+=1\n continue\n\n elif item == '':\n j+=1\n continue\n\n else:\n globus_url = globus_url + \"/\" + item\n j+=1\n\n\n if extension=='no extension':\n pass\n\n else:\n globus_url = globus_url + \"/\" + filename\n\n\n #globus_url + \"/\" + filename\n\n list = [globus_url, headers, mimetype,filesize, extension]\n\n list_of_lists.append(list)\n i+=1\n\n except:\n break\n\n\n #print(list_of_lists)\n return list_of_lists\n\n#def entry_builder():\n\n# thing = get_list_from_json()\n# for item in thing:\n# print(item[0])\n","repo_name":"tskluzac/CDIACGlobusSearch","sub_path":"json_sort.py","file_name":"json_sort.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"20222305573","text":"T=int(input())\n\ndx=[-1,-2,-2,-1,1,2,2,1]\ndy=[-2,-1,1,2,2,1,-1,-2]\n\ndef bfs(queue,target,visited):\n while queue:\n cur=queue.pop(0)\n if cur == target:\n print(visited[target[0]][target[1]])\n break\n else:\n for dir in range(8):\n next_x=cur[0]+dx[dir]\n next_y=cur[1]+dy[dir]\n if 0<=next_x 0:\n\n db_meta[db_nickname] = dict()\n\n else:\n\n print(\"Empty Nickname\")\n exit(1)\n\n temp = {}\n\n temp[\"db_name\"] = input(\"Please enter your db_name: \")\n\n if db_type != \"sqlite3\":\n \n temp[\"server\"] = input(\"Please enter your db_hostname: \")\n temp[\"username\"] = input(\"Please enter your db_username: \")\n temp[\"password\"] = getpass.getpass(\"Please enter your db_password: \")\n\n count = 0\n empty_key = []\n\n for key, value in temp.items():\n\n if not value or len(value) == 0:\n\n empty_key.append(key)\n count += 1\n \n if count > 0:\n\n print(\n \"These attributes have empty entries: {}\".format(\n ','.join(empty_key)\n )\n )\n exit(1)\n\n else:\n\n config_data = None\n db_meta[db_nickname].update(temp)\n\n with open(\"config/config.json\", \"r\") as config_file:\n\n config_data = json.loads(config_file.read())\n \n config_data.update(db_meta)\n \n with open(\"config/config.json\", \"w\", newline=\"\") as config_file:\n\n config_file.write(\n json.dumps(config_data, indent=4)\n )\n \ndef delete_db_meta(db_nickname):\n\n config_data = None\n \n\n with open(\"config/config.json\", \"r\") as config_file:\n\n config_data = json.loads(config_file.read())\n \n del config_data[db_nickname]\n\n with open(\"config/config.json\", \"w\", newline=\"\") as config_file:\n\n config_file.write(\n json.dumps(config_data, indent=4)\n )\n\n\ndef init_db():\n\n if re.search(r\"^(?i)init$\", sys.argv[-1]) and len(sys.argv) == 2:\n\n init_install()\n \n elif re.search(r\"^(?i)del$\", sys.argv[1]) and len(sys.argv) == 3:\n\n delete_db_meta(sys.argv[-1])\n\n else:\n\n print(\"Invalid parameters\")\n\n\n#init_db()\n","repo_name":"tangingw/python_sqlclient","sub_path":"tools/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"9275137993","text":"\nfrom django.contrib.auth.decorators import login_required\n\nfrom _view_helpers import mix_response, make_crumbs, message_convert\nfrom _git_helpers import get_repo, get_commit_tree\n\nfrom stratus.settings import REPO_BRANCH, STRATUS_MEDIA_URL\n\n@login_required\ndef view(request, repo_name, branch=REPO_BRANCH, path=None, commit_sha=None ):\n json_convert = None\n repo = get_repo( repo_name )\n commit, tree = get_commit_tree(repo, commit_sha)\n the_tree = tree\n dir_path = path.split(\"/\")\n\n if commit_sha:\n template_name = 
'stratus/view_commit_tree.html'\n else:\n template_name = 'stratus/view_tree.html'\n\n if path:\n the_tree = tree[path]\n\n # if its file try to get file directory \n if the_tree.type != \"tree\":\n path = \"/\".join(dir_path[:-1])\n if len(dir_path) == 1:\n the_tree = tree\n else:\n the_tree = tree[path]\n\n if request.is_ajax():\n json_convert = message_convert\n\n context = dict(\n STRATUS_MEDIA_URL = STRATUS_MEDIA_URL,\n repo_name = repo_name,\n branch_name = branch,\n commit = commit,\n repo = repo,\n tree = the_tree.list_traverse(depth = 1),\n breadcrumbs = make_crumbs(path),\n parent_dir = dir_path[:-1],\n dir_path = dir_path,\n path = path,\n )\n\n return mix_response( \n request, \n template_name, \n context,\n )","repo_name":"k1000/django-stratus","sub_path":"stratus/views/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"9466103858","text":"x=5\r\ny=3\r\nspisk=[]\r\nfor i in range(x):\r\n spisk2=[]\r\n for g in range(y):\r\n spisk2.append(g*i)\r\n spisk.append(spisk2)\r\nprint(spisk)\r\n\r\nfor i in range(len(spisk)):\r\n for g in range(len(spisk[i])):\r\n if g==len(spisk[i])-1:\r\n print(spisk[i][g])\r\n else:\r\n print(spisk[i][g], end=' ')","repo_name":"krytoynik228/djorno","sub_path":"first_script.py","file_name":"first_script.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71617984875","text":"import math\r\n\r\nimport torch.nn as nn\r\n\r\ndef upsample(x, size):\r\n x_up = nn.functional.interpolate(x, size=size, mode='bicubic', align_corners=True)\r\n return x_up\r\n\r\nclass ESPCN(nn.Module):\r\n def __init__(self, inchannel,out_channels=1, scale_factor=4):\r\n super(ESPCN, self).__init__()\r\n self.first_part = nn.Sequential(\r\n nn.Conv2d(inchannel, inchannel, kernel_size=3),\r\n nn.BatchNorm2d(inchannel),\r\n nn.LeakyReLU(0.05, inplace=True),\r\n\r\n\r\n nn.Conv2d(inchannel, inchannel, kernel_size=3),\r\n nn.BatchNorm2d(inchannel),\r\n nn.LeakyReLU(0.05, inplace=True),\r\n )\r\n self.body_part = nn.Sequential(\r\n nn.Conv2d(inchannel, out_channels * (scale_factor ** 2), kernel_size=3),\r\n nn.PixelShuffle(scale_factor),\r\n\r\n )\r\n\r\n self.last_part = nn.Sequential(\r\n nn.Conv2d(out_channels,out_channels,kernel_size=3),\r\n nn.BatchNorm2d(inchannel),\r\n nn.LeakyReLU(0.05,inplace=True),\r\n\r\n nn.Conv2d(out_channels, out_channels, kernel_size=3),\r\n nn.BatchNorm2d(inchannel),\r\n nn.LeakyReLU(0.05, inplace=True),\r\n\r\n nn.Conv2d(out_channels, out_channels, kernel_size=3),\r\n nn.BatchNorm2d(inchannel),\r\n nn.LeakyReLU(0.05, inplace=True)\r\n )\r\n\r\n self._initialize_weights()\r\n\r\n def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n if m.in_channels == 32:\r\n nn.init.normal_(m.weight.data, mean=0.0, std=0.001)\r\n nn.init.zeros_(m.bias.data)\r\n else:\r\n nn.init.normal_(m.weight.data, mean=0.0, std=math.sqrt(2/(m.out_channels*m.weight.data[0][0].numel())))\r\n nn.init.zeros_(m.bias.data)\r\n\r\n def forward(self, x):\r\n x_1 = self.first_part(x)\r\n x_2 = self.body_part(x_1)\r\n x_3 = upsample(x_2,[x_2.shape[2]+6,x_2.shape[3]+6])\r\n x_4 = self.last_part(x_3)\r\n return 
x_2\r\n","repo_name":"ZhongHY123/ParaGAN","sub_path":"paraGAN/espcn.py","file_name":"espcn.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70118611757","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup, element\nimport re\nimport string \nfrom collections import Counter\nimport os\n\ndef get_url(url):\n \"\"\"\n Returns the html in text from the url provided.\n\n Args:\n url (str): url of webapge\n\n Returns:\n html_text (str): string contaning html text for url\n \"\"\"\n\n # Get html str from url\n html_text = requests.get(url).text\n\n return html_text\n\n\ndef get_poetry_html():\n \"\"\"\n Check if inputed ICD10 code exists. \n\n Returns:\n (bool): determines if inputed ICD10 code exists\n \"\"\"\n \n # Get html containg all subdirectories for code ranges \n icd_home_html = get_url(\"https://poetry-archive.com/\")\n \n # Create BeautifulSoup object\n home_soup_html = BeautifulSoup(icd_home_html, \"html.parser\")\n\n return home_soup_html\n\n\ndef get_word_dict(text):\n # Split each text into words\n word_list = text.split(\" \")\n\n # Loop through list, make sure no extra whitespace\n for word_ind in range(len(word_list)):\n # Check if word contains white space\n if len(word_list[word_ind]) >= 20 :\n temp = word_list[word_ind].split(\" \")\n\n # Count occurence of each word to dict\n word_dict = Counter(word_list)\n\n # Split if two words registerd as one, keep first and \n # Only keep words that have 3 or more letters\n word_dict = dict((word, occurence) for word, occurence in word_dict.items() if len(word) >= 3)\n\n return word_dict\n\n\n# Initiate output df\npoem_line_df = pd.DataFrame(columns=[\"title\", \"author\", \"date\", \"category\", \"line\"])\npoem_word_df = pd.DataFrame(columns=[\"title\", \"author\", \"date\", \"category\", \"word\", \"count\"])\n\n# Get Beautiful soup object\nhome_soup_html = get_poetry_html()\n\n# Get first 16 elements with \"a\" tag, these are categories\nsorted_element_list = home_soup_html.find_all(\"a\")[0:16]\n\n# Initiate url list\ncategory_url_list = []\n\n# Initiate category name list\ncategory_name_list = []\n\n# Loop through each element\nfor element in sorted_element_list:\n # Get href\n category_url_list.append(element.get(\"href\"))\n\n # Get Category name\n category_name_list.append(element.get_text())\n\n# Loop through each category cat_ind\nfor cat_ind in range(len(category_url_list)):\n # Set category cat_ind\n category_url = \"https://poetry-archive.com\" + category_url_list[cat_ind]\n\n # Get html for cat_ind\n category_html = get_url(category_url)\n\n # Create BeautifulSoup object\n category_soup_html = BeautifulSoup(category_html, \"html.parser\")\n\n # Get elements with a\n category_element_list = category_soup_html.find_all(\"a\")[0:16]\n\n # Get elements with a\n date_element_list = category_soup_html.find_all(\"font\")\n\n # Initiate poem url list\n poem_url_list = []\n\n # Initiate poem date list\n poem_date_list = []\n\n # Loop through each element\n for url_elem in category_element_list[4:-7]:\n # Edge case, random amazon href\n if \"amazon\" in url_elem.get(\"href\")[2:]:\n continue\n else:\n # Get href, skip first two characters \"..\"\n poem_url_list.append(url_elem.get(\"href\")[2:])\n\n # Loop through each element\n for date_elem in date_element_list:\n # Get text\n date_elem_text = date_elem.get_text()\n\n # Check if has double parenthesis\n if \"(\" in date_elem_text and \")\" in 
date_elem_text:\n # Get date\n elem_date = date_elem_text[date_elem_text.find(\"(\")+1:date_elem_text.find(\")\")]\n\n # Remove \" \" in date\n elem_date = elem_date.replace(\" \", \"\")\n\n # Append date to lit\n poem_date_list.append(elem_date)\n\n # Loop through each poem url\n for po_ind in range(len(poem_url_list)):\n # Set final poem url\n poem_url = \"https://poetry-archive.com\" + poem_url_list[po_ind]\n\n # Get html for poem url\n poem_html = get_url(poem_url)\n\n # Create BeautifulSoup object\n poem_soup_html = BeautifulSoup(poem_html, \"html.parser\")\n \n # Get title from 5th element with \"p\" tag\n poem_title = poem_soup_html.find_all(\"p\")[5].get_text()\n \n # Get elements with \"i\" tag\n i_element_list = poem_soup_html.find_all(\"i\")\n\n # Loop through list\n for i_ele in i_element_list:\n # Get text\n i_ele_text = i_ele.get_text()\n\n # Check if correct element\n if \"by: \" in i_ele_text:\n # Remove first 4 characters\n poem_author = i_ele_text[4:]\n\n # Get poem text from 1st element with \"dt\" tag\n poem_text = poem_soup_html.find_all(\"dt\")[0].get_text()\n \n # Remove punctuation\n poem_text = poem_text.translate(str.maketrans('', '', string.punctuation))\n\n # Split text for line df\n poem_line_list = poem_text.split(\"\\r\\n \")\n\n # Remove empty lines\n poem_line_list = [poem_line for poem_line in poem_line_list if not str.isspace(poem_line)]\n\n # Clean text for word df\n clean_poem_text = poem_text.replace(\"\\r\\n \", \" \")\n clean_poem_text = clean_poem_text.replace(\"\\r\" , \"\")\n clean_poem_text = clean_poem_text.replace(\"\\t\" , \"\")\n clean_poem_text = clean_poem_text.replace(\"\\n\" , \" \")\n\n # Make all lowercase\n clean_poem_text = clean_poem_text.lower()\n\n # Get each word(len >= 3) and it's occurence as dict\n poem_word_dict = get_word_dict(clean_poem_text)\n\n # Stop word list\n stop_word_list = ['stop', 'the', 'to', 'and', 'a', 'in', 'it', 'is', 'i', 'that', 'had', 'on', 'for', 'were', 'was', 'they', \n 'but', 'ast', 'its', 'i could', 'not', 'from', 'with', 'are', \"16181667\", 'his', 'her', \"she\", \"hers\"]\n\n # Loop and remove keywords\n for key, value in list(poem_word_dict.items()):\n # Check if key or value matches list\n if key in stop_word_list or value in stop_word_list:\n del poem_word_dict[key]\n\n yoo = poem_word_dict.keys()\n\n # Set df for poem\n word_df = pd.DataFrame(poem_word_dict.items(), columns=['word', 'count'])\n line_df = pd.DataFrame()\n\n # Set line column\n line_df[\"line\"] = poem_line_list\n\n # Set author column\n word_df[\"author\"] = poem_author\n line_df[\"author\"] = poem_author\n\n # Set title column\n word_df[\"title\"] = poem_title\n line_df[\"title\"] = poem_title\n\n # Check if date is range\n if \"-\" in poem_date_list[po_ind]:\n # Keep lower \n poem_date = poem_date_list[po_ind].split(\"-\")[0]\n\n # Remove punctuation\n poem_date = poem_date.translate(str.maketrans('', '', string.punctuation))\n\n # Remove all non numeric characters\n poem_date = re.sub('[^0-9]', \"\", poem_date)\n\n elif not str.isdigit(poem_date_list[po_ind]):\n # No date associated\n poem_date = 0\n \n # Set date column\n word_df[\"date\"] = poem_date\n line_df[\"date\"] = poem_date\n\n # Set category column\n word_df[\"category\"] = category_name_list[cat_ind]\n line_df[\"category\"] = category_name_list[cat_ind]\n\n # Concat df\n poem_word_df = pd.concat([poem_word_df, word_df], ignore_index=True)\n poem_line_df = pd.concat([poem_line_df, line_df], ignore_index=True)\n\n# Only keep count and word columns\nonly_word_count_df 
= poem_word_df[[\"count\", \"word\"]]\n\n# Trim df and drop na\ndate_count_df = poem_word_df[[\"author\", \"date\", \"count\", \"word\"]]\ndate_count_df = date_count_df.dropna(subset=['date'])\n\n# Remove edge case\nedge = date_count_df[\"date\"].unique()[31]\n\n# Keep rows not edge\ndate_count_df = date_count_df[date_count_df[\"date\"] != edge]\n\n# Convert \ndate_count_df[\"date\"] = date_count_df[\"date\"].astype(int)\ndate_count_df[\"century\"] = date_count_df['date'].apply(lambda x: (x // 100 * 100) + 100)\ndate_count_df = date_count_df.groupby([\"century\", \"word\"]).agg({\"count\": \"sum\"})\ndate_count_df = date_count_df.reset_index().sort_values(by=[\"count\"], ascending=False)\n\n# Def function to trim df\ndef trim_count_df(date_df, col='century', n=5):\n return date_df.groupby(col).head(n).reset_index()\n\n# Trim DF, keep top 5 of each\n#date_count_df = trim_count_df(date_count_df).sort_values(by=[\"century\", \"count\"])\n\n# Group by word\nonly_word_count_df = only_word_count_df.groupby(only_word_count_df[\"word\"]).aggregate({\"count\": \"sum\"}).sort_values(by=[\"count\"], ascending=True)\n\n# Save to csv\ndate_count_df.to_csv(\"/Users/davidshaknovich/Desktop/Northeastern/ds3500/ds3500_fa22/hw/final/century_count.csv\")\npoem_word_df.sort_values(by=[\"count\"], ascending=False).to_csv(\"/Users/davidshaknovich/Desktop/Northeastern/ds3500/ds3500_fa22/hw/final/poem_word.csv\")\npoem_line_df.to_csv(\"/Users/davidshaknovich/Desktop/Northeastern/ds3500/ds3500_fa22/hw/final/poem_line.csv\")\n\n\ndef to_century(date):\n century = (date // 100 * 100) + 100\n return f\" {century} Century\"\n\n","repo_name":"TeeChenxing/Evolution-of-Poetry","sub_path":"webscrape_final.py","file_name":"webscrape_final.py","file_ext":"py","file_size_in_byte":9122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"36788108442","text":"\"\"\"Forms for the `tutors` app.\"\"\"\nfrom django import forms\n\nfrom profiles.models import Profile\nfrom tutors.models import Service, Subject\n\n\nclass ServiceForm(forms.ModelForm):\n \"\"\"Form for inputting info about services offered by a given Tutor.\"\"\"\n\n class Meta:\n model = Service\n exclude = [\"tutor\", \"is_default\"]\n\n SESSION_LENGTH_CHOICES = [\n (length, str(length)) for length in range(30, 181, 15)\n ]\n\n widgets = {\n \"subject\": forms.Select(attrs={\"class\": \"options\"}),\n \"price_per_hour\": forms.NumberInput(attrs={\"class\": \"number-input\"}),\n \"number_of_hours\": forms.NumberInput(attrs={\"class\": \"number-input\"}),\n \"session_length\": forms.Select(\n attrs={\"class\": \"options\"}, choices=SESSION_LENGTH_CHOICES\n ),\n }\n\n\nclass ServiceInlineFormset(forms.BaseInlineFormSet):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.instance and self.instance.pk:\n for form in self.forms:\n form.fields[\"subject\"].queryset = (\n Subject.objects.filter(service__tutor=self.instance.pk)\n .filter(service__is_default=True)\n .filter(service__number_of_hours=1)\n .distinct()\n )\n\n\nservice_formset = forms.inlineformset_factory(\n parent_model=Profile,\n model=Service,\n form=ServiceForm,\n formset=ServiceInlineFormset,\n extra=1,\n can_delete=True,\n can_delete_extra=False,\n min_num=0,\n)\n","repo_name":"maciej185/tutoring-app","sub_path":"tutoringApp/tutors/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
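A minimal usage sketch for the service_formset factory defined in the
tutors/forms.py record above. The view name, URL name, keyword argument, and
template path are assumptions made for illustration; only Profile,
service_formset, and the inline-formset behaviour come from the source.

from django.shortcuts import get_object_or_404, redirect, render

from profiles.models import Profile
from tutors.forms import service_formset


def edit_services(request, profile_pk):
    """Hypothetical view: edit the Service objects attached to one Profile."""
    profile = get_object_or_404(Profile, pk=profile_pk)
    if request.method == "POST":
        # Bind the submitted data to the inline formset for this Profile.
        formset = service_formset(request.POST, instance=profile)
        if formset.is_valid():
            # save() creates, updates, and (because can_delete=True) deletes
            # the related Service rows in one call.
            formset.save()
            return redirect("edit-services", profile_pk=profile.pk)
    else:
        formset = service_formset(instance=profile)
    return render(request, "tutors/services_form.html", {"formset": formset})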
+{"seq_id":"17508994115","text":"# Python program for spatial filtering and visualisation of direction of arrival of sound from sources\n# The program is based on the MATLAB version. \n# Another speed up version (multiprocessing or vectorization) of the algorithm for spatial filtering exists.\n\nimport numpy as np\nimport math\nimport sounddevice\nimport soundfile as sf\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nfrom Audio_data import Audio_data\nfrom Matrix_array import Matrix_array\nfrom Audio_source import Audio_source\nfrom Color_map import Color_map\nfrom pathlib import Path\n\nimport config\n\n# GLOBAL VARIABLES\nc = 340 # propagation speed of sound\n\ndef antenna_setup():\n r_a1 = config.r_a1 # coordinate position of origin of array1\n r_a2 = config.r_a2 # coordinate position of origin of array2\n r_a3 = config.r_a3 # coordinate position of origin of array3\n r_a4 = config.r_a4 # coordinate position of origin of array4\n uni_distance = config.distance\n row_elements = config.rows\n column_elements = config.columns\n\n # array_matrix_1, array_matrix_2, array_matrix_3, array_matrix_4 below can be generated in parallel\n array_matrix_1 = Matrix_array(r_a1,uni_distance,row_elements,column_elements)\n array_matrix_2 = Matrix_array(r_a2,uni_distance,row_elements,column_elements)\n array_matrix_3 = Matrix_array(r_a3,uni_distance,row_elements,column_elements)\n array_matrix_4 = Matrix_array(r_a4,uni_distance,row_elements,column_elements)\n array_list = [array_matrix_1, array_matrix_2, array_matrix_3, array_matrix_4]\n\n # array_matrices contains the current active arrays that should be used \n # (number of arrays defined by config.matrices)\n array_matrices = np.array(array_list[:config.active_arrays], dtype=object)\n \n # array_matrices = np.array([array_matrix_1, array_matrix_2, array_matrix_3, array_matrix_4], dtype=object)\n\n sub_arrays = len(array_matrices)\n\n for array in range(sub_arrays):\n plt.title('Array setup')\n plt.scatter(array_matrices[array].get_r_prime()[0,:], array_matrices[array].get_r_prime()[1,:])\n \n return array_matrices\n\ndef generate_array_signals(matrix_array, sources, t):\n r_prime = matrix_array.get_r_prime()\n Audio_signal = np.zeros((len(t), len(r_prime[0,:])))\n\n for sample in range(len(t)):\n if (sample+1 in np.linspace(0,len(t),10)) or (sample == 0): # print progress so the user knows how many samples have been generated\n print(sample+1) # print progress so the user knows how many samples have been generated\n for mic in range(len(r_prime[0,:])):\n x_i = r_prime[0,mic]\n y_i = r_prime[1,mic]\n temp_signal_sample = 0\n for source in range(len(sources)):\n if (sources[source].get_t_start() < t[sample]) and (t[sample] < sources[source].get_t_end()):\n frequencies_ps = sources[source].get_frequency()\n theta_source = sources[source].get_theta()\n phi_source = sources[source].get_phi()\n rho_source = sources[source].get_rho()\n for freq_ind in range(len(frequencies_ps)):\n k = 2*math.pi*frequencies_ps[freq_ind]/c\n r_1 = np.array([x_i,y_i,0])\n r_2 = rho_source * r_vec(theta_source,phi_source)\n norm_coeff = np.linalg.norm(r_2-r_1)\n phase_offset = -k*norm_coeff\n element_amplitude = 1/norm_coeff\n temp_signal_sample += element_amplitude * math.sin(2*math.pi* frequencies_ps[freq_ind] * t[sample] + phase_offset)\n Audio_signal[sample,mic] = temp_signal_sample\n return Audio_signal\n\ndef r_vec(theta,phi):\n r = np.array([(math.sin(theta)*math.cos(phi)), math.sin(theta)*math.sin(phi), math.cos(theta)])\n return r\n\ndef 
filtering(array_audio_signals, sub_arrays, frequency_bands, f_sampling, elements):\n audio_filtered_complete = np.zeros((sub_arrays, len(frequency_bands)), dtype=object)\n filter_order = config.filter_order # filter order\n scale_factor = config.scale_factor # scale factor, making filter bandwidth more narrow\n\n for array in range(sub_arrays):\n Audio_signal = array_audio_signals[array].get_audio_signals()\n calibration_weights = load_calibration_weights(array, elements, len(frequency_bands))\n for freq_ind in range(len(frequency_bands)):\n # filter design for each band\n nu_0 = 2*frequency_bands[freq_ind]/f_sampling # normalized frequency\n cut_off = [nu_0 - nu_0/scale_factor, nu_0 + nu_0/scale_factor]\n b = signal.firwin(filter_order, cut_off, window=\"hamming\", pass_zero=False) # filter coefficients\n audio_temp = np.zeros((len(Audio_signal[:,0]),elements))\n for mic_ind in range(elements):\n # apply filter on every signal recorded from the elements\n audio_temp[:,mic_ind] = calibration_weights[freq_ind,mic_ind] * signal.lfilter(b, 1.0, Audio_signal[:,mic_ind])\n\n audio_filtered_complete[array,freq_ind] = Audio_data(audio_temp)\n\n # plot all filters\n plt.figure(2)\n w, h = signal.freqz(b, worN=8000)\n H = 20*np.log10(abs(h))\n plt.plot((w/math.pi)*f_sampling/2, H, linewidth=2)\n plt.xlabel('Frequency (Hz)')\n plt.ylabel('Gain (dB)')\n plt.title('Frequency Response of all filters')\n plt.ylim(-5, 0.5)\n plt.xlim(70, f_sampling/2)\n plt.grid(True)\n print('Filtering of signals completed.')\n return audio_filtered_complete\n\ndef load_calibration_weights(array, elements, f_bands):\n # placeholder function, to be completed later\n # function should load calibration weights from file\n # returns matrix with calibration weights for all microphones, at all calibration frequencies\n weights = np.ones((f_bands,elements))\n #hamming_array = np.blackman(config.rows)\n #for i in range(3):\n # hamming_array = np.append(hamming_array, hamming_array)\n #weights[35:45,] = np.multiply(weights[35:45,], np.transpose(hamming_array))\n return weights\n\ndef scanning(y_listen, x_listen, r_scan, frequency_bands, audio_filtered_complete, array_matrices, f_sampling, sub_arrays):\n # Start scanning\n color_maps_complete = np.zeros(len(frequency_bands), dtype=object)\n for freq_ind in range(len(frequency_bands)):\n color_map_new = np.zeros((len(y_listen),len(x_listen)))\n color_maps_complete[freq_ind] = Color_map(color_map_new)\n for x_ind in range(len(x_listen)):\n print('\\n x_ind: '+str(x_ind+1)) # print for user\n x = x_listen[x_ind] # current x-coordinate\n print('\\t y_ind:', end=' ') # print for user\n for y_ind in range(len(y_listen)):\n print(str(y_ind+1), end=' ') # print for user\n y = y_listen[y_ind] # current y-coordinate\n z_0 = math.sqrt(r_scan**2 - x**2 - y**2)\n theta = math.acos(z_0/r_scan) # get theta from our x,y coordinates\n phi = math.atan2(y,x) # get phi from our x,y coordinates\n for freq_ind in range(len(frequency_bands)):\n # apply beamforming algo. 
in every frequency band \n #mic_data = beam(audio_filtered_complete, array_matrices, theta, phi, frequency_bands[freq_ind], freq_ind, f_sampling)\n mic_data = beam_forming_algorithm(audio_filtered_complete, array_matrices, theta, phi, frequency_bands[freq_ind], freq_ind, f_sampling)\n \n # obtain relative power in the listening direction\n color = sum(abs(mic_data)**2)\n\n # relative power in the direction [theta, phi] saved in matrix\n color_maps_complete[freq_ind].set_color(y_ind, x_ind, color)\n return color_maps_complete\n\ndef spatial_filtering(array_audio_signals, array_matrices, theta, phi, adaptive_weight_matrix, calibration_weights, filter_coefficients, frequency_bands, f_sampling, samples, sub_arrays):\n x_factor = math.sin(theta) * math.cos(phi)\n y_factor = math.sin(theta) * math.sin(phi)\n audio_out = np.zeros((samples,1))\n\n for array in range(sub_arrays):\n r_prime = array_matrices[array].get_r_prime()\n audio_signals = array_audio_signals[array].get_audio_signals()\n \n elements = config.rows*config.columns\n print('freq_ind: ', end=' ')\n for freq_ind in range(len(filter_coefficients[:,0])):\n print(str(freq_ind+1), end=' ') # print for user\n b = filter_coefficients[freq_ind,:] # filter coefficient for the current band\n frequency = frequency_bands[freq_ind] # center frequency\n k = 2*math.pi*frequency/c # the narrowband wavenumber\n ny = frequency/f_sampling # normalized frequency\n weights = adaptive_weight_matrix[weight_index(frequency)-1,:]\n\n audio_temp = np.zeros((samples,1))\n mic_data = np.zeros((samples,1))\n\n for mic_ind in range(elements):\n if weights[mic_ind] == 1:\n audio_temp[:,0] = calibration_weights[freq_ind,mic_ind] * signal.lfilter(b, 1.0, audio_signals[:,mic_ind])\n phase_shift_value = -k*(r_prime[0,mic_ind] * x_factor + r_prime[1,mic_ind]*y_factor)\n\n # Sum the individually shifted data from the antenna elements as well as weight them with\n # appropriate weight.\n mic_data += weights[mic_ind] * phase_shift(audio_temp,ny,phase_shift_value)\n\n norm_coeff = 1/sum(weights)\n audio_out += mic_data * norm_coeff\n return audio_out\n\ndef beam_forming_algorithm(audio_filtered_complete, array_matrices, theta, phi, frequency, freq_ind, f_sampling):\n #samples = len(audio_filtered_complete[0, 0].get_audio_signals()[:,0]) # amount of samples of the audio tracks #################### ORIGINAL ####################\n samples = len(audio_filtered_complete[0, 0].get_audio_signals()[200:,0])\n array_data = np.zeros((samples,1))\n\n for array in range(len(array_matrices)):\n # Use the filtered audio signals\n \n #audio_temp_signals = audio_filtered_complete[array, freq_ind].get_audio_signals() #################### ORIGINAL ####################\n audio_temp_signals = audio_filtered_complete[array, freq_ind].get_audio_signals()[200:,]\n\n # The r_prime vector of the matrix array to know the location of every element, as well as how many \n # elements exist.\n r_prime = array_matrices[array].get_r_prime()\n elements = array_matrices[array].get_elements()\n\n # The narrowband wavenumber\n k = 2*math.pi*frequency/c\n\n # The normalized frequency\n ny = frequency/f_sampling\n\n # Initialize output vector\n mic_data = np.zeros((samples,1))\n\n # The compensation factors to obtain uniform phase in the direction r_hat(theta,phi)\n x_factor = math.sin(theta)*math.cos(phi)\n y_factor = math.sin(theta)*math.sin(phi)\n # Adaptive configuration of the antenna array\n \n adaptive_weight = adaptive_array_config(array_matrices[array], frequency)\n\n for mic_ind in 
range(elements):\n # calculate the narrowband phase-shift\n phase_shift_value = -k*(r_prime[0,mic_ind] * x_factor + r_prime[1,mic_ind]*y_factor)\n\n # Sum the individually shifted data from the antenna elements as well as weight them with\n # appropriate weight.\n mic_data = mic_data + adaptive_weight[0,mic_ind] * phase_shift(audio_temp_signals[:,mic_ind],ny,phase_shift_value)\n\n norm_coeff = 1/sum(adaptive_weight[0])\n mic_data = mic_data * norm_coeff\n\n # Perform the beamforming algorithm (phase-shift input signal according to listening direction)\n array_data += mic_data\n\n return array_data\n\ndef adaptive_array_config(matrix_array, frequency):\n # Adaptive configuration of the antenna array\n # Selects only necessary antenna-elements to maintain small beamwidth\n\n row_elements = matrix_array.get_row_elements()\n column_elements = matrix_array.get_row_elements()\n uni_distance = matrix_array.get_uni_distance()\n \n wavelength = c/frequency # wavelength of signal\n wavelength_rel = uni_distance/wavelength # relative wavelength to distance between microphone elements\n\n if wavelength_rel>0.1581:\n # mode 1\n weight = np.ones((1, row_elements*column_elements))\n return weight\n elif (wavelength_rel <= 0.156) and (wavelength_rel > 0.0986):\n mode = 3\n elif (wavelength_rel <= 0.0986) and (wavelength_rel > 0.085):\n mode = 5\n elif (wavelength_rel <= 0.085) and (wavelength_rel > 0.07):\n mode = 6\n else:\n mode = 7\n\n weight = np.zeros((1,row_elements*column_elements))\n row_lim = math.ceil(row_elements/mode)\n column_lim = math.ceil(column_elements/mode)\n\n for i in range(row_lim):\n for j in range(column_lim):\n element_index = (mode*i*row_elements + mode*j) # this calculation could be wrong thanks to matlab and python index :))\n weight[0,element_index] = 1\n return weight\n\n\ndef adaptive_array_config_matrix(matrix_array):\n # Creates the weight matrix\n row_elements = matrix_array.get_row_elements()\n column_elements = matrix_array.get_row_elements()\n\n weight_matrix = np.zeros((7, row_elements*column_elements))\n\n\n for mode in range(1,config.modes+1):\n weight = np.zeros((1,row_elements*column_elements))\n row_lim = math.ceil(row_elements/mode)\n column_lim = math.ceil(column_elements/mode)\n for i in range(row_lim):\n for j in range(column_lim):\n element_index = (mode*i*row_elements + mode*j) # this calculation could be wrong thanks to matlab and python index :))\n weight[0,element_index] = 1\n weight_matrix[mode-1,:] = weight\n return weight_matrix\n\ndef weight_index(frequency):\n # calculates what mode to use, depending on the wavelength of the signal\n uni_distance = config.distance # distance between elements\n wavelength_rel = frequency*uni_distance/c # relative wavelength to distance between microphone elements\n\n if wavelength_rel>0.1581:\n mode = 1\n elif (wavelength_rel <= 0.156) and (wavelength_rel > 0.0986):\n mode = 3\n elif (wavelength_rel <= 0.0986) and (wavelength_rel > 0.085):\n mode = 5\n elif (wavelength_rel <= 0.085) and (wavelength_rel > 0.07):\n mode = 6\n else:\n mode = 7\n return mode\n\ndef phase_shift(x,ny,phase):\n # Input signal x\n #\n # Output signal y\n #\n # if x = cos(n*2*pi*ny), then y = cos(n*2*pi*ny + phase)\n #\n x_length = len(x)\n y = np.zeros((x_length,1))\n\n for i in range(x_length-1):\n y[i] = math.cos(phase) * x[i] + math.sin(phase)/(2*math.pi*ny)*(x[i+1]/2 - x[i-1]/2)\n return y\n\ndef extents(f):\n # function to show images with correct grid values (with plt.imshow()), corresponding to our scanning window\n delta = f[1]-f[0]\n 
return [f[0] - delta/2, f[-1] + delta/2]\n\ndef validation_check(y_listen, x_listen, sources, r_scan):\n # Validation check\n xy_val_check = np.zeros((len(y_listen),len(x_listen)))\n\n for x_ind in range(len(x_listen)):\n x = x_listen[x_ind]\n for y_ind in range(len(y_listen)):\n y = y_listen[y_ind]\n temp_val = 0\n for source_ind in range(len(sources)):\n x_s = r_scan * math.sin(sources[source_ind].get_theta()) * math.cos(sources[source_ind].get_phi())\n y_s = r_scan * math.sin(sources[source_ind].get_theta()) * math.sin(sources[source_ind].get_phi())\n temp_val += 1/math.sqrt(((x_s -x)**2 + (y_s - y)**2))\n xy_val_check[y_ind, x_ind] = temp_val\n\n plt.figure(3)\n plt.imshow(xy_val_check, extent= extents(x_listen) + extents(y_listen), origin='lower')\n plt.title('Actual location of sources')\n\ndef show_beamforming_results(y_listen, x_listen, color_maps):\n color_map_intensity = np.zeros((len(y_listen), len(x_listen)))\n for freq_ind in range(config.f_bands_N):\n color_map_intensity += color_maps[freq_ind].get_color_data_matrix() \n plt.figure(4)\n plt.imshow(color_map_intensity, extent= extents(x_listen) + extents(y_listen), origin='lower')\n plt.title('Beamforming results')\n return color_map_intensity\n\ndef show_colormap(filename, x_listen, y_listen):\n color_map = np.load('color_maps/'+filename+'.npy', allow_pickle=True)\n plt.figure(figsize=(8, 6), dpi=150)\n plt.imshow(color_map, extent= extents(x_listen) + extents(y_listen), origin='lower')\n plt.suptitle('Beamforming results')\n plt.title(filename)\n plt.savefig('color_maps_jpg/'+filename+'micorder.jpg')\n\ndef maximum_intensity(color_maps_complete, frequency_bands):\n # Get maximum intensity of the signals, at any frequency\n max_intensity = 0\n for freq_ind in range(len(frequency_bands)):\n intensity = np.max(np.max(color_maps_complete[freq_ind].get_color_data_matrix()))\n if intensity > max_intensity:\n max_intensity = intensity\n return max_intensity\n\ndef calculate_filter_coefficients(f_sampling, frequency_bands):\n scale_factor = config.scale_factor\n f_bands_N = config.f_bands_N\n filter_order = config.filter_order\n f_coefficients = np.zeros((f_bands_N, filter_order))\n for freq_ind in range(config.f_bands_N):\n nu_0 = 2*frequency_bands[freq_ind]/f_sampling\n cut_off = [nu_0 - nu_0/scale_factor, nu_0 + nu_0/scale_factor]\n b = signal.firwin(filter_order, cut_off, window=\"hamming\", pass_zero=False) # filter coefficients\n f_coefficients[freq_ind,:] = b\n return f_coefficients\n\ndef load_data_FPGA(filename):\n # Function that loads data from txt file, recorded by FPGA\n initial = config.initial_values\n \n # Load recorded data from file\n path = Path(config.path + filename + '.txt')\n data = np.loadtxt(open(path,'rb').readlines()[:-1],delimiter=',')\n f_sampling = data[0,2] # read sampling frequency from header\n data = data[:,4:] # take out microphone signals (ignore header)\n data = data[initial:initial+300,:] # skip initial samples, and take out only 300 samples from the signal to process\n order = np.empty((64),dtype=int)\n for i in range(64):\n order[i] = i\n for i in range(0,63,8):\n order[i:i+8] = np.flip(order[i:i+8])\n data = data[:,order]\n return Audio_data(data), int(f_sampling)\n\ndef play_sound(sound_signal, f_sampling):\n\n sounddevice.play(sound_signal, f_sampling, blocking=True)\n\n sf.write(\"test.wav\", sound_signal, int(f_sampling), 'PCM_24')\n\ndef generate_filename():\n if config.sources == 1:\n filename ='emul_'+str(config.f_start1)+'Hz_'+'theta='+str(config.theta_deg1)+'_phi='+str(config.phi_deg1)+ 
\\\n '_samples='+str(config.samples)+'_A'+str(config.active_arrays)\n elif config.sources == 2:\n filename ='emul_'+str(config.f_start1)+str(config.f_start2)+'Hz_'+'theta='+str(config.theta_deg1)+str(config.theta_deg2)+ \\\n '_phi='+str(config.phi_deg1)+str(config.phi_deg2)+'_samples='+str(config.samples)+'_A'+str(config.active_arrays)\n return filename\n\n\ndef main():\n # Initialization\n # f_sampling, t_start, t_end, away_distance are user defined variables\n if config.audio_signals == 'emulated':\n f_sampling = config.f_sampling # sampling frequency in Hz\n t_start = config.t_start # start time of simulation \n t_end = config.t_end # end time of simulation\n t_total = t_end - t_start # total simulation time\n samples = math.floor(f_sampling*t_total)\n t = np.linspace(t_start, t_end-1/f_sampling, samples) # time vector\n\n away_distance = config.away_distance # distance between the array and sources\n\n # set up arrays\n array_matrices = antenna_setup()\n sub_arrays = len(array_matrices)\n print('Number of active arrays: '+ str(sub_arrays))\n\n # AUDIO SIGNALS\n if config.audio_signals == 'emulated':\n filename = generate_filename()\n # Create and place out sources\n source1 = Audio_source(config.f_start1, config.f_end1, config.f_res1,\n config.theta_deg1, config.phi_deg1, config.away_distance, \n config.t_start1, config.t_end1)\n source2 = Audio_source(config.f_start2, config.f_end2, config.f_res2,\n config.theta_deg2, config.phi_deg2, config.away_distance, \n config.t_start2, config.t_end2)\n source_list = [source1, source2]\n sources = np.array(source_list[:config.sources])\n try:\n # array_audio_signals = np.load(filename)\n array_audio_signals = np.load('emulated_data/'+filename+'.npy', allow_pickle=True)\n print(\"Loading from Memory: \" + filename)\n except:\n print(\"Creating data file:\"+filename)\n\n # GENERATE AUDIO SIGNAL\n # will only be used to emulate data, this will not be used when the algorithm runs with real data\n array_audio_signals = np.zeros((sub_arrays), dtype=object)\n print('Number of samples generated (of '+str(f_sampling*t_total)+'):')\n for array in range(sub_arrays): # PARALLEL\n # generate the audio signals on each array-element for each sub-array\n temp_signal = generate_array_signals(array_matrices[array], sources, t)\n array_audio_signals[array] = Audio_data(temp_signal)\n print('Audio signal for array '+str(array+1)+' generated')\n\n np.save('emulated_data/'+filename, array_audio_signals)\n\n elif config.audio_signals == 'recorded':\n filename = config.filename\n print('Loading recorded data: '+filename)\n array_audio_signals = np.zeros((sub_arrays), dtype=object)\n array_audio_signals[0], f_sampling = load_data_FPGA(filename)\n\n else:\n array_audio_signals = 0\n\n # BEAMFORMING values\n # set up 
scanning window\n x_res = config.x_res # resolution in x\n y_res = config.y_res # resolution in y\n x_listen = np.linspace(-1,1,x_res) # scanning window, x coordinates\n y_listen = np.linspace(-1,1,y_res) # scanning window, y coordinates\n r_scan = math.sqrt(2) # radius of our scanning window, r_scan² = x²+y²+z²\n\n # Filtering values\n f_bands_N = config.f_bands_N # number of frequency bands\n bandwidth = [100, 16000/2-16000/100] # bandwidth of incoming audio signal\n frequency_bands = np.linspace(bandwidth[0],bandwidth[1],f_bands_N) # vector holding center frequencies of all frequency bands\n samples = len(array_audio_signals[0].get_audio_signals()[:,0])\n\n # load adaptive weights, calibration weights and filter coefficients\n adaptive_weight_matrix = adaptive_array_config_matrix(array_matrices[0])\n calibration_weights = load_calibration_weights(0,config.rows*config.columns, config.f_bands_N)\n filter_coefficients = calculate_filter_coefficients(f_sampling, frequency_bands)\n\n # FILTERING\n audio_filtered_complete = filtering(array_audio_signals, sub_arrays, frequency_bands, f_sampling, array_matrices[0].get_elements())\n \n # start scanning after sources, at all different angles, \n intensity_maps = scanning(y_listen, x_listen, r_scan, frequency_bands, audio_filtered_complete, array_matrices, f_sampling, sub_arrays)\n\n # Check where the source is actually located\n if config.audio_signals == 'emulated':\n validation_check(y_listen, x_listen, sources, r_scan)\n\n # show the beamforming results\n total_intensity = show_beamforming_results(y_listen, x_listen, intensity_maps)\n\n # save color map\n filename_colormap = filename +'_colormap'\n np.save('color_maps/'+filename_colormap, total_intensity)\n show_colormap(filename_colormap, x_listen, y_listen)\n\n plt.show()\n\n # SPATIAL FILTERING\n audio_filtered = spatial_filtering(array_audio_signals, array_matrices, config.theta_listen, config.phi_listen, adaptive_weight_matrix, calibration_weights, filter_coefficients, frequency_bands, f_sampling, samples, sub_arrays)\n\n\nmain()","repo_name":"acoustic-warfare/micarray","sub_path":"python/Batprogram.py","file_name":"Batprogram.py","file_ext":"py","file_size_in_byte":25767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16929011130","text":"def solution(left, right):\n answer = 0\n for num in range(left, right+1):\n mcount = 0\n for m in range(1, int(num**(1/2))+1):\n if num%m == 0:\n mcount += 1\n if m ** 2 != num:\n mcount += 1\n if mcount % 2 == 0:\n answer += num\n else:\n answer -= num\n \n return answer","repo_name":"chae-heechan/Algorithm_Study","sub_path":"Python/Programmers/Level1/add_measure_and_count.py","file_name":"add_measure_and_count.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27268895077","text":"from random import randint\n\nimport gym\nfrom gym.spaces import Discrete\n\nMOVE_LEFT = 0\nMOVE_RIGHT = 1\n\n\nclass Corridor(gym.Env):\n metadata = {'render.modes': ['human', 'ansi']}\n\n REWARD = 1000\n\n def __init__(self, size=20):\n self._size = size\n self._position = None\n self._transitions = self._calculate_transitions()\n\n self.observation_space = Discrete(size)\n self.action_space = Discrete(2)\n\n def reset(self):\n self._position = randint(0, self._size - 2)\n return str(self._position)\n\n def step(self, action):\n if action == MOVE_LEFT:\n self._position -= 1\n elif action == MOVE_RIGHT:\n 
self._position += 1\n else:\n raise ValueError(\"Illegal action passed\")\n\n if self._position == self._size - 1:\n return str(self._position), self.REWARD, True, {}\n\n if self._position == -1:\n self._position = 0\n\n return str(self._position), 0, False, {}\n\n def render(self, mode='human'):\n if mode == 'human':\n print(self._visualize())\n elif mode == 'ansi':\n return self._visualize()\n else:\n raise ValueError('Unknown visualisation mode')\n\n def get_transitions(self):\n return self._transitions\n\n def _visualize(self):\n corridor = [\"\" for _ in range(0, self._size - 1)]\n corridor[self._position - 1] = \"X\"\n corridor[self._size - 2] = \"$\"\n return \"[\" + \".\".join(corridor) + \"]\"\n\n def _calculate_transitions(self):\n START, END = 0, self._size - 1\n LEFT, RIGHT = 0, 1\n\n def _handle_state(state):\n moves = []\n if state == START:\n moves.append((state, RIGHT, state + 1))\n else:\n moves.append((state, LEFT, state - 1))\n moves.append((state, RIGHT, state + 1))\n\n return moves\n\n transitions = []\n\n for state in range(START, END):\n transitions += _handle_state(state)\n\n return transitions\n\n def _state_action(self):\n \"\"\"\n Return states and possible actions in each of them\n \"\"\"\n mapping = {}\n for p in range(0, self._size):\n mapping[p] = [MOVE_LEFT, MOVE_RIGHT]\n\n # Corner cases\n # mapping[0] = [MOVE_RIGHT]\n mapping[self._size - 1] = []\n\n # Cast int keys to str\n mapping = {str(k): v for k, v in mapping.items()}\n\n return mapping\n","repo_name":"ParrotPrediction/openai-envs","sub_path":"gym_corridor/corridor.py","file_name":"corridor.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"28791886921","text":"\n\nclass RbcConfig:\n \"\"\"\n RBC counting config\n \"\"\"\n\n def __init__(self):\n self.threshold: int = 120\n self.boundary_low: int = 10\n self.boundary_up: int = 25\n \n\nclass WbcClassifyConfig:\n \"\"\"\n WBC classify config\n \"\"\"\n\n def __init__(self): \n self.kernel_size:int = 3\n self.in_channels:int = 3\n self.filters:list = [16,32,64]\n self.linear_dim:int = 512\n self.n_category:int = 5\n self.epochs:int = 500\n self.batch_size:int = 8\n self.lr:float = 1e-10\n \nclass WbcSegmentConfig:\n \"\"\"\n WBC segmentation config\n \"\"\"\n\n def __init__(self):\n self.threshold: int = 50\n self.kernel: int = 13\n self.boundary: int = 5000 ","repo_name":"saeed5959/CellCounter","sub_path":"core/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"16405322009","text":"class Solution:\n def removeKdigits(self, num: str, k: int) -> str:\n stack = []\n for i in range(len(num)):\n while stack and stack[-1]>num[i] and k>0:\n stack.pop()\n k-=1\n stack.append(num[i])\n \n while k>0:\n stack.pop()\n k-=1\n \n ans = \"\".join(stack)\n if not ans:\n return \"0\"\n return str(int(ans))\n","repo_name":"kky0426/TIL","sub_path":"Leetcode/leetcode_402.py","file_name":"leetcode_402.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"72641873196","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear 
algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\nimport torch\n\nimport torch.nn as nn\n\nimport torch.utils.data as data_utils\n\nfrom torchvision import transforms\n\nfrom torchvision.models import resnet50\n\nfrom PIL import Image\n\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nfrom IPython.core.debugger import set_trace\n\nimport os\n\n# for dirname, _, filenames in os.walk('/kaggle/input'):\n\n# for filename in filenames:\n\n# print(os.path.join(dirname, filename))\n\n\n\n# Any results you write to the current directory are saved as output.\nall_data = pd.read_csv(\"/kaggle/input/aptos2019-blindness-detection/\" + \"train.csv\")\n\nall_data.head()\n\nall_data[\"diagnosis\"].value_counts().plot(kind=\"pie\")\n\ntrain_data, validation_data = train_test_split(all_data, stratify = all_data.diagnosis.values, test_size=0.01)\n\n\nfig, axes = plt.subplots(nrows=1, ncols=2)\n\ntrain_data.diagnosis.value_counts().plot(kind=\"pie\", ax=axes[0],title=\"train data\")\n\nvalidation_data.diagnosis.value_counts().plot(kind=\"pie\", ax=axes[1], title=\"validation data\")\nvalidation_data.shape\nclass dataSet_(data_utils.Dataset):\n\n def __init__(self, data_dir, data_frame, image_transform):\n\n super().__init__()\n\n self.dir = data_dir\n\n self.label = data_frame.values[:,0]\n\n self.diagnosis = data_frame.values[:,1]\n\n self.transfrom = image_transform\n\n\n\n \n\n def __len__(self):\n\n return len(self.label)\n\n \n\n def __getitem__(self, idx):\n\n file_to_be_loaded = self.label[idx] \n\n array = Image.open(self.dir + file_to_be_loaded + \".png\")\n\n array = self.transfrom(array)\n\n return array, torch.tensor(self.diagnosis[idx], dtype=torch.float32)\n\n \n\n \n\n \ntrain_dir = '/kaggle/input/aptos2019-blindness-detection/train_images/'\n\nvalid_dir = '/kaggle/input/aptos2019-blindness-detection/train_images/'\n\ntest_dir = '/kaggle/input/aptos2019-blindness-detection/test_images/'\nimage_transform={\"train\":transforms.Compose([transforms.RandomRotation(degrees=50),transforms.RandomResizedCrop((224,224)),transforms.RandomHorizontalFlip(p=0.5),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])]),\n\n \"test\": transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])])}\n\nTrainData = dataSet_(train_dir, train_data, image_transform[\"train\"])\n\ntrainloader = data_utils.DataLoader(TrainData,batch_size=64)\n\n##############################\n\n## Validation data\n\n\n\nValidData = dataSet_(valid_dir, validation_data, image_transform[\"test\"])\n\nvalidationloader = data_utils.DataLoader(ValidData, batch_size=64)\n\n\n\n### test data\n\ntest_data = pd.read_csv(\"/kaggle/input/aptos2019-blindness-detection/\" + \"sample_submission.csv\")\n\nTestData = dataSet_(test_dir, test_data, image_transform[\"test\"])\n\ntestloader = data_utils.DataLoader(TestData, batch_size=64)\n\nimages,_ = next(iter(testloader))\n\nplt.imshow(images.data.numpy().squeeze()[0,:,:,:].reshape(224,224,3))\n\nmodel = resnet50(pretrained=True)\nfrom collections import OrderedDict\n\nimport torch.functional as F\n\n\n\nfor param in model.parameters():\n\n param.requires_grad = False\n\n\n\nin_feature = 
model.fc.in_features\n\n\n\nfc = nn.Sequential(OrderedDict([(\"fc1\", nn.Linear(in_feature, 512)),\n\n (\"relu1\", nn.ReLU()),\n\n (\"fc2\", nn.Linear(512, 5)),\n\n (\"output\",nn.LogSoftmax(dim=1))]))\n\nmodel.fc = fc\ndevice = (\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ncriterion = nn.NLLLoss()\n\noptimizer = torch.optim.Adam(model.fc.parameters(), lr=0.01)\nvalid_loss_min = np.Inf\n\nmodel.to(device)\n\nfor epoch in range(10):\n\n running_loss = 0.0\n\n model.train()\n\n for train_data, train_label in trainloader:\n\n train_data, train_label = train_data.to(device), train_label.to(device)\n\n log_ps = model.forward(train_data)\n\n optimizer.zero_grad() \n\n train_loss = criterion(log_ps, train_label.long())\n\n running_loss += train_loss.item()\n\n train_loss.backward()\n\n optimizer.step()\n\n else:\n\n running_test_loss = 0.0\n\n with torch.no_grad():\n\n model.eval()\n\n for test_data, test_label in validationloader:\n\n test_data, test_label = test_data.to(device), test_label.to(device)\n\n test_log_ps = model.forward(test_data)\n\n test_loss = criterion(test_log_ps, test_label.long())\n\n running_test_loss += test_loss.item()\n\n if test_loss <= valid_loss_min:\n\n print(\"Validation loss decreased from {:.7f} -----> {:.7f}\".format(valid_loss_min,test_loss))\n\n # save the model if validation error decreases \n\n torch.save(model.state_dict(), 'model_augmented.pt')\n\n valid_loss_min = test_loss\n\n \n\n\n\n print(f\" Epoch {epoch} , Training loss = {running_loss/len(trainloader)} Test loss = {running_test_loss/len(validationloader)}\")\n\ncheckpoint = torch.load('model_augmented.pt')\n\nmodel.load_state_dict(checkpoint)\nnb_classes = 5\n\n\n\nconfusion_matrix = torch.zeros(nb_classes, nb_classes)\n\nmodel.to(device)\n\nmodel.eval()\n\nwith torch.no_grad():\n\n for i, (inputs, classes) in enumerate(validationloader):\n\n inputs = inputs.to(device)\n\n classes = classes.to(device)\n\n outputs = model.forward(inputs)\n\n _, preds = torch.max(outputs.data.cpu(), 1)\n\n #set_trace()\n\n for t, p in zip(classes.view(-1), preds.view(-1)):\n\n confusion_matrix[t.long(), p.long()] += 1\n\n\n\nprint(confusion_matrix)\n# initialize lists to monitor test loss and accuracy\n\ntest_loss = 0.0\n\nclass_correct = list(0. for i in range(10))\n\nclass_total = list(0. 
for i in range(10))\n\n\n\n\nmodel.eval() # prep model for evaluation\n\nwith torch.no_grad(): \n\n    for data, target in validationloader:\n\n # forward pass: compute predicted outputs by passing inputs to the model\n\n data, target = data.to(device), target.to(device)\n\n output = model(data)\n\n # calculate the loss\n\n loss = criterion(output, target.long())\n\n # update test loss \n\n test_loss += loss.item()*data.size(0)\n\n # convert output probabilities to predicted class\n\n _, pred = torch.max(output, 1)\n\n # compare predictions to true label\n\n correct = np.squeeze(pred.eq(target.long().data.view_as(pred)))\n\n # calculate test accuracy for each object class\n\n for i in range(len(target)):\n\n #set_trace()\n\n label = target[i].long().item()\n\n #set_trace()\n\n class_correct[label] += correct[i].item()\n\n class_total[label] += 1\n\n\n\n # calculate and print avg test loss\n\n test_loss = test_loss/len(validationloader)\n\n print('Test Loss: {:.6f}\\n'.format(test_loss))\n\n\n\n for i in range(10):\n\n if class_total[i] > 0:\n\n print('Validation Accuracy of %5s: %2d%% (%2d/%2d)' % (\n\n str(i), 100 * class_correct[i] / class_total[i],\n\n np.sum(class_correct[i]), np.sum(class_total[i])))\n\n else:\n\n print('Validation Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\n\n\n print('\\n Validation Accuracy (Overall): %2d%% (%2d/%2d)' % (\n\n 100. * np.sum(class_correct) / np.sum(class_total),\n\n np.sum(class_correct), np.sum(class_total)))\n# obtain one batch of test images\n\ndataiter = iter(validationloader)\n\nimages, labels = next(dataiter)\n\nimages, labels = images.to(device), labels.to(device)\n\nwith torch.no_grad(): \n\n model.eval()\n\n # get sample outputs\n\n output = model.forward(images)\n\n # convert output probabilities to predicted class\n\n _, preds = torch.max(output, 1)\n\n # prep images for display\n\n images = images.data.cpu().numpy()\n\n\n\n # plot the images in the batch, along with predicted and true labels\n\n fig = plt.figure(figsize=(25, 4))\n\n for idx in np.arange(20):\n\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n\n #set_trace()\n\n ax.imshow(images[idx].reshape(224,224,3))\n\n ax.set_title(\"{} ({})\".format(str(preds[idx].item()), str(labels[idx].long().item())),\n\n color=(\"green\" if preds[idx]==labels[idx].long() else \"red\"))\n#filename_arrays = []\n\ntest_output =[]\n\nwith torch.no_grad():\n\n model.eval()\n\n for testdata,_ in testloader:\n\n testdata = testdata.to(device)\n\n #filename_arrays += list(filename)\n\n log_ps = model.forward(testdata)\n\n test_prob = torch.exp(log_ps)\n\n _, test_pred_class = torch.max(test_prob, dim=1)\n\n test_output += test_pred_class.cpu().data.tolist()\nsample_output = {\"id\": test_data.id_code.values.tolist(), \"diagnosis\": test_output}\n\nsample_output_dataframe = pd.DataFrame(sample_output)\n\nsample_output_dataframe.to_csv(\"submission.csv\",index=False)\nimport os\n\nos.chdir(r'../working')\n\nfrom IPython.display import FileLink\n\nFileLink(r'submission.csv')","repo_name":"aorursy/new-nb-7","sub_path":"spandan28_resnet-based-fine-tuning.py","file_name":"spandan28_resnet-based-fine-tuning.py","file_ext":"py","file_size_in_byte":9579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"34151889463","text":"def reader(plik):\n f = open(plik)\n T = int(f.readline()) # number of plots\n cases = []\n for t in range(T):\n cases.append({})\n l_1 = f.readline()\n NK = list(eval(l_1.replace(\" \",\",\")))\n N = 
NK[0] # number of FP points\n K = NK[1] # max number of disabled FPs\n lista = [] # input connections\n for i in range(N): \n li_2 = f.readline()\n lista.append({})\n cxy = list(eval(li_2.replace(\" \",\",\")))\n lista[i][\"id\"] = cxy[0]\n lista[i][\"yx\"] = cxy[1:] \n cases[-1] = {\"N\":N, \"K\": K, 'FPs': lista}\n return cases\n\ndef sort(xy,coordinates):\n \"\"\"xy - string 'x' or 'y', for an axis you want to sort by\"\"\"\n coords = coordinates[:]\n if xy == \"x\":\n k = 1\n else:\n k = 0\n flag = True\n while flag:\n flag = False\n for i in range(len(coords)-1):\n if coords[i][k]>coords[i+1][k]:\n coords[i],coords[i+1] = coords[i+1],coords[i]\n flag = True\n return coords\n\ndef Jarvis(coords):\n coords_y = sort('y',coords)\n coords_x = sort('x',coords)\n pass \n\ndef main():\n cases = reader('rancho00.in')\n for case in cases:\n print(\"CASE: {}\".format(cases.index(case)))\n FPs = case[\"FPs\"]\n N = case[\"N\"]\n K = case[\"K\"]\n minx,miny,maxx,maxy = 10**4,10**4,0,0\n for fp in FPs:\n if fp['yx'][0] < miny:\n miny = fp['yx'][0]\n if fp['yx'][1] < minx:\n minx = fp['yx'][1]\n if fp['yx'][0] > maxy:\n maxy = fp['yx'][0]\n if fp['yx'][1] > maxx:\n maxx = fp['yx'][1]\n #print('{}: {}'.format(fp[\"id\"],fp[\"xy\"]))\n rancho = []\n for y in range(miny,maxy+1):\n rancho.append([])\n for x in range(minx,maxx+1):\n rancho[-1].append(0)\n for fp in FPs:\n fp['yx'][0] -= miny\n fp['yx'][1] -= minx\n #print('{}: {}'.format(fp[\"id\"],fp[\"xy\"]))\n rancho[fp['yx'][0]][fp['yx'][1]] = fp['id']\n coords = [f[\"yx\"] for f in FPs]\n for i in rancho:\n for j in i:\n print(j,end=\"\")\n print(\"\")\n ##MAX\n max_rancho = []\n for x in range(len(rancho)):\n max_rancho.append([])\n for y in range(len(rancho[x])):\n pass\n ##MIN\n min_rancho = []\n \n#main()\n \n","repo_name":"marcinowski/competitions","sub_path":"Deadline24 2016/Eliminacje/sets/Rancho.py","file_name":"Rancho.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27386827291","text":"\nfrom flask import Blueprint,render_template, request,session,redirect,make_response\nfrom app.models.base import db\nfrom app.models.course import Course\nfrom app.models.student import Student\nfrom app.models.CourseAndStudent import CourseAndStudent\nfrom app.models.member import Member\n\nimport xlrd\nfrom io import BytesIO\n\nteacherBP = Blueprint('teacher',__name__)\nimport io\nimport xlsxwriter\n\n\n\n\n\n@teacherBP.route('', methods=['GET'])\ndef get_teacher():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n courselst = Course.query.filter_by(teacherId=session.get(\"id\"))\n\n\n return render_template('teacher_templates/teacher.html',name=name,result=courselst)\n\n\n\n\n\n@teacherBP.route(\"/form/group\",methods=['GET'])\ndef FormGroups():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n\n return render_template(\"teacher_templates/enternumofgroup.html\")\n\n@teacherBP.route(\"/export\",methods=['GET'])\ndef Export():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n courseId=request.args.get(\"id\")\n\n return render_template(\"teacher_templates/selectstudents.html\",name=name,courseId=courseId)\n\n\n@teacherBP.route('/export_excel/', methods=['GET'])\ndef export_excel():\n if request.method == 'GET':\n courseId=request.args.get(\"courseId\")\n 
studentlst=CourseAndStudent.query.filter_by(CourseId=courseId)\n output = BytesIO()\n # Create the Excel file in memory and stream it directly instead of saving to disk\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n # Set the sheet name to 'download'\n worksheet = workbook.add_worksheet('download')\n # Header row\n title = [\"Id\",\"name\", \"email\", \"password\",\"GPA\",\"programme\",\"contribution\"]\n worksheet.write_row('A1', title)\n count=0\n for i in studentlst:\n count+=1\n i=Student.query.filter_by(id=i.studentId).first()\n member=Member.query.filter_by(studentId=i.id).first()\n if member is None:\n row = [i.id, i.name, i.email,i.password,i.GPA,i.programme,\"\"]\n\n else:\n\n row = [i.id, i.name, i.email,i.password,i.GPA,i.programme,member.contribution]\n\n worksheet.write_row('A' + str(count + 2), row)\n workbook.close()\n response = make_response(output.getvalue())\n output.close()\n response.headers['Content-Type'] = \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n response.headers[\"Cache-Control\"] = \"no-cache\"\n response.headers[\"Content-Disposition\"] = \"attachment; filename=download.xlsx\"\n return response\n\n@teacherBP.route('/import',methods=[\"GET\"])\ndef Import():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n id=request.args.get(\"id\")\n\n return render_template(\"teacher_templates/import.html\", name=name,id=id)\n\n\n\n@teacherBP.route('/import/class',methods=[\"GET\",\"POST\"])\ndef ImportClass():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n courseId=request.args.get(\"courseId\")\n\n if request.method==\"GET\":\n\n return render_template(\"teacher_templates/importclassinfo.html\",name=name,courseId=courseId)\n else:\n file = request.files.get(\"files\")\n f = file.read() # file contents\n data = xlrd.open_workbook(file_contents=f)\n table = data.sheets()[0]\n names = data.sheet_names() # return the names of all worksheets in the workbook\n status = data.sheet_loaded(names[0]) # check whether sheet1 has finished loading\n nrows = table.nrows # get the number of valid rows in this sheet\n\n\n\n for i in range(0,nrows):\n s = table.row_values(i) # values of the current row\n student = Student.query.filter_by(email=s[5]).first()\n if student:\n continue\n student = Student.query.filter_by(id=s[3]).first()\n if student:\n continue\n with db.auto_commit():\n # name password programme id gpa email contribution\n try:\n student = Student(s[0], s[4],s[3],s[5], s[2], s[1])\n # database insert operation\n db.session.add(student)\n except:\n continue\n\n student = Student.query.filter_by(email=s[5]).first()\n if student is not None:\n with db.auto_commit():\n courseAndStudent = CourseAndStudent()\n courseAndStudent.CourseId = courseId\n courseAndStudent.studentId = student.id\n\n db.session.add(courseAndStudent)\n\n\n\n\n return render_template(\"teacher_templates/importclassinfo.html\",message=\"Added successfully\",courseId=courseId,name=name)\n\n\n\n\n\n\n\n\n\n@teacherBP.route(\"/import/individual\",methods=['GET',\"POST\"])\ndef ImportIndividual():\n if session.get(\"id\") is None:\n return redirect(\"/user/login\")\n name=session.get(\"name\")\n courseId =request.args.get(\"id\")\n\n if request.method==\"GET\":\n\n\n return render_template(\"teacher_templates/importindividual.html\",courseId=courseId,name=name)\n else:\n name=request.form.get(\"name\")\n password=request.form.get(\"password\")\n gpa=request.form.get(\"gpa\")\n email=request.form.get(\"email\")\n programme=request.form.get(\"programme\")\n id=request.form.get(\"id\")\n\n if email ==\"\" or password ==\"\" or gpa==\"\" or programme==\"\" or name==\"\" or id==\"\":\n return render_template(\"teacher_templates/importindividual.html\", 
courseId=courseId, name=name,message=\"Incomplete parameters\")\n\n student=Student.query.filter_by(email=email).first()\n if student:\n return render_template(\"teacher_templates/importindividual.html\", courseId=courseId, name=name,message=\"This student already exists\")\n student=Student.query.filter_by(id=id).first()\n if student:\n return render_template(\"teacher_templates/importindividual.html\", courseId=courseId, name=name,message=\"This student already exists\")\n\n\n\n with db.auto_commit():\n student = Student(name,id,gpa,email,password,programme)\n\n # database insert operation\n db.session.add(student)\n\n student=Student.query.filter_by(email=email).first()\n if student is not None:\n with db.auto_commit():\n courseAndStudent = CourseAndStudent()\n courseAndStudent.CourseId = courseId\n courseAndStudent.studentId = student.id\n db.session.add(courseAndStudent)\n\n return render_template(\"teacher_templates/importindividual.html\", courseId=courseId, name=name, message=\"Student submitted successfully\")\n\n\n\n\n\n\n","repo_name":"hui1995/student_teacher_flask","sub_path":"project_v1/project_v1/app/controller/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"28123042627","text":"#!usr/bin/python\n# -*- coding:utf8 -*-\n\"\"\"\nDefine a function that takes the head node of a singly linked list, reverses the list, and returns the head node of the reversed list.\n\nInput: 1->2->3->4->5->NULL\nOutput: 5->4->3->2->1->NULL\n\n[address](https://leetcode-cn.com/problems/fan-zhuan-lian-biao-lcof/)\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef reverse_list(head: ListNode) -> ListNode:\n # 1. Two pointers\n if not head:\n return head\n pre = None\n cur = head\n while cur:\n cur.next, pre, cur = pre, cur, cur.next\n\n return pre\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"youaresherlock/PythonPractice","sub_path":"Foundation/algorithms/leetcode翻转链表(剑指offer).py","file_name":"leetcode翻转链表(剑指offer).py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"13154483830","text":"import importlib\nimport os\nimport tempfile\n\nfrom collections import Counter\n\nimport numpy as np\nfrom openbabel import pybel\n\nfrom rmgpy import settings\nfrom rmgpy.molecule import Molecule\nfrom rmgpy.quantity import ScalarQuantity\n\nfrom arkane.encorr.bac import BAC, CrossVal\nfrom arkane.encorr.data import BACDataset, BOND_SYMBOLS, _pybel_to_rmg\nfrom arkane.encorr.reference import ReferenceDatabase\nfrom arkane.exceptions import BondAdditivityCorrectionError\nfrom arkane.modelchem import LevelOfTheory, CompositeLevelOfTheory\nimport pytest\n\n\nclass TestBAC:\n \"\"\"\n A class for testing that the BAC class functions properly.\n \"\"\"\n\n @classmethod\n def setup_class(cls):\n cls.lot_get = LevelOfTheory(method=\"CCSD(T)-F12\", basis=\"cc-pVTZ-F12\", software=\"Molpro\")\n cls.lot_get_composite = CompositeLevelOfTheory(\n freq=LevelOfTheory(method=\"wb97xd3\", basis=\"def2tzvp\", software=\"qchem\"),\n energy=LevelOfTheory(method=\"ccsd(t)f12\", basis=\"ccpvtzf12\", software=\"molpro\"),\n )\n cls.lot_fit = LevelOfTheory(method=\"wB97M-V\", basis=\"def2-TZVPD\", software=\"Q-Chem\")\n cls.lot_nonexisting = LevelOfTheory(\"notamethod\")\n\n cls.bac = BAC(cls.lot_get)\n\n cls.tmp_melius_params = {\n \"atom_corr\": {\n \"H\": 1.0,\n \"C\": 2.0,\n \"N\": 3.0,\n \"O\": 4.0,\n \"S\": 5.0,\n \"F\": 6.0,\n \"Cl\": 7.0,\n \"Br\": 8.0,\n },\n \"bond_corr_length\": {\n \"H\": 
1.0,\n \"C\": 2.0,\n \"N\": 3.0,\n \"O\": 4.0,\n \"S\": 5.0,\n \"F\": 6.0,\n \"Cl\": 7.0,\n \"Br\": 8.0,\n },\n \"bond_corr_neighbor\": {\n \"H\": 1.0,\n \"C\": 2.0,\n \"N\": 3.0,\n \"O\": 4.0,\n \"S\": 5.0,\n \"F\": 6.0,\n \"Cl\": 7.0,\n \"Br\": 8.0,\n },\n \"mol_corr\": 1.0,\n }\n cls.tmp_petersson_params = {\"C-H\": 1.0, \"C-C\": 2.0, \"C=C\": 3.0, \"C-O\": 4.0}\n\n # Set molecule, bonds, nums, and coords for testing Petersson and Melius BACs\n cls.multiplicity = 1\n smi = \"C=C(OSC=S)C#CC1C(=O)N=CNC1SSC(O)C#N\"\n\n mol = Molecule(smiles=smi, multiplicity=cls.multiplicity)\n cls.bonds = Counter(f\"{b.atom1.element.symbol}{BOND_SYMBOLS[b.order]}{b.atom2.element.symbol}\" for b in mol.get_all_edges())\n\n pybel_mol = pybel.readstring(\"smi\", smi)\n pybel_mol.addh()\n pybel_mol.make3D()\n mol_3d = _pybel_to_rmg(pybel_mol)\n cls.nums = [atom.number for atom in mol_3d.atoms]\n cls.coords = np.array([atom.coords for atom in mol_3d.atoms])\n\n def test_loading_parameters(self):\n \"\"\"\n Test that BAC parameters for levels of theory are loaded\n correctly and that errors are raised otherwise.\n \"\"\"\n self.bac.level_of_theory = self.lot_get\n self.bac.bac_type = \"p\"\n assert isinstance(self.bac.bacs, dict)\n\n self.bac.level_of_theory = self.lot_nonexisting\n self.bac.bac_type = \"m\"\n assert self.bac.bacs is None\n\n with pytest.raises(BondAdditivityCorrectionError):\n self.bac.bac_type = \"\"\n\n def test_load_database(self):\n \"\"\"\n Test that reference database can be loaded.\n \"\"\"\n key = self.bac.load_database(names=\"main\")\n expected_key = (os.path.join(settings[\"database.directory\"], \"reference_sets\", \"main\"),)\n assert key == expected_key\n assert isinstance(self.bac.ref_databases[key], ReferenceDatabase)\n\n # Test that other instance already has loaded database\n bac = BAC(self.lot_fit)\n assert isinstance(bac.ref_databases[key], ReferenceDatabase)\n\n def test_get_correction(self):\n \"\"\"\n Test that BAC corrections can be obtained.\n \"\"\"\n self.bac.level_of_theory = self.lot_get\n self.bac.bac_type = \"p\"\n corr = self.bac.get_correction(bonds=self.bonds)\n assert isinstance(corr, ScalarQuantity)\n\n # Can use actual Melius parameters once they're available in database\n self.bac.bac_type = \"m\"\n with pytest.raises(BondAdditivityCorrectionError):\n # No multiplicity specified\n self.bac._get_melius_correction(coords=self.coords, nums=self.nums, params=self.tmp_melius_params)\n corr1 = self.bac._get_melius_correction(\n coords=self.coords,\n nums=self.nums,\n multiplicity=self.multiplicity,\n params=self.tmp_melius_params,\n )\n assert isinstance(corr1, ScalarQuantity)\n\n self.bac.level_of_theory = self.lot_nonexisting\n with pytest.raises(BondAdditivityCorrectionError):\n self.bac.get_correction()\n\n def _clear_bac_data(self):\n self.bac.bacs = None\n self.bac.species = self.bac.ref_data = self.bac.calc_data = self.bac.bac_data = None\n\n def _check_bac_data(self):\n assert isinstance(self.bac.bacs, dict)\n assert isinstance(self.bac.dataset, BACDataset)\n assert self.bac.database_key is not None\n assert self.bac.dataset.calculate_stats(for_bac_data=True).rmse < self.bac.dataset.calculate_stats().rmse\n\n def test_fit_petersson(self):\n \"\"\"\n Test that Petersson BAC parameters can be derived.\n \"\"\"\n self.bac.level_of_theory = self.lot_fit\n self.bac.bac_type = \"p\"\n self._clear_bac_data()\n self.bac.fit()\n\n self._check_bac_data()\n assert \"C-H\" in self.bac.bacs\n\n self.bac.level_of_theory = self.lot_nonexisting\n with 
pytest.raises(BondAdditivityCorrectionError):\n self.bac.fit()\n\n def test_fit_melius(self):\n \"\"\"\n Test that Melius BAC parameters can be derived.\n \"\"\"\n self.bac.level_of_theory = self.lot_fit\n self.bac.bac_type = \"m\"\n self._clear_bac_data()\n\n # With molecular correction, no global opt\n self.bac.fit(fit_mol_corr=True, global_opt=False, lsq_max_nfev=50)\n self._check_bac_data()\n assert set(self.bac.bacs.keys()) == {\"atom_corr\", \"bond_corr_length\", \"bond_corr_neighbor\", \"mol_corr\"}\n assert round(abs(self.bac.bacs[\"mol_corr\"] - 0.0), 7) != 0\n\n # Without molecular correction, with global opt\n self._clear_bac_data()\n self.bac.fit(fit_mol_corr=False, global_opt=True, global_opt_iter=1, lsq_max_nfev=50)\n self._check_bac_data()\n assert round(abs(self.bac.bacs[\"mol_corr\"] - 0.0), 7) == 0\n\n def test_test(self):\n \"\"\"\n Test that enthalpies of formation can be evaluated.\n \"\"\"\n with pytest.raises(BondAdditivityCorrectionError) as e:\n self.bac.test(species=[], db_names=[])\n assert \"several data sources\" in str(e.exconly())\n\n with pytest.raises(BondAdditivityCorrectionError) as e:\n self.bac.test(species=[])\n assert \"No data\" in str(e.exconly())\n\n self.bac.level_of_theory = self.lot_fit\n self.bac.bac_type = \"m\"\n self.bac.bacs = self.tmp_melius_params\n\n # Get a few species to test on\n key = self.bac.load_database(names=\"main\")\n species = self.bac.ref_databases[key].extract_level_of_theory(self.bac.level_of_theory, as_error_canceling_species=False)[:10]\n\n dataset = self.bac.test(species=species)\n assert isinstance(dataset, BACDataset)\n assert dataset.bac_data is not None\n\n def test_write_to_database(self):\n \"\"\"\n Test that BAC parameters can be written to a file.\n \"\"\"\n # Check that error is raised when no BACs are available\n self.bac.bacs = None\n with pytest.raises(BondAdditivityCorrectionError) as e:\n self.bac.write_to_database()\n assert \"No BACs\" in str(e.exconly())\n\n self.bac.level_of_theory = self.lot_get\n self.bac.bac_type = \"p\"\n self.bac.bacs = self.tmp_petersson_params\n\n tmp_datafile_fd, tmp_datafile_path = tempfile.mkstemp(suffix=\".py\")\n\n # Check that error is raised if BACs already exist and overwrite is False\n with pytest.raises(IOError) as e:\n self.bac.write_to_database(alternate_path=tmp_datafile_path)\n assert \"overwrite\" in str(e.exconly())\n\n # Dynamically set data file as module\n spec = importlib.util.spec_from_file_location(os.path.basename(tmp_datafile_path), tmp_datafile_path)\n module = importlib.util.module_from_spec(spec)\n\n # Check that existing Petersson BACs can be overwritten\n self.bac.write_to_database(overwrite=True, alternate_path=tmp_datafile_path)\n spec.loader.exec_module(module) # Load data as module\n assert self.bac.bacs == module.pbac[repr(self.bac.level_of_theory)]\n\n # Check that existing Composite Petersson BACs can be overwritten\n self.bac.level_of_theory = self.lot_get_composite\n self.bac.write_to_database(overwrite=True, alternate_path=tmp_datafile_path)\n spec.loader.exec_module(module) # Load data as module\n assert self.bac.bacs == module.pbac[repr(self.bac.level_of_theory)]\n\n # Check that new Petersson BACs can be written\n self.bac.level_of_theory = self.lot_nonexisting\n self.bac.bacs = self.tmp_petersson_params\n self.bac.write_to_database(alternate_path=tmp_datafile_path)\n spec.loader.exec_module(module) # Reload data module\n assert self.bac.bacs == module.pbac[repr(self.bac.level_of_theory)]\n\n # Check that new Melius BACs can be written\n 
self.bac.bac_type = \"m\"\n self.bac.bacs = self.tmp_melius_params\n self.bac.write_to_database(alternate_path=tmp_datafile_path)\n spec.loader.exec_module(module)\n assert self.bac.bacs == module.mbac[repr(self.bac.level_of_theory)]\n\n os.close(tmp_datafile_fd)\n os.remove(tmp_datafile_path)\n\n def test_save_correlation_mat(self):\n \"\"\"\n Test that visual of correlation matrix can be generated.\n \"\"\"\n self.bac.correlation = None\n with pytest.raises(BondAdditivityCorrectionError):\n self.bac.save_correlation_mat(\"\")\n\n self.bac.bacs = self.tmp_melius_params\n self.bac.correlation = np.random.uniform(size=(24, 24))\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n tmp_corr_path = os.path.join(tmpdirname, \"corr.pdf\")\n self.bac.save_correlation_mat(tmp_corr_path)\n assert os.path.exists(tmp_corr_path)\n\n\nclass TestCrossVal:\n \"\"\"\n A class for testing that the CrossVal class functions properly.\n \"\"\"\n\n def setup_class(self):\n lot = LevelOfTheory(method=\"wB97M-V\", basis=\"def2-TZVPD\", software=\"Q-Chem\")\n self.cross_val = CrossVal(lot)\n\n def test_init(self):\n \"\"\"\n Test that CrossVal is initialized correctly.\n \"\"\"\n assert isinstance(self.cross_val.level_of_theory, LevelOfTheory)\n assert self.cross_val.bac_type == \"p\"\n assert self.cross_val.n_folds == -1\n assert self.cross_val.dataset is None\n assert self.cross_val.bacs is None\n\n def test_leave_one_out(self):\n \"\"\"\n Test leave-one-out cross-validation.\n Setting n_folds as -1 causes the number of folds to equal the length of the dataset.\n \"\"\"\n idxs = [19, 94, 191]\n self.cross_val.n_folds = -1\n self.cross_val.fit(idxs=idxs)\n\n assert isinstance(self.cross_val.dataset, BACDataset)\n assert len(self.cross_val.dataset) == 3\n assert self.cross_val.dataset.bac_data is not None\n assert len(self.cross_val.bacs) == 3\n\n train_folds = [[19, 94], [19, 191], [94, 191]]\n for i, bac in enumerate(self.cross_val.bacs):\n assert isinstance(bac, BAC)\n assert len(bac.dataset) == 2\n for d in bac.dataset:\n assert d.spc.index != train_folds[i]\n\n def test_kfold(self):\n \"\"\"\n Test k-fold cross-validation.\n \"\"\"\n idxs = [0, 1, 2, 3]\n self.cross_val.n_folds = 2\n self.cross_val.fit(idxs=idxs)\n\n assert isinstance(self.cross_val.dataset, BACDataset)\n assert len(self.cross_val.dataset) == 4\n assert self.cross_val.dataset.bac_data is not None\n assert len(self.cross_val.bacs) == 2\n\n train_folds = [\n [0, 1],\n [0, 2],\n [0, 3],\n [1, 2],\n [1, 3],\n [2, 3],\n ]\n for i, bac in enumerate(self.cross_val.bacs):\n assert isinstance(bac, BAC)\n assert len(bac.dataset) == 2\n for d in bac.dataset:\n assert d.spc.index != train_folds[i]\n","repo_name":"ReactionMechanismGenerator/RMG-Py","sub_path":"test/arkane/encorr/bacTest.py","file_name":"bacTest.py","file_ext":"py","file_size_in_byte":12792,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"73"} +{"seq_id":"30606466926","text":"import json\r\nfrom point import Point\r\nfrom locations import *\r\n\r\nHOME = Point(0.12, 0.34, 0.56)\r\nME = Point(0.78, 0.89, 0.90)\r\n\r\n\r\nclass IntentHandler:\r\n def __init__(self, aircraft_api, send_text_to_speech):\r\n super().__init__()\r\n self.aircraft_api = aircraft_api\r\n self.send_text_to_speech = send_text_to_speech\r\n\r\n def handle_intent(self, intent):\r\n x = json.loads(intent)\r\n intent_name = x['intent']['name']\r\n entities = x['entities']\r\n handlers = {\r\n 'FlyMission': self._handle_fly_mission,\r\n 'FlyToMe': 
self._handle_fly_to_me,\r\n            'FlyToWaypoint': self._handle_fly_to_waypoint,\r\n            'GetAircraftLocation': self._handle_get_aircraft_location,\r\n            'GetStarepointLocation': self._handle_get_starepoint_location,\r\n            'ReturnHome': self._handle_return_home,\r\n            'StareAtWaypoint': self._handle_stare_at_waypoint,\r\n            'StareAtMe': self._handle_stare_at_me,\r\n            'SwitchToBlackHot': self._switch_to_black_hot,\r\n            'SwitchToEo': self._switch_to_eo,\r\n            'SwitchToIr': self._switch_to_ir,\r\n            'SwitchToWhiteHot': self._switch_to_white_hot,\r\n            'ZoomIn': self._zoom_in,\r\n            'ZoomOut': self._zoom_out,\r\n        }\r\n        func = handlers.get(\r\n            intent_name, lambda x: print('cannot handle intent: {}'.format(intent)))\r\n        func(entities)\r\n\r\n    def _handle_fly_mission(self, entities):\r\n        try:\r\n            mission_name = entities[0]['value']\r\n            mission = MISSIONS[mission_name]\r\n        except Exception as e:\r\n            print(\"Exception {}\".format(e))\r\n            raise e\r\n        self.aircraft_api.fly_mission(mission)\r\n        self.send_text_to_speech('Executing mission {}.'.format(mission_name))\r\n\r\n    def _handle_fly_to_me(self, entities):\r\n        self.aircraft_api.fly_to_point(ME)\r\n        self.send_text_to_speech('Flying to you.')\r\n\r\n    def _handle_fly_to_waypoint(self, entities):\r\n        try:\r\n            waypoint_name = entities[0]['value']\r\n            point = WAYPOINTS[waypoint_name]\r\n        except Exception as e:\r\n            print(\"Exception {}\".format(e))\r\n            raise e\r\n        self.aircraft_api.fly_to_point(point)\r\n        self.send_text_to_speech(\r\n            'Flying to waypoint {}.'.format(waypoint_name))\r\n\r\n    def _handle_get_aircraft_location(self, entities):\r\n        location = self.aircraft_api.get_aircraft_location()\r\n        self.send_text_to_speech('The location of the aircraft is unknown.')\r\n\r\n    def _handle_get_starepoint_location(self, entities):\r\n        location = self.aircraft_api.get_starepoint_location()\r\n        self.send_text_to_speech('The location of the starepoint is unknown.')\r\n\r\n    def _handle_return_home(self, entities):\r\n        self.aircraft_api.fly_to_point(HOME)\r\n        self.send_text_to_speech('Flying home.')\r\n\r\n    def _handle_stare_at_waypoint(self, entities):\r\n        try:\r\n            waypoint_name = entities[0]['value']\r\n            point = WAYPOINTS[waypoint_name]\r\n        except Exception as e:\r\n            print(\"Exception {}\".format(e))\r\n            raise e\r\n        self.aircraft_api.stare_at_point(point)\r\n        self.send_text_to_speech(\r\n            'Staring at waypoint {}.'.format(waypoint_name))\r\n\r\n    def _handle_stare_at_me(self, entities):\r\n        self.aircraft_api.stare_at_point(ME)\r\n        self.send_text_to_speech('Staring at you.')\r\n\r\n    def _switch_to_black_hot(self, entities):\r\n        self.aircraft_api.switch_to_black_hot()\r\n        self.send_text_to_speech('Switching to Black Hot.')\r\n\r\n    def _switch_to_eo(self, entities):\r\n        self.aircraft_api.switch_to_eo()\r\n        self.send_text_to_speech('Switching to EO.')\r\n\r\n    def _switch_to_ir(self, entities):\r\n        self.aircraft_api.switch_to_ir()\r\n        self.send_text_to_speech('Switching to IR.')\r\n\r\n    def _switch_to_white_hot(self, entities):\r\n        self.aircraft_api.switch_to_white_hot()\r\n        self.send_text_to_speech('Switching to White Hot.')\r\n\r\n    def _zoom_in(self, entities):\r\n        self.aircraft_api.zoom_in()\r\n        self.send_text_to_speech('Zooming in.')\r\n\r\n    def _zoom_out(self, entities):\r\n        self.aircraft_api.zoom_out()\r\n        self.send_text_to_speech('Zooming out.')\r\n
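# A hedged usage sketch (the JSON shape is inferred from handle_intent above;
# aircraft_api and send_text_to_speech are whatever the caller injects):
#     handler = IntentHandler(aircraft_api, send_text_to_speech)
#     handler.handle_intent('{"intent": {"name": "ZoomIn"}, "entities": []}')
# Unknown intent names fall through to the default lambda, so adding a new
# capability is one handler method plus one entry in the handlers dict.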
","repo_name":"mclayton7/rhasspy-uas-control","sub_path":"intent_handler.py","file_name":"intent_handler.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"23116702462","text":"# Run a program to determine if the credit card number provided is valid according to the Luhn algorithm.\ndef main():\n    card_number = get_card()\n    checksum = calc_checksum(card_number)\n    check_card_type(card_number, checksum)\n\n\ndef get_card():\n    # The \"while true\" loop in python runs without any conditions until the break statement executes inside the loop. To run a statement if a python while loop fails, the programmer can implement a python \"while\" with else loop. Python does not support the \"do while\" loop\n    while True:\n        card_number = input(\"Number: \")\n        # Python isnumeric() method checks whether all the characters of the string are numeric characters or not. It returns True if all the characters are true, otherwise returns False.\n        if card_number.isnumeric():\n            # 'Break' in Python is a loop control statement. ... If you are using it in nested loops, it will terminate the innermost loop where you have used it, and the control of the program will flow to the outer loop. In other words, it breaks the sequence of the loop, and the control goes to the first statement outside the loop\n            break\n    # The return keyword in Python exits a function and tells Python to run the rest of the main program. A return keyword can send a value back to the main program. While values may have been defined in a function, you can send them back to your main program and read them throughout your code\n    return card_number\n\n\ndef calc_checksum(card_number):\n    even_sum = 0\n    odd_sum = 0\n    # The reversed() method returns the reversed iterator of the given sequence. It is the same as the iter() method but in reverse order. ... If the given object is not a sequence, then override __reversed__() method in the class to be used with the reversed() function.\n    card_number = reversed([int(digit) for digit in card_number])\n    # The Python enumerate() function adds a counter to an iterable object. ... The built-in enumerate() function allows you to loop over a list of items while keeping track of the index value in a separate variable.\n    for i, digit in enumerate(card_number):\n        if (i + 1) % 2 == 0:\n            odd_digit = digit * 2\n            if odd_digit > 9:\n                odd_sum += int(odd_digit / 10) + odd_digit % 10\n            else:\n                odd_sum += odd_digit\n        else:\n            even_sum += digit\n    checksum = even_sum + odd_sum\n    return checksum\n\n\n
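# Worked example with the common test number 4003600000000014: reversing the
# digits, every second one is doubled and digit-summed (1 -> 2, 6 -> 12 -> 3,
# 4 -> 8, total 13); the untouched digits add 7; 13 + 7 = 20 ends in 0, so
# calc_checksum() % 10 == 0 and the number passes the Luhn check.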
def check_card_type(card_number, checksum):\n    start_number = int(card_number[0:2])\n    # The len() Python method returns the length of a list, string, dictionary, or any other iterable data format in Python. ... The Python len() method is a built-in function that can be used to calculate the length of any iterable object.\n    card_length = len(card_number)\n    checksum_last_digit = checksum % 10\n\n    if checksum_last_digit == 0:\n        # There are three Boolean operators in Python: and, or, and not. With them, you can test conditions and decide which execution path your programs will take.\n        if start_number in [34, 37] and card_length == 15:\n            print(\"AMEX\")\n        elif (int(card_number[0]) == 4) and card_length in [13, 16]:\n            print(\"VISA\")\n        elif (start_number in range(51, 56)) and card_length == 16:\n            print(\"MASTERCARD\")\n        else:\n            print(\"INVALID\")\n    else:\n        print(\"INVALID\")\n\n\nmain()\n","repo_name":"Amirhosseinsediqi/Cs50-labs-and-Problem-sets","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"40046158279","text":"import json\n# import sys\n\nf = open('blocks.json')\nblocks = json.load(f)\n\n# match sys.argv[0]:\n#     case \"example\":\n#         print(\"process\")\n\n# ETHEREUM FUNCTION SIGNATURES\n    # ERC721\n    # transferFrom(address,address,uint256):\t0x23b872dd\n    # safeTransferFrom(address,address,uint256):\t0x42842e0e\n\n# for each block\n    # for each transaction\n        # if the input begins with one of the specified function signatures\n            # record an edge using the from and to addresses\n            # (future: also include the value to give this edge a weight)\n\ne = ''\nn = {'count': 0}\n\nfor block in blocks:\n    for tx in block['transactions']:\n        funcSig = tx['input'][0:10]\n        if funcSig == '0x23b872dd' or funcSig == '0x42842e0e':\n            e += tx['from'] + '\\t' + tx['to'] + \"\\n\"\n            if tx['from'] not in n:\n                n[tx['from']] = n['count']\n                n['count'] += 1\n            if tx['to'] not in n:\n                n[tx['to']] = n['count']\n                n['count'] += 1\n\nf = open('edges', 'w')\nf.write(e)\nf = open('nodes.json', 'w')\njson.dump(n, f)","repo_name":"EmblemStudio/web3-social-graph","sub_path":"get_graph.py","file_name":"get_graph.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"39166870064","text":"def max_pay(pancakes):\n    i = 0\n    j = len(pancakes) - 1\n    prev_max = min(pancakes[0], pancakes[-1])\n    pays = 0\n    while i <= j:\n        if pancakes[i] < pancakes[j]:\n            if pancakes[i] >= prev_max:\n                prev_max = pancakes[i]\n                pays += 1\n            i += 1\n        else:\n            if pancakes[j] >= prev_max:\n                prev_max = pancakes[j]\n                pays += 1\n            j -= 1\n    return pays\n\nif __name__ == \"__main__\":\n    T = int(input())\n    out = \"\"\n    for t in range(T):\n        N = int(input())\n        pancakes = [int(x) for x in input().split(\" \")]\n        out += f\"Case #{t+1}: {max_pay(pancakes)}\\n\"\n    out = out[:-1]\n    print(out)\n\n# Example\n# 4\n# 2\n# 1 5\n# 4\n# 1 4 2 3\n# 5\n# 10 10 10 10 10\n# 4\n# 7 1 3 1000000\n","repo_name":"4gatepylon/gists","sub_path":"Algos/codejam_1b_2022/pancakes.py","file_name":"pancakes.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"41222923062","text":"# https://programmers.co.kr/learn/courses/30/lessons/77486\n# Solution) simulation, graph - exceeds the time limit..!!\n\n\ndef solution(enroll, referral, seller, amount):\n    parent = dict()\n    result = dict(zip(enroll, [0]*len(enroll)))\n    for i in range(len(referral)):\n        parent[enroll[i]] = referral[i]\n    \n    for i in range(len(seller)):\n        x = seller[i]\n        price = amount[i] * 100\n        tmp = dict(zip(enroll, [0]*len(enroll)))\n\n        while True:\n            parent_price = int(price * 0.1)\n            x_price = price - parent_price\n            price = parent_price\n\n            tmp[x] = x_price\n            if parent[x] != '-':\n                tmp[parent[x]] = price\n\n            x = parent[x]\n            if x == '-':\n                break\n        \n        for key in result.keys():\n            result[key] += tmp[key]\n\n    return list(result.values())\n
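# Why this solution was flagged as exceeding the time limit: each sale walks
# the referral chain (fine), but it also allocates and merges a `tmp` dict over
# every enrollee, making each sale O(len(enroll)); adding the 10% cut directly
# into `result` while climbing `parent` would keep each sale O(chain depth).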
\n\n\n\nenroll = [\"john\", \"mary\", \"edward\", \"sam\", \"emily\", \"jaimie\", \"tod\", \"young\"]\nreferral = [\"-\", \"-\", \"mary\", \"edward\", \"mary\", \"mary\", \"jaimie\", \"edward\"]\nseller = [\"young\", \"john\", \"tod\", \"emily\", \"mary\"]\namount = [12, 4, 2, 5, 10]\n\nprint(solution(enroll, referral, seller, amount))","repo_name":"hyez/Algorithms","sub_path":"프로그래머스/다단계칫솔판매.py","file_name":"다단계칫솔판매.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"70569009837","text":"# auth: starkizard\n# derived a formula by observation\n# for n = 3, let's see\n# a=   1 2 4 5 7 8 10 11 13 14\n# k=   1 2 3 4 5 6 7  8  9  10\n# a-k= 0 0 1 1 2 2 3  3  4  4\n#\n# We can see that a-k is just k//(n-1) - 1 when k % (n-1) == 0\n# else it is k//(n-1)\n# add k to these results and we get our answer\n\nfor t in range(int(input())):\n    n,k=map(int,input().split())\n    if k%(n-1)==0:\n        print(k+ (k//(n-1) -1))\n    else:\n        print(k+(k//(n-1)))","repo_name":"starkizard/CrudeCode","sub_path":"Codeforces/Codeforces Round #640 (Div 4)/C-K-thNotDivisibleByN.py","file_name":"C-K-thNotDivisibleByN.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"}
+{"seq_id":"74078870636","text":"class check_privilege_revoke_all_rls():\n    \"\"\"\n    check_privilege_revoke_all_rls\n    Ensure 'ALL' Is Revoked from Unauthorized 'GRANTEE' on RLS$. \n    The RLS$ table contains columns that contain the schema and name of a \n    procedure to execute when a table is accessed and that has a Row Level \n    Security policy defined on it.\n    \"\"\"\n    # References:\n    # http://www.davidlitchfield.com/AddendumtotheOracle12cCISGuidelines.pdf\n    # http://www.davidlitchfield.com/oracle_backdoors.pdf\n    \n    TITLE    = 'Revoke ALL from RLS$'\n    CATEGORY = 'Privilege'\n    TYPE     = 'sql'\n    SQL      = \"SELECT GRANTEE, PRIVILEGE FROM DBA_TAB_PRIVS WHERE TABLE_NAME = 'RLS$'\"\n\n    verbose = False\n    skip    = False\n    result  = {}\n\n    def do_check(self, *results):\n        self.result['level'] = 'GREEN'\n        output = ''\n\n        for rows in results:\n            for row in rows:\n                self.result['level'] = 'RED'\n                output += row[0] + ' with ' + row[1] + ' on RLS$\\n'\n\n        if 'GREEN' == self.result['level']:\n            output = 'No user with grants to RLS$.'\n\n        self.result['output'] = output\n\n        return self.result\n\n    def __init__(self, parent):\n        print('Performing check: ' + self.TITLE)\n","repo_name":"foospidy/DbDat","sub_path":"plugins/oracle/check_privilege_revoke_all_rls.py","file_name":"check_privilege_revoke_all_rls.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"73"}
+{"seq_id":"3799941657","text":"import random\nimport string\nfrom graph import Graph, Vertex\nimport re\nimport os\n\ndef get_words_from_text(text_path):\n    with open(text_path, 'r') as f:\n        text = f.read()\n    #remove brackets and text in it\n    text = re.sub(r'\\[(.+)\\]', ' ', text)\n    #removing white spaces\n    text = ' '.join(text.split())\n    text = text.lower()\n    #removing punctuation\n    text = text.translate(str.maketrans('','', string.punctuation))\n\n    words = text.split()\n    return words\n\n
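# make_graph (below) builds a word-bigram Markov chain on top of the local
# graph module: for the words "the cat the dog", the vertex for "the" would
# carry edge counts {cat: 1, dog: 1}, which generate_probability_mappings()
# is then expected to normalize into the distribution compose() samples from.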
def make_graph(words):\n    g = Graph()\n\n    previous_word = None\n\n    #check if word is in graph and add an edge if not add it to graph (done in get_vertex)\n    for word in words:\n        word_vertex = g.get_vertex(word)\n        \n        #add edge if there is previous word \n        if previous_word:\n            previous_word.increment_edge(word_vertex)\n\n        previous_word = word_vertex \n    \n    #set probability mapping\n    g.generate_probability_mappings()\n\n    return g\n\ndef compose(g, words, length=50):\n    composition = []\n    word = g.get_vertex(random.choice(words))\n    for i in range(length):\n        if (i+1)%5 == 0:\n            composition.append(word.value)\n            composition.append('\\n')\n        else:\n            composition.append(word.value)\n        word = g.get_next_word(word)\n\n    return ' '.join(composition)\n\ndef main(artist):\n    \n    words = []\n\n    #get the words we want to make chain from\n    for song in os.listdir(f'songs/{artist}'):\n        song_words = get_words_from_text(f'songs/{artist}/{song}')\n        words.extend(song_words)\n    \n\n    #make graph using the words\n    g = make_graph(words)\n\n    return compose(g, words, 100)\n    \nif __name__ == \"__main__\":\n    print(main('Eminem'))\n\n\n","repo_name":"XxTyaftioNxX/LyricalBot","sub_path":"compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"22389130247","text":"from django import forms\nfrom django_countries.fields import CountryField\nfrom . import models\n\n\nclass SearchForm(forms.Form):\n\n    city = forms.CharField(initial=\"Anywhere\")\n    country = CountryField(default=\"KR\").formfield()\n    room_type = forms.ModelChoiceField(\n        queryset=models.RoomType.objects.all(), empty_label=\"Any kind\", required=False\n    )\n    price = forms.IntegerField(required=False)\n    guests = forms.IntegerField(required=False)\n    beds = forms.IntegerField(required=False)\n    bedrooms = forms.IntegerField(required=False)\n    baths = forms.IntegerField(required=False)\n    instant_book = forms.BooleanField(required=False)\n    superhost = forms.BooleanField(required=False)\n    amenities = forms.ModelMultipleChoiceField(\n        queryset=models.Amenity.objects.all(),\n        widget=forms.CheckboxSelectMultiple,\n        required=False,\n    )\n    facilities = forms.ModelMultipleChoiceField(\n        queryset=models.Facility.objects.all(),\n        widget=forms.CheckboxSelectMultiple,\n        required=False,\n    )\n\nclass UploadPhotoForm(forms.ModelForm):\n    class Meta:\n        model = models.Photo\n        fields = (\"caption\", \"image_file\")\n        widgets = {\n            \"caption\": forms.TextInput(attrs={\"placeholder\":\"Caption\"}),\n        }\n\n    def save(self, pk, *args, **kwargs):\n        photo = super().save(commit=False)\n        room = models.Room.objects.get(pk=pk)\n        photo.room = room\n        photo.save()\n\n\nclass UploadRoomForm(forms.ModelForm):\n    class Meta:\n        model = models.Room\n        fields = (\n            \"name\",\n            \"description\",\n            \"country\",\n            \"city\",\n            \"price\",\n            \"address\",\n            \"guests\",\n            \"beds\",\n            \"bedrooms\",\n            \"baths\",\n            \"check_in\",\n            \"check_out\",\n            \"instant_book\",\n            \"room_type\",\n            \"amenities\",\n            \"facilities\",\n            \"house_rules\",\n        )\n\n    def save(self, *args, **kwargs):\n        room = super().save(commit=False)\n        return room","repo_name":"ironsoft/airbnb-clone2","sub_path":"rooms/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"30512776481","text":"import GST\n\n\ndef longestRepeatedSubstring(str):\n    n = len(str)\n    LCSRe = [[0 for x in range(n + 1)]\n             for y in range(n + 1)]\n\n    res = \"\"\n    res_length = 0\n\n    index = 0\n    for i in range(1, n + 1):\n        for j in range(i + 1, n + 1):\n\n            if (str[i - 1] == str[j - 1] and\n                    LCSRe[i - 1][j - 1] < (j - i)):\n                LCSRe[i][j] = LCSRe[i - 1][j - 1] + 1\n\n                if (LCSRe[i][j] > res_length):\n                    res_length = 
LCSRe[i][j]\n index = max(i, index)\n\n else:\n LCSRe[i][j] = 0\n\n if (res_length > 0):\n for i in range(index - res_length + 1,\n index + 1):\n res = res + str[i - 1]\n\n return res\ndef MRS():\n input1 = input(\"input:\")\n st = GST.STree(input1)\n print(\"suffix tree generated!\")\n k = int(input(\"please enter k :\"))\n s = st.find_all(longestRepeatedSubstring(input1))\n size = len(s)\n if size >= k:\n print(\"the longest repeated substring is :\")\n print( longestRepeatedSubstring(input1))\n print(s)\n else :\n print(\"nothing\")","repo_name":"vveeiiss/Bioinformatics","sub_path":"Bioinformatics/most_repeated.py","file_name":"most_repeated.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22101194902","text":"from src.models.pcb import PCB\nfrom src.models.page import Page\nfrom src.models.statistic import Statistic\n\nclass HistoricElement:\n time: int\n duration: int\n process_in_execution: PCB or None\n process_to_await: PCB or None\n process_to_execution: PCB or None\n process_to_finish: PCB or None\n page_fault: bool\n swap_in: Page or None\n swap_out: Page or None\n statistic: Statistic\n\n def __init__(self, \n time: int = 0, \n duration: int = 0,\n process_in_execution = None, \n process_to_await = None, \n process_to_execution = None, \n process_to_finish = None, \n page_fault: bool = False, \n swap_in = None, \n swap_out = None,\n statistic: Statistic=Statistic()):\n self.time = time\n self.duration = duration\n self.process_in_execution = process_in_execution\n self.process_to_await = process_to_await\n self.process_to_execution = process_to_execution\n self.process_to_finish = process_to_finish\n self.page_fault = page_fault\n self.swap_in = swap_in\n self.swap_out = swap_out\n self.statistic = statistic\n","repo_name":"Paul0Cesar/OSS","sub_path":"src/models/historic_element.py","file_name":"historic_element.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"8697645825","text":"import sys\nfrom collections import deque\n\nN, M, R = map(int, sys.stdin.readline().rstrip().split())\n\narr = [[] for _ in range(N+1)]\n\nvisited = [0 for _ in range(N + 1)]\n\nfor _ in range(M):\n n1, n2 = map(int, sys.stdin.readline().rstrip().split())\n arr[n1].append(n2)\n arr[n2].append(n1)\n\nqueue = deque([R])\nvisited[R] = 1\ncnt = 1\n\nfor i in range(1, N + 1):\n arr[i].sort()\n\nwhile queue:\n now = queue.popleft()\n\n for next in arr[now]:\n if not visited[next]:\n queue.append(next)\n cnt += 1\n visited[next] = cnt\n\nprint(*visited[1:], sep='\\n')","repo_name":"SSAFY-9-S4-STUDY/SWEAB","sub_path":"day11/B_24444/B_24444_jangwon.py","file_name":"B_24444_jangwon.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72038369801","text":"from typing import Union, Tuple\n\nfrom RE.FiniteStateMachine import FiniteStateMachine, Symbol\nfrom RE.RegularExpression.Expression import Expression\n\n__all__ = (\n \"Wildcard\"\n)\n\n\nclass Wildcard(Expression):\n \"\"\"Wildcard expression implementation.\n\n Attributes:\n wildcard_set (Union of str and frozenset): The set which contains the expected elements.\n\n Examples:\n >>> from RE.RegularExpression.Wildcard import Wildcard\n >>> expression = Wildcard()\n >>> expression.compile()\n >>> print(expression.match(input(\"> \")))\n \"\"\"\n\n 
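    # The annotation below declares which symbols this wildcard accepts:
    # Symbol.SIGMA (the machine's whole alphabet) by default, or a frozenset
    # such as frozenset("abc") to restrict matching to specific characters.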
wildcard_set: Union[str, frozenset]\n\n def __init__(self, wildcard_set: Union[Symbol, frozenset] = Symbol.SIGMA):\n super().__init__()\n self.wildcard_set = wildcard_set\n\n def build(\n self,\n finite_state_machine: FiniteStateMachine,\n base_state: int,\n counter: int,\n end_state: int = None\n ) -> Tuple[int, int]:\n finite_state_machine.add_transition(\n self.wildcard_set,\n base_state,\n {counter}\n if end_state is None\n else {end_state}\n )\n if end_state is None:\n base_state = counter\n counter += 1\n else:\n base_state = end_state\n return base_state, counter\n","repo_name":"000alen/RE","sub_path":"RE/RegularExpression/Wildcard.py","file_name":"Wildcard.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"26516824682","text":"import sys\nfrom bluepy import btle\n\n# mqtt proxy characteristic\n\nsrvc_uuid = '6E400001-B5A3-F393-E0A9-E50E24DCCA9E'\nread_uuid = '6E400003-B5A3-F393-E0A9-E50E24DCCA9E' # read, notify\nsend_uuid = '6E400002-B5A3-F393-E0A9-E50E24DCCA9E' # write\n\nclass MyDelegate(btle.DefaultDelegate):\n def __init__(self):\n btle.DefaultDelegate.__init__(self)\n\n def handleNotification(self, cHandle, data):\n print('data [%s]' % data)\n\naddress = sys.argv[1]\np = btle.Peripheral()\np.connect(address)\np.setMTU(512)\n\nsvc = p.getServiceByUUID(srvc_uuid) # 2800\nch = svc.getCharacteristics(read_uuid)[0] # 2803\ncout = svc.getCharacteristics(send_uuid)[0] # 2803\np.setDelegate(MyDelegate())\n\nsetup_data = b\"\\x01\\x00\"\nhandle = ch.getHandle() + 1\np.writeCharacteristic(handle, setup_data, withResponse=True)\n\nwhile True:\n if p.waitForNotifications(1):\n continue\n cout.write('/topic mqtt message'.encode())\n\n","repo_name":"aikudo/ble-nordic-uart-mqtt-proxy","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4902271176","text":"import app\nfrom flask.ext.testing import TestCase\nfrom middleware.auth import Auth\nimport json\nimport constants\n\n\nclass TestAuth(TestCase):\n def create_app(self):\n return app.create_app(testing=True)\n\n def setUp(self):\n self.auth = Auth(self.app)\n self.app.redis_client.setex(\n constants.REDIS_AUTH_KEY_FORMAT % \"TEST_AUTH_KEY\",\n json.dumps({\"type\": \"flixbus_data_app\"}), 300)\n self.app.redis_client.setex(\n constants.REDIS_ACCESS_TOKEN_KEY_FORMAT % \"TEST_ACCESS_TOKEN\",\n json.dumps({\"user_id\": 123}), 300)\n\n def test_authenticate_without_key(self):\n cred = self.auth._authenticate(\"\", \"\")\n self.assertEquals(cred, {})\n\n def test_authenticate_with_key(self):\n cred = self.auth._authenticate(\"TEST_AUTH_KEY\", \"TEST_ACCESS_TOKEN\")\n self.assertNotEquals(cred, {})\n self.assertEquals(cred['client_type'], 'flixbus_data_app')\n self.assertEquals(cred['user_id'], 123)\n","repo_name":"shubham837/flix_api","sub_path":"flixbus/tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43091531211","text":"\"\"\"\nImplement a function called closest_power that meets the specifications below.\ndef closest_power(base, num):\n '''\n base: base of the exponential, integer > 1\n num: number you want to be closest to, integer > 0\n Find the integer exponent such that base**exponent is closest to num.\n Note that the base**exponent may be either 
greater or smaller than num.\n    In case of a tie, return the smaller value.\n    Returns the exponent.\n    '''\n    closest_power(3,12) returns 2\nclosest_power(4,12) returns 2\nclosest_power(4,1) returns 0\n\n\"\"\"\ndef closest_power(base, num):\n    '''\n    base: base of the exponential, integer > 1\n    num: number you want to be closest to, integer > 0\n    Find the integer exponent such that base**exponent is closest to num.\n    Note that the base**exponent may be either greater or smaller than num.\n    In case of a tie, return the smaller value.\n    Returns the exponent.\n    '''\n    exponent = 0\n    # walk exponents upward until num falls between consecutive powers,\n    # then return whichever neighbour is closer (smaller exponent on a tie)\n    while True:\n        if base**exponent <= num < base**(exponent + 1):\n            if (num - base**exponent) <= (base**(exponent + 1) - num):\n                return exponent\n            return exponent + 1\n        exponent += 1\n    \n#print(closest_power(3,12))\n#print(closest_power(4,12))\nprint(closest_power(4,16))\n","repo_name":"lingzt/PythonPractice","sub_path":"MidTerm/ps3_hangman.py","file_name":"ps3_hangman.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"7935246101","text":"from django.urls import path\nfrom .views import (\n    TuneListView,\n    TuneDetailView,\n    TuneUpdateView,\n    TuneDeleteView,\n    TuneCreateView,\n    SetlistCollection,\n    SetlistDetailView,\n    SetlistUpdateView,\n    SetlistDeleteView,\n    SetlistCreateView,\n    TuneListApi,\n    TuneDetailApi,\n)\n\nurlpatterns = [\n    # Tunes\n    path(\"<int:pk>/\", TuneDetailView.as_view(), name=\"tune_detail\"),\n    path(\"<int:pk>/edit/\", TuneUpdateView.as_view(), name=\"tune_edit\"),\n    path(\"<int:pk>/delete/\", TuneDeleteView.as_view(), name=\"tune_delete\"),\n    path(\"new/\", TuneCreateView.as_view(), name=\"tune_new\"),\n    path(\"\", TuneListView.as_view(), name=\"tune_list\"),\n    # Setlists\n    path(\"setlists/<int:pk>/\", SetlistDetailView.as_view(), name=\"setlist_detail\"),\n    path(\"setlists/<int:pk>/edit/\", SetlistUpdateView.as_view(), name=\"setlist_edit\"),\n    path(\n        \"setlists/<int:pk>/delete/\", SetlistDeleteView.as_view(), name=\"setlist_delete\"\n    ),\n    path(\"setlists\", SetlistCollection.as_view(), name=\"setlist_collection\"),\n    path(\"setlists/new/\", SetlistCreateView.as_view(), name=\"setlist_new\"),\n    # Api views\n    path(\"tunes_api/\", TuneListApi.as_view(), name=\"tunes_api\"),\n    path(\"tunes_api/<int:pk>/\", TuneDetailApi.as_view(), name='tunes_api_detail'),\n]\n","repo_name":"tamaradement/gig-forte-1","sub_path":"tunes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"27276141584","text":"import importlib\nimport logging\nimport os\nfrom logging.handlers import RotatingFileHandler\n\nfrom flask import Flask\n\n# from app.extensions import babel, csrf, db, executor, mail, migrate, security, session, toolbar\nfrom app.extensions import db, migrate\n\n# from app.filters import format_price, format_price_float, pluralize, round_dt\n# from app.modules.core.mail import SecMailUtil\n# from app.modules.core.models import user_datastore\nfrom app.routes import bps\n# from app.utils import url_for_icon\nfrom config import config\n\n\ndef register_extensions(app: Flask) -> None:\n    \"\"\"Call 'init_app' on each enabled extension to register it\n    on the flask.Flask instance passed in.\n    \"\"\"\n    # babel.init_app(app)\n    # csrf.init_app(app)\n    # executor.init_app(app)\n    db.init_app(app)\n    # mail.init_app(app)\n    migrate.init_app(app, db)\n    # security.init_app(app, user_datastore, mail_util_cls=SecMailUtil)\n    # session.init_app(app)\n    # toolbar.init_app(app)\n
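# A hedged note on the factory pattern above: the extensions are presumably
# instantiated unbound in app/extensions.py (e.g. db = SQLAlchemy()) and only
# bound to a concrete application inside register_extensions(), which is what
# lets create_app() build fresh, isolated app instances for tests.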
\n\ndef register_blueprints(app: Flask) -> None:\n    \"\"\"Register the application's blueprints (routes).\n    \"\"\"\n    for bp in bps:\n        bp_module = importlib.import_module(f\"app.modules.{bp['module']}.{bp['view']}\")\n        bp_instance = getattr(bp_module, bp[\"name\"])\n        app.register_blueprint(bp_instance, url_prefix=bp.get(\"prefix\", None))\n\n\ndef create_app():\n    app = Flask(__name__)\n    app.config.from_object(config[os.getenv(\"FLASK_ENV\", \"production\")])\n\n    if app.debug:\n        import colorlog\n\n        handler = colorlog.StreamHandler()\n\n        werkzeug_logger = logging.getLogger(\"werkzeug\")\n        # coloredlogs.install(level=\"DEBUG\", logger=werkzeug_logger)\n        # coloredlogs.install(level=\"DEBUG\", logger=app.logger)\n\n        logger = colorlog.getLogger(__name__)\n        logger.addHandler(handler)\n\n        werkzeug_logger.setLevel(logging.DEBUG)\n        app.logger.setLevel(logging.DEBUG)\n        app.logger.info(f\"{app.config['APP_NAME']} startup\")\n\n    if not os.path.exists(\"logs\"):\n        os.mkdir(\"logs\")\n    file_handler = RotatingFileHandler(\"logs/app.log\", maxBytes=1048576, backupCount=10)\n    file_handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]\"))\n    file_handler.setLevel(logging.DEBUG)\n    app.logger.addHandler(file_handler)\n    app.logger.setLevel(logging.DEBUG)\n\n    # app.template_filter(\"format_price\")(format_price)\n    # app.template_filter(\"format_price_float\")(format_price_float)\n    # app.template_filter(\"pluralize\")(pluralize)\n    # app.template_filter(\"round_dt\")(round_dt)\n    # app.template_global(\"url_for_icon\")(url_for_icon)\n\n    register_extensions(app)\n    register_blueprints(app)\n\n    # os.makedirs(app.config.get(\"IMG_PRODUCTS_DIR\"), exist_ok=True)\n\n    return app\n","repo_name":"makehtml/python-pikabu-tutorial","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"15680619512","text":"from objects.Item import Item\r\nimport os\r\nimport unittest\r\nfrom data.ItemData import ItemData\r\n\r\nclass ItemDataTest(unittest.TestCase):\r\n\r\n    def setUp(self):\r\n        self.item_data = ItemData(\"test_items.csv\")\r\n\r\n    def tearDown(self): \r\n        if os.path.exists(\"test_items.csv\"):\r\n            os.remove(\"test_items.csv\")\r\n\r\n    def test_save_and_load_info(self):\r\n        items = [\r\n            Item('Arenchine', 'Starter', 32, 'Cheese, Rice'),\r\n            Item('Pasta', 'Main course', 45, 'Pasta, Mushrooms, Cream'),\r\n            Item('Water', 'Beverage', 6.5, 'Water'),\r\n            Item('A piece of cake', 'Dessert', 25, 'Sugar, Egg, Butter, flour')]\r\n\r\n        self.item_data.save_info(items)\r\n\r\n        loaded_items = self.item_data.load_info()\r\n\r\n        self.assertEqual(len(loaded_items), len(items))\r\n        for i in range(len(items)):\r\n            self.assertEqual(loaded_items[i].name, items[i].name)\r\n            self.assertEqual(loaded_items[i].category, items[i].category)\r\n            self.assertEqual(loaded_items[i].price, items[i].price)\r\n            self.assertEqual(loaded_items[i].ingredients, items[i].ingredients)\r\n\r\n\r\n
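# The call below uses argv=[''] and exit=False so the suite runs without
# parsing real command-line arguments and without raising SystemExit, which
# also makes the file usable from notebooks/REPLs; a plain unittest.main()
# would be the usual standalone-script form.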
unittest.main(argv=[''], exit=False)","repo_name":"RoeiAviv/Resturant_Management","sub_path":"ItemDataTest.py","file_name":"ItemDataTest.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"73511844041","text":"from Felipe import Felipe\n\nobjeto = Felipe()\nobjeto.nombre=\"felipe\"\nobjeto.estudiar()\nfrom Willian import Willian\n\n# TO USE A CLASS, YOU MUST CREATE AN INSTANCE\n\n# AN OBJECT IS A VARIABLE, BUT A (SPECIAL) ONE\n\nobjeto = Willian()\n\n# HOW TO USE THE OBJECT\n\n# ACCESS ITS ATTRIBUTES\n\nobjeto.nombre = \"willian\"\n\nobjeto.edad=35\n\nobjeto.saludar()\n","repo_name":"jjosegallegocesde/repocolectivopoo1sabado","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"41595261753","text":"import argparse\nfrom contracheque import Contracheque\n\n\ndef min_year(y):\n    try:\n        y = int(y)\n    except ValueError:\n        raise argparse.ArgumentTypeError('Year must be int')\n    if y < 2015:\n        raise argparse.ArgumentTypeError(\"No data before 2015\")\n    return y\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description='Payslip (contracheque) data collector for the MPF'\n    )\n    parser.add_argument(\n        '-m',\n        '--month',\n        type=int,\n        help='Month you want to consult',\n        required=True,\n        choices=range(1, 13),\n        dest='month'\n    )\n\n    parser.add_argument(\n        '-y',\n        '--year',\n        type=min_year,\n        help='Year you want to consult',\n        required=True,\n        dest='year',\n    )\n\n    parser.add_argument(\n        '-d',\n        '--dir',\n        type=str,\n        help='CSV Output directory. Default: .',\n        default='.',\n        dest='dir_output'\n    )\n\n    args = parser.parse_args()\n    cc = Contracheque(args.month, args.year)\n    if (args.year > 2019):\n        cc.write_new_to_csv(args.dir_output)\n    elif (args.year < 2019):\n        cc.write_old_to_csv(args.dir_output)\n    else:\n        if(args.month < 7):\n            cc.write_old_to_csv(args.dir_output)\n        else:\n            cc.write_new_to_csv(args.dir_output)\n","repo_name":"filipegl/coletor-mpf-selecao-dadosjus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"11576639915","text":"import logging\r\nimport subprocess\r\n\r\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, Updater\r\nfrom yaml import load\r\n\r\ntry:\r\n    from yaml import CLoader as Loader\r\nexcept ImportError:\r\n    from yaml import Loader\r\n\r\n\r\ndef start(bot, update):\r\n    bot.send_message(chat_id=update.message.chat_id, text=text[\"start\"])\r\n    open(report_dir.format(update.message.chat_id), \"w\")\r\n\r\n\r\ndef report(bot, update):\r\n    # send the report file's contents; passing the open file object itself\r\n    # would make send_message fail\r\n    with open(report_dir.format(update.message.chat_id), \"r\") as f:\r\n        bot.send_message(chat_id=update.message.chat_id, text=f.read())\r\n\r\n\r\ndef adjust(bot, update):\r\n    bot.send_message(chat_id=update.message.chat_id, text=text[\"adjust\"])\r\n\r\n\r\ndef fortune(bot, update):\r\n    bot.send_message(chat_id=update.message.chat_id,\r\n                     text=subprocess.run(['fortune'], stdout=subprocess.PIPE).stdout.decode('utf-8'))\r\n\r\n\r\ndef error(bot, update, error):\r\n    \"\"\"Log Errors caused by Updates.\"\"\"\r\n    logging.warning('Update \"%s\" caused error \"%s\"', update, error)\r\n\r\n\r\ndef reminder(bot, job):\r\n    # job callbacks receive no update; the target chat id is expected in\r\n    # job.context when the job is scheduled (see the sketch below)\r\n    bot.send_message(chat_id=job.context, text=text[\"done\"])\r\n    bot.send_message(chat_id=job.context, text=text[\"todo\"])\r\n    bot.send_message(chat_id=job.context, text=text[\"obstacles\"])\r\n\r\n\r\n
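# Hedged scheduling sketch (python-telegram-bot v11-era JobQueue API, datetime
# import assumed): reminder() only fires if it is registered on the job queue,
# e.g.
#     job_queue.run_daily(reminder, time=datetime.time(9, 0), context=chat_id)
# which supplies the target chat id through job.context as used above.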
def unknown(bot, update):\r\n    bot.send_message(chat_id=update.message.chat_id, text=\"Sorry, I didn't understand that command.\")\r\n\r\n\r\ndef setTime(bot, update):\r\n    print(update.message.text)\r\n\r\n\r\nreport_dir = \"./reports/{}.txt\"\r\n\r\ntext = load(open(\"text.yml\", \"r\"), Loader=Loader)\r\n\r\nSETUP, READY = range(2)\r\n\r\nupdater = Updater(token='741012984:AAF_qrjF9LiclB-owhFP6Yi7NZ7T-lPhHR0')\r\n\r\ndispatcher = updater.dispatcher\r\n\r\njob_queue = updater.job_queue\r\n\r\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\r\n                    level=logging.INFO)\r\n\r\nstart_handler = CommandHandler('start', start)\r\nreport_handler = CommandHandler('report', report)\r\nfortune_handler = CommandHandler('fortune', fortune)\r\nadjust_handler = CommandHandler('adjust', adjust)\r\nunknown_handler = MessageHandler(Filters.command, unknown)\r\ntext_handler = MessageHandler(Filters.text, setTime)\r\n\r\ndispatcher.add_handler(start_handler)\r\ndispatcher.add_handler(report_handler)\r\ndispatcher.add_handler(fortune_handler)\r\ndispatcher.add_handler(adjust_handler)\r\ndispatcher.add_handler(text_handler)\r\ndispatcher.add_handler(unknown_handler)\r\n\r\ndispatcher.add_error_handler(error)\r\n\r\nupdater.start_polling()\r\n\r\nupdater.idle()","repo_name":"jcamposobando/dailyScrumBot","sub_path":"dailyScrumBot.py","file_name":"dailyScrumBot.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"7065913412","text":"import logging\nimport sys\n\nfrom asyncio import CancelledError, Future, Task, create_task, ensure_future, sleep\nfrom functools import partial, wraps\nfrom typing import Awaitable, Callable, Coroutine, Union\n\n\n_logger = logging.getLogger(__name__)\n\n\nasync def cancel_and_stop_task(\n    task: Union[Task, Future], reraise_exception: bool = True\n) -> None:\n    \"\"\"\n    Cancels the task and waits for it to complete.\n\n    Args:\n        task: asyncio.Task or asyncio.Future.\n\n    Returns:\n        None\n    \"\"\"\n    task.cancel()\n\n    try:\n        await task\n\n    except CancelledError:\n        _logger.debug(\"The task was canceled\")\n        # WARN: DO NOT `raise' here because then the function will never end.\n\n    except Exception as err:\n        _logger.exception(f\"The task was completed with an error ({err}):\")\n\n        if reraise_exception:\n            raise\n\n    else:\n        _logger.debug(\"Task completed successfully\")\n\n\ndef run_forever(\n    repeat_delay: int = 0, failure_delay: int = None, reraise_exception: bool = True\n) -> Callable[[Callable], Callable]:\n    \"\"\"\n    A decorator that allows you to make the function for asyncio.Task repeatable, with a given time interval.\n\n    Args:\n        repeat_delay: Delay between calls, seconds.\n        failure_delay: Delay between calls in case of a runtime error, seconds.\n    \"\"\"\n    if failure_delay is None:\n        failure_delay = repeat_delay\n\n    def decorator(func: Callable[..., Coroutine]):\n        @wraps(func)\n        async def task_wrapper(*args, **kwargs):\n            _logger.debug(\"Running an endless task\")\n\n            while True:\n                try:\n                    await func(*args, **kwargs)\n\n                except CancelledError:\n                    _logger.debug(\"Endless task canceled\")\n                    raise\n\n                except Exception as err:\n                    _logger.exception(\n                        f\"Unexpected error while running an infinite task ({err}):\"\n                    )\n                    if reraise_exception:\n                        raise\n\n                    await sleep(failure_delay)\n\n                else:\n                    await sleep(repeat_delay)\n\n        return task_wrapper\n\n    return decorator\n\n\ndef _default_on_complete(name: str, future: Future, exit_on_error: bool = True) -> None:\n    \"\"\"\n    Default handler on task completion\n\n    Args:\n        name: Task name that was completed\n        future: Future after task completion\n    \"\"\"\n    if 
future.cancelled():\n _logger.debug(f\"Task {name} canceled\")\n return\n\n error = future.exception()\n if error is not None:\n _logger.error(f\"Unexpected error in task {name}:\", exc_info=error)\n\n if exit_on_error:\n sys.exit(1)\n\n _logger.debug(f\"Task {name} completed successfully\")\n\n\ndef run_background_task(\n awaitable: Awaitable,\n name: str,\n on_complete: Callable[[str, Future], None] = _default_on_complete,\n exit_on_error: bool = True,\n) -> Task:\n \"\"\"\n A wrapper for running tasks in the background.\n\n Args:\n awaitable: task or coro\n name: name for logging\n on_complete: callback after task will comlpete\n\n Returns:\n Task\n \"\"\"\n task = create_task(awaitable)\n task.add_done_callback(partial(on_complete, name, exit_on_error=exit_on_error))\n return task\n","repo_name":"WindowGenerator/asyncio_task_helpers","sub_path":"asyncio_task_helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"3729062491","text":"import random\nimport os\nemptychar = ' '\nusevectors = True\n\ntheBoard = [\n [emptychar, emptychar, emptychar], \n [emptychar, emptychar, emptychar], \n [emptychar, emptychar, emptychar]\n ]\n\n#Vector2 Class\n\nclass Vector2:\n def __init__(self, x, y):\n self.x = int(x)\n self.y = int(y)\n def reset(self):\n self.x = 0\n self.y = 0\n def getpos(self):\n pass\n\ndef drawBoard(board):\n for i in range(len(board)):\n row = ''\n for j in range(len(board[i])):\n row += ' |' + board[i][j] + '| '\n print(row + '\\n')\n # print(' | |')\n # print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])\n # print(' | |')\n # print('-----------')\n # print(' | |')\n # print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])\n # print(' | |')\n # print('-----------')\n # print(' | |')\n # print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])\n # print(' | |')\n\ndef inputPlayerLetter():\n letter = ''\n\n while not (letter == 'X' or letter == 'O'):\n\n print('Do you want to be X or O?')\n\n letter = input().upper()\n\n if letter == 'X':\n\n return ['X', 'O']\n\n else:\n return ['O', 'X']\n\ndef whoGoesFirst():\n\n if random.randint(0, 1) == 0:\n\n return 'computer'\n\n else:\n\n return 'player'\n\ndef playAgain():\n\n print('Do you want to play again? 
(yes or no)')\n\n return input().lower().startswith('y')\n\ndef makeMove(board, letter, move):\n\n board[move.y][move.x] = letter\n\ndef canWin(bo, le): # NOT FULLY IMPLEMENTED YET!\n if usevectors: # USING 2D ARRAYS\n\n possiblecount = 0 # Define integer to count the number of possible wins there can be.\n \n pos = Vector2(-1, -1) # Define a Vector2 to return; Will return as \"(-1, -1)\" if unchanged, representing there was no possible win yet.\n\n possible = [] # Define list of possible remaining moves, from which to simulate outcomes.\n for i in range(len(bo)):\n for j in range(len(bo[i])):\n if isSpaceFree(bo, Vector2(i, j), emptychar):\n possible.append(Vector2(i, j)) # Add possible move to array, because that space is empty.\n \n # Iterate over each possible move (Empty spaces) for a way that a player might win.\n for i in range(len(possible)): \n # Establish simulated board (2D Array), so as to not tamper with the original board while simulating it.\n print('Attempt: (' + str(possible[i].x) + ', ' + str(possible[i].y) + ')')\n sim_board = bo\n # Iterate over all positions of simulated board to find possible wins.\n for i in range(len(sim_board)):\n for j in range(len(sim_board[i])):\n if isSpaceFree(sim_board, Vector2(i, j), emptychar):\n pass\n i1 = possible[i].x\n j1 = possible[i].y\n sim_board = getBoardCopy(sim_board)\n sim_board[possible[i].y][possible[i].x] = le\n makeMove(sim_board, 'X', possible[i]) # Simulate Move\n drawBoard(sim_board)\n \n #os.system('cls' if os.name == 'nt' else 'clear')\n if ((sim_board[2][0] == le and sim_board[2][1] == le and sim_board[2][2] == le) or # across the top\n\n (sim_board[1][0] == le and sim_board[1][1] == le and sim_board[1][2] == le) or # across the middle\n\n (sim_board[0][0] == le and sim_board[0][1] == le and sim_board[0][2] == le) or # across the sim_boardttom\n (sim_board[0][0] == le and sim_board[1][0] == le and sim_board[2][0] == le) or # down the left side\n (sim_board[0][1] == le and sim_board[1][1] == le and sim_board[2][1] == le) or # down the middle\n (sim_board[0][2] == le and sim_board[1][2] == le and sim_board[2][2] == le) or # down the right side\n (sim_board[0][0] == le and sim_board[1][1] == le and sim_board[2][2] == le) or # diagonal\n (sim_board[0][2] == le and sim_board[1][1] == le and sim_board[2][0] == le)): # diagonal\n possiblecount += 1\n pos = Vector2(i, j) # Set new possible win position\n if possiblecount > 0:\n return pos\n else:\n return pos\n\n \n else:\n return ((bo[7] == le and bo[8] == le and bo[9] == le) or # across the top\n\n (bo[4] == le and bo[5] == le and bo[6] == le) or # across the middle\n\n (bo[1] == le and bo[2] == le and bo[3] == le) or # across the bottom\n (bo[7] == le and bo[4] == le and bo[1] == le) or # down the left side\n (bo[8] == le and bo[5] == le and bo[2] == le) or # down the middle\n (bo[9] == le and bo[6] == le and bo[3] == le) or # down the right side\n (bo[7] == le and bo[5] == le and bo[3] == le) or # diagonal\n (bo[9] == le and bo[5] == le and bo[1] == le)) # diagonal\n\ndef isWinner(bo, le):\n if usevectors: # USING 2D ARRAYS\n return ((bo[2][0] == le and bo[2][1] == le and bo[2][2] == le) or # across the top\n\n (bo[1][0] == le and bo[1][1] == le and bo[1][2] == le) or # across the middle\n\n (bo[0][0] == le and bo[0][1] == le and bo[0][2] == le) or # across the bottom\n (bo[0][0] == le and bo[1][0] == le and bo[2][0] == le) or # down the left side\n (bo[0][1] == le and bo[1][1] == le and bo[2][1] == le) or # down the middle\n (bo[0][2] == le and bo[1][2] == le and 
bo[2][2] == le) or # down the right side\n (bo[0][0] == le and bo[1][1] == le and bo[2][2] == le) or # diagonal\n (bo[0][2] == le and bo[1][1] == le and bo[2][0] == le)) # diagonal\n else:\n return ((bo[7] == le and bo[8] == le and bo[9] == le) or # across the top\n\n (bo[4] == le and bo[5] == le and bo[6] == le) or # across the middle\n\n (bo[1] == le and bo[2] == le and bo[3] == le) or # across the bottom\n (bo[7] == le and bo[4] == le and bo[1] == le) or # down the left side\n (bo[8] == le and bo[5] == le and bo[2] == le) or # down the middle\n (bo[9] == le and bo[6] == le and bo[3] == le) or # down the right side\n (bo[7] == le and bo[5] == le and bo[3] == le) or # diagonal\n (bo[9] == le and bo[5] == le and bo[1] == le)) # diagonal\ndef getBoardCopy(board):\n return theBoard\n dupeBoard = []\n for i in board:\n dupeBoard.append(i)\n return dupeBoard\ndef isSpaceFree(board, move, emptychar):\n # Check (with Vector2 move) if position is empty on 2 Dimensonal array.\n return board[move.y][move.x] == emptychar\ndef getPlayerMove(board):\n move = ' '\n\n while move not in '1 2 3 4 5 6 7 8 9'.split() or not isSpaceFree(board, int(move), ' '):\n\n print('What is your next move? (1-9)')\n\n move = input()\n return int(move)\ndef chooseRandomMoveFromList(board, movesList):\n possibleMoves = []\n\n for i in movesList:\n\n if isSpaceFree(board, i, ' '):\n\n possibleMoves.append(i)\n if len(possibleMoves) != 0:\n\n return random.choice(possibleMoves)\n\n else:\n # Not return None, return random to not die.\n if (isBoardFull(theBoard)):\n pass\n else:\n # Get random move\n while True:\n rand = Vector2(random.randint(0,2), random.randint(0,2))\n if isSpaceFree(theBoard, rand, emptychar):\n print('oof avoided') #Successfully returned random\n return rand\ndef getComputerMove(board, computerLetter):\n if computerLetter == 'X':\n\n playerLetter = 'O'\n\n else:\n\n playerLetter = 'X'\n for i in range(1, 10):\n\n copy = getBoardCopy(board)\n\n \n move = chooseRandomMoveFromList(board, [Vector2(0, 0), Vector2(0, 2), Vector2(2, 0), Vector2(2, 2)])\n print(\"move: \" + str(move))\n if move != None:\n\n return move\n else:\n if (isBoardFull(theBoard)):\n print('board full')\n pass\n else:\n # Get random move\n while True:\n rand = Vector2(random.randint(0,2), random.randint(0,2))\n if isSpaceFree(theBoard, rand, emptychar):\n print('oof avoided') #Successfully returned random\n return rand\n return move\n\ndef isBoardFull(board):\n for i in range(len(board)):\n for j in range(len(board[i])):\n if isSpaceFree(board, Vector2(i, j), emptychar):\n return False\n return True\n\ndef domove():\n # Define player input\n playerx = input('Input X Coordinate: ') \n playery = input('Input Y Coordinate: ')\n playerthing = input('Input new thing: ')\n\n pos = Vector2(playerx, playery)\n\n print('x=' + str(pos.x) + ', y=' + str(pos.y))\n\n theBoard[pos.y][pos.x] = playerthing\n\nprint('Welcome to Tic Tac Toe!')\nwhile True:\n playerLetter, computerLetter = inputPlayerLetter()\n turn = whoGoesFirst()\n print('The ' + turn + ' will go first.')\n gameIsPlaying = True\n\n\n\n while gameIsPlaying:\n\n if turn == 'player':\n \n usevectors = True\n\n\n drawBoard(theBoard)\n if usevectors:\n domove()\n for i in range(len(theBoard)):\n row = ''\n for j in range(len(theBoard[i])):\n row += ' ' + theBoard[i][j] + ' '\n print(row + '\\n')\n else:\n\n move = getPlayerMove(theBoard)\n\n makeMove(theBoard, playerLetter, move)\n\n\n\n if isWinner(theBoard, playerLetter):\n\n drawBoard(theBoard)\n\n print('SKO! 
You have won the game!')\n # End game\n gameIsPlaying = False\n\n # Simulate possible winning moves \n # NOT FULLY IMPLEMENTED YET!\n # wins = canWin(theBoard, playerLetter)\n # if wins.x != Vector2(-1, -1).x and wins.y != Vector2(-1, -1).y:\n\n # #drawBoard(theBoard)\n\n # print('Win available! (' + str(wins.x) + ', ' + str(wins.y) + ')')\n else:\n \n if isBoardFull(theBoard):\n\n drawBoard(theBoard)\n\n print('The game is a tie!')\n\n break\n\n else:\n turn = 'computer'\n #print('Can\\'t win yet' + '(' + str(wins.x) + ', ' + str(wins.y) + ')') # Using Escape Character \"\\\" to allow for apostrophe -> (\\')\n\n\n\n else:\n move = getComputerMove(theBoard, computerLetter)\n\n makeMove(theBoard, computerLetter, move)\n\n\n\n if isWinner(theBoard, computerLetter):\n\n drawBoard(theBoard)\n\n print('The computer has beaten you! You lose.')\n\n gameIsPlaying = False\n\n else:\n\n if isBoardFull(theBoard):\n\n drawBoard(theBoard)\n\n print('The game is a tie!')\n\n break\n\n else:\n\n turn = 'player'\n\n\n\n if not playAgain():\n \n break\n","repo_name":"fuzzy-sasuage/python-tic-tac-toe","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":11112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71935304840","text":"\"\"\"stocktracker URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.conf.urls import url, include\nfrom tracker_app import views #for frontend only\n\nurlpatterns = [\n\n    url(r'^api/', include('tracker_app.urls')),\n    url(r'^admin/', admin.site.urls),\n\n    url(r'^search/$', views.render_search_form, name='search-form'),\n    url(r'^stocks/$', views.stock_index, name='stock-index'),\n    url(r'^stocks/(?P<symbol>.+)/$', views.stock_detail, name='stock-detail'),\n    url(r'^stockDELETE/(?P<pk>[0-9]+)/$', views.delete_stock, name='delete_stock'),\n\n    url(r'^stocksOLD/$', views.stockOLD_index, name='stockOLD-index'),\n    url(r'^stocksOLD/(?P<pk>[0-9]+)/$', views.stockOLD_detail, name='stockOLD-detail'),\n\n    url(r'^portfolioUPDATE/$', views.update_portfolio_chris, name='update_portfolio_chris'),\n\n    # implement below for system with multiple portfolios\n    # url(r'^portfolios/$', views.portfolio_index, name='index'),\n    # url(r'^portfolios/(?P<pk>[0-9]+)/$', views.portfolio_detail, name='detail'),\n\n    url(r'^$', views.load_portfolio_chris, name='load_portfolio_chris'), #the top level shows Chris' portfolio detail\n\n]\n","repo_name":"chrisbrickey/equity-portfolio","sub_path":"stocktracker/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"37526131105","text":"import unittest\nfrom pathlib import Path\n\nfrom pysh import cat, cat_list, rm, to_bz2, bz2_cat, to_list\n\n\nclass SourcesTest(unittest.TestCase):\n\n    def setUp(self):\n        with open(\"/tmp/pysh_cat_test\", \"w\") as outfile:\n            outfile.write(\"a\\nb\\ncde\\nbde\\n\\n\")\n\n    def tearDown(self) -> None:\n        rm(\"/tmp/pysh_cat_test\")\n\n    def test_cat(self):\n        content = list(cat(\"/tmp/pysh_cat_test\"))\n        self.assertEqual(content, [\"a\", \"b\", \"cde\", \"bde\", \"\"])\n        gen = cat(\"/tmp/pysh_cat_test\", with_len=True)\n        self.assertEqual(len(gen), 5)\n        self.assertEqual(list(gen), [\"a\", \"b\", \"cde\", \"bde\", \"\"])\n        self.assertEqual(len(gen), 5)\n\n    def test_cat_list(self):\n        lst = ['a', 'b', 'cde', 'fgh', 'x']\n        self.assertEqual(list(cat_list(lst[:])), lst)\n        lst = [0, 1, 2, 3, 'a', 'b', 'c', [1, 2, 3]]\n        self.assertEqual(list(cat_list(lst[:])), lst)\n\n    def test_bz2_cat(self):\n        FNAME = '/tmp/pysh_bz2_test.bz2'\n        content = ['a', 'bc', 'def', 'ghi', 'xx']\n        cat_list(content) | to_bz2(FNAME)\n        self.assertTrue(Path(FNAME).exists())\n        result = bz2_cat(FNAME) | to_list()\n        self.assertEqual(content, result)\n        gen = bz2_cat(FNAME, with_len=True)\n        self.assertEqual(len(gen), 5)\n        self.assertEqual(content, list(gen))\n        content = ['Lorem ipsum dolor sit amet,', 'consectetur adipiscing elit,', 'sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.', '×÷±°¾≈']\n        cat_list(content) | to_bz2(FNAME)\n        self.assertTrue(Path(FNAME).exists())\n        result = bz2_cat(FNAME) | to_list()\n        self.assertEqual(content, result)\n        gen = bz2_cat(FNAME, with_len=True)\n        self.assertEqual(len(gen), 4)\n        self.assertEqual(content, list(gen))\n        rm(FNAME)\n","repo_name":"RadagastRotN/pysh","sub_path":"tests/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
+{"seq_id":"29094248094","text":"import abc\nimport logging\nfrom .common.exceptions import ATCommandException\n\nlogger = logging.getLogger()\nlogger.addHandler(logging.NullHandler())\n\n\nclass AtCommand(metaclass=abc.ABCMeta):\n    COMMAND = 
'command'\n\n    def __init__(self, serial_target, timeout):\n        self.serial_target = serial_target\n        self.timeout = timeout\n\n    def read(self, read_char='?', message='Error while running command'):\n        state = self.serial_target.run(command=self.COMMAND + read_char,\n                                       timeout=self.timeout,\n                                       exception=ATCommandException,\n                                       message=message)\n        return self.parse_output(state)\n\n    @abc.abstractmethod\n    def parse_error(self):\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def parse_output(self, result):\n        match = self.COMMAND[2:] + ':'\n        for value in result:\n            if match in value:\n                return value.split(':')[-1]","repo_name":"elagheb/at_commands","sub_path":"at/server/commands/at.py","file_name":"at.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"16861564481","text":"#!/usr/bin/env python\n\nfrom multiprocessing import Queue, Process\nfrom threading import Thread\nfrom serial import Serial, EIGHTBITS\nimport os\nimport time\n\n\nclass InvalidPortError(Exception):\n    pass\n\n\nclass CommBase:\n    preamble, delimiter, escape = b'\\x80', b'\\x81', b'\\x82'\n    defaultPorts = {\n        'Darwin': '/dev/cu.usbmodem144111',\n        'Linux': '/dev/ttyACM0'\n    }\n\n    def __init__(self, port=None, baudrate=None, verbose=True):\n        self.alive = True\n        self.incoming, self.outgoing = Queue(), Queue()\n        self.verbose = verbose\n        self.port, self.baudrate = port, baudrate\n        self.serial = Serial(\n            port=self.port, baudrate=self.baudrate,\n            bytesize=EIGHTBITS, timeout=1\n        )\n        self.procs = [\n            Thread(target=self.read),\n            Thread(target=self.__reader),\n            Thread(target=self.__writer)\n        ]\n        for proc in self.procs:\n            proc.start()\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.close()\n\n    @property\n    def port(self):\n        return self._port\n\n    @port.setter\n    def port(self, val):\n        if val is None:\n            # fall back to the platform default; assigning to self.port here\n            # would recurse and then fail the isinstance check on None\n            try:\n                val = self.defaultPorts[os.uname().sysname]\n            except KeyError:\n                raise InvalidPortError('No default port for this platform')\n        assert isinstance(val, str), 'Port must be str'\n        self._port = val\n\n    @property\n    def baudrate(self):\n        return self._baudrate\n\n    @baudrate.setter\n    def baudrate(self, val):\n        if val is None:\n            val = 9600\n        assert isinstance(val, int), 'Baudrate must be int'\n        self._baudrate = val\n\n    def close(self):\n        self.alive = False\n        for proc in self.procs:\n            if isinstance(proc, Process):\n                proc.terminate()\n        for proc in self.procs:\n            proc.join()\n        self.serial.close()\n\n    def join(self):\n        \"\"\"\n        Method with no other purpose than to block\n        This method is not required to be called.\n        \"\"\"\n        while self.alive:\n            try:\n                pass\n            except KeyboardInterrupt:\n                break\n            time.sleep(.0001)\n\n    def __reader(self):\n        \"\"\"\n        Read and decode messages from a serial device,\n        then place them in a queue to be read from.\n        \"\"\"\n        message, receiving = [], False\n        while self.alive:\n            data = self.serial.read()\n            if data:\n                if data == self.escape:\n                    self.incoming.put(b''.join(message))\n                    message, receiving = [], False\n                elif data == self.preamble:\n                    message, receiving = [], True\n                elif receiving:\n                    message.append(data)\n                del data\n            time.sleep(.0001)\n\n    def __writer(self):\n        \"\"\"Writes messages from the outgoing queue to the serial port\"\"\"\n        while self.alive:\n            transmission = self.outgoing.get()\n            self.serial.write(self.preamble + transmission + self.escape)\n\n    def read(self):\n        \"\"\"Yields received serial messages from the queue\"\"\"\n        while self.alive:\n            message = self.incoming.get()\n            try:\n
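                # decode is only for the optional verbose console echo below;
                # the raw, undecoded bytes are what get yielded to the caller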
payload = message.decode('utf-8')\n except:\n raise\n else:\n if self.verbose:\n print(payload)\n yield message\n del message\n time.sleep(.0001)\n\n def write(self, message):\n \"\"\"Non-blocking method to write serial messages\"\"\"\n transmission = b''\n if isinstance(message, (list, tuple)):\n transmission = bytearray(message)\n elif isinstance(message, str):\n transmission = bytes(message.encode('utf-8'))\n else:\n raise TypeError('Must be list, tuple, or str')\n self.outgoing.put(transmission)\n","repo_name":"khaudio/Utilities","sub_path":"Python/commbase.py","file_name":"commbase.py","file_ext":"py","file_size_in_byte":4026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16288083453","text":"import json, random, string, os, math, time, datetime, dateutil.parser\nimport boto3\nfrom datetime import datetime, timedelta, timezone\nimport logging\nfrom FHIRClient import FHIRClient\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n## Establish the connection to SMART on FHIR backend services\nfhirclient = FHIRClient(\n os.environ['client_id'],\n os.environ['endpoint_token'],\n os.environ['endpoint_stu3'],\n os.environ['kms_key_id']\n)\n\n\n\n\"\"\" --- Helpers to build responses which match the structure of the necessary dialog actions --- \"\"\"\ndef elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message, response_card):\n return {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'ElicitSlot',\n 'intentName': intent_name,\n 'slots': slots,\n 'slotToElicit': slot_to_elicit,\n 'message': message,\n 'responseCard': response_card\n }\n }\n\n\ndef confirm_intent(session_attributes, intent_name, slots, message, response_card):\n return {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'ConfirmIntent',\n 'intentName': intent_name,\n 'slots': slots,\n 'message': message,\n 'responseCard': response_card\n }\n }\n\n\ndef close(session_attributes, fulfillment_state, message):\n response = {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'Close',\n 'fulfillmentState': fulfillment_state,\n 'message': message\n }\n }\n\n return response\n \n \ndef delegate(session_attributes, slots):\n return {\n 'sessionAttributes': session_attributes,\n 'dialogAction': {\n 'type': 'Delegate',\n 'slots': slots\n }\n }\n\n\ndef build_response_card(title, subtitle, options):\n \"\"\"\n Build a responseCard with a title, subtitle, and an optional set of options which should be displayed as buttons.\n \"\"\"\n buttons = None\n if len(options) > 1:\n buttons = []\n for i in range(min(5, len(options))):\n buttons.append(options[i])\n\n return {\n 'contentType': 'application/vnd.amazonaws.card.generic',\n 'version': 1,\n 'genericAttachments': [{\n 'title': title,\n 'subTitle': subtitle,\n 'buttons': buttons\n }]\n }\n else:\n return {\n 'contentType': 'application/vnd.amazonaws.card.generic',\n 'version': 1,\n 'genericAttachments': [{\n 'title': title,\n 'subTitle': subtitle\n }]\n }\n\n\n\"\"\" --- Functions that control the bot's behavior --- \"\"\"\ndef getPatientAuth(intent_request):\n \"\"\"\n Authenticate caller and return FHIR STU3 patient ID for the following query\n \"\"\"\n logger.debug('intent request: {}'.format(intent_request))\n output_session_attributes = intent_request['currentIntent']['slots'] if intent_request['currentIntent']['slots'] is not None else {}\n telecom = intent_request['sessionAttributes']['telecom'] if 
intent_request['sessionAttributes']['telecom'] is not None else {}\n \n ## format phone number\n telecom = '{0}-{1}-{2}'.format(telecom[-10:-7], telecom[-7:-4], telecom[-4:])\n logger.debug('phone number after tranformation: {}'.format(telecom))\n patientinfo = {\n 'birthdate': output_session_attributes['patientBirthday'], \n 'gender': output_session_attributes['patientGender'],\n 'telecom': telecom\n }\n r = fhirclient.get_patient(patientinfo)\n output_session_attributes['patientid']=r['response']\n if r['status']==200:\n statusMessage = \"Thank you for authenticating\"\n else:\n statusMessage = \"I'm sorry, I didn't find a patient with that information\"\n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': statusMessage\n }\n )\n\n\ndef getMedHelp(intent_request):\n \"\"\"\n Retrieve Medication Information for a given patient\n \"\"\"\n logger.info('intent request: {}'.format(intent_request))\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n patientid = output_session_attributes['patientid'] if output_session_attributes['patientid'] is not None else {}\n res = fhirclient.get_meds(patientid)\n logger.debug(res)\n if res['status']==200:\n if type(res['response'])==str:\n outputtext = res['response']\n else:\n outputtext = 'I have found the following medications and instructions for you. '\n for med in res[\"response\"]:\n outputtext += med['medicationReference'] + '. '\n if 'dosage' in med and 'patientInstruction' in med['dosage'][0]:\n outputtext += 'The dosage for this is the following... ' + med['dosage'][0]['patientInstruction']\n else:\n outputtext += 'I did not find a dosage for this medication. '\n else:\n outputtext = 'I do not have any medication for you.'\n \n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': outputtext\n }\n )\n\n\n\"\"\" --- Intents --- \"\"\"\ndef dispatch(intent_request):\n \"\"\"\n Called when the user specifies an intent for this bot.\n \"\"\"\n logger.debug (intent_request)\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'getAppointments':\n return findFutureAppt(intent_request)\n if intent_name == 'getMedication':\n return getMedHelp(intent_request)\n if intent_name == 'authenticateUser':\n return getPatientAuth(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')\n\n\n\"\"\" --- Main handler --- \"\"\"\ndef lambda_handler(event, context):\n \"\"\"\n Route the incoming request based on intent.\n The JSON body of the request is provided in the event slot.\n \"\"\"\n # By default, treat the user request as coming from the America/New_York time zone.\n os.environ['TZ'] = 'America/New_York'\n time.tzset()\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)\n","repo_name":"aws-samples/integrate-amazon-connect-with-smart-on-fhir-backend-services","sub_path":"LambdaFunction/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":6434,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"29554022895","text":"#!C:/Users/Greg/Anaconda/Python\nimport time\n\n#Simple Sum Function\ndef simplefunc():\n\tstart_time = time.time() \n\tfor x in range(500):\n\t\tsum = 0.0\n\t\tfor i in range(1,10001):\n\t\t\tsum += 1.0 / pow(i,2)\n\tprint(\"----Python took %s seconds----\" % 
(time.time()-start_time) );\n\n \n#Function Call\nsimplefunc()\n","repo_name":"terratenney/Computational-Tools-For-Big-Data","sub_path":"Exercise3/exercise3_5.py","file_name":"exercise3_5.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25930221679","text":"import pandas as pd\n\nfrom .plots import plotMessagesInChats, plotActivityOverTime, plotActivityForMostFrequentNonGroupChats, plotActivityOverWeek, plotActivityOverDay, plotMessageLengthDistributionPerChat, plotAverageMessageLength, generateKeywordClouds, plotLanguageDiversityRank\nfrom .parameters import getParam\n\nUSER = getParam('user')\nLANGUAGE = getParam('language')\nPLOTS_DIR = getParam('plotsDirectory')\nWORDCLOUDS_SUBDIR = getParam('wordClouds')['subDirectory']\n\n\ndef generatePlots(data: pd.DataFrame):\n plotMessagesInChats(data, chats=15, user=USER, save_dir=PLOTS_DIR)\n\n plotActivityOverTime(data, user=USER, save_dir=PLOTS_DIR, order=6)\n\n plotActivityForMostFrequentNonGroupChats(\n data, chats=4, order=3, save_dir=PLOTS_DIR)\n\n plotActivityOverWeek(data, user=USER, save_dir=PLOTS_DIR)\n\n plotActivityOverDay(data, user=USER, save_dir=PLOTS_DIR)\n\n plotMessageLengthDistributionPerChat(\n data, user=USER, save_dir=PLOTS_DIR)\n\n plotAverageMessageLength(\n data, user=USER, chats=20, messages_treshold=0.1, save_dir=PLOTS_DIR)\n\n generateKeywordClouds(\n data, user=USER, language=LANGUAGE, chats=10, save_dir=PLOTS_DIR, clouds_subdir=WORDCLOUDS_SUBDIR, background_color=\"white\")\n\n plotLanguageDiversityRank(\n data, user=USER, language=LANGUAGE, save_dir=PLOTS_DIR, batch_size=500)\n","repo_name":"BartekPog/Messenger-analysis","sub_path":"src/plots_generation.py","file_name":"plots_generation.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"12692548839","text":"from gui.widgets.dialogs.iteration_dialog import IterationDialog\nfrom core.iterators.radius_iterator import RadiusIterator\nfrom core.enums import Geometry\nfrom core import config\n\n\nclass RadiusIterationDialog(IterationDialog):\n\n def initComponents(self):\n super().initComponents()\n self.widget.iterateButton.setEnabled(\n config.geometry == Geometry.CYLINDRICAL\n )\n\n def iterate(self):\n self.iterator = RadiusIterator(self.gudrunFile)\n self.iterator.setTargetRadius(\"inner\")\n self.enqueueTasks()\n self.text = \"Iterate by Radius\"\n self.widget.close()\n","repo_name":"disorderedmaterials/GudPy","sub_path":"gudpy/gui/widgets/dialogs/iterate_radius_dialog.py","file_name":"iterate_radius_dialog.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"} +{"seq_id":"40313525324","text":"from machine import Timer, Pin, PWM, ADC, SPI\nimport time\nimport data\nfrom ST7735 import TFT\nfrom sysfont import sysfont\n\ndef startTimers():\n global dacTimer, measurementTimer, plottingTimer\n dacTimer = Timer(mode=Timer.PERIODIC, period=1, callback=dacHandler)\n measurementTimer = Timer(mode=Timer.PERIODIC, period=10, callback=measurementHandler)\n plottingTimer = Timer(mode=Timer.PERIODIC, period=10, callback=plottingHandler)\n\ndef stopTimers():\n global dacTimer, measurementTimer, plottingTimer\n dacTimer.deinit()\n measurementTimer.deinit()\n plottingTimer.deinit()\n\ndef dacHandler(timer):\n global dacStep, waveDAC\n waveformSetting = settings[0][1][settings[0][0]]\n 
if(waveformSetting == \"sine\"):\n waveDAC.duty_u16(int(sine[dacStep]))\n elif(waveformSetting == \"triangle\"):\n waveDAC.duty_u16(int(tri[dacStep]))\n elif(waveformSetting == \"square\"):\n if (dacStep < 500):\n waveDAC.duty_u16(65535)\n else:\n waveDAC.duty_u16(32768)\n if (dacStep != 999):\n dacStep += 1\n else:\n dacStep = 0\n\ndef measurementHandler(timer):\n global cursorPos, voltageDataC1, voltageDataC2, yValsC1, yValsC2, ADCDataC1, ADCDataC2, tft, measuring, currentArrayIndex\n valSumC1 = 0\n valSumC2 = 0\n for y in range(100):\n valSumC1 += C1.read_u16() - C1Ref.read_u16()\n valSumC2 += C2.read_u16() - C2Ref.read_u16()\n ADCDataC1[currentArrayIndex] = valSumC1*0.01\n ADCDataC2[currentArrayIndex] = valSumC2*0.01\n currentArrayIndex += 1\n if (currentArrayIndex == 160):\n currentArrayIndex = 0\n\ndef plottingHandler(timer):\n global voltageDataC1, voltageDataC2, yValsC1, yValsC2, ADCDataC1, ADCDataC2, tft, currentArrayIndex, plottingIndex, measuring, cursorPos\n if (plottingIndex == 0 and settings[2][1][settings[2][0]] == \"wave out peak\" and not (dacStep > 445 and dacStep < 455)):\n plottingIndex = 0\n elif(plottingIndex < 160):\n cursorPos = plottingIndex\n if(settings[1][1][settings[1][0]] == \"volts\"):\n if(currentArrayIndex >= 20):\n voltageDataC1[plottingIndex] = voltageFactor*(ADCDataC1[currentArrayIndex-20])\n voltageDataC2[plottingIndex] = voltageFactor*(ADCDataC2[currentArrayIndex-20])\n yValsC1[plottingIndex] = int(69-displayFactor*voltageDataC1[plottingIndex])\n yValsC2[plottingIndex] = int(69-displayFactor*voltageDataC2[plottingIndex])\n \n tft.pixel((plottingIndex, yValsC2[plottingIndex]),TFT.BLUE)\n tft.pixel((plottingIndex, yValsC1[plottingIndex]),TFT.RED)\n else:\n voltageDataC1[plottingIndex] = voltageFactor*(ADCDataC1[currentArrayIndex+140])\n voltageDataC2[plottingIndex] = voltageFactor*(ADCDataC2[currentArrayIndex+140])\n yValsC1[plottingIndex] = int(69-displayFactor*voltageDataC1[plottingIndex])\n yValsC2[plottingIndex] = int(69-displayFactor*voltageDataC2[plottingIndex])\n \n tft.pixel((plottingIndex, yValsC2[plottingIndex]),TFT.BLUE)\n tft.pixel((plottingIndex, yValsC1[plottingIndex]),TFT.RED)\n else: #power plot mode\n if(currentArrayIndex >= 20):\n voltageDataC1[plottingIndex] = voltageFactor*(ADCDataC1[currentArrayIndex-20])*voltageFactor*(ADCDataC2[currentArrayIndex-20])\n yValsC1[plottingIndex] = int(69-displayFactor*voltageDataC1[plottingIndex])\n \n tft.pixel((plottingIndex, yValsC1[plottingIndex]),TFT.PURPLE)\n else:\n voltageDataC1[plottingIndex] = voltageFactor*(ADCDataC1[currentArrayIndex+140])*voltageFactor*(ADCDataC2[currentArrayIndex+140])\n yValsC1[plottingIndex] = int(69-displayFactor*voltageDataC1[plottingIndex])\n \n tft.pixel((plottingIndex, yValsC1[plottingIndex]),TFT.PURPLE)\n plottingIndex+=1\n if(plottingIndex == 160):\n cursorPos = 0\n else:\n measuring=False\n\ndef updateText():\n global tft\n tft.text((0, 0), '{:.2f}S'.format(cursorPos*0.01), TFT.GREEN, sysfont, 1, nowrap=True)\n if(settings[1][1][settings[1][0]] == \"volts\"):\n tft.text((40, 0), '{:+.2f}V'.format(voltageDataC1[cursorPos]), TFT.RED, sysfont, 1, nowrap=True)\n tft.text((90, 0), '{:+.2f}V'.format(voltageDataC2[cursorPos]), TFT.BLUE, sysfont, 1, nowrap=True)\n else:\n tft.text((40, 0), '{:+.2f}mW'.format(voltageDataC1[cursorPos]), TFT.PURPLE, sysfont, 1, nowrap=True)\n \n waveformSetting = settings[0][1][settings[0][0]]\n if(waveformSetting == \"sine\"):\n tft.text((140, 0), 'SIN', TFT.YELLOW, sysfont, 1, nowrap=True)\n elif(waveformSetting == \"triangle\"):\n 
tft.text((140, 0), 'TRI', TFT.YELLOW, sysfont, 1, nowrap=True)\n elif(waveformSetting == \"square\"):\n tft.text((140, 0), 'SQR', TFT.YELLOW, sysfont, 1, nowrap=True)\n\ndef drawAxes():\n tft.text((6, 8), 'v', TFT.YELLOW, sysfont, 1, nowrap=True)\n tft.text((155, 58), 's', TFT.YELLOW, sysfont, 1, nowrap=True)\n \n tft.vline((0,10), 118, TFT.YELLOW)\n tft.line((0,10), (3, 13), TFT.YELLOW)\n tft.line((0,127), (3, 124), TFT.YELLOW)\n\n tft.pixel((1,61), TFT.YELLOW)\n tft.pixel((1,53), TFT.YELLOW)\n tft.pixel((1,45), TFT.YELLOW)\n tft.pixel((1,37), TFT.YELLOW)\n tft.pixel((1,29), TFT.YELLOW)\n tft.pixel((1,21), TFT.YELLOW)\n\n tft.pixel((1,77), TFT.YELLOW)\n tft.pixel((1,85), TFT.YELLOW)\n tft.pixel((1,93), TFT.YELLOW)\n tft.pixel((1,101), TFT.YELLOW)\n tft.pixel((1,109), TFT.YELLOW)\n tft.pixel((1,117), TFT.YELLOW)\n\n tft.hline((1,69), 159, TFT.YELLOW)\n tft.line((159, 69), (156, 72), TFT.YELLOW)\n tft.line((159, 69), (156, 66), TFT.YELLOW)\n\n for x in range(15):\n tft.pixel((9 + 10*x,68), TFT.YELLOW)\n tft.pixel((9 + 10*x,70), TFT.YELLOW)\n\ndef moveCursor(newPos):\n global cursorPos\n tft.vline((cursorPos, 10), 118, TFT.BLACK)\n drawAxes()\n if(settings[1][1][settings[1][0]] == \"volts\"):\n tft.pixel((cursorPos, yValsC2[cursorPos]),TFT.BLUE)\n tft.pixel((cursorPos, yValsC1[cursorPos]),TFT.RED)\n else:\n tft.pixel((cursorPos, yValsC1[cursorPos]),TFT.PURPLE)\n cursorPos = newPos\n tft.vline((cursorPos, 10), 118, TFT.GREEN)\n if(settings[1][1][settings[1][0]] == \"volts\"):\n tft.pixel((cursorPos, yValsC2[cursorPos]),TFT.BLUE)\n tft.pixel((cursorPos, yValsC1[cursorPos]),TFT.RED)\n else:\n tft.pixel((cursorPos, yValsC1[cursorPos]),TFT.PURPLE)\n\ndef openMenu():\n global tft, leftButton, rightButton, leftCounter, rightCounter, selectButton, selectCounter, settings, cursorPos\n stopTimers()\n tft.fill(TFT.BLACK)\n tft.text((0, 0 ), 'Waveform:', TFT.WHITE, sysfont, 1, nowrap=True)\n tft.text((0, 12), ' Units:', TFT.WHITE, sysfont, 1, nowrap=True)\n tft.text((0, 24), ' Trigger:', TFT.WHITE, sysfont, 1, nowrap=True)\n menuIndex = 0\n updateMenuSelection(menuIndex)\n while(True):\n time.sleep(0.001)\n if (not leftButton.value()):\n leftCounter += 1\n if (leftCounter == 5):\n settings[menuIndex][0] = settings[menuIndex][0] - 1\n if(settings[menuIndex][0] < 0):\n settings[menuIndex][0] = len(settings[menuIndex][1]) - 1\n updateMenuSelection(menuIndex)\n else:\n leftCounter = 0\n \n if (not rightButton.value()):\n rightCounter += 1\n if (rightCounter == 5):\n settings[menuIndex][0] = settings[menuIndex][0] + 1\n if(settings[menuIndex][0] >= len(settings[menuIndex][1])):\n settings[menuIndex][0] = 0\n updateMenuSelection(menuIndex)\n else:\n rightCounter = 0\n \n if (not selectButton.value()):\n selectCounter += 1\n if (selectCounter == 5):\n menuIndex += 1\n if menuIndex == len(settings):\n if (settings[1][1][settings[1][0]] == \"watts\"):\n tft.fill(TFT.BLACK)\n tft.text((0, 0), \"Note: Power plot mode on. This mode assumes channel 1 is attached across the component whose power drawis being measured, and channel 2 is attached across a 1 kOhm resistor placed in series with the component. 
Press select to continue.\", TFT.WHITE, sysfont, 1, nowrap=False)\n                        time.sleep(1)\n                        while(True):\n                            if (not selectButton.value()):\n                                break\n                    break\n            updateMenuSelection(menuIndex)\n        else:\n            selectCounter = 0\n    startTimers()\n    tft.fill(TFT.BLACK)\n    drawAxes()\n    cursorPos = 0\n\ndef updateMenuSelection(menuIndex):\n    global tft\n    tft.fillrect((60,12*menuIndex - 4), (99, 14), TFT.BLACK)\n    tft.hline((60, 12*menuIndex + 9), 6*len(settings[menuIndex][1][settings[menuIndex][0]]), TFT.WHITE)\n    for x in range(len(settings)):\n        tft.text((60, x*12), settings[x][1][settings[x][0]], TFT.WHITE, sysfont, 1, nowrap=True)\n\nspi = SPI(1, baudrate=133000000, polarity=0, phase=0, sck=Pin(14), mosi=Pin(15), miso=Pin(12))\ntft=TFT(spi,10,11,13)\ntft.initr()\ntft.rgb(True)\ntft.rotation(3)\ntft.fill(TFT.BLACK)\ndrawAxes()\n\nsine = data.sineWave()\ntri = data.triWave()\nwavePin = Pin(20, mode=Pin.OUT)\nwaveDAC = PWM(wavePin)\nwaveDAC.freq(1000000)\ndacStep = 0\n\nC1Ref = ADC(29)\nC1 = ADC(28)\nC2Ref = ADC(27)\nC2 = ADC(26)\n\nc1gnd = Pin(25, Pin.OUT)\nc1gnd.value(0)\nc2gnd = Pin(24, Pin.OUT)\nc2gnd.value(0)\nc3gnd = Pin(23, Pin.OUT)\nc3gnd.value(0)\nc4gnd = Pin(22, Pin.OUT)\nc4gnd.value(0)\n\nvoltageFactor = 6.6/65535\ndisplayFactor = 8\n\nADCDataC1 = [0] * 160\nADCDataC2 = [0] * 160\n\nvoltageDataC1 = [0] * 160\nvoltageDataC2 = [0] * 160\n\nyValsC1 = [69] * 160\nyValsC2 = [69] * 160\n\ncurrentArrayIndex = 0\nplottingIndex = 160\ncursorPos = 0\n\nsettings = [\n[0, [\"sine\", \"triangle\", \"square\"]],\n[0, [\"volts\", \"watts\"]],\n[0, [\"immediate\", \"wave out peak\"]]\n]\n\nleftButton = Pin(0, Pin.IN, Pin.PULL_UP)\nrightButton = Pin(1, Pin.IN, Pin.PULL_UP)\nmodeButton = Pin(2, Pin.IN, Pin.PULL_UP)\nselectButton = Pin(3, Pin.IN, Pin.PULL_UP)\nstartSwitch = Pin(9, Pin.IN, Pin.PULL_UP)\nreadySwitch = Pin(8, Pin.IN, Pin.PULL_UP)\n\nleftCounter = 0\nrightCounter = 0\nmodeCounter = 0\nselectCounter = 0\nreadyCounter = 0\nstartCounter = 0\n\nleftPressed = False\nrightPressed = False\n\ncurrentSwitchState = \"invalid\"\nlastSwitchState = \"none\"\n\nif (not readySwitch.value()):\n    currentSwitchState = \"ready\"  # fixed: was assigned to the never-read name currentState\nelif (not startSwitch.value()):\n    currentSwitchState = \"start\"\n\nmeasuring = False\ntextUpdateCounter = 0\n\nstartTimers()\n\nwhile(True):\n    time.sleep(0.001)\n    textUpdateCounter += 1\n    if(textUpdateCounter >= 9):\n        textUpdateCounter = 0\n        updateText()\n    \n    if (not leftButton.value()):\n        leftCounter += 1\n        if (leftCounter == 5):\n            leftPressed = True\n        elif (leftCounter > 100):\n            leftCounter -= 5\n            leftPressed = True\n    else:\n        leftCounter = 0\n    \n    if (not rightButton.value()):\n        rightCounter += 1\n        if (rightCounter == 5):\n            rightPressed = True\n        elif (rightCounter > 100):\n            rightCounter -= 5\n            rightPressed = True\n    else:\n        rightCounter = 0\n    \n    if (not modeButton.value()):\n        modeCounter += 1\n        if (modeCounter == 5):\n            modePressed = True\n    else:\n        modeCounter = 0\n        modePressed = False\n    \n    if (not selectButton.value()):\n        selectCounter += 1\n        if (selectCounter == 5):  # fixed: was modeCounter, a copy-paste slip from the mode-button block above\n            selectPressed = True\n    else:\n        selectCounter = 0\n        selectPressed = False\n    \n    if (not readySwitch.value()):\n        readyCounter += 1\n        if (readyCounter > 5):\n            readyCounter = 0\n            currentSwitchState = \"ready\"\n    else:\n        readyCounter = 0\n    \n    if (not startSwitch.value()):\n        startCounter += 1\n        if (startCounter > 5):\n            startCounter = 0\n            currentSwitchState = \"start\"\n    else:\n        startCounter = 0\n    \n    if(not measuring):\n        if(leftPressed):\n            leftPressed = False\n            if(cursorPos == 0):\n                moveCursor(159)\n            else:\n                moveCursor(cursorPos - 1)\n        \n        if(rightPressed):\n            
rightPressed = False\n if(cursorPos == 159):\n moveCursor(0)\n else:\n moveCursor(cursorPos + 1)\n \n if(modePressed):\n modePressed = False\n openMenu()\n \n if (currentSwitchState == \"start\" and lastSwitchState == \"ready\"):\n tft.fillrect((0,9), (160, 128), TFT.BLACK)\n drawAxes()\n cursorPos = 0\n plottingIndex = 0\n measuring = True\n lastSwitchState = currentSwitchState\n\n","repo_name":"Cutout1/scope","sub_path":"MicroPython Code/Advanced/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35337506754","text":"nuber = int(input())\n\nA = ord('A')\nZ = ord('Z')\na = ord('a')\nz = ord('z')\n\nif A <= nuber <= Z or a <= nuber <= z:\n print('Это буква', chr(nuber))\nelse:\n print('Это не буква, а символ', chr(nuber))\n\n","repo_name":"VolkUralskiy/Education","sub_path":"Zadacha_5.py","file_name":"Zadacha_5.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20275820158","text":"#!/usr/bin/python3\n## mulePierre.py for zappy in /home/karraz_s/rendu/PSU_2015_zappy\n## \n## Made by stephane karraz\n## Login \n## \n## Started on Sun Jun 26 23:39:16 2016 stephane karraz\n## Last update Sun Jun 26 23:39:23 2016 stephane karraz\n##\n\n\nfrom random import *\nimport time\n\ndef init_list(player, List):\n List.append((\"linemate\", player.search_object(\"linemate\")))\n List.append((\"deraumere\", player.search_object(\"deraumere\")))\n List.append((\"sibur\", player.search_object(\"sibur\")))\n List.append((\"mendiane\", player.search_object(\"mendiane\")))\n List.append((\"phiras\", player.search_object(\"phiras\")))\n List.append((\"thystame\", player.search_object(\"thystame\")))\n\ndef level1(player):\n List = []\n player.see_objects()\n init_list(player, List)\n\n while (player.ressources[\"nourriture\"] < 15 and player.dead == False):\n player.see_objects()\n coor = player.search_object(\"nourriture\")\n if (coor[0] != -1):\n player.walk_to_case(coor[0], coor[1])\n player.get_object(\"nourriture\")\n else:\n r = randint(0, 1)\n if (r == 0):\n player.turn_left()\n if (r == 1):\n player.turn_right()\n player.walk()\n player.get_inventory()\n\n player.get_inventory()\n while (player.ressources[\"nourriture\"] > 10 and player.get_nb_pierres_inv() < 3 and player.dead == False):\n player.see_objects()\n newList = []\n for maillon in List:\n tmp = maillon\n newList.append((tmp[0], player.search_object(tmp[0])))\n List = newList\n res = player.find_smaller(List)\n if (res[1][0] != -1):\n player.walk_to_case(res[1][0], res[1][1])\n if (player.cli.broad[\"BIP_BIP\"] != 0 and player.cli.broad[\"GENKIDAMA\"] != 0\n and player.cli.broad[\"TAKE_MY_ENERGY\"] != 0):\n player.get_object(res[0])\n else:\n player.walk()\n\n else:\n r = randint(0, 1)\n if (r == 0):\n player.turn_left()\n player.walk()\n if (r == 1):\n player.turn_right()\n player.walk()\n player.walk()\n player.get_inventory()\n \n player.get_inventory()\n player.see_objects()\n while (player.ressources[\"nourriture\"] > 2 and player.get_nb_pierres_inv() > 0\n and player.dead == False):\n if (player.cli.broad[\"BIP_BIP\"] > 0):\n player.go_to_broadPos(\"BIP_BIP\")\n if (player.cli.broad[\"BIP_BIP\"] == 0):\n for obj in List:\n player.get_inventory()\n while (player.ressources[obj[0]] > 0 and player.dead == False):\n player.drop_object(str(obj[0]))\n player.get_inventory()\n player.get_inventory()\n 
player.get_inventory()\n","repo_name":"sebgtfr/zappy-epitech","sub_path":"ia_client/IA/mulePierre.py","file_name":"mulePierre.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24006269199","text":"import json\nimport re\nimport requests\nfrom pyquery import PyQuery as pq\n\nurlx=\"http://www.8wenku.com/book/\"\npurl=\"http://www.8wenku.com\"\nfor i in range(10000):\n    if i<39:\n        pass\n    else:\n        url=urlx+str(i)\n        info=requests.get(url)\n        html=pq(info.text)\n        head=html(\"head\").find('title').text()\n        print(head)\n        if re.findall('Application',head):\n            pass\n        else:\n            book={}\n            book['title']=html(\".abook\").find('h2').text()\n            book['state']=html(\".abook\").find('.state').text()\n            book['desc']=html(\".abook\").find('.desc').text()\n            book['count']=html(\".abook\").find('.info>li:eq(0)').text()\n            book['time']=html(\".abook\").find('.info>li:eq(1)').text()\n            content={}\n            juan=html(\".section_list\").find('.hd')\n            for j in juan:\n                onekey=pq(j).find('h3').text()\n                li=pq(j).next(\".bd\").find(\"li\")\n                oneone={}\n                one=[]# contents of each volume\n                for l in li:\n                    oneone['title']=pq(l).text()# title of each chapter in the volume\n                    oneone['url']=pq(l).find('a').attr(\"href\")# URL of each chapter\n                    oneone['url']=purl+oneone['url']\n                    two=requests.get(oneone['url'])\n                    hw=pq(two.text)\n                    post=hw(\".article-body\").html()\n                    oneone['post']=post.replace(\"最新最全的日本动漫轻小说 轻小说文库(http://www.8wenku.com) 为你一网打尽!\",'')\n                    one.append(oneone.copy())\n                content[onekey]=one\n            boid=re.findall(r\"\\d+$\",url)\n            book['id']=boid[0]\n            book['content']=content\n            f=open(\"8wenku/\"+book['title']+\".json\",'w')\n            f.write(json.dumps(book))\n            f.close()\n    \n    \n\n","repo_name":"MrPiy/YandeMrPiy","sub_path":"wenku.py","file_name":"wenku.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"18765912321","text":"import os\n\nimport allure\nimport pytest\nimport json\n\nfrom base import ApiBase\n\n\n@pytest.mark.API\nclass TestApi(ApiBase):\n\n    @pytest.fixture()\n    def file_path_big(self, repo_root):\n        return os.path.join(repo_root, 'files', 'srvvr_cover.png')\n\n    @pytest.fixture()\n    def file_path_small(self, repo_root):\n        return os.path.join(repo_root, 'files', 'srvvr_logo_cut.jpeg')\n\n    @pytest.fixture()\n    def file_path_poker_segment(self, repo_root):\n        return os.path.join(repo_root, 'json_data', 'poker_segment.json')\n\n    @pytest.fixture()\n    def file_path_vk_study_segment(self, repo_root):\n        return os.path.join(repo_root, 'json_data', 'vk_study_segment.json')\n\n    @pytest.fixture()\n    def file_path_campaign(self, repo_root):\n        return os.path.join(repo_root, 'json_data', 'campaign.json')\n\n    @allure.step(\"Testing creation of campaign.\")\n    def test_creation_of_campaign(self, file_path_big, file_path_small, file_path_campaign):\n        id_big_pic = self.api_client.big_pic_upload(file_path_big)\n        id_small_pic = self.api_client.small_pic_upload(file_path_small)\n        id_primary = self.api_client.get_primary_id_banner()\n        new_created_campaign, id_of_created_campaign = self.api_client.create_new_campaign(\n            file_path_campaign, id_big_pic, id_small_pic, id_primary\n        )\n        assert new_created_campaign.status_code == 200\n        with open(f'{file_path_campaign}', 'r') as file:\n            jsn_data = json.loads(file.read())\n        assert jsn_data['name'] in self.api_client.check_campaign(id_of_created_campaign).values()\n        self.api_client.delete_campaign(id_of_created_campaign)\n\n    @allure.step(\"Testing creation of an audience segment based 
on Poker VK game.\")\n def test_creation_of_audience_segment(self, file_path_poker_segment):\n id_of_poker_game = self.api_client.get_pocker_game_source_id()\n id_of_poker_game_source = self.api_client.add_new_apps_and_games_source(id_of_poker_game)\n new_created_segment_poker, id_new_created_segment_poker = self.api_client.add_new_segment(\n file_path_poker_segment, id_of_poker_game)\n assert new_created_segment_poker.status_code == 200\n with open(f'{file_path_poker_segment}', 'r') as file:\n jsn_data = json.loads(file.read())\n assert jsn_data['name'] in self.api_client.check_segment(id_new_created_segment_poker).values()\n self.api_client.delete_segment(id_new_created_segment_poker)\n self.api_client.delete_source_poker(id_of_poker_game_source)\n\n @allure.step(\"Testing creation of an audience segment based on VK Study group source.\")\n def test_creation_of_audience_vk_study_group(self, file_path_vk_study_segment):\n id_of_vk_study_group = self.api_client.get_vk_study_group_source_id()\n id_of_vk_group_source = self.api_client.add_new_vk_group_source(id_of_vk_study_group)\n new_created_segment_vk_study, id_new_created_segment_vk_study = self.api_client.add_new_segment(\n file_path_vk_study_segment, id_of_vk_study_group)\n assert new_created_segment_vk_study.status_code == 200\n with open(f'{file_path_vk_study_segment}', 'r') as file:\n jsn_data = json.loads(file.read())\n assert jsn_data['name'] in self.api_client.check_segment(id_new_created_segment_vk_study).values()\n self.api_client.delete_segment(id_new_created_segment_vk_study)\n self.api_client.delete_source_vk_group(id_of_vk_group_source)\n","repo_name":"VK-Education-QA-Python/2022-2-VK-QA-PYTHON-tmlnv","sub_path":"homework3/test_my_target_api.py","file_name":"test_my_target_api.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12089181753","text":"\nimport turtle\n\ns = turtle.Screen()\ns.title('My Turtle Drawing')\ns.bgcolor('white')\nt = turtle.Pen()\nt.shape('classic')\nt.shapesize(1,1,1)\nt.pen(pencolor='red',fillcolor='yellow',speed=5)\n\nt.begin_fill()\n\n# petal, 4\nfor i in range(4):\n t.circle(100,90)\n t.left(90)\n t.circle(100,90)\n\nt.end_fill()\n\nturtle.done()\n","repo_name":"bon-six/PythonBasicCourseCode","sub_path":"unit5/drawing_2e.py","file_name":"drawing_2e.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74357546759","text":"\"\"\"\nSearch handled for Rakuten Shopping service\n\"\"\"\nfrom shopping_search.settings import SERVICES_CONFIG\n\nfrom itertools import chain\nfrom rakutenichiba import RakutenIchibaAPI\nfrom shopping_search.shopping_services.utils import IS_DATA_VALID\n# import logging\n# LOGGER = logging.getLogger(__name__)\n\n\nRAKUTEN = RakutenIchibaAPI(**SERVICES_CONFIG['rakuten'])\n\n\n# pylint: disable=too-many-arguments\ndef search(category, keywords, maximum_price,\n minimum_price, sort, page):\n \"\"\"\n Performs Search. 
Returns 100 results per page\n \"\"\"\n params = dict(\n genreId=category,\n keyword=keywords\n )\n if maximum_price is not None:\n params['maxPrice'] = int(float(maximum_price))\n if minimum_price is not None:\n params['minPrice'] = int(float(minimum_price))\n if sort is not None:\n params['sort'] = '+itemPrice' if sort == 'price' else '-itemPrice'\n\n # we threat 100 results as 1 page which equals to 4 pages\n # on current API search request\n page = page * 4\n\n # lets load 100 search results\n results = []\n # pylint: disable=bad-builtin\n for i in range(page, page + 4):\n result = RAKUTEN.item_search(hits=25, page=i+1, **params)\n result = map(extract_data, result.get('Items', []))\n results.append(result)\n responce = list(filter(IS_DATA_VALID, chain(*results)))\n return responce\n\n\ndef extract_data(product):\n \"\"\"\n Extracts data from search result item\n \"\"\"\n if not isinstance(product, dict) and product:\n return\n image = product.get('mediumImageUrls', None)\n price = product.get('itemPrice', None)\n data = {\n 'service': 'rakuten',\n 'currency': None,\n 'price': price and int(price) or price,\n 'image': image[0] if image else 0,\n 'id': product.get('itemCode', None),\n # 'ProductId': product['itemCode', None],\n 'DetailPageURL': product.get('itemUrl', None),\n 'Label': product.get('itemCaption', None),\n 'EditorialReview': [\n {'name': 'Description',\n 'value': product.get('itemCaption', None)}],\n 'ProductGroup': product.get('genreId', None), # get it name to display\n 'Title': product.get('itemName', None),\n 'Manufacturer': product.get('shopName', None),\n 'CustomerReviews': product.get('itemUrl', None), # INFO: no such thing\n 'images': [\n {'SmallImage': small,\n 'LargeImage': small.rsplit('?', 1)[0]}\n for small in product.get('smallImageUrls', [])],\n 'ItemAttributes': [],\n }\n return data\n","repo_name":"enkidulan/goods_search","sub_path":"shopping_search/shopping_services/rakuten.py","file_name":"rakuten.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"3236488679","text":"#!/usr/bin/env python3\n\"\"\"7. 
Transformer Decoder Block\"\"\"\n\nimport tensorflow as tf\nMultiHeadAttention = __import__('6-multihead_attention').MultiHeadAttention\n\n\nclass DecoderBlock(tf.keras.layers.Layer):\n    \"\"\"\n    A class that inherits from tensorflow.keras.layers.Layer\n    to create a decoder block for a transformer\n    \"\"\"\n    def __init__(self, dm, h, hidden, drop_rate=0.1):\n        \"\"\"\n        Constructor class\n        \"\"\"\n        super().__init__()\n\n        self.mha1 = MultiHeadAttention(dm, h)\n        self.mha2 = MultiHeadAttention(dm, h)\n        self.dense_hidden = tf.keras.layers.Dense(units=hidden,\n                                                  activation='relu')\n        self.dense_output = tf.keras.layers.Dense(units=dm)\n        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n        self.dropout1 = tf.keras.layers.Dropout(drop_rate)\n        self.dropout2 = tf.keras.layers.Dropout(drop_rate)\n        self.dropout3 = tf.keras.layers.Dropout(drop_rate)\n\n    def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n        \"\"\"\n        Call function\n        \"\"\"\n        attention, attention_block = self.mha1(x, x, x, look_ahead_mask)\n        attention = self.dropout1(attention, training=training)\n        out1 = self.layernorm1(attention + x)\n        attention2, attn_weights_block2 = self.mha2(out1,\n                                                    encoder_output,\n                                                    encoder_output,\n                                                    padding_mask)\n        attention2 = self.dropout2(attention2, training=training)\n        out2 = self.layernorm2(attention2 + out1)\n        hidden_output = self.dense_hidden(out2)\n        output_output = self.dense_output(hidden_output)\n        ffn_output = self.dropout3(output_output, training=training)\n        output = self.layernorm3(ffn_output + out2)\n\n        return output\n","repo_name":"rubenoliveros/holbertonschool-machine_learning","sub_path":"supervised_learning/0x11-attention/8-transformer_decoder_block.py","file_name":"8-transformer_decoder_block.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"32917025929","text":"import numpy as np\n\nfrom gaussians import G1D\nfrom gaussians.one_dim import (\n    construct_arbitrary_potential_elements,\n    construct_multipole_moment_matrix_elements,\n)\n\n\nclass HOPotential:\n    def __init__(self, omega):\n        self.omega = omega\n\n    def __call__(self, x):\n        return 0.5 * self.omega**2 * x**2\n\n\nclass DWPotentialSmooth:\n    \"\"\"\n    This is the double-well potential used by J. Kryvi and S. Bøe in their\n    thesis work. See Eq. 
[13.11] in Bøe: https://www.duo.uio.no/handle/10852/37170\n \"\"\"\n\n def __init__(self, a=4):\n self.a = a\n\n def __call__(self, x):\n return (\n (1.0 / (2 * self.a**2))\n * (x + 0.5 * self.a) ** 2\n * (x - 0.5 * self.a) ** 2\n )\n\n\ndef test_ho_potential():\n l = 10\n omega = 0.5\n grid = np.linspace(-10, 10, 1001)\n\n gaussians = [G1D(i, omega / 2, 0) for i in range(l)]\n\n v_mm = (\n 0.5\n * omega**2\n * construct_multipole_moment_matrix_elements(2, 0, gaussians)\n )\n v_num = construct_arbitrary_potential_elements(\n HOPotential(omega), gaussians, grid\n )\n\n np.testing.assert_allclose(v_mm, v_num, atol=1e-12)\n np.testing.assert_allclose(v_mm, v_mm.T)\n\n\ndef test_dw_smooth():\n l = 4\n a = 4\n grid = np.linspace(-10, 10, 1001)\n\n gaussians = [G1D(i, 1, -2) for i in range(l // 2)]\n gaussians.extend([G1D(i, 1, 2) for i in range(l // 2)])\n\n s = construct_multipole_moment_matrix_elements(0, 0, gaussians)\n x_2 = construct_multipole_moment_matrix_elements(2, 0, gaussians)\n x_4 = construct_multipole_moment_matrix_elements(4, 0, gaussians)\n\n v_mm = 1 / (2 * a**2) * (x_4 - 0.5 * a**2 * x_2 + 1 / 16 * a**4 * s)\n v_num = construct_arbitrary_potential_elements(\n DWPotentialSmooth(a), gaussians, grid\n )\n\n np.testing.assert_allclose(v_mm, v_num, atol=1e-12)\n np.testing.assert_allclose(v_mm, v_mm.T)\n","repo_name":"Schoyen/gaussians","sub_path":"tests/test_arbitrary_one_dim_potentials.py","file_name":"test_arbitrary_one_dim_potentials.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30718028934","text":"from dao import shapes\nfrom mbr import *\n\n\ndef main():\n\n while True:\n shapeid = raw_input('shape id> ')\n\n try:\n shapeid = int(shapeid)\n except ValueError:\n break\n\n try:\n shape = shapes[shapeid]\n except IndexError:\n print('shape # {} does not exist! 
'.format(shapeid))\n continue\n\n print(mbr(shape))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lizy14/GIS-assignments","sub_path":"soil/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"13122179496","text":"# Acrescentei funções para otimizar esse programa, deixando ele mais prático\ndef imp_vetor(vdes, vcol):\n # imprime vetor em linhas..........\n lin_total = len(vdes)\n col_total = len(vcol)\n for lin in range(0, lin_total):\n for col in range(0, col_total):\n print(alinha_campo(vdes[lin][col], vcol[col]), end='')\n print()\n\n\ndef alinha_campo(campo_original, campo_tamanho):\n espaco = campo_tamanho - len(campo_original)\n espaco = ' ' * espaco\n return campo_original + espaco\n\n\ndef soma_itens_lista(vlista): # soma os itens de uma lista (vetor)\n car_lin = 0\n col_tot = len(vlista)\n for pos in range(0, col_tot):\n if type(vlista[pos]) == int:\n car_lin = car_lin + vlista[pos]\n else:\n print('campo não númerico encontrado no vetor')\n return 0\n return car_lin\n\n\n# main program #\nlistagem = [('Monitor', '989.99', 'duplo', '1', 'sim', 'jose', ),\n ('Teclado', '50.99', 'simples', '2', 'nao', 'pedro', ),\n ('Mouse', '40.99', 'duplo', '3', 'nao', 'maria', ),\n ('Headset', '80.00', 'simples', '4', 'sim', 'janaina',),\n ('(Controle sem fio', '399.99', 'duplo', '5', 'não', 'maria eduarda', ),\n ('Caixas de som', '99.99', 'simples', '6', 'sim', 'josefina', ),\n ('Carregador celular', '29.99', 'duplo', '7', 'nao', 'eliete')]\n\ncabecalho = [('Descrição do Produto',\n 'Valor',\n 'Tipo',\n 'Em estoque',\n 'Lote',\n 'Nome vendedor')]\n\ncoluna = (23,\n 8,\n 9,\n 6,\n 5,\n 20)\n\nlin_sep = '-' * soma_itens_lista(coluna)\n# ---------------------------------------@\n# __execução__\n#\nprint()\nprint('Listagem de Produtos')\nprint(lin_sep)\nimp_vetor(cabecalho, coluna)\nprint(lin_sep)\nimp_vetor(listagem, coluna)\nprint(lin_sep)\n# #\n# ______fim______#\n","repo_name":"IgorInterliche/exercicios","sub_path":"ex_76_tuplass_listadeprodutos_3.py","file_name":"ex_76_tuplass_listadeprodutos_3.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"23122066117","text":"import datetime\nimport pytest\nimport random\nimport time\n\n\n@pytest.fixture(autouse=True)\ndef check_duration(request, cache):\n key = 'duration/' + request.node.nodeid.replace(':', '_')\n # nodeid's can have colons\n # keys become filenames within .cache\n # replace colons with something filename safe\n start_time = datetime.datetime.now()\n yield\n stop_time = datetime.datetime.now()\n this_duration = (stop_time - start_time).total_seconds()\n last_duration = cache.get(key, None)\n cache.set(key, this_duration)\n if last_duration is not None:\n errorstring = \"test duration over 2x last duration\"\n assert this_duration <= last_duration * 2, errorstring\n\n\n@pytest.mark.parametrize('i', range(5))\ndef test_slow_stuff(i):\n time.sleep(random.random())\n","repo_name":"china-testing/python-api-tesing","sub_path":"python3_libraries/pytest_testing/ch4/cache/test_slower.py","file_name":"test_slower.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":2314,"dataset":"github-code","pt":"63"} +{"seq_id":"37805049065","text":"from typing import Union, Tuple\n\n# type hints\nNumber = Union[int, float]\nFloatPair = Tuple[float, float]\n\n\nclass 
BBox:\n \"\"\"\n Bounding Box\n ------------\n - box - left, top, width, height\n - iterable - can be cast to tuple or list\n - indexable (view only)\n \"\"\"\n\n def __init__(self, box: tuple):\n\n arglist = list(map(float, box)) # type: ignore\n\n # all params specified\n if len(arglist) == 4:\n self.left, self.top, self.width, self.height = arglist\n\n # only size specified\n elif len(arglist) == 2:\n self.left, self.top = 0.0, 0.0\n self.width, self.height = arglist\n\n # invalid\n else:\n raise ValueError(\"invalid bbox constructor\")\n\n def __getitem__(self, key):\n return [self.left, self.top, self.width, self.height][key]\n\n def __iter__(self):\n return iter((self.left, self.top, self.width, self.height))\n\n @property\n def offset(self) -> FloatPair:\n return (self.left, self.top)\n\n @property\n def size(self) -> FloatPair:\n return (self.width, self.height)\n\n @property\n def center(self) -> FloatPair:\n hcenter = self.left + self.width / 2\n vcenter = self.top + self.height / 2\n return (hcenter, vcenter)\n\n def get_sub_bbox(\n self, fraction: Union[Number, FloatPair], alignment=\"MM\"\n ) -> \"BBox\":\n \"\"\"\n Get a smaller bounding box from this box\n -------------\n - fraction - the fraction of box occupied by sub bbox\n - alignment - alignment of sub bbox (horiz, vert)\n \"\"\"\n # convert to tuple if number given\n if isinstance(fraction, float) or isinstance(fraction, int):\n fraction = (fraction, fraction)\n\n # illegal fraction\n if fraction[0] > 1 or fraction[1] > 1:\n raise ValueError(\"fractions should be strictly <= 1.0\")\n\n # create smaller bbox\n new_width = self.width * fraction[0]\n new_height = self.height * fraction[1]\n smaller = BBox((new_width, new_height))\n\n # unpack (strictly 2 values)\n halign, valign = tuple(alignment.upper())\n\n # table for alignment\n htable = {\"L\": 0, \"M\": 0.5, \"R\": 1}\n vtable = {\"T\": 0, \"M\": 0.5, \"B\": 1}\n\n # align the smaller bbox\n smaller.left = self.left + (self.width - smaller.width) * htable[halign]\n smaller.top = self.top + (self.height - smaller.height) * vtable[valign]\n\n return smaller\n","repo_name":"notshridhar/custom-icon-gen","sub_path":"icongen/utils/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"7609696916","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import api, fields, models, SUPERUSER_ID, _\nfrom odoo.osv import expression\nfrom odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT\nfrom odoo.tools.float_utils import float_compare\nfrom odoo.exceptions import AccessError, UserError, ValidationError\nfrom odoo.tools.misc import formatLang, get_lang\n\n\nclass PurchaseQuotation(models.Model):\n _name = \"purchase.quotation\"\n _inherit = ['mail.thread', 'mail.activity.mixin', 'portal.mixin']\n _description = \"Purchase Quotation\"\n _order = 'date_quotation desc, id desc'\n\n @api.depends('quotation_line.price_total')\n def _amount_all(self):\n for quotation in self:\n amount_untaxed = amount_tax = 0.0\n for line in quotation.quotation_line:\n line._compute_amount()\n amount_untaxed += line.price_subtotal\n\n amount_tax += line.price_tax\n quotation.update({\n 'amount_untaxed': quotation.currency_id.round(amount_untaxed),\n 'amount_tax': quotation.currency_id.round(amount_tax),\n 'amount_total': amount_untaxed + amount_tax,\n })\n\n @api.depends('state', 
'quotation_line.qty_invoiced', 'quotation_line.qty_received', 'quotation_line.product_qty')\n def _get_invoiced(self):\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for quotation in self:\n if quotation.state not in ('purchase', 'done'):\n quotation.invoice_status = 'no'\n continue\n if any(\n float_compare(\n line.qty_invoiced,\n line.product_qty if line.product_id.purchase_method == 'purchase' else line.qty_received,\n precision_digits=precision,)\n == -1\n for line in quotation.quotation_line.filtered(lambda l: not l.display_type)\n ):\n quotation.invoice_status = 'to invoice'\n elif (\n all(\n float_compare(\n line.qty_invoiced,\n line.product_qty if line.product_id.purchase_method == \"purchase\" else line.qty_received,\n precision_digits=precision,\n )\n >= 0\n for line in quotation.quotation_line.filtered(lambda l: not l.display_type)\n )\n and quotation.invoice_ids\n ):\n quotation.invoice_status = 'invoiced'\n else:\n quotation.invoice_status = 'no'\n\n @api.depends('quotation_line.invoice_lines.move_id')\n def _compute_invoice(self):\n for quotation in self:\n invoices = quotation.mapped('quotation_line.invoice_lines.move_id')\n quotation.invoice_ids = invoices\n quotation.invoice_count = len(invoices)\n\n READONLY_STATES = {\n 'purchase': [('readonly', True)],\n 'done': [('readonly', True)],\n 'cancel': [('readonly', True)],\n }\n\n name = fields.Char('Quotation Reference', index=True, copy=False, default='New')\n origin = fields.Char('Source Document', copy=False,\n help=\"Reference of the document that generated this purchase quotation \"\n \"request (e.g. a sales quotation)\")\n partner_ref = fields.Char('Vendor Reference', copy=False,\n help=\"Reference of the sales quotation or bid sent by the vendor. \"\n \"It's used to do the matching when you receive the \"\n \"products as this reference is usually written on the \"\n \"delivery quotation sent by your vendor.\")\n date_quotation = fields.Datetime('Quotation Date', required=True, states=READONLY_STATES, index=True, copy=False,\n default=fields.Datetime.now, \\\n help=\"Depicts the date where the Quotation should be validated and converted into a purchase quotation.\")\n date_approve = fields.Datetime('Confirmation Date', readonly=1, index=True, copy=False)\n partner_id = fields.Many2one('res.partner', string='Vendor', required=True, states=READONLY_STATES,\n change_default=True, tracking=True,\n domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\",\n help=\"You can find a vendor by its Name, TIN, Email or Internal Reference.\")\n dest_address_id = fields.Many2one('res.partner',\n domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\",\n string='Drop Ship Address', states=READONLY_STATES,\n help=\"Put an address if you want to deliver directly from the vendor to the customer. 
\"\n \"Otherwise, keep empty to deliver to your own company.\")\n currency_id = fields.Many2one('res.currency', 'Currency', required=True, states=READONLY_STATES,\n default=lambda self: self.env.company.currency_id.id)\n state = fields.Selection([\n ('draft', 'Draft'),\n ('confirmed', 'Confirmed'),\n ('sent', 'RFQ Sent'),\n ('to approve', 'To Approve'),\n ('purchase', 'Purchase Quotation'),\n ('done', 'Locked'),\n ('cancel', 'Cancelled')\n ], string='Status', readonly=True, index=True, copy=False, default='draft', tracking=True)\n quotation_line = fields.One2many('purchase.quotation.line', 'quotation_id', string='Quotation Lines',\n states={'cancel': [('readonly', True)], 'done': [('readonly', True)]}, copy=True)\n notes = fields.Text('Terms and Conditions')\n\n invoice_count = fields.Integer(compute=\"_compute_invoice\", string='Bill Count', copy=False, default=0, store=True)\n invoice_ids = fields.Many2many('account.move', compute=\"_compute_invoice\", string='Bills', copy=False, store=True)\n invoice_status = fields.Selection([\n ('no', 'Nothing to Bill'),\n ('to invoice', 'Waiting Bills'),\n ('invoiced', 'Fully Billed'),\n ], string='Billing Status', compute='_get_invoiced', store=True, readonly=True, copy=False, default='no')\n\n # There is no inverse function on purpose since the date may be different on each line\n date_planned = fields.Datetime(string='Receipt Date', index=True)\n\n amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all',\n tracking=True)\n amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all')\n amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all')\n\n fiscal_position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position',\n domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\")\n payment_term_id = fields.Many2one('account.payment.term', 'Payment Terms',\n domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\")\n incoterm_id = fields.Many2one('account.incoterms', 'Incoterm', states={'done': [('readonly', True)]},\n help=\"International Commercial Terms are a series of predefined commercial terms used in international transactions.\")\n\n product_id = fields.Many2one('product.product', related='quotation_line.product_id', string='Product',\n readonly=False)\n user_id = fields.Many2one(\n 'res.users', string='Purchase Representative', index=True, tracking=True,\n default=lambda self: self.env.user, check_company=True)\n company_id = fields.Many2one('res.company', 'Company', required=True, index=True, states=READONLY_STATES,\n default=lambda self: self.env.company.id)\n currency_rate = fields.Float(\"Currency Rate\", compute='_compute_currency_rate', compute_sudo=True, store=True,\n readonly=True,\n help='Ratio between the purchase quotation currency and the company currency')\n\n purchase_order_ids = fields.One2many('purchase.order', 'quotation_id', string='Purchase Orders')\n purchase_order_count = fields.Integer(string='Sale Orders', compute='_compute_purchase_order_count')\n revision_ids = fields.One2many('purchase.quotation.revision', 'quotation_id', string='Revisions')\n revision_count = fields.Integer(string='Revisions', compute='_compute_revision_count')\n quotation_id = fields.Many2one('purchase.quotation', string='Purchase Quotation')\n number = fields.Integer(string='Revisions Number', default=1)\n type = fields.Selection([\n ('normal', 'Quotation'),\n 
('revise', 'Revise Quotation'),\n ], string='Status', copy=False, index=True, default='normal')\n\n last_purchase_ids = fields.One2many('last.five.purchase', 'last_purchase_id', string='Last Purchase')\n stock_details_id = fields.One2many('show.stock.details', 'stock_details_ids', string='Last Purchase')\n\n @api.onchange('quotation_line')\n def onchange_quotation_lines(self):\n if self.quotation_line:\n list = [(5, 0)]\n list1 = [(5, 0)]\n for rec in self.quotation_line:\n purchase_lines = self.env['purchase.order.line'].sudo().search(\n [('product_id', '=', rec.product_id.id)\n ], order='create_date desc', limit=5)\n quant_ids = self.env['stock.quant'].sudo().search(\n [('product_id', '=', rec.product_id.id), ('location_id.usage', '=', 'internal')])\n for i in purchase_lines:\n list.append((0, 0, {\n 'product_id': i.product_id,\n 'vendor_id': i.partner_id,\n 'price': i.price_total,\n 'product_qty': i.product_qty,\n 'date_planned': i.date_planned,\n }))\n for i in quant_ids:\n list1.append((0, 0, {\n 'location_id': i.location_id,\n 'available_quantity': i.available_quantity,\n 'quantity': i.quantity,\n 'company_id': i.company_id,\n }))\n self.last_purchase_ids = list\n self.stock_details_id = list1\n\n @api.depends('purchase_order_ids')\n def _compute_purchase_order_count(self):\n for order in self:\n order.purchase_order_count = len(order.purchase_order_ids)\n\n def action_view_purchase_order(self):\n action = self.env.ref('purchase.purchase_form_action').read()[0]\n action['domain'] = [('id', 'in', self.mapped('purchase_order_ids.id')), ]\n orders = self.mapped('purchase_order_ids')\n if len(orders) == 1:\n action['views'] = [(self.env.ref('purchase.purchase_order_form').id, 'form')]\n action['res_id'] = orders.id\n return action\n\n @api.depends('revision_ids')\n def _compute_revision_count(self):\n for order in self:\n order.revision_count = self.env['purchase.quotation'].search_count([('origin', '=', self.name)])\n\n # disabled by me\n def action_view_revisions(self):\n action = self.env.ref('purchase_quotations.action_revisions').read()[0]\n revisions = self.env['purchase.quotation'].search([('origin', '=', self.name)])\n action['domain'] = [('id', 'in', revisions.ids)]\n action['name'] = 'Quotation Revision'\n if len(revisions.ids) == 1:\n action['views'] = [(self.env.ref('purchase_quotations.purchase_quotation_form').id, 'form')]\n action['res_id'] = revisions.id\n return action\n\n # def action_revise_quotation(self):\n # revise_quotation = self.env['purchase.quotation'].create({\n # 'partner_id': self.partner_id.id,\n # 'quotation_id': self.id,\n # 'state': 'draft',\n # 'origin': self.name,\n # 'type': 'revise',\n # 'number': self.number,\n # 'date_quotation': fields.Datetime.now()\n # })\n # for line in self.quotation_line:\n # self.env['purchase.quotation.line'].create({\n # 'quotation_id': revise_quotation.id,\n # 'product_id': line.product_id.id,\n # 'name': line.product_id.display_name,\n # 'product_qty': line.product_uom_qty,\n # 'product_uom': line.product_uom.id,\n # 'display_type': line.display_type,\n # 'price_unit': line.price_unit,\n # 'date_planned': line.date_planned,\n # 'taxes_id': [(6, 0, line.taxes_id.ids)],\n #\n # })\n # self.number += 1\n # # self.write({\n # # 'revision_ids': [(4, revise_quotation.id)],\n # # })\n\n @api.constrains('company_id', 'quotation_line')\n def _check_quotation_line_company_id(self):\n for quotation in self:\n companies = quotation.quotation_line.product_id.company_id\n if companies and companies != quotation.company_id:\n 
bad_products = quotation.quotation_line.product_id.filtered(\n                    lambda p: p.company_id and p.company_id != quotation.company_id)\n                raise ValidationError((\n                    _(\"Your quotation contains products from company %s whereas your quotation belongs to company %s. \\n Please change the company of your quotation or remove the products from other companies (%s).\") % (\n                        ', '.join(companies.mapped('display_name')),\n                        quotation.company_id.display_name,\n                        ', '.join(bad_products.mapped('display_name')))))\n\n    def _compute_access_url(self):\n        super(PurchaseQuotation, self)._compute_access_url()\n        for quotation in self:\n            quotation.access_url = '/my/purchase/%s' % (quotation.id)\n\n    # @api.model\n    # def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):\n    #     args = args or []\n    #     domain = []\n    #     if name:\n    #         domain = ['|', ('name', operator, name), ('partner_ref', operator, name)]\n    #     purchase_quotation_ids = self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)\n    #     return models.lazy_name_get(self.browse(purchase_quotation_ids).with_user(name_get_uid))\n\n    @api.depends('date_quotation', 'currency_id', 'company_id', 'company_id.currency_id')\n    def _compute_currency_rate(self):\n        for quotation in self:\n            quotation.currency_rate = self.env['res.currency']._get_conversion_rate(quotation.company_id.currency_id,\n                                                                                    quotation.currency_id,\n                                                                                    quotation.company_id,\n                                                                                    quotation.date_quotation)\n\n    @api.depends('name', 'partner_ref')\n    def name_get(self):\n        result = []\n        for po in self:\n            name = po.name\n            if po.partner_ref:\n                name += ' (' + po.partner_ref + ')'\n            if self.env.context.get('show_total_amount') and po.amount_total:\n                name += ': ' + formatLang(self.env, po.amount_total, currency_obj=po.currency_id)\n            result.append((po.id, name))\n        return result\n\n    # @api.model\n    # def create(self, vals):\n    #     company_id = vals.get('company_id', self.default_get(['company_id'])['company_id'])\n    #     self_comp = self.with_company(company_id)\n    #     if vals.get('name', 'New') == 'New':\n    #         seq_date = None\n    #         if 'date_quotation' in vals:\n    #             seq_date = fields.Datetime.context_timestamp(self, fields.Datetime.to_datetime(vals['date_quotation']))\n    #         vals['name'] = self_comp.env['ir.sequence'].next_by_code('purchase.quotation',\n    #                                                                  sequence_date=seq_date) or '/'\n    #     return super(PurchaseQuotation, self.with_context(company_id=company_id)).create(vals)\n\n    def write(self, vals):\n        res = super(PurchaseQuotation, self).write(vals)\n        if vals.get('date_planned'):\n            self.quotation_line.filtered(lambda line: not line.display_type).date_planned = vals['date_planned']\n        return res\n\n    def unlink(self):\n        for quotation in self:\n            if not quotation.state == 'cancel':\n                raise UserError(_('In order to delete a purchase quotation, you must cancel it first.'))\n        return super(PurchaseQuotation, self).unlink()\n\n    def copy(self, default=None):\n        ctx = dict(self.env.context)\n        ctx.pop('default_product_id', None)\n        self = self.with_context(ctx)\n        new_po = super(PurchaseQuotation, self).copy(default=default)\n        for line in new_po.quotation_line:\n            if new_po.date_planned and not line.display_type:\n                line.date_planned = new_po.date_planned\n            elif line.product_id:\n                seller = line.product_id._select_seller(\n                    partner_id=line.partner_id, quantity=line.product_qty,\n                    date=line.quotation_id.date_quotation and line.quotation_id.date_quotation.date(),\n                    uom_id=line.product_uom)\n                line.date_planned = line._get_date_planned(seller)\n        return new_po\n\n    # def _track_subtype(self, init_values):\n    # 
self.ensure_one()\n # if 'state' in init_values and self.state == 'purchase':\n # return self.env.ref('purchase.mt_rfq_approved')\n # elif 'state' in init_values and self.state == 'to approve':\n # return self.env.ref('purchase.mt_rfq_confirmed')\n # elif 'state' in init_values and self.state == 'done':\n # return self.env.ref('purchase.mt_rfq_done')\n # return super(PurchaseQuotation, self)._track_subtype(init_values)\n\n @api.onchange('partner_id', 'company_id')\n def onchange_partner_id(self):\n # Ensures all properties and fiscal positions\n # are taken with the company of the quotation\n # if not defined, with_company doesn't change anything.\n self = self.with_company(self.company_id)\n if not self.partner_id:\n self.fiscal_position_id = False\n self.currency_id = self.env.company.currency_id.id\n else:\n self.fiscal_position_id = self.env['account.fiscal.position'].get_fiscal_position(self.partner_id.id)\n self.payment_term_id = self.partner_id.property_supplier_payment_term_id.id\n self.currency_id = self.partner_id.property_purchase_currency_id.id or self.env.company.currency_id.id\n return {}\n\n @api.onchange('fiscal_position_id')\n def _compute_tax_id(self):\n \"\"\"\n Trigger the recompute of the taxes if the fiscal position is changed on the PO.\n \"\"\"\n for quotation in self:\n quotation.quotation_line._compute_tax_id()\n\n @api.onchange('partner_id')\n def onchange_partner_id_warning(self):\n if not self.partner_id or not self.env.user.has_group('purchase.group_warning_purchase'):\n return\n warning = {}\n title = False\n message = False\n\n partner = self.partner_id\n\n # If partner has no warning, check its company\n if partner.purchase_warn == 'no-message' and partner.parent_id:\n partner = partner.parent_id\n\n if partner.purchase_warn and partner.purchase_warn != 'no-message':\n # Block if partner only has warning but parent company is blocked\n if partner.purchase_warn != 'block' and partner.parent_id and partner.parent_id.purchase_warn == 'block':\n partner = partner.parent_id\n title = _(\"Warning for %s\") % partner.name\n message = partner.purchase_warn_msg\n warning = {\n 'title': title,\n 'message': message\n }\n if partner.purchase_warn == 'block':\n self.update({'partner_id': False})\n return {'warning': warning}\n return {}\n\n def action_rfq_send(self):\n '''\n This function opens a window to compose an email, with the edi purchase template message loaded by default\n '''\n # self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n if self.env.context.get('send_rfq', False):\n template_id = ir_model_data.get_object_reference('purchase', 'email_template_edi_purchase')[1]\n else:\n template_id = ir_model_data.get_object_reference('purchase', 'email_template_edi_purchase_done')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n ctx = dict(self.env.context or {})\n ctx.update({\n 'default_model': 'purchase.quotation',\n 'active_model': 'purchase.quotation',\n 'active_id': self.ids[0],\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'custom_layout': \"mail.mail_notification_paynow\",\n 'force_email': True,\n 'mark_rfq_as_sent': True,\n })\n\n # In the case of a RFQ or a PO, we want the \"View...\" button in line with the state of the\n # object. 
Therefore, we pass the model description in the context, in the language in which\n # the template is rendered.\n lang = self.env.context.get('lang')\n if {'default_template_id', 'default_model', 'default_res_id'} <= ctx.keys():\n template = self.env['mail.template'].browse(ctx['default_template_id'])\n if template and template.lang:\n lang = template._render_template(template.lang, ctx['default_model'], ctx['default_res_id'])\n\n self = self.with_context(lang=lang)\n if self.state in ['draft', 'sent']:\n ctx['model_description'] = _('Request for Quotation')\n else:\n ctx['model_description'] = _('Purchase Quotation')\n\n return {\n 'name': _('Compose Email'),\n 'type': 'ir.actions.act_window',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }\n\n @api.returns('mail.message', lambda value: value.id)\n def message_post(self, **kwargs):\n if self.env.context.get('mark_rfq_as_sent'):\n self.filtered(lambda o: o.state == 'draft').write({'state': 'sent'})\n return super(PurchaseQuotation, self.with_context(mail_post_autofollow=True)).message_post(**kwargs)\n\n def print_quotation(self):\n self.write({'state': \"sent\"})\n return self.env.ref('purchase.report_purchase_quotation').report_action(self)\n\n # def button_approve(self, force=False):\n # self.write({'state': 'purchase', 'date_approve': fields.Datetime.now()})\n # self.filtered(lambda p: p.company_id.po_lock == 'lock').write({'state': 'done'})\n # return {}\n\n def button_draft(self):\n self.write({'state': 'draft'})\n return {}\n\n def confirmation(self):\n # company_id = vals.get('company_id', self.default_get(['company_id'])['company_id'])\n # self_comp = self.with_company(company_id)\n # if vals.get('name', 'New') == 'New':\n # print('sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss')\n # seq_date = None\n # if 'date_quotation' in vals:\n # seq_date = fields.Datetime.context_timestamp(self, fields.Datetime.to_datetime(vals['date_quotation']))\n # vals['name'] = self_comp.env['ir.sequence'].next_by_code('purchase.quotation',\n # sequence_date=seq_date) or '/'\n # return super(PurchaseQuotation, self.with_context(company_id=company_id)).create(vals)\n self.name = self.env[\"ir.sequence\"].next_by_code(\"purchase.quotation\")\n self.write({'state': 'confirmed'})\n\n def button_confirm(self):\n purchase_order = self.env['purchase.order'].create({\n 'partner_id': self.partner_id.id,\n 'quotation_id': self.id,\n 'origin': self.name,\n 'date_order': fields.Datetime.now()\n })\n for line in self.quotation_line:\n self.env['purchase.order.line'].create({\n 'order_id': purchase_order.id,\n 'product_id': line.product_id.id,\n 'name': line.product_id.display_name,\n 'product_qty': line.product_qty,\n 'product_uom': line.product_uom.id,\n 'display_type': line.display_type,\n 'price_unit': line.price_unit,\n 'date_planned': line.date_planned,\n 'taxes_id': [(6, 0, line.taxes_id.ids)],\n })\n self.write({\n 'purchase_order_ids': [(4, purchase_order.id)],\n 'state': 'purchase',\n # 'date_order': fields.Datetime.now()\n })\n return purchase_order\n\n # disabled by me\n\n # def button_confirm(self):\n # purchase_order = self.env['purchase.order'].create({\n # 'partner_id': self.partner_id.id,\n # 'quotation_id': self.id,\n # 'state': 'purchase',\n # 'origin': self.name,\n # 'date_order': fields.Datetime.now()\n # })\n # for line in self.quotation_line:\n # 
self.env['purchase.order.line'].create({\n # 'order_id': purchase_order.id,\n # 'product_id': line.product_id.id,\n # 'name': line.product_id.display_name,\n # 'product_qty': line.product_qty,\n # 'product_uom': line.product_uom.id,\n # 'display_type': line.display_type,\n # 'price_unit': line.price_unit,\n # 'date_planned': line.date_planned,\n # 'taxes_id': [(6, 0, line.taxes_id.ids)],\n # })\n # self.write({\n # 'purchase_order_ids': [(4, purchase_order.id)],\n # 'state': 'purchase',\n # # 'date_order': fields.Datetime.now()\n # })\n # purchase_order.button_confirm()\n # view = self.env.ref('purchase.purchase_form_action')\n # action = self.env.ref('purchase.purchase_form_action').read()[0]\n # action['domain'] = [('id', 'in', self.mapped('purchase_order_ids.id'))]\n # orders = self.mapped('purchase_order_ids')\n # if len(orders) == 1:\n # action['views'] = [(self.env.ref('purchase.purchase_order_form').id, 'form')]\n # action['res_id'] = orders.id\n # return action\n\n def button_cancel(self):\n for quotation in self:\n for inv in quotation.invoice_ids:\n if inv and inv.state not in ('cancel', 'draft'):\n raise UserError(\n _(\"Unable to cancel this purchase quotation. You must first cancel the related vendor bills.\"))\n\n self.write({'state': 'cancel'})\n\n def button_unlock(self):\n self.write({'state': 'purchase'})\n\n def button_done(self):\n self.write({'state': 'done'})\n\n def _add_supplier_to_product(self):\n # Add the partner in the supplier list of the product if the supplier is not registered for\n # this product. We limit to 10 the number of suppliers for a product to avoid the mess that\n # could be caused for some generic products (\"Miscellaneous\").\n for line in self.quotation_line:\n # Do not add a contact as a supplier\n partner = self.partner_id if not self.partner_id.parent_id else self.partner_id.parent_id\n if line.product_id and partner not in line.product_id.seller_ids.mapped('name') and len(\n line.product_id.seller_ids) <= 10:\n # Convert the price in the right currency.\n currency = partner.property_purchase_currency_id or self.env.company.currency_id\n price = self.currency_id._convert(line.price_unit, currency, line.company_id,\n line.date_quotation or fields.Date.today(), round=False)\n # Compute the price for the template's UoM, because the supplier's UoM is related to that UoM.\n if line.product_id.product_tmpl_id.uom_po_id != line.product_uom:\n default_uom = line.product_id.product_tmpl_id.uom_po_id\n price = line.product_uom._compute_price(price, default_uom)\n\n supplierinfo = {\n 'name': partner.id,\n 'sequence': max(\n line.product_id.seller_ids.mapped('sequence')) + 1 if line.product_id.seller_ids else 1,\n 'min_qty': 0.0,\n 'price': price,\n 'currency_id': currency.id,\n 'delay': 0,\n }\n # In case the quotation partner is a contact address, a new supplierinfo is created on\n # the parent company. 
In this case, we keep the product name and code.\n seller = line.product_id._select_seller(\n partner_id=line.partner_id,\n quantity=line.product_qty,\n date=line.quotation_id.date_quotation and line.quotation_id.date_quotation.date(),\n uom_id=line.product_uom)\n if seller:\n supplierinfo['product_name'] = seller.product_name\n supplierinfo['product_code'] = seller.product_code\n vals = {\n 'seller_ids': [(0, 0, supplierinfo)],\n }\n try:\n line.product_id.write(vals)\n except AccessError: # no write access rights -> just ignore\n break\n\n # disabled by me\n\n # def action_view_invoice(self):\n # '''\n # This function returns an action that display existing vendor bills of given purchase quotation ids.\n # When only one found, show the vendor bill immediately.\n # '''\n # action = self.env.ref('account.action_move_in_invoice_type')\n # result = action.read()[0]\n # create_bill = self.env.context.get('create_bill', False)\n # # override the context to get rid of the default filtering\n # result['context'] = {\n # 'default_type': 'in_invoice',\n # 'default_company_id': self.company_id.id,\n # 'default_purchase_id': self.id,\n # 'default_partner_id': self.partner_id.id,\n # }\n # # Invoice_ids may be filtered depending on the user. To ensure we get all\n # # invoices related to the purchase quotation, we read them in sudo to fill the\n # # cache.\n # self.sudo()._read(['invoice_ids'])\n # # choose the view_mode accordingly\n # if len(self.invoice_ids) > 1 and not create_bill:\n # result['domain'] = \"[('id', 'in', \" + str(self.invoice_ids.ids) + \")]\"\n # else:\n # res = self.env.ref('account.view_move_form', False)\n # form_view = [(res and res.id or False, 'form')]\n # if 'views' in result:\n # result['views'] = form_view + [(state, view) for state, view in action['views'] if view != 'form']\n # else:\n # result['views'] = form_view\n # # Do not set an invoice_id if we want to create a new bill.\n # if not create_bill:\n # result['res_id'] = self.invoice_ids.id or False\n # result['context']['default_invoice_origin'] = self.name\n # result['context']['default_ref'] = self.partner_ref\n # return result\n\n # @api.onchange(\"quotation_line\")\n # def _onchange_product_id(self):\n #\n # list = []\n # self._cr.execute('''select product_id,price_unit from purchase_order where (product_id='%s')''' % (self.product_id))\n # product_id = self._cr.fetchall()\n # for id in product_id:\n # list.append(id[0])\n #\n # print(list,\"++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # # for element in main_list:\n # # val = {\"code\": self.code,\n # # \"name\": self.account_name,\n # # \"user_type_id\": self.user_type_id.id,\n # # \"company_id\": element,\n # # 'reconcile': True}\n # # self.env['account.account'].create(val)\n # # else:\n # # raise ValidationError(\"The code of the account must be unique per company !\")\n #\n # # for rec in self:\n # # lines = []\n # # for lines in self.quotation_line:\n # # vals = {\n # # \"product_id\": lines.id,\n # # }\n # # lines.append((0, 0, vals))\n # # rec.last_purchase = lines\n\n\nclass PurchaseQuotationLine(models.Model):\n _name = 'purchase.quotation.line'\n _description = 'Purchase Quotation Line'\n _order = 'quotation_id, sequence, id'\n\n name = fields.Text(string='Description', required=True)\n sequence = fields.Integer(string='Sequence', default=10)\n product_qty = fields.Float(string='Quantity', digits='Product Unit of Measure', required=True)\n product_uom_qty = fields.Float(string='Total Quantity', compute='_compute_product_uom_qty', 
store=True)\n date_planned = fields.Datetime(string='Scheduled Date', index=True)\n taxes_id = fields.Many2many('account.tax', string='Taxes',\n domain=['|', ('active', '=', False), ('active', '=', True)])\n product_uom = fields.Many2one('uom.uom', string='Unit of Measure',\n domain=\"[('category_id', '=', product_uom_category_id)]\")\n product_uom_category_id = fields.Many2one(related='product_id.uom_id.category_id')\n product_id = fields.Many2one('product.product', string='Product', domain=[('purchase_ok', '=', True)],\n change_default=True)\n product_type = fields.Selection(related='product_id.type', readonly=True)\n price_unit = fields.Float(string='Unit Price', required=True, digits='Product Price')\n\n price_subtotal = fields.Monetary(compute='_compute_amount', string='Subtotal', store=True)\n price_total = fields.Monetary(compute='_compute_amount', string='Total', store=True)\n price_tax = fields.Float(compute='_compute_amount', string='Tax', store=True)\n\n quotation_id = fields.Many2one('purchase.quotation', string='Quotation Reference', index=True, required=True,\n ondelete='cascade')\n account_analytic_id = fields.Many2one('account.analytic.account', string='Analytic Account')\n analytic_tag_ids = fields.Many2many('account.analytic.tag', string='Analytic Tags')\n company_id = fields.Many2one('res.company', related='quotation_id.company_id', string='Company', store=True,\n readonly=True)\n state = fields.Selection(related='quotation_id.state', store=True, readonly=False)\n\n invoice_lines = fields.One2many('account.move.line', 'purchase_line_id', string=\"Bill Lines\", readonly=True,\n copy=False)\n\n # Replace by invoiced Qty\n qty_invoiced = fields.Float(compute='_compute_qty_invoiced', string=\"Billed Qty\", digits='Product Unit of Measure',\n store=True)\n\n qty_received_method = fields.Selection([('manual', 'Manual')], string=\"Received Qty Method\",\n compute='_compute_qty_received_method', store=True,\n help=\"According to product configuration, the received quantity can be automatically computed by one of these mechanisms:\\n\"\n \" - Manual: the quantity is set manually on the line\\n\"\n \" - Stock Moves: the quantity comes from confirmed pickings\\n\")\n qty_received = fields.Float(\"Received Qty\", compute='_compute_qty_received', inverse='_inverse_qty_received',\n compute_sudo=True, store=True, digits='Product Unit of Measure')\n qty_received_manual = fields.Float(\"Manual Received Qty\", digits='Product Unit of Measure', copy=False)\n\n partner_id = fields.Many2one('res.partner', related='quotation_id.partner_id', string='Partner', readonly=True,\n store=True)\n currency_id = fields.Many2one(related='quotation_id.currency_id', store=True, string='Currency', readonly=True)\n date_quotation = fields.Datetime(related='quotation_id.date_quotation', string='Quotation Date', readonly=True)\n\n display_type = fields.Selection([\n ('line_section', \"Section\"),\n ('line_note', \"Note\")], default=False, help=\"Technical field for UX purpose.\")\n\n _sql_constraints = [\n ('accountable_required_fields',\n \"CHECK(display_type IS NOT NULL OR (product_id IS NOT NULL AND product_uom IS NOT NULL AND date_planned IS NOT NULL))\",\n \"Missing required fields on accountable purchase quotation line.\"),\n ('non_accountable_null_fields',\n \"CHECK(display_type IS NULL OR (product_id IS NULL AND price_unit = 0 AND product_uom_qty = 0 AND product_uom IS NULL AND date_planned is NULL))\",\n \"Forbidden values on non-accountable purchase quotation line.\"),\n ]\n\n @api.depends('product_qty', 
'price_unit', 'taxes_id')\n def _compute_amount(self):\n for line in self:\n vals = line._prepare_compute_all_values()\n taxes = line.taxes_id.compute_all(\n vals['price_unit'],\n vals['currency_id'],\n vals['product_qty'],\n vals['product'],\n vals['partner'])\n line.update({\n 'price_tax': sum(t.get('amount', 0.0) for t in taxes.get('taxes', [])),\n 'price_total': taxes['total_included'],\n 'price_subtotal': taxes['total_excluded'],\n })\n\n def _prepare_compute_all_values(self):\n # Hook method to return the different argument values for the\n # compute_all method, because the discounts mechanism\n # is not implemented yet on the purchase quotations.\n # This method should disappear as soon as this feature is\n # also introduced like in the sales module.\n self.ensure_one()\n return {\n 'price_unit': self.price_unit,\n 'currency_id': self.quotation_id.currency_id,\n 'product_qty': self.product_qty,\n 'product': self.product_id,\n 'partner': self.quotation_id.partner_id,\n }\n\n def _compute_tax_id(self):\n for line in self:\n line = line.with_company(line.company_id)\n fpos = line.quotation_id.fiscal_position_id or line.quotation_id.fiscal_position_id.get_fiscal_position(\n line.quotation_id.partner_id.id)\n # filter taxes by company\n taxes = line.product_id.supplier_taxes_id.filtered(lambda r: r.company_id == line.env.company)\n line.taxes_id = fpos.map_tax(taxes, line.product_id, line.quotation_id.partner_id)\n\n @api.depends('invoice_lines.move_id.state', 'invoice_lines.quantity')\n def _compute_qty_invoiced(self):\n for line in self:\n qty = 0.0\n for inv_line in line.invoice_lines:\n if inv_line.move_id.state not in ['cancel']:\n if inv_line.move_id.move_type == 'in_invoice':\n qty += inv_line.product_uom_id._compute_quantity(inv_line.quantity, line.product_uom)\n elif inv_line.move_id.move_type == 'in_refund':\n qty -= inv_line.product_uom_id._compute_quantity(inv_line.quantity, line.product_uom)\n line.qty_invoiced = qty\n\n @api.depends('product_id')\n def _compute_qty_received_method(self):\n for line in self:\n if line.product_id and line.product_id.type in ['consu', 'service']:\n line.qty_received_method = 'manual'\n else:\n line.qty_received_method = False\n\n @api.depends('qty_received_method', 'qty_received_manual')\n def _compute_qty_received(self):\n for line in self:\n if line.qty_received_method == 'manual':\n line.qty_received = line.qty_received_manual or 0.0\n else:\n line.qty_received = 0.0\n\n @api.onchange('qty_received')\n def _inverse_qty_received(self):\n \"\"\" When writing on qty_received, if the value should be modified manually (`qty_received_method` = 'manual' only),\n then we put the value in `qty_received_manual`. 
Otherwise, `qty_received_manual` should be False since the\n received qty is automatically computed by other mechanisms.\n \"\"\"\n for line in self:\n if line.qty_received_method == 'manual':\n line.qty_received_manual = line.qty_received\n else:\n line.qty_received_manual = 0.0\n\n @api.model\n def create(self, values):\n if values.get('display_type', self.default_get(['display_type'])['display_type']):\n values.update(product_id=False, price_unit=0, product_uom_qty=0, product_uom=False, date_planned=False)\n\n quotation_id = values.get('quotation_id')\n if 'date_planned' not in values:\n quotation = self.env['purchase.quotation'].browse(quotation_id)\n if quotation.date_planned:\n values['date_planned'] = quotation.date_planned\n line = super(PurchaseQuotationLine, self).create(values)\n if line.quotation_id.state == 'purchase':\n msg = _(\"Extra line with %s \") % (line.product_id.display_name,)\n line.quotation_id.message_post(body=msg)\n return line\n\n def write(self, values):\n if 'display_type' in values and self.filtered(lambda line: line.display_type != values.get('display_type')):\n raise UserError(\n _(\"You cannot change the type of a purchase quotation line. Instead you should delete the current line and create a new line of the proper type.\"))\n\n if 'product_qty' in values:\n for line in self:\n if line.quotation_id.state == 'purchase':\n line.quotation_id.message_post_with_view('purchase.track_po_line_template',\n values={'line': line,\n 'product_qty': values['product_qty']},\n subtype_id=self.env.ref('mail.mt_note').id)\n return super(PurchaseQuotationLine, self).write(values)\n\n def unlink(self):\n for line in self:\n if line.quotation_id.state in ['purchase', 'done']:\n raise UserError(_('Cannot delete a purchase quotation line which is in state \\'%s\\'.') % (line.state,))\n return super(PurchaseQuotationLine, self).unlink()\n\n @api.model\n def _get_date_planned(self, seller, po=False):\n \"\"\"Return the datetime value to use as Schedule Date (``date_planned``) for\n PO Lines that correspond to the given product.seller_ids,\n based on the quotation date (``date_quotation``).\n\n :param Model seller: used to fetch the delivery delay (if no seller\n is provided, the delay is 0)\n :param Model po: purchase.quotation, necessary only if the PO line is\n not yet attached to a PO.\n :rtype: datetime\n :return: desired Schedule Date for the PO line\n \"\"\"\n date_quotation = po.date_quotation if po else self.quotation_id.date_quotation\n if date_quotation:\n return date_quotation + relativedelta(days=seller.delay if seller else 0)\n else:\n return datetime.today() + relativedelta(days=seller.delay if seller else 0)\n\n @api.onchange('product_id')\n def onchange_product_id(self):\n if not self.product_id:\n return\n\n # Reset date, price and quantity since _onchange_quantity will provide default values\n self.date_planned = datetime.today().strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n self.price_unit = self.product_qty = 0.0\n\n self._product_id_change()\n\n self._suggest_quantity()\n self._onchange_quantity()\n\n def _product_id_change(self):\n if not self.product_id:\n return\n\n self.product_uom = self.product_id.uom_po_id or self.product_id.uom_id\n product_lang = self.product_id.with_context(\n lang=get_lang(self.env, self.partner_id.lang).code,\n partner_id=self.partner_id.id,\n company_id=self.company_id.id,\n )\n self.name = self._get_product_purchase_description(product_lang)\n\n self._compute_tax_id()\n\n @api.onchange('product_id')\n def 
onchange_product_id_warning(self):\n if not self.product_id or not self.env.user.has_group('purchase.group_warning_purchase'):\n return\n warning = {}\n title = False\n message = False\n\n product_info = self.product_id\n\n if product_info.purchase_line_warn != 'no-message':\n title = _(\"Warning for %s\") % product_info.name\n message = product_info.purchase_line_warn_msg\n warning['title'] = title\n warning['message'] = message\n if product_info.purchase_line_warn == 'block':\n self.product_id = False\n return {'warning': warning}\n return {}\n\n @api.onchange('product_qty', 'product_uom')\n def _onchange_quantity(self):\n if not self.product_id:\n return\n params = {'quotation_id': self.quotation_id}\n seller = self.product_id._select_seller(\n partner_id=self.partner_id,\n quantity=self.product_qty,\n date=self.quotation_id.date_quotation and self.quotation_id.date_quotation.date(),\n uom_id=self.product_uom,\n params=params)\n\n if seller or not self.date_planned:\n self.date_planned = self._get_date_planned(seller).strftime(DEFAULT_SERVER_DATETIME_FORMAT)\n\n if not seller:\n if self.product_id.seller_ids.filtered(lambda s: s.name.id == self.partner_id.id):\n self.price_unit = 0.0\n return\n\n price_unit = self.env['account.tax']._fix_tax_included_price_company(seller.price,\n self.product_id.supplier_taxes_id,\n self.taxes_id,\n self.company_id) if seller else 0.0\n if price_unit and seller and self.quotation_id.currency_id and seller.currency_id != self.quotation_id.currency_id:\n price_unit = seller.currency_id._convert(\n price_unit, self.quotation_id.currency_id, self.quotation_id.company_id,\n self.date_quotation or fields.Date.today())\n\n if seller and self.product_uom and seller.product_uom != self.product_uom:\n price_unit = seller.product_uom._compute_price(price_unit, self.product_uom)\n\n self.price_unit = price_unit\n\n @api.depends('product_uom', 'product_qty', 'product_id.uom_id')\n def _compute_product_uom_qty(self):\n for line in self:\n if line.product_id and line.product_id.uom_id != line.product_uom:\n line.product_uom_qty = line.product_uom._compute_quantity(line.product_qty, line.product_id.uom_id)\n else:\n line.product_uom_qty = line.product_qty\n\n def _suggest_quantity(self):\n '''\n Suggest a minimal quantity based on the seller\n '''\n if not self.product_id:\n return\n seller_min_qty = self.product_id.seller_ids \\\n .filtered(\n lambda r: r.name == self.quotation_id.partner_id and (not r.product_id or r.product_id == self.product_id)) \\\n .sorted(key=lambda r: r.min_qty)\n if seller_min_qty:\n self.product_qty = seller_min_qty[0].min_qty or 1.0\n self.product_uom = seller_min_qty[0].product_uom\n else:\n self.product_qty = 1.0\n\n def _get_product_purchase_description(self, product_lang):\n self.ensure_one()\n name = product_lang.display_name\n if product_lang.description_purchase:\n name += '\\n' + product_lang.description_purchase\n\n return name\n\n def _prepare_account_move_line(self, move):\n self.ensure_one()\n if self.product_id.purchase_method == 'purchase':\n qty = self.product_qty - self.qty_invoiced\n else:\n qty = self.qty_received - self.qty_invoiced\n if float_compare(qty, 0.0, precision_rounding=self.product_uom.rounding) <= 0:\n qty = 0.0\n\n return {\n 'name': '%s: %s' % (self.quotation_id.name, self.name),\n 'move_id': move.id,\n 'currency_id': move.currency_id.id,\n 'purchase_line_id': self.id,\n 'date_maturity': move.invoice_date_due,\n 'product_uom_id': self.product_uom.id,\n 'product_id': self.product_id.id,\n 'price_unit': 
self.price_unit,\n 'quantity': qty,\n 'partner_id': move.commercial_partner_id.id,\n 'analytic_account_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n 'tax_ids': [(6, 0, self.taxes_id.ids)],\n 'display_type': self.display_type,\n }\n\n\nclass LastFivePurchase(models.TransientModel):\n _name = 'last.five.purchase'\n\n last_purchase_id = fields.Many2one('purchase.quotation', 'last_purchase_ids')\n product_id = fields.Many2one('product.product')\n vendor_id = fields.Many2one('res.partner', 'Vendor')\n price = fields.Float('Price')\n product_qty = fields.Integer('Quantity')\n date_planned = fields.Date('Date')\n\n\nclass StockDetails(models.TransientModel):\n _name = 'show.stock.details'\n\n stock_details_ids = fields.Many2one('purchase.quotation', 'stock_details_id')\n location_id = fields.Many2one('stock.location')\n available_quantity = fields.Float('Available Quantity')\n quantity = fields.Float('Price')\n company_id = fields.Many2one('res.company')\n","repo_name":"anjanakp927/querry","sub_path":"test1_addon/purchase_quotations/models/purchase_quotation.py","file_name":"purchase_quotation.py","file_ext":"py","file_size_in_byte":51512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29082334165","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport math\n\nfrom hpo.trainers.emul.trainer import EarlyTerminateTrainer\nfrom ws.shared.logger import *\n\n\nclass VizMedianETRTrainer(EarlyTerminateTrainer): #\n\n def __init__(self, lookup):\n \n super(VizMedianETRTrainer, self).__init__(lookup)\n\n self.epoch_length = lookup.num_epochs \n self.eval_epoch = int(self.epoch_length/3)\n self.threshold_percentile = 50 # median\n\n def train(self, cand_index, estimates, min_train_epoch=None, space=None):\n acc = 0 # stopping accuracy\n min_epoch = 0\n cur_max_acc = 0\n debug(\"cand_index:{}\".format(cand_index))\n acc_curve = self.acc_curves.loc[cand_index].values\n\n history = [] \n\n for i in range(len(self.history)):\n curve = self.history[i][\"curve\"]\n history.append(np.mean(curve[:self.eval_epoch]))\n if len(history) > 0:\n threshold = np.percentile(history, self.threshold_percentile)\n else:\n threshold = 0.0\n\n debug(\"commencing iteration {}\".format(len(self.history)))\n #debug(\"accuracy curve: {}\".format(acc_curve))\n test_error = 1.0 - max(acc_curve)\n train_epoch = len(acc_curve)\n exec_time = self.total_times[cand_index]\n early_terminated = False\n for i in range(min_epoch, self.epoch_length-1):\n acc = acc_curve[i]\n if acc > cur_max_acc:\n cur_max_acc = acc\n \n #debug(\"current accuracy at epoch{}: {:.4f}\".format(i+1, acc))\n\n if i+1 == self.eval_epoch:\n if acc < threshold:\n debug(\"terminated at epoch{}\".format(i+1))\n train_epoch = self.eval_epoch\n acc_curve = acc_curve[:train_epoch]\n early_terminated = True\n exec_time = self.get_train_time(cand_index, i+1)\n break\n\n self.add_train_history(acc_curve, exec_time, \n train_epoch, early_terminated)\n return {\n \"test_error\": test_error, \n \"train_epoch\": train_epoch,\n \"exec_time\" : exec_time, \n 'early_terminated' : early_terminated\n } \n\n\n","repo_name":"SNU-DRL/DEEP-BO","sub_path":"hpo/trainers/emul/median_etr.py","file_name":"median_etr.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"63"} 
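The `VizMedianETRTrainer` record above applies a median early-termination rule: it keeps the mean of each finished run's partial accuracy curve and stops a new run whose accuracy at `eval_epoch` falls below the median of those means. A minimal standalone sketch of that decision logic (the function name and the toy curves are invented for illustration, not taken from the record):

```python
import numpy as np

def median_rule_stop(acc_at_eval, finished_curves, eval_epoch, percentile=50):
    """Return True if a run should be terminated at eval_epoch.

    finished_curves: accuracy curves of previously completed runs.
    The threshold is the given percentile (median by default) of the mean
    accuracy each finished run achieved over its first eval_epoch epochs.
    """
    if not finished_curves:
        return False  # no history yet, let the run continue
    partial_means = [np.mean(c[:eval_epoch]) for c in finished_curves]
    return acc_at_eval < np.percentile(partial_means, percentile)

# Toy example: the new run sits well below the median of two finished runs.
done = [[0.20, 0.40, 0.50, 0.60], [0.30, 0.50, 0.60, 0.70]]
print(median_rule_stop(acc_at_eval=0.15, finished_curves=done, eval_epoch=3))  # True
```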
+{"seq_id":"25298531667","text":"#!/usr/bin/env python3\n\n\ndef main():\n N, X = map(int, input().split())\n L = []\n for _ in range(N):\n lst = [int(e) for e in input().split()]\n L.append(lst[1:])\n d = {X: 1}\n for l in L:\n next_d = {}\n for a in l:\n for x, num in d.items():\n if x % a == 0:\n next_d[x // a] = next_d.get(x // a, 0) + num\n d = next_d\n print(d.get(1, 0))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmiyakawa/atcoder-workspace","sub_path":"abc233/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10638026290","text":"#Resolución del ejercicio 04 (d) de la clase 0}4 del curso Introducción al lenguaje Python orientado a ingenierías y física - Instituto Balseiro\n#Pablo Chehade\n#Creación: 19/02/2023\n#Última modificación: 20/02/2023\n#Comentarios:\n#(1) en mi pc corre con python archivo.py, no con python3 archivo.py\n\n\n\ndef string_to_list(sudoku_str):\n '''\n Convierte un string con formato sudoku a una lista bidimensional de la forma [[...],[...],...]\n\n Parameters\n ----------\n sudoku_str: string de 9 filas, cada una formada por 9 números\n\n Returns\n -------\n sudoku_list: lista de números enteros tamaño 9x9 donde cada elemento hace referencia al correspondiente en sudoku_str\n '''\n size = 9 #tamaño vertical y horizontal del sudoku\n #Creo la lista\n sudoku_list = []\n\n sudoku_split = sudoku_str.split()\n\n linea = [0]*9\n for i in range(size):\n for j in range(size):\n linea[j] = int(sudoku_split[i][j])\n sudoku_list.append(linea.copy())\n\n return sudoku_list\n\ndef check_repetidos(lista):\n '''\n Verifica si una lista tiene elementos repetidos\n\n Parameters\n ----------\n list: lista\n\n Returns\n -------\n bool: True si tiene elementos repetidos y False en caso contrario\n \n '''\n conj = set(lista)\n if len(conj) != len(lista):\n return True #Hay elementos repetidos\n else:\n return False\n\ndef check_sudoku(grilla):\n '''\n Dada una grilla bidimensional de nros, verifica si es solución correcta del Sudoku. Para que sea una solución correcta debe cumplirse que\n - Los números estén entre 1 y 9\n - En cada fila no deben repetirse\n - En cada columna no deben repetirse\n - En todas las regiones de 3x3 que no se solapan, empezando de cualquier esquina, no deben repetirse\n\n Parameters\n ----------\n grilla: lista bidimensional de números enteros de tamaño 9x9\n\n Returns\n -------\n bool: True si corresponde a la resolución correcta del Sudoku y False en caso contrario.\n \n '''\n size = 9\n\n #Verifico que los nros estén entre el 1 y el 9\n numeros = [1,2,3,4,5,6,7,8,9]\n for i in range(size):\n for j in range(size):\n if (grilla[i][j] in numeros) == False: #Si el nro no está en la lista de nros\n # print(\"Los nros no están entre 1 y 9\")\n return False\n \n #Verifico que los nros de cada fila no se repitan\n for i in range(size):\n if check_repetidos(grilla[i]):\n # print(\"Los nros se repiten en una fila\")\n return False\n \n #Verifico que los nros en cada columna no se repitan\n for i in range(size):\n columna = [0]*9\n for j in range(size):\n columna[j] = grilla[j][i]\n if check_repetidos(columna):\n # print(\"Los nros se repiten en una columna\")\n return False\n \n #Verifico que en todas las regiones de 3x3 que no se solapan, los nros no se repitan\n for i in range(size):\n j, k = (i // 3) * 3, (i % 3) * 3 #Según esto, cuando i = 0,1,2 j=0 y k = 0,3,6. 
In this way, the four upper 3x3 squares are traversed from left to right. Analogously when i = 3,4,5,6,7,8\n r = [grilla[a][b] for a in range(j, j+3) for b in range(k, k+3)] #A list is generated with the elements of each square\n if check_repetidos(r):\n # print(\"The numbers repeat in a square\")\n return False\n \n #If execution got this far, the sudoku is a correct solution\n return True\n\n\n#Test:\n\n# #Given the sudoku\n# sudoku = \"\"\"145327698\n# 839654127\n# 672918543\n# 496185372\n# 218473956\n# 753296481\n# 367542819 \n# 984761235\n# 521839764\"\"\"\n\n# #Convert it to a list and check whether it is solved\n# sudoku_list = string_to_list(sudoku)\n# print(\"Sudoku solved?\", check_sudoku(sudoku_list))\n\n# #Add a 0\n# import copy\n# sudoku_list1 = copy.deepcopy(sudoku_list) #If I don't make a deepcopy, the elements of sudoku_list1 (which are lists) remain the same as those of sudoku_list. Therefore, modifying one would also modify the other.\n# sudoku_list1[0][0] = 0\n# print(\"Added a 0. Sudoku solved?\", check_sudoku(sudoku_list1))\n\n# #Add a 1 so that it repeats in a row\n# sudoku_list1 = copy.deepcopy(sudoku_list)\n# sudoku_list1[0][1] = 1\n# print(\"Repetition in a row. Sudoku solved?\", check_sudoku(sudoku_list1))","repo_name":"Lupama2/IntroPython","sub_path":"Ejercicios a entregar/04_Chehade.py","file_name":"04_Chehade.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39480663515","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@File : forms.py\n@Time : 2020/1/3 11:14\n@Author : chise\n@Email : chise123@live.com\n@Software: PyCharm\n@info :simpleui custom forms\n\"\"\"\nfrom django import forms\nimport uuid\n\nfrom django.core import validators\nfrom django.core.exceptions import ValidationError\n# Provide this import for backwards compatibility.\nfrom django.core.validators import EMPTY_VALUES # NOQA\nfrom django.forms.boundfield import BoundField\nfrom django.forms.forms import DeclarativeFieldsMetaclass, BaseForm\nfrom django.forms.utils import from_current_timezone, to_current_timezone\nfrom django.forms.widgets import (\n FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,\n DateTimeInput, EmailInput, FileInput, HiddenInput, MultipleHiddenInput,\n NullBooleanSelect, NumberInput, Select, SelectMultiple,\n SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,\n URLInput,\n)\nfrom django.utils import formats\nfrom django.utils.dateparse import parse_duration\nfrom django.utils.duration import duration_string\nfrom django.utils.ipv6 import clean_ipv6_address\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _, ngettext_lazy\n\n\nimport copy\n\nfrom django.core.exceptions import NON_FIELD_ERRORS, ValidationError\n# BoundField is imported for backwards compatibility in Django 1.9\nfrom django.forms.boundfield import BoundField # NOQA\nfrom django.forms.fields import Field, FileField\n# pretty_name is imported for backwards compatibility in Django 1.9\nfrom django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA\nfrom django.forms.widgets import Media, MediaDefiningClass\nfrom django.utils.functional import cached_property\nfrom django.utils.html import conditional_escape, html_safe\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext as _\nfrom simpleui.widgets import *\n\nclass 
SimpleForm(BaseForm, metaclass=DeclarativeFieldsMetaclass):\n def get_vue_app_js(self,app_id):\n \"\"\"\n Render the JS that this method needs into the page\n :return:\n \"\"\"\n base_vue_app=\"\"\"\n\n \"\"\"\n data={}\n for name, field in self.fields.items():\n data[name] = ''\n data.update(self.data)\n data_s=\"\"\n for key,value in data.items():\n data_s+=\"%(key)s:'%(value)s'\"%{\n 'key':key,\n 'value':value\n }+','\n return base_vue_app %{\"app_name\":app_id,\"data\":data_s}\n def submit_button(self):\n \"\"\"\n Render the template and add the submit markup\n :return:\n \"\"\"\n # return ''\n\n\n pass\n def as_element(self):\n \"Return this form rendered as HTML s -- excluding the
.\"\n x= self._html_output(\n normal_row='
%(label)s %(field)s%(help_text)s
',\n error_row='%s',\n row_ender='',\n help_text_html=' %s',\n errors_on_separate_row=True,flag=True\n )\n return x\n\n def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row,flag=False):\n \"Output HTML. Used by as_table(), as_ul(), as_p().\"\n top_errors = self.non_field_errors() # Errors that should be displayed above all fields.\n output, hidden_fields = [], []\n\n for name, field in self.fields.items():\n html_class_attr = ''\n bf = self[name]\n if flag:\n bf.field.widget.flag=True\n bf_errors = self.error_class(bf.errors)\n if bf.is_hidden:\n if bf_errors:\n top_errors.extend(\n [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': str(e)}\n for e in bf_errors])\n hidden_fields.append(str(bf))\n else:\n # Create a 'class=\"...\"' attribute if the row should have any\n # CSS classes applied.\n css_classes = bf.css_classes()\n if css_classes:\n html_class_attr = ' class=\"%s\"' % css_classes\n\n if errors_on_separate_row and bf_errors:\n output.append(error_row % str(bf_errors))\n\n if bf.label:\n label = conditional_escape(bf.label)\n label = bf.label_tag(label) or ''\n else:\n label = ''\n\n if field.help_text:\n help_text = help_text_html % field.help_text\n else:\n help_text = ''\n\n output.append(normal_row % {\n 'errors': bf_errors,\n 'label': label,\n 'field': bf,\n 'help_text': help_text,\n 'html_class_attr': html_class_attr,\n 'css_classes': css_classes,\n 'field_name': bf.html_name,\n })\n\n if top_errors:\n output.insert(0, error_row % top_errors)\n if hidden_fields: # Insert any hidden fields in the last row.\n str_hidden = ''.join(hidden_fields)\n if output:\n last_row = output[-1]\n # Chop off the trailing row_ender (e.g. '') and\n # insert the hidden fields.\n if not last_row.endswith(row_ender):\n # This can happen in the as_p() case (and possibly others\n # that users write): if there are only top errors, we may\n # not be able to conscript the last row for our purposes,\n # so insert a new, empty row.\n last_row = (normal_row % {\n 'errors': '',\n 'label': '',\n 'field': '',\n 'help_text': '',\n 'html_class_attr': html_class_attr,\n 'css_classes': '',\n 'field_name': '',\n })\n output.append(last_row)\n output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender\n else:\n # If there aren't any rows in the output, just append the\n # hidden fields.\n output.append(str_hidden)\n if flag:\n app_id = \"x\" + str(uuid.uuid4())[0:5]\n output.insert(0, \"
\" %app_id)\n output.append('
')\n output.append(self.get_vue_app_js(app_id))\n return mark_safe('\\n'.join(output))\n\nclass SCharField(forms.CharField):\n widget = STextInput\nclass SIntegerField(forms.IntegerField):\n def widget_attrs(self, widget):\n \"\"\"\n Fix max and min not being passed through to the widget\n \"\"\"\n attrs = super().widget_attrs(widget)\n if isinstance(widget, NumberInput) or isinstance(widget,SNumberInput):\n if self.min_value is not None:\n attrs['min'] = self.min_value\n if self.max_value is not None:\n attrs['max'] = self.max_value\n return attrs\n widget = SNumberInput\n\nclass SEmailField(forms.EmailField):\n widget = SEmailInput\n\nclass SURLField(forms.URLField):\n widget = SURLInput","repo_name":"newpanjing/simpleui","sub_path":"simpleui/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7514,"program_lang":"python","lang":"en","doc_type":"code","stars":3115,"dataset":"github-code","pt":"63"} +{"seq_id":"71094076680","text":"# coding=UTF-8\nimport cv2\nimport numpy as np\nimport math\n\n\ndef crop_fn(frame, box, h_scale=1, w_scale=1):\n\n box = [int(i) for i in box]\n\n max_h, max_w = frame.shape[:2]\n h, w = box[3] - box[1], box[2] - box[0]\n\n new_box = [\n max(0, box[0] - w_scale*w),\n max(0, box[1] - h_scale*h),\n min(box[2] + w_scale*w, max_w),\n min(box[3] + h_scale*h, max_h),\n ]\n\n crop_img = frame[new_box[1] : new_box[3], new_box[0] : new_box[2], :]\n\n ## Position of the original box within the cropped image\n box_in_crop = [\n box[0] - new_box[0],\n box[1] - new_box[1],\n box[2] - new_box[0],\n box[3] - new_box[1],\n ]\n\n return crop_img, box_in_crop\n\n\nif __name__ == \"__main__\":\n import sys\n from plot import plot_one_box\n\n frame = cv2.imread(\n \"/sdc/jjlv/CODE/视频样本挖掘集合/someTestPic/SZ_CH_Q_05_20181002000115-00-00-0066_00000000.jpg\"\n )\n box = [0, 500, 100, 600]\n cv2.imwrite(\"box.jpg\", frame[box[1] : box[3], box[0] : box[2], :])\n crop_img, box_in_crop = crop_fn(frame, box)\n plot_one_box(box_in_crop, crop_img)\n cv2.imwrite(\"box_2.jpg\", crop_img)\n","repo_name":"lyyaixuexi/Quantization","sub_path":"Network-Slimming/utils/crop_fn.py","file_name":"crop_fn.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"17899474078","text":"import operator\nfrom math import sqrt\n\n# Critics\ncritics = {'Clerk Kent': {'Avengers': 4.0, 'Matrix': 4.2, 'Wonder Woman': 3.0, 'Pacific Rim': 3.5, 'Harry Potter': 3.0,\n 'Hunger Games': 1.0},\n 'Bruce Wayne': {'Avengers': 3.0, 'Matrix': 3.5, 'Wonder Woman': 1.5, 'Pacific Rim': 5.0, 'Harry Potter': 3.0,\n 'Hunger Games': 3.5},\n 'Pepper Pots': {'Avengers': 2.5, 'Matrix': 3.0, 'Pacific Rim': 3.5, 'Harry Potter': 4.0},\n 'Bruce Banner': {'Matrix': 3.5, 'Wonder Woman': 3.0, 'Harry Potter': 4.5, 'Pacific Rim': 4.0,\n 'Hunger Games': 2.5},\n 'Thor Odinson': {'Avengers': 3.0, 'Matrix': 4.0, 'Wonder Woman': 2.0, 'Pacific Rim': 3.0,\n 'Harry Potter': 3.0, 'Hunger Games': 2.0},\n 'Kara Kent': {'Avengers': 3.0, 'Matrix': 4.0, 'Harry Potter': 3.0, 'Pacific Rim': 5.0,\n 'You, Me and Dupree': 3.5},\n 'Oliver Queen': {'Matrix': 4.5, 'Hunger Games': 1.0, 'Pacific Rim': 4.0}}\n\n# My ratings\ntest = {'Eshan Herath': {'Avengers': 4.5, 'Matrix': 4.7,\n 'Wonder Woman': 3.5, 'Pacific Rim': 4.0, 'Harry Potter': 3.5,\n 'Hunger Games': 1.5}}\n\n\n# Objective is to find the critic who has a similar taste to mine on movies\n\n\ndef similarity_score(critic, me):\n # Shared Items\n si = {}\n for item in critic:\n if item in me:\n si[item] = 1\n\n # Number of elements\n n = len(si)\n\n # No items in common\n 
if n == 0:\n return 0\n\n # Preferences Sum\n sum_critic = sum([critic[it] for it in si])\n sum_me = sum([me[it] for it in si])\n\n # Preferences Squared Sum\n sum_square_critic = sum([pow(critic[it], 2) for it in si])\n sum_square_me = sum([pow(me[it], 2) for it in si])\n\n # Sum of products\n sum_of_products = sum([critic[it] * me[it] for it in si])\n\n # Calculating Pearson score\n numerator = sum_of_products - (sum_critic * sum_me / n)\n denominator = sqrt((sum_square_critic - pow(sum_critic, 2) / n) * (sum_square_me - pow(sum_me, 2) / n))\n\n if denominator == 0:\n return 0\n\n r = numerator / denominator\n\n return r\n\n\nmatches = {}\nfor c in critics.keys():\n matches[c] = similarity_score(critics[c], test['Eshan Herath'])\n\n# The higher the similarity score, the closer the critic's taste is to ours\nmatches_sorted = sorted(matches.items(), key=operator.itemgetter(1), reverse=True)\nprint(matches_sorted)\n","repo_name":"eshanmherath/collaborative-filtering","sub_path":"recommendation-systems/user-based-collaborative-filtering/basic_approach_with_pearson_correlation.py","file_name":"basic_approach_with_pearson_correlation.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"8667065901","text":"\"\"\"\nGiven a string S and a string T, find the minimum window in S which will contain\n all the characters in T in complexity O(n).\n\nFor example,\nS = \"ADOBECODEBANC\"\nT = \"ABC\"\nMinimum window is \"BANC\".\n\nNote:\nIf there is no such window in S that covers all characters in T, return the empty string \"\".\n\nIf there are multiple such windows, you are guaranteed that there will always be only one unique\nminimum window in S.\n\n\"\"\"\n\nfrom collections import defaultdict\n\n\nclass Solution(object):\n def minWindow(self, s, t):\n counter = defaultdict(int)\n for char in t:\n counter[char] += 1\n\n # print(all([True if x <= 0 else False for x in counter.values()]))\n\n left, right = 0, 0\n minwindow = len(s) + 1\n res = \"\"\n\n while right <= len(s):\n # if all characters in t are also in s[left:right]\n if all([True if x <= 0 else False for x in counter.values()]):\n if minwindow > right - left:\n minwindow = right - left\n # it could be better to only record the indexes, which could be faster, like res[left, right], where\n # left and right are indexes\n res = s[left:right]\n char = s[left]\n if char in counter:\n counter[char] += 1\n left += 1\n else:\n if right == len(s):\n break\n char = s[right]\n if char in counter:\n counter[char] -= 1\n right += 1\n\n return res\n\n\ntest = Solution()\nprint(test.minWindow(\"ADOBECODEBANC\", \"ABC\"))\n","repo_name":"ChrisLiu95/Leetcode","sub_path":"hard/Minimum_Window_Substring.py","file_name":"Minimum_Window_Substring.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"31616475409","text":"# Task #1\n#\n# Implement Polish notation for two positive numbers. 
The following operations must be implemented:\n#\n# Addition\n# Subtraction\n# Multiplication\n# Division\n# For example, the user enters: + 2 2 The answer should be: 4\n\ntask = 'Homework for lecture 2.3 \"Exceptions\"'\nline = '\\n' + '-' * (len(task))\nprint(task + line)\n\n\ndef polish():\n ask = 'Enter an operation followed by two positive numbers, separated by spaces: '\n operation, number_1, number_2 = input('\\n' + ask).split()\n all_operation = ['+', '-', '*', '/']\n number_1 = int(number_1)\n number_2 = int(number_2)\n assert number_1 >= 0 and number_2 >= 0, 'Negative number'\n assert operation in all_operation, 'You did not enter an operation! (+, -, *, /)' # message for task #2\n if operation == '+':\n answer = number_1 + number_2\n elif operation == '-':\n answer = number_1 - number_2\n elif operation == '*':\n answer = number_1 * number_2\n elif operation == '/':\n answer = number_1 / number_2\n return answer\n\n\n# Task #2\n#\n# Use the assert expression to check that the first operation is in the list of available operations (+, -, *, /). Use\n# try/except constructs to catch errors and print warnings. Error types:\n#\n# Division by 0\n# Division of strings\n# The required number of arguments was passed\n# etc.\n\n\ntry:\n print(polish())\nexcept ZeroDivisionError:\n print('You tried to divide by zero')\nexcept AssertionError:\n print('Assert: negative number or invalid operator')\nexcept ValueError:\n print('Invalid value')\nelse:\n print('The required number of arguments was passed')\n\n# Task #3\n#\n# Extend the homework from lecture 1.4 \"Functions - using built-in ones and creating your own\" with a new function\n# that prints the names of all document owners. Use the KeyError exception to check whether a document has a \"name\" field.\n\ntask_3 = 'Task #3'\nask = 'Enter your command (\"n\" - to search for a document owner,'\nline = '\\n' + '-' * (len(task_3))\nprint('\\n' + task_3 + line)\n\ndocuments = [\n {\"type\": \"card\", \"number\": \"007\"},\n {\"type\": \"passport\", \"number\": \"2207 876234\", \"name\": \"Василий Гупкин\"},\n {\"type\": \"invoice\", \"number\": \"11-2\", \"name\": \"Геннадий Покемонов\"},\n {\"type\": \"insurance\", \"number\": \"10006\", \"name\": \"Аристарх Павлов\"}\n]\n\n\ndef names():\n for search_names in documents:\n try:\n print('{} \"{}\" \"{}\"'.format(search_names['type'],search_names['number'] ,search_names['name']))\n except KeyError:\n print('Document {} number \"{}\" has no name'.format(search_names['type'], search_names['number']))\n\nnames()","repo_name":"DrGsan/python-netology","sub_path":"1.Basic/2.3.exceptions/2.3.exceptions.py","file_name":"2.3.exceptions.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"41886228363","text":"from typing import List\nimport numpy as np\nfrom scipy.stats import pearsonr, spearmanr, kendalltau\n\n\nclass DTW:\n @staticmethod\n def naive_dtw_distance(ts_a, ts_b, mww, d=lambda x, y: abs(x-y)**2):\n \"\"\"Computes dtw distance between two time series\n\n Args:\n ts_a: time series a\n ts_b: time series b\n mww: max warping window, int\n d: distance function\n\n Returns:\n dtw distance\n \"\"\"\n\n # Create the cost matrix, initialized with ones\n ts_a, ts_b = np.array(ts_a), np.array(ts_b)\n M, N = len(ts_a), len(ts_b)\n cost = np.ones((M, N))\n\n # Initialize the first row and column\n cost[0, 0] = d(ts_a[0], ts_b[0])\n for i in range(1, M):\n cost[i, 0] = cost[i-1, 0] + d(ts_a[i], 
ts_b[0])\n\n for j in range(1, N):\n cost[0, j] = cost[0, j-1] + d(ts_a[0], ts_b[j])\n\n # Populate rest of cost matrix within window\n for i in range(1, M):\n for j in range(max(1, i - mww), min(N, i + mww)):\n choices = cost[i-1, j-1], cost[i, j-1], cost[i-1, j]\n cost[i, j] = min(choices) + d(ts_a[i], ts_b[j])\n\n # Return DTW distance given window\n return cost[-1, -1]\n\n @staticmethod\n def dsw_distance(ts_c, ts_p, mpw, delta=1, d=lambda x, y: abs(x-y)**2):\n \"\"\"Computes dsw distance between parent and child\n\n Args:\n ts_c: time series child\n ts_p: time series parent\n mpw: max propagation window, int\n delta: allowed time shift in the system\n d: distance function\n\n Returns:\n dsw distance\n \"\"\"\n\n # Create the cost matrix, initialized with ones\n ts_p, ts_c = np.array(ts_p), np.array(ts_c)\n M, N = len(ts_c), len(ts_p)\n cost = np.ones((M, N))\n\n # Initialize the first row and column\n cost[0, 0] = d(ts_p[0], ts_c[0])\n for i in range(1, M):\n cost[i, 0] = cost[i-1, 0] + d(ts_c[i], ts_p[0])\n\n for j in range(1, N):\n cost[0, j] = cost[0, j-1] + d(ts_c[0], ts_p[j])\n\n # Populate rest of cost matrix within window\n for i in range(1, M):\n for j in range(max(1, i - mpw - delta), min(N, i + delta)):\n choices = cost[i-1, j-1], cost[i, j-1], cost[i-1, j]\n cost[i, j] = min(choices) + d(ts_c[i], ts_p[j])\n\n # Return DSW\n return cost[-1, -1]\n\n\nclass Correlation:\n @staticmethod\n def pearson(ts_a, ts_b):\n \"\"\"Computes pearson correlation between two time series\n\n Args:\n ts_a: time series a\n ts_b: time series b\n\n Returns:\n r: correlation of a and b\n p: p value\n \"\"\"\n ts_a, ts_b = np.array(ts_a), np.array(ts_b)\n r, p = pearsonr(ts_a, ts_b)\n return r, p\n\n @staticmethod\n def spearman(ts_a, ts_b):\n \"\"\"Computes spearman correlation between two time series\n\n Args:\n ts_a: time series a\n ts_b: time series b\n\n Returns:\n r: correlation of a and b\n p: p value\n \"\"\"\n ts_a, ts_b = np.array(ts_a), np.array(ts_b)\n r, p = spearmanr(ts_a, ts_b)\n return r, p\n\n @staticmethod\n def kendall(ts_a, ts_b):\n \"\"\"Computes kendall correlation between two time series\n\n Args:\n ts_a: time series a\n ts_b: time series b\n\n Returns:\n r: correlation of a and b\n p: p value\n \"\"\"\n ts_a, ts_b = np.array(ts_a), np.array(ts_b)\n r, p = kendalltau(ts_a, ts_b)\n return r, p\n\n\nclass Aggregator:\n @staticmethod\n def mean_agg(metrics: List[float]):\n return np.mean(metrics)\n\n @staticmethod\n def max_agg(metrics: List[float]):\n return np.max(metrics)\n\n @staticmethod\n def min_agg(metrics: List[float]):\n return np.min(metrics)\n","repo_name":"OpsPAI/aid","sub_path":"model/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":3967,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"63"} +{"seq_id":"70183903561","text":"def sum_even_numbers(numbers):\n # Initialize a variable to hold the sum of even numbers\n even_sum = 0\n \n # Iterate through the list of numbers\n for num in numbers:\n if num % 2 == 0:\n even_sum += num\n \n return even_sum\n\n# Prompt the user for input by taking a comma-separated list of numbers\ninput_str = input(\"Enter a list of numbers separated by commas: \")\n\n# Split the input string into a list of numbers\nnumbers = [int(x) for x in input_str.split(',')]\n\n# Calculate the sum of even numbers in the list\nresult = sum_even_numbers(numbers)\n\n# Display the result\nprint(\"Sum of the even numbers in the list:\", 
result)","repo_name":"MikitaTsiarentsyeu/Md-PT1-68-23","sub_path":"Tasks/Lazovsliy/Task3/Task3-2.py","file_name":"Task3-2.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12284244497","text":"import xlwt\nimport time\n\nclass StoreMore:\n def __init__(self) -> None:\n self.keyVal = {}\n self.DepotsUseCount = {}\n self.learningRate = []\n self.existPathCount = []\n self.selectedLines = []\n\n def setKeyVal(self, key, val):\n self.keyVal[key] = val\n \n def increaseKeyVal(self, key, val):\n if key in self.keyVal:\n self.keyVal[key] += val\n else :\n self.keyVal[key] = val\n\n def increaseDepotUsed(self, depot_id):\n if depot_id in self.DepotsUseCount:\n self.DepotsUseCount[depot_id] += 1\n else:\n self.DepotsUseCount[depot_id] = 1\n \n def storedecideParams(self, lr, pathCount, sel_line):\n self.learningRate.append(lr)\n self.existPathCount.append(pathCount)\n self.selectedLines.append(sel_line)\n\n \n def Save2file(self, nameAddation):\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(\"sheet1\")\n header_font = xlwt.Font()\n header_font.name = 'Arial'\n header_font.bold = True\n header_style = xlwt.XFStyle()\n header_style.font = header_font\n sheet.write(0, 0, 'Key', header_style)\n sheet.write(0, 1, 'Value', header_style)\n\n sheet.write(0, 3, 'LR', header_style)\n sheet.write(0, 4, 'PathCnt', header_style)\n sheet.write(0, 5, 'Selected line', header_style)\n\n rowCounter = 1\n for d in self.DepotsUseCount:\n sheet.write(rowCounter, 0, d)\n sheet.write(rowCounter, 1, self.DepotsUseCount[d])\n rowCounter += 1\n \n for i in self.keyVal:\n sheet.write(rowCounter, 0, i)\n sheet.write(rowCounter, 1, self.keyVal[i])\n rowCounter += 1\n\n for i in range(len(self.learningRate)):\n sheet.write(i+1,3, self.learningRate[i])\n sheet.write(i+1,4, self.existPathCount[i])\n sheet.write(i+1,5, self.selectedLines[i])\n \n workbook.save('result/moreData'+ str(nameAddation) + \"_\" + time.strftime('%Y-%m-%d_%H-%M-%S') +'.xls')\n","repo_name":"mojtabasji/droneDeliveryPlanning","sub_path":"StoreMoreData.py","file_name":"StoreMoreData.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74340328519","text":"\"\"\"\nright_click.py\n\nThis file contain the menu object displayed\nnext to the mouse when right clicking.\n#pylint disable=django-not-configured\n\"\"\"\n\n#------------------------------------------------------------------------------#\n\nfrom src.utils.graphical_utils import Menu\n\n#------------------------------------------------------------------------------#\n\nclass RCMStudy(Menu):\n \"\"\"\n Right Click Menu Study\n Frame displaying when right clicking\n on the study timeline.\n \"\"\"\n def __init__(self, manager, study_template):\n super().__init__(manager)\n self.manager = manager\n self.config(tearoff=False)\n self.add_command(label=\"Display\", command=None)\n self.add_command(label=\"Modify\",\n command=study_template.setup_add_study)\n self.add_separator()\n self.add_command(label=\"Delete\", command=study_template.delete_study)\n\n def show(self, event):\n \"\"\"\n Shows the menu at the given coordinates.\n \"\"\"\n self.tk_popup(event.x_root, event.y_root)\n\nclass RCMSerial(Menu):\n \"\"\"\n Right Click Menu Serial\n Frame displaying when right clicking\n on the serials.\n \"\"\"\n def __init__(self, manager, study_template):\n super().__init__(manager)\n self.manager = 
manager\n self.study_template = study_template\n self.widget = None\n self.config(tearoff=False)\n self.add_command(label=\"Add a template\", command=None)\n self.add_command(label=\"Add a task\",\n command=self.add_task)\n self.add_command(label=\"Activate\", command=None)\n self.add_separator()\n self.add_command(label=\"Delete\", command=None)\n\n def show(self, event):\n \"\"\"\n Shows the menu at the given coordinates.\n \"\"\"\n self.widget = event.widget\n self.tk_popup(event.x_root, event.y_root)\n\n\n def add_task(self):\n \"\"\"\n Adds a task to the serial.\n \"\"\"\n self.study_template.show_add_task_template(self.widget)\n\nclass RCMTemplates(Menu):\n \"\"\"\n Right Click Menu Templates\n Frame displaying when right clicking\n on the Templates.\n \"\"\"\n def __init__(self, manager):\n super().__init__(manager)\n self.manager = manager\n self.header_manager = self.manager.manager.manager.manager.header\n self.config(tearoff=False)\n self.add_command(label=\"Add a task\",\n command=self.header_manager.display_add_task)\n self.add_command(label=\"Modify\", command=None)\n self.add_command(label=\"Display a task\", command=None)\n self.add_separator()\n self.add_command(label=\"Delete\", command=None)\n\n def show(self, event):\n \"\"\"\n Shows the menu at the given coordinates.\n \"\"\"\n self.tk_popup(event.x_root, event.y_root)\n","repo_name":"LOISGALLAUD/CILcare-agenda","sub_path":"src/interface/widgets/right_click.py","file_name":"right_click.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}