\")\n\n# Adapted from https://github.com/undees/fftw-example/blob/master/fftw_example.c\nNUM_POINTS = 64\nfftw_complex = FFT.types.fftw_complex\nsignal = FFI.arrayType(fftw_complex, NUM_POINTS)();\nresult = FFI.arrayType(fftw_complex, NUM_POINTS)();\n\ndef acquire_from_somewhere(signal):\n for i in range(NUM_POINTS):\n theta = float(i) / float(NUM_POINTS) * math.pi;\n\n signal[i][0] = 1.0 * math.cos(10.0 * theta) + \\\n 0.5 * math.cos(25.0 * theta);\n\n signal[i][1] = 1.0 * math.sin(10.0 * theta) + \\\n 0.5 * math.sin(25.0 * theta);\n\ndef do_something_with(result):\n for i in range(NUM_POINTS):\n mag = math.sqrt(result[i][0] * result[i][0] + \\\n result[i][1] * result[i][1]);\n print(\"%0.4f\" % mag);\n\n\nplan = FFT.funcs.fftw_plan_dft_1d(NUM_POINTS, signal, result, -1, 1<<6)\nacquire_from_somewhere(signal)\nFFT.funcs.fftw_execute(plan)\ndo_something_with(result)\n","repo_name":"aguinet/dragonffi","sub_path":"examples/fftw.py","file_name":"fftw.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":539,"dataset":"github-code","pt":"92"}
+{"seq_id":"15167182886","text":"import pandas as pd\nimport os\nimport numpy as np\n\nos.chdir('/Users/mab8354/granddb/data')\n\n# GTEx samples (ALL)\ngtex_sex=pd.read_csv('GTExSamples_AllVariables.txt',sep='\\t')\ngtex=pd.read_csv('GTEx_v7_Annotations_SampleAttributesDS.txt',sep='\\t')\n\n# EGRET samples\negret=pd.read_csv('LCL_expression.csv')\nsamples = egret.columns[1:]\n# replace hyphens\nsamples = [x.replace('.','-') for x in samples]\n\n# find samples in df\naa = np.intersect1d(samples, gtex_sex['SampleID'], return_indices=True)\na = np.intersect1d(samples, gtex['SAMPID'], return_indices=True)\nb = gtex.iloc[a[2],]\nb.rename(columns = {'SAMPID':'SampleID'}, inplace = True)\n\n# select relevant columns\nsex_samples = gtex_sex.iloc[aa[2],[0,1,2,3,4,5,6,15,31,35]]\n\n# merge two subdfs\nouter_merged = pd.merge(b, sex_samples, how='outer')\n\n# do clean names\ncleannames = [x.replace('-','_') for x in outer_merged['SampleID']]\nouter_merged['cleanname'] = cleannames\n\n# save final df\nouter_merged.to_csv('egret_gtex.csv', index=False)","repo_name":"QuackenbushLab/grand","sub_path":"src/builddbDf/cells/buildegret_gtex.py","file_name":"buildegret_gtex.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"}
+{"seq_id":"6503723153","text":"import wndMgr\nimport ui\nimport ime\nimport constInfo\nif constInfo.PSM:\n\timport localeInfo as _localeInfo\n\tlocaleInfo = _localeInfo.localeInfo()\nelse:\n\timport localeInfo\nimport app\nimport rRCvfR4c_fL4e\n\nclass PickMoneyDialog(ui.ScriptWindow):\n\tdef __init__(self):\n\t\tui.ScriptWindow.__init__(self)\n\n\t\tself.unitValue = 1\n\t\tself.maxValue = 0\n\t\tself.eventAccept = 0\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.unitValue2 = 0\n\t\t\tself.maxValue2 = 0\n\t\tif app.ENABLE_WINDOW_SLIDE_EFFECT:\n\t\t\tself.EnableSlidingEffect()\n\n\tdef __del__(self):\n\t\tui.ScriptWindow.__del__(self)\n\n\tdef LoadDialog(self):\n\t\ttry:\n\t\t\tpyScrLoader = ui.PythonScriptLoader()\n\t\t\tpyScrLoader.LoadScriptFile(self, \"UIScript/PickMoneyDialog.py\")\n\t\texcept:\n\t\t\timport exception\n\t\t\texception.Abort(\"MoneyDialog.LoadDialog.LoadScript\")\n\n\t\ttry:\n\t\t\tself.board = self.GetChild(\"board\")\n\t\t\tself.maxValueTextLine = self.GetChild(\"max_value\")\n\t\t\tself.pickValueEditLine = self.GetChild(\"money_value\")\n\t\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\t\tself.maxValueTextLine2 = self.GetChild(\"redcoin_max_value\")\n\t\t\t\tself.pickValueEditLine2 = self.GetChild(\"redcoin_value\")\n\t\t\tself.acceptButton = self.GetChild(\"accept_button\")\n\t\t\tself.cancelButton = self.GetChild(\"cancel_button\")\n\t\texcept:\n\t\t\timport exception\n\t\t\texception.Abort(\"MoneyDialog.LoadDialog.BindObject\")\n\n\t\tself.pickValueEditLine.SetReturnEvent(ui.__mem_func__(self.OnAccept))\n\t\tself.pickValueEditLine.SetEscapeEvent(ui.__mem_func__(self.Close))\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.SetReturnEvent(ui.__mem_func__(self.OnAccept))\n\t\t\tself.pickValueEditLine2.SetEscapeEvent(ui.__mem_func__(self.Close))\n\t\tself.acceptButton.SetEvent(ui.__mem_func__(self.OnAccept))\n\t\tself.cancelButton.SetEvent(ui.__mem_func__(self.Close))\n\t\tself.board.SetCloseEvent(ui.__mem_func__(self.Close))\n\n\tdef Destroy(self):\n\t\tself.ClearDictionary()\n\t\tself.eventAccept = 0\n\t\tself.maxValue = 0\n\t\tself.pickValueEditLine = 0\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.maxValue2 = 0\n\t\t\tself.pickValueEditLine2 = 0\n\t\tself.acceptButton = 0\n\t\tself.cancelButton = 0\n\t\tself.board = None\n\n\tdef SetTitleName(self, text):\n\t\tself.board.SetTitleName(text)\n\n\tdef SetAcceptEvent(self, event):\n\t\tself.eventAccept = event\n\n\tdef SetMax(self, max):\n\t\tself.pickValueEditLine.SetMax(max)\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.SetMax(max)\n\n\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\tdef Open(self, maxValue, maxValue2, unitValue=0, unitValue2=0):\n\t\t\tif localeInfo.IsYMIR() or localeInfo.IsCHEONMA() or localeInfo.IsHONGKONG():\n\t\t\t\tunitValue = \"\"\n\t\t\t\tunitValue2 = \"\"\n\n\t\t\twidth = self.GetWidth()\n\t\t\t(mouseX, mouseY) = wndMgr.GetMousePosition()\n\n\t\t\tif mouseX + width/2 > wndMgr.GetScreenWidth():\n\t\t\t\txPos = wndMgr.GetScreenWidth() - width\n\t\t\telif mouseX - width/2 < 0:\n\t\t\t\txPos = 0\n\t\t\telse:\n\t\t\t\txPos = mouseX - width/2\n\n\t\t\tself.SetPosition(xPos, mouseY - self.GetHeight() - 20)\n\n\t\t\tself.maxValueTextLine.SetText(\" / \" + str(maxValue))\n\t\t\tself.maxValueTextLine2.SetText(\" / \" + 
str(maxValue2))\n\n\t\t\tself.pickValueEditLine.SetText(str(unitValue))\n\t\t\tself.pickValueEditLine.SetFocus()\n\t\t\t\n\t\t\tself.pickValueEditLine2.SetText(str(unitValue2))\n\t\t\tself.pickValueEditLine2.SetFocus()\n\n\t\t\time.SetCursorPosition(1)\n\t\t\t\n\t\t\t# rRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"unitValue:\"+str(unitValue)+\"unitValue2\"+str(unitValue2))\n\n\t\t\tself.unitValue = unitValue\n\t\t\tself.unitValue2 = unitValue2\n\t\t\tself.maxValue = maxValue\n\t\t\tself.maxValue2 = maxValue2\n\t\t\tself.Show()\n\t\t\tself.SetTop()\n\telse:\n\t\tdef Open(self, maxValue, unitValue=1):\n\n\t\t\tif localeInfo.IsYMIR() or localeInfo.IsCHEONMA() or localeInfo.IsHONGKONG():\n\t\t\t\tunitValue = \"\"\n\n\t\t\twidth = self.GetWidth()\n\t\t\t(mouseX, mouseY) = wndMgr.GetMousePosition()\n\n\t\t\tif mouseX + width/2 > wndMgr.GetScreenWidth():\n\t\t\t\txPos = wndMgr.GetScreenWidth() - width\n\t\t\telif mouseX - width/2 < 0:\n\t\t\t\txPos = 0\n\t\t\telse:\n\t\t\t\txPos = mouseX - width/2\n\n\t\t\tself.SetPosition(xPos, mouseY - self.GetHeight() - 20)\n\n\t\t\tif localeInfo.IsARABIC():\n\t\t\t\tself.maxValueTextLine.SetText(\"/\" + str(maxValue))\n\t\t\telse:\n\t\t\t\tself.maxValueTextLine.SetText(\" / \" + str(maxValue))\n\n\t\t\tself.pickValueEditLine.SetText(str(unitValue))\n\t\t\tself.pickValueEditLine.SetFocus()\n\n\t\t\time.SetCursorPosition(1)\n\n\t\t\tself.unitValue = unitValue\n\t\t\tself.maxValue = maxValue\n\t\t\tself.Show()\n\t\t\tself.SetTop()\n\n\tdef Close(self):\n\t\tself.pickValueEditLine.KillFocus()\n\t\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\t\tself.pickValueEditLine2.KillFocus()\n\t\tself.Hide()\n\n\tif app.ENABLE_FATIH_SAHIN_REDCOIN_SYSTEM:\n\t\tdef OnAccept(self):\n\t\t\ttext = self.pickValueEditLine.GetText()\n\t\t\ttext2 = self.pickValueEditLine2.GetText()\n\n\t\t\t# rRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"text:\"+str(text)+\"text2\"+str(text2))\n\n\t\t\tif len(text) > 0 and text.isdigit() or len(text2) > 0 and text2.isdigit():\n\t\t\t\tmoney = long(text)\n\t\t\t\tmoney = min(money, self.maxValue)\n\t\t\t\tredcoin = int(text2)\n\t\t\t\tredcoin = min(redcoin, self.maxValue2)\n\n\t\t\t\tif money > 0 or redcoin > 0:\n\t\t\t\t\tif self.eventAccept:\n\t\t\t\t\t\tself.eventAccept(money, redcoin)\n\t\t\t\telse:\n\t\t\t\t\trRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"Bilgi: Lütfen bir deðer girin.\")\n\t\t\telse:\n\t\t\t\trRCvfR4c_fL4e.AppendChat(rRCvfR4c_fL4e.CHAT_TYPE_INFO, \"Bilgi: Lütfen bir deðer girin.\")\n\n\t\t\tself.Close()\n\telse:\n\t\tdef OnAccept(self):\n\n\t\t\ttext = self.pickValueEditLine.GetText()\n\n\t\t\tif len(text) > 0 and text.isdigit():\n\n\t\t\t\tmoney = long(text)\n\t\t\t\tmoney = min(money, self.maxValue)\n\n\t\t\t\tif money > 0:\n\t\t\t\t\tif self.eventAccept:\n\t\t\t\t\t\tself.eventAccept(money)\n\n\t\t\tself.Close()\n","repo_name":"fatihsahinn/Metin2-Coin-System","sub_path":"Python/uipickmoney.py","file_name":"uipickmoney.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18263189853","text":"import re\n\n# input_lines='''\\\n# aba[bab]xyz\n# xyx[xyx]xyx\n# aaa[kek]eke\n# zazbz[bzb]cdb'''.splitlines()\n\ninput_lines = open('input.txt')\n\ncount = 0\nfor line in input_lines:\n supernet = re.split(r'\\[\\w+\\]', line)\n hypernet = re.compile(r'\\[(\\w+)\\]').findall(line)\n for part in supernet:\n matches = re.compile(r'(?=(\\w)(\\w)\\1)').findall(part)\n found = False\n for match in matches:\n a, b = match\n bab = b + a + b\n if a != b and any(bab in h for h in hypernet):\n found = True\n break\n if found:\n count += 1\nprint(count)","repo_name":"ceronman/adventofcode","sub_path":"2016/day7/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"}
+{"seq_id":"31420216576","text":"from collections.abc import Iterable\n\n\ndef assert_prolog_output_the_same(self, expected: list, actual: list, ignore_duplicates=False, nested_ignore=False):\n if len(expected) >= 2 and expected[-1] == \"false\":\n expected.pop()\n if len(actual) >= 2 and actual[-1] == \"false\":\n actual.pop()\n if ignore_duplicates and nested_ignore:\n comparison = compare_list_just_like_a_set_nested(expected, actual)\n self.assertTrue(comparison)\n return\n if ignore_duplicates:\n comparison = compare_list_just_like_a_set(expected, actual)\n self.assertTrue(comparison)\n return\n self.assertCountEqual(expected, actual)\n\n\ndef compare_list_just_like_a_set_nested(list1, list2):\n def comparator(x, y):\n if not isinstance(x, Iterable) or not isinstance(y, Iterable):\n return x == y\n return compare_list_just_like_a_set_nested(x, y)\n return compare_list_just_like_a_set(list1, list2, comparator)\n\n\ndef compare_list_just_like_a_set(list1, list2, comparator=(lambda x, y: x == y)):\n for item1 in list1:\n for item2 in list2:\n if comparator(item1, item2):\n break\n else:\n return False\n for item2 in list2:\n for item1 in list1:\n if comparator(item1, item2):\n break\n else:\n return False\n return True\n\n\n\ndef remove_duplicates(lst, nested_remove_duplicates=False):\n if not isinstance(lst, list):\n return lst\n\n ret = []\n for curr_lst in lst:\n if isinstance(curr_lst, list) and nested_remove_duplicates:\n curr_lst = remove_duplicates(curr_lst, nested_remove_duplicates=nested_remove_duplicates)\n for added in ret:\n if added == curr_lst:\n break\n else:\n ret.append(curr_lst)\n return ret\n\n\ndef remove_trailing_false_or_true(value):\n value = value[:]\n if len(value) == 0:\n return value\n if value[-1] in ('true', 'false'):\n value.pop()\n return value","repo_name":"Hzzkygcs/heizscheduler-prolog","sub_path":"python/HzzProlog/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"21401142184","text":"\"\"\"Removing pure_org so I can re-create it correctly.\n\nRevision ID: 9f257b57fca6\nRevises: 8a1caca53d6c\nCreate Date: 2017-04-16 19:10:13.521359\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import oracle\n\n# revision identifiers, used by Alembic.\nrevision = '9f257b57fca6'\ndown_revision = '8a1caca53d6c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.drop_table('pure_org')\n\ndef downgrade():\n op.create_table(\n 'pure_org',\n sa.Column('id', sa.VARCHAR(length=50), nullable=False),\n sa.Column('type', sa.VARCHAR(length=25), nullable=True),\n sa.Column('name_en', sa.VARCHAR(length=255), nullable=True),\n sa.Column('level', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('lft', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('rgt', oracle.NUMBER(scale=0, asdecimal=False), nullable=False),\n sa.Column('parent_id', sa.VARCHAR(length=50), nullable=True),\n sa.Column('tree_id', oracle.NUMBER(scale=0, asdecimal=False), nullable=True),\n sa.Column('pure_id', sa.VARCHAR(length=50), nullable=False),\n sa.ForeignKeyConstraint(['parent_id'], ['pure_org.id'], name='SYS_C00281952'),\n sa.PrimaryKeyConstraint('id', name='sys_c00281951')\n )\n","repo_name":"UMNLibraries/experts_dw","sub_path":"alembic/versions/9f257b57fca6_removing_pure_org_so_i_can_re_create_it_.py","file_name":"9f257b57fca6_removing_pure_org_so_i_can_re_create_it_.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"30202722658","text":"import pytest\n\nfrom eth_web3._utils.method_formatters import (\n get_error_formatters,\n raise_solidity_error_on_revert,\n)\nfrom eth_web3._utils.rpc_abi import (\n RPC,\n)\nfrom eth_web3.exceptions import (\n ContractLogicError,\n)\nfrom eth_web3.types import (\n RPCResponse,\n)\n\n# OpenEthereum/default case:\nREVERT_WITH_MSG = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32015,\n \"message\": \"VM execution error.\",\n \"data\": (\n \"Reverted \"\n \"0x08c379a\"\n \"00000000000000000000000000000000000000000000000000000000000000020\"\n \"0000000000000000000000000000000000000000000000000000000000000016\"\n \"6e6f7420616c6c6f77656420746f206d6f6e69746f7200000000000000000000\"\n ),\n },\n \"id\": 2987,\n }\n)\n\nREVERT_WITHOUT_MSG = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32015,\n \"message\": \"VM execution error.\",\n \"data\": \"Reverted 0x\",\n },\n \"id\": 2987,\n }\n)\n\nOTHER_ERROR = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"code\": -32601,\n \"message\": \"Method not found\",\n },\n \"id\": 1,\n }\n)\n\nGETH_RESPONSE = RPCResponse(\n {\n \"jsonrpc\": \"2.0\",\n \"id\": 2,\n \"error\": {\n \"code\": 3,\n \"message\": \"execution reverted: Function has been reverted.\",\n \"data\": (\n \"0x08c379a0000000000000000000000000000000000000000000000\"\n \"0000000000000000020000000000000000000000000000000000000\"\n \"000000000000000000000000001b46756e6374696f6e20686173206\"\n \"265656e2072657665727465642e0000000000\"\n ),\n },\n }\n)\n\nGANACHE_RESPONSE = RPCResponse(\n {\n \"id\": 24,\n \"jsonrpc\": \"2.0\",\n \"error\": {\n \"message\": \"VM Exception while processing transaction: revert Custom revert message\", # noqa: E501\n \"code\": -32000,\n \"data\": {\n \"stack\": \"o: VM Exception while processing transaction: revert Custom revert message\\n\", # noqa: E501\n \"name\": \"o\",\n },\n },\n }\n)\n\n\n@pytest.mark.parametrize(\n \"response,expected\",\n (\n (REVERT_WITH_MSG, \"execution reverted: not allowed to monitor\"),\n (REVERT_WITHOUT_MSG, \"execution reverted\"),\n (GETH_RESPONSE, \"execution reverted: Function has been reverted.\"),\n (\n GANACHE_RESPONSE,\n \"execution reverted: VM Exception while processing transaction: revert Custom revert message\", # noqa: 501\n ),\n ),\n ids=[\n \"test-get-revert-reason-with-msg\",\n \"test-get-revert-reason-without-msg\",\n \"test-get-geth-revert-reason\",\n \"test_get-ganache-revert-reason\",\n ],\n)\ndef test_get_revert_reason(response, expected) -> None:\n with pytest.raises(ContractLogicError, match=expected):\n raise_solidity_error_on_revert(response)\n\n\ndef test_get_revert_reason_other_error() -> None:\n assert raise_solidity_error_on_revert(OTHER_ERROR) is OTHER_ERROR\n\n\ndef test_get_error_formatters() -> None:\n formatters = get_error_formatters(RPC.eth_call)\n with pytest.raises(ContractLogicError, match=\"not allowed to monitor\"):\n formatters(REVERT_WITH_MSG)\n with pytest.raises(ContractLogicError):\n formatters(REVERT_WITHOUT_MSG)\n assert formatters(OTHER_ERROR) == OTHER_ERROR\n","repo_name":"Foundation-Eth/eth-web3","sub_path":"tests/core/utilities/test_method_formatters.py","file_name":"test_method_formatters.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"94"}
+{"seq_id":"3845976223","text":"\"\"\"Test disqus_shortname config value scenarios.\"\"\"\n\nimport re\n\nimport py\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom docutils.parsers.rst import directives, roles\nfrom sphinx import application, errors\n\nBASE_CONFIG = \"\"\"\\\nimport sys\nsys.path.append('{}')\nextensions = ['sphinx_disqus.disqus']\nmaster_doc = 'index'\nnitpicky = True\n\"\"\"\n\nPARAMS = [\n (\"disqus_shortname = 'good'\", \"\"),\n (\"\", \"disqus_shortname config value must be set for the disqus extension to work.\"),\n (\"disqus_shortname = ''\", \"disqus_shortname config value must be set for the disqus extension to work.\"),\n (\"disqus_shortname = 'B@D'\", \"disqus_shortname config value must be 3-50 letters, numbers, and hyphens only.\"),\n]\n\n\n@pytest.mark.parametrize(\"tail,expected_error\", PARAMS)\ndef test(monkeypatch: MonkeyPatch, tmpdir: py.path.local, tail: str, expected_error: str):\n \"\"\"Test valid and invalid values.\"\"\"\n tmpdir.join(\"conf.py\").write(BASE_CONFIG.format(py.path.local(__file__).join(\"..\", \"..\")))\n tmpdir.join(\"conf.py\").write(tail, mode=\"a\")\n tmpdir.join(\"index.rst\").write(\"====\\nMain\\n====\\n\\n.. toctree::\\n :maxdepth: 2\\n.. disqus::\")\n monkeypatch.setattr(directives, \"_directives\", getattr(directives, \"_directives\").copy())\n monkeypatch.setattr(roles, \"_roles\", getattr(roles, \"_roles\").copy())\n\n srcdir = confdir = str(tmpdir)\n outdir = tmpdir.join(\"_build\", \"html\")\n doctreedir = outdir.join(\"doctrees\").ensure(dir=True, rec=True)\n app = application.Sphinx(srcdir, confdir, str(outdir), str(doctreedir), \"html\")\n\n if not expected_error:\n app.builder.build_all()\n html_body = outdir.join(\"index.html\").read()\n disqus_div = re.findall(r'(]+ id=\"disqus_thread\"[^>]*>
)', html_body)[0]\n assert 'data-disqus-shortname=\"good\"' in disqus_div\n return\n\n with pytest.raises(errors.ExtensionError) as exc:\n app.builder.build_all()\n assert expected_error == exc.value.args[0]\n","repo_name":"Robpol86/sphinx-disqus","sub_path":"tests/unit_tests/test_shortname.py","file_name":"test_shortname.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"94"}
+{"seq_id":"33190941440","text":"#!/usr/bin/env python3\n\"\"\"\nA simple calendar made with rofi and python3.\n\nCycle through month and create linked event to days.\n\"\"\"\n\n__author__ = \"Daguhh\"\n__license__ = \"MIT-0\"\n__status__ = \"Released\"\n__version__ = \"2.0.1\"\n\nimport glob, os, sys, subprocess, shutil\nfrom pathlib import Path\nimport re, argparse, configparser\nimport datetime, calendar, locale\nfrom itertools import chain\nfrom functools import wraps\nimport time\n\n#START = time.time()\n\ndef get_arguments():\n \"\"\"Parse command line arguments\n\n Returns\n -------\n args : argparse.Namespace\n command line arguments\n unknown : str\n rofi output\n \"\"\"\n\n parser = argparse.ArgumentParser(\n prog=\"naivecalendar\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description='''A simple popup calendar\n\nsubcommands:\n update-themes Update a calendar parameter for all user themes at once\n add-event Add, modify, delete event in all user themes config at once\n configure Clone or open configuration files'''\n )\n\n parser.add_argument(\n '-V',\n '--version',\n action='version',\n version=\"%(prog)s \" + __version__\n )\n\n parser.add_argument(\n '-v',\n '--verbose',\n help=\"direct rofi error to stdout\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"-p\",\n \"--print\",\n help=\"print date to stdout instead of opening a event\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-x\",\n \"--clipboard\",\n help=\"copy date to clipboard\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--format\",\n help=\"\"\"option '-p' or '-x' output format (datetime.strftime format, defaut='%%Y-%%m-%%d')\"\"\",\n dest=\"format\",\n default=\"%Y-%m-%d\",\n )\n\n parser.add_argument(\n \"-e\",\n \"--editor\",\n help=\"\"\"editor command to open events\"\"\",\n dest=\"editor\",\n default=\"xdg-open\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--locale\",\n help=\"\"\"force system locale, for example '-l es_ES.utf8'\"\"\",\n dest=\"locale\",\n default=\"\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--read-cache\",\n dest=\"is_force_read_cache\",\n action=\"store_true\",\n help=\"\"\"force calendar to read old date from cache\"\"\"\n )\n\n parser.add_argument(\n \"-t\",\n \"--theme\",\n help=\"\"\"set calendar theme, default=classic_dark_extended (theme file name without extention)\"\"\",\n dest=\"theme\",\n default=False\n )\n\n parser.add_argument(\n \"-d\",\n \"--date\",\n help=\"\"\"display calendar at the given month, format='%%m-%%Y'\"\"\",\n dest=\"date\",\n default=False\n )\n\n args, unknown = parser.parse_known_args()\n unknown = unknown if len(unknown) == 0 else \"\".join(unknown).strip(' ')\n\n return args, unknown\n\n\n# get command line arguments and if exist : rofi output\nARGS, ROFI_OUTPUT = get_arguments()\n\n# Global var :\nEMPTY = -1\nROFI_RELOAD_TEMPO = 0.2\n\n######################\n### Path constants ###\n######################\nHOME = Path.home()\nDIRNAME = Path(__file__).parent.absolute()\n\n# cache files\nCACHE_PATH = HOME / \".cache/naivecalendar\"\nDATE_CACHE = CACHE_PATH / \"date_cache.ini\"\nPP_CACHE = CACHE_PATH / \"pretty_print_cache.txt\"\nTHEME_CACHE = CACHE_PATH / \"theme_cache.txt\"\nEVENT_CACHE = CACHE_PATH / \"event_cache.txt\"\n\n# config files\nCONFIG_PATH = HOME / \".config/naivecalendar\"\n\nTHEME_PATHS = {\n 'user' : CONFIG_PATH / \"themes\",\n 'rel' : DIRNAME / \"themes\"\n}\nSCRIPT_PATHS = {\n 'user' : CONFIG_PATH / \"scripts\",\n 'rel' : DIRNAME / \"scripts\"\n}\nEVENT_FILES = {\n 'user' : 
CONFIG_PATH / \"global/events.cfg\",\n 'rel' : DIRNAME / \"global/events.cfg\"\n}\nCUSTOM_ACTION_FILES = {\n 'user' : CONFIG_PATH / \"global/custom_actions.cfg\",\n 'rel' : DIRNAME / \"global/custom_actions.cfg\"\n}\n\n\n#######################################\n### load a theme configuration file ###\n#######################################\n\n# get wanted theme\ntheme = \"classic_dark_extended\"\nif ARGS.theme:\n theme = ARGS.theme\nelse:\n if THEME_CACHE.exists():\n with open(THEME_CACHE, 'r') as theme_cache:\n theme = theme_cache.read()\n\n# look for theme in config paths\nif (THEME_PATHS['user'] / f\"{theme}.cfg\").exists():\n theme_path = THEME_PATHS['user']\nelse:\n theme_path = THEME_PATHS['rel']\n\nTHEME_CONFIG_FILE = theme_path / f\"{theme}.cfg\"\nTHEME_RASI_FILE = theme_path / f\"{theme}.rasi\"\n\n\n########################\n### Load config file ###\n########################\n# -T-heme config\ncfg_t = configparser.ConfigParser(interpolation=None)\ncfg_t.read(THEME_CONFIG_FILE)\n\n# -E-vent config\ncfg_e = configparser.ConfigParser(interpolation=None)\nif EVENT_FILES['user'].exists():\n cfg_e.read(EVENT_FILES['user'])\nelif EVENT_FILES['rel'].exists():\n cfg_e.read(EVENT_FILES['rel'])\nelse:\n cfg_e['EVENTS'] = {'Notes' : '.naivecalendar_events/MyNotes/note_%Y-%m-%d.txt'}\n\n# custom -A-ction config\ncfg_a = configparser.ConfigParser(interpolation=None)\nif CUSTOM_ACTION_FILES['user'].exists():\n cfg_a.read(CUSTOM_ACTION_FILES['user'])\nelse:\n cfg_a.read(CUSTOM_ACTION_FILES['rel'])\n\n\n###########################\n### Get last event type ###\n###########################\ntry:\n with open(EVENT_CACHE, 'r') as event_cache:\n EVENTS_DEFAULT = event_cache.read()\n try :\n cfg_e['EVENTS'][EVENTS_DEFAULT]\n except KeyError:\n #print(f'no event \"{EVENTS_DEFAULT}\" found', file=sys.stderr)\n EVENTS_DEFAULT = ''\nexcept FileNotFoundError:\n #print(f'no event file \"{EVENT_CACHE}\" found', file=sys.stderr)\n EVENTS_DEFAULT = ''\n\n############################\n### Load user parameters ###\n############################\n\n# Some Functions\n################\n# Functions to parse list and int from configparser\ndef strip_list(lst):\n \"\"\"strip all element in a list\"\"\"\n return [x.strip() for x in lst]\n\ndef to_list(cfg_list):\n \"\"\"convert string with comma separated elements into python list\"\"\"\n # align all elements to right\n return [DAY_FORMAT.format(word) for word in cfg_list.split(',')]\n\ndef set_list(default, section, key, row):\n \"\"\"set, set default or desactivate given user config \"\"\"\n vals = section[key]\n if row == EMPTY: # don't display row\n return []\n elif vals == '': # use default vals\n return [DAY_FORMAT.format(s) for s in default]\n elif key == 'SYMS_DAYS_NUM':\n return to_list(vals)\n else: # parse config values\n return [CONTROL_MENU_ID[x.strip()] if x.strip() in CONTROL_MENU_ID.keys() else x for x in to_list(vals)]\n\n# def old_conf_file_compat(key):\n# dct = {\n# 'ROW_CONTROL_MENU' : 'ROW_BAR_1',\n# 'ROW_SHORTCUTS' : 'ROW_BAR_2',\n# 'SYMS_CONTROL_MENU' : 'SYMS_BAR_1',\n# 'SYMS_SHORTCUTS' : 'SYMS_BAR_2'\n# }\n#\n# return dct.setdefault(key, key)\n\ndef to_int(section, key):\n \"\"\"Convert a configparser entry into an int\"\"\"\n val = section[key]\n if val == '':\n val = EMPTY\n else:\n try:\n val = int(val)\n except ValueError as e:\n print(40*'*'+f\"\\nwarning : wrong value '{val}' for '{key}'.\\nShould be an interger or an empty value.\\n\"+40*'*', file=sys.stderr)\n raise e\n return val\n\ndef to_path(path_str, parent=HOME):\n \"\"\"make path 
relative to home or absolute\"\"\"\n\n path = Path(path_str)\n\n if path.is_absolute():\n return path\n else:\n return parent / path\n\n# week days symbols : can be changed by locale\ndef set_locale_n_week_day_names(arg_locale, user_locale, day_format, first_day_week, day_abbr_lenght):\n \"\"\" Set SYMS_WEEK_DAYS constante given command line argument \"\"\"\n\n if arg_locale: # locale overwrited by user\n locale.setlocale(locale.LC_ALL, arg_locale)\n else: # system locale\n locale.setlocale(locale.LC_ALL, user_locale)\n\n def get_loc_day(day_num, lenght):\n \"\"\"return locale day names truncated at lenght and titlized\"\"\"\n return locale.nl_langinfo(locale.DAY_1 + day_num)[:lenght].title()\n\n days_order = chain(range(first_day_week, 7), range(0, first_day_week))\n\n sym_week_days = [day_format.format(\n get_loc_day(day_num, day_abbr_lenght)\n ) for day_num in days_order]\n\n return sym_week_days\n\n# cfg_ture locate\n###################\nUSER_LOCALE = cfg_t['LOCALE'][\"USER_LOCALE\"] # use 'locale -a' on your system to list locales\n\n# Day names abbreviations\n#########################\nDAY_ABBR_LENGHT = int(cfg_t['DAY NAMES'][\"DAY_ABBR_LENGHT\"]) # ex : 3 => Mon\nDAY_FORMAT = '{:>' + str(max(DAY_ABBR_LENGHT,2)) + '}' # align symbols right\nFIRST_DAY_WEEK = int(cfg_t['DAY NAMES'][\"FIRST_DAY_WEEK\"]) # 0 = sunday, 1 = monday...\n\n# Day events configuration\n##########################\nEVENTS_PATHS = {n:to_path(cfg_e['EVENTS'][n]) for n in cfg_e['EVENTS']}\n# default date events folder to display\nEVENTS_DEFAULT = EVENTS_DEFAULT if EVENTS_DEFAULT != '' else next(EVENTS_PATHS.keys().__iter__()) #cfg['DEFAULT'].lower()\n\n# Rofi/Calendar shape\n#####################\nNB_COL = 7\nNB_WEEK = 6 # nb row of calendar \"days number\" part\n#NB_ROW = int(cfg_t['SHAPE']['NB_ROW'])\n\n# Calendar symbols and shortcuts\n################################\nSYM_NEXT_MONTH = to_list(cfg_t['CONTROL']['SYM_NEXT_MONTH'])\nSYM_NEXT_YEAR = to_list(cfg_t['CONTROL']['SYM_NEXT_YEAR'])\nSYM_PREV_MONTH = to_list(cfg_t['CONTROL']['SYM_PREV_MONTH'])\nSYM_PREV_YEAR = to_list(cfg_t['CONTROL']['SYM_PREV_YEAR'])\n\n# Shortcuts for popup windows\n#############################\nSYM_SHOW_EVENTS = to_list(cfg_t['SHORTCUTS']['SYM_SHOW_EVENTS'])\nSYM_SHOW_HELP = to_list(cfg_t['SHORTCUTS']['SYM_SHOW_HELP'])\nSYM_SWITCH_THEME = to_list(cfg_t['SHORTCUTS']['SYM_SWITCH_THEME'])\nSYM_SWITCH_EVENT = to_list(cfg_t['SHORTCUTS']['SYM_SWITCH_EVENT'])\nSYM_SHOW_MENU = to_list(cfg_t['SHORTCUTS']['SYM_SHOW_MENU'])\nSYM_GO_TODAY = to_list(cfg_t['SHORTCUTS']['SYM_GO_TODAY'])\n\n# Custom Functions\n##################\nCUSTOM_ACTIONS = {s:{'sym':to_list(cfg_a[s]['sym']), 'cmd':to_list(cfg_a[s]['cmd'])} for s in cfg_a.sections()}\n\n# Today header display\n######################\nPROMT_DATE_FORMAT = cfg_t['HEADER']['PROMT_DATE_FORMAT']\nIS_TODAY_HEAD_MSG = cfg_t.getboolean('HEADER', 'IS_TODAY_HEAD_MSG')\nIS_LOOP_TODAY_HEAD_MSG = cfg_t.getboolean('HEADER', 'IS_LOOP_TODAY_HEAD_MSG')\n\n# pango markup props\nTODAY_HEAD_MSG_TXT = cfg_t['HEADER']['TODAY_HEAD_MSG_TXT']\n\n# Calendar content and organisation\n###################################\n# row number where to display day symbols\nROW_DAY_NAMES = to_int(cfg_t['CONTENT'], 'ROW_DAY_NAMES')\n# symbols for week day names\n#_syms_week_days = to_list(cfg_t['CONTENT'][\"SYMS_WEEK_DAYS\"]) if not ROW_DAY_NAMES == EMPTY else []\nSYMS_WEEK_DAYS = set_locale_n_week_day_names(ARGS.locale, USER_LOCALE, DAY_FORMAT, FIRST_DAY_WEEK, DAY_ABBR_LENGHT)\n\n# row number where to display calendar first 
line\nROW_CAL_START = to_int(cfg_t['CONTENT'], 'ROW_CAL_START')\n# symbols for day numbers\n#default = (str(x) for x in range(1,32))\n#SYMS_DAYS_NUM= set_list(default, cfg_t['CONTENT'], 'SYMS_DAYS_NUM', ROW_CAL_START)\nSYMS_DAYS_NUM = [str(x) for x in range(1,32)]\n\n\nCONTROL_MENU_ID = {\n 'p' : SYM_PREV_MONTH[0],\n 'pp': SYM_PREV_YEAR[0],\n 'n' : SYM_NEXT_MONTH[0],\n 'nn': SYM_NEXT_YEAR[0],\n 'h' : SYM_SHOW_HELP[0],\n 't' : SYM_SWITCH_THEME[0],\n 'e' : SYM_SHOW_EVENTS[0],\n 's' : SYM_SWITCH_EVENT[0],\n 'm' : SYM_SHOW_MENU[0],\n 'bb': SYM_GO_TODAY[0],\n **{s:v['sym'][0] for s,v in CUSTOM_ACTIONS.items()}\n}\n\n# row number where to display buttons\nROW_BAR_1 = to_int(cfg_t['CONTENT'], 'ROW_BAR_1')\n# symbols for control menu row\ndefault = (s[0] for s in (SYM_PREV_YEAR, SYM_PREV_MONTH, ' ', SYM_SHOW_MENU, ' ', SYM_NEXT_MONTH, SYM_NEXT_YEAR))\nSYMS_BAR_1 = set_list(default, cfg_t['CONTENT'], 'SYMS_BAR_1', ROW_BAR_1)\n\n# row number where to display shortcuts buttons\nROW_BAR_2 = to_int(cfg_t['CONTENT'], 'ROW_BAR_2')\n# symbols to display in shortcuts row\ndefault = (s[0] for s in (SYM_SHOW_HELP, SYM_SWITCH_THEME, SYM_SHOW_EVENTS, SYM_SWITCH_EVENT, ' ', ' ', SYM_SHOW_MENU))\nSYMS_BAR_2 = set_list(default, cfg_t['CONTENT'], 'SYMS_BAR_2', ROW_BAR_2)\n\nNB_ROW = int(bool(SYMS_BAR_2)) + int(bool(SYMS_BAR_1)) + int(bool(SYMS_WEEK_DAYS)) + 6\n\n##############\n### Script ###\n##############\n\ndef main(args, rofi_output):\n \"\"\"Print calendar to stdout and react to rofi output\"\"\"\n\n # create event path n test rofi intall\n first_time_init()\n\n is_first_loop = not bool(rofi_output)\n if isinstance(rofi_output, str):\n out = DAY_FORMAT.format(rofi_output) # rofi strip blank character so reformat\n else:\n out = 'Nothing'\n\n cdate = CacheDate() # manage operation and writing to cache\n cdate = set_date(cdate, is_first_loop, args.is_force_read_cache, args.date)\n cdate, is_match = process_event_date(cdate, out, args)\n\n update_rofi(cdate.date, is_first_loop)\n cdate.write_cache()\n if not is_match: # don't test if out already match one condition in process_event_date\n process_event_popup(out, cdate)\n\n\ndef set_date(cdate, is_first_loop, is_force_read_cache, arg_date):\n \"\"\"set date given context\n\n (read cache, get today date or set date argument)\n\n Parameters\n ----------\n is_first_loop : bool\n true on first calendar call\n is_force_read_cache : bool\n force date from cache\n arg_date : str\n date in '%m%Y' format\n\n Returns\n -------\n CacheDate\n CacheDate object that contain the date to display\n \"\"\"\n\n if not is_first_loop or is_force_read_cache:\n cdate.read_cache() # read previous date\n elif is_first_loop and arg_date:\n cdate.set_month(arg_date) # command line force date\n else: # at first loop if no force option\n cdate.now()\n\n return cdate\n\n\ndef process_event_date(cdate, out, args):\n \"\"\"React to rofi output for \"date\" events\n\n Parameters\n ----------\n cdate : CacheDate\n current month\n out : str\n rofi output\n args : argparse.Namespace\n print, clipboard, format, editor arguments\n\n Returns\n -------\n CacheDate\n new month to display\n \"\"\"\n\n is_match = True\n out = out.strip()\n if out in strip_list(SYM_PREV_YEAR):\n cdate.year -= 1\n elif out in strip_list(SYM_PREV_MONTH):\n cdate.month -= 1\n elif out in strip_list(SYM_NEXT_MONTH):\n cdate.month += 1\n elif out in strip_list(SYM_NEXT_YEAR):\n cdate.year += 1\n elif out in strip_list(SYMS_DAYS_NUM):\n set_pp_date(out, cdate.date, args.format)\n if args.print or args.clipboard:\n 
sys.exit(0)\n else:\n open_event(out, cdate.date, args.editor)\n elif out in strip_list(SYM_GO_TODAY):\n cdate.now()\n else:\n is_match = False\n\n return cdate, is_match\n\n\ndef process_event_popup(out, cdate):\n \"\"\"React to rofi event hat open a popup window\n\n Parameters\n ----------\n out : str\n rofi output\n cdate : CacheDate\n current month\n \"\"\"\n\n out = out.strip()\n if out in strip_list(SYM_SHOW_EVENTS):\n show_events(cdate.date)\n elif out in strip_list(SYM_SHOW_HELP):\n display_help()\n elif out in strip_list(SYM_SWITCH_THEME):\n ask_theme()\n elif out in strip_list(SYM_SWITCH_EVENT):\n ask_event_to_display()\n elif out in strip_list(SYM_SHOW_MENU):\n show_menu(cdate)\n elif out in strip_list(SYM_GO_TODAY):\n cdate.now()\n cdate.write_cache()\n else:\n for sym_act, cmd_act in ((act['sym'], act['cmd']) for act in CUSTOM_ACTIONS.values()):\n if out in strip_list(sym_act):\n execute_external_cmd(cmd_act)\n break\n\n\ndef update_rofi(date, is_first_loop):\n \"\"\"generate and send calendar data to stdout/rofi\n\n It use the rofi `custom script mode `_ to communicate with rofi\n and `pango markup `_ for theming\n\n Parameters\n ----------\n date : datetime.date\n A day of the month to display\n is_first_loop : bool\n True on first loop, if true, update today highlights\n \"\"\"\n\n date_prompt = date.strftime(PROMT_DATE_FORMAT).title()\n print(f\"\\0prompt\\x1f{date_prompt}\\n\")\n\n events_inds = get_month_events_ind(date)\n print(f\"\\0urgent\\x1f{events_inds}\\n\")\n\n if is_first_loop or IS_LOOP_TODAY_HEAD_MSG:\n today_ind = cal2rofi_ind(date.day, date.month, date.year)\n print(f\"\\0active\\x1f{today_ind}\\n\")\n if IS_TODAY_HEAD_MSG:\n msg = date.strftime(TODAY_HEAD_MSG_TXT)\n print(f\"\\0message\\x1f{msg}\\n\")\n\n if not ROW_DAY_NAMES == EMPTY:\n week_sym_row = get_row_rofi_inds(ROW_DAY_NAMES)\n print(f\"\\0active\\x1f{week_sym_row}\\n\")\n\n if not ROW_BAR_1 == EMPTY:\n control_sym_row =get_row_rofi_inds(ROW_BAR_1)\n print(f\"\\0active\\x1f{control_sym_row}\\n\")\n\n if not ROW_BAR_2 == EMPTY:\n shortcut_sym_row = get_row_rofi_inds(ROW_BAR_2)\n print(f\"\\0active\\x1f{shortcut_sym_row}\\n\")\n\n cal = get_calendar_from_date(date)\n print(cal)\n\n\ndef get_calendar_from_date(date):\n r\"\"\"Return a montly calendar given date\n\n Calendar is a string formated to be shown by rofi (i.e. 
column bu column)::\n\n L M M J V S D\n 1\n 2 3 4 5 6 7 8\n date -> 9 10 11 12 13 14 15 -> 'L\\n \\n2\\n9\\n16\\n23\\n30\\n<\\nM\\n \\n3\\n10\\n17\\n24\\n...'\n 16 17 18 19 20 21 22\n 23 24 25 26 27 28 29\n 30\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month to display\n\n Returns\n -------\n str\n A str that contain chained columns of a calendar in a rofi format\n\n \"\"\"\n\n start_day, month_length = calendar.monthrange(date.year, date.month)\n\n # init calendar with NB_WEEK blank week\n cal = [\" \"] * NB_WEEK * NB_COL\n\n # fill with day numbers\n ind_first_day = (start_day - (FIRST_DAY_WEEK - 1)) % 7\n ind_last_day = ind_first_day + month_length\n cal[ind_first_day : ind_last_day] = SYMS_DAYS_NUM[:month_length]\n\n # join calendar parts given user order\n index = (ROW_DAY_NAMES, ROW_CAL_START, ROW_BAR_1, ROW_BAR_2)\n content = [SYMS_WEEK_DAYS, cal, SYMS_BAR_1, SYMS_BAR_2]\n index, content = (list(x) for x in zip(*sorted(zip(index, content))))\n\n # transform\n cal = list(chain(*content)) # row-by-row list\n cal = list_transpose(cal) # col-by-col list\n cal = list2rofi(cal) # rofi formated\n\n return cal\n\n\ndef list_transpose(lst, col_nb=NB_COL):\n \"\"\"\n Transpose (math) a row by row list into column by column list\n given column number\n\n Parameters\n ----------\n lst : list\n row by row elements\n col_nb : int\n number of column to display\n\n Returns\n -------\n list\n A list that represent column by column elements\n\n Examples\n --------\n >>> my_list = [1,2,3,4,5,6]\n >>> list_transpose(my_list, col_nb=3)\n [1,4,2,5,3,6]\n\n \"\"\"\n\n # split into row\n iter_col = range(len(lst) // col_nb)\n row_list = [lst[i * col_nb : (i + 1) * col_nb] for i in iter_col]\n\n # transpose : take 1st element for each row, then 2nd...\n iter_row = range(len(row_list[0]))\n col_list = [[row[i] for row in row_list] for i in iter_row]\n\n # chain columns\n lst = list(chain(*col_list))\n\n return lst\n\n\ndef list2rofi(datas):\n \"\"\"\n Convert python list into a list formatted for rofi\n\n Parameters\n ----------\n datas : list\n elements stored in a list\n\n Returns\n -------\n str\n elements separated by line-breaks\n\n Examples\n --------\n\n >>> my_list = [1,2,3,4,5,6]\n >>> list2rofi(my_list]\n \"1\\\\n2\\\\n3\\\\n4\\\\n5\\\\n6\"\n \"\"\"\n\n return \"\\n\".join(datas)\n\n\ndef rofi2list(datas):\n \"\"\"\n Convert list formatted for rofi into python list object\n\n Parameters\n ----------\n datas : str\n a string with element separeted by line-breaks\n\n Returns\n -------\n list\n elements of datas in a list\n\n Examples\n --------\n\n >>> rofi_list = \"1\\\\n2\\\\n3\\\\n4\\\\n5\\\\n6\"\n >>> rofi2list\n [1,2,3,4,5,6]\n \"\"\"\n\n return datas.split(\"\\n\")\n\n\ndef parse_month_events_files(date):\n \"\"\"\n Return a list of file's first line of a specific month\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month to display\n\n Returns\n -------\n str\n A rofi formatted list of month's events first line\n str\n Rows to highlight (date header)\n \"\"\"\n\n # paths\n events_paths = get_month_events(date)\n\n if not events_paths:\n return \"No events this month\", 0\n else:\n # first line\n heads = [parse_event_file(n) for n in events_paths]\n # file name\n prompts = [Path(n).stem for n in events_paths]\n # sort by file name (usually by date)\n prompts, heads = (list(x) for x in zip(*sorted(zip(prompts, heads))))\n\n prompts_pos = [0]\n for head in heads[:-1]:\n prompts_pos += [prompts_pos[-1] + len(head.split('\\n'))]\n prompts_pos = 
','.join(str(x) for x in prompts_pos)\n\n # return : : for each event\n text = \"\\n\".join([f\"{p} : {h}\" for p, h in sorted(zip(prompts, heads))])\n\n return text, prompts_pos\n\n\ndef parse_event_file(event_path):\n \"\"\"Parse event file for compact display\n\n **Event format:**\n\n - Section ::\n\n [9H30] rdv with truc <---- will be displayed\n Some text\n Some text again\n [14H30] rdv with muche <----- will be displayed\n Some text again again\n\n - header ::\n\n # Note Title <---- only first line is displayed\n Some text\n Some text again...\n\n Parameters\n ----------\n event_path : str\n A text file path\n\n Returns\n -------\n str\n Parsed lines\n \"\"\"\n\n with open(event_path, \"r\") as f:\n note_txt = f.read()\n\n # get lines with [section]\n head = list(re.findall('\\[.*\\].*', note_txt))\n\n if head: # if sections\n return '\\n' + '\\n'.join(head) # join them into multilines\n else: # otherwise\n return '\\n' + note_txt.split(\"\\n\")[0] # get first line\n\n\ndef get_row_rofi_inds(row):\n \"\"\"Get all rofi index of a row\n\n Parameters\n ----------\n row : int\n row number (start at 0)\n\n Returns\n -------\n str\n a ',' separate list of rofi indexes\n \"\"\"\n\n return \",\".join(str(i * NB_ROW + row) for i in range(NB_COL))\n\n\n\ndef cal2rofi_ind(day, month, year):\n \"\"\"\n Convert calendar date into coordinates for rofi\n\n Parameters\n ----------\n day : int\n A day number (1-31)\n month : int\n A month number (1-12)\n year : int\n A year number\n\n Returns\n -------\n int\n A rofi index\n \"\"\"\n\n # day number area offset in calendar\n cal_offset = NB_COL * ROW_CAL_START\n\n # offset due to first month day\n start_day, _ = calendar.monthrange(year, month)\n # and correct by day starting the week\n ind_start_day = (start_day - (FIRST_DAY_WEEK - 1)) % 7\n\n # make month start at 0\n day = int(day) - 1\n\n # row-by-row index\n ind_r = cal_offset + day + ind_start_day\n # calendar coordinate\n row, col = ind_r // NB_COL, ind_r % NB_COL\n # rofi coordinate (column-by-column index)\n ind_c = col * NB_ROW + row\n\n return ind_c\n\n\ndef get_month_events(date):\n \"\"\"\n Return events files paths that are attached to date's month\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month displayed\n\n Returns\n -------\n list\n list of files that belong to date.month\n \"\"\"\n\n # folder of the actual watched events\n path = EVENTS_PATHS[EVENTS_DEFAULT]\n\n # transform all directive '< montth' into regex\n # \"%a-%d-%b-%m-%Y\" --> \"[a-zA-Z.]*-[0-9]*-%b-%m-%Y\"\n file_pattern = re.sub('%-{0,1}[dwjhHIMSfzZ]', '[0-9]*', str(path))\n file_pattern = re.sub('%[aAp]', '[a-zA-Z.]*', file_pattern)\n\n # format all others directives (>= month) with date\n # \"[a-zA-Z.]*-[0-9]*-%b-%m-%Y\" --> \"[a-zA-Z.]*-[0-9]*-Jan.-01-2021\"\n file_pattern = date.strftime(file_pattern) #f\"{date.year}-{date.month}-\"\n\n # return all elements that belong to current month (match previous regex)\n path = Path(file_pattern)\n events_paths = list(Path(path.parent).glob(path.name))\n\n return events_paths\n\n\ndef get_month_events_ind(date):\n \"\"\"\n Return rofi-formatted index of days with attached event\n\n Parameters\n ----------\n date : datetime.date\n Any day of the month displayed\n\n Returns\n -------\n str\n Column index list formatted for rofi\n \"\"\"\n\n # get file list\n events_paths = get_month_events(date)\n # event name\n date_format = EVENTS_PATHS[EVENTS_DEFAULT].name\n # make capture group for day number (%d)\n pattern = re.sub('%d',r'([0-9]*)', date_format)\n # 
create pattern for directives < month\n pattern = re.sub('%-{0,1}[dwjhHIMSfzZ]',r'[0-9]*', pattern)\n pattern = re.sub('%[aAp]',r'[a-zA-Z.]*', pattern)\n # replace other (>= month) with real date\n pattern = date.strftime(pattern)\n # match the day (%d) capture group for each event in events_paths\n days = [re.match(pattern, f.name).group(1) for f in events_paths]\n # transform into rofi index\n inds = [cal2rofi_ind(int(d), date.month, date.year) for d in days]\n # format into rofi command\n inds = \",\".join([str(i) for i in inds])\n\n return inds\n\n# Count recursive call from open_n_reload_rofi\n# and prevent relaunching rofi if it's already planned\nROFI_RELAUNCH_COUNT = 0\n\ndef open_n_reload_rofi(func):\n \"\"\" decorator to open and reload the rofi script at the same date\"\"\"\n\n script_path = DIRNAME# os.path.abspath(os.path.dirname(sys.argv[0]))\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n global ROFI_RELAUNCH_COUNT\n\n ROFI_RELAUNCH_COUNT += 1\n subprocess.Popen([\"pkill\", \"-9\", \"rofi\"])\n time.sleep(ROFI_RELOAD_TEMPO)\n\n out = func(*args)\n\n ROFI_RELAUNCH_COUNT -= 1\n if ROFI_RELAUNCH_COUNT == 0:\n time.sleep(ROFI_RELOAD_TEMPO)\n #cmd_args = ' '.join(sys.argv[1:-1])\n cmd_args = sys.argv[1:-1] # 1 = command name, -1 = rofi outpub\n cmd = (str(DIRNAME / \"naivecalendar.sh\"), '-c', *cmd_args)\n #os.system(cmd)\n subprocess.Popen(cmd)\n\n return out\n\n return wrapper\n\n\n@open_n_reload_rofi\ndef show_events(date):\n \"\"\"open rofi popup with events list of selected month\n\n Parameters\n ----------\n date : datetime.date\n current month\n \"\"\"\n\n # Show month events\n parsed_events, prompts_pos = parse_month_events_files(date)\n output = rofi_popup(EVENTS_DEFAULT, parsed_events, highlights=prompts_pos, nb_lines=10)\n\n # open event file of selected day\n event= EVENTS_PATHS[EVENTS_DEFAULT]\n\n event_folder = date.strftime(str(event.parent))\n event_name = output.split(':')[0].strip()\n event_ext = event.suffix\n\n event_path = f'{event_folder}/{event_name}{event_ext}'\n\n if os.path.isfile(event_path):\n edit_event_file(event_path)\n\n\n@open_n_reload_rofi\ndef show_menu(cdate):\n \"\"\"open popup menu\n\n (list .cfg SHORTCUTS section entries)\"\"\"\n\n menu = '\\n'.join([to_list(cfg_t['SHORTCUTS'][s])[-1] for s in cfg_t['SHORTCUTS']])\n menu += '\\n' + '\\n'.join([act['sym'][-1] for act in CUSTOM_ACTIONS.values()])\n output = rofi_popup(\"menu\", menu, nb_lines=7, width='20em')\n process_event_popup(output, cdate)\n\n\n#@open_n_reload_rofi\ndef open_event(day_sym, date, editor):\n \"\"\"open event with editor for the selected date\"\"\"\n\n day_ind = strip_list(SYMS_DAYS_NUM).index(day_sym) +1\n\n date_format = str(EVENTS_PATHS[EVENTS_DEFAULT])\n event_path = datetime.date(date.year, date.month, day_ind).strftime(date_format)\n\n edit_event_file(event_path, editor)\n\n\n@open_n_reload_rofi\ndef edit_event_file(event_path, editor=ARGS.editor):\n \"\"\"open event file with text editor\"\"\"\n\n event_folder = Path(event_path).parent\n if not os.path.isdir(event_folder):\n os.makedirs(event_folder)\n Path(event_path).touch()\n cmd = (*editor.split(' '), event_path)\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n sdtout, sdterr = p.communicate()\n\n\n@open_n_reload_rofi\ndef ask_event_to_display():\n \"\"\"Popup that show all events type\"\"\"\n\n events = list(EVENTS_PATHS.keys())\n events = list2rofi(events)\n\n event = rofi_popup(f\"select what to display (actual = {EVENTS_DEFAULT})\", events, nb_lines=6)\n\n 
set_event_cache(event)\n\n\n@open_n_reload_rofi\ndef ask_theme():\n \"\"\"Search themes in paths and open a popup\"\"\"\n\n themes = list(chain(*[glob.glob(f'{path}/*.rasi') for path in THEME_PATHS.values()]))\n themes = (t.split('/')[-1].split('.')[0]for t in themes)\n themes = list2rofi(sorted(set(themes)))\n #themes = '\\n'.join((t.split('/')[-1] for t in themes))\n\n theme = rofi_popup(\"select theme\", themes, nb_col=3, nb_lines=9, width='45em')\n if theme in themes:\n set_theme_cache(theme)\n else :\n print(\"this is not a valid theme\", file=sys.stderr)\n\n@open_n_reload_rofi\ndef execute_external_cmd(cmd):\n \"\"\"Execute an external system command\n try to find command in different directories:\n\n - in $HOME/.config/naivecalendar/scripts/, then in\n - in ./scripts/, then\n - in system path\n \"\"\"\n cmd_path = Path(cmd[0])\n\n if (SCRIPT_PATHS['user'] / cmd_path).exists():\n cmd = [str(SCRIPT_PATHS['user'] / cmd_path)] + cmd[1:]\n elif (SCRIPT_PATHS['rel'] / cmd_path).exists():\n cmd = [str(SCRIPT_PATHS['rel'] / cmd_path)] + cmd[1:]\n\n subprocess.Popen(cmd)\n\ndef set_pp_date(day, date, f):\n \"\"\"write date to cache with command line specified format\"\"\"\n\n d = int(day)\n m = date.month\n y = date.year\n\n pretty_date = datetime.date(y, m, d).strftime(f)\n with open(PP_CACHE, \"w\") as f:\n f.write(pretty_date + \"\\n\")\n\n\n@open_n_reload_rofi\ndef send2clipboard(day, date, f):\n \"\"\"return select date to stdout given cmd line parameter '--format'\"\"\"\n\n if shutil.which(\"xclip\") == None:\n print(\"\\nplease install xclip to use 'copy-to-clipboard' option (-x/--clipboard)\\n\", file=sys.stderr)\n sys.exit(0)\n\n d = int(day)\n m = date.month\n y = date.year\n\n pretty_date = datetime.date(y, m, d).strftime(f)\n p = subprocess.Popen(('echo', pretty_date), stdout=subprocess.PIPE)\n subprocess.check_output(('xclip', '-selection', 'clipboard'), stdin=p.stdout)\n\n sys.exit(0)\n\n\ndef first_time_init():\n \"\"\"Create config files and paths given script head variables\"\"\"\n\n if shutil.which(\"rofi\") == None:\n print(\"please install rofi\")\n sys.exit()\n\n if not os.path.exists(THEME_PATHS['user']):\n os.makedirs(THEME_PATHS['user'])\n\n if not os.path.exists(SCRIPT_PATHS['user']):\n os.makedirs(SCRIPT_PATHS['user'])\n\n for events_path in EVENTS_PATHS.values():\n if not os.path.exists(events_path.parent):\n os.makedirs(events_path.parent)\n\n if not os.path.exists(CACHE_PATH):\n os.mkdir(CACHE_PATH)\n date = datetime.date.today()\n date_buff = configparser.ConfigParser()\n date_buff[\"buffer\"] = {\"year\": date.year, \"month\": date.month}\n with open(DATE_CACHE, 'w') as date_cache:\n date_buff.write(date_cache)\n display_help(head_txt=\"Welcome to naivecalendar\")\n\n\nclass CacheDate:\n \"\"\"Class to store date\n Make easier reading and writing to date cache file\n Make easier operation on date\n\n Attributes\n ----------\n\n year : Year\n month: Month\n\n \"\"\"\n\n def __init__(self):\n\n self.now()\n self._cache = configparser.ConfigParser()\n self.year = Year(self)\n self.month = Month(self)\n\n def now(self):\n \"\"\"Set and return today date\"\"\"\n self.date = datetime.datetime.now()\n return self.date\n\n def set_month(self, month):\n \"\"\"Set and return date of the given Month\n\n Parameters\n ----------\n month : str\n month to set in '%m-%Y' format\n\n Returns\n -------\n datetime.date\n a day of the month\n \"\"\"\n\n m, y = [int(x) for x in month.split('-')]\n self.date = datetime.date(y,m,1)\n\n return self.date\n\n def read_cache(self):\n 
\"\"\"load cache ini file\"\"\"\n\n self._cache.read(DATE_CACHE)\n day = 1\n month = int(self._cache[\"buffer\"][\"month\"])\n year = int(self._cache[\"buffer\"][\"year\"])\n\n self.date = datetime.date(year, month, day)\n\n def write_cache(self):\n \"\"\"write date to ini cache file\"\"\"\n\n date = self.date\n self._cache[\"buffer\"] = {\"year\": date.year, \"month\": date.month}\n with open(DATE_CACHE, \"w\") as buff:\n self._cache.write(buff)\n\n\nclass Year:\n \"\"\"Make computation on date years\"\"\"\n def __init__(self, outer):\n self.outer = outer\n\n def __repr__(self):\n return f\"Year({self.outer.date.year})\"\n\n def __add__(self, years):\n \"\"\"\n Increment or decrement date by a number of years\n\n Parameters\n ----------\n sourcedate : datetime.date\n CacheDate to Increment\n months : int\n number of years to add\n\n Returns\n -------\n datetime.date\n Incremented date\n \"\"\"\n\n year = self.outer.date.year + years\n month = self.outer.date.month\n day = min(self.outer.date.day, calendar.monthrange(year, month)[1])\n self.outer.date = datetime.date(year, month, day)\n\n def __sub__(self, years):\n self.__add__(-years)\n\n\nclass Month:\n \"\"\"Make computation on date months\"\"\"\n def __init__(self, outer):\n self.outer = outer\n\n def __repr__(self):\n return f\"Month({self.outer.date.month})\"\n\n def __add__(self, months):\n \"\"\"\n Increment or decrement date by a number of month\n\n Parameters\n ----------\n sourcedate : datetime.date\n CacheDate to Increment\n months : int\n number of month to add\n\n Returns\n -------\n datetime.date\n Incremented date\n \"\"\"\n\n month = self.outer.date.month - 1 + months\n year = self.outer.date.year + month // 12\n month = month % 12 + 1\n day = min(self.outer.date.day, calendar.monthrange(year, month)[1])\n\n self.outer.date = datetime.date(year, month, day)\n # return datetime.date(year, month, day)\n\n def __sub__(self, months):\n self.__add__(-months)\n\n\ndef joke(sym):\n \"\"\"Just display stupid jokes in french\"\"\"\n\n if sym == DAY_FORMAT.format(\"\"):\n print(\n \"Vous glissez entre les mois, vous perdez la notion du temps.\",\n file=sys.stderr,\n )\n elif sym in SYMS_WEEK_DAYS:\n print(\"Ceci n'est pas un jour! 
R.Magritte.\", file=sys.stderr)\n\n\ndef set_theme_cache(selected):\n \"\"\"Write theme name to cache file\"\"\"\n\n with open(THEME_CACHE, 'w') as f:\n f.write(selected)\n\n\ndef set_event_cache(selected):\n \"\"\"Write theme name to cache file\"\"\"\n\n with open(EVENT_CACHE, 'w') as f:\n f.write(selected)\n\n\ndef rofi_popup(txt_head, txt_body, nb_lines=15, nb_col=1, width='40%', highlights=1000):\n \"\"\"Launch a rofi window\n\n Parameters\n ----------\n txt_body : str\n Text to display in rofi window\n txt_head : str\n Text to display in rofi prompt\n\n Returns\n -------\n str\n Rofi selected cell content\n \"\"\"\n\n cmd = subprocess.Popen(('echo', txt_body), stdout=subprocess.PIPE)\n\n theme_str = f'''\n @import \"{THEME_RASI_FILE}\"\n #window {{\n location: center;\n width: {width};\n }}\n #listview {{\n columns: {nb_col};\n lines: {nb_lines};\n witdh: {width};\n }}\n '''\n\n #rofi_cmd = f'''rofi -dmenu -theme-str '{theme_str}' -p \"{txt_head}\" -u {highlights}'''\n rofi_cmd = ('rofi', '-dmenu', '-theme-str', theme_str, '-p', txt_head, '-u', str(highlights))\n selection = (\n subprocess.check_output(rofi_cmd, stdin=cmd.stdout)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )\n\n return selection\n\n\n@open_n_reload_rofi\ndef display_help(head_txt=\"help:\"):\n \"\"\"Show a rofi popup with help message\"\"\"\n\n\n txt = f\"\"\"NaïveCalendar {__version__}\n\nUsage:\n - Use mouse or keyboard to interact with the calendar.\n - Hit bottom arrows to cycle through months.\n - Hit a day to create a linked event.\n(A day with attached event will appear yellow.)\n - Create multiple event type and with between them\n\nShortcuts (type it in rofi prompt) :\"\"\"\n\n txt += '\\n{:>20} : display this help'.format(','.join(SYM_SHOW_HELP[:-1]))\n txt += '\\n{:>20} : go to previous year'.format(','.join(SYM_PREV_YEAR))\n txt += '\\n{:>20} : go to previous month'.format(','.join(SYM_PREV_MONTH))\n txt += '\\n{:>20} : go to next month'.format(','.join(SYM_NEXT_MONTH))\n txt += '\\n{:>20} : go to next year'.format(','.join(SYM_NEXT_YEAR))\n txt += '\\n{:>20} : display events of the month (first line)'.format(','.join(SYM_SHOW_EVENTS[:-1]))\n txt += '\\n{:>20} : switch events folder to display'.format(','.join(SYM_SWITCH_EVENT[:-1]))\n txt += '\\n{:>20} : show theme selector'.format(','.join(SYM_SWITCH_THEME[:-1]))\n txt += '\\n{:>20} : display a selection menu (skip shortcuts)'.format(','.join(SYM_SHOW_MENU[:-1]))\n\n txt += f\"\"\"\\n\nCommand line option:\n\nsubcommands:\n update-themes Update a calendar parameter for all user themes at once\n add-event Add, modify, delete event in all user themes config at once\n configure Clone or open configuration files\n\noptional arguments:\n -h, --help\n -V, --version\n -v, --verbose\n -p, --print\n -x, --clipboard\n -f FORMAT, --format FORMAT\n -e EDITOR, --editor EDITOR\n -l LOCALE, --locale LOCALE\n -c, --read-cache\n -t THEME, --theme THEME\n -d DATE, --date DATE\n\nThat's all : press enter to continue...\n\"\"\"\n\n rofi_popup(\"Help\", txt, nb_lines=20, width='45em')\n\n\nif __name__ == \"__main__\":\n main(ARGS, ROFI_OUTPUT)\n\n #print(\"loop time =\", \"{:.2f}\".format(1000*(time.time() - START)), 'ms', file=sys.stderr)\n\n","repo_name":"Daguhh/naivecalendar","sub_path":"src/naivecalendar.py","file_name":"naivecalendar.py","file_ext":"py","file_size_in_byte":38383,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"94"}
+{"seq_id":"24298008565","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 6 14:28:42 2021\n\n@author: abdul\n\"\"\"\n\n# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for plotting\nimport matplotlib.pyplot as plt\n\n# to save the model\nimport joblib\n\n# to build the model\nfrom sklearn.linear_model import Lasso\n\n# to evaluate the model\nfrom sklearn.metrics import mean_squared_error, r2_score\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)\n\n\n# to find out how to create these datasets\n\nX_train = pd.read_csv('xtrain.csv')\nX_test = pd.read_csv('xtest.csv')\n\nX_train.head()\n\n\ny_train = pd.read_csv('ytrain.csv')\ny_test = pd.read_csv('ytest.csv')\n\ny_train.head()\n\n# load the pre-selected features\n# ==============================\nfeatures = pd.read_csv('selected_features.csv')\nfeatures = features['0'].to_list() \n\n# display final feature set\nfeatures\n\n# reduce the train and test set to the selected features\n\nX_train = X_train[features]\nX_test = X_test[features]\n\n\n\nlin_model = Lasso(alpha=0.001, random_state=0)\n\n# train the model\n\nlin_model.fit(X_train, y_train)\n\n# make predictions for train set\npred = lin_model.predict(X_train)\n\n# determine mse, rmse and r2\nprint('train mse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred)))))\nprint('train rmse: {}'.format(int(\n mean_squared_error(np.exp(y_train), np.exp(pred), squared=False))))\nprint('train r2: {}'.format(\n r2_score(np.exp(y_train), np.exp(pred))))\nprint()\n\n# make predictions for test set\npred = lin_model.predict(X_test)\n\n# determine mse, rmse and r2\nprint('test mse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred)))))\nprint('test rmse: {}'.format(int(\n mean_squared_error(np.exp(y_test), np.exp(pred), squared=False))))\nprint('test r2: {}'.format(\n r2_score(np.exp(y_test), np.exp(pred))))\nprint()\n\nprint('Average house price: ', int(np.exp(y_train).median()))\n\n\n# let's evaluate our predictions respect to the real sale price\nplt.scatter(y_test, lin_model.predict(X_test))\nplt.xlabel('True House Price')\nplt.ylabel('Predicted House Price')\nplt.title('Evaluation of Lasso Predictions')\n\ny_test.reset_index(drop=True)\n\n# they should be fairly normally distributed\ny_test.reset_index(drop=True, inplace=True)\n\npreds = pd.Series(lin_model.predict(X_test))\n\npreds\n\n# they should be fairly normally distributed\nerrors = y_test['SalePrice'] - preds\nerrors.hist(bins=30)\nplt.show()\n\n\n# Finally, just for fun, let's look at the feature importance\nimportance = pd.Series(np.abs(lin_model.coef_.ravel()))\nimportance.index = features\nimportance.sort_values(inplace=True, ascending=False)\nimportance.plot.bar(figsize=(18,6))\nplt.ylabel('Lasso Coefficients')\nplt.title('Feature Importance')\n\n\n\n# we save Model\n# to score new data\n\njoblib.dump(lin_model, 'linear_regression.joblib') \n\n\n\n\n\n\n\n\n","repo_name":"2ahmedabdullah/Advanced-Linear-Regression-Project","sub_path":"4_model_training.py","file_name":"4_model_training.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"37486968332","text":"from django.db import models\nfrom wagtail.search import index\nfrom nsra.base.validators import phone_validator\nfrom wagtail.core.models import Page, Orderable\nfrom django.core.exceptions import ValidationError\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.core.fields import RichTextField, StreamField\nfrom modelcluster.fields import ParentalKey, ParentalManyToManyField\nfrom wagtail.contrib.forms.models import AbstractFormField, AbstractEmailForm, AbstractForm\nfrom nsra.base.blocks import BaseStreamBlock, ParagraphStreamBlock, ImageBlock\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom wagtail.admin.edit_handlers import TabbedInterface, ObjectList\nfrom wagtail.snippets.edit_handlers import SnippetChooserPanel\nfrom nsra.news_and_events.models import NewsEventsIndexPage\nfrom wagtail.snippets.models import register_snippet\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.core.blocks import StreamBlock\nfrom modelcluster.fields import ParentalKey\nfrom nsra.base.blocks import BaseStreamBlock\nfrom nsra.base.models import StandardPage\nfrom nsra.base.choices import COLORS\nfrom django import forms\nimport datetime\nfrom wagtail.admin.edit_handlers import (\n StreamFieldPanel,\n PageChooserPanel,\n MultiFieldPanel,\n FieldRowPanel,\n InlinePanel,\n FieldPanel,\n)\n\nfrom nsra.regional_profiles.models import RegionalProfilePage\n\nclass AboutUsPageCoreFunctionOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='functions')\n function = models.ForeignKey('core_functions.CoreFunction', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('function'),\n ]\n\n\nclass AboutUsPageCarouselImages(Orderable):\n \"\"\"Between 1 and 5 images for the home page carousel.\"\"\"\n\n page = ParentalKey(\"about_us.AboutUsPage\", related_name=\"carousel_images\")\n carousel_image = models.ForeignKey(\n \"wagtailimages.Image\",\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n\n panels = [ImageChooserPanel(\"carousel_image\")]\n\nclass ExecutiveOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='executives')\n executive = models.ForeignKey('executive.Executive', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('executive'),\n ]\n\nclass RelatedOrganizationOrderable(Orderable):\n page = ParentalKey('about_us.AboutUsPage', on_delete=models.CASCADE, related_name='related_organizations')\n organization = models.ForeignKey('base.RelatedOrganization', on_delete=models.CASCADE)\n\n panels = [\n SnippetChooserPanel('organization'),\n ]\n\n# REGIONAL PROFILES\n \nclass AboutUsPage(StandardPage):\n \n templates = \"about_us/about_us_page.html\"\n max_count = 1\n\n body = StreamField(\n [\n ('base', BaseStreamBlock()), # each block is stacked in template \n ('grid', StreamBlock( # each block is arranged in 2 grid system\n [ \n ('paragraph', ParagraphStreamBlock()), # each block is stacked in template \n ('image', ImageBlock()),\n ]\n ))\n ],\n null=True,\n blank=True\n )\n\n executive_panels = [\n InlinePanel('executives', min_num=0),\n ]\n\n organization_panels = [\n InlinePanel('related_organizations', min_num=0),\n ]\n\n # mission\n # add validation here for when there is title description required\n mission_title = models.CharField( \n max_length=1000,\n 
verbose_name='Section CTA link',\n help_text='mission title here',\n default='MISSION'\n )\n mission_sub_title = models.CharField(\n max_length=1000,\n verbose_name='mission sub title',\n help_text='mission subtitle here', null=True, blank=True\n )\n mission_description = models.TextField(null=True, blank=True)\n mission_icon = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n\n # vision\n\n vision_title = models.CharField(\n max_length=1000,\n verbose_name='vision title',\n help_text='vision title here',\n default='VISION'\n )\n vision_sub_title = models.CharField(\n max_length=1000,\n verbose_name='vision subtitle',\n help_text='vision subtitle here', null=True, blank=True\n )\n vision_description = models.TextField(null=True, blank=True)\n vision_icon = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n\n # mandate\n\n mandate_title = models.CharField(\n max_length=1000,\n verbose_name='mandate title',\n help_text='mandate title here',\n default='MANDATE'\n )\n mandate_sub_title = models.CharField(\n max_length=1000,\n verbose_name='mandate subtitle',\n help_text='mandate subtitle here', null=True, blank=True\n )\n mandate_description = models.TextField(null=True, blank=True)\n mandate_icon = models.ForeignKey('wagtailimages.Image', on_delete=models.SET_NULL, related_name='+', null=True, blank=True)\n \n\n # core functions\n functions_title = models.CharField(max_length=1000, null=True, blank=True, verbose_name='title',)\n functions_sub_title = models.CharField(max_length=1000, null=True, blank=True, verbose_name='Subtitle',)\n functions_description = models.TextField(null=True, blank=True, verbose_name='Description',)\n functions_cta_text = models.CharField(max_length=1000, null=True, blank=True, verbose_name='CTA text',)\n functions_cta_link = models.ForeignKey(\n 'core_functions.CoreFunctionIndexPage',\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n related_name='+',\n verbose_name='Core Function Index CTA link',\n help_text='Choose a core function index page to link to for the Call to Action(normally some index page)'\n )\n\n search_fields = StandardPage.search_fields + [\n index.SearchField('title'),\n index.SearchField('body'),\n ]\n\n standard_page_content_panels = [item for item in StandardPage.content_panels if not(isinstance(item, FieldPanel) and item.field_name=='body')]\n\n content_panels = [\n StreamFieldPanel('body'),\n ]\n\n core_function_panel = [\n MultiFieldPanel([\n FieldPanel('functions_title'),\n FieldPanel('functions_sub_title'),\n FieldPanel('functions_description'),\n FieldPanel('functions_cta_text'),\n FieldPanel('functions_cta_link'),\n ]),\n InlinePanel('functions', min_num=1),\n ]\n\n mvm_panels = [\n MultiFieldPanel([\n FieldPanel('mission_title'),\n FieldPanel('mission_sub_title'),\n FieldPanel('mission_description'),\n FieldPanel('mission_icon'),\n ], heading=\"mission\"),\n\n MultiFieldPanel([\n FieldPanel('vision_title'),\n FieldPanel('vision_sub_title'),\n FieldPanel('vision_description'),\n FieldPanel('vision_icon'),\n ], heading=\"vision\"),\n\n MultiFieldPanel([\n FieldPanel('mandate_title'),\n FieldPanel('mandate_sub_title'),\n FieldPanel('mandate_description'),\n FieldPanel('mandate_icon'),\n ], heading=\"mandate\"),\n ]\n\n carousel_panel = [\n MultiFieldPanel(\n [InlinePanel(\"carousel_images\", min_num=0, label=\"Image\")],\n heading=\"Carousel Images\",\n ),\n ]\n\n edit_handler = TabbedInterface([\n 
ObjectList(standard_page_content_panels, heading='page & hero'),\n ObjectList(carousel_panel, heading='carousel images'),\n ObjectList(content_panels, heading='body'), \n ObjectList(core_function_panel, heading='core functions'), \n ObjectList(executive_panels, heading='Executives'), \n ObjectList(organization_panels, heading='Related Organizations'), \n ObjectList(mvm_panels, heading='mvm'), \n ObjectList(StandardPage.promote_panels, heading='promote'),\n ObjectList(StandardPage.settings_panels, heading='settings'),\n ])\n\n def get_executives(self):\n return self.executives.filter(executive__featured=True).all()\n\n def get_related_organizations(self):\n return self.related_organizations.filter(organization__featured=True).all()\n \n def get_context(self, request):\n context = super(AboutUsPage, self).get_context(request)\n context['executives'] = self.get_executives()\n context['related_organizations'] = self.get_related_organizations()\n context['regional_profiles'] = self.get_children().type(RegionalProfilePage).live()\n return context","repo_name":"eliblurr/nrsa","sub_path":"nsra/about_us/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"15869966799","text":"from result import Result\n\nclass FemGrid(object):\n\n def __init__(self, elements, nodes):\n self.__elements=elements\n self.__nodes=nodes\n self.__kg=[[]]\n self.__fg=[]\n self.__result=Result()\n self.__dTau=0\n self.__tauArray=[]\n\n def getElement(self, i):\n return self.__elements[i]\n def getNode(self, i):\n return self.__nodes[i]\n\n def getKg(self):\n return self.__kg\n\n def getFg(self):\n return self.__fg\n\n def getNodesR(self):\n r=[]\n for node in self.__nodes:\n r.append(node.getR())\n return r\n\n def setTemperatures(self, temperatures):\n for n in self.__nodes:\n n.setTemp(temperatures.pop(0))\n\n def getTauArray(self):\n return self.__tauArray\n\n def setLocalMatrixAndVectors(self, globalData):\n for element in self.__elements:\n element.setLocalMatrixAndVector(globalData, self.__dTau)\n element.printLocalMatrixAndVector()\n\n def setGlobalMatrixAndVector(self, nh):\n self.__kg=[[0]* nh for i in range(0,nh)]\n self.__fg=[0 for i in range(0,nh)]\n\n for i in range(0, nh-1):\n ke=self.__elements[i].getKe()\n self.__kg[i][i] += ke[0][0]\n self.__kg[i][i+1] += ke[0][1]\n self.__kg[i+1][i] += ke[1][0]\n self.__kg[i+1][i+1] += ke[1][1]\n\n fe=self.__elements[i].getFe()\n self.__fg[i] += fe[0]\n self.__fg[i+1] += fe[1]\n\n def solveSystemOfEquatios(self):\n temperatures = self.__result.solveSystemOfEquation(self.__kg, self.__fg)\n return temperatures\n\n def simulateProcess(self, globalData):\n self.__dTau = globalData.getTauMax() / globalData.getNTime()\n tau = self.__dTau;\n\n while tau <= globalData.getTauMax():\n self.setLocalMatrixAndVectors(globalData)\n self.setGlobalMatrixAndVector(globalData.getNh())\n temperatures = self.solveSystemOfEquatios()\n self.setTemperatures(temperatures)\n self.__tauArray.append(tau)\n tau += self.__dTau\n\n return self.__result\n\n\n def printGlobalMatrixAndVector(self):\n print(\"Macierz globalna [K]: \\n\")\n for x in self.__kg:\n print(x)\n\n print(\"wektor globalny [F]: \\n\")\n\n for x in self.__fg:\n print(x)\n\n","repo_name":"karolskora1993/MES2","sub_path":"femgrid.py","file_name":"femgrid.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"3851985604","text":"from functools import partial\nfrom passivetotal.analyzer._common import (\n RecordList, Record, FirstLastSeen, PagedRecordList, ForPandas, AnalyzerError, AnalyzerAPIError,\n FilterDomains\n)\nfrom passivetotal.analyzer import get_api, get_config, get_object\n\n\n\nclass TrackerHistory(RecordList, PagedRecordList, ForPandas):\n\n \"\"\"Historical web component data.\"\"\"\n\n def _get_shallow_copy_fields(self):\n return ['_totalrecords','_query']\n \n def _get_sortable_fields(self):\n return ['firstseen','lastseen','category','label','hostname']\n \n def _get_dict_fields(self):\n return ['totalrecords']\n \n @property\n def as_dict(self):\n d = super().as_dict\n d.update({\n 'distinct_hostnames': [ str(host) for host in self.hostnames ],\n 'distinct_categories': list(self.categories),\n 'distinct_values': list(self.values)\n })\n return d\n \n def parse(self, api_response):\n \"\"\"Parse an API response.\"\"\"\n self._totalrecords = api_response.get('totalRecords', 0)\n self._records = []\n for result in api_response.get('results', []):\n self._records.append(TrackerRecord(result, self._query))\n\n @property\n def hostnames(self):\n \"\"\"List of unique hostnames in the tracker record list.\"\"\"\n return set(\n get_object(host) for host in set([record.hostname for record in self if record.hostname is not None])\n )\n \n @property\n def categories(self):\n \"\"\"List of unique categories (types) in the tracker record list.\"\"\"\n return set([record.category for record in self if record.category is not None])\n \n @property\n def values(self):\n \"\"\"List of unique tracker values in the tracker record list.\"\"\"\n return set([record.value for record in self if record.value is not None])\n\n\n\nclass TrackerRecord(Record, FirstLastSeen, ForPandas):\n\n \"\"\"Record of an observed trackers.\"\"\"\n\n def __init__(self, api_response, query=None):\n self._firstseen = api_response.get('firstSeen')\n self._lastseen = api_response.get('lastSeen')\n self._value = api_response.get('attributeValue')\n self._trackertype = api_response.get('attributeType')\n self._hostname = api_response.get('hostname')\n self._query = query\n \n def __str__(self):\n return '[{0.trackertype}] \"{0.value}\" ({0.firstseen_date} to {0.lastseen_date})'.format(self)\n \n def __repr__(self):\n return ''.format(self)\n \n def _get_dict_fields(self):\n return ['str:firstseen','str:lastseen','value','trackertype','hostname']\n \n def to_dataframe(self):\n \"\"\"Render this object as a Pandas DataFrame.\n\n :rtype: :class:`pandas.DataFrame`\n \"\"\"\n pd = self._get_pandas()\n cols = ['query','firstseen','lastseen','trackertype','value','hostname']\n as_d = {\n 'query': self._query,\n 'firstseen': self.firstseen,\n 'lastseen': self.lastseen,\n 'trackertype': self.trackertype,\n 'value': self.value,\n 'hostname': self.hostname,\n }\n return pd.DataFrame([as_d], columns=cols)\n\n @property\n def value(self):\n \"\"\"Value of the tracker.\"\"\"\n return self._value\n\n @property\n def hostname(self):\n \"\"\"Hostname the tracker was observed on.\"\"\"\n return self._hostname\n\n @property\n def trackertype(self):\n \"\"\"Type or category of web tracker.\"\"\"\n return self._trackertype\n \n @property\n def category(self):\n \"\"\"Category or type of web tracker; alias of `TrackerRecord.trackertype`.\"\"\"\n return self._trackertype\n \n @property\n def tracker(self):\n \"\"\"Tracker as a `Tracker` object to aid pivoting to other related IPs or hosts.\n \n :rtype: 
:class:`passivetotal.analyzer.trackers.Tracker`\n \"\"\"\n return Tracker(self.trackertype, self.value)\n\n\n\nclass TrackerSearchResults(RecordList, PagedRecordList, ForPandas, FilterDomains):\n\n \"\"\"Search results from a tracker query.\"\"\"\n\n def __init__(self, query=None, tracker_type=None, search_type=None):\n self._query = query\n self._tracker_type = tracker_type\n self._search_type = search_type\n self._records = []\n self._totalrecords = None\n self._pagination_current_page = 0\n self._pagination_page_size = 2000 # API is fixed at this page size\n self._pagination_has_more = True\n self._pagination_callable = partial(\n get_api('Trackers').search_trackers,\n value=self._query, \n tracker_type=self._tracker_type, \n result_type=self._search_type\n )\n\n def _get_shallow_copy_fields(self):\n return ['_totalrecords','_query', '_pagination_current_page','_pagination_page_size',\n '_pagination_callable','_pagination_has_more']\n \n def _get_sortable_fields(self):\n return ['firstseen','lastseen','searchtype','trackertype','query','host']\n \n def _get_dict_fields(self):\n return ['totalrecords']\n \n def _pagination_parse_page(self, api_response):\n self._totalrecords = api_response.get('totalRecords')\n results = api_response['results']\n self._records.extend([\n TrackerSearchRecord(r, self._query, self._tracker_type, self._search_type) for r in results\n ])\n \n @property\n def as_dict(self):\n d = super().as_dict\n return d\n \n @property\n def query(self):\n \"\"\"Query used to return this set of search results.\"\"\"\n return self._query\n \n @property\n def totalrecords(self):\n \"\"\"Total number of available records; may be greater than the number of results returned by the API.\"\"\"\n return self._totalrecords\n\n\n\nclass TrackerSearchRecord(Record, FirstLastSeen, ForPandas):\n\n \"\"\"Record representing a single search result in a tracker search.\"\"\"\n\n def __init__(self, api_response, query=None, tracker_type=None, search_type=None):\n self._firstseen = api_response.get('firstSeen')\n self._lastseen = api_response.get('lastSeen')\n self._query = query\n self._trackertype = tracker_type\n self._searchtype = search_type\n self._entity = api_response.get('entity',None)\n \n def __str__(self):\n return '[{0.trackertype}] @ \"{0.entity}\" ({0.firstseen_date} to {0.lastseen_date})'.format(self)\n \n def __repr__(self):\n return ' {0.entity}\">'.format(self)\n \n def _get_dict_fields(self):\n return ['str:firstseen','str:lastseen','query','str:host','trackertype','searchtype']\n \n def to_dataframe(self):\n \"\"\"Render this object as a Pandas DataFrame.\n\n :rtype: :class:`pandas.DataFrame`\n \"\"\"\n pd = self._get_pandas()\n cols = ['query','host','trackertype','firstseen','lastseen','searchtype']\n as_d = {\n 'query': self._query,\n 'host': self.host,\n 'trackertype': self.trackertype,\n 'firstseen': self.firstseen,\n 'lastseen': self.lastseen,\n 'searchtype': self.searchtype\n }\n return pd.DataFrame([as_d], columns=cols)\n\n @property\n def entity(self):\n \"\"\"Entity where a tracker was found - typically a hostname or an IP address.\n \n Returns the actual value returned by the API in the 'entity' response field.\n \"\"\"\n return self._entity\n \n @property\n def host(self):\n \"\"\"Host where a tracker was found.\n \n Returns either an `analyzer.Hostname` or `analyzer.IPAddress` object depending on\n the type of search which produced this record.\n \"\"\"\n if self._searchtype == 'addresses':\n return get_object(self.entity, type='IPAddress')\n elif 
self._searchtype == 'hosts' or self._searchtype is None:\n return get_object(self.entity, type='Hostname')\n else:\n return None\n \n @property\n def query(self):\n \"\"\"Query that produced this search result.\"\"\"\n return self._query\n \n @property\n def searchtype(self):\n \"\"\"Type of search (hostnames or IP addresses) that produced this search result.\n \n This value defines the type of records returned - either hostnames or IPs.\"\"\"\n return self._searchtype\n \n @property\n def trackertype(self):\n \"\"\"Type of tracker found on the entity (host) referenced in this search result.\"\"\"\n return self._trackertype\n\n @property\n def tracker(self):\n \"\"\"Tracker as a `Tracker` object to aid pivoting to other related IPs or hosts.\n \n :rtype: :class:`passivetotal.analyzer.trackers.Tracker`\n \"\"\"\n return Tracker(self.trackertype, self.value)\n\n\n\nclass Tracker:\n\n \"\"\"A web tracker with a type and value.\n \n In addition to a simple type/value mapping, this class also provides\n `ips` and `hostname` properties to find other entities that\n have the same type/value tuple.\n \"\"\"\n\n _instances = {}\n\n def __new__(cls, trackertype, value):\n valuehash = hash((trackertype, value))\n self = cls._instances.get(valuehash)\n if not self:\n self = cls._instances[valuehash] = object.__new__(cls)\n self._type = trackertype\n self._value = value\n self._ips = None\n self._hostnames = None\n return self\n \n def __str__(self):\n return '{0.trackertype}:{0.value}'.format(self)\n \n def __repr__(self):\n return ''.format(str(self))\n \n def _api_search(self, searchtype):\n attrs = {\n 'hosts': '_hostnames',\n 'addresses': '_ips'\n }\n results = TrackerSearchResults(self._value, self._type, searchtype)\n results.load_all_pages()\n setattr(self, attrs[searchtype], results)\n \n @property\n def trackertype(self):\n \"\"\"Type of tracker as defined by RiskIQ analysts.\"\"\"\n return self._type\n \n @property\n def value(self):\n \"\"\"Tracker value as observed.\"\"\"\n return self._value\n \n @property\n def observations_by_ip(self):\n \"\"\"IP addresses of hosts where this tracker was observed.\n \n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if self._ips is None:\n self._api_search('addresses')\n return self._ips\n \n @property\n def observations_by_hostname(self):\n \"\"\"Hostnames of sites where this tracker was observed.\n \n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if self._hostnames is None:\n self._api_search('hosts')\n return self._hostnames\n\n\n\nclass HasTrackers:\n\n \"\"\"An object with web tracker history.\"\"\"\n\n _REFERENCE_TRACKER_TYPES = {\n 'Hostname': ['DocumentBaseHost','HTTrackSourceHost','MarkOfTheWebSourceHost','SingleFileSourceHost'],\n 'IPAddress': ['DocumentBaseAddress','HTTrackSourceAddress','MarkOfTheWebSourceAddress','SingleFileSourceAddress']\n }\n\n def _api_get_trackers(self, start_date=None, end_date=None):\n \"\"\"Query the host attributes API for web tracker history.\n \n Only the first page of results is returned; pagination is not\n supported. 
Check the totalrecords attribute of the response object\n to determine if more records are available.\n \"\"\"\n query=self.get_host_identifier()\n response = get_api('HostAttributes').get_trackers(\n query=query,\n start=start_date,\n end=end_date\n )\n self._trackers = TrackerHistory(response, query)\n return self._trackers\n \n def _api_get_tracker_references(self):\n \"\"\"Query the host attributes API and search trackers for multiple trackertypes and searchtypes.\"\"\"\n self._tracker_references = TrackerSearchResults(query=self.get_host_identifier())\n tracker_types = self._REFERENCE_TRACKER_TYPES.get('Hostname' if self.is_hostname else 'IPAddress')\n for trackertype in tracker_types:\n for searchtype in ['addresses','hosts']:\n try:\n result = get_api('HostAttributes').search_trackers_by_type(\n query=self.get_host_identifier(),\n type=trackertype,\n searchType=searchtype\n )\n self._tracker_references.parse(result, trackertype, searchtype)\n except AnalyzerAPIError as e:\n if e.status_code == 404:\n continue\n raise e\n return self._tracker_references\n\n @property\n def trackers(self):\n \"\"\"History of trackers observed on this host.\n\n Trackers are analytics codes, social network accounts, and other unique\n details extracted from the web page by RiskIQ crawlers based on detection\n logic programmed by RiskIQ analysts.\n\n :rtype: :class:`passivetotal.analyzer.trackers.TrackerHistory`\n \"\"\"\n if getattr(self, '_trackers', None) is not None:\n return self._trackers\n config = get_config()\n return self._api_get_trackers(\n start_date=config['start_date'],\n end_date=config['end_date']\n )\n \n @property\n def tracker_references(self):\n \"\"\"Hosts with trackers that have this host as the value.\n \n Performs several API queries to create a composite result; create an instance of\n :class:`passivetotal.analyzer.Tracker` if you need more granular control.\n\n :rtype: :class:`passivetotal.analyzer.trackers.TrackerSearchResults`\n \"\"\"\n if getattr(self, '_tracker_references', None) is not None:\n return self._tracker_references\n return self._api_get_tracker_references()","repo_name":"passivetotal/python_api","sub_path":"passivetotal/analyzer/trackers.py","file_name":"trackers.py","file_ext":"py","file_size_in_byte":14036,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"94"}
+{"seq_id":"28294409674","text":"def check(a, b, c):\n if a**2 + b**2 == c**2:\n return True\n else:\n return False\n\n\nfor a in range(1, 1000):\n for b in range(a, 1000 - a):\n c = 1000 - a - b\n if check(a, b, c):\n print(a*b*c)\n","repo_name":"lraczyn/Project-Euler","sub_path":"009.py","file_name":"009.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"35580906908","text":"from os import environ, stat\nfrom os.path import exists, join, abspath\nfrom sys import stdout\nimport re\nfrom base64 import b64decode, b64encode\nfrom json import dump, load\nimport SCRAM\nfrom SCRAM.BuildSystem.ToolManager import ToolManager\n\n\nRUNTIME_SHELLS = {'-sh': 'BOURNE', '-csh': 'TCSH', '-win': 'CYGWIN'}\nORIG_SCRAM_ARCH = ''\ntry:\n ORIG_SCRAM_ARCH = environ['SCRAM_ARCH']\nexcept:\n pass\n\nclass RuntimeEnv(object):\n def __init__(self, area):\n self.recursive = True if 'SCRAM_RTBOURNE_SET' in environ else False\n self.optional_paths = {}\n self.area = area\n for e in [i for i in environ.keys() if i.startswith('SCRAMV3_BACKUP_')] :\n environ[e[15:]] = environ[e]\n del environ[e]\n self.OENV = environ.copy()\n self.OENV['SCRAM_ARCH'] = ORIG_SCRAM_ARCH\n self.env_backup_prefix = 'SRT_'\n self.skip_env = re.compile('^(_|PWD|PROMPT_COMMAND|SCRAM_.+|SCRAMV1_.+|SCRAM|LOCALTOP|RELEASETOP|BASE_PATH)$')\n self.shell = {}\n self.shell['BOURNE'] = {'EQUALS': '=', 'SEP': ':', 'EXPORT': 'export', 'UNEXPORT': 'unset'}\n self.shell['TCSH'] = {'EQUALS': ' ', 'SEP': ':', 'EXPORT': 'setenv', 'UNEXPORT': 'unsetenv'}\n self.shell['CYGWIN'] = {'EQUALS': '=', 'SEP': ';', 'EXPORT': 'unset', 'UNEXPORT': 'set'}\n self.shell['RTBOURNE'] = self.shell['BOURNE']\n self.env = {'variables': {}, 'paths': {}}\n self.force_tools_env = {}\n self.skip_runtime = {}\n self._unsetenv = False\n self.ignore_env = {}\n self._read_ignore_env()\n return\n\n def runtimebuildenv(self):\n save_env = {}\n environ[\"SCRAM_RUNTIME_TYPE\"]=\"BUILD\"\n for k in ['LD_PRELOAD']:\n if k in environ:\n save_env[k] = environ[k]\n del environ[k]\n self.save('RTBOURNE')\n for k, v in save_env.items():\n if k in self.ignore_env: continue\n environ[k] = v\n self.setenv(\"RTBOURNE\")\n if 'rtstring' in self.env:\n if 'RTBOURNE' in self.env['rtstring']:\n for e in self.env['rtstring']['RTBOURNE']:\n if e in self.ignore_env: continue\n environ[e] = self.env['rtstring']['RTBOURNE'][e]\n return environ\n\n def _fixpathvar(self, var, sep):\n if (var in environ) and (environ[var] != ''):\n return '%s%s' % (sep, environ[var])\n return ''\n\n def _fixlibenv(self, var):\n if environ['SCRAM_ARCH'].startswith('osx') and var == 'LD_LIBRARY_PATH':\n var = 'DYLD_FALLBACK_LIBRARY_PATH'\n return var\n\n def setenv(self, shell, ostream=None):\n if self.recursive:\n return\n if not ostream:\n ostream = stdout\n shell_data = self.shell[shell]\n sep = shell_data['SEP']\n udata = {}\n data = []\n if not self._unsetenv:\n env_prefix = self.env_backup_prefix\n env = self._runtime()\n for d in env['variables']:\n for var, val in d.items():\n udata[var] = 1\n data.append({var: val[0]})\n for var in env['path']:\n if '_SRTOPT_' in var:\n continue\n udata[var] = 1\n benv = '%s%s%s' % (env_prefix, var, self.backup_type[var])\n val = self._fixpathvar(var, sep)\n if benv in environ:\n val = environ[benv] + val\n data.append({var: val})\n if shell == 'RTBOURNE':\n data.append({'SCRAM_RTBOURNE_SET': environ['SCRAMRT_SET']})\n for var, val in env['xenv'].items():\n udata[var] = 1\n data.append({var: val})\n for var, val in environ.items():\n if var not in udata:\n data.insert(0, {var: val})\n udata[var] = 1\n oenv = self.OENV\n unset = \"\"\n unset_vars = \"\"\n for v in oenv:\n if v in udata:\n continue\n if v in environ:\n del environ[v]\n if shell == 'RTBOURNE':\n continue\n unset += \" %s\" % v\n if not v.startswith('SCRAMRT_') and \\\n not v.endswith('_SCRAMRT') and \\\n not v.endswith('_SCRAMRTDEL'):\n unset_vars += \" %s\\n\" % 
v\n if unset:\n if unset_vars and not self._unsetenv:\n SCRAM.printerror(\"**** Following environment variables are going to be unset.\\n%s\" % unset_vars)\n print(\"%s %s;\" % (shell_data['UNEXPORT'], unset), file=ostream)\n for d in data:\n for var, val in d.items():\n if var in self.ignore_env: continue\n environ[var] = val\n if shell == 'RTBOURNE': continue\n if var != 'PATH' and var in oenv:\n if val == oenv[var]:\n continue\n print('%s %s%s\\\"%s\\\";' % (shell_data['EXPORT'], var,\n shell_data['EQUALS'], val), file=stdout)\n return True\n\n def save(self, shell, ostream=None):\n if self.recursive:\n return\n if not ostream:\n ostream = stdout\n if 'SCRAMRT_SET' in environ:\n self._restore_environment(shell)\n env_prefix = self.env_backup_prefix\n env = self._runtime()\n data = []\n sep = self.shell[shell]['SEP']\n backup_vars = \"\"\n for h in env['variables']:\n for (name, value) in h.items():\n if name in self.ignore_env: continue\n btype = '_SCRAMRT'\n if name not in environ:\n btype += 'DEL'\n else:\n backup_vars += \"%s=%s;\" % (name, environ[name])\n data.append({'%s%s%s' % (env_prefix, name, btype): value[0]})\n if backup_vars:\n backup_vars = backup_vars.strip(';')\n data.append({'SCRAMRT_BACKUP_ENV': b64encode(backup_vars.encode('utf-8')).decode('utf-8')})\n self.backup_type = {}\n opt = {}\n regexp = re.compile('^(.+?)_SRTOPT_(.+)$')\n for (name, value) in env['path'].items():\n m = regexp.match(name)\n if m:\n if m.group(2) in self.ignore_env: continue\n if m.group(1) in self.optional_paths:\n if not m.group(2) in opt:\n opt[m.group(2)] = {}\n opt[m.group(2)][m.group(1)] = 1\n continue\n btype = '_SCRAMRT'\n if name not in environ:\n btype += 'DEL'\n data.append({'%s%s%s' % (env_prefix, name, btype): self._cleanpath(sep.join(value), sep)})\n self.backup_type[name] = btype\n for v in opt:\n btype = ''\n nbtype = ''\n if v in self.backup_type:\n btype = self.backup_type[v]\n nbtype = btype\n else:\n nbtype = '_SCRAMRT'\n if v in environ:\n nbtype += 'DEL'\n for t in opt[v]:\n xindex = len(data)\n pval = ''\n if btype:\n k = '%s%s%s' % (env_prefix, v, btype)\n i = -1\n for d in data:\n i += 1\n if k not in d:\n continue\n xindex = i\n pval = d[k]\n break\n nval = sep.join(env['path']['%s_SRTOPT_%s' % (t, v)])\n if pval:\n nval = '%s%s%s' % (nval, sep, pval)\n if xindex == len(data):\n data.append({})\n data[xindex]['%s%s%s' % (env_prefix, v, nbtype)] = self._cleanpath(nval, sep)\n scram_set = ''\n for e in ['SCRAM_PROJECTNAME', 'SCRAM_PROJECTVERSION', 'SCRAM_ARCH', 'SCRAM_VERSION']:\n scram_set += '%s:' % environ[e]\n data.append({'SCRAMRT_SET':\n '%s%s' % (scram_set, env_prefix)})\n for v in data:\n for name, value in v.items():\n environ[name] = value.replace('\"', '\\\\\"').replace('`', '\\\\`')\n return\n\n def optional_env(self, types=[]):\n self.optional_paths = {}\n for t in types:\n self.optional_paths[t.upper()] = 1\n return\n\n def unsetenv(self, shell):\n if 'SCRAMRT_SET' not in environ:\n return\n self._unsetenv = True\n self._restore_environment(shell)\n self.setenv(shell)\n self._unsetenv = False\n return\n\n def _restore_environment(self, shell):\n global environ\n penv = environ['SCRAMRT_SET'].split(':')\n del environ['SCRAMRT_SET']\n sep = self.shell[shell]['SEP']\n backup_env = environ.copy()\n prefix = self.env_backup_prefix if len(penv)<5 else penv[4]\n bvar = 'SCRAMRT_BACKUP_ENV'\n bval = {} if bvar not in environ else \\\n dict([item.split('=', 1)\n for item in b64decode(environ[bvar]).decode('utf-8').split(';')\n if item])\n for name, value in 
environ.items():\n if name.startswith('SCRAMRT_'):\n del backup_env[name]\n elif self.skip_env.match(name):\n continue\n elif name.endswith('_SCRAMRT') or name.endswith('_SCRAMRTDEL'):\n del backup_env[name]\n type = ''\n var = name\n if name.endswith('_SCRAMRTDEL'):\n var = name[:-11]\n type = 'DEL'\n else:\n var = name[:-8]\n if prefix:\n var = var[len(prefix):]\n if var in backup_env:\n if type == 'DEL':\n del backup_env[var]\n continue\n val = backup_env[var]\n if var in bval:\n val = bval[var]\n elif val == value:\n val = ''\n else:\n regex = re.compile('^(.*?%s|)%s(%s.*|)$' % (sep, re.escape(value), sep))\n m = regex.match(val)\n if m:\n val = '%s%s' % (m.group(1), m.group(2))\n val = val.strip(sep)\n val = val.replace('%s%s' % (sep, sep), sep)\n if not val:\n del backup_env[var]\n else:\n backup_env[var] = val\n for e in backup_env:\n environ[e] = backup_env[e]\n for e in list(environ.keys()):\n if not e in backup_env:\n del environ[e]\n\n def _update_overrides(self):\n if 'PATH' in self.env['rtstring']['path']:\n override = join(SCRAM.BASEPATH, 'share', 'overrides', 'bin')\n if exists(override):\n self.env['rtstring']['path']['PATH'].insert(0, override)\n override = join(SCRAM.BASEPATH, 'share', 'overrides', 'python')\n if exists(override):\n for v in [\"PYTHONPATH\", \"PYTHON27PATH\", \"PYTHON3PATH\"]:\n if v in self.env['rtstring']['path']:\n self.env['rtstring']['path'][v].insert(0, override)\n for e in [\"PATH\", \"LD_LIBRARY_PATH\", \"PYTHONPATH\", \"PYTHON27PATH\", \"PYTHON3PATH\"]:\n if e not in self.env['rtstring']['path']:\n continue\n ev = \"SCRAM_PREFIX_%s\" % e\n if ev not in self.OENV:\n continue\n for override in self.OENV[ev].split(\":\"):\n if exists(override):\n self.env['rtstring']['path'][e].insert(0, override)\n if 'SCRAM_IGNORE_RUNTIME_HOOK' not in self.OENV:\n self._runtime_hooks()\n if 'SCRAM_IGNORE_SITE_RUNTIME_HOOK' not in self.OENV:\n self._runtime_hooks(SCRAM.get_site_hooks())\n return\n\n def _runtime_hooks(self, hook_dir=None):\n if not hook_dir: hook_dir = self.area.config()\n debug='SCRAM_HOOKS_DEBUG' in self.OENV\n hook = join(hook_dir, 'SCRAM', 'hooks', 'runtime-hook')\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK: %s\" % hook)\n if not exists(hook):\n return\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK: Found\")\n regexp = re.compile(\n '^runtime:((path:(append|prepend|remove|replace):[a-zA-Z0-9-_]+)|(variable:[a-zA-Z0-9-_]+))=(.*)$',\n re.I)\n err, out = SCRAM.run_command('SCRAMRT_SET=true %s 2>&1' % hook)\n if debug:\n SCRAM.printerror(\"SCRAM_HOOK:\\n%s\" % out)\n for line in out.split('\\n'):\n if not regexp.match(line):\n continue\n vals = line.split('=', 1)\n items = vals[0].split(':')\n vtype = items[1].lower()\n if vtype == 'path':\n if vtype not in self.env[\"rtstring\"]:\n self.env[\"rtstring\"][vtype] = {}\n cache = self.env[\"rtstring\"][vtype]\n vtype = items[2].lower()\n evar = items[3]\n if (vtype == 'replace'):\n xitems = vals[1].split(\"=\", 1)\n vals[1] = xitems[0]\n vals.append(xitems[1])\n elif (vtype != 'remove') and (evar not in cache):\n cache[evar] = []\n for d in vals[1].split(':'):\n d = d.strip()\n if not d:\n continue\n if vtype == 'append':\n cache[evar].append(d)\n elif vtype == 'prepend':\n cache[evar].insert(0, d)\n elif vtype == 'remove':\n if d in cache[evar]:\n cache[evar].remove(d)\n elif vtype == 'replace':\n npath = []\n for x in cache[evar]:\n if x != d:\n npath.append(x)\n else:\n for r in vals[2].split(\":\"):\n npath.append(r)\n cache[evar] = npath\n elif vtype == 'variable':\n if 'variables' not in 
self.env['rtstring']:\n self.env['rtstring']['variables'] = []\n found = False\n for i, val in enumerate(self.env['rtstring']['variables']):\n if items[2] in val:\n val[items[2]] = [vals[1]]\n found = True\n break\n if not found:\n self.env['rtstring']['variables'].append({items[2]: [vals[1]]})\n return\n\n def _runtime(self):\n if 'rtstring' in self.env:\n return self.env['rtstring']\n self.env['rtstring'] = {'variables': [], 'path': {}, 'RTBOURNE': {}, 'xenv': {}}\n cache = join(self.area.archdir(), 'RuntimeCache.json')\n if exists(cache):\n st = stat(cache)\n if (st.st_size > 0):\n toolcache = self.area.toolcachename()\n if st.st_mtime >= stat(toolcache).st_mtime:\n with open(cache) as ref:\n self.env['rtstring'] = load(ref)\n self._update_overrides()\n return self.env['rtstring']\n toolmanager = ToolManager(self.area)\n tools = toolmanager.loadtools()\n otools = toolmanager.toolsdata()\n self.force_tools_env = {'self': 1, environ['SCRAM_PROJECTNAME'].lower(): 1}\n self.skip_runtime = {}\n if 'self' in tools:\n stool = tools['self']\n otools.append(stool)\n if 'FLAGS' in stool:\n for f in ['NO_EXTERNAL_RUNTIME', 'SKIP_TOOLS_SYMLINK', 'DEFAULT_COMPILER']:\n if f not in stool['FLAGS']:\n continue\n if f == 'NO_EXTERNAL_RUNTIME':\n for x in stool['FLAGS'][f]:\n x = self._fixlibenv(x)\n self.skip_runtime[self._fixlibenv(x)] = 1\n elif f == 'SKIP_TOOLS_SYMLINK':\n for t in stool['FLAGS'][f]:\n self.force_tools_env[t.lower()] = 1\n elif f == 'DEFAULT_COMPILER':\n self.env['rtstring']['RTBOURNE'][f] = stool['FLAGS'][f][0]\n compilertools = []\n for t in otools[::-1]:\n if 'SCRAM_COMPILER' in t:\n compilertools.append(t)\n else:\n self._toolenv(t)\n for t in compilertools:\n self._toolenv(t)\n for k in list(self.env):\n if k != 'rtstring':\n del self.env[k]\n try:\n with open(cache, 'w') as ref:\n dump(self.env['rtstring'], ref, sort_keys=True, indent=2)\n except (OSError, IOError) as e:\n pass\n self._update_overrides()\n return self.env['rtstring']\n\n def _toolenv(self, tool):\n tname = tool['TOOLNAME']\n if (tname != 'self') and ('FLAGS' in tool) and ('SKIP_TOOL_SYMLINKS' in tool['FLAGS']):\n self.force_tools_env[tname] = 1\n if ('RUNTIME' not in tool) or \\\n not tool['RUNTIME']:\n return\n projTool = True if tname == environ['SCRAM_PROJECTNAME'].lower() else False\n gmake = \"\"\n for trtvar, trtval in tool['RUNTIME'].items():\n if trtvar in self.ignore_env: continue\n if trtvar.startswith('PATH:'):\n var = trtvar[5:]\n if var in self.ignore_env: continue\n if projTool and environ['SCRAM_ARCH'].startswith('osx') and \\\n var == 'DYLD_LIBRARY_PATH':\n var = 'LD_LIBRARY_PATH'\n var = self._fixlibenv(var)\n if var not in self.env['rtstring']['path']:\n self.env['rtstring']['path'][var] = []\n self.env['paths'][var] = {}\n for val in trtval:\n if tname == 'gmake' and var == 'PATH' and \\\n gmake == '' and exists(join(val, 'gmake')):\n gmake = val + \"/\"\n self.env['rtstring']['xenv']['SCRAM_GMAKE_PATH'] = gmake\n if (var not in self.skip_runtime) or (tname in self.force_tools_env):\n if val not in self.env['paths'][var]:\n self.env['paths'][var][val] = 1\n self.env['rtstring']['path'][var].append(val)\n elif trtvar not in self.env['variables']:\n self.env['rtstring']['variables'].append({trtvar: trtval})\n\n def _read_ignore_env(self):\n if not 'HOME' in environ: return\n env_file = join(environ[\"HOME\"], \".scramrc\", \"runtime\")\n if not exists(env_file): return\n ignore_env = \"\"\n with open(env_file) as f_in:\n for line in f_in.readlines():\n line = line.strip()\n if not line or 
line.startswith(\"#\"):\n continue\n items = line.split(\":\", 1)\n if (len(items)==2) and (items[0]==\"ignore\"):\n for e in [ x for x in items[1].split(\" \") if x]:\n ignore_env += \" %s\\n\" % e\n self.ignore_env[e] = 1\n if ignore_env:\n SCRAM.printerror(\"**** Following environment variables are ignored via ~/.scramrc/runtime and will not be set/changed.\\n%s\" % ignore_env)\n return\n\n\n def _cleanpath(self, path, sep):\n upath = {}\n opath = []\n for p in path.split(sep):\n p = abspath(p)\n if not p:\n continue\n while '/./' in p:\n p = p.replace('/./', '/')\n while '//' in p:\n p = p.replace('//', '/')\n while p.endswith('/.'):\n p = p[:-2]\n if not p:\n p = '/'\n if p not in upath:\n upath[p] = 1\n opath.append(p)\n return sep.join(opath)\n","repo_name":"cms-sw/SCRAM","sub_path":"SCRAM/Core/RuntimeEnv.py","file_name":"RuntimeEnv.py","file_ext":"py","file_size_in_byte":20420,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"}
+{"seq_id":"28800899096","text":"import os\nimport pandas as pd\nfrom bids import BIDSLayout\n\nimport boldsignals as bold\nimport process_connectome as pc\nimport lib\n\nimport importlib\n\nimportlib.reload(lib)\nimportlib.reload(bold)\nimportlib.reload(pc)\n\npd.options.mode.chained_assignment = None\n\n# %%\n# Lecture du format BIDS\ndataset_name = 'lightduo_sample'\nderivatives_path = os.path.abspath(r'datasets_sample\\lightduo-preproc-fmriprep')\nlayout = BIDSLayout(derivatives_path, index_metadata=True, reset_database=False, validate=False,\n config=[\"bids\", \"derivatives\"])\n\natlas_name = 'CAB-NP'\natlas_path = os.path.abspath(r'..\\..\\atlas\\CAB-NP_volumetric\\CAB-NP_volumetric_liberal.nii.gz')\n\nconfounds_strategies = 'compcor'\n# %%\n\nsubjects_id_list = layout.get_subjects()\ndirectory = 'timeSeries_files'\nsub_dir = os.path.join(directory, dataset_name, atlas_name)\nif not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n\nbold_signals_config = pd.DataFrame(columns=subjects_id_list, index=['path', 'nlevels'])\ninfo_csv_path = os.path.join(sub_dir, 'bold_signals_config.csv')\n\n\n# %%\nfor subj_id in subjects_id_list:\n runs_bidsfiles_per_subjects = layout.get(subject=subj_id, suffix='bold', scope='derivatives', extension='nii.gz')\n runs_bidsfiles_ids_list_per_subject = []\n bold_signals_list_per_subject = []\n tr_runs_per_subject = []\n\n for run_bidsfile in runs_bidsfiles_per_subjects:\n run_bidsfile_ids = bold.get_keys_of_interest(run_bidsfile)\n tr_per_run = bold.get_tr_per_run(run_bidsfile)\n\n run_nifti_path = run_bidsfile.path\n bold_signals_confounds = bold.get_bold_signals_confounds(run_nifti_path, confounds_strategies)\n\n bold_signal = bold.calculate_bold_signals(run_nifti_path, atlas_path)\n bold_signal_cleaned = bold.clean_bold_signals(bold_signal, bold_signals_confounds, tr_per_run)\n\n runs_bidsfiles_ids_list_per_subject.append(run_bidsfile_ids)\n bold_signals_list_per_subject.append(str(bold_signal_cleaned))\n tr_runs_per_subject.append(tr_per_run)\n\n bold_signals_per_subject_df_output = bold.store_bold_signals_in_df(bold_signals_list_per_subject, tr_runs_per_subject,\n runs_bidsfiles_ids_list_per_subject,\n confounds_strategies)\n\n csv_output_path = os.path.join(sub_dir, 'id_boldsignals.csv'.replace('id', subj_id))\n bold.save_bold_signals_output(bold_signals_per_subject_df_output, csv_output_path)\n\n bold_signals_config.loc['path', subj_id] = csv_output_path\n bold_signals_config.loc['nlevels', subj_id] = bold_signals_per_subject_df_output.columns.nlevels\n\nbold_signals_config.to_csv(info_csv_path, header=True)\n\n","repo_name":"pnplab/biotypes_robust","sub_path":"connectomes_extraction/calcul_boldsignals.py","file_name":"calcul_boldsignals.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"20065912269","text":"import sys\nimport time\nimport telepot\nimport pyautogui\nfrom telepot.loop import MessageLoop\nfrom tokens import *\n\nclass MyBot(telepot.Bot):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(MyBot, self).__init__(*args, **kwargs)\n\t\tself.answerer = telepot.helper.Answerer(self)\n\t\tself._message_with_inline_keyboard = None\n\t\t\n\tdef on_chat_message(self, msg):\n\t\tcontent_type, chat_type, chat_id = telepot.glance(msg)\n\t\t\n\t\t# For debugging and get admin id\n\t\t# print(content_type, chat_type, chat_id)\n\t\n\t\tif chat_id in adminId:\n\t\t\tif content_type == 'text':\n\t\t\t\tif msg['text'] == '/capture':\n\t\t\t\t\tbot.sendChatAction(chat_id, 'typing')\n\t\t\t\t\tbot.sendMessage(chat_id, \"Capturing image\")\n\t\t\t\t\tself.capture_img()\n\t\t\t\t\tbot.sendPhoto(chat_id, photo=open('img\\\\screenshot.png', 'rb'))\n\t\t\n\t\telse:\n\t\t\tbot.sendMessage(chat_id, \"Not admin\")\n\tdef capture_img(self):\n\t\tpic = pyautogui.screenshot()\n\t\tpic.save('img\\\\screenshot.png')\n\t\treturn\n\t\nTOKEN = telegrambot\n\nbot = MyBot(TOKEN)\nMessageLoop(bot).run_as_thread()\n# Umcomment for debugging\n# print('Listening ...')\n\nwhile 1:\n\ttime.sleep(10)","repo_name":"shafiqsaaidin/monbot","sub_path":"mon.py","file_name":"mon.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"}
+{"seq_id":"30369125974","text":"\"\"\"\nObjects relating to the support region of the basis used to impose the boundary\nconditions.\n\"\"\"\n\nimport numpy as np\nfrom devito.tools.data_structures import frozendict\nfrom functools import reduce\nfrom schism.geometry.skin import stencil_footprint\n\n\ndef get_points_and_oob(support_points, modified_points, geometry):\n \"\"\"\n Get the points used by each stencil and a mask indicating where these are\n out of bounds.\n\n Parameters\n ----------\n support_points : tuple\n Points in the support region of the stencil\n modified_points : tuple\n Points where modified stencils are required\n geometry : BoundaryGeometry\n Geometry of the boundary. Used to obtain the Grid.\n\n Returns\n -------\n points : tuple\n Points accessed by the stencil when applied at the modified points\n oob : ndarray\n Boolean mask for points. True where points are out of bounds\n \"\"\"\n grid = geometry.grid\n ndims = len(grid.dimensions)\n points = tuple([support_points[dim][:, np.newaxis]\n + modified_points[dim][np.newaxis, :]\n for dim in range(ndims)])\n\n # Out of bounds points\n oob = [np.logical_or(points[dim] < 0, points[dim] >= grid.shape[dim])\n for dim in range(ndims)]\n\n # If a point is out of bounds in any dimension, then label as oob\n oob_msk = reduce(np.logical_or, oob)\n\n return points, oob_msk\n\n\ndef footprint_union(fp1, fp2):\n \"\"\"Get the union of two stencil footprints\"\"\"\n fpa1 = np.array(fp1)\n fpa2 = np.array(fp2)\n fp_all = np.concatenate((fpa1, fpa2), axis=-1)\n fp_union = np.unique(fp_all, axis=-1)\n # The union footprint\n footprint = tuple([fp_union[i] for i in range(fp_union.shape[0])])\n # The mask points of fp2 in union footprint\n # Compares the coordinates, checks there is a match in all dims,\n # then sets true where a coordinate from fpa2 is located in\n # fp_union\n mask = (fp_union[:, np.newaxis] == fpa2[..., np.newaxis]).all(0).any(0)\n return footprint, mask\n\n\nclass SupportRegion:\n \"\"\"\n The support region for a set of basis functions.\n\n Parameters\n ----------\n basis_map : dict\n Mapping between functions and their respective basis functions\n radius_map : dict\n Mapping between functions and the radius of their basis. Note that this\n is not a true radius, so much as a convenient measure of extent\n measured in grid increments.\n deriv : Derivative\n The derivative of the underlying stencil. 
Used to ensure that\n the resultant support region is the union of extrapolant and\n interior stencils.\n\n Attributes\n ----------\n footprint_map : dict\n Mapping between functions and the points within their support region\n npts_map : dict\n Mapping between functions and the number of points within their support\n region.\n max_span_func : Function\n The function with the largest span\n\n Methods\n -------\n expand_radius(inc)\n Return a support region with an expanded radius\n \"\"\"\n def __init__(self, basis_map, radius_map, deriv):\n self._basis_map = basis_map\n self._radius_map = radius_map\n self._max_span_func = max(self.radius_map, key=self.radius_map.get)\n\n # Derivative for footprint of the underlying stencil\n self._deriv = deriv\n\n self._get_footprint()\n\n def _get_footprint(self):\n \"\"\"Get the stencil footprint for each function\"\"\"\n footprints = {}\n npts_map = {}\n if self.basis_map.keys() != self.radius_map.keys():\n # Should never end up here\n raise ValueError(\"Mismatch in functions supplied\")\n\n for func in self.basis_map:\n if self.basis_map[func].dims == func.space_dimensions:\n # N-D basis so N-D support region\n footprint = self._get_circle_support(func)\n else:\n if len(self.basis_map[func].dims) != 1:\n # Should never end up here\n raise ValueError(\"Basis neither 1D or N-D\")\n # 1D basis\n footprint = self._get_linear_support(func)\n footprints[func] = footprint\n\n if func is self.deriv.expr:\n base_footprint = self._get_base_support()\n # Get union with support region of interior\n # stencil. This prevents issues when interior\n # stencils have a larger footprint than that\n # used for extrapolation.\n union, mask = footprint_union(base_footprint,\n footprints[func])\n footprints[func] = union\n self._extrapolant_mask = mask\n npts_map[func] = footprints[func][0].shape[0]\n self._footprint_map = frozendict(footprints)\n self._npts_map = frozendict(npts_map)\n\n def _get_circle_support(self, func):\n \"\"\"Get the footprint of a circular support region\"\"\"\n # Essentially makes a square then cookie-cutters it\n radius = self.radius_map[func]\n dims = func.space_dimensions\n ndims = len(dims)\n # Make a meshgrid of indices (of int type)\n # Indexing type changes order of points but not points overall\n # 'ij' results in most logical ordering however\n msh = np.meshgrid(*[np.arange(-radius, radius+1, dtype=int)\n for dim in dims],\n indexing='ij')\n # Mask it by radius\n mask = np.sqrt(sum(msh[i]**2 for i in range(ndims))) < radius + 0.5\n # Do np.where to get meshgrid indices\n locs = np.where(mask)\n # Use indices to get the physical indices from the meshgrid\n footprint = [msh[i][locs].flatten() for i in range(ndims)]\n # Return these as a tuple of arrays\n return tuple(footprint)\n\n def _get_linear_support(self, func):\n \"\"\"Get the footprint of a 1D support region\"\"\"\n footprint = []\n basis = self.basis_map[func]\n radius = self.radius_map[func]\n for dim in func.space_dimensions:\n if dim in basis.dims:\n footprint.append(np.arange(-radius, radius+1, dtype=int))\n else: # No offset in other dimensions\n footprint.append(np.zeros(1+2*radius, dtype=int))\n return tuple(footprint)\n\n def _get_base_support(self):\n \"\"\"Get the footprint of the interior stencil\"\"\"\n footprint = stencil_footprint(self.deriv)\n return footprint\n\n def expand_radius(self, inc):\n \"\"\"\n Return another support region with radius expanded by the increment\n specified\n\n Parameters\n ----------\n inc : int\n The amount by which the radius should be 
incremented\n\n Returns\n -------\n expanded : SupportRegion\n The expanded support region\n \"\"\"\n new_radius_map = {func: rad + inc\n for func, rad in self.radius_map.items()}\n return self.__class__(self.basis_map, new_radius_map, self.deriv)\n\n @property\n def basis_map(self):\n \"\"\"Mapping between functions and respective basis functions\"\"\"\n return self._basis_map\n\n @property\n def radius_map(self):\n \"\"\"Mapping between functions and the radius of their basis\"\"\"\n return self._radius_map\n\n @property\n def max_span_func(self):\n \"\"\"The function with the largest support region span\"\"\"\n return self._max_span_func\n\n @property\n def footprint_map(self):\n \"\"\"\n Mapping between functions and the footprint of their support region.\n \"\"\"\n return self._footprint_map\n\n @property\n def npts_map(self):\n \"\"\"\n Mapping between functions and the number of points in their support\n regions.\n \"\"\"\n return self._npts_map\n\n @property\n def deriv(self):\n \"\"\"\n Footprint of the underlying derivative stencil. Used when i\n \"\"\"\n return self._deriv\n\n @property\n def extrapolant_mask(self):\n \"\"\"\n Return the mask limiting the footprint in the field on which the\n derivative is taken to that of the extrapolation support region.\n \"\"\"\n return self._extrapolant_mask\n","repo_name":"devitocodes/schism","sub_path":"schism/geometry/support_region.py","file_name":"support_region.py","file_ext":"py","file_size_in_byte":8337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"44157011496","text":"# Write a function SkewArray(Genome) that takes a DNA string Genome as input \n# and returns the skew array of Genome in the form of a list whose i-th \n# element is Skew[i]. Then add this function to Replication.py.\nimport matplotlib.pyplot as plt\n\ndef skewArray(genome):\n array = [0 for i in range(len(genome) + 1)]\n\n for i in range(len(genome)):\n if genome[i] == 'A' or genome[i] == 'T':\n array[i+1] = array[i]\n elif genome[i] == 'G':\n array[i+1] = array[i] + 1\n elif genome[i] == 'C':\n array[i+1] = array[i] - 1\n return array\n\n\ndef skewArray2(genome):\n array = [0 for i in range(len(genome) + 1)]\n result = {}\n\n for i in range(len(genome)):\n if genome[i] == 'A' or genome[i] == 'T':\n array[i+1] = array[i]\n elif genome[i] == 'G':\n array[i+1] = array[i] + 1\n elif genome[i] == 'C':\n array[i+1] = array[i] - 1\n for i in range(len(array)):\n result[i] = array[i]\n return result\n\nif __name__ == '__main__':\n \"\"\" array = skewArray('AGCGTGCCGAAATATGCCGCCAGACCTGCTGCGGTGGCCTCGCCGACTTCACGGATGCCAAGTGCATAGAGGAAGCGAGCAAAGGTGGTTTCTTTCGCTTTATCCAGCGCGTTAACCACGTTCTGTGCCGACTTT')\n plt.plot(array[:], marker='o')\n plt.show() \"\"\"\n skew = skewArray(\"GATACACTTCCCGAGTAGGTACTG\")\n print(skew)\n\n","repo_name":"PaulOnyekwelu/biology-meets-programming","sub_path":"week-2/skew_array.py","file_name":"skew_array.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"23204884080","text":"import pickle\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport sagemaker\nimport boto3 \nimport s3fs\nimport json\nimport datetime\nfrom tzlocal import get_localzone as tzlocal\nimport numpy as np\nimport datetime\nfrom tzlocal import get_localzone as tzlocal\nimport boto3\nfrom tqdm import tqdm_notebook as tqdm\nimport os\nfrom os import path \nimport json\nfrom deep_ar import series_to_jsonline\n\nbmw_bucket_name = os.environ['BMW_DATA_BUCKET'] #'fog-bigdata-bmw-data'\ndata_bucket_name = os.environ['SANITIZED_DATA_BUCKET'] #'fog-datasets'\ndata_freq = os.environ['DATA_FREQUENCY'] #'5min'\n\ns3_con = boto3.client('s3')\nobj_list = s3_con.list_objects(Bucket=bmw_bucket_name,Prefix='metrics2/output')['Contents']\nfile_names = [key['Key'] for key in obj_list]\n\ndata = []\nfor file_name in tqdm(file_names):\n # Select file with the correct extension\n if not file_name.endswith('output.json'):\n continue\n file_str = s3_con.get_object(Bucket=bmw_bucket_name, Key=file_name).get('Body').read().decode('utf-8')\n batch = eval(file_str)\n \n # Aggregates response code into a unique time series\n for code in ['response-code-200','response-code-4xx','response-code-5xx']:\n if code not in batch.keys():\n continue\n data = data + batch[code]['Datapoints']\n \n# Creates a pandas Dataframe from data\ndf = pd.DataFrame(data)\ndf.index = [i.replace(tzinfo=None) for i in pd.to_datetime(df.Timestamp)]\ndf = df.drop(columns=['Unit'])\ndf = df.groupby('Timestamp').max()\nseries = pd.Series(data=df.SampleCount.values, index=[i.replace(tzinfo=None) for i in pd.to_datetime(df.index)])\nseries = series.sort_index()\n#series = series[series.index < datetime.datetime(2019,1,26,0,0,0)]\nseries = series.groupby([pd.Grouper(freq=data_freq)]).sum()\n\n# Apply a running mean of the previous 15 minutes to each datapoint -> smoothing to remove anomalies and have a clean training set\nn_backsteps = 5\nconv = np.hstack([np.ones(n_backsteps)/n_backsteps,np.zeros(n_backsteps-1)])\npad_vals = np.pad(series.values,n_backsteps-1,mode='edge')\nseries = pd.Series(data=np.convolve(pad_vals,conv,mode='valid'),index=series.index)\n\n\n# Scale down a part of the data (that was incorrectly scaled, for unknown reasons)\nseries[np.logical_and(series.index >= pd.Timestamp(2019,1,26),series.index < pd.Timestamp(2019,1,31,8,55))] /= 2 \n\ntest_idx = np.logical_and(series.index > datetime.datetime(2019,1,28,0,0,0), series.index <= datetime.datetime(2019,2,4,0,0,0))\ntrain_idx = series.index <= datetime.datetime(2019,1,28,0,0,0)\n\n\n# Upload RCF-shaped data\nprint(\"Uploading RCF-shaped data\")\nprefix = 'rcf'\n\ns3_data_path = \"{}/{}/data\".format(data_bucket_name, prefix)\ns3filesystem = s3fs.S3FileSystem()\n\nwith s3filesystem.open(s3_data_path + \"/train/data.csv\", 'w') as fp:\n fp.write(series[train_idx].to_csv())\n\nwith s3filesystem.open(s3_data_path + \"/test/data.csv\", 'w') as fp:\n fp.write(series[test_idx].to_csv())\n\n# Upload DeepAR-shaped data\nprint(\"Uploading DeepAR-shaped data\")\n\n# Create feature series of holidays\nend_of_holiday = datetime.date(2019, 1, 7)\nholidays_data = [1 if time < pd.Timestamp(end_of_holiday,tz=None) else 0 for time in series.index]\nholidays_feature_serie = pd.Series(data=holidays_data, index=series.index)\n\n# Create feature series of weekends\nweekends_date = [0 if time.weekday() < 5 else 1 for time in series.index]\nweekends_feature_series = pd.Series(data=weekends_date, index=series.index)\n\n\n# Upload preprocessed data for deep 
AR\nprefix = 'deep_ar'\ns3_data_path = \"{}/{}/data\".format(data_bucket_name, prefix)\n\nwith s3filesystem.open(s3_data_path + \"/train/data.json\", 'w') as fp:\n fp.write(series_to_jsonline(series[train_idx], [list(holidays_feature_serie[train_idx]), list(weekends_feature_series[train_idx])]))\n\nwith s3filesystem.open(s3_data_path + \"/test/data.json\", 'w') as fp:\n fp.write(series_to_jsonline(series[test_idx], [list(holidays_feature_serie[test_idx]), list(weekends_feature_series[test_idx])]))\n\n","repo_name":"AlexandreRozier/BigDataAnalytics","sub_path":"models/deep_ar/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"72341191029","text":"\nfrom qiskit import *\nimport matplotlib.pyplot as plt\nimport numpy as np\n\noracle = QuantumCircuit(2, name='oracle')\noracle.cz(0,1)\noracle.to_gate()\noracle.draw()\n\nbackend = Aer.get_backend('statevector_simulator')\ngrover_circ = QuantumCircuit(2, 2)\ngrover_circ.h([0, 1])\ngrover_circ.append(oracle, [0,1])\ngrover_circ.draw()\n\njob = execute(grover_circ, backend)\nresult = job.result()\nsv = result.get_statevector()\nnp.round(sv, 2)","repo_name":"watermelonich/quantum-programs","sub_path":"grover search algorithm/gr2.py","file_name":"gr2.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"28690334718","text":"import pdb\nimport sys\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\nfrom math import pi\nimport time\n\nimport zmq\nfrom msgpack import loads\nimport threading\n\nimport numpy as np\nimport math\nimport time\nimport scipy.io\nfrom sklearn.linear_model import LinearRegression\n\nfrom skimage import color, data, restoration, metrics\n\nimport screeninfo\n\n#Get ScreenInfo\nscreen_id = 1\nscreen = screeninfo.get_monitors()[screen_id]\nresolution = [screen.width, screen.height]\n\n## Setting for realsense\nREALSENSE_CAMERA = \"D435\" #D435 / L515\n\n# Camera setting and tracking setting\nif REALSENSE_CAMERA == \"D435\" :\n DEPTH_CAMERA_MAX_THETA = 57 / 2.0 * (pi / 180)\n DEPTH_CAMERA_MAX_PHI = 86 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_THETA = 41 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_PHI = 64 / 2.0 * (pi / 180)\nelif REALSENSE_CAMERA == \"L515\" :\n DEPTH_CAMERA_MAX_THETA = 55 / 2.0 * (pi / 180)\n DEPTH_CAMERA_MAX_PHI = 70 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_THETA = 43 / 2.0 * (pi / 180)\n COLOR_CAMERA_MAX_PHI = 70 / 2.0 * (pi / 180)\n\nif REALSENSE_CAMERA == \"D435\" :\n DEPTH_CAMERA_RES = 640,480\n COLOR_CAMERA_RES = 640,480\nelif REALSENSE_CAMERA == \"L515\" :\n DEPTH_CAMERA_RES = 1024,768\n COLOR_CAMERA_RES = 1280,720\n\n## Setting for Pupil_tracker\naddr = '127.0.0.1' # remote ip or localhost\nreq_port = \"50020\" # same as in the pupil remote gui\n\n## Setting for inverse filtering image\npupil_diameter = 2e-3\neye_length = 24e-3\neye_relief = 1e-1\nkernel_radius_pixel = 21\nnum_slicing_imgs = 4 # for rendering_display\n\n# Convert matrix default setting\nconvert_matrix = np.array([[1.0078, 0.1722, 0.0502], [0, 0, 0], [0.0532, -0.6341, 0.7817]])\n\n\ndef make_convert_matrix(sub):\n \"\"\"\n front side = input degree is 184\n (x_1, y_1, z_1): world coordinate\n (x_2, y_2, z_2): viewing coordinate (pupil camera coordinate)\n \"\"\"\n coord_1 = None\n coord_2 = None\n\n degree = input(\"type observed degree:\")\n while(degree != 'end' and degree != '-1'):\n degree = float(degree)\n theta_1 = pi / 2\n phi_1 = (degree - 184 + 90) * pi / 180\n x_1 = - np.sin(theta_1) * np.cos(phi_1)\n y_1 = np.cos(theta_1)\n z_1 = np.sin(theta_1) * np.sin(phi_1)\n\n sub.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n topic,msg_1 = sub.recv_multipart()\n message_1 = loads(msg_1)\n theta_2 = message_1[b'theta']\n phi_2 = message_1[b'phi']\n sub.disconnect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n\n x_2 = np.sin(theta_2) * np.cos(phi_2)\n y_2 = np.cos(theta_2)\n z_2 = np.sin(theta_2) * np.sin(phi_2)\n\n if coord_1 is None:\n coord_1 = np.array([[x_1, y_1, z_1]])\n else:\n coord_1 = np.append(coord_1, [[x_1, y_1, z_1]], axis=0)\n if coord_2 is None:\n coord_2 = np.array([[x_2, y_2, z_2]])\n else:\n coord_2 = np.append(coord_2, [[x_2, y_2, z_2]], axis=0)\n\n degree = input(\"type observed degree:\")\n\n # coord_1 = np.array([[-0.5,\t0,\t0.866025404],[-0.342020143,\t0,\t0.939692621],[-0.173648178,\t0,\t0.984807753],[0,\t0,\t1],[0.173648178,\t0,\t0.984807753],[0.342020143,\t0,\t0.939692621],[0.5,\t0,\t0.866025404]])\n # coord_2 = np.array([[-0.420787921,\t-0.390899842,\t0.818617639],[-0.32087076,\t-0.557559388,\t0.765617061],[-0.137999648,\t-0.649991026,\t0.747307007],[0.083080865,\t-0.662271994,\t0.74464312],[0.274478632,\t-0.610046409,\t0.743306706],[0.418590941,\t-0.480357372,\t0.770738879],[0.460415755,\t-0.314987365,\t0.829939933]])\n\n print(\"coord_1 : \", coord_1)\n print(\"coord_2 : \", coord_2)\n\n model_x = 
LinearRegression(fit_intercept=False).fit(coord_2,coord_1[:,0])\n model_y = LinearRegression(fit_intercept=False).fit(coord_2, coord_1[:,1])\n model_z = LinearRegression(fit_intercept=False).fit(coord_2, coord_1[:,2])\n\n convert_matrix = np.array([model_x.coef_, model_y.coef_, model_z.coef_])\n return convert_matrix\n\n\ndef convert_pupil_to_realsense(theta, phi) :\n x = np.sin(theta) * np.cos(phi)\n y = np.cos(theta)\n z = np.sin(theta) * np.sin(phi)\n coord = np.array([[x],[y],[z]])\n\n converted_coord = np.dot(convert_matrix, coord)\n\n converted_x, converted_y, converted_z = converted_coord\n converted_theta = np.arctan(converted_y / converted_z)\n converted_phi = np.arctan(converted_x / converted_z)\n\n return converted_theta, converted_phi\n\ndef full_rendering_display(color_img, depth_img, gaze_depth, c = 8.0e+4):\n \"\"\"\n slice color image by 'each depth' in depth image. Create corresponding PSF on each slice. \n Apply convolution on every slice and add up every slice. return normalized reconstructed image.\n c : coefficient for gaussian psf\n \"\"\"\n eye_focal_length = 1 / (1 / gaze_depth + 1 / eye_length)\n color_img=color_img.astype(float)\n depth_img=depth_img.astype(float)\n filtered_img=np.zeros_like(color_img)\n edge = np.zeros_like(depth_img)\n\n # Calculate target intensity sum\n target_intensity_sum = np.sum(color_img)\n\n RES = COLOR_CAMERA_RES #resolution of color_img, detph_img\n x,y = np.meshgrid(np.linspace(-RES[0]//2, RES[0]//2-1,RES[0]), np.linspace(-RES[1]//2,RES[1]//2-1,RES[1]))\n radius = np.sqrt(x*x+y*y)\n\n depths = np.unique(depth_img[depth_img>0])\n\n sliced_color_imgs = []\n\n for depth in depths : \n pixel_select=np.zeros_like(depth_img)\n pixel_select[depth_img == depth] = 1 \n\n edge += cv2.Canny(np.uint8(pixel_select*255), 50, 100)\n pixel_select = np.stack((pixel_select,pixel_select,pixel_select), axis = 2)\n sliced_color_img = color_img * pixel_select\n sliced_color_imgs.append((sliced_color_img, depth / 1000.0))\n\n for sliced_color_img, mean_depth in sliced_color_imgs:\n b = (eye_focal_length / (gaze_depth - eye_focal_length))* pupil_diameter * abs(mean_depth - gaze_depth) / mean_depth # blur diameter\n kernel = np.zeros_like(sliced_color_img[:,:,0]) # same size with single channel of image (2D)\n \n if b == 0 :\n kernel[RES[1]//2, RES[0]//2] = 1 # delta function\n else :\n kernel = 2 / (pi * (c * b)**2) * np.exp(-2 * radius**2 / (c * b)**2)\n kernel[radius > kernel_radius_pixel] = 0 #Use 21*21 nonzero points near origin, otherwise, value is zero\n \n #normalization\n if np.sum(kernel) == 0: # when does this occurs? 
if psf is too small in every pixel\n kernel[res_window[1]//2, res_window[0]//2]=1\n else:\n kernel = kernel / np.sum(kernel)\n\n compensate_img = cv2.filter2D(sliced_color_img, -1, kernel)\n filtered_img += compensate_img\n\n #just add zero depth pixel to filtered image\n pixel_select = np.zeros_like(depth_img)\n pixel_select[depth_img==0] = 1\n pixel_select = np.stack((pixel_select, pixel_select, pixel_select), axis = 2)\n color_img_zero_depth = color_img * pixel_select\n filtered_img += color_img_zero_depth\n\n edge = np.clip(edge, 0, 255).astype('uint8')\n dilated_edge = cv2.dilate(edge, np.ones((3, 3)))\n dilated_edge = np.stack((dilated_edge, dilated_edge, dilated_edge), axis=2)\n\n #blurred_filtered_img = cv2.GaussianBlur(filtered_img, (5, 5), 0) # Smoothing boundary\n blurred_filtered_img = filtered_img # No smoothing boundary\n smoothed_filtered_img = np.where(dilated_edge==np.array([255,255,255]), blurred_filtered_img, filtered_img)\n\n #smoothed_filtered_img = filtered_img\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, np.max(smoothed_filtered_img))\n smoothed_filtered_img = smoothed_filtered_img / np.sum(smoothed_filtered_img) * target_intensity_sum / 255.0\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, 1)\n\n return smoothed_filtered_img, len(sliced_color_imgs)\n\ndef rendering_display(color_img, depth_img, gaze_depth, c = 8.0e+4, num_slicing_imgs = 4):\n \"\"\"\n slice color image by 'num_slicing_imgs' in depth image. Create corresponding PSF on each slice. \n Apply convolution on every slice and add up every slice. return normalized reconstructed image.\n c : coefficient for gaussian psf\n \"\"\"\n eye_focal_length = 1 / (1 / gaze_depth + 1 / eye_length)\n color_img=color_img.astype(float)\n depth_img=depth_img.astype(float)\n filtered_img=np.zeros_like(color_img)\n edge = np.zeros_like(depth_img)\n\n # Calculate target intensity sum\n target_intensity_sum = np.sum(color_img)\n\n RES = COLOR_CAMERA_RES #resolution of color_img, detph_img\n x,y = np.meshgrid(np.linspace(-RES[0]//2, RES[0]//2-1,RES[0]), np.linspace(-RES[1]//2,RES[1]//2-1,RES[1]))\n radius = np.sqrt(x*x+y*y)\n\n depths = depth_img[depth_img>0]\n percentiles = np.linspace(0,100,num_slicing_imgs+1)\n depth_bounds = np.percentile(depths, percentiles, interpolation='nearest')\n depths = np.unique(depths)\n\n sliced_color_imgs = []\n\n for idx in range(num_slicing_imgs): # idx th slice\n pixel_select=np.zeros_like(depth_img)\n for depth in depths: # create boolean mask\n if depth_bounds[idx] <= depth and depth< depth_bounds[idx+1]:\n pixel_select[depth_img==depth] = 1 \n \n if idx == num_slicing_imgs - 1 : # add last depth on last slice\n pixel_select[depth_img == depth_bounds[num_slicing_imgs]] = 1\n\n masked_depth_img = depth_img[pixel_select == 1]\n\n if len(masked_depth_img) == 0: # if masked_depth_img is blank\n continue\n \n mean_depth = np.mean(masked_depth_img)\n if int(gaze_depth*1000) in masked_depth_img :\n mean_depth = int(gaze_depth*1000)\n\n edge += cv2.Canny(np.uint8(pixel_select*255), 50, 100)\n pixel_select = np.stack((pixel_select,pixel_select,pixel_select), axis = 2)\n sliced_color_img = color_img * pixel_select\n sliced_color_imgs.append((sliced_color_img, mean_depth / 1000.0))\n\n for sliced_color_img, mean_depth in sliced_color_imgs:\n b = (eye_focal_length / (gaze_depth - eye_focal_length))* pupil_diameter * abs(mean_depth - gaze_depth) / mean_depth # blur diameter\n kernel = np.zeros_like(sliced_color_img[:,:,0]) # same size with single channel of image (2D)\n \n if b 
== 0 :\n kernel[RES[1]//2, RES[0]//2] = 1 # delta function\n else :\n kernel = 2 / (pi * (c * b)**2) * np.exp(-2 * radius**2 / (c * b)**2)\n kernel[radius > kernel_radius_pixel] = 0 #Use 21*21 nonzero points near origin, otherwise, value is zero\n \n #normalization\n if np.sum(kernel) == 0: # when does this occurs? if psf is too small in every pixel\n kernel[res_window[1]//2, res_window[0]//2]=1\n else:\n kernel = kernel / np.sum(kernel)\n\n compensate_img = cv2.filter2D(sliced_color_img, -1, kernel)\n filtered_img += compensate_img\n\n #just add zero depth pixel to filtered image\n pixel_select = np.zeros_like(depth_img)\n pixel_select[depth_img==0] = 1\n pixel_select = np.stack((pixel_select, pixel_select, pixel_select), axis = 2)\n color_img_zero_depth = color_img * pixel_select\n filtered_img += color_img_zero_depth\n\n edge = np.clip(edge, 0, 255).astype('uint8')\n dilated_edge = cv2.dilate(edge, np.ones((3, 3)))\n dilated_edge = np.stack((dilated_edge, dilated_edge, dilated_edge), axis=2)\n\n #blurred_filtered_img = cv2.GaussianBlur(filtered_img, (5, 5), 0) # Smoothing boundary\n blurred_filtered_img = filtered_img # No smoothing boundary\n smoothed_filtered_img = np.where(dilated_edge==np.array([255,255,255]), blurred_filtered_img, filtered_img)\n\n #smoothed_filtered_img = filtered_img\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, np.max(smoothed_filtered_img))\n smoothed_filtered_img = smoothed_filtered_img / np.sum(smoothed_filtered_img) * target_intensity_sum / 255.0\n smoothed_filtered_img = np.clip(smoothed_filtered_img, 0, 1)\n\n return smoothed_filtered_img, len(sliced_color_imgs)\n\n\nif __name__ == \"__main__\":\n\n # Configure depth and color streams\n pipeline = rs.pipeline()\n config = rs.config()\n\n config.enable_stream(rs.stream.depth, DEPTH_CAMERA_RES[0], DEPTH_CAMERA_RES[1], rs.format.z16, 30)\n config.enable_stream(rs.stream.color, COLOR_CAMERA_RES[0], COLOR_CAMERA_RES[1], rs.format.bgr8, 30)\n\n # zero hole filling filter\n spatial = rs.spatial_filter()\n spatial.set_option(rs.option.filter_magnitude, 5)\n spatial.set_option(rs.option.filter_smooth_alpha, 1)\n spatial.set_option(rs.option.filter_smooth_delta, 50)\n spatial.set_option(rs.option.holes_fill, 3)\n hole_filling = rs.hole_filling_filter()\n\n # Align process for realsense frames\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n # Start streaming for Realsense\n pipeline.start(config)\n\n # Start connecting pupil tracker\n context = zmq.Context()\n req = context.socket(zmq.REQ) #open a req port to talk to pupil\n req.connect(\"tcp://%s:%s\" %(addr,req_port))\n req.send(b'SUB_PORT') # ask for the sub port\n sub_port = req.recv()\n\n # open a sub port to listen to pupil in eye_1_3d\n sub_1_3d = context.socket(zmq.SUB)\n sub_1_3d.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n sub_1_3d.setsockopt(zmq.SUBSCRIBE, b'pupil.1.3d')\n\n need_calculate = input(\"Start Calculating convert matrix?(Y/n) : \")\n\n if (need_calculate.upper() == \"Y\") :\n convert_matrix = make_convert_matrix(sub_1_3d)\n print(\"convert matrix : \", convert_matrix)\n np.save('./convert_matrix',convert_matrix)\n\n input(\"Start convert filtered image(press enter)\")\n time_0 = time.time()\n\n # Start convert filtered image\n try:\n while True:\n current_time = time.time()\n\n # Collect Data from pupil_tracker & Wait for a coherent pair of frames: depth and color\n sub_1_3d.connect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n topic,msg_1 = sub_1_3d.recv_multipart() # pupil tracker (maximum 
120Hz)\n\n frames = pipeline.wait_for_frames() # realsense (maximum 30Hz)\n\n aligned_frames = align.process(frames)\n\n depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n if not depth_frame or not color_frame:\n continue\n\n message_1 = loads(msg_1)\n theta = message_1[b'theta']\n phi = message_1[b'phi']\n\n # Filter the depth frame\n depth_frame = spatial.process(depth_frame)\n depth_frame = hole_filling.process(depth_frame)\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n diopter_image = 1000.0/depth_image\n color_image = np.asanyarray(color_frame.get_data())\n # breakpoint()\n\n # Apply colormap on depth image (image must be converted to 8-bit per pixel first)\n # depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.085), cv2.COLORMAP_JET)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(diopter_image, alpha=63.75), cv2.COLORMAP_JET)\n #depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n # Check tracking point on images\n converted_theta, converted_phi = convert_pupil_to_realsense(theta, phi)\n\n H, W = color_image.shape[0], color_image.shape[1]\n point_y = int(H/2 + H/2 * (np.tan(converted_theta) / np.tan(COLOR_CAMERA_MAX_THETA)))\n point_x = int(W/2 + W/2 * (np.tan(converted_phi) / np.tan(COLOR_CAMERA_MAX_PHI)))\n point_y = np.clip(point_y, 0, H-1)\n point_x = np.clip(point_x, 0, W-1)\n\n # depth_colormap = cv2.circle(depth_colormap, (point_x, point_y), 5, (0,0,255), -1)\n # text = \"depth : \" + str(depth_image[point_y][point_x]) + \"mm\"\n # depth_colormap = cv2.putText(depth_colormap, text, (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)\n\n \n #filtered_image = color_image\n # filtered_image, _ = full_rendering_display(color_image, depth_image, depth_image[point_y][point_x] / 1000.0)\n filtered_image, _ = rendering_display(color_image, depth_image, depth_image[point_y][point_x] / 1000.0, num_slicing_imgs = num_slicing_imgs)\n filtered_image = cv2.circle(filtered_image, (point_x, point_y), 5, (0,0,255), -1)\n # color_image = cv2.circle(color_image, (point_x, point_y), 5, (0,0,255), -1)\n\n # print(\"time : \", round(current_time - time_0, 4))\n # print(\"theta, phi : \", theta, phi)\n # print(\"position(x,y), depth : \", point_x, point_y, depth_image[point_y][point_x], \"\\n\")\n\n\n # Show images\n cv2.namedWindow('Convert_filtered_image', cv2.WND_PROP_FULLSCREEN)\n cv2.resizeWindow(\"Convert_filtered_image\", resolution[0], resolution[1])\n cv2.moveWindow('Convert_filtered_image', screen.x - 1, screen.y - 1)\n cv2.setWindowProperty('Convert_filtered_image', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n display_filtered_image = cv2.copyMakeBorder(filtered_image, int((resolution[1]-filtered_image.shape[0])/2), int((resolution[1]-filtered_image.shape[0])/2), int((resolution[0]-filtered_image.shape[1])/2), int((resolution[0]-filtered_image.shape[1])/2), 0)\n #display_filtered_image = filtered_image\n\n cv2.imshow('Convert_filtered_image', display_filtered_image)\n\n images = np.hstack((color_image, depth_colormap))\n cv2.namedWindow('original_image', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('original_image', images)\n cv2.waitKey(1)\n\n sub_1_3d.disconnect(b\"tcp://%s:%s\" %(addr.encode('utf-8'),sub_port))\n\n finally:\n # Stop streaming\n pipeline.stop()\n sub_1_3d.disconnect(b\"tcp://%s:%s\" 
%(addr.encode('utf-8'),sub_port))","repo_name":"wogur110/OEQE_project","sub_path":"display_filtered_image.py","file_name":"display_filtered_image.py","file_ext":"py","file_size_in_byte":17918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"42017552411","text":"List = []\n\nsize = int(input(\"Input number of elements of list: \"))\n\nfor i in range(size):\n element = int(input(\"List[\" + str(i) + \"] = \"))\n List.append(element)\n\n\n\nprint(\"Input elements you want to swap in list: \")\n\nPos_1 = int(input(\"Position 1: \"))\nPos_2 = int(input(\"Position 2: \"))\n\nprint(\"Original List:\", List)\ntempt = List[Pos_1]\nList[Pos_1] = List[Pos_2]\nList[Pos_2] = tempt\nprint(\"Swapped List:\", List)\n","repo_name":"giang09101999/Data","sub_path":"Python/Data type/List/Exercises/Swap two elements in a list.py","file_name":"Swap two elements in a list.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"258029939","text":"import logging\nimport os\nimport queue\nimport random\nimport threading\nimport time\n\nfrom collections import namedtuple\nfrom os import listdir, walk\nfrom os.path import isfile, join\n\nlogging.basicConfig(level=logging.DEBUG,\n format='(%(threadName)-9s) %(message)s',)\n\nBUF_SIZE = 2\nq = queue.Queue(maxsize=BUF_SIZE)\nBUF_OBJECT_SIZE = 1024*1024*25\n\nBufferObject = namedtuple(\"BufferObject\", \"buffer filepath index\")\n\n\nclass ProducerThread(threading.Thread):\n def __init__(self, src, name=None):\n super(ProducerThread, self).__init__()\n self.name = name\n self.src = src\n\n def run(self):\n logging.debug(\"producer started!\")\n for r, d, f in os.walk(self.src):\n for file in f:\n filepath = os.path.join(r, file)\n with open(filepath, \"rb\", buffering=0) as file:\n current_loc = 0\n file.seek(current_loc)\n while current_loc < os.path.getsize(filepath):\n buffer = BufferObject(\n buffer=file.read(BUF_OBJECT_SIZE),\n filepath=filepath, index=current_loc)\n current_loc += BUF_OBJECT_SIZE\n q.put(buffer)\n\n\nclass ConsumerThread(threading.Thread):\n def __init__(self, src, dest, name=None):\n super(ConsumerThread, self).__init__()\n self.name = name\n self.src = src\n self.dest = dest\n\n def run(self):\n logging.debug(\"consumer started!\")\n\n src_size = self.get_dir_size(self.src)\n bytes_copied = 0\n\n while bytes_copied < src_size:\n while q.qsize() > 0:\n buffer = q.get()\n logging.debug('Got ' + str(buffer.filepath) + ' from q')\n filepath = buffer.filepath.replace(self.src, self.dest)\n dir_name = os.path.dirname(filepath)\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n if not os.path.exists(filepath):\n open(filepath, 'w').close()\n\n with open(filepath, \"r+b\", buffering=0) as file:\n logging.debug('writing to:' + str(filepath))\n file.seek(buffer.index)\n file.write(buffer.buffer)\n bytes_copied += len(buffer.buffer)\n time.sleep(random.random())\n\n def get_dir_size(self, start_path):\n total_size = 0\n for dirpath, dirnames, filenames in os.walk(start_path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n total_size += os.path.getsize(fp)\n return total_size\n\n\nif __name__ == \"__main__\":\n start = time.time()\n\n src = \"enter path here\"\n dest = \"enter path here\"\n\n p = ProducerThread(name='producer', src=src)\n c = ConsumerThread(name='consumer', src=src, dest=dest)\n\n p.start()\n time.sleep(1)\n c.start()\n time.sleep(1)\n\n p.join()\n c.join()\n\n end = time.time()\n print(end - start)\n","repo_name":"KaTaiHo/FastCopy","sub_path":"fastcopy.py","file_name":"fastcopy.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"12301633373","text":"soma = 0\r\nlixo = 0\r\n\r\nfor i in range(1, 7):\r\n num = float(input('digite um numero quantas vezes pedir '))\r\n\r\n if num % 2 == 0:\r\n soma = soma + num\r\n else:\r\n lixo = lixo + num\r\n\r\nprint(soma)\r\n","repo_name":"LiR4/ex-python","sub_path":"ex/ex-21.py","file_name":"ex-21.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"44709610257","text":"from guitar_class import Guitar\n\nprint(\"My Guitars!\")\n\nguitars = []\nname = input(\"Name:\")\n\nwhile name != \"\":\n year = int(input(\"Year:\"))\n cost = float(input(\"Cost:\"))\n guitar = Guitar(name, year, cost)\n guitars.append(guitar)\n print()\n name = input(\"Name:\")\n\n\nname_length = max(len(guitar.name) for guitar in guitars)\ncost_length = max(len(str(guitar.cost)) for guitar in guitars)\n\nprint()\nprint(\"These are my guitars:\")\nfor i, guitar in enumerate(guitars, 1):\n vintage_string = \"(Vintage)\" if guitar.is_vintage() else \"\"\n print(\"Guitar {}: {:{}} ({}), worth ${:.2f}{}\".format(i, guitar.name, name_length, guitar.year, guitar.cost,\n vintage_string))\n","repo_name":"Dante-Gaius/CP1404_Practicals","sub_path":"cp1404practicals/Prac_06/guitars.py","file_name":"guitars.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"22638216695","text":"import random\r\n#16 A\r\nE16a = 15 * 38\r\nprint(E16a)\r\n\r\n#16 B\r\nE16b = (3 + 4) * (5 + 6)\r\nprint(E16b)\r\n\r\n#16 C\r\nE16C = 7 / 2\r\nprint(int(E16C))\r\n\r\n#16 D\r\nE16D = 48 // 5\r\nprint(E16D)\r\n\r\n#16 E\r\nE16E = (8 + 7 + 4 + 2) / 4\r\nprint(E16E)\r\n\r\n#16 F\r\nE16F = 2 ** 10\r\nprint(int(E16F))\r\n\r\n#16 G\r\nE16G = 49 ** 0.5\r\nprint(E16G)\r\n\r\n#16 H\r\nE16H = 80 * 0.25\r\nprint(int(E16H))\r\n\r\n\r\n\r\n\r\n\r\n#18\r\n#I am 170cm tall, i.e. 5 feet and 7 inches.\r\nl = 179\r\nl2 = l % 30.48\r\nprint(\"I am \", l, \"cm tall, i.e. \", (int(l2)), \"feet,\", \"6\", \"inches.\")\r\n\r\n\r\n\r\n#19\r\n#The length of the sides of the triangle is a, b and c. Write a program \r\n#that calculates the area of the triangle using the Heron formula.\r\n#Read the values of the sides of the triangle from the keyboard. Using the\r\n#program, calculate the area of the triangle for the sides 3, 4 and 5.\r\n\r\na = 3\r\nb = 4\r\nc = 5\r\np = (a + b + c) // 2\r\nS = (p * (p - a) * (p - b) * (p - c)) ** 0.5\r\nprint(int(S))\r\n\r\n'''\r\n#20\r\nh = float(input(\"Enter your heigh: \"))\r\nw = int(input(\"Enter your weight: \"))\r\nBMI = (w / (h ** 2))\r\nprint(\"Your BMI index is \", BMI)\r\n'''\r\n\r\n#21\r\nroll1 = random.randint(1, 6)\r\nroll2 = random.randint(1, 6)\r\nroll3 = random.randint(1, 6)\r\ns1 = roll3 + roll2 + roll1\r\nr = f\"The 1 roll is: {roll1};\\nThe 2 roll is: {roll2};\\nThe 3 roll is: {roll3}.\"\r\ns = f\"The sum of 3 rolls is: {s1}.\"\r\nprint(r)\r\nprint(s)\r\n\r\n\r\n#23\r\n#23% VAT was paid from the amount of PLN 15.84. Calculate and display VAT.\r\n# Apply formatting with decimal places. Sample result:\r\n#Amount : 15.84 zł\r\n#VAT 23% : 3.64 zł\r\na = int(input(\"Enter your full amount: \"))\r\nv = a * 0.23\r\nprint(\"Amount : \", a)\r\nprint(\"VAT 23% : \", v)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"DmytroKorodchenkov/HW_0","sub_path":"HW_0.py","file_name":"HW_0.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"11586787477","text":"from tkinter import *\n#main/root window \nroot = Tk()\n#setting window size & title\nroot.geometry(\"600x600\")\nroot.title(\"Simple B.M.I calculator\")\n#adding background\nC = Canvas(root,bg=\"purple\",height=200,width=200).grid()\nfileName = PhotoImage(file = \"background.png\")\nbackground_label=Label(root , image=fileName)\nbackground_label.place(x=0,y=0,relwidth=1,relheight=1) \n#entry widgets\nw_entry = Entry(root,width=10)\nw_entry.grid(row=4,column=3,padx=10,pady=10,columnspan=2)\nh_entry = Entry(root,width=10)\nh_entry.grid(row=4,column=5,padx=10,pady=10,columnspan=2)\n#function for entry weight and height and calculating the BMI \n#BMI formula = weight(kg) / height^2 (m)\ndef Calculate():\n weight = float(w_entry.get())\n height = float(h_entry.get())\n BMI = round(weight/height**2,2)\n bg_color = \"\"\n result = \"\"\n #checking if : underweight , normal weight , overweight \n if(BMI < 18.5):\n bg_color = \"red\"\n result = \"Underweight\" \n if(BMI >= 18.5 and BMI <=24.9):\n bg_color = \"Green\"\n result = \"Normal Weight\"\n if(BMI >= 25.0):\n bg_color = \"Red\"\n result = \"Overweight\" \n BMI_label = Label(root,text=\"BMI : {0} {1}\".format(BMI,result) , bg=bg_color).grid(row=8,column=8)\n#widgets\nw_label = Label(root,text=\"Weight(in kg)\").grid(row=3,column=3) \nh_label = Label(root,text=\"Height(in m)\").grid(row=3,column=5)\nr_label = Label(root,text=\"BMI\").grid(row=6,column=3)\ncalc_btn = Button(root,text=\"Calculate\",command=Calculate).grid(row=8,column=3,padx=10,pady=10)\nroot.mainloop() ","repo_name":"CarlosArro2001/BMI_Calculator_Tkinter","sub_path":"BMI_app.py","file_name":"BMI_app.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"43280362017","text":"\"\"\" Alpha-Beta-Gamma Filter Implementation\n\nBased on the examples:\nhttps://www.kalmanfilter.net/alphabeta.html\n\"\"\"\n\nimport math\nimport numpy as np\n\n\nclass AlphaBetaGammaFilter():\n def __init__(\n self, \n alpha:float = 1.0,\n beta:float = 1.0,\n gamma:float = 1.0,\n delta_t:float = 1.0\n ) -> None:\n \"\"\" Alpha-Beta-Gamma Filter class for tracking\n \n Input:\n alpha: (float) multiplier for position prediction equation (0.0 <= alpha <= 1.0)\n beta: (float) multiplier for velocity prediction equation (0.0 <= beta <= 1.0)\n gamma: (float) multiplier for acceleration prediction equation (0.0 <= gamma <= 1.0)\n delta_t: (float) timing interval\n \n Return: None\n \"\"\"\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n \n self.delta_t = delta_t\n self.timer = 0\n \n # State update equation variables\n # This is for estimating the current position, velocity, and acceleration\n # considering previous estimates and the current measurement\n self.measured_pos = 0.0 # measured position\n self.current_x = 0.0 # initial (current) position\n self.current_v = 0.0 # initial (current) velocity\n self.current_a = 0.0 # initial (current) acceleration\n \n # State extrapolation variables\n # This is for predicting position, velocity, and acceleration for next time frame\n # We use the \"current\" variables to predict the below attributes\n self.pred_x = 0.0\n self.pred_v = 0.0\n self.pred_a = 0.0\n \n\n def initialize(\n self, \n x:float = None, \n v:float = None, \n a:float = None\n ) -> None:\n \"\"\" Initialize initial conditions (position, velocity, and acceleration)\"\"\"\n self.current_x = x\n self.current_v = v\n self.current_a = a\n \n # With initial conditions set, predict for an initial guess\n self.pred_x = self.current_x + self.current_v * self.delta_t + (0.5 * self.current_a * self.delta_t**2)\n self.pred_v = self.current_v + self.current_a * self.delta_t\n self.pred_a = self.current_a\n \n print('Position (t={}): {}'.format(self.timer,self.pred_x))\n print('Velocity (t={}): {}'.format(self.timer,self.pred_v))\n print('Acceleration (t={}): {}'.format(self.timer,self.pred_a))\n \n\n def update(\n self,\n z:float = None\n ) -> None:\n \"\"\" Compute:\n i) current estimate with state update equations\n ii) next state estimate (prediction)\n \"\"\"\n # Update timer first...\n self.timer += 1\n self.current_x = self.pred_x\n self.current_v = self.pred_v\n self.current_a = self.pred_a\n \n # Compute current estimate\n x = self.current_x\n diff = z - x\n self.current_x = x + self.alpha * diff\n self.current_v = self.current_v + self.beta * (diff / self.delta_t)\n self.current_a = self.current_a + self.gamma * (diff / (0.5 * self.delta_t**2))\n \n print('Position Est. (t={}): {}'.format(self.timer,self.current_x))\n print('Velocity Est. (t={}): {}'.format(self.timer,self.current_v))\n print('Acceleration Est. 
(t={}): {}'.format(self.timer,self.current_a))\n \n \n # Compute prediction for current time\n self.pred_x = self.current_x + self.current_v * self.delta_t + (0.5 * self.current_a * self.delta_t**2)\n self.pred_v = self.current_v + self.current_a * self.delta_t\n self.pred_a = self.current_a\n \n print('Position (t={}): {}'.format(self.timer,self.pred_x))\n print('Velocity (t={}): {}'.format(self.timer,self.pred_v))\n print('Acceleration (t={}): {}'.format(self.timer,self.pred_a))\n \n\n\nif __name__ == \"__main__\":\n abg_filter = AlphaBetaGammaFilter(\n alpha=0.5, \n beta=0.4, \n gamma=0.1, \n delta_t=5\n )\n \n abg_filter.initialize(\n x=30000,\n v=50,\n a=0\n )\n \n abg_filter.update(z=30160)\n abg_filter.update(z=30365)\n \n","repo_name":"ManuelSerna/cv-notes","sub_path":"estimation/filters/AlphaBetaGammaFilter.py","file_name":"AlphaBetaGammaFilter.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"23276096446","text":"#Runs an injection recovery for a given light curve. Each iteration injects a different random period and planet radius, performs a periodogram analysis to try and find the period, \n#and outputs a 0 or 1 depending on whether the correct period was recovered or not. It is output into a pandas table column as ['recover?']. Plotting script is also included.\n#####normalized_corrected: a processed light curve that will be used for the injection recovery\n#####stellar_radius: the radius of the star in solar radii\n#####trials: number of injected planets to try for the injected recovery. Recommend more than 1000 since the statistics are a bit iffy when a lower value is selected\n\nimport numpy as np\nimport pandas as pd\nimport lightkurve as lk\nimport astropy.units as u\nimport batman\nfrom astropy.timeseries import BoxLeastSquares\n\n\ndef vary_params(normalized_corrected, stellar_radius=1.325, trials=1000):\n output_table = pd.DataFrame()\n Rplanet = []\n Pinject = []\n Pdetermine = []\n recover = []\n \n #print(output_table['depth'])\n rad_min = 0.02115/stellar_radius #now in R_hoststar\n rad_max = 0.2/stellar_radius #now in R_hoststar\n \n depths = np.random.uniform(rad_min, rad_max, trials) # random transit depths to inject\n midtimes = np.random.uniform(min(normalized_corrected.time.value), max(normalized_corrected.time.value), trials) # mid-transit times to inject if you want\n periods = np.random.uniform(0.3,18,trials) # periods to inject\n\n for depth, midtime, period in zip(depths, midtimes, periods):\n params = batman.TransitParams() #object to store transit parameters\n params.t0 = midtime #time of inferior conjunction\n params.per = period #orbital period\n params.rp = depth #planet radius (in units of stellar radii)\n semimaj = ((((7.496*(10**(-6)))*(period**2))**(1/3))*215.032)/stellar_radius #calc a based on period, and in terms of host star radius\n params.a = semimaj #semi-major axis (in units of stellar radii)\n params.inc = 89. #orbital inclination (in degrees)\n params.ecc = 0. #eccentricity\n params.w = 90. 
\n params.u = [0.1, 0.3] #limb darkening coefficients [u1, u2]\n params.limb_dark = \"quadratic\" #limb darkening model\n\n # Define the times at which to evaluate the fake transit\n t=normalized_corrected.time.value\n\n # Create the batman transit model\n m = batman.TransitModel(params, t)\n\n # Generate the fake light curve transit\n injected_model = m.light_curve(params)\n\n\n # Inject the fake transit into the real data\n injected_flux = normalized_corrected.flux.value + injected_model - 1.0\n\n\n lc_injected=normalized_corrected.copy()\n lc_injected.flux = injected_flux\n #fig,axs=plt.subplots(3,1,figsize=(10,10))\n planetrad = depth*stellar_radius * 9.73116 #convert the solar radii to jupiter radii\n #lc_injected.scatter(ax=axs[0],s=25,color='r',label='injected transit signal')\n #normalized_corrected.scatter(ax=axs[0],s=25)\n\n period_grid = np.linspace(0.4, 18, 1000)\n bls = lc_injected.to_periodogram(method='bls', period=period_grid, frequency_factor=500);\n #bls.plot(ax=axs[1],label=f'best p = {bls.period_at_max_power:.2f}');\n planet_b_period = bls.period_at_max_power\n planet_b_t0 = bls.transit_time_at_max_power\n planet_b_dur = bls.duration_at_max_power\n #lc_injected.fold(period=planet_b_period, epoch_time=planet_b_t0).scatter(ax=axs[2],label='')\n\n blsorig = normalized_corrected.to_periodogram(method='bls', period=period_grid, frequency_factor=500);\n origplanet_b_period = blsorig.period_at_max_power\n detectorig = abs((origplanet_b_period.value/period)-round(origplanet_b_period.value/period))\n\n detect = abs((planet_b_period.value/period)-round(planet_b_period.value/period))\n\n #axs[0].set_title(f'Rplanet(Rjup Radii) = {planetrad:.4f}, Midtime = {midtime:.2f}, Period = {period:.2f},operiod = {origplanet_b_period:.2f}')\n\n\n #print(f'detectval={detect}')\n accept_thresh=0.05\n if detectorig < accept_thresh:\n found = 0 #\"False\"\n\n elif detect < accept_thresh:\n found = 1 #\"True\"\n else:\n found = 0 #\"False\"\n\n #print(f'depth: {planetrad:.4f} actual:{period:.4f} calc:{planet_b_period:.4f} Recover?:{found}')\n Rplanet.append(planetrad)\n Pinject.append(period)\n Pdetermine.append(planet_b_period.value)\n recover.append(found)\n\n output_table['Rplanet'] = Rplanet\n output_table['Pinject'] = Pinject\n output_table['Pdetermine'] = Pdetermine\n output_table['recover?'] = recover\n \n return output_table\n\n\n\n\noutput_table = output_table\n\nradlist1=np.linspace(0.2,1.9,18)\nradlist2=np.linspace(0.3,2,18)\n\ndetectionmatrix = np.empty((1,18))\n\nfor r1,r2 in zip(radlist1,radlist2):\n p1 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(0, 1))]\n recoverpercentp1 = (p1['recover?'].sum())/len(p1.index)\n #print(recoverpercentp1)\n\n p2 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(1, 2))]\n recoverpercentp2 = (p2['recover?'].sum())/len(p2.index)\n #print(recoverpercentp2)\n\n p3 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(2, 3))]\n recoverpercentp3 = (p3['recover?'].sum())/len(p3.index)\n #print(recoverpercentp3)\n\n p4 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(3, 4))]\n recoverpercentp4 = (p4['recover?'].sum())/len(p4.index)\n #print(recoverpercentp4)\n\n p5 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(4, 5))]\n recoverpercentp5 = (p5['recover?'].sum())/len(p5.index)\n #print(recoverpercentp5)\n\n p6 = 
output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(5, 6))]\n recoverpercentp6 = (p6['recover?'].sum())/len(p6.index)\n #print(recoverpercentp6)\n\n p7 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(6, 7))]\n recoverpercentp7 = (p7['recover?'].sum())/len(p7.index)\n #print(recoverpercentp7)\n\n p8 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(7, 8))]\n recoverpercentp8 = (p8['recover?'].sum())/len(p8.index)\n #print(recoverpercentp8)\n\n p9 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(8, 9))]\n recoverpercentp9 = (p9['recover?'].sum())/len(p9.index)\n #print(recoverpercentp9)\n #print(' ')\n\n \n p10 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(9, 10))]\n recoverpercentp10 = (p10['recover?'].sum())/len(p10.index)\n #print(recoverpercentp1)\n\n p11 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(10, 11))]\n recoverpercentp11 = (p11['recover?'].sum())/len(p11.index)\n #print(recoverpercentp2)\n\n p12 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(11, 12))]\n recoverpercentp12 = (p12['recover?'].sum())/len(p12.index)\n #print(recoverpercentp3)\n\n p13 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(12, 13))]\n recoverpercentp13 = (p13['recover?'].sum())/len(p13.index)\n #print(recoverpercentp4)\n\n p14 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(13, 14))]\n recoverpercentp14 = (p14['recover?'].sum())/len(p14.index)\n #print(recoverpercentp5)\n\n p15 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(14, 15))]\n recoverpercentp15 = (p15['recover?'].sum())/len(p15.index)\n #print(recoverpercentp6)\n\n p16 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(15, 16))]\n recoverpercentp16 = (p16['recover?'].sum())/len(p16.index)\n #print(recoverpercentp7)\n\n p17 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(16, 17))]\n recoverpercentp17 = (p17['recover?'].sum())/len(p17.index)\n #print(recoverpercentp8)\n\n p18 = output_table[(output_table['Rplanet'].between(r1, r2)) & (output_table['Pinject'].between(17, 18))]\n recoverpercentp18 = (p18['recover?'].sum())/len(p18.index) \n \n matrixgrid = np.array([[recoverpercentp1,recoverpercentp2,recoverpercentp3,recoverpercentp4,recoverpercentp5,\n recoverpercentp6,recoverpercentp7,recoverpercentp8,recoverpercentp9,recoverpercentp10,\n recoverpercentp11,recoverpercentp12,recoverpercentp13,recoverpercentp14,\n recoverpercentp15,recoverpercentp16,recoverpercentp17,recoverpercentp18]])\n detectionmatrix = np.concatenate((detectionmatrix, matrixgrid), axis=0)\n\ndetectionmatrix = np.delete(detectionmatrix, obj=0, axis=0)\n\n\n\nfig, ax = plt.subplots(figsize=(10, 10))\n# Using matshow here just because it sets the ticks up nicely. 
imshow is faster.\nax.matshow(detectionmatrix, cmap='plasma')\n\nfor (i, j), z in np.ndenumerate(detectionmatrix):\n ax.text(j, i, '{:0.3f}'.format(z), ha='center', va='center')\n ax.margins(x=0)\n\nax.set_xticks([-0.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5])\nax.set_xticklabels([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18])\n\nax.set_yticks([-0.5,0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5,9.5,10.5,11.5,12.5,13.5,14.5,15.5,16.5,17.5])\nax.set_yticklabels([0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2])\n\nplt.gca().invert_yaxis()\n\nplt.tick_params(labelbottom=True, labeltop=False)\nplt.title(f'INSERT TITLE HERE')\nplt.xlabel('Period [days]')\nplt.ylabel('R_planet [R_Jup]')\n\n#plt.xscale('log')\n\n#ax.scatter(output_table['Pinject'],output_table['Rplanet'])\n\nplt.show() \n","repo_name":"billy210/research","sub_path":"inject-recover.py","file_name":"inject-recover.py","file_ext":"py","file_size_in_byte":10220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"15187067387","text":"import os\nimport sys\nimport time\nimport music_reports\nimport print_ascii_art\nfrom prettytable import PrettyTable\n\n\ndef display_menu():\n os.system(\"clear\")\n print_ascii_art.print_starting_art()\n loop = True\n while loop:\n albums_list = []\n albums_list = music_reports.import_library(albums_list)\n choice = input(\"\"\"\n1: View all imported albums\n2: Find albums by genre\n3: Find albums from given time range\n4: Find shortest/longest album\n5: Find albums created by given artist\n6: Find album by name\n7: Generate full report\n8: Add new album\n9: Delete album\n10: Exit\n What you want to do? \"\"\")\n\n if choice == \"1\":\n os.system(\"clear\")\n music_reports.drawing_table(albums_list)\n\n elif choice == \"2\":\n os.system(\"clear\")\n music_reports.find_by_genre(albums_list)\n\n elif choice == \"3\":\n loop2 = True\n while loop2:\n os.system(\"clear\")\n search_by_year_option = input(\"\"\"\n1: Search for albums made in given year\n2: Search for albums made between years (yyyy-yyyy)\n What you want to do? \"\"\")\n\n if search_by_year_option == \"1\":\n os.system(\"clear\")\n music_reports.find_albums_made_in_given_year(albums_list)\n loop2 = False\n\n elif search_by_year_option == \"2\":\n os.system(\"clear\")\n music_reports.find_albums_made_between_years(albums_list)\n loop2 = False\n\n else:\n os.system(\"clear\")\n print(\"Next time please enter '1' or '2'\")\n time.sleep(3)\n os.system(\"clear\")\n continue\n\n elif choice == \"4\":\n os.system(\"clear\")\n loop3 = True\n while loop3:\n search_shortest_longest_album = input(\"\"\"\n1: Search for longest album\n2: Search for shortest album\n What you want to do? \"\"\")\n if search_shortest_longest_album == \"1\":\n os.system(\"clear\")\n music_reports.find_longest_album(albums_list)\n loop3 = False\n\n elif search_shortest_longest_album == \"2\":\n os.system(\"clear\")\n loop3 = False\n music_reports.find_shortest_album(albums_list)\n\n else:\n os.system(\"clear\")\n print(\"Next time please enter '1' or '2'\")\n time.sleep(3)\n os.system(\"clear\")\n continue\n\n elif choice == \"5\":\n os.system(\"clear\")\n music_reports.find_by_artist(albums_list)\n\n elif choice == \"6\":\n os.system(\"clear\")\n music_reports.find_by_name(albums_list)\n\n elif choice == \"7\":\n os.system(\"clear\")\n music_reports.find_longest_album(albums_list)\n music_reports.find_shortest_album(albums_list)\n music_reports.oldest_or_youngest_album(albums_list, \"youngest\")\n music_reports.oldest_or_youngest_album(albums_list, \"oldest\")\n music_reports.amount_of_albums(albums_list)\n music_reports.albums_by_genres(albums_list)\n\n elif choice == \"8\":\n os.system(\"clear\")\n music_reports.add_album()\n\n elif choice == \"9\":\n os.system(\"clear\")\n music_reports.drawing_table(albums_list)\n music_reports.delete_album(albums_list)\n\n elif choice == \"10\":\n os.system(\"clear\")\n print_ascii_art.print_ending_art()\n loop = False\n\n else:\n os.system(\"clear\")\n print(\"Please enter numbers from 1 to 10: \")\n\n\ndef main():\n os.system(\"clear\")\n display_menu()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MichalPula/MusicLibrary","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"7953565873","text":"def diff21(n):\r\n if (n > 21): \r\n return (2 * (abs(n - 21)))\r\n else: \r\n return (abs(n - 21))\r\n\r\ndef fix_teen(n):\r\n if (n >= 13 and n <= 19) and (n != 15 and n != 16):\r\n return 0\r\n else:\r\n return n\r\n\r\ndef no_teen_sum(a, b, c):\r\n return fix_teen(a) + fix_teen(b) + fix_teen(c)\r\n \r\ndef sorta_sum(a, b):\r\n if (a + b >= 10 and a + b <= 19):\r\n return 20\r\n else: return a + b\r\n\r\ndef lone_sum(a, b, c):\r\n if a == b and b == c:\r\n return 0\r\n elif a == b:\r\n return c\r\n elif a == c:\r\n return b\r\n elif b == c:\r\n return a\r\n else: return a + b + c\r\n \r\ndef lucky_sum(a, b, c):\r\n if a == 13:\r\n return 0\r\n elif b == 13:\r\n return a\r\n elif c == 13:\r\n return a + b\r\n else: return a + b + c\r\n \r\ndef round10(num):\r\n if (num % 10 >= 5):\r\n return ((num / 10) + 1) * 10\r\n else: return ((num / 10) * 10)\r\n\r\ndef round_sum(a, b, c):\r\n return round10(a) + round10(b) + round10(c)\r\n\r\ndef make_bricks(small, big, goal):\r\n if big == 0:\r\n return goal <= small\r\n elif small == 0 and big != 0:\r\n return goal % 5 == 0\r\n elif (small) + (big * 5) < goal:\r\n return False\r\n elif (goal % 5) <= small:\r\n return True\r\n else:\r\n return False\r\n\r\ndef close_far(a, b, c):\r\n if ((abs(b - a)) <= 1 and ((abs(c - a)) >= 2 and (abs(c - b))) >= 2):\r\n return True\r\n elif (abs(c - a)) <= 1 and (abs(b - a)) >= 2 and (abs(b - c)) >= 2:\r\n return True\r\n else: return False\r\n \r\ndef magicPair(a,b):\r\n if b < 10:\r\n if (a / 10) == b:\r\n if (a / 10) + b == (a % 10):\r\n return True\r\n if (a % 10) == b:\r\n if (a % 10) + b == (a / 10):\r\n return True\r\n else:\r\n if (a / 10) == (b / 10):\r\n if (a / 10) + (b / 10) == (a % 10) + (b % 10):\r\n return True\r\n if (a % 10) == (b % 10):\r\n if (a % 10) + (b % 10) == (a / 10) + (b / 10):\r\n return True\r\n if (a / 10) == (b % 10):\r\n if (a / 10) + (b % 10) == (a % 10) + (b / 10):\r\n return True\r\n else: return False\r\n if (a % 10) == (b / 10):\r\n if (a % 10) + (b / 10) == (a / 10) + (b % 10):\r\n return True\r\n if (a / 100) == (b / 10):\r\n if (a / 100) + (b / 10) == ((a / 10) % 10) + ((a % 100) % 10):\r\n return True\r\n if ((a % 100) % 10) == (b / 10):\r\n if ((a % 100) % 10) + (b / 10) == ((a / 10) % 10) + (b % 10):\r\n return True\r\n else: return False\r\n else: return False","repo_name":"rickey-dong/introcs","sub_path":"intro1/conditionals.py","file_name":"conditionals.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"25058221450","text":"from __future__ import print_function\nfrom __future__ import division\nimport logging, os\nimport numpy as np\nfrom utils.optparse import Arguments as arguments\nfrom canon60 import tfidf\nfrom data import process\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef prepare():\n \"\"\"\n Logging and arguments\n :return:\n \"\"\"\n\n # Logger\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n # --- keep this logger at DEBUG level, until aguments are processed\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(\n \"%(asctime)s - %(module)s - %(levelname)s - %(message)s\"\n )\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # --- Get Input Arguments\n in_args = arguments(logger)\n opts = in_args.parse()\n\n fh = logging.FileHandler(opts.log_file, mode=\"a\")\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # --- restore ch logger to INFO\n ch.setLevel(logging.INFO)\n\n return logger, opts\n\n\n\ndef evaluate(y_test, pred):\n acc = accuracy_score(y_test, pred)\n f1_micro = f1_score(y_test, pred, average='micro')\n p_micro = precision_score(y_test, pred, average='micro')\n r_micro = recall_score(y_test, pred, average='micro')\n\n f1_macro = f1_score(y_test, pred, average='macro')\n p_macro = precision_score(y_test, pred, average='macro')\n r_macro = recall_score(y_test, pred, average='macro')\n\n return acc, f1_macro, p_macro, r_macro\n\n\ndef main():\n \"\"\"\n Starts all\n :return:\n \"\"\"\n logger, opts = prepare()\n logger.info(\"---- CANON60 ----\")\n train_path = opts.i + \"/train\"\n test_path = opts.i + \"/test\"\n dt_train = process.canon60Dataset(train_path, join_all=True)\n dt_test = process.canon60Dataset(test_path, join_all=True)\n\n x_train = dt_train.X\n y_train = dt_train.y\n fnames_train = dt_train.fnames\n\n x_test = dt_test.X\n y_test = dt_test.y\n fnames_test = dt_test.fnames\n vocab = process.read_vocab_list(opts.vocab_path)\n logger.info(\"Results\")\n modelos = [\"FF\", ]\n representation = [\"tfidf\", ]\n max_features = [500,1000,2000,5000,10000,15000, 20000,len(vocab)]\n # max_features = [500,10000]\n min_ngrams = [1]\n max_ngram = [2,3,4,5,6,7,8,9]\n # max_ngram = [3,4]\n\n\n for model_type in modelos:\n for repren in representation:\n model_type_name = model_type+\"_\"+repren\n logger.info(\"Clasificador: {}\".format(model_type_name))\n file = open(\"{}/{}\".format(opts.work_dir, model_type_name), \"w\")\n file.write(\"classifier,max_features,min_ngram,max_ngram,accuracy,f1_macro,precision_macro,recall_macro\\n\")\n opts.model = model_type\n for max_feat in max_features:\n\n for min_ngram in min_ngrams:\n for up in max_ngram:\n rep = TfidfVectorizer(ngram_range=(min_ngram, up), max_features=max_feat, vocabulary=vocab[:max_feat])\n texts_rep_train = rep.fit_transform(x_train)\n texts_rep_train = texts_rep_train.toarray()\n text_test_rep = rep.transform(x_test)\n text_test_rep = text_test_rep.toarray()\n logger.info(texts_rep_train.shape)\n logger.info(text_test_rep.shape)\n\n model = tfidf.Model(texts_rep_train, y_train, text_test_rep, y_test,fnames_train,fnames_test,\n layers=opts.layers,\n logger=logger, opts=opts)\n\n pred, y_true = model.get_results()\n # pred, y_true = results[0], results[2]\n\n acc, f1_macro, p_macro, r_macro = evaluate(y_true, pred)\n # acc, f1_macro, p_macro, r_macro = random.random(), 
random.random(), random.random(), random.random()\n res = \"{},{},{},{},{},{},{},{}\\n\".format(model_type, max_feat, min_ngram, up, acc, f1_macro, p_macro, r_macro)\n\n logger.info(res)\n file.write(res)\n file.flush()\n file.close()\n\n logger.info(\"---- FIN ----\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"JoseRPrietoF/autoria","sub_path":"canon60_results_NN.py","file_name":"canon60_results_NN.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"34181377967","text":"from os import path, getcwd\nfrom pathlib import Path\nfrom shutil import copyfile, copytree\nfrom typing import Optional\n\nfrom modelgen.helper import Helper\nfrom modelgen import (__file__, constants, Validate, Parser, \n Template, alchemygen, metagen, walk)\n\nclass ModelGenerator(Helper):\n\n def __init__(self, init: Optional[str]=False, createmodel: bool=False, \n file: str=None, alembic: bool=False,**kwargs):\n '''\n This class is initialized by taking in the argument values \n passed from the cli. \n\n Args: \n init (Optional[str] or False): init is set to true if \n --init is called from the cli and a folder name is\n passed. The folder name has to be new, if an existing\n folder name is passed, an exception will be raised\n asking to pass a new folder name/path. \n If init is not called from cli, it is set to False \n by default.\n\n createmodel (bool): createmodel is set to true if \n --createmodel is called from the command line.\n --createmodel also needs another argument\n -f or --file which points to the path of the \n schema yaml file. This file will be used to create\n sqlalchemy model code in python\n\n file (str): filepath of the yaml schema template file.\n \n alembic (bool): if set, alembic support will be set\n to true. A folder named metadata will be created\n with an __init__.py. This py file will have the\n sqlalchemy metadata imported from the file \n generated by the createmodel command.\n '''\n Helper.__init__(self)\n self.create_structure(init=init)\n self.create_models(createmodel=createmodel, file=file, alembic=alembic) \n\n def _create_template_folder(self, **kwargs) -> bool:\n '''\n Create a folder called `templates`. This folder contains an\n example schema template file required to get started.\n\n Returns bool, True if creation is successful, False otherwise. \n '''\n try:\n init = kwargs.get('init')\n templates_src_path = path.join('/',*(__file__.split('/')[:-1]),'templates')\n templates_dst_path = path.join(init, 'templates')\n if path.exists(templates_dst_path):\n raise FileExistsError\n self.logger.info(f'Creating templates folder at {templates_dst_path}')\n Path(templates_dst_path).mkdir(parents=True, exist_ok=False)\n self.logger.debug('Templates folder creation successful')\n self.logger.info(f'Creating an example yaml schema file at {templates_dst_path}/example.yaml')\n copyfile((path.join(templates_src_path, 'example.yaml')), \n path.join(templates_dst_path, 'example.yaml'))\n return True\n except FileExistsError as e:\n self.logger.exception('Error occurred while creating templates folder')\n self.logger.exception(e)\n raise FileExistsError(\"Folder exists. Please specify a new folder name\") from FileExistsError \n\n def _create_alembic_folder(self, **kwargs):\n '''\n This function is responsible for creating alembic's \n folder structure. The folder created is named `alembic`.\n This folder contains files __init__.py, evn.py, README,\n script.py.mako and a folder named `versions`. 
This folder\n stores version files for every table level change made.\n\n Returns bool, True if folder creation is successful,\n False otherwise.\n '''\n try:\n init = kwargs.get('init')\n if path.isabs(init):\n dst_path = path.join(init)\n else:\n dst_path = path.join(getcwd(), init)\n if path.exists(dst_path):\n raise FileExistsError\n alembic_path = path.join('/',*(__file__.split('/')[:-1]),'alembic_migrate')\n self.logger.info(f'Creating alembic folder at {dst_path}')\n ini_src_path = path.join('/',*(__file__.split('/')[:-1]),'alembic.ini')\n copytree(alembic_path, path.join(dst_path, 'alembic_migrate'))\n # Path(path.join(self.dst_path, 'alembic','versions')).mkdir(parents=True, exist_ok=False)\n copyfile(ini_src_path, path.join(dst_path, 'alembic.ini'))\n return True\n except FileExistsError as e:\n self.logger.exception('Error occurred while creating alembic folder')\n self.logger.exception(e)\n raise FileExistsError(\"Folder exists. Please specify a new folder name\") from FileExistsError \n\n def _create_checkpoint_file(self, **kwargs) -> bool:\n '''\n Create a checkpoint file in the folder name/path\n passed while initializing modelgen. The file created\n is named `.modelgen`. This file let's the program know\n that modelgen has been initialized in the directory\n\n Returns bool, True if successful, False otherwise.\n '''\n init = kwargs.get('init')\n self.write_to_file(path=path.join(init, '.modelgen'), data='')\n return True\n\n def _find_checkpoint_file(self) -> bool:\n '''\n Check if the checkpoint file `.modelgen` exists in\n the directory or not. This function is run before \n creating the sqlalchemy python code.\n\n Returns bool, True if file exists, False if file\n doesn't exist.\n '''\n chkpnt_filepath = path.join(getcwd(), '.modelgen')\n if not path.exists(chkpnt_filepath):\n err_str = 'Either modelgen is not initialized, or you are in the wrong folder\\n'\n err_str += 'Please initialize modelgen (modelgen --source yaml --init ./YOUR_FOLDER_NAME)'\n err_str += ' or execute commands from /path/YOUR_FOLDER_NAME'\n raise FileNotFoundError(err_str)\n else:\n return True\n\n def _create_model(self, datasource: str, alembic: bool=False, \n filepath: str=None) -> bool:\n '''\n Create sqlalchemy code, based on the schema\n defined in the yaml schema template file. The code files\n are created in a folder called `models` and the files\n are created by the datasource name. Example: if the datasource \n name is inventory, the model file will be \n `models/inventory.py`.\n\n Args:\n datasource (str): name of the datasource.\n This is defined by the name of the\n schema template yaml file. \n for example, if the schema file is named \n inventory.yaml, the datasource name will be \n inventory\n\n alembic (bool, default: False): If set to True,\n python code to support alembic migrations \n will also be created.\n\n filepath (str, default: None): filepath of the\n schema template yaml file. If nothing is passed,\n a path will be constructed using current directory\n and the datasource name. 
This consturcted path\n will be current_working_dir/templates/datasource.yaml\n\n Returns:\n (bool): True, if sqlalchemy model code generation is successful\n False, if sqlalchemy model code generation fails\n '''\n if not filepath:\n filepath = path.join(constants.templates_folder, f\"{datasource}.yaml\")\n Validate(filepath=filepath).validate()\n parser = Parser(filepath=filepath)\n src_template = Template(alchemygen)\n py_code = src_template.render(datasource=datasource,yaml_data=parser.data, cst=constants, bool=bool)\n Path(constants.models_folder).mkdir(parents=True, exist_ok=True)\n py_filepath = path.join(constants.models_folder, f'{datasource}.py')\n self.write_to_file(path=py_filepath, data=py_code)\n if alembic:\n self._create_alembic_meta()\n return True\n\n def _create_alembic_meta(self) -> bool:\n '''\n Creates code required to support alembic migrations.\n The code is created in a folder `metadata`. A file\n named __init__.py is created in the `metadata` folder\n which imports the sqlalchemy metadata from all the models\n sitting in the `models` folder.\n\n Returns bool, True if code creation is successful,\n False if code creation fails.\n '''\n alembic_template = Template(metagen)\n _, _, filenames = next(walk(constants.models_folder))\n alembic_meta = alembic_template.render(filenames=filenames, cst=constants,\n splitext=path.splitext)\n Path(constants.alembic_meta_folder).mkdir(parents=True, exist_ok=True)\n alembic_meta_filepath = path.join(constants.alembic_meta_folder, '__init__.py')\n self.write_to_file(path=alembic_meta_filepath, data=alembic_meta)\n return True\n\n def create_structure(self, init: bool=False) -> bool:\n if bool(init):\n self._create_alembic_folder(init=init)\n self._create_template_folder(init=init)\n self._create_checkpoint_file(init=init)\n return True\n return None\n\n def create_models(self, createmodel: bool=False, file: str=None, alembic: bool=False) -> bool:\n if bool(createmodel) and bool(file):\n if file.endswith('.yaml'):\n datasource = file.split('.yaml')[0].split('/')[-1]\n elif file.endswith('yml'):\n datasource = file.split('.yml')[0].split('/')[-1]\n else:\n raise NameError('Please specify a .yaml or .yml file')\n self._find_checkpoint_file()\n self.logger.info(f\"Creating models at {file}\")\n self._create_model(datasource=datasource, alembic=alembic)\n return True\n return None","repo_name":"shrinivdeshmukh/sqlalchemy-modelgen","sub_path":"modelgen/modelgenerator.py","file_name":"modelgenerator.py","file_ext":"py","file_size_in_byte":10052,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"94"}
+{"seq_id":"23880907983","text":"from Project.____Domain.Entities.Book import Book\nfrom Project.____Domain.Validators.Validator import Validator\nfrom Project._____Utils.Exceptions import bookNotInMemError, duplicatedBookError, noBooksInMemError, shouldNotBeRaisedError\nfrom Project.___Repository.BookRepo import BookRepository\nclass BookService:\n def __init__(self, bookRepo, validator):\n \"\"\"\n initializeaza un nou service pentru carti care are acces la repositoriul de carti si la un validator\n :param bookRepo: repositoriul cu carti\n :param validator: validatorul de obiecte\n \"\"\"\n self.__bookRepo = bookRepo\n self.__validator = validator\n\n def adauga_carte(self, title, author, description, k_copies=1):\n \"\"\"\n adauga o carte nou in memorie\n :param title: str: titlu\n :param author: str: autor\n :param description: str: descriere\n :param k_copies: int: nr exemplare\n :return: none\n :raises: duplicatedBookError daca cartea e deja in memorie\n \"\"\"\n new_book = Book(title, author, description, k_copies)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is not None:\n raise duplicatedBookError\n\n self.__bookRepo.append(new_book)\n def sterge_carte(self, title, author):\n \"\"\"\n sterge o carte din memorie\n :param title: str: titlu\n :param author: str: autor\n :return: none\n :raises: bookNotInMemError daca cartea nu e in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n self.__bookRepo.remove(book_in_memory)\n def modifica_carte(self, title, author, newTitle=None, newAuthor=None, new_k_copies=None, new_desc=None):\n \"\"\"\n modifica un camp dintr-o carte din memorie\n :param title: str: titlu\n :param author: str: autor\n :param newTitle: str: titlu nou\n :param newAuthor: str: autor nou\n :param new_k_copies: int: nr exemplare nou\n :param new_desc: str: descriere noua\n :return: none\n :raises: bookNotInMemError daca cartea nu e in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n self.__bookRepo.remove(book_in_memory)\n if newTitle is not None:\n try_book = Book(newTitle, author)\n self.__validator.validateBook(try_book)\n if self.__bookRepo.find(try_book) is not None:\n raise duplicatedBookError\n\n book_in_memory.setTitle(newTitle)\n elif newAuthor is not None:\n try_book = Book(title, newAuthor)\n self.__validator.validateBook(try_book)\n if self.__bookRepo.find(try_book) is not None:\n raise duplicatedBookError\n\n book_in_memory.setAuthor(newAuthor)\n elif new_k_copies is not None:\n book_in_memory.setTotalCopies(new_k_copies)\n elif new_desc is not None:\n book_in_memory.setDescription(new_desc)\n else:\n raise shouldNotBeRaisedError\n self.__bookRepo.append(book_in_memory)\n\n def cauta_exemplare_carte(self, title, author):\n \"\"\"\n returneaza numarul de exemplare dintr-o carte\n :param title: str: titlu\n :param author: str: autor\n :return: int: nr exemplare\n :raises: bookNotInMemError daca cartea nu exista in memorie\n \"\"\"\n new_book = Book(title, author)\n self.__validator.validateBook(new_book)\n\n book_in_memory = self.__bookRepo.find(new_book)\n if book_in_memory is None:\n raise bookNotInMemError\n\n k_copies = book_in_memory.getTotalCopies()\n return k_copies\n # def 
raport_carti_inchiriate(self):\n # \"\"\"\n # creeaza un raport cu cartile sortate dupa nr de inchirieri\n # :return: str: un text cu toate cartile sortate\n # \"\"\"\n # if len(self.__bookRepo.getAll()) == 0:\n # raise noBooksInMemError\n #\n # self.__bookRepo.sort_by(byBorrows=1, reverse=True)\n # raport = self.__bookRepo.get_report()\n # return raport\n\n\n# def test_adauga_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# assert len(bookRepository.getAll()) == 1\n#\n# try:\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n# assert False\n# except duplicatedBookError:\n# assert True\n# def test_sterge_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n#\n# assert len(bookRepository.getAll()) == 0\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_modifica_carte():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# bookService.modifica_carte(\"Ana\", \"Maria\", \"Pisici\")\n#\n# assert bookRepository.getAll()[0].getTitle() == \"Pisici\"\n#\n# try:\n# bookService.modifica_carte(\"Ana\", \"Maria\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_cautare():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# assert bookService.cauta_exemplare_carte(\"Ana\", \"Maria\") == 10\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"nush\")\n# assert False\n# except bookNotInMemError:\n# assert True\n# def test_raport():\n# validator = Validator()\n# bookRepository = BookRepository()\n# bookService = BookService(bookRepository, validator)\n# bookService.adauga_carte(\"Ana\", \"Maria\", \"pisici\", 10)\n#\n# assert bookService.raport_carti_inchiriate() is not None\n#\n# try:\n# bookService.sterge_carte(\"Ana\", \"Maria\")\n# bookService.raport_carti_inchiriate()\n# assert False\n# except noBooksInMemError:\n# assert True\n#\n#\n# test_adauga_carte()\n# test_sterge_carte()\n# test_modifica_carte()\n# test_cautare()\n# test_raport()","repo_name":"beji02/College-projects-2021-2022","sub_path":"Python projects/Library manager/Project/__Controller/BookService.py","file_name":"BookService.py","file_ext":"py","file_size_in_byte":6831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"1015851493","text":"import pickle\nimport numpy as np\nimport scipy.sparse as sparseMatrix\n\ndef loadFiles():\n durgDict = {}\n drugidx = 0\n\n with open(\"word_dict.dict\", \"rb\") as f:\n wordDict = pickle.load(f)\n #get dimension\n dimension = 0\n for i in wordDict.items():\n if i[1] > dimension:\n dimension = i[1]\n f.close()\n\n print(\"word dict loaded.\")\n\n print(\"word dict inverse loaded.\")\n\n with open(\"drug_adr.dict\", \"rb\") as f:\n subDict = pickle.load(f)\n f.close()\n for i in subDict.keys():\n durgDict[i.strip().lower()] = drugidx\n drugidx += 1\n\n with open(\"/data/work/huaminz2/CS410/project/GuoShijie/word_matrix.matrix\", \"rb\") as f:\n matrix = pickle.load(f)\n f.close()\n print(\"doc dict loaded.\")\n print(\"matrix shape: \" + str(matrix.shape))\n print(\"=======Initialization completed========\")\n return matrix, wordDict, durgDict, dimension\n\ndef getDegree(matrix,drugDict,wordDict):\n rs = {}\n drugDegree = {}\n for drug in drugDict:\n idx = wordDict[drug]\n degree = len(matrix[idx].nonzero()[1])\n if degree >= 0:\n rs[drug] = idx\n drugDegree[drug] = degree\n rs = rs.items()\n drugDegree = drugDegree.items()\n rs = sorted(rs, key=lambda x: x[1])\n drugDegree = sorted(drugDegree, key=lambda x: x[1])\n return rs,drugDegree\n\n\nif __name__ == '__main__':\n matrix, wordDict, durgDict, dimension = loadFiles()\n rs, drugDegree = getDegree(matrix,durgDict,wordDict)\n #l = len(rs)\n rs = rs[0:99]\n drugDegree = drugDegree[0:99]\n for i in rs:\n print(i)\n fo = open(\"drug_degree.dict\", \"wb\")\n pickle.dump(drugDegree, fo)\n fo.close()\n","repo_name":"Lycbel/410ProjectFinal","sub_path":"Web/cs410/searchAlgorithm/checkdrug.py","file_name":"checkdrug.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"14035720489","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport pickle\n\nwith open('/var/www/html/information_theory/feima/uploads/s300.pkl', 'rb') as f:\n (stock_to_id, id_to_stock) = pickle.load(f)\n\nm = 300 # num of stocks we use\nmax_n = 6000\n\ndata_mat = np.ones((m, max_n))*-1\n\n_step = 0\nstep = 1\nall_step = 0\nB = []\nstock_state = np.zeros(m, dtype=int)\n\ntime_dict = {}\ndef time_to_id(time): \n global time_dict\n if time not in time_dict.keys():\n time_dict[time] = len(time_dict.keys())\n return time_dict[time]\n\ndef build_mat(ss, history_stock_data):\n global data_mat\n '''returns a m x s data matrix'''\n df = history_stock_data\n for ind, row in df.iterrows():\n s = row['Stock Code']\n if s not in stock_to_id.keys():\n continue\n t = str(row['Time'])\n p = row['Opening Price']\n s_id = stock_to_id[s]\n t_id = time_to_id(t)\n if s_id < m and t_id < max_n:\n data_mat[s_id, t_id] = p\n\ndef predict_next_x(P, index, w=5):\n t = index-1\n x = np.zeros(P.shape[1]) # x_t+1\n if index < w:\n w = index\n for k in range(m):\n cnt = 0\n for i in range(w):\n if P[t-i, k] > 0:\n x[k] += (P[t-i, k]/P[t, k]) # formula(1)\n cnt += 1\n if cnt > 0:\n x[k] /= cnt\n return x\n\ndef predict_next_b(B, P, mask, index, epsilon, w=5):\n t = index-1\n b_t = B[-1]\n x_t1 = predict_next_x(P, index, w)\n x_mean = np.mean(x_t1)\n #print(x_t1, x_mean)\n lam = max(0.0, (epsilon-np.dot(b_t,x_t1))/(np.linalg.norm(x_t1-x_mean)**2))\n lam = min(100000, lam)\n #print(lam)\n #print(x_t1 - x_mean)\n b_t1 = b_t + lam * (x_t1 - x_mean)\n res = simplex_proj(b_t1)*mask \n res *= 1/sum(res)\n return res #normalization\n\ndef simplex_proj(y):\n \"\"\" Projection of y onto simplex. \"\"\"\n m = len(y)\n bget = False\n\n s = sorted(y, reverse=True)\n tmpsum = 0.\n\n for ii in range(m-1):\n tmpsum = tmpsum + s[ii]\n tmax = (tmpsum - 1) / (ii + 1);\n if tmax >= s[ii+1]:\n bget = True\n break\n\n if not bget:\n tmax = (tmpsum + s[m-1] -1)/m\n \n return np.maximum(y-tmax,0.)\n\ndef update_state(money, mask, pv, bv):\n global stock_state\n #print(stock_state)\n old_state = stock_state\n all_money = money \n for k in range(m):\n if pv[k] > 0:\n all_money += pv[k] * stock_state[k]\n all_money = all_money*0.8\n new_state = np.zeros(m, dtype=int)\n for i in range(m):\n if pv[i] > 0:\n new_state[i] = int(all_money * bv[i] / pv[i])\n buy_code, buy_num, sell_code, sell_num = [], [], [], []\n for i in range(m):\n if i not in id_to_stock.keys():\n continue\n if new_state[i] < old_state[i] and mask[i] == 1:\n sell_code.append(id_to_stock[i])\n sell_num.append(old_state[i] - new_state[i])\n if new_state[i] > old_state[i] and mask[i] == 1:\n buy_code.append(id_to_stock[i])\n buy_num.append(new_state[i] - old_state[i])\n stock_state = new_state\n #print(buy_code)\n return sell_code, sell_num, buy_code, buy_num\n\ndef invest(data_mat, n, money, mask, w=1, epsilon=1.00001):\n global B\n \n P = data_mat\n '''\n X = np.ones_like(P)\n for i in range(m):\n for j in range(1, n):\n X[i, j] = P[i, j] / P[i, j-1]\n '''\n P = P.transpose()\n #print(P)\n \n if n == 0:\n B.append(np.array([1/m for i in range(m)]))\n else:\n b = predict_next_b(B, P, mask, n, epsilon, w)\n B.append(b)\n #print('B:', B[-1])\n sell_code, sell_num, buy_code, buy_num = update_state(money, mask, P[n], B[-1])\n return sell_code, sell_num, buy_code, buy_num\n\ndef get_avail(hist, tt):\n mask = np.zeros(m)\n for ind, row in hist.iterrows():\n s = row['Stock Code']\n t = row['Time']\n p = row['Opening 
Price']\n if s not in stock_to_id.keys():\n continue\n s_id = stock_to_id[s]\n if tt == t:\n mask[s_id] = 1\n return mask\n \ndef model(s, money, history_stock_data, investment_data):\n #path='/var/www/html/information_theory/feima/test_data.csv'\n global _step, all_step\n all_step += 1\n \n w=5\n epsilon=1.000000001\n \n if _step > 0 or all_step > 10:\n _step -= 1\n add_data=pd.DataFrame(columns=['Time','Stocks you sell','Corresponding number of stocks you sell',\n 'Stocks you buy','Corresponding number of stocks you buy']) \n add_data=add_data.append({'Time': s}, ignore_index=True)\n return add_data\n \n ss = str(s)\n history_stock_data = history_stock_data[-w*520:]\n history_stock_data = history_stock_data.loc[history_stock_data['Time'] == s]\n build_mat(ss, history_stock_data)\n \n _step = step-1\n mask = get_avail(history_stock_data, s)\n sell_code, sell_num, buy_code, buy_num = invest(data_mat, time_to_id(ss), money, mask, w=w, epsilon=epsilon) \n \n add_data=pd.DataFrame(columns=['Time','Stocks you sell','Corresponding number of stocks you sell',\n 'Stocks you buy','Corresponding number of stocks you buy']) \n \n if len(sell_code) > 0 and len(buy_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in sell_code])\n s2 = ', '.join([\"%d\"%(x) for x in sell_num])\n s3 = ', '.join([\"%d\"%(x) for x in buy_code])\n s4 = ', '.join([\"%d\"%(x) for x in buy_num])\n add_data=add_data.append({'Time': s,'Stocks you sell':s1,'Corresponding number of stocks you sell':s2,\n 'Stocks you buy':s3,'Corresponding number of stocks you buy':s4}, ignore_index=True) \n elif len(sell_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in sell_code])\n s2 = ', '.join([\"%d\"%(x) for x in sell_num])\n add_data=add_data.append({'Time': s,'Stocks you sell':s1,'Corresponding number of stocks you sell':s2}, ignore_index=True) \n elif len(buy_code) > 0:\n s1 = ', '.join([\"%d\"%(x) for x in buy_code])\n s2 = ', '.join([\"%d\"%(x) for x in buy_num])\n add_data=add_data.append({'Time': s,'Stocks you buy':s1,'Corresponding number of stocks you buy':s2}, ignore_index=True) \n else:\n add_data=add_data.append({'Time': s}, ignore_index=True)\n\n return add_data","repo_name":"naiqili/it","sub_path":"task2/Wsp6pQGEPp_20181229_13.py","file_name":"Wsp6pQGEPp_20181229_13.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"37388904380","text":"import os\nfrom skimage import io, transform\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms#, utils\nimport collections\n# import torch.optim as optim\n\nimport numpy as np\nfrom PIL import Image\nimport glob\nimport cv2\n\nfrom data_loader_albu import generate_transforms, SalObjDataset, SalObjDatasetT\nfrom albumentations import (\n Compose,\n\tSmallestMaxSize,\n)\n\nfrom model import U2NET # full size version 173.6 MB\nfrom model import U2NETP # small version u2net 4.7 MB\n\n# normalize the predicted SOD probability map\ndef normPRED(d):\n ma = torch.max(d)\n mi = torch.min(d)\n dn = (d-mi) / (ma-mi)\n return dn\n\ndef save_output(image_name, pred, d_dir):\n\n predict = pred\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n\n im = Image.fromarray(predict_np * 255).convert('RGB')\n img_name = image_name.split(os.sep)[-1]\n image = io.imread(image_name)\n imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)\n\n pb_np = np.array(imo)\n\n aaa = img_name.split(\".\")\n bbb = aaa[0:-1]\n imidx = bbb[0]\n for i in range(1, len(bbb)):\n imidx = imidx + \".\" + bbb[i]\n\n imo.save(d_dir + imidx + '.png')\n\n######### modified by wjj for more smooth results generate ###########\n# def save_output(image_name, pred, d_dir):\n# img_name = image_name.split(os.sep)[-1]\n# image = cv2.imread(image_name)\n# h, w = image.shape[:2]\n# predict = pred\n# predict = predict.squeeze()\n# predict_np = predict.cpu().data.numpy()\n# predict_np = np.uint8(predict_np * 255)\n# predict_np = cv2.resize(predict_np, (w, h))\n# ret, thresh = cv2.threshold(predict_np, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n# kernel = np.ones((3, 3), np.uint8)\n# opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)\n# im = cv2.dilate(opening, kernel, iterations=3) # sure_bg -> im\n# # dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n# # ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)\n# # cv2.imwrite('./sure_fg.jpg', sure_fg)\n# # cv2.imwrite('./sure_bg.jpg', sure_bg)\n# # sure_fg = np.uint8(sure_fg)\n# # unknown = cv2.subtract(sure_bg, sure_fg)\n# # cv2.imwrite('./subtract.jpg', unknown)\n# # ret, markers = cv2.connectedComponents(sure_fg)\n# # markers += 1\n# # markers[unknown == 255] = 0\n# # markers = cv2.watershed(image, markers)\n# # image[markers == -1] = [0, 255, 0]\n# # cv2.imwrite('./res.jpg', image)\n\n# # _, im = cv2.threshold(predict_np * 255, 100, 255, cv2.THRESH_BINARY)\n# # im = cv2.resize(im, (w, h))\n# # im /= 255\n# # im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)\n# # image = np.uint8(image * im)\n\n# # im = np.uint8(im)\n# im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)\n# cv2.bitwise_and(image, im, image)\n# # cv2.imwrite('./res.jpg', image)\n\n# aaa = img_name.split(\".\")\n# bbb = aaa[0:-1]\n# imidx = bbb[0]\n# for i in range(1, len(bbb)):\n# imidx = imidx + \".\" + bbb[i]\n# # cv2.imwrite(d_dir + imidx + 'mask.jpg', predict_np*255)\n# cv2.imwrite(d_dir + imidx + '.jpg', image)\n######### modified ended ###########\n\ndef main():\n\n # --------- 1. 
get image path and name ---------\n model_name = 'u2netp' # u2netp u2net\n data_dir = '/data2/wangjiajie/datasets/scene_segment1023/u2data/'\n image_dir = os.path.join(data_dir, 'test_imgs')\n prediction_dir = os.path.join('./outputs/', model_name + '/')\n if not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir, exist_ok=True)\n # tra_label_dir = 'test_lbls/'\n\n image_ext = '.jpg'\n # label_ext = '.jpg' # '.png'\n model_dir = os.path.join(os.getcwd(), 'saved_models', model_name, model_name + '.pth')\n\n img_name_list = glob.glob(image_dir + os.sep + '*')\n print(f'test img numbers are: {len(img_name_list)}')\n\n # --------- 2. dataloader ---------\n #1. dataloader\n test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,\n lbl_name_list = [],\n transform=Compose([SmallestMaxSize(max_size=320),])\n )\n test_salobj_dataloader = DataLoader(test_salobj_dataset,\n batch_size=1,\n shuffle=False,\n num_workers=1)\n\n # --------- 3. model define ---------\n if (model_name == 'u2net'):\n print(\"...load U2NET---173.6 MB\")\n net = U2NET(3, 1)\n elif(model_name == 'u2netp'):\n print(\"...load U2NEP---4.7 MB\")\n net = U2NETP(3, 1)\n \n # net.load_state_dict(torch.load(model_dir))\n checkpoint = torch.load(model_dir)\n d = collections.OrderedDict()\n for key, value in checkpoint.items():\n tmp = key[7:]\n d[tmp] = value\n net.load_state_dict(d)\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n # --------- 4. inference for each image ---------\n for i_test, data_test in enumerate(test_salobj_dataloader):\n\n print(\"inferencing:\", img_name_list[i_test].split(os.sep)[-1])\n\n inputs_test = data_test['image']\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = Variable(inputs_test)\n\n d1, d2, d3, d4, d5, d6, d7= net(inputs_test)\n\n # normalization\n pred = 1.0 - d1[:, 0, :, :]\n pred = normPRED(pred)\n\n # save results to test_results folder\n save_output(img_name_list[i_test], pred, prediction_dir)\n\n del d1, d2, d3, d4, d5, d6, d7\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"trarynight/u2net","sub_path":"multi_u2net_test.py","file_name":"multi_u2net_test.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24389950001","text":"import json\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\nfrom .models import User, Post\n\n\nfrom . import util \n\ndef index(request):\n return render(request, \"network/index.html\")\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"network/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"network/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"network/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"network/register.html\")\n\n@login_required\ndef publish(request):\n \"\"\"\n Handles a POST request for a new post\n \"\"\"\n # Composing a new post must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n \n data = json.loads(request.body)\n user = request.user\n\n post = Post(author=user, body = data['body'])\n print(post)\n post.save()\n\n print(data)\n\n return JsonResponse({\"message\": \"Post created successfully.\"}, status=201)\n\ndef user_posts(request, username):\n page = request.GET.get('page', '1')\n try:\n posts = util.get_all_user_posts(username)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": int(page),\n \"num_pages\" : paginator.num_pages},\n safe=False)\ndef all_posts(request):\n page = request.GET.get('page', '1')\n posts = Post.objects.all().order_by('-timestamp_created')\n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": page,\n \"num_pages\" : paginator.num_pages},\n safe=False)\n\n@csrf_exempt\ndef post(request, post_id):\n \"\"\"\n Handles GET and PUT requests to get or edit the post with the id. 
When successful,\n both return the current post in JSON form.\n \"\"\"\n # Query for requested post\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return JsonResponse({\"error\": \"Post not found.\"}, status=404)\n if request.method == 'GET':\n return JsonResponse(post.serialize())\n \n if request.method == 'PUT':\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'User must be authenticated to like'})\n\n data = json.loads(request.body)\n print(data)\n\n if data.get(\"body\") is not None:\n # User wants to update body\n if request.user != post.author:\n return JsonResponse({\"error\": \"Must be the owner of the post to modify it!\"})\n post.body = data.get(\"body\")\n post.save()\n return JsonResponse(post.serialize(), safe=True)\n \n if data.get(\"like\") is not None:\n try:\n liked = post.users_who_liked.get(pk=request.user.id)\n # if the user has liked the post already, remove them\n post.users_who_liked.remove(liked)\n post.save()\n except User.DoesNotExist:\n # if user hasn't liked it, add them to the list of likers\n post.users_who_liked.add(request.user)\n post.save()\n return JsonResponse(post.serialize(), safe=True)\n\n@login_required\ndef following_posts(request):\n username = request.user.username\n if request.method != 'GET':\n return JsonResponse({'error': 'Request to following feed must be a GET'})\n try:\n posts = util.get_all_following_posts(username)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n page = request.GET.get('page', '1') \n paginator = Paginator(posts, 10)\n out = paginator.page(page).object_list \n return JsonResponse({\"posts\" : [post.serialize() for post in out],\n \"current_page\": page,\n \"num_pages\" : paginator.num_pages},\n safe=False) \n\n@login_required\ndef user(request, username):\n \"\"\"\n Handles GET and PUT requests to get or edit the user with the username. When successful,\n both return the current post in JSON form.\n \"\"\"\n # Query for requested post\n try:\n queried_user = User.objects.get(username=username)\n except User.DoesNotExist:\n return JsonResponse({\"error\": \"User not found.\"}, status=404)\n if request.method == 'GET':\n return JsonResponse(queried_user.serialize())\n \n if request.method == 'PUT':\n if not request.user.is_authenticated:\n return JsonResponse({'error': 'User must be authenticated to change user info'})\n data = json.loads(request.body)\n\n if data.get(\"follow\") is not None:\n if request.user.id == queried_user.id:\n return JsonResponse({'error': 'Cannot follow self!'})\n try:\n queried_user.followers.get(pk=request.user.id)\n # if the user has followed, remove them\n queried_user.followers.remove(request.user)\n queried_user.save()\n except User.DoesNotExist:\n # if user hasn't followed, follow\n queried_user.followers.add(request.user)\n queried_user.save()\n return JsonResponse(queried_user.serialize(), safe=True)","repo_name":"ajnipp/network","sub_path":"network/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"20778638443","text":"#!/usr/bin/env python3\n# _*_ coding:utf-8 _*_\n#\n# Author: Payne Zheng \n# Date: 2019/7/31\n# Location: DongGuang\n# Desc: do the right thing\n\n\"\"\"\n信号量也是一把锁,可以指定信号量为5,\n对比互斥锁同一时间只能有一个任务抢到锁去执行,信号量同一时间可以有5个任务拿到锁去执行\n\"\"\"\nimport time\nfrom threading import Thread, Semaphore, currentThread, activeCount\n\nsm = Semaphore(3) # 信号量锁对象(允许同时三个线程拿到锁)\n\ndef task():\n with sm: # sm.acquire; sm.release\n print(f'{currentThread().name} is work...')\n print(f'activeCount: {activeCount()}')\n time.sleep(3)\n print(f'{currentThread().name} is done...')\n\n\nif __name__ == \"__main__\":\n for i in range(10):\n t = Thread(target=task)\n t.start()\n\n","repo_name":"PAYNE1Z/python-learn","sub_path":"luffycity-s8/第四模块_并发编程/13_信号量.py","file_name":"13_信号量.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"6150206022","text":"from unittest import TestCase\nfrom unittest.mock import create_autospec\n\nfrom python_dice.interface.constraint.i_constraint_factory import IConstraintFactory\nfrom python_dice.interface.constraint.i_constraint_merger import IConstraintMerger\nfrom python_dice.interface.constraint.i_constraint_set import IConstraintSet\nfrom python_dice.src.constraint.constraint_set_factory import ConstraintSetFactory\n\n\nclass TestConstraintSetFactory(TestCase):\n def test_create_constraint_set(self):\n constraint_set_factory = ConstraintSetFactory()\n self.assertIsInstance(constraint_set_factory.create_constraint_set(), IConstraintSet)\n\n @staticmethod\n def test_uses_constraint_merger():\n mock_constraint_merger = create_autospec(IConstraintMerger)\n constraint_set_factory = ConstraintSetFactory(constraint_merger=mock_constraint_merger)\n constraint_set_factory.create_constraint_set()\n mock_constraint_merger.merge_new_constraints.assert_called_once()\n\n def test_uses_constraint_factory(self):\n mock_constraint_factory = create_autospec(IConstraintFactory)\n constraint_set_factory = ConstraintSetFactory(constraint_factory=mock_constraint_factory)\n new_set = constraint_set_factory.create_constraint_set()\n self.assertEqual({mock_constraint_factory.null_constraint}, set(new_set.constraints))\n","repo_name":"markbrockettrobson/python_dice","sub_path":"python_dice/test/constraint/test_constraint_set_factory.py","file_name":"test_constraint_set_factory.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"94"}
+{"seq_id":"456301844","text":"import os.path\n\nfrom glue.lal import CacheEntry\n\nfrom .. import version\nfrom ..time import to_gps\nfrom ..utils import with_import\n\n__version__ = version.version\n__author__ = 'Duncan Macleod '\n\n\n@with_import('glue.datafind')\ndef connect(host=None, port=None):\n \"\"\"Open a new datafind connection\n\n Parameters\n ----------\n host : `str`\n name of datafind server to query\n port : `int`\n port of datafind server on host\n\n Returns\n -------\n connection : `~glue.datafind.GWDataFindHTTPConnection`\n the new open connection\n \"\"\"\n port = port and int(port)\n if port is not None and port != 80:\n cert, key = datafind.find_credential()\n return datafind.GWDataFindHTTPSConnection(\n host=host, port=port, cert_file=cert, key_file=key)\n else:\n return datafind.GWDataFindHTTPConnection(host=host, port=port)\n\n\ndef find_frametype(channel, gpstime=None, frametype_match=None,\n host=None, port=None, return_all=False, exclude_tape=False):\n \"\"\"Find the frametype(s) that hold data for a given channel\n \"\"\"\n from ..detector import Channel\n channel = Channel(channel)\n name = channel.name\n if gpstime is not None:\n gpstime = to_gps(gpstime).seconds\n connection = connect(host, port)\n types = connection.find_types(channel.ifo[0], match=frametype_match)\n # get reference frame for all types\n frames = []\n for ft in types:\n try:\n if gpstime is None:\n frame = connection.find_latest(\n channel.ifo[0], ft, urltype='file')[0]\n else:\n frame = connection.find_frame_urls(\n channel.ifo[0], ft, gpstime, gpstime, urltype='file',\n on_gaps='ignore')[0]\n except (IndexError, RuntimeError):\n continue\n else:\n if os.access(frame.path, os.R_OK) and (\n not exclude_tape or not on_tape(frame)):\n frames.append((ft, frame.path))\n # sort frames by allocated block size and regular size\n # (to put frames on tape at the bottom of the list)\n frames.sort(key=lambda x: (on_tape(x[1]), num_channels(x[1])))\n # search each frametype for the given channel\n found = []\n for ft, path in frames:\n if get_channel_type(name, path):\n if not return_all:\n return ft\n else:\n found.append(ft)\n if len(found) == 0 and gpstime:\n raise ValueError(\"Cannot locate %r in any known frametype at GPS=%d\"\n % (name, gpstime))\n elif len(found) == 0:\n raise ValueError(\"Cannot locate %r in any known frametype\" % name)\n else:\n return found\n\n\n@with_import('lalframe')\ndef num_channels(framefile):\n \"\"\"Find the total number of channels in this framefile\n \"\"\"\n frfile = lalframe.FrameUFrFileOpen(framefile, \"r\")\n frtoc = lalframe.FrameUFrTOCRead(frfile)\n return sum(\n getattr(lalframe, 'FrameUFrTOCQuery%sN' % type_.title())(frtoc) for\n type_ in ['adc', 'proc', 'sim'])\n\n\n@with_import('lalframe')\ndef get_channel_type(channel, framefile):\n \"\"\"Find the channel type in a given frame file\n\n Parameters\n ----------\n channel : `str`, `~gwpy.detector.Channel`\n name of data channel to find\n framefile : `str`\n path of GWF file in which to search\n\n Returns\n -------\n ctype : `str`\n the type of the channel ('adc', 'sim', or 'proc') if the\n channel exists in the table-of-contents for the given frame,\n otherwise `False`\n \"\"\"\n name = str(channel)\n # read frame and table of contents\n frfile = lalframe.FrameUFrFileOpen(framefile, \"r\")\n frtoc = lalframe.FrameUFrTOCRead(frfile)\n for type_ in ['sim', 'proc', 'adc']:\n query = getattr(lalframe, 'FrameUFrTOCQuery%sName' % type_.title())\n i = 0\n while True:\n try:\n c = query(frtoc, i)\n except 
RuntimeError:\n break\n else:\n if c == name:\n return type_\n i += 1\n return False\n\n\ndef find_best_frametype(channel, start, end, urltype='file',\n host=None, port=None, allow_tape=True):\n \"\"\"Intelligently select the best frametype from which to read this channel\n \"\"\"\n start = to_gps(start).seconds\n end = to_gps(end).seconds\n frametype = find_frametype(channel, gpstime=start, host=host, port=port,\n exclude_tape=not allow_tape)\n connection = connect(host=host, port=port)\n try:\n cache = connection.find_frame_urls(channel[0], frametype,\n start, end, urltype=urltype,\n on_gaps='error')\n if not allow_tape and on_tape(*cache):\n raise RuntimeError()\n except RuntimeError:\n alltypes = find_frametype(channel, gpstime=start, host=host, port=port,\n return_all=True, exclude_tape=not allow_tape)\n cache = [(ft, connection.find_frame_urls(\n channel[0], ft, start, end, urltype=urltype,\n on_gaps='ignore')) for ft in alltypes]\n if not allow_tape:\n cache = [ftc for ftc in cache if not on_tape(*ftc[1])]\n cache.sort(key=lambda x:\n len(x[1]) and -abs(x[1].to_segmentlistdict().values()[0]) or 0)\n try:\n return cache[0][0]\n except IndexError:\n raise ValueError(\"Cannot find any valid frametypes for %r\"\n % channel)\n else:\n return frametype\n\n\ndef on_tape(*files):\n \"\"\"Determine whether any of the given files are on tape\n\n Parameters\n ----------\n *files : `str`, `~glue.lal.CacheEntry`\n one or more paths to GWF files\n\n Returns\n -------\n True/False : `bool`\n `True` if any of the files are determined to be on tape,\n otherwise `False`\n \"\"\"\n for f in files:\n if isinstance(f, CacheEntry):\n f = f.path\n if os.stat(f).st_blocks == 0:\n return True\n return False\n","repo_name":"garywu921207/gwpy","sub_path":"gwpy/io/datafind.py","file_name":"datafind.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"}
+{"seq_id":"32262366807","text":"import sys\nimport os\nimport re\nfrom operator import itemgetter\n\n\ndef print_stdout(command):\n \"\"\"\n Print commands to stdout, which are then interpreted by shell.\n\n :param command: string, command to be interpreted by shell\n :return: None\n \"\"\"\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()\n\n\ndef print_stderr(message):\n \"\"\"\n Print message to stderr, which WILL NOT be interpreted by shell.\n This function is also utilized to print banners and tables.\n\n :param message: string, message to write to stderr\n :return: None\n \"\"\"\n sys.stderr.write(\"%s\\n\" % message)\n sys.stderr.flush()\n\n\ndef split_list(raw_list, num_group, algorithm=\"remainder\"):\n \"\"\"\n Split given list into different groups.\n\n Two algorithms are implemented: by the remainder of the index of each\n element divided by the number of group, or the range of index. For example,\n if we are to split the list of [0, 1, 2, 3] into two groups, by remainder\n we will get [[0, 2], [1, 3]] while by range we will get [[0, 1], [2, 3]].\n\n :param raw_list: list to split\n :param num_group: integer, number of groups\n :param algorithm: string, should be either \"remainder\" or \"range\"\n :return: a list containing the split list\n \"\"\"\n assert num_group in range(1, len(raw_list)+1)\n assert algorithm in (\"remainder\", \"range\")\n num_element = len(raw_list)\n if algorithm == \"remainder\":\n list_split = [[raw_list[i] for i in range(num_element)\n if i % num_group == k] for k in range(num_group)]\n else:\n # Get the numbers of items for each group\n num_item = [num_element // num_group for i in range(num_group)]\n for i in range(num_element % num_group):\n num_item[i] += 1\n # Divide the list according to num_item\n list_split = []\n for i in range(num_group):\n j0 = sum(num_item[:i])\n j1 = j0 + num_item[i]\n list_split.append([raw_list[j] for j in range(j0, j1)])\n return list_split\n\n\ndef get_terminal_size():\n \"\"\"\n Get the current size of the terminal in characters. 
We do not use\n os.get_terminal_size() as it is available only in Python 3.\n\n :return: (integer, integer), size of the terminal\n \"\"\"\n rows, columns = os.popen('stty size', 'r').read().split()\n return int(rows), int(columns)\n\n\ndef print_banner(banner, columns):\n \"\"\"\n Print a banner like --------------- FOO ------------------ to stderr.\n\n The number '2' in this piece of code counts for the two spaces wrapping the\n central text.\n\n :param banner: the central text in the banner\n :param columns: total width of the banner\n :return: None\n \"\"\"\n if len(banner) + 2 > columns:\n print_stderr(banner)\n else:\n num_marks_total = columns - len(banner) - 2\n num_marks_left = num_marks_total // 2 \n num_marks_right = num_marks_total - num_marks_left\n banner_with_marks = \"\"\n mark = \"-\"\n for i in range(num_marks_left):\n banner_with_marks += mark\n banner_with_marks += \" %s \" % banner\n for i in range(num_marks_right):\n banner_with_marks += mark\n print_stderr(banner_with_marks)\n\n\ndef print_table(table_head, table_body, number_items=True):\n \"\"\"\n Print a table to stderr.\n\n :param table_head: string, head of the table\n :param table_body: list of strings\n :param number_items: boolean, whether to number the items in table_body\n :return: None\n \"\"\"\n rows, columns = get_terminal_size()\n\n # Print table head\n print_stderr(\"\")\n print_banner(table_head, columns)\n\n # Print table body\n if len(table_body) == 0:\n print_stderr(\"None\\n\")\n else:\n # Get the maximum length of string with reserved spaces.\n # DO NOT CHANGE THE NUMBER of RESERVED SPACES.\n max_length = max([len(string) for string in table_body])\n if not number_items:\n max_length += 2\n else:\n max_length += 6\n\n # Determine the number of columns and rows of the table\n num_table_column = columns // max_length\n num_table_row = len(table_body) // num_table_column\n if len(table_body) % num_table_column > 0:\n num_table_row += 1\n\n # Break table_body into rows and print\n table_rows = split_list(table_body, num_table_row)\n if not number_items:\n for row in table_rows:\n for string in row:\n fmt = \"%-\" + str(max_length) + \"s\"\n sys.stderr.write(fmt % string)\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n else:\n # Determine the dimension of the transposed table, for numbering\n # the items\n table_dim_trans = []\n for i in range(num_table_column):\n if i < len(table_rows[-1]):\n table_dim_trans.append(num_table_row)\n else:\n table_dim_trans.append(num_table_row - 1)\n\n # Print the table with numbered items\n for i, row in enumerate(table_rows):\n for j, string in enumerate(row):\n fmt = \"%4d) %-\" + str(max_length-6) + \"s\"\n item_number = sum(table_dim_trans[:j]) + i + 1\n sys.stderr.write(fmt % (item_number, string))\n sys.stderr.write(\"\\n\")\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n\ndef print_list(list_head, list_body, number_items=True):\n \"\"\"\n Prints a list to stderr.\n\n :param list_head: string, head of the list\n :param list_body: list of strings\n :param number_items: boolean, whether to number the items\n :return:\n \"\"\"\n sys.stderr.write(\"%s: \" % list_head)\n if len(list_body) == 0:\n sys.stderr.write(\"None\")\n else:\n if number_items:\n for i, item in enumerate(list_body):\n sys.stderr.write(\"%4d) %s\" % (i+1, item))\n else:\n for i, item in enumerate(list_body):\n sys.stderr.write(\" %s\" % item)\n sys.stderr.write(\"\\n\")\n sys.stderr.flush()\n\n\ndef get_latest_version(versions):\n \"\"\"\n Get the latest 
version for given software.\n :param versions: list of string, different versions of the software, each\n version should be in the form of\n [a-zA-Z0-9]+[-/]+[0-9\\.]+.?\n :return: string, the latest version of this software\n \"\"\"\n # Extract and normalize version numbers from software names\n ver_str = [re.search(r\"[0-9\\.]+\", ver).group().split(\".\")\n for ver in versions]\n ver_num = [[int(i) for i in ver if i != \"\"] for ver in ver_str]\n num_digit = max([len(ver) for ver in ver_num])\n for ver in ver_num:\n while len(ver) < num_digit:\n ver.append(0)\n\n # Sort version numbers\n ver_num = sorted(ver_num, key=itemgetter(slice(0, num_digit, 1)))\n\n # Get the software name corresponding to the latest version\n latest_version = sorted(versions)[-1]\n for ver_check in versions:\n ver_str_check = re.search(r\"[0-9\\.]+\", ver_check).group().split(\".\")\n ver_num_check = [int(i) for i in ver_str_check if i != \"\"]\n while len(ver_num_check) < num_digit:\n ver_num_check.append(0)\n difference = [abs(ver_num_check[i] - ver_num[-1][i])\n for i in range(num_digit)]\n if sum(difference) == 0:\n latest_version = ver_check\n break\n return latest_version\n\n\ndef str2list(string, separator=\",\"):\n \"\"\"\n Split a string into a list according to specified separator and remove\n empty elements.\n\n :param string: string to split\n :param separator: separator, should not be a space\n :return: list containing the segments\n \"\"\"\n string_trimmed = string.replace(\"\\n\", \"\").replace(\" \", \"\")\n string_list = [s for s in string_trimmed.split(separator) if s != \"\"]\n return string_list\n\n\ndef str2env(string_list):\n \"\"\"\n Convert the strings in a list to the form of (command, env_name, pattern).\n See the \"environ\" attribute of \"Module\" class for more details.\n\n :param string_list: list of strings to be parsed\n :return: list of tuples in the form of (command, env_name, pattern).\n \"\"\"\n environ = [tuple(s.split()) for s in string_list]\n return environ\n","repo_name":"yhli1016/Pmod","sub_path":"pmod/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"31126073276","text":"#!/usr/bin/env python\n\nimport sys\nimport argparse\nfrom Bio import SeqIO\n# changed calling the function to work better with symbolic links\nfrom seq_functions.seq_funcs import *\n\nparser = argparse.ArgumentParser(description='extract sequences from a list of IDs')\n\nparser.add_argument('-s', '--sequence_file', dest='seqfile',\n type=str,\n help=\"name of sequence file to source\")\nparser.add_argument('-o', '--output', dest='output', \n type=str, help=\"file name for extracted sequences (optional)\")\nparser.add_argument('-l', '--listfile', dest='listing', \n type=str, help=\"file listing seq IDs to extract (one per line)\")\nparser.add_argument('-q', '--fastq', dest='fastq', action='store_true', \n help=\"add -q or --fastq arg is file is fastq (default fasta)\")\n\nargs = parser.parse_args()\ninfile = args.seqfile\noutseq = args.output\nlistfile = args.listing\nfastqfmt = args.fastq\n\nif args.fastq:\n seqtype = 'fastq'\nelse:\n seqtype = 'fasta'\n\nseqlist = make_list(listfile)\n\nseq_set = set(seqlist)\n\nif args.output:\n\n extract_seqs(seq_set, infile, seqtype, outseq)\n\nelse:\n extract_seqs(seq_set, infile, seqtype)\n\nprint(\"all done\")\n\n\n","repo_name":"hughcross/seq_tools","sub_path":"seq_extractor.py","file_name":"seq_extractor.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"5620257039","text":"\"\"\"\nThis example shows connecting to the PN532 and writing & reading a mifare classic\ntype RFID tag\n\"\"\"\nimport time\nimport os\nimport unicodedata\n\nimport audio\nimport board\nimport busio\n\nfrom adafruit_pn532.spi import PN532_SPI\nimport digitalio\nfrom digitalio import DigitalInOut\nimport RPi.GPIO as GPIO\n\nfrom adafruit_pn532.adafruit_pn532 import MIFARE_CMD_AUTH_B\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nspi.try_lock()\nspi.configure(baudrate=12000000)\nspi.unlock()\n\nreader1_pin = DigitalInOut(board.D24)\npn532 = PN532_SPI(spi, reader1_pin, debug=False)\nic, ver, rev, support = pn532.firmware_version\npn532.SAM_configuration()\n\n\nic, ver, rev, support = pn532.firmware_version\nprint(\"Found PN532 with firmware version: {0}.{1}\".format(ver, rev))\n\n# Configure PN532 to communicate with MiFare cards\npn532.SAM_configuration()\n\nprint(\"Waiting for RFID/NFC card to write to!\")\n\nkey = b\"\\xFF\\xFF\\xFF\\xFF\\xFF\\xFF\"\n\nwhile True:\n # Check if a card is available to read\n uid = pn532.read_passive_target(timeout=0.5)\n print(\".\", end=\"\")\n # Try again if no card is available.\n if uid is not None:\n break\n\nprint(\"\")\n\nprint(\"Found card with UID:\", [hex(i) for i in uid])\n\n#mifare 1K layout (chip + card)\n# 1 kByte\n\n# 16 Sektoren zu je 4 Blöcken (16 Bytes/16 Ascii Characters pro Block)\n\n#writeable blocks (https://support.ccs.com.ph/portal/en/kb/articles/mifare-classic-1k-memory-structure)\n# 4, 5, 6\n# 8, 9, 0A,\n# 0C, 0D, 0E,...\n\n#allow only 16 ascii characters, so i only need one block (block 4)\n# 2 characters for prefix \"en\", 1 for suffix \"#\", so my word can have 13 characters!\n\nprint(\"Authenticating block 4 ...\")\nauthenticated = pn532.mifare_classic_authenticate_block(uid, 4, MIFARE_CMD_AUTH_B, key)\nif not authenticated:\n print(\"Authentication failed!\")\n\n\ndata = bytearray(16)\n\nlang = \"en\"\nmessage = \"MANFREd\" #can be 13 characters long\nendofmessage = \"#\"\nmessage = lang+message+endofmessage\n\ndata[0:len(message)] = message.encode()\n\nprint(data)\n\n# Set 16 bytes of block to 0xFEEDBEEF\n#data = bytearray(16)\n#data[0:16] = b\"\\xFE\\xED\\xBE\\xEF\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n\n# Write 16 byte to block 4.\npn532.mifare_classic_write_block(4, data)\n\n# Read block\nprint(\n \"Wrote to block 4, now trying to read that data:\",\n [hex(x) for x in pn532.mifare_classic_read_block(4)],\n)","repo_name":"jimsio/hoorch","sub_path":"rework_mifare_readwrite.py","file_name":"rework_mifare_readwrite.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"31950418298","text":"import pandas as pd\nfrom sklearn.linear_model import RidgeCV, Ridge, Lasso\nimport numpy as np\nfrom datetime import datetime\nfrom scipy.optimize import minimize\nfrom random import random\n\ndef get_team_ranks(teams, coefs):\n\n ranks = (-coefs).argsort()\n\n for i in range(len(team_list)):\n print (i+1, teams[ranks[i]], coefs[ranks[i]])\n\ndef model_pred(model, games):\n games = np.asarray(games)\n return model.predict(games)\n\ndef calculate_ridge(Xs, ys, alpha):#\n # print (\"rolling\", len(Xs), len(ys))\n # print (Xs)\n # print(ys)\n Xs = np.asarray(Xs)\n Ys = np.asarray(ys)\n # Ys = Ys.reshape((Ys.shape[0], 1))\n # print (Xs)\n # print (Ys)\n # print (Xs.shape, Ys.shape)\n clf = Ridge(alpha=alpha, fit_intercept=True).fit(Xs, Ys)\n return clf\n\n\ndef get_inverse_sup_and_tot(sup_goal, tot_goal, sup_grid, tot_grid, sup_value_grid, tot_value_grid):\n new_grid = np.abs(sup_grid - sup_goal) + np.abs(tot_grid - tot_goal) + np.abs(sup_value_grid - sup_goal) / 100 + np.abs(\n tot_value_grid - tot_goal) / 100 + random() / 10000000\n\n cell = np.where(new_grid == new_grid.min())\n print(new_grid.min())\n print(np.abs(sup_grid - sup_goal)[cell], np.abs(tot_grid - tot_goal)[cell], np.abs(sup_value_grid - sup_goal)[cell], np.abs(tot_value_grid - tot_goal)[cell])\n if sup_value_grid[cell] == 0.79 and tot_value_grid[cell] == 3.16:\n pass\n i = 0\n return sup_value_grid[cell], tot_value_grid[cell]\n\nclass BetMaker:\n def __init__(self):\n self.hc_bets = 0\n self.ou_bets = 0\n self.hc_th = 0.25\n self.ou_th = 0.25\n self.hc_pl = 0\n self.ou_pl = 0\n\nimport pickle\n\nfolder = \"op_data/MLS/\"\nfile = \"MLS2021.csv\"\n\nsup_estimator = pickle.load(open(folder + \"ML_models/sup_estimator_pre_lin_5.pkl\", 'rb'))\ntot_estimator = pickle.load(open(folder + \"ML_models/tot_estimator_pre_lin_5.pkl\", 'rb'))\n\nsup_list = []\ntot_list = []\nsup_value_list = []\ntot_value_list = []\nfor s in np.arange(-80, 81):\n #print(s)\n sup_temp = []\n tot_temp = []\n sup_value_temp = []\n tot_value_temp = []\n for t in np.arange(200, 501):\n sup = s / 100\n tot = t / 100\n\n\n\n value = sup_estimator.predict(np.asarray([[sup, tot, abs(sup), sup if sup > 0 else 0, -sup if sup <0 else 0]]))[0]\n value2 = tot_estimator.predict(np.asarray([[sup, tot, abs(sup), sup if sup > 0 else 0, -sup if sup <0 else 0]]))[0]\n\n sup_temp.append(value)\n tot_temp.append(value2)\n sup_value_temp.append(sup)\n tot_value_temp.append(tot)\n\n sup_list.append(sup_temp)\n tot_list.append(tot_temp)\n sup_value_list.append(sup_value_temp)\n tot_value_list.append(tot_value_temp)\n\nsup_list = np.asarray(sup_list)\ntot_list = np.asarray(tot_list)\nsup_value_list = np.asarray(sup_value_list)\ntot_value_list = np.asarray(tot_value_list)\n\nprint (\"build grids\")\n\n\ndf = pd.read_csv(folder + \"fit/\" + file)\n\ndf[\"sup_per_goal\"] = (df[\"home_underlying\"] - df[\"away_underlying\"])/(df[\"home_underlying\"] + df[\"away_underlying\"])\ndf[\"ex_total\"] = (df[\"home_underlying\"] + df[\"away_underlying\"])\n\n#need to roll x games/days\n#need to build_features\n\n#date, teama, teamb, scorea, scoreb, sup per goal, ex total\n\ngames_details = []\n\nfor x, row in df.iterrows():\n sup, tot = get_inverse_sup_and_tot(row[\"sup_per_goal\"], row[\"ex_total\"], sup_list, tot_list, sup_value_list, tot_value_list)\n print (row[\"sup_per_goal\"], row[\"ex_total\"], sup, tot )\n # this_row = [row[\"Date\"],\n # row[\"Time\"],\n # row[\"HomeTeam\"],\n # row[\"AwayTeam\"],\n # row[\"FTHG\"],\n # row[\"FTAG\"],\n # 
row[\"AHh\"],\n # row[\"AvgAHH\"],\n # row[\"AvgAHA\"],\n # row[\"Avg>2.5\"],\n # row[\"Avg<2.5\"],\n # row[\"sup_per_goal\"],\n # row[\"ex_total\"],\n # sup[0],\n # tot[0]] #row[\"home_underlying\"], row[\"away_underlying\"]]\n\n this_row =[row[\"date\"],\n \"\", # row[\"Time\"],\n row[\"home_team\"],\n row[\"away_team\"],\n row[\"home_score\"],\n row[\"away_score\"],\n row[\"sup_per_goal\"],\n row[\"ex_total\"],\n sup[0],\n tot[0]]\n\n print (this_row)\n games_details.append(this_row)\n\n#print (games_details)\n\nteam_list = []\nfor game in games_details:\n if game[2] not in team_list:\n team_list.append(game[2])\n if game[3] not in team_list:\n team_list.append(game[3])\n\n\n\ndef run_league(args, should_print=False, write_csv=False):\n\n bm = BetMaker()\n total_adjust = [0] * len(team_list)\n sup_adjust = [0] * len(team_list)\n\n csv_rows = []\n rolling_game_list = []\n rolling_game_list2 = []\n rolling_y_list = []\n rolling_y_list_2 = []\n next_fixtures_list = []\n\n start_date = None\n the_alpha = 0.001\n game_carry = 0.0125\n required_games = 50\n the_alpha = args[0]\n required_games = int(args[1])\n differences = []\n differences2 = []\n for game in games_details:\n\n # date_as_dt = datetime.strptime(game[0], \"%d/%m/%Y\")\n\n if True:#date_as_dt < datetime(2022, 3, 2):\n if game[0] != start_date and len(rolling_game_list) > required_games:\n #trigger calcs\n\n model = calculate_ridge(rolling_game_list, rolling_y_list, the_alpha)\n model2 = calculate_ridge(rolling_game_list2, rolling_y_list_2, the_alpha)\n # if should_print:\n # print (\"ha\", model.coef_[-1])\n # get_team_ranks(team_list, model.coef_)\n\n start_date = game[0]\n\n for game2 in games_details:\n if game2[0] == start_date:\n fake_row = [0] * (len(team_list)) #one for ha\n fake_row_2 = [0] * (len(team_list)) # one for ha\n home_ind = team_list.index(game2[2])\n away_ind = team_list.index(game2[3])\n fake_row[home_ind] = 1\n fake_row[away_ind] = -1\n #fake_row[-1] = 1\n\n fake_row_2[home_ind] = 1\n fake_row_2[away_ind] = 1\n # fake_row_2[-1] = -1\n\n predicted_value = model_pred(model, [fake_row])[0] + sup_adjust[home_ind] - sup_adjust[away_ind]\n predicted_value_2 = model_pred(model2, [fake_row_2])[0] + total_adjust[home_ind] + total_adjust[\n away_ind]\n actual_sup_prd = sup_estimator.predict(np.asarray([[predicted_value,\n predicted_value_2,\n abs(predicted_value),\n predicted_value if predicted_value > 0 else 0,\n -predicted_value if predicted_value < 0 else 0]]))[0]\n actual_tot_prd = \\\n tot_estimator.predict(np.asarray([[predicted_value,\n predicted_value_2,\n abs(predicted_value),\n predicted_value if predicted_value > 0 else 0,\n -predicted_value if predicted_value < 0 else 0]]))[0]\n\n\n\n\n if should_print:\n print (game2[0], \",\",\n game2[1], \",\",\n game2[2], \",\",\n game2[3], \",\",\n game2[4], \",\",\n game2[5], \",\",\n predicted_value, \",\",\n predicted_value_2, \",\",\n actual_sup_prd, \",\",\n actual_tot_prd, \",\",\n game2[-2], \",\",\n game2[-1], \",\",\n game2[-4], \",\",\n game2[-3])\n # print (model.coef_[home_ind], model.coef_[away_ind], model.intercept_)\n\n\n # sup update\n if game2[4] + game2[5] == 0:\n sup_adjust[home_ind] = 0\n sup_adjust[away_ind] = 0\n else:\n sup_adjust[home_ind] = ((game2[4] - game2[5]) / (game2[4] + game2[5]) - game2[\n -4]) * game_carry\n sup_adjust[away_ind] = ((-game2[4] + game2[5]) / (game2[4] + game2[5]) + game2[\n -4]) * game_carry\n\n # tot_update\n total_adjust[home_ind] = ((game2[4] + game2[5]) - game2[-3]) * game_carry\n total_adjust[away_ind] = 
((game2[4] + game2[5]) - game2[-3]) * game_carry\n\n fake_row = [0] * (len(team_list)) # one for ha\n fake_row_2 = [0] * (len(team_list)) # one for ha\n home_ind = team_list.index(game[2])\n away_ind = team_list.index(game[3])\n fake_row[home_ind] = 1\n fake_row[away_ind] = -1\n\n fake_row_2[home_ind] = 1\n fake_row_2[away_ind] = 1\n\n rolling_game_list.append(fake_row)\n rolling_game_list2.append(fake_row_2)\n rolling_y_list.append(game[-2])\n rolling_y_list_2.append(game[-1])\n rolling_y_list = rolling_y_list[-(required_games + 1):]\n rolling_y_list_2 = rolling_y_list_2[-(required_games + 1):]\n rolling_game_list = rolling_game_list[-(required_games +1 ):]\n rolling_game_list2 = rolling_game_list2[-(required_games + 1):]\n\n model = calculate_ridge(rolling_game_list, rolling_y_list, the_alpha)\n model2 = calculate_ridge(rolling_game_list2, rolling_y_list_2, the_alpha)\n\n rating_dict = {}\n print (\"******************\")\n for x, team in enumerate(team_list):\n print (team, model.coef_[x] + sup_adjust[x], model2.coef_[x] + total_adjust[x])\n rating_dict[team] = {\"sup\": model.coef_[x] + sup_adjust[x],\n \"tot\": model2.coef_[x] + total_adjust[x]}\n\n print (\"sup int\", model.intercept_)\n print(\"tot int\", model2.intercept_)\n rating_dict[\"ints\"] = {\"sup\": model.intercept_,\n \"tot\": model2.intercept_}\n import json\n with open(folder + \"ratings/ratings.json\", \"w\") as outfile:\n json_object = json.dump(rating_dict, outfile)\n return None\n\n#optim = minimize(run_league, np.asarray([1, 40]), method=\"BFGS\")\n\n#print (optim)\n\nrun_league([0.0000001, 50], should_print=True, write_csv=True)\n","repo_name":"BarneyThePD/footything","sub_path":"rolling_regression_alt_5_from_oddsportal.py","file_name":"rolling_regression_alt_5_from_oddsportal.py","file_ext":"py","file_size_in_byte":11199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"6584202406","text":"#!/usr/bin/env python3\nimport argparse\nimport tempfile\nimport os\nimport sys\nfrom typing import * # pylint: disable=wildcard-import,unused-wildcard-import\n\nimport shelve\n\n\ndef parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=\"Map the entities and the relations of an edgelist\"\n )\n parser.add_argument(\"edgelist\", help=\"Path of the edgelist file\")\n parser.add_argument(\n \"-em\",\n \"--ent-map\",\n default=\"entities_map.tsv\",\n help=\"Output path of the mapping for entities\",\n )\n parser.add_argument(\n \"-rm\",\n \"--rel-map\",\n default=\"relations_map.tsv\",\n help=\"Output path of the mapping for relations\",\n )\n parser.add_argument(\n \"-me\",\n \"--mapped-edgelist\",\n default=\"mapped_edgelist.tsv\",\n help=\"Output path of the mapped edgelist\",\n )\n return parser.parse_args()\n\n\ndef normalize_args(args: argparse.Namespace) -> None:\n args.edgelist = os.path.realpath(args.edgelist)\n args.ent_map = os.path.realpath(args.ent_map)\n args.rel_map = os.path.realpath(args.rel_map)\n args.mapped_edgelist = os.path.realpath(args.mapped_edgelist)\n\n\ndef validate_args(args: argparse.Namespace) -> None:\n if not os.path.isfile(args.edgelist):\n print(\"The edgelist file does not exists\")\n sys.exit(1)\n if not os.path.isfile(args.ent_map):\n print(\"The entities mapping file does not exists\")\n sys.exit(1)\n if not os.path.isfile(args.rel_map):\n print(\"The relations mapping file does not exists\")\n sys.exit(1)\n\n\ndef main(args: argparse.Namespace) -> None:\n edgelist_path = args.edgelist\n ent_map_path = args.ent_map\n rel_map_path = args.rel_map\n el_map_path = args.mapped_edgelist\n\n with tempfile.TemporaryDirectory() as tmp:\n ent_dict_path = os.path.join(tmp, \"ent\")\n rel_dict_path = os.path.join(tmp, \"rel\")\n\n with shelve.open(ent_dict_path) as rel_dict, shelve.open(\n rel_dict_path\n ) as ent_dict:\n print(\"Processing entities mapping\")\n with open(ent_map_path, \"r\") as em_handle:\n for line in em_handle:\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n ent_dict[parts[1]] = parts[0]\n\n print(\"Processing relations mapping\")\n with open(rel_map_path, \"r\") as rm_handle:\n for line in rm_handle:\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n rel_dict[parts[1]] = parts[0]\n\n print(\"Writing the mapped edgelist\")\n os.makedirs(os.path.dirname(el_map_path), exist_ok=True)\n with open(el_map_path, \"w+\") as mel_handle, open(\n edgelist_path, \"r\"\n ) as el_handle:\n for line in el_handle:\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n mel_handle.write(\n ent_dict[parts[0]]\n + \"\\t\"\n + rel_dict[parts[1]]\n + \"\\t\"\n + ent_dict[parts[2]]\n + \"\\n\"\n )\n\n\nif __name__ == \"__main__\":\n try:\n ARGS = parse_args()\n\n normalize_args(ARGS)\n validate_args(ARGS)\n main(ARGS)\n except (KeyboardInterrupt, SystemExit):\n print(\"\\nAborted!\")\n","repo_name":"simonepri/edgelist-mapper","sub_path":"edgelist_mapper/bin/map_edgelist.py","file_name":"map_edgelist.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"41459310054","text":"users = {\n 'niklas': [\n 'computer',\n 'sports'\n ],\n 'luca': [\n 'computer',\n 'science'\n ],\n 'florian': [\n 'politics',\n 'sports'\n ]\n}\n\nthreshold = 2.0\n\ndoc_path = 'docs/'\n\nmodel_path = 'model/'\n\ncategories = [\n 'alt.atheism',\n 'comp.graphics',\n 'comp.os.ms-windows.misc',\n 'comp.sys.ibm.pc.hardware',\n 'comp.sys.mac.hardware',\n 'comp.windows.x',\n # 'misc.forsale',\n 'rec.autos',\n 'rec.motorcycles',\n 'rec.sport.baseball',\n 'rec.sport.hockey',\n 'sci.crypt',\n 'sci.electronics',\n 'sci.med',\n 'sci.space',\n 'soc.religion.christian',\n 'talk.politics.guns',\n 'talk.politics.mideast',\n 'talk.politics.misc',\n 'talk.religion.misc'\n]\n\nnew_categories = {\n 'comp': 'computer',\n 'rec': 'sports',\n 'sci': 'science',\n 'religion': 'religion',\n 'politics': 'politics',\n 'atheism': 'religion'\n}\n","repo_name":"covix/vector-space-model","sub_path":"Assignment01/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18449456154","text":"import copy\nimport json\nimport pandas as pd\nimport sys\nimport torch\nimport pytorch_lightning as pl\nimport numpy as np\n\nfrom pathlib import Path\nfrom torch.utils.data import DataLoader\n\nfrom argparse import ArgumentParser\n\nfrom survival_plus_x.data.dataset import GensheimerDatasetInMemory\nfrom survival_plus_x.data.transforms import get_preprocess_transforms\nfrom survival_plus_x.models.survival_plus_unetr import MultitaskPlusUNETR\nfrom survival_plus_x.models.survival_plus_unet import MultitaskPlusUNET\nfrom survival_plus_x.models.multitask import multitask_metrics_from_step_outputs\nfrom survival_plus_x.utils.commandline_params import add_common_args\nfrom survival_plus_x.models.cox_lightning import compute_stratification_logrank_pvalue\n\n\ndef inference_single_sample(args, test_ids, model):\n test_dataset = GensheimerDatasetInMemory(\n image_directories=args.input,\n image_filename=args.img_filename,\n mask_filename=args.mask_filename,\n patient_ids=test_ids,\n outcome_file=args.outcome,\n outcome_file_sep=args.outcome_sep,\n outcome_file_id_column=args.id_col,\n outcome_file_time_column=args.time_col,\n outcome_file_event_column=args.event_col,\n interval_breaks=model.hparams.gensheimer_interval_breaks,\n preprocess_transform=get_preprocess_transforms(list(args.image_size)),\n augmentation_transform=None)\n\n test_loader = DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n trainer = pl.Trainer(gpus=args.gpus)\n\n test_pred_step_outputs = trainer.predict(\n model,\n dataloaders=test_loader)\n\n return test_pred_step_outputs\n\n\ndef inference_multiple_samples(args, test_ids, model):\n # we have to set up the dataset in the way that initially a larger\n # crop is chosen and then a transform that creates random crops of the\n # actually wanted size\n from monai.transforms import RandSpatialCropSamplesd, Resized, Compose\n\n n_samples = args.n_samples\n aggregation_fn = {\n \"min\": torch.min,\n \"max\": torch.max,\n \"mean\": torch.mean,\n \"median\": torch.median\n }[args.sample_aggregation]\n\n random_crops_transform = Compose([\n RandSpatialCropSamplesd(\n keys=[\"img\", \"mask\"],\n roi_size=args.image_size,\n random_size=False,\n random_center=True,\n num_samples=n_samples\n ),\n Resized(\n keys=[\"img\", \"mask\"],\n spatial_size=args.image_size)\n ])\n\n test_dataset = GensheimerDatasetInMemory(\n image_directories=args.input,\n image_filename=args.img_filename,\n mask_filename=args.mask_filename,\n patient_ids=test_ids,\n outcome_file=args.outcome,\n outcome_file_sep=args.outcome_sep,\n outcome_file_id_column=args.id_col,\n outcome_file_time_column=args.time_col,\n outcome_file_event_column=args.event_col,\n interval_breaks=model.hparams.gensheimer_interval_breaks,\n preprocess_transform=get_preprocess_transforms(\n (1.25 * np.array(args.image_size)).astype(int).tolist()), # increase spatial size so we can random crop\n augmentation_transform=random_crops_transform)\n\n test_loader = DataLoader(\n test_dataset,\n shuffle=False,\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n # now each batch the data loader produces is a list of dicts of length n_samples\n # for which we have to make predictions and boil down results to a single dict\n # per batch\n\n step_outputs = []\n\n surv_heads = model.hparams.heads_to_use\n print(\"models survival heads\", surv_heads)\n with torch.no_grad():\n for batch_idx, batch in enumerate(test_loader):\n # batch is now a list of 
dicts, one dict for each sample\n # and we have to aggregate over all samples to make a final\n # prediction for each patient in the batch\n aggregated_step_result = {}\n for sample_idx, sample_dict in enumerate(batch):\n print(\n f\"\\nPredicting for Batch {batch_idx+1}, sample {sample_idx + 1}\\n\")\n\n # results for the first sample of the batch patients\n # has keys 'survival' and 'segmentation' where\n # survival is another dict for each head containing the keys 'patient', 'label' and 'prediction'\n sample_result = model.predict_step(\n sample_dict, batch_idx=None) # batch_idx is not used anyway\n # print(sample_result.keys())\n # print(\"sample_result['survival'].keys()\",\n # sample_result[\"survival\"].keys())\n\n if sample_idx == 0:\n for head in surv_heads: # sample_result[\"survival\"]:\n aggregated_step_result[head] = dict()\n # copy all non-prediction keys\n for k in sample_result[\"survival\"][head].keys():\n if \"prediction\" in k:\n continue\n aggregated_step_result[head][k] = sample_result[\"survival\"][head][k]\n\n aggregated_step_result[head][\"sample_predictions\"] = [\n sample_result[\"survival\"][head][\"prediction\"].detach()]\n\n else:\n for head in surv_heads: # sample_result[\"survival\"]:\n aggregated_step_result[head][\"sample_predictions\"].append(\n sample_result[\"survival\"][head][\"prediction\"].detach())\n\n # stack all the predictions we aggregated along the second dimension,\n # so the output has shape B, n_samples, n_predictions for each head\n for head in surv_heads:\n aggregated_step_result[head][\"sample_predictions\"] = torch.stack(\n aggregated_step_result[head][\"sample_predictions\"], dim=1)\n\n # now final aggregation\n for head in surv_heads:\n aggregated = aggregation_fn(\n aggregated_step_result[head][\"sample_predictions\"], dim=1)\n\n # NOTE: for min, max and median, torch calls return a tuple of\n # values and indices if dim= argument is passed (but not for mean)\n if not isinstance(aggregated, torch.Tensor):\n assert len(aggregated) == 2\n vals, _ = aggregated\n aggregated = vals\n aggregated_step_result[head][\"sample_predictions_std\"] = torch.std(\n aggregated_step_result[head][\"sample_predictions\"],\n dim=1,\n unbiased=False\n )\n\n aggregated_step_result[head][\"prediction\"] = aggregated\n\n step_outputs.append(dict(survival=aggregated_step_result))\n\n return step_outputs\n\n\ndef main(args):\n pl.seed_everything(args.seed)\n test_ids = pd.read_csv(args.test_id_file,\n header=None).values.squeeze().tolist()\n\n if args.vit_or_cnn == \"vit\":\n cls = MultitaskPlusUNETR\n\n elif args.vit_or_cnn == \"cnn\":\n cls = MultitaskPlusUNET\n\n model = cls.load_from_checkpoint(\n checkpoint_path=args.ckpt_file)\n model.eval()\n model.freeze()\n print(f\"Loaded trained model from checkpoint {args.ckpt_file}.\")\n\n if args.n_samples > 1:\n test_pred_step_outputs = inference_multiple_samples(\n args, test_ids, model)\n elif args.n_samples == 1:\n test_pred_step_outputs = inference_single_sample(\n args, test_ids, model)\n else:\n raise ValueError(\n f\"n_samples must be >= 1, not \"\n f\"{args.n_samples}\")\n\n # NOTE: information of variance among multiple predictions gets lost here\n # since only the \"prediction\" keys are taken into account when evaluating\n # metrics and returning test_pred\n # TODO: maybe write out the step_outputs in some meaningful way as well?\n\n test_metrics, test_pred = multitask_metrics_from_step_outputs(\n [d[\"survival\"] for d in test_pred_step_outputs],\n task_names=model.hparams.heads_to_use,\n 
timepoints_cindex=model.hparams.timepoints_cindex,\n timepoints_brier=model.hparams.timepoints_brier,\n training_labels=model.hparams.training_labels,\n gensheimer_interval_breaks=model.hparams.gensheimer_interval_breaks\n )\n\n # if inference_multiple_samples -> also add the standard deviations for the predictions\n # to the test_pred dataframe\n if args.n_samples > 1:\n for head in test_pred:\n stds = torch.cat([d['survival'][head]['sample_predictions_std']\n for d in test_pred_step_outputs])\n #pats = [d['survival'][head]['patient']]\n stds_dict = {}\n for i in range(stds.shape[1]):\n stds_dict[f'std_prediction_{i}'] = stds[:, i]\n stds_dict = pd.DataFrame(stds_dict)\n test_pred[head] = pd.concat([test_pred[head], stds_dict], axis=1)\n\n print()\n print(f\"Storing test predictions to {args.output_dir}\")\n for head in test_pred:\n pred_df = test_pred[head]\n pred_df.set_index(\"patient\").to_csv(\n args.output_dir / f\"{head}_predictions.csv\")\n\n for head in test_metrics:\n metrics = pd.DataFrame(test_metrics[head], index=[0])\n\n # stratification cutoff for cox model only\n # TODO: can we use stratification cutoff for other losses?\n if head == \"cox\":\n if args.stratification_cutoff_cox is None:\n print(\"Note: No stratification cutoff was provided. Will \"\n \"determine it as median of predictions. This might \"\n \"not be intended for data other than the training \"\n \"data! If you are not using the training data now \"\n \"you should determine the cutoff from that beforehand!\")\n stratification_cutoff = np.median(\n test_pred[head]['prediction'])\n else:\n stratification_cutoff = args.stratification_cutoff_cox\n\n test_logrank_pval = compute_stratification_logrank_pvalue(\n test_pred[head], cutoff=stratification_cutoff)\n\n metrics['stratification_cutoff'] = stratification_cutoff\n metrics['stratification_logrank_pval'] = test_logrank_pval\n\n print()\n print(f\"{head.capitalize()} metrics (storing to {args.output_dir})\")\n print(metrics)\n metrics.to_csv(args.output_dir / f\"{head}_metrics.csv\", index=False)\n\n return 0\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(\"Inference\")\n parser = add_common_args(parser)\n parser.add_argument(\n '--ckpt_file',\n type=str,\n help='Full path to a checkpoint file of the trained model.')\n parser.add_argument(\n '--output_dir',\n type=str,\n default=None)\n parser.add_argument(\n '--gpus',\n type=int,\n default=0)\n parser.add_argument(\n '--stratification_cutoff_cox',\n type=float,\n help=\"Cutoff value applied to the test predictions to divide into low and high risk groups.\")\n parser.add_argument(\n '--n_samples',\n type=int,\n default=1,\n help=\"Number of random crops to create per patient and from which to make a final prediction\"\n )\n parser.add_argument(\n '--sample_aggregation',\n type=str,\n choices=[\"mean\", \"median\", \"min\", \"max\"],\n default=\"mean\"\n )\n parser.add_argument(\n \"--vit_or_cnn\",\n type=str,\n choices=[\"vit\", \"cnn\"],\n #default=\"vit\"\n )\n\n # parser.add_argument('--plot_predictions',\n # action=\"store_true\",\n # default=False,\n # help=\"Flag to decide whether predictions for each\"\n # \" patient should be plotted after training.\")\n\n args = parser.parse_args()\n print(f\"parsed args are\\n{args}\")\n\n if args.output_dir is None:\n args.output_dir = \"./cox_vit/inference\"\n if not isinstance(args.output_dir, Path):\n args.output_dir = Path(args.output_dir)\n\n if not args.output_dir.is_dir():\n args.output_dir.mkdir(parents=True)\n else:\n raise 
ValueError(f\"Output_dir {args.output_dir} already exists!\")\n\n # storing the commandline arguments to a json file\n with open(args.output_dir / \"commandline_args.json\", 'w') as of:\n # pathlib objects cant be serialized so we convert to string\n storage_args = vars(copy.deepcopy(args))\n storage_args[\"output_dir\"] = str(\n storage_args[\"output_dir\"])\n\n json.dump(storage_args, of, indent=2)\n\n args.input = [Path(inp) for inp in args.input]\n\n retval = main(args)\n sys.exit(retval)\n","repo_name":"oncoray/multitask-hnscc","sub_path":"scripts/multitask/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":13005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"72783039019","text":"from unittest.mock import Mock\n\nfrom _sadm.devops.wapp.view import index\n\ndef test_handle(devops_wapp):\n\twapp = devops_wapp()\n\twith wapp.mock() as ctx:\n\t\tbup = Mock()\n\t\tbup._repos = index._repos\n\t\ttry:\n\t\t\trepos = object()\n\t\t\tindex._repos = Mock(return_value = repos)\n\t\t\tindex.handle(user = None)\n\t\tfinally:\n\t\t\tdel index._repos\n\t\t\tindex._repos = bup._repos\n\t\tctx.tpl.parse.assert_called_with('index', repos = repos, user = None)\n","repo_name":"jrmsdev/pysadm","sub_path":"t/devops/devops_wapp_view_index_test.py","file_name":"devops_wapp_view_index_test.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"41234490612","text":"import maya.cmds as cmds\n\nvertexlist = cmds.ls(sl=True, fl=True)\nmesh = vertexlist[0].split('.')[0]\ncmds.select(mesh)\nmeshShape = cmds.listRelatives(shapes=True)[0]\norig = cmds.duplicate(mesh, name=mesh + 'Orig')\ntodelete = cmds.ls(sl=True)\ncpv = cmds.rename(meshShape, mesh + 'CPVShape')\ntomove = cmds.listRelatives(shapes=True)[0]\ncmds.parent(tomove, mesh, shape=True, relative=True)\ncmds.delete(todelete)\norig = orig[0] + 'Shape'\n\n# create input graph\n\ncmds.select(orig)\natpc = soup().create('arrayToPointColor')[0]\ncmds.connectAttr(atpc + '.outGeometry', cpv + '.inMesh', force=True)\ncmds.setAttr(atpc + '.solidAlpha', 1)\ncmds.select(orig)\ntta = soup().create('textureToArray')[0]\ncmds.connectAttr(tta + '.outRgbaPP', atpc + '.inRgbaPP')\ncmds.setAttr(tta + '.accurateSampling', 1)\ntex = cmds.shadingNode('surfaceShader', asTexture=True, name=mesh + 'CPVcolor')\ncmds.connectAttr(tex + '.outColor', tta + '.inColor')\ncmds.setAttr(orig + '.intermediateObject', 1)\ncmds.setAttr(cpv + '.displayColors', 1)\n\n# create output graph\n\ncmds.select(mesh)\nsoup().create('pointAttributeToArray')\narray = cmds.ls(sl=True)[0]\nsoup().create('rgbaToColorAndAlpha')\nrgba = cmds.ls(sl=True)[0]\nsoup().create('pointCloudToMesh')\nbakemesh = cmds.ls(sl=True)[0]\ncmds.polyCube(name=mesh + 'colorBake')\ncolorBake = cmds.ls(sl=True)[0]\n\n# set nodes attributes\ncmds.setAttr(array + '.pointColor', 1)\ncmds.setAttr(bakemesh + '.normal', 0)\ncmds.setAttr(bakemesh + '.rgba', 0)\ncmds.setAttr(bakemesh + '.map', 0)\ncmds.setAttr(bakemesh + '.position', 1)\ncmds.setAttr(colorBake + '.visibility', 0)\n\n# connect nodes\ncmds.connectAttr(array + '.outRgbaPP', rgba + '.inRgbaPP', force=True)\ncmds.connectAttr(rgba + '.outRgbPP', bakemesh + '.inPositionPP', force=True)\ncmds.connectAttr(bakemesh + '.outMesh', colorBake + '.inMesh', force=True)\n\n# 1 locator per color\n\nvertexNo = [x.replace(x.split('.')[0], colorBake) for x in vertexlist]\ncmds.select(vertexNo)\nvertexConstraint_SOuP().main()\n\n# 1 locator per light\n\ncmds.select(vertexlist)\nvertexConstraint_SOuP().main()\nlocs = cmds.ls(sl=True, fl=True)\n\n# Controler\n\nctrl = cmds.spaceLocator(name=mesh + '_Light_CTRL')[0]\ncmds.move(0, 5, 0)\ncmds.addAttr(longName='lightsDisplay', attributeType='bool', defaultValue=True, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='lightsScale', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='targetsDisplay', attributeType='bool', defaultValue=True, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='targetsScale', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='locatorsDisplay', attributeType='bool', defaultValue=False, hidden=False, writable=True,keyable=True)\n# cmds.addAttr(longName='IESfile', dataType=\"string\", hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='NormalOffset', attributeType='float', hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='Intensity', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='Exposure', attributeType='float', defaultValue=12, hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='Spread', attributeType='float', defaultValue=1, hidden=False, writable=True, keyable=True)\n\ncmds.addAttr(longName='DiffuseContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, 
writable=True,keyable=True)\ncmds.addAttr(longName='SpecularContribution', min=0,max=1, attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='SSSContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='IndirectContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\ncmds.addAttr(longName='VolumeContribution', min=0,max=1,attributeType='float', defaultValue=1, hidden=False, writable=True,keyable=True)\n\ncmds.addAttr(longName='EmitDiffuse', attributeType='bool', defaultValue=True, hidden=False, writable=True, keyable=True)\ncmds.addAttr(longName='EmitSpec', attributeType='bool', defaultValue=True, hidden=False, writable=True, keyable=True)\n# cmds.setAttr(ctrl+'.IESfile',\"IES profile here...\",type=\"string\")\n\nfor vtx in locs:\n id = vtx.split('_')[3]\n lightShape = cmds.createNode('aiAreaLight', name='AreaLight_' + id)\n light = cmds.listRelatives(lightShape, parent=True)[0]\n cmds.addAttr(light, longName='cpvColor', attributeType='float3')\n cmds.addAttr(light, longName='cpvColorX', attributeType='float', parent='cpvColor')\n cmds.addAttr(light, longName='cpvColorY', attributeType='float', parent='cpvColor')\n cmds.addAttr(light, longName='cpvColorZ', attributeType='float', parent='cpvColor')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateX', light + '.cpvColorX')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateY', light + '.cpvColorY')\n cmds.connectAttr('vertexConstraint*_' + colorBake + '_vtx_' + id + '.translateZ', light + '.cpvColorZ')\n cmds.connectAttr(ctrl + '.locatorsDisplay', 'vertexConstraint*_' + mesh + '_vtx_Shape' + id + '.lodVisibility')\n cmds.connectAttr(ctrl + '.lightsDisplay', light + '.lodVisibility')\n #cmds.connectAttr(ctrl + '.IESfile', lightShape + '.aiFilename')\n cmds.connectAttr(ctrl + '.NormalOffset', light + '.translateX')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleX')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleY')\n cmds.connectAttr(ctrl + '.lightsScale', light + '.scaleZ')\n cmds.connectAttr(ctrl + '.Exposure', lightShape + '.aiExposure')\n cmds.connectAttr(ctrl + '.EmitDiffuse', lightShape + '.emitDiffuse')\n cmds.connectAttr(ctrl + '.EmitSpec', lightShape + '.emitSpecular')\n\n cmds.connectAttr(ctrl + '.Intensity', lightShape + '.intensity')\n cmds.connectAttr(ctrl + '.Spread', lightShape + '.aiSpread')\n\n cmds.connectAttr(ctrl + '.DiffuseContribution', lightShape + '.aiDiffuse')\n cmds.connectAttr(ctrl + '.SpecularContribution', lightShape + '.aiSpecular')\n cmds.connectAttr(ctrl + '.SSSContribution', lightShape + '.aiSss')\n cmds.connectAttr(ctrl + '.IndirectContribution', lightShape + '.aiIndirect')\n cmds.connectAttr(ctrl + '.VolumeContribution', lightShape + '.aiVolume')\n\n lightShape = cmds.listRelatives(light, shapes=True)\n cmds.connectAttr(light + '.cpvColor', lightShape[0] + '.color')\n cmds.parent(light, vtx, relative=True)\n target = cmds.spaceLocator(name=mesh + light + id + '_target')[0]\n cmds.parent(target, vtx, relative=True)\n cmds.setAttr(target + '.translateX', 10)\n cmds.aimConstraint(target, light, weight=1, offset=(0, -90, 90), aimVector=(1, 0, 0), upVector=(0, 1, 0),worldUpType='vector', worldUpVector=(0, 1, 0))\n cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleX')\n cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleY')\n 
cmds.connectAttr(ctrl + '.lightsScale', target + '.localScaleZ')\n cmds.connectAttr(ctrl + '.targetsDisplay', target + '.visibility')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleX')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleY')\n cmds.connectAttr(ctrl + '.targetsScale', target + '.scaleZ')\n\n# Sort things\n\nlocsCol = cmds.select('vertexConstraint*_' + colorBake + '_vtx_*')\ncmds.group(name=mesh + '_colorHisto')\ncolGrp = cmds.ls(sl=True)[0]\nlocsPos = cmds.select('vertexConstraint*_' + mesh + '_vtx_*')\ncmds.group(name=mesh + '_posHisto')\nposGrp = cmds.ls(sl=True)[0]\ncmds.parent(colorBake, colGrp)\ncmds.setAttr(colGrp + '.visibility', 0)\ncmds.group(name=mesh + '_targetsPos', empty=True, world=True)\ntargetGrp = cmds.ls(sl=True)[0]\ncmds.select(mesh + '*_target')\ntargets = cmds.ls(sl=True)\ncmds.parent(targets, targetGrp, absolute=True)\n\nanno = cmds.annotate(ctrl, text='Light controls for ' + mesh, point=(0, 7, 0))\nannoTransform = cmds.listRelatives(anno, parent=True)[0]\ncmds.parent(anno, ctrl, shape=True, relative=True)\ncmds.delete(annoTransform)\ncmds.select(ctrl)\n","repo_name":"nagasimon/Maya","sub_path":"divers/lightRig_paolo.py","file_name":"lightRig_paolo.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"4530548895","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef main():\n for line in sys.stdin:\n a = []\n for i in line.strip():\n if i.islower():\n i = i.replace(i, \" \")\n a.append(i)\n\n a = \"\".join(a).split(\" \")\n print(max(a))\n\nif __name__ == '__main__':\n main()\n","repo_name":"AnzheYuan1217/DCU","sub_path":"CA117/Sample_LabExam/uppers_052.py","file_name":"uppers_052.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"27807218423","text":"import cv2\r\nfrom tkinter import *\r\nfrom PIL import Image,ImageTk\r\nfrom tkinter import ttk\r\nfrom tkinter import filedialog as fd\r\nimport pickle\r\n\r\ndef ml_work(filename,x):\r\n new_img = cv2.imread(filename)\r\n new_img = cv2.resize(new_img,(150,200))\r\n new_img = new_img.flatten()\r\n p = open(\"obj.txt\",\"rb\")\r\n model = pickle.load(p)\r\n ar = model.predict([new_img])\r\n l = ['computer mouse', 'neckband headset', 'smart watch']\r\n name_obj = l[int(ar[0])]\r\n if x == 1:\r\n obj_l.config(text=name_obj)\r\n elif x==2:\r\n obj_c.config(text=name_obj)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ncancel = False\r\n\r\ndef prompt_ok(event = 0):\r\n global cancel, button, button1, button2\r\n cancel = True\r\n\r\n button.place_forget()\r\n button1 = Button(cam, text=\"Good Image!\", command=saveAndExit)\r\n button1.place(anchor=CENTER, relx=0.2, rely=0.9, width=150, height=50)\r\n button2 = Button(cam, text=\"Try Again\", command=resume)\r\n button2.place(anchor=CENTER, relx=0.8, rely=0.9, width=150, height=50)\r\n button1.focus()\r\n\r\ndef saveAndExit(event = 0):\r\n global img,lmain\r\n\r\n filepath = \"imageCap.png\"\r\n img.save(filepath)\r\n lmain.focus()\r\n ml_work(filepath, 2)\r\n\r\ndef resume(event = 0):\r\n global button1, button2, button, lmain, cancel\r\n\r\n cancel = False\r\n\r\n button1.place_forget()\r\n button2.place_forget()\r\n\r\n button.place(bordermode=INSIDE, relx=0.5, rely=0.9, anchor=CENTER, width=300, height=50)\r\n lmain.after(10, video_stream)\r\n\r\ndef video_stream():\r\n global img\r\n _, frame = cap.read()\r\n frame = cv2.flip(frame,1)\r\n frame = cv2.resize(frame,(600,500))\r\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n img = Image.fromarray(cv2image)\r\n imgtk = ImageTk.PhotoImage(image=img)\r\n lmain.imgtk = imgtk\r\n lmain.configure(image=imgtk)\r\n if not cancel:\r\n lmain.after(10, video_stream)\r\n\r\n\r\ndef open_camera():\r\n global cap,lmain,cam,button\r\n cam = Toplevel()\r\n cam.resizable(width=False, height=False)\r\n cap = cv2.VideoCapture(0)\r\n capWidth = cap.get(3)\r\n capHeight = cap.get(4)\r\n\r\n lmain = Label(cam, compound=CENTER, anchor=CENTER, relief=RAISED)\r\n lmain.pack()\r\n\r\n button = Button(cam, text=\"Capture\", command=prompt_ok)\r\n button.place(bordermode=INSIDE, relx=0.5, rely=0.9, anchor=CENTER, width=300, height=50)\r\n button.focus()\r\n\r\n video_stream()\r\n\r\n cam.mainloop()\r\n\r\n\r\ndef local_com():\r\n filetypes = (('image jpg files', '*.jpg'),('png files', '*.png'))\r\n filename = fd.askopenfilename(title='Open a file',initialdir='/',filetypes=filetypes)\r\n ml_work(filename,1)\r\n\r\n\r\n\r\n\r\n\r\n\r\nwin = Tk()\r\nwin.geometry(\"1020x500\")\r\nwin.resizable(False,False)\r\nwin.title(\"Object Classification\")\r\nwin.config(bg = \"yellow\")\r\n\r\n\r\n\r\nLabel(win,text=\"Local Computer Drive\",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=50,y=70,height=60,width=410)\r\n\r\nlocal = Button(win,text = \"Open Folder\",font = (\"Time New Roman\",20,\"bold\"),command=local_com)\r\nlocal.place(x=150,y=180,height=60,width=200)\r\n\r\ncanvas = Canvas(win, width=5, height=win.winfo_screenheight(), bg='yellow',borderwidth=0)\r\ncanvas.place(x = 510,y=0)\r\ncanvas.create_line((5, 0), (5, win.winfo_screenheight()), width=5, fill='gray')\r\n\r\nLabel(win,text=\"Object Name : \",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=50,y=290,height=60,width=410)\r\n\r\nobj_l = Label(win,text=\"\",font = (\"Time New 
Roman\",30,\"bold\"))\r\nobj_l.place(x=50,y=370,height=60,width=410)\r\n\r\n\r\nLabel(win,text=\"Open Camera\",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=560,y=70,height=60,width=410)\r\n\r\ncam1 = Button(win,text = \"Open Camera\",font = (\"Time New Roman\",20,\"bold\"),command=open_camera)\r\ncam1.place(x=660,y=180,height=60,width=200)\r\n\r\nLabel(win,text=\"Object Name : \",font = (\"Time New Roman\",30,\"bold\"),bg = \"yellow\").place(x=560,y=290,height=60,width=410)\r\n\r\nobj_c = Label(win,text=\"\",font = (\"Time New Roman\",30,\"bold\"))\r\nobj_c.place(x=560,y=370,height=60,width=410)\r\n\r\nwin.mainloop()","repo_name":"gauravprajapat29/object_detection_and_name_prediction","sub_path":"object_classification.py","file_name":"object_classification.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"713686373","text":"import requests\nfrom bs4 import BeautifulSoup, element\n\n\nclass Indeed:\n def __init__(self, words, location, offset):\n self.url = \"https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}\".format(\n \"+\".join(set(d.strip().lower() for d in words.split(\",\") if d)),\n \"+\".join(list(d.lower() for d in location.split(\" \") if d)),\n int(offset),\n )\n\n def extract(self, soup):\n if not soup:\n return []\n jobs = []\n for tag in soup.find_all(name=\"div\", attrs={\"class\": \"jobsearch-SerpJobCard\"}):\n job = {}\n for child in tag.children:\n if child and type(child) == element.Tag and child.attrs:\n if child.attrs[\"class\"][0] == \"title\":\n job[\"title\"] = child.get_text().strip()\n for grandchild in child.find_all(name=\"a\"):\n if grandchild.has_attr(\"href\"):\n job[\"link\"] = (\n \"https://www.indeed.com\" + grandchild[\"href\"]\n )\n elif child.attrs[\"class\"][0] == \"sjcl\":\n lines = child.get_text().strip().split(\"\\n\")\n job[\"company\"] = lines[0]\n job[\"location\"] = lines[-1]\n elif child.attrs[\"class\"][0] == \"jobsearch-SerpJobCard-footer\":\n job[\"date\"] = \"n/a\"\n for grandchild in child.find_all(\n name=\"span\", attrs={\"class\": \"date\"}\n ):\n job[\"date\"] = grandchild.get_text()\n jobs.append(job)\n return jobs\n\n def fetch(self):\n soup = None\n try:\n r = requests.get(self.url)\n r.raise_for_status()\n soup = BeautifulSoup(r.text, \"html.parser\")\n finally:\n return soup\n\n def search(self):\n soup = self.fetch()\n jobs = self.extract(soup)\n return jobs\n","repo_name":"kzkaneoka/custom-job-search","sub_path":"services/backend/project/api/sites.py","file_name":"sites.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"5450055364","text":"from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.views.generic import DetailView\nfrom django.views.generic.edit import FormView\nfrom django.contrib.auth import login, logout\nfrom django.views.generic.base import View\nfrom django.http import HttpResponseRedirect\n\nfrom shop.forms import CreationItemForm\nfrom shop.models import Item\nfrom users.forms import *\nfrom users.models import Profile\n\n\nclass LoginFormView(FormView):\n form_class = AuthenticationForm\n template_name = \"login.html\"\n success_url = \"/\"\n\n def form_valid(self, form):\n self.user = form.get_user()\n login(self.request, self.user)\n return super(LoginFormView, self).form_valid(form)\n pass\n\n\ndef registration(request):\n if request.method == 'POST':\n form = SingUp(request.POST, request.FILES)\n if form.is_valid():\n user = form.save(commit=False)\n #user.is_active = False\n user.save()\n\n profile = Profile()\n profile.user = user\n profile.ava = request.FILES['account_image']\n profile.save()\n return HttpResponseRedirect(\"/users/login\")\n else:\n form = SingUp()\n return render(request, 'reg.html', {'form': form})\n\n\nclass LogoutFormView(View):\n\n def get(self, request):\n logout(request)\n return HttpResponseRedirect(\"/\")\n pass\n\n\nclass ProfileUser(DetailView):\n\n template_name = \"profile.html\"\n\n def get(self, request, id):\n form = CreationItemForm()\n user = get_object_or_404(User, id=id)\n profile = get_object_or_404(Profile, user=user)\n items = Item.objects.filter(owner=id)\n #users = User.objects.all().select_related('profile')\n return render(request, self.template_name, {'current_user': user, 'profile': profile, 'form': form, 'items': items})\n\n def post(self, request, *args, **kwargs):\n form = CreationItemForm(request.POST, request.FILES)\n if form.is_valid():\n id = kwargs['id']\n cd = form.cleaned_data\n user = get_object_or_404(User, id=id)\n profile = get_object_or_404(Profile, user=user)\n\n item = Item(owner=user,\n category=cd['category'],\n icon=cd['icon'],\n name=cd['name'],\n price=cd['price'],\n description=cd['description']).save()\n self.get(request, id)\n form = CreationItemForm()\n return render(request, self.template_name, {'current_user': user, 'profile': profile, 'form': form})","repo_name":"vadimsmilgin/salePlace","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18236375962","text":"# -*- coding: utf-8 -*-\n# @Time : 18/03/2018\n# @Author : Luke\n\nimport math\nimport os\nimport random\n\nimport numpy as np\nimport pymongo\nfrom fake_useragent import UserAgent\n\nMINPRICE = 2000 # 最低价格\nMAXPRICE = 3500 # 最高价格\nMAXDISTANCE = 7 # 距离目标地点与可选地点之间的距离之和\nCITY = \"上海\" # 城市\nNB_ROOM = {'1室', '2室', } # 房间数\n\nCOOKIE = None\nproxies = [\n \"http://localhost:1087\",\n ''\n] # 代理地址\n\nak = \"\" #百度lbs服务key,请自行申请(http://lbsyun.baidu.com/apiconsole/key)\n# 若使用其他lbs服务,请同时修改pipelines中的 get_lbs 函数\n\n# GPS, (longitude,latitude)\n# primary\nGPS1 = {\"lat\": 31.239777,\n \"lng\": 121.669717} # 目标地点的gps信息\n# Secondary # 可选地点gps信息\nGPS2 = [\n # {\"lat\": 31.219828,\n # \"lng\": 121.662625}, # 唐镇地铁站\n # {\"lat\": 31.216703,\n # \"lng\": 121.627179}, # 广兰路地铁站\n {\"lat\": 31.269485,\n \"lng\": 121.64549}, # 金海路地铁站\n {\"lat\": 31.272188,\n \"lng\": 121.663}, # 顾唐路地铁站\n {\"lat\": 31.274649,\n \"lng\": 121.674609}, # 明雷路地铁站\n {\"lat\": 31.26994,\n \"lng\": 121.634401} # 金吉路地铁站\n]\nlocations = [ # 与GPS2对应的地点名\n # \"唐镇地铁站\",\n # \"广兰路地铁站\",\n \"金海路地铁站\",\n \"顾唐路地铁站\",\n \"明雷路地铁站\",\n \"金吉路地铁站\",\n]\n\n\ndef get_collection(host, db, collection):\n client = pymongo.MongoClient(host)\n db = client[db]\n collection = db[collection]\n return collection\n\ncollection = get_collection('localhost', 'mydb', 'rent_info') # 保存mango表信息\n\n\nRADIUS = 6378.137 # km\n\nutils_path = os.path.abspath(__file__)\nutils_path = os.path.split(utils_path)[0]\n\n\ndef write_files(file_path, list: iter):\n length = len(list)\n with open(file_path, 'w') as f:\n for i, item in list:\n if hasattr(item, '__len__'):\n f.writelines(' '.join(item))\n else:\n f.writelines(item)\n if i < length:\n f.writelines('\\n')\n\n\ndef random_interval():\n print('generate interval')\n return np.random.rand() * 5\n\n\nua = UserAgent(use_cache_server=False, verify_ssl=False)\n\n\ndef random_agent():\n headers = {'User-Agent': ua.random}\n return headers\n\n\ndef gps2distance(origin, destination):\n lat1, lon1 = origin['lat'], origin['lng']\n lat2, lon2 = destination['lat'], destination['lng']\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) \\\n * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n\n return d\n\n\nalphabet = [chr(c) for c in range(97, 123)]\n\n\ndef random_string():\n length = random.randint(3, 7)\n return ''.join([random.choice(alphabet) for _ in range(length)])\n\n\ndef random_key_value():\n key = random_string()\n value = random_string()\n return key + '=' + value\n\n\nif __name__ == '__main__':\n print(random_key_value())\n","repo_name":"nju-luke/RentInfo","sub_path":"spider/tc58/tc58/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"35650986643","text":"# -*- coding: utf8 -*-\nfrom config import cos_config\nfrom qcloud_cos import CosConfig, CosS3Client\n\nconfig = CosConfig(Region=cos_config[\"region\"], Secret_id=cos_config[\"secret_id\"],\n Secret_key=cos_config[\"secret_key\"], Token=cos_config[\"cos_token\"])\ncos_client = CosS3Client(config)\n\n\ndef calculate_sign(path=None, method=\"POST\", headers=None, params=None):\n if params is None:\n params = {}\n if headers is None:\n headers = {}\n if path is None:\n path = {}\n sign = cos_client.get_auth(Method=method,\n Bucket=cos_config[\"bucket\"] + \"-\" + cos_config[\"app_id\"],\n Key=path,\n Headers=headers,\n Params=params)\n return sign\n","repo_name":"MeiCorl/ShoppingMall","sub_path":"utils/cos_util.py","file_name":"cos_util.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"13354846414","text":"from pymongo import MongoClient\nclient = MongoClient()\n\n# print(client.database_names()) #print the name of available databases\ndb = client.Northwind\ncustomers = db.customers\nproducts = db.products\norders = db.orders\norder_details = db['order-details']\n\n\nfor order in orders.find({\"CustomerID\":\"ALFKI\"}):\n for order_detail in order_details.find({\"OrderID\":order[\"OrderID\"]}):\n for product in products.find({\"ProductID\":order_detail[\"ProductID\"]}):\n print(order[\"OrderID\"], product[\"ProductName\"], product[\"ProductID\"])\n","repo_name":"Anonyme38/Computational-Tools-for-Big-Data","sub_path":"SQL_NO-SQL/ex1_pymongo.py","file_name":"ex1_pymongo.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"21819329615","text":"import sqlite3\nimport pandas as pd\n\nfrom .conn import conn, cur\n\n\n#Functions for 'tanks' table.\n\n\ndef get_percentiles_data(tank_ids):\n columns = [\n 'battle_life_time', 'battles', 'capture_points', 'damage_assisted_radio',\n 'damage_assisted_track', 'damage_dealt', 'damage_received', 'direct_hits_received',\n 'dropped_capture_points', 'explosion_hits', 'explosion_hits_received', 'frags',\n 'hits', 'losses', 'mark_of_mastery', 'max_frags',\n 'max_xp', 'no_damage_direct_hits_received', 'piercings', 'piercings_received',\n 'shots', 'spotted', 'survived_battles', 'trees_cut',\n 'wins', 'xp'\n ]\n\n tank_ids_str = ', '.join([str(x) for x in tank_ids])\n columns_str = ', '.join(columns)\n\n data = cur.execute(f'''\n SELECT {columns_str} FROM tanks WHERE tank_id IN ({tank_ids_str});\n ''').fetchall()\n\n return columns, data\n\n\ndef get_dataframe(tank_ids, columns, min_battles=1):\n\n tank_ids_str = ', '.join([str(x) for x in tank_ids])\n columns_str = ', '.join(columns)\n\n return pd.read_sql(f'''\n SELECT {columns_str} FROM tanks\n WHERE tank_id IN ({tank_ids_str}) AND battles >= {min_battles}\n ''', conn)\n\n\ndef insert_tank(tank_data):\n '''Insert one tank into database.\n\n Arguments:\n tank_data:Dict[str, num] - data dictionary for a tank.\n Returns:\n None\n '''\n\n columns = [\n 'tank_id', 'last_battle_time', 'account_id',\n 'server', 'battle_life_time', 'battles',\n 'capture_points', 'damage_assisted_radio', 'damage_assisted_track',\n 'damage_dealt', 'damage_received', 'direct_hits_received',\n 'dropped_capture_points', 'explosion_hits', 'explosion_hits_received',\n 'frags', 'hits', 'losses',\n 'mark_of_mastery', 'max_frags', 'max_xp',\n 'no_damage_direct_hits_received', 'piercings', 'piercings_received',\n 'shots', 'spotted', 'survived_battles',\n 'trees_cut', 'wins', 'xp'\n ]\n\n columns_str = ', '.join(columns)\n question_marks = ', '.join(['?' for _ in columns])\n\n #Triggers replace if there is a tank_id for the same player in database.\n query = f'INSERT OR REPLACE INTO tanks ({columns_str}) VALUES ({question_marks});'\n values = [tank_data[name] for name in columns]\n cur.execute(query, values)\n\n\ndef cleanup_space(tank_id, min_battles):\n '''Remove up to 10 records with less than minimum number of battles.\n Or remove 50 oldest records.\n\n Arguments:\n tank_id:int - tank_id to remove rows of.\n min_battles:int - minimum battles for the tank_id.\n Returns:\n None\n '''\n\n #Getting count of tanks with battles less than minimum.\n count = cur.execute('''\n SELECT COUNT(*) FROM tanks\n WHERE tank_id = ? AND battles < ?;\n ''', (tank_id, min_battles)).fetchone()[0]\n\n\n if count > 0:\n #Deleting oldest 50 with battles less than minimum.\n cur.execute('''\n DELETE FROM tanks\n WHERE tank_id = ? AND account_id IN (\n SELECT account_id FROM tanks\n WHERE tank_id = ? AND battles < ?\n ORDER BY last_battle_time ASC LIMIT 50\n );\n ''', (tank_id, tank_id, min_battles))\n else:\n #Deleting oldest 10.\n cur.execute('''\n DELETE FROM tanks\n WHERE tank_id = ? 
AND last_battle_time IN (\n SELECT last_battle_time FROM tanks\n WHERE tank_id = ?\n ORDER BY last_battle_time ASC LIMIT 10\n );\n ''', (tank_id, tank_id))\n\n\ndef insert_player(player_data, tankopedia):\n '''Insert tanks for one player.\n \n Arguments:\n player_data:List[Obj] - player tanks as list of dictionaries.\n tankopedia:Dict[str, Obj] - tankopedia object.\n Returns:\n None\n '''\n\n for tank_data in player_data:\n tank_id = tank_data['tank_id']\n\n #Getting count of the tank_id.\n count = cur.execute('SELECT COUNT(account_id) FROM tanks WHERE tank_id = ?', (tank_id,)).fetchone()[0]\n\n #No min_battles check.\n if count < 1000:\n insert_tank(tank_data)\n continue\n\n #Calculating min_battles. Skip if tank not in tankopedia.\n tier = tankopedia.get(str(tank_id), {}).get('tier')\n if tier:\n min_battles = tier * 10 + tier * 10 / 2\n\n #Cleanup if too many.\n if count >= 1100:\n cleanup_space(tank_id, min_battles)\n\n if tank_data['battles'] >= min_battles:\n insert_tank(tank_data)\n\n conn.commit()\n","repo_name":"chipsi007/wot-console-wn8","sub_path":"main/database/table_tanks.py","file_name":"table_tanks.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"42827524752","text":"\"\"\" ssbm_format.py - interfaces for manipulating Melee savefiles \"\"\"\n\nimport os\nimport struct\n\nfrom ssbmpack import unpack, pack\n\nclass melee_gci(object):\n \"\"\" Base class for GCI files. Just basic setter/getter stuff for dentry\n data, and some machinery for reading files \"\"\"\n\n def __init__(self, filename, packed=None):\n self._filename = os.path.basename(filename).split(\".\")[0]\n self.raw_bytes = bytearray()\n try:\n self.fd = open(filename, \"rb\")\n self.filesize = os.stat(filename).st_size\n self.raw_bytes = bytearray(self.fd.read(self.filesize))\n self.fd.seek(0x0)\n print(\"Read {} bytes from input GCI\".format(hex(self.filesize)))\n except FileNotFoundError as e:\n err(e)\n self.fd = None\n self.raw_bytes = None\n self.filesize = None\n return None\n\n # Let the user tell us whether or not the GCI is packed when importing\n # a file - this should help us tell the user not to do something that\n # might end up corrupting their data (or something to that effect).\n self.packed = packed\n\n ''' These functions return other types '''\n\n def blocksize(self):\n return struct.unpack(\">h\", self.raw_bytes[0x38:0x3a])[0]\n\n ''' These functions return raw bytes '''\n\n def dump(self):\n return self.raw_bytes\n def get_dentry(self):\n return self.raw_bytes[0:0x40]\n def get_game_id(self):\n return self.raw_bytes[0x00:0x04]\n def get_maker_code(self):\n return self.raw_bytes[0x04:0x06]\n def get_filename(self):\n return self.raw_bytes[0x08:0x28]\n def get_modtime(self):\n return self.raw_bytes[0x28:0x2c]\n def get_image_off(self):\n return self.raw_bytes[0x2c:0x30]\n def get_icon_fmt(self):\n return self.raw_bytes[0x30:0x32]\n def get_anim_speed(self):\n return self.raw_bytes[0x32:0x34]\n def get_permissions(self):\n return self.raw_bytes[0x34:0x35]\n def get_copy_ctr(self):\n return self.raw_bytes[0x35:0x36]\n def get_first_block(self):\n return self.raw_bytes[0x36:0x38]\n def get_block_count(self):\n return self.raw_bytes[0x38:0x3a]\n def get_comment_addr(self):\n return self.raw_bytes[0x3c:0x40]\n def set_filename(self, new_filename):\n self.raw_bytes[0x08:0x28] = new_filename\n def set_modtime(self, new_modtime):\n self.raw_bytes[0x28:0x2c] = struct.pack(\">L\", new_modtime)\n def set_block_count(self, new_bc):\n self.raw_bytes[0x38:0x3a] = new_bc\n def set_comment_addr(self, new_addr):\n self.raw_bytes[0x3c:0x40] = new_addr\n def set_permissions(self, new_perm):\n self.raw_bytes[0x34:0x35] = struct.pack(\">B\", new_perm)\n def _checksum(self, target_offset, count):\n \"\"\" Given some offset into raw_bytes and a count, compute checksum\n over the set of bytes in the GCI \"\"\"\n\n # This is the seed for all checksum values\n new_checksum = bytearray( b'\\x01\\x23\\x45\\x67\\x89\\xAB\\xCD\\xEF' +\n b'\\xFE\\xDC\\xBA\\x98\\x76\\x54\\x32\\x10' )\n cur = 0\n cur_arr = 0\n arr_pos = 0\n x = 0\n y = 0\n ctr = (count) / 8\n while (ctr > 0):\n for i in range(0, 8):\n cur = self.raw_bytes[target_offset + i]\n cur_arr = new_checksum[(arr_pos & 0xf)]\n new_checksum[(arr_pos & 0xf)] = (cur + cur_arr) & 0xff\n arr_pos += 1\n ctr -= 1\n target_offset += 8\n for i in range(1, 0xf):\n x = new_checksum[i-1]\n y = new_checksum[i]\n if (x == y):\n x = y ^ 0x00FF\n new_checksum[i] = x\n return new_checksum\n\nclass melee_gamedata(melee_gci):\n ''' Class representing a plain-ol' Melee gamedata savefile (0x16040 bytes).\n The checksum/packing functions here are specific to the format,\n so you'll need another class for other types of save files. 
'''\n\n def get_raw_checksum(self, blknum):\n \"\"\" Return checksum bytes for some block 0-10 \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize()-1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self.raw_bytes[target_offset:target_offset + 0x10]\n else:\n return None\n\n def set_raw_checksum(self, blknum, new_checksum):\n \"\"\" Given some blknum 0-10 and a 0x10-byte bytearray, replace the\n specified checksum bytes with the new bytes \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize() -1)):\n target_offset = base_offset + (blknum * 0x2000)\n self.raw_bytes[target_offset:target_offset + 0x10] = new_checksum\n else:\n print(\"[!] Can't set checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def checksum_block(self, blknum):\n \"\"\" Given some block number 0-10, compute the checksum for the\n associated data. Returns the raw checksum bytes. \"\"\"\n base_offset = 0x2050\n data_size = 0x1ff0\n if (blknum >= 0) and (blknum <= (self.blocksize() - 1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self._checksum(target_offset, data_size)\n else:\n print(\"[!] Can't compute checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def recompute_checksums(self):\n \"\"\" Recompute all checksum values and write them back \"\"\"\n if (self.packed is True):\n print(\"[!] You can only recompute checksums on unpacked data\")\n exit(-1)\n\n # Retrieve checksum values for all blocks\n current = []\n for i in range(0, self.blocksize()-1):\n current.append(self.get_raw_checksum(i))\n\n # Compute checksum values for all blocks\n computed = []\n for i in range(0, self.blocksize()-1):\n computed.append(self.checksum_block(i))\n\n # If current checksums don't match, write them back\n for i in range(0, self.blocksize()-1):\n if (current[i] != computed[i]):\n print(\"[*] Block {} checksum mismatch, fixing ..\".format(i))\n self.set_raw_checksum(i, computed[i])\n else:\n print(\"[*] Block {} checksum unchanged\".format(i))\n\n def get_block(self, blknum):\n ''' Get the data portion of some block '''\n if (blknum > 10):\n return None\n base = 0x2000 * blknum + 0x2060\n return self.raw_bytes[base:(base + 0x1fe0)]\n\n def set_block(self, blknum, data):\n ''' Set the data on some block; takes a 0x1fe0-byte bytearray '''\n if (blknum > 10):\n return None\n base = 0x2000 * blknum + 0x2060\n self.raw_bytes[base:(base + 0x1fe0)] = data\n\n def unpack(self):\n \"\"\" Unpack all blocks of data \"\"\"\n if (self.packed is False):\n print(\"[!] Data is already unpacked - refusing to unpack\")\n exit(-1)\n print(\"[*] Unpacking GCI data\")\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is True):\n self.packed = False\n\n def pack(self):\n \"\"\" Pack all blocks of data \"\"\"\n if (self.packed is True):\n print(\"[!] 
Data is already packed -- refusing to pack\")\n exit(-1)\n print(\"[*] Packing GCI data\")\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is False):\n self.packed = True\n\n\nclass melee_snapshot(melee_gci):\n \"\"\" Class representing a snapshot file. \"\"\"\n def get_raw_region_0_checksum(self):\n return self.raw_bytes[0x1e80:0x1e90]\n def get_raw_header_checksum(self):\n return self.raw_bytes[0x1eb0:0x1ec0]\n def get_raw_checksum(self, blknum):\n \"\"\" Return checksum bytes for some block 0-10 \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize()-1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self.raw_bytes[target_offset:target_offset + 0x10]\n else:\n return None\n\n def set_raw_region_0_checksum(self, new_checksum):\n self.raw_bytes[0x1e80:0x1e90] = new_checksum\n def set_raw_header_checksum(self, new_checksum):\n self.raw_bytes[0x1eb0:0x1ec0] = new_checksum\n def set_raw_checksum(self, blknum, new_checksum):\n \"\"\" Given some blknum 0-10 and a 0x10-byte bytearray, replace the\n specified checksum bytes with the new bytes \"\"\"\n base_offset = 0x2040\n if (blknum >= 0) and (blknum <= (self.blocksize() -1)):\n target_offset = base_offset + (blknum * 0x2000)\n self.raw_bytes[target_offset:target_offset + 0x10] = new_checksum\n else:\n print(\"[!] Can't set checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def checksum_region_0(self):\n \"\"\" Compute the header checksum \"\"\"\n base_offset = 0x40\n data_size = 0x1e40\n return self._checksum(base_offset, data_size)\n def checksum_header(self):\n \"\"\" Compute the header checksum \"\"\"\n base_offset = 0x1ec0\n data_size = 0x180\n return self._checksum(base_offset, data_size)\n def checksum_block(self, blknum):\n \"\"\" Given some block number 0-10, compute the checksum for the\n associated data. Returns the raw checksum bytes. \"\"\"\n base_offset = 0x2050\n data_size = 0x1ff0\n if (blknum >= 0) and (blknum <= (self.blocksize() - 1)):\n target_offset = base_offset + (blknum * 0x2000)\n return self._checksum(target_offset, data_size)\n else:\n print(\"[!] Can't compute checksum bytes for block {}\".format(blknum))\n exit(-1)\n\n def recompute_checksums(self):\n \"\"\" Recompute all checksum values and write them back \"\"\"\n if (self.packed is True):\n print(\"[!] 
You can only recompute checksums on unpacked data\")\n exit(-1)\n\n if (self.get_raw_header_checksum() != self.checksum_header()):\n print(\"[*] Header checksum mismatch, fixing ..\")\n self.set_raw_header_checksum(self.checksum_header())\n else:\n print(\"[*] Header checksum unchanged\")\n\n # Retrieve checksum values for all blocks\n current = []\n for i in range(0, self.blocksize()-1):\n current.append(self.get_raw_checksum(i))\n\n # Compute checksum values for all blocks\n computed = []\n for i in range(0, self.blocksize()-1):\n computed.append(self.checksum_block(i))\n\n # If current checksums don't match, write them back\n for i in range(0, self.blocksize()-1):\n if (current[i] != computed[i]):\n print(\"[*] Block {} checksum mismatch, fixing ..\".format(i))\n self.set_raw_checksum(i, computed[i])\n else:\n print(\"[*] Block {} checksum unchanged\".format(i))\n\n\n def unpack(self):\n \"\"\" Unpack all data \"\"\"\n if (self.packed is False):\n print(\"[!] Data is already unpacked - refusing to unpack\")\n exit(-1)\n print(\"[*] Unpacking GCI data\")\n\n # Unpack the data header region\n PREV_BYTE_OFFSET = 0x1ebf\n BASE_OFFSET = 0x1ec0\n DATA_SIZE = 0x180\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize() - 1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = unpack(prev, cursor)\n self.raw_bytes[i] = res\n prev = cursor\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n\n if (self.packed is True):\n self.packed = False\n\n\n def pack(self):\n \"\"\" Pack all blocks of data \"\"\"\n if (self.packed is True):\n print(\"[!] Data is already packed -- refusing to pack\")\n exit(-1)\n print(\"[*] Packing GCI data\")\n\n # Pack the data header region\n PREV_BYTE_OFFSET = 0x1ebf\n BASE_OFFSET = 0x1ec0\n DATA_SIZE = 0x180\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n\n PREV_BYTE_OFFSET = 0x204f\n BASE_OFFSET = 0x2050\n DATA_SIZE = 0x1ff0\n for j in range(0, self.blocksize()-1):\n prev = self.raw_bytes[PREV_BYTE_OFFSET]\n for i in range(BASE_OFFSET, BASE_OFFSET + DATA_SIZE):\n cursor = self.raw_bytes[i]\n res = pack(prev, cursor)\n self.raw_bytes[i] = res\n prev = res\n PREV_BYTE_OFFSET += 0x2000\n BASE_OFFSET += 0x2000\n if (self.packed is False):\n self.packed = True\n\n","repo_name":"eigenform/melee-re","sub_path":"src/meleegci-py/meleegci.py","file_name":"meleegci.py","file_ext":"py","file_size_in_byte":14222,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"92"}
+{"seq_id":"32144921517","text":"import unittest\n\nimport zipfile\n\nimport codecs\nimport csv\nfrom csv import writer\n\nfrom csv import reader\n\nfrom zipfile import ZipFile\n\nimport os\n\n#Script test case to test function in the main\n\n#Class to test the Ziplist function in the main\nclass TestZipList(unittest.TestCase):\n\n def test_zip_list(self):\n\n z_read = zipfile.ZipFile(\"zipfile.zip\", \"r\")\n z_write = zipfile.ZipFile(\"zipfile.zip\", \"a\")\n\n for file in z_read.namelist():\n\n print('File:', file)\n\n z_read.namelist()\n\n with z_read.open(file, \"r\") as read_files:\n\n reader = csv.DictReader(codecs.iterdecode(read_files, 'utf-8'))\n\n for line in reader:\n print(line)\n\n with z_write.open('Combined.csv', \"w\") as write:\n\n fieldname = ['Adress', 'Name']\n\n csv_writer = csv.DictWriter(write, fieldnames=fieldname, delimiter='\\t')\n\n csv_writer.writeheader\n\n for line in reader:\n csv_writer.writerow(line)\n\n return line, file\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n unittest.TestCase()\n","repo_name":"Stan5597/Data-Engineering-test-code","sub_path":"TestZipfile.py","file_name":"TestZipfile.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"75019829739","text":"import pandas as pd\nimport unicodedata\nimport regex as re\nimport json\n\ndef jsonl_loader(filename):\n \"\"\"\n trial_file: File with JSONL input\n \"\"\"\n file_io = open(filename)\n json_content = [json.loads(jline) for jline in file_io.read().splitlines()]\n dataframe = pd.DataFrame(json_content)\n return dataframe\n\ndef csv_loader(filename):\n dataframe = pd.read_csv(filename, sep = \",\", quotechar=\"\\\"\")\n return dataframe\n\ndef get_all_drugs_names(dataframe):\n dataframe['alts'] = dataframe.altLabel_list.apply(lambda x: x.split('|'))\n drug_list = []\n\n drug_list = [drug for alt_drugs in dataframe['alts'] for drug in alt_drugs]\n drug_list.extend(dataframe.itemLabel)\n drug_list = list(filter(None, drug_list))\n return drug_list\n\ndef group_drugs_by_first_letter(drug_list):\n drug_dict = {}\n for drug in drug_list:\n try:\n if len(drug) > 1:\n drug_name = preprocess_name(drug)\n if drug_name[0] in drug_dict:\n drug_dict[drug_name[0]].append(drug_name)\n else:\n drug_dict[drug_name[0]] = [drug_name]\n except IndexError:\n pass\n return drug_dict\n\ndef preprocess_name(drug_name):\n parentheses_trans = str.maketrans({\"(\":None, \")\":None, \"{\":None, \"}\":None, \"[\":None, \"]\":None, \"/\":\" \", \"\\\\\":\" \"})\n drug_name = remove_accented_chars(drug_name)\n drug_name = drug_name.translate(parentheses_trans)\n drug_name = drug_name.lower().rstrip().lstrip()\n return drug_name\n\ndef get_multiple_names(drug_name):\n drug_names = []\n drug_name = drug_name.split('+')\n for drug in drug_name:\n drug = drug.split('and')\n if len(drug) > 1:\n drug_names.extend(drug)\n return drug_names\n\ndef remove_accented_chars(drug_name):\n drug_name = unicodedata.normalize('NFKD', drug_name).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n return drug_name\n\ndef create_drug_references(dataframe):\n drug_ref = {}\n dataframe['alts'] = dataframe.altLabel_list.apply(lambda x: x.split('|'))\n drug_ref_df = dataframe[[\"itemLabel\", \"alts\"]]\n for idx, row in drug_ref_df.iterrows():\n for val in row.alts:\n val = preprocess_name(val)\n label = preprocess_name(row.itemLabel)\n drug_ref[val] = label\n drug_ref[label] = label\n return drug_ref\n ","repo_name":"jh2048/clinical_trials","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"12328775500","text":"from django.core.files.uploadedfile import UploadedFile\nfrom django.shortcuts import render\nfrom django.utils.text import slugify\n\nfrom .forms import UploadForm\nfrom .models import Document, WordResult\n\n\ndef handle_upload(f: UploadedFile): # pragma: no cover\n \"\"\"Store uploaded file as a document. Existing documents (determined by filename) are overwritten.\n Non-text files are rejected.\"\"\"\n\n if f.content_type != \"text/plain\":\n raise NotImplementedError(\n f\"Filetype not handled. Please upload a text/plain file.\"\n )\n\n name = slugify(f.name.replace(\".txt\", \"\"))\n content = f.read().decode(\n \"utf-8\"\n ) # we assume small files, so we don't use f.chunk()\n\n existing_queryset = Document.objects.filter(name__exact=name)\n\n # create new document, or overwrite existing one\n if len(existing_queryset) == 0:\n document = Document(name=name, full_text=content)\n else:\n document = existing_queryset[0]\n document.full_text = content\n\n document.save()\n\n # process document\n try:\n document.ingest()\n except LookupError:\n raise LookupError(\n \"LookupError while running ingest function. Did you run initwordy before starting the site?\"\n )\n\n\ndef index(request): # pragma: no cover\n \"\"\"On GET, generate app form and results. On POST, process the uploaded text file.\"\"\"\n\n if request.method == \"POST\":\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n handle_upload(request.FILES[\"document_file\"])\n\n form = UploadForm() # reset form\n words_by_frequency = WordResult.get_words_by_frequency()\n context = {\"form\": form, \"word_by_frequency\": words_by_frequency}\n\n return render(request, \"wordy/index.html\", context)\n","repo_name":"lofidevops/simplenlp","sub_path":"wordy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"31222442507","text":"\"\"\"\nDrift Detection Method\nGama et al.\n\"\"\"\nfrom ._base_drift import BaseDrift\nimport numpy as np\n\nclass DDM(BaseDrift):\n def __init__(self, warning_level=2.0, alarm_level=3.0, min_n_errors = 30) -> None:\n super().__init__()\n self.warning_level = warning_level\n self.alarm_level = alarm_level\n self.min_n_errors = min_n_errors\n \n # Highest\n self.min_std = np.inf\n self.min_error = np.inf\n self.min_std_error = np.inf\n \n self.error_prob = 1\n self.error_std = 0\n self.error_prob_std = 0\n\n # [DEBUG]\n self.errors = []\n\n def _apply(self):\n if self.n < self.min_n_errors:\n return False\n # Go over elements in window and compute probs\n for e in self.window:\n self.error_prob += (e-self.error_prob)/self.n\n self.error_std = np.sqrt(self.error_prob*(1-self.error_prob)/self.n)\n \n # [DEBUG]\n self.errors.append(self.error_prob)\n \n # Clear window\n self.window = []\n \n if (self.error_prob+self.error_std) < self.min_std_error:\n self.min_error = self.error_prob\n self.min_std = self.error_std\n self.min_std_error = self.min_error+self.min_std\n \n if self.error_prob+self.error_std > self.min_error + self.alarm_level*self.min_std:\n self._drift_alarm = True\n self.reset()\n elif self.error_prob+self.error_std > self.min_error + self.warning_level*self.min_std:\n self._drift_warning = True\n \n def reset(self):\n self.min_std = np.inf\n self.min_error = np.inf\n self.min_std_error = np.inf\n self.n = 0\n\nif __name__ == \"__main__\":\n from matplotlib import pyplot as plt\n r1 = np.random.binomial(1,0.3,1000)\n r2 = np.random.binomial(1,0.7,1000)\n r = np.concatenate((r1,r2))\n \n dd = DDM()\n for i,x in enumerate(r):\n dd.add_element(x)\n if dd.drift_alarm:\n print(f'drift alarm {i}')\n \n plt.plot(dd.errors)\n plt.show()\n","repo_name":"charliehpearce/drift-lib","sub_path":"drift_detection/_DDM.py","file_name":"_DDM.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"40395474317","text":"import os\r\nimport re\r\nfrom datetime import datetime\r\nimport json\r\nimport pandas as pd\r\nimport tomotopy as tp\r\nimport spacy\r\nimport numpy as np\r\n\r\n\r\ndef cleaning_docs(df, docs_file):\r\n\r\n docs_d = {} # dictionary of documents to perform topic modeling on, here documents are posts' clean sentences\r\n stopwords = set([line.strip() for line in open(\"stoplist_final.txt\")]) # creating list of stop words\r\n nlp = spacy.load(\"en_core_web_sm\") # loading the spacy language model\r\n lemmatizer = nlp.get_pipe(\"lemmatizer\") # getting the spacy lemmatizer\r\n\r\n for index, row in df.iterrows(): # iterating over posts\r\n post_id = row['concat_id'] # id of the post, e.g. 'Endo_xyz'\r\n post = row['selftext'] # textual content of the post\r\n post_url = row['url'] # url of the post\r\n doc = nlp(post) # processing the post: tokenizing and lemmatizing\r\n sent_n = 0 # counter of sentences in the post\r\n for sent in doc.sents:\r\n sent_id = f'{post_id}_{sent_n}' # creating an id for each post' sentence\r\n sent_n += 1\r\n clean_sent = [] # sentences represented as list of lemmatized tokens\r\n for token in sent:\r\n lemma = token.lemma_\r\n clean_lemma = re.sub(r'[^\\w\\s\\d]', '', lemma) # remove punctuation from tokens\r\n clean_lemma = re.sub(r'[\\n+\\s+]', '', clean_lemma) # remove empty spaces and new lines\r\n if clean_lemma and clean_lemma not in stopwords: # remove empty tokens/stopwords\r\n clean_sent.append(clean_lemma) # adding clean lemma to the clean sentence's list\r\n if len(clean_sent) > 4: # exclude sentences that are less than 5 words\r\n # add sentence id and clean sentence, og sentence, url to the dictionary as a key,value pair\r\n # the clean - tokenized and lemmatized - sentences are our documents\r\n docs_d[sent_id] = [clean_sent, sent.text, post_url]\r\n\r\n with open(docs_file, 'w') as jsonfile: # creating a file with the dict of documents to topic model\r\n json.dump(docs_d, jsonfile)\r\n\r\n return docs_d\r\n\r\n\r\ndef perform_tm(s_ids, corpus, n_topics, rm_top, topwords_file):\r\n\r\n # setting and loading the LDA model\r\n lda_model = tp.LDAModel(k=n_topics, # number of topics in the model\r\n min_df=3, # remove words that occur in less than n documents\r\n rm_top=rm_top) # remove n most frequent words\r\n vocab = set()\r\n for doc in corpus:\r\n lda_model.add_doc(doc) # adding document to the model\r\n vocab.update(doc) # adding tokens in the document to the vocabulary\r\n print('Num docs:{}'.format(len(lda_model.docs)))\r\n print(\"Vocabulary Size: {}\".format(len(list(vocab))))\r\n print('Removed Top words: ', lda_model.removed_top_words)\r\n\r\n iterations = 10\r\n for i in range(0, 100, iterations): # train model 10 times with 10 iterations at each training = 100 iterations\r\n lda_model.train(iterations)\r\n print(f'Iteration: {i}\\tLog-likelihood: {lda_model.ll_per_word}')\r\n\r\n #TOP WORDS\r\n num_top_words = 10 # number of top words to print for each topic\r\n with open(topwords_file, \"w\", encoding=\"utf-8\") as file:\r\n file.write(f\"\\nTopics in LDA model: {n_topics} topics {rm_top} removed top words\\n\\n\") # write settings of the model in file\r\n topic_individual_words = []\r\n for topic_number in range(0, n_topics): # for each topic number in the total number of topics\r\n topic_words = ' '.join( # string of top words in the topic\r\n word for word, prob in lda_model.get_topic_words(topic_id=topic_number, top_n=num_top_words)) # get_topic_words is a tomotopy function that returns a dict of 
words and their probabilities\r\n topic_individual_words.append(topic_words.split(' ')) # append list of the topic's top words for later\r\n file.write(f\"Topic {topic_number}\\n{topic_words}\\n\\n\") # write topic number and top words in file\r\n print(topic_individual_words)\r\n\r\n #TOPIC DISTRIBUTIONS\r\n topic_distributions = [list(doc.get_topic_dist()) for doc in lda_model.docs] # list of lists of topic distributions for each document, get_topic_dist() is a tomotopy function\r\n topic_results = []\r\n for topic_distribution in topic_distributions: # list of dicts of documents' topic distributions to convert into pandas' dataframe\r\n topic_results.append({'topic_distribution': topic_distribution})\r\n df = pd.DataFrame(topic_results, index=s_ids) # df where each row is the list of topic distributions of a document, s_ids are the ids of the sentences\r\n column_names = [f\"Topic {number} {' '.join(topic[:4])}\" for number, topic in enumerate(topic_individual_words)] # create list of column names from topic numbers and top words\r\n df[column_names] = pd.DataFrame(df['topic_distribution'].tolist(), index=df.index) # df where topic distributions are not in a list and match the list of column names\r\n df = df.drop('topic_distribution', axis='columns') # drop old topic distributions' column\r\n dominant_topic = np.argmax(df.values, axis=1) # get dominant topic for each document\r\n df['dominant_topic'] = dominant_topic # add column for the dominant topic in the document\r\n\r\n return df\r\n\r\n\r\ndef main(subreddit):\r\n\r\n reddit_df = pd.read_csv(os.path.join('data', f'{subreddit}.csv')) # path of csv with reddit data\r\n tomo_folder = os.path.join('output', 'topic_modeling') # results' folder\r\n if not os.path.exists(tomo_folder): # create folder if it doesn't exist\r\n os.makedirs(tomo_folder)\r\n\r\n clean_docs_file = os.path.join(tomo_folder, f'{subreddit}.json') # file with clean documents - here, post sentences\r\n if not os.path.exists(clean_docs_file): # if clean documents file doesn't exist, executes data cleaning\r\n start = datetime.now()\r\n print(\"Data Cleaning...\")\r\n docs_dict = cleaning_docs(reddit_df, clean_docs_file)\r\n print(f'{str(datetime.now())}________________{str(datetime.now() - start)}\\n') # print timing of data cleaning\r\n else:\r\n with open(clean_docs_file) as json_file:\r\n docs_dict = json.load(json_file)\r\n doc_ids = [doc_id for doc_id in docs_dict.keys()] # get list of document ids for later\r\n clean_docs = [sent_url[0] for sent_url in docs_dict.values()] # get list of clean documents for later\r\n og_docs = [[sent_url[1]] for sent_url in docs_dict.values()] # get list of original documents for later\r\n #doc_urls = [sent_url[2] for sent_url in docs_dict.values()] # get list of document urls for later\r\n\r\n for num_topics in [7, 10, 15]: # for number of topics - for loops allow to run multiple models with different settings with one execution\r\n for rm_frequent in [15]: # for number of most frequent words to remove\r\n\r\n txt_topwords = os.path.join(tomo_folder, f'{subreddit}-{num_topics}_{rm_frequent}.txt') # path for top words file\r\n csv_dtm = os.path.join(tomo_folder, f'{subreddit}-{num_topics}_{rm_frequent}.csv') # path for doc-topic matrix file\r\n\r\n if not os.path.exists(txt_topwords) or not os.path.exists(csv_dtm): # if result files don't exist, performs topic modeling\r\n start = datetime.now()\r\n print(\"Performing Topic Modeling...\")\r\n lda_dtm = perform_tm(doc_ids, clean_docs, num_topics, rm_frequent, txt_topwords)\r\n 
lda_dtm['sent'] = og_docs # add original sentences to doc-topic df\r\n #lda_dtm['post_url'] = doc_urls # add urls of the posts of the sentences to matrix\r\n lda_dtm.to_csv(csv_dtm) # convert doc-topic df in csv file\r\n print(f'{str(datetime.now())}____Topic modeling {num_topics}, {rm_frequent} time:____{str(datetime.now() - start)}\\n') # print timing of topic modeling\r\n\r\n\r\nif __name__ == '__main__':\r\n main('endo+endometriosis') # name of the subreddit file\r\n","repo_name":"federicabologna/endometriosis","sub_path":"topic_modeling.py","file_name":"topic_modeling.py","file_ext":"py","file_size_in_byte":8030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"23062378533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 02 16:55:22 2018\n\n@author: Daniel\n\"\"\"\nimport pandas as pd\nimport seaborn as sns\nimport os as os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndegree_sign= u'\\N{DEGREE SIGN}'\nimport matplotlib\nmatplotlib.rcParams['mathtext.fontset'] = 'custom'\nmatplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'\nmatplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'\nmatplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'\n\n\n\n\n\n#Change directory to folder containing rates.csv file\nindir='C:\\\\Users\\\\Daniel\\\\Documents\\\\farmscripts\\\\Stuff for mark\\\\'\nrates = pd.read_csv(indir+'rates.csv')\n\nrates1 = rates[rates['experiment']==1]\n\nrates1['J+Tot']=rates1['J']+rates1['J+']\nrates1['J-Tot']=rates1['J']-rates1['J-']\n\nrates1['logJ']=rates1['J'].apply(np.log10)\nrates1['logJ+']= rates1['J+Tot'].apply(np.log10)-rates1['logJ']\nrates1['logJ-']=rates1['logJ']-rates1['J-Tot'].apply(np.log10)\nyerr=(rates1['logJ+'], rates1['logJ-'])\n\n\nrates3 = rates[rates['experiment']==3]\n\nrates3['J+Tot']=rates3['J']+rates3['J+']\nrates3['J-Tot']=rates3['J']-rates3['J-']\n\nrates3['logJ']=rates3['J'].apply(np.log10)\nrates3['logJ+']= rates3['J+Tot'].apply(np.log10)-rates3['logJ']\nrates3['logJ-']=rates3['logJ']-rates3['J-Tot'].apply(np.log10)\nyerr3=(rates3['logJ+'], rates3['logJ-'])\n\n\nfig, ax1 = plt.subplots()\nax1.errorbar(x= rates3['T'], y=rates3['logJ'], xerr = 0.4, yerr=yerr3,fmt='o', \n ecolor = 'b', lw=0.5, label = 'exp. 3' )\nsns.regplot(x=\"T\", y=\"logJ\", data=rates3, ax = ax1, color = 'b', ci= None)\n\n\nax1.errorbar(x= rates1['T'], y=rates1['logJ'], xerr = 0.4, yerr=yerr,fmt='o', ecolor = 'r',\n markerfacecolor = 'r', mec='r', lw=0.5, label = 'exp. 1')\nsns.regplot(x=\"T\", y=\"logJ\", data=rates1, ax = ax1, color = 'r', ci= None)\nax1.set_xlabel ('Temperature ('+degree_sign+'C)')\nax1.set_ylabel(r'$\\mathrm{Log_{10} \\enspace J \\enspace (cm^{-2})}$' )\nplt.legend()\n\n\n","repo_name":"danielosullivan2007/Farmscripts","sub_path":"formark.py","file_name":"formark.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"8525290352","text":"class Problem:\n def __init__(self, value_coefficient, weight_penalty, count_penalty, population_size):\n self.value_coefficient = value_coefficient\n self.weight_penalty = weight_penalty\n self.count_penalty = count_penalty\n self.population_size = population_size\n\n self.weights = []\n self.max_weight = 0\n self.values = []\n self.individual_size = 0\n self.minimum_objects = 0\n self.maximum_objects = 0\n\n def load_file(self, filename):\n with open(filename, 'r') as file:\n self.max_weight = int(file.readline())\n self.minimum_objects = int(file.readline())\n self.maximum_objects = int(file.readline())\n self.individual_size = int(file.readline())\n\n self.weights = list(map(int, file.readline().split(' ')))\n self.values = list(map(int, file.readline().split(' ')))\n","repo_name":"andrei-i-gavrila/JewelryExhibitionEA","sub_path":"ea/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18566134991","text":"import torch\nimport util\nimport argparse\nfrom model import *\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nfrom engine import trainer\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--device',type=str,default='cuda:0',help='')\nparser.add_argument('--data',type=str,default='data/METR-LA',help='data path')\nparser.add_argument('--data_id',type=str,default='METR-LA',help='data path')\nparser.add_argument('--adjdata',type=str,default='data/sensor_graph/adj_mx.pkl',help='adj data path')\nparser.add_argument('--adjtype',type=str,default='doubletransition',help='adj type')\nparser.add_argument('--gcn_bool',action='store_true',help='whether to add graph convolution layer')\nparser.add_argument('--aptonly',action='store_true',help='whether only adaptive adj')\nparser.add_argument('--addaptadj',action='store_true',help='whether add adaptive adj')\nparser.add_argument('--randomadj',action='store_true',help='whether random initialize adaptive adj')\nparser.add_argument('--seq_length',type=int,default=12,help='')\nparser.add_argument('--nhid',type=int,default=32,help='')\nparser.add_argument('--in_dim',type=int,default=2,help='inputs dimension')\nparser.add_argument('--num_nodes',type=int,default=207,help='number of nodes')\nparser.add_argument('--batch_size',type=int,default=64,help='batch size')\nparser.add_argument('--learning_rate',type=float,default=0.001,help='learning rate')\nparser.add_argument('--dropout',type=float,default=0.3,help='dropout rate')\nparser.add_argument('--weight_decay',type=float,default=0.0001,help='weight decay rate')\nparser.add_argument('--checkpoint',type=str,help='')\nparser.add_argument('--plotheatmap',type=str,default='True',help='')\n\n# EMA\nparser.add_argument('--use_ema', action='store_true')\nparser.add_argument('--epsilon', type=float, default=0.001)\nparser.add_argument('--moving_average_decay', type=float, default=0.99)\nparser.add_argument('--standing_steps', type=int, default=100)\nparser.add_argument('--start_iter', type=int, default=300)\nparser.add_argument('--ema_loss', type=str, default='BDFMSE', choices=['DFMSE', 'BDFMSE', 'TDFMSE', 'PDFMSE', 'CSMSE'])\nparser.add_argument('--ema_eval_model', type=str, default='target', choices=['source', 'target'])\n\nargs = parser.parse_args()\n\n# exp id\nargs.exp_id = \"id_\"\nargs.exp_id += \"data_\" + str(args.data_id) + \"_\"\nargs.exp_id += \"ema_\" + str(args.use_ema) + \"_\"\nargs.exp_id += \"eps_\" + str(args.epsilon) + \"_\"\nargs.exp_id += \"mad_\" + str(args.moving_average_decay) + \"_\"\nargs.exp_id += \"sit_\" + str(args.start_iter) + \"_\"\nargs.exp_id += \"lr_\" + str(args.learning_rate) + \"_\"\n#args.exp_id += \"lr_\" + str(args.learning_rate)\n\nprint(args.exp_id)\n\n# checkpoints, outputs\nif args.use_ema:\n os.makedirs(os.path.join(\"outputs\", args.data_id, \"wavebound\", args.exp_id, \"epoch\"), exist_ok=True)\n args.output_dir = os.path.join(\"outputs\", args.data_id, \"wavebound\")\n args.checkpoint_dir = os.path.join(\"checkpoints\", args.data_id, \"wavebound\")\nelse:\n os.makedirs(os.path.join(\"outputs\", args.data_id, \"origin\", args.exp_id, \"epoch\"), exist_ok=True)\n args.output_dir = os.path.join(\"outputs\", args.data_id, \"origin\")\n args.checkpoint_dir = os.path.join(\"checkpoints\", args.data_id, \"origin\")\n\n\ndef main():\n device = torch.device(args.device)\n _, _, adj_mx = util.load_adj(args.adjdata,args.adjtype)\n dataloader = util.load_dataset(args.data, 
args.batch_size, args.batch_size, args.batch_size)\n scaler = dataloader['scaler']\n supports = [torch.tensor(i).to(device) for i in adj_mx]\n\n print(args)\n\n if args.randomadj:\n adjinit = None\n else:\n adjinit = supports[0]\n\n if args.aptonly:\n supports = None\n\n # testing\n print(\"Testing\")\n eval_path = os.path.join(args.checkpoint_dir, args.exp_id, \"epoch\", \"best.pth\") if not args.use_ema \\\n else os.path.join(args.checkpoint_dir, \"target\", args.exp_id, \"epoch\", \"best.pth\")\n engine = trainer(scaler, args.in_dim, args.seq_length, args.num_nodes, args.nhid, args.dropout,\n args.learning_rate, args.weight_decay, device, supports, args.gcn_bool, args.addaptadj,\n adjinit, args)\n\n engine.eval_model.load_state_dict(torch.load(eval_path))\n engine.eval_model.eval()\n\n print('model load successfully')\n\n outputs = []\n realy = torch.Tensor(dataloader['y_test']).to(device)\n realy = realy.transpose(1,3)[:,0,:,:]\n\n for iter, (x, y) in enumerate(dataloader['test_loader'].get_iterator()):\n testx = torch.Tensor(x).to(device)\n testx = testx.transpose(1,3)\n with torch.no_grad():\n preds = engine.eval_model(testx).transpose(1,3)\n outputs.append(preds.squeeze())\n\n yhat = torch.cat(outputs,dim=0)\n yhat = yhat[:realy.size(0),...]\n\n\n\n\n amae = []\n amape = []\n armse = []\n for i in range(12):\n pred = scaler.inverse_transform(yhat[:,:,i])\n real = realy[:,:,i]\n metrics = util.metric(pred,real)\n log = 'Evaluate best model on test data for horizon {:d}, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'\n print(log.format(i+1, metrics[0], metrics[1], metrics[2]))\n amae.append(metrics[0])\n amape.append(metrics[1])\n armse.append(metrics[2])\n\n log = 'On average over 12 horizons, Test MAE: {:.4f}, Test MAPE: {:.4f}, Test RMSE: {:.4f}'\n print(log.format(np.mean(amae),np.mean(amape),np.mean(armse)))\n\n # save output as np\n print(f\"save output as numpy... {os.path.join(args.output_dir, args.exp_id, 'epoch')}\")\n\n outp = scaler.inverse_transform(yhat)\n outp = outp.cpu().numpy()\n realy = realy.cpu().numpy()\n with open(os.path.join(args.output_dir, args.exp_id, 'epoch', 'true.npy'), 'wb') as f:\n np.save(f, realy)\n with open(os.path.join(args.output_dir, args.exp_id, 'epoch', 'pred.npy'), 'wb') as f:\n np.save(f, outp)\n print(\"done.\")\n\n '''\n Heatmaps\n if args.plotheatmap == \"True\":\n adp = F.softmax(F.relu(torch.mm(model.nodevec1, model.nodevec2)), dim=1)\n device = torch.device('cpu')\n adp.to(device)\n adp = adp.cpu().detach().numpy()\n adp = adp*(1/np.max(adp))\n df = pd.DataFrame(adp)\n sns.heatmap(df, cmap=\"RdYlBu\")\n plt.savefig(\"./emb\"+ '.pdf')\n\n y12 = realy[:,99,11].cpu().detach().numpy()\n yhat12 = scaler.inverse_transform(yhat[:,99,11]).cpu().detach().numpy()\n\n y3 = realy[:,99,2].cpu().detach().numpy()\n yhat3 = scaler.inverse_transform(yhat[:,99,2]).cpu().detach().numpy()\n\n df2 = pd.DataFrame({'real12':y12,'pred12':yhat12, 'real3': y3, 'pred3':yhat3})\n df2.to_csv('./wave.csv',index=False)\n\n '''\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"carrtesy/Graph-WaveNet-WaveBound","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"33953101775","text":"import threading\nimport time\n\nfrom src.main.python.net.i2cat.cnsmo.lib.model.service import Service\n\nfrom src.main.python.net.i2cat.cnsmo.deployment.bash import BashDeployer\nfrom src.main.python.net.i2cat.cnsmo.manager.cnsmo import CNSMOManager\nfrom src.main.python.net.i2cat.cnsmoservices.vpn.manager.vpn import VPNManager\nfrom src.main.python.net.i2cat.cnsmo.factory.system.state.factory import SystemStateFactory\n\n\ndef get_server_app_request():\n \"\"\"\n Main request necessary to create an APP from a service.\n It contains the ID, no resources and no dependencies. The trigger, which is the command to execute this app\n The trigger is quite intrusive, it requires some refactor to make it clearer and easy to use\n :return:\n \"\"\"\n d = dict(service_id=\"server_123\",\n trigger= \"cp /home/oscarcillo/example/server.py /home/CNSMO/ENVS/server_123/server.py && python /home/CNSMO/ENVS/server_123/server.py\",\n resources = [],\n dependencies=[],\n endpoints= [{ \"uri\":\"http://127.0.0.1:9092/server/{param}\",\n \"driver\":\"REST\",\n \"logic\":\"get\",\n \"name\":\"start\"}])\n\n service = Service()\n service.objectify(**d)\n return service\n\n\ndef get_cert_app_request():\n d = dict(service_id=\"cert_123\",\n trigger= \"cp /home/oscarcillo/example/cert.py /home/CNSMO/ENVS/cert_123/cert.py && python cert.py\",\n resources = [],\n dependencies=[],\n endpoints=[{\"uri\":\"http://127.0.0.1:9091/dh/\",\n \"driver\":\"REST\",\n \"logic\":\"get\",\n \"name\":\"get_dh\"}])\n\n\n service = Service()\n service.objectify(**d)\n return service\n\n\ndef main():\n \"\"\"\n This is the second proof of concept of the CYCLONE CNSMO architecture\n The idea is the following:\n :We have a distributed system state, which is actually implmemented.\n :We also have the VPN Manager which is a kind of orchestrator for different services\n :the credentialManager service represents the entity that will provide the config files and stuff\n :The Server is meant to be the service that will deploy the VPN server daemon\n\n the credential and server services are both configured with a basic bash deployer. 
That means\n that any launched app in that service, will be spawned via bash.\n For simplicity this PoC just launches to python REST servers that only respond with dummy responses.\n :return:\n \"\"\"\n #Configuring the System State Manager, it listen to new services\n system_state = SystemStateFactory.generate_system_state_manager(\"localhost:6379\")\n t = threading.Thread(target=system_state.start)\n t.start()\n time.sleep(1) #Sleeping for synchronization\n\n #The bash deployer to be used by the Server and the credential manager\n bash_deployer = BashDeployer(None)\n\n #Configuring the VPN Orchestrator in a different Thread to make things feel real\n vpn = VPNManager(\"localhost:6379\")\n t2 = threading.Thread(target=vpn.start)\n t2.start()\n time.sleep(1) #Sleeping for synch\n\n #At this point the VPN Manager is advertising himself to the SystemState,\n #There is a Main topic called Discovery.\n #By default the VPN Orchestrator is susbcribed to Client, Server and Credential MAnager Topics\n\n\n #Configuring the Server Manager\n server = CNSMOManager(\"localhost:6379\", \"server_123\", \"Server\", bash_deployer, None)\n\n #Configuring the Credential Manager\n credential = CNSMOManager(\"localhost:6379\", \"cert_123\", \"CredentialManager\", bash_deployer, None)\n\n #Launching the server in another thread to make things feel real\n t3 = threading.Thread(target=server.start)\n t3.start()\n time.sleep(1)\n\n #Launching the credential manager in a different thread to make things feel real\n t4 = threading.Thread(target=credential.start)\n t4.start()\n time.sleep(1)\n\n #Now we simulate that we are composing a server service for the ServerManager\n server.compose_service(**get_server_app_request().dictionarize())\n #...And launch it\n server.launch_service(\"server_123\")\n time.sleep(0.5)# Again, for synch, this is just to help to read the logs in the correct order\n\n\n #Let's compose a service for the credential manager as well\n credential.compose_service(**get_cert_app_request().dictionarize())\n #...And of course, launch it\n credential.launch_service(\"cert_123\")\n\n #We sleep here in order to let the servers spawn correctly...\n time.sleep(0.5)\n #to finally deploy the VPN...\n vpn.deploy()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"dana-i2cat/cnsmo","sub_path":"src/test/python/cnsmo/poc/pocv2.py","file_name":"pocv2.py","file_ext":"py","file_size_in_byte":4680,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"}
+{"seq_id":"12919138957","text":"import torch\nfrom torchvision import transforms\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom .models import get_conv4_model\nfrom os.path import join, splitext, isfile\nfrom os import listdir\nfrom PIL import Image\n\nfrom .ransac import Ransac\n\n\nclass FeatureMatching:\n def __init__(self, model):\n self.strideNet = 16\n self.minNet = 16\n self.base = 20\n self.model = model\n\n def rescale_image(self, I, featMax, featMin=1):\n w, h = I.size\n ratio = float(w) / h\n if ratio < 1:\n feat_h = featMax\n feat_w = max(round(ratio * feat_h), featMin)\n\n else:\n feat_w = featMax\n feat_h = max(round(feat_w / ratio), featMin)\n resize_w = (feat_w - 1) * self.strideNet + self.minNet\n resize_h = (feat_h - 1) * self.strideNet + self.minNet\n\n return resize_w, resize_h\n\n def multi_scale_resize(self, image, feature_sizes):\n images = list()\n for size in feature_sizes:\n w, h = self.rescale_image(image, size)\n images.append(image.resize((w, h)))\n return images\n\n @staticmethod\n def normalize(vec, axis, eta=1e-7, is_tensor=False):\n if is_tensor:\n return vec.div(torch.norm(vec, p=2, dim=axis).detach() + eta)\n else:\n return vec / (np.linalg.norm(vec, ord=2, axis=axis, keepdims=True) + eta)\n\n @staticmethod\n def get_sizes(base, abs_range, step=1, scale_type=\"affine\"):\n if scale_type == \"log\":\n feature_sizes = [int(base * 2 ** (i / abs_range)) for i in range(-abs_range, abs_range + 1, 1)]\n else:\n feature_sizes = [base + step * i for i in range(-abs_range, abs_range + 1, 1)]\n return feature_sizes\n\n @staticmethod\n def get_2d_idx(i, width):\n w = i % width\n h = i // width\n return w, h\n\n def compute_multi_scale_descriptors(self, images, feature_sizes):\n descriptors = list()\n preprocess = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n if torch.cuda.is_available():\n self.model.to('cuda')\n self.model.eval()\n with torch.no_grad():\n print(\"Computing descriptors...\")\n for image in tqdm(images):\n rescaled_images = self.multi_scale_resize(image, feature_sizes)\n cur_features = list()\n for cur_img in rescaled_images:\n inp_tensor = preprocess(cur_img).unsqueeze(0)\n if torch.cuda.is_available():\n inp_tensor = inp_tensor.to('cuda')\n cur_features.append(np.squeeze(self.model(inp_tensor).cpu().numpy(), axis=0))\n descriptors.append(cur_features)\n\n return descriptors\n\n @staticmethod\n def get_feats_tensors(feats1):\n return [torch.from_numpy(feat) for feat in feats1]\n\n @staticmethod\n def compute_mutual_match(feat1, feat2):\n match1 = []\n match2 = []\n similarity = []\n grid_size = []\n\n n_features, feat2H, feat2W = feat2.shape\n _, feat1H, feat1W = feat1.shape\n feat1 = FeatureMatching.normalize(feat1, axis=0, is_tensor=True).permute(1, 2, 0).view(-1, n_features)\n feat2 = FeatureMatching.normalize(feat2, axis=0, is_tensor=True).permute(1, 2, 0).view(-1, n_features)\n score = torch.mm(feat1, feat2.transpose(0, 1))\n topk0_score, topk0_index = score.topk(k=1, dim=0)\n topk1_score, topk1_index = score.topk(k=1, dim=1)\n\n index0 = torch.zeros((score.shape[0], score.shape[1])).scatter_(0, topk0_index,\n topk0_score)\n index1 = torch.zeros((score.shape[0], score.shape[1])).scatter_(1, topk1_index,\n topk1_score)\n\n intersection_score = index0 * index1\n intersection = intersection_score.nonzero()\n\n for i1, i2 in intersection:\n i1 = i1.item()\n i2 = i2.item()\n w1, h1 = FeatureMatching.get_2d_idx(i1, feat1W)\n w2, h2 = 
FeatureMatching.get_2d_idx(i2, feat2W)\n match1.append([(w1 + 0.5) / feat1W, (h1 + 0.5) / feat1H])\n match2.append([(w2 + 0.5) / feat2W, (h2 + 0.5) / feat2H])\n similarity.append(intersection_score[i1, i2].item() ** 0.5)\n grid_size.append([1. / feat1W, 1. / feat1H])\n return match1, match2, similarity, grid_size\n\n @staticmethod\n def compute_feature_matching(feats1, feat2):\n match1 = []\n match2 = []\n similarity = []\n grid_size = []\n _, feat2_h, feat2_w = feat2.shape\n for feat1 in feats1:\n feat1 = FeatureMatching.normalize(feat1, axis=0, is_tensor=True)\n match1_, match2_, similarity_, grid_size_ = FeatureMatching.compute_mutual_match(feat1, feat2)\n match1 += match1_\n match2 += match2_\n similarity += similarity_\n grid_size += grid_size_\n\n match1 = torch.from_numpy(np.array(match1))\n match2 = torch.from_numpy(np.array(match2))\n similarity = torch.from_numpy(np.array(similarity))\n grid_size = torch.from_numpy(np.array(grid_size))\n\n return match1, match2, similarity, grid_size, feat2_h*feat2_w\n\n\n\n\ndef get_file_extension(file):\n return splitext(file)[1][1:].lower()\n\n\ndef list_folder_images(folder):\n image_types = ['jpg', 'tif', 'png', 'bmp']\n return [join(folder, f) for f in listdir(folder) if (isfile(join(folder, f)) and (get_file_extension(f) in image_types))]\n\n\ndef get_image_list(folder):\n images_path = list_folder_images(folder)\n return [Image.open(image_path).convert('RGB') for image_path in images_path]\n\n\nif __name__ == \"__main__\":\n conv4 = get_conv4_model()\n feature_matching = FeatureMatching(conv4)\n feature_sizes = feature_matching.get_sizes(20, 2)\n\n images1 = get_image_list(\"../tmp_manuscripts/P2_/illustration\")\n images2 = get_image_list(\"../tmp_manuscripts/P4_/illustration\")\n\n descriptors1 = feature_matching.compute_multi_scale_descriptors(images1, feature_sizes)\n descriptors2 = feature_matching.compute_multi_scale_descriptors(images2, feature_sizes)\n feat1 = torch.from_numpy(descriptors1[6][0])\n feats1 = [torch.from_numpy(feat) for feat in descriptors1[6]]\n feat2 = torch.from_numpy(descriptors2[5][0])\n\n #match1, match2, similarity, grid_size = feature_matching.compute_mutual_match(feat1, feat2)\n match1, match2, similarity, grid_size, feature_map_size = feature_matching.compute_feature_matching(feats1, feat2)\n ransac = Ransac()\n score, _ = ransac.get_ransac_score(match1, match2, similarity, grid_size, feature_map_size, tolerance=0.1,\n nb_iter=100,\n transformation_name=\"affine\", nb_max_iter=100)\n exit()","repo_name":"Rykoua/ImageCollation","sub_path":"IllustrationMatcher/utils/feature_matching.py","file_name":"feature_matching.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"92"}
+{"seq_id":"34855203939","text":"# Code by omoknooni\n# Tensorflow Serving API\n\nfrom flask import Flask, render_template, request, Response\nfrom werkzeug.utils import secure_filename\nfrom google.cloud import storage\nfrom PIL import Image, ImageOps\n\nimport os\nimport io\nimport json\nimport uuid, traceback\nimport requests\nimport numpy as np\n\napp = Flask(__name__)\napp.config['UPLOAD_DIR'] = '/tmp/'\nTENSOR_URL = 'http://[internal docker network ip]:8501/v1/models/wowboard:predict'\n\nALLOWED_CONTENT_TYPE = {\n 'jpg':'image/jpeg',\n 'jpeg':'image/jpeg',\n 'png':'image/png',\n}\nALLOWED_EXTENSION = sorted(ALLOWED_CONTENT_TYPE.keys())\n\nBUCKET_NAME = 'wowbd-detected-img'\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/app/[gcp storage iam account json file]'\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSION\n\ndef prepare_image(image, target):\n if image.mode != \"RGB\":\n image = image.convert(\"RGB\")\n image = image.resize(target)\n image = np.array(image, dtype=np.uint8)\n return image\n\n\n@app.route('/')\ndef home():\n return Response(json.dumps({'status': 'healthy'}), mimetype='application/json', status=200)\n\n# @app.route('/upload_test')\n# def upload_test():\n# return render_template('upload_test.html')\n\n@app.route('/upload', methods=['POST'])\ndef upload():\n if request.method == \"POST\":\n file_obj = request.files['file']\n TEMP_FILENAME = os.path.join(app.config['UPLOAD_DIR'],secure_filename(file_obj.filename))\n task_id = str(uuid.uuid4())\n dest_name = task_id + os.path.splitext(file_obj.filename)[1]\n file_obj.save(os.path.join(TEMP_FILENAME))\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(BUCKET_NAME)\n blob = bucket.blob(task_id + '/' + dest_name)\n blob.upload_from_filename(TEMP_FILENAME)\n print(f'File {TEMP_FILENAME} uploaded to {dest_name}')\n\n try:\n # preprocessing\n with open(TEMP_FILENAME, 'rb') as f:\n img_data = f.read()\n image = Image.open(io.BytesIO(img_data))\n image = ImageOps.exif_transpose(image)\n image = prepare_image(image, target=(1024,1024)) \n height, width, _ = image.shape\n origin_image = image.copy()\n\n # add axis\n image = image[np.newaxis, :, :]\n\n # detection\n image_data = json.dumps({\"signature_name\": \"serving_default\", \"instances\": image.tolist()})\n res = requests.post(TENSOR_URL, data=image_data)\n\n # extract Result\n result = res.json()[\"predictions\"][0]\n num_detections = int(result[\"num_detections\"])\n\n result['detection_scores'] = np.array(result[\"detection_scores\"], dtype=np.float32)\n result['detection_boxes'] = np.array(result[\"detection_boxes\"], dtype=np.float32)\n\n obj_index = result['detection_scores'] > 0.65\n score = result[\"detection_scores\"][obj_index] # 0.973602414\n boxes = result[\"detection_boxes\"][obj_index] # [0.302173734, 0.617861152, 0.531666338, 0.770464897]\n num_detections = obj_index.tolist().count(True)\n\n for idx, obj in enumerate(boxes):\n obj_image = origin_image[int(obj[0]*height):int(obj[2]*height),int(obj[1]*width):int(obj[3]*width)].copy()\n obj_save = Image.fromarray(obj_image)\n obj_local = os.path.join(app.config['UPLOAD_DIR'], f'obj_{idx}.png')\n obj_save.save(obj_local)\n\n #TODO : extension\n obj_blob = bucket.blob(task_id + '/' + f'obj_{idx}.png')\n obj_blob.upload_from_filename(obj_local)\n\n \n except Exception as e:\n print(traceback.print_exc())\n if res.json:\n return Response(json.dumps({'Error' : 'detection failed', 'traceback': str(res.json())[:50]}), 
mimetype='application/json', status=500)\n \n if num_detections:\n return Response(json.dumps({'status': 'success', 'task_id': task_id, 'num_detections': num_detections,'score': score.tolist(), 'boxes': boxes.tolist()}), mimetype='application/json', status=200)\n else:\n return Response(json.dumps({'status': 'success', 'task_id': task_id}), mimetype='application/json', status=200)\n else:\n return Response(json.dumps({'Error':'Method not allowed'}), mimetype='application/json', status=405)\n\n\nif __name__ == \"__main__\":\n # Only for debugging while developing\n app.run(host='0.0.0.0', debug=True, port=80)\n","repo_name":"omoknooni/wowboard-detector","sub_path":"serving/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"30629795075","text":"import praw\n\ndef authenticate_reddit(credentials):\n reddit = praw.Reddit(\n client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n password=credentials['password'],\n user_agent=f\"SemanticForum by u/{credentials['username']}\",\n username=credentials['username']\n )\n return reddit\n\ndef reddit_search(reddit, subreddit_name, query, limit):\n subreddit = reddit.subreddit(subreddit_name)\n\n search_obj = subreddit.search(query=query, sort='hot', time_filter='all')\n \n search_result = []\n for i, submission in enumerate(search_obj):\n if i < limit:\n title = submission.title\n permalink = f'https://reddit.com{submission.permalink}'\n comment_limit = 5 # arbitrary, but allowing more comments is slower and need to deal with MoreComments objects\n comments = [comment.body for comment in submission.comments[:comment_limit]]\n search_result.append({'title': title, 'permalink': permalink, 'comments': comments})\n else: break\n \n return search_result","repo_name":"micahcantor/RedditSemanticSearchFlask","sub_path":"reddit_utils.py","file_name":"reddit_utils.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"18335795585","text":"class ListNode(object):\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2, remainder=0):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n l3 = None\n current_node = l3\n\n while l1 or l2 or remainder != 0:\n sum_numbers = (l1.val if l1 else 0) + (l2.val if l2 else 0) + remainder\n\n remainder = sum_numbers // 10\n node_value = sum_numbers % 10\n\n new_node = ListNode(node_value)\n\n if not l3:\n l3 = new_node\n current_node = new_node\n else:\n current_node.next = new_node\n current_node = new_node\n\n l1 = (l1.next if l1 else None)\n l2 = (l2.next if l2 else None)\n\n return l3\n\nl1 = ListNode(10, next=ListNode(5))\nl2 = ListNode(10, next=ListNode(5))\n\nsolution = Solution()\nl3 = solution.addTwoNumbers(l1=l1,l2=l2)\n\nprint(l3)\n","repo_name":"mattiasu96/leetcoding-practice","sub_path":"Random problems/2. Add Two Numbers/add_two_numbers_iterative.py","file_name":"add_two_numbers_iterative.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"74810068459","text":"from models.ullava import UllavaConfig, UllavaForCausalLM\nfrom models.ullava_core import UllavaCoreConfig, UllavaCoreForCausalLM\nfrom models.grounding_module import load_groundingdino_model, GroundingModule\nfrom models.tools import KeywordsStoppingCriteria, smart_resize_token_embedding, \\\n smart_special_token_and_embedding_resize, multi_modal_resize_token_embedding\n\nDEFAULT_IMG_TOKEN = ''\n\nDEFAULT_IMG_PATCH_TOKEN = \"\"\nDEFAULT_IMG_START_TOKEN = \"\"\nDEFAULT_IMG_END_TOKEN = \"\"\n\nDEFAULT_VID_PATCH_TOKEN = \"\"\nDEFAULT_VID_START_TOKEN = \"\"\nDEFAULT_VID_END_TOKEN = \"\"\n\nDEFAULT_SEG_TOKEN = '[SEG]'\nDEFAULT_TAG_START = '[tag]'\nDEFAULT_TAG_END = '[/tag]'\n\nDEFAULT_BOS_TOKEN = ''\nDEFAULT_EOS_TOKEN = ''\nDEFAULT_UNK_TOKEN = ''\nDEFAULT_PAD_TOKEN = '[PAD]'\nIGNORE_INDEX = -100\n\n\n__all__ = [\n \"UllavaConfig\",\n \"UllavaForCausalLM\",\n \"UllavaCoreConfig\",\n \"UllavaCoreForCausalLM\",\n \"GroundingModule\",\n \"load_groundingdino_model\",\n \"KeywordsStoppingCriteria\",\n \"smart_resize_token_embedding\",\n \"multi_modal_resize_token_embedding\",\n \"smart_special_token_and_embedding_resize\",\n \"DEFAULT_IMG_TOKEN\",\n \"DEFAULT_SEG_TOKEN\",\n \"DEFAULT_IMG_PATCH_TOKEN\",\n \"DEFAULT_IMG_START_TOKEN\",\n \"DEFAULT_IMG_END_TOKEN\",\n \"DEFAULT_VID_PATCH_TOKEN\",\n \"DEFAULT_VID_START_TOKEN\",\n \"DEFAULT_VID_END_TOKEN\",\n \"DEFAULT_BOS_TOKEN\",\n \"DEFAULT_EOS_TOKEN\",\n \"DEFAULT_UNK_TOKEN\",\n \"DEFAULT_PAD_TOKEN\",\n \"IGNORE_INDEX\",\n \"DEFAULT_TAG_START\",\n \"DEFAULT_TAG_END\"\n]\n\n\n\n","repo_name":"VeritasXu/u-LLaVA","sub_path":"models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"12455611751","text":"import re\nimport numpy as np\nimport pandas as pd\nimport time\nfrom tqdm import tqdm\nimport matplotlib.pylab as plt\nimport seaborn as sns\nfrom pymongo import MongoClient\n\nconnection_url = ''\nconnection = MongoClient(connection_url)\n\ndb = connection.get_database('')\ncollection = db.get_collection('')\n\ncursor = collection.find()\nlist_c = list(cursor)\n\ndanawa_data = pd.DataFrame(list_c)\ndanawa_data.info()\n\n\n\n# 리뷰 데이터 제거\ndanawa_data = danawa_data.drop(danawa_data.iloc[:, [17,18,19]].columns, axis=1)\n\n# null값을 제거한 데이터프레임\ndanawa_notnull_data = danawa_data.dropna(axis=0)\ndanawa_notnull_data.info()\n\n# 전체 size의 값이 없는 데이터 제거(51개)\ndanawa_size_drop_data = danawa_notnull_data.loc[danawa_notnull_data['size'].notnull()]\ndanawa_size_drop_data.reset_index(inplace=True, drop=True)\ndanawa_size_drop_data.info()\n\n# 수납칸수 수치형으로 변환\ndanawa_size_drop_data['closet']\n\n# closet 수납칸수를 int형으로 바꾸는 함수(수납칸수가 없는경우는 0)\ndef change_closet_int(data):\n if not data:\n closet_int = 0\n elif '~' in data:\n closet_int = data.lstrip('~').rstrip('칸')\n else:\n closet_int = data.rstrip('칸')\n return int(closet_int)\ndanawa_size_drop_data['int_closet'] = [change_closet_int(i) for i in danawa_size_drop_data['closet']]\ndanawa_size_drop_data.info()\n\n\n# size변수를 를 각각 하나로 변경 \n\n# cm없애는 함수\ndef del_cm(data):\n size = data.rstrip('cm')\n return size\n\n# 잘못된 수집으로 인한 한글제거\ndef del_hangul(data): \n remove_in = '\\(.*\\)'\n hangul = '[:/\\[\\]\\(\\)가-힣+]'\n result = re.sub(remove_in, '', data) \n result = re.sub(hangul, '', result)\n return result.strip()\n\n# 범위로 주어진 사이즈를 범위의 가장 큰 부분으로 대체하는 함수\ndef del_range(lst):\n if lst[lst.find('~')+1] == 'x':\n lst = lst[:lst.find('~')] + lst[lst.find('~')+1:]\n else:\n lst = lst\n if '~' in lst:\n if 'x' in lst[:lst.find('~')+1]:\n range1 = lst[lst.find('x', lst.find('x')+2)+1:lst.find('~')+1]\n else:\n range1 = lst[:lst.find('~')+1]\n \n result = re.sub(range1, '', lst)\n else:\n result = lst\n \n return result\n\n# 가로*세로*높이를 각각 나누는 함수\ndef divide_size(data):\n if 'x' in data:\n division = data.split('x')\n elif '×' in data:\n division = data.split('×')\n \n return division\n\n# 사이즈를 하나의 값들로 나누는 함수\ndef divide_size2(data):\n global data2\n if 'x' in data:\n data2 = data.replace('x', ' ')\n elif '×' in data:\n data2 = data.replace('×', ' ')\n data3 = ' '.join(data2.split()).split()\n return data3\n\n# 각각의 값을 구하는 함수\ndef get_har_size(data):\n har = float(data[0])\n return har\n\ndef get_ver_size(data):\n ver = float(data[1])\n return ver\n\ndef get_hei_size(data):\n if len(data) != 2:\n hei = float(data[2])\n else:\n hei = np.nan\n return hei\n\ndanawa_size_drop_data['modified_size'] = [divide_size2(del_range(del_hangul(del_cm(i)))) for i in danawa_size_drop_data['size']]\nlist(danawa_size_drop_data['modified_size'])\n\ndanawa_size_drop_data['size_har'] = [get_har_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data['size_ver'] = [get_ver_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data['size_hei'] = [get_hei_size(i) for i in list(danawa_size_drop_data['modified_size'])]\ndanawa_size_drop_data.info()\n\n# 6개월 가격추이의 값이 존재하지 않는 3개의 데이터를 제거\ndanawa_data2 = danawa_size_drop_data.loc[danawa_size_drop_data['price_6month'].notnull()]\ndanawa_data2.info()\n\n\n# 기울기\n\n# y = 최근 minPrice - 과거 minPrice, x = 6(관측된 최근의 개월) - 1(첫달), 기울기 = y/x \n# x1 = 1, \n# x2 = len(가격추이), 만약 1이��면 기울기=0 \n# y2 = len(가격추이)의 minPrice \n# y1 = 1의 minPrice \n\n# 기울기를 가져오는 함수\ndef get_slope(x1, 
x2, y1, y2):\n if x2 == 1:\n result = 0\n else:\n result = (y2-y1) / (x2-x1)\n return result\n\ndef get_price_slope(data):\n x1 = 1\n x2 = len(data)\n y2 = data[x2-1].get('minPrice')\n y1 = data[0].get('minPrice')\n \n result = get_slope(x1,x2,y1,y2)\n return result\n\nget_price_slope(danawa_data2['price_6month'][0])\ndanawa_data2['price_slope'] = [get_price_slope(lst) for lst in danawa_data2['price_6month']]\n\n\n\n\n\n# 토크나이저를 통해 json값으로 돌려주는 함수\ndef get_token_json(data):\n text = data\n import requests as req\n import json\n url = ''\n body = {\n \"text\" : text,\n \"analyzer\": \"nori_korean_analyzer\",\n # \"explain\": True\n }\n headers = {\n 'Content-Type': 'application/json; charset=utf-8'\n }\n noun = req.post(url, json.dumps(body), headers = headers)\n \n return noun.json()\n\n# 데이터의 하나의 컬럼을 넣었을 때, 하나의 값당 토크나이저를 돌리고 그를 공백값으로 join해서 내보내주는 함수\ndef get_token_df(data):\n json_data = [get_token_json(i) for i in data]\n tokens_data = [i.get('tokens') for i in json_data]\n \n token_df = pd.DataFrame(columns=['data'])\n for q in range(len(tokens_data)):\n token_one = [i.get('token') for i in tokens_data[q]]\n token_df = token_df.append({'data':' '.join(token_one)}, ignore_index=True)\n final = [i.split(' ') for i in token_df['data']]\n return final\n\n\n\n# token화가 필요한 변수(info_all, with_in, form, color, function) 수정 \n# with_in 수정\ndanawa_data2['with_in'] = get_token_df(danawa_data2['with_in'])\ndanawa_data2['with_in']\n\n# form 수정\ndanawa_data2['form'] = [i.strip('형') for i in danawa_data2['form']]\n\n# color 수정\ndanawa_data2['color'] = get_token_df(danawa_data2['color'])\ndanawa_data2['color']\n\n# function 수정\ndanawa_data2['function'] = get_token_df(danawa_data2['function'])\ndanawa_data2['function']\n\n# info_all 수정\ndanawa_data2['info_all'] = get_token_df(danawa_data2['info_all'])\ndanawa_data2['info_all']\n\n\n# 화장대의 특성중 하나인 레일의 종류가 토크나이저로 인해 특성을 무시하고 '레일'만 남는다는 사실을 발견!! 
\n# -> 이부분에 대해 '레일'을 제거하고 '볼'을 남기는 방법을 택함 \n# ('볼 레일' 이런식으로 띄워도 '레일'만 인식됨)\nget_token_json(['볼', '레일'])\nget_token_json(['볼레일'])\n\n# function의 '볼레일' 수정\na = [' '.join(i).replace('레일', '') if '레일' in ' '.join(i) else i for i in danawa_data2['function']]\nb = [i.replace('레일', '') if '레일' in i else i for i in a]\nlen(b)\ndanawa_data2['function'] = get_token_df(b)\n\n# info_all의 '볼레일' 수정\na = [' '.join(i).replace('레일', \"'/ 레일\") if '레일' in ' '.join(i) else i for i in danawa_data2['info_all']]\nb = [i.replace('레일', \"'/ 레일\") if '레일' in i else i for i in a]\nlen(b)\ndanawa_data2['info_all'] = get_token_df(b)\n\n\n# 이 토큰들을 하나의 벡터로 만드는 작업\npath = './word2vec/word2vec_210813.bin'\nfrom gensim.models import Word2Vec\nmodels = Word2Vec.load(path)\n\n# token화 처리한 4개의 변수에 대해서 벡터화\nform_vector = [np.mean(models.wv[i]) for i in danawa_data2['form']]\nlen(form_vector)\n\nwith_in_vector = [np.mean(models.wv[i]) for i in danawa_data2['with_in']]\nlen(with_in_vector)\n\ncolor_vector = [np.mean(models.wv[i]) for i in danawa_data2['color']]\nlen(color_vector)\n\nfunction_vector = [np.mean(models.wv[i]) for i in danawa_data2['function']]\nlen(function_vector)\n\nfor i in danawa_data2['info_all']:\n for j in i:\n if j not in models.wv.index_to_key:\n i.remove(j)\ninfo_all_vector = [np.mean(models.wv[i]) for i in danawa_data2['info_all']]\nlen(info_all_vector)\n\n# 제조사의 경우, word2vec에서 인식하지 못하는 글자가 많고, 다르다는 구분만 존재해도 된다 생각하여 정수 인코딩 실시\nmade_by = dict(zip(list(danawa_data2.iloc[:,7].unique()), \n list(range(len(danawa_data2.iloc[:,7].unique())))))\n\n# 데이터에 추가\ndanawa_data3 = danawa_data2.copy()\n\ndanawa_data3['made_by'] = [made_by.get(i) for i in danawa_data2['made_by']]\ndanawa_data3['form'] = form_vector\ndanawa_data3['with_in'] = with_in_vector\ndanawa_data3['color'] = color_vector\ndanawa_data3['function'] = function_vector\ndanawa_data3['info_all'] = info_all_vector\ndanawa_data3.info()\n\n\n\n# 가격이 존재하지 않는 데이터(주로 일시품절로 인해 가격정보가 없음)를 제거후, 최저가를 뽑아내 변수를 하나 만듦\ndanawa_data4 = danawa_data3.loc[danawa_data3['shoppingmall_price'] != {}]\ndanawa_data4.reset_index(inplace=True, drop=True)\ndanawa_data4.info()\n\n# 최저가를 뽑아내는 함수\ndef get_min_price(data):\n value_list = list(data.values())\n price = min(value_list)\n return price\n\n\n# 최저가 변수를 넣고, 등록일자를 수치형으로 변환하여 넣음\ndanawa_data4['min_price'] = [get_min_price(i) for i in danawa_data4['shoppingmall_price']]\ndanawa_data4['register_date'] = [int(i.replace('.', '')) for i in danawa_data4['regisgter_date']]\n\n# 모든 변수를 포함한 최종 데이터셋\ndanawa_final_all_data = danawa_data4.copy()\ndanawa_final_all_data.info()\n\n# 필요한 변수만 추려낸 최종데이터셋\ndanawa_final_data = danawa_data4.drop(danawa_data4.iloc[:, [0,1,2,3,4,5,6,9,10,11,12,13,14,15,16,18]].columns, axis=1)\ndanawa_final_data.info()\n\n# 군집에 필요없는 변수 제거하고 최종 데이터셋\ndanawa_drop_data = danawa_data4.drop(danawa_data4.iloc[:, [0,1,2,3,4,5,6,8,9,14,15,16,18]].columns, axis=1)\ndanawa_drop_data.info()\n\n\n","repo_name":"woonooo/danawa_scarapy_project","sub_path":"04.data_preprocessing.py","file_name":"04.data_preprocessing.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"11411547424","text":"import random\nimport string\n\n\ndef simple_match(target, pattern):\n assert type(target) is str and type(pattern) is str, 'wrong type'\n target_len = len(target)\n pattern_len = len(pattern)\n\n for i in range(target_len-pattern_len+1):\n position = i\n for j in range(pattern_len):\n if target[i+j] != pattern[j]:\n position = -1\n break\n if position >= 0: # 找到了对应子串\n return position+1\n\n\ndef build_next_list(pattern):\n assert type(pattern) is str, 'wrong type'\n next = [-1, 0]\n for i in range(1, len(pattern)):\n if pattern[i] == pattern[next[i]]:\n next.append(next[i]+1)\n else:\n index = next[i]\n while index != 0: # 不断分割,尝试匹配\n if pattern[i] == pattern[next[index]]:\n next.append(next[index]+1)\n break\n else:\n index = next[index]\n if index == 0: # 已没有能匹配的部分\n next.append(0)\n return next\n\n\ndef KMP(target, pattern):\n assert type(target) is str and type(pattern) is str, 'wrong type'\n next = build_next_list(pattern) # 构建next数组\n index = j = 0\n while len(pattern)+index <= len(target):\n for i in range(j, len(pattern)):\n if pattern[i] != target[index+i]:\n index += i - next[i]\n j = next[i]\n if j < 0:\n j = 0\n break\n if len(pattern) == i+1:\n return index+1 \n return None\n\n\ndef main():\n for i in range(10000):\n target = ''.join(random.sample(string.ascii_letters + string.digits, 40))\n pattern = ''.join(random.sample(string.ascii_letters + string.digits, 2))\n ans1 = simple_match(target, pattern)\n ans2 = KMP(target, pattern)\n if ans1 != ans2:\n print('target string: {}'.format(target))\n print('pattern string: {}'.format(pattern))\n print('simple_match: {}'.format(ans1))\n print('KMP: {}'.format(ans2))\n\nif __name__ == '__main__':\n main() ","repo_name":"pancerZH/algorithm_with_python","sub_path":"str_match.py","file_name":"str_match.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"6577127899","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\nrequirements = []\n\ntest_requirements = [\n \"pytest>=3.6\",\n]\n\nsetup(\n author=\"Samarpan Rai\",\n author_email=\"samarpan-rai@live.com\",\n python_requires=\">=3.6\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n ],\n description=\"Context manager around service provided by HealthChecks for easy use\",\n install_requires=requirements,\n license=\"MIT license\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"healthchecks_context_manager\",\n name=\"healthchecks_wrapper\",\n packages=find_packages(include=[\"healthchecks_wrapper\", \"healthchecks_wrapper.*\"]),\n test_suite=\"tests\",\n tests_require=test_requirements,\n url=\"https://github.com/samarpan-rai/healthchecks_wrapper\",\n version=\"0.1.6\",\n zip_safe=False,\n)\n","repo_name":"samarpan-rai/healthchecks_wrapper","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
+{"seq_id":"70908111031","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom apps.tables.forms import ProductForm\nfrom apps.common.models import Product\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\nfrom apps.tables.utils import product_filter\n\n# Create your views here.\n\ndef datatables(request):\n filters = product_filter(request)\n product_list = Product.objects.filter(**filters)\n form = ProductForm()\n\n page = request.GET.get('page', 1)\n paginator = Paginator(product_list, 5)\n products = paginator.page(page)\n\n if request.method == 'POST':\n form = ProductForm(request.POST)\n if form.is_valid():\n return post_request_handling(request, form)\n\n context = {\n 'segment' : 'datatables',\n 'parent' : 'apps',\n 'form' : form,\n 'products' : products\n }\n \n return render(request, 'apps/datatables.html', context)\n\n\n\n@login_required(login_url='/users/signin/')\ndef post_request_handling(request, form):\n form.save()\n return redirect(request.META.get('HTTP_REFERER'))\n\n@login_required(login_url='/users/signin/')\ndef delete_product(request, id):\n product = Product.objects.get(id=id)\n product.delete()\n return redirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required(login_url='/users/signin/')\ndef update_product(request, id):\n product = Product.objects.get(id=id)\n if request.method == 'POST':\n product.name = request.POST.get('name')\n product.price = int(request.POST.get('price'))\n product.info = request.POST.get('info')\n product.save()\n return redirect(request.META.get('HTTP_REFERER'))","repo_name":"app-generator/rocket-django","sub_path":"apps/tables/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"94"}
+{"seq_id":"72338515508","text":"from nvidia.dali.plugin.pytorch import DALIClassificationIterator\nfrom nvidia.dali.pipeline import Pipeline\nimport nvidia.dali.ops as ops\nimport nvidia.dali.types as types\n\n\nclass HybridTrainPipe(Pipeline):\n def __init__(self, data_dir, batch_size, num_threads=4):\n super().__init__(batch_size, num_threads, device_id=0)\n self.input = ops.FileReader(\n file_root='.', file_list='train_list', random_shuffle=True)\n self.shape = 512\n self.pre_transforms = [\n ops.nvJPEGDecoder(\n device='mixed',\n device_memory_padding=211025920,\n host_memory_padding=140544512),\n ops.Resize(\n device='gpu',\n resize_x=self.shape,\n resize_y=self.shape,\n interp_type=types.INTERP_TRIANGULAR),\n ]\n self.post_transforms = [\n ops.NormalizePermute(\n device='gpu',\n height=self.shape,\n width=self.shape,\n #mean=[105.0, 72.7, 51.8],\n #std=[255 * 6.90, 255 * 4.76, 255 * 3.38]),\n mean=[0., 0., 0.],\n std=[255., 255., 255.]),\n ]\n self.coin = ops.CoinFlip()\n self.fh_op = ops.Flip(device='gpu', horizontal=1, vertical=0)\n self.fv_op = ops.Flip(device='gpu', horizontal=0, vertical=1)\n #self.twist = ops.ColorTwist(device='gpu')\n #self.rng1 = ops.Uniform(range=(-0.1, 0.1))\n #self.rng2 = ops.Uniform(range=(0.75, 1.5))\n #self.rng3 = ops.Uniform(range=(-0.15, 0.15))\n\n def define_graph(self):\n images, labels = self.input(name='Reader')\n for transform in self.pre_transforms:\n images = transform(images)\n if self.coin():\n images = self.fh_op(images)\n if self.coin():\n images = self.fv_op(images)\n #images = self.twist(\n # images,\n # saturation=self.rng2(),\n # contrast=self.rng2(),\n # brightness=self.rng1(),\n # hue=self.rng3())\n for transform in self.post_transforms:\n images = transform(images)\n return images, labels\n\n\nclass HybridTestPipe(Pipeline):\n def __init__(self, data_dir, batch_size, num_threads=4):\n super().__init__(batch_size, num_threads, device_id=0)\n self.input = ops.FileReader(\n file_root='.', file_list='test_list', random_shuffle=True)\n self.shape = 512\n self.transforms = [\n ops.nvJPEGDecoder(device='mixed'),\n ops.Resize(\n device='gpu',\n resize_x=self.shape,\n resize_y=self.shape,\n interp_type=types.INTERP_TRIANGULAR),\n ops.NormalizePermute(\n device='gpu',\n height=self.shape,\n width=self.shape,\n #mean=[105.0, 72.7, 51.8],\n #std=[255 * 6.90, 255 * 4.76, 255 * 3.38]),\n mean=[0., 0., 0.],\n std=[255., 255., 255.]),\n ]\n\n def define_graph(self):\n images, labels = self.input(name='Reader')\n for transform in self.transforms:\n images = transform(images)\n return images, labels\n\n\ndef get_dataloaders(batch_size, _):\n def get_loader(Pipe):\n pipe = Pipe('data', batch_size, 1)\n pipe.build()\n return DALIClassificationIterator(pipe, size=pipe.epoch_size('Reader'))\n\n return get_loader(HybridTrainPipe), get_loader(HybridTestPipe)\n","repo_name":"chengscott/dlp2019","sub_path":"lab3/daliloader.py","file_name":"daliloader.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"17366169137","text":"import random\nimport time\n\n##----------------------------------------------------------------##\nfrom gii.core import app, signals\nfrom gii.qt import QtEditorModule\n\nfrom gii.qt.IconCache import getIcon\nfrom gii.qt.controls.GenericTreeWidget import GenericTreeWidget\nfrom gii.qt.dialogs import alertMessage\nfrom gii.moai.MOAIRuntime import MOAILuaDelegate\nfrom gii.SceneEditor import SceneEditorModule, getSceneSelectionManager\nfrom gii.qt.helpers import addWidgetWithLayout, QColorF, unpackQColor\n\n##----------------------------------------------------------------##\nfrom PyQt4 import QtCore, QtGui, uic\nfrom PyQt4.QtCore import Qt\n\n##----------------------------------------------------------------##\nfrom mock import _MOCK, isMockInstance\n##----------------------------------------------------------------##\nfrom AnimatorWidget import AnimatorWidget\n##----------------------------------------------------------------##\n\ndef _getModulePath( path ):\n\timport os.path\n\treturn os.path.dirname( __file__ ) + '/' + path\n\ndef _fixDuplicatedName( names, name, id = None ):\n\tif id:\n\t\ttestName = name + '_%d' % id\n\telse:\n\t\tid = 0\n\t\ttestName = name\n\t#find duplicated name\n\tif testName in names:\n\t\treturn _fixDuplicatedName( names, name, id + 1)\n\telse:\n\t\treturn testName\n\n##----------------------------------------------------------------##\nPREVIEW_SPEED_OPTIONS = [\n\t\t( '1/10', 0.1 ),\n\t\t( '1/5', 0.2 ),\n\t\t( '1/3', 0.33 ),\n\t\t( '1/2', 0.5 ),\n\t\t( '1x', 1.0 ),\n\t\t( '1.5x', 1.5 ),\n\t\t( '2x', 2.0 ),\n\t\t( '4x', 4.0 ),\n\t\t( '10x', 10.0 ),\n]\n\n##----------------------------------------------------------------##\nclass AnimatorView( SceneEditorModule ):\n\tname = 'animator'\n\tdependency = [ 'scene_editor', 'mock' ]\n\n\tdef onLoad( self ):\n\t\t#UI\n\t\tself.windowTitle = 'Animator'\n\t\tself.window = self.requestDockWindow( 'AnimatorView',\n\t\t\ttitle = 'Animator',\n\t\t\tsize = (120,120),\n\t\t\tminSize = (120,120),\n\t\t\tdock = 'bottom'\n\t\t\t)\n\t\t\n\t\tself.widget = AnimatorWidget()\n\t\tself.window.addWidget( self.widget )\n\t\tself.toolbarTarget = self.addToolBar( 'animator_target', self.widget.toolbarTarget )\n\t\tself.toolbarClips = self.addToolBar( 'animator_clips', self.widget.toolbarClips )\n\t\tself.toolbarPlay = self.addToolBar( 'animator_play', self.widget.toolbarPlay )\n\t\tself.toolbarTrack = self.addToolBar( 'animator_track', self.widget.toolbarTrack )\n\t\t# self.toolbarEdit = self.addToolBar( 'animator_play', self.widget.toolbarEdit )\n\n\t\tsignals.connect( 'scene.close', self.onSceneClose )\n\t\tsignals.connect( 'scene.save', self.preSceneSave )\n\t\tsignals.connect( 'scene.saved', self.postSceneSave )\n\n\t\t# addWidgetWithLaytut( toolbar,\n\t\t# \tself.widget.containerEditTool )\n\t\tself.addTool( 'animator_target/change_context', label = 'Change Context', icon = 'in' )\n\t\tself.addTool( 'animator_target/save_data', label = 'Save Data', icon = 'save' )\n\n\t\tself.addTool( 'animator_clips/add_clip_group', label = 'add group', icon = 'add_folder' )\n\t\tself.addTool( 'animator_clips/add_clip', label = 'add', icon = 'add' )\n\t\tself.addTool( 'animator_clips/remove_clip', label = 'remove', icon = 'remove' )\n\t\tself.addTool( 'animator_clips/clone_clip', label = 'clone', icon = 'clone' )\n\n\n\t\tself.addTool( 'animator_play/goto_start', label = 'to start', icon = 'rewind' )\n\t\t# self.addTool( 'animator_play/prev_key', label = 'prev key', icon = 'previous' )\n\t\tself.addTool( 
'animator_play/stop', label = 'stop', icon = 'stop' )\n\t\tself.addTool( 'animator_play/play', label = 'play', icon = 'play', type = 'check' )\n\t\t# self.addTool( 'animator_play/next_key', label = 'next key', icon = 'next' )\n\t\tself.addTool( 'animator_play/goto_end', label = 'to end', icon = 'fast_forward' )\n\t\tself.addTool( 'animator_play/toggle_repeat', label = 'toggle repeat', icon = 'repeat', type = 'check' )\n\t\tself.comboPreviewSpeed = QtGui.QComboBox()\n\t\tself.comboPreviewSpeed.addItems([ e[0] for e in PREVIEW_SPEED_OPTIONS ] )\t\t\t\n\t\tself.comboPreviewSpeed.setCurrentIndex( 4 ) #1x\n\t\tself.comboPreviewSpeed.currentIndexChanged.connect( self.onPreviewSpeedChange )\n\t\tself.addTool( 'animator_play/preview_speed', widget = self.comboPreviewSpeed )\n\t\t\n\t\t#SIGNALS\n\t\tself.addTool( 'animator_track/locate_target', label = 'locate', icon = 'find' )\n\t\tself.addTool( 'animator_track/----' )\n\t\tself.addTool( 'animator_track/add_track_group', label = 'add group', icon = 'add_folder' )\n\t\tself.addTool( 'animator_track/add_track', label = 'add', icon = 'add' )\n\t\tself.addTool( 'animator_track/remove_track', label = 'remove', icon = 'remove' )\n\n\t\t#\n\t\tsignals.connect( 'selection.changed', self.onSceneSelectionChanged )\n\n\t\tself.delegate = MOAILuaDelegate( self )\n\t\tself.delegate.load( _getModulePath( 'AnimatorView.lua' ) )\n\n\t\tself.widget.setOwner( self )\n\n\t\t#playback\n\t\tself.previewing = False\n\t\tself.setEditing( False )\n\n\t\tself.targetAnimator = None\n\t\tself.targetClip = None\n\t\tself.targetAnimatorData = None\n\t\tself.currentTrack = None\n\n\t\tself.previewing = False\n\t\tself.previewLoop = False\n\t\tself.previewTime = 0.0\n\t\tself.previewStep = 1.0/60.0\n\n\t\tself.previewTimer = QtCore.QTimer( self.widget )\n\t\tself.previewTimer.setInterval( 1000.0/65 )\n\t\tself.previewTimer.stop()\n\n\t\tself.previewTimer.timeout.connect( self.onPreviewTimer )\n\n\tdef onStart( self ):\n\t\tpass\n\n\tdef setEditing( self, editing ):\n\t\tself.widget.timeline.setEnabled( editing )\n\t\tself.widget.treeTracks.setEnabled( editing )\n\t\tself.findTool( 'animator_play' ).setEnabled( editing )\n\t\tself.findTool( 'animator_track' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/add_clip_group').setEnabled( editing )\n\t\tself.findTool( 'animator_clips/add_clip' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/remove_clip' ).setEnabled( editing )\n\t\tself.findTool( 'animator_clips/clone_clip' ).setEnabled( editing )\n\n\tdef setTargetAnimator( self, target ):\n\t\tself.saveAnimatorData()\n\t\tif target == self.targetAnimator: return\n\t\tif self.previewing:\n\t\t\tself.stopPreview()\n\t\tself.targetAnimator = target\n\t\tself.targetClip = None\n\t\tself.delegate.callMethod( 'view', 'setTargetAnimator', target )\n\t\tself.targetAnimatorData = self.delegate.callMethod( 'view', 'getTargetAnimatorData' )\n\t\tself.widget.rebuild()\n\t\tif self.targetAnimator:\n\t\t\tself.setEditing( True )\n\t\t\tsignals.emit( 'animator.start' )\n\t\telse:\n\t\t\tself.setEditing( False )\n\t\t\tsignals.emit( 'animator.stop' )\n\t\t\t\n\t\tpath = self.delegate.callMethod( 'view', 'getTargetAnimatorDataPath' )\n\t\tif path:\n\t\t\tself.window.setWindowTitle( 'Animator - %s' % path )\n\t\telse:\n\t\t\tself.window.setWindowTitle( 'Animator' )\n\t\tclip = self.delegate.callMethod( 'view', 'getPreviousTargeClip', target )\n\t\tself.enableTool( 'animator_play' , False )\n\t\tself.enableTool( 'animator_track', False )\n\t\tif 
clip:\n\t\t\tself.widget.treeClips.selectNode( clip )\n\t\telse:\n\t\t\tself.widget.treeClips.selectFirstItem()\n\t\tself.applyTime( 0, True )\n\n\tdef setTargetClip( self, clip ):\n\t\twasPreviewing = self.previewing\n\t\tif self.previewing:\n\t\t\tself.stopPreview()\n\n\t\tself.targetClip = clip\n\t\tself.delegate.callMethod( 'view', 'setTargetClip', clip )\n\t\tself.widget.rebuildTimeline()\n\t\tself.enableTool( 'animator_play' , bool( clip ) )\n\t\tself.enableTool( 'animator_track', bool( clip ) )\n\t\tself.applyTime( 0, True )\n\t\tif wasPreviewing:\n\t\t\tself.startPreview()\n\n\tdef setCurrentTrack( self, track ):\n\t\tself.currentTrack = track\n\t\tself.delegate.callMethod( 'view', 'setCurrentTrack', track )\n\n\tdef getTargetClipLength( self ):\n\t\treturn self.delegate.callMethod( 'view', 'getTargetClipLength' )\n\n\tdef getClipList( self ):\n\t\tif self.targetAnimatorData:\n\t\t\tclipList = self.targetAnimatorData.clips\n\t\t\treturn [ clip for clip in clipList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getRootClipGroup( self ):\n\t\tif self.targetAnimatorData:\n\t\t\treturn self.targetAnimatorData.getRootGroup( self.targetAnimatorData )\n\n\tdef getTrackList( self ):\n\t\tif self.targetClip:\n\t\t\ttrackList = self.targetClip.getTrackList( self.targetClip )\n\t\t\treturn [ track for track in trackList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getMarkerList( self ):\n\t\tif self.targetClip:\n\t\t\tmarkerList = self.targetClip.getMarkerList( self.targetClip )\n\t\t\treturn [ track for track in markerList.values() ]\n\t\telse:\n\t\t\treturn []\n\n\tdef getClipRoot( self ):\n\t\tif self.targetClip:\n\t\t\treturn self.targetClip.getRoot( self.targetClip )\n\t\telse:\n\t\t\treturn None\n\n\tdef addClip( self ):\n\t\tif not self.targetAnimatorData: return\n\t\ttargetGroup = self.widget.getCurrentClipGroup()\n\t\tcmd = self.doCommand( 'scene_editor/animator_add_clip',\n\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\tparent_group = targetGroup\n\t\t )\n\t\tclip = cmd.getResult()\n\t\tif clip:\n\t\t\tself.widget.addClip( clip, True )\n\t\treturn clip\n\n\tdef addClipGroup( self ):\n\t\tif not self.targetAnimatorData: return\n\t\ttargetGroup = self.widget.getCurrentClipGroup()\n\t\tcmd = self.doCommand( 'scene_editor/animator_add_clip_group',\n\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\tparent_group = targetGroup\n\t\t )\n\t\tgroup = cmd.getResult()\n\t\tif group:\n\t\t\tself.widget.addClip( group, True )\n\t\treturn group\n\n\tdef removeClipNode( self ):\n\t\tfor clip in self.widget.treeClips.getSelection():\n\t\t\tif self.doCommand( 'scene_editor/animator_remove_clip_node',\n\t\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\t\ttarget_node = clip\n\t\t\t):\n\t\t\t\tself.widget.removeClip( clip )\n\n\tdef cloneClipNode( self ):\n\t\tif not self.targetClip: return\n\t\tresult = []\n\t\tfor clip in self.widget.treeClips.getSelection():\n\t\t\tcmd = self.doCommand( 'scene_editor/animator_clone_clip_node',\n\t\t\t\tanimator_data = self.targetAnimatorData,\n\t\t\t\ttarget_node = clip\n\t\t\t)\n\t\t\tif cmd:\n\t\t\t\tcloned = cmd.getResult()\n\t\t\t\tself.widget.addClip( cloned )\n\t\t\t\tresult.append( cloned )\n\t\treturn result\n\n\tdef onObjectEdited( self, obj ):\n\t\tif self.targetClip:\n\t\t\tself.delegate.callMethod( 'view', 'clearPreviewState' )\n\t\t\tself.delegate.callMethod( 'view', 'markClipDirty' )\n\n\tdef onSceneSelectionChanged( self, selection, key ):\n\t\tif key != 'scene': return\n\t\t#find animator component\n\t\t# 
self.findTargetAnimator()\n\n\tdef findTargetAnimator( self ):\n\t\ttarget = self.delegate.callMethod( 'view', 'findTargetAnimator' )\n\t\tself.setTargetAnimator( target )\n\t\treturn target\n\n\tdef checkTargetAnimator( self ):\n\t\tif not self.targetAnimator:\n\t\t\talertMessage( 'No Animator', 'No Animator Selected', 'question' )\n\t\t\treturn False\n\t\treturn True\n\n\tdef addMarker( self ):\n\t\tif not self.targetClip: return\n\t\tcmd = self.doCommand( 'scene_editor/animator_add_marker' ,\n\t\t\t\ttarget_clip = self.targetClip,\n\t\t\t\ttarget_pos = self.widget.getCursorPos()\n\t\t\t)\n\t\tif cmd:\n\t\t\tmarker = cmd.getResult()\n\t\t\tself.widget.addMarker( marker )\n\n\tdef addKeyForField( self, target, fieldId ):\n\t\tif not self.checkTargetAnimator(): return \n\n\t\tif not self.targetClip:\n\t\t\tself.addClip()\n\t\t\t# alertMessage( 'No Clip', 'You need to select a Clip first', 'question' )\n\t\t\t# return False\n\t\tkeys = self.delegate.callMethod( 'view', 'addKeyForField', target, fieldId )\n\t\tif keys:\n\t\t\tfor key in keys.values():\n\t\t\t\tself.widget.addKey( key, True )\n\n\tdef addKeyForEvent( self, target, eventId ):\n\t\tpass\n\n\tdef addCustomAnimatorTrack( self, target, trackClasId ):\n\t\tif not self.checkTargetAnimator(): return\n\t\t\t\n\t\ttrack = self.delegate.callMethod( 'view', 'addCustomAnimatorTrack', target, trackClasId )\n\t\tif track:\n\t\t\tself.widget.addTrack( track )\n\n\tdef addKeyForSelectedTracks( self ):\n\t\t#TODO: command\n\t\tselectedTracks = self.widget.getTrackSelection()\n\t\tfor track in selectedTracks:\n\t\t\tkeys = self.delegate.callMethod( 'view', 'addKeyForSelectedTrack', track )\n\t\t\tif keys:\n\t\t\t\tfor key in keys.values():\n\t\t\t\t\tself.widget.addKey( key, True )\n\n\tdef removeSelectedKeys( self ):\n\t\t#TODO: command\n\t\tselectedKeys = self.widget.getKeySelection()\n\t\tfor key in selectedKeys:\n\t\t\tself.widget.removeKey( key )\n\n\tdef cloneSelectedKeys( self ):\n\t\t#TODO: command\n\t\tselectedKeys = self.widget.getKeySelection()\n\t\tcloned = []\n\t\tfor key in selectedKeys:\n\t\t\tclonedKey = self.delegate.callMethod( 'view', 'cloneKey', key )\n\t\t\tif clonedKey:\n\t\t\t\tcloned.append( clonedKey )\n\n\t\tfor clonedKey in cloned:\n\t\t\tself.widget.addKey( clonedKey, False )\n\n\tdef onKeyRemoving( self, key ):\n\t\tif self.delegate.callMethod( 'view', 'removeKey', key ) != False:\n\t\t\treturn True\n\n\tdef onMarkerRemoving( self, marker ):\n\t\tif self.delegate.callMethod( 'view', 'removeMarker', marker ) != False:\n\t\t\treturn True\n\n\tdef onClipLengthChanging( self, t1 ):\n\t\tif self.delegate.callMethod( 'view', 'setTargetClipLength', t1 ) != False:\n\t\t\treturn True\n\n\tdef onTimelineKeyChanged( self, key, pos, length ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKey', key, pos, length )\n\n\tdef onTimelineKeyCurveValueChanged( self, key, value ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyCurveValue', key, value )\n\n\tdef onTimelineKeyTweenModeChanged( self, key, mode ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyTweenMode', key, mode )\n\n\tdef onTimelineKeyBezierPointChanged( self, key, bpx0, bpy0, bpx1, bpy1 ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineKeyBezierPoint', key, bpx0, bpy0, bpx1, bpy1 )\n\n\tdef onTimelineMarkerChanged( self, marker, pos ):\n\t\tself.delegate.callMethod( 'view', 'updateTimelineMarker', marker, pos )\n\n\tdef toggleTrackActive( self, track ):\n\t\t#TODO: command\n\t\t# self.module.doCommand( 
'scene_editor/toggle_entity_visibility', target = node )\n\t\tself.delegate.callMethod( 'view', 'toggleTrackActive', track )\n\n\n\tdef renameTrack( self, track, name ):\n\t\tself.delegate.callMethod( 'view', 'renameTrack', track, name )\n\n\tdef renameClip( self, clip, name ):\n\t\tself.delegate.callMethod( 'view', 'renameClip', clip, name )\n\n\tdef onTool( self, tool ):\n\t\tname = tool.name\n\t\tif name == 'change_context':\n\t\t\ttarget0 = self.targetAnimator\n\t\t\ttarget1 = self.findTargetAnimator()\n\t\t\tif ( not target0 ) and ( not target1 ):\n\t\t\t\talertMessage( 'No Animator', 'No Animator found in selected entity scope', 'question' )\n\t\t\t\t\n\t\telif name == 'save_data':\n\t\t\tself.saveAnimatorData()\n\n\t\telif name == 'add_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.addClip()\n\n\t\telif name == 'add_clip_group':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.addClipGroup()\n\n\t\telif name == 'remove_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.removeClipNode()\t\t\t\n\n\t\telif name == 'clone_clip':\n\t\t\tif self.checkTargetAnimator():\n\t\t\t\tself.cloneClipNode()\t\t\t\n\n\t\telif name == 'add_track_group':\n\t\t\tgroup = self.delegate.callMethod( 'view', 'addTrackGroup' )\n\t\t\tif group:\n\t\t\t\tself.widget.addTrack( group, True )\n\n\t\telif name == 'remove_track':\n\t\t\tfor track in self.widget.treeTracks.getSelection():\n\t\t\t\tself.delegate.callMethod( 'view', 'removeTrack', track )\n\t\t\t\tself.widget.removeTrack( track )\n\t\telif name == 'locate_target':\n\t\t\tfor track in self.widget.treeTracks.getSelection():\n\t\t\t\tsceneGraphEditor = self.getModule( 'scenegraph_editor')\n\t\t\t\tif sceneGraphEditor:\n\t\t\t\t\ttargetEntity = self.delegate.callMethod( 'view', 'findTrackEntity', track )\n\t\t\t\t\tif targetEntity:\n\t\t\t\t\t\tsceneGraphEditor.selectEntity( targetEntity, focus_tree = True )\n\t\t\t\t#pass\n\t\t\t\treturn\n\n\t\t#preview\n\t\telif name == 'goto_start':\n\t\t\tself.gotoStart()\n\t\telif name == 'goto_end':\n\t\t\tself.gotoEnd()\n\t\telif name == 'play':\n\t\t\tif tool.getValue():\n\t\t\t\tself.startPreview()\n\t\t\telse:\n\t\t\t\tself.stopPreview( False )\n\t\telif name == 'stop':\n\t\t\tself.stopPreview( True )\n\t\telif name == 'toggle_repeat':\n\t\t\tself.delegate.callMethod( 'view', 'togglePreviewRepeat', tool.getValue() )\n\t\t\t\n\n\tdef getActiveSceneView( self ):\n\t\treturn self.getModule( 'scene_view' )\n\n\t#preview\n\tdef startPreview( self ):\n\t\tself.saveAnimatorData()\n\t\tif self.delegate.callMethod( 'view', 'startPreview', self.previewTime ):\n\t\t\tself.widget.setCursorMovable( False )\n\t\t\tself.previewing = True\n\t\t\tself.findTool( 'animator_play/play' ).setValue( True )\n\t\t\tself.previewTimer.start()\n\t\t\tself.getApp().setMinimalMainLoopBudget()\n\t\t\t\n\tdef stopPreview( self, rewind = False ):\t\t\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'stopPreview' )\n\t\t\tself.getApp().resetMainLoopBudget()\n\t\t\tself.widget.setCursorMovable( True )\n\t\t\tself.previewing = False\n\t\t\tself.findTool( 'animator_play/play' ).setValue( False )\n\t\t\tself.previewTimer.stop()\n\t\t\tsignals.emit( 'entity.modified', None , '' )\n\t\tif rewind:\n\t\t\tself.gotoStart()\n\n\tdef onPreviewTimer( self ):\n\t\tplaying, currentTime = self.delegate.callMethod( 'view', 'doPreviewStep' )\n\t\tself.previewTime = currentTime\n\t\tself.getActiveSceneView().forceUpdate()\n\t\tself.widget.setCursorPos( self.previewTime )\n\t\tif not playing:\n\t\t\tself.stopPreview()\n\t\t# 
signals.emit( 'entity.modified', None , '' )\n\n\tdef gotoStart( self ):\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'applyTime', 0 )\n\t\telse:\n\t\t\tself.widget.setCursorPos( 0, True )\n\n\tdef gotoEnd( self ):\n\t\tif self.previewing:\n\t\t\tself.delegate.callMethod( 'view', 'applyTime', 10 )\n\t\telse:\n\t\t\tself.widget.setCursorPos( 10, True )\n\n\tdef applyTime( self, t, syncCursor = False ):\n\t\tself.previewTime = self.delegate.callMethod( 'view', 'applyTime', t )\n\t\tself.getActiveSceneView().forceUpdate()\n\t\tsignals.emit( 'entity.modified', None , '' )\n\t\tif syncCursor:\n\t\t\tself.widget.setCursorPos( t )\n\n\tdef saveAnimatorData( self ):\n\t\tif not self.targetAnimator:\n\t\t\treturn\n\t\tself.delegate.callMethod( 'view', 'saveData' )\n\n\tdef preSceneSave( self ):\n\t\tif self.targetAnimator:\n\t\t\tself.delegate.callMethod( 'view', 'restoreEntityState' )\n\n\tdef postSceneSave( self ):\n\t\tif self.targetAnimator:\n\t\t\tself.applyTime( self.previewTime )\n\n\tdef onSceneClose( self, scene ):\n\t\tself.setTargetAnimator( None )\n\n\tdef onPreviewSpeedChange( self, index ):\n\t\tlabel, throttle = PREVIEW_SPEED_OPTIONS[ index ]\n\t\tself.delegate.callMethod( 'view', 'setPreviewThrottle', throttle )\n\n\tdef refreshTimeline( self ):\n\t\tself.widget.rebuildTimeline()\n\n\tdef refreshClipList( self ):\n\t\tself.widget.rebuildClipList()\n\n\tdef refreshAll( self ):\n\t\tself.widget.rebuild()\n","repo_name":"tommo/gii","sub_path":"packages/Mock/Animator/AnimatorView.py","file_name":"AnimatorView.py","file_ext":"py","file_size_in_byte":17792,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"94"}
+{"seq_id":"3609587021","text":"import os\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nfrom pretrainedmodels import se_resnext50_32x4d, se_resnext101_32x4d\nfrom lib.net.scg_gcn import *\n\nfrom enum import Enum\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\nfrom torchvision.utils import save_image\n\n# assuming (N, R, G, B) order #TODO make sure RGB/BGR?\nNIR = 0\nRED = 1\nGREEN = 2\nBLUE = 3\n\nchannel_params= dict(\n NDVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 1, 1, 0, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n gNDVI = dict(alphas = torch.tensor([1, 0, -1, 0, 0, 1, 0, 1, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n SAVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 1.5, 1.5, 0, 0, 0.75], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000), # L = 0.5\n RVI = dict(alphas = torch.tensor([0, 1, 0, 0, 0, 1, 0, 0, 0, 0], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n DVI = dict(alphas = torch.tensor([1, -1, 0, 0, 0, 0, 0, 0, 0, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n VDVI = dict(alphas = torch.tensor([0, -1, 2, -1, 0, 0, 1, 2, 1, 0], dtype=torch.double), min=-1, max=1, min_clip=-1, max_clip=1),\n GCC = dict(alphas = torch.tensor([0, 0, 0, 1, 0, 0, 1, 1, 1, 0], dtype=torch.double), min=0, max=1, min_clip=0, max_clip=1),\n EVI = dict(alphas = 2.5*torch.tensor([1, -1, 0, 0, 0, 1, 6, 0, -7.5, 1], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n VARI = dict(alphas = torch.tensor([0, -1, 1, 0, 0, 0, 1, 1, -1, 0], dtype=torch.double), min=-10000, max=10000, min_clip=-10000, max_clip=10000),\n)\n\n\n\nclass AppendGenericAgriculturalIndices(nn.Module):\n \"\"\"GAI = (a0N + a1R + a2G + a3B + a4)/(a5N + a6R + a7G + a8B + a9)\"\"\"\n def __init__(self, alphas = None, epsilon=1e-7, learn=False, std=1.0, min=None, max=None, min_clip=None, max_clip=None)->None:\n super().__init__()\n # self.bn = nn.BatchNorm2d(1)\n if alphas == None:\n alphas = torch.normal(mean=0.0, std=std, size=(10, ))\n\n if learn:\n self.alphas = nn.Parameter(alphas)\n else:\n self.alphas = alphas\n \n\n self.epsilon = epsilon\n self.dim = -3\n self.min = min\n self.max = max\n self.min_clip = min_clip\n self.max_clip = max_clip\n \n def _min_max_normalize(self, x):\n return (x - self.min)/(self.max - self.min)\n \n def forward(self, x):\n if self.min_clip or self.max_clip:\n x = torch.clip(x, min=self.min_clip, max=self.max_clip)\n\n red_band, green_band, blue_band, nir_band = x[:, RED, :, :], x[:, GREEN, :, :], x[:, BLUE, :, :], x[:, NIR, :, :]\n nomin = self.alphas[0]*nir_band + self.alphas[1]*red_band + self.alphas[2]*green_band + self.alphas[3]*blue_band + self.alphas[4]\n denom = self.alphas[5]*nir_band + self.alphas[6]*red_band + self.alphas[7]*green_band + self.alphas[8]*blue_band + self.alphas[9]\n # index = nomin/(denom + self.epsilon)\n index = nomin/(torch.clamp(denom, min=self.epsilon))\n\n if self.max and self.min:\n index = self._min_max_normalize(index)\n\n index = index.unsqueeze(self.dim)\n # index = self.bn(index) # batch norm after non-linearity\n y = torch.cat((x, index), dim=self.dim)\n return y\n\nclass IndexTransforms(nn.Module):\n def __init__(self, args) -> None:\n super().__init__()\n self.transforms = []\n\n if args.NDVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"NDVI\"]))\n if args.gNDVI:\n 
self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"gNDVI\"]))\n if args.SAVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"SAVI\"]))\n if args.RVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"RVI\"]))\n if args.DVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"DVI\"]))\n if args.VDVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"VDVI\"]))\n if args.GCC:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"GCC\"]))\n if args.EVI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"EVI\"]))\n if args.VARI:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[\"VARI\"]))\n\n if args.GAI: #pass min, max, clip...\n self.transforms.append(AppendGenericAgriculturalIndices(alphas = args.GAI))\n if args.learn:\n for init_channel in args.learn:\n if init_channel == \"gaussian\":\n self.transforms.append(AppendGenericAgriculturalIndices(learn=True))\n else:\n self.transforms.append(AppendGenericAgriculturalIndices(**channel_params[init_channel], learn=True))\n\n \n self.number_of_transforms = len(self.transforms)\n self.index_transform = nn.Sequential(*self.transforms)\n\n def forward(self, x):\n return self.index_transform(x)\n\n\n\n \n\ndef load_model(args, name='MSCG-Rx50', classes=7, node_size=(32,32)):\n if name == 'MSCG-Rx50':\n net = rx50_gcn_3head_4channel(args=args, out_channels=classes)\n elif name == 'MSCG-Rx101':\n net = rx101_gcn_3head_4channel(args=args, out_channels=classes)\n else:\n print('not found the net')\n return -1\n\n return net\n\n\nclass rx50_gcn_3head_4channel(nn.Module):\n def __init__(self, args, out_channels=7, pretrained=True,\n nodes=(32, 32), dropout=0,\n enhance_diag=True, aux_pred=True):\n super(rx50_gcn_3head_4channel, self).__init__() # same with res_fdcs_v5\n\n self.aux_pred = aux_pred\n self.node_size = nodes\n self.num_cluster = out_channels\n\n resnet = se_resnext50_32x4d()\n\n self.index_transforms_layer = IndexTransforms(args)\n self.layer0, self.layer1, self.layer2, self.layer3, = \\\n resnet.layer0, resnet.layer1, resnet.layer2, resnet.layer3\n\n conv_in_channels = 4 + self.index_transforms_layer.number_of_transforms\n\n self.conv0 = torch.nn.Conv2d(conv_in_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n\n for child in self.layer0.children():\n for param in child.parameters():\n par = param\n break\n break\n\n self.conv0.parameters = torch.cat([par[:, 0, :, :].unsqueeze(1), par], 1)\n self.layer0 = torch.nn.Sequential(self.conv0, *list(self.layer0)[1:4])\n\n self.graph_layers1 = GCN_Layer(1024, 128, bnorm=True, activation=nn.ReLU(True), dropout=dropout)\n\n self.graph_layers2 = GCN_Layer(128, out_channels, bnorm=False, activation=None)\n\n self.scg = SCG_block(in_ch=1024,\n hidden_ch=out_channels,\n node_size=nodes,\n add_diag=enhance_diag,\n dropout=dropout)\n\n weight_xavier_init(self.graph_layers1, self.graph_layers2, self.scg)\n\n def forward(self, x):\n # add prepocess channels\n\n x = self.index_transforms_layer(x)\n \n\n x_size = x.size()\n # for i, param in enumerate(self.layer0.parameters()):\n # print(f\"conv Parameter #{i} of shape {param.shape}:\\n{param.data}\\n\")\n\n gx = self.layer3(self.layer2(self.layer1(self.layer0(x))))\n gx90 = gx.permute(0, 1, 3, 2)\n gx180 = gx.flip(3)\n B, C, H, W = gx.size()\n\n A, gx, loss, z_hat = self.scg(gx)\n gx, _ = self.graph_layers2(\n 
self.graph_layers1((gx.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx += z_hat\n gx = gx.reshape(B, self.num_cluster, self.node_size[0], self.node_size[1])\n\n A, gx90, loss2, z_hat = self.scg(gx90)\n gx90, _ = self.graph_layers2(\n self.graph_layers1((gx90.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx90 += z_hat\n gx90 = gx90.reshape(B, self.num_cluster, self.node_size[1], self.node_size[0])\n gx90 = gx90.permute(0, 1, 3, 2)\n gx += gx90\n\n A, gx180, loss3, z_hat = self.scg(gx180)\n gx180, _ = self.graph_layers2(\n self.graph_layers1((gx180.reshape(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx180 += z_hat\n gx180 = gx180.reshape(B, self.num_cluster, self.node_size[0], self.node_size[1])\n gx180 = gx180.flip(3)\n gx += gx180\n\n gx = F.interpolate(gx, (H, W), mode='bilinear', align_corners=False)\n\n if self.training:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False), loss + loss2 + loss3\n else:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False)\n\n\nclass rx101_gcn_3head_4channel(nn.Module):\n def __init__(self, args, out_channels=7, pretrained=True,\n nodes=(32, 32), dropout=0,\n enhance_diag=True, aux_pred=True):\n super(rx101_gcn_3head_4channel, self).__init__() # same with res_fdcs_v5\n\n self.aux_pred = aux_pred\n self.node_size = nodes\n self.num_cluster = out_channels\n\n resnet = se_resnext101_32x4d()\n self.index_transforms_layer = IndexTransforms(args)\n self.layer0, self.layer1, self.layer2, self.layer3, = \\\n resnet.layer0, resnet.layer1, resnet.layer2, resnet.layer3\n\n conv_in_channels = 4 + self.index_transforms_layer.number_of_transforms\n\n self.conv0 = torch.nn.Conv2d(conv_in_channels, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n\n for child in self.layer0.children():\n for param in child.parameters():\n par = param\n break\n break\n\n self.conv0.parameters = torch.cat([par[:, 0, :, :].unsqueeze(1), par], 1)\n self.layer0 = torch.nn.Sequential(self.conv0, *list(self.layer0)[1:4])\n\n self.graph_layers1 = GCN_Layer(1024, 128, bnorm=True, activation=nn.ReLU(True), dropout=dropout)\n\n self.graph_layers2 = GCN_Layer(128, out_channels, bnorm=False, activation=None)\n\n self.scg = SCG_block(in_ch=1024,\n hidden_ch=out_channels,\n node_size=nodes,\n add_diag=enhance_diag,\n dropout=dropout)\n\n weight_xavier_init(self.graph_layers1, self.graph_layers2, self.scg)\n\n def forward(self, x):\n x = self.index_transforms_layer(x)\n x_size = x.size()\n\n gx = self.layer3(self.layer2(self.layer1(self.layer0(x))))\n gx90 = gx.permute(0, 1, 3, 2)\n gx180 = gx.flip(3)\n\n B, C, H, W = gx.size()\n\n A, gx, loss, z_hat = self.scg(gx)\n\n gx, _ = self.graph_layers2(\n self.graph_layers1((gx.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx += z_hat\n gx = gx.view(B, self.num_cluster, self.node_size[0], self.node_size[1])\n\n A, gx90, loss2, z_hat = self.scg(gx90)\n gx90, _ = self.graph_layers2(\n self.graph_layers1((gx90.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx90 += z_hat\n gx90 = gx90.view(B, self.num_cluster, self.node_size[1], self.node_size[0])\n gx90 = gx90.permute(0, 1, 3, 2)\n gx += gx90\n\n A, gx180, loss3, z_hat = self.scg(gx180)\n gx180, _ = self.graph_layers2(\n self.graph_layers1((gx180.view(B, -1, C), A))) # + gx.reshape(B, -1, C)\n if self.aux_pred:\n gx180 += z_hat\n gx180 = gx180.view(B, self.num_cluster, self.node_size[0], self.node_size[1])\n gx180 = gx180.flip(3)\n gx 
+= gx180\n\n gx = F.interpolate(gx, (H, W), mode='bilinear', align_corners=False)\n\n if self.training:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False), loss + loss2 + loss3\n else:\n return F.interpolate(gx, x_size[2:], mode='bilinear', align_corners=False)\n\n","repo_name":"ronbenc/Agrivision-project","sub_path":"src_Mor_Ron/MSCG-Net-master/tools/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"71469451188","text":"from operator import add, sub\n\n\nclass Vector:\n def __init__(self, *coords):\n self.coords = coords\n\n def __valid(self, other):\n if len(self.coords) != len(other.coords):\n raise TypeError('размерности векторов не совпадают')\n\n def __add__(self, other):\n self.__valid(other)\n return Vector(*(map(add, self.coords, other.coords)))\n\n def __sub__(self, other):\n self.__valid(other)\n return Vector(*(map(sub, self.coords, other.coords)))\n\n def get_coords(self):\n return self.coords\n\n\nclass VectorInt(Vector):\n def __init__(self, *coords):\n if len(tuple(filter(lambda x: isinstance(x, int), coords))) != len(coords):\n raise ValueError('координаты должны быть целыми числами')\n super().__init__(*coords)\n\n # def __add__(self, other):\n # _class = Vector if not isinstance(other, type(self)) else type(self)\n # return _class(*tuple(map(add, self.coords, other.coords)))\n\n def __add__(self, other):\n if not isinstance(other, type(self)):\n return super().__add__(other)\n return __class__(*tuple(map(add, self.coords, other.coords)))\n\n # def __sub__(self, other):\n # _class = Vector if not isinstance(other, type(self)) else type(self)\n # return _class(*tuple(map(add, self.coords, other.coords)))\n\n def __sub__(self, other):\n if not isinstance(other, type(self)):\n return super().__add__(other)\n return __class__(*tuple(map(add, self.coords, other.coords)))\n\n\n# Test:\nv1 = Vector(1, 2, 3)\nv2 = Vector(3, 4, 5)\n# print((v1 + v2).get_coords())\nassert (v1 + v2).get_coords() == (\n 4, 6, 8), \"операция сложения дала неверные значения (или некорректно работает метод get_coords)\"\nassert (v1 - v2).get_coords() == (\n -2, -2, -2), \"операция вычитания дала неверные значения (или некорректно работает метод get_coords)\"\n\nv = VectorInt(1, 2, 3, 4)\nassert isinstance(v, Vector), \"класс VectorInt должен наследоваться от класса Vector\"\n\ntry:\n v = VectorInt(1, 2, 3.4, 4)\nexcept ValueError:\n assert True\nelse:\n assert False, \"не сгенерировалось исключение ValueError для команды v = VectorInt(1, 2, 3.4, 4)\"\n\nv1 = VectorInt(1, 2, 3, 4)\nv2 = VectorInt(4, 2, 3, 4)\nv3 = Vector(1.0, 2, 3, 4)\n\nv = v1 + v2\nassert type(\n v) == VectorInt, \"при сложении вектором с целочисленными координатами должен формироваться объект класс�� VectorInt\"\nv = v1 + v3\nassert type(v) == Vector, \"при сложении вектором с вещественными координатами должен формироваться объект класса Vector\"\n","repo_name":"albert2126/StepikOOP","sub_path":"Part04/vector.py","file_name":"vector.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"30409594412","text":"from pytest import mark\n\n\nclass Solution:\n def min_remove_to_make_valid_parentheses(self, s: str) -> str:\n remove = []\n stack = []\n for i in range(len(s)):\n if s[i] == \"(\":\n stack.append(i)\n elif s[i] == \")\":\n if not stack:\n remove.append(i)\n else:\n stack.pop()\n remove.extend(stack)\n\n for j in range(len(remove)):\n s = s[: remove[j] - j] + s[remove[j] - j + 1 :]\n\n return s\n\n\nclass TestSolution:\n data_provider = [\n [\"((a)))\", \"((a))\"],\n [\"))((\", \"\"],\n [\"(()a(()\", \"()a()\"],\n ]\n\n @mark.parametrize(\"s, expected\", data_provider)\n def test_min_remove_to_make_valid_parentheses(self, s: str, expected: str):\n solution = Solution()\n assert solution.min_remove_to_make_valid_parentheses(s) == expected\n","repo_name":"Ariel-Yu/leetcode-pratices-and-tests","sub_path":"stack/test_1249_minimum_remove_to_make_valid_parentheses.py","file_name":"test_1249_minimum_remove_to_make_valid_parentheses.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"16715476969","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom q36 import count_words\nsns.set(font='AppleGothic')\n\nlabel, y = [], []\nfor word, i in count_words.most_common(10):\n label.append(word)\n y.append(i)\nx = [i for i in range(10)]\nplt.bar(x, y, tick_label =label, align=\"center\")\nplt.ylabel(\"frequency\", fontsize=20)\nplt.show()","repo_name":"kuribayashi4/100knock","sub_path":"chapter04/q37.py","file_name":"q37.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"43658307092","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 10:50:53 2018\n\n@author: 123\n\"\"\"\n\nimport sys\nfrom PyQt5 import QtGui, QtWidgets\n\ndef show_image(image_path='spyder.png'):\n app = QtWidgets.QApplication(sys.argv)\n pixmap = QtGui.QPixmap(image_path)\n screen = QtWidgets.QLabel() # The QLabel widget provides a text or image display. \n screen.setPixmap(pixmap)\n screen.showFullScreen()\n # sys.exit() 会抛出一个异常: SystemExit,如果这个异常没有被捕获,那么python解释器将会退出。如果有捕获该异常的代码,那么这些代码还是会执行。\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n show_image()","repo_name":"seed-fe/face_recognition_using_opencv_keras_scikit-learn","sub_path":"show_image.py","file_name":"show_image.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"94"}
+{"seq_id":"72375679349","text":"\"\"\"\nPydantic models for configuring AFEP.\n\"\"\"\nfrom pathlib import Path\n\nfrom pydantic import BaseModel\n\n\nclass AfepRun(BaseModel):\n note_directories: list[Path] = None\n mml_format: str = None # default to json if not specified by MultiAfepConfig\n outdir: Path = None # should be assigned by parent; to alter name, use 'name'\n expand_cuis: bool = False\n apikey: str = None\n skip_greedy_algorithm: bool = False\n min_kb: int = None # default to ceiling(n_articles/2)\n max_kb: int = None\n data_directory: list[Path] = None\n name: str = None # for naming output directory\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # post init\n if not self.name:\n self.name = self.data_directory[0].stem\n\n def set_outdir(self, default: Path):\n self.outdir = self.get_outdir(default)\n\n def set_note_directories(self, default: list[Path]):\n if not self.note_directories:\n self.note_directories = default\n\n def set_mml_format(self, default: str):\n if not self.mml_format:\n if default:\n self.mml_format = default\n else: # default to json if not otherwise specified\n self.mml_format = 'json'\n\n def get_outdir(self, default: Path):\n name = f'{self.name if self.name else self.note_directories[0].stem}' \\\n f'-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n if default is None:\n return Path('.') / name\n elif self.name:\n return default / f'{self.name}-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n else:\n return default / f'{self.note_directories[0].stem}-selected{\"-cui-exp\" if self.expand_cuis else \"\"}'\n\n def is_valid(self):\n assert self.note_directories is not None\n\n\nclass MultiAfepConfig(BaseModel):\n runs: list[AfepRun]\n outdir: Path = None # general output directory\n build_summary: bool = True\n base_directory: Path = None\n note_directories: list[Path] = None\n mml_format: str = None\n apikey: str = None\n expand_cuis: bool = False\n min_kb: int = None\n max_kb: int = None\n\n def __init__(self, **kw):\n super().__init__(**kw)\n # post init\n for run in self.runs:\n run.set_outdir(self.outdir)\n run.set_note_directories(self.note_directories)\n run.set_mml_format(self.mml_format)\n if self.expand_cuis or run.expand_cuis:\n run.apikey = self.apikey\n run.expand_cuis = True\n if self.min_kb and run.min_kb is None:\n run.min_kb = self.min_kb\n if self.max_kb and run.max_kb is None:\n run.max_kb = self.max_kb\n run.is_valid()\n","repo_name":"kpwhri/mml_utils","sub_path":"src/mml_utils/config/run_afep.py","file_name":"run_afep.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"34017602651","text":"#-*- coding: utf -8 -*-\nimport logging\nimport multiprocessing\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nstopword = []\n\ndef get_stopword():\n\tglobal stopword\n\tread_stopword = open('stopword.txt', 'r')\n\twhile True:\n\t\tstr = read_stopword.readline()\n\t\tif str == '' :\n\t\t\tbreak\n\t\tstr = str.split('\\n')[0]\n\t\tstopword.append(str)\n\t\t\ndef LDA_format(fin, fout, num):\n\tfout.write(num.__str__() + '\\n')\n\twhile True:\n\t\tstr = fin.readline()\n\t\tif str == '':\n\t\t\tbreak\n\t\tfout.write(str)\n\t\ndef remove_note(str):\n\tcount = -1\n\tdot = False\n\twhile (count < len(str)-1):\n\t\tcount = count +1;\n\t\tif (ord(str[count]) >=97 and ord(str[count]) <= 122):\n\t\t\tcontinue\n\t\telif ord(str[count]) == 46:\n\t\t\tif not dot:\n\t\t\t\tstr = str.replace(str[count], ' . ')\n\t\t\t\tdot = True\n\t\t\tcontinue\n\t\telif (ord(str[count]) == 39 and ord(str[count-1]) >= 97 and ord(str[count-1]) <= 122 ):\n\t\t\tif count == len(str)-1:\n\t\t\t\tcontinue\n\t\t\telif (ord(str[count]) == 39 and ord(str[count+1]) >= 97 and ord(str[count+1]) <= 122 ):\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tstr = str.replace(str[count], ' ')\n\t\telse:\n\t\t\tstr = str.replace(str[count], ' ')\n\treturn str\n\ndef main():\n\topen_attraction_file=open('Attractions_eng2.txt','r')\n\twhile True:\n\t\tattraction_url = open_attraction_file.readline()\n\t\tif attraction_url == '':\n\t\t\tbreak\n\t\tcity = attraction_url.split('-')[3].split('.')[0]\n\t\tf = open('eng_data/' + city + '/eng_property_title_and_link.txt', 'r')\n\t\twhile True:\n\t\t\ttitle = f.readline() # title\n\t\t\tif title == '':\n\t\t\t\tbreak\n\t\t\ttitle = title.split('\\n')[0]\n\t\t\ttry:\n\t\t\t\topen_comment = open('eng_data/'+ city + '/' + title + '/' + title + '_all.txt', 'r')\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\twrite_preprocess = open('eng_data/'+ city + '/' + title + '/' + title + '_preprocess.txt', 'w')\n\t\t\tdocument_count = 0\n\t\t\twhile True:\n\t\t\t\tstr = open_comment.readline()\n\t\t\t\tif str == '':\n\t\t\t\t\tbreak\n\t\t\t\tstr = str.lower() # 轉小寫\n\t\t\t\tstr = str.split('\\n')[0]\n\t\t\t\tstr = remove_note(str) # 移除標點符號\n\t\t\t\ttemp = str.split(' ') # 切空格,重組\n\t\t\t\t# 去除stopword\n\t\t\t\tfor item in stopword:\n\t\t\t\t\tfor word in temp:\n\t\t\t\t\t\tif word == item:\n\t\t\t\t\t\t\ttemp.remove(word)\n\t\t\t\t# 輸出至preprocess\n\t\t\t\tcheck_space = False\n\t\t\t\tfor word in temp:\n\t\t\t\t\tif word != '':\n\t\t\t\t\t\tif not check_space:\n\t\t\t\t\t\t\tcheck_space = True\n\t\t\t\t\t\t\twrite_preprocess.write(word)\n\t\t\t\t\t\telif word == '.':\n\t\t\t\t\t\t\twrite_preprocess.write('.')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\twrite_preprocess.write(' ' + word)\n\t\t\t\twrite_preprocess.write('\\n')\n\t\t\t\tdocument_count = document_count + 1\n\t\t\tf.readline() # link\n\t\t\twrite_preprocess.close()\n\t\t\t# 增加文章數目\n\t\t\twrite_for_LDA = open('eng_data/' + city + '/' + title + '/' + title + '_LDA.txt', 'w')\n\t\t\topen_preprocess = open('eng_data/' + city + '/' + title + '/' + title + '_preprocess.txt', 'r')\n\t\t\tLDA_format(open_preprocess, write_for_LDA, document_count)\n\t\t\twrite_for_LDA.close()\n\t\t\topen_preprocess.close()\n\t\nif __name__ == '__main__':\n\tFORMAT = '%(asctime)s %(lineno)04d %(levelname)05s %(message)s'\n\tlogging.basicConfig(level=logging.DEBUG, filename='preprocess.log', format = 
FORMAT)\n\tget_stopword()\n\tmain()\n\t\n","repo_name":"a5135324/Crawler","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"31236153330","text":"def fatorial(n):\n \"\"\"calcula o fatorial de de um número\"\"\"\n fat = 1\n while n > 1:\n fat *= n\n n -= 1\n return fat\n\n\ndef epsilon(x):\n eps = 1.0 + x\n i = 2\n aprox = True\n while aprox:\n termo = (x ** i) / fatorial(i)\n eps += termo\n aprox = not abs(termo) < (x / 100)\n i += 1\n return eps\n\n\ndef get_exp(n):\n return 2.718281828459045 ** n\n\n\ndef main():\n for j in range(10):\n print(epsilon(float(j+1)), get_exp(j + 1))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Joaquim1302/usp-python","sub_path":"src/parte02/semana02/exerc_14_2_epsilon.py","file_name":"exerc_14_2_epsilon.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"42652830745","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 27 12:24:19 2019\n\n@author: angelo\n\"\"\"\n\nfrom images import load, save\nfrom point import Rectangle, Square, Point\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\namazon = (59, 122, 87)\n\n\nclass Image:\n \"\"\"The image class\"\"\"\n\n def __init__(self, height=640, length=480, background_color=white):\n self.imm = [[background_color] * length for _ in range(height)]\n self.length = length\n self.height = height\n self.background_color = background_color\n\n def __str__(self):\n return \"Image (h = {}, l = {}, background = {})\".format(\n self.height, self.length, self.background_color\n )\n\n def __repr__(self):\n return self.__str__()\n\n def draw_rectangle(self, rectangle, color):\n if not isinstance(rectangle, (Rectangle, Quadrato)):\n raise TypeError(\"Disegna rettangolo disegna rettangoli\")\n for y in range(rectangle.vertice_as.y, rectangle.vertice_bs.y + 1):\n for x in range(rectangle.vertice_as.x, rectangle.vertice_ad.x + 1):\n self.imm[y][x] = color\n\n def sava(self, nome_file):\n if type(nome_file) != str or not nome_file.endswith(\".png\"):\n raise TypeError(\"I can only save to file .png\")\n save(self.imm, file_name)\n\n def __add__(self, r):\n self.draw_rectangle(r, black)\n","repo_name":"edoardottt/programming-fundamentals","sub_path":"programming_lab/lab271119/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"94"}
+{"seq_id":"17814877147","text":"#!/usr/bin/env python3\nfrom heapq import heappush, heappop\n\nN, M, K, A, B = [int(x) for x in input().split()]\ngraph = [[] for _ in range(N)]\nfor i in range(M):\n a,b,c = [int(x) for x in input().split()]\n graph[a].append((c,b))\n\ndist = [1e18]*N\npq = [(0,A)]\ndist[A] = 0\nnimprov = [0]*N\nnegative_cycle = False\nwhile len(pq) > 0:\n d,i = heappop(pq)\n nimprov[i] += 1\n # Some extra for good measure, this is incorrect but might go though bad test data\n if nimprov[i] > 2*K+3:\n # In case of both having a negative cycle AND unreachable B,\n # unreachability takes precedence\n negative_cycle = True\n continue\n for c,j in graph[i]:\n if d+c >= dist[j]:\n continue\n dist[j] = d+c\n heappush(pq, (d+c, j))\nif dist[B] == 1e18:\n print(\"POSITIVE INFINITY\")\nelif negative_cycle:\n print(\"NEGATIVE INFINITY\")\nelse:\n print(dist[B])\n","repo_name":"ChalmersCodingClub/chalmerschallenge23-public","sub_path":"problems/negativegraph/submissions/wrong_answer/loke_superdijkstra.py","file_name":"loke_superdijkstra.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24527239523","text":"from django.urls import path\r\nfrom . import views\r\n\r\n\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('about/', views.about, name='about'),\r\n path('shop/', views.shop, name='shop'),\r\n path('contact/', views.contact, name='contact'),\r\n path('service/', views.service, name='service'),\r\n path('howtobuy/', views.howtobuy, name='howtobuy'),\r\n path('products/', views.products, name='products'),\r\n \r\n]","repo_name":"Saleemkk/ShoeShipping","sub_path":"shipping/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"9081117036","text":"\"\"\"Decorators for deprecating classes, functions and function parameters.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\"deprecated\", \"deprecated_params\"]\n\n\nimport inspect\nimport re\nfrom typing import Any, Callable, Iterable\n\nfrom decorator import decorate, decorator\n\nfrom .. import logger\n\n\ndef _get_callable_info(callable: Callable) -> tuple[str, str]:\n \"\"\"Returns type and name of a callable.\n\n Parameters\n ----------\n callable\n The callable\n\n Returns\n -------\n Tuple[str, str]\n The type and name of the callable. Type can can be one of \"class\", \"method\" (for\n functions defined in classes) or \"function\"). For methods, name is Class.method.\n \"\"\"\n what = type(callable).__name__\n name = callable.__qualname__\n if what == \"function\" and \".\" in name:\n what = \"method\"\n elif what != \"function\":\n what = \"class\"\n return (what, name)\n\n\ndef _deprecation_text_component(\n since: str | None,\n until: str | None,\n message: str,\n) -> str:\n \"\"\"Generates a text component used in deprecation messages.\n\n Parameters\n ----------\n since\n The version or date since deprecation\n until\n The version or date until removal of the deprecated callable\n message\n The reason for why the callable has been deprecated\n\n Returns\n -------\n str\n The deprecation message text component.\n \"\"\"\n since = f\"since {since} \" if since else \"\"\n until = (\n f\"is expected to be removed after {until}\"\n if until\n else \"may be removed in a later version\"\n )\n msg = \" \" + message if message else \"\"\n return f\"deprecated {since}and {until}.{msg}\"\n\n\ndef deprecated(\n func: Callable = None,\n since: str | None = None,\n until: str | None = None,\n replacement: str | None = None,\n message: str | None = \"\",\n) -> Callable:\n \"\"\"Decorator to mark a callable as deprecated.\n\n The decorated callable will cause a warning when used. The docstring of the\n deprecated callable is adjusted to indicate that this callable is deprecated.\n\n Parameters\n ----------\n func\n The function to be decorated. Should not be set by the user.\n since\n The version or date since deprecation.\n until\n The version or date until removal of the deprecated callable.\n replacement\n The identifier of the callable replacing the deprecated one.\n message\n The reason for why the callable has been deprecated.\n\n Returns\n -------\n Callable\n The decorated callable.\n\n Examples\n --------\n Basic usage::\n\n from manim.utils.deprecation import deprecated\n\n @deprecated\n def foo(**kwargs):\n pass\n\n @deprecated\n class Bar:\n def __init__(self):\n pass\n\n @deprecated\n def baz(self):\n pass\n\n foo()\n # WARNING The function foo has been deprecated and may be removed in a later version.\n\n a = Bar()\n # WARNING The class Bar has been deprecated and may be removed in a later version.\n\n a.baz()\n # WARNING The method Bar.baz has been deprecated and may be removed in a later version.\n\n You can specify additional information for a more precise warning::\n\n from manim.utils.deprecation import deprecated\n\n @deprecated(\n since=\"v0.2\",\n until=\"v0.4\",\n replacement=\"bar\",\n message=\"It is cooler.\"\n )\n def foo():\n pass\n\n foo()\n # WARNING The function foo has been deprecated since v0.2 and is expected to be removed after v0.4. Use bar instead. 
It is cooler.\n\n You may also use dates instead of versions::\n\n from manim.utils.deprecation import deprecated\n\n @deprecated(since=\"05/01/2021\", until=\"06/01/2021\")\n def foo():\n pass\n\n foo()\n # WARNING The function foo has been deprecated since 05/01/2021 and is expected to be removed after 06/01/2021.\n\n \"\"\"\n # If used as factory:\n if func is None:\n return lambda func: deprecated(func, since, until, replacement, message)\n\n what, name = _get_callable_info(func)\n\n def warning_msg(for_docs: bool = False) -> str:\n \"\"\"Generate the deprecation warning message.\n\n Parameters\n ----------\n for_docs\n Whether or not to format the message for use in documentation.\n\n Returns\n -------\n str\n The deprecation message.\n \"\"\"\n msg = message\n if replacement is not None:\n repl = replacement\n if for_docs:\n mapper = {\"class\": \"class\", \"method\": \"meth\", \"function\": \"func\"}\n repl = f\":{mapper[what]}:`~.{replacement}`\"\n msg = f\"Use {repl} instead.{' ' + message if message else ''}\"\n deprecated = _deprecation_text_component(since, until, msg)\n return f\"The {what} {name} has been {deprecated}\"\n\n def deprecate_docs(func: Callable):\n \"\"\"Adjust docstring to indicate the deprecation.\n\n Parameters\n ----------\n func\n The callable whose docstring to adjust.\n \"\"\"\n warning = warning_msg(True)\n doc_string = func.__doc__ or \"\"\n func.__doc__ = f\"{doc_string}\\n\\n.. attention:: Deprecated\\n {warning}\"\n\n def deprecate(func: Callable, *args, **kwargs):\n \"\"\"The actual decorator used to extend the callables behavior.\n\n Logs a warning message.\n\n Parameters\n ----------\n func\n The callable to decorate.\n args\n The arguments passed to the given callable.\n kwargs\n The keyword arguments passed to the given callable.\n\n Returns\n -------\n Any\n The return value of the given callable when being passed the given\n arguments.\n \"\"\"\n logger.warning(warning_msg())\n return func(*args, **kwargs)\n\n if type(func).__name__ != \"function\":\n deprecate_docs(func)\n func.__init__ = decorate(func.__init__, deprecate)\n return func\n\n func = decorate(func, deprecate)\n deprecate_docs(func)\n return func\n\n\ndef deprecated_params(\n params: str | Iterable[str] | None = None,\n since: str | None = None,\n until: str | None = None,\n message: str | None = \"\",\n redirections: None\n | (Iterable[tuple[str, str] | Callable[..., dict[str, Any]]]) = None,\n) -> Callable:\n \"\"\"Decorator to mark parameters of a callable as deprecated.\n\n It can also be used to automatically redirect deprecated parameter values to their\n replacements.\n\n Parameters\n ----------\n params\n The parameters to be deprecated. Can consist of:\n\n * An iterable of strings, with each element representing a parameter to deprecate\n * A single string, with parameter names separated by commas or spaces.\n since\n The version or date since deprecation.\n until\n The version or date until removal of the deprecated callable.\n message\n The reason for why the callable has been deprecated.\n redirections\n A list of parameter redirections. Each redirection can be one of the following:\n\n * A tuple of two strings. The first string defines the name of the deprecated\n parameter; the second string defines the name of the parameter to redirect to,\n when attempting to use the first string.\n\n * A function performing the mapping operation. The parameter names of the\n function determine which parameters are used as input. 
The function must\n return a dictionary which contains the redirected arguments.\n\n Redirected parameters are also implicitly deprecated.\n\n Returns\n -------\n Callable\n The decorated callable.\n\n Raises\n ------\n ValueError\n If no parameters are defined (neither explicitly nor implicitly).\n ValueError\n If defined parameters are invalid python identifiers.\n\n Examples\n --------\n Basic usage::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(params=\"a, b, c\")\n def foo(**kwargs):\n pass\n\n foo(x=2, y=3, z=4)\n # No warning\n\n foo(a=2, b=3, z=4)\n # WARNING The parameters a and b of method foo have been deprecated and may be removed in a later version.\n\n You can also specify additional information for a more precise warning::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(\n params=\"a, b, c\",\n since=\"v0.2\",\n until=\"v0.4\",\n message=\"The letters x, y, z are cooler.\"\n )\n def foo(**kwargs):\n pass\n\n foo(a=2)\n # WARNING The parameter a of method foo has been deprecated since v0.2 and is expected to be removed after v0.4. The letters x, y, z are cooler.\n\n Basic parameter redirection::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n # Two ways to redirect one parameter to another:\n (\"old_param\", \"new_param\"),\n lambda old_param2: {\"new_param22\": old_param2}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(x=1, old_param=2)\n # WARNING The parameter old_param of method foo has been deprecated and may be removed in a later version.\n # returns {\"x\": 1, \"new_param\": 2}\n\n Redirecting using a calculated value::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda runtime_in_ms: {\"run_time\": runtime_in_ms / 1000}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(runtime_in_ms=500)\n # WARNING The parameter runtime_in_ms of method foo has been deprecated and may be removed in a later version.\n # returns {\"run_time\": 0.5}\n\n Redirecting multiple parameter values to one::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda buff_x=1, buff_y=1: {\"buff\": (buff_x, buff_y)}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(buff_x=2)\n # WARNING The parameter buff_x of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff\": (2, 1)}\n\n Redirect one parameter to multiple::\n\n from manim.utils.deprecation import deprecated_params\n\n @deprecated_params(redirections=[\n lambda buff=1: {\"buff_x\": buff[0], \"buff_y\": buff[1]} if isinstance(buff, tuple)\n else {\"buff_x\": buff, \"buff_y\": buff}\n ])\n def foo(**kwargs):\n return kwargs\n\n foo(buff=0)\n # WARNING The parameter buff of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff_x\": 0, buff_y: 0}\n\n foo(buff=(1,2))\n # WARNING The parameter buff of method foo has been deprecated and may be removed in a later version.\n # returns {\"buff_x\": 1, buff_y: 2}\n\n\n \"\"\"\n # Check if decorator is used without parenthesis\n if callable(params):\n raise ValueError(\"deprecate_parameters requires arguments to be specified.\")\n\n if params is None:\n params = []\n\n # Construct params list\n params = re.split(r\"[,\\s]+\", params) if isinstance(params, str) else list(params)\n\n # Add params which are only implicitly given via redirections\n if redirections is None:\n redirections = []\n for redirector in 
redirections:\n if isinstance(redirector, tuple):\n params.append(redirector[0])\n else:\n params.extend(list(inspect.signature(redirector).parameters))\n # Keep ordering of params so that warning message is consistently the same\n # This will also help pass unit testing\n params = list(dict.fromkeys(params))\n\n # Make sure params only contains valid identifiers\n identifier = re.compile(r\"^[^\\d\\W]\\w*\\Z\", re.UNICODE)\n if not all(re.match(identifier, param) for param in params):\n raise ValueError(\"Given parameter values are invalid.\")\n\n redirections = list(redirections)\n\n def warning_msg(func: Callable, used: list[str]):\n \"\"\"Generate the deprecation warning message.\n\n Parameters\n ----------\n func\n The callable with deprecated parameters.\n used\n The list of deprecated parameters used in a call.\n\n Returns\n -------\n str\n The deprecation message.\n \"\"\"\n what, name = _get_callable_info(func)\n plural = len(used) > 1\n parameter_s = \"s\" if plural else \"\"\n used_ = \", \".join(used[:-1]) + \" and \" + used[-1] if plural else used[0]\n has_have_been = \"have been\" if plural else \"has been\"\n deprecated = _deprecation_text_component(since, until, message)\n return f\"The parameter{parameter_s} {used_} of {what} {name} {has_have_been} {deprecated}\"\n\n def redirect_params(kwargs: dict, used: list[str]):\n \"\"\"Adjust the keyword arguments as defined by the redirections.\n\n Parameters\n ----------\n kwargs\n The keyword argument dictionary to be updated.\n used\n The list of deprecated parameters used in a call.\n \"\"\"\n for redirector in redirections:\n if isinstance(redirector, tuple):\n old_param, new_param = redirector\n if old_param in used:\n kwargs[new_param] = kwargs.pop(old_param)\n else:\n redirector_params = list(inspect.signature(redirector).parameters)\n redirector_args = {}\n for redirector_param in redirector_params:\n if redirector_param in used:\n redirector_args[redirector_param] = kwargs.pop(redirector_param)\n if len(redirector_args) > 0:\n kwargs.update(redirector(**redirector_args))\n\n def deprecate_params(func, *args, **kwargs):\n \"\"\"The actual decorator function used to extend the callables behavior.\n\n Logs a warning message when a deprecated parameter is used and redirects it if\n specified.\n\n Parameters\n ----------\n func\n The callable to decorate.\n args\n The arguments passed to the given callable.\n kwargs\n The keyword arguments passed to the given callable.\n\n Returns\n -------\n Any\n The return value of the given callable when being passed the given\n arguments.\n\n \"\"\"\n used = []\n for param in params:\n if param in kwargs:\n used.append(param)\n\n if len(used) > 0:\n logger.warning(warning_msg(func, used))\n redirect_params(kwargs, used)\n return func(*args, **kwargs)\n\n return decorator(deprecate_params)\n","repo_name":"ManimCommunity/manim","sub_path":"manim/utils/deprecation.py","file_name":"deprecation.py","file_ext":"py","file_size_in_byte":15062,"program_lang":"python","lang":"en","doc_type":"code","stars":16609,"dataset":"github-code","pt":"94"}
+{"seq_id":"1505955909","text":"from select import *\nfrom socket import *\nfrom time import sleep\n\ntcp_sock = socket()\ntcp_sock.bind((\"0.0.0.0\", 8090))\ntcp_sock.listen()\n\nudp_sock = socket(AF_INET, SOCK_DGRAM)\nudp_sock.bind((\"0.0.0.0\", 8888))\n\nf = open(\"test.txt\")\n\np = poll() # poll对象\np.register(tcp_sock, POLLIN) # 关注IO\n\np.register(f, POLLIN)\nprint(tcp_sock.fileno(), POLLIN)\nsleep(2)\n\nprint(f.fileno(),POLLIN)\nevents = p.poll()\nprint(events)\n","repo_name":"seabedforest/study","sub_path":"month02/day16/poll_test.py","file_name":"poll_test.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"15124363354","text":"import unittest\nimport json\nfrom src.cms_visualizer.topography import (Topography, RectangularSource, RectangularTarget, RectangularObstacle,\n InvalidTopographyObjectException, DuplicateTopographyObjectIdException, TopographyReconstructionException,\n UndefinedTopographyObjectType)\n\n\nclass TopographyTest(unittest.TestCase):\n\n def test_topography_creation(self):\n topography = (\n Topography(width=200, height=200)\n .with_sources([RectangularSource(1, 5, 5, 1, 1)])\n .with_targets([RectangularTarget(2, 95, 95, 1, 1)])\n .with_obstacles([RectangularObstacle(3, 50, 50, 1, 1)])\n )\n self.assertIsInstance(topography, Topography)\n self.assertEqual(topography.width, 200)\n self.assertEqual(topography.height, 200)\n self.assertEqual(len(topography.sources), 1)\n self.assertEqual(len(topography.targets), 1)\n self.assertEqual(len(topography.obstacles), 1)\n\n def test_invalid_rect_source(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 105, 105, 1, 1)])\n\n def test_invalid_rect_target(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_targets(\n [RectangularTarget(1, -21, -21, 1, 1)])\n\n def test_invalid_rect_obstacle(self):\n with self.assertRaises(InvalidTopographyObjectException):\n Topography(width=100, height=100).with_obstacles(\n [RectangularObstacle(1, 5, 5, 0, 1)])\n\n def test_duplicate_id(self):\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 5, 5, 1, 1)]).with_targets(\n [RectangularTarget(1, 25, 25, 1, 1)])\n\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_targets(\n [RectangularTarget(1, 5, 5, 1, 1)]).with_sources(\n [RectangularSource(1, 25, 25, 1, 1)])\n\n with self.assertRaises(DuplicateTopographyObjectIdException):\n Topography(width=100, height=100).with_sources(\n [RectangularSource(1, 5, 5, 1, 1)]).with_obstacles(\n [RectangularObstacle(1, 25, 25, 1, 1)])\n\n def test_topography_creation_from_dict(self):\n with open('tests/valid_simulation.json') as f:\n simulation = json.load(f)\n\n topography = Topography.from_dict(simulation['topography'])\n self.assertIsInstance(topography, Topography)\n self.assertEqual(topography.width, 200)\n self.assertEqual(topography.height, 100)\n self.assertEqual(len(topography.sources), 1)\n self.assertEqual(len(topography.targets), 1)\n self.assertEqual(len(topography.obstacles), 2)\n\n def test_topography_creation_from_dict_missing_width_height(self):\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'sources': [],\n 'targets': [],\n 'obstacles': []\n })\n\n def test_topography_creation_from_dict_missing_objects(self):\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'targets': [],\n 'obstacles': []\n })\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'sources': [],\n 'obstacles': []\n })\n with self.assertRaises(TopographyReconstructionException):\n Topography.from_dict({\n 'targets': [],\n 'sources': []\n })\n\n def test_topography_creation_from_dict_unknown_type(self):\n with self.assertRaises(UndefinedTopographyObjectType):\n Topography.from_dict({\n \"targets\": [\n {\n \"id\": 1,\n \"x\": 5,\n \"y\": 5,\n \"radius\": 25,\n \"type\": \"CIRCLE\"\n }\n ],\n \"sources\": [],\n \"obstacles\": 
[],\n \"width\": 100,\n \"height\": 100\n })\n\n def test_topography_to_dict(self):\n topography = (\n Topography(width=200, height=200)\n .with_sources([RectangularSource(1, 5, 5, 1, 1)])\n .with_targets([RectangularTarget(2, 95, 95, 1, 1)])\n .with_obstacles([RectangularObstacle(3, 50, 50, 1, 1)])\n )\n topography_dict = topography.to_dict()\n self.assertEqual(topography_dict['width'], 200)\n self.assertEqual(topography_dict['sources'][0]['type'], 'RECTANGULAR')\n","repo_name":"gjke/cms-visualizer","sub_path":"tests/test_topography.py","file_name":"test_topography.py","file_ext":"py","file_size_in_byte":4985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"17910727223","text":"#!/usr/bin/python\n\nimport sys\nimport math\nimport json\nimport os\nimport traceback\n\n#environment specific\nclose_fds=False\n\nbase_path=os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..'))\ndata_path=base_path+\"/data\"\nsystem_data_path=data_path+\"/system\"\nsamples_path=data_path+\"/samples\"\nupload_path=data_path+\"/uploaded\"\nthumbnail_image_path=upload_path+\"/thumbnail\"\nmetadata_path=data_path+\"/metadata\"\nmfcc_path=metadata_path+\"/mfcc\"\ntags_path=metadata_path+\"/tags\"\ncodebook_path=data_path+\"/codebooks/current\"\ntag_to_generate_codebook=True\n\nversion_path=\"v0.2\"\n#\nSYSTEM_USER_ID=\"0\"\nMAX_DATA_SIZE=256\nSIZE_CODEBOOK=128\n\n#dynamic VQ\ndvq_scheme_n=3\ndvq_scheme_a=[1/3,1/3] #the last one is inferred\n\npy_executable=sys.executable\nffmpeg=\"/usr/bin/ffmpeg\"\nthumbnail_image_type=\".jpeg\"\n\ntry:\n\turl = os.environ[\"REQUEST_URI\"] \n\tserver_addr=os.environ[\"SERVER_ADDR\"]\n\tserver_port=os.environ[\"SERVER_PORT\"]\n\tdownload_baseUrl=\"http://\"+server_addr+\":\"+server_port+\"/\"+version_path+\"/data/uploaded\"\nexcept:\n\tdownload_baseUrl=None\n\ttraceback.print_exc(file=sys.stderr)\n","repo_name":"liesheng/dog-emotion-detector","sub_path":"application/web/cgi-bin/app_config.py","file_name":"app_config.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"14073272755","text":"# encoding: utf-8\n\nimport os\n\n\n##########################################\n# Init db connection.\n##########################################\n\n\nconn = None\n\n\n# Remove the comment notations. Make a connenction to real db. [TODO_DB]\n\n\"\"\"\nimport psycopg2\nimport urllib.parse as urlparse\n\nurlparse.uses_netloc.append(\"postgres\")\nurl = urlparse.urlparse(os.environ[\"DATABASE_URL\"])\n\nconn = psycopg2.connect(\n database=url.path[1:],\n user=url.username,\n password=url.password,\n host=url.hostname,\n port=url.port\n)\n\"\"\"\n\n\n##########################################\n# Init bot.\n##########################################\n\nfrom DataManager import DataManager\ndata_manager = DataManager(conn)\n\nfrom CianCianBot import CianCianBot\nbot = CianCianBot(data_manager)\n\n\n##########################################\n# Init flask backend and linebot facility.\n##########################################\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\nchannel_secret = 'd07d23b1fb9c10f9e2d638bf56856344'\nchannel_access_token = '6buiIgstKTe+PYLqN/jmy8MNjsn4qBSFr1IwYRLgb5x9BWtD6qPHLi/KVMCGB00ZbcSAJfjOByFezjQSL4IvdCb6wT12BwdbrZ+/zhDRthAsW967CMnh4W9zmntX2oYybmPjx4pk50e4dhnaTVUHHwdB04t89/1O/w1cDnyilFU='\nhandler = WebhookHandler(channel_secret)\nline_bot_api = LineBotApi(channel_access_token)\n\n\n@app.route('/')\ndef index():\n return \"Hello World!
\"\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n@handler.add(MessageEvent, message=TextMessage) # default\ndef handle_text_message(event): # default\n # User's message\n msg = event.message.text # message from user\n\n # User's chatting window id, could be `user_id`, `room_id`, `group_id`.\n if event.source.type == \"user\":\n src_id = event.source.user_id\n elif event.source.type == \"room\":\n src_id = event.source.room_id\n elif event.source.type == \"group\":\n src_id = event.source.group_id\n else:\n src_id = \"error\"\n unique_id = str(event.source.type) + \"_\" + src_id\n\n # Responding algorithm\n bot_response = bot.respond(msg, unique_id)\n\n # Reply\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=bot_response)\n )\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=os.environ['PORT'])\n","repo_name":"AliciaTsai/LineBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"72663172468","text":"import os\nimport pymysql\nfrom flask import jsonify\n\ndb_user = os.environ.get('CLOUD_SQL_USERNAME')\ndb_password = os.environ.get('CLOUD_SQL_PASSWORD')\ndb_name = os.environ.get('CLOUD_SQL_DATABASE_NAME')\ndb_connection_name = os.environ.get('CLOUD_SQL_CONNECTION_NAME')\n\n\ndef open_connection():\n unix_socket = '/cloudsql/{}'.format(db_connection_name)\n try:\n if os.environ.get('GAE_ENV') == 'standard':\n conn = pymysql.connect(user=db_user, password=db_password,\n unix_socket=unix_socket, db=db_name,\n cursorclass=pymysql.cursors.DictCursor\n )\n except pymysql.MySQLError as e:\n print(e)\n\n return conn\n\n# Read all projects in DB front-end index it\ndef get_projects():\n conn = open_connection()\n with conn.cursor() as cursor:\n result = cursor.execute('SELECT * FROM PROJECTS;')\n projects = cursor.fetchall()\n if result > 0:\n got_projects = jsonify(projects)\n else:\n got_projects = 'No projects found in Database'\n conn.close()\n return got_projects\n\n\n# Create Survey Project\ndef add_projects(project):\n conn = open_connection()\n with conn.cursor() as cursor:\n cursor.execute('INSERT INTO PROJECTS (name, description, formData) VALUES(%s, %s, %s)', (project[\"name\"], project[\"description\"], project[\"formData\"]))\n conn.commit()\n conn.close()\n \n # Get User response by Selected ProjectID \ndef get_responses():\n conn = open_connection()\n with conn.cursor() as cursor:\n result = cursor.execute('SELECT * FROM USER_RESPONSES;')\n responses = cursor.fetchall()\n if result > 0:\n got_responses = jsonify(responses)\n else:\n got_responses = 'No user responses on database'\n conn.close()\n return got_responses\n\n\n# def get_responsesByProjectID():\n# conn = open_connection()\n# with conn.cursor() as cursor:\n# result = cursor.execute('SELECT USER_RESPONSES.responseData, USER_RESPONSES.ProjectID, PROJECTS.projectID FROM USER_RESPONSES INNER JOIN PROJECTS ON USER_RESPONSES.ProjectID=PROJECTS.projectID;')\n# responsesByID = cursor.fetchall()\n# if result > 0:\n# got_responses = jsonify(responsesByID)\n# else:\n# got_responses = 'No projects found in Database'\n# conn.close()\n# return got_responses\n\n\n# Submit response\ndef submit_response(response):\n conn = open_connection()\n with conn.cursor() as cursor:\n cursor.execute('INSERT INTO USER_RESPONSES (responseID, responseData, projectID) VALUES(%s, %s, %s)', (response[\"responseID\"],response[\"responseData\"], response[\"projectID\"]))\n conn.commit()\n conn.close()\n ","repo_name":"carolher/ChainedSurveyApp","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24611455356","text":"from django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\n\n\ndef validate_positive(value):\n if value < 0:\n raise ValidationError(\n _('%(value)s must be positive'),\n params={'value': value},\n )\n\n\ndef is_percent(value):\n if value < 0 and value > 100:\n raise ValidationError(\n _('%(value)s must be an integer between 0 and 100'),\n params={'value': value},\n )\n","repo_name":"Ircam-WAM/mezzanine-organization","sub_path":"organization/network/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"73813405749","text":"import csv\nimport os\nimport pandas as pd\nfrom yahoo_fin.stock_info import *\nfrom definitions import TICKERS_DIR\n\n\n\ndef download_ticker():\n file_date = datetime.datetime.utcnow()\n file_date = file_date.strftime(\"%Y%m%d\")\n\n file_name = 'all_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_name):\n df_tickers = pd.read_csv(TICKERS_DIR + file_date + '_' + file_name)\n # print(df_tickers.columns)\n return df_tickers\n\n else:\n # download sp500 tickers\n sp500_tickers = tickers_sp500()\n file_sp500 = 'sp500_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_sp500):\n df_sp500 = pd.read_csv(TICKERS_DIR + file_date + '_' + file_sp500)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_sp500, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(sp500_tickers)\n df_sp500 = pd.DataFrame()\n df_sp500['ticker'] = sp500_tickers\n df_sp500['sp500'] = True\n\n # # download dow\n # dow_tickers = tickers_dow()\n # file_dow = 'dow_tickers.csv'\n # if os.path.exists(TICKERS_DIR + file_date + '_' + file_dow):\n # df_dow = pd.read_csv(TICKERS_DIR + file_date + '_' + file_dow)\n # else:\n # with open(TICKERS_DIR + file_date + '_' + file_dow, mode='w') as ticker_file:\n # ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # ticker_writer.writerow(dow_tickers)\n # df_dow = pd.DataFrame()\n # df_dow['ticker'] = dow_tickers\n # df_dow['dow'] = True\n\n # download nasdaq\n nasdaq_tickers = tickers_nasdaq()\n file_nasdaq = 'nasdaq_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_nasdaq):\n df_nasdaq = pd.read_csv(TICKERS_DIR + file_date + '_' + file_nasdaq)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_nasdaq, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(nasdaq_tickers)\n df_nasdaq = pd.DataFrame()\n df_nasdaq['ticker'] = nasdaq_tickers\n\n # download others\n other_tickers = tickers_other()\n file_other = 'other_tickers.csv'\n if os.path.exists(TICKERS_DIR + file_date + '_' + file_other):\n df_other = pd.read_csv(TICKERS_DIR + file_date + '_' + file_other)\n else:\n with open(TICKERS_DIR + file_date + '_' + file_other, mode='w') as ticker_file:\n ticker_writer = csv.writer(ticker_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n ticker_writer.writerow(other_tickers)\n df_other = pd.DataFrame()\n df_other['ticker'] = other_tickers\n\n # tickers all\n # tickers = list(set().union(sp500_tickers, dow_tickers, nasdaq_tickers, other_tickers))\n tickers = list(set().union(sp500_tickers, nasdaq_tickers, other_tickers))\n\n df_tickers = pd.DataFrame()\n df_tickers['ticker'] = tickers\n # df_tickers['dow'] = df_tickers['ticker'].apply(lambda x: True if x in dow_tickers else False)\n df_tickers['sp500'] = df_tickers['ticker'].apply(lambda x: True if x in sp500_tickers else False)\n df_tickers['exchange'] = df_tickers['ticker'].apply(lambda x: 'nasdaq' if x in nasdaq_tickers else None)\n df_tickers = df_tickers.loc[df_tickers['ticker'].str.len() > 0]\n\n # write to csv\n file_name = 'all_tickers.csv'\n df_tickers.to_csv(TICKERS_DIR + file_date + '_' + file_name, index=False)\n\n # check\n print('number of total stocks: {}'.format(len(df_tickers['ticker'])))\n print('number of unique stocks: {}'.format(df_tickers['ticker'].nunique()))\n # 
print('number of stocks in DOW: {}'.format(len(df_tickers.loc[df_tickers['dow'] == True])))\n print('number of stocks in SP500: {}'.format(len(df_tickers.loc[df_tickers['sp500'] == True])))\n print('number of stocks in NASDAQ: {}'.format(len(df_tickers.loc[df_tickers['exchange'] == 'nasdaq'])))\n\n return df_tickers\n\nif __name__ == '__main__':\n download_ticker()\n","repo_name":"dark7wind/stock_fundamental_valuation","sub_path":"src/data/download_ticker.py","file_name":"download_ticker.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"72572654708","text":"from django.contrib.auth import get_user_model\nfrom django.http import JsonResponse\nfrom django.db.utils import IntegrityError\n\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import FileUploadParser, MultiPartParser\n\nUser = get_user_model()\n\nfrom apps.utils import hash_file\nfrom apps.api.permissions import UserIDPermission, WhiteListPermission\nfrom apps.api.serializers import AudioDocSerializer, DocSerializer\n\nfrom apps.docs.models import TextDocument\nfrom apps.docs.tasks import processa_textdoc_task\n\nfrom apps.audios.models import AudioDocument\nfrom apps.audios.tasks import processa_audiodoc_task\n\n\ndef root(request):\n return JsonResponse({\"projeto\": 'Sophia' })\n\nclass DocViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = TextDocument.objects.all()\n serializer_class = DocSerializer\n permission_classes = [\n permissions.IsAuthenticated\n & WhiteListPermission\n & UserIDPermission\n ]\n # parser_classes = [FileUploadParser]\n parser_classes = [MultiPartParser]\n\n @action(detail=True, methods=['get'])\n def status(self, request, *args, **kwargs):\n doc = self.get_object()\n\n return Response({\n 'processando': doc.processando,\n 'processado': doc.foi_processado,\n })\n\n def create(self, request):\n data = dict(request.data)\n\n if 'files' not in data:\n return Response({\n 'status': False,\n 'msg': 'Nenhum documento anexado encontrado'\n })\n\n resultado = []\n anexos = data['files']\n for anexo in anexos:\n\n if not anexo.content_type.startswith('audio') \\\n and not anexo.content_type.startswith('video'):\n\n api_userid = request.META.get('HTTP_X_API_USERID', None)\n hash_sha256 = hash_file(anexo)\n\n try:\n textdoc = TextDocument.objects.create(user=request.user, api_user=api_userid, hashfile=hash_sha256, file=anexo)\n\n textdoc.nome = anexo.name\n textdoc.size = anexo.size\n textdoc.filename = anexo.name\n textdoc.mime = anexo.content_type\n textdoc.ext = textdoc.file.name.split('.')[-1]\n # textdoc.save()\n\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Documento criado com sucesso'\n })\n except IntegrityError:\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Documento já existente para o usuário'\n })\n else:\n resultado.append({\n 'filename': anexo.name,\n 'msg': 'Rota não funciona para audios ou videos, favor consultar rota /audios'\n })\n\n return Response({\n 'status': True,\n 'msg': 'Consulta processada com sucesso',\n 'data': resultado\n })\n\n\n @action(detail=True, methods=['post'])\n def processa(self, request, *args, **kwargs):\n\n docid = kwargs['pk']\n textdoc = TextDocument.objects.filter(user__id=request.user.id, id=docid).first()\n\n if textdoc is None:\n return Response({ 'status': False, 'msg': 'Doc não encontrado' })\n\n if textdoc.foi_processado:\n return Response({ 'status': False, 'msg': 'Doc já processado', 'data': textdoc.id })\n\n if textdoc.processando:\n return Response({ 'status': False, 'msg': 'Doc está sendo processado', 'data': textdoc.id })\n\n textdoc.processando = True\n textdoc.save()\n\n processa_task = processa_textdoc_task.delay(userid=request.user.id, docid=docid)\n\n return Response({\n 'status': True,\n 'msg': 'Doc está sendo processado'\n\n })\n\nclass AudioDocViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or 
edited.\n \"\"\"\n queryset = AudioDocument.objects.all()\n serializer_class = AudioDocSerializer\n permission_classes = [\n permissions.IsAuthenticated\n & WhiteListPermission\n & UserIDPermission\n ]\n parser_classes = [FileUploadParser]\n\n @action(detail=True, methods=['get'])\n def status(self, request, *args, **kwargs):\n doc = self.get_object()\n\n return Response({\n 'processando': doc.processando,\n 'processado': doc.foi_processado,\n })\n\n def create(self, request):\n\n if 'file' not in request.data:\n return Response({\n 'status': False,\n 'msg': 'Documento anexado não encontrado'\n })\n\n if not request.data['file'].content_type.startswith('audio') and \\\n not request.data['file'].content_type.startswith('video'):\n return Response({\n 'status': False,\n 'msg': 'Rota funciona apenas para audios ou videos, favor consultar outra rota.'\n })\n\n api_userid = request.META.get('HTTP_X_API_USERID', None)\n hash_sha256 = hash_file(request.data['file'])\n\n try:\n audiodoc = AudioDocument.objects.create(user=request.user, api_user=api_userid, hashfile=hash_sha256, file=request.data['file'])\n except IntegrityError:\n return Response({\n 'status': False,\n 'msg': 'Audio já existente para o usuário'\n })\n\n audiodoc.nome = request.data['file'].name\n audiodoc.size = audiodoc.file.size\n audiodoc.filename = request.data['file'].name\n audiodoc.mime = request.FILES['file'].content_type\n audiodoc.ext = audiodoc.file.name.split('.')[-1]\n audiodoc.save()\n\n return Response({\n 'status': True,\n 'msg': 'Audio criado com sucesso',\n 'data': audiodoc.id,\n })\n\n @action(detail=True, methods=['post'])\n def processa(self, request, *args, **kwargs):\n\n docid = kwargs['pk']\n audiodoc = AudioDocument.objects.filter(user__id=request.user.id, id=docid).first()\n\n if audiodoc is None:\n return Response({ 'status': False, 'msg': 'Audio não encontrado' })\n\n if audiodoc.foi_processado:\n return Response({ 'status': False, 'msg': 'Audio já processado', 'data': audiodoc.id })\n\n if audiodoc.processando:\n return Response({ 'status': False, 'msg': 'Audio está sendo processado', 'data': audiodoc.id })\n\n audiodoc.processando = True\n audiodoc.save()\n\n processa_task = processa_audiodoc_task.delay(userid=request.user.id, docid=docid)\n\n return Response({\n 'status': True,\n 'msg': 'Audio está sendo processado'\n })\n\n\n","repo_name":"acba/sophia","sub_path":"apps/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"9580975446","text":"from fastapi.testclient import TestClient\nfrom models import Product_model\nfrom fastapi.encoders import jsonable_encoder\n\nfrom main import app\n\nclient = TestClient(app)\n\nroutes = \"\"\"path full name\n-------------------------------- ---------------------------------------------------------------\n/customers/all/ routes.customer.Customer_resource.r_get_customers\n/customers/filtered/{product_id} routes.customer.Customer_resource.r_get_customers_with_product\n/docs fastapi.applications.FastAPI.setup..swagger_ui_html\n/docs/oauth2-redirect fastapi.applications.FastAPI.setup..swagger_ui_redirect\n/openapi.json fastapi.applications.FastAPI.setup..openapi\n/products/all/ routes.product.Product_resource.r_get_products\n/products/create/ routes.product.Product_resource.r_add_product\n/products/delete/{id} routes.product.Product_resource.r_delete_product\n/products/edit/ routes.product.Product_resource.r_patch_product\n/products/filtered/{customer_id} routes.product.Product_resource.r_get_products_with_customer\n/redoc fastapi.applications.FastAPI.setup..redoc_html\"\"\"\n\n\ndef test_get_customers():\n response = client.get(\"/customers/all/\")\n assert response.status_code == 200\n\n\ndef test_get_products():\n response = client.get(\"/products/all/\")\n assert response.status_code == 200\n\n\ndef test_get_filtered_clients():\n response = client.get(\"/customers/filtered/1\")\n assert response.status_code == 200\n\n\ndef test_get_filtered_products():\n response = client.get(\"/products/filtered/1\")\n assert response.status_code == 200\n\n\ndef test_create_delete():\n '''post and delete'''\n response = client.get(\"/products/all\")\n start = response.json()\n client.post(\"/products/create\", json={'name': 'testTestTest'})\n response = client.get(\"/products/all\")\n end = response.json()\n assert len(end) > len(start)\n target = [i['id'] for i in end if i['name'] == 'testTestTest']\n for t in target:\n client.delete(f\"/products/delete/{t}\")\n\n\ndef test_patch():\n '''create, patch, delete'''\n client.post(\"/products/create\",\n json={'name': 'testPatchtestPatchtestPatch'})\n start = client.get(\"/products/all\").json()\n target_ids = [i['id']\n for i in start if i['name'] == 'testPatchtestPatchtestPatch']\n resp = client.patch(\n f\"/products/edit/{target_ids[0]}\", json={\"idPhoto\": 1234})\n end = client.get(\"/products/all\").json()\n assert [i['idPhoto'] for i in end if i['id'] == target_ids[0]][0] == 1234\n for t in target_ids:\n client.delete(f\"/products/delete/{t}\")\n\n\ndef test_invalid_patch():\n '''create, patch, delete'''\n client.post(\"/products/create\",\n json={'name': 'testPatchtestPatchtestPatch'})\n start = client.get(\"/products/all\").json()\n target_ids = [i['id']\n for i in start if i['name'] == 'testPatchtestPatchtestPatch']\n resp = client.patch(\n f\"/products/edit/{target_ids[0]}\", json={\"id_____Photo\": 1234})\n print(resp, resp.text)\n end = client.get(\"/products/all\").json()\n for t in target_ids:\n client.delete(f\"/products/delete/{t}\")\n","repo_name":"ressiwage/TESTTASK-fastapi-crud","sub_path":"server/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"14202398215","text":"from .libio import MIO\nimport csv \n\nclass MHCIDBData(MIO):\n \"\"\" existing data path and file names \n \"\"\"\n def __init__(self, args):\n self.args = args\n self.UPDATE = args.UPDATE\n self.data_dir = args.data_dir\n self.mhcI_db_dir = \"MHCIDB\"\n self.setMHCIDBPath(mhcidb_dirname=self.mhcI_db_dir, data_dir=self.data_dir)\n \n \n def __setMHCI_DB_Path(self, mhcidb_dirname):\n \"\"\" set MHCI Database directory path \"\"\"\n if not hasattr(self, \"mhcIdb_path\") or self.mhcIdb_path is None:\n cwd = self.getCWD() \n pre_dir, after = cwd.split(mhcidb_dirname)\n self.mhcIdb_path = self.joinPath(pre_dir,mhcidb_dirname )\n print((\"# MHCIDB workding path: {}\".format(self.mhcIdb_path)))\n \n \n def setMHCIDBPath(self, mhcidb_dirname=\"MHCIDB\", data_dir=\"existing_data\"):\n \"\"\" set MHCIDB working directory path and there existing data \"\"\"\n self.__setMHCI_DB_Path(mhcidb_dirname)\n self.mhcIdb_existing_data_path = self.joinPath(self.mhcIdb_path, data_dir)\n self.mhcIdb_hla_path = self.joinPath(self.mhcIdb_existing_data_path, \"hla\" )\n self.mhcIdb_pdb_path = self.joinPath(self.mhcIdb_existing_data_path, \"pdb\" )\n self.mhcIdb_ba_path = self.joinPath(self.mhcIdb_existing_data_path, \"ba\" ) \n self.mhcIdb_pdb3d_path = self.joinPath(self.mhcIdb_pdb_path, \"raw_pdbs\")\n \n \n def get_hla_aligned_seq_fp(self):\n \"\"\" return the path of the file contains the alinged protein seqeuences of \n HLA gene A, B and C\n \"\"\"\n fn_aln = \"ClassI_prot.txt\"\n return self.joinPath(self.mhcIdb_hla_path, fn_aln) \n \n def getAnchorMajorSeqFp(self):\n fn = \"HLA_amseq.bin\"\n return self.joinPath(self.mhcIdb_hla_path, fn)\n \n \n def getHLAAlleleGrpFp(self):\n fn = \"hla_allele_grps.bin\"\n return self.joinPath(self.mhcIdb_hla_path, fn)\n \n \n def getDLFastaFp(self, fn=\"fasta.txt\"):\n return self.joinPath(self.mhcIdb_pdb_path, fn)\n \n \n def pdbid2Fp(self, pdbid):\n return self.joinPath(self.mhcIdb_pdb3d_path, \"{}.pdb\".format(pdbid.lower()))\n \n \n def getSeqFilteredFpBin(self):\n fn_out_pre = \"mhcI_filter_by_seq\" \n fn_out_pre = self.joinPath(self.mhcIdb_pdb_path,fn_out_pre)\n fp_out_bin = \"%s.bin\" % fn_out_pre\n return fp_out_bin\n \n def getMHCIPDBFpBin(self):\n \"\"\" return existing pdbids' filename and path \"\"\"\n fn = \"mhcI_pdbs.bin\"\n return self.joinPath(self.mhcIdb_pdb_path, fn)\n \n def getPDB2ALLeleFpBin(self):\n fn = \"mhcI_pdb_to_allele.bin\"\n return self.joinPath(self.mhcIdb_path, fn)\n \n def getIEDBFp(self):\n fn_bind_data = 'bdata.20130222.mhci.txt'\n return self.joinPath(self.mhcIdb_ba_path, fn_bind_data)\n \n def getBindDataFp(self):\n fn_bind_bin = \"mhcI_bdata.bin\"\n return self.joinPath(self.mhcIdb_ba_path, fn_bind_bin)\n \n def loadMHCIBindData(self):\n fp_bind_data = self.getBindDataFp()\n if not self.isNew(fp_bind_data):\n self.mhcI_bind_data = self.loadObj(fp_bind_data)\n else:\n self.readBindData(self.getIEDBFp())\n self.dumpObj(self.mhcI_bind_data, fp_bind_data)\n \n \n def readBindData(self, fp):\n txt = self.readTxtFile(fp)\n self.mhcI_bind_data = {}\n cnt = 0\n for ln in txt[1:]:\n ln = ln.strip()\n elems = ln.split()\n if not ln: continue\n if len(elems) != 6:\n print(\"# Error in parsing: %s\" % ln) \n else:\n allele_name = elems[1]\n if allele_name.startswith(\"HLA-\"):\n cnt += 1\n lig_len = elems[2]\n lig = elems[3]\n inq = elems[4]\n bd = elems[5]\n if allele_name in self.mhcI_bind_data:\n self.mhcI_bind_data[allele_name].append((lig_len, lig, inq, bd))\n else:\n self.mhcI_bind_data[allele_name]= [(lig_len, 
lig, inq, bd)]\n ln = \"# num HLA binding data: %s\" % cnt\n self.add2log(ln, vb=1) \n \n \n def loadMHCIPDB2AlleleData(self):\n fp_bin = self.getPDB2ALLeleFpBin()\n self.matched_pdbs, self.non_matched_pdbs = self.loadObj(fp_bin)\n \n \n\n def loadMHCIPDBInf(self):\n \"\"\" mhci_pdbs[pdbid] = [(chain_ids, chain_seq(xxx,...), [ligand chains]), ...] \"\"\"\n fp = self.getMHCIPDBFpBin()\n self.mhcI_pdbs = self.loadObj(fp)\n self.mhcI_pdbids = self.mhcI_pdbs.keys() \n \n def getMHCILigandFpBin(self):\n fn = \"mhcI_pdb_ligand.bin\"\n fp = self.joinPath(self.mhcIdb_pdb_path, fn)\n return fp \n \n \n def saveMPBDB2CSV(self, mpbdb, fn=\"mpbdb.csv\"):\n \"\"\" lig_seq, lig_chain_id, lig_len \"\"\"\n with open(fn, 'w') as fh:\n writer = csv.writer(fh)\n pdbids = list(mpbdb.keys())\n pdbids.sort()\n heads = [\"PDBID\", \"Allele\", \"Ligand_len\", \"Ligand_seq\", \"Binding_operator\", \"Binding_affinity\"]\n writer.writerow(heads)\n for pdbid in pdbids:\n pdb_bds, cnt = mpbdb[pdbid]\n allele_name = pdb_bds[0][0]\n if cnt == 0:\n bd_opt, ba = \"\", \"\"\n lig_seq, lig_chain_id, lig_len = self.getLigandSeq(pdbid)\n if lig_seq is None:\n lig_seq = ''\n lig_len = ''\n else:\n lig_len = len(lig_seq) \n elif cnt > 1:\n print(\"#Error: muliple binding affinities for pdb: {} -> {}\".format(pdbid, pdb_bds))\n else:\n #print(pdb_bds[0][1])\n lig_len, lig_seq, bd_opt, ba = pdb_bds[0][1][0]\n row = [pdbid, allele_name, lig_len, lig_seq, bd_opt, ba] \n writer.writerow(row)\n print(\"# Saving Mpdbdb into: {}\".format(fn))\n \n \n def loadMHCIPDBLigandSeq(self):\n fp_bin = self.getMHCILigandFpBin()\n if self.isNew(fp_bin):\n fp_fasta = self.getDLFastaFp()\n self.readPdbSeqs(fp_fasta)\n #print( self.pdb_seqs)\n self.mhcI_pdb_ligands_seqs = {}\n for pdbid in self.mhcI_pdbids:\n ligand_inf = self.mhcI_pdbs[pdbid][-1]\n if ligand_inf:\n ligand_chain_id = ligand_inf[0][0]\n ligand_length = ligand_inf[0][1]\n self.mhcI_pdb_ligands_seqs[pdbid] = self.pdb_seqs[pdbid][ligand_chain_id], ligand_chain_id, ligand_length\n self.dumpObj(self.mhcI_pdb_ligands_seqs, fp_bin, vb=1)\n else:\n self.mhcI_pdb_ligands_seqs = self.loadObj(fp_bin) \n \n ","repo_name":"jinbuw/mpbdb","sub_path":"src/lib/libmhcidb.py","file_name":"libmhcidb.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"26782793768","text":"# pylint: disable=missing-docstring, protected-access\n\nimport os\nimport unittest\nimport tempfile\nimport warnings\n\nimport iCount\n\n\nclass TestExamplesScriptsInstall(unittest.TestCase):\n\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n self.examples_dir = os.path.join(self.tempdir, 'examples')\n warnings.simplefilter(\"ignore\", ResourceWarning)\n\n def test_examples(self):\n iCount.examples.run(out_dir=self.tempdir)\n # check if two scripts are present in subfolder examples\n self.assertTrue(\n os.path.isfile(os.path.join(self.examples_dir, 'hnRNPC.sh'))\n )\n self.assertTrue(\n os.path.isfile(os.path.join(self.examples_dir, 'hnRNPC_reduced.sh'))\n )\n\n def tearDown(self):\n files = os.listdir(self.examples_dir)\n for fn in files:\n os.remove(os.path.join(self.examples_dir, fn))\n\n os.rmdir(self.examples_dir)\n os.rmdir(self.tempdir)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"tomazc/iCount","sub_path":"iCount/tests/test_examples.py","file_name":"test_examples.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"94"}
+{"seq_id":"32278533839","text":"#!/anaconda3/bin/python3.7\n# -*- coding: UTF-8 -*-\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\ndef get_results_by_folds(nbr_folds, df):\n list_finale_metrics_dev = list()\n list_text_added = list()\n list_all_metrics_test = list()\n for fold in range(nbr_folds):\n df_fold = df.loc[df['fold'] == fold]\n list_finale_metrics_dev.append(np.array(df_fold.iloc[-1:,-2:].values.tolist()[0]))\n list_all_metrics_test.append(df_fold.iloc[:,-2:].values.tolist())\n return np.array(list_finale_metrics_dev), np.array(list_text_added), np.array(list_all_metrics_test)\n\ndef get_finale_results(nbr_folds, df):\n metrics, text_added, all_metrics_test = get_results_by_folds(nbr_folds, df)\n evolution_text_added = np.sum(text_added, axis=0)/nbr_folds\n evolution_metrics_test = np.sum(all_metrics_test, axis=0)/nbr_folds\n finale_metrics_dev = np.sum(metrics, axis=0)/nbr_folds\n\n mse_evolution = evolution_metrics_test[:,0]\n mae_evolution = evolution_metrics_test[:,1]\n accuracy = evolution_metrics_test[:,2]\n mse_finale = finale_metrics_dev[0]\n mae_finale = finale_metrics_dev[1]\n\n return evolution_text_added, mse_evolution, mae_evolution, mse_finale, mae_finale, accuracy\n\nif __name__ == \"__main__\":\n\n df = pd.read_csv(\"../projetAA/semi_supervised_multilabels-expertFeatures_epoch-30_seuil-0-6_iter-8.csv\", sep=\"\\t\", index_col=0)\n\n evolution_text_added, mse_evolution, mae_evolution, mse_finale, mae_finale, accuracy = get_finale_results(5, df)\n\n title = \"Variation des performances durant les itérations\"\n fig = plt.figure()\n plt.title(title)\n x = [el for el in range(len(mae_evolution))]\n plt.xlabel(\"itérations\")\n plt.ylabel(\"mesure\")\n\n plt.plot(x, mse_evolution, color=\"green\", label='MSE')\n plt.plot(x, mae_evolution, color=\"orange\", label='MAE')\n plt.plot(x, accuracy, color=\"blue\", label='Accuracy')\n\n plt.legend(loc='upper left')\n plt.savefig('../{}.png'.format(title.replace(\"\\n\", \"\").replace(\" : \",\"_\").replace(\" \", \"_\").replace(\",\", \"-\").strip()), dpi=300, format='png', bbox_inches='tight')","repo_name":"xingyuliuNLP/tweet_register","sub_path":"5_performance_evolution.py","file_name":"5_performance_evolution.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"72246056630","text":"import time\nimport serial\n \nser = serial.Serial( \n port='/dev/serial0',\n baudrate = 115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\nwhile True:\n x = input()\n ser.write(x.encode())","repo_name":"jstkyle/Senior-Design","sub_path":"Raspi_Software/kb_control.py","file_name":"kb_control.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"4309081169","text":"from __future__ import annotations\nfrom dataclasses import dataclass\nfrom typing import List\nfrom sys import intern\nimport cv2\nimport numpy as np\nimport math\nfrom typing import List, Tuple\nfrom imageutil import *\n\nIMG_FILE = 'contract_house.png'\nMAX_PIXEL = 3508\nMAX_WIDTH = 2480\nMAX_HEIGHT = MAX_PIXEL\n\ndef convertColor(img):\n tmp = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n tmp = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n return tmp\n\ndef resizeImage(img):\n height, width = img.shape\n # print(\"height: {}, width : {}\".format(height, width))\n sampling_method = cv2.INTER_LINEAR\n if height * width > MAX_WIDTH * MAX_HEIGHT :\n # This image needs downsampling\n sampling_method = cv2.INTER_LENEAR\n else :\n sampling_method = cv2.INTER_AREA\n \n if width > height :\n newheight = int(height * MAX_WIDTH / width)\n # print(\"new h : {}, w : {}\".format(newheight, MAX_WIDTH))\n tmp = cv2.resize(img, (MAX_WIDTH, newheight), interpolation=sampling_method)\n else :\n newwidth = int(width * MAX_HEIGHT / height)\n # print(\"new h : {}, w : {}\".format(newwidth, MAX_HEIGHT))\n tmp = cv2.resize(img, (newwidth, MAX_HEIGHT), interpolation=sampling_method)\n return tmp\n\ndef filteredAngle(angles):\n npangles = np.array(angles)\n mean = np.mean(npangles)\n std = np.std(npangles)\n dfm = abs(npangles-mean)\n max_deviation = 2\n not_outliers = dfm < max_deviation * std\n std_angles = npangles[not_outliers]\n if(len(std_angles) > 0):\n return np.median(std_angles)\n else:\n return 0\n\ndef getSkewnessFromVlines(img, vlines):\n angles = []\n # print('vlines:{}'.format(vlines))\n for line in vlines:\n for x1,y1,x2,y2 in line:\n if abs(y2-y1) == 0:\n angles.append(0)\n continue\n if abs(x2-x1) == 0:\n angles.append(0)\n continue\n yd = y1 - y2\n xd = x2 - x1\n if xd == 0:\n continue\n angle = math.degrees(math.atan(yd/xd))\n angle = 90 - angle\n # print(angle)\n if abs(angle) > 4:\n continue\n angles.append(angle)\n # print('v angles:{}'.format(angles))\n return filteredAngle(angles)\n\ndef getSkewnessFromLines(img, lines):\n angles = []\n threshold_pixel = 12\n h_milestonpoints = []\n hlines = {}\n for line in lines:\n for x1,y1,x2,y2 in line:\n if abs(y2-y1) == 0 :\n angles.append(0)\n continue\n if abs(x2-x1) == 0 :\n angles.append(0)\n continue\n yd = y2-y1\n xd = x2-x1\n angle = math.atan(yd/xd)*180/math.pi\n if angle > 4 :\n continue\n if angle < -4 :\n continue\n angles.append(angle)\n if (abs(h_milestonpoints - y1) < threshold_pixel).sum() == 0:\n h_milestonpoints.append(y1)\n newlist = []\n newlist.append((x1,y1,x2,y2))\n hlines[y1] = newlist\n else:\n idx = [i for i,v in enumerate(abs(h_milestonpoints - y1) < threshold_pixel) if v > 0][0]\n targetlist = hlines[h_milestonpoints[idx]]\n targetlist.append((x1,y1,x2,y2))\n\n # print('angles:{}'.format(angles))\n\n ## Calculate median value from the longest hline\n anglesfromlline = []\n # print('horizontal lines for skewness detecting: {}'.format(hlines))\n for linepaths in hlines.values():\n linepaths.sort(key=lambda line:line[0])\n x1 = linepaths[0][0] # x1\n y1 = linepaths[0][1]\n linepaths.sort(key=lambda line:line[2])\n x2 = linepaths[-1][2]\n y2 = linepaths[-1][3]\n yd = y2-y1\n xd = x2-x1\n angle = math.atan(yd/xd)*180/math.pi\n anglesfromlline.append(angle)\n # print(anglesfromlline)\n\n ## Calculate via HoughLine\n sorted(h_milestonpoints)\n h_milestonpoints = np.sort(h_milestonpoints)\n heights = np.diff(h_milestonpoints)\n angleFromHoughLine = None\n if(len(heights)>0):\n 
average_span_height = np.median(heights)\n # print('avg height:{}'.format(average_span_height))\n threshold = 10\n std_line_index = int(np.argmin(abs(heights - average_span_height) < threshold, axis=0))\n std_line_ypoint = h_milestonpoints[std_line_index]\n largest_element = hlines[std_line_ypoint]\n \n # print('largest elements:{}'.format(largest_element))\n x_values = np.array([])\n x_values = np.append(x_values,sorted(set([item[0] for item in largest_element])))\n x_values = np.append(x_values,sorted(set([item[2] for item in largest_element])))\n y_values = np.array([])\n y_values = np.append(y_values,sorted(set([item[1] for item in largest_element])))\n y_values = np.append(y_values,sorted(set([item[3] for item in largest_element])))\n sorted(x_values)\n sorted(y_values)\n # print(x_values)\n # print(y_values)\n x1 = int(x_values[0])\n x2 = int(x_values[-1])\n y1 = int(y_values[0])\n y2 = int(y_values[-1])\n if x1 > x2 :\n x1,x2 = x2,x1\n if y1 > y2 :\n y1,y2 = y2,y1\n # print('largest elements ROI: {},{},{},{}'.format(x1,x2,y1,y2)) \n roi = img[y1:y2, x1:x2]\n debugShow('lineroi', roi)\n\n anglefromhline = []\n houghlines = cv2.HoughLines(roi,1,np.pi/180 / 10,int(abs(x2-x1)*9/10))\n if houghlines is not None :\n for oneline in houghlines:\n rho, theta = oneline[0]\n degree = math.degrees(theta)\n # print('rho, theta, skewness: {}, {}'.format(rho, degree, 90-degree))\n angleFromHoughLine = (90-degree) * -1\n anglefromhline.append(angleFromHoughLine)\n angleFromHoughLine = filteredAngle(anglefromhline)\n\n angleFromShortPaths = filteredAngle(angles)\n angleFromLongestPaths = filteredAngle(anglesfromlline)\n if angleFromHoughLine is None:\n angleFromHoughLine = 0.0\n\n # print('s.angle, l.angle, h.angle: {}, {}, {}'.format(angleFromShortPaths, angleFromLongestPaths, angleFromHoughLine))\n\n if abs(angleFromLongestPaths) > abs(angleFromShortPaths):\n if abs(angleFromHoughLine) < abs(angleFromLongestPaths):\n return angleFromHoughLine\n else:\n return angleFromLongestPaths\n else:\n return angleFromShortPaths\n\ndef get_median_angle(binary_image):\n # applying morphological transformations on the binarised image\n # to eliminate maximum noise and obtain text ares only\n # boxes = getLineDetection(binary_image)\n erode_otsu = cv2.erode(binary_image,np.ones((7,7),np.uint8),iterations=1)\n negated_erode = ~erode_otsu\n debugShow('erode_otsu', negated_erode)\n opening = cv2.morphologyEx(negated_erode,cv2.MORPH_OPEN,np.ones((5,5),np.uint8),iterations=2)\n debugShow('opening', opening)\n double_opening = cv2.morphologyEx(opening,cv2.MORPH_OPEN,np.ones((3,3),np.uint8),iterations=5)\n debugShow('double_opening', double_opening)\n double_opening_dilated_3x3 = cv2.dilate(double_opening,np.ones((3,3),np.uint8),iterations=4)\n debugShow('dilated_3x3', double_opening_dilated_3x3)\n # finding the contours in the morphologically transformed image\n contours_otsu,_ = cv2.findContours(double_opening_dilated_3x3,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n # debugShowContours('contours', double_opening_dilated_3x3, contours_otsu)\n # iniatialising the empty angles list to collet the angles of each contour\n angles = []\n\n # obtaining the angles of each contour using a for loop\n for cnt in range(len(contours_otsu)):\n # the last output of the cv2.minAreaRect() is the orientation of the contour\n rect = cv2.minAreaRect(contours_otsu[cnt])\n\n # appending the angle to the angles-list\n angles.append(rect[-1])\n \n # finding the median of the collected angles\n angles.sort()\n median_angle = np.median(angles)\n\n 
# returning the median angle\n return median_angle\n\n# funtion to correct the median-angle to give it to the cv2.warpaffine() function\ndef corrected_angle(angle):\n if 0 <= angle <= 90:\n corrected_angle = angle - 90\n elif -45 <= angle < 0:\n corrected_angle = angle - 90\n elif -90 <= angle < -45:\n corrected_angle = 90 + angle\n return corrected_angle\n\ndef rotate(img, angle):\n (h, w) = img.shape[:2]\n center = (w // 2, h // 2)\n # print('center and radian:{}, {}', center, math.radians(angle))\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\n# https://github.com/TarunChakitha/OCR/blob/master/OCR.py\n# https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv\ndef getLines(img, low_threshold, min_line_length, line_gap, granulity):\n rho = 1 # distance resolution in pixels of the Hough grid\n theta = np.pi / 180 / granulity # angular resolution in radians of the Hough grid\n lines = cv2.HoughLinesP(img, rho, theta, low_threshold, np.array([]), min_line_length, line_gap)\n return lines\n\ndef filterHVLines(lines, standard_degree):\n hlines = []\n for line in lines:\n for x1, y1, x2, y2 in line:\n degree = math.degrees(math.atan2(y1-y2, x2-x1))\n degree = degree - standard_degree\n if abs(degree) < 5:\n hlines.append(line)\n return hlines\n\ndef getHLines(img, low_threshold, min_line_length, line_gap, granulity):\n lines = getLines(img, low_threshold, min_line_length, line_gap, granulity)\n return filterHVLines(lines, 0)\n\ndef getVLines(img, low_threshold, min_line_length, line_gap, granulity):\n lines = getLines(img, low_threshold, min_line_length, line_gap, granulity)\n return filterHVLines(lines, 90)\n\ndef getAverageAngles(standard_degree, lines):\n filtered = []\n filteredlines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n degree = math.degrees(math.atan2(y1-y2, x2-x1))\n degree = degree - standard_degree\n if abs(degree) < 5:\n filtered.append(degree)\n filteredlines.append(line)\n return filteredAngle(filtered), filteredlines\n\ndef drawLines(img, lines):\n imgcopy = np.copy(img) * 0\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(imgcopy,(x1,y1),(x2,y2),(255,255,255),2)\n # debugShow('drawlines', imgcopy)\n return imgcopy\n\ndef deskew(img):\n debug = False\n low_threshold = 30\n line_length_unit = int(img.shape[1] / 10)\n angle = 0\n filteredlines = []\n for multiple in reversed(range(9)):\n min_line_length = multiple * line_length_unit\n line_gap = 10\n hlines = getHLines(img, low_threshold, min_line_length,line_gap, 5)\n # print('max line, # of lines:{},{}'.format(min_line_length, len(hlines)))\n if len(hlines) > 3:\n hlines = getHLines(img, low_threshold, min_line_length, line_gap, 10)\n angle, filteredlines = getAverageAngles(0, hlines)\n if len(filteredlines) > 3:\n break\n # print(angle)\n debugShow('lines', drawLines(img, filteredlines), debug)\n angle = angle * -1\n rotatedimg = rotate(img, angle)\n return rotatedimg, angle\n\ndef deskewFromVline(img):\n low_threshold = 30\n line_length_unit = int(img.shape[0] / 10)\n angle = 0\n filteredlines = []\n for multiple in reversed(range(9)):\n min_line_length = multiple * line_length_unit\n line_gap = 10\n vlines = getVLines(img, low_threshold, min_line_length,line_gap, 5)\n # print('max line, # of lines:{},{}'.format(min_line_length, len(vlines)))\n if len(vlines) > 3:\n vlines = getVLines(img, low_threshold, min_line_length, line_gap, 10)\n angle, filteredlines = 
getAverageAngles(90, vlines)\n if len(filteredlines) > 3:\n break\n # print(angle)\n debugShow('lines', drawLines(img, filteredlines))\n angle = angle * -1\n rotatedimg = rotate(img, angle)\n return rotatedimg\n\ndef rotatePoint(point, center, angrad:float):\n point = (point[0] - center[0], point[1] - center[1])\n x = math.cos(angrad) * point[0] - math.sin(angrad) * point[1]\n y = math.sin(angrad) * point[0] + math.cos(angrad) * point[1]\n point = (int(x + center[0]), int(y + center[1]))\n return point\n\ndef recoverOriginalPoint(orgsize, resized, skewnessRad: float, topleft, bottomright) -> List[tuple(int, int)]:\n resizedratio = orgsize[0] / resized[0]\n resizedx1 = topleft[0] * resizedratio\n resizedx2 = bottomright[0] * resizedratio\n resizedy1 = topleft[1] * resizedratio\n resizedy2 = bottomright[1] * resizedratio\n center = (orgsize[0] // 2, orgsize[1] // 2)\n point1 = (resizedx1, resizedy1)\n point2 = (resizedx1, resizedy2)\n point3 = (resizedx2, resizedy2)\n point4 = (resizedx2, resizedy1)\n reverseang = skewnessRad\n orgpoint1 = rotatePoint(point1, center, reverseang)\n orgpoint2 = rotatePoint(point2, center, reverseang)\n orgpoint3 = rotatePoint(point3, center, reverseang)\n orgpoint4 = rotatePoint(point4, center, reverseang)\n rtn = [orgpoint1, orgpoint2, orgpoint3, orgpoint4]\n return rtn\n\nif __name__ == '__main__':\n\n img = cv2.imread(IMG_FILE)\n\n # 0. Converting color to grey & binarization\n thresh_inv = convertColor(img)\n # 1. Resizing - Upsampling or Downsampling\n resized = resizeImage(thresh_inv)\n # debugShow('resizeImage', resized)\n # 2. deskew\n deskewed = deskew(resized)\n debugShow('deskewed', deskewed)\n deskewed = deskewFromVline(deskewed)\n debugShow('deskewed', deskewed)\n","repo_name":"kpyopark/pytesseract_tableform_text","sub_path":"deskew.py","file_name":"deskew.py","file_ext":"py","file_size_in_byte":13719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
+{"seq_id":"8264500286","text":"closing = [']', '}', ')']\nbracket_dict = {'[': ']', '{': '}', '(': ')'}\nfrom Stack import stack\n\n\ndef check_balance(string):\n string = list(string)\n br_stack = stack.Stack()\n for bracket in string:\n if bracket in closing:\n if br_stack.size() == 0 or bracket_dict[br_stack.pop()] != bracket:\n return 'Несбалансированно'\n else:\n br_stack.push(bracket)\n if br_stack.size() != 0:\n return 'Несбалансированно'\n return 'Cбалансированно'\n\n\nprint(check_balance('[]{({})}'))\n","repo_name":"krushmuk/stack","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"72837407029","text":"from .pexchange import ccxt, ccxt_async, httpx\nfrom model import MarketOrder\n\n\nclass Binance:\n def __init__(self, key, secret):\n self.future = ccxt.binance({\n 'apiKey': key,\n 'secret': secret,\n 'enableRateLimit': True,\n 'options': {\n 'defaultType': 'future'\n }\n })\n self.future_async = ccxt_async.binance({\n 'apiKey': key,\n 'secret': secret,\n 'options': {\n 'defaultType': 'future'\n }\n })\n self.spot = ccxt.binance({\n 'apiKey': key,\n 'secret': secret,\n })\n self.spot_async = ccxt_async.binance({\n 'apiKey': key,\n 'secret': secret,\n })\n self.spot.load_markets()\n self.future.load_markets()\n self.order_info: MarketOrder = None\n\n def parse_quote(self, quote: str):\n if self.order_info is None:\n return quote.replace(\"PERP\", \"\")\n else:\n if self.order_info.is_futures:\n return quote.replace(\"PERP\", \"\")\n else:\n return quote\n\n def parse_symbol(self, base: str, quote: str):\n quote = self.parse_quote(quote)\n if self.order_info is None:\n return f\"{base}{quote}\"\n else:\n if self.order_info.is_futures:\n return f\"{base}/{quote}\"\n else:\n return f\"{base}/{quote}\"\n\n def parse_side(self, side: str):\n if side.startswith(\"entry/\") or side.startswith(\"close/\"):\n return side.split(\"/\")[-1]\n else:\n return side\n\n def get_amount(self, base, quote, amount, percent) -> float:\n if amount is not None and percent is not None:\n raise Exception(\"amount와 percent는 동시에 사용할 수 없습니다\")\n elif amount is not None:\n result = amount\n elif percent is not None:\n if self.order_info.side in (\"buy\", \"entry/buy\", \"entry/sell\"):\n cash = self.get_balance(quote) * percent/100\n current_price = self.fetch_price(base, quote)\n result = cash / current_price\n elif self.order_info.side in (\"sell\", \"close/buy\", \"close/sell\"):\n symbol = self.parse_symbol(base, quote)\n free_amount = self.get_futures_position(symbol) if self.order_info.is_crypto and self.order_info.is_futures else self.get_balance(base)\n result = free_amount * float(percent)/100\n else:\n raise Exception(\"amount와 percent 중 하나는 입력해야 합니다\")\n return result\n\n def market_order(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n return self.spot.create_order(symbol, type.lower(), side.lower(), amount)\n\n async def market_order_async(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n return await self.spot_async.create_order(symbol, type.lower(), side.lower(), amount)\n\n def market_buy(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None, buy_percent: float = None):\n buy_amount = self.get_amount(base, quote, amount, buy_percent)\n return self.market_order(base, quote, type, side, buy_amount)\n\n async def market_buy_async(self, base: str, quote: str, type: str, side: str, amount: float, price: float = None, buy_percent: float = None):\n buy_amount = self.get_amount(base, quote, amount, buy_percent)\n return await self.market_order_async(base, quote, type, side, buy_amount)\n\n def market_sell(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, sell_percent: float = None):\n sell_amount = self.get_amount(base, quote, amount, sell_percent)\n return self.market_order(base, quote, type, side, sell_amount)\n\n async def market_sell_async(self, base: str, quote: str, type: str, side: 
str, amount: float, price: str = None, sell_percent: float = None):\n sell_amount = self.get_amount(base, quote, amount, sell_percent)\n return await self.market_order_async(base, quote, type, side, sell_amount)\n\n def is_hedge_mode(self):\n response = self.future.fapiPrivate_get_positionside_dual()\n if response['dualSidePosition']:\n return True\n else:\n return False \n\n def market_entry(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, entry_percent: float = None, leverage: int = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n entry_amount = self.get_amount(base, quote, amount, entry_percent)\n if leverage is not None:\n self.set_leverage(leverage, symbol)\n try:\n return self.future.create_order(symbol, type.lower(), side, abs(entry_amount))\n except Exception as e:\n if \"position side does not match\" in str(e):\n if side == \"buy\":\n positionSide = \"LONG\"\n elif side == \"sell\":\n positionSide = \"SHORT\"\n return self.future.create_order(symbol, type.lower(), side, abs(entry_amount), params={'positionSide': positionSide})\n else:\n raise Exception(\"진입 실패\")\n\n \n\n def market_long_entry(self, base: str, quote: str, amount: float, price:str =None, entry_percent: float = None, leverage: int = None):\n return self.market_entry(base, quote, \"market\", \"entry/buy\", amount, price, entry_percent, leverage)\n \n def market_short_entry(self, base: str, quote: str, amount: float, price:str =None, entry_percent: float = None, leverage: int = None):\n return self.market_entry(base, quote, \"market\", \"entry/sell\", amount, price, entry_percent, leverage)\n\n # def market_stop_order(self, base: str, quote: str, type: str, side: str, amount: float, price: float, stop_price: float):\n # symbol = f\"{base}/{quote}\"\n # return self.future.create_stop_market_order(symbol, type.lower(), side.lower(), amount, price, {\"stopPrice\": stop_price})\n\n def market_sltp_order(self, base: str, quote: str, type: str, side: str, amount: float, stop_price: float, profit_price: float):\n symbol = self.parse_symbol(base, quote)\n inverted_side = 'sell' if side.lower() == 'buy' else 'buy' # buy면 sell, sell이면 buy * 진입 포지션과 반대로 주문 넣어줘 야함\n self.future.create_order(symbol, \"STOP_MARKET\", inverted_side, amount, None, {\"stopPrice\": stop_price, \"newClientOrderId\": \"STOP_MARKET\"}) # STOP LOSS 오더\n self.future.create_order(symbol, \"TAKE_PROFIT_MARKET\", inverted_side, amount, None, {\"stopPrice\": profit_price, \"newClientOrderId\": \"TAKE_PROFIT_MARKET\"}) # TAKE profit 오더\n\n # response = self.future.private_post_order_oco({\n # 'symbol': self.future.market(symbol)['id'],\n # 'side': 'BUY', # SELL, BUY\n # 'quantity': self.future.amount_to_precision(symbol, amount),\n # 'price': self.future.price_to_precision(symbol, profit_price),\n # 'stopPrice': self.future.price_to_precision(symbol, stop_price),\n # # 'stopLimitPrice': self.future.price_to_precision(symbol, stop_limit_price), # If provided, stopLimitTimeInForce is required\n # # 'stopLimitTimeInForce': 'GTC', # GTC, FOK, IOC\n # # 'listClientOrderId': exchange.uuid(), # A unique Id for the entire orderList\n # # 'limitClientOrderId': exchange.uuid(), # A unique Id for the limit order\n # # 'limitIcebergQty': exchangea.amount_to_precision(symbol, limit_iceberg_quantity),\n # # 'stopClientOrderId': exchange.uuid() # A unique Id for the stop loss/stop loss limit leg\n # # 'stopIcebergQty': exchange.amount_to_precision(symbol, 
stop_iceberg_quantity),\n # # 'newOrderRespType': 'ACK', # ACK, RESULT, FULL\n # })\n\n async def market_entry_async(self, base: str, quote: str, type: str, side: str, amount: float, price: str = None, entry_percent: float = None, leverage: int = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n entry_amount = self.get_amount(base, quote, amount, entry_percent)\n if leverage is not None:\n self.set_leverage(leverage, symbol)\n return await self.future_async.create_order(symbol, type.lower(), side, abs(entry_amount))\n\n def market_close(self, base: str, quote: str, type: str, side: str, amount: float = None, price: str = None, close_percent: str = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n close_amount = self.get_amount(base, quote, amount, close_percent)\n try:\n return self.future.create_order(symbol, type.lower(), side, close_amount, params={\"reduceOnly\": True})\n except Exception as e:\n if \"position side does not match\" in str(e):\n if side == \"buy\":\n positionSide = \"SHORT\"\n elif side == \"sell\":\n positionSide = \"LONG\"\n return self.future.create_order(symbol, type.lower(), side, close_amount, params={'positionSide': positionSide})\n else:\n raise Exception(\"종료 실패\")\n\n \n def market_long_close(self, base: str, quote: str, amount: float = None, price: str = None, close_percent: str = None):\n return self.market_close(base, quote, \"market\", \"close/sell\", amount, price, close_percent)\n\n def market_short_close(self, base: str, quote: str, amount: float = None, price: str = None, close_percent: str = None):\n return self.market_close(base, quote, \"market\", \"close/buy\", amount, price, close_percent)\n\n async def market_close_async(self, base: str, quote: str, type: str, side: str, amount: float = None, price: str = None, close_percent: str = None):\n symbol = self.parse_symbol(base, quote)\n side = self.parse_side(side)\n quote = self.parse_quote(quote)\n close_amount = self.get_amount(base, quote, amount, close_percent)\n return await self.future_async.create_order(symbol, type.lower(), side, close_amount, params={\"reduceOnly\": True})\n\n def set_leverage(self, leverage, symbol):\n self.future.set_leverage(leverage, symbol)\n\n def fetch_ticker(self, base: str, quote: str):\n symbol = self.parse_symbol(base, quote)\n if self.order_info.is_futures:\n return self.future.fetch_ticker(symbol)\n else:\n return self.spot.fetch_ticker(symbol)\n\n def fetch_price(self, base: str, quote: str):\n return self.fetch_ticker(base, quote)[\"last\"]\n\n def get_balance(self, base: str):\n balance = self.future.fetch_free_balance().get(base) if self.order_info.is_crypto and self.order_info.is_futures else self.spot.fetch_free_balance().get(base)\n if balance is None or balance == 0:\n raise Exception(\"거래할 수량이 없습니다\")\n return balance\n\n def get_futures_position(self, symbol):\n position = self.future.fetch_positions_risk(symbols=[symbol])\n if position:\n balance = position[0].get(\"contracts\")\n if balance is None or balance == 0:\n raise Exception(\"거래할 수량이 없습니다\")\n return balance\n else:\n raise Exception(\"거래할 수량이 없습니다\")\n\n def get_listen_key(self):\n url = 'https://fapi.binance.com/fapi/v1/listenKey'\n\n listenkey = httpx.post(url, headers={'X-MBX-APIKEY': self.future.apiKey}).json()[\"listenKey\"]\n return 
listenkey\n","repo_name":"jangdokang/poabot","sub_path":"exchange/binance.py","file_name":"binance.py","file_ext":"py","file_size_in_byte":12099,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
+{"seq_id":"10312440330","text":"from socket import *\n\nserver = socket(AF_INET, SOCK_STREAM)\nserver.bind(('',9999))\nserver.listen(5)\nprint(\"waiting .... \")\n\n# 여기다가 accept하면 에러가 안남\nclient, addr = server.accept()\nprint(\"Connect from :\", addr)\n\nwhile True:\n msg = client.recv(1024)\n if not msg :\n break\n\n message = msg.decode()\n\n msg = message.split(\" \")\n fi = int(msg[0])\n cal = msg[1]\n se = int(msg[2])\n if cal == \"+\":\n client.send(str((lambda x, y : x+y)(fi,se)).encode())\n elif cal == \"-\":\n client.send(str((lambda x, y : x-y)(fi,se)).encode())\n elif cal == \"*\":\n client.send(str((lambda x, y : x*y)(fi,se)).encode())\n elif cal == \"/\":\n client.send(str('%.1f' %(lambda x,y : x/y)(fi,se)).encode())\n\nclient.close()","repo_name":"jjimini98/Network-Programming","sub_path":"Homework_Review/hw5/hw5_server.py","file_name":"hw5_server.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"26528457089","text":"#! /bin/env python\r\n#\r\n# Michael Gibson 27 April 2015\r\n\r\ndef data_to_result(header, data, data_present):\r\n \"\"\"Moves the header and data (if present) into a common object.\"\"\"\r\n\r\n result = {}\r\n result['notes'] = header['notes']\r\n result['frequency_parameters'] = header['frequency_parameters']\r\n\r\n if header['num_amplifier_channels'] > 0:\r\n result['amplifier_channels'] = header['amplifier_channels']\r\n if data_present:\r\n result['amplifier_data'] = data['amplifier_data']\r\n result['stim_data'] = data['stim_data']\r\n result['t_amplifier'] = data['t_amplifier']\r\n result['spike_triggers'] = header['spike_triggers']\r\n if header['dc_amplifier_data_saved']:\r\n result['dc_amplifier_data'] = data['dc_amplifier_data']\r\n\r\n if header['num_board_adc_channels'] > 0:\r\n result['board_adc_channels'] = header['board_adc_channels']\r\n if data_present:\r\n result['board_adc_data'] = data['board_adc_data']\r\n result['t_board_adc'] = data['t_board_adc']\r\n\r\n if header['num_board_dac_channels'] > 0:\r\n result['board_dac_channels'] = header['board_dac_channels']\r\n if data_present:\r\n result['board_adc_data'] = data['board_adc_data']\r\n result['t_board_dac'] = data['t_board_dac']\r\n\r\n if header['num_board_dig_in_channels'] > 0:\r\n result['board_dig_in_channels'] = header['board_dig_in_channels']\r\n if data_present:\r\n result['board_dig_in_data'] = data['board_dig_in_data']\r\n result['t_dig'] = data['t_dig']\r\n\r\n if header['num_board_dig_out_channels'] > 0:\r\n result['board_dig_out_channels'] = header['board_dig_out_channels']\r\n if data_present:\r\n result['board_dig_out_data'] = data['board_dig_out_data']\r\n result['t_dig'] = data['t_dig']\r\n\r\n return result\r\n","repo_name":"zekearneodo/swissknife","sub_path":"swissknife/bci/core/intan_rhs/util/data_to_result.py","file_name":"data_to_result.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"94"}
+{"seq_id":"71101936311","text":"#Jules Henry, Ahmir Ghorbanian\n#Spring 2020\n\n#Script to test functions of other scripts\n\n#NOT TO BE USED WITH ANY OTHER FILE FROM PROJECT\n\nimport filter_by_handle\nimport tweepy\n\nCONSUMER_KEY = 'LMxsDbA4lx7RqWhf2DqGeM1yx'\nCONSUMER_SECRET = 'azc96uPycF05zlIslDudv6YaWM40OIWhOd22VBBFVsUVjtdwdp'\nACCESS_KEY = '228978699-mFQ0w0U3rEvohSQnuADEOfgu3rqQSIIVEeMMQrbU'\nACCESS_SECRET = 'cikbqBaSgseWCHIJm3NRXx3WRDgO9zRLkEiSoQest0T7i'\n\n\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth, wait_on_rate_limit = True)\n\n\nnamelist = filter_by_handle.get_info_minute()\n\nprint(len(namelist).__str__() + \" accounts\")\n\nfor x in namelist:\n try:\n user = api.get_user(x)\n screen_name = user.screen_name\n verified = user.verified.__str__()\n protect = user.protected.__str__()\n num_tweets = user.statuses_count.__str__()\n bio = user.description.__str__()\n link = user.url.__str__()\n following = user.friends_count.__str__()\n followers = user.followers_count.__str__()\n id = user.id.__str__()\n favorites = user.favourites_count.__str__()\n print(\"screen name: @\" + screen_name)\n print(\"verified: \" + verified)\n #print(\"private: \" + protect)\n print(\"number of tweets: \" + num_tweets)\n #print(\"link in bio: \" + bio)\n print(\"following: \" + following)\n print(\"followers: \" + followers)\n print(\"favorites: \" + favorites)\n print(\" \")\n print(\"-----------\")\n print(\" \")\n\n except:\n print(\"user couldnt be fetched\")\n print(\" \")\n print(\"-----------\")\n print(\" \")\n\n\n\n\n\n","repo_name":"jululules/4823","sub_path":"newsfilter/cred_validity.py","file_name":"cred_validity.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"19777910109","text":"from rest_framework import serializers\n\nfrom ..messages import (\n INVALID_TIME_MESSAGE,\n no_class_found,\n timetable_clash_message,\n)\nfrom ..models import Classes, TimeTable\nfrom .classes_serializer import ListAllClassesSerializer\n\n\nclass TimeTableSerializer(serializers.ModelSerializer):\n _class_ = serializers.UUIDField()\n\n class Meta:\n model = TimeTable\n exclude = [\"_class\"]\n\n def validate(self, data):\n days = data.get(\"days\")\n start_time = data.get(\"start_time\")\n end_time = data.get(\"end_time\")\n room_no = data.get(\"room_no\")\n _class = data.get(\"_class_\")\n is_class_exists = Classes.objects.filter(id=_class).exists()\n\n if not is_class_exists:\n raise serializers.ValidationError(no_class_found(_class))\n\n if start_time > end_time:\n raise serializers.ValidationError(INVALID_TIME_MESSAGE)\n\n if TimeTable.objects.filter(\n start_time__lt=end_time,\n end_time__gt=start_time,\n room_no=room_no,\n days=days,\n ).exists():\n\n raise serializers.ValidationError(timetable_clash_message(room_no))\n\n timetable: TimeTable = TimeTable.objects.create(\n days=days,\n start_time=start_time,\n end_time=end_time,\n room_no=room_no,\n _class=Classes.objects.get(id=_class),\n )\n\n timetable.save()\n\n return data\n\n\nclass PureTimeTableSerializer(serializers.ModelSerializer):\n _class = ListAllClassesSerializer(read_only=True)\n\n class Meta:\n model = TimeTable\n exclude = [\"id\"]\n","repo_name":"AhzamAhmed6/online_school","sub_path":"src/classes/serializer/timetable_serializer.py","file_name":"timetable_serializer.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"42737140730","text":"import json\nimport requests\nfrom hatebase import HatebaseAPI\n\n\nkey = \"TuwmngrrxcytZkgqyfvtpdUb4yjsKsru\"\n\nhatebase = HatebaseAPI({\"key\": key})\nfilters = {'is_about_nationality': \"false\", 'is_about_ethnicity':\"false\",'is_about_religion':\"false\",'is_about_gender':\"false\",'is_about_sexual_orientation':\"false\",'is_about_disability':\"false\",'is_about_class':\"true\", 'language': 'ENG', 'country': 'US', 'year': \"2015\"}\nformat = \"json\"\njson_response = hatebase.getSightings(filters=filters, format=format)\n\nwith open('classOnly2015.txt', 'w+') as outfile:\n \toutfile.write(json.dumps(json_response, indent=4))\n","repo_name":"chingyuany/Twitter-hatespeech-detection","sub_path":"HateBaseAPICode/hateBaseAPI.py","file_name":"hateBaseAPI.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"}
+{"seq_id":"40859222268","text":"#You are given a string and your task is to swap cases.\n# In other words, convert all lowercase letters to uppercase letters and vice versa.\n#For Example:\n#Www.HackerRank.com → wWW.hACKERrANK.COM\n#Pythonist 2 → pYTHONIST 2\n\n\ns = input()\nnew_string = \"\"\nfor i in range(len(s)):\n if s[i].isupper():\n new_string += s[i].lower()\n else:\n new_string += s[i].upper()\nprint(new_string)","repo_name":"Rashid786-nadaf/100-days-of-code","sub_path":"swap case.py","file_name":"swap case.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
+{"seq_id":"72531862388","text":"# 10-8. Cats and Dogs: Make two files, cats.txt and dogs.txt. Store at\n# least three names of cats in the first file and three names of dogs\n# in the second file. Write a program that tries to read these files\n# and print the contents of the file to the screen. Wrap your code in\n# a try-except block to catch the FileNotFound error, and print a\n# friendly message if a file is missing. Move one of the files to a\n# different location on your system, and make sure the code in the\n# except block executes properly.\n\nprint(\"\\nEx 10.8 Cats and Dogs\\n\" + \"-\"*70)\n\ndef read_txt(filename):\n try:\n with open(filename, encoding='utf-8') as file_object:\n lines = file_object.readlines()\n except FileNotFoundError:\n print(f\"\\n{filename} does not exist.\")\n else:\n print(f\"\\n{filename}:\")\n for line in lines:\n print(f\"- {line.title().rstrip()}\")\n\nfilenames = ['cats.txt', 'dogs.txt', 'cat.txt', 'dog.txt']\nfor filename in filenames:\n read_txt(filename)","repo_name":"TrongPhamDA/Python-Crash-Course-2nd-edition","sub_path":"chapter_10/tryityourself108.py","file_name":"tryityourself108.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"}
+{"seq_id":"7555688028","text":"import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2 as cv\nimport math\n\nline_collection = []\n\n\n#take the original frame and convert it to greyscale\ndef apply_greyscale(image):\n return cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n\n#apply a Gaussian Blur to the greyed image\ndef add_blur(grey):\n kernel_size = 5\n return cv.GaussianBlur(grey,(kernel_size, kernel_size), 0)\n\n#Implemented the canny algorithim on the Blurred photo\ndef apply_canny(grey):\n blur_grey = add_blur(grey)\n\n #declare the low and high thresholds. The Canny algorithim will identify edges where the gradient is near the midpoint of those\n # two values\n low_threshold = 60\n high_threshold = 100\n return cv.Canny(blur_grey, low_threshold, high_threshold)\n\n#add a mask to try to eliminate the amount of edges that are displayed in the final photo\ndef add_mask(image, edges):\n mask = np.zeros_like(edges)\n ignore_mask_color = 255\n\n imshape = image.shape\n\n\n vertices = np.array([[(0, imshape[0]),(450, 290), (490, 290), (imshape[1],imshape[0])]], dtype=np.int32)\n cv.fillPoly(mask, vertices, ignore_mask_color)\n return cv.bitwise_and(edges, mask)\n\n#check if a point is within the valid area (within the region of interest)\ndef valid_point(x, y, left, right, top, bottom):\n if(x >= left and x <= right):\n if(y >= bottom and y <= top):\n return True\n return False\n\n#compare the lines on the edge of the region of interest to the lane lines (or any edges found)\ndef compare_lines(x1, y1, x2, y2, start, end, is_left):\n compared_slope = ((end[1] - start[1])/(end[0]- start[0]))\n drawn_slope = ((y2-y1)/(x2-x1))\n\n #left side\n if(is_left):\n #compare to the left side of the region of interest\n if(valid_point(x1, y1, start[0], end[0], end[1], start[1]) and valid_point(x2, y2, start[0], end[0], end[1], start[1]) ):\n #return whether the slope of the edge is less than the slope of the left edge of the region of interest\n return (compared_slope > drawn_slope)\n elif(not is_left):\n #not left side so flip the slope\n compared_slope = -compared_slope\n drawn_slope = -drawn_slope\n if(valid_point(x1, y1, end[0], start[0], start[1], end[1]) and valid_point(x2, y2, end[0], start[0], start[1], end[1])):\n return (compared_slope > drawn_slope)\n\n return (compared_slope > drawn_slope)\n\n#draw the lines on the image\ndef drawLines(image):\n #create a greyscaled image\n grey = apply_greyscale(image)\n #apply the canny algorithim to the grey photo\n edges = apply_canny(grey)\n\n # add the mask to the image\n mask = add_mask(image, edges)\n\n #declare the parameters for the HoughLines function\n rho = 1\n theta = np.pi/180\n threshold = 1\n min_line_length = 10\n max_line_gap = 1\n line_image = np.copy(image)*0\n\n lines = cv.HoughLinesP(mask, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)\n\n #define the edges of the rgion of interest\n left_bottom = (135, 539)\n right_bottom = (940,539)\n apex = (489, 300)\n\n for line in lines:\n for x1,y1,x2,y2 in line:\n #to the left of the apex (compute the values accordingly)\n if(x1 < apex[0] and x2 <= apex[0]):\n if(compare_lines(x1, y1, x2, y2, left_bottom, apex, True)):\n #valid points draw the line\n line_collection.append([x1, y1, x2, y2, (y2-y1/x2-x1), math.sqrt((x2-x1)**2 + (y2-y1)**2)])\n cv.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n elif(x1 > apex[0] and x2 >= apex[0]):\n if(compare_lines(x1, y1, x2, y2, right_bottom, apex, False)):\n line_collection.append([x1, y1, x2, y2, 
((y2-y1)/(x2-x1)), math.sqrt((x2-x1)**2 + (y2-y1)**2)])\n cv.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)\n\n color_edges = np.dstack((edges, edges, edges))\n\n for drawn in line_collection:\n print(drawn, \"\\n\")\n\n #return the completed images\n return cv.addWeighted(image, 0.8, line_image, 1, 0)","repo_name":"GiffinOsborne/OpenCV_Lane_Lines_V1","sub_path":"Finding_Lane_Lines_OOP_Version_1/houghLines.py","file_name":"houghLines.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"41231749559","text":"import binascii\nimport bisect\nimport hashlib\nimport hmac\nimport itertools\nimport os\nimport sys\nimport unicodedata\n#from pbkdf2 import PBKDF2\n\nPBKDF2_ROUNDS = 2048\n\n\nclass ConfigurationError(Exception):\n pass\n\n\n# From \ndef binary_search(a, x, lo=0, hi=None): # can't use a to specify default for hi\n hi = hi if hi is not None else len(a) # hi defaults to len(a)\n pos = bisect.bisect_left(a, x, lo, hi) # find insertion position\n return (pos if pos != hi and a[pos] == x else -1) # don't walk off the end\n\n\nclass Mnemonic(object):\n def __init__(self, language):\n self.radix = 2048\n with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:\n self.wordlist = [w.strip().decode('utf8') if sys.version < '3' else w.strip() for w in f.readlines()]\n if len(self.wordlist) != self.radix:\n raise ConfigurationError('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))\n\n @classmethod\n def _get_directory(cls):\n return os.path.join(os.path.dirname(__file__), 'wordlist')\n\n @classmethod\n def list_languages(cls):\n return [f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt')]\n\n @classmethod\n def normalize_string(cls, txt):\n if isinstance(txt, str if sys.version < '3' else bytes):\n utxt = txt.decode('utf8')\n elif isinstance(txt, unicode if sys.version < '3' else str): # noqa: F821\n utxt = txt\n else:\n raise TypeError(\"String value expected\")\n\n return unicodedata.normalize('NFKD', utxt)\n\n @classmethod\n def detect_language(cls, code):\n code = cls.normalize_string(code)\n first = code.split(' ')[0]\n languages = cls.list_languages()\n\n for lang in languages:\n mnemo = cls(lang)\n if first in mnemo.wordlist:\n return lang\n\n raise ConfigurationError(\"Language not detected\")\n\n def generate(self, strength=128):\n if strength not in [128, 160, 192, 224, 256]:\n raise ValueError('Strength should be one of the following [128, 160, 192, 224, 256], but it is not (%d).' % strength)\n return self.to_mnemonic(os.urandom(strength // 8))\n\n # Adapted from \n def to_entropy(self, words):\n if not isinstance(words, list):\n words = words.split(' ')\n if len(words) not in [12, 15, 18, 21, 24]:\n raise ValueError('Number of words must be one of the following: [12, 15, 18, 21, 24], but it is not (%d).' % len(words))\n # Look up all the words in the list and construct the\n # concatenation of the original entropy and the checksum.\n concatLenBits = len(words) * 11\n concatBits = [False] * concatLenBits\n wordindex = 0\n if self.detect_language(' '.join(words)) == 'english':\n use_binary_search = True\n else:\n use_binary_search = False\n for word in words:\n # Find the words index in the wordlist\n ndx = binary_search(self.wordlist, word) if use_binary_search else self.wordlist.index(word)\n if ndx < 0:\n raise LookupError('Unable to find \"%s\" in word list.' 
% word)\n # Set the next 11 bits to the value of the index.\n for ii in range(11):\n concatBits[(wordindex * 11) + ii] = (ndx & (1 << (10 - ii))) != 0\n wordindex += 1\n checksumLengthBits = concatLenBits // 33\n entropyLengthBits = concatLenBits - checksumLengthBits\n # Extract original entropy as bytes.\n entropy = bytearray(entropyLengthBits // 8)\n for ii in range(len(entropy)):\n for jj in range(8):\n if concatBits[(ii * 8) + jj]:\n entropy[ii] |= 1 << (7 - jj)\n # Take the digest of the entropy.\n hashBytes = hashlib.sha256(entropy).digest()\n if sys.version < '3':\n hashBits = list(itertools.chain.from_iterable(([ord(c) & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))\n else:\n hashBits = list(itertools.chain.from_iterable(([c & (1 << (7 - i)) != 0 for i in range(8)] for c in hashBytes)))\n # Check all the checksum bits.\n for i in range(checksumLengthBits):\n if concatBits[entropyLengthBits + i] != hashBits[i]:\n raise ValueError('Failed checksum.')\n return entropy\n\n def to_mnemonic(self, data):\n if len(data) not in [16, 20, 24, 28, 32]:\n raise ValueError('Data length should be one of the following: [16, 20, 24, 28, 32], but it is not (%d).' % len(data))\n h = hashlib.sha256(data).hexdigest()\n b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \\\n bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]\n result = []\n for i in range(len(b) // 11):\n idx = int(b[i * 11:(i + 1) * 11], 2)\n result.append(self.wordlist[idx])\n if self.detect_language(' '.join(result)) == 'japanese': # Japanese must be joined by ideographic space.\n result_phrase = u'\\u3000'.join(result)\n else:\n result_phrase = ' '.join(result)\n return result_phrase\n\n def check(self, mnemonic):\n mnemonic = self.normalize_string(mnemonic).split(' ')\n # list of valid mnemonic lengths\n if len(mnemonic) not in [12, 15, 18, 21, 24]:\n return False\n try:\n idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)\n b = ''.join(idx)\n except ValueError:\n return False\n l = len(b) # noqa: E741\n d = b[:l // 33 * 32]\n h = b[-l // 33:]\n nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))\n nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]\n return h == nh\n\n def expand_word(self, prefix):\n if prefix in self.wordlist:\n return prefix\n else:\n matches = [word for word in self.wordlist if word.startswith(prefix)]\n if len(matches) == 1: # matched exactly one word in the wordlist\n return matches[0]\n else:\n # exact match not found.\n # this is not a validation routine, just return the input\n return prefix\n\n def expand(self, mnemonic):\n return ' '.join(map(self.expand_word, mnemonic.split(' ')))\n\n #@classmethod\n #def to_seed(cls, mnemonic, passphrase=''):\n #mnemonic = cls.normalize_string(mnemonic)\n #passphrase = cls.normalize_string(passphrase)\n #return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)\n\n\ndef main():\n import binascii\n import sys\n if len(sys.argv) > 1:\n data = sys.argv[1]\n else:\n data = sys.stdin.readline().strip()\n data = binascii.unhexlify(data)\n m = Mnemonic('english')\n print(m.to_mnemonic(data))\n\n\nif __name__ == '__main__':\n main()\n 
\n","repo_name":"snopf/snopf","sub_path":"src/host/pc/bip39_mnemonic_reference_trezor.py","file_name":"bip39_mnemonic_reference_trezor.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"94"}
+{"seq_id":"70746831671","text":"import os\r\n\r\nfrom flask import Flask, make_response, jsonify, request\r\nfrom flask import g\r\nfrom dal.db import Db\r\nfrom transaction.process_transaction import Process_Transaction\r\nfrom exception.app_exception import AppException, ClientException, ServerException\r\n\r\nimport config\r\n\r\nprocess_transaction = Process_Transaction()\r\napp = Flask(__name__)\r\napp.config.from_object(config.Config)\r\n\r\n\r\n@app.errorhandler(AppException)\r\ndef app_error(err):\r\n app.logger.exception(err)\r\n return make_response(jsonify(err.error), err.http_code)\r\n\r\n@app.errorhandler(Exception)\r\ndef handle_generic_error(err):\r\n app.logger.exception(err)\r\n return make_response(jsonify(str(err)), 500)\r\n\r\n@app.route('/process_payment', methods=['POST'])\r\ndef process_payment():\r\n data = request.get_json()\r\n result, code = process_transaction.initialize_payment(data)\r\n return make_response(jsonify(result),code)\r\n\r\ndef init_app(flask_app):\r\n flask_app.config.from_object(config.DEVConfig)\r\n db_instance = Db(flask_app)\r\n print('DB Connection: ' + str(db_instance))\r\n\r\n\r\nif __name__ == '__main__':\r\n init_app(app)\r\n app.run(host='127.0.0.1', port='5000')\r\n","repo_name":"harshilpatel99/Filed_PythonCodingTest","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"2794638063","text":"from typing import List\nfrom collections import Counter\n\n\nclass Solution:\n def countNegatives(self, grid: List[List[int]]) -> int:\n count = 0\n for i in grid:\n c = Counter(i)\n for j, k in c.items():\n if j < 0:\n count += k\n\n return count\n","repo_name":"rich-03/LeetPractice","sub_path":"Problem1351/Problem1351.py","file_name":"Problem1351.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"5556297055","text":"import time\r\nimport os\r\nimport json\r\nimport gi\r\ngi.require_version('Notify', '0.7')\r\nfrom gi.repository import Notify, GObject, Peas, RB\r\nfrom pypresence import Presence\r\nfrom status_prefs import discord_status_prefs\r\n\r\nDEFAULT_APPID = \"589905203533185064\"\r\n\r\nclass DiscordStatus(GObject.Object, Peas.Activatable):\r\n object = GObject.property(type=GObject.Object)\r\n\r\n def __init__(self):\r\n super(DiscordStatus, self).__init__()\r\n\r\n print(f\"discord_status: GOBJECT SELF OBJECT: {self.object}\")\r\n\r\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"settings.json\")\r\n \r\n with open(settings_path) as settings_file:\r\n self.settings = json.load(settings_file)\r\n \r\n self.notify_available = False\r\n self.connected = False\r\n self.streaming = False\r\n self.stream_flag = False\r\n self.playing = False\r\n self.song_started_at = 0\r\n self.playing_date = 0\r\n self.elapsed_time = 0\r\n\r\n def send_notification(self, message):\r\n if self.notify_available and self.settings[\"show_notifs\"]:\r\n Notify.Notification.new(\"Rhythmbox Discord Status Plugin\", message).show()\r\n\r\n \r\n def do_activate(self):\r\n self.notify_available = Notify.init(\"rhythmbox_discord_status\")\r\n\r\n try:\r\n self.rpc = Presence(self.settings[\"appid\"] if \"appid\" in self.settings else DEFAULT_APPID)\r\n self.rpc.connect()\r\n self.connected = True\r\n self.send_notification(\"Connected to Discord\")\r\n except ConnectionRefusedError as err:\r\n print(\"discord_status: failed to connect to discord:\", err)\r\n self.send_notification(f\"Failed to connect to discord: {err}\\nRe-enable the plugin to retry\")\r\n return\r\n\r\n sp = self.object.props.shell_player\r\n self.playing_song_changed_id = sp.connect('playing-song-changed', self.on_playing_song_changed)\r\n self.playing_state_changed_id = sp.connect('playing-changed', self.on_playing_state_changed)\r\n self.elapsed_changed_id = sp.connect('elapsed-changed', self.on_elapsed_changed)\r\n self.playing_changed_id = sp.connect('playing-song-property-changed', self.on_playing_song_property_changed)\r\n \r\n self.rpc.update(state=\"Playback Stopped\", details=\"Rhythmbox Status Plugin\", large_image=\"rhythmbox\", small_image=\"stop\", small_text=\"Stopped\")\r\n\r\n def do_deactivate(self):\r\n sp = self.object.props.shell_player\r\n sp.disconnect(self.playing_song_changed_id)\r\n sp.disconnect(self.playing_state_changed_id)\r\n sp.disconnect(self.elapsed_changed_id)\r\n sp.disconnect(self.playing_changed_id)\r\n\r\n if self.connected:\r\n self.rpc.close()\r\n\r\n if self.notify_available:\r\n Notify.uninit()\r\n\r\n def get_current_song_info(self, sp):\r\n playing_entry = sp.get_playing_entry()\r\n if not playing_entry:\r\n return {\r\n \"album\": \"Unknown\",\r\n \"title\": \"Unknown\",\r\n \"artist\": \"Unknown\",\r\n \"duration\": 0\r\n }\r\n\r\n album = playing_entry.get_string(RB.RhythmDBPropType.ALBUM)\r\n title = playing_entry.get_string(RB.RhythmDBPropType.TITLE)\r\n artist = playing_entry.get_string(RB.RhythmDBPropType.ARTIST)\r\n duration = playing_entry.get_ulong(RB.RhythmDBPropType.DURATION)\r\n\r\n # If there is anything with less than 2 characters, Discord won't show our presence\r\n # So, lets add a cool empty unicode character to the end\r\n if album and len(album) < 2:\r\n album = f\"{album}\"\r\n if title and len(title) < 2:\r\n title = f\"{title}\"\r\n if artist and len(artist) < 2:\r\n artist = f\"{artist}\"\r\n\r\n print(f\"discord_status: 
album={album} artist={artist} title={title} len_al={len(album)} len_art={len(artist)} len_title={len(title)}\")\r\n return {\r\n \"album\": album or \"Unknown\",\r\n \"title\": title or \"Unknown\",\r\n \"artist\": artist or \"Unknown\",\r\n \"duration\": duration or 0\r\n }\r\n\r\n def update_rpc(self, sp, playing):\r\n if not playing and not sp.get_playing_entry():\r\n self.playing = False\r\n\r\n self.rpc.update(\r\n state=\"Playback Stopped\",\r\n details=\"Rhythmbox Status Plugin\",\r\n large_image=\"rhythmbox\",\r\n small_image=\"stop\",\r\n small_text=\"Stopped\"\r\n )\r\n else:\r\n song_info = self.get_current_song_info(sp)\r\n\r\n if self.streaming or self.stream_flag:\r\n self.rpc.update(\r\n state=song_info[\"title\"][0:127],\r\n details=\"Stream\",\r\n large_image=\"rhythmbox\",\r\n small_image=\"play\",\r\n small_text=\"Streaming\",\r\n start=int(time.time())\r\n )\r\n \r\n return\r\n\r\n self.playing = playing\r\n title = song_info[\"title\"]\r\n artist = song_info[\"artist\"]\r\n details = f\"{title} - {artist}\"\r\n pos = sp.get_playing_time().time\r\n start_time = int(time.time()) if self.settings[\"time_style\"] == 1 else int(time.time()) - pos\r\n end_time = (start_time + song_info[\"duration\"] - pos) if self.settings[\"time_style\"] == 1 else None\r\n\r\n self.rpc.update(\r\n state=song_info[\"album\"][0:127],\r\n details=details[0:127],\r\n large_image=\"rhythmbox\",\r\n small_image=\"play\" if playing else \"pause\",\r\n small_text=\"Playing\" if playing else \"Paused\",\r\n start=start_time if playing else None,\r\n end=end_time if playing else None\r\n )\r\n\r\n def on_playing_song_changed(self, sp, entry):\r\n print(f\"discord_status: playing song changed sp={sp} entry={entry}\")\r\n\r\n if not sp.get_playing_entry():\r\n return\r\n\r\n self.song_started_at = int(time.time())\r\n self.playing_date = self.song_started_at\r\n self.elapsed_time = 0\r\n current_song_info = self.get_current_song_info(sp)\r\n\r\n self.streaming = current_song_info[\"duration\"] == 0 and self.streaming\r\n \r\n self.update_rpc(sp, True)\r\n\r\n\r\n def on_playing_state_changed(self, sp, playing):\r\n print(f\"discord_status: playing state changed sp={sp} playing={playing}\")\r\n self.update_rpc(sp, playing)\r\n\r\n def on_elapsed_changed(self, sp, elapsed):\r\n print(f\"discord_status: elapsed changed sp={sp} elapsed={elapsed}\")\r\n\r\n if self.playing:\r\n self.playing_date += 1\r\n\r\n if self.playing_date - elapsed != self.song_started_at and elapsed != 0:\r\n self.playing_date = self.song_started_at + elapsed\r\n print(\"discord_status: elapsed changed too much\")\r\n self.update_rpc(sp, True)\r\n\r\n\r\n def on_playing_song_property_changed(self, sp, uri, property, old, newvalue):\r\n print(f\"discord_status: playing song property changed sp={sp} uri={uri} property={property} old={old} newvalue={newvalue}\")\r\n if property == \"rb:stream-song-title\":\r\n self.streaming = True\r\n self.update_rpc(sp, True)\r\n","repo_name":"ToppleKek/discord-rhythmbox-plugin","sub_path":"discord-status.py","file_name":"discord-status.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"94"}
+{"seq_id":"8249966222","text":"# -*- coding: utf-8 -*- \n# Software: PyCharm\n# Author: df\n# CreateTime: 2022-08-05 14:29\n# file: pachong.py\nimport re # 正则表达式,进行文字匹配\nimport urllib.request, urllib.error # 制定url,获取网页数据 ,\nfrom urllib import parse # 用来解析web需要的字符串\nimport json\nimport pymysql # mysql操作\n\n\ndef main():\n serach = \"java开发\"\n # 处理中文字符搜索问题\n # keysword=parse.quote(serach)\n # 再进行转义才能达到链接里的效果:java%25E5%25BC%2580%25E5%258F%2591\n # 二次编码\n # newkeyword=parse.quote(keysword)\n dataList = getData()\n saveDB(dataList)\n\n\ndef askurl(url):\n # 模拟浏览器头部信息,像对应的url发送信息\n # 有时候403就放cookie就好使了\n head = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.5060.134 Safari/537.36 Edg/103.0.1264.77\",\n }\n request = urllib.request.Request(url=url, headers=head)\n html = \"\"\n try:\n reponse = urllib.request.urlopen(request);\n # 这里51job界面是gbk的模式,如果这里用utf-8则报错,为: 'utf-8' codec can't decode byte 0xa1 in position 293: invalid start byte\n html = reponse.read().decode(\"gbk\")\n # print(html)\n except urllib.error.URLError as e:\n if hasattr(e, \"code\"):\n print(e.code)\n if hasattr(e, \"reason\"):\n print(e.reason)\n return html;\n\n\n# 获取并解析数据\ndef getData():\n # htmlx=open(\"51job.html\",\"r\")\n # bs=BeautifulSoup(htmlx,\"html.parser\")\n # ss=bs.select(\"div\")\n # print(bs)\n serach = \"java开发\"\n keysword = parse.quote(parse.quote(serach))\n page = 0\n\n totalDataList = []\n\n # 循环分页处理,当查询不到数据就跳出循环\n while True:\n page = page + 1;\n url = \"https://search.51job.com/list/010000,000000,0000,00,9,99,\" + keysword + \",2,\" + str(page) + \".html\"\n html = askurl(url)\n print(\"baseUrl\", url)\n\n # 得到脚本数据里需要的数据,得到的数据就是一整个列表,取出下标0则可以进行遍历\n datas = re.findall('window.__SEARCH_RESULT__ =(.*?)', str(html))[0]\n # 转换json可以根据键值对获取数据\n json_data = json.loads(datas)\n engines = json_data['engine_jds']\n\n # 跳出死循环\n if len(engines) == 0:\n break\n\n for engine in engines:\n dataGroup = []\n # 招聘职位\n if engine.get(\"job_name\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"job_name\"))\n # 公司名称\n if engine.get(\"company_name\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"company_name\"))\n # 薪资范围\n if engine.get(\"providesalary_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"providesalary_text\"))\n # 地点\n if engine.get(\"workarea_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"workarea_text\"))\n # 公司类型\n if engine.get(\"companytype_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companytype_text\"))\n # 学历要求\n if engine.get(\"degreefrom\") == \"\":\n dataGroup.append(\"0\")\n else:\n dataGroup.append(engine.get(\"degreefrom\"))\n # 工作年限\n if engine.get(\"workyear\") == \"\":\n dataGroup.append(\"0\")\n else:\n dataGroup.append(engine.get(\"workyear\"))\n # 公司福利\n if engine.get(\"jobwelf\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"jobwelf\"))\n\n # 公司规模\n if engine.get(\"companysize_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companysize_text\"))\n\n # 公司经营方向\n if engine.get(\"companyind_text\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"companyind_text\"))\n\n # 发布时间\n if engine.get(\"updatedate\") == \"\":\n dataGroup.append(\" \")\n else:\n dataGroup.append(engine.get(\"updatedate\"))\n\n\n totalDataList.append(dataGroup)\n return totalDataList\n\n\ndef 
saveDB(dataList):\n conn = pymysql.connect(host=\"localhost\", user=\"root\", password=\"root\", port=3306, db='spider', charset=\"utf8\")\n cursor = conn.cursor()\n\n try:\n for data in dataList:\n for index in range(len(data)):\n if index == 6 or index == 5:\n continue\n data[index] = '\"' + str(data[index]) + '\"'\n\n sql = '''insert into 51job (job_name,company_name,providesalary_text,workarea_text,companytype_text,degreefrom,workyear,jobwelf,companysize_text,companyind_text,updatedate)\n values(%s)'''% \",\".join(data)\n print(sql)\n cursor.execute(sql)\n print(\"保存成功\")\n except Exception as result:\n print(result)\n conn.rollback()\n finally:\n conn.commit()\n cursor.close()\n conn.close()\n\n\n\ndef test():\n ss={\"name\":\"\"}\n print(ss.get(\"name\"))\n if ss.get(\"name\")==\"\":\n print(\"pp\")\n\n\nif __name__ == \"__main__\":\n #test()\n main()\n # parserData(\"\")\n","repo_name":"dufGIT/python-progect","sub_path":"51job/pachong.py","file_name":"pachong.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"23657629773","text":"from collections import defaultdict\n\n#dfs\nclass Solution:\n def validPath(self, n: int, edges: List[List[int]], start: int, end: int) -> bool:\n s = []\n d = defaultdict(list)\n for i in edges:\n d[i[0]].append(i[1])\n d[i[1]].append(i[0])\n\n seen = set()\n s.append(start)\n\n while s:\n n = s.pop()\n if n == end:\n return True\n\n if n not in seen:\n seen.add(n)\n for i in d[n]:\n s.append(i)\n\n return False\n\n\n\n","repo_name":"salonikalsekar/LC","sub_path":"graph_theory/find_if_path_exists.py","file_name":"find_if_path_exists.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"24504285834","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#Import Modules\nimport os\nimport csv\n\n\n# In[16]:\n\n\n#Build the path to the csv file\ncsv_path = os.path.join(\"Resources\",\"budget_data.csv\")\n\n#Open a file handler\nwith open(csv_path,\"r\",newline=\"\") as csv_file:\n \n #connect the csv file with a file reader\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n \n #remove the header\n header = next(csv_reader)\n \n #define counters\n total_months = 0\n total_profit = 0\n first = 0\n first_iteration = True\n \n delta_list = []\n date_list = []\n \n for row in csv_reader:\n \n #Calculate the total months\n total_months += 1\n \n #Calculate total profit\n profit = int(row[1])\n total_profit += profit\n \n #Calculate the change\n second = int(row[1])\n delta = second - first\n \n #Skip the first calculation for delta as it is not a true delta\n if first_iteration == False:\n delta_list.append(delta)\n date_list.append(row[0])\n \n \n \n first_iteration = False\n first = second\n \n #Final Calculations\n max_change = max(delta_list)\n min_change = min(delta_list)\n average_change = round(sum(delta_list)/len(delta_list), 2)\n \n #Identify dates for max and min change\n index = 0\n for index in range(len(delta_list)):\n \n if int(delta_list[index]) == max_change:\n max_index = index\n \n \n if int(delta_list[index]) == min_change:\n min_index = index\n \n \n index += 1\n \nmax_date = date_list[max_index]\nmin_date = date_list[min_index]\n \n#Print report on terminal\nprint(\"Financial Analysis\")\nprint(\"---------------------------\")\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: ${total_profit}\")\nprint(f\"Average Change: ${average_change}\")\nprint(f\"Greatest Increase in Profits: {max_date} (${max_change})\")\nprint(f\"Greatest Decrease in Profits: {min_date} (${min_change})\")\n \n \noutput_file_path = os.path.join(\"Resources\", \"output.txt\")\n\nwith open(output_file_path,\"w\",newline = \"\") as output_file:\n \n output_file.write(\"Financial Analysis\\n\")\n output_file.write(\"-------------------------------\\n\")\n output_file.write(\"Total Months: \" + str(total_months) + \"\\n\")\n output_file.write(\"Total: $\" +str(total_profit) + \"\\n\")\n output_file.write(\"Average Change: $\" + str(average_change) + \"\\n\")\n output_file.write(\"Greatest Increase in Profits: \" + max_date + \" ($\" + str(max_change) + \")\\n\")\n output_file.write(\"Greatest Decrease in Profits: \" + min_date + \" ($\" + str(min_change) + \")\\n\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ahmar-jamal/python-challenge","sub_path":"PyBank/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
+{"seq_id":"43601376005","text":"from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Callable\nfrom typing import Generic, Self, TypeVar\n\nfrom ._exceptions import Error\n\n_T = TypeVar(\"_T\")\n_U = TypeVar(\"_U\")\n\n\nclass Receiver(ABC, Generic[_T]):\n \"\"\"A channel Receiver.\"\"\"\n\n async def __anext__(self) -> _T:\n \"\"\"Await the next value in the async iteration over received values.\n\n Returns:\n The next value received.\n\n Raises:\n StopAsyncIteration: if the receiver stopped producing messages.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n try:\n await self.ready()\n return self.consume()\n except ReceiverStoppedError as exc:\n raise StopAsyncIteration() from exc\n\n @abstractmethod\n async def ready(self) -> bool:\n \"\"\"Wait until the receiver is ready with a value or an error.\n\n Once a call to `ready()` has finished, the value should be read with\n a call to `consume()` (`receive()` or iterated over). The receiver will\n remain ready (this method will return immediately) until it is\n consumed.\n\n Returns:\n Whether the receiver is still active.\n \"\"\"\n\n @abstractmethod\n def consume(self) -> _T:\n \"\"\"Return the latest value once `ready()` is complete.\n\n `ready()` must be called before each call to `consume()`.\n\n Returns:\n The next value received.\n\n Raises:\n ReceiverStoppedError: if the receiver stopped producing messages.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n\n def __aiter__(self) -> Self:\n \"\"\"Initialize the async iterator over received values.\n\n Returns:\n `self`, since no extra setup is needed for the iterator.\n \"\"\"\n return self\n\n async def receive(self) -> _T:\n \"\"\"Receive a message from the channel.\n\n Returns:\n The received message.\n\n Raises:\n ReceiverStoppedError: if there is some problem with the receiver.\n ReceiverError: if there is some problem with the receiver.\n \"\"\"\n try:\n received = await self.__anext__() # pylint: disable=unnecessary-dunder-call\n except StopAsyncIteration as exc:\n # If we already had a cause and it was the receiver was stopped,\n # then reuse that error, as StopAsyncIteration is just an artifact\n # introduced by __anext__.\n if (\n isinstance(exc.__cause__, ReceiverStoppedError)\n # pylint is not smart enough to figure out we checked above\n # this is a ReceiverStoppedError and thus it does have\n # a receiver member\n and exc.__cause__.receiver is self # pylint: disable=no-member\n ):\n raise exc.__cause__\n raise ReceiverStoppedError(self) from exc\n return received\n\n def map(self, call: Callable[[_T], _U]) -> Receiver[_U]:\n \"\"\"Return a receiver with `call` applied on incoming messages.\n\n Args:\n call: function to apply on incoming messages.\n\n Returns:\n A `Receiver` to read results of the given function from.\n \"\"\"\n return _Map(self, call)\n\n\nclass ReceiverError(Error, Generic[_T]):\n \"\"\"An error produced in a [Receiver][frequenz.channels.Receiver].\n\n All exceptions generated by receivers inherit from this exception.\n \"\"\"\n\n def __init__(self, message: str, receiver: Receiver[_T]):\n \"\"\"Create an instance.\n\n Args:\n message: An error message.\n receiver: The [Receiver][frequenz.channels.Receiver] where the\n error happened.\n \"\"\"\n super().__init__(message)\n self.receiver: Receiver[_T] = receiver\n \"\"\"The receiver where the error happened.\"\"\"\n\n\nclass ReceiverStoppedError(ReceiverError[_T]):\n \"\"\"The 
[Receiver][frequenz.channels.Receiver] stopped producing messages.\"\"\"\n\n def __init__(self, receiver: Receiver[_T]):\n \"\"\"Create an instance.\n\n Args:\n receiver: The [Receiver][frequenz.channels.Receiver] where the\n error happened.\n \"\"\"\n super().__init__(f\"Receiver {receiver} was stopped\", receiver)\n\n\nclass _Map(Receiver[_U], Generic[_T, _U]):\n \"\"\"Apply a transform function on a channel receiver.\n\n Has two generic types:\n\n - The input type: value type in the input receiver.\n - The output type: return type of the transform method.\n \"\"\"\n\n def __init__(self, receiver: Receiver[_T], transform: Callable[[_T], _U]) -> None:\n \"\"\"Create a `Transform` instance.\n\n Args:\n receiver: The input receiver.\n transform: The function to run on the input data.\n \"\"\"\n self._receiver: Receiver[_T] = receiver\n \"\"\"The input receiver.\"\"\"\n\n self._transform: Callable[[_T], _U] = transform\n \"\"\"The function to run on the input data.\"\"\"\n\n async def ready(self) -> bool:\n \"\"\"Wait until the receiver is ready with a value or an error.\n\n Once a call to `ready()` has finished, the value should be read with\n a call to `consume()` (`receive()` or iterated over). The receiver will\n remain ready (this method will return immediately) until it is\n consumed.\n\n Returns:\n Whether the receiver is still active.\n \"\"\"\n return await self._receiver.ready() # pylint: disable=protected-access\n\n # We need a noqa here because the docs have a Raises section but the code doesn't\n # explicitly raise anything.\n def consume(self) -> _U: # noqa: DOC502\n \"\"\"Return a transformed value once `ready()` is complete.\n\n Returns:\n The next value that was received.\n\n Raises:\n ChannelClosedError: if the underlying channel is closed.\n \"\"\"\n return self._transform(\n self._receiver.consume()\n ) # pylint: disable=protected-access\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the timer.\"\"\"\n return f\"{type(self).__name__}:{self._receiver}:{self._transform}\"\n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of the timer.\"\"\"\n return f\"{type(self).__name__}({self._receiver!r}, {self._transform!r})\"\n","repo_name":"frequenz-floss/frequenz-channels-python","sub_path":"src/frequenz/channels/_receiver.py","file_name":"_receiver.py","file_ext":"py","file_size_in_byte":6423,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"}
+{"seq_id":"31010107394","text":"# -*- coding: utf-8 -*-\n# czatpro/czat/urls.py\n\nfrom django.conf.urls import url\nfrom czat import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^loguj/$', views.loguj, name='loguj'),\n url(r'^wyloguj/$', views.wyloguj, name='wyloguj'),\n url(r'^wiadomosci/$', views.wiadomosci, name='wiadomosci'),\n]\n","repo_name":"koduj-z-klasa/python101-py2","sub_path":"docs/webapps/czat/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"4896337911","text":"import os.path\r\nfrom itertools import islice\r\nimport ijson\r\nimport pandas as pd\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom tqdm import tqdm\r\nfrom transformers import AutoTokenizer, AutoModel\r\nfrom sentence_transformers import SentenceTransformer\r\n\r\nfrom embeddings.utils import get_underscored_name, mkdirs\r\n\r\n\r\ndef get_embedding(text, model, tokenizer, model_type=\"specter\"):\r\n if model_type.startswith(\"specter_simcse\") : return torch.tensor(model.encode(text))\r\n else:\r\n inputs = tokenizer(text, padding=True, truncation=True, return_tensors=\"pt\", max_length=512)\r\n return model(**inputs).last_hidden_state[:, 0, :]\r\n\r\n\r\ndef get_scibert_model():\r\n tokenizer = AutoTokenizer.from_pretrained('allenai/scibert_scivocab_uncased')\r\n model = AutoModel.from_pretrained('allenai/scibert_scivocab_uncased')\r\n return model, tokenizer\r\n\r\n\r\ndef get_specter_model():\r\n tokenizer = AutoTokenizer.from_pretrained('allenai/specter')\r\n model = AutoModel.from_pretrained('allenai/specter')\r\n return model, tokenizer\r\n\r\n\r\ndef get_custom_model(model_dir: str):\r\n if not os.path.exists(model_dir):\r\n print(f\"Error while parsing: {model_dir}. Model path directory with this name does not exist!\")\r\n return None\r\n\r\n model = SentenceTransformer(model_dir)\r\n tokenizer = None\r\n return model, tokenizer\r\n\r\n\r\ndef get_model(model_type: str,\r\n custom_model_dir=\"\"):\r\n if model_type == \"specter\": return get_specter_model()\r\n if model_type in ['scibert_average', 'scibert_cls']: return get_scibert_model()\r\n if model_type.startswith(\"specter_simcse\"): return get_custom_model(custom_model_dir)\r\n print(\"Error!! Invalid model name in get_model()\")\r\n return None, None\r\n\r\n\r\ndef create_author_embeddings(author, model_name=\"specter\", model=[], tokenizer=[], in_or_out=\"in\"):\r\n if \"Publications\" not in author: return\r\n\r\n auth_underscore_name = get_underscored_name(author['romanize name'])\r\n fname_out = f'./author_embeddings/{model_name}_embeddings/{in_or_out}'\r\n emb_total = []\r\n mkdirs(fname_out)\r\n publication_texts = []\r\n\r\n print(f\"{auth_underscore_name}, total papers:{len(author['Publications'])}\")\r\n\r\n for paper in author['Publications']:\r\n try: title_abs = paper['Title'] + \" [SEP] \" + paper['Abstract'] if (\"Abstract\" in paper and paper[\"Abstract\"]) else paper['Title']\r\n except: title_abs = paper['title'] + \" [SEP] \" + paper['Abstract'] if (\"Abstract\" in paper and paper[\"Abstract\"]) else paper['title']\r\n publication_texts.append(title_abs)\r\n\r\n if model_name == \"specter\":\r\n for title_abs in publication_texts:\r\n emb_total.append(get_embedding(title_abs, model, tokenizer, model_name))\r\n else: emb_total = model.encode(publication_texts)\r\n\r\n pd.DataFrame(emb_total).to_csv(fname_out + f'/{auth_underscore_name}.csv', header=False, index=False)\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n","repo_name":"nikifori/Apella-plus-thesis","sub_path":"embeddings_py/sentence_transformer_models.py","file_name":"sentence_transformer_models.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"19745000718","text":"import sqlite3\nimport pandas as pd\nfrom tabulate import tabulate\n\ns_id = input('Service ID:')\n\ndef getDeveloperName(id):\n try:\n sqliteConnection = sqlite3.connect('/home/wnaina/Bureau/fanompo_script/fanompo.db')\n cursor = sqliteConnection.cursor()\n select_query = \"SELECT * FROM services where numSVC = ?\"\n fanome_query = \"SELECT max(c.Daty), m.membID, m.Fiantso, s.anarFohy from membres m \\\n join calend c on m.membID = c.mbID JOIN services s on \\\n c.svcID = s.numSVC where c.svcID=? group by m.Fiantso order by c.Daty desc\"\n memb_query = \"SELECT m.membID, m.Fiantso from membres m join fanome f on m.membID = f.mb \\\n where f.svc=? \"\n\n cursor.execute(select_query, (id,))\n name = cursor.fetchone()\n\n print(name)\n cursor.execute(fanome_query, (id,))\n fanome = cursor.fetchall()\n df=pd.DataFrame(fanome)\n cursor.execute(memb_query, (id,))\n memb = cursor.fetchall()\n dff=pd.DataFrame(memb)\n print(\"Isan ny Mpanao: \",len(memb))\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print (tabulate(df,headers=[\"Daty\",\"Fiantso\",\"Service\"]))\n print(\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\")\n print (tabulate(dff,headers=[\"Num\",\"Fiantso\"]))\n cursor.close()\n \n\n except sqlite3.Error as error:\n print(\"Failed to read data from sqlite table\", error)\n finally: \n sqliteConnection.close()\n print(\"sqlite connection is closed\")\n\ngetDeveloperName(s_id)","repo_name":"wraivo/djfanompo","sub_path":"Bureau/fanompo_script/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"2930623044","text":"import pandas\n\n[NAME, INIT, INIT_SD, REAN, REAN_SD, NOOPT, NOOPT_SD, CI, CI_SD, DI, DI_SD, WI, WI_SD, CIDI, CIDI_SD, CIWI, CIWI_SD, DIWI, DIWI_SD, CIDIWI, CIDIWI_SD] = range(21)\n# Preprocessing: first run tail +1 performance\\ generated.txt | grep -v \", ,\" > perf.csv\ndata = pandas.read_csv('perf.csv', header=None)\n\ndata_slower_than_100ms = data[data[INIT] >= 100]\n\n# Filter the data to only keep these benchmarks that have 5 variants.\n# This is less than optimal, but good enough\nactual_data = data_slower_than_100ms\nfor name in data_slower_than_100ms[NAME]:\n # Get the base name of the benchmark (remove the -1.scm part)\n basename = '-'.join(name.split('-')[:-1])\n count = len([name2 for name2 in data_slower_than_100ms[NAME] if name2.startswith(basename)])\n if count < 5:\n print('Removing %s' % name)\n actual_data = actual_data[actual_data[NAME] != name]\n\nprint(actual_data)","repo_name":"softwarelanguageslab/maf","sub_path":"scripts/Python/filterBenchData.py","file_name":"filterBenchData.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"96"}
+{"seq_id":"21739713726","text":"import sys\r\nfrom collections import deque\r\nimport heapq\r\ninput = sys.stdin.readline\r\nn = int(input())\r\narr = [list(input().rstrip().split()) for _ in range(n)]\r\nba = []\r\nst = []\r\nfor i in range(n):\r\n for j in range(n):\r\n if arr[i][j] == \"X\": ba.append([i, j])\r\n elif arr[i][j] == \"S\": st.append([i, j])\r\nfor i in range(len(ba) - 2):\r\n arr[ba[i][0]][ba[i][1]] = \"B\"\r\n for j in range(i + 1, len(ba) - 1):\r\n arr[ba[j][0]][ba[j][1]] = \"B\"\r\n for k in range(j + 1, len(ba)):\r\n arr[ba[k][0]][ba[k][1]] = \"B\"\r\n flag = True\r\n for x, y in st:\r\n for z in range(y + 1, n):\r\n if arr[x][z] == \"T\": flag = False; break\r\n elif arr[x][z] == \"B\": break\r\n for z in range(y - 1, -1, -1):\r\n if arr[x][z] == \"T\": flag = False; break\r\n elif arr[x][z] == \"B\": break\r\n for z in range(x + 1, n):\r\n if arr[z][y] == \"T\": flag = False; break\r\n elif arr[z][y] == \"B\": break\r\n for z in range(x - 1, -1, -1):\r\n if arr[z][y] == \"T\": flag = False; break\r\n elif arr[z][y] == \"B\": break\r\n if flag: print(\"YES\"); exit(0)\r\n arr[ba[k][0]][ba[k][1]] = \"X\"\r\n arr[ba[j][0]][ba[j][1]] = \"X\"\r\n arr[ba[i][0]][ba[i][1]] = \"X\"\r\nprint(\"NO\")","repo_name":"secrett2633/replit_algorithm","sub_path":"백준/Gold/18428. 감시 피하기/감시 피하기.py","file_name":"감시 피하기.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"34638118463","text":"import os\nfrom collections.abc import Iterable\nfrom typing import Optional, Tuple, Union, List\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors as C\nfrom matplotlib.patches import Patch\nfrom PIL import Image as PILImage\n\nfrom .label_color import LabelColor\nfrom .utils import open_with_PIL\n\n\ndef overlay_masks(\n image: Union[os.PathLike, PILImage.Image, np.ndarray],\n boolean_masks: Union[np.ndarray, List[np.ndarray]],\n labels: Optional[List[str]] = None,\n colors: Optional[Union[np.ndarray, List[Union[str, List[float]]]]] = None,\n figsize: Tuple[int, int] = (8, 8),\n dpi: int = 90,\n mask_alpha: float = 0.4,\n mpl_colormap: str = \"tab20\",\n return_pil_image: bool = False,\n):\n \"\"\"Overlays masks on the image.\n Parameters\n ----------\n image : Union[str, PIL.Image.Image, np.ndarray]\n Image path or PIl.Image or numpy array. If image size inconsistent with\n the masks size, image will be resized.\n boolean_masks : List[np.ndarray[bool]]\n List of segmentation masks or numpy array of shape (height, width, n_classes).\n All masks should be the same size, equal to size of the image.\n labels : Optional[List[str]], optional\n Optional label names. Provide in the same order as the corresponding masks.\n If not provided, will be set as range(len(boolean_masks)), by default None\n colors : Union[np.ndarray, List[Union[str, List[float]]]], optional\n Array of shape (n_labels x 4) or list of matplotlib acceptable colornames.\n Example to get persistent colormap: `plt.cm.tab20(np.arange(NUM_LABELS))`\n figsize : tuple, optional\n Size in inches of the output image, by default (12, 12)\n dpi : int, optional\n Resolution of the output image. Note: 'px, py = w * dpi, h * dpi', by default 120\n mask_alpha : float, optional\n Masks opaque value, by default 0.4\n mpl_colormap : str\n Matplotlib colormap name\n return_pil_image : bool\n If True, will return PIL image instead of matpotlib figure.\n\n Returns\n -------\n plt.Figure | PIL.Image\n Output mpl figure or pillow image with masks.\n \"\"\"\n\n if isinstance(boolean_masks, np.ndarray):\n assert (boolean_masks.ndim == 3 and boolean_masks.dtype == bool), (\n \"boolean_masks should be a list boolean numpy\"\n + \" arrays or 3-dim numpy array with the last dim\"\n + \" as a channel to store masks of different classes\"\n )\n boolean_masks = [boolean_masks[:, :, i] for i in range(boolean_masks.shape[-1])]\n\n if labels is not None:\n assert len(labels) == len(boolean_masks), (\n \"Number of provided labels != number of masks\"\n )\n else:\n labels = [f\"{_:02d}\" for _ in range(len(boolean_masks))]\n\n pil_image = open_with_PIL(image)\n image_size = tuple(np.array(pil_image.size)[::-1])\n\n assert all(\n mask.shape == image_size for mask in boolean_masks\n ), \"Label mask size is not equal to image size\"\n\n if colors is None:\n cbar = LabelColor(\n num_labels=len(boolean_masks),\n alpha=mask_alpha,\n return_legend_color=True,\n mpl_colormap=mpl_colormap,\n )\n\n else:\n assert len(colors) == len(boolean_masks), (\n \"Number of provided colors != number of masks\"\n )\n if all(isinstance(c, str) for c in colors):\n colors = [C.to_rgba(c) for c in colors]\n\n if isinstance(colors, Iterable):\n colors = np.array(colors)\n\n assert colors.ndim == 2 and colors.shape[-1] == 4, (\n \"Unsupported color format:\"\n + \" should be list of matplotlib colorname strings for each mask/mask_channel,\"\n + \" list of RGBA arrays or 2-dim numpy array of shape (n_labels x 4)\"\n )\n\n 
mask_colors = colors.copy()\n mask_colors[:, -1] *= mask_alpha\n mask_colors = (mask_colors * 255).astype(\"uint8\")\n cbar = zip(mask_colors, colors)\n\n segmentation_overlay = np.zeros((*image_size, 4), dtype=np.uint16)\n segmentation_mask = np.zeros(image_size, dtype=bool)\n legend_elements = []\n\n for mask, label, (color, legend_color) in zip(boolean_masks, labels, cbar):\n\n assert mask.dtype == \"bool\"\n\n intersection = mask & segmentation_mask\n segmentation_mask = mask | segmentation_mask\n\n # Paint non-overlapping area\n segmentation_overlay[mask ^ intersection] = color\n\n # Blend overlapping area\n segmentation_overlay[intersection] = (\n segmentation_overlay[intersection] + color\n ) / 2\n\n legend_elements.append(Patch(color=legend_color, label=label))\n\n segmentation_overlay = PILImage.fromarray(segmentation_overlay.astype(\"uint8\"))\n pil_image.paste(segmentation_overlay, mask=segmentation_overlay)\n\n if return_pil_image:\n return pil_image\n \n else:\n fig = plt.figure(figsize=figsize, dpi=dpi)\n plt.imshow(pil_image)\n plt.axis(\"off\")\n mask_legend = plt.legend(\n handles=legend_elements,\n loc=\"upper left\",\n frameon=False,\n bbox_to_anchor=(1.01, 1),\n )\n plt.subplots_adjust(left=0.8)\n plt.tight_layout()\n plt.gca().add_artist(mask_legend)\n\n return fig\n","repo_name":"Irtaza147/Smart_parking","sub_path":"venv/Lib/site-packages/segmentation_mask_overlay/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"26433739462","text":"import skimage\nimport skimage.filters\nimport skimage.color\nimport random\nfrom random import randint\nfrom random import shuffle\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass AugmentBatch:\n\n def __init__(self):\n # Initialize seed\n #random.seed(42)\n # Create a list of functions that could be applied on the batch\n self.__list_func = [lambda img: self.convert_to_gray(img), lambda img: self.add_noise(img),\n lambda img: self.add_gaussian(img), lambda img: self.convert_to_sepia(img),\n lambda img: self.color_swap(img), lambda img: self.invert_color(img)]\n\n def augment(self, batch):\n # Roll the dice\n prob = random.random()\n\n # Half chance of nothing half do some augmentation\n if prob < 0.5:\n return batch\n else:\n # Do a copy of the batch\n new_batch = batch\n\n # Flip steering independent of other augmentations (Idea is to have more steering actions on training)\n batch_fliped = self.flip_horizontal(new_batch)\n\n # Do augmentations based on the lambda list __list_func\n idx = 0\n for (img, label) in batch_fliped:\n # Choose one operation to be applied on each image of the batch\n operation = randint(0, len(self.__list_func) - 1)\n # Choose the operation randomically\n img = self.__list_func[operation](img)\n batch_fliped[idx] = (img, label)\n idx += 1\n\n return batch_fliped\n\n def convert_to_gray(self, img):\n # Get each channel\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n # To keep same number of channels add gray to each one.\n img[:, :, 0] = gray\n img[:, :, 1] = gray\n img[:, :, 2] = gray\n return img\n\n def convert_to_sepia(self, img):\n # Get each channel\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n # To keep same number of channels add gray to each one.\n img[:, :, 0] = 0.393 * r + 0.769 * g + 0.189 * b\n img[:, :, 1] = 0.349 * r + 0.686 * g + 0.168 * b\n img[:, :, 2] = 0.272 * r + 0.534 * g + 0.131 * b\n return img\n\n def add_noise(self, img):\n new_img = skimage.util.random_noise(img,var=0.001)\n return new_img\n\n def invert_color(self, img):\n new_img = skimage.util.invert(img)\n return new_img\n\n def add_gaussian(self, img):\n new_img = skimage.filters.gaussian(img,sigma=0.9, multichannel=True)\n return new_img\n\n def color_swap(self, img):\n new_img = img\n list_chanels = [0, 1, 2]\n random.shuffle(list_chanels)\n new_img[:, : ,0] = img[:, :, list_chanels[0]]\n new_img[:, :, 1] = img[:, :, list_chanels[1]]\n new_img[:, :, 2] = img[:, :, list_chanels[2]]\n return new_img\n\n # Flip both the image and the steering\n def flip_horizontal(self, batch):\n # Do a copy of the batch\n new_batch = batch\n idx = 0\n for (img, label) in new_batch:\n img = np.fliplr(img)\n label = np.fliplr(label)\n new_batch[idx] = (img, label)\n idx += 1\n return new_batch\n\n def display_batch(self, batch):\n for img, steering in batch:\n plt.imshow(img)\n plt.show()","repo_name":"leonardoaraujosantos/LearnSegmentation","sub_path":"src/tensorflow/augment_batch.py","file_name":"augment_batch.py","file_ext":"py","file_size_in_byte":3447,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"96"}
+{"seq_id":"37461800652","text":"from flask import Flask, request, render_template, jsonify, url_for, send_file\nimport pandas as pd\nimport subprocess\nimport os\nimport io\nimport tempfile\nimport plotly.express as px\nimport plotly.io as pio\nfrom award import main\n\n\napp = Flask(__name__)\n\n@app.after_request\ndef add_header(response):\n if 'Cache-Control' not in response.headers:\n response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'\n return response\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n quarter = int(request.form[\"quarter\"]) # Get the selected quarter from the form\n year = request.form.get('year')\n action = request.form.get('action')\n print(action)\n \n try:\n df_data = process_quarter(quarter, year)\n if action == 'download':\n fname = f\"quarterly_{quarter}_report.csv\"\n fname = download_csv(df_data, fname)\n return jsonify({'success': True, 'filename': fname, 'action': 'download'})\n elif action == 'plot':\n return bonus_chart(quarter,year)\n\n except AttributeError:\n return jsonify({'error': \"No data present for this quarter or year yet\"})\n except Exception as e:\n return jsonify({'error': f\"An error occurred: {e}\"})\n else:\n return render_template(\"index.html\")\n\n@app.route(\"/download/\")\ndef download_file(filename):\n try:\n return send_file(os.path.join('static', filename), as_attachment=True, mimetype='text/csv')\n except Exception as e:\n return str(e)\n\n@app.route(\"/delete/\", methods=['POST'])\ndef delete_file(filename):\n file_path = os.path.join('static', filename)\n try:\n os.remove(file_path)\n return jsonify({'success': True})\n except Exception as e:\n return jsonify({'error': str(e)})\n\n\n\n@app.route(\"/bonus_plot//\", methods=[\"GET\"])\ndef bonus_plot(quarter, year):\n df = process_quarter(quarter, year)\n\n \n # Sort the dataframe by 'Total Bonus'\n df = df.sort_values('Total Bonus')\n \n fig = px.bar(df, \n x='Driver ID', \n y='Total Bonus', \n color='Total Bonus', # change the color to be based on 'Total Bonus'\n title='Total Bonus per Driver',\n hover_data=['Total Bonus'], # this will add a hover text for 'Total Bonus'\n labels={'Total Bonus':'Total Bonus', 'Driver ID':'Driver ID'})\n\n # Convert the figures to HTML and remove the surrounding tags\n plot_html = pio.to_html(fig, full_html=False)\n \n return render_template('plot.html', plot=plot_html)\n\n@app.route(\"/bonus_chart///\", methods=[\"GET\"])\ndef bonus_chart(quarter, year, driver_id = None):\n df = process_quarter(quarter, year, str(driver_id))\n # Sort the dataframe by 'Total Bonus'\n df = df.sort_values('Total Bonus')\n # print(df.to_json(orient='records'))\n # Send DataFrame as JSON to the client\n return render_template('chart.html', data=df.to_json(orient='records'), driverId=driver_id, year=year, quarter=quarter)\n\n@app.route(\"/scorecard///\", methods=[\"GET\"])\ndef score_card(quarter, year, driver_id):\n df = process_quarter(quarter, year, str(driver_id))\n df = df.sort_values('Total Bonus')\n return render_template('scorecard.html', data=df.to_json(orient='records'), driverId=driver_id, year=year, quarter=quarter)\n\n@app.route(\"/pie//\", methods=[\"GET\"])\ndef pie(quarter, year):\n print(\"hello\")\n df = process_quarter(quarter, year)\n df = df.sort_values('Total Bonus')\n return render_template('pie.html', data=df.to_json(orient='records'), year=year, quarter=quarter)\n\n\n\n\ndef process_quarter(quarter: int, year: int, driver_id = 
None):\n \"\"\"Process the file and delete it afterwards.\"\"\"\n print(driver_id)\n return main(int(quarter), int(year), driver_id)\n\n# def download_csv(df: pd.DataFrame, fname:str):\n# temp = tempfile.NamedTemporaryFile(suffix=\".csv\")\n# df.to_csv(temp.name, index=False)\n# return send_file(temp.name, as_attachment=True, attachment_filename=fname)\ndef download_csv(df: pd.DataFrame, fname:str):\n if not os.path.isdir('static'):\n os.makedirs('static')\n file_path = os.path.join('static', fname)\n df.to_csv(file_path, index=False)\n return fname\n\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\n\n\n\n#4400324","repo_name":"armaanchhina/safety_award-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"23093324361","text":"from bids import BIDSLayout\nimport bids\nimport tempfile\nimport argparse\nimport os\nimport sys\nimport time\nimport math\n\ndef check_subject_session_directories( path, base, sub, session ):\n dirs = os.listdir( os.path.join(path, sub, session) )\n os.mkdir( os.path.join(base, sub, session) )\n for d in dirs:\n os.symlink(os.path.join(path,sub,session,d), os.path.join(base, sub, session,d) )\n \n try:\n x = BIDSLayout(root=base)\n except:\n print(\"FAILURE - \" + sub + \" - \" + session + \" - \" + d)\n ret = False\n os.unlink( os.path.join(base, sub, session,d) )\n\n os.rmdir( os.path.join(base, sub, session) )\n\ndef check_subject_sessions(path, base, sub):\n sessions = os.listdir( os.path.join(path, sub ) )\n os.mkdir( os.path.join(base, sub) )\n for ses in sessions:\n #print(\"subject - \" + sub + \", session - \" + ses)\n os.symlink( os.path.join(path,sub,ses), os.path.join(base, sub, ses) )\n \n try:\n x = BIDSLayout(root=base) \n except:\n print(\"FAILURE - \" + sub + \" - \" + ses)\n os.unlink( os.path.join(base, sub, ses))\n check_subject_session_directories( path,base,sub,ses )\n ret = False\n\n if os.path.isdir( os.path.join(base, sub, ses) ):\t\n os.unlink(os.path.join(base, sub, ses))\n \n os.rmdir( os.path.join(base, sub) )\n\ndef check_subject_level(path, base, sub):\n \n os.symlink( os.path.join(path,sub), os.path.join(base,sub) )\n ret = True\n try:\n x = BIDSLayout(root=base)\n except:\n ret = False\n os.unlink(os.path.join(base,sub))\n\n return(ret)\n\n\n\n\ndef main():\n\n # avoid warning\n bids.config.set_option('extension_initial_dot', True)\n \n my_parser = argparse.ArgumentParser(description='Identify abdominal slab')\n my_parser.add_argument('-p', '--path', type=str, help='base path', required=True)\n my_parser.add_argument('-t', '--temp', type=str, help='temp path', required=False, default=\"/scratch\")\n args = my_parser.parse_args()\n\n tpath = args.temp\n if not os.path.isdir(tpath):\n tpath = \"/tmp\" \n\n jobid = os.environ[\"LSB_JOBID\"]\n base = tempfile.mkdtemp(dir=tpath, prefix=\"job_\"+str(jobid)+\"_\", suffix=\"_bidslayout\")\n #print(base) \n\n sTime = time.time()\n items = os.listdir(path=args.path)\n print(\"Checking \" + str(len(items)) + \" items\" )\n desc = os.path.join(args.path, \"dataset_description.json\")\n print(desc) \n if not os.path.isfile(desc):\n print(\"Missing: \"+desc)\n exit(1)\n os.symlink(desc, os.path.join(base, \"dataset_description.json\"))\n\n failure=[]\n items.remove(\"dataset_description.json\")\n for i,itm in enumerate(items):\n #print(\"Testing: \"+itm+\" \"+str(i)+\"/\"+str(len(items)))\n if not check_subject_level(args.path, base, itm):\n failure.append(itm)\n print(\"FAILURE - \"+str(itm))\n check_subject_sessions(args.path, base, itm)\n\n\n os.unlink(os.path.join(base, \"dataset_description.json\"))\n os.rmdir(base)\n\n ret=0\n if len(failure) > 0:\n ret=1\n print(failure)\n\n rTime = time.time() - sTime\n h = math.floor( rTime / 60 / 60)\n m = math.floor( (rTime - 60*60*h)/60 )\n s = math.floor( (rTime - 60*60*h - 60*m) )\n print(\"Run time = \" + str(h) + \"h \" + str(m) + \"m \" + str(s) + \"s\")\n\n print(\"Done\")\n return(ret)\n \n \n\nif __name__==\"__main__\":\n sys.exit(main())\n","repo_name":"ftd-u01/checkBids","sub_path":"checkBids.py","file_name":"checkBids.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"26160075007","text":"import sys\nimport time\n\nsys.stdin = open(\"ExpertAcademy1244.txt\")\n\nstart = time.time()\n\n\n# 첫제출(시간초과)\ndef powerset(arr, cnt):\n if cnt == change:\n global res\n global maxs\n an = ''\n for f in arr:\n an += f\n maxs += 1\n print(an, maxs)\n res.append(int(an))\n return\n tarr = arr.copy()\n for g in range(len(arr)):\n for f in range(len(arr)):\n if f != g:\n tarr[f], tarr[g] = tarr[g], tarr[f]\n powerset(tarr, cnt + 1)\n\n\nfor tc in range(int(input())):\n maxs = 0\n num, change = input().split()\n num = list(num)\n change = int(change)\n res = []\n powerset(num, 0)\n print(len(res), res)\n print(\"#{} {}\".format(tc + 1, max(res)))\nprint('time', time.time() - start)\n","repo_name":"namnamDev/namnamDev","sub_path":"ExpertAcademy1244.py","file_name":"ExpertAcademy1244.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"15744704409","text":"from typing import List\nfrom operators.property_sourcing_base import PropertySourcingBaseOperator\nfrom bs4 import BeautifulSoup\n\n\nclass MidLandRealitySourcingOperator(PropertySourcingBaseOperator):\n @staticmethod\n def get_sfa_gfa(space: List):\n if len(space) > 1:\n sfa, gfa = space[0].get_text().replace(\"SFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\"), \\\n space[1].get_text().replace(\"GFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\")\n return sfa, gfa\n elif len(space) == 1:\n return space[0].get_text().replace(\"SFA\", \"\").replace(\"ft²\", \"\").replace(\"\\xa0\", \"\"), None\n else:\n return None, None\n\n def get_property_info(self, html_source):\n import pandas as pd\n soup = BeautifulSoup(html_source, 'html.parser')\n \n rooms = []\n rents = soup.find_all(\"div\", class_=\"sc-1r1odlb-23 etCoIy\")\n\n for rent in rents:\n titles = rent.find(\"div\", class_=\"sc-wivooq-1 hCnCJl\").get_text()\n title_list = titles.strip().split(\"\\n\")\n title = title_list[0].strip()\n\n if len(title_list) < 3:\n sub_title = None\n else:\n sub_title = title_list[2].strip()\n\n space = rent.find_all(\"div\", class_=\"sc-gqqyk9-1 kYfBEV\")\n space_element = self.get_sfa_gfa(space)\n mon_price = rent.find(\"span\", class_=\"sc-hlnw2x-6 kktEPG\").get_text()[1:]\n location = rent.find(\"span\", class_=\"sc-1r1odlb-9 dHhWAt\").get_text()\n features = rent.find_all(\"div\", class_=\"sc-1r1odlb-16 gopLNA\")\n features_combined = \"\"\n\n for i in range(len(features) // 2):\n features_combined += features[i].get_text() + \"&&\"\n\n age = rent.find(\"div\", class_=\"sc-w2gv6f-0 eMkKmr\")\n\n if age:\n age = age.get_text()\n else:\n age = None\n\n url = rent.find('a', href=True)['href']\n room_idx = url.split(\"-\")[-1]\n\n room_info = {\"date\": self.execution_date, \"room_idx\": room_idx, \"title\": title, \"sub_title\": sub_title,\n \"sfa\": space_element[0], \"gfa\": space_element[1], \"mon_price\": mon_price, \"age\": age,\n \"location\": location, \"features_combined\": features_combined, \"url\": url}\n\n if room_info not in rooms:\n rooms.append(room_info)\n\n self.log.info(\"------- Property Info -------\")\n self.log.info(rooms)\n\n self.log.info(f\"# of properties: {len(rooms)}\")\n return rooms\n","repo_name":"yelee20/airflow-gke","sub_path":"dags/operators/midland_reality_sourcing.py","file_name":"midland_reality_sourcing.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"13837166174","text":"from abc import ABC, abstractmethod\n\nfrom aiohttp import ClientSession as Client\n\nfrom .web import Request\n\nclass Auth(ABC):\n 'Implement for any authentication scheme.'\n @abstractmethod\n async def sign(self, client: Client, request: Request) -> Request: pass\n\n @staticmethod\n def none() -> 'NoAuth': return NoAuth()\n\n\nclass UrlApiKey(Auth):\n 'URL parameter with a secret to authorize requests.'\n params: dict[str, str]\n\n def __init__(self, param_name: str, secret: str):\n self.params = {param_name: secret}\n\n async def sign(self, client: Client, request: Request) -> Request:\n request.query_params |= self.params\n return request\n\nclass HeaderApiKey(Auth):\n 'Header with a secret to authorize requests.'\n headers: dict[str, str]\n\n def __init__(self, param_name: str, secret: str):\n self.headers = {param_name: secret}\n\n async def sign(self, client: Client, request: Request) -> Request:\n request.headers |= self.headers\n return request\n\nclass NoAuth(Auth):\n 'Does nothing.'\n\n async def sign(self, client: Client, request: Request) -> Request: return request\n\n\n\n","repo_name":"dunkyl/SlyAPI-Python","sub_path":"src/SlyAPI/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"13022316750","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom architectures.custom_unet import U_Net as Model\nfrom dataset import SegmentationDataset\nfrom losses import tversky_loss, tversky_coeff\n\n\ndef train(model, device, train_loader, optimizer, epoch, scheduler=None):\n model.train()\n nb_samples = 0\n epoch_loss = 0\n\n for batch_idx, (data, target) in enumerate(train_loader):\n nb_samples += len(data)\n data, target = data.to(device), target.to(device)\n\n output = model(data)\n\n if isinstance(output, list):\n loss = tversky_loss(output[0], target, reduction=\"sum\")\n for i in range(1, len(output)):\n target_resized = F.interpolate(\n target, scale_factor=1 / 2 ** i, mode=\"bilinear\", align_corners=True\n )\n target_resized = torch.where(\n target_resized > 0.1,\n torch.tensor(1.0, device=device),\n torch.tensor(0.0, device=device),\n )\n loss = loss + tversky_loss(output[i], target_resized, reduction=\"sum\")\n loss = loss / len(output)\n else:\n loss = tversky_loss(output, target, reduction=\"sum\")\n\n epoch_loss += loss.item()\n loss = loss / len(data)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if scheduler:\n scheduler.step()\n\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)], Loss: {:.6f}\".format(\n epoch,\n nb_samples,\n len(train_loader.dataset),\n 100.0 * (batch_idx + 1) / len(train_loader),\n loss.item(),\n ),\n end=\"\\r\",\n )\n\n epoch_loss /= len(train_loader.dataset)\n print(\n \"Train Epoch: {} [{}/{} ({:.0f}%)], Average Loss: {:.6f}\".format(\n epoch, nb_samples, len(train_loader.dataset), 100.0, epoch_loss\n )\n )\n return epoch_loss\n\n\ndef validate(model, device, test_loader):\n model.eval()\n test_loss = 0\n test_dice = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n if isinstance(output, list):\n output = output[0]\n test_loss += tversky_loss(output, target, reduction=\"sum\").item()\n test_dice += tversky_coeff(\n output, target, hard=True, reduction=\"sum\"\n ).item()\n\n test_loss /= len(test_loader.dataset)\n test_dice /= len(test_loader.dataset)\n print(\"Test set: Average score: {:.6f} (loss: {:.6f})\".format(test_dice, test_loss))\n return test_loss, test_dice\n\n\ndef checkpoint(model, test_dice, optimizer, epoch, input_size, weight_decay, infos=\"\"):\n file_name = \"{}_dice={:.3f}_{}_ep={}_{}_wd={}_{}.pth\".format(\n model.__class__.__name__,\n test_dice,\n optimizer.__class__.__name__,\n epoch,\n input_size,\n weight_decay,\n infos,\n )\n path = os.path.join(\"../../models/\", file_name)\n if test_dice > 0.47 and not os.path.isfile(path):\n torch.save(model.state_dict(), path)\n print(\"Saved: \", file_name)\n\n\ndef main():\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n\n # Hyperparams\n batch_size = 16\n epochs = 40\n input_size = (216, 320)\n weight_decay = 1e-4\n print(f\"Batch size: {batch_size}, input size: {input_size}, wd: {weight_decay}\")\n\n # Create datasets\n train_indices = np.load(\"../../data/processed/train_indices.npy\")\n test_indices = np.load(\"../../data/processed/test_indices.npy\")\n # valid_indices = np.load(\"../../data/processed/valid_indices.npy\")\n\n # Merge train and test\n # train_indices = np.concatenate((train_indices, test_indices))\n # test_indices = valid_indices\n\n # Make sure there's no overlap\n assert not set(train_indices) & set(test_indices)\n\n # Datasets\n 
train_set = torch.utils.data.Subset(\n SegmentationDataset(\n \"../../data/raw/training_set/\", input_size=input_size, train_mode=True\n ),\n train_indices,\n )\n test_set = torch.utils.data.Subset(\n SegmentationDataset(\n \"../../data/raw/training_set/\", input_size=input_size, train_mode=True\n ),\n test_indices,\n )\n print(\"Training set size: \", len(train_set))\n print(\"Test set size : \", len(test_set))\n print(\"Total: \", len(train_set) + len(test_set))\n\n # Dataloaders\n train_loader = torch.utils.data.DataLoader(\n dataset=train_set,\n batch_size=batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=True,\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=test_set,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4,\n pin_memory=True,\n )\n\n model = Model().to(device)\n print(Model.__name__)\n\n # he initialization\n for m in model.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_in\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n optimizer = torch.optim.SGD(\n model.parameters(), lr=1e-1, momentum=0.9, weight_decay=weight_decay\n )\n print(\"Optimizer: \", optimizer.__class__.__name__)\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[8, 14, 19, 25, 30, 35], gamma=0.1\n )\n\n train_loss_history = list()\n test_loss_history = list()\n test_dice_history = list()\n\n for epoch in range(1, epochs + 1):\n print(\"################## EPOCH {}/{} ##################\".format(epoch, epochs))\n\n for param_group in optimizer.param_groups:\n print(\"Current learning rate:\", param_group[\"lr\"])\n\n train_loss = train(model, device, train_loader, optimizer, epoch)\n test_loss, test_dice = validate(model, device, test_loader)\n\n scheduler.step()\n\n # Save model\n if epoch > 1 and test_dice > max(test_dice_history):\n checkpoint(\n model,\n test_dice,\n optimizer,\n epoch,\n input_size,\n weight_decay,\n infos=\"tversky_loss\",\n )\n\n train_loss_history.append(train_loss)\n test_loss_history.append(test_loss)\n test_dice_history.append(test_dice)\n\n # # Save history at each epoch (overwrite previous history)\n history = [train_loss_history, test_loss_history, test_dice_history]\n np.save(\"history.npy\", np.array(history))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Jonas1312/ChallengeHC18","sub_path":"src/models/train_segmentation.py","file_name":"train_segmentation.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"}
+{"seq_id":"38458378504","text":"import privacyraven as pr\nfrom privacyraven.utils.data import get_emnist_data\n\n# from privacyraven.extraction.core import ModelExtractionAttack\nfrom privacyraven.m_inference.core import MembershipInferenceAttack\nfrom privacyraven.utils.query import get_target\nfrom privacyraven.models.victim import train_mnist_victim\nfrom privacyraven.models.pytorch import ImagenetTransferLearning, ThreeLayerClassifier\n\n# Create a query function for a PyTorch Lightning model\nmodel = train_mnist_victim()\n\n\ndef query_mnist(input_data):\n return get_target(model, input_data)\n\n\n# Obtain seed (or public) data to be used\nemnist_train, emnist_test = get_emnist_data()\n\nattack = MembershipInferenceAttack(\n query_mnist,\n 100,\n (1, 28, 28, 1),\n 10,\n (1, 3, 28, 28),\n \"copycat\",\n ThreeLayerClassifier,\n 1000,\n emnist_train,\n emnist_test,\n)\n","repo_name":"suhacker1/SecureMLExperiments","sub_path":"m_inf_mnist.py","file_name":"m_inf_mnist.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"6379784179","text":"from urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.utils.http import is_same_domain\nfrom rest_framework import permissions\n\n\ndef _report_warning_to_rollbar(message, extra_data=None):\n ROLLBAR = getattr(settings, 'ROLLBAR', {})\n if ROLLBAR:\n import rollbar\n rollbar.report_message(message, level='warning', extra_data=extra_data)\n\n\ndef referring_host(request):\n referer = urlparse(request.META.get('HTTP_REFERER', ''))\n return referer.netloc.split(':')[0]\n\n\ndef referring_host_is_allowed(host):\n for pattern in settings.ALLOWED_HOSTS:\n if is_same_domain(host, pattern):\n return True\n return False\n\n\nclass IsAuthenticatedOrWebClient(permissions.BasePermission):\n def has_permission(self, request, view):\n if request.user and request.user.is_authenticated:\n return True\n\n if settings.OAR_CLIENT_KEY == '':\n return True\n\n if request.path.startswith(\"/api/info\"):\n return True\n\n client_key = request.META.get('HTTP_X_OAR_CLIENT_KEY')\n if client_key == settings.OAR_CLIENT_KEY:\n host = referring_host(request)\n if referring_host_is_allowed(host):\n return True\n else:\n _report_warning_to_rollbar(\n 'Unallowed referring host passed with API request',\n extra_data={'host': host})\n else:\n _report_warning_to_rollbar(\n 'Incorrect client key submitted with API request',\n extra_data={'client_key': client_key})\n\n return False\n\n\nclass IsAllowedHost(permissions.BasePermission):\n def has_permission(self, request, view):\n host = referring_host(request)\n if referring_host_is_allowed(host):\n return True\n else:\n _report_warning_to_rollbar(\n 'Unallowed referring host passed with API request',\n extra_data={'host': host})\n\n\nclass IsRegisteredAndConfirmed(permissions.BasePermission):\n message = 'Insufficient permissions'\n\n def has_permission(self, request, view):\n if not request.user.is_authenticated:\n return False\n\n if not request.user.is_active:\n return False\n\n if not request.user.did_register_and_confirm_email:\n return False\n\n return True\n","repo_name":"opensupplyhub/open-apparel-registry","sub_path":"src/django/api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"96"}
+{"seq_id":"52549058","text":"from django.views.generic import TemplateView\nfrom ..models import AgentTransport\nfrom django.shortcuts import redirect\nfrom django.urls import reverse, reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\n\nclass AgentTransportDeleteView(TemplateView):\n\n @login_required(login_url=reverse_lazy('login'))\n def delete_data_agent_transport(request, pk):\n agent_transport = AgentTransport.objects.get(pk=pk)\n agent_transport.delete()\n\n if request.method == \"GET\":\n filter_by = request.GET.get(\"filter_by\")\n date_filter = request.GET.get(\"date_filter\")\n if not date_filter:\n return redirect(reverse('agent-transport-table'))\n else:\n return redirect(reverse('agent-transport-table') + '?filter_by=' + filter_by + '&date_filter=' + date_filter)\n\n\n @login_required(login_url=reverse_lazy('login'))\n def delete_multiple_data_agent_transport(request):\n \n if request.method == \"POST\":\n pk_list = request.POST.getlist('pk')\n filter_by = request.POST['filter_by']\n date_filter = request.POST['date_filter']\n\n for pk in pk_list:\n agent_transport = AgentTransport.objects.get(pk=pk)\n agent_transport.delete()\n\n if not date_filter:\n return redirect(reverse('agent-transport-table'))\n else:\n return redirect(reverse('agent-transport-table') + '?filter_by=' + filter_by + '&date_filter=' + date_filter)","repo_name":"ipoobest/demo-auto-deployment","sub_path":"ndd-app/agent_transport/views/agent_transport_delete_view.py","file_name":"agent_transport_delete_view.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"41005964480","text":"import logging\nimport os\nimport threading\nimport weakref\nfrom .models import ProbeSource\nfrom .sync import ProbeViewSync\n\n\nlogger = logging.getLogger(\"zentral.core.probes.conf\")\n\n\nclass ProbeView(object):\n def __init__(self, parent=None, with_sync=False):\n self.parent = parent\n self._probes = None\n self._lock = threading.Lock()\n self.with_sync = with_sync\n self.sync = None\n\n def clear(self):\n with self._lock:\n self._probes = None\n\n def iter_parent_probes(self):\n if self.parent is None:\n for p in ProbeSource.objects.active():\n yield p.load()\n else:\n yield from self.parent\n\n def _start_sync(self):\n if self.with_sync:\n if self.sync is not None:\n if self.sync.is_alive():\n return\n else:\n logger.error(\"Sync thread is not alive. Last heartbeat %s.\", self.sync.last_heartbeat or \"-\")\n # separate thread to listen to the probe change signal\n self.sync = ProbeViewSync(self)\n self.sync.start()\n\n def __iter__(self):\n with self._lock:\n self._load()\n yield from self._probes\n\n def __len__(self):\n with self._lock:\n self._load()\n return len(self._probes)\n\n\nclass ProbesDict(ProbeView):\n def __init__(self, parent=None, item_func=None, unique_key=True, with_sync=False):\n super(ProbesDict, self).__init__(parent, with_sync=with_sync)\n if item_func is None:\n self.item_func = lambda p: [(p.name, p)]\n else:\n self.item_func = item_func\n self.unique_key = unique_key\n\n def _load(self):\n self._start_sync()\n if self._probes is None:\n self._probes = {}\n for probe in self.iter_parent_probes():\n for key, val in self.item_func(probe):\n if self.unique_key:\n self._probes[key] = val\n else:\n self._probes.setdefault(key, []).append(val)\n\n def __getitem__(self, key):\n with self._lock:\n self._load()\n return self._probes[key]\n\n def keys(self):\n with self._lock:\n self._load()\n return self._probes.keys()\n\n def get(self, *args, **kwargs):\n with self._lock:\n self._load()\n return self._probes.get(*args, **kwargs)\n\n\nclass ProbeList(ProbeView):\n def __init__(self, parent=None, filter_func=None, with_sync=False):\n super(ProbeList, self).__init__(parent, with_sync=with_sync)\n self.filter_func = filter_func\n self._children = weakref.WeakSet()\n\n def clear(self):\n with self._lock:\n self._probes = None\n for child in self._children:\n child.clear()\n\n def _load(self):\n self._start_sync()\n if self._probes is None:\n self._probes = []\n for probe in self.iter_parent_probes():\n if self.filter_func is None or self.filter_func(probe):\n self._probes.append(probe)\n\n def filter(self, filter_func):\n child = self.__class__(self, filter_func)\n self._children.add(child)\n return child\n\n def dict(self, item_func=None, unique_key=True):\n child = ProbesDict(self, item_func, unique_key)\n self._children.add(child)\n return child\n\n def event_filtered(self, event):\n def _filter(probe):\n return probe.test_event(event)\n return self.filter(_filter)\n\n\n# used for the tests, to avoid having an extra DB connection\nzentral_probes_sync = os.environ.get(\"ZENTRAL_PROBES_SYNC\", \"1\") == \"1\"\n\n\nall_probes = ProbeList(with_sync=zentral_probes_sync)\nall_probes_dict = all_probes.dict(item_func=lambda p: [(p.pk, p)], unique_key=True)\n","repo_name":"zentralopensource/zentral","sub_path":"zentral/core/probes/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":705,"dataset":"github-code","pt":"96"}
+{"seq_id":"10913233064","text":"numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef multiplicar_por_10(numero):\n\n return numero * 2\n\n\nnueva_multi = multiplicar_por_10(numeros)\nnueva_multi_2 = list(map(multiplicar_por_10, numeros))\n\nprint(nueva_multi)\nprint(nueva_multi_2)\n\n\ndef convertir_en_string_mas_unidad(numero):\n return f'{numero} seg'\n\n\nnuevo_output = list(map(convertir_en_string_mas_unidad, numeros))\nprint(nuevo_output)\n\n\ndef convertir_a_numeros_negativos(numero):\n return numero * -1\n\n\nlist(map(convertir_a_numeros_negativos, numeros))\n\n\ndef convertir_en_0_si_menor_a_5(numero):\n if numero < 5:\n return 0\n else:\n return numero\n\n\nlist(map(convertir_en_0_si_menor_a_5, numeros))\n\n\ndef convertir_en_true_si_mayor_a_6(numero):\n if numero > 6:\n return True\n else:\n return False\n\n\nlist(map(convertir_en_true_si_mayor_a_6, numeros))\n\n\n#filter\nnumeros_2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\ndef numero_es_par(numero):\n if numero % 2 == 0:\n return True\n else:\n return False\n\nprint('Filter')\nprint(list(filter(numero_es_par, numeros_2)))\n\n\ndef palabra_tiene_mas_de_5_caracteres(palabra):\n if len(palabra) > 5:\n return True\n\n\npalabras = [\"achicoria\", \"pasto\", \"sol\", \"loquillo\", \"moquillo\", \"sed\", \"pez\", \"jacaranda\", \"mil\"]\n\nlist(filter(palabra_tiene_mas_de_5_caracteres, palabras))\n\n\ndef numero_es_negativo(numero):\n if numero < 0:\n return True\n\n\nnumeros = [3, 5, -1, -7, -8, 4, -78, 5, -46, 56, 98, 9, -1, -2, -4]\n\nlist(filter(numero_es_negativo, numeros))\n\n\ndef numero_es_divisible_entre_9(numero):\n if numero % 9 == 0:\n return True\n\n\nnumeros = [3, 7, 9, 34, 72, 90, 87, 34, 99, 56, 12, 18]\n\nlist(filter(numero_es_divisible_entre_9, numeros))\n\n\n#AND\ndef numero_es_divisible_entre_3(numero):\n if numero % 3 == 0:\n return True\n else:\n return False\n\n\ndef numero_es_menor_que_10(numero):\n if numero < 10:\n return True\n else:\n return False\n\nnumero_es_divisible_entre_3(9) and numero_es_menor_que_10(9)\n\n\nnumeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n\n\ndef numero_es_divisible_entre_3_y_menor_que_10(numero):\n return numero_es_divisible_entre_3(numero) and numero_es_menor_que_10(numero)\n\nnuevo_output_2 = list(filter(numero_es_divisible_entre_3_y_menor_que_10, numeros))\nprint(nuevo_output_2)\n\n\n#not\nnuevo_output_3 = not(numero_es_divisible_entre_3(9))\nprint(nuevo_output_3)\n\n#lambda\n\nnuevo_output_4 = list(filter(lambda x: not numero_es_divisible_entre_3(x), numeros))\nprint(nuevo_output_4)\n\npalabras = [\"achicoria\", \"pasto\", \"sol\", \"loquillo\", \"moquillo\", \"sed\", \"pez\", \"jacaranda\", \"mil\"]\n\nlist(filter(lambda x: len(x) > 5, palabras))\n\n\n\n","repo_name":"Fresitaconcrema/Modulo4","sub_path":"Procesamiento_de_datos/bd_3_programacion_funcional.py","file_name":"bd_3_programacion_funcional.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"12922418326","text":"N = int(input())\ninfo_dict = {}\norder_list = []\nboard = [[0 for _ in range(N)] for _ in range(N)]\nfor i in range(N**2):\n tmp = list(map(int, input().split()))\n info_dict[tmp[0]] = tmp[1:]\n order_list.append(tmp[0])\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef search(ns):\n # 0 : like_cnt, 1 : empty_cnt\n like_info = {0 : [], 1: [], 2:[], 3:[], 4:[]}\n empty_info = {0 : [], 1: [], 2:[], 3:[], 4:[]}\n for i in range(N):\n for j in range(N):\n x, y = i, j\n if board[x][y] != 0 : continue\n like_cnt, empty_cnt = 0, 0\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < N :\n if board[nx][ny] in info_dict[ns] : like_cnt += 1\n like_info[like_cnt].append([x, y])\n for k in range(4, -1, -1):\n if len(like_info[k]) == 1 :\n x, y = like_info[k][0]\n board[x][y] = ns\n break\n elif len(like_info[k]) > 1 :\n like_info[k].sort()\n for it in range(len(like_info[k])):\n x, y = like_info[k][it]\n empty_cnt = 0\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < N :\n if board[nx][ny] == 0 : empty_cnt += 1\n empty_info[empty_cnt].append([x, y])\n for empty in range(4, -1, -1):\n if empty_info[empty] :\n empty_info[empty].sort()\n x, y = empty_info[empty][0]\n board[x][y] = ns\n break\n break\ndef scoring():\n global score\n s_list = [0, 1, 10, 100, 1000]\n for i in range(N):\n for j in range(N):\n x, y = i, j\n student = board[x][y]\n cnt = 0\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n if 0 <= nx < N and 0 <= ny < N :\n if board[nx][ny] in info_dict[student] : cnt += 1\n score += s_list[cnt]\n\ndef solve():\n for st in range(N**2):\n search(order_list[st])\n scoring()\n\nscore = 0\nsolve()\nprint(score)","repo_name":"HPYoo/swcodingtest","sub_path":"prob21608.py","file_name":"prob21608.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"41563830126","text":"__author__ = 'bilge'\ndef distance(point1, point2):\n \"\"\"\n Returns the Euclidean distance of two points in the Cartesian Plane.\n\n distance([3,4],[0,0])\n 5.0\n distance([3,6],[10,6])\n 7.0\n \"\"\"\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5\n\n\ndata = [[0,1],[2,3],[4,5]]\ndata2 = data\nwhile data2:\n print(min(data2, key=lambda x: distance(data[-1], x)))","repo_name":"aysebilgegunduz/vehicle_routing_problem","sub_path":"deneme.py","file_name":"deneme.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"74194790074","text":"import logging\nimport json\nfrom weibo import Client\n\nfrom .base import BaseHandler\n\n\nclass HomeHandler(BaseHandler):\n\n logger = logging.getLogger('tomorrow.utiltiy.sina.home')\n def get(self):\n key, secret = self.get_app()\n error = self.get_argument('err', None)\n return self.render(\n 'utility/sina/home.html',\n key=key,\n secret=secret,\n error=error\n )\n\n def post(self):\n app_key = self.get_argument('app-key')\n app_secret = self.get_argument('app-secret')\n self.set_app(app_key, app_secret)\n client = Client(api_key=app_key, api_secret=app_secret,\n redirect_uri=self.callback_url)\n\n return self.write(json.dumps({'url': client.authorize_url}))\n\n\n","repo_name":"TylerTemp/tomorrow","sub_path":"lib/hdlr/utility/sina/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"71259439355","text":"# Find Digits\n# https://www.hackerrank.com/challenges/find-digits/problem\n\nl = []\nfor _ in range(int(input().strip())):\n n = int(input().strip())\n a = list(map(int, str(n)))\n count = 0\n for i in a:\n if i != 0 and n % i == 0: count += 1\n l.append(count)\nprint(*l, sep = '\\n')\n","repo_name":"harshildarji/Algorithms-HackerRank","sub_path":"Implementation Challenges/Find Digits.py","file_name":"Find Digits.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"}
+{"seq_id":"16402024685","text":"# -*- coding: utf-8 -*-\n\n'''\n Skai Player Addon\n Author Twilight0\n\n SPDX-License-Identifier: GPL-3.0-only\n See LICENSES/GPL-3.0-only for more information.\n'''\n\nimport json, re\nfrom base64 import b64decode\nfrom tulip import bookmarks, directory, client, cache, control, youtube\nfrom tulip.parsers import itertags\nfrom tulip.compat import zip, iteritems\nfrom youtube_resolver import resolve as yt_resolver\n\ncache_method = cache.FunctionCache().cache_method\n\n\nclass Indexer:\n\n def __init__(self):\n\n self.list = []; self.data = []\n self.base_link = 'https://www.skaitv.gr'\n self.old_base = 'https://www.skai.gr'\n self.radio_base = 'http://www.skairadio.gr'\n self.yt_channel = 'UCmHgxU394HiIAsN1fMegqzw'\n self.yt_key = b64decode('0AXQxNFejdVT2w2RtY0V1cWMrl3YSFjVyQEUUl3Sfp0Q5NVY6lUQ'[::-1])\n self.tvshows_link = ''.join([self.base_link, '/shows/seires'])\n self.entertainment_link = ''.join([self.base_link, '/shows/psuchagogia'])\n self.news_link = ''.join([self.base_link, '/shows/enimerosi'])\n self.live_link = ''.join([self.base_link, '/live'])\n self.podcasts_link = ''.join([self.radio_base, '/shows?page=0'])\n self.play_link = 'https://videostream.skai.gr/skaivod/_definst_/mp4:skai/'\n self.radio_link = 'https://skai.live24.gr/skai1003'\n\n def root(self, audio_only=False):\n\n self.list = [\n {\n 'label': control.lang(30001),\n 'title': 'Skai Live TV',\n 'action': 'play',\n 'isFolder': 'False',\n 'icon': 'live.png',\n 'url': self.live_link\n }\n ,\n {\n 'label': control.lang(30014),\n 'title': 'Skai Radio 100.3FM',\n 'action': 'play',\n 'url': self.radio_link,\n 'isFolder': 'False',\n 'icon': 'live.png'\n }\n ,\n {\n 'title': control.lang(30006),\n 'action': 'news',\n 'icon': 'news.png'\n }\n ,\n {\n 'title': control.lang(30002),\n 'action': 'shows',\n 'icon': 'tvshows.png',\n 'url': self.tvshows_link\n }\n ,\n {\n 'title': control.lang(30015),\n 'action': 'shows',\n 'icon': 'entertainment.png',\n 'url': self.entertainment_link\n }\n ,\n {\n 'title': control.lang(30003),\n 'action': 'podcasts',\n 'icon': 'podcasts.png'\n }\n ,\n {\n 'title': control.lang(30004),\n 'action': 'archive',\n 'icon': 'archive.png'\n }\n ,\n {\n 'title': control.lang(30005),\n 'action': 'latest',\n 'icon': 'latest.png'\n }\n ,\n {\n 'title': control.lang(30008),\n 'action': 'bookmarks',\n 'icon': 'bookmarks.png'\n }\n ]\n\n if audio_only:\n\n self.list = [self.list[1]] + [self.list[3]]\n\n for item in self.list:\n\n cache_clear = {'title': 30009, 'query': {'action': 'cache_clear'}}\n item.update({'cm': [cache_clear]})\n\n directory.add(self.list, content='videos')\n\n def bookmarks(self):\n\n self.list = bookmarks.get()\n\n if not self.list:\n na = [{'title': control.lang(30018), 'action': None}]\n directory.add(na)\n return\n\n for i in self.list:\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['delbookmark'] = i['url']\n i.update({'cm': [{'title': 30502, 'query': {'action': 'deleteBookmark', 'url': json.dumps(bookmark)}}]})\n\n self.list = sorted(self.list, key=lambda k: k['title'].lower())\n\n directory.add(self.list, content='videos')\n\n @cache_method(172800)\n def yt_playlists(self):\n\n return youtube.youtube(key=self.yt_key).playlists(self.yt_channel)\n\n @cache_method(3600)\n def yt_videos(self):\n\n return youtube.youtube(key=self.yt_key).videos(self.yt_channel, limit=2)\n\n @cache_method(3600)\n def yt_playlist(self, url):\n\n return youtube.youtube(key=self.yt_key).playlist(url)\n\n def archive(self):\n\n self.list = 
self.yt_playlists()\n\n if self.list is None:\n return\n\n for i in self.list:\n i['title'] = client.replaceHTMLCodes(i['title'])\n i.update({'action': 'episodes'})\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['bookmark'] = i['url']\n i.update({'cm': [{'title': 30501, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}]})\n\n control.sortmethods('title')\n\n directory.add(self.list, content='videos')\n\n def shows(self, url):\n\n self.list = self.generic_listing(url)\n\n if self.list is None:\n return\n\n for i in self.list:\n\n i.update({'action': 'episodes'})\n\n bookmark = dict((k, v) for k, v in iteritems(i) if not k == 'next')\n bookmark['bookmark'] = i['url']\n\n i.update({'cm': [{'title': 30501, 'query': {'action': 'addBookmark', 'url': json.dumps(bookmark)}}]})\n\n directory.add(self.list, content='videos')\n\n @cache_method(3600)\n def pod_listing(self, url):\n\n html = client.request(url)\n\n listing = client.parseDOM(html, 'div', attrs={'class': 'row border-bottom pt-4 m-0 show-item'})\n\n nexturl = re.sub(r'\\d(?!\\d)', lambda x: str(int(x.group(0)) + 1), url)\n\n for item in listing:\n\n title = client.parseDOM(item, 'h3')[0].replace(''', '\\'')\n if title.startswith(')', select, re.S)\n\n for pod in pods:\n\n date = re.search(r'(\\d{2}/\\d{2}/\\d{4})', pod).group(1)\n title = ' - '.join([client.parseDOM(html, 'h2', attrs={'class': 'mb-3.+?'})[0], date])\n url = ''.join([self.radio_base, re.search(r'data-url = \"([\\w\\-/]+)\"', pod).group(1)])\n\n self.list.append({'title': title, 'image': image, 'url': url})\n\n return self.list\n\n def episodes(self, url):\n\n if self.base_link in url:\n self.list = self.episodes_listing(url)\n elif self.radio_base in url:\n self.list = self.pod_episodes(url)\n else:\n self.list = self.yt_playlist(url)\n\n if self.list is None:\n\n return\n\n for i in self.list:\n\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list, content='videos')\n\n @cache_method(3600)\n def video_listing(self, url):\n\n html = client.request(url)\n\n try:\n nexturl = ''.join(\n [\n self.old_base, '/videos',\n client.parseDOM(html, 'a', attrs={'rel': 'next'}, ret='href')[0].replace('&', '&')\n ]\n )\n except IndexError:\n nexturl = None\n\n video_list = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-url')\n thumbnails = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-poster')\n titles = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-name')\n dates = client.parseDOM(html, 'div', attrs={'class': 'videoItem cell'}, ret='data-video-date')\n\n listing = list(zip(titles, dates, thumbnails, video_list))\n\n for title, date, image, video in listing:\n\n title = client.replaceHTMLCodes(title)\n\n label = ''.join([title, ' ', '(', date, ')'])\n\n self.list.append(\n {\n 'title': label, 'image': image, 'url': video, 'next': nexturl, 'nextlabel': 30500,\n 'nextaction': 'videos'\n }\n )\n\n return self.list\n\n def videos(self, url):\n\n self.list = self.video_listing(url)\n\n if self.list is None:\n return\n\n for i in self.list:\n\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list)\n\n def latest(self):\n\n self.list = self.yt_videos()\n\n if self.list is None:\n return\n\n self.list = [i for i in self.list if int(i['duration']) > 60]\n\n for i in self.list:\n i.update({'action': 'play', 'isFolder': 'False'})\n\n directory.add(self.list)\n\n def news(self):\n\n self.list = [\n 
{\n 'title': 30011,\n 'action': 'episodes',\n 'icon': 'news.png',\n 'url': ''.join([self.base_link, '/show/enimerosi/oi-eidiseis-tou-ska-stis-2/sezon-2021-2022'])\n }\n ,\n {\n 'title': 30012,\n 'action': 'episodes',\n 'icon': 'news.png',\n 'url': ''.join([self.base_link, '/show/enimerosi/ta-nea-tou-ska-stis-2000/sezon-2021-2022'])\n }\n ,\n {\n 'title': 30005,\n 'action': 'videos',\n 'icon': 'latest.png',\n 'url': ''.join([self.old_base, '/videos?type=recent'])\n }\n ,\n {\n 'title': 30016,\n 'action': 'videos',\n 'icon': 'popular.png',\n 'url': ''.join([self.old_base, '/videos?type=popular'])\n }\n ,\n {\n 'title': 30017,\n 'action': 'videos',\n 'icon': 'recommended.png',\n 'url': ''.join([self.old_base, '/videos?type=featured'])\n }\n ]\n\n directory.add(self.list, content='videos')\n\n def play(self, url):\n\n resolved = self.resolve(url)\n\n if 'youtu' in resolved:\n resolved = self.yt_session(resolved)\n\n if isinstance(resolved, tuple):\n\n stream, plot = resolved\n meta = {'plot': plot}\n\n else:\n\n stream = resolved\n meta = None\n\n icon = None\n\n if url == self.live_link:\n\n icon = {'poster': control.icon(), 'icon': control.icon(), 'thumb': control.icon()}\n\n dash = ('dash' in stream or '.mpd' in stream or 'm3u8' in stream) and control.kodi_version() >= 18.0\n\n directory.resolve(\n url=stream, meta=meta, dash=dash, icon=icon,\n mimetype='application/vnd.apple.mpegurl' if 'm3u8' in stream else None,\n manifest_type='hls' if '.m3u8' in stream else None\n )\n\n @cache_method(1440)\n def generic_listing(self, url):\n\n html = client.request(url)\n\n if url == self.news_link:\n new = 'row m-0 listrow new-videos'\n new_items = 'col-12 pl-0 pr-0 list1 list-item color_enimerosi'\n archive = 'row m-0 listrow s234 '\n archived_items = 'col-12 pl-0 pr-0 list1 list-item color_enimerosi'\n elif url == self.entertainment_link:\n new = 'row listrow list2 '\n new_items = 'd-none d-md-block col-md-4 listimg color_psuchagogia'\n archive = 'row listrow list2 s234 '\n archived_items = 'd-none d-md-block col-md-3 listimg color_psuchagogia'\n else:\n new = 'row listrow list2 '\n new_items = 'd-none d-md-block col-md-4 listimg color_seires'\n archive = 'row listrow list2 s234 '\n archived_items = 'd-none d-md-block col-md-3 listimg color_seires'\n\n div = client.parseDOM(html, 'div', attrs={'class': new})[0]\n\n listing = client.parseDOM(div, 'div', attrs={'class': new_items})\n\n for item in listing:\n\n title = client.parseDOM(item, 'h3')[0]\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n if 's234' in html:\n\n div = client.parseDOM(html, 'div', attrs={'class': archive})[0]\n items = client.parseDOM(div, 'div', attrs={'class': archived_items})\n\n for item in items:\n\n title = ' - '.join([client.parseDOM(item, 'h3')[0], control.lang(30013)])\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n return self.list\n\n @cache_method(180)\n def episodes_listing(self, url):\n\n html = client.request(url)\n\n div = client.parseDOM(html, 'div', attrs={'class': 'row listrow list2 ?'})[0]\n\n listing = [i.text for i in itertags(div, 'div')]\n\n for item in listing:\n\n try:\n title = client.parseDOM(item, 'h3')[0].replace('
', ' ').replace('
', ' ')\n except Exception:\n continue\n image = client.parseDOM(item, 'img', ret='src')[0]\n\n url = ''.join([self.base_link, client.parseDOM(item, 'a', ret='href')[0]])\n\n self.list.append({'title': title, 'url': url, 'image': image})\n\n return self.list\n\n @cache_method(720)\n def episode_resolver(self, url):\n\n html = client.request(url)\n\n if url.startswith(self.radio_base):\n\n url = re.search(r'[\"\\'](.+?\\.mp3)[\"\\']', html).group(1)\n\n return url\n\n else:\n\n json_ = re.search(r'var data = ({.+})', html).group(1)\n\n json_ = json.loads(json_)\n\n url = ''.join([self.play_link, json_['episode'][0]['media_item_file'], '/chunklist.m3u8'])\n\n plot = client.stripTags(json_['episode'][0]['descr'])\n\n return url, plot\n\n def resolve(self, url):\n\n if url == self.live_link:\n\n html = client.request(self.live_link)\n\n json_ = re.search(r'var data = ({.+?});', html).group(1)\n\n json_ = json.loads(json_)\n\n return json_['now']['livestream']\n\n elif len(url) == 11:\n\n link = self.yt_session(url)\n\n return link\n\n elif 'episode' in url:\n\n return self.episode_resolver(url)\n\n else:\n\n return url\n\n @staticmethod\n def yt_session(link):\n\n streams = yt_resolver(link)\n\n try:\n addon_enabled = control.addon_details('inputstream.adaptive').get('enabled')\n except KeyError:\n addon_enabled = False\n\n if not addon_enabled:\n\n streams = [s for s in streams if 'mpd' not in s['title']]\n\n stream = streams[0]['url']\n\n return stream\n","repo_name":"gggbbbuuu/GM","sub_path":"addons/plugin.video.skai.gr/resources/lib/skai.py","file_name":"skai.py","file_ext":"py","file_size_in_byte":16294,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"}
+{"seq_id":"21518654853","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom argparse import ArgumentParser\r\nfrom collections import namedtuple\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\n\r\nfrom PIL import Image\r\n\r\n\r\nRectangle = namedtuple('Recltangle', 'x,y,width,height')\r\n\r\nparser = ArgumentParser(\r\n '''\r\n Compare two rectangular areas of an image\r\n '''\r\n)\r\nparser.add_argument(\r\n '--rectangle1',\r\n nargs=4,\r\n type=int,\r\n required=True,\r\n help='''\r\n X, Y, Width and Height of the first rectangle to compare\r\n ''',\r\n)\r\nparser.add_argument(\r\n '--rectangle2',\r\n nargs=4,\r\n type=int,\r\n required=True,\r\n help='''\r\n X, Y, Width and Height of the second rectangle to compare\r\n ''',\r\n)\r\nparser.add_argument(\r\n 'image',\r\n help='''\r\n The image whose areas to compare\r\n ''',\r\n)\r\n\r\n\r\ndef split_and_avg_channels(sample):\r\n return (\r\n sample[:, :, 0].mean(),\r\n sample[:, :, 1].mean(),\r\n sample[:, :, 2].mean(),\r\n )\r\n\r\n\r\ndef main():\r\n args = parser.parse_args()\r\n image = Image.open(args.image)\r\n nparray = np.array(image)\r\n sample_rect1 = Rectangle(*args.rectangle1)\r\n sample_rect2 = Rectangle(*args.rectangle2)\r\n sample1 = nparray[\r\n sample_rect1.y : sample_rect1.y + sample_rect1.height,\r\n sample_rect1.x : sample_rect1.x + sample_rect1.width\r\n ]\r\n sample2 = nparray[\r\n sample_rect2.y : sample_rect2.y + sample_rect2.height,\r\n sample_rect2.x : sample_rect2.x + sample_rect2.width\r\n ]\r\n r1, g1, b1 = split_and_avg_channels(sample1)\r\n r2, g2, b2 = split_and_avg_channels(sample2)\r\n print('R1 = {}, G1 = {}, B1 = {}'.format(r1, g1, b1))\r\n print('R2 = {}, G2 = {}, B2 = {}'.format(r2, g2, b2))\r\n\r\nmain()\r\n# with love from makeda and oleg","repo_name":"drcandacemakedamoore/opensource4medicine","sub_path":"compare_swatches.py","file_name":"compare_swatches.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"}
+{"seq_id":"40272958307","text":"# RECEIVER\nimport socket\nimport struct\n\nMCAST_GRP = '224.1.1.1'\nMCAST_PORT = 5004\n\n# Multicast\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\nsock.bind(('', MCAST_PORT))\nmreq = struct.pack(\"4sl\", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)\n\nsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\n# Broadcast\nUDP_IP = '127.0.0.2' # Mengikat ke semua antarmuka jaringan yang tersedia\nUDP_PORT = 5006\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nclient_socket.bind((UDP_IP, UDP_PORT))\n\n\nwhile True:\n # Multicast\n print(f\"ini multicast : {sock.recv(10240)}\")\n \n # Broadcast\n data, address = client_socket.recvfrom(1024)\n print(f\"Menerima data dari {address}: {data.decode()}\")","repo_name":"IvanSholana/TubesProgjar","sub_path":"Client_2.py","file_name":"Client_2.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"73649779836","text":"# 約数の数を数える\ndef divisor_count(num):\n if num == 1:\n return 1\n if num < 1:\n return 0\n result = 1\n\n calcNum = num\n count = 0\n\n while calcNum % 2 == 0:\n calcNum //= 2\n count += 1\n\n if count != 0:\n result *= count + 1\n\n suggest_prime = 3\n while suggest_prime <= calcNum:\n count = 0\n while calcNum % suggest_prime == 0:\n calcNum //= suggest_prime\n count += 1\n if count != 0:\n result *= count + 1\n suggest_prime += 2\n\n return result\n\ncount = 500\ntriangle = 1\n\ntmp_num = 2\nwhile divisor_count(triangle) <= count:\n triangle += tmp_num\n tmp_num += 1\n\nprint(triangle)\n","repo_name":"KKishikawa/project-euler-for-study-code","sub_path":"011-020/012.py","file_name":"012.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"74676260155","text":"class Node:\r\n def __init__(self, data):\r\n self.data = data\r\n self.next = None\r\n\r\n\r\nclass Stack:\r\n def __init__(self):\r\n self.head = None\r\n self.minimum = []\r\n self.size = 0\r\n\r\n def get_size(self):\r\n return self.size\r\n\r\n def push(self, data):\r\n node = Node(data)\r\n if self.head == None:\r\n self.head = node\r\n else:\r\n node.next = self.head\r\n self.head = node\r\n self.minimum.append(node.data)\r\n self.size += 1\r\n\r\n def print_list(self):\r\n current = self.head\r\n show = \"\"\r\n while current != None:\r\n show += str(current.data) + ' '\r\n current = current.next\r\n return show\r\n\r\n def pop(self):\r\n if self.head == None:\r\n self.minimum = []\r\n self.size = 0\r\n return None\r\n else:\r\n tmp = self.head\r\n self.head = tmp.next\r\n self.minimum.remove(tmp.data)\r\n\r\n if tmp.data is not None:\r\n self.size += 1\r\n return tmp.data\r\n\r\n\r\nclass SetOfStacks:\r\n def __init__(self):\r\n self.stacks = []\r\n self.capacity = 3\r\n self.number_stacks = 0\r\n\r\n def push(self, data):\r\n if len(self.stacks) == 0:\r\n stack = Stack()\r\n stack.push(data)\r\n self.stacks.append(stack)\r\n\r\n else:\r\n current_stack = self.stacks[self.number_stacks]\r\n if current_stack.get_size() >= 3:\r\n new_stack = Stack()\r\n new_stack.push(data)\r\n self.stacks.append(new_stack)\r\n self.number_stacks += 1\r\n else:\r\n current_stack.push(data)\r\n\r\n def pop(self):\r\n last_index = len(self.stacks) - 1\r\n last_stack = self.stacks[last_index]\r\n last_stack.pop()\r\n if last_stack.get_size() == 0:\r\n self.number_stacks -= 1\r\n del self.stacks[last_index]\r\n\r\n def print_list(self):\r\n show = \"\"\r\n for i in range(len(self.stacks)):\r\n show += \"Stack \" + str(i+1) + \": \"\r\n stack = self.stacks[i]\r\n show += stack.print_list()\r\n show += \"\\n\"\r\n print(show)\r\n\r\n\r\nset_of_stacks = SetOfStacks()\r\nset_of_stacks.push(3)\r\nset_of_stacks.push(4)\r\nset_of_stacks.push(5)\r\nset_of_stacks.push(1)\r\nset_of_stacks.push(0)\r\nset_of_stacks.push(7)\r\nset_of_stacks.push(9)\r\n\r\nset_of_stacks.print_list()\r\nset_of_stacks.pop()\r\nset_of_stacks.pop()\r\nset_of_stacks.pop()\r\nset_of_stacks.print_list()","repo_name":"zalogarciam/CrackingTheCodeInterview","sub_path":"Chapter 3/StackOfPlates.py","file_name":"StackOfPlates.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"16500462601","text":"from qtpy import QtCore, QtGui, QtWidgets\nfrom graide.utils import ModelSuper, DataObj\nimport traceback\nimport os\n\n#for line in traceback.format_stack(): print(line.strip())\n\n\"\"\"\n Here is a summary of how the Python Slots and Signals interact to update the Glyph tab when a glyph is clicked:\n\n Set up connections:\n runView.glyphSelect.connect(passesView.changeGlyph)\n mainWindow.tab_passes.glyphSelected.connect(mainWindow.glyphSelected)\n mainWindow.tab_passes.glyphSelected.connect(mainwindow.glyphAttrib.changeData)\n\n Then when a glyph is clicked on:\n RunView::changeSelection\n calls self.glyphSelected.emit (defined as Signal)\n PassesView::changeGlyph\n calls self.glyphSelected.emit (defined as Signal)\n MainWindow::glyphSelected\n ...\n AttribView::changeData\n\"\"\"\n\nclass LinePlainTextEdit(QtWidgets.QPlainTextEdit) :\n\n editFinished = QtCore.Signal()\n\n def keyPressEvent(self, key) :\n if key.matches(QtGui.QKeySequence.InsertParagraphSeparator) :\n # or key.matches(QtGui.QKeySequence.InsertLineSeparator) :\n self.editFinished.emit()\n else :\n return super(LinePlainTextEdit, self).keyPressEvent(key)\n \n\nclass AttrValueListDialog(QtWidgets.QDialog) :\n \n def __init__(self, parent, glyphName, gClassList) :\n super(AttrValueListDialog,self).__init__(parent)\n \n # Hide the help icon, all it does it take up space.\n #icon = self.windowIcon() -- just in case icon gets lost\n flags = self.windowFlags()\n helpFlag = QtCore.Qt.WindowContextHelpButtonHint\n flags = flags & (~helpFlag)\n self.setWindowFlags(flags)\n #self.setWindowIcon(icon)\n\n self.setWindowTitle(glyphName)\n listWidget = QtWidgets.QListWidget(self)\n #listWidget.clicked.connect(self.doReturn)\n itemHeight = 18\n cnt = 0\n for gClass in gClassList:\n if gClass == \"\" or gClass == \" \" :\n continue\n \n item = QtWidgets.QListWidgetItem(gClass)\n item.setSizeHint(QtCore.QSize(200, itemHeight))\n listWidget.addItem(item)\n cnt = cnt + 1\n \n listWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n if cnt <= 25 :\n displayCnt = 4 if cnt < 5 else cnt\n listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n # It's okay if the list and dialog widths don't match, since there's no scroll bar.\n # Make the list widget wide enough that they can expand the dialog and see wide names.\n listWidget.setFixedWidth(300)\n self.setMinimumWidth(200)\n else :\n displayCnt = 25\n listWidget.setFixedWidth(300) # make it wide enough to handle long names\n self.setMinimumWidth(300)\n #listWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)\n \n listWidget.setFixedHeight(displayCnt * itemHeight + 10)\n self.setMinimumHeight(displayCnt * itemHeight + 10)\n \n # end of __init_\n \n \n def doReturn(self) :\n self.done(0) # close\n \n# end of class AttrValueListDialog\n\n\nclass AttributeDelegate(QtWidgets.QStyledItemDelegate) :\n\n def __init__(self, parent) :\n super(AttributeDelegate, self).__init__(parent)\n self.parent = parent\n\n def createEditor(self, parent, option, index) :\n dat = index.data()\n if index.column() == 0 :\n pass\n elif index.column() == 1 and dat and len(dat) > 20 :\n editor = LinePlainTextEdit(parent)\n editor.editFinished.connect(self.commitAndCloseEditor)\n editor.setMinimumHeight(100)\n return editor\n else :\n return super(AttributeDelegate, self).createEditor(parent, option, index)\n\n def setEditorData(self, editor, index) :\n if index.column() == 1 and len(index.data()) > 20 :\n editor.setPlainText(index.data())\n else :\n 
super(AttributeDelegate, self).setEditorData(editor, index)\n\n def setModelData(self, editor, model, index) :\n if index.column() == 1 and index.data and len(index.data()) > 20 :\n model.setData(index, editor.toPlainText(), QtCore.Qt.EditRole)\n else :\n super(AttributeDelegate, self).setModelData(editor, model, index)\n\n def commitAndCloseEditor(self) :\n editor = self.sender()\n self.commitData.emit(editor)\n self.closeEditor.emit(editor)\n \n#end of class AttributeDelegate\n\n\nclass Attribute(object) :\n\n def __init__(self, name, getter, setter, isTree = False, fileLoc = None, extraPath = None, listPopup = False, *params) :\n self.name = name\n self.setter = setter\n self.getter = getter\n self.params = params\n self.isTree = isTree # debugging\n self.tree = params[0] if isTree else None # an AttribModel, if this has an embedded tree\n if fileLoc and extraPath:\n fileLocFile = os.path.join(extraPath, fileLoc[0]) # make file path relative to GDX file\n fileLoc = (fileLocFile, fileLoc[1])\n self.fileLoc = fileLoc\n self.doesListPopup = listPopup\n \n def child(self, row) :\n if self.tree :\n return self.tree.child(row)\n return None\n\n def childNumber(self, row) :\n if self.tree :\n return self.tree.rowCount(None)\n return 0\n\n def getData(self, column) :\n if column == 0 :\n return self.name\n elif column == 1 and self.getter :\n return self.getter(*self.params)\n return None\n\n def setData(self, column, value) :\n if column == 0 and value:\n self.name = value\n return True\n elif column == 1 and self.setter:\n params = list(self.params[:]) + [value]\n self.setter(*params)\n return True\n return False\n\n def isEditable(self, column) :\n if self.setter : return True\n return False\n \n def getFileLoc(self, treePath) :\n if self.fileLoc :\n return self.fileLoc\n elif self.tree :\n return self.tree.fileLocAt(treePath) # tree is an AttribModel\n else :\n return None\n \n def listForPopup(self) :\n if self.doesListPopup :\n classListStr = self.getData(1)\n # turn into list\n res = classListStr.split(' ') # two spaces\n res.sort()\n return res\n else :\n return None\n \n def showPopupList(self, listToShow, widget) :\n dialog = AttrValueListDialog(widget, self.name, listToShow)\n dialog.show() # modeless\n\n \n def debugPrintData(self) :\n print(self.name)\n if self.isTree : \n print(\">>>\")\n self.tree.debugPrintData()\n print(\"<<<\")\n\n# end of class Attribute\n\n\n# An AttribModel consists of a list of Attributes, corresponding to a row in the AttribView control.\n# An Attribute can be a sub-tree which in turn contains an AttribModel with the list of sub-items.\n\nclass AttribModel(QtCore.QAbstractItemModel) :\n\n def __init__(self, data, parent = None, root = None) : # data is a list of Attributes\n super(AttribModel, self).__init__(parent)\n self.__data = data\n self.__root = root if root else self\n self.__parent = parent\n \n def add(self, data) :\n self.__data.append(data)\n\n def rowCount(self, parent) :\n if not parent or not parent.isValid() :\n return len(self.__data)\n else :\n pitem = self.getItem(parent)\n return pitem.__data[parent.row()].childNumber(parent.row())\n\n def columnCount(self, parent) :\n return 2\n\n def data(self, index, role) :\n if not index.isValid() or (role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole) :\n return None\n\n item = self.getItem(index)\n dat = item.__data[index.row()]\n return dat.getData(index.column())\n\n def flags(self, index) :\n res = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable\n if index and index.isValid() 
:\n item = self.getItem(index)\n dat = item.__data[index.row()]\n if dat.isEditable(index.column()) :\n res |= QtCore.Qt.ItemIsEditable\n return res\n\n def child(self, num) :\n return self.__data[num]\n\n def getChildRow(self, model) :\n for i, d in enumerate(self.__data) :\n if len(d.params) and d.params[0] == model :\n return i\n return -1\n \n def parent(self, index) :\n if index and index.isValid() :\n child = self.getItem(index)\n parent = child.__parent\n if parent :\n row = parent.getChildRow(child)\n if row >= 0 :\n return parent.createIndex(row, 0, parent)\n return QtCore.QModelIndex()\n\n def getItem(self, index) :\n if index and index.isValid() :\n item = index.internalPointer()\n if item : return item\n return self\n\n def index(self, row, column, parent = None) :\n if not parent or not parent.isValid() :\n return self.createIndex(row, column, self.__root)\n else :\n parentModel = self.getItem(parent)\n parentItem = parentModel.__data[parent.row()]\n if parentItem.tree :\n return self.createIndex(row, column, parentItem.tree)\n return QtCore.QModelIndex()\n\n def setData(self, index, value, role) :\n if role != QtCore.Qt.EditRole:\n return False\n item = self.getItem(index)\n attrib = item.__data[index.row()]\n res = attrib.setData(index.column(), value)\n if res :\n self.__root.dataChanged.emit(index, index)\n return res\n \n def fileLocAt(self, treePath) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.getFileLoc(treePath[1:])\n \n def listForPopup(self, treePath) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.listForPopup()\n \n def showPopupList(self, treePath, listToShow, widget) :\n i = treePath[0]\n attrData = self.__data[i]\n return attrData.showPopupList(listToShow, widget) \n\n def debugPrintData(self) :\n print(self.__data)\n for d in self.__data :\n d.debugPrintData()\n\n# end of class AttribModel\n \n\nclass AttribView(QtWidgets.QTreeView) :\n\n def __init__(self, app, parent = None) :\n super(AttribView, self).__init__(parent)\n self.app = app\n self.header().setStretchLastSection(True)\n self.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n self.header().hide()\n self.attribDelegate = AttributeDelegate(self)\n #self.setItemDelegateForColumn(1, self.attribDelegate)\n\n @QtCore.Slot(DataObj, ModelSuper)\n def changeData(self, data, modelBogus) : # data is a Slot, GraideGlyph, etc.; modelBogus is eg RunView\n self.data = data\n self.model = data.attribModel() if data else None\n self.setModel(self.model)\n self.expandAll()\n \n def dataObject(self) :\n try :\n return self.data\n except :\n return None\n\n def removeCurrent(self) :\n index = self.currentIndex()\n self.model.setData(index, None, QtCore.Qt.EditRole)\n \n def mouseDoubleClickEvent(self, event) :\n #print(\"mouseDoubleClickEvent\")\n super(AttribView, self).mouseDoubleClickEvent(event)\n \n # Generate a path to where the click was in the tree control.\n row = self.currentIndex().row()\n parentIndex = self.currentIndex().parent()\n treePath = [row]\n while parentIndex.row() > -1 :\n treePath.insert(0, parentIndex.row()) # prepend\n parentIndex = parentIndex.parent()\n \n pList = self.model.listForPopup(treePath)\n if pList :\n self.model.showPopupList(treePath, pList, self)\n else :\n fileLoc = self.model.fileLocAt(treePath)\n if fileLoc : \n self.app.selectLine(*fileLoc)\n\n def findMainFileLoc(self) :\n treePath = [0] # for Glyph tab, assumes glyph number is the first\n fileLoc = self.model.fileLocAt(treePath)\n if fileLoc :\n 
self.app.selectLine(*fileLoc)\n \n# end of class AttribView\n\n\nif __name__ == '__main__' :\n\n from graide.font import GraideFont\n import sys, os\n \n app = QtWidgets.QApplication(sys.argv)\n font = GraideFont()\n tpath = os.path.join(os.path.dirname(sys.argv[0]), '../../tests/fonts/Padauk')\n font.loadFont(os.path.join(tpath, 'Padauk.ttf'), os.path.join(tpath, 'padauk.xml'))\n glyph = font.psnames['u1000']\n model = glyph.attribModel()\n view = AttribView(model)\n view.show()\n sys.exit(app.exec_())\n\n","repo_name":"silnrsi/graide","sub_path":"lib/graide/attribview.py","file_name":"attribview.py","file_ext":"py","file_size_in_byte":13158,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"96"}
+{"seq_id":"30122426228","text":"def no_ones(number: int) -> int:\n \"\"\"\n nr: 101010\n nr - 1: 101001\n\n nr & (nr - 1) == 101000\n \"\"\"\n\n ones = 0\n\n while number:\n ones += 1\n number = number & (number - 1)\n\n return ones\n\n\ndef solve_ok(number: int) -> int:\n \"\"\" O(k) complexity, where k == no ones \"\"\"\n return no_ones(number) % 2\n\n\ndef solve_efficient(number: int) -> int:\n number ^= number >> 32\n number ^= number >> 16\n number ^= number >> 8\n number ^= number >> 4\n number ^= number >> 2\n number ^= number >> 1\n return number & 1\n\n\nfor solve in [solve_ok, solve_efficient]:\n print(f\"Using {solve.__name__}\")\n\n for (use_case, expected_result) in [\n (int('101010101', 2), 1),\n (int('101010100', 2), 0),\n (int('0', 2), 0),\n (int('1', 2), 1),\n ]:\n result = solve(use_case)\n assert result == expected_result, \\\n f\"Invalid parity {result} for {use_case}, expected {expected_result}\"\n","repo_name":"vtemian/interviews-prep","sub_path":"elements-of-programming-interviews/5.1-parity.py","file_name":"5.1-parity.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"}
+{"seq_id":"74991847676","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nimport seaborn as sns\nimport numpy as np\n\nimport model as m\nimport cleanup_preprocessing as p\n\n# --------------------------------- Build model ---------------------------------\nmodel = m.cnn_model(input_shape=(68, 68, 3))\nmodel.summary()\n\n# --------------------------------- Train model ---------------------------------\nprint(\"\\nTraining model...\")\nmodel.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3), # OG: 1e-4\n loss='categorical_crossentropy',\n metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), tf.keras.metrics.AUC()])\n\nval_preds = model.predict(p.train_images)\n\nhistory = model.fit(p.x_train_augmented,\n validation_data=[p.val_images, p.val_labels],\n epochs=20,\n verbose=1,\n class_weight=p.class_weights_dict\n )\nprint(\"Training done!\\n\")\n\n# --------------------------------- Evaluate model ---------------------------------\nprint(\"Performing test..\")\nresults = model.evaluate(x=p.val_images, y=p.val_labels)\n\nfor i, metric in enumerate(model.metrics_names):\n print('Final validation {}: {}'.format(metric, results[i]))\n\n# --------------------------------- Plot confusion matrix ---------------------------------\nval_pred = np.argmax(val_preds, axis=1)\nval_true = np.argmax(p.train_labels, axis=1)\n\ncm = confusion_matrix(val_true, val_pred)\ncm = cm / cm.astype(float).sum(axis=1)[:, np.newaxis]\n\nfigure = plt.figure(figsize=(8, 8))\nsns.heatmap(cm,\n annot=True,\n cmap=plt.cm.Blues,\n xticklabels=p.label_dict.keys(),\n yticklabels=p.label_dict.keys())\nplt.tight_layout()\nplt.ylabel('True label')\nplt.xlabel('Predicted label')\nplt.show()\n","repo_name":"Berkanktk/SE05-DL","sub_path":"Assignment02/src/train-and-test.py","file_name":"train-and-test.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"22269458060","text":"from threading import Condition\n\nclass BoundedBlockingQueue(object):\n\n def __init__(self, capacity: int):\n self.queue = [None for i in range(capacity)]\n self.capacity = capacity\n self.head = 0\n self.tail = 0\n self.qsize = 0\n self.condition = Condition()\n\n def enqueue(self, element: int) -> None:\n # with self.lock is equivalent to acquiring a lock at start of with block \n # and releasing it at the end of with block\n with self.condition:\n while self.qsize >= self.capacity:\n self.condition.wait()\n\n if self.tail == self.capacity:\n self.tail = 0\n self.queue[self.tail] = element\n self.tail += 1\n self.qsize += 1\n self.condition.notify_all()\n\n\n def dequeue(self) -> int:\n with self.condition:\n while self.qsize == 0:\n self.condition.wait()\n\n if self.head == self.capacity:\n self.head = 0\n ans = self.queue[self.head]\n self.head += 1\n self.qsize -= 1\n self.condition.notify_all()\n return ans\n\n def size(self):\n return self.qsize\n\n","repo_name":"chetan8888/dsa-templates","sub_path":"bounded_queue_multithreading.py","file_name":"bounded_queue_multithreading.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"177545239","text":"import os, sys # A system-specific prefix, for working between linux and windows\nprefix = 'C:/' if os.name == 'nt' else '/home/leandro/'\nsys.path.append(os.path.join(prefix, 'gitRepos/bimvee')) # A path to this library\nname = 'parking1'\n\n#%%\nfrom bimvee.importRpgDvsRos import importRpgDvsRos\nfrom bimvee.split import cropTime\nfilePathOrName = os.path.join(prefix, 'data/ETH_HDR/'+name+'/'+name+'.bag')\n#inspected = importRpgDvsRos(filePathOrName=filePathOrName)\n\ntemplate = {\n 'ch0': {\n 'dvs': '/dvs/cam1/events'\n }\n }\n\n#imported = importRpgDvsRos(filePathOrName=filePathOrName, template=template)\n\n \n#%%\n\nimported = cropTime(importRpgDvsRos(filePathOrName=filePathOrName, \n template=template),\n startTime = 16,\n stopTime = 24)\n\n\n#imported['data'] = imported['data'].pop('ch0')\n#%% Choose to export only specific datatypes; \n# overwrite data if the export is already there\n\nfrom bimvee.exportIitYarp import exportIitYarp\n\nexportIitYarp(imported,\n exportFilePath = prefix+'data/ETH_HDR/'+name+'/'+name,\n pathForPlayback = prefix+'data/ETH_HDR/'+name+'/'+name,\n dataTypes = ['dvs'],\n protectedWrite = False)\n","repo_name":"event-driven-robotics/high-throughput-convolutions","sub_path":"src/python/ros2yarp.py","file_name":"ros2yarp.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"}
+{"seq_id":"13794742233","text":"from mcpi.minecraft import Minecraft\n\nmc = Minecraft.create()\n\n# level = world\n# # # # # # # #\n# home\nSonarNeedle = mc.getPlayerEntityId(\"SonarNeedle\")\nmc.entity.setPos(SonarNeedle,-200.32,33.0,-370.72)\n\n# lava home\nSirleech = mc.getPlayerEntityId(\"sirleech\")\nmc.entity.setPos(Sirleech,-157.824659,35.0,-372.25975)\n","repo_name":"sirleech/minecraft-python-scripts","sub_path":"TakeMeHome.py","file_name":"TakeMeHome.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"10617031789","text":"import tensorflow as tf\nimport matplotlib.pyplot as plt #we will use it to draw the learning curve after training the network\n\ntrain_x = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]\ntrain_y = [[0], [1], [1], [0]]\n\nINPUT_NEURONS = 2\nHIDDEN_NEURONS = 3\nOUTPUT_NEURONS = 1\n\nNUM_OF_EPOCHS = 100000\n\n\"\"\"(tf.float32, [None, 2]) specifies the datatype and the dimensions of the data.\n#Since we don't know the number of the training data, we make it None which means it accepts any from the user.\n#2 specifies that we have 2 input bits\n\"\"\"\nx = tf.placeholder(tf.float32, [None, 2])\ny_target = tf.placeholder(tf.float32, [None, 1])\n\n\"\"\"\n1- Create the Input-to-hidden weights and bias matrices from the given figure.\nThey should be Variable datatype because they will be changed during the learning process\n\"\"\"\ninput_hidden_weights = tf.Variable([[-0.99, 1.05, .19], [-0.43, -0.44, -0.30]]) #Init. from the given network\ninput_hidden_bias = tf.Variable(tf.ones([HIDDEN_NEURONS])) # The bias is one for each hidden neuron\n\n\"\"\"\n2- Get the values of the hidden layer by multiplying the features with the weight matrix [Input to Hidden feedforward]\nApply the hidden layer activation to the multiplication result\n\"\"\"\nhidden_neurons_values = tf.matmul(x, input_hidden_weights) + input_hidden_bias\nhidden_activation_result = tf.nn.sigmoid(hidden_neurons_values)\n\n\"\"\"\n3- Create the hidden-to-output weights and bias matrices from the given figure.\nThey should be Variable datatype because they will be changed during the learning process\n\"\"\"\n\nhidden_output_weights = tf.Variable([[0.18], [1.11], [-0.26]])\nhidden_output_bias = tf.Variable(tf.ones([1]))\n\n\"\"\"\n4- Get the values of the output layer by multiplying the hidden layer with the weight matrix [Hidden to Output feedforward]\nApply the output layer activation to the multiplication result\n\"\"\"\n\nhidden_output_value = tf.matmul(hidden_activation_result, hidden_output_weights) + hidden_output_bias\ny_estimated = tf.nn.sigmoid(hidden_output_value)\n\n\n\"\"\"\n5- Calculate the mean squared error given your prediction and the actual output\n\"\"\"\n\nmean_squared_error = 0.5 * tf.reduce_sum((tf.square(y_estimated - y_target)))\n\ntrain = tf.train.GradientDescentOptimizer(0.1).minimize(mean_squared_error)\n\n\n\"\"\"\nInitiate a Tensorflow graph and session variables\n\"\"\"\nsession = tf.Session()\nsession.run(tf.initialize_all_variables())\n\nerrors = []\nepochs = []\n\nfor i in range(0, NUM_OF_EPOCHS):\n session.run(train, feed_dict={x: train_x, y_target: train_y})\n\n if i % 10 == 0:\n print(\"Iteration number: \", i, \"\\n\")\n error = session.run(mean_squared_error, feed_dict={x: train_x, y_target: train_y})\n print(\"Cost: \", error, \"\\n\")\n errors.append(error)\n epochs.append(i)\n\n if error < 0.01:\n print(\"Input to hidden Weights\",\n session.run(input_hidden_weights, feed_dict={x: train_x, y_target: train_y}))\n print(\"Input to hidden bias\",\n session.run(input_hidden_bias, feed_dict={x: train_x, y_target: train_y}))\n print(\"Hidden to output weight\",\n session.run(hidden_output_weights, feed_dict={x: train_x, y_target: train_y}))\n print(\"Hidden to output bias\",\n session.run(hidden_output_bias, feed_dict={x: train_x, y_target: train_y}))\n\n plt.title(\"Learning Curve using mean squared error cost function\")\n print(\"Cost: \", error, \"\\n\")\n plt.xlabel(\"Number of Epochs\")\n plt.ylabel(\"Cost\")\n plt.plot(epochs, errors)\n plt.show()\n\n 
break","repo_name":"AhmedHani/FCIS-Machine-Learning-2017","sub_path":"Session4/Practical/Solution/XOR/xor_nn.py","file_name":"xor_nn.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"96"}
+{"seq_id":"4400121725","text":"import glob\nfrom abc import ABC, abstractmethod\nfrom contextlib import ExitStack, contextmanager\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom logging import getLogger\nfrom pathlib import Path\nfrom sys import stderr\nfrom tempfile import TemporaryDirectory\nfrom typing import Iterator, List, Optional, Union\nfrom urllib.parse import urlparse\nfrom uuid import uuid4\n\nimport boto3\nimport pandas as pd\nimport pyarrow.parquet as pq\nfrom pyarrow import lib as pyarrowlib\nfrom halo import Halo\n\nlogger = getLogger(__name__)\n\n\nclass InvalidCommandExcpetion(Exception):\n '''Exception for invalid command. Argment parser raises this Exception.\n '''\n pass\n\n\nclass FileNotFoundException(Exception):\n pass\n\n\nclass ParquetFile(ABC):\n '''Abstract ParquetFile.\n One object does not correspond one parquet file but one expression about file\n such as ./target.parquet, ./*.parquet, s3://bucket/foo.parquet or s3://bucket/*\n '''\n\n def __post_init__(self):\n self.validation()\n\n def validation(self) -> None:\n '''validate properties\n '''\n pass\n\n @abstractmethod\n def is_wildcard(self) -> bool:\n '''Return if this object correspond one or more object.\n '''\n raise NotImplementedError()\n\n @abstractmethod\n def resolve_wildcard(self) -> List['ParquetFile']:\n '''Return concrete Parquert file objects.\n '''\n raise NotImplementedError()\n\n @contextmanager\n @abstractmethod\n def get_local_path(self) -> Iterator[str]:\n '''Return local file path.\n If call this function of S3ParquetFile, return the path of downloaded file.\n '''\n raise NotImplementedError()\n\n @contextmanager\n def get_dataframe(self) -> pd.DataFrame:\n with self.get_local_path() as local_path:\n try:\n yield pq.read_table(local_path).to_pandas()\n except pyarrowlib.ArrowInvalid:\n print(f\"File({local_path}) cannot be read as parquet.\", file=stderr)\n yield None\n\n\n@dataclass\nclass LocalParquetFile(ParquetFile):\n '''Parquet file object on local disk\n '''\n path: str\n\n def is_wildcard(self) -> bool:\n return '*' in self.path\n\n def resolve_wildcard(self) -> List[ParquetFile]:\n return sorted(\n [LocalParquetFile(f) for f in glob.glob(self.path)],\n key=lambda x: x.path\n )\n\n @contextmanager\n def get_local_path(self) -> Iterator[str]:\n if self.is_wildcard():\n raise Exception('Please resolve first.')\n if not Path(self.path).exists():\n raise FileNotFoundException(f'File({self.path}) not found')\n yield self.path\n\n\n@dataclass\nclass S3ParquetFile(ParquetFile):\n '''Parquet file object on S3\n '''\n aws_session: boto3.Session\n bucket: str\n key: str\n endpoint_url: Optional[str] = None\n\n def validation(self):\n ''' key can have *. 
But it must be last of the string.\n '''\n if self.is_wildcard() and not self.key.index('*') in (-1, len(self.key) - 1):\n raise InvalidCommandExcpetion('You can use * only end of the path')\n\n def is_wildcard(self) -> bool:\n return '*' in self.key\n\n def resolve_wildcard(self) -> List[ParquetFile]:\n list_res = self.aws_session.client('s3', endpoint_url=self.endpoint_url)\\\n .list_objects_v2(\n Bucket=self.bucket,\n Prefix=self.key[:-1] # remove *\n )\n if list_res['IsTruncated']:\n raise Exception(f'Too much file match s3://{self.bucket}/{self.key}')\n\n if list_res['KeyCount'] == 0:\n return []\n keys = [e['Key'] for e in list_res['Contents']]\n return sorted(\n [S3ParquetFile(aws_session=self.aws_session, bucket=self.bucket, key=key, endpoint_url=self.endpoint_url) for key in keys],\n key=lambda x: x.key\n )\n\n @contextmanager\n def get_local_path(self) -> Iterator[str]:\n if self.is_wildcard():\n raise Exception('Please resolve first.')\n with TemporaryDirectory() as tmp_path:\n localfile = f'{tmp_path}/{uuid4()}.parquet'\n logger.info(f'Download stat parquet file on s3://{self.bucket}/{self.key} -> {localfile}')\n try:\n with Halo(text='Downloading from s3', spinner='dots', stream=stderr) as spinner:\n self.aws_session.resource('s3', endpoint_url=self.endpoint_url)\\\n .meta.client.download_file(self.bucket, self.key, localfile)\n spinner.info(f's3://{self.bucket}/{self.key} => {localfile}')\n except Exception:\n raise FileNotFoundException(f's3://{self.bucket}/{self.key} not found or cannot access')\n else:\n yield localfile\n\n\ndef get_aws_session(profile_name: Optional[str]) -> boto3.Session:\n return boto3.Session(profile_name=profile_name)\n\n\ndef _is_s3_file(filename: str) -> bool:\n return filename[:5] == 's3://'\n\n\ndef to_parquet_file(file_exp: str, awsprofile: Optional[str], endpoint_url: Optional[str]) -> ParquetFile:\n '''Transform file_exp to ParquetFile object.\n '''\n if _is_s3_file(file_exp):\n parsed_url = urlparse(file_exp)\n return S3ParquetFile(\n aws_session=get_aws_session(awsprofile),\n bucket=parsed_url.netloc,\n key=parsed_url.path[1:],\n endpoint_url=endpoint_url\n )\n else:\n return LocalParquetFile(\n path=file_exp\n )\n\n\n@contextmanager\ndef get_datafame_from_objs(objs: List[ParquetFile], head: Union[int, float] = None):\n '''Get pandas dataframe of ParquetFile object list.\n '''\n\n if head is None or head <= 0:\n head = float('inf')\n\n cumsum_row: int = 0\n dfs: List[pd.DataFrame] = []\n with ExitStack() as stack:\n for obj in objs:\n for pf in _resolve_wildcard(obj):\n df: Optional[pd.DataFrame] = stack.enter_context(pf.get_dataframe())\n if df is None:\n continue\n cumsum_row += len(df)\n dfs.append(df)\n\n if cumsum_row >= head:\n break\n if cumsum_row >= head:\n break\n if dfs:\n yield reduce(lambda x, y: pd.concat([x, y]), dfs)\n else:\n yield None\n\n\ndef _resolve_wildcard(obj: ParquetFile) -> List[ParquetFile]:\n if not obj.is_wildcard():\n return [obj]\n else:\n return obj.resolve_wildcard()\n","repo_name":"ktrueda/parquet-tools","sub_path":"parquet_tools/commands/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"96"}
+{"seq_id":"74475119354","text":"from ..lang import *\n\n\ndef test_defs_folder():\n assert DEFS_FOLDER.endswith('/.langtool')\n\n\nSAMPLE_LANG = '''\n{\n \"vowels\": \"aeiou\",\n \"consonants\": \"stpnmfhzdb\",\n \"sylpats\": [\"CV\", \"CCV\", \"CVC\", \"VC\", \"V\"]\n}\n'''\n\ndef test_syllables():\n l = Lang('foo', SAMPLE_LANG)\n for s in l.syllables:\n print(s)","repo_name":"dhh1128/langtool","sub_path":"langtool/tests/lang_test.py","file_name":"lang_test.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"24840969040","text":"#\n# @lc app=leetcode id=164 lang=python3\n#\n# [164] Maximum Gap\n#\n\n\nclass Solution:\n\n def maximumGap(self, nums: List[int]) -> int:\n if len(nums) < 2:\n return 0\n nums = sorted(nums)\n maximum = 0\n for i in range(len(nums)):\n maximum = max(maximum, nums[i]-nums[i-1])\n # print(start, end)\n return maximum\n","repo_name":"gusibi/leetcode","sub_path":"codes/164.maximum-gap.py","file_name":"164.maximum-gap.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"24976063434","text":"'''\nCreated on Jul 23, 2011\n\n@author: Rio\n'''\n#from mclevel import fromFile, loadWorldNumber, BoundingBox\n#from infiniteworld import MCInfdevOldLevel\n#from schematic import MCSchematic\nfrom pymclevel import *\n\nimport itertools\nimport traceback\nimport unittest\nimport tempfile\nimport logging\nimport shutil\nimport os\nimport numpy\nfrom numpy import *\nfrom logging import info\n#logging.basicConfig(format=u'%(levelname)s:%(message)s')\n#logging.getLogger().level = logging.INFO\n\nclass TempLevel(object):\n def __init__(self, filename):\n if not os.path.exists(filename):\n filename = os.path.join(\"testfiles\", filename)\n#def tempCopy(filename):\n if os.path.isdir(filename):\n tmpname = tempfile.mkdtemp(os.path.basename(filename))\n os.rmdir(tmpname)\n shutil.copytree(filename, tmpname)\n else:\n fd, tmpname = tempfile.mkstemp(os.path.basename(filename))\n os.close(fd)\n os.unlink(tmpname)\n shutil.copy(filename, tmpname)\n\n self.tmpname = tmpname\n self.level = fromFile(tmpname)\n\n#def tempRemove(filename):\n def __del__(self):\n self.level.close()\n del self.level\n filename = self.tmpname\n\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n os.unlink(filename)\n\nclass TestIndevLevel(unittest.TestCase):\n def setUp(self):\n self.srclevel = TempLevel(\"hell.mclevel\")\n self.indevlevel = TempLevel(\"hueg.mclevel\")\n\n def testEntities(self):\n level = self.indevlevel.level\n entityTag = Entity.Create(\"Zombie\")\n tileEntityTag = TileEntity.Create(\"Painting\")\n level.addEntity(entityTag)\n level.addTileEntity(tileEntityTag)\n schem = level.extractSchematic(level.bounds)\n level.copyBlocksFrom(schem, schem.bounds, (0, 0, 0))\n\n #raise Failure \n\n def testCopy(self):\n info(\"Indev level\")\n indevlevel = self.indevlevel.level\n srclevel = self.srclevel.level\n indevlevel.copyBlocksFrom(srclevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n assert((indevlevel.Blocks[0:64, 0:64, 0:64] == srclevel.Blocks[0:64, 0:64, 0:64]).all())\n\n def testFill(self):\n indevlevel = self.indevlevel.level\n indevlevel.fillBlocks(BoundingBox((0, 0, 0), (64, 64, 64,)), indevlevel.materials.Sand, [indevlevel.materials.Rock, indevlevel.materials.Dirt])\n indevlevel.saveInPlace()\n\n\nclass TestJavaLevel(unittest.TestCase):\n def setUp(self):\n self.creativelevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n\n def testCopy(self):\n indevlevel = self.indevlevel.level\n creativelevel = self.creativelevel.level\n\n creativelevel.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n assert(numpy.array((indevlevel.Blocks[0:64, 0:64, 0:64]) == (creativelevel.Blocks[0:64, 0:64, 0:64])).all())\n\n creativelevel.saveInPlace()\n #xxx old survival levels\n\n\nclass TestAlphaLevelCreate(unittest.TestCase):\n def testCreate(self):\n temppath = tempfile.mktemp(\"AlphaCreate\")\n self.alphaLevel = MCInfdevOldLevel(filename=temppath, create=True);\n\nclass TestAlphaLevel(unittest.TestCase):\n def setUp(self):\n #self.alphaLevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n self.alphalevel = TempLevel(\"PyTestWorld\")\n\n\n def testCreateChunks(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n for ch in list(level.allChunks): level.deleteChunk(*ch)\n level.createChunksInBox(BoundingBox((0, 0, 0), (32, 0, 32)))\n\n def testCopyConvertBlocks(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n 
level.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (256, 128, 256)), (-0, 0, 0))\n\n convertedSourceBlocks, convertedSourceData = indevlevel.convertBlocksFromLevel(level, indevlevel.Blocks[0:16, 0:16, 0:indevlevel.Height], indevlevel.Data[0:16, 0:16, 0:indevlevel.Height])\n assert (level.getChunk(0, 0).Blocks[0:16, 0:16, 0:indevlevel.Height] == convertedSourceBlocks).all()\n\n def testImportSchematic(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n schem = fromFile(\"schematics\\\\CreativeInABox.schematic\");\n level.copyBlocksFrom(schem, BoundingBox((0, 0, 0), (1, 1, 3)), (0, 64, 0));\n schem = MCSchematic(shape=(1, 1, 3))\n schem.copyBlocksFrom(level, BoundingBox((0, 64, 0), (1, 1, 3)), (0, 0, 0));\n convertedSourceBlocks, convertedSourceData = schem.convertBlocksFromLevel(level, schem.Blocks, schem.Data)\n assert (level.getChunk(0, 0).Blocks[0:1, 0:3, 64:65] == convertedSourceBlocks).all()\n\n def testRecreateChunks(self):\n level = self.alphalevel.level\n\n for x, z in itertools.product(xrange(-1, 3), xrange(-1, 2)):\n level.deleteChunk(x, z);\n level.createChunk(x, z)\n\n def testFill(self):\n level = self.alphalevel.level\n\n level.fillBlocks(BoundingBox((-11, 0, -7), (38, 128, 25)) , level.materials.WoodPlanks);\n c = level.getChunk(0, 0)\n assert (c.Blocks == 5).all()\n\n def testReplace(self):\n level = self.alphalevel.level\n\n level.fillBlocks(BoundingBox((-11, 0, -7), (38, 128, 25)) , level.materials.WoodPlanks, [level.materials.Dirt, level.materials.Grass]);\n\n def testSaveRelight(self):\n indevlevel = self.indevlevel.level\n level = self.alphalevel.level\n\n cx, cz = -3, -1;\n\n level.deleteChunk(cx, cz);\n\n level.createChunk(cx, cz);\n level.copyBlocksFrom(indevlevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (-96, 32, 0))\n\n level.generateLights();\n level.saveInPlace();\n\n\nclass TestSchematics(unittest.TestCase):\n def setUp(self):\n #self.alphaLevel = TempLevel(\"Dojo_64_64_128.dat\")\n self.indevlevel = TempLevel(\"hell.mclevel\")\n\n def testCreate(self):\n #info(\"Schematic from indev\")\n\n size = (64, 64, 64)\n schematic = MCSchematic(shape=size, filename=\"hell.schematic\", mats='Classic');\n level = self.indevlevel.level\n schematic.rotateLeft();\n\n self.failUnlessRaises(ValueError, lambda:(\n schematic.copyBlocksFrom(level, BoundingBox((-32, -32, -32), (64, 64, 64,)), (0, 0, 0))\n ))\n\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n assert((schematic.Blocks[0:64, 0:64, 0:64] == level.Blocks[0:64, 0:64, 0:64]).all())\n schematic.compress();\n\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), (64, 64, 64,)), (-32, -32, -32))\n assert((schematic.Blocks[0:32, 0:32, 0:32] == level.Blocks[32:64, 32:64, 32:64]).all())\n\n schematic.compress();\n\n schematic.saveInPlace();\n\n schem = fromFile(\"schematics\\CreativeInABox.schematic\");\n tempSchematic = MCSchematic(shape=(1, 1, 3))\n tempSchematic.copyBlocksFrom(schem, BoundingBox((0, 0, 0), (1, 1, 3)), (0, 0, 0))\n\n info(\"Schematic from alpha\")\n level = loadWorldNumber(1)\n for cx, cz in itertools.product(xrange(0, 4), xrange(0, 4)):\n try:\n level.createChunk(cx, cz)\n except ValueError:\n pass\n schematic.copyBlocksFrom(level, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))\n\n def testINVEditChests(self):\n info(\"INVEdit chest\")\n invFile = fromFile(\"schematics/Chests/TinkerersBox.inv\");\n info(\"Blocks: \", invFile.Blocks)\n info(\"Data: \", invFile.Data)\n info(\"Entities: \", invFile.Entities)\n info(\"TileEntities: \", 
invFile.TileEntities)\n #raise SystemExit;\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n","repo_name":"YRSNorwich/ProjectBlock","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"8261796904","text":"\"\"\"\n This module takes in a task and returns the decoded task\n\n You can instantiate by calling decoder(task_length)\n\n Task structure:\n\n Total - 128 bit\n Supported max DRAM - 4GB\n Min Block Size - 64kB\n Max allocable blocks- 256\n Max allocable Size - 8MB\n\n OPCODE Format for TFUs\n\n [7:0] Acc ID\n [23:8] Input0 memory\n [31:24] Input0 Size\n [47:32] Input1 memory\n [55:48] Input1 Size\n [71:56] Output memory\n [79:72] Output size\n [87:80] Control - Optional\n [95:88] Task ID - Optional\n [99:96] Process ID - Optional\n\n OPCODE Format for CPU\n\n [7:0] Instr ID\n [23:8] src0\n [39:24] src1\n [55:40] dst0\n [63:56] control - Optional\n\n\"\"\"\n\nimport sys\n\nimport accelerators\n\nclass TaskDecode:\n\n def __init__(self, task_len):\n self.task = '0' * task_len\n self.task_len = task_len\n self.task_dict = {}\n self.task_id = 0\n self.task_id_max = 1024*1024\n\n def run_cycle(self, task_tuple):\n task = task_tuple[0]\n task_valid = task_tuple[1]\n\n self.task = str(task)\n flag = False\n\n if(task_valid):\n flag = self.decode()\n\n # Decode success and input was valid\n if(flag):\n return (self.task_dict, 1)\n\n else:\n # Decode failed\n sys.exit(\"Invalid unidentified task: %s\" % (self.task))\n # Input was not valid, return the output valid to be 0\n else:\n return (None, 0)\n\n def decode(self):\n\n # First check whether the instruction format is for CPU/TFU\n instr = accelerators.instr_decode[int(self.task[0:2], 16)]\n\n if(instr in accelerators.iTFU):\n #Since each field is atleast 4 bits, we don't need binary rep\n acc_id = self.task[0:2]\n inp0_mem = self.task[2:10]\n inp0_size = self.task[10:12]\n inp1_mem = self.task[12:20]\n inp1_size = self.task[20:22]\n out_mem = self.task[22:30]\n out_size = self.task[30:32]\n control = self.task[32:34]\n\n #TEMP task id is local\n self.task_id= self.task_id + 1 if ((self.task_id + 1) < self.task_id_max) else 0\n task_id = str(self.task_id)\n\n # populate the task dict\n self.task_dict = {'accelerator' : instr,\n 'inp0_mem' : int(inp0_mem,16),\n 'inp0_size' : int(inp0_size,16),\n 'inp1_mem' : int(inp1_mem,16),\n 'inp1_size' : int(inp1_size,16),\n 'out0_mem' : int(out_mem,16),\n 'out0_size' : int(out_size,16),\n 'task_id' : task_id,\n 'control' : int(control,16),\n 'instrType' : 'TFU'\n }\n\n return True\n\n elif(instr in accelerators.iCPU):\n\n src0 = self.task[2:10]\n src1 = self.task[10:18]\n dst0 = self.task[18:26]\n control = self.task[26:28]\n\n #TEMP task id is local\n self.task_id= self.task_id + 1 if ((self.task_id + 1) < self.task_id_max) else 0\n task_id = str(self.task_id)\n\n # populate the task dict\n self.task_dict = {'accelerator' : instr,\n 'src0' : int(src0,16),\n 'src1' : int(src1,16),\n 'dst0' : int(dst0,16),\n 'control' : int(control,16),\n 'task_id' : task_id,\n 'instrType' : 'CPU'\n }\n\n return True\n\n else:\n # Unidentified instruction\n return False\n\nif __name__ == '__main__':\n\n decoder_hts = TaskDecode(16)\n print(decoder_hts.run_cycle(('1000R000R100R200', 1)))\n print(decoder_hts.run_cycle(('1000R000R100R200', 0)))\n print(decoder_hts.run_cycle(('06000000000000000000123123123', 1)))\n","repo_name":"hpu-developers/HPUSim","sub_path":"DUT/task_decode.py","file_name":"task_decode.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"40491512682","text":"\"\"\"\nThis script interfaces with canvas and codepost.io\nto transfer grades of any assignment from codepost.io\ninto the canvas gradebook.\n\nThis is intended to be run once after the conclusion of\nthe grading of an assignment. It will only ever update\ngrades in Canvas if the canvas grade is missing (*and*\nthe --commit option has been provided; otherwise a\nreport of potential actions will be produced). If\ngrades need to be changed after this run, it should be\ndone manually.\n\nTODO: this script is complete, but needs troubleshooting\n\n\"\"\"\nfrom config import config\nfrom course import course\nfrom codepostUtils import get_assignment_id\nfrom canvasUtils import getAssignments\nfrom canvasUtils import getGrade\nfrom canvasUtils import setGrade\nimport argparse\nimport codepost\nimport pprint\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"assignment_name\", help=\n \"\"\"The codePost assignment id to pull scores FROM\n \"\"\")\nparser.add_argument(\"--commit\", action='store_true', help=\n \"\"\"Commmits the changes to the gradebook. If this is not\n provided, the script will run in coward mode and not make\n any modifications, only reporting what would have been done.\n \"\"\")\nargs = parser.parse_args()\n\ncodepost.configure_api_key(config.codepost_api_key)\n\n# alternative:\n# codepost_assignment_id = codepostUtils.get_assignment_id(assignment_name)\nassignment_name = args.assignment_name\ncodepost_assignment_id = get_assignment_id(assignment_name)\nif codepost_assignment_id is None:\n print(f\"Codepost assignment for '{assignment_name}' not found\", file=sys.stderr)\n exit(1)\ncanvas_assignments = getAssignments(name=assignment_name)\nif not canvas_assignments:\n print(f\"Canvas assignment for '{assignment_name}' not found\", file=sys.stderr)\n exit(1)\nelif len(canvas_assignments) > 1:\n print(f\"Multiple Canvas assignments for '{assignment_name}' found!\", file=sys.stderr)\n exit(1)\ncanvas_assignment = canvas_assignments[0]\ncanvas_assignment_id = canvas_assignment.id\ncommit_to_canvas = args.commit\n\nprint(f\"Processing assignment '{assignment_name}' (codepost id={codepost_assignment_id}) to (canvas id={canvas_assignment_id})...\")\nif not commit_to_canvas:\n print(f\"\\t(Coward mode)\")\n\n# 1. 
Request the submissions for an assignment directly\nassignment_submissions = codepost.assignment.list_submissions(id=codepost_assignment_id)\n\n# email (codepost) -> (codepostScore)\n# non-finalized submissions will have a None value\n# no submits will not have a key\ncodepost_grades = {}\n\nfor submission in assignment_submissions:\n submission_id = submission.id\n students = submission.students\n # only the actual submission contains the grade\n the_submission = codepost.submission.retrieve(id=submission_id)\n #grade is only defined if it is finalized, otherwise it is None\n #despite the API docs, it is a float!!\n if not the_submission.isFinalized:\n print(f\"WARNING Submission {students} not finalized!\")\n grade = the_submission.grade\n for student in students:\n codepost_grades[student] = grade\n\n# for each student in roster:\n# retrieve canvas grade\n# report\nfor nuid,p in course.students.items():\n codepost_grade = None if p.canvasEmail not in codepost_grades else codepost_grades[p.canvasEmail]\n canvas_grade = getGrade(canvas_assignment_id, p.canvasId)\n print(f\"{p}:\")\n print(f\" codepost: {codepost_grade}\")\n print(f\" canvas: {canvas_grade}\")\n message = None\n if canvas_grade is not None:\n print(f\" Skipping, Canvas grade exists...\")\n else:\n # change canvas grade...\n if codepost_grade is None:\n log = f\" Updating Canvas grade to 0, No Submission...\"\n score = 0\n comment = \"No Submission\"\n else:\n log = f\" Updating Canvas grade to {codepost_grade}...\"\n score = codepost_grade;\n comment = None\n print(log)\n if commit_to_canvas:\n setGrade(canvas_assignment_id, p.canvasId, score, comment)\n\n#pprint.pprint(codepost_grades)\nif not commit_to_canvas:\n print(\"Cowardly refusing to commit grades to canvas; rerun with --commit if you wanna.\")\n","repo_name":"cbourke/ComputerScienceI","sub_path":"scripts/codepost/codepostToCanvas.py","file_name":"codepostToCanvas.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"96"}
+{"seq_id":"33384624193","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nlower_green = np.array([40, 100, 100])\nupper_green = np.array([100, 255, 255])\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # convert image into gray\n # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # converts to HSV format\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # detect only green\n mask = cv2.inRange(image, lower_green, upper_green)\n res = cv2.bitwise_and(image, image, mask=mask)\n\n # flip image\n # image = cv2.flip(image,1)\n # Display the resulting frame\n cv2.imshow('frame', res)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"rifa73/Covid19","sub_path":"GreenMask.py","file_name":"GreenMask.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"24321061986","text":"import csv\n\ndef write_to_csv(num_senders_desc, msgs_rcvd, both_dict_key):\n \"\"\"\n create a csv of person, sent and received columns\n \"\"\"\n\n file_for_question_1 = open('file_for_question_1.csv', 'w')\n\n with file_for_question_1:\n writer = csv.writer(file_for_question_1, lineterminator='\\n')\n writer.writerow(['person', 'sent', 'received'])\n for key in both_dict_key:\n try:\n # number of messages sent by a sender. need this try except clause\n # because the list of keys may have people that have received\n # an email and did not send an email and vice versa\n messages_sent = num_senders_desc[key]\n except:\n messages_sent = 0\n try:\n # number of messages received by a sender\n messages_received = msgs_rcvd[key]\n except:\n messages_received = 0\n\n writer.writerow((key, messages_sent, messages_received))\n\n return None\n","repo_name":"Allen8838/Data-Science-Projects","sub_path":"Red Owl/Modules/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"32182670369","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\n\"\"\"\nTASK 1:\nHow many different telephone numbers are there in the records? \nPrint a message:\n\"There are different telephone numbers in the records.\"\n\"\"\"\nunique_text_no = set(i for j in texts for i in j[:2])\nunique_call_no = set(i for j in calls for i in j[:2])\ncount = len(unique_text_no.union(unique_call_no))\nprint(\"There are {} different telephone numbers in the records.\".format(count))","repo_name":"Akshatt/unscramble-computer-science-problems","sub_path":"submit_Project 1/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}